file_path
stringlengths
21
207
content
stringlengths
5
1.02M
size
int64
5
1.02M
lang
stringclasses
9 values
avg_line_length
float64
1.33
100
max_line_length
int64
4
993
alphanum_fraction
float64
0.27
0.93
NVIDIA-Omniverse/PhysX/physx/source/lowleveldynamics/src/DySolverControl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef DY_SOLVER_CONTROL_H #define DY_SOLVER_CONTROL_H #include "DySolverCore.h" #include "DySolverConstraintDesc.h" namespace physx { namespace Dy { class BatchIterator { PX_NOCOPY(BatchIterator) public: PxConstraintBatchHeader* constraintBatchHeaders; PxU32 mSize; PxU32 mCurrentIndex; BatchIterator(PxConstraintBatchHeader* _constraintBatchHeaders, PxU32 size) : constraintBatchHeaders(_constraintBatchHeaders), mSize(size), mCurrentIndex(0) { } PX_FORCE_INLINE const PxConstraintBatchHeader& GetCurrentHeader(const PxU32 constraintIndex) { PxU32 currentIndex = mCurrentIndex; while((constraintIndex - constraintBatchHeaders[currentIndex].startIndex) >= constraintBatchHeaders[currentIndex].stride) currentIndex = (currentIndex + 1)%mSize; PxPrefetchLine(&constraintBatchHeaders[currentIndex], 128); mCurrentIndex = currentIndex; return constraintBatchHeaders[currentIndex]; } }; inline void SolveBlockParallel( PxSolverConstraintDesc* PX_RESTRICT constraintList, const PxI32 batchCount, const PxI32 index, const PxI32 headerCount, SolverContext& cache, BatchIterator& iterator, SolveBlockMethod solveTable[], const PxI32 iteration) { const PxI32 indA = index - (iteration * headerCount); const PxConstraintBatchHeader* PX_RESTRICT headers = iterator.constraintBatchHeaders; const PxI32 endIndex = indA + batchCount; for(PxI32 i = indA; i < endIndex; ++i) { PX_ASSERT(i < PxI32(iterator.mSize)); const PxConstraintBatchHeader& header = headers[i]; const PxI32 numToGrab = header.stride; PxSolverConstraintDesc* PX_RESTRICT block = &constraintList[header.startIndex]; // PT: TODO: revisit this one PxPrefetch(block[0].constraint, 384); for(PxI32 b = 0; b < numToGrab; ++b) { PxPrefetchLine(block[b].bodyA); PxPrefetchLine(block[b].bodyB); } //OK. We have a number of constraints to run... 
solveTable[header.constraintType](block, PxU32(numToGrab), cache); } } // PT: TODO: these "solver core" classes are mostly stateless, at this point they could just be function pointers like the solve methods. class SolverCoreGeneral : public SolverCore { public: bool mFrictionEveryIteration; SolverCoreGeneral(bool fricEveryIteration) : mFrictionEveryIteration(fricEveryIteration) {} // SolverCore virtual void solveVParallelAndWriteBack (SolverIslandParams& params, Cm::SpatialVectorF* Z, Cm::SpatialVectorF* deltaV) const PX_OVERRIDE PX_FINAL; virtual void solveV_Blocks (SolverIslandParams& params) const PX_OVERRIDE PX_FINAL; //~SolverCore }; // PT: TODO: we use "extern" instead of functions for TGS. Unify. SolveBlockMethod* getSolveBlockTable(); SolveBlockMethod* getSolverConcludeBlockTable(); SolveWriteBackBlockMethod* getSolveWritebackBlockTable(); } } #endif
4,478
C
36.016529
137
0.7682
NVIDIA-Omniverse/PhysX/physx/source/lowleveldynamics/src/DySleep.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef DY_SLEEP_H #define DY_SLEEP_H #include "PxvDynamics.h" #include "PxsRigidBody.h" #include "DySleepingConfigulation.h" namespace physx { namespace Dy { void sleepCheck(PxsRigidBody* originalBody, PxReal dt, bool enableStabilization, const Cm::SpatialVector& motionVelocity, bool hasStaticTouch); } } #endif
2,016
C
43.822221
144
0.768849
NVIDIA-Omniverse/PhysX/physx/source/lowleveldynamics/src/DySolverContact.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef DY_SOLVER_CONTACT_H #define DY_SOLVER_CONTACT_H #include "foundation/PxSimpleTypes.h" #include "foundation/PxVec3.h" #include "PxvConfig.h" #include "foundation/PxVecMath.h" namespace physx { using namespace aos; namespace Sc { class ShapeInteraction; } /** \brief A header to represent a friction patch for the solver. */ namespace Dy { struct SolverContactHeader { enum DySolverContactFlags { eHAS_FORCE_THRESHOLDS = 0x1 }; PxU8 type; //Note: mType should be first as the solver expects a type in the first byte. PxU8 flags; PxU8 numNormalConstr; PxU8 numFrictionConstr; //4 PxReal angDom0; //8 PxReal angDom1; //12 PxReal invMass0; //16 Vec4V staticFrictionX_dynamicFrictionY_dominance0Z_dominance1W; //32 //KS - minAppliedImpulseForFrictionW is non-zero only for articulations. This is a workaround for a case in articulations where //the impulse is propagated such that many links do not apply friction because their normal forces were corrected by the solver in a previous //link. This results in some links sliding unnaturally. 
This occurs with prismatic or revolute joints where the impulse propagation one one link //resolves the normal constraint on all links Vec4V normal_minAppliedImpulseForFrictionW; //48 PxReal invMass1; //52 PxU32 broken; //56 PxU8* frictionBrokenWritebackByte; //60 64 Sc::ShapeInteraction* shapeInteraction; //64 72 #if PX_P64_FAMILY PxU32 pad[2]; //64 80 #endif // PX_X64 PX_FORCE_INLINE void setStaticFriction(const FloatV f) { staticFrictionX_dynamicFrictionY_dominance0Z_dominance1W = V4SetX(staticFrictionX_dynamicFrictionY_dominance0Z_dominance1W, f); } PX_FORCE_INLINE void setDynamicFriction(const FloatV f) { staticFrictionX_dynamicFrictionY_dominance0Z_dominance1W = V4SetY(staticFrictionX_dynamicFrictionY_dominance0Z_dominance1W, f); } PX_FORCE_INLINE void setDominance0(const FloatV f) { staticFrictionX_dynamicFrictionY_dominance0Z_dominance1W = V4SetZ(staticFrictionX_dynamicFrictionY_dominance0Z_dominance1W, f); } PX_FORCE_INLINE void setDominance1(const FloatV f) { staticFrictionX_dynamicFrictionY_dominance0Z_dominance1W = V4SetW(staticFrictionX_dynamicFrictionY_dominance0Z_dominance1W, f); } PX_FORCE_INLINE FloatV getStaticFriction() const { return V4GetX(staticFrictionX_dynamicFrictionY_dominance0Z_dominance1W); } PX_FORCE_INLINE FloatV getDynamicFriction() const { return V4GetY(staticFrictionX_dynamicFrictionY_dominance0Z_dominance1W); } PX_FORCE_INLINE FloatV getDominance0() const { return V4GetZ(staticFrictionX_dynamicFrictionY_dominance0Z_dominance1W); } PX_FORCE_INLINE FloatV getDominance1() const { return V4GetW(staticFrictionX_dynamicFrictionY_dominance0Z_dominance1W); } PX_FORCE_INLINE void setStaticFriction(PxF32 f) { V4WriteX(staticFrictionX_dynamicFrictionY_dominance0Z_dominance1W, f); } PX_FORCE_INLINE void setDynamicFriction(PxF32 f) { V4WriteY(staticFrictionX_dynamicFrictionY_dominance0Z_dominance1W, f); } PX_FORCE_INLINE void setDominance0(PxF32 f) { V4WriteZ(staticFrictionX_dynamicFrictionY_dominance0Z_dominance1W, f); } PX_FORCE_INLINE void 
setDominance1(PxF32 f) { V4WriteW(staticFrictionX_dynamicFrictionY_dominance0Z_dominance1W, f); } PX_FORCE_INLINE PxF32 getStaticFrictionPxF32() const { return V4ReadX(staticFrictionX_dynamicFrictionY_dominance0Z_dominance1W); } PX_FORCE_INLINE PxF32 getDynamicFrictionPxF32() const { return V4ReadY(staticFrictionX_dynamicFrictionY_dominance0Z_dominance1W); } PX_FORCE_INLINE PxF32 getDominance0PxF32() const { return V4ReadZ(staticFrictionX_dynamicFrictionY_dominance0Z_dominance1W); } PX_FORCE_INLINE PxF32 getDominance1PxF32() const { return V4ReadW(staticFrictionX_dynamicFrictionY_dominance0Z_dominance1W); } }; #if !PX_P64_FAMILY PX_COMPILE_TIME_ASSERT(sizeof(SolverContactHeader) == 64); #else PX_COMPILE_TIME_ASSERT(sizeof(SolverContactHeader) == 80); #endif /** \brief A single rigid body contact point for the solver. */ struct SolverContactPoint { Vec4V raXn_velMultiplierW; Vec4V rbXn_maxImpulseW; PxF32 biasedErr; PxF32 unbiasedErr; PxF32 impulseMultiplier; PxU32 pad; PX_FORCE_INLINE FloatV getVelMultiplier() const { return V4GetW(raXn_velMultiplierW); } PX_FORCE_INLINE FloatV getImpulseMultiplier() const { return FLoad(impulseMultiplier); } PX_FORCE_INLINE FloatV getBiasedErr() const { return FLoad(biasedErr); } PX_FORCE_INLINE FloatV getMaxImpulse() const { return V4GetW(rbXn_maxImpulseW); } PX_FORCE_INLINE Vec3V getRaXn() const { return Vec3V_From_Vec4V(raXn_velMultiplierW); } PX_FORCE_INLINE Vec3V getRbXn() const { return Vec3V_From_Vec4V(rbXn_maxImpulseW); } /*PX_FORCE_INLINE void setRaXn(const PxVec3& v) {V4WriteXYZ(raXn_velMultiplierW, v);} PX_FORCE_INLINE void setRbXn(const PxVec3& v) {V4WriteXYZ(rbXn_maxImpulseW, v);} PX_FORCE_INLINE void setVelMultiplier(PxF32 f) {V4WriteW(raXn_velMultiplierW, f);} PX_FORCE_INLINE void setBiasedErr(PxF32 f) {biasedErr = f;} PX_FORCE_INLINE void setUnbiasedErr(PxF32 f) {unbiasedErr = f;} PX_FORCE_INLINE PxF32 getVelMultiplierPxF32() const {return V4ReadW(raXn_velMultiplierW);} PX_FORCE_INLINE const PxVec3& getRaXnPxVec3() 
const {return V3ReadXYZ(raXn);} PX_FORCE_INLINE const PxVec3& getRbXnPxVec3() const {return V3ReadXYZ(rbXn);} PX_FORCE_INLINE PxF32 getBiasedErrPxF32() const {return biasedErr;}*/ }; PX_COMPILE_TIME_ASSERT(sizeof(SolverContactPoint) == 48); /** \brief A single extended articulation contact point for the solver. */ struct SolverContactPointExt : public SolverContactPoint { Vec3V linDeltaVA; Vec3V angDeltaVA; Vec3V linDeltaVB; Vec3V angDeltaVB; }; PX_COMPILE_TIME_ASSERT(sizeof(SolverContactPointExt) == 112); /** \brief A single friction constraint for the solver. */ struct SolverContactFriction { Vec4V normalXYZ_appliedForceW; //16 Vec4V raXnXYZ_velMultiplierW; //32 Vec4V rbXnXYZ_biasW; //48 PxReal targetVel; //52 PxU32 mPad[3]; //64 PX_FORCE_INLINE void setAppliedForce(const FloatV f) {normalXYZ_appliedForceW=V4SetW(normalXYZ_appliedForceW,f);} PX_FORCE_INLINE void setVelMultiplier(const FloatV f) {raXnXYZ_velMultiplierW=V4SetW(raXnXYZ_velMultiplierW,f);} PX_FORCE_INLINE void setBias(const FloatV f) {rbXnXYZ_biasW=V4SetW(rbXnXYZ_biasW,f);} PX_FORCE_INLINE FloatV getAppliedForce() const {return V4GetW(normalXYZ_appliedForceW);} PX_FORCE_INLINE FloatV getVelMultiplier() const {return V4GetW(raXnXYZ_velMultiplierW);} PX_FORCE_INLINE FloatV getBias() const {return V4GetW(rbXnXYZ_biasW);} PX_FORCE_INLINE Vec3V getNormal() const {return Vec3V_From_Vec4V(normalXYZ_appliedForceW);} PX_FORCE_INLINE Vec3V getRaXn() const {return Vec3V_From_Vec4V(raXnXYZ_velMultiplierW);} PX_FORCE_INLINE Vec3V getRbXn() const {return Vec3V_From_Vec4V(rbXnXYZ_biasW);} PX_FORCE_INLINE void setNormal(const PxVec3& v) {V4WriteXYZ(normalXYZ_appliedForceW, v);} PX_FORCE_INLINE void setRaXn(const PxVec3& v) {V4WriteXYZ(raXnXYZ_velMultiplierW, v);} PX_FORCE_INLINE void setRbXn(const PxVec3& v) {V4WriteXYZ(rbXnXYZ_biasW, v);} PX_FORCE_INLINE const PxVec3& getNormalPxVec3() const {return V4ReadXYZ(normalXYZ_appliedForceW);} PX_FORCE_INLINE const PxVec3& getRaXnPxVec3() const {return 
V4ReadXYZ(raXnXYZ_velMultiplierW);} PX_FORCE_INLINE const PxVec3& getRbXnPxVec3() const {return V4ReadXYZ(rbXnXYZ_biasW);} PX_FORCE_INLINE void setAppliedForce(PxF32 f) {V4WriteW(normalXYZ_appliedForceW, f);} PX_FORCE_INLINE void setVelMultiplier(PxF32 f) {V4WriteW(raXnXYZ_velMultiplierW, f);} PX_FORCE_INLINE void setBias(PxF32 f) {V4WriteW(rbXnXYZ_biasW, f);} PX_FORCE_INLINE PxF32 getAppliedForcePxF32() const {return V4ReadW(normalXYZ_appliedForceW);} PX_FORCE_INLINE PxF32 getVelMultiplierPxF32() const {return V4ReadW(raXnXYZ_velMultiplierW);} PX_FORCE_INLINE PxF32 getBiasPxF32() const {return V4ReadW(rbXnXYZ_biasW);} }; PX_COMPILE_TIME_ASSERT(sizeof(SolverContactFriction) == 64); /** \brief A single extended articulation friction constraint for the solver. */ struct SolverContactFrictionExt : public SolverContactFriction { Vec3V linDeltaVA; Vec3V angDeltaVA; Vec3V linDeltaVB; Vec3V angDeltaVB; }; PX_COMPILE_TIME_ASSERT(sizeof(SolverContactFrictionExt) == 128); } } #endif
10,079
C
44.405405
188
0.767437
NVIDIA-Omniverse/PhysX/physx/source/lowleveldynamics/include/DyFeatherstoneArticulation.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef DY_FEATHERSTONE_ARTICULATION_H #define DY_FEATHERSTONE_ARTICULATION_H #include "foundation/PxVec3.h" #include "foundation/PxQuat.h" #include "foundation/PxTransform.h" #include "foundation/PxVecMath.h" #include "CmUtils.h" #include "DyVArticulation.h" #include "DyFeatherstoneArticulationUtils.h" #include "DyFeatherstoneArticulationJointData.h" #include "solver/PxSolverDefs.h" #include "DyArticulationTendon.h" #include "CmSpatialVector.h" #ifndef FEATHERSTONE_DEBUG #define FEATHERSTONE_DEBUG 0 #endif #define DY_STATIC_CONTACTS_IN_INTERNAL_SOLVER true namespace physx { class PxContactJoint; class PxcConstraintBlockStream; class PxcScratchAllocator; class PxsConstraintBlockManager; struct SolverConstraint1DExtStep; struct PxSolverConstraintPrepDesc; struct PxSolverBody; struct PxSolverBodyData; class PxConstraintAllocator; class PxsContactManagerOutputIterator; struct PxSolverConstraintDesc; namespace Dy { //#if PX_VC //#pragma warning(push) //#pragma warning( disable : 4324 ) // Padding was added at the end of a structure because of a __declspec(align) value. 
//#endif class ArticulationLinkData; struct SpatialSubspaceMatrix; struct SolverConstraint1DExt; struct SolverConstraint1DStep; class FeatherstoneArticulation; struct SpatialMatrix; struct SpatialTransform; struct Constraint; class ThreadContext; struct ArticulationInternalTendonConstraint { Cm::UnAlignedSpatialVector row0; //24 24 Cm::UnAlignedSpatialVector row1; //24 48 Cm::UnAlignedSpatialVector deltaVB; //24 72 PxU32 linkID0; //4 74 PxU32 linkID1; //4 78 PxReal accumulatedLength; //4 82 //accumulate distance for spatial tendon, accumualate joint pose for fixed tendon PxReal biasCoefficient; //4 94 PxReal velMultiplier; //4 98 PxReal impulseMultiplier; //4 102 PxReal appliedForce; //4 106 PxReal recipResponse; //4 110 PxReal deltaVA; //4 114 PxReal limitBiasCoefficient; PxReal limitImpulseMultiplier; PxReal limitAppliedForce; PxReal restDistance; PxReal lowLimit; PxReal highLimit; PxReal velImpulseMultiplier; PxReal limitVelImpulseMultiplier; }; struct ArticulationInternalConstraintBase { //Common/shared directional info between, frictions and drives Cm::UnAlignedSpatialVector row0; //24 24 Cm::UnAlignedSpatialVector row1; //24 48 Cm::UnAlignedSpatialVector deltaVA; //24 72 Cm::UnAlignedSpatialVector deltaVB; //24 96 //Response information PxReal recipResponse; //4 100 PxReal response; //4 104 }; struct ArticulationInternalLimit { // Initial error for high and low limits. Negative means limit is violated. PxReal errorLow; PxReal errorHigh; // Impulses are updated during solver iterations. 
PxReal lowImpulse; PxReal highImpulse; }; struct ArticulationImplicitDriveDesc { PX_CUDA_CALLABLE PX_FORCE_INLINE ArticulationImplicitDriveDesc(PxZERO) : driveTargetVelPlusInitialBias(0.0f), driveBiasCoefficient(0.0f), driveVelMultiplier(0.0f), driveImpulseMultiplier(0.0f) { } PX_CUDA_CALLABLE PX_FORCE_INLINE ArticulationImplicitDriveDesc (const PxReal targetVelPlusInitialBias, const PxReal biasCoefficient, const PxReal velMultiplier, const PxReal impulseMultiplier) : driveTargetVelPlusInitialBias(targetVelPlusInitialBias), driveBiasCoefficient(biasCoefficient), driveVelMultiplier(velMultiplier), driveImpulseMultiplier(impulseMultiplier) { } PxReal driveTargetVelPlusInitialBias; PxReal driveBiasCoefficient; PxReal driveVelMultiplier; PxReal driveImpulseMultiplier; }; struct ArticulationInternalConstraint : public ArticulationInternalConstraintBase { ArticulationImplicitDriveDesc implicitDriveDesc; PxReal driveMaxForce; PxReal driveForce; PxReal frictionForceCoefficient; PxReal frictionMaxForce; PxReal frictionForce; bool isLinearConstraint; void setImplicitDriveDesc(const ArticulationImplicitDriveDesc& driveDesc) { implicitDriveDesc = driveDesc; } const ArticulationImplicitDriveDesc& getImplicitDriveDesc() const { return implicitDriveDesc; } }; PX_COMPILE_TIME_ASSERT(0 == (sizeof(ArticulationInternalConstraint) & 0x0f)); //linkID can be PxU32. However, each thread is going to read 16 bytes so we just keep ArticulationSensor 16 byte align. 
//if not, newArticulationsLaunch kernel will fail to read the sensor data correctly
// Sensor attachment: a pose relative to a link, identified by link index.
struct ArticulationSensor
{
	PxTransform	mRelativePose;	//28 28
	PxU16		mLinkID;		//02 30
	PxU16		mFlags;			//02 32
};

// 6x4 matrix of joint-space spatial terms, stored column-major per dof slot so
// that one column (selected by 'id') can be dotted against a spatial vector.
struct PX_ALIGN_PREFIX(16) JointSpaceSpatialZ
{
	PxReal mVals[6][4];

	// Dot product of column 'id' with the spatial vector v (top then bottom parts).
	PxReal dot(Cm::SpatialVectorF& v, PxU32 id)
	{
		return v.top.x * mVals[0][id]
			 + v.top.y * mVals[1][id]
			 + v.top.z * mVals[2][id]
			 + v.bottom.x * mVals[3][id]
			 + v.bottom.y * mVals[4][id]
			 + v.bottom.z * mVals[5][id];
	}
} PX_ALIGN_SUFFIX(16);

// Central per-articulation state container: joint-space buffers, link-space
// spatial buffers, tendon/sensor bookkeeping and solver scratch data.
// Mostly a bag of accessors over PxArray-backed storage; the arrays are sized
// by resizeLinkData()/resizeJointData().
class ArticulationData
{
public:
	ArticulationData() :
		mPathToRootElements(NULL), mNumPathToRootElements(0),
		mLinksData(NULL), mJointData(NULL),
		mSpatialTendons(NULL), mNumSpatialTendons(0), mNumTotalAttachments(0),
		mFixedTendons(NULL), mNumFixedTendons(0),
		mSensors(NULL), mSensorForces(NULL), mNbSensors(0),
		mDt(0.f), mDofs(0xffffffff),	// 0xffffffff marks dof count as not-yet-computed
		mDataDirty(true)
	{
		mRootPreMotionVelocity = Cm::SpatialVectorF::Zero();
	}

	~ArticulationData();

	PX_FORCE_INLINE	void init();
	void resizeLinkData(const PxU32 linkCount);
	void resizeJointData(const PxU32 dofs);

	// --- joint-space (per-dof) scalar buffers ---
	PX_FORCE_INLINE PxReal*						getJointAccelerations()					{ return mJointAcceleration.begin(); }
	PX_FORCE_INLINE const PxReal*				getJointAccelerations() const			{ return mJointAcceleration.begin(); }
	PX_FORCE_INLINE PxReal*						getJointVelocities()					{ return mJointVelocity.begin(); }
	PX_FORCE_INLINE const PxReal*				getJointVelocities() const				{ return mJointVelocity.begin(); }
	PX_FORCE_INLINE PxReal*						getJointNewVelocities()					{ return mJointNewVelocity.begin(); }
	PX_FORCE_INLINE const PxReal*				getJointNewVelocities() const			{ return mJointNewVelocity.begin(); }
	PX_FORCE_INLINE PxReal*						getJointPositions()						{ return mJointPosition.begin(); }
	PX_FORCE_INLINE const PxReal*				getJointPositions() const				{ return mJointPosition.begin(); }
	PX_FORCE_INLINE PxReal*						getJointForces()						{ return mJointForce.begin(); }
	PX_FORCE_INLINE const PxReal*				getJointForces() const					{ return mJointForce.begin(); }
	PX_FORCE_INLINE PxReal*						getJointConstraintForces()				{ return mJointConstraintForces.begin(); }
	PX_FORCE_INLINE const PxReal*				getJointConstraintForces() const		{ return mJointConstraintForces.begin(); }
	PX_FORCE_INLINE PxReal*						getJointTargetPositions()				{ return mJointTargetPositions.begin(); }
	PX_FORCE_INLINE const PxReal*				getJointTargetPositions() const			{ return mJointTargetPositions.begin(); }
	PX_FORCE_INLINE PxReal*						getJointTargetVelocities()				{ return mJointTargetVelocities.begin(); }
	PX_FORCE_INLINE const PxReal*				getJointTargetVelocities() const		{ return mJointTargetVelocities.begin(); }

	PX_FORCE_INLINE ArticulationInternalConstraint&			getInternalConstraint(const PxU32 dofId)		{ return mInternalConstraints[dofId]; }
	PX_FORCE_INLINE const ArticulationInternalConstraint&	getInternalConstraint(const PxU32 dofId) const	{ return mInternalConstraints[dofId]; }

	// --- link-space spatial buffers (whole-array accessors) ---
	PX_FORCE_INLINE Cm::SpatialVectorF*			getMotionVelocities()					{ return mMotionVelocities.begin(); }
	PX_FORCE_INLINE Cm::SpatialVectorF*			getMotionAccelerations()				{ return mMotionAccelerations.begin(); }
	PX_FORCE_INLINE const Cm::SpatialVectorF*	getMotionAccelerations() const			{ return mMotionAccelerations.begin(); }
	PX_FORCE_INLINE Cm::SpatialVectorF*			getLinkIncomingJointForces()			{ return mLinkIncomingJointForces.begin(); }
	PX_FORCE_INLINE Cm::SpatialVectorF*			getCorioliseVectors()					{ return mCorioliseVectors.begin(); }
	PX_FORCE_INLINE Cm::SpatialVectorF*			getSpatialZAVectors()					{ return mZAForces.begin(); }
	PX_FORCE_INLINE Cm::SpatialVectorF*			getSpatialZAInternalVectors()			{ return mZAInternalForces.begin(); }
	PX_FORCE_INLINE Cm::SpatialVectorF*			getTransmittedForces()					{ return mJointTransmittedForce.begin(); }
	PX_FORCE_INLINE Cm::SpatialVectorF*			getPosIterMotionVelocities()			{ return mPosIterMotionVelocities.begin(); }
	PX_FORCE_INLINE const Cm::SpatialVectorF*	getPosIterMotionVelocities() const		{ return mPosIterMotionVelocities.begin(); }
	PX_FORCE_INLINE PxReal*						getPosIterJointVelocities()				{ return mPosIterJointVelocities.begin(); }

	// --- per-link element accessors (const and non-const variants) ---
	PX_FORCE_INLINE Cm::SpatialVectorF&			getPosIterMotionVelocity(const PxU32 index)			{ return mPosIterMotionVelocities[index]; }
	PX_FORCE_INLINE const Cm::SpatialVectorF&	getMotionVelocity(const PxU32 index) const			{ return mMotionVelocities[index]; }
	PX_FORCE_INLINE const Cm::SpatialVectorF&	getMotionAcceleration(const PxU32 index) const		{ return mMotionAccelerations[index]; }
	PX_FORCE_INLINE const Cm::SpatialVectorF&	getCorioliseVector(const PxU32 index) const			{ return mCorioliseVectors[index]; }
	PX_FORCE_INLINE const Cm::SpatialVectorF&	getSpatialZAVector(const PxU32 index) const			{ return mZAForces[index]; }
	PX_FORCE_INLINE const Cm::SpatialVectorF&	getTransmittedForce(const PxU32 index) const		{ return mJointTransmittedForce[index]; }

	PX_FORCE_INLINE Cm::SpatialVectorF&			getMotionVelocity(const PxU32 index)				{ return mMotionVelocities[index]; }
	PX_FORCE_INLINE Cm::SpatialVectorF&			getMotionAcceleration(const PxU32 index)			{ return mMotionAccelerations[index]; }
	PX_FORCE_INLINE Cm::SpatialVectorF&			getCorioliseVector(const PxU32 index)				{ return mCorioliseVectors[index]; }
	PX_FORCE_INLINE Cm::SpatialVectorF&			getSpatialZAVector(const PxU32 index)				{ return mZAForces[index]; }
	PX_FORCE_INLINE Cm::SpatialVectorF&			getTransmittedForce(const PxU32 index)				{ return mJointTransmittedForce[index]; }

	//PX_FORCE_INLINE Dy::SpatialMatrix* getTempSpatialMatrix() { mTempSpatialMatrix.begin(); }

	PX_FORCE_INLINE PxTransform&				getPreTransform(const PxU32 index)					{ return mPreTransform[index]; }
	PX_FORCE_INLINE const PxTransform&			getPreTransform(const PxU32 index) const			{ return mPreTransform[index]; }
	// PX_FORCE_INLINE void setPreTransform(const PxU32 index, const PxTransform& t){ mPreTransform[index] = t; }
	PX_FORCE_INLINE PxTransform*				getPreTransform()									{ return mPreTransform.begin(); }

	PX_FORCE_INLINE const Cm::SpatialVectorF&	getDeltaMotionVector(const PxU32 index) const		{ return mDeltaMotionVector[index]; }
	PX_FORCE_INLINE void						setDeltaMotionVector(const PxU32 index, const Cm::SpatialVectorF& vec)	{ mDeltaMotionVector[index] = vec; }
	PX_FORCE_INLINE Cm::SpatialVectorF*			getDeltaMotionVector()								{ return mDeltaMotionVector.begin(); }

	// --- link / joint topology ---
	PX_FORCE_INLINE ArticulationLink*			getLinks() const									{ return mLinks; }
	PX_FORCE_INLINE PxU32						getLinkCount() const								{ return mLinkCount; }
	PX_FORCE_INLINE ArticulationLink&			getLink(PxU32 index) const							{ return mLinks[index]; }

	// --- tendons & sensors ---
	PX_FORCE_INLINE ArticulationSpatialTendon**	getSpatialTendons() const							{ return mSpatialTendons; }
	PX_FORCE_INLINE PxU32						getSpatialTendonCount() const						{ return mNumSpatialTendons; }
	PX_FORCE_INLINE ArticulationSpatialTendon*	getSpatialTendon(PxU32 index) const					{ return mSpatialTendons[index]; }

	PX_FORCE_INLINE ArticulationFixedTendon**	getFixedTendons() const								{ return mFixedTendons; }
	PX_FORCE_INLINE PxU32						getFixedTendonCount() const							{ return mNumFixedTendons; }
	PX_FORCE_INLINE ArticulationFixedTendon*	getFixedTendon(PxU32 index) const					{ return mFixedTendons[index]; }

	PX_FORCE_INLINE ArticulationSensor**		getSensors() const									{ return mSensors; }
	PX_FORCE_INLINE PxU32						getSensorCount() const								{ return mNbSensors; }

	PX_FORCE_INLINE ArticulationLinkData*		getLinkData() const									{ return mLinksData; }
	ArticulationLinkData&						getLinkData(PxU32 index) const;

	PX_FORCE_INLINE ArticulationJointCoreData*	getJointData() const								{ return mJointData; }
	PX_FORCE_INLINE ArticulationJointCoreData&	getJointData(PxU32 index) const						{ return mJointData[index]; }
	// PT: PX-1399
	PX_FORCE_INLINE PxArticulationFlags			getArticulationFlags() const						{ return *mFlags; }

	PX_FORCE_INLINE Cm::SpatialVector*			getExternalAccelerations()							{ return mExternalAcceleration; }
	PX_FORCE_INLINE Cm::SpatialVector&			getExternalAcceleration(const PxU32 linkID)			{ return mExternalAcceleration[linkID]; }
	PX_FORCE_INLINE const Cm::SpatialVector&	getExternalAcceleration(const PxU32 linkID) const	{ return mExternalAcceleration[linkID]; }

	PX_FORCE_INLINE PxReal						getDt() const										{ return mDt; }
	PX_FORCE_INLINE void						setDt(const PxReal dt)								{ mDt = dt; }

	PX_FORCE_INLINE bool						getDataDirty() const								{ return mDataDirty; }
	PX_FORCE_INLINE void						setDataDirty(const bool dirty)						{ mDataDirty = dirty; }

	PX_FORCE_INLINE PxU32						getDofs() const										{ return mDofs; }
	PX_FORCE_INLINE void						setDofs(const PxU32 dof)							{ mDofs = dof; }

	PX_FORCE_INLINE FeatherstoneArticulation*	getArticulation()									{ return mArticulation; }
	PX_FORCE_INLINE void						setArticulation(FeatherstoneArticulation* articulation)	{ mArticulation = articulation; }

	PX_FORCE_INLINE const SpatialMatrix&		getBaseInvSpatialArticulatedInertiaW() const		{ return mBaseInvSpatialArticulatedInertiaW; }
	PX_FORCE_INLINE SpatialMatrix&				getBaseInvSpatialArticulatedInertiaW()				{ return mBaseInvSpatialArticulatedInertiaW; }

	PX_FORCE_INLINE PxTransform*				getAccumulatedPoses()								{ return mAccumulatedPoses.begin(); }
	PX_FORCE_INLINE const PxTransform*			getAccumulatedPoses() const							{ return mAccumulatedPoses.begin(); }

	// --- joint-space matrices used by the solver ---
	PX_FORCE_INLINE Cm::SpatialVectorF*			getJointSpaceJacobians()							{ return mJointSpaceJacobians.begin(); }
	PX_FORCE_INLINE const Cm::SpatialVectorF*	getJointSpaceJacobians() const						{ return mJointSpaceJacobians.begin(); }

	PX_FORCE_INLINE JointSpaceSpatialZ*			getJointSpaceDeltaV()								{ return mJointSpaceDeltaVMatrix.begin(); }
	PX_FORCE_INLINE const JointSpaceSpatialZ*	getJointSpaceDeltaV() const							{ return mJointSpaceDeltaVMatrix.begin(); }

	PX_FORCE_INLINE Cm::SpatialVectorF*			getJointSpaceResponse()								{ return mJointSpaceResponseMatrix.begin(); }
	PX_FORCE_INLINE const Cm::SpatialVectorF*	getJointSpaceResponse() const						{ return mJointSpaceResponseMatrix.begin(); }

	PX_FORCE_INLINE SpatialImpulseResponseMatrix*		getRootResponseMatrix()						{ return mRootResponseMatrix.begin(); }
	PX_FORCE_INLINE const SpatialImpulseResponseMatrix*	getRootResponseMatrix() const				{ return mRootResponseMatrix.begin(); }

	PX_FORCE_INLINE const Cm::SpatialVectorF&	getRootDeferredZ() const							{ return mRootDeferredZ; }
	PX_FORCE_INLINE Cm::SpatialVectorF&			getRootDeferredZ()									{ return mRootDeferredZ; }

	PX_FORCE_INLINE const SpatialMatrix*		getWorldSpatialArticulatedInertia() const			{ return mWorldSpatialArticulatedInertia.begin(); }
	PX_FORCE_INLINE SpatialMatrix*				getWorldSpatialArticulatedInertia()					{ return mWorldSpatialArticulatedInertia.begin(); }

	PX_FORCE_INLINE const Cm::UnAlignedSpatialVector*	getWorldMotionMatrix() const				{ return mWorldMotionMatrix.begin(); }
	PX_FORCE_INLINE Cm::UnAlignedSpatialVector*			getWorldMotionMatrix()						{ return mWorldMotionMatrix.begin(); }

	PX_FORCE_INLINE const Cm::UnAlignedSpatialVector*	getMotionMatrix() const						{ return mMotionMatrix.begin(); }
	PX_FORCE_INLINE Cm::UnAlignedSpatialVector*			getMotionMatrix()							{ return mMotionMatrix.begin(); }

	PX_FORCE_INLINE const Cm::SpatialVectorF*	getIsW() const										{ return mIsW.begin(); }
	PX_FORCE_INLINE Cm::SpatialVectorF*			getIsW()											{ return mIsW.begin(); }

	PX_FORCE_INLINE const PxVec3*				getRw() const										{ return mRw.begin(); }
	PX_FORCE_INLINE PxVec3*						getRw()												{ return mRw.begin(); }

	// NOTE(review): getMinusStZExt and getQstZIc both alias the same qstZIc
	// buffer — presumably intentional (two names for the same quantity in
	// different phases); verify before changing either.
	PX_FORCE_INLINE const PxReal*				getMinusStZExt() const								{ return qstZIc.begin(); }
	PX_FORCE_INLINE PxReal*						getMinusStZExt()									{ return qstZIc.begin(); }

	PX_FORCE_INLINE const PxReal*				getQstZIc() const									{ return qstZIc.begin(); }
	PX_FORCE_INLINE PxReal*						getQstZIc()											{ return qstZIc.begin(); }

	PX_FORCE_INLINE const PxReal*				getQStZIntIc() const								{ return qstZIntIc.begin(); }
	PX_FORCE_INLINE PxReal*						getQStZIntIc()										{ return qstZIntIc.begin(); }

	PX_FORCE_INLINE const InvStIs*				getInvStIS() const									{ return mInvStIs.begin(); }
	PX_FORCE_INLINE InvStIs*					getInvStIS()										{ return mInvStIs.begin(); }

	PX_FORCE_INLINE const Cm::SpatialVectorF*	getISInvStIS() const								{ return mISInvStIS.begin(); }
	PX_FORCE_INLINE Cm::SpatialVectorF*			getISInvStIS()										{ return mISInvStIS.begin(); }

	PX_FORCE_INLINE SpatialImpulseResponseMatrix*		getImpulseResponseMatrixWorld()				{ return mResponseMatrixW.begin(); }
	PX_FORCE_INLINE const SpatialImpulseResponseMatrix*	getImpulseResponseMatrixWorld() const		{ return mResponseMatrixW.begin(); }

	// --- per-element accessors over the same buffers ---
	PX_FORCE_INLINE const SpatialMatrix&		getWorldSpatialArticulatedInertia(const PxU32 linkID) const	{ return mWorldSpatialArticulatedInertia[linkID]; }

	PX_FORCE_INLINE const InvStIs&				getInvStIs(const PxU32 linkID) const				{ return mInvStIs[linkID]; }

	PX_FORCE_INLINE const Cm::UnAlignedSpatialVector&	getMotionMatrix(const PxU32 dofId) const	{ return mMotionMatrix[dofId]; }
	PX_FORCE_INLINE const Cm::UnAlignedSpatialVector&	getWorldMotionMatrix(const PxU32 dofId) const	{ return mWorldMotionMatrix[dofId]; }

	PX_FORCE_INLINE Cm::UnAlignedSpatialVector&			getJointAxis(const PxU32 dofId)				{ return mJointAxis[dofId]; }
	PX_FORCE_INLINE const Cm::UnAlignedSpatialVector&	getJointAxis(const PxU32 dofId) const		{ return mJointAxis[dofId]; }

	PX_FORCE_INLINE const PxVec3&				getRw(const PxU32 linkID) const						{ return mRw[linkID]; }

	PX_FORCE_INLINE const Cm::SpatialVectorF&	getIsW(const PxU32 dofId) const						{ return mIsW[dofId]; }

	PX_FORCE_INLINE const Cm::SpatialVectorF&	getWorldIsInvD(const PxU32 dofId) const				{ return mISInvStIS[dofId]; }

	PX_FORCE_INLINE PxReal*						getDeferredQstZ()									{ return mDeferredQstZ.begin(); }

	PX_FORCE_INLINE Cm::SpatialVectorF&			getSolverSpatialForce(const PxU32 linkID)			{ return mSolverLinkSpatialForces[linkID]; }
	PX_FORCE_INLINE PxSpatialForce*				getSensorForces()									{ return mSensorForces; }

	PX_FORCE_INLINE void setRootPreMotionVelocity(const Cm::UnAlignedSpatialVector& vel)			{ mRootPreMotionVelocity.top = vel.top; mRootPreMotionVelocity.bottom = vel.bottom; }

	PX_FORCE_INLINE PxU32*						getPathToRootElements() const						{ return mPathToRootElements; }
	PX_FORCE_INLINE PxU32						getPathToRootElementCount() const					{ return mNumPathToRootElements; }

	PX_FORCE_INLINE const Cm::SpatialVectorF*	getSolverSpatialForces() const						{ return mSolverLinkSpatialForces.begin(); }
	PX_FORCE_INLINE Cm::SpatialVectorF*			getSolverSpatialForces()							{ return mSolverLinkSpatialForces.begin(); }

	PX_FORCE_INLINE void incrementSolverSpatialDeltaVel(const PxU32 linkID, const Cm::SpatialVectorF& deltaV)	{ mSolverLinkSpatialDeltaVels[linkID] += deltaV; }

private:
	Cm::SpatialVectorF							mRootPreMotionVelocity;
	Cm::SpatialVectorF							mRootDeferredZ;

	// Joint-space (one scalar per dof) buffers.
	PxArray<PxReal>								mJointAcceleration;			// joint acceleration
	PxArray<PxReal>								mJointInternalAcceleration;	// joint internal force acceleration
	PxArray<PxReal>								mJointVelocity;				// joint velocity
	PxArray<PxReal>								mJointNewVelocity;			// joint velocity due to contacts
	PxArray<PxReal>								mJointPosition;				// joint position
	PxArray<PxReal>								mJointForce;				// joint force
	PxArray<PxReal>								mJointTargetPositions;		// joint target positions
	PxArray<PxReal>								mJointTargetVelocities;		// joint target velocities

	PxArray<PxReal>								mPosIterJointVelocities;	// joint delta velocity after position iteration, before velocity iteration
	PxArray<Cm::SpatialVectorF>					mPosIterMotionVelocities;	// link motion velocities after position iteration, before velocity iteration
	PxArray<Cm::SpatialVectorF>					mMotionVelocities;			// link motion velocities
	PxArray<Cm::SpatialVectorF>					mSolverLinkSpatialDeltaVels;	// link delta-velocities arising from the solver
	PxArray<Cm::SpatialVectorF>					mSolverLinkSpatialImpulses;	// link impulses arising from the solver
	PxArray<Cm::SpatialVectorF>					mSolverLinkSpatialForces;
	PxArray<Cm::SpatialVectorF>					mMotionAccelerations;		// link motion accelerations
	PxArray<Cm::SpatialVectorF>					mLinkIncomingJointForces;
	PxArray<Cm::SpatialVectorF>					mMotionAccelerationsInternal;	// link motion accelerations
	PxArray<Cm::SpatialVectorF>					mCorioliseVectors;			// link coriolis vector
	PxArray<Cm::SpatialVectorF>					mZAInternalForces;			// link internal spatial forces
	PxArray<Cm::SpatialVectorF>					mZAForces;					// link spatial zero-acceleration force / spatial articulated force
	PxArray<Cm::SpatialVectorF>					mJointTransmittedForce;
	PxArray<ArticulationInternalConstraint>		mInternalConstraints;
	PxArray<ArticulationInternalLimit>			mInternalLimits;
	PxArray<ArticulationInternalTendonConstraint>	mInternalSpatialTendonConstraints;
	PxArray<ArticulationInternalTendonConstraint>	mInternalFixedTendonConstraints;

	PxArray<PxReal>								mDeferredQstZ;

	PxArray<PxReal>								mJointConstraintForces;

	PxArray<Cm::SpatialVectorF>					mDeltaMotionVector;	// this is for TGS solver
	PxArray<PxTransform>						mPreTransform;		// this is the previous transform list for links

	PxArray<SpatialImpulseResponseMatrix>		mResponseMatrixW;
	PxArray<Cm::SpatialVectorF>					mJointSpaceJacobians;
	PxArray<JointSpaceSpatialZ>					mJointSpaceDeltaVMatrix;
	PxArray<Cm::SpatialVectorF>					mJointSpaceResponseMatrix;
	PxArray<Cm::SpatialVectorF>					mPropagationAccelerator;
	PxArray<SpatialImpulseResponseMatrix>		mRootResponseMatrix;
	PxArray<SpatialMatrix>						mWorldSpatialArticulatedInertia;
	PxArray<PxMat33>							mWorldIsolatedSpatialArticulatedInertia;
	PxArray<PxReal>								mMasses;
	PxArray<InvStIs>							mInvStIs;
	PxArray<Cm::SpatialVectorF>					mIsW;
	PxArray<PxReal>								qstZIc;	// jointForce - stZIc
	PxArray<PxReal>								qstZIntIc;
	PxArray<Cm::UnAlignedSpatialVector>			mJointAxis;
	PxArray<Cm::UnAlignedSpatialVector>			mMotionMatrix;
	PxArray<Cm::UnAlignedSpatialVector>			mWorldMotionMatrix;
	PxArray<Cm::SpatialVectorF>					mISInvStIS;
	PxArray<PxVec3>								mRw;

	PxArray<PxU32>								mNbStatic1DConstraints;
	PxArray<PxU32>								mNbStaticContactConstraints;
	PxArray<PxU32>								mStatic1DConstraintStartIndex;
	PxArray<PxU32>								mStaticContactConstraintStartIndex;

	PxArray<PxQuat>								mRelativeQuat;

	// Topology and externally-owned data (raw pointers: not owned by this class).
	ArticulationLink*							mLinks;
	PxU32										mLinkCount;
	PxU32*										mPathToRootElements;
	PxU32										mNumPathToRootElements;
	ArticulationLinkData*						mLinksData;
	ArticulationJointCoreData*					mJointData;
	ArticulationSpatialTendon**					mSpatialTendons;
	PxU32										mNumSpatialTendons;
	PxU32										mNumTotalAttachments;
	ArticulationFixedTendon**					mFixedTendons;
	PxU32										mNumFixedTendons;
	ArticulationSensor**						mSensors;
	PxSpatialForce*								mSensorForces;
	PxU32										mNbSensors;
	PxReal										mDt;
	PxU32										mDofs;
	const PxArticulationFlags*					mFlags;	// PT: PX-1399
	Cm::SpatialVector*							mExternalAcceleration;
	bool										mDataDirty;	// this means we need to call commonInit()
	bool										mJointDirty;	// this means joint delta velocity has been changed by contacts so we need to update joint velocity/joint acceleration
	FeatherstoneArticulation*					mArticulation;

	PxArray<PxTransform>						mAccumulatedPoses;
	PxArray<PxQuat>								mDeltaQ;

	SpatialMatrix								mBaseInvSpatialArticulatedInertiaW;

	PxReal										mInvSumMass;
	PxVec3										mCOM;

	friend class FeatherstoneArticulation;
};

// Resets the per-step TGS accumulation buffers (delta motion and position-iteration
// velocities) and clears the joint-dirty flag.
void ArticulationData::init()
{
	//zero delta motion vector for TGS solver
	PxMemZero(getDeltaMotionVector(), sizeof(Cm::SpatialVectorF) * mLinkCount);
	PxMemZero(getPosIterMotionVelocities(), sizeof(Cm::SpatialVectorF) * mLinkCount);
	mJointDirty = false;
}

// Non-owning bundle of work pointers handed around the articulation algorithms;
// all members default to NULL and are pointed at caller-provided storage.
struct ScratchData
{
public:
	ScratchData()
	{
		motionVelocities = NULL;
		motionAccelerations = NULL;
		coriolisVectors = NULL;
		spatialZAVectors = NULL;
		externalAccels = NULL;
		compositeSpatialInertias = NULL;

		jointVelocities = NULL;
		jointAccelerations = NULL;
		jointForces = NULL;
		jointPositions = NULL;
		jointFrictionForces = NULL;
	}

	Cm::SpatialVectorF*	motionVelocities;
	Cm::SpatialVectorF*	motionAccelerations;
	Cm::SpatialVectorF*	coriolisVectors;
	Cm::SpatialVectorF*	spatialZAVectors;
	Cm::SpatialVector*	externalAccels;
	Dy::SpatialMatrix*	compositeSpatialInertias;

	PxReal*				jointVelocities;
	PxReal*				jointAccelerations;
	PxReal*				jointForces;
	PxReal*				jointPositions;
	PxReal*				jointFrictionForces;
};

// Per-call context for solving internal joint constraints: fixed solver
// parameters plus running indices (dofId/complexId/limitId) advanced while
// recursing over the articulation.
struct InternalConstraintSolverData
{
	const PxReal		dt;
	const PxReal		invDt;
	const PxReal		elapsedTime;
	const PxReal		erp;
	Cm::SpatialVectorF*	impulses;
	Cm::SpatialVectorF*	deltaV;
	const bool			isVelIter;
	const bool			isTGS;
	PxU32				dofId;
	PxU32				complexId;
	PxU32				limitId;
	// NOTE(review): articId is not initialized by the constructor —
	// presumably assigned by the caller before use; verify.
	PxU32				articId;

	InternalConstraintSolverData(const PxReal dt_, const PxReal invDt_, const PxReal elapsedTime_,
		const PxReal erp_, Cm::SpatialVectorF* impulses_, Cm::SpatialVectorF* deltaV_,
		bool velocityIteration_, bool isTGS_) :
		dt(dt_), invDt(invDt_), elapsedTime(elapsedTime_), erp(erp_),
		impulses(impulses_), deltaV(deltaV_),
		isVelIter(velocityIteration_), isTGS(isTGS_),
		dofId(0), complexId(0), limitId(0)
	{
	}

	PX_NOCOPY(InternalConstraintSolverData)
};

// Inputs for solving one fixed-tendon constraint chain.
struct FixedTendonSolveData
{
	ArticulationLink*		links;
	ArticulationTendonJoint*	tendonJoints;
	PxReal					rootVel;
	PxReal					rootImp;
	PxReal					erp;
	PxReal					error;
	PxReal					limitError;
};

#if PX_VC
#pragma warning(push)
#pragma warning( disable : 4324 ) // Padding was
added at the end of a structure because of a __declspec(align) value.
#endif

//Articulation dirty flag - used to tag which properties of the articulation are dirty. Used only to transfer selected data to the GPU...
struct ArticulationDirtyFlag
{
	enum Enum
	{
		eDIRTY_JOINTS = 1 << 0,
		eDIRTY_POSITIONS = 1 << 1,
		eDIRTY_VELOCITIES = 1 << 2,
		eDIRTY_FORCES = 1 << 3,
		eDIRTY_ROOT_TRANSFORM = 1 << 4,
		eDIRTY_ROOT_VELOCITIES = 1 << 5,
		eDIRTY_LINKS = 1 << 6,
		eIN_DIRTY_LIST = 1 << 7,
		eDIRTY_WAKECOUNTER = 1 << 8,
		eDIRTY_EXT_ACCEL = 1 << 9,
		eDIRTY_LINK_FORCE = 1 << 10,
		eDIRTY_LINK_TORQUE = 1 << 11,
		eDIRTY_JOINT_TARGET_VEL = 1 << 12,
		eDIRTY_JOINT_TARGET_POS = 1 << 13,
		eDIRTY_SPATIAL_TENDON = 1 << 14,
		eDIRTY_SPATIAL_TENDON_ATTACHMENT = 1 << 15,
		eDIRTY_FIXED_TENDON = 1 << 16,
		eDIRTY_FIXED_TENDON_JOINT = 1 << 17,
		eDIRTY_SENSOR = 1 << 18,
		eDIRTY_VELOCITY_LIMITS = 1 << 19,
		eDIRTY_USER_FLAGS = 1 << 20,
		eNEEDS_KINEMATIC_UPDATE = 1 << 21,
		eALL = (1<<22)-1	// union of all flags above
	};
};

// Fills row[0..2] with the 3x3 jacobian of the relative rotation (qa* qb),
// regularizing the diagonal when the relative rotation is exactly 180 degrees.
PX_INLINE PX_CUDA_CALLABLE void computeArticJacobianAxes(PxVec3 row[3], const PxQuat& qa, const PxQuat& qb)
{
	// Compute jacobian matrix for (qa* qb) [[* means conjugate in this expr]]
	// d/dt (qa* qb) = 1/2 L(qa*) R(qb) (omega_b - omega_a)
	// result is L(qa*) R(qb), where L(q) and R(q) are left/right q multiply matrix

	const PxReal wa = qa.w, wb = qb.w;
	const PxVec3 va(qa.x, qa.y, qa.z), vb(qb.x, qb.y, qb.z);
	const PxVec3 c = vb*wa + va*wb;
	const PxReal d0 = wa*wb;
	const PxReal d1 = va.dot(vb);
	const PxReal d = d0 - d1;

	row[0] = (va * vb.x + vb * va.x + PxVec3(d, c.z, -c.y)) * 0.5f;
	row[1] = (va * vb.y + vb * va.y + PxVec3(-c.z, d, c.x)) * 0.5f;
	row[2] = (va * vb.z + vb * va.z + PxVec3(c.y, -c.x, d)) * 0.5f;

	// d0 + d1 is the real part of (qa* qb); it is zero exactly when the
	// relative rotation is 180 degrees, which would make the matrix singular.
	if ((d0 + d1) != 0.0f)	// check if relative rotation is 180 degrees which can lead to singular matrix
		return;
	else
	{
		// Nudge the diagonal so downstream use of the matrix stays well-defined.
		row[0].x += PX_EPS_F32;
		row[1].y += PX_EPS_F32;
		row[2].z += PX_EPS_F32;
	}
}

// Recovers a swing angle from quaternion swing components (quarter-angle form;
// see the trailing comment for the identity used).
PX_CUDA_CALLABLE PX_FORCE_INLINE float compAng(PxReal swingYZ, PxReal swingW)
{
	return 4.0f * PxAtan2(swingYZ, 1.0f + swingW); // tan
(t/2) = sin(t)/(1+cos t), so this is the quarter angle } PX_ALIGN_PREFIX(64) class FeatherstoneArticulation { PX_NOCOPY(FeatherstoneArticulation) public: // public interface explicit FeatherstoneArticulation(void*); ~FeatherstoneArticulation(); // get data sizes for allocation at higher levels void getDataSizes(PxU32 linkCount, PxU32& solverDataSize, PxU32& totalSize, PxU32& scratchSize); bool resize(const PxU32 linkCount); void assignTendons(const PxU32 /*nbTendons*/, Dy::ArticulationSpatialTendon** /*tendons*/); void assignTendons(const PxU32 /*nbTendons*/, Dy::ArticulationFixedTendon** /*tendons*/); void assignSensors(const PxU32 nbSensors, Dy::ArticulationSensor** sensors, PxSpatialForce* sensorForces); PxU32 getDofs() const; PxU32 getDof(const PxU32 linkID); bool applyCache(PxArticulationCache& cache, const PxArticulationCacheFlags flag, bool& shouldWake); void copyInternalStateToCache(PxArticulationCache& cache, const PxArticulationCacheFlags flag, const bool isGpuSimEnabled); static PxU32 getCacheDataSize(PxU32 totalDofs, PxU32 linkCount, PxU32 sensorCount); static PxArticulationCache* createCache(PxU32 totalDofs, PxU32 linkCount, PxU32 sensorCount); void packJointData(const PxReal* maximum, PxReal* reduced); void unpackJointData(const PxReal* reduced, PxReal* maximum); void initializeCommonData(); //gravity as input, joint force as output void getGeneralizedGravityForce(const PxVec3& gravity, PxArticulationCache& cache); //joint velocity as input, generalised force(coriolis and centrigugal force) as output void getCoriolisAndCentrifugalForce(PxArticulationCache& cache); //external force as input, joint force as output void getGeneralizedExternalForce(PxArticulationCache& /*cache*/); //joint force as input, joint acceleration as output void getJointAcceleration(const PxVec3& gravity, PxArticulationCache& cache); //joint acceleration as input, joint force as out void getJointForce(PxArticulationCache& cache); void getDenseJacobian(PxArticulationCache& cache, 
PxU32 & nRows, PxU32 & nCols); //These two functions are for closed loop system void getKMatrix(ArticulationJointCore* loopJoint, const PxU32 parentIndex, const PxU32 childIndex, PxArticulationCache& cache); void getCoefficientMatrix(const PxReal dt, const PxU32 linkID, const PxContactJoint* contactJoints, const PxU32 nbContacts, PxArticulationCache& cache); void getCoefficientMatrixWithLoopJoints(ArticulationLoopConstraint* lConstraints, const PxU32 nbJoints, PxArticulationCache& cache); bool getLambda(ArticulationLoopConstraint* lConstraints, const PxU32 nbJoints, PxArticulationCache& cache, PxArticulationCache& rollBackCache, const PxReal* jointTorque, const PxVec3& gravity, const PxU32 maxIter, const PxReal invLengthScale); void getGeneralizedMassMatrix(PxArticulationCache& cache); void getGeneralizedMassMatrixCRB(PxArticulationCache& cache); bool storeStaticConstraint(const PxSolverConstraintDesc& desc); bool willStoreStaticConstraint() { return DY_STATIC_CONTACTS_IN_INTERNAL_SOLVER; } void setRootLinearVelocity(const PxVec3& velocity); void setRootAngularVelocity(const PxVec3& velocity); void teleportRootLink(); void getImpulseResponse( PxU32 linkID, Cm::SpatialVectorF* Z, const Cm::SpatialVector& impulse, Cm::SpatialVector& deltaV) const; void getImpulseResponse( PxU32 linkID, Cm::SpatialVectorV* /*Z*/, const Cm::SpatialVectorV& impulse, Cm::SpatialVectorV& deltaV) const; void getImpulseSelfResponse( PxU32 linkID0, PxU32 linkID1, Cm::SpatialVectorF* Z, const Cm::SpatialVector& impulse0, const Cm::SpatialVector& impulse1, Cm::SpatialVector& deltaV0, Cm::SpatialVector& deltaV1) const; Cm::SpatialVectorV getLinkVelocity(const PxU32 linkID) const; Cm::SpatialVector getLinkScalarVelocity(const PxU32 linkID) const; Cm::SpatialVectorV getLinkMotionVector(const PxU32 linkID) const; //this is called by island gen to determine whether the articulation should be awake or sleep Cm::SpatialVector getMotionVelocity(const PxU32 linkID) const; Cm::SpatialVector 
getMotionAcceleration(const PxU32 linkID, const bool isGpuSimEnabled) const; void fillIndexType(const PxU32 linkId, PxU8& indexType); PxReal getLinkMaxPenBias(const PxU32 linkID) const; PxReal getCfm(const PxU32 linkID) const; static PxU32 computeUnconstrainedVelocities( const ArticulationSolverDesc& desc, PxReal dt, PxU32& acCount, const PxVec3& gravity, Cm::SpatialVectorF* Z, Cm::SpatialVectorF* deltaV, const PxReal invLengthScale); static void computeUnconstrainedVelocitiesTGS( const ArticulationSolverDesc& desc, PxReal dt, const PxVec3& gravity, PxU64 contextID, Cm::SpatialVectorF* Z, Cm::SpatialVectorF* deltaV, const PxReal invLengthScale); static PxU32 setupSolverConstraintsTGS(const ArticulationSolverDesc& articDesc, PxReal dt, PxReal invDt, PxReal totalDt, PxU32& acCount, Cm::SpatialVectorF* Z); static void saveVelocity(FeatherstoneArticulation* articulation, Cm::SpatialVectorF* deltaV); static void saveVelocityTGS(FeatherstoneArticulation* articulation, PxReal invDtF32); static void updateBodies(const ArticulationSolverDesc& desc, Cm::SpatialVectorF* tempDeltaV, PxReal dt); static void updateBodiesTGS(const ArticulationSolverDesc& desc, Cm::SpatialVectorF* tempDeltaV, PxReal dt); static void updateBodies(FeatherstoneArticulation* articulation, Cm::SpatialVectorF* tempDeltaV, PxReal dt, bool integrateJointPosition); static void recordDeltaMotion(const ArticulationSolverDesc& desc, const PxReal dt, Cm::SpatialVectorF* deltaV, const PxReal totalInvDt); static void deltaMotionToMotionVelocity(const ArticulationSolverDesc& desc, PxReal invDt); void pxcFsApplyImpulse(PxU32 linkID, aos::Vec3V linear, aos::Vec3V angular, Cm::SpatialVectorF* Z, Cm::SpatialVectorF* deltaV); void pxcFsApplyImpulses(PxU32 linkID, const aos::Vec3V& linear, const aos::Vec3V& angular, PxU32 linkID2, const aos::Vec3V& linear2, const aos::Vec3V& angular2, Cm::SpatialVectorF* Z, Cm::SpatialVectorF* deltaV); void pxcFsApplyImpulses(Cm::SpatialVectorF* Z); Cm::SpatialVectorV 
pxcFsGetVelocity(PxU32 linkID); void pxcFsGetVelocities(PxU32 linkID, PxU32 linkID1, Cm::SpatialVectorV& v0, Cm::SpatialVectorV& v1); Cm::SpatialVectorV pxcFsGetVelocityTGS(PxU32 linkID); const PxTransform& getCurrentTransform(PxU32 linkID) const; const PxQuat& getDeltaQ(PxU32 linkID) const; //Applies a set of N impulses, all in local space and updates the links' motion and joint velocities void applyImpulses(Cm::SpatialVectorF* Z, Cm::SpatialVectorF* deltaV); void getDeltaV(Cm::SpatialVectorF* Z, Cm::SpatialVectorF* deltaV); //This method calculate the velocity change due to collision/constraint impulse, record joint velocity and acceleration static Cm::SpatialVectorF propagateVelocityW(const PxVec3& c2p, const Dy::SpatialMatrix& spatialInertia, const InvStIs& invStIs, const Cm::UnAlignedSpatialVector* motionMatrix, const Cm::SpatialVectorF& Z, PxReal* jointVelocity, const Cm::SpatialVectorF& hDeltaV, const PxU32 dofCount); static Cm::SpatialVectorF propagateAccelerationW(const PxVec3& c2p, const InvStIs& invStIs, const Cm::UnAlignedSpatialVector* motionMatrix, PxReal* jointVelocity, const Cm::SpatialVectorF& pAcceleration, const PxU32 dofCount, const Cm::SpatialVectorF* IsW, PxReal* qstZIc); static void propagateAccelerationW(const PxVec3& c2p, const InvStIs& invStIs, PxReal* jointVelocity, const Cm::SpatialVectorF& pAcceleration, const PxU32 dofCount, const Cm::SpatialVectorF* IsW); static Cm::SpatialVectorF propagateAccelerationW(const PxVec3& c2p, const InvStIs& invStIs, const Cm::UnAlignedSpatialVector* motionMatrix, const Cm::SpatialVectorF& pAcceleration, const PxU32 dofCount, const Cm::SpatialVectorF* IsW, PxReal* qstZIc); static Cm::SpatialVectorF propagateAccelerationW(const PxVec3& c2p, const InvStIs& invStIs, const Cm::UnAlignedSpatialVector* motionMatrix, PxReal* jointVelocity, const Cm::SpatialVectorF& pAcceleration, Cm::SpatialVectorF& Z, const PxU32 dofCount, const Cm::SpatialVectorF* IsW); //This method calculate the velocity change due to 
collision/constraint impulse static Cm::SpatialVectorF propagateVelocityTestImpulseW(const PxVec3& c2p, const Dy::SpatialMatrix& spatialInertia, const InvStIs& invStIs, const Cm::UnAlignedSpatialVector* motionMatrix, const Cm::SpatialVectorF& Z, const Cm::SpatialVectorF& hDeltaV, const PxU32 dofCount); /** \brief Propagate a spatial impulse applied to a child link to its parent link. The Mirtich equivalent is the equation for Y in Figure 5.7, page 141. Optionally accumulate -s^T*Y for each dof of the child link's incoming joint. Y = translateChildToParent{[ 1 - [(I * s) / (s^T * I * s)] * s^T] * Y} \param[in] childToParent is the vector from child link to parent link \param[in] linkYW is the impulse to apply to the child link. \param[in] jointDofISInvStISW is (I * s) / (s^T * I * s) with one entry for each dof of the child link's incoming joint. \param[in] jointDofMotionMatrixW is the motion matrix s with one entry for each dof of the child link's incoming joint. \param[in] dofCount is the number of dofs of the child link's incoming joint. \param[in,out] jointDofQStY accumulates -s^T*Y for each dof of the child link's incoming joint. \note jointDofQStY may be NULL if there is no need to accumulate and record -s^T*Y for each dof of the child link's incoming joint. \return The propagated spatial impulse. 
*/ static Cm::SpatialVectorF propagateImpulseW( const PxVec3& childToParent, const Cm::SpatialVectorF& linkYW, const Cm::SpatialVectorF* jointDofISInvStISW, const Cm::UnAlignedSpatialVector* jointDofMotionMatrixW, const PxU8 dofCount, PxReal* jointDofQStY = NULL); bool applyCacheToDest(ArticulationData& data, PxArticulationCache& cache, PxReal* jVelocities, PxReal* jPosition, PxReal* jointForce, PxReal* jTargetPositions, PxReal* jTargetVelocities, const PxArticulationCacheFlags flag, bool& shouldWake); PX_FORCE_INLINE ArticulationData& getArticulationData() { return mArticulationData; } PX_FORCE_INLINE const ArticulationData& getArticulationData() const { return mArticulationData; } PX_FORCE_INLINE void setGpuDirtyFlag(ArticulationDirtyFlag::Enum flag) { mGPUDirtyFlags |= flag; } //void setGpuRemapId(const PxU32 id) { mGpuRemapId = id; } //PxU32 getGpuRemapId() { return mGpuRemapId; } static PX_CUDA_CALLABLE PX_FORCE_INLINE Cm::SpatialVectorF translateSpatialVector(const PxVec3& offset, const Cm::SpatialVectorF& vec) { return Cm::SpatialVectorF(vec.top, vec.bottom + offset.cross(vec.top)); } static PX_CUDA_CALLABLE PX_FORCE_INLINE Cm::UnAlignedSpatialVector translateSpatialVector(const PxVec3& offset, const Cm::UnAlignedSpatialVector& vec) { return Cm::UnAlignedSpatialVector(vec.top, vec.bottom + offset.cross(vec.top)); } static PX_FORCE_INLINE PxMat33 constructSkewSymmetricMatrix(const PxVec3 r) { return PxMat33(PxVec3(0.0f, r.z, -r.y), PxVec3(-r.z, 0.0f, r.x), PxVec3(r.y, -r.x, 0.0f)); } bool raiseGPUDirtyFlag(ArticulationDirtyFlag::Enum flag) { bool nothingRaised = !(mGPUDirtyFlags); mGPUDirtyFlags |= flag; return nothingRaised; } void clearGPUDirtyFlags() { mGPUDirtyFlags = 0; } public: void constraintPrep(ArticulationLoopConstraint* lConstraints, const PxU32 nbJoints, Cm::SpatialVectorF* Z, PxSolverConstraintPrepDesc& prepDesc, PxSolverBody& sBody, PxSolverBodyData& sBodyData, PxSolverConstraintDesc* desc, PxConstraintAllocator& allocator); void 
updateArticulation(const PxVec3& gravity, const PxReal invLengthScale); void computeUnconstrainedVelocitiesInternal( const PxVec3& gravity, Cm::SpatialVectorF* Z, Cm::SpatialVectorF* DeltaV, const PxReal invLengthScale); //copy joint data from fromJointData to toJointData void copyJointData(const ArticulationData& data, PxReal* toJointData, const PxReal* fromJointData); PxU32 computeDofs(); //this function calculates motion subspace matrix(s) for all tree joint template<bool immediateMode = false> void jcalc(ArticulationData& data); //this function calculates loop joint constraint subspace matrix(s) and active force //subspace matrix void jcalcLoopJointSubspace(ArticulationJointCore* joint, ArticulationJointCoreData& jointDatum, SpatialSubspaceMatrix& T, const Cm::UnAlignedSpatialVector* jointAxis); void computeSpatialInertia(ArticulationData& data); //compute zero acceleration force void computeZ(const ArticulationData& data, const PxVec3& gravity, ScratchData& scratchData); void computeZD(const ArticulationData& data, const PxVec3& gravity, ScratchData& scratchData); void solveInternalConstraints(const PxReal dt, const PxReal invDt, Cm::SpatialVectorF* impulses, Cm::SpatialVectorF* DeltaV, bool velocityIteration, bool isTGS, const PxReal elapsedTime, const PxReal biasCoefficient); void solveInternalJointConstraints(const PxReal dt, const PxReal invDt, Cm::SpatialVectorF* impulses, Cm::SpatialVectorF* DeltaV, bool velocityIteration, bool isTGS, const PxReal elapsedTime, const PxReal biasCoefficient); Cm::SpatialVectorF solveInternalJointConstraintRecursive(InternalConstraintSolverData& data, const PxU32 linkID, const Cm::SpatialVectorF& parentDeltaV, const bool isTGS, const bool isVelIter); void solveInternalSpatialTendonConstraints(bool isTGS); void solveInternalFixedTendonConstraints(bool isTGS); void writebackInternalConstraints(bool isTGS); void concludeInternalConstraints(bool isTGS); //compute coriolis force void computeC(ArticulationData& data, ScratchData& 
scratchData); //compute relative transform child to parent /** \brief a) copy the latest link pose to a handy array b) update the link separation vectors using the latest link poses. c) update the motion matrices in the world frame using the latest link poses. \param[in] links is an array of articulation links that contain the latest link poses. \param[in] linkCount is the number of links in the articulation \param[in] jointCoreDatas is an array of joint descriptions \param[in] jointDofMotionMatrices is an array of motion matrices in the joint frame. \param[out] linkAccumulatedPoses is an array used to store the latest link poses taken from ArticulationLink::PxsBodyCore. \param[out] linkRws is an array of link separations. \param[out] jointDofmotionMatricesW is an array of motion matrices in the world frame. */ static void computeRelativeTransformC2P( const ArticulationLink* links, const PxU32 linkCount, const ArticulationJointCoreData* jointCoreDatas, const Cm::UnAlignedSpatialVector* jointDofMotionMatrices, PxTransform* linkAccumulatedPoses, PxVec3* linkRws, Cm::UnAlignedSpatialVector* jointDofmotionMatricesW); //compute relative transform child to base void computeRelativeTransformC2B(ArticulationData& data); void computeLinkVelocities(ArticulationData& data, ScratchData& scratchData); /** /brief Prepare links for the next timestep. \param[in] dt is the timestep of the current simulation step that will advance sim from t to t+dt. \param[in] invLengthScale is the reciprocal of the lengthscale used by the simulation. \param[in] gravity is the gravitational acceleration to apply to all links. \param[in] fixBase determines whether the root link is to be fixed to the world (true) or will move freely (false). \param[in] linkCount is the total number of links in the articulation \param[in] linkAccumulatedPosesW is the pose of each link, specified in the world frame. 
\param[in] linkExternalAccelsW is the external acceleration to apply to each link, specified in the world frame. \param[in] linkRsW is the vector from each parent link to each child link, specified in the world frame. \param[in] jointDofMotionMatricesW is the motion matrix of each dof, specified in the world frame. \param[in] jointCoreData is the ArticulationJointCoreData instance of each link in the articulation. \param[in,out] linkData is the ArticulationLinkData instance of each link in the articulation. \param[in,out] links is the ArticulationLink instance of each link in the articulation. \param[in,out] jointDofMotionAccelerations is the acceleration of each link, specified in the world frame. \param[out] jointDofMotionVelocities is velocity of each link computed from the parent link velocity and joint velocity of the inbound joint. Specified in the world frame. \param[out] linkZAForcesExtW is the computed spatial zero acceleration force of each link, accounting for only external forces applied to the links. Specified in the world frame. \param[out] linkZAForcesIntW is the computed spatial zero acceleration force of each link, accounting for only internal forces applied to the links. Specified in the world frame. \param[out] linkCoriolisVectorsW is the computed coriolis vector of each link. Specified in the world frame. \param[out] linkIsolatedArticulatedInertiasW is the inertia tensor (I) for the trivial sub-chain of each link. Specified in the world frame. \param[out] linkMasses is the mass of each link. \param[out] linkSpatialArticulatedInertiasW is the spatial matrix containing the inertia tensor I and the mass matrix M for the trivial sub-chain of each link. Specified in the world frame. \param[out] jointDofCount is the number of degrees of freedom for the entire articulation. \param[in,out] jointDofVelocities is the velocity of each degree of freedom. \param[out] rootPreMotionVelocityW is assigned the spatial velocity of the root link. 
\param[out] comW is the centre of mass of the assembly of links, specified in the world frame. \param[out] invSumMass is the reciprocal of the total mass of all links. \note invLengthScale should have value 1/100 for centimetres scale and 1/1 for metres scale. \note If fixBase is true, the root link is assigned zero velocity. If false, the root link inherits the velocity of the associated body core. \note If fixBase is true, the root link is assigned zero acceleration. If false, the acceleration is propagated from the previous simulation step. The acceleration of all other links is left unchanged. \note If fix base is true, the root link is assigned a zero coriolis vector. \note ArticulationLinkData::maxPenBias of each link inherits the value of the associated PxsBodyCore::maxPenBias. \note ArticulationLink::cfm of each link is assigned the value PxsBodyCore::cfmScale*invLengthScale, except for the root link. The root link is assigned a value of 0 if it is fixed to the world ie fixBase == true. \note The spatial zero acceleration force accounts for the external acceleration; the damping force arising from the velocity and from the velocity that will accumulate from the external acceleration; the scaling force that will bring velocity back to the maximum allowed velocity if velocity exceeds the maximum allowed. \note If the velocity of any degree of freedom exceeds the maximum velocity of the associated joint, the velocity of each degree of freedom will be scaled so that none exceeds the maximum. 
*/ static void computeLinkStates( const PxF32 dt, const PxReal invLengthScale, const PxVec3& gravity, const bool fixBase, const PxU32 linkCount, const PxTransform* linkAccumulatedPosesW, const Cm::SpatialVector* linkExternalAccelsW, const PxVec3* linkRsW, const Cm::UnAlignedSpatialVector* jointDofMotionMatricesW, const Dy::ArticulationJointCoreData* jointCoreData, Dy::ArticulationLinkData *linkData, Dy::ArticulationLink* links, Cm::SpatialVectorF* jointDofMotionAccelerations, Cm::SpatialVectorF* jointDofMotionVelocities, Cm::SpatialVectorF* linkZAForcesExtW, Cm::SpatialVectorF* linkZAForcesIntW, Cm::SpatialVectorF* linkCoriolisVectorsW, PxMat33* linkIsolatedArticulatedInertiasW, PxF32* linkMasses, Dy::SpatialMatrix* linkSpatialArticulatedInertiasW, const PxU32 jointDofCount, PxReal* jointDofVelocities, Cm::SpatialVectorF& rootPreMotionVelocityW, PxVec3& comW, PxF32& invSumMass); /** \brief Propagate articulated z.a. spatial force and articulated spatial inertia from parent link to child link. Repeated calls to computePropagateSpatialInertia_ZA_ZIc allow z.a. spatial force and articulated spatial inertia to be propagated from tip to root. The computation proceeds by considering a link/joint pair composed of a child link and its incoming joint. The articulated z.a. spatial force is split into an internal and external part. Gravity is added to the external part, while user-applied external joint forces are added to the internal part. \note Reference maths can be found in Eq 4.29 in Mirtich thesis. \note Mirtich works in the joint frame while every quantity here is in the world frame. \note linkArticulatedInertia has equivalent I_i^A in Mirtich \note jointMotionMatrix has equivalent s_i and its transpose is s_i^T. \param[in] jointType is the type of joint \param[in] nbJointDofs is the number of dofs supported by the joint. \param[in] jointMotionMatricesW is an array of motion matrices with one entry per dof. 
\param[in] jointISW is a cached term linkArticulatedInertia*jointDofMotionMatrix with one entry per dof. \param[in] jointTargetArmatures is an array of armature values with one entry per dof. \param[in] jointExternalForces is an array of user-applied external forces applied to the joint wtih one entry per dof. \param[in] linkArticulatedInertiaW is the articulated inertia of the link. \param[in] linkZExtW is the external articulated z.a. force of the link. \param[in] linkZIntIcW is the sum of the internal z.a force of the link and linkArticulatedInertia*coriolisForce \param[out] linkInvStISW will be computed as 1/[jointMotionMatrix^T * linkArticulatedInertia * jointMotionMatrix] \param[out] jointDofISInvStISW will be computed as linkArticulatedInertia*jointMotionMatrix^T/[jointMotionMatrix^T * linkArticulatedInertia * jointMotionMatrix] \param[out] jointDofMinusStZExtW will be computed as [-jointMotionMatrix^T * ZExt] \param[out] jointDofQStZIntIcW will be computed as [jointForce - jointMotionMatrix^T *ZIntIc] \param[out] deltaZAExtParent is a term that is to be translated to parent link and added to the ZExt value of the parent link. \param[out] deltaZAIntIcParent is a term that is to be translated to parent link and added to the ZInt value of the parent link. \return A term to be translated to parent link and added to the articulated inertia of the parent. 
*/ static SpatialMatrix computePropagateSpatialInertia_ZA_ZIc (const PxArticulationJointType::Enum jointType, const PxU8 nbJointDofs, const Cm::UnAlignedSpatialVector* jointMotionMatricesW, const Cm::SpatialVectorF* jointISW, const PxReal* jointTargetArmatures, const PxReal* jointExternalForces, const SpatialMatrix& linkArticulatedInertiaW, const Cm::SpatialVectorF& linkZExtW, const Cm::SpatialVectorF& linkZIntIcW, InvStIs& linkInvStISW, Cm::SpatialVectorF* jointDofISInvStISW, PxReal* jointDofMinusStZExtW, PxReal* jointDofQStZIntIcW, Cm::SpatialVectorF& deltaZAExtParent, Cm::SpatialVectorF& deltaZAIntIcParent); /** \brief Propagate articulated z.a. spatial force and articulated spatial inertia from child link to parent link. Repeated calls to computePropagateSpatialInertia_ZA_ZIc allow z.a. spatial force and articulated spatial inertia to be propagated from tip to root. The computation proceeds by considering a link/joint pair composed of a child link and its incoming joint. \note Reference maths can be found in Eq 4.29 in Mirtich thesis. \note Mirtich works in the joint frame while every quantity here is in the world frame. \note linkArticulatedInertia has equivalent I_i^A in Mirtich \note jointMotionMatrix has equivalent s_i \param[in] jointType is the type of joint \param[in] nbJointDofs is the number of dofs supported by the joint. \param[in] jointMotionMatrices is an array of motion matrices with one entry per dof. \param[in] jointIs is a cached term linkArticulatedInertia*jointDofMotionMatrix with one entry per dof. \param[in] jointTargetArmatures is an array of armature values with one entry per dof. \param[in] jointExternalForces is an array of user-applied external forces applied to the joint with one entry per dof. \param[in] linkArticulatedInertia is the articulated inertia of the link. \param[in] ZIc is the sum of the z.a force of the link and linkArticulatedInertia*coriolisForce. 
\param[out] invStIs will be computed as 1/[jointMotionMatrix^T * linkArticulatedInertia * jointMotionMatrix] \param[out] isInvD will be computed as linkArticulatedInertia*jointMotionMatrix^T/[jointMotionMatrix^T * linkArticulatedInertia * jointMotionMatrix] \param[out] qstZIc will be computed as [jointForce - jointMotionMatrix^T *ZIc]/[jointMotionMatrix^T * linkArticulatedInertia * jointMotionMatrix] \param[out] deltaZParent is a term that is to be translated to parent link and added to the articulated z.a force of the parent link. \return A term to be translated to parent link and added to the articulated inertia of the parent. */ static SpatialMatrix computePropagateSpatialInertia_ZA_ZIc_NonSeparated (const PxArticulationJointType::Enum jointType, const PxU8 nbJointDofs, const Cm::UnAlignedSpatialVector* jointMotionMatrices, const Cm::SpatialVectorF* jointIs, const PxReal* jointTargetArmatures, const PxReal* jointExternalForces, const SpatialMatrix& linkArticulatedInertia, const Cm::SpatialVectorF& ZIc, InvStIs& invStIs, Cm::SpatialVectorF* isInvD, PxReal* qstZIc, Cm::SpatialVectorF& deltaZParent); /* \brief Propagate articulated spatial inertia (but not the articulated z.a. spatial force) from child link to parent link. Repeated calls to computePropagateSpatialInertia allow the articulated spatial inertia to be propagated from tip to root. The computation proceeds by considering a link/joint pair composed of a child link and its incoming joint. \param[in] jointType is the type of joint \param[in] nbJointDofs is the number of dofs supported by the joint. \param[in] linkArticulatedInertia is the articulated inertia of the link. \param[in] jointMotionMatrices is an array of motion matrices with one entry per dof. \param[in] jointIs is a cached term linkArticulatedInertia*jointDofMotionMatrix with one entry per dof. 
\param[out] invStIs will be computed as 1/[jointMotionMatrix^T * linkArticulatedInertia * jointMotionMatrix] \param[out] isInvD will be computed as linkArticulatedInertia*jointMotionMatrix^T/[jointMotionMatrix^T * linkArticulatedInertia * jointMotionMatrix] \return A term to be translated to parent link and added to the articulated inertia of the parent. */ static SpatialMatrix computePropagateSpatialInertia( const PxArticulationJointType::Enum jointType, const PxU8 nbDofs, const SpatialMatrix& linkArticulatedInertia, const Cm::UnAlignedSpatialVector* jointMotionMatrices, const Cm::SpatialVectorF* jointIs, InvStIs& invStIs, Cm::SpatialVectorF* isInvD); static void transformInertia(const SpatialTransform& sTod, SpatialMatrix& inertia); static void translateInertia(const PxMat33& offset, SpatialMatrix& inertia); static PxMat33 translateInertia(const PxMat33& inertia, const PxReal mass, const PxVec3& t); /* \brief Propagate articulated spatial inertia and articulated z.a. spatial force from tip to root. \param[in] links is an array of articulation links with size denoted by linkCount. \param[in] linkCount is the number of articulation links. \param[in] linkRsW is an array of link separations in the world frame with one entry per link. \param[in] jointData is an array of joint descriptions with one entry per joint. \param[in] jointDofMotionMatricesW ins an array of motion matrices in the world frame with one entry per dof. \param[in] linkCoriolisVectorsW is an array fo coriolis terms with one entry per link. \param[in] jointForces is an array forces to be applied to joints with one entry per dof. \param[out] jointDofIsW is a cached term linkArticulatedInertia*jointDofMotionMatrix to be computed with one entry per dof. \param[out] linkInvStIsW will be computed as 1/[jointMotionMatrix^T * linkArticulatedInertia * jointMotionMatrix] with one entry per link. 
\param[out] jointDofISInvStIS will be computed as linkArticulatedInertia*jointMotionMatrix^T/[jointMotionMatrix^T * linkArticulatedInertia * jointMotionMatrix] with one entry per dof. \param[out] joIntDofMinusStZExtW will be computed as [-jointMotionMatrix^T * ZExt] with one entry per dof. \param[out] jointDofQStZIntIcW will be computed as [jointForce - jointMotionMatrix^T *ZIntIc] with one entry per dof. \param[in,out] linkZAExtForcsW is the articulated z.a spatial force of each link arising from external forces. \param[in,out] linkZAIntForcesW is the articulated z.a spatial force of each link arising from internal forces. \param[in,out] linkSpatialArticulatedInertiaW is the articulated spatial inertia of each link. \param[out] baseInvSpatialArticulatedInertiaW is the inverse of the articulated spatial inertia of the root link. */ static void computeArticulatedSpatialInertiaAndZ (const ArticulationLink* links, const PxU32 linkCount, const PxVec3* linkRsW, const ArticulationJointCoreData* jointData, const Cm::UnAlignedSpatialVector* jointDofMotionMatricesW, const Cm::SpatialVectorF* linkCoriolisVectorsW, const PxReal* jointDofForces, Cm::SpatialVectorF* jointDofIsW, InvStIs* linkInvStIsW, Cm::SpatialVectorF* jointDofISInvStIS, PxReal* joIntDofMinusStZExtW, PxReal* jointDofQStZIntIcW, Cm::SpatialVectorF* linkZAExtForcesW, Cm::SpatialVectorF* linkZAIntForcesW, SpatialMatrix* linkSpatialArticulatedInertiaW, SpatialMatrix& baseInvSpatialArticulatedInertiaW); void computeArticulatedSpatialInertiaAndZ_NonSeparated(ArticulationData& data, ScratchData& scratchData); void computeArticulatedSpatialInertia(ArticulationData& data); /* \brief Compute the response matrix of each link of an articulation. \param[in] articulationFlags describes whether the articulation has a fixed base. \param[in] linkCount is the number of links in the articulation. \param[in] jointData is an array of joint descriptions with one entry per joint. 
\param[in] baseInvSpatialArticulatedInertiaW is the inverse of the articulated spatial inertia of the root link. \param[in] linkRsW is an array of link separations in the world frame with one entry per link. \param[in] jointDofMotionMatricesW is an array of motion matrices with one entry per dof. \param[in] jointDofISW is a cached term linkArticulatedInertia*jointDofMotionMatrix to be computed with one entry per dof. \param[in] linkInvStISW will be computed as 1/[jointMotionMatrix^T * linkArticulatedInertia * jointMotionMatrix] with one entry per link. \param[in] jointDofIsInvDW will be computed as linkArticulatedInertia*jointMotionMatrix^T/[jointMotionMatrix^T * linkArticulatedInertia * jointMotionMatrix] with one entry per dof. \param[out] links is an array of articulation links with one entry per link. The cfm value of each link will be updated. \param[out] linkResponsesW if an array of link responses with one entry per link. */ static void computeArticulatedResponseMatrix (const PxArticulationFlags& articulationFlags, const PxU32 linkCount, const ArticulationJointCoreData* jointData, const SpatialMatrix& baseInvArticulatedInertiaW, const PxVec3* linkRsW, const Cm::UnAlignedSpatialVector* jointDofMotionMatricesW, const Cm::SpatialVectorF* jointDofISW, const InvStIs* linkInvStISW, const Cm::SpatialVectorF* jointDofIsInvDW, ArticulationLink* links, SpatialImpulseResponseMatrix* linkResponsesW); void computeJointSpaceJacobians(ArticulationData& data); void computeArticulatedSpatialZ(ArticulationData& data, ScratchData& scratchData); /*void computeJointAcceleration(ArticulationLinkData& linkDatum, ArticulationJointCoreData& jointDatum, const Cm::SpatialVectorF& pMotionAcceleration, PxReal* jointAcceleration, const PxU32 linkID);*/ /** \brief Compute the joint acceleration \note Reference maths found in Eq 4.27 of Mirtich thesis. \param[in] pMotionAcceleration is the acceleration of the parent link already transformed into the (child) link frame. 
\param[in] jointDofISW is an array of cached terms linkArticulatedInertia*jointDofMotionMatrix with one entry per dof. \param[in] linkInvStISW is a cached term equivalent to 1/[jointMotionMatrix^T * linkArticulatedInertia * jointMotionMatrix] \param[in] jointDofQStZIcW is an array of cached terms equivlaent to [jointExternalForce - jointDofMotionMatrix^T * (zeroAccelSpatialForce + spatialInertia*coriolisForce] with one entry per dof. \param[out] jointAcceleration is an array of output joint dof accelerations equivalent to Eq 4.27 in Mirtich thesis. */ static void computeJointAccelerationW(const PxU8 nbJointDofs, const Cm::SpatialVectorF& pMotionAcceleration, const Cm::SpatialVectorF* jointDofISW, const InvStIs& linkInvStISW, const PxReal* jointDofQStZIcW, PxReal* jointAcceleration); //compute joint acceleration, joint velocity and link acceleration, velocity based //on spatial force and spatial articulated inertia tensor /** \brief Compute joint and link accelerations. Accelerations are computed by iterating from root to tip using the formulae in Mirtich Figure 4.8 to compute first the joint dof acceleration and then the link acceleration. The accelerations are used to forward integrate the link and joint velocities. This function may be used to determine either the effect of external forces only or the effect of the external and internal forces combined. The function may not be used to determine the effect of internal forces. For internal forces only use computeLinkInternalAcceleration(). If external forces only are to be considered then set doIC to false to avoid adding the Coriolis vector to the link acceleration. This is important because Coriolis forces are accounted as part of the update arising from internal forces. \param[in] doIC determines whether the link Coriolis force is added to the link acceleration. Set to false if considering external forces only and true if considering the combination of internal and external forces. 
\param[in] dt is the timestep used to accumulate joint/link velocities from joint/link accelerations. \param[in] fixBase describes whether the root of the articulation is fixed or free to rotate and translate. \param[in] links is an array of articulation links with one entry for each link. \param[in] linkCount is the number of links in the articulation. \param[in] jointDatas is an array of joint descriptions with one entry per joint. \param[in] linkSpatialZAForcesW is an array of spatial z.a. forces arising from the forces acting on each link with one entry for each link. linkSpatialZAForces will either be internal z.a forces or the sum of internal and external forces. \param[in] linkCoriolisForcesW is an array of coriolis forces with one entry for each link. \param[in] linkRsW is an array of link separations with one entry for each link. \param[in] jointDofMotionMatricesW is an array of motion matrices with one entry per joint dof. \param[in] baseInvSpatialArticulatedInertiaW is the inverse of the articulated spatial inertia of the root link. \param[in] linkInvStISW is 1/[jointMotionMatrix^T * linkArticulatedInertia * jointMotionMatrix] with one entry per link. \param[in] jointDofISW linkArticulatedInertia*jointMotionMatrix^T/ with one entry per joint dof. \param[in] jointDofQStZIcW has one of two forms: a) [-jointDofMotionMatrix^T * linkSpatialZAForceExternal] b) [jointDofForce - jointDofMotionMatrix^T*(linkSpatialZAForceTotal + linkSpatialInertia*linkCoriolisForce)] with one entry for each joint dof. \param[out] linkMotionAccelerationsW is an array of link accelerations with one entry per link. The link accelerations are computed using the formula in Figure 4.8 of the Mirtich thesis. \param[in,out] linkMotionVelocitiesW is an array of link velocities that are forward integrated by dt using the link accelerations. \param[out] jointDofAccelerations is an array of joint dof accelerations with one entry per link. 
The joint dof accelerations are computed using the formula in Figure 4.8 of the Mirtich thesis. \param[in,out] jointDofVelocities is an array of joint dof velocities that are forward integrated by dt using the joint dof accelerations. \param[out] jointDofNewVelocities is another array of joint dof velocities that are forward integrated by dt using the joint dof accelerations. \note If doIC is false then linkSpatialZAForces must be the external z.a. forces and jointDofQstZics must be [-jointDofMotionMatrix^T * linkSpatialZAForceExternal] \note If doIC is true then linkSpatialZAForces must be the internal z.a. forces and jointDofQstZics must be [jointDofForce - jointDofMotionMatrix^T*(linkSpatialZAForceTotal + linkSpatialInertia*linkCoriolisForce)] */ static void computeLinkAcceleration (const bool doIC, const PxReal dt, const bool fixBase, const ArticulationLink* links, const PxU32 linkCount, const ArticulationJointCoreData* jointDatas, const Cm::SpatialVectorF* linkSpatialZAForcesW, const Cm::SpatialVectorF* linkCoriolisForcesW, const PxVec3* linkRsW, const Cm::UnAlignedSpatialVector* jointDofMotionMatricesW, const SpatialMatrix& baseInvSpatialArticulatedInertiaW, const InvStIs* linkInvStISW, const Cm::SpatialVectorF* jointDofISW, const PxReal* jointDofQStZIcW, Cm::SpatialVectorF* linkMotionAccelerationsW, Cm::SpatialVectorF* linkMotionVelocitiesW, PxReal* jointDofAccelerations, PxReal* jointDofVelocities, PxReal* jointDofNewVelocities); /** \brief Compute joint and link accelerations arising from internal z.a. forces. Accelerations are computed by iterating from root to tip using the formulae in Mirtich Figure 4.8 to compute first the joint dof acceleration and then the link acceleration. The accelerations are used to forward integrate the link and joint velocities. The resulting link velocities are rescaled if any link violates the maximum allowed linear or angular velocity. 
\param[in] dt is the timestep used to accumulate joint/link velocities from joint/link accelerations. \param[in] fixBase describes whether the root of the articulation is fixed or free to rotate and translate. \param[in] comW is the centre of mass of the ensemble of links in the articulation. com is used only to enforce the max linear and angular velocity. \param[in] invSumMass is the inverse of the mass sum of the ensemble of links in the articulation. invSumMass is used only to enforce the max linear and angular velocity. \param[in] linkMaxLinearVelocity is the maximum allowed linear velocity of any link. The link linear velocities are rescaled to ensure none breaches the limit. \param[in] linkMaxAngularVelocity is the maximum allowed angular velocity of any link. The link angular velocities are rescaled to ensure none breaches the limit. \param[in] linkIsolatedSpatialArticulatedInertiasW is an array of link inertias. The link inertias are used only to enforce the max linear and angular velocity. \param[in] baseInvSpatialArticulatedInertiaW is the inverse of the articulated spatial inertia of the root link. \param[in] links is an array of articulation links with one entry for each link. \param[in] linkCount is the number of links in the articulation. \param[in] linkMasses is an array of link masses with one entry per link. \param[in] linkRsW is an array of link separations with one entry per link. \param[in] linkAccumulatedPosesW is an array of link poses with one entry per link. \param[in] linkSpatialZAIntForcesW is an array of spatial z.a. forces arising from internal forces only with one netry per link. \param[in] linkCoriolisVectorsW is an array of link Coriolis forces with one entry per link. \param[in] jointDatas is an array of joint descriptions with one entry per joint. \param[in] jointDofMotionMatricesW is an array of motion matrices with one entry per dof. 
\param[in] linkInvStISW is 1/[jointMotionMatrix^T * linkArticulatedInertia * jointMotionMatrix] with one entry per link. \param[in] jointDofISW linkArticulatedInertia*jointMotionMatrix^T with one entry per joint dof. \param[in] jointDoQStZIntIcW has form: [jointDofForce - jointDofMotionMatrix^T*(linkSpatialZAForceInternal + linkSpatialInertia*linkCoriolisForce)] with one entry for each joint dof. \param[in,out] linkMotionAccelerationsW accumulates with the computed acceleration arising from internal forces. \param[out] linkMotionAccelerationIntsW is the computed acceleration arising from internal forces. \param[in,out] jointDofVelocities is an array of joint dof velocities that are forward integrated by dt using the joint dof accelerations arising from internal forces. \param[out] jointDofNewVelocities is another array of joint dof velocities that are forward integrated by dt using the joint dof accelerations arising from internal forces. \note computeLinkInternalAcceleration must be called after computeLinkAcceleration to allow the effect of internal forces to be accumulated on top of external forces. 
*/ static void computeLinkInternalAcceleration (const PxReal dt, const bool fixBase, const PxVec3& comW, const PxReal invSumMass, const PxReal linkMaxLinearVelocity, const PxReal linkMaxAngularVelocity, const PxMat33* linkIsolatedSpatialArticulatedInertiasW, const SpatialMatrix& baseInvSpatialArticulatedInertiaW, const ArticulationLink* links, const PxU32 linkCount, const PxReal* linkMasses, const PxVec3* linkRsW, const PxTransform* linkAccumulatedPosesW, const Cm::SpatialVectorF* linkSpatialZAIntForcesW, const Cm::SpatialVectorF* linkCoriolisVectorsW, const ArticulationJointCoreData* jointDatas, const Cm::UnAlignedSpatialVector* jointDofMotionMatricesW, const InvStIs* linkInvStISW, const Cm::SpatialVectorF* jointDofISW, const PxReal* jointDoQStZIntIcW, Cm::SpatialVectorF* linkMotionAccelerationsW, Cm::SpatialVectorF* linkMotionAccelerationIntsW, Cm::SpatialVectorF* linkMotionVelocitiesW, PxReal* jointDofAccelerations, PxReal* jointDofInternalAccelerations, PxReal* jointDofVelocities, PxReal* jointDofNewVelocities); /** \brief For each link compute the incoming joint force in the joint frame. \param[in] linkCount is the number of links in the articulation \param[in] linkZAForcesExtW are the external spatial zero acceleration forces in the world frame with one entry per link. \param[in] linkZAForcesIntW are the internal spatial zero acceleration forces in the world frame with one entry per link. \param[in] linkMotionAccelerationsW are the spatial accelerations ion the world framewith one entry per link. \param[in] linkSpatialInertiasW are the spatial articulated inertias in the world frame with one entry per link. \param[out] linkIncomingJointForces are the incoming joint forces specified in the joint frame that are applied to each link. 
*/ static void computeLinkIncomingJointForce( const PxU32 linkCount, const Cm::SpatialVectorF* linkZAForcesExtW, const Cm::SpatialVectorF* linkZAForcesIntW, const Cm::SpatialVectorF* linkMotionAccelerationsW, const SpatialMatrix* linkSpatialInertiasW, Cm::SpatialVectorF* linkIncomingJointForces); //void computeTempLinkAcceleration(ArticulationData& data, ScratchData& scratchData); void computeJointTransmittedFrictionForce(ArticulationData& data, ScratchData& scratchData, Cm::SpatialVectorF* Z, Cm::SpatialVectorF* DeltaV); static Cm::SpatialVectorF getDeltaVWithDeltaJV(const bool fixBase, const PxU32 linkID, const ArticulationData& data, Cm::SpatialVectorF* Z, PxReal* jointVelocities); //impulse need to be in the linkID space static void getZ(const PxU32 linkID, const ArticulationData& data, Cm::SpatialVectorF* Z, const Cm::SpatialVectorF& impulse); //This method use in impulse self response. The input impulse is in the link space static Cm::SpatialVectorF getImpulseResponseW( const PxU32 linkID, const ArticulationData& data, const Cm::SpatialVectorF& impulse); //This method use in impulse self response. 
The input impulse is in the link space static Cm::SpatialVectorF getImpulseResponseWithJ( const PxU32 linkID, const bool fixBase, const ArticulationData& data, Cm::SpatialVectorF* Z, const Cm::SpatialVectorF& impulse, PxReal* jointVelocities); void getImpulseSelfResponseInv(const bool fixBase, PxU32 linkID0, PxU32 linkID1, Cm::SpatialVectorF* Z, const Cm::SpatialVector& impulse0, const Cm::SpatialVector& impulse1, Cm::SpatialVector& deltaV0, Cm::SpatialVector& deltaV1, PxReal* jointVelocities); void getImpulseResponseSlowInv(Dy::ArticulationLink* links, const ArticulationData& data, PxU32 linkID0_, const Cm::SpatialVector& impulse0, Cm::SpatialVector& deltaV0, PxU32 linkID1_, const Cm::SpatialVector& impulse1, Cm::SpatialVector& deltaV1, PxReal* jointVelocities, Cm::SpatialVectorF* Z); Cm::SpatialVectorF getImpulseResponseInv(const bool fixBase, const PxU32 linkID, Cm::SpatialVectorF* Z, const Cm::SpatialVector& impulse, PxReal* jointVelocites); void inverseDynamic(ArticulationData& data, const PxVec3& gravity, ScratchData& scratchData, bool computeCoriolis); void inverseDynamicFloatingBase(ArticulationData& data, const PxVec3& gravity, ScratchData& scratchData, bool computeCoriolis); //compute link body force with motion velocity and acceleration void computeZAForceInv(ArticulationData& data, ScratchData& scratchData); void initCompositeSpatialInertia(ArticulationData& data, Dy::SpatialMatrix* compositeSpatialInertia); void computeCompositeSpatialInertiaAndZAForceInv(ArticulationData& data, ScratchData& scratchData); void computeRelativeGeneralizedForceInv(ArticulationData& data, ScratchData& scratchData); //provided joint velocity and joint acceleartion, compute link acceleration void computeLinkAccelerationInv(ArticulationData& data, ScratchData& scratchData); void computeGeneralizedForceInv(ArticulationData& data, ScratchData& scratchData); void calculateMassMatrixColInv(ScratchData& scratchData); void calculateHFixBase(PxArticulationCache& cache); void 
calculateHFloatingBase(PxArticulationCache& cache); //joint limits void enforcePrismaticLimits(PxReal& jPosition, ArticulationJointCore* joint); public: PX_FORCE_INLINE void addBody() { mAcceleration.pushBack(Cm::SpatialVector(PxVec3(0.f), PxVec3(0.f))); mUpdateSolverData = true; } PX_FORCE_INLINE void removeBody() { mUpdateSolverData = true; } PX_FORCE_INLINE bool updateSolverData() { return mUpdateSolverData; } PX_FORCE_INLINE PxU32 getMaxDepth() const { return mMaxDepth; } PX_FORCE_INLINE void setMaxDepth(const PxU32 depth) { mMaxDepth = depth; } // solver methods PX_FORCE_INLINE PxU32 getBodyCount() const { return mSolverDesc.linkCount; } PX_FORCE_INLINE void getSolverDesc(ArticulationSolverDesc& d) const { d = mSolverDesc; } PX_FORCE_INLINE ArticulationSolverDesc& getSolverDesc() { return mSolverDesc; } PX_FORCE_INLINE ArticulationCore* getCore() { return mSolverDesc.core; } PX_FORCE_INLINE PxU16 getIterationCounts() const { return mSolverDesc.core->solverIterationCounts; } PX_FORCE_INLINE void* getUserData() const { return mUserData; } PX_FORCE_INLINE void setDyContext(Dy::Context* context) { mContext = context; } void setupLinks(PxU32 nbLinks, Dy::ArticulationLink* links); void allocatePathToRootElements(const PxU32 totalPathToRootElements); void initPathToRoot(); static void getImpulseSelfResponse(ArticulationLink* links, Cm::SpatialVectorF* Z, ArticulationData& data, PxU32 linkID0, const Cm::SpatialVectorV& impulse0, Cm::SpatialVectorV& deltaV0, PxU32 linkID1, const Cm::SpatialVectorV& impulse1, Cm::SpatialVectorV& deltaV1); static void getImpulseResponseSlow(Dy::ArticulationLink* links, ArticulationData& data, PxU32 linkID0_, const Cm::SpatialVector& impulse0, Cm::SpatialVector& deltaV0, PxU32 linkID1_, const Cm::SpatialVector& impulse1, Cm::SpatialVector& deltaV1, Cm::SpatialVectorF* Z); PxU32 setupSolverConstraints( ArticulationLink* links, const PxU32 linkCount, const bool fixBase, ArticulationData& data, Cm::SpatialVectorF* Z, PxU32& acCount); void 
setupInternalConstraints( ArticulationLink* links, const PxU32 linkCount, const bool fixBase, ArticulationData& data, Cm::SpatialVectorF* Z, PxReal stepDt, PxReal dt, PxReal invDt, bool isTGSSolver); void setupInternalConstraintsRecursive( ArticulationLink* links, const PxU32 linkCount, const bool fixBase, ArticulationData& data, Cm::SpatialVectorF* Z, const PxReal stepDt, const PxReal dt, const PxReal invDt, const bool isTGSSolver, const PxU32 linkID, const PxReal maxForceScale); void setupInternalSpatialTendonConstraintsRecursive( ArticulationLink* links, ArticulationAttachment* attachments, const PxU32 attachmentCount, const PxVec3& parentAttachmentPoint, const bool fixBase, ArticulationData& data, Cm::SpatialVectorF* Z, const PxReal stepDt, const bool isTGSSolver, const PxU32 attachmentID, const PxReal stiffness, const PxReal damping, const PxReal limitStiffness, const PxReal err, const PxU32 startLink, const PxVec3& startAxis, const PxVec3& startRaXn); void setupInternalFixedTendonConstraintsRecursive( ArticulationLink* links, ArticulationTendonJoint* tendonJoints, const bool fixBase, ArticulationData& data, Cm::SpatialVectorF* Z, const PxReal stepDt, const bool isTGSSolver, const PxU32 tendonJointID, const PxReal stiffness, const PxReal damping, const PxReal limitStiffness, const PxU32 startLink, const PxVec3& startAxis, const PxVec3& startRaXn); void updateSpatialTendonConstraintsRecursive(ArticulationAttachment* attachments, ArticulationData& data, const PxU32 attachmentID, const PxReal accumErr, const PxVec3& parentAttachmentPoint); //void updateFixedTendonConstraintsRecursive(ArticulationLink* links, ArticulationTendonJoint* tendonJoint, ArticulationData& data, const PxU32 tendonJointID, const PxReal accumErr); PxVec3 calculateFixedTendonVelocityAndPositionRecursive(FixedTendonSolveData& solveData, const Cm::SpatialVectorF& parentV, const Cm::SpatialVectorF& parentDeltaV, const PxU32 tendonJointID); Cm::SpatialVectorF 
solveFixedTendonConstraintsRecursive(FixedTendonSolveData& solveData, const PxU32 tendonJointID); void prepareStaticConstraints(const PxReal dt, const PxReal invDt, PxsContactManagerOutputIterator& outputs, Dy::ThreadContext& threadContext, PxReal correlationDist, PxReal bounceThreshold, PxReal frictionOffsetThreshold, PxReal ccdMaxSeparation, PxSolverBodyData* solverBodyData, PxsConstraintBlockManager& blockManager, Dy::ConstraintWriteback* constraintWritebackPool); void prepareStaticConstraintsTGS(const PxReal stepDt, const PxReal totalDt, const PxReal invStepDt, const PxReal invTotalDt, PxsContactManagerOutputIterator& outputs, Dy::ThreadContext& threadContext, PxReal correlationDist, PxReal bounceThreshold, PxReal frictionOffsetThreshold, PxTGSSolverBodyData* solverBodyData, PxTGSSolverBodyTxInertia* txInertia, PxsConstraintBlockManager& blockManager, Dy::ConstraintWriteback* constraintWritebackPool, const PxReal biasCoefficient, const PxReal lengthScale); //integration void propagateLinksDown(ArticulationData& data, PxReal* jointVelocities, PxReal* jointPositions, Cm::SpatialVectorF* motionVelocities); void updateJointProperties( PxReal* jointNewVelocities, PxReal* jointVelocities, PxReal* jointAccelerations); void computeAndEnforceJointPositions(ArticulationData& data); //update link position based on joint position provided by the cache void teleportLinks(ArticulationData& data); void computeLinkVelocities(ArticulationData& data); PxU8* allocateScratchSpatialData(PxcScratchAllocator* allocator, const PxU32 linkCount, ScratchData& scratchData, bool fallBackToHeap = false); //This method calculate the velocity change from parent to child using parent current motion velocity PxTransform propagateTransform(const PxU32 linkID, ArticulationLink* links, ArticulationJointCoreData& jointDatum, Cm::SpatialVectorF* motionVelocities, const PxReal dt, const PxTransform& pBody2World, const PxTransform& currentTransform, PxReal* jointVelocity, PxReal* jointPosition, const 
Cm::UnAlignedSpatialVector* motionMatrix, const Cm::UnAlignedSpatialVector* worldMotionMatrix); static void updateRootBody(const Cm::SpatialVectorF& motionVelocity, const PxTransform& preTransform, ArticulationData& data, const PxReal dt); //These variables are used in the constraint partition PxU16 maxSolverFrictionProgress; PxU16 maxSolverNormalProgress; PxU32 solverProgress; PxU16 mArticulationIndex; PxU8 numTotalConstraints; void* mUserData; Dy::Context* mContext; ArticulationSolverDesc mSolverDesc; PxArray<Cm::SpatialVector> mAcceleration; // supplied by Sc-layer to feed into articulations bool mUpdateSolverData; PxU32 mMaxDepth; ArticulationData mArticulationData; PxArray<PxSolverConstraintDesc> mStaticContactConstraints; PxArray<PxSolverConstraintDesc> mStatic1DConstraints; PxU32 mGPUDirtyFlags; bool mJcalcDirty; } PX_ALIGN_SUFFIX(64); #if PX_VC #pragma warning(pop) #endif } //namespace Dy } #endif
85,250
C
51.786997
216
0.762545
NVIDIA-Omniverse/PhysX/physx/source/lowleveldynamics/include/DyHairSystem.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. 
#ifndef DY_HAIR_SYSTEM_H #define DY_HAIR_SYSTEM_H #include "DyHairSystemCore.h" #include "PxvGeometry.h" namespace physx { namespace Sc { class HairSystemSim; } namespace Dy { typedef size_t HairSystemHandle; // forward-declarations class Context; class HairSystem; struct HairSystemDirtyFlag { enum Enum { // Changes are processed from highest (most fundamental) to lowest to ensure correct dependencies eNONE = 0, //!> default, everything up-to-date ePARAMETERS = 1 << 0, //!> Parameters were changed eGRID_SIZE = 1 << 1 | ePARAMETERS, //!> Grid size was changed. sets ePARAMETERS because settings are stored there eRIGID_ATTACHMENTS = 1 << 2, //!> Rigid attachment was changed eSOFTBODY_ATTACHMENTS = 1 << 3, //!> Softbody attachments added or removed ePOSITIONS_VELOCITIES_MASS = 1 << 4, //!> Positions, velocities or masses changed eLOD_SWITCH = 1 << 5, //!> Level of detail was changed, update lodX pos/vel from lod0 eLOD_DATA = 1 << 6 | eLOD_SWITCH, //!> Definition of detail levels changed . Must come after setting any kind of rest positions. Triggers one-off initialization of levels eBENDING_REST_ANGLES = 1 << 7 | eLOD_DATA, //!> Bending rest angles were changed eTWISTING_REST_POSITIONS = 1 << 8 | eLOD_DATA, //!> Twisting rest positions were changed eREST_POSITIONS = 1 << 9 | eLOD_DATA, //!> Rest positions changed eSHAPE_MATCHING_SIZES = 1 << 10 | ePARAMETERS | eLOD_DATA, //!> Shape matching group size or overlap changed. 
sets ePARAMETERS because settings are stored there eSTRAND_LENGTHS = 1 << 11 | eLOD_DATA | eSHAPE_MATCHING_SIZES | eBENDING_REST_ANGLES | eTWISTING_REST_POSITIONS, //!> Topology of vertex arrangement was changed eNUM_STRANDS_OR_VERTS = 1 << 12 | eSHAPE_MATCHING_SIZES | ePOSITIONS_VELOCITIES_MASS | eBENDING_REST_ANGLES | eTWISTING_REST_POSITIONS | eREST_POSITIONS | eLOD_DATA | eSTRAND_LENGTHS, //!> Number of strands or vertices changed eALL = (1 << 13) - 1 //!> everything needs updating }; }; struct HairSystemSolverDesc { HairSystem* hairSystem; }; class HairSystem { PX_NOCOPY(HairSystem) public: HairSystem(Sc::HairSystemSim* sim, Dy::HairSystemCore& core) : mSim(sim) , mCore(core) , mShapeCore(NULL) , mElementId(0xffFFffFF) , mGpuRemapId(0xffFFffFF) { mCore.mDirtyFlags = HairSystemDirtyFlag::eALL; } ~HairSystem() {} PX_FORCE_INLINE void setShapeCore(PxsShapeCore* shapeCore) { mShapeCore = shapeCore; } PX_FORCE_INLINE PxU32 getGpuRemapId() const { return mGpuRemapId; } PX_FORCE_INLINE void setGpuRemapId(PxU32 remapId) { mGpuRemapId = remapId; PxHairSystemGeometryLL& geom = mShapeCore->mGeometry.get<PxHairSystemGeometryLL>(); geom.gpuRemapId = remapId; } PX_FORCE_INLINE PxU32 getElementId() const { return mElementId; } PX_FORCE_INLINE void setElementId(const PxU32 elementId) { mElementId = elementId; } PX_FORCE_INLINE Sc::HairSystemSim* getHairSystemSim() const { return mSim; } PX_FORCE_INLINE const HairSystemCore& getCore() const { return mCore; } PX_FORCE_INLINE HairSystemCore& getCore() { return mCore; } PX_FORCE_INLINE PxsShapeCore& getShapeCore() { return *mShapeCore; } PX_FORCE_INLINE PxU16 getIterationCounts() { return mCore.mSolverIterationCounts; } private: // variables Sc::HairSystemSim* mSim; HairSystemCore& mCore; PxsShapeCore* mShapeCore; PxU32 mElementId; //this is used for the bound array PxU32 mGpuRemapId; }; PX_FORCE_INLINE HairSystem* getHairSystem(HairSystemHandle handle) { return reinterpret_cast<HairSystem*>(handle); } } } #endif
5,346
C
37.192857
180
0.708754
NVIDIA-Omniverse/PhysX/physx/source/lowleveldynamics/include/DyParticleSystem.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. 
#ifndef PXD_PARTICLESYSTEM_H #define PXD_PARTICLESYSTEM_H #include "foundation/PxSimpleTypes.h" #include "DyParticleSystemCore.h" #include "PxvGeometry.h" #define MAX_SPARSEGRID_DIM 1024 #define MIN_SPARSEGRID_ID -512 #define MAX_SPARSEGRID_ID 511 namespace physx { namespace Sc { class ParticleSystemSim; } namespace Dy { typedef size_t ParticleSystemHandle; class ParticleSystemCore; struct ParticleSystemFlag { enum Enum { eUPDATE_PARAMS = 1 << 1, eUPDATE_MATERIAL = 1 << 2, eUPDATE_PHASE = 1 << 3, eUPDATE_ACTIVE_PARTICLECOUNT = 1 << 4 }; }; class ParticleSystem { PX_NOCOPY(ParticleSystem) public: ParticleSystem(Dy::ParticleSystemCore& core) : mCore(core), mShapeCore(NULL), mElementId(0xffffffff), mGpuRemapId(0xffffffff) { mFlag = 0; } ~ParticleSystem() {} PX_FORCE_INLINE void setShapeCore(PxsShapeCore* shapeCore) { mShapeCore = shapeCore; } PX_FORCE_INLINE void setGpuRemapId(const PxU32 remapId) { mGpuRemapId = remapId; PxParticleSystemGeometryLL& geom = mShapeCore->mGeometry.get<PxParticleSystemGeometryLL>(); geom.materialsLL.gpuRemapId = remapId; } PX_FORCE_INLINE PxU32 getGpuRemapId() const { return mGpuRemapId; } PX_FORCE_INLINE void setElementId(const PxU32 elementId) { mElementId = elementId; } PX_FORCE_INLINE PxU32 getElementId() { return mElementId; } PX_FORCE_INLINE ParticleSystemCore& getCore() const { return mCore; } PX_FORCE_INLINE PxsShapeCore& getShapeCore() { return *mShapeCore; } PX_FORCE_INLINE PxU16 getIterationCounts() { return mCore.solverIterationCounts; } PxU32 mFlag; private: ParticleSystemCore& mCore; PxsShapeCore* mShapeCore; PxU32 mElementId; //this is used for the bound array PxU32 mGpuRemapId; }; struct ParticleSystemSolverDesc { ParticleSystem* particleSystem; }; PX_FORCE_INLINE ParticleSystem* getParticleSystem(ParticleSystemHandle handle) { return reinterpret_cast<ParticleSystem*>(handle); } } } #endif
3,682
C
28.701613
95
0.724606
NVIDIA-Omniverse/PhysX/physx/source/lowleveldynamics/include/DyHairSystemCore.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. #ifndef DY_HAIR_SYSTEM_CORE_H #define DY_HAIR_SYSTEM_CORE_H #include "PxAttachment.h" #include "PxHairSystemFlag.h" #include "PxNodeIndex.h" #include "foundation/PxArray.h" #include "foundation/PxSimpleTypes.h" #include "foundation/PxVec2.h" #include "foundation/PxVec4.h" namespace physx { namespace Dy { // These parameters are needed on GPU for simulation and are grouped in a struct // to reduce the number of assignments in update user data. 
struct HairSystemSimParameters { PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal getCellSize() const { return mSegmentLength + 2.0f * mSegmentRadius; } PX_CUDA_CALLABLE PX_FORCE_INLINE int getGridSize() const { return mGridSize[0] * mGridSize[1] * mGridSize[2]; } PxHairSystemFlags::InternalType mFlags; PxReal mSegmentLength; PxReal mSegmentRadius; PxReal mInterHairRepulsion; // strength of the repulsion field PxReal mInterHairVelocityDamping; // friction based on interpolated vel field PxReal mFrictionCoeff; // coulomb friction coefficient for collisions (internal and external) PxReal mMaxDepenetrationVelocity; // max velocity delta coming out of collision responses PxReal mAeroDrag; // the aerodynamic drag coefficient PxReal mAeroLift; // the aerodynamic lift coefficient PxReal mBendingCompliance; PxReal mTwistingCompliance; int mGridSize[3]; // number of cells in x,y,z directions PxVec2 mShapeCompliance; // compliance for shape matching PxReal mSelfCollisionContactDist; // contact distance for self collisions expressed as a multiple of the segment // radius PxReal mSelfCollisionRelaxation; PxReal mLraRelaxation; PxReal mShapeMatchingCompliance; PxReal mShapeMatchingBeta; // balance between rigid rotation and linear stretching PxU16 mShapeMatchingNumVertsPerGroup; PxU16 mShapeMatchingNumVertsOverlap; PxU32 mRestPositionTransformNumVertsPerStrand; // how many vertices of each strand to use for computing the rest // position targets for global shape preservation HairSystemSimParameters() : mFlags(0) , mSegmentLength(0.1f) , mSegmentRadius(0.02f) , mInterHairRepulsion(0.0f) , mInterHairVelocityDamping(0.03f) , mFrictionCoeff(0.0f) , mMaxDepenetrationVelocity(PX_MAX_F32) , mAeroDrag(0.0f) , mAeroLift(0.0f) , mBendingCompliance(-1.0f) , mTwistingCompliance(-1.0f) , mShapeCompliance(-1.0f) , mSelfCollisionContactDist(1.5f) , mSelfCollisionRelaxation(0.7f) , mLraRelaxation(1.0f) , mShapeMatchingCompliance(-1.0f) , mShapeMatchingBeta(0.0f) , mShapeMatchingNumVertsPerGroup(10) , 
mShapeMatchingNumVertsOverlap(5) , mRestPositionTransformNumVertsPerStrand(2) { // grid size must be powers of two mGridSize[0] = 32; mGridSize[1] = 64; mGridSize[0] = 32; } }; PX_ALIGN_PREFIX(16) struct SoftbodyHairAttachment { PxVec4 tetBarycentric; // must be aligned, is being loaded as float4 PxU32 tetId; PxU32 softbodyNodeIdx; PxU32 hairVtxIdx; PxReal constraintOffset; PxVec4 low_high_angle; PxVec4 attachmentBarycentric; } PX_ALIGN_SUFFIX(16); PX_COMPILE_TIME_ASSERT(sizeof(SoftbodyHairAttachment) % 16 == 0); struct HairSystemCore { public: PxU32 mDirtyFlags; PxHairSystemDataFlags mReadRequests; PxU32 mNumVertices; PxU32 mNumStrands; // Parameters HairSystemSimParameters mParams; PxU16 mSolverIterationCounts; PxVec4 mWind; // Topology data const PxU32* mStrandPastEndIndices; const PxReal* mBendingRestAngles; PxArray<PxU16> mMaterialhandles; // Buffers for simulation (device or pinned host mirrors) PxVec4* mPositionInvMass; PxVec4* mVelocity; // pointers to PxgHairSystemCore buffers that are exposed to the user PxU32* mStrandPastEndIndicesGpuSim; PxVec4* mPositionInvMassGpuSim; PxReal* mTwistingRestPositionsGpuSim; // rest positions PxVec4* mRestPositionsD; // Gpu buffer // Attachments to rigids PxParticleRigidAttachment* mRigidAttachments; // Gpu buffer PxU32 mNumRigidAttachments; // Attachments to softbodies SoftbodyHairAttachment* mSoftbodyAttachments; PxU32 mNumSoftbodyAttachments; // LOD data PxU32 mLodLevel; // the selected level, zero by default meaning full detail PxU32 mLodNumLevels; // number of levels excluding level zero const PxReal* mLodProportionOfStrands; const PxReal* mLodProportionOfVertices; // sleeping PxReal mSleepThreshold; PxReal mWakeCounter; void setMaterial(PxU16 materialhandle) { mMaterialhandles.pushBack(materialhandle); } void clearMaterials() { mMaterialhandles.clear(); } HairSystemCore() : mDirtyFlags(PX_MAX_U32) , mNumVertices(0) , mNumStrands(0) , mSolverIterationCounts(8) , mWind(0.0f) , mStrandPastEndIndices(NULL) , 
mBendingRestAngles(NULL) , mPositionInvMass(NULL) , mVelocity(NULL) , mStrandPastEndIndicesGpuSim(NULL) , mPositionInvMassGpuSim(NULL) , mTwistingRestPositionsGpuSim(NULL) , mRestPositionsD(NULL) , mRigidAttachments(NULL) , mNumRigidAttachments(0) , mSoftbodyAttachments(NULL) , mNumSoftbodyAttachments(0) , mLodLevel(0) , mLodNumLevels(0) , mLodProportionOfStrands(NULL) , mLodProportionOfVertices(NULL) , mSleepThreshold(0.1f) , mWakeCounter(1.0f) { } }; } // namespace Dy } // namespace physx #endif
6,817
C
32.258536
113
0.761185
NVIDIA-Omniverse/PhysX/physx/source/lowleveldynamics/include/DyThresholdTable.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef DY_THRESHOLD_TABLE_H
#define DY_THRESHOLD_TABLE_H

#include "foundation/PxPinnedArray.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxHash.h"
#include "foundation/PxMemory.h"
#include "PxNodeIndex.h"

namespace physx
{
class PxsRigidBody;

namespace Sc
{
	class ShapeInteraction;
}

namespace Dy
{

// One contact-force event recorded for a body pair whose contact force is
// tracked against a reporting threshold. Ordering/equality compare only the
// (nodeIndexA, nodeIndexB) pair, not the force values.
struct ThresholdStreamElement
{
	Sc::ShapeInteraction*	shapeInteraction;	//4/8		4/8
	PxReal					normalForce;		//4			8/12
	PxReal					threshold;			//4			12/16
	PxNodeIndex				nodeIndexA;			//8			24	This is the unique node index in island gen corresponding to that body and it is persistent 16 20
	PxNodeIndex				nodeIndexB;			//8			32	This is the unique node index in island gen corresponding to that body and it is persistent 20 24
	PxReal					accumulatedForce;	//4			36
	PxU32					pad;				//4			40

	// Lexicographic comparison on (nodeIndexA, nodeIndexB).
	PX_CUDA_CALLABLE bool operator <= (const ThresholdStreamElement& otherPair) const
	{
		return ((nodeIndexA < otherPair.nodeIndexA) ||(nodeIndexA == otherPair.nodeIndexA && nodeIndexB <= otherPair.nodeIndexB));
	}

	PX_CUDA_CALLABLE bool operator < (const ThresholdStreamElement& otherPair) const
	{
		return ((nodeIndexA < otherPair.nodeIndexA) || (nodeIndexA == otherPair.nodeIndexA && nodeIndexB < otherPair.nodeIndexB));
	}

	PX_CUDA_CALLABLE bool operator == (const ThresholdStreamElement& otherPair) const
	{
		return ((nodeIndexA == otherPair.nodeIndexA && nodeIndexB == otherPair.nodeIndexB));
	}
};

typedef PxPinnedArray<ThresholdStreamElement> ThresholdArray;

// Pinned-memory array of threshold elements allocated through the provided
// virtual-allocator callback.
class ThresholdStream : public ThresholdArray, public PxUserAllocated
{
public:
	ThresholdStream(PxVirtualAllocatorCallback& allocatorCallback) : ThresholdArray(PxVirtualAllocator(&allocatorCallback))
	{
	}
};

// Chained open-hash table built over a ThresholdStream: maps a body pair to
// its first stream entry and accumulates the forces of duplicate pairs.
// All storage lives in one allocation (mBuffer) carved into the pairs array
// (mPairs), the per-pair chain links (mNexts) and the bucket heads (mHash).
class ThresholdTable
{
public:

	ThresholdTable()
		: mBuffer(NULL),
		  mHash(NULL),
		  mHashSize(0),
		  mHashCapactiy(0),	// NOTE(review): member name typo ("Capactiy") kept — the field is publicly reachable, renaming could break external users
		  mPairs(NULL),
		  mNexts(NULL),
		  mPairsSize(0),
		  mPairsCapacity(0)
	{
	}

	~ThresholdTable()
	{
		PX_FREE(mBuffer);
	}

	// Rebuild the table from the given stream (defined inline below).
	void build(const ThresholdStream& stream);

	bool check(const ThresholdStream& stream, const PxU32 nodexIndexA, const PxU32 nodexIndexB, PxReal dt);

	// Look up the (nodeIndexA, nodeIndexB, shapeInteraction) triple of elem;
	// on a hit returns true and writes the stream index to thresholdIndex,
	// otherwise returns false and writes NO_INDEX.
	bool check(const ThresholdStream& stream, const ThresholdStreamElement& elem, PxU32& thresholdIndex);

//private:

	static const PxU32 NO_INDEX = 0xffffffff;

	// One unique body pair: index of its first stream element plus the force
	// accumulated over all duplicate stream entries for that pair.
	struct Pair
	{
		PxU32		thresholdStreamIndex;
		PxReal		accumulatedForce;
		//PxU32		next;		// hash key & next ptr
	};

	PxU8*			mBuffer;	// single allocation backing mPairs, mNexts and mHash

	PxU32*			mHash;
	PxU32			mHashSize;
	PxU32			mHashCapactiy;

	Pair*			mPairs;
	PxU32*			mNexts;
	PxU32			mPairsSize;
	PxU32			mPairsCapacity;
};

namespace
{
	// NOTE(review): anonymous namespace in a header gives every translation unit
	// its own copy of this helper; a named detail namespace would avoid that.
	// Hashes the packed 64-bit pair id into a bucket index.
	static PX_FORCE_INLINE PxU32 computeHashKey(const PxU32 nodeIndexA, const PxU32 nodeIndexB, const PxU32 hashCapacity)
	{
		return (PxComputeHash(PxU64(nodeIndexA)<<32 | PxU64(nodeIndexB)) % hashCapacity);
	}
}

inline bool ThresholdTable::check(const ThresholdStream& stream, const ThresholdStreamElement& elem, PxU32& thresholdIndex)
{
	PxU32* PX_RESTRICT hashes = mHash;
	PxU32* PX_RESTRICT nextIndices = mNexts;
	Pair* PX_RESTRICT pairs = mPairs;

	// Pairs are stored with the smaller node index first.
	PX_ASSERT(elem.nodeIndexA < elem.nodeIndexB);
	PxU32 hashKey = computeHashKey(elem.nodeIndexA.index(), elem.nodeIndexB.index(), mHashSize);

	// Walk the bucket's collision chain until the exact triple matches.
	PxU32 pairIndex = hashes[hashKey];
	while(NO_INDEX != pairIndex)
	{
		Pair& pair = pairs[pairIndex];
		const PxU32 thresholdStreamIndex = pair.thresholdStreamIndex;
		PX_ASSERT(thresholdStreamIndex < stream.size());
		const ThresholdStreamElement& otherElement = stream[thresholdStreamIndex];
		if(otherElement.nodeIndexA==elem.nodeIndexA && otherElement.nodeIndexB==elem.nodeIndexB && otherElement.shapeInteraction == elem.shapeInteraction)
		{
			thresholdIndex = thresholdStreamIndex;
			return true;
		}
		pairIndex = nextIndices[pairIndex];
	}

	thresholdIndex = NO_INDEX;
	return false;
}

inline void ThresholdTable::build(const ThresholdStream& stream)
{
	//Handle the case of an empty stream.
	if(0==stream.size())
	{
		mPairsSize=0;
		mPairsCapacity=0;
		mHashSize=0;
		mHashCapactiy=0;
		PX_FREE(mBuffer);	// presumably PX_FREE also nulls mBuffer, else a later build could double-free — TODO confirm macro semantics
		return;
	}

	//Realloc/resize if necessary.
	//Grow when too small, shrink when the stream drops below a quarter of capacity.
	const PxU32 pairsCapacity = stream.size();
	const PxU32 hashCapacity = pairsCapacity*2+1;
	if((pairsCapacity > mPairsCapacity) || (pairsCapacity < (mPairsCapacity >> 2)))
	{
		PX_FREE(mBuffer);
		//One allocation carved into: pairs | next-links | hash buckets.
		const PxU32 pairsByteSize = sizeof(Pair)*pairsCapacity;
		const PxU32 nextsByteSize = sizeof(PxU32)*pairsCapacity;
		const PxU32 hashByteSize = sizeof(PxU32)*hashCapacity;
		const PxU32 totalByteSize = pairsByteSize + nextsByteSize + hashByteSize;
		mBuffer = reinterpret_cast<PxU8*>(PX_ALLOC(totalByteSize, "PxThresholdStream"));

		PxU32 offset = 0;
		mPairs = reinterpret_cast<Pair*>(mBuffer + offset);
		offset += pairsByteSize;
		mNexts = reinterpret_cast<PxU32*>(mBuffer + offset);
		offset += nextsByteSize;
		mHash = reinterpret_cast<PxU32*>(mBuffer + offset);
		offset += hashByteSize;
		PX_ASSERT(totalByteSize == offset);

		mPairsCapacity = pairsCapacity;
		mHashCapactiy = hashCapacity;
	}

	//Set each entry of the hash table to 0xffffffff (every bucket starts as NO_INDEX).
	PxMemSet(mHash, 0xff, sizeof(PxU32)*hashCapacity);

	//Init the sizes of the pairs array and hash array.
	mPairsSize = 0;
	mHashSize = hashCapacity;

	PxU32* PX_RESTRICT hashes = mHash;
	PxU32* PX_RESTRICT nextIndices = mNexts;
	Pair* PX_RESTRICT pairs = mPairs;

	//Add all the pairs from the stream.
	PxU32 pairsSize = 0;
	for(PxU32 i = 0; i < pairsCapacity; i++)
	{
		const ThresholdStreamElement& element = stream[i];
		const PxNodeIndex nodeIndexA = element.nodeIndexA;
		const PxNodeIndex nodeIndexB = element.nodeIndexB;
		const PxF32 force = element.normalForce;
		PX_ASSERT(nodeIndexA < nodeIndexB);

		const PxU32 hashKey = computeHashKey(nodeIndexA.index(), nodeIndexB.index(), hashCapacity);

		//Get the index of the first pair found that resulted in a hash that matched hashKey.
		PxU32 prevPairIndex = hashKey;
		PxU32 pairIndex = hashes[hashKey];

		//Search through all pairs found that resulted in a hash that matched hashKey.
		//Search until the exact same body pair is found.
		//Increment the accumulated force if the exact same body pair is found.
		while(NO_INDEX != pairIndex)
		{
			Pair& pair = pairs[pairIndex];
			const PxU32 thresholdStreamIndex = pair.thresholdStreamIndex;
			PX_ASSERT(thresholdStreamIndex < stream.size());
			const ThresholdStreamElement& otherElement = stream[thresholdStreamIndex];
			if(nodeIndexA == otherElement.nodeIndexA && nodeIndexB==otherElement.nodeIndexB)
			{
				//Duplicate pair: fold the force in and use prevPairIndex==NO_INDEX
				//as the sentinel meaning "no new Pair entry must be created".
				pair.accumulatedForce += force;
				prevPairIndex = NO_INDEX;
				pairIndex = NO_INDEX;
				break;
			}
			prevPairIndex = pairIndex;
			pairIndex = nextIndices[pairIndex];
		}

		if(NO_INDEX != prevPairIndex)
		{
			//First occurrence of this pair: append a new Pair and link it at the
			//head of the bucket's chain.
			nextIndices[pairsSize] = hashes[hashKey];
			hashes[hashKey] = pairsSize;
			Pair& newPair = pairs[pairsSize];
			newPair.thresholdStreamIndex = i;
			newPair.accumulatedForce = force;
			pairsSize++;
		}
	}
	mPairsSize = pairsSize;
}

}
}

#endif
8,602
C
29.399293
149
0.740293
NVIDIA-Omniverse/PhysX/physx/source/lowleveldynamics/include/DyFeatherstoneArticulationJointData.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef DY_FEATHERSTONE_ARTICULATION_JOINT_DATA_H #define DY_FEATHERSTONE_ARTICULATION_JOINT_DATA_H #include "foundation/PxVec3.h" #include "foundation/PxQuat.h" #include "foundation/PxTransform.h" #include "foundation/PxVecMath.h" #include "CmUtils.h" #include "CmSpatialVector.h" #include "DyVArticulation.h" #include "DyFeatherstoneArticulationUtils.h" #include "DyArticulationJointCore.h" #include <stdio.h> namespace physx { namespace Dy { class ArticulationJointCoreData { public: ArticulationJointCoreData() : jointOffset(0xffffffff), dofInternalConstraintMask(0) { } PX_CUDA_CALLABLE PX_FORCE_INLINE PxU8 computeJointDofs(ArticulationJointCore* joint) const { PxU8 tDof = 0; for (PxU32 i = 0; i < DY_MAX_DOF; ++i) { if (joint->motion[i] != PxArticulationMotion::eLOCKED) { tDof++; } } return tDof; } PX_CUDA_CALLABLE PX_FORCE_INLINE void computeJointAxis(const ArticulationJointCore* joint, Cm::UnAlignedSpatialVector* jointAxis) { for (PxU32 i = 0; i < dof; ++i) { PxU32 ind = joint->dofIds[i]; Cm::UnAlignedSpatialVector axis = Cm::UnAlignedSpatialVector::Zero(); //axis is in the local space of joint axis[ind] = 1.f; jointAxis[i] = axis; } } PX_FORCE_INLINE PxU32 computeJointDof(ArticulationJointCore* joint, Cm::UnAlignedSpatialVector* jointAxis) { if (joint->jointDirtyFlag & ArticulationJointCoreDirtyFlag::eMOTION) { dof = 0; limitMask = 0; //KS - no need to zero memory here. 
//PxMemZero(jointAxis, sizeof(jointAxis)); for (PxU8 i = 0; i < DY_MAX_DOF; ++i) { if (joint->motion[i] != PxArticulationMotion::eLOCKED) { Cm::UnAlignedSpatialVector axis = Cm::UnAlignedSpatialVector::Zero(); //axis is in the local space of joint axis[i] = 1.f; jointAxis[dof] = axis; joint->invDofIds[i] = dof; joint->dofIds[dof] = i; if (joint->motion[i] == PxArticulationMotion::eLIMITED) limitMask |= 1 << dof; dof++; } } } return dof; } PX_FORCE_INLINE void setArmature(ArticulationJointCore* joint) { if (joint->jointDirtyFlag & ArticulationJointCoreDirtyFlag::eARMATURE) { for (PxU32 i = 0; i < dof; ++i) { PxU32 ind = joint->dofIds[i]; armature[i] = joint->armature[ind]; } joint->jointDirtyFlag &= ~ArticulationJointCoreDirtyFlag::eARMATURE; } } PxU32 jointOffset; //4 PxReal armature[3]; // indexed by internal dof id. //degree of freedom PxU8 dof; //1 PxU8 dofInternalConstraintMask; //1 PxU8 limitMask; //1 }; }//namespace Dy } #endif
4,452
C
29.087838
132
0.68239
NVIDIA-Omniverse/PhysX/physx/source/lowleveldynamics/include/DyArticulationTendon.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PXD_ARTICULATION_TENDON_H #define PXD_ARTICULATION_TENDON_H #include "foundation/PxVec3.h" #include "foundation/PxQuat.h" #include "foundation/PxTransform.h" #include "foundation/PxVecMath.h" #include "foundation/PxUtilities.h" #include "CmUtils.h" #include "CmIDPool.h" #include "solver/PxSolverDefs.h" namespace physx { namespace Dy { typedef PxU64 ArticulationAttachmentBitField; #define DY_ARTICULATION_ATTACHMENT_NONE 0xffffffff struct ArticulationAttachment { PxVec3 relativeOffset; //relative offset to the link PxReal lowLimit; PxReal highLimit; PxReal restLength; PxReal coefficient; PxU32 parent; //parent index PxU32 myInd; PxU32 mConstraintInd; PxU16 linkInd; PxU16 childCount; ArticulationAttachmentBitField children; }; class ArticulationTendon { public: ArticulationTendon() : mStiffness(0.f), mDamping(0.f), mOffset(0.f), mLimitStiffness(0.f) { } PxReal mStiffness; PxReal mDamping; PxReal mOffset; PxReal mLimitStiffness; }; class ArticulationSpatialTendon : public ArticulationTendon { public: ArticulationSpatialTendon() { mAttachments.reserve(64); mAttachments.forceSize_Unsafe(64); } PX_FORCE_INLINE ArticulationAttachment* getAttachments() { return mAttachments.begin(); } PX_FORCE_INLINE ArticulationAttachment& getAttachment(const PxU32 index) { return mAttachments[index]; } PX_FORCE_INLINE PxU32 getNumAttachments() { return mIDPool.getNumUsedID(); } PX_FORCE_INLINE PxU32 getNewID() { const PxU32 index = mIDPool.getNewID(); if (mAttachments.capacity() <= index) { mAttachments.resize(index * 2 + 1); } return index; } PX_FORCE_INLINE void freeID(const PxU32 index) { mIDPool.freeID(index); } PX_FORCE_INLINE PxU32 getTendonIndex() { return mIndex; } PX_FORCE_INLINE void setTendonIndex(const PxU32 index) { mIndex = index; } private: PxArray<ArticulationAttachment> mAttachments; Cm::IDPool mIDPool; PxU32 mIndex; }; class ArticulationTendonJoint { public: PxU16 axis; PxU16 startJointOffset; PxReal coefficient; PxReal recipCoefficient; PxU32 mConstraintInd; 
PxU32 parent; //parent index PxU16 linkInd; PxU16 childCount; ArticulationAttachmentBitField children; }; class ArticulationFixedTendon : public ArticulationTendon { public: ArticulationFixedTendon() :mLowLimit(PX_MAX_F32), mHighLimit(-PX_MAX_F32), mRestLength(0.f) { mTendonJoints.reserve(64); mTendonJoints.forceSize_Unsafe(64); } PX_FORCE_INLINE ArticulationTendonJoint* getTendonJoints() { return mTendonJoints.begin(); } PX_FORCE_INLINE ArticulationTendonJoint& getTendonJoint(const PxU32 index) { return mTendonJoints[index]; } PX_FORCE_INLINE PxU32 getNumJoints() { return mIDPool.getNumUsedID(); } PX_FORCE_INLINE PxU32 getNewID() { const PxU32 index = mIDPool.getNewID(); if (mTendonJoints.capacity() <= index) { mTendonJoints.resize(index * 2 + 1); } return index; } PX_FORCE_INLINE void freeID(const PxU32 index) { mIDPool.freeID(index); } PX_FORCE_INLINE PxU32 getTendonIndex() { return mIndex; } PX_FORCE_INLINE void setTendonIndex(const PxU32 index) { mIndex = index; } PxReal mLowLimit; PxReal mHighLimit; PxReal mRestLength; PxReal mError; private: PxArray<ArticulationTendonJoint> mTendonJoints; Cm::IDPool mIDPool; PxU32 mIndex; }; }//namespace Dy }//namespace physx #endif
5,281
C
26.367876
109
0.723727
NVIDIA-Omniverse/PhysX/physx/source/lowleveldynamics/include/DyArticulationCore.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef DY_ARTICULATION_CORE_H #define DY_ARTICULATION_CORE_H #include "PxArticulationReducedCoordinate.h" namespace physx { namespace Dy { struct ArticulationCore { // PX_SERIALIZATION ArticulationCore(const PxEMPTY) : flags(PxEmpty) {} ArticulationCore() {} //~PX_SERIALIZATION PxU16 solverIterationCounts; //KS - made a U16 so that it matches PxsRigidCore PxArticulationFlags flags; PxReal sleepThreshold; PxReal freezeThreshold; PxReal wakeCounter; PxU32 gpuRemapIndex; PxReal maxLinearVelocity; PxReal maxAngularVelocity; }; struct ArticulationJointCoreDirtyFlag { enum Enum { eNONE = 0, eMOTION = 1 << 0, eFRAME = 1 << 1, eTARGETPOSE = 1 << 2, eTARGETVELOCITY = 1 << 3, eARMATURE = 1 << 4, eALL = eMOTION | eFRAME | eTARGETPOSE | eTARGETVELOCITY | eARMATURE }; }; typedef PxFlags<ArticulationJointCoreDirtyFlag::Enum, PxU8> ArticulationJointCoreDirtyFlags; PX_FLAGS_OPERATORS(ArticulationJointCoreDirtyFlag::Enum, PxU8) } } #endif
2,757
C
35.289473
94
0.738121
NVIDIA-Omniverse/PhysX/physx/source/lowleveldynamics/include/DyContext.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef DY_CONTEXT_H #define DY_CONTEXT_H #include "PxSceneDesc.h" #include "DyThresholdTable.h" #include "PxcNpThreadContext.h" #include "PxsSimulationController.h" #include "DyConstraintWriteBack.h" #include "foundation/PxAllocator.h" #include "foundation/PxUserAllocated.h" namespace physx { class PxcNpMemBlockPool; namespace Cm { class FlushPool; } namespace IG { class SimpleIslandManager; } class PxcScratchAllocator; struct PxvSimStats; class PxTaskManager; class PxsContactManager; struct PxsContactManagerOutputCounts; class PxvNphaseImplementationContext; namespace Dy { class Context : public PxUserAllocated { PX_NOCOPY(Context) public: // PT: TODO: consider making all of these public at this point // PT: please avoid useless comments like "returns Blah" for a function called "getBlah". PX_FORCE_INLINE PxReal getMaxBiasCoefficient() const { return mMaxBiasCoefficient; } PX_FORCE_INLINE void setMaxBiasCoefficient(PxReal coeff) { mMaxBiasCoefficient = coeff; } PX_FORCE_INLINE PxReal getCorrelationDistance() const { return mCorrelationDistance; } PX_FORCE_INLINE void setCorrelationDistance(PxReal f) { mCorrelationDistance = f; } PX_FORCE_INLINE PxReal getBounceThreshold() const { return mBounceThreshold; } PX_FORCE_INLINE void setBounceThreshold(PxReal f) { mBounceThreshold = f; } PX_FORCE_INLINE PxReal getFrictionOffsetThreshold() const { return mFrictionOffsetThreshold; } PX_FORCE_INLINE void setFrictionOffsetThreshold(PxReal offset) { mFrictionOffsetThreshold = offset; } PX_FORCE_INLINE PxReal getCCDSeparationThreshold() const { return mCCDSeparationThreshold; } PX_FORCE_INLINE void setCCDSeparationThreshold(PxReal offset) { mCCDSeparationThreshold = offset; } PX_FORCE_INLINE PxU32 getSolverBatchSize() const { return mSolverBatchSize; } PX_FORCE_INLINE void setSolverBatchSize(PxU32 f) { mSolverBatchSize = f; } PX_FORCE_INLINE PxU32 getSolverArticBatchSize() const { return mSolverArticBatchSize; } PX_FORCE_INLINE void setSolverArticBatchSize(PxU32 f) { 
mSolverArticBatchSize = f; } PX_FORCE_INLINE PxFrictionType::Enum getFrictionType() const { return mFrictionType; } PX_FORCE_INLINE void setFrictionType(PxFrictionType::Enum f) { mFrictionType = f; } PX_FORCE_INLINE PxReal getDt() const { return mDt; } PX_FORCE_INLINE void setDt(const PxReal dt) { mDt = dt; } // PT: TODO: we have a setDt function but it doesn't set the inverse dt, what's the story here? PX_FORCE_INLINE PxReal getInvDt() const { return mInvDt; } //Forces any cached body state to be updated! PX_FORCE_INLINE void setStateDirty(bool dirty) { mBodyStateDirty = dirty; } PX_FORCE_INLINE bool isStateDirty() const { return mBodyStateDirty; } // Returns the maximum solver constraint size in this island in bytes. PX_FORCE_INLINE PxU32 getMaxSolverConstraintSize() const { return mMaxSolverConstraintSize; } PX_FORCE_INLINE PxReal getLengthScale() const { return mLengthScale; } PX_FORCE_INLINE const PxVec3& getGravity() const { return mGravity; } PX_FORCE_INLINE PxU64 getContextId() const { return mContextID; } PX_FORCE_INLINE ThresholdStream& getThresholdStream() { return *mThresholdStream; } PX_FORCE_INLINE ThresholdStream& getForceChangedThresholdStream() { return *mForceChangedThresholdStream; } PX_FORCE_INLINE ThresholdTable& getThresholdTable() { return mThresholdTable; } void createThresholdStream(PxVirtualAllocatorCallback& callback) { PX_ASSERT(!mThresholdStream); mThresholdStream = PX_NEW(ThresholdStream)(callback); } void createForceChangeThresholdStream(PxVirtualAllocatorCallback& callback) { PX_ASSERT(!mForceChangedThresholdStream); mForceChangedThresholdStream = PX_NEW(ThresholdStream)(callback); } PX_FORCE_INLINE PxcDataStreamPool& getContactStreamPool() { return mContactStreamPool; } PX_FORCE_INLINE PxcDataStreamPool& getPatchStreamPool() { return mPatchStreamPool; } PX_FORCE_INLINE PxcDataStreamPool& getForceStreamPool() { return mForceStreamPool; } PX_FORCE_INLINE PxPinnedArray<Dy::ConstraintWriteback>& getConstraintWriteBackPool() { return 
mConstraintWriteBackPool; } /** \brief Destroys this dynamics context */ virtual void destroy() = 0; /** \brief The entry point for the constraint solver. \param[in] dt The simulation time-step \param[in] continuation The continuation task for the solver \param[in] processLostTouchTask The task that processes lost touches This method is called after the island generation has completed. Its main responsibilities are: (1) Reserving the solver body pools (2) Initializing the static and kinematic solver bodies, which are shared resources between islands. (3) Construct the solver task chains for each island Each island is solved as an independent solver task chain. In addition, large islands may be solved using multiple parallel tasks. Island solving is asynchronous. Once all islands have been solved, the continuation task will be called. */ virtual void update(IG::SimpleIslandManager& simpleIslandManager, PxBaseTask* continuation, PxBaseTask* processLostTouchTask, PxvNphaseImplementationContext* nPhaseContext, PxU32 maxPatchesPerCM, PxU32 maxArticulationLinks, PxReal dt, const PxVec3& gravity, PxBitMapPinned& changedHandleMap) = 0; virtual void processLostPatches(IG::SimpleIslandManager& /*simpleIslandManager*/, PxsContactManager** /*lostPatchManagers*/, PxU32 /*nbLostPatchManagers*/, PxsContactManagerOutputCounts* /*outCounts*/) {} virtual void processFoundPatches(IG::SimpleIslandManager& /*simpleIslandManager*/, PxsContactManager** /*foundPatchManagers*/, PxU32 /*nbFoundPatchManagers*/, PxsContactManagerOutputCounts* /*outCounts*/) {} /** \brief This method copy gpu solver body data to cpu body core */ virtual void updateBodyCore(PxBaseTask* /*continuation*/) {} /** \brief Called after update's task chain has completed. This collects the results of the solver together. This method combines the results of several islands, e.g. constructing scene-level simulation statistics and merging together threshold streams for contact notification. 
*/ virtual void mergeResults() = 0; virtual void setSimulationController(PxsSimulationController* simulationController) = 0; virtual void getDataStreamBase(void*& /*contactStreamBase*/, void*& /*patchStreamBase*/, void*& /*forceAndIndicesStreamBase*/) {} virtual PxSolverType::Enum getSolverType() const = 0; protected: Context(IG::SimpleIslandManager* islandManager, PxVirtualAllocatorCallback* allocatorCallback, PxvSimStats& simStats, bool enableStabilization, bool useEnhancedDeterminism, PxReal maxBiasCoefficient, PxReal lengthScale, PxU64 contextID) : mThresholdStream (NULL), mForceChangedThresholdStream(NULL), mIslandManager (islandManager), mDt (1.0f), mInvDt (1.0f), mMaxBiasCoefficient (maxBiasCoefficient), mEnableStabilization (enableStabilization), mUseEnhancedDeterminism (useEnhancedDeterminism), mBounceThreshold (-2.0f), mLengthScale (lengthScale), mSolverBatchSize (32), mConstraintWriteBackPool (PxVirtualAllocator(allocatorCallback)), mSimStats (simStats), mContextID (contextID), mBodyStateDirty (false) { } virtual ~Context() { PX_DELETE(mThresholdStream); PX_DELETE(mForceChangedThresholdStream); } ThresholdStream* mThresholdStream; ThresholdStream* mForceChangedThresholdStream; ThresholdTable mThresholdTable; IG::SimpleIslandManager* mIslandManager; PxsSimulationController* mSimulationController; /** \brief Time-step. */ PxReal mDt; /** \brief 1/time-step. */ PxReal mInvDt; PxReal mMaxBiasCoefficient; const bool mEnableStabilization; const bool mUseEnhancedDeterminism; PxVec3 mGravity; /** \brief max solver constraint size */ PxU32 mMaxSolverConstraintSize; /** \brief Threshold controlling the relative velocity at which the solver transitions between restitution and bias for solving normal contact constraint. */ PxReal mBounceThreshold; /** \brief Threshold controlling whether friction anchors are constructed or not. 
If the separation is above mFrictionOffsetThreshold, the contact will not be considered to become a friction anchor */ PxReal mFrictionOffsetThreshold; /** \brief Threshold controlling whether distant contacts are processed using bias, restitution or a combination of the two. This only has effect on pairs involving bodies that have enabled speculative CCD simulation mode. */ PxReal mCCDSeparationThreshold; /** \brief Threshold for controlling friction correlation */ PxReal mCorrelationDistance; /** \brief The length scale from PxTolerancesScale::length. */ PxReal mLengthScale; /** \brief The minimum size of an island to generate a solver task chain. */ PxU32 mSolverBatchSize; /** \brief The minimum number of articulations required to generate a solver task chain. */ PxU32 mSolverArticBatchSize; /** \brief The current friction model being used */ PxFrictionType::Enum mFrictionType; /** \brief Structure to encapsulate contact stream allocations. Used by GPU solver to reference pre-allocated pinned host memory */ PxcDataStreamPool mContactStreamPool; /** \brief Struct to encapsulate the contact patch stream allocations. Used by GPU solver to reference pre-allocated pinned host memory */ PxcDataStreamPool mPatchStreamPool; /** \brief Structure to encapsulate force stream allocations. Used by GPU solver to reference pre-allocated pinned host memory for force reports. */ PxcDataStreamPool mForceStreamPool; /** \brief Structure to encapsulate constraint write back allocations. Used by GPU/CPU solver to reference pre-allocated pinned host memory for breakable joint reports. 
*/ PxPinnedArray<Dy::ConstraintWriteback> mConstraintWriteBackPool; PxvSimStats& mSimStats; const PxU64 mContextID; bool mBodyStateDirty; }; Context* createDynamicsContext( PxcNpMemBlockPool* memBlockPool, PxcScratchAllocator& scratchAllocator, Cm::FlushPool& taskPool, PxvSimStats& simStats, PxTaskManager* taskManager, PxVirtualAllocatorCallback* allocatorCallback, PxsMaterialManager* materialManager, IG::SimpleIslandManager* islandManager, PxU64 contextID, bool enableStabilization, bool useEnhancedDeterminism, PxReal maxBiasCoefficient, bool frictionEveryIteration, PxReal lengthScale); Context* createTGSDynamicsContext( PxcNpMemBlockPool* memBlockPool, PxcScratchAllocator& scratchAllocator, Cm::FlushPool& taskPool, PxvSimStats& simStats, PxTaskManager* taskManager, PxVirtualAllocatorCallback* allocatorCallback, PxsMaterialManager* materialManager, IG::SimpleIslandManager* islandManager, PxU64 contextID, bool enableStabilization, bool useEnhancedDeterminism, PxReal lengthScale); } } #endif
12,823
C
41.18421
219
0.763394
NVIDIA-Omniverse/PhysX/physx/source/lowleveldynamics/include/DyFEMClothCore.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. 
#ifndef PXDV_FEMCLOTH_CORE_H #define PXDV_FEMCLOTH_CORE_H #include "foundation/PxSimpleTypes.h" #include "foundation/PxTransform.h" #include "foundation/PxArray.h" #if PX_ENABLE_FEATURES_UNDER_CONSTRUCTION #include "PxFEMCloth.h" #endif #include "PxFEMParameter.h" #include "PxFEMClothFlags.h" #include "PxsFEMClothMaterialCore.h" namespace physx { namespace Dy { struct FEMClothCore { public: PxFEMParameters parameters; PxU16 solverIterationCounts; bool dirty; PxReal wakeCounter; PxFEMClothFlags mFlags; PxFEMClothDataFlags mDirtyFlags; PxArray<PxU16> mMaterialHandles; // device pointers PxVec4* mPositionInvMass; PxVec4* mVelocity; PxVec4* mRestPosition; // multimaterial bending effects PxArray<PxReal> mBendingScales; PxReal maxVelocity; PxReal maxDepenetrationVelocity; // negative values mean no activation angle: apply bending force toward rest bending angle PxReal mBendingActivationAngle; // number of collision pair updates per timestep. Collision pair is updated at least once per timestep and increasing the frequency provides better collision pairs. PxU32 nbCollisionPairUpdatesPerTimestep; // number of collision substeps in each sub-timestep. Collision constraints can be applied multiple times in each sub-timestep. PxU32 nbCollisionSubsteps; FEMClothCore() { maxVelocity = 0.f; maxDepenetrationVelocity = 0.f; mBendingActivationAngle = -1.f; nbCollisionPairUpdatesPerTimestep = 1; nbCollisionSubsteps = 1; dirty = 0; mDirtyFlags = PxFEMClothDataFlags(0); } void setMaterial(const PxU16 materialHandle) { mMaterialHandles.pushBack(materialHandle); } void clearMaterials() { mMaterialHandles.clear(); } }; } } #endif
3,471
C
33.039215
167
0.730625
NVIDIA-Omniverse/PhysX/physx/source/lowleveldynamics/include/DyConstraint.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef DY_CONSTRAINT_H
#define DY_CONSTRAINT_H

#include "foundation/PxVec3.h"
#include "foundation/PxTransform.h"
#include "PxvConfig.h"
#include "PxvDynamics.h"
#include "PxConstraint.h"
#include "DyConstraintWriteBack.h"

namespace physx
{
class PxsRigidBody;

namespace Dy
{
#if PX_VC
#pragma warning(push)
#pragma warning( disable : 4324 ) // Padding was added at the end of a structure because of a __declspec(align) value.
#endif

	// Low-level constraint record consumed by the solver. Layout is fixed:
	// the compile-time assert below pins sizeof(Constraint) to 48 bytes on
	// 32-bit platforms, so do not reorder or add members casually.
	PX_ALIGN_PREFIX(16)
	struct Constraint
	{
	public:
		PxReal					linBreakForce;			// linear force threshold at which the constraint breaks
		PxReal					angBreakForce;			// angular force threshold at which the constraint breaks
		PxU16					constantBlockSize;		// size in bytes of the user constant block
		PxU16					flags;
		PxConstraintSolverPrep	solverPrep;				// callback that emits solver rows for this constraint
		void*					constantBlock;			// opaque per-constraint data passed to solverPrep
		PxsRigidBody*			body0;					// NULL is presumably a static/world anchor — TODO confirm
		PxsRigidBody*			body1;
		PxsBodyCore*			bodyCore0;
		PxsBodyCore*			bodyCore1;
		PxU32					index;					// index into the writeback array (see DyConstraintWriteBack.h)
		PxReal					minResponseThreshold;
	} PX_ALIGN_SUFFIX(16);
#if PX_VC
#pragma warning(pop)
#endif

#if !PX_P64_FAMILY
	PX_COMPILE_TIME_ASSERT(48==sizeof(Constraint));
#endif
}
}

#endif
2,671
C
30.069767
119
0.749532
NVIDIA-Omniverse/PhysX/physx/source/lowleveldynamics/include/DySoftBody.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. 
#ifndef DY_SOFTBODY_H
#define DY_SOFTBODY_H

#include "foundation/PxSimpleTypes.h"
#include "foundation/PxPinnedArray.h"
#include "DySoftBodyCore.h"
#include "PxvGeometry.h"

namespace physx
{
namespace Sc
{
	class SoftBodySim;
}

namespace Dy
{
	// Opaque handle: a SoftBody* reinterpreted as an integer (see getSoftBody below).
	typedef size_t SoftBodyHandle;

	struct SoftBodyCore;

	typedef PxPinnedArray<PxU64> SoftBodyFilterArray;

	// Low-level soft body: binds the simulation object (Sc::SoftBodySim), its
	// core state (SoftBodyCore), the collision/simulation tetrahedron meshes
	// and the GPU bookkeeping ids.
	class SoftBody
	{
		PX_NOCOPY(SoftBody)
	public:
		SoftBody(Sc::SoftBodySim* sim, Dy::SoftBodyCore& core) :
			mSoftBodySoftBodyFilterPairs(NULL), mSim(sim), mCore(core), mElementId(0xffffffff), mGpuRemapId(0xffffffff)
		{
			mFilterDirty = false;
			mFilterInDirtyList = false;
			mDirtySoftBodyForFilterPairs = NULL;
			mSoftBodySoftBodyFilterPairs = NULL;
		}

		~SoftBody()
		{
			// Remove ourselves from the shared dirty list (owned by
			// PxgSimulationController) so it never dereferences a dead body,
			// then release the filter-pair array we own.
			if (mDirtySoftBodyForFilterPairs)
			{
				Dy::SoftBody** dirtySoftBodies = mDirtySoftBodyForFilterPairs->begin();
				const PxU32 size = mDirtySoftBodyForFilterPairs->size();
				for (PxU32 i = 0; i < size; ++i)
				{
					if (dirtySoftBodies[i] == this)
					{
						dirtySoftBodies[i] = NULL;
					}
				}

				if (mSoftBodySoftBodyFilterPairs)
					PX_FREE(mSoftBodySoftBodyFilterPairs);
			}
		}

		PX_FORCE_INLINE PxReal getMaxPenetrationBias() const { return mCore.maxPenBias; }

		PX_FORCE_INLINE Sc::SoftBodySim* getSoftBodySim() const { return mSim; }

		// Stores the GPU remap id and mirrors it into the shape's material data
		// so GPU-side material lookup uses the same index.
		PX_FORCE_INLINE void setGpuRemapId(const PxU32 remapId)
		{
			mGpuRemapId = remapId;
			PxTetrahedronMeshGeometryLL& geom = mShapeCore->mGeometry.get<PxTetrahedronMeshGeometryLL>();
			geom.materialsLL.gpuRemapId = remapId;
		}

		PX_FORCE_INLINE PxU32 getGpuRemapId() { return mGpuRemapId; }

		PX_FORCE_INLINE void setElementId(const PxU32 elementId) { mElementId = elementId; }
		PX_FORCE_INLINE PxU32 getElementId() { return mElementId; }

		PX_FORCE_INLINE PxsShapeCore& getShapeCore() { return *mShapeCore; }
		PX_FORCE_INLINE void setShapeCore(PxsShapeCore* shapeCore) { mShapeCore = shapeCore; }

		// Attaches the simulation mesh and its auxiliary solver data (both
		// owned elsewhere — TODO confirm owner).
		PX_FORCE_INLINE void setSimShapeCore(PxTetrahedronMesh* simulationMesh, PxSoftBodyAuxData* simulationState)
		{
			mSimulationMesh = simulationMesh;
			mSoftBodyAuxData = simulationState;
		}

		// Collision mesh lives inside the shape's geometry; simulation mesh is stored directly.
		PX_FORCE_INLINE const PxTetrahedronMesh* getCollisionMesh() const { return mShapeCore->mGeometry.get<PxTetrahedronMeshGeometryLL>().tetrahedronMesh; }
		PX_FORCE_INLINE PxTetrahedronMesh* getCollisionMesh() { return mShapeCore->mGeometry.get<PxTetrahedronMeshGeometryLL>().tetrahedronMesh; }

		PX_FORCE_INLINE const PxTetrahedronMesh* getSimulationMesh() const { return mSimulationMesh; }
		PX_FORCE_INLINE PxTetrahedronMesh* getSimulationMesh() { return mSimulationMesh; }

		PX_FORCE_INLINE const PxSoftBodyAuxData* getSoftBodyAuxData() const { return mSoftBodyAuxData; }
		PX_FORCE_INLINE PxSoftBodyAuxData* getSoftBodyAuxData() { return mSoftBodyAuxData; }

		PX_FORCE_INLINE const SoftBodyCore& getCore() const { return mCore; }
		PX_FORCE_INLINE SoftBodyCore& getCore() { return mCore; }

		// Packed iteration counts (see SoftBodyCore::solverIterationCounts).
		PX_FORCE_INLINE PxU16 getIterationCounts() const { return mCore.solverIterationCounts; }

		PX_FORCE_INLINE PxU32 getGpuSoftBodyIndex() const { return mGpuRemapId; }

		//These variables are used in the constraint partition
		PxU16 maxSolverFrictionProgress;
		PxU16 maxSolverNormalProgress;
		PxU32 solverProgress;
		PxU8  numTotalConstraints;

		// Attachment handle lists, one per attached-object category.
		PxArray<PxU32>			mParticleSoftBodyAttachments;
		PxArray<PxU32>			mRigidSoftBodyAttachments;
		PxArray<PxU32>			mClothSoftBodyAttachments;
		PxArray<PxU32>			mSoftSoftBodyAttachments;

		SoftBodyFilterArray*	mSoftBodySoftBodyFilterPairs;		// owned; freed in the destructor
		PxArray<Dy::SoftBody*>*	mDirtySoftBodyForFilterPairs;		//pointer to the array of mDirtySoftBodyForFilterPairs in PxgSimulationController.cpp

		PxArray<PxU32>			mSoftBodySoftBodyAttachmentIdReferences;
		bool					mFilterDirty;
		bool					mFilterInDirtyList;

	private:
		Sc::SoftBodySim*	mSim;
		SoftBodyCore&		mCore;
		PxsShapeCore*		mShapeCore;

		PxTetrahedronMesh*	mSimulationMesh;
		PxSoftBodyAuxData*	mSoftBodyAuxData;

		PxU32				mElementId; //this is used for the bound array, contactDist
		PxU32				mGpuRemapId;
	};

	// Recovers the SoftBody* packed into an opaque handle.
	PX_FORCE_INLINE SoftBody* getSoftBody(SoftBodyHandle handle)
	{
		return reinterpret_cast<SoftBody*>(handle);
	}
}
}

#endif
5,824
C
33.880239
153
0.747596
NVIDIA-Omniverse/PhysX/physx/source/lowleveldynamics/include/DyConstraintWriteBack.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef DY_CONSTRAINT_WRITE_BACK_H
#define DY_CONSTRAINT_WRITE_BACK_H

#include "foundation/PxVec3.h"
#include "PxvConfig.h"
#include "PxvDynamics.h"

namespace physx
{
namespace Dy
{
	// Per-constraint solver output: applied impulses plus a broken flag,
	// indexed by Constraint::index. 16-byte aligned, 32 bytes total; the
	// member order interleaves vectors and PxU32s to avoid padding — keep it.
	PX_ALIGN_PREFIX(16)
	struct ConstraintWriteback
	{
	public:
		// Resets the writeback slot before a solver pass. 'pad' is
		// deliberately left untouched (never read).
		void initialize()
		{
			linearImpulse = PxVec3(0);
			angularImpulse = PxVec3(0);
			broken = false;
		}

		PxVec3	linearImpulse;		// accumulated linear impulse applied by the solver
		PxU32	broken;				// nonzero once the constraint's break force was exceeded
		PxVec3	angularImpulse;		// accumulated angular impulse applied by the solver
		PxU32	pad;
	} PX_ALIGN_SUFFIX(16);
}
}

#endif
2,193
C
33.281249
74
0.744186
NVIDIA-Omniverse/PhysX/physx/source/lowleveldynamics/include/DySoftBodyCore.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. 
#ifndef DY_SOFTBODY_CORE_H
#define DY_SOFTBODY_CORE_H

#include "foundation/PxSimpleTypes.h"
#include "foundation/PxTransform.h"
#include "PxSoftBody.h"
#include "PxSoftBodyFlag.h"
#include "PxsFEMSoftBodyMaterialCore.h"
#include "foundation/PxArray.h"

namespace physx
{
namespace Dy
{
	// Low-level soft body state shared between CPU bookkeeping and the GPU
	// pipeline. Plain aggregate: no constructor, so the owner is responsible
	// for initializing every member before use.
	struct SoftBodyCore
	{
	public:
		PxQuat					initialRotation;
		PxFEMParameters			parameters;
		PxReal					sleepThreshold;
		PxReal					freezeThreshold;
		PxReal					wakeCounter;
		PxReal					maxPenBias;
		PxU16					solverIterationCounts;	//vel iters are in low word and pos iters in high word.
		bool					dirty;
		PxSoftBodyFlags			mFlags;
		PxSoftBodyDataFlags		mDirtyFlags;

		// Appends a material handle; duplicates are not checked here.
		void setMaterial(const PxU16 materialHandle)
		{
			mMaterialHandles.pushBack(materialHandle);
		}

		// Removes all material handles.
		void clearMaterials() { mMaterialHandles.clear(); }

		PxArray<PxU16>			mMaterialHandles;

		//device - managed by PhysX
		PxVec4*					mPositionInvMass;		// collision mesh positions, alloc on attachShape(), dealloc detachShape()
		PxVec4*					mRestPosition;			// collision mesh rest positions, alloc on attachShape(), dealloc detachShape()
		PxVec4*					mSimPositionInvMass;	// simulation mesh positions, alloc on attachSimulationMesh(), dealloc detachSimulationMesh()
		PxVec4*					mSimVelocity;			// simulation mesh velocities, alloc on attachSimulationMesh(), dealloc detachSimulationMesh()

		// device - just the pointer, user responsible.
		const PxVec4*			mKinematicTarget;
	};
}
}

#endif
3,086
C
37.111111
131
0.741737
NVIDIA-Omniverse/PhysX/physx/source/lowleveldynamics/include/DyParticleSystemCore.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. 
#ifndef DY_PARTICLESYSTEM_CORE_H #define DY_PARTICLESYSTEM_CORE_H #include "foundation/PxSimpleTypes.h" #include "foundation/PxTransform.h" #include "foundation/PxArray.h" #include "foundation/PxMemory.h" #include "PxParticleSystem.h" #include "PxParticleBuffer.h" #include "CmIDPool.h" #if PX_ENABLE_FEATURES_UNDER_CONSTRUCTION #include "PxFLIPParticleSystem.h" #include "PxMPMParticleSystem.h" #endif #include "PxParticleSolverType.h" #include "PxSparseGridParams.h" namespace physx { namespace Dy { class ParticleSystemCore { public: PxReal sleepThreshold; PxReal freezeThreshold; PxReal wakeCounter; PxU32 gridSizeX; PxU32 gridSizeY; PxU32 gridSizeZ; PxParticleSolverType::Enum solverType; bool enableCCD; PxU16 solverIterationCounts; PxSparseGridParams sparseGridParams; PxReal restOffset; PxReal particleContactOffset; PxReal particleContactOffset_prev; PxReal solidRestOffset; PxReal fluidRestOffset; PxReal fluidRestOffset_prev; PxReal fluidBoundaryDensityScale; PxReal maxDepenetrationVelocity; PxReal maxVelocity; PxParticleFlags mFlags; PxVec3 mWind; PxU32 mMaxNeighborhood; PxArray<PxU16> mPhaseGroupToMaterialHandle; PxArray<PxU16> mUniqueMaterialHandles; //just for reporting void addParticleBuffer(PxParticleBuffer* particleBuffer) { if (particleBuffer->bufferIndex == 0xffffffff) { switch (particleBuffer->getConcreteType()) { case (PxConcreteType::ePARTICLE_BUFFER): { particleBuffer->bufferIndex = mParticleBuffers.size(); mParticleBuffers.pushBack(particleBuffer); mParticleBufferUpdate = true; particleBuffer->setInternalData(this); return; } case (PxConcreteType::ePARTICLE_DIFFUSE_BUFFER): { particleBuffer->bufferIndex = mParticleAndDiffuseBuffers.size(); mParticleAndDiffuseBuffers.pushBack(reinterpret_cast<PxParticleAndDiffuseBuffer*>(particleBuffer)); mParticleAndDiffuseBufferUpdate = true; particleBuffer->setInternalData(this); return; } case (PxConcreteType::ePARTICLE_CLOTH_BUFFER): { particleBuffer->bufferIndex = mClothBuffers.size(); 
mClothBuffers.pushBack(reinterpret_cast<PxParticleClothBuffer*>(particleBuffer)); mClothBufferUpdate = true; particleBuffer->setInternalData(this); return; } case (PxConcreteType::ePARTICLE_RIGID_BUFFER): { particleBuffer->bufferIndex = mRigidBuffers.size(); mRigidBuffers.pushBack(reinterpret_cast<PxParticleRigidBuffer*>(particleBuffer)); mRigidBufferUpdate = true; particleBuffer->setInternalData(this); return; } default: { PxGetFoundation().error(PxErrorCode::eINVALID_OPERATION, PX_FL, "addParticleBuffer : Error, this buffer does not have a valid type!"); return; } } } else { PxGetFoundation().error(PxErrorCode::eINVALID_OPERATION, PX_FL, "addParticleBuffer : Error, this buffer cannot be added to multiple particle systems!"); } } void removeParticleBuffer(PxParticleBuffer* particleBuffer) { const PxU32 index = particleBuffer->bufferIndex; switch (particleBuffer->getConcreteType()) { case (PxConcreteType::ePARTICLE_BUFFER): { if (index < mParticleBuffers.size()) { mParticleBuffers.replaceWithLast(particleBuffer->bufferIndex); if (mParticleBuffers.size() > index) mParticleBuffers[index]->bufferIndex = index; mParticleBufferUpdate = true; particleBuffer->bufferIndex = 0xffffffff; particleBuffer->onParticleSystemDestroy(); } return; } case (PxConcreteType::ePARTICLE_DIFFUSE_BUFFER): { if (index < mParticleAndDiffuseBuffers.size()) { mParticleAndDiffuseBuffers.replaceWithLast(particleBuffer->bufferIndex); if (mParticleAndDiffuseBuffers.size() > index) mParticleAndDiffuseBuffers[index]->bufferIndex = index; mParticleAndDiffuseBufferUpdate = true; particleBuffer->bufferIndex = 0xffffffff; particleBuffer->onParticleSystemDestroy(); } return; } case (PxConcreteType::ePARTICLE_CLOTH_BUFFER): { if (index < mClothBuffers.size()) { mClothBuffers.replaceWithLast(particleBuffer->bufferIndex); if (mClothBuffers.size() > index) mClothBuffers[index]->bufferIndex = index; mClothBufferUpdate = true; particleBuffer->bufferIndex = 0xffffffff; particleBuffer->onParticleSystemDestroy(); 
} return; } case (PxConcreteType::ePARTICLE_RIGID_BUFFER): { if (index < mParticleBuffers.size()) { mRigidBuffers.replaceWithLast(particleBuffer->bufferIndex); if (mRigidBuffers.size() > index) mRigidBuffers[index]->bufferIndex = index; mRigidBufferUpdate = true; particleBuffer->bufferIndex = 0xffffffff; particleBuffer->onParticleSystemDestroy(); } return; } default: { PxGetFoundation().error(PxErrorCode::eINVALID_OPERATION, PX_FL, "removeParticleBuffer : Error, this buffer does not have a valid type!"); return; } } } PxU32 getNumUserBuffers() const { return mParticleBuffers.size() + mClothBuffers.size() + mRigidBuffers.size() + mParticleAndDiffuseBuffers.size(); } //device PxArray<PxParticleBuffer*> mParticleBuffers; PxArray<PxParticleClothBuffer*> mClothBuffers; PxArray<PxParticleRigidBuffer*> mRigidBuffers; PxArray<PxParticleAndDiffuseBuffer*> mParticleAndDiffuseBuffers; bool mParticleBufferUpdate; bool mClothBufferUpdate; bool mRigidBufferUpdate; bool mParticleAndDiffuseBufferUpdate; PxParticleSystemCallback* mCallback; ParticleSystemCore() { PxMemSet(this, 0, sizeof(*this)); mParticleBufferUpdate = false; mClothBufferUpdate = false; mRigidBufferUpdate = false; mParticleAndDiffuseBufferUpdate = false; } ~ParticleSystemCore() { for(PxU32 i = 0; i < mParticleBuffers.size(); ++i) { mParticleBuffers[i]->onParticleSystemDestroy(); } for (PxU32 i = 0; i < mClothBuffers.size(); ++i) { mClothBuffers[i]->onParticleSystemDestroy(); } for (PxU32 i = 0; i < mRigidBuffers.size(); ++i) { mRigidBuffers[i]->onParticleSystemDestroy(); } for (PxU32 i = 0; i < mParticleAndDiffuseBuffers.size(); ++i) { mParticleAndDiffuseBuffers[i]->onParticleSystemDestroy(); } } #if PX_ENABLE_FEATURES_UNDER_CONSTRUCTION //Leave these members at the end to remain binary compatible with public builds PxFLIPParams flipParams; PxMPMParams mpmParams; #endif }; } // namespace Dy } // namespace physx #endif
8,152
C
28.327338
155
0.727797
NVIDIA-Omniverse/PhysX/physx/source/lowleveldynamics/include/DyFEMCloth.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. 
#ifndef PXD_FEMCLOTH_H #define PXD_FEMCLOTH_H #include "foundation/PxSimpleTypes.h" #include "DyFEMClothCore.h" #include "PxvGeometry.h" namespace physx { namespace Sc { class FEMClothSim; } namespace Dy { typedef size_t FEMClothHandle; struct FEMClothCore; class FEMCloth { PX_NOCOPY(FEMCloth) public: FEMCloth(Sc::FEMClothSim* sim, Dy::FEMClothCore& core) : mSim(sim), mCore(core), mElementId(0xffffffff), mGpuRemapId(0xffffffff) {} ~FEMCloth() {} //PX_FORCE_INLINE PxReal getMaxPenetrationBias() const { return mCore.maxPenBias; } PX_FORCE_INLINE Sc::FEMClothSim* getFEMClothSim() const { return mSim; } PX_FORCE_INLINE void setGpuRemapId(const PxU32 remapId) { mGpuRemapId = remapId; PxTriangleMeshGeometryLL& geom = mShapeCore->mGeometry.get<PxTriangleMeshGeometryLL>(); geom.materialsLL.gpuRemapId = remapId; } PX_FORCE_INLINE PxTriangleMesh* getTriangleMesh() { PxTriangleMeshGeometryLL& geom = mShapeCore->mGeometry.get<PxTriangleMeshGeometryLL>(); return geom.triangleMesh; } PX_FORCE_INLINE PxU32 getGpuRemapId() { return mGpuRemapId; } PX_FORCE_INLINE void setElementId(const PxU32 elementId) { mElementId = elementId; } PX_FORCE_INLINE PxU32 getElementId() { return mElementId; } PX_FORCE_INLINE PxsShapeCore& getShapeCore() { return *mShapeCore; } PX_FORCE_INLINE void setShapeCore(PxsShapeCore* shapeCore) { mShapeCore = shapeCore; } PX_FORCE_INLINE const FEMClothCore& getCore() const { return mCore; } PX_FORCE_INLINE FEMClothCore& getCore() { return mCore; } PX_FORCE_INLINE PxU16 getIterationCounts() const { return mCore.solverIterationCounts; } void addAttachmentHandle(PxU32 handle); void removeAttachmentHandle(PxU32 handle); //These variables are used in the constraint partition PxU16 maxSolverFrictionProgress; PxU16 maxSolverNormalProgress; PxU32 solverProgress; PxU8 numTotalConstraints; PxArray<PxU32> mAttachmentHandles; PxArray<PxU32> mClothClothAttachments; private: Sc::FEMClothSim* mSim; FEMClothCore& mCore; PxsShapeCore* mShapeCore; PxU32 mElementId; //this is 
used for the bound array, contactDist PxU32 mGpuRemapId; }; struct FEMClothSolverDesc { FEMCloth* femCloth; }; PX_FORCE_INLINE FEMCloth* getFEMCloth(FEMClothHandle handle) { return reinterpret_cast<FEMCloth*>(handle); } PX_FORCE_INLINE void FEMCloth::addAttachmentHandle(PxU32 handle) { mAttachmentHandles.pushBack(handle); } PX_FORCE_INLINE void FEMCloth::removeAttachmentHandle(PxU32 handle) { for (PxU32 i = 0; i < mAttachmentHandles.size(); ++i) { if (mAttachmentHandles[i] == handle) { mAttachmentHandles.replaceWithLast(i); } } } } } #endif
4,411
C
30.971014
95
0.728406
NVIDIA-Omniverse/PhysX/physx/source/lowleveldynamics/include/DyVArticulation.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef DY_V_ARTICULATION_H
#define DY_V_ARTICULATION_H

#include "foundation/PxVec3.h"
#include "foundation/PxQuat.h"
#include "foundation/PxTransform.h"
#include "foundation/PxVecMath.h"
#include "foundation/PxUtilities.h"
#include "CmUtils.h"
#include "CmSpatialVector.h"
#include "foundation/PxMemory.h"
#include "DyArticulationCore.h"
#include "DyArticulationJointCore.h"

namespace physx
{
	struct PxsBodyCore;
	class PxsConstraintBlockManager;
	class PxsContactManagerOutputIterator;
	struct PxSolverConstraintDesc;
	struct PxSolverBodyData;
	class PxContactJoint;
	struct PxTGSSolverBodyData;
	struct PxTGSSolverBodyTxInertia;
	struct PxSolverConstraintDesc;

	namespace Dy
	{
		struct SpatialSubspaceMatrix;
		struct ConstraintWriteback;
		class ThreadContext;

		static const size_t DY_ARTICULATION_TENDON_MAX_SIZE = 64;

		struct Constraint;
		class Context;

		class ArticulationSpatialTendon;
		class ArticulationFixedTendon;
		class ArticulationTendonJoint;
		struct ArticulationSensor;

		// A joint-driven constraint closing a loop between two links of the
		// same articulation.
		struct ArticulationLoopConstraint
		{
		public:
			PxU32 linkIndex0;
			PxU32 linkIndex1;
			Dy::Constraint* constraint;
		};

		#define DY_ARTICULATION_LINK_NONE 0xffffffff

		typedef PxU64 ArticulationBitField;

		// One rigid link in an articulation tree. Topology is encoded twice:
		// as bitmaps (children / pathToRoot, limited to 64 links by PxU64)
		// and as start-index/count pairs into externally stored index arrays.
		struct ArticulationLink
		{
			ArticulationBitField	children;		// child bitmap
			ArticulationBitField	pathToRoot;		// path to root, including link and root
			PxU32					mPathToRootStartIndex;
			PxU32					mChildrenStartIndex;
			PxU16					mPathToRootCount;
			PxU16					mNumChildren;
			PxsBodyCore*			bodyCore;
			ArticulationJointCore*	inboundJoint;	// joint connecting this link to its parent
			PxU32					parent;			// parent link index (DY_ARTICULATION_LINK_NONE for the root)
			PxReal					cfm;			// constraint force mixing term for this link
		};

		class FeatherstoneArticulation;

		// Bundle of pointers the solver needs to process one articulation.
		// The arrays (links, motionVelocity, ...) are owned elsewhere.
		struct ArticulationSolverDesc
		{
			// Resets all pointers/counters and installs core + flags.
			void	initData(ArticulationCore* core_, const PxArticulationFlags* flags_)
			{
				articulation			= NULL;
				links					= NULL;
				motionVelocity			= NULL;
				acceleration			= NULL;
				poses					= NULL;
				deltaQ					= NULL;

				core					= core_;
				flags					= flags_;

				linkCount				= 0;
				numInternalConstraints	= 0;
			}

			FeatherstoneArticulation*	articulation;
			ArticulationLink*			links;
			Cm::SpatialVectorV*			motionVelocity;
			Cm::SpatialVector*			acceleration;
			PxTransform*				poses;
			PxQuat*						deltaQ;
			ArticulationCore*			core;
			const PxArticulationFlags*	flags;	// PT: PX-1399
			PxU8						linkCount;
			PxU8						numInternalConstraints;
		};
	}
}

#endif
4,119
C
28.640288
78
0.738772
NVIDIA-Omniverse/PhysX/physx/source/lowleveldynamics/include/DyArticulationJointCore.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef DY_ARTICULATION_JOINT_CORE_H #define DY_ARTICULATION_JOINT_CORE_H #include "DyArticulationCore.h" #include "solver/PxSolverDefs.h" #include "PxArticulationJointReducedCoordinate.h" #include "CmSpatialVector.h" namespace physx { namespace Dy { class ArticulationJointCoreData; PX_ALIGN_PREFIX(16) struct ArticulationJointCore { public: // PX_SERIALIZATION ArticulationJointCore(const PxEMPTY&) : jointDirtyFlag(PxEmpty) { PX_COMPILE_TIME_ASSERT(sizeof(PxArticulationMotions) == sizeof(PxU8)); } //~PX_SERIALIZATION ArticulationJointCore(const PxTransform& parentFrame, const PxTransform& childFrame) { //PxMarkSerializedMemory(this, sizeof(ArticulationJointCore)); init(parentFrame, childFrame); } // PT: these ones don't update the dirty flags PX_FORCE_INLINE void initLimit(PxArticulationAxis::Enum axis, const PxArticulationLimit& limit) { limits[axis] = limit; } PX_FORCE_INLINE void initDrive(PxArticulationAxis::Enum axis, const PxArticulationDrive& drive) { drives[axis] = drive; } PX_FORCE_INLINE void initJointType(PxArticulationJointType::Enum type) { jointType = PxU8(type); } PX_FORCE_INLINE void initMaxJointVelocity(const PxReal maxJointV) { maxJointVelocity = maxJointV; } PX_FORCE_INLINE void initFrictionCoefficient(const PxReal coefficient) { frictionCoefficient = coefficient; } void init(const PxTransform& parentFrame, const PxTransform& childFrame) { PX_ASSERT(parentFrame.isValid()); PX_ASSERT(childFrame.isValid()); parentPose = parentFrame; childPose = childFrame; jointOffset = 0; // PT: TODO: don't we need ArticulationJointCoreDirtyFlag::eFRAME here? 
jointDirtyFlag = ArticulationJointCoreDirtyFlag::eMOTION; initFrictionCoefficient(0.05f); initMaxJointVelocity(100.0f); initJointType(PxArticulationJointType::eUNDEFINED); for(PxU32 i=0; i<PxArticulationAxis::eCOUNT; i++) { initLimit(PxArticulationAxis::Enum(i), PxArticulationLimit(0.0f, 0.0f)); initDrive(PxArticulationAxis::Enum(i), PxArticulationDrive(0.0f, 0.0f, 0.0f, PxArticulationDriveType::eNONE)); targetP[i] = 0.0f; targetV[i] = 0.0f; armature[i] = 0.0f; jointPos[i] = 0.0f; jointVel[i] = 0.0f; dofIds[i] = 0xff; invDofIds[i] = 0xff; motion[i] = PxArticulationMotion::eLOCKED; } } PX_CUDA_CALLABLE void setJointFrame(Cm::UnAlignedSpatialVector* motionMatrix, const Cm::UnAlignedSpatialVector* jointAxis, PxQuat& relativeQuat, const PxU32 dofs) { if (jointDirtyFlag & ArticulationJointCoreDirtyFlag::eFRAME) { relativeQuat = (childPose.q * (parentPose.q.getConjugate())).getNormalized(); computeMotionMatrix(motionMatrix, jointAxis, dofs); jointDirtyFlag &= ~ArticulationJointCoreDirtyFlag::eFRAME; } } PX_CUDA_CALLABLE PX_FORCE_INLINE void computeMotionMatrix(Cm::UnAlignedSpatialVector* motionMatrix, const Cm::UnAlignedSpatialVector* jointAxis, const PxU32 dofs) { const PxVec3 childOffset = -childPose.p; switch (jointType) { case PxArticulationJointType::ePRISMATIC: { const Cm::UnAlignedSpatialVector& jJointAxis = jointAxis[0]; const PxVec3 u = (childPose.rotate(jJointAxis.bottom)).getNormalized(); motionMatrix[0] = Cm::UnAlignedSpatialVector(PxVec3(0.f), u); PX_ASSERT(dofs == 1); break; } case PxArticulationJointType::eREVOLUTE: case PxArticulationJointType::eREVOLUTE_UNWRAPPED: { const Cm::UnAlignedSpatialVector& jJointAxis = jointAxis[0]; const PxVec3 u = (childPose.rotate(jJointAxis.top)).getNormalized(); const PxVec3 uXd = u.cross(childOffset); motionMatrix[0] = Cm::UnAlignedSpatialVector(u, uXd); break; } case PxArticulationJointType::eSPHERICAL: { for (PxU32 ind = 0; ind < dofs; ++ind) { const Cm::UnAlignedSpatialVector& jJointAxis = jointAxis[ind]; const PxVec3 
u = (childPose.rotate(jJointAxis.top)).getNormalized(); const PxVec3 uXd = u.cross(childOffset); motionMatrix[ind] = Cm::UnAlignedSpatialVector(u, uXd); } break; } case PxArticulationJointType::eFIX: { PX_ASSERT(dofs == 0); break; } default: break; } } PX_CUDA_CALLABLE PX_FORCE_INLINE void operator=(ArticulationJointCore& other) { parentPose = other.parentPose; childPose = other.childPose; //KS - temp place to put reduced coordinate limit and drive values for(PxU32 i=0; i<PxArticulationAxis::eCOUNT; i++) { limits[i] = other.limits[i]; drives[i] = other.drives[i]; targetP[i] = other.targetP[i]; targetV[i] = other.targetV[i]; armature[i] = other.armature[i]; jointPos[i] = other.jointPos[i]; jointVel[i] = other.jointVel[i]; dofIds[i] = other.dofIds[i]; invDofIds[i] = other.invDofIds[i]; motion[i] = other.motion[i]; } frictionCoefficient = other.frictionCoefficient; maxJointVelocity = other.maxJointVelocity; jointOffset = other.jointOffset; jointDirtyFlag = other.jointDirtyFlag; jointType = other.jointType; } PX_FORCE_INLINE void setParentPose(const PxTransform& t) { parentPose = t; jointDirtyFlag |= ArticulationJointCoreDirtyFlag::eFRAME; } PX_FORCE_INLINE void setChildPose(const PxTransform& t) { childPose = t; jointDirtyFlag |= ArticulationJointCoreDirtyFlag::eFRAME; } PX_FORCE_INLINE void setMotion(PxArticulationAxis::Enum axis, PxArticulationMotion::Enum m) { motion[axis] = PxU8(m); jointDirtyFlag |= Dy::ArticulationJointCoreDirtyFlag::eMOTION; } PX_FORCE_INLINE void setTargetP(PxArticulationAxis::Enum axis, PxReal value) { targetP[axis] = value; jointDirtyFlag |= Dy::ArticulationJointCoreDirtyFlag::eTARGETPOSE; } PX_FORCE_INLINE void setTargetV(PxArticulationAxis::Enum axis, PxReal value) { targetV[axis] = value; jointDirtyFlag |= Dy::ArticulationJointCoreDirtyFlag::eTARGETVELOCITY; } PX_FORCE_INLINE void setArmature(PxArticulationAxis::Enum axis, PxReal value) { armature[axis] = value; jointDirtyFlag |= Dy::ArticulationJointCoreDirtyFlag::eARMATURE; } // 
attachment points, don't change the order, otherwise it will break GPU code PxTransform parentPose; //28 28 PxTransform childPose; //28 56 //KS - temp place to put reduced coordinate limit and drive values PxArticulationLimit limits[PxArticulationAxis::eCOUNT]; //48 104 PxArticulationDrive drives[PxArticulationAxis::eCOUNT]; //96 200 PxReal targetP[PxArticulationAxis::eCOUNT]; //24 224 PxReal targetV[PxArticulationAxis::eCOUNT]; //24 248 PxReal armature[PxArticulationAxis::eCOUNT]; //24 272 PxReal jointPos[PxArticulationAxis::eCOUNT]; //24 296 PxReal jointVel[PxArticulationAxis::eCOUNT]; //24 320 PxReal frictionCoefficient; //4 324 PxReal maxJointVelocity; //4 328 //this is the dof offset for the joint in the cache. PxU32 jointOffset; //4 332 PxU8 dofIds[PxArticulationAxis::eCOUNT]; //6 338 PxU8 motion[PxArticulationAxis::eCOUNT]; //6 344 PxU8 invDofIds[PxArticulationAxis::eCOUNT]; //6 350 ArticulationJointCoreDirtyFlags jointDirtyFlag; //1 351 PxU8 jointType; //1 352 }PX_ALIGN_SUFFIX(16); } } #endif
9,319
C
38.491525
187
0.693744
NVIDIA-Omniverse/PhysX/physx/source/lowleveldynamics/include/DyFeatherstoneArticulationUtils.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef DY_FEATHERSTONE_ARTICULATION_UTIL_H #define DY_FEATHERSTONE_ARTICULATION_UTIL_H #include "foundation/PxVecMath.h" #include "CmSpatialVector.h" #include "foundation/PxBitUtils.h" #include "foundation/PxMemory.h" namespace physx { namespace Dy { static const size_t DY_MAX_DOF = 6; struct SpatialSubspaceMatrix { static const PxU32 MaxColumns = 3; public: #ifndef __CUDACC__ PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialSubspaceMatrix() :numColumns(0) { //PxMemZero(columns, sizeof(Cm::SpatialVectorF) * 6); PxMemSet(columns, 0, sizeof(Cm::UnAlignedSpatialVector) * MaxColumns); } #endif PX_CUDA_CALLABLE PX_FORCE_INLINE void setNumColumns(const PxU32 nc) { numColumns = nc; } PX_CUDA_CALLABLE PX_FORCE_INLINE PxU32 getNumColumns() const { return numColumns; } PX_CUDA_CALLABLE PX_FORCE_INLINE Cm::SpatialVectorF transposeMultiply(Cm::SpatialVectorF& v) const { PxReal result[6]; for (PxU32 i = 0; i < numColumns; ++i) { const Cm::UnAlignedSpatialVector& row = columns[i]; result[i] = row.dot(v); } Cm::SpatialVectorF res; res.top.x = result[0]; res.top.y = result[1]; res.top.z = result[2]; res.bottom.x = result[3]; res.bottom.y = result[4]; res.bottom.z = result[5]; return res; } PX_CUDA_CALLABLE PX_FORCE_INLINE void setColumn(const PxU32 index, const PxVec3& top, const PxVec3& bottom) { PX_ASSERT(index < MaxColumns); columns[index] = Cm::SpatialVectorF(top, bottom); } PX_CUDA_CALLABLE PX_FORCE_INLINE Cm::UnAlignedSpatialVector& operator[](unsigned int num) { PX_ASSERT(num < MaxColumns); return columns[num]; } PX_CUDA_CALLABLE PX_FORCE_INLINE const Cm::UnAlignedSpatialVector& operator[](unsigned int num) const { PX_ASSERT(num < MaxColumns); return columns[num]; } PX_CUDA_CALLABLE PX_FORCE_INLINE const Cm::UnAlignedSpatialVector* getColumns() const { return columns; } //private: Cm::UnAlignedSpatialVector columns[MaxColumns]; //3x24 = 72 PxU32 numColumns; //76 PxU32 padding; //80 }; //this should be 6x6 matrix //|R, 0| //|-R*rX, R| struct SpatialTransform { PxMat33 R; PxQuat q; 
PxMat33 T; public: PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialTransform() : R(PxZero), T(PxZero) { } PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialTransform(const PxMat33& R_, const PxMat33& T_) : R(R_), T(T_) { q = PxQuat(R_); } PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialTransform(const PxQuat& q_, const PxMat33& T_) : q(q_), T(T_) { R = PxMat33(q_); } //This assume angular is the top vector and linear is the bottom vector /*PX_CUDA_CALLABLE PX_FORCE_INLINE Cm::SpatialVector operator *(const Cm::SpatialVector& s) const { const PxVec3 angular = R * s.angular; const PxVec3 linear = T * s.angular + R * s.linear; return Cm::SpatialVector(linear, angular); }*/ ////This assume angular is the top vector and linear is the bottom vector //PX_FORCE_INLINE Cm::SpatialVectorF operator *(Cm::SpatialVectorF& s) const //{ // const PxVec3 top = R * s.top; // const PxVec3 bottom = T * s.top + R * s.bottom; // const PxVec3 top1 = q.rotate(s.top); // const PxVec3 bottom1 = T * s.top + q.rotate(s.bottom); ///* const PxVec3 tDif = (top - top1).abs(); // const PxVec3 bDif = (bottom - bottom1).abs(); // const PxReal eps = 0.001f; // PX_ASSERT(tDif.x < eps && tDif.y < eps && tDif.z < eps); // PX_ASSERT(bDif.x < eps && bDif.y < eps && bDif.z < eps);*/ // return Cm::SpatialVectorF(top1, bottom1); //} //This assume angular is the top vector and linear is the bottom vector PX_CUDA_CALLABLE PX_FORCE_INLINE Cm::SpatialVectorF operator *(const Cm::SpatialVectorF& s) const { //const PxVec3 top = R * s.top; //const PxVec3 bottom = T * s.top + R * s.bottom; const PxVec3 top1 = q.rotate(s.top); const PxVec3 bottom1 = T * s.top + q.rotate(s.bottom); return Cm::SpatialVectorF(top1, bottom1); } PX_CUDA_CALLABLE PX_FORCE_INLINE Cm::UnAlignedSpatialVector operator *(const Cm::UnAlignedSpatialVector& s) const { //const PxVec3 top = R * s.top; //const PxVec3 bottom = T * s.top + R * s.bottom; const PxVec3 top1 = q.rotate(s.top); const PxVec3 bottom1 = T * s.top + q.rotate(s.bottom); return 
Cm::UnAlignedSpatialVector(top1, bottom1); } //transpose is the same as inverse, R(inverse) = R(transpose) //|R(t), 0 | //|rXR(t), R(t)| PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialTransform getTranspose() const { SpatialTransform ret; ret.q = q.getConjugate(); ret.R = R.getTranspose(); ret.T = T.getTranspose(); return ret; } PX_CUDA_CALLABLE PX_FORCE_INLINE Cm::SpatialVectorF transposeTransform(const Cm::SpatialVectorF& s) const { const PxVec3 top1 = q.rotateInv(s.top); const PxVec3 bottom1 = T.transformTranspose(s.top) + q.rotateInv(s.bottom); return Cm::SpatialVectorF(top1, bottom1); } PX_CUDA_CALLABLE PX_FORCE_INLINE Cm::UnAlignedSpatialVector transposeTransform(const Cm::UnAlignedSpatialVector& s) const { const PxVec3 top1 = q.rotateInv(s.top); const PxVec3 bottom1 = T.transformTranspose(s.top) + q.rotateInv(s.bottom); return Cm::UnAlignedSpatialVector(top1, bottom1); } PX_CUDA_CALLABLE PX_FORCE_INLINE void operator =(SpatialTransform& other) { R = other.R; q = other.q; T = other.T; } }; struct InvStIs { PxReal invStIs[3][3]; }; //this should be 6x6 matrix and initialize to //|0, M| //|I, 0| //this should be 6x6 matrix but bottomRight is the transpose of topLeft //so we can get rid of bottomRight struct SpatialMatrix { PxMat33 topLeft; // intialize to 0 PxMat33 topRight; // initialize to mass matrix PxMat33 bottomLeft; // initialize to inertia PxU32 padding; //4 112 public: PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialMatrix() { } PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialMatrix(PxZERO r) : topLeft(PxZero), topRight(PxZero), bottomLeft(PxZero) { PX_UNUSED(r); } PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialMatrix(const PxMat33& topLeft_, const PxMat33& topRight_, const PxMat33& bottomLeft_) { topLeft = topLeft_; topRight = topRight_; bottomLeft = bottomLeft_; } PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat33 getBottomRight() const { return topLeft.getTranspose(); } PX_FORCE_INLINE PX_CUDA_CALLABLE void setZero() { topLeft = PxMat33(0.f); topRight = PxMat33(0.f); bottomLeft = 
PxMat33(0.f); } //This assume angular is the top vector and linear is the bottom vector PX_CUDA_CALLABLE PX_FORCE_INLINE Cm::SpatialVector operator *(const Cm::SpatialVector& s) const { const PxVec3 angular = topLeft * s.angular + topRight * s.linear; const PxVec3 linear = bottomLeft * s.angular + topLeft.transformTranspose(s.linear); return Cm::SpatialVector(linear, angular); } //This assume angular is the top vector and linear is the bottom vector PX_CUDA_CALLABLE PX_FORCE_INLINE Cm::SpatialVectorF operator *(const Cm::SpatialVectorF& s) const { const PxVec3 top = topLeft * s.top + topRight * s.bottom; const PxVec3 bottom = bottomLeft * s.top + topLeft.transformTranspose(s.bottom); return Cm::SpatialVectorF(top, bottom); } PX_CUDA_CALLABLE PX_FORCE_INLINE Cm::UnAlignedSpatialVector operator *(const Cm::UnAlignedSpatialVector& s) const { const PxVec3 top = topLeft * s.top + topRight * s.bottom; const PxVec3 bottom = bottomLeft * s.top + topLeft.transformTranspose(s.bottom); return Cm::UnAlignedSpatialVector(top, bottom); } PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialMatrix operator *(const PxReal& s) const { const PxMat33 newTopLeft = topLeft * s; const PxMat33 newTopRight = topRight * s; const PxMat33 newBottomLeft = bottomLeft * s; return SpatialMatrix(newTopLeft, newTopRight, newBottomLeft); } PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialMatrix operator -(const SpatialMatrix& s) const { const PxMat33 newTopLeft = topLeft - s.topLeft; const PxMat33 newTopRight = topRight - s.topRight; const PxMat33 newBottomLeft = bottomLeft - s.bottomLeft; return SpatialMatrix(newTopLeft, newTopRight, newBottomLeft); } PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialMatrix operator +(const SpatialMatrix& s) const { const PxMat33 newTopLeft = topLeft + s.topLeft; const PxMat33 newTopRight = topRight + s.topRight; const PxMat33 newBottomLeft = bottomLeft + s.bottomLeft; return SpatialMatrix(newTopLeft, newTopRight, newBottomLeft); } PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialMatrix operator-() { 
const PxMat33 newTopLeft = -topLeft; const PxMat33 newTopRight = -topRight; const PxMat33 newBottomLeft = -bottomLeft; return SpatialMatrix(newTopLeft, newTopRight, newBottomLeft); } PX_CUDA_CALLABLE PX_FORCE_INLINE void operator +=(const SpatialMatrix& s) { topLeft += s.topLeft; topRight += s.topRight; bottomLeft += s.bottomLeft; } PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialMatrix operator *(const SpatialMatrix& s) { const PxMat33 sBottomRight = s.topLeft.getTranspose(); const PxMat33 bottomRight = topLeft.getTranspose(); const PxMat33 newTopLeft = topLeft * s.topLeft + topRight * s.bottomLeft; const PxMat33 newTopRight = topLeft * s.topRight + topRight * sBottomRight; const PxMat33 newBottomLeft = bottomLeft * s.topLeft + bottomRight * s.bottomLeft; return SpatialMatrix(newTopLeft, newTopRight, newBottomLeft); } static SpatialMatrix constructSpatialMatrix(const Cm::SpatialVector& Is, const Cm::SpatialVector& stI) { //construct top left const PxVec3 tLeftC0 = Is.angular * stI.angular.x; const PxVec3 tLeftC1 = Is.angular * stI.angular.y; const PxVec3 tLeftC2 = Is.angular * stI.angular.z; const PxMat33 topLeft(tLeftC0, tLeftC1, tLeftC2); //construct top right const PxVec3 tRightC0 = Is.angular * stI.linear.x; const PxVec3 tRightC1 = Is.angular * stI.linear.y; const PxVec3 tRightC2 = Is.angular * stI.linear.z; const PxMat33 topRight(tRightC0, tRightC1, tRightC2); //construct bottom left const PxVec3 bLeftC0 = Is.linear * stI.angular.x; const PxVec3 bLeftC1 = Is.linear * stI.angular.y; const PxVec3 bLeftC2 = Is.linear * stI.angular.z; const PxMat33 bottomLeft(bLeftC0, bLeftC1, bLeftC2); return SpatialMatrix(topLeft, topRight, bottomLeft); } static PX_CUDA_CALLABLE SpatialMatrix constructSpatialMatrix(const Cm::SpatialVectorF& Is, const Cm::SpatialVectorF& stI) { //construct top left const PxVec3 tLeftC0 = Is.top * stI.top.x; const PxVec3 tLeftC1 = Is.top * stI.top.y; const PxVec3 tLeftC2 = Is.top * stI.top.z; const PxMat33 topLeft(tLeftC0, tLeftC1, tLeftC2); //construct 
top right const PxVec3 tRightC0 = Is.top * stI.bottom.x; const PxVec3 tRightC1 = Is.top * stI.bottom.y; const PxVec3 tRightC2 = Is.top * stI.bottom.z; const PxMat33 topRight(tRightC0, tRightC1, tRightC2); //construct bottom left const PxVec3 bLeftC0 = Is.bottom * stI.top.x; const PxVec3 bLeftC1 = Is.bottom * stI.top.y; const PxVec3 bLeftC2 = Is.bottom * stI.top.z; const PxMat33 bottomLeft(bLeftC0, bLeftC1, bLeftC2); return SpatialMatrix(topLeft, topRight, bottomLeft); } template <typename SpatialVector> static PX_CUDA_CALLABLE SpatialMatrix constructSpatialMatrix(const SpatialVector* columns) { const PxMat33 topLeft(columns[0].top, columns[1].top, columns[2].top); const PxMat33 bottomLeft(columns[0].bottom, columns[1].bottom, columns[2].bottom); const PxMat33 topRight(columns[3].top, columns[4].top, columns[5].top); return SpatialMatrix(topLeft, topRight, bottomLeft); } PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialMatrix getTranspose() { const PxMat33 newTopLeft = topLeft.getTranspose(); const PxMat33 newTopRight = bottomLeft.getTranspose(); const PxMat33 newBottomLeft = topRight.getTranspose(); //const PxMat33 newBottomRight = bottomRight.getTranspose(); return SpatialMatrix(newTopLeft, newTopRight, newBottomLeft);// , newBottomRight); } //static bool isTranspose(const PxMat33& a, const PxMat33& b) //{ // PxReal eps = 0.01f; // //test bottomRight is the transpose of topLeft // for (PxU32 i = 0; i <3; ++i) // { // for (PxU32 j = 0; j <3; ++j) // { // if (PxAbs(a[i][j] - b[j][i]) > eps) // return false; // } // } // return true; //} PX_FORCE_INLINE bool isIdentity(const PxMat33& matrix) { const PxReal eps = 0.00001f; const float x = PxAbs(1.f - matrix.column0.x); const float y = PxAbs(1.f - matrix.column1.y); const float z = PxAbs(1.f - matrix.column2.z); const bool identity = ((x < eps) && PxAbs(matrix.column0.y - 0.f) < eps && PxAbs(matrix.column0.z - 0.f) < eps) && (PxAbs(matrix.column1.x - 0.f) < eps && (y < eps) && PxAbs(matrix.column1.z - 0.f) < eps) && 
(PxAbs(matrix.column2.x - 0.f) < eps && PxAbs(matrix.column2.y - 0.f) < eps && (z < eps)); return identity; } PX_FORCE_INLINE bool isZero(const PxMat33& matrix) { const PxReal eps = 0.0001f; for (PxU32 i = 0; i < 3; ++i) { for (PxU32 j = 0; j < 3; ++j) { if (PxAbs(matrix[i][j]) > eps) return false; } } return true; } PX_FORCE_INLINE bool isIdentity() { const bool topLeftIsIdentity = isIdentity(topLeft); const bool topRightIsZero = isZero(topRight); const bool bottomLeftIsZero = isZero(bottomLeft); return topLeftIsIdentity && topRightIsZero && bottomLeftIsZero; } static bool isEqual(const PxMat33& s0, const PxMat33& s1) { const PxReal eps = 0.00001f; for (PxU32 i = 0; i < 3; ++i) { for (PxU32 j = 0; j < 3; ++j) { const PxReal t = s0[i][j] - s1[i][j]; if (PxAbs(t) > eps) return false; } } return true; } PX_FORCE_INLINE bool isEqual(const SpatialMatrix& s) { const bool topLeftEqual = isEqual(topLeft, s.topLeft); const bool topRightEqual = isEqual(topRight, s.topRight); const bool bottomLeftEqual = isEqual(bottomLeft, s.bottomLeft); return topLeftEqual && topRightEqual && bottomLeftEqual; } static PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat33 invertSym33(const PxMat33& in) { const PxVec3 v0 = in[1].cross(in[2]); const PxVec3 v1 = in[2].cross(in[0]); const PxVec3 v2 = in[0].cross(in[1]); const PxReal det = v0.dot(in[0]); if (det != 0) { const PxReal recipDet = 1.0f / det; return PxMat33(v0 * recipDet, PxVec3(v0.y, v1.y, v1.z) * recipDet, PxVec3(v0.z, v1.z, v2.z) * recipDet); } else { return PxMat33(PxIdentity); } } static PX_FORCE_INLINE aos::Mat33V invertSym33(const aos::Mat33V& in) { using namespace aos; const Vec3V v0 = V3Cross(in.col1, in.col2); const Vec3V v1 = V3Cross(in.col2, in.col0); const Vec3V v2 = V3Cross(in.col0, in.col1); const FloatV det = V3Dot(v0, in.col0); const FloatV recipDet = FRecip(det); if (!FAllEq(det, FZero())) { return Mat33V(V3Scale(v0, recipDet), V3Scale(V3Merge(V3GetY(v0), V3GetY(v1), V3GetZ(v1)), recipDet), V3Scale(V3Merge(V3GetZ(v0), 
V3GetZ(v1), V3GetZ(v2)), recipDet)); } else { return Mat33V(V3UnitX(), V3UnitY(), V3UnitZ()); } //return M33Inverse(in); } PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialMatrix invertInertia() { PxMat33 aa = bottomLeft, ll = topRight, la = topLeft; aa = (aa + aa.getTranspose())*0.5f; ll = (ll + ll.getTranspose())*0.5f; const PxMat33 AAInv = invertSym33(aa); const PxMat33 z = -la * AAInv; const PxMat33 S = ll + z * la.getTranspose(); // Schur complement of mAA const PxMat33 LL = invertSym33(S); const PxMat33 LA = LL * z; const PxMat33 AA = AAInv + z.getTranspose() * LA; const SpatialMatrix result(LA.getTranspose(), AA, LL);// , LA); return result; } PX_FORCE_INLINE void M33Store(const aos::Mat33V& src, PxMat33& dest) { aos::V3StoreU(src.col0, dest.column0); aos::V3StoreU(src.col1, dest.column1); aos::V3StoreU(src.col2, dest.column2); } PX_FORCE_INLINE void invertInertiaV(SpatialMatrix& result) { using namespace aos; Mat33V aa = M33Load(bottomLeft), ll = M33Load(topRight), la = M33Load(topLeft); aa = M33Scale(M33Add(aa, M33Trnsps(aa)), FHalf()); ll = M33Scale(M33Add(ll, M33Trnsps(ll)), FHalf()); const Mat33V AAInv = invertSym33(aa); const Mat33V z = M33MulM33(M33Neg(la), AAInv); const Mat33V S = M33Add(ll, M33MulM33(z, M33Trnsps(la))); // Schur complement of mAA const Mat33V LL = invertSym33(S); const Mat33V LA = M33MulM33(LL, z); const Mat33V AA = M33Add(AAInv, M33MulM33(M33Trnsps(z), LA)); M33Store(M33Trnsps(LA), result.topLeft); M33Store(AA, result.topRight); M33Store(LL, result.bottomLeft); } SpatialMatrix getInverse() { const PxMat33 bottomRight = topLeft.getTranspose(); const PxMat33 blInverse = bottomLeft.getInverse(); const PxMat33 lComp0 = blInverse * (-bottomRight); const PxMat33 lComp1 = topLeft * lComp0 + topRight; //This can be simplified const PxMat33 newBottomLeft = lComp1.getInverse(); const PxMat33 newTopLeft = lComp0 * newBottomLeft; const PxMat33 trInverse = topRight.getInverse(); const PxMat33 rComp0 = trInverse * (-topLeft); const PxMat33 rComp1 = 
bottomLeft + bottomRight * rComp0; const PxMat33 newTopRight = rComp1.getInverse(); return SpatialMatrix(newTopLeft, newTopRight, newBottomLeft); } void zero() { topLeft = PxMat33(PxZero); topRight = PxMat33(PxZero); bottomLeft = PxMat33(PxZero); } }; struct SpatialImpulseResponseMatrix { Cm::SpatialVectorF rows[6]; Cm::SpatialVectorF getResponse(const Cm::SpatialVectorF& impulse) const { /*return rows[0] * impulse.top.x + rows[1] * impulse.top.y + rows[2] * impulse.top.z + rows[3] * impulse.bottom.x + rows[4] * impulse.bottom.y + rows[5] * impulse.bottom.z;*/ using namespace aos; const Cm::SpatialVectorV row0(V3LoadA(&rows[0].top.x), V3LoadA(&rows[0].bottom.x)); const Cm::SpatialVectorV row1(V3LoadA(&rows[1].top.x), V3LoadA(&rows[1].bottom.x)); const Cm::SpatialVectorV row2(V3LoadA(&rows[2].top.x), V3LoadA(&rows[2].bottom.x)); const Cm::SpatialVectorV row3(V3LoadA(&rows[3].top.x), V3LoadA(&rows[3].bottom.x)); const Cm::SpatialVectorV row4(V3LoadA(&rows[4].top.x), V3LoadA(&rows[4].bottom.x)); const Cm::SpatialVectorV row5(V3LoadA(&rows[5].top.x), V3LoadA(&rows[5].bottom.x)); const Vec4V top = V4LoadA(&impulse.top.x); const Vec4V bottom = V4LoadA(&impulse.bottom.x); const FloatV ix = V4GetX(top); const FloatV iy = V4GetY(top); const FloatV iz = V4GetZ(top); const FloatV ia = V4GetX(bottom); const FloatV ib = V4GetY(bottom); const FloatV ic = V4GetZ(bottom); Cm::SpatialVectorV res = row0 * ix + row1 * iy + row2 * iz + row3 * ia + row4 * ib + row5 * ic; Cm::SpatialVectorF returnVal; V4StoreA(Vec4V_From_Vec3V(res.linear), &returnVal.top.x); V4StoreA(Vec4V_From_Vec3V(res.angular), &returnVal.bottom.x); return returnVal; } Cm::SpatialVectorV getResponse(const Cm::SpatialVectorV& impulse) const { using namespace aos; const Cm::SpatialVectorV row0(V3LoadA(&rows[0].top.x), V3LoadA(&rows[0].bottom.x)); const Cm::SpatialVectorV row1(V3LoadA(&rows[1].top.x), V3LoadA(&rows[1].bottom.x)); const Cm::SpatialVectorV row2(V3LoadA(&rows[2].top.x), V3LoadA(&rows[2].bottom.x)); const 
Cm::SpatialVectorV row3(V3LoadA(&rows[3].top.x), V3LoadA(&rows[3].bottom.x)); const Cm::SpatialVectorV row4(V3LoadA(&rows[4].top.x), V3LoadA(&rows[4].bottom.x)); const Cm::SpatialVectorV row5(V3LoadA(&rows[5].top.x), V3LoadA(&rows[5].bottom.x)); const Vec3V top = impulse.linear; const Vec3V bottom = impulse.angular; const FloatV ix = V3GetX(top); const FloatV iy = V3GetY(top); const FloatV iz = V3GetZ(top); const FloatV ia = V3GetX(bottom); const FloatV ib = V3GetY(bottom); const FloatV ic = V3GetZ(bottom); Cm::SpatialVectorV res = row0 * ix + row1 * iy + row2 * iz + row3 * ia + row4 * ib + row5 * ic; return res; } }; struct Temp6x6Matrix; struct Temp6x3Matrix { PxReal column[3][6]; public: Temp6x3Matrix() { } Temp6x3Matrix(const Cm::SpatialVectorF* spatialAxis) { constructColumn(column[0], spatialAxis[0]); constructColumn(column[1], spatialAxis[1]); constructColumn(column[2], spatialAxis[2]); } void constructColumn(PxReal* dest, const Cm::SpatialVectorF& v) { dest[0] = v.top.x; dest[1] = v.top.y; dest[2] = v.top.z; dest[3] = v.bottom.x; dest[4] = v.bottom.y; dest[5] = v.bottom.z; } Temp6x6Matrix operator * (PxReal s[6][3]); ////s is 3x6 matrix //PX_FORCE_INLINE Temp6x6Matrix operator * (PxReal s[6][3]) //{ // Temp6x6Matrix temp; // for (PxU32 i = 0; i < 6; ++i) // { // PxReal* tc = temp.column[i]; // for (PxU32 j = 0; j < 6; ++j) // { // tc[j] = 0.f; // for (PxU32 k = 0; k < 3; ++k) // { // tc[j] += column[k][j] * s[i][k]; // } // } // } // return temp; //} PX_FORCE_INLINE Temp6x3Matrix operator * (const PxMat33& s) { Temp6x3Matrix temp; for (PxU32 i = 0; i < 3; ++i) { PxReal* tc = temp.column[i]; const PxVec3 sc = s[i]; for (PxU32 j = 0; j < 6; ++j) { tc[j] = 0.f; for (PxU32 k = 0; k < 3; ++k) { tc[j] += column[k][j] * sc[k]; } } } return temp; } PX_FORCE_INLINE bool isColumnEqual(const PxU32 ind, const Cm::SpatialVectorF& col) { PxReal temp[6]; constructColumn(temp, col); const PxReal eps = 0.00001f; for (PxU32 i = 0; i < 6; ++i) { const PxReal dif = 
column[ind][i] - temp[i]; if (PxAbs(dif) > eps) return false; } return true; } }; struct Temp6x6Matrix { PxReal column[6][6]; public: Temp6x6Matrix() { } Temp6x6Matrix(const SpatialMatrix& spatialMatrix) { constructColumn(column[0], spatialMatrix.topLeft.column0, spatialMatrix.bottomLeft.column0); constructColumn(column[1], spatialMatrix.topLeft.column1, spatialMatrix.bottomLeft.column1); constructColumn(column[2], spatialMatrix.topLeft.column2, spatialMatrix.bottomLeft.column2); const PxMat33 bottomRight = spatialMatrix.getBottomRight(); constructColumn(column[3], spatialMatrix.topRight.column0, bottomRight.column0); constructColumn(column[4], spatialMatrix.topRight.column1, bottomRight.column1); constructColumn(column[5], spatialMatrix.topRight.column2, bottomRight.column2); } void constructColumn(const PxU32 ind, const PxReal* const values) { for (PxU32 i = 0; i < 6; ++i) { column[ind][i] = values[i]; } } void constructColumn(PxReal* dest, const PxVec3& top, const PxVec3& bottom) { dest[0] = top.x; dest[1] = top.y; dest[2] = top.z; dest[3] = bottom.x; dest[4] = bottom.y; dest[5] = bottom.z; } Temp6x6Matrix getTranspose() const { Temp6x6Matrix temp; for (PxU32 i = 0; i < 6; ++i) { for (PxU32 j = 0; j < 6; ++j) { temp.column[i][j] = column[j][i]; } } return temp; } PX_FORCE_INLINE Cm::SpatialVector operator * (const Cm::SpatialVector& s) const { Temp6x6Matrix tempMatrix = getTranspose(); PxReal st[6]; st[0] = s.angular.x; st[1] = s.angular.y; st[2] = s.angular.z; st[3] = s.linear.x; st[4] = s.linear.y; st[5] = s.linear.z; PxReal result[6]; for (PxU32 i = 0; i < 6; i++) { result[i] = 0; for (PxU32 j = 0; j < 6; ++j) { result[i] += tempMatrix.column[i][j] * st[j]; } } Cm::SpatialVector temp; temp.angular.x = result[0]; temp.angular.y = result[1]; temp.angular.z = result[2]; temp.linear.x = result[3]; temp.linear.y = result[4]; temp.linear.z = result[5]; return temp; } PX_FORCE_INLINE Cm::SpatialVectorF operator * (const Cm::SpatialVectorF& s) const { PxReal st[6]; 
st[0] = s.top.x; st[1] = s.top.y; st[2] = s.top.z; st[3] = s.bottom.x; st[4] = s.bottom.y; st[5] = s.bottom.z; PxReal result[6]; for (PxU32 i = 0; i < 6; ++i) { result[i] = 0.f; for (PxU32 j = 0; j < 6; ++j) { result[i] += column[j][i] * st[j]; } } Cm::SpatialVectorF temp; temp.top.x = result[0]; temp.top.y = result[1]; temp.top.z = result[2]; temp.bottom.x = result[3]; temp.bottom.y = result[4]; temp.bottom.z = result[5]; return temp; } PX_FORCE_INLINE Temp6x3Matrix operator * (const Temp6x3Matrix& s) const { Temp6x3Matrix temp; for (PxU32 i = 0; i < 3; ++i) { PxReal* result = temp.column[i]; const PxReal* input = s.column[i]; for (PxU32 j = 0; j < 6; ++j) { result[j] = 0.f; for (PxU32 k = 0; k < 6; ++k) { result[j] += column[k][j] * input[k]; } } } return temp; } PX_FORCE_INLINE Cm::SpatialVector spatialVectorMul(const Cm::SpatialVector& s) { PxReal st[6]; st[0] = s.angular.x; st[1] = s.angular.y; st[2] = s.angular.z; st[3] = s.linear.x; st[4] = s.linear.y; st[5] = s.linear.z; PxReal result[6]; for (PxU32 i = 0; i < 6; ++i) { result[i] = 0.f; for (PxU32 j = 0; j < 6; j++) { result[i] += column[i][j] * st[j]; } } Cm::SpatialVector temp; temp.angular.x = result[0]; temp.angular.y = result[1]; temp.angular.z = result[2]; temp.linear.x = result[3]; temp.linear.y = result[4]; temp.linear.z = result[5]; return temp; } PX_FORCE_INLINE bool isEqual(const Cm::SpatialVectorF* m) { PxReal temp[6]; const PxReal eps = 0.00001f; for (PxU32 i = 0; i < 6; ++i) { temp[0] = m[i].top.x; temp[1] = m[i].top.y; temp[2] = m[i].top.z; temp[3] = m[i].bottom.x; temp[4] = m[i].bottom.y; temp[5] = m[i].bottom.z; for (PxU32 j = 0; j < 6; ++j) { const PxReal dif = column[i][j] - temp[j]; if (PxAbs(dif) > eps) return false; } } return true; } }; //s is 3x6 matrix PX_FORCE_INLINE Temp6x6Matrix Temp6x3Matrix::operator * (PxReal s[6][3]) { Temp6x6Matrix temp; for (PxU32 i = 0; i < 6; ++i) { PxReal* tc = temp.column[i]; for (PxU32 j = 0; j < 6; ++j) { tc[j] = 0.f; for (PxU32 k = 0; k < 3; ++k) { 
tc[j] += column[k][j] * s[i][k]; } } } return temp; } PX_FORCE_INLINE void calculateNewVelocity(const PxTransform& newTransform, const PxTransform& oldTransform, const PxReal dt, PxVec3& linear, PxVec3& angular) { //calculate the new velocity linear = (newTransform.p - oldTransform.p) / dt; PxQuat quat = newTransform.q * oldTransform.q.getConjugate(); if (quat.w < 0) //shortest angle. quat = -quat; PxReal angle; PxVec3 axis; quat.toRadiansAndUnitAxis(angle, axis); angular = (axis * angle) / dt; } // generates a pair of quaternions (swing, twist) such that in = swing * twist, with // swing.x = 0 // twist.y = twist.z = 0, and twist is a unit quat PX_CUDA_CALLABLE PX_FORCE_INLINE void separateSwingTwist(const PxQuat& q, PxQuat& twist, PxQuat& swing1, PxQuat& swing2) { twist = q.x != 0.0f ? PxQuat(q.x, 0, 0, q.w).getNormalized() : PxQuat(PxIdentity); PxQuat swing = q * twist.getConjugate(); swing1 = swing.y != 0.f ? PxQuat(0.f, swing.y, 0.f, swing.w).getNormalized() : PxQuat(PxIdentity); swing = swing * swing1.getConjugate(); swing2 = swing.z != 0.f ? PxQuat(0.f, 0.f, swing.z, swing.w).getNormalized() : PxQuat(PxIdentity); } PX_CUDA_CALLABLE PX_FORCE_INLINE void separateSwingTwist2(const PxQuat& q, PxQuat& twist, PxQuat& swing1, PxQuat& swing2) { swing2 = q.z != 0.0f ? PxQuat(0.f, 0.f, q.z, q.w).getNormalized() : PxQuat(PxIdentity); PxQuat swing = q * swing2.getConjugate(); swing1 = swing.y != 0.f ? PxQuat(0.f, swing.y, 0.f, swing.w).getNormalized() : PxQuat(PxIdentity); swing = swing * swing1.getConjugate(); twist = swing.x != 0.f ? PxQuat(swing.x, 0.f, 0.f, swing.w).getNormalized() : PxQuat(PxIdentity); } } //namespace Dy } #endif
29,576
C
27.997059
127
0.658811
NVIDIA-Omniverse/PhysX/physx/source/lowleveldynamics/include/DySleepingConfigulation.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef DY_SLEEPING_CONFIGURATION_H #define DY_SLEEPING_CONFIGURATION_H #define PXD_FREEZE_INTERVAL 1.5f #define PXD_FREE_EXIT_THRESHOLD 4.f #define PXD_FREEZE_TOLERANCE 0.25f #define PXD_SLEEP_DAMPING 0.5f #define PXD_ACCEL_LOSS 0.9f #define PXD_FREEZE_SCALE 0.1f #endif
1,974
C
47.170731
74
0.767984
NVIDIA-Omniverse/PhysX/physx/source/lowlevelaabb/src/BpBroadPhaseShared.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef BP_BROADPHASE_SHARED_H #define BP_BROADPHASE_SHARED_H #include "BpBroadPhaseIntegerAABB.h" #include "foundation/PxUserAllocated.h" #include "foundation/PxHash.h" #include "foundation/PxVecMath.h" namespace physx { namespace Bp { #define INVALID_ID 0xffffffff #define INVALID_USER_ID 0xffffffff struct InternalPair : public PxUserAllocated { PX_FORCE_INLINE PxU32 getId0() const { return id0_isNew & ~PX_SIGN_BITMASK; } PX_FORCE_INLINE PxU32 getId1() const { return id1_isUpdated & ~PX_SIGN_BITMASK; } PX_FORCE_INLINE PxU32 isNew() const { return id0_isNew & PX_SIGN_BITMASK; } PX_FORCE_INLINE PxU32 isUpdated() const { return id1_isUpdated & PX_SIGN_BITMASK; } PX_FORCE_INLINE void setNewPair(PxU32 id0, PxU32 id1) { PX_ASSERT(!(id0 & PX_SIGN_BITMASK)); PX_ASSERT(!(id1 & PX_SIGN_BITMASK)); id0_isNew = id0 | PX_SIGN_BITMASK; id1_isUpdated = id1; } PX_FORCE_INLINE void setNewPair2(PxU32 id0, PxU32 id1) { PX_ASSERT(!(id0 & PX_SIGN_BITMASK)); PX_ASSERT(!(id1 & PX_SIGN_BITMASK)); id0_isNew = id0; id1_isUpdated = id1; } PX_FORCE_INLINE void setUpdated() { id1_isUpdated |= PX_SIGN_BITMASK; } PX_FORCE_INLINE void clearUpdated() { id1_isUpdated &= ~PX_SIGN_BITMASK; } PX_FORCE_INLINE void clearNew() { id0_isNew &= ~PX_SIGN_BITMASK; } protected: PxU32 id0_isNew; PxU32 id1_isUpdated; }; PX_FORCE_INLINE bool differentPair(const InternalPair& p, PxU32 id0, PxU32 id1) { return (id0!=p.getId0()) || (id1!=p.getId1()); } PX_FORCE_INLINE PxU32 hash(PxU32 id0, PxU32 id1) { return PxComputeHash( (id0&0xffff)|(id1<<16)); } //PX_FORCE_INLINE PxU32 hash(PxU32 id0, PxU32 id1) { return PxComputeHash(PxU64(id0)|(PxU64(id1)<<32)) ; } PX_FORCE_INLINE void sort(PxU32& id0, PxU32& id1) { if(id0>id1) PxSwap(id0, id1); } class PairManagerData { public: PairManagerData(); ~PairManagerData(); PX_FORCE_INLINE PxU32 getPairIndex(const InternalPair* pair) const { return (PxU32((size_t(pair) - size_t(mActivePairs)))/sizeof(InternalPair)); } // Internal version saving hash computation PX_FORCE_INLINE 
InternalPair* findPair(PxU32 id0, PxU32 id1, PxU32 hashValue) const { if(!mHashTable) return NULL; // Nothing has been allocated yet InternalPair* PX_RESTRICT activePairs = mActivePairs; const PxU32* PX_RESTRICT next = mNext; // Look for it in the table PxU32 offset = mHashTable[hashValue]; while(offset!=INVALID_ID && differentPair(activePairs[offset], id0, id1)) { PX_ASSERT(activePairs[offset].getId0()!=INVALID_USER_ID); offset = next[offset]; // Better to have a separate array for this } if(offset==INVALID_ID) return NULL; PX_ASSERT(offset<mNbActivePairs); // Match mActivePairs[offset] => the pair is persistent return &activePairs[offset]; } PX_FORCE_INLINE InternalPair* addPairInternal(PxU32 id0, PxU32 id1) { // Order the ids sort(id0, id1); const PxU32 fullHashValue = hash(id0, id1); PxU32 hashValue = fullHashValue & mMask; { InternalPair* PX_RESTRICT p = findPair(id0, id1, hashValue); if(p) { p->setUpdated(); return p; // Persistent pair } } // This is a new pair if(mNbActivePairs >= mHashSize) hashValue = growPairs(fullHashValue); const PxU32 pairIndex = mNbActivePairs++; InternalPair* PX_RESTRICT p = &mActivePairs[pairIndex]; p->setNewPair(id0, id1); mNext[pairIndex] = mHashTable[hashValue]; mHashTable[hashValue] = pairIndex; return p; } PxU32 mHashSize; PxU32 mMask; PxU32 mNbActivePairs; PxU32* mHashTable; PxU32* mNext; InternalPair* mActivePairs; PxU32 mReservedMemory; void purge(); void reallocPairs(); void shrinkMemory(); void reserveMemory(PxU32 memSize); PX_NOINLINE PxU32 growPairs(PxU32 fullHashValue); void removePair(PxU32 id0, PxU32 id1, PxU32 hashValue, PxU32 pairIndex); }; struct AABB_Xi { PX_FORCE_INLINE AABB_Xi() {} PX_FORCE_INLINE ~AABB_Xi() {} PX_FORCE_INLINE void initFromFloats(const void* PX_RESTRICT minX, const void* PX_RESTRICT maxX) { mMinX = encodeFloat(*reinterpret_cast<const PxU32*>(minX)); mMaxX = encodeFloat(*reinterpret_cast<const PxU32*>(maxX)); } PX_FORCE_INLINE void initFromPxVec4(const PxVec4& min, const PxVec4& max) { 
initFromFloats(&min.x, &max.x); } PX_FORCE_INLINE void operator = (const AABB_Xi& box) { mMinX = box.mMinX; mMaxX = box.mMaxX; } PX_FORCE_INLINE void initSentinel() { mMinX = 0xffffffff; } PX_FORCE_INLINE bool isSentinel() const { return mMinX == 0xffffffff; } PxU32 mMinX; PxU32 mMaxX; }; struct AABB_YZn { PX_FORCE_INLINE AABB_YZn() {} PX_FORCE_INLINE ~AABB_YZn() {} PX_FORCE_INLINE void initFromPxVec4(const PxVec4& min, const PxVec4& max) { mMinY = -min.y; mMinZ = -min.z; mMaxY = max.y; mMaxZ = max.z; } PX_FORCE_INLINE void operator = (const AABB_YZn& box) { using namespace physx::aos; V4StoreA(V4LoadA(&box.mMinY), &mMinY); } float mMinY; float mMinZ; float mMaxY; float mMaxZ; }; struct AABB_YZr { PX_FORCE_INLINE AABB_YZr() {} PX_FORCE_INLINE ~AABB_YZr() {} PX_FORCE_INLINE void initFromPxVec4(const PxVec4& min, const PxVec4& max) { mMinY = min.y; mMinZ = min.z; mMaxY = max.y; mMaxZ = max.z; } PX_FORCE_INLINE void operator = (const AABB_YZr& box) { using namespace physx::aos; V4StoreA(V4LoadA(&box.mMinY), &mMinY); } float mMinY; float mMinZ; float mMaxY; float mMaxZ; }; } //namespace Bp } //namespace physx #endif // BP_BROADPHASE_SHARED_H
7,760
C
29.675889
131
0.655026
NVIDIA-Omniverse/PhysX/physx/source/lowlevelaabb/src/BpBroadPhaseIntegerAABB.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef BP_BROADPHASE_INTEGER_AABB_H #define BP_BROADPHASE_INTEGER_AABB_H #include "BpFiltering.h" #include "foundation/PxBounds3.h" #include "foundation/PxUnionCast.h" namespace physx { namespace Bp { /* \brief Encode a single float value with lossless encoding to integer */ PX_FORCE_INLINE PxU32 encodeFloat(PxU32 ir) { //we may need to check on -0 and 0 //But it should make no practical difference. if(ir & PX_SIGN_BITMASK) //negative? return ~ir;//reverse sequence of negative numbers else return ir | PX_SIGN_BITMASK; // flip sign } /* \brief Encode a single float value with lossless encoding to integer */ PX_FORCE_INLINE PxU32 decodeFloat(PxU32 ir) { if(ir & PX_SIGN_BITMASK) //positive? return ir & ~PX_SIGN_BITMASK; //flip sign else return ~ir; //undo reversal } /** \brief Integer representation of PxBounds3 used by BroadPhase @see BroadPhaseUpdateData */ typedef PxU32 ValType; class IntegerAABB { public: enum { MIN_X = 0, MIN_Y, MIN_Z, MAX_X, MAX_Y, MAX_Z }; IntegerAABB(const PxBounds3& b, PxReal contactDistance) { const PxVec3 dist(contactDistance); encode(PxBounds3(b.minimum - dist, b.maximum + dist)); } /* \brief Return the minimum along a specified axis \param[in] i is the axis */ PX_FORCE_INLINE ValType getMin(PxU32 i) const { return (mMinMax)[MIN_X+i]; } /* \brief Return the maximum along a specified axis \param[in] i is the axis */ PX_FORCE_INLINE ValType getMax(PxU32 i) const { return (mMinMax)[MAX_X+i]; } /* \brief Return one of the six min/max values of the bound \param[in] isMax determines whether a min or max value is returned \param[in] index is the axis */ PX_FORCE_INLINE ValType getExtent(PxU32 isMax, PxU32 index) const { PX_ASSERT(isMax<=1); return (mMinMax)[3*isMax+index]; } /* \brief Return the minimum on the x axis */ PX_FORCE_INLINE ValType getMinX() const { return mMinMax[MIN_X]; } /* \brief Return the minimum on the y axis */ PX_FORCE_INLINE ValType getMinY() const { return mMinMax[MIN_Y]; } /* \brief Return the minimum on the z axis */ 
PX_FORCE_INLINE ValType getMinZ() const { return mMinMax[MIN_Z]; } /* \brief Return the maximum on the x axis */ PX_FORCE_INLINE ValType getMaxX() const { return mMinMax[MAX_X]; } /* \brief Return the maximum on the y axis */ PX_FORCE_INLINE ValType getMaxY() const { return mMinMax[MAX_Y]; } /* \brief Return the maximum on the z axis */ PX_FORCE_INLINE ValType getMaxZ() const { return mMinMax[MAX_Z]; } /* \brief Encode float bounds so they are stored as integer bounds \param[in] bounds is the bounds to be encoded \note The integer values of minima are always even, while the integer values of maxima are always odd \note The encoding process masks off the last four bits for minima and masks on the last four bits for maxima. This keeps the bounds constant when its shape is subjected to small global pose perturbations. In turn, this helps reduce computational effort in the broadphase update by reducing the amount of sorting required on near-stationary bodies that are aligned along one or more axis. @see decode */ PX_FORCE_INLINE void encode(const PxBounds3& bounds) { const PxU32* PX_RESTRICT min = PxUnionCast<const PxU32*, const PxF32*>(&bounds.minimum.x); const PxU32* PX_RESTRICT max = PxUnionCast<const PxU32*, const PxF32*>(&bounds.maximum.x); //Avoid min=max by enforcing the rule that mins are even and maxs are odd. mMinMax[MIN_X] = encodeFloatMin(min[0]); mMinMax[MIN_Y] = encodeFloatMin(min[1]); mMinMax[MIN_Z] = encodeFloatMin(min[2]); mMinMax[MAX_X] = encodeFloatMax(max[0]) | (1<<2); mMinMax[MAX_Y] = encodeFloatMax(max[1]) | (1<<2); mMinMax[MAX_Z] = encodeFloatMax(max[2]) | (1<<2); } /* \brief Decode from integer bounds to float bounds \param[out] bounds is the decoded float bounds \note Encode followed by decode will produce a float bound larger than the original due to the masking in encode. 
@see encode */ PX_FORCE_INLINE void decode(PxBounds3& bounds) const { PxU32* PX_RESTRICT min = PxUnionCast<PxU32*, PxF32*>(&bounds.minimum.x); PxU32* PX_RESTRICT max = PxUnionCast<PxU32*, PxF32*>(&bounds.maximum.x); min[0] = decodeFloat(mMinMax[MIN_X]); min[1] = decodeFloat(mMinMax[MIN_Y]); min[2] = decodeFloat(mMinMax[MIN_Z]); max[0] = decodeFloat(mMinMax[MAX_X]); max[1] = decodeFloat(mMinMax[MAX_Y]); max[2] = decodeFloat(mMinMax[MAX_Z]); } /* \brief Encode a single minimum value from integer bounds to float bounds \note The encoding process masks off the last four bits for minima @see encode */ static PX_FORCE_INLINE ValType encodeFloatMin(PxU32 source) { return ((encodeFloat(source) >> eGRID_SNAP_VAL) - 1) << eGRID_SNAP_VAL; } /* \brief Encode a single maximum value from integer bounds to float bounds \note The encoding process masks on the last four bits for maxima @see encode */ static PX_FORCE_INLINE ValType encodeFloatMax(PxU32 source) { return ((encodeFloat(source) >> eGRID_SNAP_VAL) + 1) << eGRID_SNAP_VAL; } /* \brief Shift the encoded bounds by a specified vector \param[in] shift is the vector used to shift the bounds */ PX_FORCE_INLINE void shift(const PxVec3& shift) { ::physx::PxBounds3 elemBounds; decode(elemBounds); elemBounds.minimum -= shift; elemBounds.maximum -= shift; encode(elemBounds); } /* \brief Test if this aabb lies entirely inside another aabb \param[in] box is the other box \return True if this aabb lies entirely inside box */ PX_INLINE bool isInside(const IntegerAABB& box) const { if(box.mMinMax[MIN_X]>mMinMax[MIN_X]) return false; if(box.mMinMax[MIN_Y]>mMinMax[MIN_Y]) return false; if(box.mMinMax[MIN_Z]>mMinMax[MIN_Z]) return false; if(box.mMinMax[MAX_X]<mMinMax[MAX_X]) return false; if(box.mMinMax[MAX_Y]<mMinMax[MAX_Y]) return false; if(box.mMinMax[MAX_Z]<mMinMax[MAX_Z]) return false; return true; } /* \brief Test if this aabb and another intersect \param[in] b is the other box \return True if this aabb and b intersect */ 
PX_FORCE_INLINE bool intersects(const IntegerAABB& b) const { return !(b.mMinMax[MIN_X] > mMinMax[MAX_X] || mMinMax[MIN_X] > b.mMinMax[MAX_X] || b.mMinMax[MIN_Y] > mMinMax[MAX_Y] || mMinMax[MIN_Y] > b.mMinMax[MAX_Y] || b.mMinMax[MIN_Z] > mMinMax[MAX_Z] || mMinMax[MIN_Z] > b.mMinMax[MAX_Z]); } PX_FORCE_INLINE bool intersects1D(const IntegerAABB& b, const PxU32 axis) const { const PxU32 maxAxis = axis + 3; return !(b.mMinMax[axis] > mMinMax[maxAxis] || mMinMax[axis] > b.mMinMax[maxAxis]); } /* \brief Expand bounds to include another \note This is used to compute the aggregate bounds of multiple shape bounds \param[in] b is the bounds to be included */ PX_FORCE_INLINE void include(const IntegerAABB& b) { mMinMax[MIN_X] = PxMin(mMinMax[MIN_X], b.mMinMax[MIN_X]); mMinMax[MIN_Y] = PxMin(mMinMax[MIN_Y], b.mMinMax[MIN_Y]); mMinMax[MIN_Z] = PxMin(mMinMax[MIN_Z], b.mMinMax[MIN_Z]); mMinMax[MAX_X] = PxMax(mMinMax[MAX_X], b.mMinMax[MAX_X]); mMinMax[MAX_Y] = PxMax(mMinMax[MAX_Y], b.mMinMax[MAX_Y]); mMinMax[MAX_Z] = PxMax(mMinMax[MAX_Z], b.mMinMax[MAX_Z]); } /* \brief Set the bounds to (max, max, max), (min, min, min) */ PX_INLINE void setEmpty() { mMinMax[MIN_X] = mMinMax[MIN_Y] = mMinMax[MIN_Z] = 0xff7fffff; //PX_IR(PX_MAX_F32); mMinMax[MAX_X] = mMinMax[MAX_Y] = mMinMax[MAX_Z] = 0x00800000; ///PX_IR(0.0f); } ValType mMinMax[6]; private: enum { eGRID_SNAP_VAL = 4 }; }; PX_FORCE_INLINE ValType encodeMin(const PxBounds3& bounds, PxU32 axis, PxReal contactDistance) { const PxReal val = bounds.minimum[axis] - contactDistance; const PxU32 min = PxUnionCast<PxU32, PxF32>(val); const PxU32 m = IntegerAABB::encodeFloatMin(min); return m; } PX_FORCE_INLINE ValType encodeMax(const PxBounds3& bounds, PxU32 axis, PxReal contactDistance) { const PxReal val = bounds.maximum[axis] + contactDistance; const PxU32 max = PxUnionCast<PxU32, PxF32>(val); const PxU32 m = IntegerAABB::encodeFloatMax(max) | (1<<2); return m; } } //namespace Bp } //namespace physx #endif
9,809
C
30.543408
117
0.713223
NVIDIA-Omniverse/PhysX/physx/source/lowlevelaabb/src/BpFiltering.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "BpFiltering.h" using namespace physx; using namespace Bp; BpFilter::BpFilter(bool discardKineKine, bool discardStaticKine) { for(int j = 0; j < Bp::FilterType::COUNT; j++) for(int i = 0; i < Bp::FilterType::COUNT; i++) mLUT[j][i] = false; mLUT[Bp::FilterType::STATIC][Bp::FilterType::DYNAMIC] = mLUT[Bp::FilterType::DYNAMIC][Bp::FilterType::STATIC] = true; mLUT[Bp::FilterType::STATIC][Bp::FilterType::KINEMATIC] = mLUT[Bp::FilterType::KINEMATIC][Bp::FilterType::STATIC] = !discardStaticKine; mLUT[Bp::FilterType::DYNAMIC][Bp::FilterType::KINEMATIC] = mLUT[Bp::FilterType::KINEMATIC][Bp::FilterType::DYNAMIC] = true; mLUT[Bp::FilterType::DYNAMIC][Bp::FilterType::DYNAMIC] = true; mLUT[Bp::FilterType::KINEMATIC][Bp::FilterType::KINEMATIC] = !discardKineKine; mLUT[Bp::FilterType::STATIC][Bp::FilterType::AGGREGATE] = mLUT[Bp::FilterType::AGGREGATE][Bp::FilterType::STATIC] = true; mLUT[Bp::FilterType::KINEMATIC][Bp::FilterType::AGGREGATE] = mLUT[Bp::FilterType::AGGREGATE][Bp::FilterType::KINEMATIC] = true; mLUT[Bp::FilterType::DYNAMIC][Bp::FilterType::AGGREGATE] = mLUT[Bp::FilterType::AGGREGATE][Bp::FilterType::DYNAMIC] = true; mLUT[Bp::FilterType::AGGREGATE][Bp::FilterType::AGGREGATE] = true; //Enable soft body interactions mLUT[Bp::FilterType::SOFTBODY][Bp::FilterType::DYNAMIC] = mLUT[Bp::FilterType::DYNAMIC][Bp::FilterType::SOFTBODY] = true; mLUT[Bp::FilterType::SOFTBODY][Bp::FilterType::STATIC] = mLUT[Bp::FilterType::STATIC][Bp::FilterType::SOFTBODY] = true; mLUT[Bp::FilterType::SOFTBODY][Bp::FilterType::KINEMATIC] = mLUT[Bp::FilterType::KINEMATIC][Bp::FilterType::SOFTBODY] = true; mLUT[Bp::FilterType::SOFTBODY][Bp::FilterType::SOFTBODY] = true; //Enable particle system interactions mLUT[Bp::FilterType::PARTICLESYSTEM][Bp::FilterType::DYNAMIC] = mLUT[Bp::FilterType::DYNAMIC][Bp::FilterType::PARTICLESYSTEM] = true; mLUT[Bp::FilterType::PARTICLESYSTEM][Bp::FilterType::STATIC] = mLUT[Bp::FilterType::STATIC][Bp::FilterType::PARTICLESYSTEM] = true; 
mLUT[Bp::FilterType::PARTICLESYSTEM][Bp::FilterType::KINEMATIC] = mLUT[Bp::FilterType::KINEMATIC][Bp::FilterType::PARTICLESYSTEM] = true; mLUT[Bp::FilterType::PARTICLESYSTEM][Bp::FilterType::PARTICLESYSTEM] = true; } BpFilter::~BpFilter() { }
3,954
C++
56.31884
138
0.747092
NVIDIA-Omniverse/PhysX/physx/source/lowlevelaabb/src/BpBroadPhaseUpdate.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "BpBroadPhase.h" #include "common/PxProfileZone.h" #include "foundation/PxBitMap.h" using namespace physx; using namespace Bp; #if PX_CHECKED bool BroadPhaseUpdateData::isValid(const BroadPhaseUpdateData& updateData, const BroadPhase& bp, const bool skipBoundValidation, PxU64 contextID) { PX_PROFILE_ZONE("BroadPhaseUpdateData::isValid", contextID); return (updateData.isValid(skipBoundValidation) && bp.isValid(updateData)); } static bool testHandles(PxU32 size, const BpHandle* handles, const PxU32 capacity, const Bp::FilterGroup::Enum* groups, const PxBounds3* bounds, PxBitMap& bitmap) { if(!handles && size) return false; /* ValType minVal=0; ValType maxVal=0xffffffff;*/ for(PxU32 i=0;i<size;i++) { const BpHandle h = handles[i]; if(h>=capacity) return false; // Array in ascending order of id. if(i>0 && (h < handles[i-1])) return false; if(groups && groups[h]==FilterGroup::eINVALID) return false; bitmap.set(h); if(bounds) { if(!bounds[h].isFinite()) return false; for(PxU32 j=0;j<3;j++) { //Max must be greater than min. if(bounds[h].minimum[j]>bounds[h].maximum[j]) return false; #if 0 //Bounds have an upper limit. if(bounds[created[i]].getMax(j)>=maxVal) return false; //Bounds have a lower limit. if(bounds[created[i]].getMin(j)<=minVal) return false; //Max must be odd. if(4 != (bounds[created[i]].getMax(j) & 4)) return false; //Min must be even. if(0 != (bounds[created[i]].getMin(j) & 4)) return false; #endif } } } return true; } static bool testBitmap(const PxBitMap& bitmap, PxU32 size, const BpHandle* handles) { while(size--) { const BpHandle h = *handles++; if(bitmap.test(h)) return false; } return true; } bool BroadPhaseUpdateData::isValid(const bool skipBoundValidation) const { const PxBounds3* bounds = skipBoundValidation ? 
NULL : getAABBs(); const PxU32 boxesCapacity = getCapacity(); const Bp::FilterGroup::Enum* groups = getGroups(); PxBitMap createdBitmap; createdBitmap.resizeAndClear(boxesCapacity); PxBitMap updatedBitmap; updatedBitmap.resizeAndClear(boxesCapacity); PxBitMap removedBitmap; removedBitmap.resizeAndClear(boxesCapacity); if(!testHandles(getNumCreatedHandles(), getCreatedHandles(), boxesCapacity, groups, bounds, createdBitmap)) return false; if(!testHandles(getNumUpdatedHandles(), getUpdatedHandles(), boxesCapacity, groups, bounds, updatedBitmap)) return false; if(!testHandles(getNumRemovedHandles(), getRemovedHandles(), boxesCapacity, NULL, NULL, removedBitmap)) return false; if(1) { // Created/updated if(!testBitmap(createdBitmap, getNumUpdatedHandles(), getUpdatedHandles())) return false; // Created/removed if(!testBitmap(createdBitmap, getNumRemovedHandles(), getRemovedHandles())) return false; // Updated/removed if(!testBitmap(updatedBitmap, getNumRemovedHandles(), getRemovedHandles())) return false; } return true; } #endif
4,695
C++
31.611111
162
0.732055
NVIDIA-Omniverse/PhysX/physx/source/lowlevelaabb/src/BpBroadPhaseSap.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef BP_BROADPHASE_SAP_H #define BP_BROADPHASE_SAP_H #include "BpBroadPhase.h" #include "BpBroadPhaseSapAux.h" #include "CmPool.h" #include "CmTask.h" namespace physx { class PxcScratchAllocator; namespace Gu { class Axes; } namespace Bp { class SapEndPoint; class IntegerAABB; class BroadPhaseBatchUpdateWorkTask: public Cm::Task { public: BroadPhaseBatchUpdateWorkTask(PxU64 contextId=0) : Cm::Task (contextId), mSap (NULL), mAxis (0xffffffff), mPairs (NULL), mPairsSize (0), mPairsCapacity (0) { } virtual void runInternal(); virtual const char* getName() const { return "BpBroadphaseSap.batchUpdate"; } void set(class BroadPhaseSap* sap, const PxU32 axis) {mSap = sap; mAxis = axis;} BroadPhasePair* getPairs() const {return mPairs;} PxU32 getPairsSize() const {return mPairsSize;} PxU32 getPairsCapacity() const {return mPairsCapacity;} void setPairs(BroadPhasePair* pairs, const PxU32 pairsCapacity) {mPairs = pairs; mPairsCapacity = pairsCapacity;} void setNumPairs(const PxU32 pairsSize) {mPairsSize=pairsSize;} private: class BroadPhaseSap* mSap; PxU32 mAxis; BroadPhasePair* mPairs; PxU32 mPairsSize; PxU32 mPairsCapacity; }; //KS - TODO, this could be reduced to U16 in smaller scenes struct BroadPhaseActivityPocket { PxU32 mStartIndex; PxU32 mEndIndex; }; class BroadPhaseSap : public BroadPhase { PX_NOCOPY(BroadPhaseSap) public: friend class BroadPhaseBatchUpdateWorkTask; friend class SapUpdateWorkTask; friend class SapPostUpdateWorkTask; BroadPhaseSap(const PxU32 maxNbBroadPhaseOverlaps, const PxU32 maxNbStaticShapes, const PxU32 maxNbDynamicShapes, PxU64 contextID); virtual ~BroadPhaseSap(); // BroadPhase virtual PxBroadPhaseType::Enum getType() const PX_OVERRIDE { return PxBroadPhaseType::eSAP; } virtual void release() PX_OVERRIDE; virtual void update(PxcScratchAllocator* scratchAllocator, const BroadPhaseUpdateData& updateData, physx::PxBaseTask* continuation) PX_OVERRIDE; virtual void preBroadPhase(const Bp::BroadPhaseUpdateData&) PX_OVERRIDE {} virtual void 
fetchBroadPhaseResults() PX_OVERRIDE {} virtual const BroadPhasePair* getCreatedPairs(PxU32& nbCreatedPairs) const PX_OVERRIDE { nbCreatedPairs = mCreatedPairsSize; return mCreatedPairsArray; } virtual const BroadPhasePair* getDeletedPairs(PxU32& nbDeletedPairs) const PX_OVERRIDE { nbDeletedPairs = mDeletedPairsSize; return mDeletedPairsArray; } virtual void freeBuffers() PX_OVERRIDE; virtual void shiftOrigin(const PxVec3& shift, const PxBounds3* boundsArray, const PxReal* contactDistances) PX_OVERRIDE; #if PX_CHECKED virtual bool isValid(const BroadPhaseUpdateData& updateData) const PX_OVERRIDE; #endif //~BroadPhase private: void resizeBuffers(); PxcScratchAllocator* mScratchAllocator; //Data passed in from updateV. const BpHandle* mCreated; PxU32 mCreatedSize; const BpHandle* mRemoved; PxU32 mRemovedSize; const BpHandle* mUpdated; PxU32 mUpdatedSize; const PxBounds3* mBoxBoundsMinMax; const Bp::FilterGroup::Enum*mBoxGroups; const BpFilter* mFilter; const PxReal* mContactDistance; PxU32 mBoxesCapacity; //Boxes. SapBox1D* mBoxEndPts[3]; //Position of box min/max in sorted arrays of end pts (needs to have mBoxesCapacity). //End pts (endpts of boxes sorted along each axis). ValType* mEndPointValues[3]; //Sorted arrays of min and max box coords BpHandle* mEndPointDatas[3]; //Corresponding owner id and isMin/isMax for each entry in the sorted arrays of min and max box coords. PxU8* mBoxesUpdated; BpHandle* mSortedUpdateElements; BroadPhaseActivityPocket* mActivityPockets; BpHandle* mListNext; BpHandle* mListPrev; PxU32 mBoxesSize; //Number of sorted boxes + number of unsorted (new) boxes PxU32 mBoxesSizePrev; //Number of sorted boxes PxU32 mEndPointsCapacity; //Capacity of sorted arrays. //Default maximum number of overlap pairs PxU32 mDefaultPairsCapacity; //Box-box overlap pairs created or removed each update. BpHandle* mData; PxU32 mDataSize; PxU32 mDataCapacity; //All current box-box overlap pairs. 
SapPairManager mPairs; //Created and deleted overlap pairs reported back through api. BroadPhasePair* mCreatedPairsArray; PxU32 mCreatedPairsSize; PxU32 mCreatedPairsCapacity; BroadPhasePair* mDeletedPairsArray; PxU32 mDeletedPairsSize; PxU32 mDeletedPairsCapacity; PxU32 mActualDeletedPairSize; bool setUpdateData(const BroadPhaseUpdateData& updateData); void update(); void postUpdate(); //Batch create/remove/update. void batchCreate(); void batchRemove(); void batchUpdate(); void batchUpdate(const PxU32 Axis, BroadPhasePair*& pairs, PxU32& pairsSize, PxU32& pairsCapacity); void batchUpdateFewUpdates(const PxU32 Axis, BroadPhasePair*& pairs, PxU32& pairsSize, PxU32& pairsCapacity); void ComputeSortedLists( //const PxVec4& globalMin, const PxVec4& globalMax, BpHandle* PX_RESTRICT newBoxIndicesSorted, PxU32& newBoxIndicesCount, BpHandle* PX_RESTRICT oldBoxIndicesSorted, PxU32& oldBoxIndicesCount, bool& allNewBoxesStatics, bool& allOldBoxesStatics); BroadPhaseBatchUpdateWorkTask mBatchUpdateTasks[3]; const PxU64 mContextID; #if PX_DEBUG bool isSelfOrdered() const; bool isSelfConsistent() const; #endif }; } //namespace Bp } //namespace physx #endif //BP_BROADPHASE_SAP_H
7,399
C
33.90566
155
0.73537
NVIDIA-Omniverse/PhysX/physx/source/lowlevelaabb/src/BpBroadPhaseMBP.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "BpBroadPhaseMBP.h" #include "BpBroadPhaseShared.h" #include "foundation/PxUtilities.h" #include "foundation/PxVecMath.h" #include "foundation/PxMemory.h" #include "foundation/PxBitUtils.h" #include "foundation/PxHashSet.h" #include "common/PxProfileZone.h" #include "CmRadixSort.h" #include "CmUtils.h" using namespace physx::aos; //#define CHECK_NB_OVERLAPS #define USE_FULLY_INSIDE_FLAG //#define MBP_USE_NO_CMP_OVERLAP_3D // Seems slower //HWSCAN: reverse bits in fully-inside-flag bitmaps because the code gives us indices for which bits are set (and we want the opposite) #define HWSCAN using namespace physx; using namespace Bp; using namespace Cm; static PX_FORCE_INLINE MBP_Handle encodeHandle(MBP_ObjectIndex objectIndex, PxU32 flipFlop, bool isStatic) { /* objectIndex += objectIndex; objectIndex |= flipFlop; return objectIndex;*/ return (objectIndex<<2)|(flipFlop<<1)|PxU32(isStatic); } static PX_FORCE_INLINE MBP_ObjectIndex decodeHandle_Index(MBP_Handle handle) { // return handle>>1; return handle>>2; } static PX_FORCE_INLINE PxU32 decodeHandle_IsStatic(MBP_Handle handle) { return handle&1; } #define MBP_ALLOC(x) PX_ALLOC(x, "MBP") #define MBP_ALLOC_TMP(x) PX_ALLOC(x, "MBP_TMP") #define MBP_FREE(x) PX_FREE(x) #define INVALID_ID 0xffffffff typedef MBP_Index* MBP_Mapping; /* PX_FORCE_INLINE PxU32 encodeFloat(const float val) { // We may need to check on -0 and 0 // But it should make no practical difference. PxU32 ir = IR(val); if(ir & 0x80000000) //negative? 
ir = ~ir;//reverse sequence of negative numbers else ir |= 0x80000000; // flip sign return ir; }*/ namespace internalMBP { struct RegionHandle : public PxUserAllocated { PxU16 mHandle; // Handle from region PxU16 mInternalBPHandle; // Index of region data within mRegions }; enum MBPFlags { MBP_FLIP_FLOP = (1<<1), MBP_REMOVED = (1<<2) // ### added for TA24714, not needed otherwise }; // We have one of those for each of the "200K" objects so we should optimize this size as much as possible struct MBP_Object : public PxUserAllocated { BpHandle mUserID; // Handle sent to us by the AABB manager PxU16 mNbHandles; // Number of regions the object is part of PxU16 mFlags; // MBPFlags ### only 1 bit used in the end PX_FORCE_INLINE bool getFlipFlop() const { return (mFlags & MBP_FLIP_FLOP)==0; } union { RegionHandle mHandle; PxU32 mHandlesIndex; }; }; // This one is used in each Region struct MBPEntry : public PxUserAllocated { PX_FORCE_INLINE MBPEntry() { mMBPHandle = INVALID_ID; } // ### mIndex could be PxU16 but beware, we store mFirstFree there PxU32 mIndex; // Out-to-in, maps user handle to internal array. mIndex indexes either the static or dynamic array. MBP_Handle mMBPHandle; // MBP-level handle (the one returned to users) #if PX_DEBUG bool mUpdated; #endif PX_FORCE_INLINE PxU32 isStatic() const { return decodeHandle_IsStatic(mMBPHandle); } }; /////////////////////////////////////////////////////////////////////////////// //#define BIT_ARRAY_STACK 512 static PX_FORCE_INLINE PxU32 bitsToDwords(PxU32 nbBits) { return (nbBits>>5) + ((nbBits&31) ? 1 : 0); } // Use that one instead of an array of bools. Takes less ram, nearly as fast [no bounds checkings and so on]. 
class BitArray { public: BitArray(); BitArray(PxU32 nbBits); ~BitArray(); bool init(PxU32 nbBits); void empty(); void resize(PxU32 nbBits); PX_FORCE_INLINE void setBitChecked(PxU32 bitNumber) { const PxU32 index = bitNumber>>5; if(index>=mSize) resize(bitNumber); mBits[index] |= 1<<(bitNumber&31); } PX_FORCE_INLINE void clearBitChecked(PxU32 bitNumber) { const PxU32 index = bitNumber>>5; if(index>=mSize) resize(bitNumber); mBits[index] &= ~(1<<(bitNumber&31)); } // Data management PX_FORCE_INLINE void setBit(PxU32 bitNumber) { mBits[bitNumber>>5] |= 1<<(bitNumber&31); } PX_FORCE_INLINE void clearBit(PxU32 bitNumber) { mBits[bitNumber>>5] &= ~(1<<(bitNumber&31)); } PX_FORCE_INLINE void toggleBit(PxU32 bitNumber) { mBits[bitNumber>>5] ^= 1<<(bitNumber&31); } PX_FORCE_INLINE void clearAll() { PxMemZero(mBits, mSize*4); } PX_FORCE_INLINE void setAll() { PxMemSet(mBits, 0xff, mSize*4); } // Data access PX_FORCE_INLINE PxIntBool isSet(PxU32 bitNumber) const { return PxIntBool(mBits[bitNumber>>5] & (1<<(bitNumber&31))); } PX_FORCE_INLINE PxIntBool isSetChecked(PxU32 bitNumber) const { const PxU32 index = bitNumber>>5; if(index>=mSize) return 0; return PxIntBool(mBits[index] & (1<<(bitNumber&31))); } PX_FORCE_INLINE const PxU32* getBits() const { return mBits; } PX_FORCE_INLINE PxU32 getSize() const { return mSize; } // PT: replicate PxBitMap stuff for temp testing PxU32 findLast() const { for(PxU32 i = mSize; i-- > 0;) { if(mBits[i]) return (i<<5)+PxHighestSetBit(mBits[i]); } return PxU32(0); } protected: PxU32* mBits; //!< Array of bits PxU32 mSize; //!< Size of the array in dwords #ifdef BIT_ARRAY_STACK PxU32 mStack[BIT_ARRAY_STACK]; #endif }; /////////////////////////////////////////////////////////////////////////////// BitArray::BitArray() : mBits(NULL), mSize(0) { } BitArray::BitArray(PxU32 nbBits) : mBits(NULL), mSize(0) { init(nbBits); } BitArray::~BitArray() { empty(); } void BitArray::empty() { #ifdef BIT_ARRAY_STACK if(mBits!=mStack) #endif MBP_FREE(mBits); 
mBits = NULL; mSize = 0; } bool BitArray::init(PxU32 nbBits) { mSize = bitsToDwords(nbBits); // Get ram for n bits #ifdef BIT_ARRAY_STACK if(mBits!=mStack) #endif MBP_FREE(mBits); #ifdef BIT_ARRAY_STACK if(mSize>BIT_ARRAY_STACK) #endif mBits = reinterpret_cast<PxU32*>(MBP_ALLOC(sizeof(PxU32)*mSize)); #ifdef BIT_ARRAY_STACK else mBits = mStack; #endif // Set all bits to 0 clearAll(); return true; } void BitArray::resize(PxU32 nbBits) { const PxU32 newSize = bitsToDwords(nbBits+128); PxU32* newBits = NULL; #ifdef BIT_ARRAY_STACK if(newSize>BIT_ARRAY_STACK) #endif { // Old buffer was stack or allocated, new buffer is allocated newBits = reinterpret_cast<PxU32*>(MBP_ALLOC(sizeof(PxU32)*newSize)); if(mSize) PxMemCopy(newBits, mBits, sizeof(PxU32)*mSize); } #ifdef BIT_ARRAY_STACK else { newBits = mStack; if(mSize>BIT_ARRAY_STACK) { // Old buffer was allocated, new buffer is stack => copy to stack, shrink CopyMemory(newBits, mBits, sizeof(PxU32)*BIT_ARRAY_STACK); } else { // Old buffer was stack, new buffer is stack => keep working on the same stack buffer, nothing to do } } #endif const PxU32 remain = newSize - mSize; if(remain) PxMemZero(newBits + mSize, remain*sizeof(PxU32)); #ifdef BIT_ARRAY_STACK if(mBits!=mStack) #endif MBP_FREE(mBits); mBits = newBits; mSize = newSize; } #ifdef USE_FULLY_INSIDE_FLAG static PX_FORCE_INLINE void setBit(BitArray& bitmap, MBP_ObjectIndex objectIndex) { #ifdef HWSCAN bitmap.clearBitChecked(objectIndex); //HWSCAN #else bitmap.setBitChecked(objectIndex); #endif } static PX_FORCE_INLINE void clearBit(BitArray& bitmap, MBP_ObjectIndex objectIndex) { #ifdef HWSCAN bitmap.setBitChecked(objectIndex); // HWSCAN #else bitmap.clearBitChecked(objectIndex); #endif } #endif /////////////////////////////////////////////////////////////////////////////// #ifdef MBP_SIMD_OVERLAP typedef SIMD_AABB MBP_AABB; #else typedef IAABB MBP_AABB; #endif struct MBPEntry; struct RegionHandle; struct MBP_Object; class MBP_PairManager : public PairManagerData { 
public: MBP_PairManager(); ~MBP_PairManager(); InternalPair* addPair (PxU32 id0, PxU32 id1); // bool removePair (PxU32 id0, PxU32 id1); bool computeCreatedDeletedPairs (const MBP_Object* objects, BroadPhaseMBP* mbp, const BitArray& updated, const BitArray& removed); const Bp::FilterGroup::Enum* mGroups; const MBP_Object* mObjects; const bool* mLUT; }; /////////////////////////////////////////////////////////////////////////// #define STACK_BUFFER_SIZE 256 struct MBPOS_TmpBuffers { MBPOS_TmpBuffers(); ~MBPOS_TmpBuffers(); void allocateSleeping(PxU32 nbSleeping, PxU32 nbSentinels); void allocateUpdated(PxU32 nbUpdated, PxU32 nbSentinels); // PT: wtf, why doesn't the 128 version compile? // MBP_AABB PX_ALIGN(128, mSleepingDynamicBoxes_Stack[STACK_BUFFER_SIZE]); // MBP_AABB PX_ALIGN(128, mUpdatedDynamicBoxes_Stack[STACK_BUFFER_SIZE]); MBP_AABB PX_ALIGN(16, mSleepingDynamicBoxes_Stack[STACK_BUFFER_SIZE]); MBP_AABB PX_ALIGN(16, mUpdatedDynamicBoxes_Stack[STACK_BUFFER_SIZE]); MBP_Index mInToOut_Dynamic_Sleeping_Stack[STACK_BUFFER_SIZE]; PxU32 mNbSleeping; PxU32 mNbUpdated; MBP_Index* mInToOut_Dynamic_Sleeping; MBP_AABB* mSleepingDynamicBoxes; MBP_AABB* mUpdatedDynamicBoxes; }; struct BIP_Input { BIP_Input() : mObjects (NULL), mNbUpdatedBoxes (0), mNbStaticBoxes (0), mDynamicBoxes (NULL), mStaticBoxes (NULL), mInToOut_Static (NULL), mInToOut_Dynamic(NULL), mNeeded (false) { } const MBPEntry* mObjects; PxU32 mNbUpdatedBoxes; PxU32 mNbStaticBoxes; const MBP_AABB* mDynamicBoxes; const MBP_AABB* mStaticBoxes; const MBP_Index* mInToOut_Static; const MBP_Index* mInToOut_Dynamic; bool mNeeded; }; struct BoxPruning_Input { BoxPruning_Input() : mObjects (NULL), mUpdatedDynamicBoxes (NULL), mSleepingDynamicBoxes (NULL), mInToOut_Dynamic (NULL), mInToOut_Dynamic_Sleeping (NULL), mNbUpdated (0), mNbNonUpdated (0), mNeeded (false) { } const MBPEntry* mObjects; const MBP_AABB* mUpdatedDynamicBoxes; const MBP_AABB* mSleepingDynamicBoxes; const MBP_Index* mInToOut_Dynamic; const 
MBP_Index* mInToOut_Dynamic_Sleeping; PxU32 mNbUpdated; PxU32 mNbNonUpdated; bool mNeeded; BIP_Input mBIPInput; }; class Region : public PxUserAllocated { PX_NOCOPY(Region) public: Region(); ~Region(); void updateObject(const MBP_AABB& bounds, MBP_Index handle); MBP_Index addObject(const MBP_AABB& bounds, MBP_Handle mbpHandle, bool isStatic); void removeObject(MBP_Index handle); MBP_Handle retrieveBounds(MBP_AABB& bounds, MBP_Index handle) const; void setBounds(MBP_Index handle, const MBP_AABB& bounds); void prepareOverlaps(); void findOverlaps(MBP_PairManager& pairManager); // private: BoxPruning_Input PX_ALIGN(16, mInput); PxU32 mNbObjects; PxU32 mMaxNbObjects; PxU32 mFirstFree; MBPEntry* mObjects; // All objects, indexed by user handle PxU32 mMaxNbStaticBoxes; PxU32 mNbStaticBoxes; PxU32 mMaxNbDynamicBoxes; PxU32 mNbDynamicBoxes; MBP_AABB* mStaticBoxes; MBP_AABB* mDynamicBoxes; MBP_Mapping mInToOut_Static; // Maps static boxes to mObjects MBP_Mapping mInToOut_Dynamic; // Maps dynamic boxes to mObjects PxU32* mPosList; PxU32 mNbUpdatedBoxes; PxU32 mPrevNbUpdatedBoxes; BitArray mStaticBits; RadixSortBuffered mRS; bool mNeedsSorting; bool mNeedsSortingSleeping; MBPOS_TmpBuffers mTmpBuffers; void optimizeMemory(); void resizeObjects(); void staticSort(); void preparePruning(MBPOS_TmpBuffers& buffers); void prepareBIPPruning(const MBPOS_TmpBuffers& buffers); }; /////////////////////////////////////////////////////////////////////////// // We have one of those for each Region within the MBP struct RegionData : public PxUserAllocated { MBP_AABB mBox; // Volume of space controlled by this Region Region* mBP; // Pointer to Region itself PxIntBool mOverlap; // True if overlaps other regions void* mUserData; // Region identifier, reused to contain "first free ID" }; #define MAX_NB_MBP 256 // #define MAX_NB_MBP 16 class MBP : public PxUserAllocated { public: MBP(); ~MBP(); void preallocate(PxU32 nbRegions, PxU32 nbObjects, PxU32 maxNbOverlaps); void reset(); void 
freeBuffers(); PxU32 addRegion(const PxBroadPhaseRegion& region, bool populateRegion, const PxBounds3* boundsArray, const PxReal* contactDistance); bool removeRegion(PxU32 handle); const Region* getRegion(PxU32 i) const; PX_FORCE_INLINE PxU32 getNbRegions() const { return mNbRegions; } MBP_Handle addObject(const MBP_AABB& box, BpHandle userID, bool isStatic); bool removeObject(MBP_Handle handle); bool updateObject(MBP_Handle handle, const MBP_AABB& box); bool updateObjectAfterRegionRemoval(MBP_Handle handle, Region* removedRegion); bool updateObjectAfterNewRegionAdded(MBP_Handle handle, const MBP_AABB& box, Region* addedRegion, PxU32 regionIndex); void prepareOverlaps(); void findOverlaps(const Bp::FilterGroup::Enum* PX_RESTRICT groups, const bool* PX_RESTRICT lut); PxU32 finalize(BroadPhaseMBP* mbp); void shiftOrigin(const PxVec3& shift, const PxBounds3* boundsArray, const PxReal* contactDistances); // private: PxU32 mNbRegions; MBP_ObjectIndex mFirstFreeIndex; // First free recycled index for mMBP_Objects PxU32 mFirstFreeIndexBP; // First free recycled index for mRegions PxArray<RegionData> mRegions; PxArray<MBP_Object> mMBP_Objects; MBP_PairManager mPairManager; BitArray mUpdatedObjects; // Indexed by MBP_ObjectIndex BitArray mRemoved; // Indexed by MBP_ObjectIndex PxArray<PxU32> mHandles[MAX_NB_MBP+1]; PxU32 mFirstFree[MAX_NB_MBP+1]; PX_FORCE_INLINE RegionHandle* getHandles(MBP_Object& currentObject, PxU32 nbHandles); void purgeHandles(MBP_Object* PX_RESTRICT object, PxU32 nbHandles); void storeHandles(MBP_Object* PX_RESTRICT object, PxU32 nbHandles, const RegionHandle* PX_RESTRICT handles); PxArray<PxU32> mOutOfBoundsObjects; // These are BpHandle but the BP interface expects PxU32s void addToOutOfBoundsArray(BpHandle id); #ifdef USE_FULLY_INSIDE_FLAG BitArray mFullyInsideBitmap; // Indexed by MBP_ObjectIndex #endif void populateNewRegion(const MBP_AABB& box, Region* addedRegion, PxU32 regionIndex, const PxBounds3* boundsArray, const PxReal* contactDistance); 
#ifdef MBP_REGION_BOX_PRUNING void buildRegionData(); MBP_AABB mSortedRegionBoxes[MAX_NB_MBP]; PxU32 mSortedRegionIndices[MAX_NB_MBP]; PxU32 mNbActiveRegions; bool mDirtyRegions; #endif }; #ifdef MBP_SIMD_OVERLAP #define MBP_OVERLAP_TEST(x) SIMD_OVERLAP_TEST(x) #else #define MBP_OVERLAP_TEST(x) if(intersect2D(box0, x)) #endif #define DEFAULT_NB_ENTRIES 128 #ifdef MBP_SIMD_OVERLAP static PX_FORCE_INLINE void initSentinel(SIMD_AABB& box) { // box.mMinX = encodeFloat(FLT_MAX)>>1; box.mMinX = 0xffffffff; } #if PX_DEBUG static PX_FORCE_INLINE bool isSentinel(const SIMD_AABB& box) { return box.mMinX == 0xffffffff; } #endif #else static PX_FORCE_INLINE void initSentinel(MBP_AABB& box) { // box.mMinX = encodeFloat(FLT_MAX)>>1; box.mMinX = 0xffffffff; } #if PX_DEBUG static PX_FORCE_INLINE bool isSentinel(const MBP_AABB& box) { return box.mMinX == 0xffffffff; } #endif #endif } using namespace internalMBP; /////////////////////////////////////////////////////////////////////////////// MBP_PairManager::MBP_PairManager() : mGroups (NULL), mObjects (NULL), mLUT (NULL) { } /////////////////////////////////////////////////////////////////////////////// MBP_PairManager::~MBP_PairManager() { } /////////////////////////////////////////////////////////////////////////////// InternalPair* MBP_PairManager::addPair(PxU32 id0, PxU32 id1) { PX_ASSERT(id0!=INVALID_ID); PX_ASSERT(id1!=INVALID_ID); PX_ASSERT(mGroups); PX_ASSERT(mObjects); { const MBP_ObjectIndex index0 = decodeHandle_Index(id0); const MBP_ObjectIndex index1 = decodeHandle_Index(id1); const BpHandle object0 = mObjects[index0].mUserID; const BpHandle object1 = mObjects[index1].mUserID; if(!groupFiltering(mGroups[object0], mGroups[object1], mLUT)) return NULL; } return addPairInternal(id0, id1); } /////////////////////////////////////////////////////////////////////////////// /*bool MBP_PairManager::removePair(PxU32 id0, PxU32 id1) { // Order the ids sort(id0, id1); const PxU32 hashValue = hash(id0, id1) & mMask; const 
InternalPair* p = findPair(id0, id1, hashValue); if(!p) return false; PX_ASSERT(p->getId0()==id0); PX_ASSERT(p->getId1()==id1); PairManagerData::removePair(id0, id1, hashValue, getPairIndex(p)); shrinkMemory(); return true; }*/ /////////////////////////////////////////////////////////////////////////////// #ifdef MBP_SIMD_OVERLAP #define SIMD_OVERLAP_PRELOAD_BOX0 \ __m128i b = _mm_loadu_si128(reinterpret_cast<const __m128i*>(&box0.mMinY)); \ b = _mm_shuffle_epi32(b, 78); // PT: technically we don't need the 16 bits from _mm_movemask_epi8, we only // need the 4 bits from _mm_movemask_ps. Would it be faster? In any case this // works thanks to the _mm_cmpgt_epi32 which puts the same values in each byte // of each separate 32bits components. #define SIMD_OVERLAP_TEST(x) \ const __m128i a = _mm_loadu_si128(reinterpret_cast<const __m128i*>(&x.mMinY)); \ const __m128i d = _mm_cmpgt_epi32(a, b); \ const int mask = _mm_movemask_epi8(d); \ if(mask==0x0000ff00) #else #define SIMD_OVERLAP_PRELOAD_BOX0 #endif #ifdef MBP_USE_NO_CMP_OVERLAP /*static PX_FORCE_INLINE void initBox(IAABB& box, const PxBounds3& src) { box.initFrom2(src); }*/ #else static PX_FORCE_INLINE void initBox(IAABB& box, const PxBounds3& src) { box.initFrom(src); } #endif Region::Region() : mNbObjects (0), mMaxNbObjects (0), mFirstFree (INVALID_ID), mObjects (NULL), mMaxNbStaticBoxes (0), mNbStaticBoxes (0), mMaxNbDynamicBoxes (0), mNbDynamicBoxes (0), mStaticBoxes (NULL), mDynamicBoxes (NULL), mInToOut_Static (NULL), mInToOut_Dynamic (NULL), mPosList (NULL), mNbUpdatedBoxes (0), mPrevNbUpdatedBoxes (0), mNeedsSorting (false), mNeedsSortingSleeping (true) { } Region::~Region() { PX_DELETE_ARRAY(mObjects); MBP_FREE(mPosList); MBP_FREE(mInToOut_Dynamic); MBP_FREE(mInToOut_Static); PX_DELETE_ARRAY(mDynamicBoxes); PX_DELETE_ARRAY(mStaticBoxes); } // Pre-sort static boxes #define STACK_BUFFER_SIZE_STATIC_SORT 8192 #define DEFAULT_NUM_DYNAMIC_BOXES 1024 void Region::staticSort() { // For now this version is only 
compatible with: // MBP_USE_WORDS // MBP_USE_SENTINELS mNeedsSorting = false; const PxU32 nbStaticBoxes = mNbStaticBoxes; if(!nbStaticBoxes) { mStaticBits.empty(); return; } // PxU32 Time; // StartProfile(Time); // Roadmap: // - gather updated/modified static boxes // - sort those, and those only // - merge sorted set with previously existing (and previously sorted set) // Separate things-to-sort and things-already-sorted const PxU32 totalSize = sizeof(PxU32)*nbStaticBoxes*4; PxU8 stackBuffer[STACK_BUFFER_SIZE_STATIC_SORT]; PxU8* tempMemory = totalSize<=STACK_BUFFER_SIZE_STATIC_SORT ? stackBuffer : reinterpret_cast<PxU8*>(MBP_ALLOC_TMP(totalSize)); PxU32* minPosList_ToSort = reinterpret_cast<PxU32*>(tempMemory); PxU32* minPosList_Sorted = reinterpret_cast<PxU32*>(tempMemory + sizeof(PxU32)*nbStaticBoxes); PxU32* boxIndices_ToSort = reinterpret_cast<PxU32*>(tempMemory + sizeof(PxU32)*nbStaticBoxes*2); PxU32* boxIndices_Sorted = reinterpret_cast<PxU32*>(tempMemory + sizeof(PxU32)*nbStaticBoxes*3); PxU32 nbToSort = 0; PxU32 nbSorted = 0; for(PxU32 i=0;i<nbStaticBoxes;i++) { if(mStaticBits.isSetChecked(i)) // ### optimize check in that thing { minPosList_ToSort[nbToSort] = mStaticBoxes[i].mMinX; boxIndices_ToSort[nbToSort] = i; nbToSort++; } else { minPosList_Sorted[nbSorted] = mStaticBoxes[i].mMinX; boxIndices_Sorted[nbSorted] = i; PX_ASSERT(nbSorted==0 || minPosList_Sorted[nbSorted-1]<=minPosList_Sorted[nbSorted]); nbSorted++; } } PX_ASSERT(nbSorted+nbToSort==nbStaticBoxes); // EndProfile(Time); // printf("Part1: %d\n", Time); // StartProfile(Time); // Sort things that need sorting const PxU32* sorted; RadixSortBuffered RS; if(nbToSort<DEFAULT_NUM_DYNAMIC_BOXES) { sorted = mRS.Sort(minPosList_ToSort, nbToSort, RADIX_UNSIGNED).GetRanks(); } else { sorted = RS.Sort(minPosList_ToSort, nbToSort, RADIX_UNSIGNED).GetRanks(); } // EndProfile(Time); // printf("Part2: %d\n", Time); // StartProfile(Time); // Allocate final buffers that wil contain the 2 (merged) streams 
MBP_Index* newMapping = reinterpret_cast<MBP_Index*>(MBP_ALLOC(sizeof(MBP_Index)*mMaxNbStaticBoxes)); const PxU32 nbStaticSentinels = 2; MBP_AABB* sortedBoxes = PX_NEW(MBP_AABB)[mMaxNbStaticBoxes+nbStaticSentinels]; initSentinel(sortedBoxes[nbStaticBoxes]); initSentinel(sortedBoxes[nbStaticBoxes+1]); // EndProfile(Time); // printf("Part2b: %d\n", Time); // StartProfile(Time); // Merge streams to final buffers PxU32 offsetSorted = 0; PxU32 offsetNonSorted = 0; PxU32 nextCandidateNonSorted = offsetNonSorted<nbToSort ? minPosList_ToSort[sorted[offsetNonSorted]] : 0xffffffff; PxU32 nextCandidateSorted = offsetSorted<nbSorted ? minPosList_Sorted[offsetSorted] : 0xffffffff; for(PxU32 i=0;i<nbStaticBoxes;i++) { PxU32 boxIndex; { // minPosList_Sorted[offsetSorted] = mStaticBoxes[boxIndices_Sorted[offsetSorted]].mMinX; if(nextCandidateNonSorted<nextCandidateSorted) { boxIndex = boxIndices_ToSort[sorted[offsetNonSorted]]; offsetNonSorted++; nextCandidateNonSorted = offsetNonSorted<nbToSort ? minPosList_ToSort[sorted[offsetNonSorted]] : 0xffffffff; } else { boxIndex = boxIndices_Sorted[offsetSorted]; offsetSorted++; nextCandidateSorted = offsetSorted<nbSorted ? 
minPosList_Sorted[offsetSorted] : 0xffffffff; } } const MBP_Index OwnerIndex = mInToOut_Static[boxIndex]; sortedBoxes[i] = mStaticBoxes[boxIndex]; newMapping[i] = OwnerIndex; PX_ASSERT(mObjects[OwnerIndex].mIndex==boxIndex); PX_ASSERT(mObjects[OwnerIndex].isStatic()); mObjects[OwnerIndex].mIndex = i; } PX_ASSERT(offsetSorted+offsetNonSorted==nbStaticBoxes); // EndProfile(Time); // printf("Part3: %d\n", Time); // StartProfile(Time); if(tempMemory!=stackBuffer) MBP_FREE(tempMemory); PX_DELETE_ARRAY(mStaticBoxes); mStaticBoxes = sortedBoxes; MBP_FREE(mInToOut_Static); mInToOut_Static = newMapping; mStaticBits.empty(); // EndProfile(Time); // printf("Part4: %d\n", Time); } void Region::optimizeMemory() { // TODO: resize static boxes/mapping, dynamic boxes/mapping, object array } void Region::resizeObjects() { const PxU32 newMaxNbOjects = mMaxNbObjects ? mMaxNbObjects + DEFAULT_NB_ENTRIES : DEFAULT_NB_ENTRIES; // const PxU32 newMaxNbOjects = mMaxNbObjects ? mMaxNbObjects*2 : DEFAULT_NB_ENTRIES; MBPEntry* newObjects = PX_NEW(MBPEntry)[newMaxNbOjects]; if(mNbObjects) PxMemCopy(newObjects, mObjects, mNbObjects*sizeof(MBPEntry)); #if PX_DEBUG for(PxU32 i=mNbObjects;i<newMaxNbOjects;i++) newObjects[i].mUpdated = false; #endif PX_DELETE_ARRAY(mObjects); mObjects = newObjects; mMaxNbObjects = newMaxNbOjects; } static MBP_AABB* resizeBoxes(PxU32 oldNbBoxes, PxU32 newNbBoxes, const MBP_AABB* boxes) { MBP_AABB* newBoxes = PX_NEW(MBP_AABB)[newNbBoxes]; if(oldNbBoxes) PxMemCopy(newBoxes, boxes, oldNbBoxes*sizeof(MBP_AABB)); PX_DELETE_ARRAY(boxes); return newBoxes; } static MBP_Index* resizeMapping(PxU32 oldNbBoxes, PxU32 newNbBoxes, MBP_Index* mapping) { MBP_Index* newMapping = reinterpret_cast<MBP_Index*>(MBP_ALLOC(sizeof(MBP_Index)*newNbBoxes)); if(oldNbBoxes) PxMemCopy(newMapping, mapping, oldNbBoxes*sizeof(MBP_Index)); MBP_FREE(mapping); return newMapping; } static PX_FORCE_INLINE void MTF(MBP_AABB* PX_RESTRICT dynamicBoxes, MBP_Index* PX_RESTRICT inToOut_Dynamic, MBPEntry* 
PX_RESTRICT objects, const MBP_AABB& bounds, PxU32 frontIndex, MBPEntry& updatedObject) { const PxU32 updatedIndex = updatedObject.mIndex; if(frontIndex!=updatedIndex) { const MBP_AABB box0 = dynamicBoxes[frontIndex]; dynamicBoxes[frontIndex] = bounds; dynamicBoxes[updatedIndex] = box0; const MBP_Index index0 = inToOut_Dynamic[frontIndex]; inToOut_Dynamic[frontIndex] = inToOut_Dynamic[updatedIndex]; inToOut_Dynamic[updatedIndex] = index0; objects[index0].mIndex = updatedIndex; updatedObject.mIndex = frontIndex; } else { dynamicBoxes[frontIndex] = bounds; } } MBP_Index Region::addObject(const MBP_AABB& bounds, MBP_Handle mbpHandle, bool isStatic) { #ifdef MBP_USE_WORDS PX_ASSERT(mNbObjects<0xffff); #endif PX_ASSERT((decodeHandle_IsStatic(mbpHandle) && isStatic) || (!decodeHandle_IsStatic(mbpHandle) && !isStatic)); MBP_Index handle; if(mFirstFree!=INVALID_ID) { handle = MBP_Index(mFirstFree); mFirstFree = mObjects[handle].mIndex; } else { if(mMaxNbObjects==mNbObjects) resizeObjects(); handle = MBP_Index(mNbObjects); } mNbObjects++; /// PxU32 boxIndex; if(isStatic) { if(mMaxNbStaticBoxes==mNbStaticBoxes) { const PxU32 newMaxNbBoxes = mMaxNbStaticBoxes ? mMaxNbStaticBoxes + DEFAULT_NB_ENTRIES : DEFAULT_NB_ENTRIES; // const PxU32 newMaxNbBoxes = mMaxNbStaticBoxes ? mMaxNbStaticBoxes*2 : DEFAULT_NB_ENTRIES; mStaticBoxes = resizeBoxes(mNbStaticBoxes, newMaxNbBoxes, mStaticBoxes); mInToOut_Static = resizeMapping(mNbStaticBoxes, newMaxNbBoxes, mInToOut_Static); mMaxNbStaticBoxes = newMaxNbBoxes; } boxIndex = mNbStaticBoxes++; mStaticBoxes[boxIndex] = bounds; mInToOut_Static[boxIndex] = handle; mNeedsSorting = true; mStaticBits.setBitChecked(boxIndex); } else { if(mMaxNbDynamicBoxes==mNbDynamicBoxes) { const PxU32 newMaxNbBoxes = mMaxNbDynamicBoxes ? mMaxNbDynamicBoxes + DEFAULT_NB_ENTRIES : DEFAULT_NB_ENTRIES; // const PxU32 newMaxNbBoxes = mMaxNbDynamicBoxes ? 
mMaxNbDynamicBoxes*2 : DEFAULT_NB_ENTRIES; mDynamicBoxes = resizeBoxes(mNbDynamicBoxes, newMaxNbBoxes, mDynamicBoxes); mInToOut_Dynamic = resizeMapping(mNbDynamicBoxes, newMaxNbBoxes, mInToOut_Dynamic); mMaxNbDynamicBoxes = newMaxNbBoxes; MBP_FREE(mPosList); mPosList = reinterpret_cast<PxU32*>(MBP_ALLOC((newMaxNbBoxes+1)*sizeof(PxU32))); } boxIndex = mNbDynamicBoxes++; mDynamicBoxes[boxIndex] = bounds; mInToOut_Dynamic[boxIndex] = handle; } mObjects[handle].mIndex = boxIndex; mObjects[handle].mMBPHandle = mbpHandle; #if PX_DEBUG mObjects[handle].mUpdated = !isStatic; #endif if(!isStatic) { MTF(mDynamicBoxes, mInToOut_Dynamic, mObjects, bounds, mNbUpdatedBoxes, mObjects[handle]); mNbUpdatedBoxes++; mPrevNbUpdatedBoxes = 0; mNeedsSortingSleeping = true; PX_ASSERT(mNbUpdatedBoxes<=mNbDynamicBoxes); } return handle; } // Moves box 'lastIndex' to location 'removedBoxIndex' static PX_FORCE_INLINE void remove(MBPEntry* PX_RESTRICT objects, MBP_Index* PX_RESTRICT mapping, MBP_AABB* PX_RESTRICT boxes, PxU32 removedBoxIndex, PxU32 lastIndex) { const PxU32 movedBoxHandle = mapping[lastIndex]; boxes[removedBoxIndex] = boxes[lastIndex]; // Relocate box data mapping[removedBoxIndex] = MBP_Index(movedBoxHandle); // Relocate mapping data MBPEntry& movedObject = objects[movedBoxHandle]; PX_ASSERT(movedObject.mIndex==lastIndex); // Checks index of moved box was indeed its old location movedObject.mIndex = removedBoxIndex; // Adjust index of moved box to reflect its new location } void Region::removeObject(MBP_Index handle) { PX_ASSERT(handle<mMaxNbObjects); MBPEntry& object = mObjects[handle]; /*const*/ PxU32 removedBoxIndex = object.mIndex; MBP_Index* PX_RESTRICT mapping; MBP_AABB* PX_RESTRICT boxes; PxU32 lastIndex; if(!object.isStatic()) { mPrevNbUpdatedBoxes = 0; mNeedsSortingSleeping = true; PX_ASSERT(mInToOut_Dynamic[removedBoxIndex]==handle); const bool isUpdated = removedBoxIndex<mNbUpdatedBoxes; PX_ASSERT(isUpdated==object.mUpdated); if(isUpdated) { 
PX_ASSERT(mNbUpdatedBoxes); if(mNbUpdatedBoxes!=mNbDynamicBoxes) { // Removing the object will create this pattern, which is wrong: // UUUUUUUUUUUNNNNNNNNN......... original // UUUUUU.UUUUNNNNNNNNN......... remove U // UUUUUUNUUUUNNNNNNNN.......... move N // // What we want instead is: // UUUUUUUUUUUNNNNNNNNN......... original // UUUUUU.UUUUNNNNNNNNN......... remove U // UUUUUUUUUU.NNNNNNNNN......... move U // UUUUUUUUUUNNNNNNNNN.......... move N const PxU32 lastUpdatedIndex = mNbUpdatedBoxes-1; remove(mObjects, mInToOut_Dynamic, mDynamicBoxes, removedBoxIndex, lastUpdatedIndex); // Move last U to removed U //Remove(mObjects, mInToOut_Dynamic, mDynamicBoxes, lastUpdatedIndex, --mNbDynamicBoxes); // Move last N to last U removedBoxIndex = lastUpdatedIndex; } mNbUpdatedBoxes--; } // remove(mObjects, mInToOut_Dynamic, mDynamicBoxes, removedBoxIndex, --mNbDynamicBoxes); mapping = mInToOut_Dynamic; boxes = mDynamicBoxes; lastIndex = --mNbDynamicBoxes; // ### adjust size of mPosList ? } else { PX_ASSERT(mInToOut_Static[removedBoxIndex]==handle); mNeedsSorting = true; mStaticBits.setBitChecked(removedBoxIndex); // remove(mObjects, mInToOut_Static, mStaticBoxes, removedBoxIndex, --mNbStaticBoxes); mapping = mInToOut_Static; boxes = mStaticBoxes; lastIndex = --mNbStaticBoxes; } remove(mObjects, mapping, boxes, removedBoxIndex, lastIndex); object.mIndex = mFirstFree; object.mMBPHandle = INVALID_ID; // printf("Invalid: %d\n", handle); mFirstFree = handle; mNbObjects--; #if PX_DEBUG object.mUpdated = false; #endif } void Region::updateObject(const MBP_AABB& bounds, MBP_Index handle) { PX_ASSERT(handle<mMaxNbObjects); MBPEntry& object = mObjects[handle]; if(!object.isStatic()) { // MTF on updated box const bool isContinuouslyUpdated = object.mIndex<mPrevNbUpdatedBoxes; if(!isContinuouslyUpdated) mNeedsSortingSleeping = true; // printf("%d: %d\n", handle, isContinuouslyUpdated); const bool isUpdated = object.mIndex<mNbUpdatedBoxes; PX_ASSERT(isUpdated==object.mUpdated); 
if(!isUpdated) { #if PX_DEBUG object.mUpdated = true; #endif MTF(mDynamicBoxes, mInToOut_Dynamic, mObjects, bounds, mNbUpdatedBoxes, object); mNbUpdatedBoxes++; PX_ASSERT(mNbUpdatedBoxes<=mNbDynamicBoxes); } else { mDynamicBoxes[object.mIndex] = bounds; } } else { mStaticBoxes[object.mIndex] = bounds; mNeedsSorting = true; // ### not always! mStaticBits.setBitChecked(object.mIndex); } } MBP_Handle Region::retrieveBounds(MBP_AABB& bounds, MBP_Index handle) const { PX_ASSERT(handle<mMaxNbObjects); const MBPEntry& object = mObjects[handle]; if(!object.isStatic()) bounds = mDynamicBoxes[object.mIndex]; else bounds = mStaticBoxes[object.mIndex]; return object.mMBPHandle; } void Region::setBounds(MBP_Index handle, const MBP_AABB& bounds) { PX_ASSERT(handle<mMaxNbObjects); const MBPEntry& object = mObjects[handle]; if(!object.isStatic()) { PX_ASSERT(object.mIndex < mNbDynamicBoxes); mDynamicBoxes[object.mIndex] = bounds; } else { PX_ASSERT(object.mIndex < mNbStaticBoxes); mStaticBoxes[object.mIndex] = bounds; } } #ifndef MBP_SIMD_OVERLAP static PX_FORCE_INLINE PxIntBool intersect2D(const MBP_AABB& a, const MBP_AABB& b) { #ifdef MBP_USE_NO_CMP_OVERLAP // PT: warning, only valid with the special encoding in InitFrom2 const PxU32 bits0 = (b.mMaxY - a.mMinY)&0x80000000; const PxU32 bits1 = (b.mMaxZ - a.mMinZ)&0x80000000; const PxU32 bits2 = (a.mMaxY - b.mMinY)&0x80000000; const PxU32 bits3 = (a.mMaxZ - b.mMinZ)&0x80000000; const PxU32 mask = bits0|(bits1>>1)|(bits2>>2)|(bits3>>3); return !mask; /* const PxU32 d0 = (b.mMaxY<<16)|a.mMaxY; const PxU32 d0b = (b.mMaxZ<<16)|a.mMaxZ; const PxU32 d1 = (a.mMinY<<16)|b.mMinY; const PxU32 d1b = (a.mMinZ<<16)|b.mMinZ; const PxU32 mask = (d0 - d1) | (d0b - d1b); return !(mask & 0x80008000);*/ #else if(//mMaxX < a.mMinX || a.mMaxX < mMinX // || b.mMaxY < a.mMinY || a.mMaxY < b.mMinY || b.mMaxZ < a.mMinZ || a.mMaxZ < b.mMinZ ) return FALSE; return TRUE; #endif } #endif #ifdef MBP_USE_NO_CMP_OVERLAP_3D static PX_FORCE_INLINE bool 
intersect3D(const MBP_AABB& a, const MBP_AABB& b) { // PT: warning, only valid with the special encoding in InitFrom2 const PxU32 bits0 = (b.mMaxY - a.mMinY)&0x80000000; const PxU32 bits1 = (b.mMaxZ - a.mMinZ)&0x80000000; const PxU32 bits2 = (a.mMaxY - b.mMinY)&0x80000000; const PxU32 bits3 = (a.mMaxZ - b.mMinZ)&0x80000000; const PxU32 bits4 = (b.mMaxX - a.mMinX)&0x80000000; const PxU32 bits5 = (a.mMaxX - b.mMinX)&0x80000000; const PxU32 mask = bits0|(bits1>>1)|(bits2>>2)|(bits3>>3)|(bits4>>4)|(bits5>>5); return !mask; } #endif #ifdef CHECK_NB_OVERLAPS static PxU32 gNbOverlaps = 0; #endif static PX_FORCE_INLINE void outputPair( MBP_PairManager& pairManager, PxU32 index0, PxU32 index1, const MBP_Index* PX_RESTRICT inToOut0, const MBP_Index* PX_RESTRICT inToOut1, const MBPEntry* PX_RESTRICT objects) { #ifdef CHECK_NB_OVERLAPS gNbOverlaps++; #endif const MBP_Index objectIndex0 = inToOut0[index0]; const MBP_Index objectIndex1 = inToOut1[index1]; PX_ASSERT(objectIndex0!=objectIndex1); const MBP_Handle id0 = objects[objectIndex0].mMBPHandle; const MBP_Handle id1 = objects[objectIndex1].mMBPHandle; // printf("2: %d %d\n", index0, index1); // printf("3: %d %d\n", objectIndex0, objectIndex1); pairManager.addPair(id0, id1); } MBPOS_TmpBuffers::MBPOS_TmpBuffers() : mNbSleeping (0), mNbUpdated (0), mInToOut_Dynamic_Sleeping (NULL), mSleepingDynamicBoxes (NULL), mUpdatedDynamicBoxes (NULL) { } MBPOS_TmpBuffers::~MBPOS_TmpBuffers() { // printf("mNbSleeping: %d\n", mNbSleeping); if(mInToOut_Dynamic_Sleeping!=mInToOut_Dynamic_Sleeping_Stack) MBP_FREE(mInToOut_Dynamic_Sleeping); if(mSleepingDynamicBoxes!=mSleepingDynamicBoxes_Stack) PX_DELETE_ARRAY(mSleepingDynamicBoxes); if(mUpdatedDynamicBoxes!=mUpdatedDynamicBoxes_Stack) PX_DELETE_ARRAY(mUpdatedDynamicBoxes); mNbSleeping = 0; mNbUpdated = 0; } void MBPOS_TmpBuffers::allocateSleeping(PxU32 nbSleeping, PxU32 nbSentinels) { if(nbSleeping>mNbSleeping) { if(mInToOut_Dynamic_Sleeping!=mInToOut_Dynamic_Sleeping_Stack) 
MBP_FREE(mInToOut_Dynamic_Sleeping); if(mSleepingDynamicBoxes!=mSleepingDynamicBoxes_Stack) PX_DELETE_ARRAY(mSleepingDynamicBoxes); if(nbSleeping+nbSentinels<=STACK_BUFFER_SIZE) { mSleepingDynamicBoxes = mSleepingDynamicBoxes_Stack; mInToOut_Dynamic_Sleeping = mInToOut_Dynamic_Sleeping_Stack; } else { mSleepingDynamicBoxes = PX_NEW(MBP_AABB)[nbSleeping+nbSentinels]; mInToOut_Dynamic_Sleeping = reinterpret_cast<MBP_Index*>(MBP_ALLOC(sizeof(MBP_Index)*nbSleeping)); } mNbSleeping = nbSleeping; } } void MBPOS_TmpBuffers::allocateUpdated(PxU32 nbUpdated, PxU32 nbSentinels) { if(nbUpdated>mNbUpdated) { if(mUpdatedDynamicBoxes!=mUpdatedDynamicBoxes_Stack) PX_DELETE_ARRAY(mUpdatedDynamicBoxes); if(nbUpdated+nbSentinels<=STACK_BUFFER_SIZE) mUpdatedDynamicBoxes = mUpdatedDynamicBoxes_Stack; else mUpdatedDynamicBoxes = PX_NEW(MBP_AABB)[nbUpdated+nbSentinels]; mNbUpdated = nbUpdated; } } void Region::preparePruning(MBPOS_TmpBuffers& buffers) { PxU32 _saved = mNbUpdatedBoxes; mNbUpdatedBoxes = 0; if(mPrevNbUpdatedBoxes!=_saved) mNeedsSortingSleeping = true; PxU32 nb = mNbDynamicBoxes; if(!nb) { mInput.mNeeded = false; mPrevNbUpdatedBoxes = 0; mNeedsSortingSleeping = true; return; } const MBP_AABB* PX_RESTRICT dynamicBoxes = mDynamicBoxes; PxU32* PX_RESTRICT posList = mPosList; #if PX_DEBUG PxU32 verifyNbUpdated = 0; for(PxU32 i=0;i<mMaxNbObjects;i++) { if(mObjects[i].mUpdated) verifyNbUpdated++; } PX_ASSERT(verifyNbUpdated==_saved); #endif // Build main list using the primary axis PxU32 nbUpdated = 0; PxU32 nbNonUpdated = 0; { nbUpdated = _saved; nbNonUpdated = nb - _saved; for(PxU32 i=0;i<nbUpdated;i++) { #if PX_DEBUG const PxU32 objectIndex = mInToOut_Dynamic[i]; PX_ASSERT(mObjects[objectIndex].mUpdated); mObjects[objectIndex].mUpdated = false; #endif posList[i] = dynamicBoxes[i].mMinX; } if(mNeedsSortingSleeping) { for(PxU32 i=0;i<nbNonUpdated;i++) { #if PX_DEBUG const PxU32 objectIndex = mInToOut_Dynamic[i]; PX_ASSERT(!mObjects[objectIndex].mUpdated); #endif PxU32 j = i + 
nbUpdated; posList[j] = dynamicBoxes[j].mMinX; } } #if PX_DEBUG else { for(PxU32 i=0;i<nbNonUpdated;i++) { const PxU32 objectIndex = mInToOut_Dynamic[i]; PX_ASSERT(!mObjects[objectIndex].mUpdated); PxU32 j = i + nbUpdated; PX_ASSERT(posList[j] == dynamicBoxes[j].mMinX); } } #endif } PX_ASSERT(nbUpdated==verifyNbUpdated); PX_ASSERT(nbUpdated+nbNonUpdated==nb); mNbUpdatedBoxes = nbUpdated; if(!nbUpdated) { mInput.mNeeded = false; mPrevNbUpdatedBoxes = 0; mNeedsSortingSleeping = true; return; } mPrevNbUpdatedBoxes = mNbUpdatedBoxes; /////// // ### TODO: no need to recreate those buffers each frame! MBP_Index* PX_RESTRICT inToOut_Dynamic_Sleeping = NULL; MBP_AABB* PX_RESTRICT sleepingDynamicBoxes = NULL; if(nbNonUpdated) { if(mNeedsSortingSleeping) { const PxU32* PX_RESTRICT sorted = mRS.Sort(posList+nbUpdated, nbNonUpdated, RADIX_UNSIGNED).GetRanks(); const PxU32 nbSentinels = 2; buffers.allocateSleeping(nbNonUpdated, nbSentinels); sleepingDynamicBoxes = buffers.mSleepingDynamicBoxes; inToOut_Dynamic_Sleeping = buffers.mInToOut_Dynamic_Sleeping; for(PxU32 i=0;i<nbNonUpdated;i++) { const PxU32 sortedIndex = nbUpdated+sorted[i]; sleepingDynamicBoxes[i] = dynamicBoxes[sortedIndex]; inToOut_Dynamic_Sleeping[i] = mInToOut_Dynamic[sortedIndex]; } initSentinel(sleepingDynamicBoxes[nbNonUpdated]); initSentinel(sleepingDynamicBoxes[nbNonUpdated+1]); mNeedsSortingSleeping = false; } else { sleepingDynamicBoxes = buffers.mSleepingDynamicBoxes; inToOut_Dynamic_Sleeping = buffers.mInToOut_Dynamic_Sleeping; #if PX_DEBUG for(PxU32 i=0;i<nbNonUpdated-1;i++) PX_ASSERT(sleepingDynamicBoxes[i].mMinX<=sleepingDynamicBoxes[i+1].mMinX); #endif } } else { mNeedsSortingSleeping = true; } /////// // posList[nbUpdated] = MAX_PxU32; // nb = nbUpdated; // Sort the list // const PxU32* PX_RESTRICT sorted = mRS.Sort(posList, nbUpdated+1, RADIX_UNSIGNED).GetRanks(); const PxU32* PX_RESTRICT sorted = mRS.Sort(posList, nbUpdated, RADIX_UNSIGNED).GetRanks(); const PxU32 nbSentinels = 2; 
buffers.allocateUpdated(nbUpdated, nbSentinels); MBP_AABB* PX_RESTRICT updatedDynamicBoxes = buffers.mUpdatedDynamicBoxes; MBP_Index* PX_RESTRICT inToOut_Dynamic = reinterpret_cast<MBP_Index*>(mRS.GetRecyclable()); for(PxU32 i=0;i<nbUpdated;i++) { const PxU32 sortedIndex = sorted[i]; updatedDynamicBoxes[i] = dynamicBoxes[sortedIndex]; inToOut_Dynamic[i] = mInToOut_Dynamic[sortedIndex]; } initSentinel(updatedDynamicBoxes[nbUpdated]); initSentinel(updatedDynamicBoxes[nbUpdated+1]); dynamicBoxes = updatedDynamicBoxes; mInput.mObjects = mObjects; // Can be shared (1) mInput.mUpdatedDynamicBoxes = updatedDynamicBoxes; // Can be shared (2) => buffers.mUpdatedDynamicBoxes; mInput.mSleepingDynamicBoxes = sleepingDynamicBoxes; mInput.mInToOut_Dynamic = inToOut_Dynamic; // Can be shared (3) => (MBP_Index*)mRS.GetRecyclable(); mInput.mInToOut_Dynamic_Sleeping = inToOut_Dynamic_Sleeping; mInput.mNbUpdated = nbUpdated; // Can be shared (4) mInput.mNbNonUpdated = nbNonUpdated; mInput.mNeeded = true; } void Region::prepareBIPPruning(const MBPOS_TmpBuffers& buffers) { if(!mNbUpdatedBoxes || !mNbStaticBoxes) { mInput.mBIPInput.mNeeded = false; return; } mInput.mBIPInput.mObjects = mObjects; // Can be shared (1) mInput.mBIPInput.mNbUpdatedBoxes = mNbUpdatedBoxes; // Can be shared (4) mInput.mBIPInput.mNbStaticBoxes = mNbStaticBoxes; // mInput.mBIPInput.mDynamicBoxes = mDynamicBoxes; mInput.mBIPInput.mDynamicBoxes = buffers.mUpdatedDynamicBoxes; // Can be shared (2) mInput.mBIPInput.mStaticBoxes = mStaticBoxes; mInput.mBIPInput.mInToOut_Static = mInToOut_Static; mInput.mBIPInput.mInToOut_Dynamic = reinterpret_cast<const MBP_Index*>(mRS.GetRecyclable()); // Can be shared (3) mInput.mBIPInput.mNeeded = true; } static void doCompleteBoxPruning(MBP_PairManager* PX_RESTRICT pairManager, const BoxPruning_Input& input) { const MBPEntry* PX_RESTRICT objects = input.mObjects; const MBP_AABB* PX_RESTRICT updatedDynamicBoxes = input.mUpdatedDynamicBoxes; const MBP_AABB* PX_RESTRICT 
sleepingDynamicBoxes = input.mSleepingDynamicBoxes; const MBP_Index* PX_RESTRICT inToOut_Dynamic = input.mInToOut_Dynamic; const MBP_Index* PX_RESTRICT inToOut_Dynamic_Sleeping = input.mInToOut_Dynamic_Sleeping; const PxU32 nbUpdated = input.mNbUpdated; const PxU32 nbNonUpdated = input.mNbNonUpdated; // // PT: find sleeping-dynamics-vs-active-dynamics overlaps if(nbNonUpdated) { const PxU32 nb0 = nbUpdated; const PxU32 nb1 = nbNonUpdated; // PxU32 index0 = 0; PxU32 runningIndex1 = 0; while(runningIndex1<nb1 && index0<nb0) { const MBP_AABB& box0 = updatedDynamicBoxes[index0]; const PxU32 limit = box0.mMaxX; SIMD_OVERLAP_PRELOAD_BOX0 const PxU32 l = box0.mMinX; while(sleepingDynamicBoxes[runningIndex1].mMinX<l) runningIndex1++; PxU32 index1 = runningIndex1; while(sleepingDynamicBoxes[index1].mMinX<=limit) { MBP_OVERLAP_TEST(sleepingDynamicBoxes[index1]) { outputPair(*pairManager, index0, index1, inToOut_Dynamic, inToOut_Dynamic_Sleeping, objects); } index1++; } index0++; } //// index0 = 0; PxU32 runningIndex0 = 0; while(runningIndex0<nb0 && index0<nb1) { const MBP_AABB& box0 = sleepingDynamicBoxes[index0]; const PxU32 limit = box0.mMaxX; SIMD_OVERLAP_PRELOAD_BOX0 const PxU32 l = box0.mMinX; while(updatedDynamicBoxes[runningIndex0].mMinX<=l) runningIndex0++; PxU32 index1 = runningIndex0; while(updatedDynamicBoxes[index1].mMinX<=limit) { MBP_OVERLAP_TEST(updatedDynamicBoxes[index1]) { outputPair(*pairManager, index1, index0, inToOut_Dynamic, inToOut_Dynamic_Sleeping, objects); } index1++; } index0++; } } /////// // PT: find active-dynamics-vs-active-dynamics overlaps PxU32 index0 = 0; PxU32 runningIndex = 0; while(runningIndex<nbUpdated && index0<nbUpdated) { const MBP_AABB& box0 = updatedDynamicBoxes[index0]; const PxU32 limit = box0.mMaxX; SIMD_OVERLAP_PRELOAD_BOX0 const PxU32 l = box0.mMinX; while(updatedDynamicBoxes[runningIndex++].mMinX<l); if(runningIndex<nbUpdated) { PxU32 index1 = runningIndex; while(updatedDynamicBoxes[index1].mMinX<=limit) { 
MBP_OVERLAP_TEST(updatedDynamicBoxes[index1]) { outputPair(*pairManager, index0, index1, inToOut_Dynamic, inToOut_Dynamic, objects); } index1++; } } index0++; } } static void doBipartiteBoxPruning(MBP_PairManager* PX_RESTRICT pairManager, const BIP_Input& input) { // ### crashes because the code expects the dynamic array to be sorted, but mDynamicBoxes is not // ### we should instead modify mNbUpdatedBoxes so that mNbUpdatedBoxes == mNbDynamicBoxes, and // ### then the proper sorting happens in CompleteBoxPruning (right?) const PxU32 nb0 = input.mNbUpdatedBoxes; const PxU32 nb1 = input.mNbStaticBoxes; const MBPEntry* PX_RESTRICT mObjects = input.mObjects; const MBP_AABB* PX_RESTRICT dynamicBoxes = input.mDynamicBoxes; const MBP_AABB* PX_RESTRICT staticBoxes = input.mStaticBoxes; const MBP_Index* PX_RESTRICT inToOut_Static = input.mInToOut_Static; const MBP_Index* PX_RESTRICT inToOut_Dynamic = input.mInToOut_Dynamic; PX_ASSERT(isSentinel(staticBoxes[nb1])); PX_ASSERT(isSentinel(staticBoxes[nb1+1])); // const MBP_AABB Saved = staticBoxes[nb1]; // const MBP_AABB Saved1 = staticBoxes[nb1+1]; // initSentinel(((MBP_AABB* PX_RESTRICT)staticBoxes)[nb1]); // initSentinel(((MBP_AABB* PX_RESTRICT)staticBoxes)[nb1+1]); // PxU32 index0 = 0; PxU32 runningIndex1 = 0; while(runningIndex1<nb1 && index0<nb0) { const MBP_AABB& box0 = dynamicBoxes[index0]; const PxU32 limit = box0.mMaxX; SIMD_OVERLAP_PRELOAD_BOX0 const PxU32 l = box0.mMinX; while(staticBoxes[runningIndex1].mMinX<l) runningIndex1++; PxU32 index1 = runningIndex1; while(staticBoxes[index1].mMinX<=limit) { MBP_OVERLAP_TEST(staticBoxes[index1]) { outputPair(*pairManager, index0, index1, inToOut_Dynamic, inToOut_Static, mObjects); } index1++; } index0++; } //// index0 = 0; PxU32 runningIndex0 = 0; while(runningIndex0<nb0 && index0<nb1) { const MBP_AABB& box0 = staticBoxes[index0]; const PxU32 limit = box0.mMaxX; SIMD_OVERLAP_PRELOAD_BOX0 const PxU32 l = box0.mMinX; while(dynamicBoxes[runningIndex0].mMinX<=l) 
runningIndex0++; PxU32 index1 = runningIndex0; while(dynamicBoxes[index1].mMinX<=limit) { MBP_OVERLAP_TEST(dynamicBoxes[index1]) { outputPair(*pairManager, index1, index0, inToOut_Dynamic, inToOut_Static, mObjects); } index1++; } index0++; } // MBP_FREE(inToOut_Dynamic); // ((MBP_AABB* PX_RESTRICT)staticBoxes)[nb1] = Saved; // ((MBP_AABB* PX_RESTRICT)staticBoxes)[nb1+1] = Saved1; } void Region::prepareOverlaps() { if(!mNbUpdatedBoxes && !mNeedsSorting) return; if(mNeedsSorting) { staticSort(); // PT: when a static object is added/removed/updated we need to compute the overlaps again // even if no dynamic box has been updated. The line below forces all dynamic boxes to be // sorted in PreparePruning() and tested for overlaps in BipartiteBoxPruning(). It would be // more efficient to: // a) skip the actual pruning in PreparePruning() (we only need to re-sort) // b) do BipartiteBoxPruning() with the new/modified boxes, not all of them // Well, not done yet. mNbUpdatedBoxes = mNbDynamicBoxes; mPrevNbUpdatedBoxes = 0; mNeedsSortingSleeping = true; #if PX_DEBUG for(PxU32 i=0;i<mNbDynamicBoxes;i++) { const PxU32 objectIndex = mInToOut_Dynamic[i]; mObjects[objectIndex].mUpdated = true; } #endif } preparePruning(mTmpBuffers); prepareBIPPruning(mTmpBuffers); } void Region::findOverlaps(MBP_PairManager& pairManager) { PX_ASSERT(!mNeedsSorting); if(!mNbUpdatedBoxes) return; if(mInput.mNeeded) doCompleteBoxPruning(&pairManager, mInput); if(mInput.mBIPInput.mNeeded) doBipartiteBoxPruning(&pairManager, mInput.mBIPInput); mNbUpdatedBoxes = 0; } /////////////////////////////////////////////////////////////////////////// MBP::MBP() : mNbRegions (0), mFirstFreeIndex (INVALID_ID), mFirstFreeIndexBP (INVALID_ID) #ifdef MBP_REGION_BOX_PRUNING ,mNbActiveRegions (0), mDirtyRegions (true) #endif { for(PxU32 i=0;i<MAX_NB_MBP+1;i++) mFirstFree[i] = INVALID_ID; } MBP::~MBP() { /* for(PxU32 i=1;i<MAX_NB_MBP;i++) { if(mHandles[i].GetNbEntries()) { const PxU32 SizeOfBundle = 
sizeof(RegionHandle)*i; // printf("Handles %d: %d\n", i, mHandles[i].GetNbEntries()*sizeof(PxU32)/SizeOfBundle); } }*/ reset(); } void MBP::freeBuffers() { mRemoved.empty(); mOutOfBoundsObjects.clear(); } void MBP::preallocate(PxU32 nbRegions, PxU32 nbObjects, PxU32 maxNbOverlaps) { if(nbRegions) { mRegions.clear(); mRegions.reserve(nbRegions); } if(nbObjects) { mMBP_Objects.clear(); mMBP_Objects.reserve(nbObjects); #ifdef USE_FULLY_INSIDE_FLAG mFullyInsideBitmap.init(nbObjects); mFullyInsideBitmap.clearAll(); #endif } mPairManager.reserveMemory(maxNbOverlaps); } PX_COMPILE_TIME_ASSERT(sizeof(BpHandle)<=sizeof(PxU32)); void MBP::addToOutOfBoundsArray(BpHandle id) { PX_ASSERT(mOutOfBoundsObjects.find(PxU32(id)) == mOutOfBoundsObjects.end()); mOutOfBoundsObjects.pushBack(PxU32(id)); } static void setupOverlapFlags(PxU32 nbRegions, RegionData* PX_RESTRICT regions) { for(PxU32 i=0;i<nbRegions;i++) regions[i].mOverlap = false; for(PxU32 i=0;i<nbRegions;i++) { if(!regions[i].mBP) continue; for(PxU32 j=i+1;j<nbRegions;j++) { if(!regions[j].mBP) continue; if(regions[i].mBox.intersectNoTouch(regions[j].mBox)) { regions[i].mOverlap = true; regions[j].mOverlap = true; } } } } //#define PRINT_STATS #ifdef PRINT_STATS #include <stdio.h> #endif // PT: TODO: // - We could try to keep bounds around all objects (for each region), and then test the new region's bounds against these instead of // testing all objects one by one. These new bounds (around all objects of a given region) would be delicate to maintain though. // - Just store these "fully inside flags" (i.e. objects) in a separate list? Or can we do MTF on objects? (probably not, else we // wouldn't have holes in the array due to removed objects) // PT: automatically populate new region with overlapping objects. // Brute-force version checking all existing objects, potentially optimized using "fully inside" flags. 
//#define FIRST_VERSION #ifdef FIRST_VERSION void MBP::populateNewRegion(const MBP_AABB& box, Region* addedRegion, PxU32 regionIndex) { const RegionData* PX_RESTRICT regions = mRegions.begin(); const PxU32 nbObjects = mMBP_Objects.size(); MBP_Object* PX_RESTRICT objects = mMBP_Objects.begin(); #ifdef PRINT_STATS PxU32 nbObjectsFound = 0; PxU32 nbObjectsTested = 0; #endif #ifdef USE_FULLY_INSIDE_FLAG const PxU32* fullyInsideFlags = mFullyInsideBitmap.getBits(); #endif PxU32 j=0; while(j<nbObjects) { #ifdef USE_FULLY_INSIDE_FLAG const PxU32 blockFlags = fullyInsideFlags[j>>5]; #ifdef HWSCAN if(blockFlags==0) //HWSCAN #else if(blockFlags==0xffffffff) #endif { j+=32; continue; } PxU32 nbToGo = PxMin(nbObjects - j, PxU32(32)); PxU32 mask = 1; while(nbToGo--) { MBP_Object& currentObject = objects[j]; // PT: if an object A is fully contained inside all the regions S it overlaps, we don't need to test it against the new region R. // The rationale is that even if R does overlap A, any new object B must touch S to overlap with A. So B would be added to S and // the (A,B) overlap would be detected in S, even if it's not detected in R. const PxU32 res = blockFlags & mask; PX_ASSERT((mFullyInsideBitmap.isSet(j) && res) || (!mFullyInsideBitmap.isSet(j) && !res)); mask+=mask; j++; #ifdef HWSCAN if(!res) //HWSCAN #else if(res) #endif continue; PX_ASSERT(!(currentObject.mFlags & MBP_REMOVED)); #else MBP_Object& currentObject = objects[j++]; if(currentObject.mFlags & MBP_REMOVED) continue; // PT: object is in the free list #endif #ifdef PRINT_STATS nbObjectsTested++; #endif MBP_AABB bounds; MBP_Handle mbpHandle; const PxU32 nbHandles = currentObject.mNbHandles; if(nbHandles) { RegionHandle* PX_RESTRICT handles = getHandles(currentObject, nbHandles); // PT: no need to test all regions since they should contain the same info. Just retrieve bounds from the first one. 
PxU32 i=0; // for(PxU32 i=0;i<nbHandles;i++) { const RegionHandle& h = handles[i]; const RegionData& currentRegion = regions[h.mInternalBPHandle]; PX_ASSERT(currentRegion.mBP); mbpHandle = currentRegion.mBP->retrieveBounds(bounds, h.mHandle); } } else { PX_ASSERT(mManager); // PT: if the object is out-of-bounds, we're out-of-luck. We don't have the object bounds, so we need to retrieve them // from the AABB manager - and then re-encode them. This is not very elegant or efficient, but it should rarely happen // so this is good enough for now. const PxBounds3 decodedBounds = mManager->getBPBounds(currentObject.mUserID); bounds.initFrom2(decodedBounds); mbpHandle = currentObject.mHandlesIndex; } if(bounds.intersect(box)) { // updateObject(mbpHandle, bounds); updateObjectAfterNewRegionAdded(mbpHandle, bounds, addedRegion, regionIndex); #ifdef PRINT_STATS nbObjectsFound++; #endif } #ifdef USE_FULLY_INSIDE_FLAG } #endif } #ifdef PRINT_STATS printf("Populating new region with %d objects (tested %d/%d object)\n", nbObjectsFound, nbObjectsTested, nbObjects); #endif } #endif // PT: version using lowestSetBit #define SECOND_VERSION #ifdef SECOND_VERSION /* PX_FORCE_INLINE PxU32 lowestSetBitUnsafe64(PxU64 v) { unsigned long retval; _BitScanForward64(&retval, v); return retval; }*/ void MBP::populateNewRegion(const MBP_AABB& box, Region* addedRegion, PxU32 regionIndex, const PxBounds3* boundsArray, const PxReal* contactDistance) { const RegionData* PX_RESTRICT regions = mRegions.begin(); const PxU32 nbObjects = mMBP_Objects.size(); PX_UNUSED(nbObjects); MBP_Object* PX_RESTRICT objects = mMBP_Objects.begin(); const PxU32* fullyInsideFlags = mFullyInsideBitmap.getBits(); // const PxU64* fullyInsideFlags = (const PxU64*)mFullyInsideBitmap.getBits(); if(!fullyInsideFlags) return; const PxU32 lastSetBit = mFullyInsideBitmap.findLast(); // const PxU64 lastSetBit = mFullyInsideBitmap.findLast(); #ifdef PRINT_STATS PxU32 nbObjectsFound = 0; PxU32 nbObjectsTested = 0; #endif // PT: ### 
bitmap iterator pattern for(PxU32 w = 0; w <= lastSetBit >> 5; ++w) // for(PxU64 w = 0; w <= lastSetBit >> 6; ++w) { for(PxU32 b = fullyInsideFlags[w]; b; b &= b-1) // for(PxU64 b = fullyInsideFlags[w]; b; b &= b-1) { const PxU32 index = PxU32(w<<5|PxLowestSetBit(b)); // const PxU64 index = (PxU64)(w<<6|::lowestSetBitUnsafe64(b)); PX_ASSERT(index<nbObjects); MBP_Object& currentObject = objects[index]; // PT: if an object A is fully contained inside all the regions S it overlaps, we don't need to test it against the new region R. // The rationale is that even if R does overlap A, any new object B must touch S to overlap with A. So B would be added to S and // the (A,B) overlap would be detected in S, even if it's not detected in R. PX_ASSERT(!(currentObject.mFlags & MBP_REMOVED)); #ifdef HWSCAN PX_ASSERT(mFullyInsideBitmap.isSet(index)); #else PX_ASSERT(!mFullyInsideBitmap.isSet(index)); #endif #ifdef PRINT_STATS nbObjectsTested++; #endif MBP_AABB bounds; MBP_Handle mbpHandle; const PxU32 nbHandles = currentObject.mNbHandles; if(nbHandles) { RegionHandle* PX_RESTRICT handles = getHandles(currentObject, nbHandles); // PT: no need to test all regions since they should contain the same info. Just retrieve bounds from the first one. PxU32 i=0; // for(PxU32 i=0;i<nbHandles;i++) { const RegionHandle& h = handles[i]; const RegionData& currentRegion = regions[h.mInternalBPHandle]; PX_ASSERT(currentRegion.mBP); mbpHandle = currentRegion.mBP->retrieveBounds(bounds, h.mHandle); } } else { // PT: if the object is out-of-bounds, we're out-of-luck. We don't have the object bounds, so we need to retrieve them // from the AABB manager - and then re-encode them. This is not very elegant or efficient, but it should rarely happen // so this is good enough for now. 
const PxBounds3 rawBounds = boundsArray[currentObject.mUserID]; PxVec3 c(contactDistance[currentObject.mUserID]); const PxBounds3 decodedBounds(rawBounds.minimum - c, rawBounds.maximum + c); bounds.initFrom2(decodedBounds); mbpHandle = currentObject.mHandlesIndex; } if(bounds.intersects(box)) { // updateObject(mbpHandle, bounds); updateObjectAfterNewRegionAdded(mbpHandle, bounds, addedRegion, regionIndex); #ifdef PRINT_STATS nbObjectsFound++; #endif } } } #ifdef PRINT_STATS printf("Populating new region with %d objects (tested %d/%d object)\n", nbObjectsFound, nbObjectsTested, nbObjects); #endif } #endif //#define THIRD_VERSION #ifdef THIRD_VERSION void MBP::populateNewRegion(const MBP_AABB& box, Region* addedRegion, PxU32 regionIndex) { const RegionData* PX_RESTRICT regions = mRegions.begin(); const PxU32 nbObjects = mMBP_Objects.size(); PX_UNUSED(nbObjects); MBP_Object* PX_RESTRICT objects = mMBP_Objects.begin(); const PxU32* fullyInsideFlags = mFullyInsideBitmap.getBits(); if(!fullyInsideFlags) return; #ifdef PRINT_STATS PxU32 nbObjectsFound = 0; PxU32 nbObjectsTested = 0; #endif PxBitMap bm; bm.importData(mFullyInsideBitmap.getSize(), (PxU32*)fullyInsideFlags); PxBitMap::Iterator it(bm); PxU32 index = it.getNext(); while(index != PxBitMap::Iterator::DONE) { PX_ASSERT(index<nbObjects); MBP_Object& currentObject = objects[index]; // PT: if an object A is fully contained inside all the regions S it overlaps, we don't need to test it against the new region R. // The rationale is that even if R does overlap A, any new object B must touch S to overlap with A. So B would be added to S and // the (A,B) overlap would be detected in S, even if it's not detected in R. 
PX_ASSERT(!(currentObject.mFlags & MBP_REMOVED)); #ifdef PRINT_STATS nbObjectsTested++; #endif MBP_AABB bounds; MBP_Handle mbpHandle; const PxU32 nbHandles = currentObject.mNbHandles; if(nbHandles) { RegionHandle* PX_RESTRICT handles = getHandles(currentObject, nbHandles); // PT: no need to test all regions since they should contain the same info. Just retrieve bounds from the first one. PxU32 i=0; // for(PxU32 i=0;i<nbHandles;i++) { const RegionHandle& h = handles[i]; const RegionData& currentRegion = regions[h.mInternalBPHandle]; PX_ASSERT(currentRegion.mBP); mbpHandle = currentRegion.mBP->retrieveBounds(bounds, h.mHandle); } } else { PX_ASSERT(mManager); // PT: if the object is out-of-bounds, we're out-of-luck. We don't have the object bounds, so we need to retrieve them // from the AABB manager - and then re-encode them. This is not very elegant or efficient, but it should rarely happen // so this is good enough for now. const PxBounds3 decodedBounds = mManager->getBPBounds(currentObject.mUserID); bounds.initFrom2(decodedBounds); mbpHandle = currentObject.mHandlesIndex; } if(bounds.intersect(box)) { // updateObject(mbpHandle, bounds); updateObjectAfterNewRegionAdded(mbpHandle, bounds, addedRegion, regionIndex); #ifdef PRINT_STATS nbObjectsFound++; #endif } index = it.getNext(); } #ifdef PRINT_STATS printf("Populating new region with %d objects (tested %d/%d object)\n", nbObjectsFound, nbObjectsTested, nbObjects); #endif } #endif PxU32 MBP::addRegion(const PxBroadPhaseRegion& region, bool populateRegion, const PxBounds3* boundsArray, const PxReal* contactDistance) { PxU32 regionHandle; RegionData* PX_RESTRICT buffer; if(mFirstFreeIndexBP!=INVALID_ID) { regionHandle = mFirstFreeIndexBP; buffer = mRegions.begin(); buffer += regionHandle; mFirstFreeIndexBP = PxU32(size_t(buffer->mUserData)); // PT: this is safe, we previously stored a PxU32 in there } else { if(mNbRegions>=MAX_NB_MBP) { PxGetFoundation().error(PxErrorCode::eOUT_OF_MEMORY, PX_FL, "MBP::addRegion: 
max number of regions reached.");
			return INVALID_ID;
		}
		regionHandle = mNbRegions++;
		buffer = reserveContainerMemory<RegionData>(mRegions, 1);
	}

	Region* newRegion = PX_NEW(Region);
	buffer->mBox.initFrom2(region.mBounds);
	buffer->mBP = newRegion;
	buffer->mUserData = region.mUserData;

	// A region has been added so the pairwise region-overlap flags must be recomputed.
	setupOverlapFlags(mNbRegions, mRegions.begin());

	// PT: automatically populate new region with overlapping objects
	if(populateRegion)
		populateNewRegion(buffer->mBox, newRegion, regionHandle, boundsArray, contactDistance);

#ifdef MBP_REGION_BOX_PRUNING
	mDirtyRegions = true;
#endif
	return regionHandle;
}

// ### TODO: recycle regions, make sure objects are properly deleted/transferred, etc
// ### TODO: what happens if we delete a zone then immediately add it back? Do objects get deleted?
// ### TODO: in fact if we remove a zone but we keep the objects, what happens to their current overlaps? Are they kept or discarded?
bool MBP::removeRegion(PxU32 handle)
{
	if(handle>=mNbRegions)
	{
		PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "MBP::removeRegion: invalid handle.");
		return false;
	}

	RegionData* PX_RESTRICT region = mRegions.begin();
	region += handle;

	Region* bp = region->mBP;
	if(!bp)
	{
		// The slot index is in range but its region was already removed (slot is on the free list).
		PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "MBP::removeRegion: invalid handle.");
		return false;
	}

	// Set the removed region's box to empty so that code which still parses this slot
	// (e.g. the region loop in updateObject) finds no overlap with it.
	PxBounds3 empty;
	empty.setEmpty();
	region->mBox.initFrom2(empty);

	{
		// We are going to remove the region but it can still contain objects. We need to update
		// those objects so that their handles and out-of-bounds status are modified.
		//
		// Unfortunately there is no way to iterate over active objects in a region, so we need
		// to iterate over the max amount of objects. 
### TODO: optimize this
		const PxU32 maxNbObjects = bp->mMaxNbObjects;
		MBPEntry* PX_RESTRICT objects = bp->mObjects;
		for(PxU32 j=0;j<maxNbObjects;j++)
		{
			// The handle is INVALID_ID for non-active entries
			if(objects[j].mMBPHandle!=INVALID_ID)
			{
//				printf("Object to update!\n");
				updateObjectAfterRegionRemoval(objects[j].mMBPHandle, bp);
			}
		}
	}

	PX_DELETE(bp);
	region->mBP = NULL;
	// Recycle the region slot: stash the previous free-list head in mUserData and make this
	// slot the new head (addRegion reads it back with PxU32(size_t(buffer->mUserData))).
	region->mUserData = reinterpret_cast<void*>(size_t(mFirstFreeIndexBP));
	mFirstFreeIndexBP = handle;

#ifdef MBP_REGION_BOX_PRUNING
	mDirtyRegions = true;
#endif

	// A region has been removed so we need to update the overlap flags for all remaining regions
	// ### TODO: optimize this
	setupOverlapFlags(mNbRegions, mRegions.begin());
	return true;
}

// Returns the i-th region, or NULL when the index is out of range or the slot was freed by removeRegion().
const Region* MBP::getRegion(PxU32 i) const
{
	if(i>=mNbRegions)
		return NULL;

	const RegionData* PX_RESTRICT regions = mRegions.begin();
	return regions[i].mBP;
}

#ifdef MBP_REGION_BOX_PRUNING
// Rebuilds the sorted arrays used for region box pruning: gathers the valid regions, sorts
// them by min X coordinate via radix sort, then stores the sorted boxes and their original
// indices. Clears mDirtyRegions when done.
void MBP::buildRegionData()
{
	const PxU32 size = mNbRegions;
	PxU32 nbValidRegions = 0;
	if(size)
	{
		const RegionData* PX_RESTRICT regions = mRegions.begin();

		// Gather valid regions
		PxU32 minPosList[MAX_NB_MBP];
		for(PxU32 i=0;i<size;i++)
		{
			if(regions[i].mBP)
				minPosList[nbValidRegions++] = regions[i].mBox.mMinX;
		}

		// Sort them
		RadixSortBuffered RS;
		const PxU32* sorted = RS.Sort(minPosList, nbValidRegions, RADIX_UNSIGNED).GetRanks();

		// Store sorted
		for(PxU32 i=0;i<nbValidRegions;i++)
		{
			const PxU32 sortedIndex = *sorted++;
			mSortedRegionBoxes[i] = regions[sortedIndex].mBox;
			mSortedRegionIndices[i] = sortedIndex;
		}
	}
	mNbActiveRegions = nbValidRegions;
	mDirtyRegions = false;
}
#endif

// Returns the array of region handles for an object. A single handle is stored inline in
// MBP_Object::mHandle; multiple handles live in the shared per-size pool mHandles[nbHandles],
// starting at the object's mHandlesIndex.
PX_FORCE_INLINE RegionHandle* MBP::getHandles(MBP_Object& currentObject, PxU32 nbHandles)
{
	RegionHandle* handles;
	if(nbHandles==1)
		handles = &currentObject.mHandle;
	else
	{
		const PxU32 handlesIndex = currentObject.mHandlesIndex;
		PxArray<PxU32>& c = mHandles[nbHandles];
		handles = reinterpret_cast<RegionHandle*>(c.begin()+handlesIndex);
	}
	return handles;
}

void MBP::purgeHandles(MBP_Object* 
PX_RESTRICT object, PxU32 nbHandles)
{
	// Returns a multi-handle block to the per-size free list. The first PxU32 of the
	// recycled block stores the previous free-list head (intrusive free list).
	// Single handles are stored inline in the object and need no purging.
	if(nbHandles>1)
	{
		const PxU32 handlesIndex = object->mHandlesIndex;
		PxArray<PxU32>& c = mHandles[nbHandles];
		PxU32* recycled = c.begin() + handlesIndex;
		*recycled = mFirstFree[nbHandles];
		mFirstFree[nbHandles] = handlesIndex;
	}
}

// Stores an object's region handles. One handle goes inline (mHandle); N>1 handles are
// copied into the per-size pool mHandles[nbHandles], reusing a free-list slot when one is
// available, otherwise growing the pool.
void MBP::storeHandles(MBP_Object* PX_RESTRICT object, PxU32 nbHandles, const RegionHandle* PX_RESTRICT handles)
{
	if(nbHandles==1)
	{
		object->mHandle = handles[0];
	}
	else if(nbHandles)
	{
		PxArray<PxU32>& c = mHandles[nbHandles];

		const PxU32 firstFree = mFirstFree[nbHandles];
		PxU32* handlesMemory;
		if(firstFree!=INVALID_ID)
		{
			// Pop a recycled block from the free list; its first PxU32 holds the next head.
			object->mHandlesIndex = firstFree;
			handlesMemory = c.begin() + firstFree;
			mFirstFree[nbHandles] = *handlesMemory;
		}
		else
		{
			const PxU32 handlesIndex = c.size();
			object->mHandlesIndex = handlesIndex;
			handlesMemory = reserveContainerMemory<PxU32>(c, sizeof(RegionHandle)*nbHandles/sizeof(PxU32));
		}

		PxMemCopy(handlesMemory, handles, sizeof(RegionHandle)*nbHandles);
	}
}

// Adds an object to the MBP structure and to every region its box overlaps.
// Returns an encoded MBP handle (object index + flip-flop bit + static bit).
MBP_Handle MBP::addObject(const MBP_AABB& box, BpHandle userID, bool isStatic)
{
	MBP_ObjectIndex objectIndex;
	MBP_Object* objectMemory;
	PxU32 flipFlop;
	if(1)
	{
		if(mFirstFreeIndex!=INVALID_ID)
		{
			// Reuse a previously removed object slot (free list threaded through mHandlesIndex).
			objectIndex = mFirstFreeIndex;
			MBP_Object* objects = mMBP_Objects.begin();
			objectMemory = &objects[objectIndex];
			PX_ASSERT(!objectMemory->mNbHandles);
			mFirstFreeIndex = objectMemory->mHandlesIndex;
			flipFlop = PxU32(objectMemory->getFlipFlop());
		}
		else
		{
			objectIndex = mMBP_Objects.size();
			objectMemory = reserveContainerMemory<MBP_Object>(mMBP_Objects, 1);
			flipFlop = 0;
		}
	}
	else
	{
		// PT: must be possible to use the AABB-manager's ID directly. Something like this:
		objectIndex = userID;
		if(mMBP_Objects.capacity()<userID+1)
		{
			PxU32 newCap = mMBP_Objects.capacity() ? 
mMBP_Objects.capacity()*2 : 128; if(newCap<userID+1) newCap = userID+1; mMBP_Objects.reserve(newCap); } mMBP_Objects.forceSize_Unsafe(userID+1); objectMemory = &mMBP_Objects[userID]; flipFlop = 0; } const MBP_Handle MBPObjectHandle = encodeHandle(objectIndex, flipFlop, isStatic); // mMBP_Objects.Shrink(); PxU32 nbHandles = 0; #ifdef USE_FULLY_INSIDE_FLAG bool newObjectIsFullyInsideRegions = true; #endif const PxU32 nb = mNbRegions; const RegionData* PX_RESTRICT regions = mRegions.begin(); RegionHandle tmpHandles[MAX_NB_MBP+1]; for(PxU32 i=0;i<nb;i++) { #ifdef MBP_USE_NO_CMP_OVERLAP_3D if(intersect3D(regions[i].mBox, box)) #else if(regions[i].mBox.intersects(box)) #endif { #ifdef USE_FULLY_INSIDE_FLAG if(!box.isInside(regions[i].mBox)) newObjectIsFullyInsideRegions = false; #endif #ifdef MBP_USE_WORDS if(regions[i].mBP->mNbObjects==0xffff) PxGetFoundation().error(PxErrorCode::eINTERNAL_ERROR, PX_FL, "MBP::addObject: 64K objects in single region reached. Some collisions might be lost."); else #endif { RegionHandle& h = tmpHandles[nbHandles++]; h.mHandle = regions[i].mBP->addObject(box, MBPObjectHandle, isStatic); h.mInternalBPHandle = PxTo16(i); } } } storeHandles(objectMemory, nbHandles, tmpHandles); objectMemory->mNbHandles = PxTo16(nbHandles); PxU16 flags = 0; if(flipFlop) flags |= MBP_FLIP_FLOP; #ifdef USE_FULLY_INSIDE_FLAG if(nbHandles && newObjectIsFullyInsideRegions) setBit(mFullyInsideBitmap, objectIndex); else clearBit(mFullyInsideBitmap, objectIndex); #endif if(!nbHandles) { objectMemory->mHandlesIndex = MBPObjectHandle; addToOutOfBoundsArray(userID); } if(!isStatic) mUpdatedObjects.setBitChecked(objectIndex); // objectMemory->mUpdated = !isStatic; objectMemory->mFlags = flags; objectMemory->mUserID = userID; return MBPObjectHandle; } bool MBP::removeObject(MBP_Handle handle) { const MBP_ObjectIndex objectIndex = decodeHandle_Index(handle); MBP_Object* PX_RESTRICT objects = mMBP_Objects.begin(); MBP_Object& currentObject = objects[objectIndex]; const 
RegionData* PX_RESTRICT regions = mRegions.begin(); // Parse previously overlapping regions. If still overlapping, update object. Else remove from region. const PxU32 nbHandles = currentObject.mNbHandles; if(nbHandles) { RegionHandle* handles = getHandles(currentObject, nbHandles); for(PxU32 i=0;i<nbHandles;i++) { const RegionHandle& h = handles[i]; const RegionData& currentRegion = regions[h.mInternalBPHandle]; // if(currentRegion.mBP) PX_ASSERT(currentRegion.mBP); currentRegion.mBP->removeObject(h.mHandle); } purgeHandles(&currentObject, nbHandles); } currentObject.mNbHandles = 0; currentObject.mFlags |= MBP_REMOVED; currentObject.mHandlesIndex = mFirstFreeIndex; // if(!decodeHandle_IsStatic(handle)) // if(!currentObject.IsStatic()) mUpdatedObjects.setBitChecked(objectIndex); mFirstFreeIndex = objectIndex; mRemoved.setBitChecked(objectIndex); // PT: this is cleared each frame so it's not a replacement for the MBP_REMOVED flag #ifdef USE_FULLY_INSIDE_FLAG // PT: when removing an object we mark it as "fully inside" so that it is automatically // discarded in the "populateNewRegion" function, without the need for MBP_REMOVED. 
setBit(mFullyInsideBitmap, objectIndex);
#endif
	return true;
}

// Linear search for 'handle' in the currentOverlaps array. When found, the entry is removed
// by swapping in the last element and decrementing the count (order is not preserved) and
// the function returns true. Returns false when the handle is not present.
static PX_FORCE_INLINE bool stillIntersects(PxU32 handle, PxU32& _nb, PxU32* PX_RESTRICT currentOverlaps)
{
	const PxU32 nb = _nb;
	for(PxU32 i=0;i<nb;i++)
	{
		if(currentOverlaps[i]==handle)
		{
			_nb = nb-1;
			currentOverlaps[i] = currentOverlaps[nb-1];
			return true;
		}
	}
	return false;
}

// Updates an object's bounds, refreshing/adding/removing its per-region handles to match
// the regions overlapped by the new box.
bool MBP::updateObject(MBP_Handle handle, const MBP_AABB& box)
{
	const MBP_ObjectIndex objectIndex = decodeHandle_Index(handle);
	const PxU32 isStatic = decodeHandle_IsStatic(handle);

	const PxU32 nbRegions = mNbRegions;
	const RegionData* PX_RESTRICT regions = mRegions.begin();

	MBP_Object* PX_RESTRICT objects = mMBP_Objects.begin();
	MBP_Object& currentObject = objects[objectIndex];

//	if(!isStatic)	// ### removed for PhysX integration (bugfix)
//	if(!currentObject.IsStatic())
	{
		mUpdatedObjects.setBitChecked(objectIndex);
	}

	// PT: fast path which should happen quite frequently. If:
	// - the object was touching a single region
	// - that region doesn't overlap other regions
	// - the object's new bounds is fully inside the region
	// then we know that the object can't touch another region, and we can use this fast-path that simply
	// updates one region and avoids iterating over/testing all the other ones.
const PxU32 nbHandles = currentObject.mNbHandles; if(nbHandles==1) { const RegionHandle& h = currentObject.mHandle; const RegionData& currentRegion = regions[h.mInternalBPHandle]; if(!currentRegion.mOverlap && box.isInside(currentRegion.mBox)) { #ifdef USE_FULLY_INSIDE_FLAG // PT: it is possible that this flag is not set already when reaching this place: // - object touches 2 regions // - then in one frame: // - object moves fully inside one region // - the other region is removed // => nbHandles changes from 2 to 1 while MBP_FULLY_INSIDE is not set setBit(mFullyInsideBitmap, objectIndex); #endif currentRegion.mBP->updateObject(box, h.mHandle); return true; } } // Find regions overlapping object's new position #ifdef USE_FULLY_INSIDE_FLAG bool objectIsFullyInsideRegions = true; #endif PxU32 nbCurrentOverlaps = 0; PxU32 currentOverlaps[MAX_NB_MBP+1]; // PT: here, we may still parse regions which have been removed. But their boxes have been set to empty, // so nothing will happen. for(PxU32 i=0;i<nbRegions;i++) { #ifdef MBP_USE_NO_CMP_OVERLAP_3D if(intersect3D(regions[i].mBox, box)) #else if(regions[i].mBox.intersects(box)) #endif { #ifdef USE_FULLY_INSIDE_FLAG if(!box.isInside(regions[i].mBox)) objectIsFullyInsideRegions = false; #endif PX_ASSERT(nbCurrentOverlaps<MAX_NB_MBP); currentOverlaps[nbCurrentOverlaps++] = i; } } // New data for this frame PxU32 nbNewHandles = 0; RegionHandle newHandles[MAX_NB_MBP+1]; // Parse previously overlapping regions. If still overlapping, update object. Else remove from region. RegionHandle* handles = getHandles(currentObject, nbHandles); for(PxU32 i=0;i<nbHandles;i++) { const RegionHandle& h = handles[i]; PX_ASSERT(h.mInternalBPHandle<nbRegions); const RegionData& currentRegion = regions[h.mInternalBPHandle]; // We need to update object even if it then gets removed, as the removal // doesn't actually report lost pairs - and we need this. 
// currentRegion.mBP->UpdateObject(box, h.mHandle); if(stillIntersects(h.mInternalBPHandle, nbCurrentOverlaps, currentOverlaps)) { currentRegion.mBP->updateObject(box, h.mHandle); // Still collides => keep handle for this frame newHandles[nbNewHandles++] = h; } else { PX_ASSERT(!currentRegion.mBox.intersects(box)); // if(currentRegion.mBP) PX_ASSERT(currentRegion.mBP); currentRegion.mBP->removeObject(h.mHandle); } } // Add to new regions if needed for(PxU32 i=0;i<nbCurrentOverlaps;i++) { // if(currentOverlaps[i]==INVALID_ID) // continue; const PxU32 regionIndex = currentOverlaps[i]; const MBP_Index BPHandle = regions[regionIndex].mBP->addObject(box, handle, isStatic!=0); newHandles[nbNewHandles].mHandle = PxTo16(BPHandle); newHandles[nbNewHandles].mInternalBPHandle = PxTo16(regionIndex); nbNewHandles++; } if(nbHandles==nbNewHandles) { for(PxU32 i=0;i<nbNewHandles;i++) handles[i] = newHandles[i]; } else { purgeHandles(&currentObject, nbHandles); storeHandles(&currentObject, nbNewHandles, newHandles); } currentObject.mNbHandles = PxTo16(nbNewHandles); if(!nbNewHandles && nbHandles) { currentObject.mHandlesIndex = handle; addToOutOfBoundsArray(currentObject.mUserID); } // for(PxU32 i=0;i<nbNewHandles;i++) // currentObject.mHandles[i] = newHandles[i]; #ifdef USE_FULLY_INSIDE_FLAG if(objectIsFullyInsideRegions && nbNewHandles) setBit(mFullyInsideBitmap, objectIndex); else clearBit(mFullyInsideBitmap, objectIndex); #endif return true; } bool MBP::updateObjectAfterRegionRemoval(MBP_Handle handle, Region* removedRegion) { PX_ASSERT(removedRegion); const MBP_ObjectIndex objectIndex = decodeHandle_Index(handle); const PxU32 nbRegions = mNbRegions; PX_UNUSED(nbRegions); const RegionData* PX_RESTRICT regions = mRegions.begin(); MBP_Object* PX_RESTRICT objects = mMBP_Objects.begin(); MBP_Object& currentObject = objects[objectIndex]; // Mark the object as updated so that its pairs are considered for removal. 
If we don't do this an out-of-bounds object
	// resting on another non-out-of-bounds object still collides with that object and the memory associated with that pair
	// is not released. If we mark it as updated the pair is lost, and the out-of-bounds object falls through.
	//
	// However if we do this any pair involving the object will be marked as lost, even the ones involving other regions.
	// Typically the pair will then get lost one frame and get recreated the next frame.
//	mUpdatedObjects.setBitChecked(objectIndex);

	// The object referenced at least one region (the one being removed), so this cannot be 0 here.
	const PxU32 nbHandles = currentObject.mNbHandles;
	PX_ASSERT(nbHandles);

	// New handles
	PxU32 nbNewHandles = 0;
	RegionHandle newHandles[MAX_NB_MBP+1];

	// Parse previously overlapping regions. Keep all of them except removed one.
	RegionHandle* handles = getHandles(currentObject, nbHandles);
	for(PxU32 i=0;i<nbHandles;i++)
	{
		const RegionHandle& h = handles[i];
		PX_ASSERT(h.mInternalBPHandle<nbRegions);
		// Copy every handle whose region is not the one being removed.
		if(regions[h.mInternalBPHandle].mBP!=removedRegion)
			newHandles[nbNewHandles++] = h;
	}

#ifdef USE_FULLY_INSIDE_FLAG
	// PT: in theory we should update the inside flag here but we don't do that for perf reasons.
	// - If the flag is set, it means the object was fully inside all its regions. Removing one of them does not invalidate the flag.
	// - If the flag is not set, removing one region might allow us to set the flag now. However not doing so simply makes the
	// populateNewRegion() function run a bit slower, it does not produce wrong results. This is only until concerned objects are
	// updated again anyway, so we live with this.
#endif PX_ASSERT(nbNewHandles==nbHandles-1); purgeHandles(&currentObject, nbHandles); storeHandles(&currentObject, nbNewHandles, newHandles); currentObject.mNbHandles = PxTo16(nbNewHandles); if(!nbNewHandles) { currentObject.mHandlesIndex = handle; addToOutOfBoundsArray(currentObject.mUserID); #ifdef USE_FULLY_INSIDE_FLAG clearBit(mFullyInsideBitmap, objectIndex); #endif } return true; } bool MBP::updateObjectAfterNewRegionAdded(MBP_Handle handle, const MBP_AABB& box, Region* addedRegion, PxU32 regionIndex) { PX_ASSERT(addedRegion); const MBP_ObjectIndex objectIndex = decodeHandle_Index(handle); const PxU32 isStatic = decodeHandle_IsStatic(handle); MBP_Object* PX_RESTRICT objects = mMBP_Objects.begin(); MBP_Object& currentObject = objects[objectIndex]; // if(!isStatic) // ### removed for PhysX integration (bugfix) // if(!currentObject.IsStatic()) { mUpdatedObjects.setBitChecked(objectIndex); } // PT: here we know that we're touching one more region than before and we'll need to update the handles. // So there is no "fast path" in this case - well the whole function is a fast path if you want. // // We don't need to "find regions overlapping object's new position": we know it's going to be the // same as before, plus the newly added region ("addedRegion"). #ifdef USE_FULLY_INSIDE_FLAG // PT: we know that the object is not marked as "fully inside", otherwise this function would not have been called. #ifdef HWSCAN PX_ASSERT(mFullyInsideBitmap.isSet(objectIndex)); //HWSCAN #else PX_ASSERT(!mFullyInsideBitmap.isSet(objectIndex)); #endif #endif const PxU32 nbHandles = currentObject.mNbHandles; PxU32 nbNewHandles = 0; RegionHandle newHandles[MAX_NB_MBP+1]; // PT: get previously overlapping regions. We didn't actually move so we're still overlapping as before. // We just need to get the handles here. 
RegionHandle* handles = getHandles(currentObject, nbHandles); for(PxU32 i=0;i<nbHandles;i++) newHandles[nbNewHandles++] = handles[i]; // Add to new region { #if PX_DEBUG const RegionData* PX_RESTRICT regions = mRegions.begin(); const RegionData& currentRegion = regions[regionIndex]; PX_ASSERT(currentRegion.mBox.intersects(box)); #endif const MBP_Index BPHandle = addedRegion->addObject(box, handle, isStatic!=0); newHandles[nbNewHandles].mHandle = PxTo16(BPHandle); newHandles[nbNewHandles].mInternalBPHandle = PxTo16(regionIndex); nbNewHandles++; } // PT: we know that we have one more handle than before, no need to test purgeHandles(&currentObject, nbHandles); storeHandles(&currentObject, nbNewHandles, newHandles); currentObject.mNbHandles = PxTo16(nbNewHandles); // PT: we know that we have at least one handle (from the newly added region), so we can't be "out of bounds" here. PX_ASSERT(nbNewHandles); #ifdef USE_FULLY_INSIDE_FLAG // PT: we know that the object was not "fully inside" before, so even if it is fully inside the new region, it // will not be fully inside all of them => no need to change its fully inside flag // TODO: an exception to this would be the case where the object was out-of-bounds, and it's now fully inside the new region // => we could set the flag in that case. #endif return true; } bool MBP_PairManager::computeCreatedDeletedPairs(const MBP_Object* objects, BroadPhaseMBP* mbp, const BitArray& updated, const BitArray& removed) { // PT: parse all currently active pairs. The goal here is to generate the found/lost pairs, compared to previous frame. PxU32 i=0; PxU32 nbActivePairs = mNbActivePairs; while(i<nbActivePairs) { InternalPair& p = mActivePairs[i]; if(p.isNew()) { // New pair // PT: 'isNew' is set to true in the 'addPair' function. In this case the pair did not previously // exist in the structure, and thus we must report the new pair to the client code. 
// // PT: group-based filtering is not needed here, since it has already been done in 'addPair' const PxU32 id0 = p.getId0(); const PxU32 id1 = p.getId1(); PX_ASSERT(id0!=INVALID_ID); PX_ASSERT(id1!=INVALID_ID); const MBP_ObjectIndex index0 = decodeHandle_Index(id0); const MBP_ObjectIndex index1 = decodeHandle_Index(id1); const BpHandle object0 = objects[index0].mUserID; const BpHandle object1 = objects[index1].mUserID; mbp->mCreated.pushBack(BroadPhasePair(object0, object1)); p.clearNew(); p.clearUpdated(); i++; } else if(p.isUpdated()) { // Persistent pair // PT: this pair already existed in the structure, and has been found again this frame. Since // MBP reports "all pairs" each frame (as opposed to SAP), this happens quite often, for each // active persistent pair. p.clearUpdated(); i++; } else { // Lost pair // PT: if the pair is not new and not 'updated', it might be a lost (separated) pair. But this // is not always the case since we now handle "sleeping" objects directly within MBP. A pair // of sleeping objects does not generate an 'addPair' call, so it ends up in this codepath. // Nonetheless the sleeping pair should not be deleted. We can only delete pairs involving // objects that have been actually moved during the frame. This is the only case in which // a pair can indeed become 'lost'. const PxU32 id0 = p.getId0(); const PxU32 id1 = p.getId1(); PX_ASSERT(id0!=INVALID_ID); PX_ASSERT(id1!=INVALID_ID); const MBP_ObjectIndex index0 = decodeHandle_Index(id0); const MBP_ObjectIndex index1 = decodeHandle_Index(id1); // PT: if none of the involved objects have been updated, the pair is just sleeping: keep it and skip it. if(updated.isSetChecked(index0) || updated.isSetChecked(index1)) { // PT: by design (for better or worse) we do not report pairs to the client when // one of the involved objects has been deleted. The pair must still be deleted // from the MBP structure though. 
if(!removed.isSetChecked(index0) && !removed.isSetChecked(index1)) { // PT: doing the group-based filtering here is useless. The pair should not have // been added in the first place. const BpHandle object0 = objects[index0].mUserID; const BpHandle object1 = objects[index1].mUserID; mbp->mDeleted.pushBack(BroadPhasePair(object0, object1)); } const PxU32 hashValue = hash(id0, id1) & mMask; PairManagerData::removePair(id0, id1, hashValue, i); nbActivePairs--; } else i++; } } shrinkMemory(); return true; } void MBP::prepareOverlaps() { const PxU32 nb = mNbRegions; const RegionData* PX_RESTRICT regions = mRegions.begin(); for(PxU32 i=0;i<nb;i++) { if(regions[i].mBP) regions[i].mBP->prepareOverlaps(); } } void MBP::findOverlaps(const Bp::FilterGroup::Enum* PX_RESTRICT groups, const bool* PX_RESTRICT lut) { PxU32 nb = mNbRegions; const RegionData* PX_RESTRICT regions = mRegions.begin(); const MBP_Object* objects = mMBP_Objects.begin(); mPairManager.mObjects = objects; mPairManager.mGroups = groups; mPairManager.mLUT = lut; for(PxU32 i=0;i<nb;i++) { if(regions[i].mBP) regions[i].mBP->findOverlaps(mPairManager); } } PxU32 MBP::finalize(BroadPhaseMBP* mbp) { const MBP_Object* objects = mMBP_Objects.begin(); mPairManager.computeCreatedDeletedPairs(objects, mbp, mUpdatedObjects, mRemoved); mUpdatedObjects.clearAll(); return mPairManager.mNbActivePairs; } void MBP::reset() { PxU32 nb = mNbRegions; RegionData* PX_RESTRICT regions = mRegions.begin(); while(nb--) { // printf("%d objects in region\n", regions->mBP->mNbObjects); PX_DELETE(regions->mBP); regions++; } mNbRegions = 0; mFirstFreeIndex = INVALID_ID; mFirstFreeIndexBP = INVALID_ID; for(PxU32 i=0;i<MAX_NB_MBP+1;i++) { mHandles[i].clear(); mFirstFree[i] = INVALID_ID; } mRegions.clear(); mMBP_Objects.clear(); mPairManager.purge(); mUpdatedObjects.empty(); mRemoved.empty(); mOutOfBoundsObjects.clear(); #ifdef USE_FULLY_INSIDE_FLAG mFullyInsideBitmap.empty(); #endif } void MBP::shiftOrigin(const PxVec3& shift, const 
PxBounds3* boundsArray, const PxReal* contactDistances) { const PxU32 size = mNbRegions; RegionData* PX_RESTRICT regions = mRegions.begin(); // // regions // for(PxU32 i=0; i < size; i++) { if(regions[i].mBP) { MBP_AABB& box = regions[i].mBox; PxBounds3 bounds; box.decode(bounds); bounds.minimum -= shift; bounds.maximum -= shift; box.initFrom2(bounds); } } // // object bounds // const PxU32 nbObjects = mMBP_Objects.size(); MBP_Object* objects = mMBP_Objects.begin(); for(PxU32 i=0; i < nbObjects; i++) { MBP_Object& obj = objects[i]; const PxU32 nbHandles = obj.mNbHandles; if(nbHandles) { MBP_AABB bounds; const PxBounds3 rawBounds = boundsArray[obj.mUserID]; PxVec3 c(contactDistances[obj.mUserID]); const PxBounds3 decodedBounds(rawBounds.minimum - c, rawBounds.maximum + c); bounds.initFrom2(decodedBounds); RegionHandle* PX_RESTRICT handles = getHandles(obj, nbHandles); for(PxU32 j=0; j < nbHandles; j++) { const RegionHandle& h = handles[j]; const RegionData& currentRegion = regions[h.mInternalBPHandle]; PX_ASSERT(currentRegion.mBP); currentRegion.mBP->setBounds(h.mHandle, bounds); } } } } /////////////////////////////////////////////////////////////////////////////// // Below is the PhysX wrapper = link between AABBManager and MBP #define DEFAULT_CREATED_DELETED_PAIRS_CAPACITY 1024 BroadPhaseMBP::BroadPhaseMBP( PxU32 maxNbRegions, PxU32 maxNbBroadPhaseOverlaps, PxU32 maxNbStaticShapes, PxU32 maxNbDynamicShapes, PxU64 contextID) : mMapping (NULL), mCapacity (0), mGroups (NULL), mFilter (NULL), mContextID (contextID) { mMBP = PX_NEW(MBP); const PxU32 nbObjects = maxNbStaticShapes + maxNbDynamicShapes; mMBP->preallocate(maxNbRegions, nbObjects, maxNbBroadPhaseOverlaps); if(nbObjects) allocateMappingArray(nbObjects); mCreated.reserve(DEFAULT_CREATED_DELETED_PAIRS_CAPACITY); mDeleted.reserve(DEFAULT_CREATED_DELETED_PAIRS_CAPACITY); } BroadPhaseMBP::~BroadPhaseMBP() { PX_DELETE(mMBP); PX_FREE(mMapping); } void BroadPhaseMBP::allocateMappingArray(PxU32 newCapacity) { 
PX_ASSERT(newCapacity>mCapacity); MBP_Handle* newMapping = reinterpret_cast<MBP_Handle*>(PX_ALLOC(sizeof(MBP_Handle)*newCapacity, "MBP")); if(mCapacity) PxMemCopy(newMapping, mMapping, mCapacity*sizeof(MBP_Handle)); for(PxU32 i=mCapacity;i<newCapacity;i++) newMapping[i] = PX_INVALID_U32; PX_FREE(mMapping); mMapping = newMapping; mCapacity = newCapacity; } void BroadPhaseMBP::getCaps(PxBroadPhaseCaps& caps) const { caps.mMaxNbRegions = 256; } PxU32 BroadPhaseMBP::getNbRegions() const { // PT: we need to count active regions here, as we only keep track of the total number of // allocated regions internally - and some of which might have been removed. const PxU32 size = mMBP->mNbRegions; /* const RegionData* PX_RESTRICT regions = (const RegionData*)mMBP->mRegions.GetEntries(); PxU32 nbActiveRegions = 0; for(PxU32 i=0;i<size;i++) { if(regions[i].mBP) nbActiveRegions++; } return nbActiveRegions;*/ return size; } PxU32 BroadPhaseMBP::getRegions(PxBroadPhaseRegionInfo* userBuffer, PxU32 bufferSize, PxU32 startIndex) const { const PxU32 size = mMBP->mNbRegions; const RegionData* PX_RESTRICT regions = mMBP->mRegions.begin(); regions += startIndex; const PxU32 writeCount = PxMin(size, bufferSize); for(PxU32 i=0;i<writeCount;i++) { const MBP_AABB& box = regions[i].mBox; box.decode(userBuffer[i].mRegion.mBounds); if(regions[i].mBP) { PX_ASSERT(userBuffer[i].mRegion.mBounds.isValid()); userBuffer[i].mRegion.mUserData = regions[i].mUserData; userBuffer[i].mActive = true; userBuffer[i].mOverlap = regions[i].mOverlap!=0; userBuffer[i].mNbStaticObjects = regions[i].mBP->mNbStaticBoxes; userBuffer[i].mNbDynamicObjects = regions[i].mBP->mNbDynamicBoxes; } else { userBuffer[i].mRegion.mBounds.setEmpty(); userBuffer[i].mRegion.mUserData = NULL; userBuffer[i].mActive = false; userBuffer[i].mOverlap = false; userBuffer[i].mNbStaticObjects = 0; userBuffer[i].mNbDynamicObjects = 0; } } return writeCount; } PxU32 BroadPhaseMBP::addRegion(const PxBroadPhaseRegion& region, bool 
populateRegion, const PxBounds3* boundsArray, const PxReal* contactDistance) { return mMBP->addRegion(region, populateRegion, boundsArray, contactDistance); } bool BroadPhaseMBP::removeRegion(PxU32 handle) { return mMBP->removeRegion(handle); } void BroadPhaseMBP::update(PxcScratchAllocator* scratchAllocator, const BroadPhaseUpdateData& updateData, physx::PxBaseTask* /*continuation*/) { PX_CHECK_AND_RETURN(scratchAllocator, "BroadPhaseMBP::update - scratchAllocator must be non-NULL \n"); PX_UNUSED(scratchAllocator); setUpdateData(updateData); update(); postUpdate(); } static PX_FORCE_INLINE void computeMBPBounds(MBP_AABB& aabb, const PxBounds3* PX_RESTRICT boundsXYZ, const PxReal* PX_RESTRICT contactDistances, const BpHandle index) { const PxBounds3& b = boundsXYZ[index]; const Vec4V contactDistanceV = V4Load(contactDistances[index]); const Vec4V inflatedMinV = V4Sub(V4LoadU(&b.minimum.x), contactDistanceV); const Vec4V inflatedMaxV = V4Add(V4LoadU(&b.maximum.x), contactDistanceV); // PT: this one is safe because we allocated one more box in the array (in BoundsArray::initEntry) PX_ALIGN(16, PxVec4) boxMin; PX_ALIGN(16, PxVec4) boxMax; V4StoreA(inflatedMinV, &boxMin.x); V4StoreA(inflatedMaxV, &boxMax.x); const PxU32* PX_RESTRICT min = PxUnionCast<const PxU32*, const PxF32*>(&boxMin.x); const PxU32* PX_RESTRICT max = PxUnionCast<const PxU32*, const PxF32*>(&boxMax.x); //Avoid min=max by enforcing the rule that mins are even and maxs are odd. 
aabb.mMinX = IntegerAABB::encodeFloatMin(min[0])>>1; aabb.mMinY = IntegerAABB::encodeFloatMin(min[1])>>1; aabb.mMinZ = IntegerAABB::encodeFloatMin(min[2])>>1; aabb.mMaxX = (IntegerAABB::encodeFloatMax(max[0]) | (1<<2))>>1; aabb.mMaxY = (IntegerAABB::encodeFloatMax(max[1]) | (1<<2))>>1; aabb.mMaxZ = (IntegerAABB::encodeFloatMax(max[2]) | (1<<2))>>1; /* const IntegerAABB bounds(boundsXYZ[index], contactDistances[index]); aabb.mMinX = bounds.mMinMax[IntegerAABB::MIN_X]>>1; aabb.mMinY = bounds.mMinMax[IntegerAABB::MIN_Y]>>1; aabb.mMinZ = bounds.mMinMax[IntegerAABB::MIN_Z]>>1; aabb.mMaxX = bounds.mMinMax[IntegerAABB::MAX_X]>>1; aabb.mMaxY = bounds.mMinMax[IntegerAABB::MAX_Y]>>1; aabb.mMaxZ = bounds.mMinMax[IntegerAABB::MAX_Z]>>1;*/ /* aabb.mMinX &= ~1; aabb.mMinY &= ~1; aabb.mMinZ &= ~1; aabb.mMaxX |= 1; aabb.mMaxY |= 1; aabb.mMaxZ |= 1; */ /*#if PX_DEBUG PxBounds3 decodedBox; PxU32* bin = reinterpret_cast<PxU32*>(&decodedBox.minimum.x); bin[0] = decodeFloat(bounds.mMinMax[IntegerAABB::MIN_X]); bin[1] = decodeFloat(bounds.mMinMax[IntegerAABB::MIN_Y]); bin[2] = decodeFloat(bounds.mMinMax[IntegerAABB::MIN_Z]); bin[3] = decodeFloat(bounds.mMinMax[IntegerAABB::MAX_X]); bin[4] = decodeFloat(bounds.mMinMax[IntegerAABB::MAX_Y]); bin[5] = decodeFloat(bounds.mMinMax[IntegerAABB::MAX_Z]); MBP_AABB PrunerBox; PrunerBox.initFrom2(decodedBox); PX_ASSERT(PrunerBox.mMinX==aabb.mMinX); PX_ASSERT(PrunerBox.mMinY==aabb.mMinY); PX_ASSERT(PrunerBox.mMinZ==aabb.mMinZ); PX_ASSERT(PrunerBox.mMaxX==aabb.mMaxX); PX_ASSERT(PrunerBox.mMaxY==aabb.mMaxY); PX_ASSERT(PrunerBox.mMaxZ==aabb.mMaxZ); #endif*/ } void BroadPhaseMBP::removeObjects(const BroadPhaseUpdateData& updateData) { const BpHandle* PX_RESTRICT removed = updateData.getRemovedHandles(); if(removed) { PxU32 nbToGo = updateData.getNumRemovedHandles(); while(nbToGo--) { const BpHandle index = *removed++; PX_ASSERT(index+1<mCapacity); // PT: we allocated one more box on purpose const bool status = mMBP->removeObject(mMapping[index]); 
PX_ASSERT(status); PX_UNUSED(status); mMapping[index] = PX_INVALID_U32; } } } void BroadPhaseMBP::updateObjects(const BroadPhaseUpdateData& updateData) { const BpHandle* PX_RESTRICT updated = updateData.getUpdatedHandles(); if(updated) { const PxBounds3* PX_RESTRICT boundsXYZ = updateData.getAABBs(); PxU32 nbToGo = updateData.getNumUpdatedHandles(); while(nbToGo--) { const BpHandle index = *updated++; PX_ASSERT(index+1<mCapacity); // PT: we allocated one more box on purpose MBP_AABB aabb; computeMBPBounds(aabb, boundsXYZ, updateData.getContactDistance(), index); const bool status = mMBP->updateObject(mMapping[index], aabb); PX_ASSERT(status); PX_UNUSED(status); } } } void BroadPhaseMBP::addObjects(const BroadPhaseUpdateData& updateData) { const BpHandle* PX_RESTRICT created = updateData.getCreatedHandles(); if(created) { const PxBounds3* PX_RESTRICT boundsXYZ = updateData.getAABBs(); const Bp::FilterGroup::Enum* PX_RESTRICT groups = updateData.getGroups(); PxU32 nbToGo = updateData.getNumCreatedHandles(); while(nbToGo--) { const BpHandle index = *created++; PX_ASSERT(index+1<mCapacity); // PT: we allocated one more box on purpose MBP_AABB aabb; computeMBPBounds(aabb, boundsXYZ, updateData.getContactDistance(), index); const PxU32 group = groups[index]; const bool isStatic = group==FilterGroup::eSTATICS; mMapping[index] = mMBP->addObject(aabb, index, isStatic); } } } void BroadPhaseMBP::setUpdateData(const BroadPhaseUpdateData& updateData) { PX_PROFILE_ZONE("BroadPhaseMBP::setUpdateData", mContextID); // mMBP->setTransientBounds(updateData.getAABBs(), updateData.getContactDistance()); const PxU32 newCapacity = updateData.getCapacity(); if(newCapacity>mCapacity) allocateMappingArray(newCapacity); #if PX_CHECKED // PT: WARNING: this must be done after the allocateMappingArray call if(!BroadPhaseUpdateData::isValid(updateData, *this, false, mContextID)) { PX_CHECK_MSG(false, "Illegal BroadPhaseUpdateData \n"); return; } #endif mGroups = updateData.getGroups(); mFilter 
= &updateData.getFilter(); // ### TODO: handle groups inside MBP // ### TODO: get rid of AABB conversions removeObjects(updateData); addObjects(updateData); updateObjects(updateData); PX_ASSERT(!mCreated.size()); PX_ASSERT(!mDeleted.size()); mMBP->prepareOverlaps(); } void BroadPhaseMBP::update() { #ifdef CHECK_NB_OVERLAPS gNbOverlaps = 0; #endif mMBP->findOverlaps(mGroups, mFilter->getLUT()); #ifdef CHECK_NB_OVERLAPS printf("PPU: %d overlaps\n", gNbOverlaps); #endif } void BroadPhaseMBP::postUpdate() { { PxU32 Nb = mMBP->mNbRegions; const RegionData* PX_RESTRICT regions = mMBP->mRegions.begin(); for(PxU32 i=0;i<Nb;i++) { if(regions[i].mBP) regions[i].mBP->mNbUpdatedBoxes = 0; } } mMBP->finalize(this); } const BroadPhasePair* BroadPhaseMBP::getCreatedPairs(PxU32& nbCreatedPairs) const { nbCreatedPairs = mCreated.size(); return mCreated.begin(); } const BroadPhasePair* BroadPhaseMBP::getDeletedPairs(PxU32& nbDeletedPairs) const { nbDeletedPairs = mDeleted.size(); return mDeleted.begin(); } PxU32 BroadPhaseMBP::getNbOutOfBoundsObjects() const { return mMBP->mOutOfBoundsObjects.size(); } const PxU32* BroadPhaseMBP::getOutOfBoundsObjects() const { return mMBP->mOutOfBoundsObjects.begin(); } static void freeBuffer(PxArray<BroadPhasePair>& buffer) { const PxU32 size = buffer.size(); if(size>DEFAULT_CREATED_DELETED_PAIRS_CAPACITY) { buffer.reset(); buffer.reserve(DEFAULT_CREATED_DELETED_PAIRS_CAPACITY); } else { buffer.clear(); } } void BroadPhaseMBP::freeBuffers() { mMBP->freeBuffers(); freeBuffer(mCreated); freeBuffer(mDeleted); } #if PX_CHECKED bool BroadPhaseMBP::isValid(const BroadPhaseUpdateData& updateData) const { const BpHandle* created = updateData.getCreatedHandles(); if(created) { PxHashSet<BpHandle> set; PxU32 nbObjects = mMBP->mMBP_Objects.size(); const MBP_Object* PX_RESTRICT objects = mMBP->mMBP_Objects.begin(); while(nbObjects--) { if(!(objects->mFlags & MBP_REMOVED)) set.insert(objects->mUserID); objects++; } PxU32 nbToGo = 
updateData.getNumCreatedHandles(); while(nbToGo--) { const BpHandle index = *created++; PX_ASSERT(index<mCapacity); if(set.contains(index)) return false; // This object has been added already } } const BpHandle* updated = updateData.getUpdatedHandles(); if(updated) { PxU32 nbToGo = updateData.getNumUpdatedHandles(); while(nbToGo--) { const BpHandle index = *updated++; PX_ASSERT(index<mCapacity); if(mMapping[index]==PX_INVALID_U32) return false; // This object has been removed already, or never been added } } const BpHandle* removed = updateData.getRemovedHandles(); if(removed) { PxU32 nbToGo = updateData.getNumRemovedHandles(); while(nbToGo--) { const BpHandle index = *removed++; PX_ASSERT(index<mCapacity); if(mMapping[index]==PX_INVALID_U32) return false; // This object has been removed already, or never been added } } return true; } #endif void BroadPhaseMBP::shiftOrigin(const PxVec3& shift, const PxBounds3* boundsArray, const PxReal* contactDistances) { mMBP->shiftOrigin(shift, boundsArray, contactDistances); } PxU32 BroadPhaseMBP::getCurrentNbPairs() const { return mMBP->mPairManager.mNbActivePairs; }
98,031
C++
28.263284
205
0.711204
NVIDIA-Omniverse/PhysX/physx/source/lowlevelaabb/src/BpBroadPhaseMBP.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef BP_BROADPHASE_MBP_H #define BP_BROADPHASE_MBP_H #include "BpBroadPhase.h" #include "BpBroadPhaseMBPCommon.h" #include "foundation/PxArray.h" namespace internalMBP { class MBP; } namespace physx { namespace Bp { class BroadPhaseMBP : public BroadPhase { PX_NOCOPY(BroadPhaseMBP) public: BroadPhaseMBP( PxU32 maxNbRegions, PxU32 maxNbBroadPhaseOverlaps, PxU32 maxNbStaticShapes, PxU32 maxNbDynamicShapes, PxU64 contextID); virtual ~BroadPhaseMBP(); // BroadPhaseBase virtual void getCaps(PxBroadPhaseCaps& caps) const; virtual PxU32 getNbRegions() const; virtual PxU32 getRegions(PxBroadPhaseRegionInfo* userBuffer, PxU32 bufferSize, PxU32 startIndex=0) const; virtual PxU32 addRegion(const PxBroadPhaseRegion& region, bool populateRegion, const PxBounds3* boundsArray, const PxReal* contactDistance); virtual bool removeRegion(PxU32 handle); virtual PxU32 getNbOutOfBoundsObjects() const; virtual const PxU32* getOutOfBoundsObjects() const; //~BroadPhaseBase // BroadPhase virtual PxBroadPhaseType::Enum getType() const PX_OVERRIDE { return PxBroadPhaseType::eMBP; } virtual void release() PX_OVERRIDE { PX_DELETE_THIS; } virtual void update(PxcScratchAllocator* scratchAllocator, const BroadPhaseUpdateData& updateData, physx::PxBaseTask* continuation) PX_OVERRIDE; virtual void preBroadPhase(const Bp::BroadPhaseUpdateData&) PX_OVERRIDE {} virtual void fetchBroadPhaseResults() PX_OVERRIDE {} virtual const BroadPhasePair* getCreatedPairs(PxU32&) const PX_OVERRIDE; virtual const BroadPhasePair* getDeletedPairs(PxU32&) const PX_OVERRIDE; virtual void freeBuffers() PX_OVERRIDE; virtual void shiftOrigin(const PxVec3& shift, const PxBounds3* boundsArray, const PxReal* contactDistances) PX_OVERRIDE; #if PX_CHECKED virtual bool isValid(const BroadPhaseUpdateData& updateData) const PX_OVERRIDE; #endif //~BroadPhase internalMBP::MBP* mMBP; // PT: TODO: aggregate MBP_Handle* mMapping; PxU32 mCapacity; PxArray<BroadPhasePair> mCreated; PxArray<BroadPhasePair> mDeleted; const 
Bp::FilterGroup::Enum*mGroups; const BpFilter* mFilter; const PxU64 mContextID; void setUpdateData(const BroadPhaseUpdateData& updateData); void addObjects(const BroadPhaseUpdateData& updateData); void removeObjects(const BroadPhaseUpdateData& updateData); void updateObjects(const BroadPhaseUpdateData& updateData); void update(); void postUpdate(); void allocateMappingArray(PxU32 newCapacity); PxU32 getCurrentNbPairs() const; }; } //namespace Bp } //namespace physx #endif // BP_BROADPHASE_MBP_H
4,579
C
40.636363
151
0.727233
NVIDIA-Omniverse/PhysX/physx/source/lowlevelaabb/src/BpBroadPhaseSap.cpp
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//  * Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//  * Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//  * Neither the name of NVIDIA CORPORATION nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.

#include "foundation/PxMath.h"
#include "common/PxProfileZone.h"
#include "CmRadixSort.h"
#include "PxcScratchAllocator.h"
#include "PxSceneDesc.h"
#include "BpBroadPhaseSap.h"
#include "BpBroadPhaseSapAux.h"
#include "foundation/PxAllocator.h"

//#include <stdio.h>

// PT: reactivated this. Ran UTs with SAP as default BP and nothing broke.
#define TEST_DELETED_PAIRS

namespace physx
{
namespace Bp
{

#define DEFAULT_DATA_ARRAY_CAPACITY 1024
#define DEFAULT_CREATEDDELETED_PAIR_ARRAY_CAPACITY 64
#define DEFAULT_CREATEDDELETED1AXIS_CAPACITY 8192

// Small helper: uses an on-stack buffer of `stackLimit` elements when the
// requested size fits, otherwise falls back to a heap allocation. The heap
// buffer (if any) is freed in the destructor (RAII).
template<typename T, PxU32 stackLimit>
class TmpMem
{
public:
	PX_FORCE_INLINE TmpMem(PxU32 size):
		mPtr(size<=stackLimit?mStackBuf : PX_ALLOCATE(T, size, "char"))
	{
	}

	PX_FORCE_INLINE ~TmpMem()
	{
		// Only free when we actually allocated from the heap.
		if(mPtr!=mStackBuf)
			PX_FREE(mPtr);
	}

	PX_FORCE_INLINE T& operator*() const
	{
		return *mPtr;
	}

	PX_FORCE_INLINE T* operator->() const
	{
		return mPtr;
	}

	PX_FORCE_INLINE T& operator[](PxU32 index)
	{
		return mPtr[index];
	}

	T* getBase()
	{
		return mPtr;
	}

private:
	T mStackBuf[stackLimit];
	T* mPtr;
};

// Constructor: sizes and allocates the per-axis sorted-endpoint arrays,
// per-box endpoint-index arrays, sentinel endpoints, and the free-list used
// during updates. All box slots start out invalid (BP_INVALID_BP_HANDLE).
BroadPhaseSap::BroadPhaseSap(
	const PxU32 maxNbBroadPhaseOverlaps,
	const PxU32 maxNbStaticShapes,
	const PxU32 maxNbDynamicShapes,
	PxU64 contextID) :
	mScratchAllocator	(NULL),
	mContextID			(contextID)
{
	for(PxU32 i=0;i<3;i++)
		mBatchUpdateTasks[i].setContextId(contextID);

	//Boxes
	mBoxesSize=0;
	mBoxesSizePrev=0;
	// Round capacity up to a multiple of 32.
	mBoxesCapacity = (((maxNbStaticShapes + maxNbDynamicShapes) + 31) & ~31);
	mBoxEndPts[0] = reinterpret_cast<SapBox1D*>(PX_ALLOC(ALIGN_SIZE_16((sizeof(SapBox1D)*mBoxesCapacity)), "SapBox1D"));
	mBoxEndPts[1] = reinterpret_cast<SapBox1D*>(PX_ALLOC(ALIGN_SIZE_16((sizeof(SapBox1D)*mBoxesCapacity)), "SapBox1D"));
	mBoxEndPts[2] = reinterpret_cast<SapBox1D*>(PX_ALLOC(ALIGN_SIZE_16((sizeof(SapBox1D)*mBoxesCapacity)), "SapBox1D"));
	for(PxU32 i=0; i<mBoxesCapacity;i++)
	{
		mBoxEndPts[0][i].mMinMax[0]=BP_INVALID_BP_HANDLE;
		mBoxEndPts[0][i].mMinMax[1]=BP_INVALID_BP_HANDLE;
		mBoxEndPts[1][i].mMinMax[0]=BP_INVALID_BP_HANDLE;
		mBoxEndPts[1][i].mMinMax[1]=BP_INVALID_BP_HANDLE;
		mBoxEndPts[2][i].mMinMax[0]=BP_INVALID_BP_HANDLE;
		mBoxEndPts[2][i].mMinMax[1]=BP_INVALID_BP_HANDLE;
	}

	//End points: two endpoints (min/max) per box, plus the two sentinels.
	mEndPointsCapacity = mBoxesCapacity*2 + NUM_SENTINELS;

	mBoxesUpdated = reinterpret_cast<PxU8*>(PX_ALLOC(ALIGN_SIZE_16((sizeof(PxU8)*mBoxesCapacity)), "BoxesUpdated"));
	mSortedUpdateElements = reinterpret_cast<BpHandle*>(PX_ALLOC(ALIGN_SIZE_16((sizeof(BpHandle)*mEndPointsCapacity)), "SortedUpdateElements"));
	mActivityPockets = reinterpret_cast<BroadPhaseActivityPocket*>(PX_ALLOC(ALIGN_SIZE_16((sizeof(BroadPhaseActivityPocket)*mEndPointsCapacity)), "BroadPhaseActivityPocket"));

	mEndPointValues[0] = reinterpret_cast<ValType*>(PX_ALLOC(ALIGN_SIZE_16((sizeof(ValType)*(mEndPointsCapacity))), "ValType"));
	mEndPointValues[1] = reinterpret_cast<ValType*>(PX_ALLOC(ALIGN_SIZE_16((sizeof(ValType)*(mEndPointsCapacity))), "ValType"));
	mEndPointValues[2] = reinterpret_cast<ValType*>(PX_ALLOC(ALIGN_SIZE_16((sizeof(ValType)*(mEndPointsCapacity))), "ValType"));
	mEndPointDatas[0] = reinterpret_cast<BpHandle*>(PX_ALLOC(ALIGN_SIZE_16((sizeof(BpHandle)*(mEndPointsCapacity))), "BpHandle"));
	mEndPointDatas[1] = reinterpret_cast<BpHandle*>(PX_ALLOC(ALIGN_SIZE_16((sizeof(BpHandle)*(mEndPointsCapacity))), "BpHandle"));
	mEndPointDatas[2] = reinterpret_cast<BpHandle*>(PX_ALLOC(ALIGN_SIZE_16((sizeof(BpHandle)*(mEndPointsCapacity))), "BpHandle"));

	// Initialize sentinels (slots 0 and 1 of each axis bracket the sorted lists).
	setMinSentinel(mEndPointValues[0][0],mEndPointDatas[0][0]);
	setMaxSentinel(mEndPointValues[0][1],mEndPointDatas[0][1]);
	setMinSentinel(mEndPointValues[1][0],mEndPointDatas[1][0]);
	setMaxSentinel(mEndPointValues[1][1],mEndPointDatas[1][1]);
	setMinSentinel(mEndPointValues[2][0],mEndPointDatas[2][0]);
	setMaxSentinel(mEndPointValues[2][1],mEndPointDatas[2][1]);

	// Doubly-linked free list over endpoint slots; last entry points to itself.
	mListNext = reinterpret_cast<BpHandle*>(PX_ALLOC(ALIGN_SIZE_16((sizeof(BpHandle)*mEndPointsCapacity)), "NextList"));
	mListPrev = reinterpret_cast<BpHandle*>(PX_ALLOC(ALIGN_SIZE_16((sizeof(BpHandle)*mEndPointsCapacity)), "PrevList"));

	for(PxU32 a = 1; a < mEndPointsCapacity; ++a)
	{
		mListNext[a-1] = BpHandle(a);
		mListPrev[a] = BpHandle(a-1);
	}
	mListNext[mEndPointsCapacity-1] = BpHandle(mEndPointsCapacity-1);
	mListPrev[0] = 0;

	mDefaultPairsCapacity = PxMax(maxNbBroadPhaseOverlaps, PxU32(DEFAULT_CREATEDDELETED_PAIR_ARRAY_CAPACITY));

	mPairs.init(mDefaultPairsCapacity);

	mBatchUpdateTasks[2].set(this,2);
	mBatchUpdateTasks[1].set(this,1);
	mBatchUpdateTasks[0].set(this,0);
	mBatchUpdateTasks[2].setPairs(NULL, 0);
	mBatchUpdateTasks[1].setPairs(NULL, 0);
	mBatchUpdateTasks[0].setPairs(NULL, 0);

	//Initialise data array.
	mData = NULL;
	mDataSize = 0;
	mDataCapacity = 0;

	//Initialise pairs arrays.
	mCreatedPairsArray = NULL;
	mCreatedPairsCapacity = 0;
	mCreatedPairsSize = 0;
	mDeletedPairsArray = NULL;
	mDeletedPairsCapacity = 0;
	mDeletedPairsSize = 0;
	mActualDeletedPairSize = 0;

	mFilter = NULL;
}

// Destructor: frees everything allocated in the constructor. The pair arrays
// (mCreatedPairsArray etc.) come from the scratch allocator and are released
// in freeBuffers(), so only the pointers are cleared here.
BroadPhaseSap::~BroadPhaseSap()
{
	PX_FREE(mBoxEndPts[0]);
	PX_FREE(mBoxEndPts[1]);
	PX_FREE(mBoxEndPts[2]);

	PX_FREE(mEndPointValues[0]);
	PX_FREE(mEndPointValues[1]);
	PX_FREE(mEndPointValues[2]);

	PX_FREE(mEndPointDatas[0]);
	PX_FREE(mEndPointDatas[1]);
	PX_FREE(mEndPointDatas[2]);

	PX_FREE(mListNext);
	PX_FREE(mListPrev);

	PX_FREE(mSortedUpdateElements);
	PX_FREE(mActivityPockets);
	PX_FREE(mBoxesUpdated);

	mPairs.release();

	mBatchUpdateTasks[0].setPairs(NULL, 0);
	mBatchUpdateTasks[1].setPairs(NULL, 0);
	mBatchUpdateTasks[2].setPairs(NULL, 0);

	mData = NULL;
	mCreatedPairsArray = NULL;
	mDeletedPairsArray = NULL;
}

// Self-destruction entry point (the object was placement-constructed by the
// factory; see PX_FREE_THIS).
void BroadPhaseSap::release()
{
	this->~BroadPhaseSap();
	PX_FREE_THIS;
}

// (Re)allocates the per-update transient buffers from the scratch allocator.
// Called once per update, after setUpdateData() succeeds; the matching
// releases happen in freeBuffers().
void BroadPhaseSap::resizeBuffers()
{
	const PxU32 defaultPairsCapacity = mDefaultPairsCapacity;

	mCreatedPairsArray = reinterpret_cast<BroadPhasePair*>(mScratchAllocator->alloc(sizeof(BroadPhasePair)*defaultPairsCapacity, true));
	mCreatedPairsCapacity = defaultPairsCapacity;
	mCreatedPairsSize = 0;

	mDeletedPairsArray = reinterpret_cast<BroadPhasePair*>(mScratchAllocator->alloc(sizeof(BroadPhasePair)*defaultPairsCapacity, true));
	mDeletedPairsCapacity = defaultPairsCapacity;
	mDeletedPairsSize = 0;

	mData = reinterpret_cast<BpHandle*>(mScratchAllocator->alloc(sizeof(BpHandle)*defaultPairsCapacity, true));
	mDataCapacity = defaultPairsCapacity;
	mDataSize = 0;

	mBatchUpdateTasks[0].setPairs(reinterpret_cast<BroadPhasePair*>(mScratchAllocator->alloc(sizeof(BroadPhasePair)*defaultPairsCapacity, true)), defaultPairsCapacity);
	mBatchUpdateTasks[0].setNumPairs(0);
	mBatchUpdateTasks[1].setPairs(reinterpret_cast<BroadPhasePair*>(mScratchAllocator->alloc(sizeof(BroadPhasePair)*defaultPairsCapacity, true)), defaultPairsCapacity);
	mBatchUpdateTasks[1].setNumPairs(0);
	mBatchUpdateTasks[2].setPairs(reinterpret_cast<BroadPhasePair*>(mScratchAllocator->alloc(sizeof(BroadPhasePair)*defaultPairsCapacity, true)), defaultPairsCapacity);
	mBatchUpdateTasks[2].setNumPairs(0);
}

// Removes each pair in `deletedPairsList` from the pair manager. In debug
// builds the removal is asserted to succeed.
static void DeletePairsLists(const PxU32 numActualDeletedPairs, const BroadPhasePair* deletedPairsList, SapPairManager& pairManager)
{
	// #### try batch removal here
	for(PxU32 i=0;i<numActualDeletedPairs;i++)
	{
		const BpHandle id0 = deletedPairsList[i].mVolA;
		const BpHandle id1 = deletedPairsList[i].mVolB;
#if PX_DEBUG
		const bool Status = pairManager.RemovePair(id0, id1);
		PX_ASSERT(Status);
#else
		pairManager.RemovePair(id0, id1);
#endif
	}
}

// Returns the per-update scratch buffers allocated in resizeBuffers() and
// shrinks the pair manager. Safe to call when the buffers are already NULL.
void BroadPhaseSap::freeBuffers()
{
	// PT: was: void BroadPhaseSap::deletePairs()
#ifndef TEST_DELETED_PAIRS
	DeletePairsLists(mActualDeletedPairSize, mDeletedPairsArray, mPairs);
#endif

	if(mCreatedPairsArray)
		mScratchAllocator->free(mCreatedPairsArray);
	mCreatedPairsArray = NULL;
	mCreatedPairsSize = 0;
	mCreatedPairsCapacity = 0;

	if(mDeletedPairsArray)
		mScratchAllocator->free(mDeletedPairsArray);
	mDeletedPairsArray = NULL;
	mDeletedPairsSize = 0;
	mDeletedPairsCapacity = 0;
	mActualDeletedPairSize = 0;

	if(mData)
		mScratchAllocator->free(mData);
	mData = NULL;
	mDataSize = 0;
	mDataCapacity = 0;

	if(mBatchUpdateTasks[0].getPairs())
		mScratchAllocator->free(mBatchUpdateTasks[0].getPairs());
	mBatchUpdateTasks[0].setPairs(NULL, 0);
	mBatchUpdateTasks[0].setNumPairs(0);

	if(mBatchUpdateTasks[1].getPairs())
		mScratchAllocator->free(mBatchUpdateTasks[1].getPairs());
	mBatchUpdateTasks[1].setPairs(NULL, 0);
	mBatchUpdateTasks[1].setNumPairs(0);

	if(mBatchUpdateTasks[2].getPairs())
		mScratchAllocator->free(mBatchUpdateTasks[2].getPairs());
	mBatchUpdateTasks[2].setPairs(NULL, 0);
	mBatchUpdateTasks[2].setNumPairs(0);

	//Shrink pair manager buffers if they are larger than needed but only let them shrink to a minimum size.
	mPairs.shrinkMemory();
}

// Shifts three encoded endpoint values (one per axis) by `shift`. The encoded
// values are decoded to floats, shifted, then re-encoded — preserving the
// min/max tagging in the low bit (max => |1, min => rounded up and &~1).
PX_FORCE_INLINE static void shiftCoord3(	const ValType val0, const BpHandle handle0,
											const ValType val1, const BpHandle handle1,
											const ValType val2, const BpHandle handle2,
											const PxF32* shift,
											ValType& oVal0, ValType& oVal1, ValType& oVal2)
{
	PX_ASSERT(!isSentinel(handle0));
	PX_ASSERT(!isSentinel(handle1));
	PX_ASSERT(!isSentinel(handle2));
	PxF32 fl0, fl1, fl2;
	// Type-pun through PxUnionCast so the decoded integer pattern can be
	// manipulated as a float.
	ValType* PX_RESTRICT bpVal0 = PxUnionCast<ValType*, PxF32*>(&fl0);
	ValType* PX_RESTRICT bpVal1 = PxUnionCast<ValType*, PxF32*>(&fl1);
	ValType* PX_RESTRICT bpVal2 = PxUnionCast<ValType*, PxF32*>(&fl2);
	*bpVal0 = decodeFloat(val0);
	*bpVal1 = decodeFloat(val1);
	*bpVal2 = decodeFloat(val2);
	fl0 -= shift[0];
	fl1 -= shift[1];
	fl2 -= shift[2];
	oVal0 = (isMax(handle0)) ? (IntegerAABB::encodeFloatMax(*bpVal0) | 1) : ((IntegerAABB::encodeFloatMin(*bpVal0) + 1) & ~1);
	oVal1 = (isMax(handle1)) ? (IntegerAABB::encodeFloatMax(*bpVal1) | 1) : ((IntegerAABB::encodeFloatMin(*bpVal1) + 1) & ~1);
	oVal2 = (isMax(handle2)) ? (IntegerAABB::encodeFloatMax(*bpVal2) | 1) : ((IntegerAABB::encodeFloatMin(*bpVal2) + 1) & ~1);
}

// After a lossy shift, ensures currVal has not dropped below prevVal so the
// sorted endpoint order is preserved.
PX_FORCE_INLINE static void testPostShiftOrder(const ValType prevVal, ValType& currVal, const BpHandle prevIsMax, const BpHandle currIsMax)
{
	if(currVal < prevVal)
	{
		//The order has been broken by the lossy shift. 
		//Correct currVal so that it is greater than prevVal.
		//If currVal is a box max then ensure that the box is of finite extent.
		const ValType shiftCorrection = (prevIsMax==currIsMax) ? ValType(0) : ValType(1);
		currVal = prevVal + shiftCorrection;
	}
}

void BroadPhaseSap::shiftOrigin(const PxVec3& shift, const PxBounds3* /*boundsArray*/, const PxReal* /*contactDistances*/)
{
	//
	// Note: shifting the bounds does not necessarily preserve the order of the broadphase interval endpoints. The encoding of the float bounds is a lossy
	//       operation, thus it is not possible to get the original float values back and shift them. The only goal of this method is to shift the endpoints
	//       such that the order is preserved. The new intervals might no reflect the correct bounds! Since all bounds have been marked dirty, they will get
	//       recomputed in the next frame anyway. This method makes sure that the next frame update can start from a valid configuration that is close to
	//       the correct one and does not require too many swaps.
	//

	if(0==mBoxesSize)
	{
		return;
	}

	//
	// Note: processing all the axis at once improved performance on XBox 360 and PS3 because it allows to compensate for stalls
	//

	const PxF32 shiftAxis[3] = { shift.x, shift.y, shift.z };

	const BpHandle* PX_RESTRICT epData0 = mEndPointDatas[0];
	ValType* PX_RESTRICT epValues0 = mEndPointValues[0];
	const BpHandle* PX_RESTRICT epData1 = mEndPointDatas[1];
	ValType* PX_RESTRICT epValues1 = mEndPointValues[1];
	const BpHandle* PX_RESTRICT epData2 = mEndPointDatas[2];
	ValType* PX_RESTRICT epValues2 = mEndPointValues[2];

	//Shift the first value in the array of sorted values.
	{
		//Shifted min (first element must be a min by definition).
		shiftCoord3(epValues0[1], epData0[1], epValues1[1], epData1[1], epValues2[1], epData2[1], shiftAxis,
					epValues0[1], epValues1[1], epValues2[1]);
		PX_ASSERT(!isMax(epData0[1]));
		PX_ASSERT(!isMax(epData1[1]));
		PX_ASSERT(!isMax(epData2[1]));
	}

	//Shift the remainder, repairing any order violations introduced by the
	//lossy re-encoding as we go.
	ValType prevVal0 = epValues0[1];
	BpHandle prevIsMax0 = isMax(epData0[1]);
	ValType prevVal1 = epValues1[1];
	BpHandle prevIsMax1 = isMax(epData1[1]);
	ValType prevVal2 = epValues2[1];
	BpHandle prevIsMax2 = isMax(epData2[1]);
	for(PxU32 i=2; i <= mBoxesSize*2; i++)
	{
		const BpHandle handle0 = epData0[i];
		const BpHandle handle1 = epData1[i];
		const BpHandle handle2 = epData2[i];

		PX_ASSERT(!isSentinel(handle0));
		PX_ASSERT(!isSentinel(handle1));
		PX_ASSERT(!isSentinel(handle2));

		//Get the relevant prev and curr values after the shift.
		const BpHandle currIsMax0 = isMax(epData0[i]);
		const BpHandle currIsMax1 = isMax(epData1[i]);
		const BpHandle currIsMax2 = isMax(epData2[i]);
		ValType currVal0, currVal1, currVal2;
		shiftCoord3(epValues0[i], handle0, epValues1[i], handle1, epValues2[i], handle2, shiftAxis, currVal0, currVal1, currVal2);

		//Test if the order has been preserved by the lossy shift.
		testPostShiftOrder(prevVal0, currVal0, prevIsMax0, currIsMax0);
		testPostShiftOrder(prevVal1, currVal1, prevIsMax1, currIsMax1);
		testPostShiftOrder(prevVal2, currVal2, prevIsMax2, currIsMax2);

		//Update the previous and current values.
		prevIsMax0 = currIsMax0;
		prevVal0 = currVal0;
		prevIsMax1 = currIsMax1;
		prevVal1 = currVal1;
		prevIsMax2 = currIsMax2;
		prevVal2 = currVal2;
		epValues0[i] = currVal0;
		epValues1[i] = currVal1;
		epValues2[i] = currVal2;
	}

	PX_ASSERT(isSelfOrdered());
}

#if PX_CHECKED
// Validates the incoming update data against the current broadphase state:
// created handles must not already be present, updated/removed handles must
// be in range and currently present.
bool BroadPhaseSap::isValid(const BroadPhaseUpdateData& updateData) const
{
	//Test that the created bounds haven't been added already (without first being removed).
	const BpHandle* created=updateData.getCreatedHandles();
	const PxU32 numCreated=updateData.getNumCreatedHandles();
	for(PxU32 i=0;i<numCreated;i++)
	{
		const BpHandle id=created[i];

		//If id >=mBoxesCapacity then we need to resize to add this id, meaning that the id must be new.
		if(id<mBoxesCapacity)
		{
			for(PxU32 j=0;j<3;j++)
			{
				const SapBox1D& box1d=mBoxEndPts[j][id];
				if(box1d.mMinMax[0] != BP_INVALID_BP_HANDLE && box1d.mMinMax[0] != PX_REMOVED_BP_HANDLE)
					return false;	//This box has been added already but without being removed.
				if(box1d.mMinMax[1] != BP_INVALID_BP_HANDLE && box1d.mMinMax[1] != PX_REMOVED_BP_HANDLE)
					return false;	//This box has been added already but without being removed.
			}
		}
	}

	//Test that the updated bounds have valid ids.
	const BpHandle* updated=updateData.getUpdatedHandles();
	const PxU32 numUpdated=updateData.getNumUpdatedHandles();
	for(PxU32 i=0;i<numUpdated;i++)
	{
		const BpHandle id = updated[i];
		if(id >= mBoxesCapacity)
			return false;
	}

	//Test that the updated bounds have been been added without being removed.
	for(PxU32 i=0;i<numUpdated;i++)
	{
		const BpHandle id = updated[i];
		for(PxU32 j=0;j<3;j++)
		{
			const SapBox1D& box1d=mBoxEndPts[j][id];
			if(BP_INVALID_BP_HANDLE == box1d.mMinMax[0] || PX_REMOVED_BP_HANDLE == box1d.mMinMax[0])
				return false;	//This box has either not been added or has been removed
			if(BP_INVALID_BP_HANDLE == box1d.mMinMax[1] || PX_REMOVED_BP_HANDLE == box1d.mMinMax[1])
				return false;	//This box has either not been added or has been removed
		}
	}

	//Test that the removed bounds have valid ids.
	const BpHandle* removed=updateData.getRemovedHandles();
	const PxU32 numRemoved=updateData.getNumRemovedHandles();
	for(PxU32 i=0;i<numRemoved;i++)
	{
		const BpHandle id = removed[i];
		if(id >= mBoxesCapacity)
			return false;
	}

	//Test that the removed bounds have already been added and haven't been removed.
	for(PxU32 i=0;i<numRemoved;i++)
	{
		const BpHandle id = removed[i];
		for(PxU32 j=0;j<3;j++)
		{
			const SapBox1D& box1d=mBoxEndPts[j][id];
			if(BP_INVALID_BP_HANDLE == box1d.mMinMax[0] || PX_REMOVED_BP_HANDLE == box1d.mMinMax[0])
				return false;	//This box has either not been added or has been removed
			if(BP_INVALID_BP_HANDLE == box1d.mMinMax[1] || PX_REMOVED_BP_HANDLE == box1d.mMinMax[1])
				return false;	//This box has either not been added or has been removed
		}
	}

	return true;
}
#endif

// Main per-frame entry point: validates/copies the update data, grabs the
// scratch allocator, allocates transient buffers, then runs the axis sweeps
// (update) and pair-list generation (postUpdate).
void BroadPhaseSap::update(PxcScratchAllocator* scratchAllocator, const BroadPhaseUpdateData& updateData, PxBaseTask* /*continuation*/)
{
	PX_CHECK_AND_RETURN(scratchAllocator, "BroadPhaseSap::update - scratchAllocator must be non-NULL \n");

	if(setUpdateData(updateData))
	{
		mScratchAllocator = scratchAllocator;

		resizeBuffers();

		update();
		postUpdate();
	}
}

// Copies the update-data pointers/sizes into members and grows the per-box
// and per-endpoint arrays if the incoming capacity exceeds the current one.
// Returns false (checked builds only) when the update data is invalid.
bool BroadPhaseSap::setUpdateData(const BroadPhaseUpdateData& updateData)
{
	PX_PROFILE_ZONE("BroadPhaseSap::setUpdateData", mContextID);

	PX_ASSERT(0==mCreatedPairsSize);
	PX_ASSERT(0==mDeletedPairsSize);

#if PX_CHECKED
	if(!BroadPhaseUpdateData::isValid(updateData, *this, false, mContextID))
	{
		PX_CHECK_MSG(false, "Illegal BroadPhaseUpdateData \n");
		mCreated = NULL;
		mCreatedSize = 0;
		mUpdated = NULL;
		mUpdatedSize = 0;
		mRemoved = NULL;
		mRemovedSize = 0;
		mBoxBoundsMinMax = updateData.getAABBs();
		mBoxGroups = updateData.getGroups();
		return false;
	}
#endif

	//Copy across the data ptrs and sizes.
	mCreated = updateData.getCreatedHandles();
	mCreatedSize = updateData.getNumCreatedHandles();
	mUpdated = updateData.getUpdatedHandles();
	mUpdatedSize = updateData.getNumUpdatedHandles();
	mRemoved = updateData.getRemovedHandles();
	mRemovedSize = updateData.getNumRemovedHandles();
	mBoxBoundsMinMax = updateData.getAABBs();
	mBoxGroups = updateData.getGroups();
	mFilter = &updateData.getFilter();
	mContactDistance = updateData.getContactDistance();

	//Do we need more memory to store the positions of each box min/max in the arrays of sorted boxes min/max?
	if(updateData.getCapacity() > mBoxesCapacity)
	{
		const PxU32 oldBoxesCapacity=mBoxesCapacity;
		const PxU32 newBoxesCapacity=updateData.getCapacity();
		SapBox1D* newBoxEndPts0 = reinterpret_cast<SapBox1D*>(PX_ALLOC(ALIGN_SIZE_16((sizeof(SapBox1D)*newBoxesCapacity)), "SapBox1D"));
		SapBox1D* newBoxEndPts1 = reinterpret_cast<SapBox1D*>(PX_ALLOC(ALIGN_SIZE_16((sizeof(SapBox1D)*newBoxesCapacity)), "SapBox1D"));
		SapBox1D* newBoxEndPts2 = reinterpret_cast<SapBox1D*>(PX_ALLOC(ALIGN_SIZE_16((sizeof(SapBox1D)*newBoxesCapacity)), "SapBox1D"));
		PxMemCopy(newBoxEndPts0, mBoxEndPts[0], sizeof(SapBox1D)*oldBoxesCapacity);
		PxMemCopy(newBoxEndPts1, mBoxEndPts[1], sizeof(SapBox1D)*oldBoxesCapacity);
		PxMemCopy(newBoxEndPts2, mBoxEndPts[2], sizeof(SapBox1D)*oldBoxesCapacity);
		// Mark the freshly-added slots as invalid.
		for(PxU32 i=oldBoxesCapacity;i<newBoxesCapacity;i++)
		{
			newBoxEndPts0[i].mMinMax[0]=BP_INVALID_BP_HANDLE;
			newBoxEndPts0[i].mMinMax[1]=BP_INVALID_BP_HANDLE;
			newBoxEndPts1[i].mMinMax[0]=BP_INVALID_BP_HANDLE;
			newBoxEndPts1[i].mMinMax[1]=BP_INVALID_BP_HANDLE;
			newBoxEndPts2[i].mMinMax[0]=BP_INVALID_BP_HANDLE;
			newBoxEndPts2[i].mMinMax[1]=BP_INVALID_BP_HANDLE;
		}
		PX_FREE(mBoxEndPts[0]);
		PX_FREE(mBoxEndPts[1]);
		PX_FREE(mBoxEndPts[2]);
		mBoxEndPts[0] = newBoxEndPts0;
		mBoxEndPts[1] = newBoxEndPts1;
		mBoxEndPts[2] = newBoxEndPts2;
		mBoxesCapacity = newBoxesCapacity;

		PX_FREE(mBoxesUpdated);
		mBoxesUpdated = reinterpret_cast<PxU8*>(PX_ALLOC(ALIGN_SIZE_16((sizeof(PxU8))*newBoxesCapacity), "Updated Boxes"));
	}

	//Do we need more memory for the array of sorted boxes?
	if(2*(mBoxesSize + mCreatedSize) + NUM_SENTINELS > mEndPointsCapacity)
	{
		const PxU32 newEndPointsCapacity = 2*(mBoxesSize + mCreatedSize) + NUM_SENTINELS;

		ValType* newEndPointValuesX = reinterpret_cast<ValType*>(PX_ALLOC(ALIGN_SIZE_16((sizeof(ValType)*(newEndPointsCapacity))), "BPValType"));
		ValType* newEndPointValuesY = reinterpret_cast<ValType*>(PX_ALLOC(ALIGN_SIZE_16((sizeof(ValType)*(newEndPointsCapacity))), "BPValType"));
		ValType* newEndPointValuesZ = reinterpret_cast<ValType*>(PX_ALLOC(ALIGN_SIZE_16((sizeof(ValType)*(newEndPointsCapacity))), "BPValType"));
		BpHandle* newEndPointDatasX = reinterpret_cast<BpHandle*>(PX_ALLOC(ALIGN_SIZE_16((sizeof(BpHandle)*(newEndPointsCapacity))), "BpHandle"));
		BpHandle* newEndPointDatasY = reinterpret_cast<BpHandle*>(PX_ALLOC(ALIGN_SIZE_16((sizeof(BpHandle)*(newEndPointsCapacity))), "BpHandle"));
		BpHandle* newEndPointDatasZ = reinterpret_cast<BpHandle*>(PX_ALLOC(ALIGN_SIZE_16((sizeof(BpHandle)*(newEndPointsCapacity))), "BpHandle"));

		// Rebuild the endpoint free list at the new capacity.
		PX_FREE(mListNext);
		PX_FREE(mListPrev);
		mListNext = reinterpret_cast<BpHandle*>(PX_ALLOC(ALIGN_SIZE_16((sizeof(BpHandle)*newEndPointsCapacity)), "NextList"));
		mListPrev = reinterpret_cast<BpHandle*>(PX_ALLOC(ALIGN_SIZE_16((sizeof(BpHandle)*newEndPointsCapacity)), "Prev"));

		for(PxU32 a = 1; a < newEndPointsCapacity; ++a)
		{
			mListNext[a-1] = BpHandle(a);
			mListPrev[a] = BpHandle(a-1);
		}
		mListNext[newEndPointsCapacity-1] = BpHandle(newEndPointsCapacity-1);
		mListPrev[0] = 0;

		PxMemCopy(newEndPointValuesX, mEndPointValues[0], sizeof(ValType)*(mBoxesSize*2+NUM_SENTINELS));
		PxMemCopy(newEndPointValuesY, mEndPointValues[1], sizeof(ValType)*(mBoxesSize*2+NUM_SENTINELS));
		PxMemCopy(newEndPointValuesZ, mEndPointValues[2], sizeof(ValType)*(mBoxesSize*2+NUM_SENTINELS));
		PxMemCopy(newEndPointDatasX, mEndPointDatas[0], sizeof(BpHandle)*(mBoxesSize*2+NUM_SENTINELS));
		PxMemCopy(newEndPointDatasY, mEndPointDatas[1], sizeof(BpHandle)*(mBoxesSize*2+NUM_SENTINELS));
		PxMemCopy(newEndPointDatasZ, mEndPointDatas[2], sizeof(BpHandle)*(mBoxesSize*2+NUM_SENTINELS));
		PX_FREE(mEndPointValues[0]);
		PX_FREE(mEndPointValues[1]);
		PX_FREE(mEndPointValues[2]);
		PX_FREE(mEndPointDatas[0]);
		PX_FREE(mEndPointDatas[1]);
		PX_FREE(mEndPointDatas[2]);
		mEndPointValues[0] = newEndPointValuesX;
		mEndPointValues[1] = newEndPointValuesY;
		mEndPointValues[2] = newEndPointValuesZ;
		mEndPointDatas[0] = newEndPointDatasX;
		mEndPointDatas[1] = newEndPointDatasY;
		mEndPointDatas[2] = newEndPointDatasZ;
		mEndPointsCapacity = newEndPointsCapacity;

		PX_FREE(mSortedUpdateElements);
		PX_FREE(mActivityPockets);
		mSortedUpdateElements = reinterpret_cast<BpHandle*>(PX_ALLOC(ALIGN_SIZE_16((sizeof(BpHandle)*newEndPointsCapacity)), "SortedUpdateElements"));
		mActivityPockets = reinterpret_cast<BroadPhaseActivityPocket*>(PX_ALLOC(ALIGN_SIZE_16((sizeof(BroadPhaseActivityPocket)*newEndPointsCapacity)), "BroadPhaseActivityPocket"));
	}

	// Mark which boxes were updated this frame.
	PxMemZero(mBoxesUpdated, sizeof(PxU8) * (mBoxesCapacity));

	for(PxU32 a=0;a<mUpdatedSize;a++)
	{
		const PxU32 handle=mUpdated[a];
		mBoxesUpdated[handle] = 1;
	}

	//Update the size of the sorted boxes arrays.
	PX_ASSERT(mBoxesSize==mBoxesSizePrev);
	mBoxesSize += mCreatedSize;
	PX_ASSERT(2*mBoxesSize+NUM_SENTINELS <= mEndPointsCapacity);

	return true;
}

// Second phase of the update: merges the per-axis add/remove pair events into
// the pair manager, inserts newly-created boxes (batchCreate), and computes
// the final created/deleted pair lists for the caller.
void BroadPhaseSap::postUpdate()
{
	PX_PROFILE_ZONE("BroadPhase.SapPostUpdate", mContextID);

	DataArray da(mData, mDataSize, mDataCapacity);

	for(PxU32 i=0;i<3;i++)
	{
		const PxU32 numPairs=mBatchUpdateTasks[i].getPairsSize();
		const BroadPhasePair* PX_RESTRICT pairs=mBatchUpdateTasks[i].getPairs();
		for(PxU32 j=0;j<numPairs;j++)
		{
			const BroadPhasePair& pair=pairs[j];
			const BpHandle volA=pair.mVolA;
			const BpHandle volB=pair.mVolB;
			// The axis sweeps encode add-vs-remove in the pair's handle order.
			if(volA > volB)
				addPair(volA, volB, mScratchAllocator, mPairs, da);
			else
				removePair(volA, volB, mScratchAllocator, mPairs, da);
		}
	}

	mData = da.mData;
	mDataSize = da.mSize;
	mDataCapacity = da.mCapacity;

	batchCreate();

	//Compute the lists of created and deleted overlap pairs.
	ComputeCreatedDeletedPairsLists(
		mBoxGroups,
		mData,mDataSize,
		mScratchAllocator,
		mCreatedPairsArray,mCreatedPairsSize,mCreatedPairsCapacity,
		mDeletedPairsArray,mDeletedPairsSize,mDeletedPairsCapacity,
		mActualDeletedPairSize,
		mPairs);

#ifdef TEST_DELETED_PAIRS
	// PT: TODO: why did we move this to another place?
	DeletePairsLists(mActualDeletedPairSize, mDeletedPairsArray, mPairs);
#endif

	PX_ASSERT(isSelfConsistent());

	mBoxesSizePrev=mBoxesSize;
}

// Task entry point: sweeps one axis and records the pair add/remove events.
void BroadPhaseBatchUpdateWorkTask::runInternal()
{
	mPairsSize=0;
	mSap->batchUpdate(mAxis, mPairs, mPairsSize, mPairsCapacity);
}

// First phase of the update: removes deleted boxes, then runs the three
// per-axis batch updates (here executed inline, one after another).
void BroadPhaseSap::update()
{
	PX_PROFILE_ZONE("BroadPhase.SapUpdate", mContextID);

	batchRemove();

	//Check that the overlap pairs per axis have been reset.
	PX_ASSERT(0==mBatchUpdateTasks[0].getPairsSize());
	PX_ASSERT(0==mBatchUpdateTasks[1].getPairsSize());
	PX_ASSERT(0==mBatchUpdateTasks[2].getPairsSize());

	mBatchUpdateTasks[0].runInternal();
	mBatchUpdateTasks[1].runInternal();
	mBatchUpdateTasks[2].runInternal();
}

///////////////////////////////////////////////////////////////////////////////

// Merges `numNewEndPoints` pre-sorted (descending) endpoints into the already
// sorted endpoint arrays, back-to-front so each element is moved at most once,
// updating each box's endpoint indices (boxes[...].mMinMax) as it goes.
static PX_FORCE_INLINE void InsertEndPoints(const ValType* PX_RESTRICT newEndPointValues, const BpHandle* PX_RESTRICT newEndPointDatas, PxU32 numNewEndPoints,
											ValType* PX_RESTRICT endPointValues, BpHandle* PX_RESTRICT endPointDatas, const PxU32 numEndPoints, SapBox1D* PX_RESTRICT boxes)
{
	ValType* const BaseEPValue = endPointValues;
	BpHandle* const BaseEPData = endPointDatas;

	const PxU32 OldSize = numEndPoints-NUM_SENTINELS;
	const PxU32 NewSize = numEndPoints-NUM_SENTINELS+numNewEndPoints;

	// Move the max sentinel to its new position at the end of the grown array.
	BaseEPValue[NewSize + 1] = BaseEPValue[OldSize + 1];
	BaseEPData[NewSize + 1] = BaseEPData[OldSize + 1];

	PxI32 WriteIdx = PxI32(NewSize);
	PxU32 CurrInsIdx = 0;

	//const SapValType* FirstValue = &BaseEPValue[0];
	const BpHandle* FirstData = &BaseEPData[0];
	const ValType* CurrentValue = &BaseEPValue[OldSize];
	const BpHandle* CurrentData = &BaseEPData[OldSize];
	while(CurrentData>=FirstData)
	{
		const ValType& SrcValue = *CurrentValue;
		const BpHandle& SrcData = *CurrentData;
		const ValType& InsValue = newEndPointValues[CurrInsIdx];
		const BpHandle& InsData = newEndPointDatas[CurrInsIdx];

		// We need to make sure we insert maxs before mins to handle exactly equal endpoints correctly
		const bool ShouldInsert = isMax(InsData) ? (SrcValue <= InsValue) : (SrcValue < InsValue);

		const ValType& MovedValue = ShouldInsert ? InsValue : SrcValue;
		const BpHandle& MovedData = ShouldInsert ? InsData : SrcData;
		BaseEPValue[WriteIdx] = MovedValue;
		BaseEPData[WriteIdx] = MovedData;
		// Keep the owning box's endpoint index in sync with the new location.
		boxes[getOwner(MovedData)].mMinMax[isMax(MovedData)] = BpHandle(WriteIdx--);

		if(ShouldInsert)
		{
			CurrInsIdx++;
			if(CurrInsIdx >= numNewEndPoints)
				break;//we just inserted the last endpoint
		}
		else
		{
			CurrentValue--;
			CurrentData--;
		}
	}
}

// Inclusive 3D interval-overlap test on encoded endpoint values.
static PX_FORCE_INLINE bool Intersect3D(const ValType bDir1Min, const ValType bDir1Max, const ValType bDir2Min, const ValType bDir2Max, const ValType bDir3Min, const ValType bDir3Max,
										const ValType cDir1Min, const ValType cDir1Max, const ValType cDir2Min, const ValType cDir2Max, const ValType cDir3Min, const ValType cDir3Max)
{
	return (bDir1Max >= cDir1Min && cDir1Max >= bDir1Min &&
			bDir2Max >= cDir2Min && cDir2Max >= bDir2Min &&
			bDir3Max >= cDir3Min && cDir3Max >= bDir3Min);
}

// Splits the sorted boxes into two sorted lists: the newly-created boxes and
// the pre-existing boxes that overlap the bounds of all new boxes. Also
// reports whether each list consists entirely of static objects.
void BroadPhaseSap::ComputeSortedLists(	//const PxVec4& globalMin, const PxVec4& /*globalMax*/,
										BpHandle* PX_RESTRICT newBoxIndicesSorted, PxU32& newBoxIndicesCount, BpHandle* PX_RESTRICT oldBoxIndicesSorted, PxU32& oldBoxIndicesCount,
										bool& allNewBoxesStatics, bool& allOldBoxesStatics)
{
	//To help us gather the two lists of sorted boxes we are going to use a bitmap and our knowledge of the indices of the new boxes
	const PxU32 bitmapWordCount = ((mBoxesCapacity*2 + 31) & ~31)/32;
	TmpMem<PxU32, 8> bitMapMem(bitmapWordCount);
	PxU32* bitMapWords = bitMapMem.getBase();
	PxMemSet(bitMapWords, 0, sizeof(PxU32)*bitmapWordCount);
	PxBitMap bitmap;
	bitmap.setWords(bitMapWords, bitmapWordCount);

	const PxU32 axis0 = 0;
	const PxU32 axis1 = 2;
	const PxU32 axis2 = 1;

	const PxU32 insertAABBStart = 0;
	const PxU32 insertAABBEnd = mCreatedSize;
	const BpHandle* PX_RESTRICT createdAABBs = mCreated;
	SapBox1D** PX_RESTRICT asapBoxes = mBoxEndPts;
	const Bp::FilterGroup::Enum* PX_RESTRICT asapBoxGroupIds = mBoxGroups;
	BpHandle* PX_RESTRICT asapEndPointDatas = mEndPointDatas[axis0];
	const PxU32 numSortedEndPoints = mBoxesSize*2 + NUM_SENTINELS;

	//Set the bitmap for new box ids and compute the aabb (of the sorted handles/indices and not of the values) that bounds all new boxes.

	PxU32 globalAABBMinX = PX_MAX_U32;
	PxU32 globalAABBMinY = PX_MAX_U32;
	PxU32 globalAABBMinZ = PX_MAX_U32;
	PxU32 globalAABBMaxX = 0;
	PxU32 globalAABBMaxY = 0;
	PxU32 globalAABBMaxZ = 0;

	// PT: TODO: compute the global bounds from the initial data, more cache/SIMD-friendly
	// => maybe doesn't work, we're dealing with indices here not actual float values IIRC
	for(PxU32 i=insertAABBStart;i<insertAABBEnd;i++)
	{
		const PxU32 boxId = createdAABBs[i];
		bitmap.set(boxId);
		globalAABBMinX = PxMin(globalAABBMinX, PxU32(asapBoxes[axis0][boxId].mMinMax[0]));
		globalAABBMaxX = PxMax(globalAABBMaxX, PxU32(asapBoxes[axis0][boxId].mMinMax[1]));
		globalAABBMinY = PxMin(globalAABBMinY, PxU32(asapBoxes[axis1][boxId].mMinMax[0]));
		globalAABBMaxY = PxMax(globalAABBMaxY, PxU32(asapBoxes[axis1][boxId].mMinMax[1]));
		globalAABBMinZ = PxMin(globalAABBMinZ, PxU32(asapBoxes[axis2][boxId].mMinMax[0]));
		globalAABBMaxZ = PxMax(globalAABBMaxZ, PxU32(asapBoxes[axis2][boxId].mMinMax[1]));
	}

/*	PxU32 _globalAABBMinX = IntegerAABB::encodeFloatMin(PxUnionCast<PxU32, PxF32>(globalMin.x));
	PxU32 _globalAABBMinY = IntegerAABB::encodeFloatMin(PxUnionCast<PxU32, PxF32>(globalMin.y));
	PxU32 _globalAABBMinZ = IntegerAABB::encodeFloatMin(PxUnionCast<PxU32, PxF32>(globalMin.z));
	PxU32 _globalAABBMaxX = IntegerAABB::encodeFloatMin(PxUnionCast<PxU32, PxF32>(globalMax.x));
	PxU32 _globalAABBMaxY = IntegerAABB::encodeFloatMin(PxUnionCast<PxU32, PxF32>(globalMax.y));
	PxU32 _globalAABBMaxZ = IntegerAABB::encodeFloatMin(PxUnionCast<PxU32, PxF32>(globalMax.z));
	(void)_globalAABBMinX;*/

	PxU32 oldStaticCount=0;
	PxU32 newStaticCount=0;

	//Assign the sorted end pts to the appropriate arrays.
	// PT: TODO: we could just do this loop before inserting the new endpts, i.e. no need for a bitmap etc
	// => but we need to insert the pts first to have valid mMinMax data in the above loop.
	// => but why do we iterate over endpoints and then skip the mins? Why not iterate directly over boxes? ====> probably to get sorted results
	// => we could then just use the regular bounds data etc
	for(PxU32 i=1;i<numSortedEndPoints-1;i++)
	{
		//Make sure we haven't encountered a sentinel - 
		//they should only be at each end of the array.
		PX_ASSERT(!isSentinel(asapEndPointDatas[i]));
		PX_ASSERT(!isSentinel(asapEndPointDatas[i]));
		PX_ASSERT(!isSentinel(asapEndPointDatas[i]));

		// Only consider min endpoints so each box is visited exactly once, in sorted order.
		if(!isMax(asapEndPointDatas[i]))
		{
			const BpHandle boxId = BpHandle(getOwner(asapEndPointDatas[i]));
			if(!bitmap.test(boxId))
			{
				// Pre-existing box: keep it only if it overlaps the bounds of all new boxes.
				if(Intersect3D(
					globalAABBMinX, globalAABBMaxX, globalAABBMinY, globalAABBMaxY, globalAABBMinZ, globalAABBMaxZ,
					asapBoxes[axis0][boxId].mMinMax[0], asapBoxes[axis0][boxId].mMinMax[1],
					asapBoxes[axis1][boxId].mMinMax[0], asapBoxes[axis1][boxId].mMinMax[1],
					asapBoxes[axis2][boxId].mMinMax[0], asapBoxes[axis2][boxId].mMinMax[1]))
				{
					oldBoxIndicesSorted[oldBoxIndicesCount++] = boxId;
					oldStaticCount += asapBoxGroupIds[boxId]==FilterGroup::eSTATICS ? 0 : 1;
				}
			}
			else
			{
				newBoxIndicesSorted[newBoxIndicesCount++] = boxId;
				newStaticCount += asapBoxGroupIds[boxId]==FilterGroup::eSTATICS ? 0 : 1;
			}
		}
	}

	allOldBoxesStatics = oldStaticCount ? false : true;
	allNewBoxesStatics = newStaticCount ? false : true;

	//Make sure that we've found the correct number of boxes.
	PX_ASSERT(newBoxIndicesCount==(insertAABBEnd-insertAABBStart));
	PX_ASSERT(oldBoxIndicesCount<=((numSortedEndPoints-NUM_SENTINELS)/2));
}

//#include "foundation/PxVecMath.h"
//using namespace aos;

// Inserts all newly-created boxes: radix-sorts their encoded endpoints per
// axis, merges them into the sorted endpoint lists, then box-prunes
// new-vs-new and new-vs-old to seed the overlap-pair data.
void BroadPhaseSap::batchCreate()
{
	if(!mCreatedSize)
		return;	// Early-exit if no object has been created

//	PxVec4 globalMin, globalMax;
	{
		//Number of newly-created boxes (still to be sorted) and number of old boxes (already sorted).
		const PxU32 numNewBoxes = mCreatedSize;
		//const PxU32 numOldBoxes = mBoxesSize - mCreatedSize;

		//Array of newly-created box indices.
		const BpHandle* PX_RESTRICT created = mCreated;

		//Arrays of min and max coords for each box for each axis.
		const PxBounds3* PX_RESTRICT minMax = mBoxBoundsMinMax;

/*		{
			PxU32 nbToGo = numNewBoxes-1;
			const PxU32 lastBoxId = created[nbToGo];
			const PxVec3 lastMin = minMax[lastBoxId].minimum;
			const PxVec3 lastMax = minMax[lastBoxId].maximum;
			PxVec4 minI(lastMin.x, lastMin.y, lastMin.z, 0.0f);
			PxVec4 maxI(lastMax.x, lastMax.y, lastMax.z, 0.0f);
			const Vec4V dist4 = V4Load(mContactDistance[lastBoxId]);
			Vec4V resultMinV = V4Sub(V4LoadU(&lastMin.x), dist4);
			Vec4V resultMaxV = V4Add(V4LoadU(&lastMax.x), dist4);
			const BpHandle* src = created;
			while(nbToGo--)
			{
				const PxU32 boxId = *src++;
				const Vec4V d4 = V4Load(mContactDistance[boxId]);
				resultMinV = V4Min(resultMinV, V4Sub(V4LoadU(&minMax[boxId].minimum.x), d4));
				resultMaxV = V4Max(resultMaxV, V4Add(V4LoadU(&minMax[boxId].maximum.x), d4));
			}
			V4StoreU(resultMinV, &globalMin.x);
			V4StoreU(resultMaxV, &globalMax.x);
		}*/

		//Insert new boxes into sorted endpoints lists.
		{
			const PxU32 numEndPoints = numNewBoxes*2;

			TmpMem<ValType, 32> nepsv(numEndPoints), bv(numEndPoints);
			ValType* newEPSortedValues = nepsv.getBase();
			ValType* bufferValues = bv.getBase();

			// PT: TODO: use the scratch allocator
			Cm::RadixSortBuffered RS;

			for(PxU32 Axis=0;Axis<3;Axis++)
			{
				// Encode the new boxes' min/max (inflated by contact distance) for this axis.
				for(PxU32 i=0;i<numNewBoxes;i++)
				{
					const PxU32 boxIndex = PxU32(created[i]);
					PX_ASSERT(mBoxEndPts[Axis][boxIndex].mMinMax[0]==BP_INVALID_BP_HANDLE || mBoxEndPts[Axis][boxIndex].mMinMax[0]==PX_REMOVED_BP_HANDLE);
					PX_ASSERT(mBoxEndPts[Axis][boxIndex].mMinMax[1]==BP_INVALID_BP_HANDLE || mBoxEndPts[Axis][boxIndex].mMinMax[1]==PX_REMOVED_BP_HANDLE);
//					const ValType minValue = minMax[boxIndex].getMin(Axis);
//					const ValType maxValue = minMax[boxIndex].getMax(Axis);
					const PxReal contactDistance = mContactDistance[boxIndex];
					newEPSortedValues[i*2+0] = encodeMin(minMax[boxIndex], Axis, contactDistance);
					newEPSortedValues[i*2+1] = encodeMax(minMax[boxIndex], Axis, contactDistance);
				}

				// Sort endpoints backwards
				BpHandle* bufferDatas;
				{
					RS.invalidateRanks();	// PT: there's no coherence between axes
					const PxU32* Sorted = RS.Sort(newEPSortedValues, numEndPoints, Cm::RADIX_UNSIGNED).GetRanks();

					bufferDatas = RS.GetRecyclable();

					// PT: TODO: with two passes here we could reuse the "newEPSortedValues" buffer and drop "bufferValues"
					for(PxU32 i=0;i<numEndPoints;i++)
					{
						const PxU32 sortedIndex = Sorted[numEndPoints-1-i];
						bufferValues[i] = newEPSortedValues[sortedIndex];
						// PT: compute buffer data on-the-fly, store in recyclable buffer
						const PxU32 boxIndex = PxU32(created[sortedIndex>>1]);
						bufferDatas[i] = setData(boxIndex, (sortedIndex&1)!=0);
					}
				}

				InsertEndPoints(bufferValues, bufferDatas, numEndPoints, mEndPointValues[Axis], mEndPointDatas[Axis], 2*(mBoxesSize-mCreatedSize)+NUM_SENTINELS, mBoxEndPts[Axis]);
			}
		}

		//Some debug tests.
#if PX_DEBUG
		{
			for(PxU32 i=0;i<numNewBoxes;i++)
			{
				PxU32 BoxIndex = PxU32(created[i]);
				PX_ASSERT(mBoxEndPts[0][BoxIndex].mMinMax[0]!=BP_INVALID_BP_HANDLE && mBoxEndPts[0][BoxIndex].mMinMax[0]!=PX_REMOVED_BP_HANDLE);
				PX_ASSERT(mBoxEndPts[0][BoxIndex].mMinMax[1]!=BP_INVALID_BP_HANDLE && mBoxEndPts[0][BoxIndex].mMinMax[1]!=PX_REMOVED_BP_HANDLE);
				PX_ASSERT(mBoxEndPts[1][BoxIndex].mMinMax[0]!=BP_INVALID_BP_HANDLE && mBoxEndPts[1][BoxIndex].mMinMax[0]!=PX_REMOVED_BP_HANDLE);
				PX_ASSERT(mBoxEndPts[1][BoxIndex].mMinMax[1]!=BP_INVALID_BP_HANDLE && mBoxEndPts[1][BoxIndex].mMinMax[1]!=PX_REMOVED_BP_HANDLE);
				PX_ASSERT(mBoxEndPts[2][BoxIndex].mMinMax[0]!=BP_INVALID_BP_HANDLE && mBoxEndPts[2][BoxIndex].mMinMax[0]!=PX_REMOVED_BP_HANDLE);
				PX_ASSERT(mBoxEndPts[2][BoxIndex].mMinMax[1]!=BP_INVALID_BP_HANDLE && mBoxEndPts[2][BoxIndex].mMinMax[1]!=PX_REMOVED_BP_HANDLE);
			}

			for(PxU32 i=0;i<mBoxesSize*2+1;i++)
			{
				PX_ASSERT(mEndPointValues[0][i] <= mEndPointValues[0][i+1]);
				PX_ASSERT(mEndPointValues[1][i] <= mEndPointValues[1][i+1]);
				PX_ASSERT(mEndPointValues[2][i] <= mEndPointValues[2][i+1]);
			}
		}
#endif
	}

	// Perform box-pruning
	{
		// PT: TODO: use the scratch allocator in TmpMem

		//Number of newly-created boxes (still to be sorted) and number of old boxes (already sorted).
		const PxU32 numNewBoxes = mCreatedSize;
		const PxU32 numOldBoxes = mBoxesSize - mCreatedSize;

		//Gather two list of sorted boxes along the preferred axis direction:
		//one list for new boxes and one list for existing boxes.
		//Only gather the existing boxes that overlap the bounding box of
		//all new boxes.
		TmpMem<BpHandle, 8> oldBoxesIndicesSortedMem(numOldBoxes);
		TmpMem<BpHandle, 8> newBoxesIndicesSortedMem(numNewBoxes);

		BpHandle* oldBoxesIndicesSorted = oldBoxesIndicesSortedMem.getBase();
		BpHandle* newBoxesIndicesSorted = newBoxesIndicesSortedMem.getBase();

		PxU32 oldBoxCount = 0;
		PxU32 newBoxCount = 0;

		bool allNewBoxesStatics = false;
		bool allOldBoxesStatics = false;

		// PT: TODO: separate static/dynamic to speed things up, compute "minPosList" etc at the same time
		// PT: TODO: isn't "newBoxesIndicesSorted" the same as what we already computed in batchCreate() ?

		//Ready to gather the two lists now.
		ComputeSortedLists(/*globalMin, globalMax,*/ newBoxesIndicesSorted, newBoxCount, oldBoxesIndicesSorted, oldBoxCount, allNewBoxesStatics, allOldBoxesStatics);

		//Intersect new boxes with new boxes and new boxes with existing boxes.
		// Static-vs-static pairs are never needed, hence the early-outs below.
		if(!allNewBoxesStatics || !allOldBoxesStatics)
		{
			const AuxData data0(newBoxCount, mBoxEndPts, newBoxesIndicesSorted, mBoxGroups);

			if(!allNewBoxesStatics)
				performBoxPruningNewNew(&data0, mScratchAllocator, mFilter->getLUT(), mPairs, mData, mDataSize, mDataCapacity);

			// the old boxes are not the first ones in the array
			if(numOldBoxes)
			{
				if(oldBoxCount)
				{
					const AuxData data1(oldBoxCount, mBoxEndPts, oldBoxesIndicesSorted, mBoxGroups);
					performBoxPruningNewOld(&data0, &data1, mScratchAllocator, mFilter->getLUT(), mPairs, mData, mDataSize, mDataCapacity);
				}
			}
		}
	}
}

///////////////////////////////////////////////////////////////////////////////

void BroadPhaseSap::batchRemove()
{
	if(!mRemovedSize)
		return;	// Early-exit if no object has been removed

	//The box count is incremented when boxes are added to the create list but these boxes
	//haven't yet been added to the pair manager or the sorted axis lists.  We need to 
	//pretend that the box count is the value it was when the bp was last updated.
	//Then, at the end, we need to set the box count to the number that includes the boxes
	//in the create list and subtract off the boxes that have been removed.
	// Temporarily roll the box count back to the last-updated value (see comment above).
	PxU32 currBoxesSize=mBoxesSize;
	mBoxesSize=mBoxesSizePrev;

	for(PxU32 Axis=0;Axis<3;Axis++)
	{
		ValType* const BaseEPValue = mEndPointValues[Axis];
		BpHandle* const BaseEPData = mEndPointDatas[Axis];

		// Pass 1: mark both endpoints of every removed box, remembering the
		// lowest marked index so compaction can start there.
		PxU32 MinMinIndex = PX_MAX_U32;
		for(PxU32 i=0;i<mRemovedSize;i++)
		{
			PX_ASSERT(mRemoved[i]<mBoxesCapacity);

			const PxU32 MinIndex = mBoxEndPts[Axis][mRemoved[i]].mMinMax[0];
			PX_ASSERT(MinIndex<mBoxesCapacity*2+2);
			PX_ASSERT(getOwner(BaseEPData[MinIndex])==mRemoved[i]);

			const PxU32 MaxIndex = mBoxEndPts[Axis][mRemoved[i]].mMinMax[1];
			PX_ASSERT(MaxIndex<mBoxesCapacity*2+2);
			PX_ASSERT(getOwner(BaseEPData[MaxIndex])==mRemoved[i]);

			PX_ASSERT(MinIndex<MaxIndex);

			BaseEPData[MinIndex] = PX_REMOVED_BP_HANDLE;
			BaseEPData[MaxIndex] = PX_REMOVED_BP_HANDLE;

			if(MinIndex<MinMinIndex)
				MinMinIndex = MinIndex;
		}

		// Pass 2: compact the endpoint arrays in place, skipping marked slots
		// and fixing up each surviving owner's endpoint index.
		PxU32 ReadIndex = MinMinIndex;
		PxU32 DestIndex = MinMinIndex;
		const PxU32 Limit = mBoxesSize*2+NUM_SENTINELS;
		while(ReadIndex!=Limit)
		{
			PxPrefetchLine(&BaseEPData[ReadIndex],128);
			while(ReadIndex!=Limit && BaseEPData[ReadIndex] == PX_REMOVED_BP_HANDLE)
			{
				PxPrefetchLine(&BaseEPData[ReadIndex],128);
				ReadIndex++;
			}
			if(ReadIndex!=Limit)
			{
				if(ReadIndex!=DestIndex)
				{
					BaseEPValue[DestIndex] = BaseEPValue[ReadIndex];
					BaseEPData[DestIndex] = BaseEPData[ReadIndex];
					PX_ASSERT(BaseEPData[DestIndex] != PX_REMOVED_BP_HANDLE);
					if(!isSentinel(BaseEPData[DestIndex]))
					{
						BpHandle BoxOwner = getOwner(BaseEPData[DestIndex]);
						PX_ASSERT(BoxOwner<mBoxesCapacity);
						mBoxEndPts[Axis][BoxOwner].mMinMax[isMax(BaseEPData[DestIndex])] = BpHandle(DestIndex);
					}
				}
				DestIndex++;
				ReadIndex++;
			}
		}
	}

	// Invalidate the per-box endpoint records of all removed boxes.
	for(PxU32 i=0;i<mRemovedSize;i++)
	{
		const PxU32 handle=mRemoved[i];
		mBoxEndPts[0][handle].mMinMax[0]=PX_REMOVED_BP_HANDLE;
		mBoxEndPts[0][handle].mMinMax[1]=PX_REMOVED_BP_HANDLE;
		mBoxEndPts[1][handle].mMinMax[0]=PX_REMOVED_BP_HANDLE;
		mBoxEndPts[1][handle].mMinMax[1]=PX_REMOVED_BP_HANDLE;
		mBoxEndPts[2][handle].mMinMax[0]=PX_REMOVED_BP_HANDLE;
		mBoxEndPts[2][handle].mMinMax[1]=PX_REMOVED_BP_HANDLE;
	}

	// Build a bitmap of removed handles and purge their pairs from the pair manager.
	const PxU32 bitmapWordCount=1+(mBoxesCapacity>>5);
	TmpMem<PxU32, 128> bitmapWords(bitmapWordCount);
	PxMemZero(bitmapWords.getBase(),sizeof(PxU32)*bitmapWordCount);
	PxBitMap bitmap;
	bitmap.setWords(bitmapWords.getBase(),bitmapWordCount);
	for(PxU32 i=0;i<mRemovedSize;i++)
	{
		PxU32 Index = mRemoved[i];
		PX_ASSERT(Index<mBoxesCapacity);
		PX_ASSERT(0==bitmap.test(Index));
		bitmap.set(Index);
	}
	mPairs.RemovePairs(bitmap);

	// Restore the box count, minus the removed boxes (see comment at function start).
	mBoxesSize=currBoxesSize;
	mBoxesSize-=mRemovedSize;
	mBoxesSizePrev=mBoxesSize-mCreatedSize;
}

// Grows the pair array to newMaxNb entries via the scratch allocator,
// copying the old contents and freeing the old buffer. Returns the new buffer.
static BroadPhasePair* resizeBroadPhasePairArray(const PxU32 oldMaxNb, const PxU32 newMaxNb, PxcScratchAllocator* scratchAllocator, BroadPhasePair* elements)
{
	PX_ASSERT(newMaxNb > oldMaxNb);
	PX_ASSERT(newMaxNb > 0);
	PX_ASSERT(0==((newMaxNb*sizeof(BroadPhasePair)) & 15));
	BroadPhasePair* newElements = reinterpret_cast<BroadPhasePair*>(scratchAllocator->alloc(sizeof(BroadPhasePair)*newMaxNb, true));
	PX_ASSERT(0==(uintptr_t(newElements) & 0x0f));
	PxMemCopy(newElements, elements, oldMaxNb*sizeof(BroadPhasePair));
	scratchAllocator->free(elements);
	return newElements;
}

#define PERFORM_COMPARISONS 1

// Incrementally re-sorts the endpoint list of one axis after boxes moved,
// emitting candidate start/stop overlap events into "pairs" as endpoints
// swap past each other. pairs/pairsSize/pairsCapacity are in-out.
void BroadPhaseSap::batchUpdate
(const PxU32 Axis, BroadPhasePair*& pairs, PxU32& pairsSize, PxU32& pairsCapacity)
{
	//Nothing updated so don't do anything
	if(mUpdatedSize == 0)
		return;

	//If number updated is sufficiently fewer than number of boxes (say less than 20%)
	if((mUpdatedSize*5) < mBoxesSize)
	{
		batchUpdateFewUpdates(Axis, pairs, pairsSize, pairsCapacity);
		return;
	}

	PxU32 numPairs=0;
	PxU32 maxNumPairs=pairsCapacity;

	const PxBounds3* PX_RESTRICT boxMinMax3D = mBoxBoundsMinMax;

	// For each sweep axis, the two "other" axes used for the 2D overlap test.
	SapBox1D* boxMinMax2D[6]={mBoxEndPts[1],mBoxEndPts[2],mBoxEndPts[2],mBoxEndPts[0],mBoxEndPts[0],mBoxEndPts[1]};

	const SapBox1D* PX_RESTRICT boxMinMax0=boxMinMax2D[2*Axis+0];
	const SapBox1D* PX_RESTRICT boxMinMax1=boxMinMax2D[2*Axis+1];

#if BP_SAP_TEST_GROUP_ID_CREATEUPDATE
	const Bp::FilterGroup::Enum* PX_RESTRICT asapBoxGroupIds=mBoxGroups;
#endif
	SapBox1D* PX_RESTRICT asapBoxes=mBoxEndPts[Axis];

	ValType*
	// (continuation of BroadPhaseSap::batchUpdate)
	PX_RESTRICT asapEndPointValues=mEndPointValues[Axis];
	BpHandle* PX_RESTRICT asapEndPointDatas=mEndPointDatas[Axis];

	ValType* const PX_RESTRICT BaseEPValues = asapEndPointValues;
	BpHandle* const PX_RESTRICT BaseEPDatas = asapEndPointDatas;

	PxU8* PX_RESTRICT updated = mBoxesUpdated;

	//KS - can we lazy create these inside the loop? Might benefit us

	//There are no extents, just the sentinels, so exit early.
	if(isSentinel(BaseEPDatas[1]))
		return;

	//We are going to skip the 1st element in the array (the sublist will be sorted)
	//but we must first update its value if it has moved
	//const PxU32 startIsMax = isMax(BaseEPDatas[1]);
	PX_ASSERT(!isMax(BaseEPDatas[1]));
	const BpHandle startHandle = getOwner(BaseEPDatas[1]);

	//KS - in theory, we should just be able to grab the min element but there's some issue where a body's max < min (i.e. an invalid extents) that
	//appears in a unit test
//	ValType ThisValue_ = boxMinMax3D[startHandle].getMin(Axis);
	ValType ThisValue_ = encodeMin(boxMinMax3D[startHandle], Axis, mContactDistance[startHandle]);
	BaseEPValues[1] = ThisValue_;

	// Counts down the remaining endpoints of updated boxes; when it hits zero
	// between runs of updated boxes, the sweep can terminate early.
	PxU32 updateCounter = mUpdatedSize*2;

	updateCounter -= updated[startHandle];

	//We'll never overlap with this sentinel but it just ensures that we don't need to branch to see if
	//there's a pocket that we need to test against
	BroadPhaseActivityPocket* PX_RESTRICT currentPocket = mActivityPockets;
	currentPocket->mEndIndex = 0;
	currentPocket->mStartIndex = 0;

	BpHandle ind = 2;
	PxU8 wasUpdated = updated[startHandle];
	for(; !isSentinel(BaseEPDatas[ind]); ++ind)
	{
		BpHandle ThisData = BaseEPDatas[ind];
		const BpHandle handle = getOwner(ThisData);
		if(updated[handle] || wasUpdated)
		{
			wasUpdated = updated[handle];
			updateCounter -= wasUpdated;

			BpHandle ThisIndex = ind;

			const BpHandle startIsMax = isMax(ThisData);

			//Access and write back the updated values. TODO - can we avoid this when we're walking through inactive nodes?
			//BPValType ThisValue = boxMinMax1D[Axis][twoHandle+startIsMax];
			//BPValType ThisValue = startIsMax ? boxMinMax3D[handle].getMax(Axis) : boxMinMax3D[handle].getMin(Axis);
			//ValType ThisValue = boxMinMax3D[handle].getExtent(startIsMax, Axis);
			ValType ThisValue = startIsMax ? encodeMax(boxMinMax3D[handle], Axis, mContactDistance[handle]) : encodeMin(boxMinMax3D[handle], Axis, mContactDistance[handle]);
			BaseEPValues[ThisIndex] = ThisValue;

			PX_ASSERT(handle!=BP_INVALID_BP_HANDLE);

			//We always iterate back through the list...
			BpHandle CurrentIndex = mListPrev[ThisIndex];
			ValType CurrentValue = BaseEPValues[CurrentIndex];
			//PxBpHandle CurrentData = BaseEPDatas[CurrentIndex];
			if(CurrentValue > ThisValue)
			{
				// This endpoint moved left past at least one neighbour: walk it
				// back through the list, emitting overlap events as it crosses
				// opposite-type endpoints.
				wasUpdated = 1;
				//Get the bounds of the curr aabb.
				//Get the box1d of the curr aabb.
				/*const SapBox1D* PX_RESTRICT Object=&asapBoxes[handle];
				PX_ASSERT(Object->mMinMax[0]!=BP_INVALID_BP_HANDLE);
				PX_ASSERT(Object->mMinMax[1]!=BP_INVALID_BP_HANDLE);*/

//				const ValType boxMax=boxMinMax3D[handle].getMax(Axis);
				const ValType boxMax=encodeMax(boxMinMax3D[handle], Axis, mContactDistance[handle]);

				PxU32 endIndex = ind;
				PxU32 startIndex = ind;
#if BP_SAP_TEST_GROUP_ID_CREATEUPDATE
				const Bp::FilterGroup::Enum group = asapBoxGroupIds[handle];
#endif
				if(!isMax(ThisData))
				{
					do
					{
						BpHandle CurrentData = BaseEPDatas[CurrentIndex];
						const BpHandle IsMax = isMax(CurrentData);
#if PERFORM_COMPARISONS
						if(IsMax)
						{
							const BpHandle ownerId=getOwner(CurrentData);
							SapBox1D* PX_RESTRICT id1 = asapBoxes + ownerId;
							// Our min passed a max => start overlap
							if( BaseEPValues[id1->mMinMax[0]] < boxMax &&
								//2D intersection test using up-to-date values
								Intersect2D_Handle(boxMinMax0[handle].mMinMax[0], boxMinMax0[handle].mMinMax[1], boxMinMax1[handle].mMinMax[0], boxMinMax1[handle].mMinMax[1],
								boxMinMax0[ownerId].mMinMax[0],boxMinMax0[ownerId].mMinMax[1],boxMinMax1[ownerId].mMinMax[0],boxMinMax1[ownerId].mMinMax[1])
#if BP_SAP_TEST_GROUP_ID_CREATEUPDATE
								&& groupFiltering(group, asapBoxGroupIds[ownerId], mFilter->getLUT())
#else
								&& handle!=ownerId
#endif
								)
							{
								// Grow the pair buffer on demand (doubling).
								if(numPairs==maxNumPairs)
								{
									const PxU32 newMaxNumPairs=maxNumPairs*2;
									pairs = reinterpret_cast<BroadPhasePair*>(resizeBroadPhasePairArray(maxNumPairs, newMaxNumPairs, mScratchAllocator, pairs));
									maxNumPairs=newMaxNumPairs;
								}
								PX_ASSERT(numPairs<maxNumPairs);
								// NOTE: mVolA > mVolB encodes a "start overlap" event here.
								pairs[numPairs].mVolA=BpHandle(PxMax(handle, ownerId));
								pairs[numPairs].mVolB=BpHandle(PxMin(handle, ownerId));
								numPairs++;
								//AddPair(handle, getOwner(*CurrentMinData), mPairs, mData, mDataSize, mDataCapacity);
							}
						}
#endif
						startIndex--;
						CurrentIndex = mListPrev[CurrentIndex];
						CurrentValue = BaseEPValues[CurrentIndex];
					} while(ThisValue < CurrentValue);
				}
				else
				{
					// Max is moving left:
					do
					{
						BpHandle CurrentData = BaseEPDatas[CurrentIndex];
						const BpHandle IsMax = isMax(CurrentData);
#if PERFORM_COMPARISONS
						if(!IsMax)
						{
							// Our max passed a min => stop overlap
							const BpHandle ownerId=getOwner(CurrentData);
#if 1
							if(
#if BP_SAP_USE_OVERLAP_TEST_ON_REMOVES
								Intersect2D_Handle(boxMinMax0[handle].mMinMax[0], boxMinMax0[handle].mMinMax[1], boxMinMax1[handle].mMinMax[0], boxMinMax1[handle].mMinMax[1],
								boxMinMax0[ownerId].mMinMax[0],boxMinMax0[ownerId].mMinMax[1],boxMinMax1[ownerId].mMinMax[0],boxMinMax1[ownerId].mMinMax[1])
#endif
#if BP_SAP_TEST_GROUP_ID_CREATEUPDATE
								&& groupFiltering(group, asapBoxGroupIds[ownerId], mFilter->getLUT())
#else
								&& handle!=ownerId
#endif
								)
#endif
							{
								if(numPairs==maxNumPairs)
								{
									const PxU32 newMaxNumPairs=maxNumPairs*2;
									pairs = reinterpret_cast<BroadPhasePair*>(resizeBroadPhasePairArray(maxNumPairs, newMaxNumPairs, mScratchAllocator, pairs));
									maxNumPairs=newMaxNumPairs;
								}
								PX_ASSERT(numPairs<maxNumPairs);
								// NOTE: mVolA < mVolB encodes a "stop overlap" event here.
								pairs[numPairs].mVolA=BpHandle(PxMin(handle, ownerId));
								pairs[numPairs].mVolB=BpHandle(PxMax(handle, ownerId));
								numPairs++;
								//RemovePair(handle, getOwner(*CurrentMaxData), mPairs, mData, mDataSize, mDataCapacity);
							}
						}
#endif
						startIndex--;
						CurrentIndex = mListPrev[CurrentIndex];
						CurrentValue = BaseEPValues[CurrentIndex];
					} while(ThisValue < CurrentValue);
				}

				//This test is unnecessary. If we entered the outer loop, we're doing the swap in here
				{
					//Unlink from old position and re-link to new position
					BpHandle oldNextIndex = mListNext[ThisIndex];
					BpHandle oldPrevIndex = mListPrev[ThisIndex];

					BpHandle newNextIndex = mListNext[CurrentIndex];
					BpHandle newPrevIndex = CurrentIndex;

					//Unlink this node
					mListNext[oldPrevIndex] = oldNextIndex;
					mListPrev[oldNextIndex] = oldPrevIndex;

					//Link it to its new place in the list
					mListNext[ThisIndex] = newNextIndex;
					mListPrev[ThisIndex] = newPrevIndex;
					mListPrev[newNextIndex] = ThisIndex;
					mListNext[newPrevIndex] = ThisIndex;
				}

				//There is a sentinel with 0 index, so we don't need
				//to worry about walking off the array
				while(startIndex < currentPocket->mStartIndex)
				{
					currentPocket--;
				}
				//If our start index > currentPocket->mEndIndex, then we don't overlap so create a new pocket
				if(currentPocket == mActivityPockets || startIndex > (currentPocket->mEndIndex+1))
				{
					currentPocket++;
					currentPocket->mStartIndex = startIndex;
				}
				currentPocket->mEndIndex = endIndex;
			}// update max
			//ind++;
		}
		else if (updateCounter == 0)	//We've updated all the bodies and neither this nor the previous body was updated, so we're done
			break;
	}// updated aabbs

	pairsSize=numPairs;
	pairsCapacity=maxNumPairs;

	// Reconstitute the flat endpoint arrays from the shuffled linked list,
	// one activity pocket (dirty range) at a time.
	BroadPhaseActivityPocket* pocket = mActivityPockets+1;

	while(pocket <= currentPocket)
	{
		for(PxU32 a = pocket->mStartIndex; a <= pocket->mEndIndex; ++a)
		{
			mListPrev[a] = BpHandle(a);
		}

		//Now copy all the data to the array, updating the remap table
		PxU32 CurrIndex = pocket->mStartIndex-1;
		for(PxU32 a = pocket->mStartIndex; a <= pocket->mEndIndex; ++a)
		{
			CurrIndex = mListNext[CurrIndex];
			PxU32 origIndex = CurrIndex;
			BpHandle remappedIndex = mListPrev[origIndex];

			if(origIndex != a)
			{
				const BpHandle ownerId=getOwner(BaseEPDatas[remappedIndex]);
				const BpHandle IsMax = isMax(BaseEPDatas[remappedIndex]);
				ValType tmp = BaseEPValues[a];
				BpHandle tmpHandle = BaseEPDatas[a];

				BaseEPValues[a] =
				BaseEPValues[remappedIndex];
				BaseEPDatas[a] = BaseEPDatas[remappedIndex];

				BaseEPValues[remappedIndex] = tmp;
				BaseEPDatas[remappedIndex] = tmpHandle;

				mListPrev[remappedIndex] = mListPrev[a];
				//Write back remap index (should be an immediate jump to original index)
				mListPrev[mListPrev[a]] = remappedIndex;
				asapBoxes[ownerId].mMinMax[IsMax] = BpHandle(a);
			}
		}

		////Reset next and prev ptrs back
		for(PxU32 a = pocket->mStartIndex-1; a <= pocket->mEndIndex; ++a)
		{
			mListPrev[a+1] = BpHandle(a);
			mListNext[a] = BpHandle(a+1);
		}
		pocket++;
	}
	mListPrev[0] = 0;
}

// Sparse variant of batchUpdate, used when fewer than 20% of the boxes moved:
// only the endpoints of updated boxes are gathered and re-sorted, instead of
// sweeping the whole endpoint array.
void BroadPhaseSap::batchUpdateFewUpdates(const PxU32 Axis, BroadPhasePair*& pairs, PxU32& pairsSize, PxU32& pairsCapacity)
{
	PxU32 numPairs=0;
	PxU32 maxNumPairs=pairsCapacity;

	const PxBounds3* PX_RESTRICT boxMinMax3D = mBoxBoundsMinMax;

	// For each sweep axis, the two "other" axes used for the 2D overlap test.
	SapBox1D* boxMinMax2D[6]={mBoxEndPts[1],mBoxEndPts[2],mBoxEndPts[2],mBoxEndPts[0],mBoxEndPts[0],mBoxEndPts[1]};

#if BP_SAP_TEST_GROUP_ID_CREATEUPDATE
	const Bp::FilterGroup::Enum* PX_RESTRICT asapBoxGroupIds=mBoxGroups;
#endif
	SapBox1D* PX_RESTRICT asapBoxes=mBoxEndPts[Axis];

	/*const BPValType* PX_RESTRICT boxMinMax0=boxMinMax2D[2*Axis];
	const BPValType* PX_RESTRICT boxMinMax1=boxMinMax2D[2*Axis+1];*/

	ValType* PX_RESTRICT asapEndPointValues=mEndPointValues[Axis];
	BpHandle* PX_RESTRICT asapEndPointDatas=mEndPointDatas[Axis];

	ValType* const PX_RESTRICT BaseEPValues = asapEndPointValues;
	BpHandle* const PX_RESTRICT BaseEPDatas = asapEndPointDatas;

	const SapBox1D* PX_RESTRICT boxMinMax0=boxMinMax2D[2*Axis+0];
	const SapBox1D* PX_RESTRICT boxMinMax1=boxMinMax2D[2*Axis+1];

	PxU8* PX_RESTRICT updated = mBoxesUpdated;

	const PxU32 endPointSize = mBoxesSize*2 + 1;

	//There are no extents, just the sentinels, so exit early.
	if(isSentinel(BaseEPDatas[1]))
		return;

	PxU32 ind_ = 0;

	PxU32 index = 1;

	if(mUpdatedSize < 512)
	{
		//The array of updated elements is small, so use qsort to sort them
		for(PxU32 a = 0; a < mUpdatedSize; ++a)
		{
			const PxU32 handle=mUpdated[a];

			const SapBox1D* Object=&asapBoxes[handle];

			PX_ASSERT(Object->mMinMax[0]!=BP_INVALID_BP_HANDLE);
			PX_ASSERT(Object->mMinMax[1]!=BP_INVALID_BP_HANDLE);

			//Get the bounds of the curr aabb.
//			const ValType boxMin=boxMinMax3D[handle].getMin(Axis);
//			const ValType boxMax=boxMinMax3D[handle].getMax(Axis);
			const ValType boxMin = encodeMin(boxMinMax3D[handle], Axis, mContactDistance[handle]);
			const ValType boxMax = encodeMax(boxMinMax3D[handle], Axis, mContactDistance[handle]);

			// Write the fresh values into the endpoint array and remember the
			// endpoint indices that need re-sorting.
			BaseEPValues[Object->mMinMax[0]] = boxMin;
			BaseEPValues[Object->mMinMax[1]] = boxMax;

			mSortedUpdateElements[ind_++] = Object->mMinMax[0];
			mSortedUpdateElements[ind_++] = Object->mMinMax[1];
		}
		PxSort(mSortedUpdateElements, ind_);
	}
	else
	{
		//The array of updated elements is large so use a bucket sort to sort them
		// (walking the already-sorted endpoint array yields the updated endpoints in order).
		for(; index < endPointSize; ++index)
		{
			if(isSentinel( BaseEPDatas[index] ))
				break;
			BpHandle ThisData = BaseEPDatas[index];
			BpHandle owner = BpHandle(getOwner(ThisData));
			if(updated[owner])
			{
				//BPValType ThisValue = isMax(ThisData) ? boxMinMax3D[owner].getMax(Axis) : boxMinMax3D[owner].getMin(Axis);
				ValType ThisValue = isMax(ThisData) ? encodeMax(boxMinMax3D[owner], Axis, mContactDistance[owner]) : encodeMin(boxMinMax3D[owner], Axis, mContactDistance[owner]);
				BaseEPValues[index] = ThisValue;
				mSortedUpdateElements[ind_++] = BpHandle(index);
			}
		}
	}

	const PxU32 updateCounter = ind_;

	//We'll never overlap with this sentinel but it just ensures that we don't need to branch to see if
	//there's a pocket that we need to test against
	BroadPhaseActivityPocket* PX_RESTRICT currentPocket = mActivityPockets;

	currentPocket->mEndIndex = 0;
	currentPocket->mStartIndex = 0;

	for(PxU32 a = 0; a < updateCounter; ++a)
	{
		BpHandle ind = mSortedUpdateElements[a];

		BpHandle NextData;
		BpHandle PrevData;
		do
		{
			BpHandle ThisData = BaseEPDatas[ind];

			const BpHandle handle = getOwner(ThisData);

			BpHandle ThisIndex = ind;

			ValType ThisValue = BaseEPValues[ThisIndex];

			//Get the box1d of the curr aabb.
			const SapBox1D* PX_RESTRICT Object=&asapBoxes[handle];

			PX_ASSERT(handle!=BP_INVALID_BP_HANDLE);
			PX_ASSERT(Object->mMinMax[0]!=BP_INVALID_BP_HANDLE);
			PX_ASSERT(Object->mMinMax[1]!=BP_INVALID_BP_HANDLE);

			PX_UNUSED(Object);

			//Get the bounds of the curr aabb.
			//const PxU32 twoHandle = 2*handle;
			const ValType boxMax=encodeMax(boxMinMax3D[handle], Axis, mContactDistance[handle]);

			//We always iterate back through the list...
			BpHandle CurrentIndex = mListPrev[ThisIndex];
			ValType CurrentValue = BaseEPValues[CurrentIndex];
			if(CurrentValue > ThisValue)
			{
				//We're performing some swaps so we need an activity pocket here. This structure allows us to keep track of the range of
				//modifications in the sorted lists. Doesn't help when everything's moving but makes a really big difference to reconstituting the
				//list when only a small number of things are moving

				PxU32 endIndex = ind;
				PxU32 startIndex = ind;
				//const BPValType* PX_RESTRICT box0MinMax0 = &boxMinMax0[twoHandle];
				//const BPValType* PX_RESTRICT box0MinMax1 = &boxMinMax1[twoHandle];
#if BP_SAP_TEST_GROUP_ID_CREATEUPDATE
				const Bp::FilterGroup::Enum group = asapBoxGroupIds[handle];
#endif
				if(!isMax(ThisData))
				{
					do
					{
						BpHandle CurrentData = BaseEPDatas[CurrentIndex];
						const BpHandle IsMax = isMax(CurrentData);
#if PERFORM_COMPARISONS
						if(IsMax)
						{
							const BpHandle ownerId=getOwner(CurrentData);
							SapBox1D* PX_RESTRICT id1 = asapBoxes + ownerId;
							// Our min passed a max => start overlap
							if( BaseEPValues[id1->mMinMax[0]] < boxMax &&
								//2D intersection test using up-to-date values
								Intersect2D_Handle(boxMinMax0[handle].mMinMax[0], boxMinMax0[handle].mMinMax[1], boxMinMax1[handle].mMinMax[0], boxMinMax1[handle].mMinMax[1],
								boxMinMax0[ownerId].mMinMax[0],boxMinMax0[ownerId].mMinMax[1],boxMinMax1[ownerId].mMinMax[0],boxMinMax1[ownerId].mMinMax[1])
#if BP_SAP_TEST_GROUP_ID_CREATEUPDATE
								&& groupFiltering(group, asapBoxGroupIds[ownerId], mFilter->getLUT())
#else
								&& Object!=id1
#endif
								)
							{
								// Grow the pair buffer on demand (doubling).
								if(numPairs==maxNumPairs)
								{
									const PxU32 newMaxNumPairs=maxNumPairs*2;
									pairs = reinterpret_cast<BroadPhasePair*>(resizeBroadPhasePairArray(maxNumPairs, newMaxNumPairs, mScratchAllocator, pairs));
									maxNumPairs=newMaxNumPairs;
								}
								PX_ASSERT(numPairs<maxNumPairs);
								// NOTE: mVolA > mVolB encodes a "start overlap" event here.
								pairs[numPairs].mVolA=BpHandle(PxMax(handle, ownerId));
								pairs[numPairs].mVolB=BpHandle(PxMin(handle, ownerId));
								numPairs++;
								//AddPair(handle, getOwner(*CurrentMinData), mPairs, mData, mDataSize, mDataCapacity);
							}
						}
#endif
						startIndex--;
						CurrentIndex = mListPrev[CurrentIndex];
						CurrentValue = BaseEPValues[CurrentIndex];
					} while(ThisValue < CurrentValue);
				}
				else
				{
					// Max is moving left:
					do
					{
						BpHandle CurrentData = BaseEPDatas[CurrentIndex];
						const BpHandle IsMax = isMax(CurrentData);
#if PERFORM_COMPARISONS
						if(!IsMax)
						{
							// Our max passed a min => stop overlap
							const BpHandle ownerId=getOwner(CurrentData);
#if 1
							if(
#if BP_SAP_USE_OVERLAP_TEST_ON_REMOVES
								Intersect2D_Handle(boxMinMax0[handle].mMinMax[0], boxMinMax0[handle].mMinMax[1], boxMinMax1[handle].mMinMax[0], boxMinMax1[handle].mMinMax[1],
								boxMinMax0[ownerId].mMinMax[0],boxMinMax0[ownerId].mMinMax[1],boxMinMax1[ownerId].mMinMax[0],boxMinMax1[ownerId].mMinMax[1])
#endif
#if BP_SAP_TEST_GROUP_ID_CREATEUPDATE
								&& groupFiltering(group, asapBoxGroupIds[ownerId], mFilter->getLUT())
#else
								&& Object!=id1
#endif
								)
#endif
							{
								if(numPairs==maxNumPairs)
								{
									const PxU32 newMaxNumPairs=maxNumPairs*2;
									pairs = reinterpret_cast<BroadPhasePair*>(resizeBroadPhasePairArray(maxNumPairs, newMaxNumPairs, mScratchAllocator, pairs));
									maxNumPairs=newMaxNumPairs;
								}
								PX_ASSERT(numPairs<maxNumPairs);
								// NOTE: mVolA < mVolB encodes a "stop overlap" event here.
								pairs[numPairs].mVolA=BpHandle(PxMin(handle, ownerId));
								pairs[numPairs].mVolB=BpHandle(PxMax(handle, ownerId));
								numPairs++;
								//RemovePair(handle, getOwner(*CurrentMaxData), mPairs, mData, mDataSize, mDataCapacity);
							}
						}
#endif
						startIndex--;
						CurrentIndex = mListPrev[CurrentIndex];
						CurrentValue = BaseEPValues[CurrentIndex];
					} while(ThisValue < CurrentValue);
				}

				//This test is unnecessary. If we entered the outer loop, we're doing the swap in here
				{
					//Unlink from old position and re-link to new position
					BpHandle oldNextIndex = mListNext[ThisIndex];
					BpHandle oldPrevIndex = mListPrev[ThisIndex];

					BpHandle newNextIndex = mListNext[CurrentIndex];
					BpHandle newPrevIndex = CurrentIndex;

					//Unlink this node
					mListNext[oldPrevIndex] = oldNextIndex;
					mListPrev[oldNextIndex] = oldPrevIndex;

					//Link it to its new place in the list
					mListNext[ThisIndex] = newNextIndex;
					mListPrev[ThisIndex] = newPrevIndex;
					mListPrev[newNextIndex] = ThisIndex;
					mListNext[newPrevIndex] = ThisIndex;
				}

				//Loop over the activity pocket stack to make sure this set of shuffles didn't
				//interfere with the previous set. If it did, we roll this pocket into the previous
				//pockets. If everything in the scene is moving, we should result in just 1 pocket
				while(startIndex < currentPocket->mStartIndex)
				{
					currentPocket--;
				}
				//If our start index > currentPocket->mEndIndex, then we don't overlap so create a new pocket
				if(currentPocket == mActivityPockets || startIndex > (currentPocket->mEndIndex+1))
				{
					currentPocket++;
					currentPocket->mStartIndex = startIndex;
				}
				currentPocket->mEndIndex = endIndex;
			}// update max

			//Get prev and next ptr...
			NextData = BaseEPDatas[++ind];
			PrevData = BaseEPDatas[mListPrev[ind]];

		}while(!isSentinel(NextData) && !updated[getOwner(NextData)] && updated[getOwner(PrevData)]);
	}// updated aabbs

	pairsSize=numPairs;
	pairsCapacity=maxNumPairs;

	// Reconstitute the flat endpoint arrays from the shuffled linked list,
	// one activity pocket (dirty range) at a time.
	BroadPhaseActivityPocket* pocket = mActivityPockets+1;

	while(pocket <= currentPocket)
	{
		//PxU32 CurrIndex = mListPrev[pocket->mStartIndex];
		for(PxU32 a = pocket->mStartIndex; a <= pocket->mEndIndex; ++a)
		{
			mListPrev[a] = BpHandle(a);
		}

		//Now copy all the data to the array, updating the remap table
		PxU32 CurrIndex = pocket->mStartIndex-1;
		for(PxU32 a = pocket->mStartIndex; a <= pocket->mEndIndex; ++a)
		{
			CurrIndex = mListNext[CurrIndex];
			PxU32 origIndex = CurrIndex;
			BpHandle remappedIndex = mListPrev[origIndex];

			if(origIndex != a)
			{
				const BpHandle ownerId=getOwner(BaseEPDatas[remappedIndex]);
				const BpHandle IsMax = isMax(BaseEPDatas[remappedIndex]);
				ValType tmp = BaseEPValues[a];
				BpHandle tmpHandle = BaseEPDatas[a];

				BaseEPValues[a] = BaseEPValues[remappedIndex];
				BaseEPDatas[a] = BaseEPDatas[remappedIndex];

				BaseEPValues[remappedIndex] = tmp;
				BaseEPDatas[remappedIndex] = tmpHandle;

				mListPrev[remappedIndex] = mListPrev[a];
				//Write back remap index (should be an immediate jump to original index)
				mListPrev[mListPrev[a]] = remappedIndex;
				asapBoxes[ownerId].mMinMax[IsMax] = BpHandle(a);
			}
		}

		//Reset next and prev ptrs back
		for(PxU32 a = pocket->mStartIndex-1; a <= pocket->mEndIndex; ++a)
		{
			mListPrev[a+1] = BpHandle(a);
			mListNext[a] = BpHandle(a+1);
		}
		pocket++;
	}
}

#if PX_DEBUG
// Debug-only: verifies that each axis' endpoint array is sorted and that the
// per-box endpoint indices are consistent with the arrays.
bool BroadPhaseSap::isSelfOrdered() const
{
	if(0==mBoxesSize)
		return true;

	for(PxU32 Axis=0;Axis<3;Axis++)
	{
		PxU32 it=1;	// skip the leading sentinel
		PX_ASSERT(mEndPointDatas[Axis]);
		while(!isSentinel(mEndPointDatas[Axis][it]))
		{
			//Test the array is sorted.
			const ValType prevVal=mEndPointValues[Axis][it-1];
			const ValType currVal=mEndPointValues[Axis][it];
			if(currVal<prevVal)
				return false;

			//Test the end point array is consistent.
			const BpHandle ismax=isMax(mEndPointDatas[Axis][it]);
			const BpHandle ownerId=getOwner(mEndPointDatas[Axis][it]);
			if(mBoxEndPts[Axis][ownerId].mMinMax[ismax]!=it)
				return false;

			//Test the mins are even, the maxes are odd, and the extents are finite.
			const ValType boxMin = mEndPointValues[Axis][mBoxEndPts[Axis][ownerId].mMinMax[0]];
			const ValType boxMax = mEndPointValues[Axis][mBoxEndPts[Axis][ownerId].mMinMax[1]];
			if(boxMin & 1)
				return false;
			if(0==(boxMax & 1))
				return false;
			if(boxMax<=boxMin)
				return false;

			it++;
		}
	}

	return true;
}

// Debug-only: verifies that stored endpoint values match freshly re-encoded
// bounds, and that reported created/deleted pairs agree with the current AABBs.
bool BroadPhaseSap::isSelfConsistent() const
{
	if(0==mBoxesSize)
		return true;

	for(PxU32 Axis=0;Axis<3;Axis++)
	{
		PxU32 it=1;	// skip the leading sentinel
		ValType prevVal=0;
		const PxBounds3* PX_RESTRICT boxMinMax = mBoxBoundsMinMax;
		const PxReal* PX_RESTRICT contactDistance = mContactDistance;
		PX_ASSERT(mEndPointDatas[Axis]);
		while(!isSentinel(mEndPointDatas[Axis][it]))
		{
			const BpHandle ownerId=getOwner(mEndPointDatas[Axis][it]);
			const BpHandle ismax=isMax(mEndPointDatas[Axis][it]);
			// Re-encode the owner's bounds and compare with the stored endpoint value.
			const ValType boxMinMaxs[2] = { encodeMin(boxMinMax[ownerId], Axis, contactDistance[ownerId]), encodeMax(boxMinMax[ownerId], Axis, contactDistance[ownerId]) };
//			const ValType boxMinMaxs[2] = { boxMinMax[ownerId].getMin(Axis), boxMinMax[ownerId].getMax(Axis) };
			const ValType test1=boxMinMaxs[ismax];
			const ValType test2=mEndPointValues[Axis][it];
			if(test1!=test2)
				return false;
			if(test2<prevVal)
				return false;
			prevVal=test2;

			if(mBoxEndPts[Axis][ownerId].mMinMax[ismax]!=it)
				return false;

			it++;
		}
	}

	// Every reported created pair must actually intersect.
	for(PxU32 i=0;i<mCreatedPairsSize;i++)
	{
		const PxU32 a=mCreatedPairsArray[i].mVolA;
		const PxU32 b=mCreatedPairsArray[i].mVolB;
		IntegerAABB aabb0(mBoxBoundsMinMax[a], mContactDistance[a]);
		IntegerAABB aabb1(mBoxBoundsMinMax[b], mContactDistance[b]);
		if(!aabb0.intersects(aabb1))
			return false;
	}

	// Every reported deleted pair must no longer intersect, unless one of the
	// two boxes was explicitly removed this frame.
	for(PxU32 i=0;i<mDeletedPairsSize;i++)
	{
		const PxU32 a=mDeletedPairsArray[i].mVolA;
		const PxU32 b=mDeletedPairsArray[i].mVolB;
		bool isDeleted=false;
		for(PxU32 j=0;j<mRemovedSize;j++)
		{
			if(a==mRemoved[j] || b==mRemoved[j])
				isDeleted=true;
		}

		if(!isDeleted)
		{
			IntegerAABB aabb0(mBoxBoundsMinMax[a], mContactDistance[a]);
			IntegerAABB aabb1(mBoxBoundsMinMax[b], mContactDistance[b]);
			if(aabb0.intersects(aabb1))
			{
				// with the past refactors this should have become illegal
				return false;
			}
		}
	}

	return true;
}
#endif

} //namespace Bp

} //namespace physx
68,288
C++
34.697334
183
0.722733
NVIDIA-Omniverse/PhysX/physx/source/lowlevelaabb/src/BpBroadPhaseMBPCommon.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef BP_BROADPHASE_MBP_COMMON_H #define BP_BROADPHASE_MBP_COMMON_H #include "PxPhysXConfig.h" #include "BpBroadPhaseIntegerAABB.h" #include "foundation/PxUserAllocated.h" namespace physx { namespace Bp { #define MBP_USE_WORDS #define MBP_USE_NO_CMP_OVERLAP #if PX_INTEL_FAMILY && !defined(PX_SIMD_DISABLED) #define MBP_SIMD_OVERLAP #endif #ifdef MBP_USE_WORDS typedef PxU16 MBP_Index; #else typedef PxU32 MBP_Index; #endif typedef PxU32 MBP_ObjectIndex; // PT: index in mMBP_Objects typedef PxU32 MBP_Handle; // PT: returned to MBP users, combination of index/flip-flop/static-bit struct IAABB : public PxUserAllocated { PX_FORCE_INLINE bool isInside(const IAABB& box) const { if(box.mMinX>mMinX) return false; if(box.mMinY>mMinY) return false; if(box.mMinZ>mMinZ) return false; if(box.mMaxX<mMaxX) return false; if(box.mMaxY<mMaxY) return false; if(box.mMaxZ<mMaxZ) return false; return true; } PX_FORCE_INLINE PxIntBool intersects(const IAABB& a) const { if(mMaxX < a.mMinX || a.mMaxX < mMinX || mMaxY < a.mMinY || a.mMaxY < mMinY || mMaxZ < a.mMinZ || a.mMaxZ < mMinZ ) return PxIntFalse; return PxIntTrue; } PX_FORCE_INLINE PxIntBool intersectNoTouch(const IAABB& a) const { if(mMaxX <= a.mMinX || a.mMaxX <= mMinX || mMaxY <= a.mMinY || a.mMaxY <= mMinY || mMaxZ <= a.mMinZ || a.mMaxZ <= mMinZ ) return PxIntFalse; return PxIntTrue; } PX_FORCE_INLINE void initFrom2(const PxBounds3& box) { const PxU32* PX_RESTRICT binary = reinterpret_cast<const PxU32*>(&box.minimum.x); mMinX = encodeFloat(binary[0])>>1; mMinY = encodeFloat(binary[1])>>1; mMinZ = encodeFloat(binary[2])>>1; mMaxX = encodeFloat(binary[3])>>1; mMaxY = encodeFloat(binary[4])>>1; mMaxZ = encodeFloat(binary[5])>>1; } PX_FORCE_INLINE void decode(PxBounds3& box) const { PxU32* PX_RESTRICT binary = reinterpret_cast<PxU32*>(&box.minimum.x); binary[0] = decodeFloat(mMinX<<1); binary[1] = decodeFloat(mMinY<<1); binary[2] = decodeFloat(mMinZ<<1); binary[3] = decodeFloat(mMaxX<<1); binary[4] = decodeFloat(mMaxY<<1); binary[5] 
= decodeFloat(mMaxZ<<1); } PX_FORCE_INLINE PxU32 getMin(PxU32 i) const { return (&mMinX)[i]; } PX_FORCE_INLINE PxU32 getMax(PxU32 i) const { return (&mMaxX)[i]; } PxU32 mMinX; PxU32 mMinY; PxU32 mMinZ; PxU32 mMaxX; PxU32 mMaxY; PxU32 mMaxZ; }; struct SIMD_AABB : public PxUserAllocated { PX_FORCE_INLINE void initFrom(const PxBounds3& box) { const PxU32* PX_RESTRICT binary = reinterpret_cast<const PxU32*>(&box.minimum.x); mMinX = encodeFloat(binary[0]); mMinY = encodeFloat(binary[1]); mMinZ = encodeFloat(binary[2]); mMaxX = encodeFloat(binary[3]); mMaxY = encodeFloat(binary[4]); mMaxZ = encodeFloat(binary[5]); } PX_FORCE_INLINE void initFrom2(const PxBounds3& box) { const PxU32* PX_RESTRICT binary = reinterpret_cast<const PxU32*>(&box.minimum.x); mMinX = encodeFloat(binary[0])>>1; mMinY = encodeFloat(binary[1])>>1; mMinZ = encodeFloat(binary[2])>>1; mMaxX = encodeFloat(binary[3])>>1; mMaxY = encodeFloat(binary[4])>>1; mMaxZ = encodeFloat(binary[5])>>1; } PX_FORCE_INLINE void decode(PxBounds3& box) const { PxU32* PX_RESTRICT binary = reinterpret_cast<PxU32*>(&box.minimum.x); binary[0] = decodeFloat(mMinX<<1); binary[1] = decodeFloat(mMinY<<1); binary[2] = decodeFloat(mMinZ<<1); binary[3] = decodeFloat(mMaxX<<1); binary[4] = decodeFloat(mMaxY<<1); binary[5] = decodeFloat(mMaxZ<<1); } PX_FORCE_INLINE bool isInside(const SIMD_AABB& box) const { if(box.mMinX>mMinX) return false; if(box.mMinY>mMinY) return false; if(box.mMinZ>mMinZ) return false; if(box.mMaxX<mMaxX) return false; if(box.mMaxY<mMaxY) return false; if(box.mMaxZ<mMaxZ) return false; return true; } PX_FORCE_INLINE PxIntBool intersects(const SIMD_AABB& a) const { if(mMaxX < a.mMinX || a.mMaxX < mMinX || mMaxY < a.mMinY || a.mMaxY < mMinY || mMaxZ < a.mMinZ || a.mMaxZ < mMinZ ) return PxIntFalse; return PxIntTrue; } PX_FORCE_INLINE PxIntBool intersectNoTouch(const SIMD_AABB& a) const { if(mMaxX <= a.mMinX || a.mMaxX <= mMinX || mMaxY <= a.mMinY || a.mMaxY <= mMinY || mMaxZ <= a.mMinZ || a.mMaxZ <= mMinZ ) return 
PxIntFalse; return PxIntTrue; } PxU32 mMinX; PxU32 mMaxX; PxU32 mMinY; PxU32 mMinZ; PxU32 mMaxY; PxU32 mMaxZ; }; } } // namespace physx #endif // BP_BROADPHASE_MBP_COMMON_H
6,232
C
30.321608
100
0.692715
NVIDIA-Omniverse/PhysX/physx/source/lowlevelaabb/src/BpBroadPhaseABP.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "foundation/PxProfiler.h" #include "foundation/PxMemory.h" #include "foundation/PxBitUtils.h" #include "foundation/PxFPU.h" #include "BpBroadPhaseABP.h" #include "BpBroadPhaseShared.h" #include "foundation/PxVecMath.h" #include "PxcScratchAllocator.h" #include "common/PxProfileZone.h" #include "CmRadixSort.h" #include "CmUtils.h" #include "GuBounds.h" #include "foundation/PxThread.h" #include "foundation/PxSync.h" #include "task/PxTask.h" using namespace physx::aos; using namespace physx; using namespace Bp; using namespace Cm; /* PT: to try: - prepare data: sort & compute bounds in parallel? or just MT the last loop? - switch post update & add delayed pairs? - MT computeCreatedDeletedPairs - why do we set the update flag for added/removed objects? - use timestamps instead of bits? */ #define ABP_MT #define CHECKPOINT(x) //#include <stdio.h> //#define CHECKPOINT(x) printf(x); //#pragma warning (disable : 4702) #define CODEALIGN16 //_asm align 16 #if PX_INTEL_FAMILY && !defined(PX_SIMD_DISABLED) #define ABP_SIMD_OVERLAP #endif //#define ABP_BATCHING 128 #define ABP_BATCHING 256 //#define USE_ABP_BUCKETS 5000 // PT: don't use buckets below that number... #define USE_ABP_BUCKETS 512 // PT: don't use buckets below that number... //#define USE_ABP_BUCKETS 64 // PT: don't use buckets below that number... 
#ifdef USE_ABP_BUCKETS #define NB_BUCKETS 5 // Regular version: 5 buckets a la bucket pruner (4 + cross bucket) // Alternative version: 4 buckets + dup objects a la MBP regions // #define USE_ALTERNATIVE_VERSION #define ABP_USE_INTEGER_XS2 // Works but questionable speedups #else #define ABP_USE_INTEGER_XS #endif #define NB_SENTINELS 6 //#define RECURSE_LIMIT 20000 typedef PxU32 ABP_Index; static const bool gPrepareOverlapsFlag = true; #ifdef ABP_SIMD_OVERLAP static const bool gUseRegularBPKernel = false; // false to use "version 13" in box pruning series static const bool gUnrollLoop = true; // true to use "version 14" in box pruning series #else // PT: tested on Switch, for some reason the regular version is fastest there static const bool gUseRegularBPKernel = true; // false to use "version 13" in box pruning series static const bool gUnrollLoop = false; // true to use "version 14" in box pruning series //ABP_SIMD_OVERLAP //MBP.Add64KObjects 13982 ( +0.0%) 4757795 ( +0.0%) FAIL //MBP.AddBroadPhaseRegion 0 ( +0.0%) 3213795 ( +0.0%) FAIL //MBP.FinalizeOverlaps64KObjects 507 ( +0.0%) 5650723 ( +0.0%) FAIL //MBP.FindOverlaps64KMixedObjects 59258 ( +0.0%) 5170179 ( +0.0%) FAIL //MBP.FindOverlaps64KObjects 31351 ( +0.0%) 7122019 ( +0.0%) FAIL //MBP.Remove64KObjects 4993 ( +0.0%) 5281683 ( +0.0%) FAIL //MBP.Update64KObjects 13711 ( +0.0%) 5521699 ( +0.0%) FAIL //gUseRegularBPKernel: //MBP.Add64KObjects 14406 ( +0.0%) 4757795 ( +0.0%) FAIL //MBP.AddBroadPhaseRegion 0 ( +0.0%) 3213795 ( +0.0%) FAIL //MBP.FinalizeOverlaps64KObjects 504 ( +0.0%) 5650723 ( +0.0%) FAIL //MBP.FindOverlaps64KMixedObjects 48929 ( +0.0%) 5170179 ( +0.0%) FAIL //MBP.FindOverlaps64KObjects 25636 ( +0.0%) 7122019 ( +0.0%) FAIL //MBP.Remove64KObjects 4878 ( +0.0%) 5281683 ( +0.0%) FAIL //MBP.Update64KObjects 13932 ( +0.0%) 5521699 ( +0.0%) FAIL // false/true //MBP.Add64KObjects 14278 ( +0.0%) 4757795 ( +0.0%) FAIL //MBP.AddBroadPhaseRegion 0 ( +0.0%) 3213795 ( +0.0%) FAIL 
//MBP.FinalizeOverlaps64KObjects 504 ( +0.0%) 5650723 ( +0.0%) FAIL //MBP.FindOverlaps64KMixedObjects 60331 ( +0.0%) 5170179 ( +0.0%) FAIL //MBP.FindOverlaps64KObjects 32064 ( +0.0%) 7122019 ( +0.0%) FAIL //MBP.Remove64KObjects 4930 ( +0.0%) 5281683 ( +0.0%) FAIL //MBP.Update64KObjects 13673 ( +0.0%) 5521699 ( +0.0%) FAIL // false/false //MBP.Add64KObjects 13960 ( +0.0%) 4757795 ( +0.0%) FAIL //MBP.AddBroadPhaseRegion 0 ( +0.0%) 3213795 ( +0.0%) FAIL //MBP.FinalizeOverlaps64KObjects 503 ( +0.0%) 5650723 ( +0.0%) FAIL //MBP.FindOverlaps64KMixedObjects 48549 ( +0.0%) 5170179 ( +0.0%) FAIL //MBP.FindOverlaps64KObjects 25598 ( +0.0%) 7122019 ( +0.0%) FAIL //MBP.Remove64KObjects 4883 ( +0.0%) 5281683 ( +0.0%) FAIL //MBP.Update64KObjects 13667 ( +0.0%) 5521699 ( +0.0%) FAIL #endif #ifdef ABP_USE_INTEGER_XS typedef PxU32 PosXType; #define SentinelValue 0xffffffff #else typedef float PosXType; #define SentinelValue FLT_MAX #endif #ifdef ABP_USE_INTEGER_XS2 typedef PxU32 PosXType2; #define SentinelValue2 0xffffffff #else #ifdef ABP_USE_INTEGER_XS typedef PxU32 PosXType2; #define SentinelValue2 0xffffffff #else typedef float PosXType2; #define SentinelValue2 FLT_MAX #endif #endif namespace internalABP { struct SIMD_AABB4 : public PxUserAllocated { PX_FORCE_INLINE void initFrom2(const PxBounds3& box) { #ifdef ABP_USE_INTEGER_XS mMinX = encodeFloat(PX_IR(box.minimum.x)); mMaxX = encodeFloat(PX_IR(box.maximum.x)); mMinY = box.minimum.y; mMinZ = box.minimum.z; mMaxY = box.maximum.y; mMaxZ = box.maximum.z; #else mMinX = box.minimum.x; mMinY = box.minimum.y; mMinZ = box.minimum.z; mMaxX = box.maximum.x; mMaxY = box.maximum.y; mMaxZ = box.maximum.z; #endif } PX_FORCE_INLINE void operator = (const SIMD_AABB4& box) { mMinX = box.mMinX; mMinY = box.mMinY; mMinZ = box.mMinZ; mMaxX = box.mMaxX; mMaxY = box.mMaxY; mMaxZ = box.mMaxZ; } PX_FORCE_INLINE void initSentinel() { mMinX = SentinelValue; } PX_FORCE_INLINE bool isSentinel() const { return mMinX == SentinelValue; } #ifdef 
USE_ABP_BUCKETS // PT: to be able to compute bounds easily PosXType mMinX; float mMinY; float mMinZ; PosXType mMaxX; float mMaxY; float mMaxZ; #else PosXType mMinX; PosXType mMaxX; float mMinY; float mMinZ; float mMaxY; float mMaxZ; #endif }; #define USE_SHARED_CLASSES #ifdef USE_SHARED_CLASSES struct SIMD_AABB_X4 : public AABB_Xi { PX_FORCE_INLINE void initFrom(const SIMD_AABB4& box) { #ifdef ABP_USE_INTEGER_XS2 initFromFloats(&box.mMinX, &box.mMaxX); #else mMinX = box.mMinX; mMaxX = box.mMaxX; #endif } }; PX_ALIGN_PREFIX(16) #ifdef ABP_SIMD_OVERLAP struct SIMD_AABB_YZ4 : AABB_YZn { PX_FORCE_INLINE void initFrom(const SIMD_AABB4& box) { #ifdef ABP_SIMD_OVERLAP mMinY = -box.mMinY; mMinZ = -box.mMinZ; #else mMinY = box.mMinY; mMinZ = box.mMinZ; #endif mMaxY = box.mMaxY; mMaxZ = box.mMaxZ; } } #else struct SIMD_AABB_YZ4 : AABB_YZr { PX_FORCE_INLINE void initFrom(const SIMD_AABB4& box) { mMinY = box.mMinY; mMinZ = box.mMinZ; mMaxY = box.mMaxY; mMaxZ = box.mMaxZ; } } #endif PX_ALIGN_SUFFIX(16); #else struct SIMD_AABB_X4 : public PxUserAllocated { PX_FORCE_INLINE void initFromFloats(const void* PX_RESTRICT minX, const void* PX_RESTRICT maxX) { mMinX = encodeFloat(*reinterpret_cast<const PxU32*>(minX)); mMaxX = encodeFloat(*reinterpret_cast<const PxU32*>(maxX)); } PX_FORCE_INLINE void initFrom(const SIMD_AABB4& box) { #ifdef ABP_USE_INTEGER_XS2 initFromFloats(&box.mMinX, &box.mMaxX); #else mMinX = box.mMinX; mMaxX = box.mMaxX; #endif } PX_FORCE_INLINE void initFromPxVec4(const PxVec4& min, const PxVec4& max) { #ifdef ABP_USE_INTEGER_XS2 initFromFloats(&min.x, &max.x); #else #ifdef ABP_USE_INTEGER_XS initFromFloats(&min.x, &max.x); #else mMinX = min.x; mMaxX = max.x; #endif #endif } PX_FORCE_INLINE void operator = (const SIMD_AABB_X4& box) { mMinX = box.mMinX; mMaxX = box.mMaxX; } PX_FORCE_INLINE void initSentinel() { mMinX = SentinelValue2; } PX_FORCE_INLINE bool isSentinel() const { return mMinX == SentinelValue2; } PosXType2 mMinX; PosXType2 mMaxX; }; struct 
SIMD_AABB_YZ4 : public PxUserAllocated { PX_FORCE_INLINE void initFrom(const SIMD_AABB4& box) { #ifdef ABP_SIMD_OVERLAP mMinY = -box.mMinY; mMinZ = -box.mMinZ; #else mMinY = box.mMinY; mMinZ = box.mMinZ; #endif mMaxY = box.mMaxY; mMaxZ = box.mMaxZ; } PX_FORCE_INLINE void initFromPxVec4(const PxVec4& min, const PxVec4& max) { #ifdef ABP_SIMD_OVERLAP mMinY = -min.y; mMinZ = -min.z; #else mMinY = min.y; mMinZ = min.z; #endif mMaxY = max.y; mMaxZ = max.z; } PX_FORCE_INLINE void operator = (const SIMD_AABB_YZ4& box) { V4StoreA(V4LoadA(&box.mMinY), &mMinY); } float mMinY; float mMinZ; float mMaxY; float mMaxZ; }; #endif #define MBP_ALLOC(x) PX_ALLOC(x, "MBP") #define MBP_ALLOC_TMP(x) PX_ALLOC(x, "MBP_TMP") #define MBP_FREE(x) PX_FREE(x) #define INVALID_ID 0xffffffff /////////////////////////////////////////////////////////////////////////////// #define DEFAULT_NB_ENTRIES 128 class ABP_MM { public: ABP_MM() : mScratchAllocator(NULL) {} ~ABP_MM() {} void* frameAlloc(PxU32 size); void frameFree(void* address); PxcScratchAllocator* mScratchAllocator; }; void* ABP_MM::frameAlloc(PxU32 size) { if(mScratchAllocator) return mScratchAllocator->alloc(size, true); return PX_ALLOC(size, "frameAlloc"); } void ABP_MM::frameFree(void* address) { if(mScratchAllocator) mScratchAllocator->free(address); else PX_FREE(address); } template<class T> static T* resizeBoxesT(PxU32 oldNbBoxes, PxU32 newNbBoxes, T* boxes) { T* newBoxes = reinterpret_cast<T*>(MBP_ALLOC(sizeof(T)*newNbBoxes)); if(oldNbBoxes) PxMemCopy(newBoxes, boxes, oldNbBoxes*sizeof(T)); MBP_FREE(boxes); return newBoxes; } class Boxes { public: Boxes(); ~Boxes(); PX_FORCE_INLINE void init(const Boxes& boxes){ mSize = boxes.mSize; mCapacity = boxes.mCapacity; } PX_FORCE_INLINE PxU32 getSize() const { return mSize; } PX_FORCE_INLINE PxU32 getCapacity() const { return mCapacity; } PX_FORCE_INLINE bool isFull() const { return mSize==mCapacity; } PX_FORCE_INLINE void reset() { mSize = mCapacity = 0; } PX_FORCE_INLINE PxU32 popBack() { 
return --mSize; } // protected: PxU32 mSize; PxU32 mCapacity; }; Boxes::Boxes() : mSize (0), mCapacity (0) { } Boxes::~Boxes() { reset(); } class StraightBoxes : public Boxes { public: StraightBoxes(); ~StraightBoxes(); void init(PxU32 size, PxU32 capacity, SIMD_AABB4* boxes); void reset(); PxU32 resize(); PxU32 resize(PxU32 incoming); bool allocate(PxU32 nb); PX_FORCE_INLINE const SIMD_AABB4* getBoxes() const { return mBoxes; } PX_FORCE_INLINE SIMD_AABB4* getBoxes() { return mBoxes; } PX_FORCE_INLINE void setBounds(PxU32 index, const SIMD_AABB4& box) { PX_ASSERT(index<mSize); mBoxes[index] = box; } PX_FORCE_INLINE PxU32 pushBack(const SIMD_AABB4& box) { const PxU32 index = mSize++; setBounds(index, box); return index; } private: SIMD_AABB4* mBoxes; }; StraightBoxes::StraightBoxes() : mBoxes (NULL) { } StraightBoxes::~StraightBoxes() { reset(); } void StraightBoxes::reset() { PX_DELETE_ARRAY(mBoxes); Boxes::reset(); } void StraightBoxes::init(PxU32 size, PxU32 capacity, SIMD_AABB4* boxes) { reset(); mSize = size; mCapacity = capacity; mBoxes = boxes; } PxU32 StraightBoxes::resize() { const PxU32 capacity = mCapacity; const PxU32 size = mSize; // const PxU32 newCapacity = capacity ? capacity + DEFAULT_NB_ENTRIES : DEFAULT_NB_ENTRIES; // const PxU32 newCapacity = capacity ? capacity*2 : DEFAULT_NB_ENTRIES; const PxU32 newCapacity = capacity ? capacity*2 : DEFAULT_NB_ENTRIES; // PT: we allocate one extra box for safe SIMD loads mBoxes = resizeBoxesT(size, newCapacity+1, mBoxes); mCapacity = newCapacity; return newCapacity; } PxU32 StraightBoxes::resize(PxU32 incoming) { const PxU32 capacity = mCapacity; const PxU32 size = mSize; const PxU32 minCapacity = size + incoming; if(minCapacity<capacity) return capacity; PxU32 newCapacity = capacity ? 
capacity*2 : DEFAULT_NB_ENTRIES; if(newCapacity<minCapacity) newCapacity=minCapacity; // PT: we allocate one extra box for safe SIMD loads mBoxes = resizeBoxesT(size, newCapacity+1, mBoxes); mCapacity = newCapacity; return newCapacity; } bool StraightBoxes::allocate(PxU32 nb) { if(nb<=mSize) return false; PX_DELETE_ARRAY(mBoxes); // PT: we allocate NB_SENTINELS more boxes than necessary here so we don't need to allocate one more for SIMD-load safety mBoxes = PX_NEW(SIMD_AABB4)[nb+NB_SENTINELS]; mSize = mCapacity = nb; return true; } class SplitBoxes : public Boxes { public: SplitBoxes(); ~SplitBoxes(); void init(PxU32 size, PxU32 capacity, SIMD_AABB_X4* boxes_X, SIMD_AABB_YZ4* boxes_YZ); void init(const SplitBoxes& boxes); void reset(bool freeMemory = true); PxU32 resize(); PxU32 resize(PxU32 incoming); bool allocate(PxU32 nb); PX_FORCE_INLINE const SIMD_AABB_X4* getBoxes_X() const { return mBoxes_X; } PX_FORCE_INLINE SIMD_AABB_X4* getBoxes_X() { return mBoxes_X; } PX_FORCE_INLINE const SIMD_AABB_YZ4* getBoxes_YZ() const { return mBoxes_YZ; } PX_FORCE_INLINE SIMD_AABB_YZ4* getBoxes_YZ() { return mBoxes_YZ; } PX_FORCE_INLINE void setBounds(PxU32 index, const PxVec4& min, const PxVec4& max) { PX_ASSERT(index<mSize); mBoxes_X[index].initFromPxVec4(min, max); mBoxes_YZ[index].initFromPxVec4(min, max); } PX_FORCE_INLINE void setBounds(PxU32 index, const SIMD_AABB4& box) { PX_ASSERT(index<mSize); mBoxes_X[index].initFrom(box); mBoxes_YZ[index].initFrom(box); } PX_FORCE_INLINE PxU32 pushBack(const SIMD_AABB4& box) { const PxU32 index = mSize++; setBounds(index, box); return index; } private: SIMD_AABB_X4* mBoxes_X; SIMD_AABB_YZ4* mBoxes_YZ; }; SplitBoxes::SplitBoxes() : mBoxes_X (NULL), mBoxes_YZ (NULL) { } SplitBoxes::~SplitBoxes() { reset(); } void SplitBoxes::reset(bool freeMemory) { if(freeMemory) { MBP_FREE(mBoxes_YZ); MBP_FREE(mBoxes_X); } mBoxes_X = NULL; mBoxes_YZ = NULL; Boxes::reset(); } void SplitBoxes::init(PxU32 size, PxU32 capacity, SIMD_AABB_X4* boxes_X, 
SIMD_AABB_YZ4* boxes_YZ) { reset(); mSize = size; mCapacity = capacity; mBoxes_X = boxes_X; mBoxes_YZ = boxes_YZ; } void SplitBoxes::init(const SplitBoxes& boxes) { reset(); Boxes::init(boxes); mBoxes_X = const_cast<SIMD_AABB_X4*>(boxes.getBoxes_X()); mBoxes_YZ = const_cast<SIMD_AABB_YZ4*>(boxes.getBoxes_YZ()); } PxU32 SplitBoxes::resize() { const PxU32 capacity = mCapacity; const PxU32 size = mSize; // const PxU32 newCapacity = capacity ? capacity + DEFAULT_NB_ENTRIES : DEFAULT_NB_ENTRIES; // const PxU32 newCapacity = capacity ? capacity*2 : DEFAULT_NB_ENTRIES; const PxU32 newCapacity = capacity ? capacity*2 : DEFAULT_NB_ENTRIES; mBoxes_X = resizeBoxesT(size, newCapacity, mBoxes_X); mBoxes_YZ = resizeBoxesT(size, newCapacity, mBoxes_YZ); mCapacity = newCapacity; return newCapacity; } PxU32 SplitBoxes::resize(PxU32 incoming) { const PxU32 capacity = mCapacity; const PxU32 size = mSize; const PxU32 minCapacity = size + incoming; if(minCapacity<capacity) return capacity; PxU32 newCapacity = capacity ? capacity*2 : DEFAULT_NB_ENTRIES; if(newCapacity<minCapacity) newCapacity=minCapacity; mBoxes_X = resizeBoxesT(size, newCapacity, mBoxes_X); mBoxes_YZ = resizeBoxesT(size, newCapacity, mBoxes_YZ); mCapacity = newCapacity; return newCapacity; } bool SplitBoxes::allocate(PxU32 nb) { if(nb<=mSize) return false; MBP_FREE(mBoxes_YZ); MBP_FREE(mBoxes_X); mBoxes_X = reinterpret_cast<SIMD_AABB_X4*>(MBP_ALLOC(sizeof(SIMD_AABB_X4)*(nb+NB_SENTINELS))); mBoxes_YZ = reinterpret_cast<SIMD_AABB_YZ4*>(MBP_ALLOC(sizeof(SIMD_AABB_YZ4)*nb)); PX_ASSERT(!(size_t(mBoxes_YZ) & 15)); mSize = mCapacity = nb; return true; } typedef SplitBoxes StaticBoxes; typedef SplitBoxes DynamicBoxes; /////////////////////////////////////////////////////////////////////////////// struct ABP_Object : public PxUserAllocated { PX_FORCE_INLINE ABP_Object() : mIndex(INVALID_ID) { #if PX_DEBUG mUpdated = false; #endif } private: PxU32 mIndex; // Out-to-in, maps user handle to internal array. 
mIndex indexes either the static or dynamic array. // PT: the type won't be available for removed objects so we have to store it there. That uses 2 bits. // Then the "data" will need one more bit for marking sleeping objects so that leaves 28bits for the actual index. PX_FORCE_INLINE void setData(PxU32 index, FilterType::Enum type) { // mIndex = index; index <<= 2; index |= type; mIndex = index; } public: // PT: TODO: rename "index" to data everywhere PX_FORCE_INLINE void setActiveIndex(PxU32 index, FilterType::Enum type) { const PxU32 boxData = (index+index); setData(boxData, type); } PX_FORCE_INLINE void setSleepingIndex(PxU32 index, FilterType::Enum type) { const PxU32 boxData = (index+index)|1; PX_ASSERT(getType()==type); setData(boxData, type); } PX_FORCE_INLINE FilterType::Enum getType() const { return FilterType::Enum(mIndex&3); } PX_FORCE_INLINE PxU32 getData() const { return mIndex>>2; } PX_FORCE_INLINE void invalidateIndex() { mIndex = INVALID_ID; } PX_FORCE_INLINE bool isValid() const { return mIndex != INVALID_ID; } #if PX_DEBUG bool mUpdated; #endif }; typedef ABP_Object ABPEntry; /////////////////////////////////////////////////////////////////////////////// //#define BIT_ARRAY_STACK 512 static PX_FORCE_INLINE PxU32 bitsToDwords(PxU32 nbBits) { return (nbBits>>5) + ((nbBits&31) ? 1 : 0); } // Use that one instead of an array of bools. Takes less ram, nearly as fast [no bounds checkings and so on]. 
class BitArray { public: BitArray(); BitArray(PxU32 nbBits); ~BitArray(); bool init(PxU32 nbBits); void empty(); void resize(PxU32 nbBits); PX_FORCE_INLINE void checkResize(PxU32 bitNumber) { const PxU32 index = bitNumber>>5; if(index>=mSize) resize(bitNumber); } PX_FORCE_INLINE void setBitChecked(PxU32 bitNumber) { const PxU32 index = bitNumber>>5; if(index>=mSize) resize(bitNumber); mBits[index] |= 1<<(bitNumber&31); } PX_FORCE_INLINE void clearBitChecked(PxU32 bitNumber) { const PxU32 index = bitNumber>>5; if(index>=mSize) resize(bitNumber); mBits[index] &= ~(1<<(bitNumber&31)); } // Data management PX_FORCE_INLINE void setBit(PxU32 bitNumber) { mBits[bitNumber>>5] |= 1<<(bitNumber&31); } PX_FORCE_INLINE void clearBit(PxU32 bitNumber) { mBits[bitNumber>>5] &= ~(1<<(bitNumber&31)); } PX_FORCE_INLINE void toggleBit(PxU32 bitNumber) { mBits[bitNumber>>5] ^= 1<<(bitNumber&31); } PX_FORCE_INLINE void clearAll() { PxMemZero(mBits, mSize*4); } PX_FORCE_INLINE void setAll() { PxMemSet(mBits, 0xff, mSize*4); } // Data access PX_FORCE_INLINE PxIntBool isSet(PxU32 bitNumber) const { return PxIntBool(mBits[bitNumber>>5] & (1<<(bitNumber&31))); } PX_FORCE_INLINE PxIntBool isSetChecked(PxU32 bitNumber) const { const PxU32 index = bitNumber>>5; if(index>=mSize) return 0; return PxIntBool(mBits[index] & (1<<(bitNumber&31))); } PX_FORCE_INLINE const PxU32* getBits() const { return mBits; } PX_FORCE_INLINE PxU32 getSize() const { return mSize; } protected: PxU32* mBits; //!< Array of bits PxU32 mSize; //!< Size of the array in dwords #ifdef BIT_ARRAY_STACK PxU32 mStack[BIT_ARRAY_STACK]; #endif }; /////////////////////////////////////////////////////////////////////////////// BitArray::BitArray() : mBits(NULL), mSize(0) { } BitArray::BitArray(PxU32 nbBits) : mBits(NULL), mSize(0) { init(nbBits); } BitArray::~BitArray() { empty(); } void BitArray::empty() { #ifdef BIT_ARRAY_STACK if(mBits!=mStack) #endif MBP_FREE(mBits); mBits = NULL; mSize = 0; } bool BitArray::init(PxU32 nbBits) 
{ mSize = bitsToDwords(nbBits); // Get ram for n bits #ifdef BIT_ARRAY_STACK if(mBits!=mStack) #endif MBP_FREE(mBits); #ifdef BIT_ARRAY_STACK if(mSize>BIT_ARRAY_STACK) #endif mBits = reinterpret_cast<PxU32*>(MBP_ALLOC(sizeof(PxU32)*mSize)); #ifdef BIT_ARRAY_STACK else mBits = mStack; #endif // Set all bits to 0 clearAll(); return true; } void BitArray::resize(PxU32 nbBits) { const PxU32 newSize = bitsToDwords(nbBits+128); PxU32* newBits = NULL; #ifdef BIT_ARRAY_STACK if(newSize>BIT_ARRAY_STACK) #endif { // Old buffer was stack or allocated, new buffer is allocated newBits = reinterpret_cast<PxU32*>(MBP_ALLOC(sizeof(PxU32)*newSize)); if(mSize) PxMemCopy(newBits, mBits, sizeof(PxU32)*mSize); } #ifdef BIT_ARRAY_STACK else { newBits = mStack; if(mSize>BIT_ARRAY_STACK) { // Old buffer was allocated, new buffer is stack => copy to stack, shrink CopyMemory(newBits, mBits, sizeof(PxU32)*BIT_ARRAY_STACK); } else { // Old buffer was stack, new buffer is stack => keep working on the same stack buffer, nothing to do } } #endif const PxU32 remain = newSize - mSize; if(remain) PxMemZero(newBits + mSize, remain*sizeof(PxU32)); #ifdef BIT_ARRAY_STACK if(mBits!=mStack) #endif MBP_FREE(mBits); mBits = newBits; mSize = newSize; } /////////////////////////////////////////////////////////////////////////////// static ABP_Index* resizeMapping(PxU32 oldNbBoxes, PxU32 newNbBoxes, ABP_Index* mapping) { ABP_Index* newMapping = reinterpret_cast<ABP_Index*>(MBP_ALLOC(sizeof(ABP_Index)*newNbBoxes)); if(oldNbBoxes) PxMemCopy(newMapping, mapping, oldNbBoxes*sizeof(ABP_Index)); MBP_FREE(mapping); return newMapping; } struct ABP_Object; #ifdef ABP_MT struct DelayedPair { PxU32 mID0; PxU32 mID1; PxU32 mHash; }; #endif class ABP_PairManager : public PairManagerData { public: ABP_PairManager(); ~ABP_PairManager(); InternalPair* addPair (PxU32 id0, PxU32 id1); void computeCreatedDeletedPairs (PxArray<BroadPhasePair>& createdPairs, PxArray<BroadPhasePair>& deletedPairs, const BitArray& updated, const 
BitArray& removed); #ifdef ABP_MT void addDelayedPair (PxArray<DelayedPair>& delayedPairs, const ABP_Index* mInToOut0, const ABP_Index* mInToOut1, PxU32 index0, PxU32 index1) const; void addDelayedPairs (const PxArray<DelayedPair>& delayedPairs); void addDelayedPairs2(PxArray<BroadPhasePair>& createdPairs, const PxArray<DelayedPair>& delayedPairs); void resizeForNewPairs(PxU32 nbDelayedPairs); #endif const Bp::FilterGroup::Enum* mGroups; const ABP_Index* mInToOut0; const ABP_Index* mInToOut1; const bool* mLUT; }; /////////////////////////////////////////////////////////////////////////// struct ABP_SharedData { PX_FORCE_INLINE ABP_SharedData() : mABP_Objects (NULL), mABP_Objects_Capacity (0) { } void resize(BpHandle userID); PX_FORCE_INLINE void checkResize(PxU32 maxID) { if(mABP_Objects_Capacity<maxID+1) resize(maxID); } ABP_Object* mABP_Objects; PxU32 mABP_Objects_Capacity; BitArray mUpdatedObjects; // Indexed by ABP_ObjectIndex BitArray mRemovedObjects; // Indexed by ABP_ObjectIndex }; void ABP_SharedData::resize(BpHandle userID) { const PxU32 oldCapacity = mABP_Objects_Capacity; PxU32 newCapacity = mABP_Objects_Capacity ? 
mABP_Objects_Capacity*2 : 256; if(newCapacity<userID+1) newCapacity = userID+1; ABP_Object* newObjects = PX_NEW(ABP_Object)[newCapacity]; if(mABP_Objects) PxMemCopy(newObjects, mABP_Objects, oldCapacity*sizeof(ABP_Object)); #if PX_DEBUG for(PxU32 i=oldCapacity;i<newCapacity;i++) newObjects[i].mUpdated = false; #endif PX_DELETE_ARRAY(mABP_Objects); mABP_Objects = newObjects; mABP_Objects_Capacity = newCapacity; } class BoxManager { public: BoxManager(FilterType::Enum type); ~BoxManager(); void reset(); void setSourceData(const PxBounds3* bounds, const float* distances) { mAABBManagerBounds = bounds; mAABBManagerDistances = distances; } void addObjects(const BpHandle* PX_RESTRICT userIDs, PxU32 nb, ABP_SharedData* PX_RESTRICT sharedData); void removeObject(ABPEntry& object, BpHandle userID); void updateObject(ABPEntry& object, BpHandle userID); void prepareData(RadixSortBuffered& rs, ABP_Object* PX_RESTRICT objects, PxU32 objectsCapacity, ABP_MM& memoryManager, PxU64 contextID); // PX_FORCE_INLINE PxU32 isThereWorkToDo() const { return mNbUpdated; } PX_FORCE_INLINE bool isThereWorkToDo() const { return mNbUpdated || mNbRemovedSleeping; } // PT: temp & test, maybe we do that differently in the end PX_FORCE_INLINE PxU32 getNbUpdatedBoxes() const { return mNbUpdated; } PX_FORCE_INLINE PxU32 getNbNonUpdatedBoxes() const { return mNbSleeping; } PX_FORCE_INLINE const DynamicBoxes& getUpdatedBoxes() const { return mUpdatedBoxes; } PX_FORCE_INLINE const DynamicBoxes& getSleepingBoxes() const { return mSleepingBoxes; } PX_FORCE_INLINE const ABP_Index* getRemap_Updated() const { return mInToOut_Updated; } PX_FORCE_INLINE const ABP_Index* getRemap_Sleeping() const { return mInToOut_Sleeping; } #ifdef USE_ABP_BUCKETS PX_FORCE_INLINE const PxBounds3& getUpdatedBounds() const { return mUpdatedBounds; } #endif private: FilterType::Enum mType; // PT: refs to source data (not owned). Currently separate arrays, ideally should be merged. 
const PxBounds3* mAABBManagerBounds; const float* mAABBManagerDistances; // New & updated objects #ifdef USE_ABP_BUCKETS PxBounds3 mUpdatedBounds; // Bounds around updated dynamic objects, computed in prepareData(). #endif ABP_Index* mInToOut_Updated; // Maps boxes to mABP_Objects PxU32 mNbUpdated; PxU32 mMaxNbUpdated; DynamicBoxes mUpdatedBoxes; // Sleeping objects ABP_Index* mInToOut_Sleeping; // Maps boxes to mABP_Objects PxU32 mNbSleeping; DynamicBoxes mSleepingBoxes; // Removed sleeping PxU32 mNbRemovedSleeping; void purgeRemovedFromSleeping(ABP_Object* PX_RESTRICT objects, PxU32 objectsCapacity); }; BoxManager::BoxManager(FilterType::Enum type) : mType (type), mAABBManagerBounds (NULL), mAABBManagerDistances (NULL), mInToOut_Updated (NULL), mNbUpdated (0), mMaxNbUpdated (0), mInToOut_Sleeping (NULL), mNbSleeping (0), mNbRemovedSleeping (0) { } BoxManager::~BoxManager() { reset(); } void BoxManager::reset() { mMaxNbUpdated = mNbUpdated = mNbSleeping = 0; PX_FREE(mInToOut_Updated); PX_FREE(mInToOut_Sleeping); mUpdatedBoxes.reset(); mSleepingBoxes.reset(); } static PX_FORCE_INLINE PxU32 isNewOrUpdated(PxU32 data) { return data & PX_SIGN_BITMASK; } static PX_FORCE_INLINE PxU32 markAsNewOrUpdated(PxU32 data) { return data | PX_SIGN_BITMASK; } static PX_FORCE_INLINE PxU32 removeNewOrUpdatedMark(PxU32 data) { return data & ~PX_SIGN_BITMASK; } // BpHandle = index in main/shared arrays like mAABBManagerBounds / mAABBManagerDistances PX_COMPILE_TIME_ASSERT(sizeof(BpHandle)==sizeof(ABP_Index)); void BoxManager::addObjects(const BpHandle* PX_RESTRICT userIDs, PxU32 nb, ABP_SharedData* PX_RESTRICT sharedData) { // PT: we're called for each batch. 
// PT: TODO: fix the BpHandle/ABP_Index mix

	// Grow the "updated" remap table if this batch does not fit (min 1024 entries, then doubling).
	const PxU32 currentSize = mNbUpdated;
	const PxU32 currentCapacity = mMaxNbUpdated;
	const PxU32 newSize = currentSize + nb;
	ABP_Index* remap;
	if(newSize>currentCapacity)
	{
		const PxU32 minCapacity = PxMax(newSize, 1024u);
		const PxU32 newCapacity = PxMax(minCapacity, currentCapacity*2);
		PX_ASSERT(newCapacity>=newSize);
		mMaxNbUpdated = newCapacity;
		remap = resizeMapping(currentSize, newCapacity, mInToOut_Updated);
	}
	else
	{
		remap = mInToOut_Updated;
	}
	mInToOut_Updated = remap;
	mNbUpdated = newSize;

	// PT: we only copy the new handles for now. The bounds will be computed later in "prepareData".
	// PT: TODO: do we even need to copy them? Can't we just reuse the source ptr directly?
	{
		PX_ASSERT(currentSize+nb<=mMaxNbUpdated);
		remap += currentSize;
		PxU32 nbToGo = nb;
		while(nbToGo--)
		{
			const BpHandle userID = *userIDs++;
			PX_ASSERT(!isNewOrUpdated(userID));
			// Store the handle with the sign bit set so prepareData() sees it as new/updated.
			*remap++ = markAsNewOrUpdated(userID);
			if(sharedData)
				sharedData->mUpdatedObjects.setBit(userID);
		}
	}
}

// PT: TODO: inline this again
void BoxManager::removeObject(ABPEntry& object, BpHandle userID)
{
	PX_UNUSED(userID);
	// The object's packed data encodes a sleeping flag in bit 0 and the box index in the remaining bits.
	const PxU32 boxData = object.getData();
	const PxU32 boxIndex = boxData>>1;
	if(boxData&1)
	{
		// Sleeping object. Removal is lazy: the remap entry is invalidated and the actual
		// compaction of the sleeping arrays is deferred (see purgeRemovedFromSleeping).
		PX_ASSERT(boxIndex<mNbSleeping);
		PX_ASSERT(mInToOut_Sleeping[boxIndex]==userID);
		PX_ASSERT(mInToOut_Sleeping[boxIndex] != INVALID_ID);
		// PT: can that happen if we update and remove an object in the same frame or does the AABB take care of it?
		mInToOut_Sleeping[boxIndex] = INVALID_ID;
		mNbRemovedSleeping++;
		PX_ASSERT(mNbRemovedSleeping<=mNbSleeping);
	}
	else
	{
		// PT: remove active object, i.e. one that was previously in "updated" arrays.
		// Invalid entries are filtered out when prepareData() rebuilds the updated buffers.
		PX_ASSERT(boxIndex<mNbUpdated);
		PX_ASSERT(boxIndex<mMaxNbUpdated);
		PX_ASSERT(mInToOut_Updated[boxIndex]==userID);
		PX_ASSERT(mInToOut_Updated[boxIndex] != INVALID_ID);
		// PT: TODO: do we need this at all? We could use 'userID' to access the removed bitmap...
		mInToOut_Updated[boxIndex] = INVALID_ID;
	}
}

// PT: TODO: inline this again
void BoxManager::updateObject(ABPEntry& object, BpHandle userID)
{
	PX_UNUSED(userID);
	const PxU32 boxData = object.getData();
	const PxU32 boxIndex = boxData>>1;
	if(boxData&1)
	{
		// PT: benchmark for this codepath: MBP.UpdateSleeping
		// Sleeping object. We must reactivate it, i.e:
		// - remove it from the array of sleeping objects
		// - add it to the array of active/updated objects

		// First we remove:
		{
			PX_ASSERT(boxIndex<mNbSleeping);
			PX_ASSERT(mInToOut_Sleeping[boxIndex]==userID);
			PX_ASSERT(mInToOut_Sleeping[boxIndex] != INVALID_ID);
			mInToOut_Sleeping[boxIndex] = INVALID_ID;
			mNbRemovedSleeping++;
			PX_ASSERT(mNbRemovedSleeping<=mNbSleeping);
		}

		// Then we add
		// PT: TODO: revisit / improve this maybe
		addObjects(&userID, 1, NULL);	// Don't pass sharedData because the bitmap has already been updated by the calling code
	}
	else
	{
		// Active object, i.e. it was updated in previous frame and it's already in mInToOut_Updated array
		PX_ASSERT(boxIndex<mNbUpdated);
		PX_ASSERT(boxIndex<mMaxNbUpdated);
		PX_ASSERT(mInToOut_Updated[boxIndex]==userID);
		mInToOut_Updated[boxIndex] = markAsNewOrUpdated(mInToOut_Updated[boxIndex]);
	}
}

#if PX_DEBUG
// Debug-only reference implementation: recomputes an object's inflated (by contact distance)
// bounds, used to validate the bounds produced by the main codepaths.
static PX_FORCE_INLINE void computeMBPBounds_Check(SIMD_AABB4& aabb, const PxBounds3* PX_RESTRICT boundsXYZ, const PxReal* PX_RESTRICT contactDistances, const BpHandle index)
{
	const PxBounds3& b = boundsXYZ[index];
	const Vec4V contactDistanceV = V4Load(contactDistances[index]);
	const Vec4V inflatedMinV = V4Sub(V4LoadU(&b.minimum.x), contactDistanceV);
	const Vec4V inflatedMaxV = V4Add(V4LoadU(&b.maximum.x), contactDistanceV);	// PT: this one is safe because we allocated one more box in the array (in BoundsArray::initEntry)

	PX_ALIGN(16, PxVec4) boxMin;
	PX_ALIGN(16, PxVec4) boxMax;
	V4StoreA(inflatedMinV, &boxMin.x);
	V4StoreA(inflatedMaxV, &boxMax.x);
	aabb.mMinX = boxMin[0];
	aabb.mMinY = boxMin[1];
	aabb.mMinZ = boxMin[2];
	aabb.mMaxX = boxMax[0];
	aabb.mMaxY = boxMax[1];
	aabb.mMaxZ = boxMax[2];
}
#endif

// Writes NB_SENTINELS terminator boxes past the end of a box array; the pruning loops
// rely on these sentinels instead of explicit bounds checks.
static PX_FORCE_INLINE void initSentinels(SIMD_AABB_X4* PX_RESTRICT boxesX, const PxU32 size)
{
	for(PxU32 i=0;i<NB_SENTINELS;i++)
		boxesX[size+i].initSentinel();
}

void BoxManager::purgeRemovedFromSleeping(ABP_Object* PX_RESTRICT objects, PxU32 objectsCapacity)
{
	CHECKPOINT("purgeRemovedFromSleeping\n");
	PX_UNUSED(objectsCapacity);

	PX_ASSERT(mNbRemovedSleeping);
	PX_ASSERT(mNbSleeping);

	// PT: TODO: do we need to allocate separate buffers here?

	// PT: we reach this codepath when:
	// - no object has been added or updated
	// - sleeping objects have been removed
	// So we have to purge the removed objects from the sleeping array. We cannot entirely ignore the removals since we compute collisions
	// between sleeping arrays and active arrays for bipartite cases. So we either have to remove the invalid entries immediately, or make
	// sure they don't report collisions. We could ignore collisions when the remapped ID is "INVALID_ID" but that would be an additional
	// test for each potential pair, i.e. it's a constant cost. We cannot tweak the removed bounding boxes (e.g. mark them as empty) because
	// they are sorted, and the tweak would break the sorting and the collision loop. Keeping all removed objects in the array also means
	// there is more data to parse all the time, i.e. there is a performance cost again. So for now we just remove all deleted entries here.
	// ==> also tweaking the sleeping boxes might break the "merge sleeping" array code

	PX_ASSERT(mNbRemovedSleeping<=mNbSleeping);
	if(mNbRemovedSleeping==mNbSleeping)
	{
		// PT: remove everything
		mSleepingBoxes.reset();
		PX_FREE(mInToOut_Sleeping);
		mNbSleeping = mNbRemovedSleeping = 0;
		return;
	}

	// Compacting preserves the existing sorted order, so no re-sort is needed afterwards.
	const PxU32 expectedTotal = mNbSleeping - mNbRemovedSleeping;
	PxU32 nbRemovedFound = 0;
	PxU32 nbSleepingLeft = 0;
	const PxU32 sleepCapacity = mSleepingBoxes.getCapacity();
	if(expectedTotal>=sleepCapacity/2)
	{
		// PT: remove holes, keep same data buffers
		SIMD_AABB_X4* boxesX = mSleepingBoxes.getBoxes_X();
		SIMD_AABB_YZ4* boxesYZ = mSleepingBoxes.getBoxes_YZ();
		ABP_Index* remap = mInToOut_Sleeping;
		for(PxU32 i=0;i<mNbSleeping;i++)
		{
			const PxU32 boxIndex = remap[i];
			if(boxIndex==INVALID_ID)
			{
				nbRemovedFound++;
			}
			else
			{
				PX_ASSERT(nbSleepingLeft<expectedTotal);
				if(i!=nbSleepingLeft)
				{
					remap[nbSleepingLeft] = boxIndex;
					boxesX[nbSleepingLeft] = boxesX[i];
					boxesYZ[nbSleepingLeft] = boxesYZ[i];
				}
				{
					// Keep the per-object back-reference in sync with the box's new slot.
					PX_ASSERT(boxIndex<objectsCapacity);
					objects[boxIndex].setSleepingIndex(nbSleepingLeft, mType);
				}
				nbSleepingLeft++;
			}
		}
		PX_ASSERT(nbSleepingLeft==expectedTotal);
		PX_ASSERT(nbSleepingLeft+nbRemovedFound==mNbSleeping);
		initSentinels(boxesX, expectedTotal);
		mSleepingBoxes.mSize = expectedTotal;
	}
	else
	{
		// PT: remove holes, get fresh memory buffers
		SIMD_AABB_X4* dstBoxesX = reinterpret_cast<SIMD_AABB_X4*>(MBP_ALLOC(sizeof(SIMD_AABB_X4)*(expectedTotal+NB_SENTINELS)));
		SIMD_AABB_YZ4* dstBoxesYZ = reinterpret_cast<SIMD_AABB_YZ4*>(MBP_ALLOC(sizeof(SIMD_AABB_YZ4)*(expectedTotal+NB_SENTINELS)));
		initSentinels(dstBoxesX, expectedTotal);
		BpHandle* PX_RESTRICT dstRemap = reinterpret_cast<BpHandle*>(PX_ALLOC(expectedTotal*sizeof(BpHandle), "tmp"));

		const SIMD_AABB_X4* PX_RESTRICT srcDataX = mSleepingBoxes.getBoxes_X();
		const SIMD_AABB_YZ4* PX_RESTRICT srcDataYZ = mSleepingBoxes.getBoxes_YZ();
		const ABP_Index* PX_RESTRICT srcRemap = mInToOut_Sleeping;
		for(PxU32 i=0;i<mNbSleeping;i++)
		{
			const PxU32 boxIndex = srcRemap[i];
			if(boxIndex==INVALID_ID)
			{
				nbRemovedFound++;
			}
			else
			{
				PX_ASSERT(nbSleepingLeft<expectedTotal);
				dstRemap[nbSleepingLeft] = boxIndex;
				dstBoxesX[nbSleepingLeft] = srcDataX[i];
				dstBoxesYZ[nbSleepingLeft] = srcDataYZ[i];
				{
					PX_ASSERT(boxIndex<objectsCapacity);
					objects[boxIndex].setSleepingIndex(nbSleepingLeft, mType);
				}
				nbSleepingLeft++;
			}
		}
		PX_ASSERT(nbSleepingLeft==expectedTotal);
		PX_ASSERT(nbSleepingLeft+nbRemovedFound==mNbSleeping);

		// PT: TODO: double check all this
		mSleepingBoxes.init(expectedTotal, expectedTotal, dstBoxesX, dstBoxesYZ);
		PX_FREE(mInToOut_Sleeping);
		mInToOut_Sleeping = dstRemap;
	}
	mNbSleeping = expectedTotal;
	mNbRemovedSleeping = 0;
}

// Next min-X key from the already-sorted (newly sleeping) set, or the sentinel when exhausted.
static PX_FORCE_INLINE PosXType2 getNextCandidateSorted(PxU32 offsetSorted, const PxU32 nbSorted, const SIMD_AABB_X4* PX_RESTRICT sortedDataX, const PxU32* PX_RESTRICT sleepingIndices)
{
	return offsetSorted<nbSorted ? sortedDataX[sleepingIndices[offsetSorted]].mMinX : SentinelValue2;
}

// Next min-X key from the existing sleeping set, or the sentinel when exhausted.
static PX_FORCE_INLINE PosXType2 getNextCandidateNonSorted(PxU32 offsetNonSorted, const PxU32 nbToSort, const SIMD_AABB_X4* PX_RESTRICT toSortDataX)
{
	return offsetNonSorted<nbToSort ?
toSortDataX[offsetNonSorted].mMinX : SentinelValue2;
}

PX_COMPILE_TIME_ASSERT(sizeof(BpHandle)==sizeof(float));

// Main per-frame preparation: splits the "updated" array into updated / sleeping / removed
// groups, sorts the updated boxes along X, and merges newly-sleeping boxes into the sorted
// sleeping array. Note: the 'rs' parameter is commented out; 'StackRadixSort' below
// presumably declares a local sorter named 'rs' — TODO confirm against the macro definition.
void BoxManager::prepareData(RadixSortBuffered& /*rs*/, ABP_Object* PX_RESTRICT objects, PxU32 objectsCapacity, ABP_MM& memoryManager, PxU64 contextID)
{
	PX_UNUSED(contextID);

	// PT: mNbUpdated = number of objects in the updated buffer, could have been updated this frame or previous frame
	const PxU32 size = mNbUpdated;
	if(!size)
	{
		if(mNbRemovedSleeping)
		{
			// PT: benchmark for this codepath: MBP.RemoveHalfSleeping
			purgeRemovedFromSleeping(objects, objectsCapacity);
		}
		return;
	}

	PX_ASSERT(mAABBManagerBounds);
	PX_ASSERT(mAABBManagerDistances);
	PX_ASSERT(mInToOut_Updated);

	// Prepare new/updated objects
	const ABP_Index* PX_RESTRICT remap = mInToOut_Updated;
	const PxBounds3* PX_RESTRICT bounds = mAABBManagerBounds;
	const float* PX_RESTRICT distances = mAABBManagerDistances;

	float* PX_RESTRICT keys = NULL;

	// newOrUpdatedIDs: *userIDs* of objects that have been added or updated this frame.
	// sleepingIndices: *indices* (not userIDs) of non-updated objects within mInToOut_Updated
	// NOTE: both aliases share one buffer; updated IDs are written from the back
	// (size-1-nbUpdated) while sleeping indices are written from the front, so they never clash.
	PxU32* tempBuffer = NULL;
	PxU32* newOrUpdatedIDs = tempBuffer;
	PxU32* sleepingIndices = tempBuffer;

	// PT: mNbUpdated / mInToOut_Updated contains:
	// 1) objects added this frame (from addObject(s))
	// 2) objects updated this frame (from updateObject(s))
	// 3) objects updated the frame before, not updated this frame, i.e. they are now "sleeping"
	// 4) objects updated the frame before, then removed (from removeObject(s))
	//
	// We split the current array into separate groups:
	// - 1) & 2) go to "temp", count is "nbUpdated"
	// - 3) go to "temp2", count is "nbSleeping"
	// - 4) are filtered out. No special processing is needed because the updated data is always parsed/recreated here anyway.
	// So if we don't actively add removed objects to the new buffers, they get removed as a side-effect.
	PxU32 nbUpdated = 0;	// PT: number of objects updated this frame
	PxU32 nbSleeping = 0;
	PxU32 nbRemoved = 0;	// PT: number of removed objects that were previously located in the updated array

	// PT: TODO: could we do the work within mInToOut_Updated?
	// - updated objects have invalidated bounds so we don't need to preserve their order
	// - we need to preserve the order of sleeping objects to avoid re-sorting them
	// - we cannot use MTF since it breaks the order
	// - parse backward and move sleeping objects to the back? but then we might have to move the sleeping boxes at the same time
	for(PxU32 i=0;i<size;i++)
	{
		PX_ASSERT(i<mMaxNbUpdated);
		const PxU32 index = remap[i];
		if(index==INVALID_ID)
		{
			nbRemoved++;
		}
		else
		{
			// Lazily allocate the shared temp buffer on first live entry.
			if(!tempBuffer)
			{
				tempBuffer = reinterpret_cast<PxU32*>(memoryManager.frameAlloc(size*sizeof(PxU32)));
				newOrUpdatedIDs = tempBuffer;
				sleepingIndices = tempBuffer;
			}

			if(isNewOrUpdated(index))
			{
				// PT: new or updated object
				if(!keys)
					keys = reinterpret_cast<float*>(PX_ALLOC(size*sizeof(float), "tmp"));

				// PT: in this version we compute the key on-the-fly, i.e. it will be computed twice overall. We could make this
				// faster by merging bounds and distances inside the AABB manager.
				const BpHandle userID = removeNewOrUpdatedMark(index);
				// Sort key = inflated min X of the object's bounds.
				keys[nbUpdated] = bounds[userID].minimum.x - distances[userID];
				newOrUpdatedIDs[size - 1 - nbUpdated] = userID;
#if PX_DEBUG
				SIMD_AABB4 aabb;
				computeMBPBounds_Check(aabb, bounds, distances, userID);
				PX_ASSERT(aabb.mMinX==keys[nbUpdated]);
#endif
				nbUpdated++;
			}
			else
			{
				// PT: sleeping object
				sleepingIndices[nbSleeping++] = i;
			}
		}
	}
	PX_ASSERT(nbRemoved + nbUpdated + nbSleeping == size);

	// PT: we must process the sleeping objects first, because the bounds of new sleeping objects are located in the existing updated buffers.
	// PT: TODO: *HOWEVER* we could sort things right now and then reuse the "keys" buffer?
	if(nbSleeping)
	{
		// PT: must merge these guys to current sleeping array
		// They should already be in sorted order and we should already have the boxes.
#if PX_DEBUG
		// Debug-only: verify the newly-sleeping boxes are already sorted and match recomputed bounds.
		const SIMD_AABB_YZ4* boxesYZ = mUpdatedBoxes.getBoxes_YZ();
		float prevKey = -FLT_MAX;
		for(PxU32 ii=0;ii<nbSleeping;ii++)
		{
			const PxU32 i = sleepingIndices[ii];	// PT: TODO: remove this indirection
			const PxU32 index = remap[i];
			PX_ASSERT(index!=INVALID_ID);
			PX_ASSERT(!(index & PX_SIGN_BITMASK));
			const BpHandle userID = index;
			const float key = bounds[userID].minimum.x - distances[userID];
			PX_ASSERT(key>=prevKey);
			prevKey = key;

			SIMD_AABB4 aabb;
			computeMBPBounds_Check(aabb, bounds, distances, userID);
			PX_ASSERT(aabb.mMinX==key);
#ifdef ABP_SIMD_OVERLAP
			// With the SIMD overlap test the YZ mins are stored negated.
			PX_ASSERT(boxesYZ[i].mMinY==-aabb.mMinY);
			PX_ASSERT(boxesYZ[i].mMinZ==-aabb.mMinZ);
#else
			PX_ASSERT(boxesYZ[i].mMinY==aabb.mMinY);
			PX_ASSERT(boxesYZ[i].mMinZ==aabb.mMinZ);
#endif
			PX_ASSERT(boxesYZ[i].mMaxY==aabb.mMaxY);
			PX_ASSERT(boxesYZ[i].mMaxZ==aabb.mMaxZ);
		}
#endif
		if(mNbSleeping)
		{
			// PT: benchmark for this codepath: MBP.MergeSleeping
			CHECKPOINT("Merging sleeping objects\n");

			// PT: here, we need to merge two arrays of sleeping objects together:
			// - the ones already contained inside mSleepingBoxes
			// - the new sleeping objects currently contained in mUpdatedBoxes
			// Both of them should already be sorted.

			// PT: TODO: super subtle stuff going on there, to revisit
			// PT: TODO: revisit names
			PxU32 offsetSorted = 0;
			const PxU32 nbSorted = nbSleeping;
			const SIMD_AABB_X4* PX_RESTRICT sortedDataX = mUpdatedBoxes.getBoxes_X();
			const SIMD_AABB_YZ4* PX_RESTRICT sortedDataYZ = mUpdatedBoxes.getBoxes_YZ();
			const ABP_Index* PX_RESTRICT sortedRemap = mInToOut_Updated;

			PxU32 offsetNonSorted = 0;
			const PxU32 nbToSort = mNbSleeping;
			const SIMD_AABB_X4* PX_RESTRICT toSortDataX = mSleepingBoxes.getBoxes_X();
			const SIMD_AABB_YZ4* PX_RESTRICT toSortDataYZ = mSleepingBoxes.getBoxes_YZ();
			const ABP_Index* PX_RESTRICT toSortRemap = mInToOut_Sleeping;

			PX_ASSERT(mNbRemovedSleeping<=mNbSleeping);
#if PX_DEBUG
			{
				PxU32 nbRemovedFound=0;
				for(PxU32 i=0;i<mNbSleeping;i++)
				{
					if(toSortRemap[i]==INVALID_ID)
						nbRemovedFound++;
				}
				PX_ASSERT(nbRemovedFound==mNbRemovedSleeping);
			}
#endif
			PosXType2 nextCandidateNonSorted = getNextCandidateNonSorted(offsetNonSorted, nbToSort, toSortDataX);
			PosXType2 nextCandidateSorted = getNextCandidateSorted(offsetSorted, nbSorted, sortedDataX, sleepingIndices);

			// Classic two-way merge into fresh buffers; lazily-removed entries (INVALID_ID)
			// from the existing sleeping array are dropped on the way.
			const PxU32 nbTotal = nbSorted + nbToSort - mNbRemovedSleeping;
			SIMD_AABB_X4* dstBoxesX = reinterpret_cast<SIMD_AABB_X4*>(MBP_ALLOC(sizeof(SIMD_AABB_X4)*(nbTotal+NB_SENTINELS)));
			SIMD_AABB_YZ4* dstBoxesYZ = reinterpret_cast<SIMD_AABB_YZ4*>(MBP_ALLOC(sizeof(SIMD_AABB_YZ4)*(nbTotal+NB_SENTINELS)));
			initSentinels(dstBoxesX, nbTotal);
			BpHandle* PX_RESTRICT dstRemap = reinterpret_cast<BpHandle*>(PX_ALLOC(nbTotal*sizeof(BpHandle), "tmp"));

			PxU32 i=0;
			PxU32 nbRemovedFound=0;
			PxU32 nbToGo = nbSorted + nbToSort;
			while(nbToGo--)
			{
				PxU32 boxIndex;
				{
					if(nextCandidateNonSorted<nextCandidateSorted)
					{
						boxIndex = toSortRemap[offsetNonSorted];
						if(boxIndex!=INVALID_ID)
						{
							dstRemap[i] = boxIndex;
							dstBoxesX[i] = toSortDataX[offsetNonSorted];
							dstBoxesYZ[i] = toSortDataYZ[offsetNonSorted];
						}
						else
							nbRemovedFound++;

						offsetNonSorted++;
						nextCandidateNonSorted = getNextCandidateNonSorted(offsetNonSorted, nbToSort, toSortDataX);
					}
					else
					{
						const PxU32 j = sleepingIndices[offsetSorted];
						PX_ASSERT(j<size);
						boxIndex = sortedRemap[j];
						PX_ASSERT(boxIndex!=INVALID_ID);
						dstRemap[i] = boxIndex;
						dstBoxesX[i] = sortedDataX[j];
						dstBoxesYZ[i] = sortedDataYZ[j];

						offsetSorted++;
						nextCandidateSorted = getNextCandidateSorted(offsetSorted, nbSorted, sortedDataX, sleepingIndices);
					}
				}

				if(boxIndex!=INVALID_ID)
				{
					// Only advance the destination index for entries actually kept.
					PX_ASSERT(boxIndex<objectsCapacity);
					objects[boxIndex].setSleepingIndex(i, mType);
					i++;
				}
			}
			PX_ASSERT(i==nbTotal);
			PX_ASSERT(offsetSorted+offsetNonSorted==nbSorted+nbToSort);
#if PX_DEBUG
			{
				// Debug-only: verify the merged output is sorted along X.
				PosXType2 prevSorted = dstBoxesX[0].mMinX;
				for(PxU32 i2=1;i2<nbTotal;i2++)
				{
					PosXType2 v = dstBoxesX[i2].mMinX;
					PX_ASSERT(prevSorted<=v);
					prevSorted = v;
				}
			}
#endif
			// PT: TODO: double check all this
			mSleepingBoxes.init(nbTotal, nbTotal, dstBoxesX, dstBoxesYZ);
			PX_FREE(mInToOut_Sleeping);
			mInToOut_Sleeping = dstRemap;
			mNbSleeping = nbTotal;
			mNbRemovedSleeping = 0;
		}
		else
		{
			// PT: benchmark for this codepath: MBP.ActiveToSleeping
			CHECKPOINT("Active objects become sleeping objects\n");

			// PT: TODO: optimize allocs
			BpHandle* inToOut_Sleeping;
			if(mSleepingBoxes.allocate(nbSleeping))
			{
				inToOut_Sleeping = reinterpret_cast<BpHandle*>(PX_ALLOC(nbSleeping*sizeof(BpHandle), "tmp"));
				PX_FREE(mInToOut_Sleeping);
				mInToOut_Sleeping = inToOut_Sleeping;
			}
			else
			{
				inToOut_Sleeping = mInToOut_Sleeping;
			}

			const SIMD_AABB_X4* srcBoxesX = mUpdatedBoxes.getBoxes_X();
			const SIMD_AABB_YZ4* srcBoxesYZ = mUpdatedBoxes.getBoxes_YZ();

			SIMD_AABB_X4* dstBoxesX = mSleepingBoxes.getBoxes_X();
			SIMD_AABB_YZ4* dstBoxesYZ = mSleepingBoxes.getBoxes_YZ();
			initSentinels(dstBoxesX, nbSleeping);

			// Copy the sleeping boxes out of the updated buffers, preserving their sorted order.
			for(PxU32 ii=0;ii<nbSleeping;ii++)
			{
				const PxU32 i = sleepingIndices[ii];	// PT: TODO: remove this indirection
				const PxU32 index = remap[i];
				PX_ASSERT(index!=INVALID_ID);
				inToOut_Sleeping[ii] = index;
				dstBoxesX[ii] = srcBoxesX[i];
				dstBoxesYZ[ii] = srcBoxesYZ[i];
				{
					PX_ASSERT(index<objectsCapacity);
					objects[index].setSleepingIndex(ii, mType);
				}
			}
			mNbSleeping = nbSleeping;
		}
	}
	else
	{
		// PT: no sleeping objects in updated buffer
		if(mNbSleeping)
		{
			if(mNbRemovedSleeping)
			{
				// PT: benchmark for this codepath: MBP.UpdateSleeping
				purgeRemovedFromSleeping(objects, objectsCapacity);
			}
		}
		else
		{
			PX_ASSERT(!mNbRemovedSleeping);
		}
	}

	if(nbUpdated)
	{
		// PT: benchmark for this codepath: MBP.Update64KObjects
		CHECKPOINT("Create updated objects\n");

		// PT: we need to sort here because we reuse the "keys" buffer just afterwards
		PxU32* ranks0 = reinterpret_cast<PxU32*>(memoryManager.frameAlloc(sizeof(PxU32)*nbUpdated));
		PxU32* ranks1 = reinterpret_cast<PxU32*>(memoryManager.frameAlloc(sizeof(PxU32)*nbUpdated));
		StackRadixSort(rs, ranks0, ranks1);
		const PxU32* sorted;
		{
			PX_PROFILE_ZONE("Sort", contextID);
			sorted = rs.Sort(keys, nbUpdated).GetRanks();
		}

		// PT:
		// - shuffle the remap table, store it in sorted order (we can probably use the "recyclable" array here again)
		// - compute bounds on-the-fly, store them in sorted order
		// PT: TODO: the "keys" array can be much bigger than strictly necessary here
		BpHandle* inToOut_Updated_Sorted;
		if(mUpdatedBoxes.allocate(nbUpdated))
		{
			// The "keys" float buffer is recycled as the new remap table (same element size,
			// see the BpHandle/float compile-time assert above).
			inToOut_Updated_Sorted = reinterpret_cast<BpHandle*>(keys);
			PX_FREE(mInToOut_Updated);
			mInToOut_Updated = inToOut_Updated_Sorted;
		}
		else
		{
			PX_FREE(keys);
			inToOut_Updated_Sorted = mInToOut_Updated;
		}

		SIMD_AABB_X4* PX_RESTRICT dstBoxesX = mUpdatedBoxes.getBoxes_X();
		initSentinels(dstBoxesX, nbUpdated);

#ifdef USE_ABP_BUCKETS
		Vec4V minV = V4Load(FLT_MAX);
		Vec4V maxV = V4Load(-FLT_MAX);
#endif
		// PT: TODO: parallel? Everything indexed by i should be fine, things indexed by userID might have some false sharing
		for(PxU32 i=0;i<nbUpdated;i++)
		{
			const PxU32 sortedIndex = *sorted++;
			const BpHandle userID = newOrUpdatedIDs[size - 1 - sortedIndex];
			PX_ASSERT(i<size);
			inToOut_Updated_Sorted[i] = userID;
			{
				PX_ASSERT(userID<objectsCapacity);
				objects[userID].setActiveIndex(i, mType);
#if PX_DEBUG
				objects[userID].mUpdated = false;
#endif
			}

			// PT: TODO: refactor with computeMBPBounds?
			{
				const PxBounds3& b = bounds[userID];
				const Vec4V contactDistanceV = V4Load(distances[userID]);
				const Vec4V inflatedMinV = V4Sub(V4LoadU(&b.minimum.x), contactDistanceV);
				const Vec4V inflatedMaxV = V4Add(V4LoadU(&b.maximum.x), contactDistanceV);	// PT: this one is safe because we allocated one more box in the array (in BoundsArray::initEntry)
#ifdef USE_ABP_BUCKETS
				minV = V4Min(minV, inflatedMinV);
				maxV = V4Max(maxV, inflatedMaxV);
#endif
				// PT: TODO better
				PX_ALIGN(16, PxVec4) boxMin;
				PX_ALIGN(16, PxVec4) boxMax;
				V4StoreA(inflatedMinV, &boxMin.x);
				V4StoreA(inflatedMaxV, &boxMax.x);
				mUpdatedBoxes.setBounds(i, boxMin, boxMax);
			}
		}

#ifdef USE_ABP_BUCKETS
		StoreBounds(mUpdatedBounds, minV, maxV)
#endif

#ifndef TEST_PERSISTENT_MEMORY
		memoryManager.frameFree(ranks1);
		memoryManager.frameFree(ranks0);
#endif
	}
	else
	{
		// PT: benchmark for this codepath: MBP.MergeSleeping / MBP.Remove64KObjects
		CHECKPOINT("Free updated objects\n");
		PX_FREE(keys);
		mUpdatedBoxes.reset();
		PX_FREE(mInToOut_Updated);
	}

	mNbUpdated = mMaxNbUpdated = nbUpdated;

	if(tempBuffer)
		memoryManager.frameFree(tempBuffer);
}

#ifdef ABP_MT
namespace
{
	// Per-task pair collector used by the multithreaded codepaths: pairs are buffered
	// locally ("delayed") and flushed into the shared ABP_PairManager later.
	struct PairManagerMT
	{
		const ABP_PairManager*	mSharedPM;
		PxArray<DelayedPair>	mDelayedPairs;
		const ABP_Index*		mInToOut0;
		const ABP_Index*		mInToOut1;
		//char					mBuffer[256];
	};
}

static PX_FORCE_INLINE void outputPair(PairManagerMT& pairManager, PxU32 index0, PxU32 index1)
{
	pairManager.mSharedPM->addDelayedPair(pairManager.mDelayedPairs, pairManager.mInToOut0, pairManager.mInToOut1, index0, index1);
}
#endif

#ifdef ABP_MT2
	#define NB_BIP_TASKS	15

enum ABP_TaskID
{
	ABP_TASK_0,
	ABP_TASK_1,
};

// Internal light task dispatching part of the broadphase update to a worker thread.
class ABP_InternalTask : public PxLightCpuTask
{
	public:
							ABP_InternalTask(ABP_TaskID id) : mBP(NULL), mID(id)	{}

		virtual const char*	getName()	const	PX_OVERRIDE	{ return "ABP_InternalTask";	}
		virtual void		run()				PX_OVERRIDE;

		BroadPhaseABP*		mBP;
		ABP_TaskID			mID;
};

class ABP_CompleteBoxPruningStartTask;

// Worker task running one slice of the complete-box-pruning pass.
class ABP_CompleteBoxPruningTask : public PxLightCpuTask
{
	public:
		ABP_CompleteBoxPruningTask() : mStartTask(NULL), mType(0), mID(0)	{}

		virtual const char*	getName()	const	PX_OVERRIDE	{ return "ABP_CompleteBoxPruningTask";	}
		virtual void		run()				PX_OVERRIDE;

		ABP_CompleteBoxPruningStartTask*	mStartTask;
		PxU16								mType;
		PxU16								mID;

		// First input box set for this slice.
		PxU32								mCounter;
		const SIMD_AABB_X4*					mBoxListX;
		const SIMD_AABB_YZ4*				mBoxListYZ;
		const PxU32*						mRemap;

		// Second input box set (used depending on mType, see isThereWorkToDo()).
		PxU32								mCounter4;
		const SIMD_AABB_X4*					mBoxListX4;
		const SIMD_AABB_YZ4*				mBoxListYZ4;
		const PxU32*						mRemap4;

		PairManagerMT						mPairs;

		PX_FORCE_INLINE bool isThereWorkToDo() const
		{
			if(!mCounter)
				return false;
			if(mType)
				return mCounter4!=0;
			return true;
		}
};

// Task run after all pruning slices complete.
class ABP_CompleteBoxPruningEndTask : public PxLightCpuTask
{
	public:
							ABP_CompleteBoxPruningEndTask() : mStartTask(NULL)	{}

		virtual const char*	getName()	const	PX_OVERRIDE	{ return "ABP_CompleteBoxPruningEndTask";	}
		virtual void		run()				PX_OVERRIDE;

		ABP_CompleteBoxPruningStartTask*	mStartTask;
};

// Root task of the multithreaded complete-box-pruning pass: buckets the input boxes,
// spawns the per-bucket worker tasks, and owns the per-bucket buffers.
class ABP_CompleteBoxPruningStartTask : public PxLightCpuTask
{
	public:
							ABP_CompleteBoxPruningStartTask();

		virtual const char*	getName()	const	PX_OVERRIDE	{ return "ABP_CompleteBoxPruningStartTask";	}

		void				setup(	//ABP_MM& memoryManager,
									const PxBounds3& updatedBounds, ABP_PairManager* PX_RESTRICT pairManager,
									PxU32 nb, const SIMD_AABB_X4* PX_RESTRICT listX, const SIMD_AABB_YZ4* PX_RESTRICT listYZ,
									const ABP_Index* PX_RESTRICT inputRemap, PxU64 contextID);

		void				addDelayedPairs();
		void				addDelayedPairs2(PxArray<BroadPhasePair>& createdPairs);

		virtual void		run()				PX_OVERRIDE;

		const SIMD_AABB_X4*				mListX;
		const SIMD_AABB_YZ4*			mListYZ;
		const ABP_Index*				mInputRemap;
		ABP_PairManager*				mPairManager;
		PxU32*							mRemap;
		SIMD_AABB_X4*					mBoxListXBuffer;
		SIMD_AABB_YZ4*					mBoxListYZBuffer;
		PxU32							mCounters[NB_BUCKETS];
		SIMD_AABB_X4*					mBoxListX[NB_BUCKETS];
		SIMD_AABB_YZ4*					mBoxListYZ[NB_BUCKETS];
		PxU32*							mRemapBase[NB_BUCKETS];
		PxBounds3						mBounds;
		PxU32							mNb;

		ABP_CompleteBoxPruningTask		mTasks[9];
		ABP_CompleteBoxPruningEndTask	mEndTask;
};
#endif

typedef BoxManager	DynamicManager;
typedef BoxManager	StaticManager;

// Top-level ABP ("Automatic Box Pruning") broadphase object: owns one BoxManager per
// object group (static / dynamic / kinematic), the shared data and the pair manager.
class ABP : public PxUserAllocated
{
	PX_NOCOPY(ABP)
	public:
									ABP(PxU64 contextID);
									~ABP();

			void					preallocate(PxU32 nbObjects, PxU32 maxNbOverlaps);
			void					reset();
			void					freeBuffers();

			void					addStaticObjects(const BpHandle* userIDs, PxU32 nb, PxU32 maxID);
			void					addDynamicObjects(const BpHandle* userIDs, PxU32 nb, PxU32 maxID);
			void					addKinematicObjects(const BpHandle* userIDs, PxU32 nb, PxU32 maxID);
			void					removeObject(BpHandle userID);
			void					updateObject(BpHandle userID);

			void					findOverlaps(PxBaseTask* continuation, const Bp::FilterGroup::Enum* PX_RESTRICT groups, const bool* PX_RESTRICT lut);
			PxU32					finalize(PxArray<BroadPhasePair>& createdPairs, PxArray<BroadPhasePair>& deletedPairs);
			void					shiftOrigin(const PxVec3& shift, const PxBounds3* boundsArray, const PxReal* contactDistances);
			void					setTransientData(const PxBounds3* bounds, const PxReal* contactDistance);

			void					Region_prepareOverlaps();

			ABP_MM					mMM;		// Memory manager
			BoxManager				mSBM;		// Static boxes
			DynamicManager			mDBM;		// Dynamic boxes
			RadixSortBuffered		mRS;
			DynamicManager			mKBM;		// Kinematic boxes
			ABP_SharedData			mShared;
			ABP_PairManager			mPairManager;
	const	PxU64					mContextID;
#ifdef ABP_MT2
			ABP_InternalTask		mTask0;
			ABP_InternalTask		mTask1;
			ABP_CompleteBoxPruningStartTask	mCompleteBoxPruningTask0;
			ABP_CompleteBoxPruningStartTask	mCompleteBoxPruningTask1;
			ABP_CompleteBoxPruningTask		mBipTasks[NB_BIP_TASKS];
			void					addDelayedPairs();
			void					addDelayedPairs2(PxArray<BroadPhasePair>& createdPairs);
#endif
};

#ifdef ABP_SIMD_OVERLAP
	#define ABP_OVERLAP_TEST(x)	SIMD_OVERLAP_TEST(x)
#else
	#define ABP_OVERLAP_TEST(x)	if(intersect2D(box0, x))
#endif

///////////////////////////////////////////////////////////////////////////////

ABP_PairManager::ABP_PairManager() :
	mGroups		(NULL),
	mInToOut0	(NULL),
	mInToOut1	(NULL),
	mLUT		(NULL)
{
}

///////////////////////////////////////////////////////////////////////////////

ABP_PairManager::~ABP_PairManager()
{
}

///////////////////////////////////////////////////////////////////////////////

// Translates local box indices to user IDs, applies group filtering, then records the pair.
InternalPair* ABP_PairManager::addPair(PxU32 index0, PxU32
index1)
{
	const PxU32 id0 = mInToOut0[index0];
	const PxU32 id1 = mInToOut1[index1];
	PX_ASSERT(id0!=id1);
	PX_ASSERT(id0!=INVALID_ID);
	PX_ASSERT(id1!=INVALID_ID);
	PX_ASSERT(mGroups);
	{
		if(!groupFiltering(mGroups[id0], mGroups[id1], mLUT))
			return NULL;
	}
	return addPairInternal(id0, id1);
}

#ifdef ABP_MT
// MT variant of addPair: existing pairs are flagged as updated in the shared hash table,
// while genuinely new pairs are appended to a per-task 'delayedPairs' array (with their
// precomputed hash) and inserted into the shared table later, single-threaded.
void ABP_PairManager::addDelayedPair(PxArray<DelayedPair>& delayedPairs, const ABP_Index* inToOut0, const ABP_Index* inToOut1, PxU32 index0, PxU32 index1) const
{
	/*const*/ PxU32 id0 = inToOut0[index0];
	/*const*/ PxU32 id1 = inToOut1[index1];
	PX_ASSERT(id0!=id1);
	PX_ASSERT(id0!=INVALID_ID);
	PX_ASSERT(id1!=INVALID_ID);
	PX_ASSERT(mGroups);
	{
		if(!groupFiltering(mGroups[id0], mGroups[id1], mLUT))
			return;
	}

	if(1)
	{
		// Order the ids
		sort(id0, id1);

		const PxU32 fullHashValue = hash(id0, id1);
		PxU32 hashValue = fullHashValue & mMask;

		{
			InternalPair* /*PX_RESTRICT*/ p = findPair(id0, id1, hashValue);
			if(p)
			{
				p->setUpdated();	// ### PT: potential false sharing here
				//return p;	// Persistent pair
				return;	// Persistent pair
			}
		}

		{
			/*// This is a new pair
			if(mNbActivePairs >= mHashSize)
				hashValue = growPairs(fullHashValue);
			const PxU32 pairIndex = mNbActivePairs++;
			InternalPair* PX_RESTRICT p = &mActivePairs[pairIndex];
			p->setNewPair(id0, id1);
			mNext[pairIndex] = mHashTable[hashValue];
			mHashTable[hashValue] = pairIndex;
			return p;*/

			DelayedPair* newPair = Cm::reserveContainerMemory(delayedPairs, 1);
			newPair->mID0 = id0;
			newPair->mID1 = id1;
			newPair->mHash = fullHashValue;
		}
	}
}

// Grows the hash table so that 'nbDelayedPairs' additional pairs fit, and rehashes the
// existing active pairs against the new mask.
void ABP_PairManager::resizeForNewPairs(PxU32 nbDelayedPairs)
{
	PxU32 currentNbPairs = mNbActivePairs;
	const PxU32 newNbPairs = currentNbPairs + nbDelayedPairs;

	// Get more entries
	mHashSize = PxNextPowerOfTwo(newNbPairs+1);
	mMask = mHashSize-1;

	//reallocPairs();
	{
		MBP_FREE(mHashTable);
		mHashTable = reinterpret_cast<PxU32*>(MBP_ALLOC(mHashSize*sizeof(PxU32)));
		//storeDwords(mHashTable, mHashSize, INVALID_ID);
		if(0)
		{
			PxU32 nb = mHashSize;
			PxU32* dest = mHashTable;
			while(nb--)
				*dest++ = INVALID_ID;
		}
		else
			PxMemSet(mHashTable, 0xff, mHashSize*sizeof(PxU32));	// 0xff bytes == INVALID_ID in every slot

		// Get some bytes for new entries
		InternalPair* newPairs = reinterpret_cast<InternalPair*>(MBP_ALLOC(mHashSize * sizeof(InternalPair)));
		PX_ASSERT(newPairs);
		PxU32* newNext = reinterpret_cast<PxU32*>(MBP_ALLOC(mHashSize * sizeof(PxU32)));
		PX_ASSERT(newNext);

		// Copy old data if needed
		if(currentNbPairs)
			PxMemCopy(newPairs, mActivePairs, currentNbPairs*sizeof(InternalPair));

		// ### check it's actually needed... probably only for pairs whose hash value was cut by the and
		// yeah, since hash(id0, id1) is a constant
		// However it might not be needed to recompute them => only less efficient but still ok
		for(PxU32 i=0;i<currentNbPairs;i++)
		{
			const PxU32 hashValue = hash(mActivePairs[i].getId0(), mActivePairs[i].getId1()) & mMask;	// New hash value with new mask
			newNext[i] = mHashTable[hashValue];
			mHashTable[hashValue] = i;
		}

		// Delete old data
		MBP_FREE(mNext);
		MBP_FREE(mActivePairs);

		// Assign new pointer
		mActivePairs = newPairs;
		mNext = newNext;
	}
}

// Single-threaded flush of the per-task delayed pairs into the shared hash table.
// NOTE: the fast path below assumes capacity was reserved beforehand (see the asserts
// and the commented-out resizeForNewPairs call) — presumably done by the caller.
void ABP_PairManager::addDelayedPairs(const PxArray<DelayedPair>& delayedPairs)
{
	if(0)
	{
		PxU32 nbDelayedPairs = delayedPairs.size();
		const DelayedPair* pairs = delayedPairs.begin();
		while(nbDelayedPairs--)
		{
			const DelayedPair& dp = *pairs++;

			const PxU32 fullHashValue = dp.mHash;
			PxU32 hashValue = fullHashValue & mMask;
			if(mNbActivePairs >= mHashSize)
				hashValue = growPairs(fullHashValue);

			const PxU32 pairIndex = mNbActivePairs++;
			InternalPair* PX_RESTRICT p = &mActivePairs[pairIndex];
			p->setNewPair(dp.mID0, dp.mID1);
			mNext[pairIndex] = mHashTable[hashValue];
			mHashTable[hashValue] = pairIndex;
		}
	}
	else
	{
		PxU32 nbDelayedPairs = delayedPairs.size();
		PxU32 currentNbPairs = mNbActivePairs;
		//resizeForNewPairs(nbDelayedPairs);

		{
			const PxU32 mask = mMask;
			PxU32* PX_RESTRICT hashTable = mHashTable;
			PxU32* PX_RESTRICT next = mNext;
			InternalPair* PX_RESTRICT internalPairs = mActivePairs;

			const DelayedPair* PX_RESTRICT pairs = delayedPairs.begin();
			while(nbDelayedPairs--)
			{
				const DelayedPair& dp = *pairs++;

				const PxU32 fullHashValue = dp.mHash;
				const PxU32 hashValue = fullHashValue & mask;
				PX_ASSERT(currentNbPairs < mHashSize);
				const PxU32 pairIndex = currentNbPairs++;
				internalPairs[pairIndex].setNewPair(dp.mID0, dp.mID1);
				next[pairIndex] = hashTable[hashValue];
				hashTable[hashValue] = pairIndex;
			}
			mNbActivePairs = currentNbPairs;
		}
	}
}

// Same as addDelayedPairs but also appends each new pair to 'createdPairs' (via setNewPair2).
void ABP_PairManager::addDelayedPairs2(PxArray<BroadPhasePair>& createdPairs, const PxArray<DelayedPair>& delayedPairs)
{
	PxU32 nbDelayedPairs = delayedPairs.size();
	PxU32 currentNbPairs = mNbActivePairs;
	//resizeForNewPairs(nbDelayedPairs);

	BroadPhasePair* newPair = Cm::reserveContainerMemory(createdPairs, nbDelayedPairs);
	{
		const PxU32 mask = mMask;
		PxU32* PX_RESTRICT hashTable = mHashTable;
		PxU32* PX_RESTRICT next = mNext;
		InternalPair* PX_RESTRICT internalPairs = mActivePairs;

		const DelayedPair* PX_RESTRICT pairs = delayedPairs.begin();
		while(nbDelayedPairs--)
		{
			const DelayedPair& dp = *pairs++;

			const PxU32 fullHashValue = dp.mHash;
			const PxU32 hashValue = fullHashValue & mask;
			PX_ASSERT(currentNbPairs < mHashSize);
			const PxU32 pairIndex = currentNbPairs++;
			internalPairs[pairIndex].setNewPair2(dp.mID0, dp.mID1);
			{
				newPair->mVolA = dp.mID0;
				newPair->mVolB = dp.mID1;
				newPair++;
			}
			next[pairIndex] = hashTable[hashValue];
			hashTable[hashValue] = pairIndex;
		}
		mNbActivePairs = currentNbPairs;
	}
}
#endif

///////////////////////////////////////////////////////////////////////////////

// 2D (YZ) overlap tests. The "9c"/"14a" variants are alternative SIMD formulations;
// they operate on SIMD_AABB_YZ4 data whose mins are stored negated (see the PX_DEBUG
// checks in prepareData), which lets a single >= comparison test all four planes.
#if PX_INTEL_FAMILY
	#define SIMD_OVERLAP_TEST_14a(box)	_mm_movemask_ps(_mm_cmpngt_ps(b, _mm_load_ps(box)))==15

	#define SIMD_OVERLAP_INIT_9c(box)	\
		__m128 b = _mm_shuffle_ps(_mm_load_ps(&box.mMinY), _mm_load_ps(&box.mMinY), 78);\
		const float Coeff = -1.0f;	\
		b = _mm_mul_ps(b, _mm_load1_ps(&Coeff));

	#define SIMD_OVERLAP_TEST_9c(box)						\
		const __m128 a = _mm_load_ps(&box.mMinY);			\
		const __m128 d = _mm_cmpge_ps(a, b);				\
		if(_mm_movemask_ps(d)==15)
#else
	#define SIMD_OVERLAP_TEST_14a(box)	BAllEqFFFF(V4IsGrtr(b, V4LoadA(box)))

	#define SIMD_OVERLAP_INIT_9c(box)					\
		Vec4V b = V4PermZWXY(V4LoadA(&box.mMinY));		\
		b = V4Mul(b, V4Load(-1.0f));

	#define SIMD_OVERLAP_TEST_9c(box)					\
		const Vec4V a = V4LoadA(&box.mMinY);			\
		const Vec4V d = V4IsGrtrOrEq(a, b);				\
		if(BAllEqTTTT(d))
#endif

#ifdef ABP_SIMD_OVERLAP
	#define SIMD_OVERLAP_PRELOAD_BOX0	SIMD_OVERLAP_INIT_9c(box0)
	#define SIMD_OVERLAP_TEST(x)		SIMD_OVERLAP_TEST_9c(x)
#else
	#define SIMD_OVERLAP_PRELOAD_BOX0
#endif

#ifndef ABP_SIMD_OVERLAP
// Scalar 2D overlap test (YZ planes only; X is handled by the sweep in the kernel).
// Written branchlessly with bitwise-or instead of short-circuit logical-or.
static PX_FORCE_INLINE int intersect2D(const SIMD_AABB_YZ4& a, const SIMD_AABB_YZ4& b)
{
/*	if( b.mMaxY < a.mMinY || a.mMaxY < b.mMinY
	||	b.mMaxZ < a.mMinZ || a.mMaxZ < b.mMinZ
	)
		return 0;
	return 1;*/
	const bool b0 = b.mMaxY < a.mMinY;
	const bool b1 = a.mMaxY < b.mMinY;
	const bool b2 = b.mMaxZ < a.mMinZ;
	const bool b3 = a.mMaxZ < b.mMinZ;
//	const bool b4 = b0 || b1 || b2 || b3;
	const bool b4 = b0 | b1 | b2 | b3;
	return !b4;
}
#endif

static PX_FORCE_INLINE void outputPair(ABP_PairManager& pairManager, PxU32 index0, PxU32 index1)
{
	pairManager.addPair(index0, index1);
}

// Sweep-and-prune kernel between two X-sorted, sentinel-terminated box arrays.
// 'codepath' selects strict vs non-strict advancement of the second array so that
// calling the kernel twice with swapped inputs reports each bipartite pair exactly once.
template<const int codepath, class ABP_PairManagerT>
static void boxPruningKernel(	PxU32 nb0, PxU32 nb1,
								const SIMD_AABB_X4* PX_RESTRICT boxes0_X, const SIMD_AABB_X4* PX_RESTRICT boxes1_X,
								const SIMD_AABB_YZ4* PX_RESTRICT boxes0_YZ, const SIMD_AABB_YZ4* PX_RESTRICT boxes1_YZ,
								const ABP_Index* PX_RESTRICT inToOut0, const ABP_Index* PX_RESTRICT inToOut1,
								ABP_PairManagerT* PX_RESTRICT pairManager)
{
	pairManager->mInToOut0 = inToOut0;
	pairManager->mInToOut1 = inToOut1;

	PxU32 index0 = 0;
	PxU32 runningIndex1 = 0;
	while(runningIndex1<nb1 && index0<nb0)
	{
		const SIMD_AABB_X4& box0_X = boxes0_X[index0];

		const PosXType2 maxLimit = box0_X.mMaxX;
		const PosXType2 minLimit = box0_X.mMinX;
		// Advance past boxes that end before box0 starts (sentinels terminate the scan).
		if(!codepath)
		{
			while(boxes1_X[runningIndex1].mMinX<minLimit)
				runningIndex1++;
		}
		else
		{
			while(boxes1_X[runningIndex1].mMinX<=minLimit)
				runningIndex1++;
		}

		const SIMD_AABB_YZ4& box0 = boxes0_YZ[index0];
		SIMD_OVERLAP_PRELOAD_BOX0

		if(gUseRegularBPKernel)
		{
			PxU32 index1 = runningIndex1;
while(boxes1_X[index1].mMinX<=maxLimit) { ABP_OVERLAP_TEST(boxes1_YZ[index1]) { outputPair(*pairManager, index0, index1); } index1++; } } else { PxU32 Offset = 0; const char* const CurrentBoxListYZ = reinterpret_cast<const char*>(&boxes1_YZ[runningIndex1]); const char* const CurrentBoxListX = reinterpret_cast<const char*>(&boxes1_X[runningIndex1]); if(!gUnrollLoop) { while(*reinterpret_cast<const PosXType2*>(CurrentBoxListX + Offset)<=maxLimit) { const float* box = reinterpret_cast<const float*>(CurrentBoxListYZ + Offset*2); #ifdef ABP_SIMD_OVERLAP if(SIMD_OVERLAP_TEST_14a(box)) #else if(intersect2D(box0, *reinterpret_cast<const SIMD_AABB_YZ4*>(box))) #endif { const PxU32 Index1 = PxU32(CurrentBoxListX + Offset - reinterpret_cast<const char*>(boxes1_X))>>3; outputPair(*pairManager, index0, Index1); } Offset += 8; } } else { #define BIP_VERSION4 #ifdef BIP_VERSION4 #ifdef ABP_SIMD_OVERLAP #define BLOCK4(x, label) {const float* box = reinterpret_cast<const float*>(CurrentBoxListYZ + Offset*2 + x*2); \ if(SIMD_OVERLAP_TEST_14a(box)) \ goto label; } #else #define BLOCK4(x, label) {const float* box = reinterpret_cast<const float*>(CurrentBoxListYZ + Offset*2 + x*2); \ if(intersect2D(box0, *reinterpret_cast<const SIMD_AABB_YZ4*>(box))) \ goto label; } #endif goto StartLoop4; CODEALIGN16 FoundOverlap3: Offset += 8; CODEALIGN16 FoundOverlap2: Offset += 8; CODEALIGN16 FoundOverlap1: Offset += 8; CODEALIGN16 FoundOverlap0: Offset += 8; CODEALIGN16 FoundOverlap: { const PxU32 Index1 = PxU32(CurrentBoxListX + Offset - 8 - reinterpret_cast<const char*>(boxes1_X))>>3; outputPair(*pairManager, index0, Index1); } CODEALIGN16 StartLoop4: while(*reinterpret_cast<const PosXType2*>(CurrentBoxListX + Offset + 8*5)<=maxLimit) { BLOCK4(0, FoundOverlap0) BLOCK4(8, FoundOverlap1) BLOCK4(16, FoundOverlap2) BLOCK4(24, FoundOverlap3) Offset += 40; BLOCK4(-8, FoundOverlap) } #undef BLOCK4 #endif #ifdef ABP_SIMD_OVERLAP #define BLOCK if(*reinterpret_cast<const PosXType2*>(CurrentBoxListX + 
Offset)<=maxLimit) \ {if(SIMD_OVERLAP_TEST_14a(reinterpret_cast<const float*>(CurrentBoxListYZ + Offset*2))) \ goto OverlapFound; \ Offset += 8; #else #define BLOCK if(*reinterpret_cast<const PosXType2*>(CurrentBoxListX + Offset)<=maxLimit) \ {if(intersect2D(box0, *reinterpret_cast<const SIMD_AABB_YZ4*>(CurrentBoxListYZ + Offset*2))) \ goto OverlapFound; \ Offset += 8; #endif goto LoopStart; CODEALIGN16 OverlapFound: { const PxU32 Index1 = PxU32(CurrentBoxListX + Offset - reinterpret_cast<const char*>(boxes1_X))>>3; outputPair(*pairManager, index0, Index1); } Offset += 8; CODEALIGN16 LoopStart: BLOCK BLOCK BLOCK } } goto LoopStart; } #undef BLOCK } } index0++; } } template<class ABP_PairManagerT> static /*PX_FORCE_INLINE*/ void doBipartiteBoxPruning_Leaf( ABP_PairManagerT* PX_RESTRICT pairManager, PxU32 nb0, PxU32 nb1, const SIMD_AABB_X4* PX_RESTRICT boxes0_X, const SIMD_AABB_X4* PX_RESTRICT boxes1_X, const SIMD_AABB_YZ4* PX_RESTRICT boxes0_YZ, const SIMD_AABB_YZ4* PX_RESTRICT boxes1_YZ, const ABP_Index* PX_RESTRICT remap0, const ABP_Index* PX_RESTRICT remap1 ) { PX_ASSERT(boxes0_X[nb0].isSentinel()); PX_ASSERT(boxes1_X[nb1].isSentinel()); boxPruningKernel<0>(nb0, nb1, boxes0_X, boxes1_X, boxes0_YZ, boxes1_YZ, remap0, remap1, pairManager); boxPruningKernel<1>(nb1, nb0, boxes1_X, boxes0_X, boxes1_YZ, boxes0_YZ, remap1, remap0, pairManager); } template<class ABP_PairManagerT> static PX_FORCE_INLINE void doBipartiteBoxPruning_Leaf(ABP_PairManagerT* PX_RESTRICT pairManager, PxU32 nb0, PxU32 nb1, const SplitBoxes& boxes0, const SplitBoxes& boxes1, const ABP_Index* PX_RESTRICT remap0, const ABP_Index* PX_RESTRICT remap1) { doBipartiteBoxPruning_Leaf(pairManager, nb0, nb1, boxes0.getBoxes_X(), boxes1.getBoxes_X(), boxes0.getBoxes_YZ(), boxes1.getBoxes_YZ(), remap0, remap1); } template<class ABP_PairManagerT> static void doCompleteBoxPruning_Leaf( ABP_PairManagerT* PX_RESTRICT pairManager, PxU32 nb, const SIMD_AABB_X4* PX_RESTRICT boxes_X, const SIMD_AABB_YZ4* PX_RESTRICT 
boxes_YZ, const ABP_Index* PX_RESTRICT remap) { pairManager->mInToOut0 = remap; pairManager->mInToOut1 = remap; PxU32 index0 = 0; PxU32 runningIndex = 0; while(runningIndex<nb && index0<nb) { const SIMD_AABB_X4& box0_X = boxes_X[index0]; const PosXType2 maxLimit = box0_X.mMaxX; const PosXType2 minLimit = box0_X.mMinX; while(boxes_X[runningIndex++].mMinX<minLimit); const SIMD_AABB_YZ4& box0 = boxes_YZ[index0]; SIMD_OVERLAP_PRELOAD_BOX0 if(gUseRegularBPKernel) { PxU32 index1 = runningIndex; while(boxes_X[index1].mMinX<=maxLimit) { ABP_OVERLAP_TEST(boxes_YZ[index1]) { outputPair(*pairManager, index0, index1); } index1++; } } else { PxU32 Offset = 0; const char* const CurrentBoxListYZ = reinterpret_cast<const char*>(&boxes_YZ[runningIndex]); const char* const CurrentBoxListX = reinterpret_cast<const char*>(&boxes_X[runningIndex]); if(!gUnrollLoop) { while(*reinterpret_cast<const PosXType2*>(CurrentBoxListX + Offset)<=maxLimit) { const float* box = reinterpret_cast<const float*>(CurrentBoxListYZ + Offset*2); #ifdef ABP_SIMD_OVERLAP if(SIMD_OVERLAP_TEST_14a(box)) #else if(intersect2D(box0, *reinterpret_cast<const SIMD_AABB_YZ4*>(box))) #endif { const PxU32 Index = PxU32(CurrentBoxListX + Offset - reinterpret_cast<const char*>(boxes_X))>>3; outputPair(*pairManager, index0, Index); } Offset += 8; } } else { #define VERSION4c #ifdef VERSION4c #define VERSION3 // Enable this as our safe loop #ifdef ABP_SIMD_OVERLAP #define BLOCK4(x, label) {const float* box = reinterpret_cast<const float*>(CurrentBoxListYZ + Offset*2 + x*2); \ if(SIMD_OVERLAP_TEST_14a(box)) \ goto label; } #else #define BLOCK4(x, label) {const SIMD_AABB_YZ4* box = reinterpret_cast<const SIMD_AABB_YZ4*>(CurrentBoxListYZ + Offset*2 + x*2); \ if(intersect2D(box0, *box)) \ goto label; } #endif goto StartLoop4; CODEALIGN16 FoundOverlap3: Offset += 8; CODEALIGN16 FoundOverlap2: Offset += 8; CODEALIGN16 FoundOverlap1: Offset += 8; CODEALIGN16 FoundOverlap0: Offset += 8; CODEALIGN16 FoundOverlap: { const PxU32 Index 
= PxU32(CurrentBoxListX + Offset - 8 - reinterpret_cast<const char*>(boxes_X))>>3; outputPair(*pairManager, index0, Index); } CODEALIGN16 StartLoop4: while(*reinterpret_cast<const PosXType2*>(CurrentBoxListX + Offset + 8*5)<=maxLimit) { BLOCK4(0, FoundOverlap0) BLOCK4(8, FoundOverlap1) BLOCK4(16, FoundOverlap2) BLOCK4(24, FoundOverlap3) Offset += 40; BLOCK4(-8, FoundOverlap) } #endif #define VERSION3 #ifdef VERSION3 #ifdef ABP_SIMD_OVERLAP #define BLOCK if(*reinterpret_cast<const PosXType2*>(CurrentBoxListX + Offset)<=maxLimit) \ {if(SIMD_OVERLAP_TEST_14a(reinterpret_cast<const float*>(CurrentBoxListYZ + Offset*2))) \ goto BeforeLoop; \ Offset += 8; #else #define BLOCK if(*reinterpret_cast<const PosXType2*>(CurrentBoxListX + Offset)<=maxLimit) \ {if(intersect2D(box0, *reinterpret_cast<const SIMD_AABB_YZ4*>(CurrentBoxListYZ + Offset*2))) \ goto BeforeLoop; \ Offset += 8; #endif goto StartLoop; CODEALIGN16 BeforeLoop: { const PxU32 Index = PxU32(CurrentBoxListX + Offset - reinterpret_cast<const char*>(boxes_X))>>3; outputPair(*pairManager, index0, Index); Offset += 8; } CODEALIGN16 StartLoop: BLOCK BLOCK BLOCK BLOCK BLOCK } } } } goto StartLoop; } #endif } } index0++; } } #ifdef USE_ABP_BUCKETS static const PxU8 gCodes[] = { 4, 4, 4, 255, 4, 3, 2, 255, 4, 1, 0, 255, 255, 255, 255, 255 }; static PX_FORCE_INLINE PxU8 classifyBoxNew(const SIMD_AABB_YZ4& boxYZ, const float limitY, const float limitZ) { #ifdef ABP_SIMD_OVERLAP // PT: mins have been negated for SIMD tests const bool upperPart = (-boxYZ.mMinZ) > limitZ; const bool rightPart = (-boxYZ.mMinY) > limitY; #else const bool upperPart = boxYZ.mMinZ > limitZ; const bool rightPart = boxYZ.mMinY > limitY; #endif const bool lowerPart = boxYZ.mMaxZ < limitZ; const bool leftPart = boxYZ.mMaxY < limitY; // Table-based box classification avoids many branches const PxU32 Code = PxU32(rightPart)|(PxU32(leftPart)<<1)|(PxU32(upperPart)<<2)|(PxU32(lowerPart)<<3); PX_ASSERT(gCodes[Code]!=255); return gCodes[Code]; } #ifdef 
RECURSE_LIMIT
// Recursive "complete box pruning": splits the input set into NB_BUCKETS buckets by classifying
// each box against the YZ midpoints of the merged bounds (classifyBoxNew), prunes each bucket
// on its own (recursing while a bucket stays large), then runs bipartite passes of every regular
// bucket against the last, cross-boundary bucket.
static void CompleteBoxPruning_Recursive(	ABP_MM& memoryManager, ABP_PairManager* PX_RESTRICT pairManager, PxU32 nb,
											const SIMD_AABB_X4* PX_RESTRICT listX, const SIMD_AABB_YZ4* PX_RESTRICT listYZ,
											const ABP_Index* PX_RESTRICT remap, const ABPEntry* PX_RESTRICT objects)
{
//	printf("CompleteBoxPruning_Recursive %d\n", nb);
	if(!nb)
		return;

	// Compute merged YZ bounds of the set in a single max-pass. This works because the
	// stored mins are negated (see classifyBoxNew): max of (-minY,-minZ,maxY,maxZ) yields
	// all four extremes at once; mins are negated back on store below.
	/*__declspec(align(16))*/ float mergedMin[4];
	/*__declspec(align(16))*/ float mergedMax[4];
	{
//#ifdef SAFE_VERSION
		Vec4V maxV = V4LoadA(&listYZ[0].mMinY);
		for(PxU32 i=1;i<nb;i++)
			maxV = V4Max(maxV, V4LoadA(&listYZ[i].mMinY));
		PX_ALIGN(16, PxVec4) tmp;
		V4StoreA(maxV, &tmp.x);
		mergedMin[1] = -tmp.x;
		mergedMin[2] = -tmp.y;
		mergedMax[1] = tmp.z;
		mergedMax[2] = tmp.w;
//#endif
	}

	// Bucket split planes = midpoints of the merged YZ bounds
	const float limitY = (mergedMax[1] + mergedMin[1]) * 0.5f;
	const float limitZ = (mergedMax[2] + mergedMin[2]) * 0.5f;

	// PT: TODO: revisit allocs
	SIMD_AABB_X4* BoxListXBuffer = reinterpret_cast<SIMD_AABB_X4*>(memoryManager.frameAlloc(sizeof(SIMD_AABB_X4)*(nb+NB_SENTINELS*NB_BUCKETS)));
	SIMD_AABB_YZ4* BoxListYZBuffer = reinterpret_cast<SIMD_AABB_YZ4*>(memoryManager.frameAlloc(sizeof(SIMD_AABB_YZ4)*nb));

	PxU32 Counters[NB_BUCKETS];
	for(PxU32 i=0;i<NB_BUCKETS;i++)
		Counters[i] = 0;

	PxU32* Remap = reinterpret_cast<PxU32*>(memoryManager.frameAlloc(sizeof(PxU32)*nb));
	PxU8* Indices = reinterpret_cast<PxU8*>(memoryManager.frameAlloc(sizeof(PxU8)*nb));

	// First pass: classify each box and count bucket populations
	for(PxU32 i=0;i<nb;i++)
	{
		const PxU8 index = classifyBoxNew(listYZ[i], limitY, limitZ);
		Indices[i] = index;
		Counters[index]++;
	}

	// Carve the shared buffers into per-bucket sub-arrays (counting-sort layout),
	// leaving room for NB_SENTINELS sentinel entries after each bucket's X list.
	SIMD_AABB_X4* BoxListX[NB_BUCKETS];
	SIMD_AABB_YZ4* BoxListYZ[NB_BUCKETS];
	PxU32* RemapBase[NB_BUCKETS];
	{
		SIMD_AABB_X4* CurrentBoxListXBuffer = BoxListXBuffer;
		SIMD_AABB_YZ4* CurrentBoxListYZBuffer = BoxListYZBuffer;
		PxU32* CurrentRemap = Remap;
		for(PxU32 i=0;i<NB_BUCKETS;i++)
		{
			const PxU32 Nb = Counters[i];
			BoxListX[i] = CurrentBoxListXBuffer;
			BoxListYZ[i] = CurrentBoxListYZBuffer;
			RemapBase[i] = CurrentRemap;
			CurrentBoxListXBuffer += Nb+NB_SENTINELS;
			CurrentBoxListYZBuffer += Nb;
			CurrentRemap += Nb;
		}
		PX_ASSERT(CurrentBoxListXBuffer == BoxListXBuffer + nb + NB_SENTINELS*NB_BUCKETS);
		PX_ASSERT(CurrentBoxListYZBuffer == BoxListYZBuffer + nb);
		PX_ASSERT(CurrentRemap == Remap + nb);
	}

	// Second pass: scatter boxes into their buckets, preserving the input (X-sorted) order
	for(PxU32 i=0;i<NB_BUCKETS;i++)
		Counters[i] = 0;
	for(PxU32 i=0;i<nb;i++)
	{
		const PxU32 SortedIndex = i;
		const PxU32 TargetBucket = PxU32(Indices[SortedIndex]);
		const PxU32 IndexInTarget = Counters[TargetBucket]++;
		SIMD_AABB_X4* TargetBoxListX = BoxListX[TargetBucket];
		SIMD_AABB_YZ4* TargetBoxListYZ = BoxListYZ[TargetBucket];
		PxU32* TargetRemap = RemapBase[TargetBucket];
		TargetRemap[IndexInTarget] = remap[SortedIndex];
		TargetBoxListX[IndexInTarget] = listX[SortedIndex];
		TargetBoxListYZ[IndexInTarget] = listYZ[SortedIndex];
	}

	memoryManager.frameFree(Indices);

	// Terminate each bucket's X list with sentinels so the pruning scans need no bound checks
	for(PxU32 i=0;i<NB_BUCKETS;i++)
	{
		SIMD_AABB_X4* TargetBoxListX = BoxListX[i];
		const PxU32 IndexInTarget = Counters[i];
		for(PxU32 j=0;j<NB_SENTINELS;j++)
			TargetBoxListX[IndexInTarget+j].initSentinel();
	}

	// Self-prune each bucket; recurse while a bucket is both large and an actual subdivision
	// (Counters[i]==nb means splitting made no progress, so recursing would never terminate)
	{
		const PxU32 limit = RECURSE_LIMIT;
		for(PxU32 i=0;i<NB_BUCKETS;i++)
		{
			if(Counters[i]<limit || Counters[i]==nb)
				doCompleteBoxPruning_Leaf(	pairManager, Counters[i], BoxListX[i], BoxListYZ[i], RemapBase[i], objects);
			else
				CompleteBoxPruning_Recursive(memoryManager, pairManager, Counters[i], BoxListX[i], BoxListYZ[i], RemapBase[i], objects);
		}
	}

	// Cross-bucket pairs: every regular bucket vs the last ("straddling both planes") bucket
	{
		for(PxU32 i=0;i<NB_BUCKETS-1;i++)
		{
			doBipartiteBoxPruning_Leaf(pairManager, objects,
				Counters[i], Counters[NB_BUCKETS-1],
				BoxListX[i], BoxListX[NB_BUCKETS-1],
				BoxListYZ[i], BoxListYZ[NB_BUCKETS-1],
				RemapBase[i], RemapBase[NB_BUCKETS-1]
				);
		}
	}

	memoryManager.frameFree(Remap);
	memoryManager.frameFree(BoxListYZBuffer);
	memoryManager.frameFree(BoxListXBuffer);
}
#endif

#ifdef ABP_MT2
// Worker task: runs either a complete self-prune (mType==0) or a bipartite prune between the
// task's two box sets, accumulating results into the task-local delayed-pair container mPairs.
void ABP_CompleteBoxPruningTask::run()
{
//	printf("Running ABP_CompleteBoxPruningTask\n");
	//printf("ABP_Task_%d - thread ID %d\n", mID, PxU32(PxThread::getId()));
	//printf("Count: %d\n", mCounter);
	bool runComplete = false;
	bool runBipartite = false;
	if(mType==0)
		runComplete = true;
	else
runBipartite = true;	// mType!=0: bipartite pass between the two box sets

	if(runComplete)
		doCompleteBoxPruning_Leaf(&mPairs, mCounter, mBoxListX, mBoxListYZ, mRemap);

	if(runBipartite)
		doBipartiteBoxPruning_Leaf(&mPairs, mCounter, mCounter4, mBoxListX, mBoxListX4, mBoxListYZ, mBoxListYZ4, mRemap, mRemap4);
}

// Final task of the MT pipeline: releases the per-frame buffers allocated by
// ABP_CompleteBoxPruningStartTask::setup() once all worker tasks have completed.
void ABP_CompleteBoxPruningEndTask::run()
{
//	printf("Running ABP_CompleteBoxPruningEndTask\n");
	//memoryManager.frameFree(Remap);
	//memoryManager.frameFree(BoxListYZBuffer);
	//memoryManager.frameFree(BoxListXBuffer);
	// PT: TODO: revisit allocs
	PX_FREE(mStartTask->mRemap);
	PX_FREE(mStartTask->mBoxListYZBuffer);
	PX_FREE(mStartTask->mBoxListXBuffer);
}

ABP_CompleteBoxPruningStartTask::ABP_CompleteBoxPruningStartTask() :
	mListX			(NULL),
	mListYZ			(NULL),
	mInputRemap		(NULL),
	mPairManager	(NULL),
	mRemap			(NULL),
	mBoxListXBuffer	(NULL),
	mBoxListYZBuffer(NULL),
	mNb				(0)
{
}

// Records the inputs of the MT complete-box-pruning job and allocates the per-frame working
// buffers (freed later by ABP_CompleteBoxPruningEndTask::run). Must be called before the task
// is scheduled; the actual bucket classification/scatter happens in run().
void ABP_CompleteBoxPruningStartTask::setup(
	//ABP_MM& memoryManager,
	const PxBounds3& updatedBounds, ABP_PairManager* PX_RESTRICT pairManager, PxU32 nb,
	const SIMD_AABB_X4* PX_RESTRICT listX, const SIMD_AABB_YZ4* PX_RESTRICT listYZ,
	const ABP_Index* PX_RESTRICT inputRemap, PxU64 contextID)
{
	mListX = listX;
	mListYZ = listYZ;
	mInputRemap = inputRemap;
	mPairManager = pairManager;
	mBounds = updatedBounds;
	mContextID = contextID;
	mNb = nb;

	// PT: TODO: revisit allocs
	//mBoxListXBuffer = reinterpret_cast<SIMD_AABB_X4*>(memoryManager.frameAlloc(sizeof(SIMD_AABB_X4)*(nb+NB_SENTINELS*NB_BUCKETS)));
	//mBoxListYZBuffer = reinterpret_cast<SIMD_AABB_YZ4*>(memoryManager.frameAlloc(sizeof(SIMD_AABB_YZ4)*nb));
	// NOTE(review): PX_ALLOC is used instead of the frame allocator because these buffers
	// outlive this call and are freed by the end task - confirm against the commented lines above.
	mBoxListXBuffer = reinterpret_cast<SIMD_AABB_X4*>(PX_ALLOC(sizeof(SIMD_AABB_X4)*(nb+NB_SENTINELS*NB_BUCKETS), "mBoxListXBuffer"));
	mBoxListYZBuffer = reinterpret_cast<SIMD_AABB_YZ4*>(PX_ALLOC(sizeof(SIMD_AABB_YZ4)*nb, "mBoxListYZBuffer"));

	//mRemap = reinterpret_cast<PxU32*>(memoryManager.frameAlloc(sizeof(PxU32)*nb));
	mRemap = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32)*nb, "mRemap"));

	// Wire the sub-tasks back to this start task (9 workers: 8 bucket tasks + 1 cross-bucket task)
	mEndTask.mStartTask = this;
	for(PxU32 i=0;i<9;i++)
		mTasks[i].mStartTask
= this; } void ABP_CompleteBoxPruningStartTask::run() { // printf("Running ABP_CompleteBoxPruningStartTask\n"); const SIMD_AABB_X4* PX_RESTRICT listX = mListX; const SIMD_AABB_YZ4* PX_RESTRICT listYZ = mListYZ; const ABP_Index* PX_RESTRICT remap = mInputRemap; const PxU32 nb = mNb; PxU32* PX_RESTRICT Remap = mRemap; SIMD_AABB_X4* PX_RESTRICT BoxListXBuffer = mBoxListXBuffer; SIMD_AABB_YZ4* PX_RESTRICT BoxListYZBuffer = mBoxListYZBuffer; PxU32* PX_RESTRICT Counters = mCounters; SIMD_AABB_X4** PX_RESTRICT BoxListX = mBoxListX; SIMD_AABB_YZ4** PX_RESTRICT BoxListYZ = mBoxListYZ; PxU32** PX_RESTRICT RemapBase = mRemapBase; { PX_PROFILE_ZONE("ABP_CompleteBoxPruningStartTask - Run", mContextID); // PT: TODO: revisit allocs //BoxListXBuffer = reinterpret_cast<SIMD_AABB_X4*>(memoryManager.frameAlloc(sizeof(SIMD_AABB_X4)*(nb+NB_SENTINELS*NB_BUCKETS))); //BoxListYZBuffer = reinterpret_cast<SIMD_AABB_YZ4*>(memoryManager.frameAlloc(sizeof(SIMD_AABB_YZ4)*nb)); const PxVec3& mergedMin = mBounds.minimum; const PxVec3& mergedMax = mBounds.maximum; const float limitY = (mergedMax[1] + mergedMin[1]) * 0.5f; const float limitZ = (mergedMax[2] + mergedMin[2]) * 0.5f; for(PxU32 i=0;i<NB_BUCKETS;i++) Counters[i] = 0; //Remap = reinterpret_cast<PxU32*>(memoryManager.frameAlloc(sizeof(PxU32)*nb)); // PT: TODO: revisit allocs //PxU8* Indices = reinterpret_cast<PxU8*>(memoryManager.frameAlloc(sizeof(PxU8)*nb)); PxU8* Indices = reinterpret_cast<PxU8*>(PX_ALLOC(sizeof(PxU8)*nb, "Indices")); { PX_PROFILE_ZONE("BoxPruning - ClassifyBoxes", mContextID); for(PxU32 i=0;i<nb;i++) { const PxU8 index = classifyBoxNew(listYZ[i], limitY, limitZ); Indices[i] = index; Counters[index]++; } } { SIMD_AABB_X4* CurrentBoxListXBuffer = BoxListXBuffer; SIMD_AABB_YZ4* CurrentBoxListYZBuffer = BoxListYZBuffer; PxU32* CurrentRemap = Remap; for(PxU32 i=0;i<NB_BUCKETS;i++) { const PxU32 Nb = Counters[i]; BoxListX[i] = CurrentBoxListXBuffer; BoxListYZ[i] = CurrentBoxListYZBuffer; RemapBase[i] = CurrentRemap; 
CurrentBoxListXBuffer += Nb+NB_SENTINELS; CurrentBoxListYZBuffer += Nb; CurrentRemap += Nb; } PX_ASSERT(CurrentBoxListXBuffer == BoxListXBuffer + nb + NB_SENTINELS*NB_BUCKETS); PX_ASSERT(CurrentBoxListYZBuffer == BoxListYZBuffer + nb); PX_ASSERT(CurrentRemap == Remap + nb); } for(PxU32 i=0;i<NB_BUCKETS;i++) Counters[i] = 0; for(PxU32 i=0;i<nb;i++) { const PxU32 SortedIndex = i; const PxU32 TargetBucket = PxU32(Indices[SortedIndex]); const PxU32 IndexInTarget = Counters[TargetBucket]++; SIMD_AABB_X4* TargetBoxListX = BoxListX[TargetBucket]; SIMD_AABB_YZ4* TargetBoxListYZ = BoxListYZ[TargetBucket]; PxU32* TargetRemap = RemapBase[TargetBucket]; TargetRemap[IndexInTarget] = remap[SortedIndex]; TargetBoxListX[IndexInTarget] = listX[SortedIndex]; TargetBoxListYZ[IndexInTarget] = listYZ[SortedIndex]; } //memoryManager.frameFree(Indices); PX_FREE(Indices); for(PxU32 i=0;i<NB_BUCKETS;i++) { SIMD_AABB_X4* TargetBoxListX = BoxListX[i]; const PxU32 IndexInTarget = Counters[i]; for(PxU32 j=0;j<NB_SENTINELS;j++) TargetBoxListX[IndexInTarget+j].initSentinel(); } } for(PxU32 i=0;i<8;i++) { mTasks[i].mCounter = Counters[i/2]; mTasks[i].mBoxListX = BoxListX[i/2]; mTasks[i].mBoxListYZ = BoxListYZ[i/2]; mTasks[i].mRemap = RemapBase[i/2]; mTasks[i].mType = i&1; mTasks[i].mCounter4 = Counters[4]; mTasks[i].mBoxListX4 = BoxListX[4]; mTasks[i].mBoxListYZ4 = BoxListYZ[4]; mTasks[i].mRemap4 = RemapBase[4]; mTasks[i].mPairs.mSharedPM = mPairManager; //mTasks[i].mPairs.mDelayedPairs.reserve(10000); } PxU32 i=8; { mTasks[i].mCounter = Counters[4]; mTasks[i].mBoxListX = BoxListX[4]; mTasks[i].mBoxListYZ = BoxListYZ[4]; mTasks[i].mRemap = RemapBase[4]; mTasks[i].mType = 0; mTasks[i].mCounter4 = Counters[4]; mTasks[i].mBoxListX4 = BoxListX[4]; mTasks[i].mBoxListYZ4 = BoxListYZ[4]; mTasks[i].mRemap4 = RemapBase[4]; mTasks[i].mPairs.mSharedPM = mPairManager; //mTasks[i].mPairs.mDelayedPairs.reserve(10000); } for(PxU32 k=0; k<8+1; k++) { if(mTasks[k].isThereWorkToDo()) { mTasks[k].mID = PxU16(k); 
mTasks[k].setContinuation(getContinuation()); } } for(PxU32 k=0; k<8+1; k++) { if(mTasks[k].isThereWorkToDo()) mTasks[k].removeReference(); } } void ABP_CompleteBoxPruningStartTask::addDelayedPairs() { PX_PROFILE_ZONE("ABP_CompleteBoxPruningStartTask - add delayed pairs", mContextID); PxU32 nbDelayedPairs = 0; for(PxU32 k=0; k<9; k++) nbDelayedPairs += mTasks[k].mPairs.mDelayedPairs.size(); if(nbDelayedPairs) { { PX_PROFILE_ZONE("BroadPhaseABP - resizeForNewPairs", mContextID); mPairManager->resizeForNewPairs(nbDelayedPairs); } for(PxU32 k=0; k<9; k++) mPairManager->addDelayedPairs(mTasks[k].mPairs.mDelayedPairs); } } void ABP_CompleteBoxPruningStartTask::addDelayedPairs2(PxArray<BroadPhasePair>& createdPairs) { PX_PROFILE_ZONE("ABP_CompleteBoxPruningStartTask - add delayed pairs", mContextID); PxU32 nbDelayedPairs = 0; for(PxU32 k=0; k<9; k++) nbDelayedPairs += mTasks[k].mPairs.mDelayedPairs.size(); if(nbDelayedPairs) { { PX_PROFILE_ZONE("BroadPhaseABP - resizeForNewPairs", mContextID); mPairManager->resizeForNewPairs(nbDelayedPairs); } for(PxU32 k=0; k<9; k++) mPairManager->addDelayedPairs2(createdPairs, mTasks[k].mPairs.mDelayedPairs); } } #endif #ifndef USE_ALTERNATIVE_VERSION static void CompleteBoxPruning_Version16( #ifdef ABP_MT2 ABP_CompleteBoxPruningStartTask& completeBoxPruningTask, #endif ABP_MM& memoryManager, const PxBounds3& updatedBounds, ABP_PairManager* PX_RESTRICT pairManager, PxU32 nb, const SIMD_AABB_X4* PX_RESTRICT listX, const SIMD_AABB_YZ4* PX_RESTRICT listYZ, const ABP_Index* PX_RESTRICT remap, PxBaseTask* continuation, PxU64 contextID) { PX_UNUSED(contextID); PX_UNUSED(continuation); if(!nb) return; #ifdef ABP_MT2 if(continuation) { completeBoxPruningTask.setup(updatedBounds, pairManager, nb, listX, listYZ, remap, contextID); completeBoxPruningTask.mEndTask.setContinuation(continuation); completeBoxPruningTask.setContinuation(&completeBoxPruningTask.mEndTask); completeBoxPruningTask.mEndTask.removeReference(); 
completeBoxPruningTask.removeReference(); return; } #endif PxU32* Remap; SIMD_AABB_X4* BoxListXBuffer; SIMD_AABB_YZ4* BoxListYZBuffer; PxU32 Counters[NB_BUCKETS]; SIMD_AABB_X4* BoxListX[NB_BUCKETS]; SIMD_AABB_YZ4* BoxListYZ[NB_BUCKETS]; PxU32* RemapBase[NB_BUCKETS]; { PX_PROFILE_ZONE("BoxPruning - PrepareData", contextID); // PT: TODO: revisit allocs BoxListXBuffer = reinterpret_cast<SIMD_AABB_X4*>(memoryManager.frameAlloc(sizeof(SIMD_AABB_X4)*(nb+NB_SENTINELS*NB_BUCKETS))); BoxListYZBuffer = reinterpret_cast<SIMD_AABB_YZ4*>(memoryManager.frameAlloc(sizeof(SIMD_AABB_YZ4)*nb)); const PxVec3& mergedMin = updatedBounds.minimum; const PxVec3& mergedMax = updatedBounds.maximum; const float limitY = (mergedMax[1] + mergedMin[1]) * 0.5f; const float limitZ = (mergedMax[2] + mergedMin[2]) * 0.5f; for(PxU32 i=0;i<NB_BUCKETS;i++) Counters[i] = 0; Remap = reinterpret_cast<PxU32*>(memoryManager.frameAlloc(sizeof(PxU32)*nb)); PxU8* Indices = reinterpret_cast<PxU8*>(memoryManager.frameAlloc(sizeof(PxU8)*nb)); { PX_PROFILE_ZONE("BoxPruning - ClassifyBoxes", contextID); for(PxU32 i=0;i<nb;i++) { const PxU8 index = classifyBoxNew(listYZ[i], limitY, limitZ); Indices[i] = index; Counters[index]++; } } { SIMD_AABB_X4* CurrentBoxListXBuffer = BoxListXBuffer; SIMD_AABB_YZ4* CurrentBoxListYZBuffer = BoxListYZBuffer; PxU32* CurrentRemap = Remap; for(PxU32 i=0;i<NB_BUCKETS;i++) { const PxU32 Nb = Counters[i]; BoxListX[i] = CurrentBoxListXBuffer; BoxListYZ[i] = CurrentBoxListYZBuffer; RemapBase[i] = CurrentRemap; CurrentBoxListXBuffer += Nb+NB_SENTINELS; CurrentBoxListYZBuffer += Nb; CurrentRemap += Nb; } PX_ASSERT(CurrentBoxListXBuffer == BoxListXBuffer + nb + NB_SENTINELS*NB_BUCKETS); PX_ASSERT(CurrentBoxListYZBuffer == BoxListYZBuffer + nb); PX_ASSERT(CurrentRemap == Remap + nb); } for(PxU32 i=0;i<NB_BUCKETS;i++) Counters[i] = 0; for(PxU32 i=0;i<nb;i++) { const PxU32 SortedIndex = i; const PxU32 TargetBucket = PxU32(Indices[SortedIndex]); const PxU32 IndexInTarget = 
Counters[TargetBucket]++; SIMD_AABB_X4* TargetBoxListX = BoxListX[TargetBucket]; SIMD_AABB_YZ4* TargetBoxListYZ = BoxListYZ[TargetBucket]; PxU32* TargetRemap = RemapBase[TargetBucket]; TargetRemap[IndexInTarget] = remap[SortedIndex]; TargetBoxListX[IndexInTarget] = listX[SortedIndex]; TargetBoxListYZ[IndexInTarget] = listYZ[SortedIndex]; } memoryManager.frameFree(Indices); for(PxU32 i=0;i<NB_BUCKETS;i++) { SIMD_AABB_X4* TargetBoxListX = BoxListX[i]; const PxU32 IndexInTarget = Counters[i]; for(PxU32 j=0;j<NB_SENTINELS;j++) TargetBoxListX[IndexInTarget+j].initSentinel(); } } { for(PxU32 i=0;i<NB_BUCKETS;i++) { #ifdef RECURSE_LIMIT if(Counters[i]<RECURSE_LIMIT || Counters[i]==nb) #endif doCompleteBoxPruning_Leaf( pairManager, Counters[i], BoxListX[i], BoxListYZ[i], RemapBase[i]); #ifdef RECURSE_LIMIT else CompleteBoxPruning_Recursive(memoryManager, pairManager, Counters[i], BoxListX[i], BoxListYZ[i], RemapBase[i]); #endif } for(PxU32 i=0;i<NB_BUCKETS-1;i++) { doBipartiteBoxPruning_Leaf(pairManager, Counters[i], Counters[NB_BUCKETS-1], BoxListX[i], BoxListX[NB_BUCKETS-1], BoxListYZ[i], BoxListYZ[NB_BUCKETS-1], RemapBase[i], RemapBase[NB_BUCKETS-1] ); } } memoryManager.frameFree(Remap); memoryManager.frameFree(BoxListYZBuffer); memoryManager.frameFree(BoxListXBuffer); } #endif #endif #ifdef USE_ALTERNATIVE_VERSION // PT: experimental version that adds all cross-bucket objects to all regular buckets static void CompleteBoxPruning_Version16( ABP_MM& /*memoryManager*/, const PxBounds3& updatedBounds, ABP_PairManager* PX_RESTRICT pairManager, PxU32 nb, const SIMD_AABB_X4* PX_RESTRICT listX, const SIMD_AABB_YZ4* PX_RESTRICT listYZ, const ABP_Index* PX_RESTRICT remap, const ABPEntry* PX_RESTRICT objects) { if(!nb) return; const PxVec3& mergedMin = updatedBounds.minimum; const PxVec3& mergedMax = updatedBounds.maximum; const float limitY = (mergedMax[1] + mergedMin[1]) * 0.5f; const float limitZ = (mergedMax[2] + mergedMin[2]) * 0.5f; PxU32 Counters[NB_BUCKETS]; for(PxU32 
i=0;i<NB_BUCKETS;i++) Counters[i] = 0; PxU8* Indices = (PxU8*)PX_ALLOC(sizeof(PxU8)*nb, "temp"); for(PxU32 i=0;i<nb;i++) { const PxU8 index = classifyBoxNew(listYZ[i], limitY, limitZ); Indices[i] = index; Counters[index]++; } PxU32 total = 0; PxU32 Counters2[4]; for(PxU32 i=0;i<4;i++) { Counters2[i] = Counters[i] + Counters[4]; total += Counters2[i]; } // PT: TODO: revisit allocs SIMD_AABB_X4* BoxListXBuffer = (SIMD_AABB_X4*)PX_ALLOC(sizeof(SIMD_AABB_X4)*(total+NB_SENTINELS*NB_BUCKETS), "temp"); SIMD_AABB_YZ4* BoxListYZBuffer = (SIMD_AABB_YZ4*)PX_ALLOC(sizeof(SIMD_AABB_YZ4)*total, "temp"); PxU32* Remap = (PxU32*)PX_ALLOC(sizeof(PxU32)*total, "temp"); SIMD_AABB_X4* CurrentBoxListXBuffer = BoxListXBuffer; SIMD_AABB_YZ4* CurrentBoxListYZBuffer = BoxListYZBuffer; PxU32* CurrentRemap = Remap; SIMD_AABB_X4* BoxListX[4]; SIMD_AABB_YZ4* BoxListYZ[4]; PxU32* RemapBase[4]; for(PxU32 i=0;i<4;i++) { const PxU32 Nb = Counters2[i]; BoxListX[i] = CurrentBoxListXBuffer; BoxListYZ[i] = CurrentBoxListYZBuffer; RemapBase[i] = CurrentRemap; CurrentBoxListXBuffer += Nb+NB_SENTINELS; CurrentBoxListYZBuffer += Nb; CurrentRemap += Nb; } PX_ASSERT(CurrentBoxListXBuffer == BoxListXBuffer + total + NB_SENTINELS*NB_BUCKETS); PX_ASSERT(CurrentBoxListYZBuffer == BoxListYZBuffer + total); PX_ASSERT(CurrentRemap == Remap + total); for(PxU32 i=0;i<4;i++) Counters2[i] = 0; for(PxU32 i=0;i<nb;i++) { const PxU32 SortedIndex = i; const PxU32 TargetBucket = PxU32(Indices[SortedIndex]); if(TargetBucket==4) { for(PxU32 j=0;j<4;j++) { const PxU32 IndexInTarget = Counters2[j]++; SIMD_AABB_X4* TargetBoxListX = BoxListX[j]; SIMD_AABB_YZ4* TargetBoxListYZ = BoxListYZ[j]; PxU32* TargetRemap = RemapBase[j]; TargetRemap[IndexInTarget] = remap[SortedIndex]; TargetBoxListX[IndexInTarget] = listX[SortedIndex]; TargetBoxListYZ[IndexInTarget] = listYZ[SortedIndex]; } } else { const PxU32 IndexInTarget = Counters2[TargetBucket]++; SIMD_AABB_X4* TargetBoxListX = BoxListX[TargetBucket]; SIMD_AABB_YZ4* TargetBoxListYZ = 
BoxListYZ[TargetBucket]; PxU32* TargetRemap = RemapBase[TargetBucket]; TargetRemap[IndexInTarget] = remap[SortedIndex]; TargetBoxListX[IndexInTarget] = listX[SortedIndex]; TargetBoxListYZ[IndexInTarget] = listYZ[SortedIndex]; } } PX_FREE(Indices); for(PxU32 i=0;i<4;i++) { SIMD_AABB_X4* TargetBoxListX = BoxListX[i]; const PxU32 IndexInTarget = Counters2[i]; for(PxU32 j=0;j<NB_SENTINELS;j++) TargetBoxListX[IndexInTarget+j].initSentinel(); } { for(PxU32 i=0;i<4;i++) { #ifdef RECURSE_LIMIT if(Counters2[i]<RECURSE_LIMIT || Counters2[i]==nb) #endif doCompleteBoxPruning_Leaf( pairManager, Counters2[i], BoxListX[i], BoxListYZ[i], RemapBase[i], objects); #ifdef RECURSE_LIMIT else CompleteBoxPruning_Recursive( pairManager, Counters2[i], BoxListX[i], BoxListYZ[i], RemapBase[i], objects); #endif } } PX_FREE(Remap); PX_FREE(BoxListYZBuffer); PX_FREE(BoxListXBuffer); } #endif static void doCompleteBoxPruning_( #ifdef ABP_MT2 ABP_CompleteBoxPruningStartTask& completeBoxPruningTask, ABP_CompleteBoxPruningTask& bipTask0, ABP_CompleteBoxPruningTask& bipTask1, #endif ABP_MM& memoryManager, ABP_PairManager* PX_RESTRICT pairManager, const DynamicManager& mDBM, PxBaseTask* continuation, PxU64 contextID) { const PxU32 nbUpdated = mDBM.getNbUpdatedBoxes(); if(!nbUpdated) return; const PxU32 nbNonUpdated = mDBM.getNbNonUpdatedBoxes(); const DynamicBoxes& updatedBoxes = mDBM.getUpdatedBoxes(); const SIMD_AABB_X4* PX_RESTRICT updatedDynamicBoxes_X = updatedBoxes.getBoxes_X(); const SIMD_AABB_YZ4* PX_RESTRICT updatedDynamicBoxes_YZ = updatedBoxes.getBoxes_YZ(); // PT: find sleeping-dynamics-vs-active-dynamics overlaps if(nbNonUpdated) { #ifdef ABP_MT2 if(continuation) { bipTask0.mCounter = nbUpdated; bipTask0.mBoxListX = updatedBoxes.getBoxes_X(); bipTask0.mBoxListYZ = updatedBoxes.getBoxes_YZ(); bipTask0.mRemap = mDBM.getRemap_Updated(); bipTask0.mType = 1; bipTask0.mCounter4 = nbNonUpdated; bipTask0.mBoxListX4 = mDBM.getSleepingBoxes().getBoxes_X(); bipTask0.mBoxListYZ4 = 
mDBM.getSleepingBoxes().getBoxes_YZ(); bipTask0.mRemap4 = mDBM.getRemap_Sleeping(); bipTask0.mPairs.mSharedPM = pairManager; //bipTask0.mPairs.mDelayedPairs.reserve(10000); if(bipTask0.isThereWorkToDo()) { bipTask0.mID = 0; bipTask0.setContinuation(continuation); bipTask0.removeReference(); } } else #endif doBipartiteBoxPruning_Leaf( pairManager, nbUpdated, nbNonUpdated, updatedBoxes, mDBM.getSleepingBoxes(), mDBM.getRemap_Updated(), mDBM.getRemap_Sleeping()); } /////// // PT: find active-dynamics-vs-active-dynamics overlaps if(1) { PX_UNUSED(memoryManager); #ifdef USE_ABP_BUCKETS if(nbUpdated>USE_ABP_BUCKETS) CompleteBoxPruning_Version16( #ifdef ABP_MT2 completeBoxPruningTask, #endif memoryManager, mDBM.getUpdatedBounds(), pairManager, nbUpdated, updatedDynamicBoxes_X, updatedDynamicBoxes_YZ, mDBM.getRemap_Updated(), continuation, contextID); else #endif { #ifdef ABP_MT2 if(continuation) { bipTask1.mCounter = nbUpdated; bipTask1.mBoxListX = updatedDynamicBoxes_X; bipTask1.mBoxListYZ = updatedDynamicBoxes_YZ; bipTask1.mRemap = mDBM.getRemap_Updated(); bipTask1.mType = 0; bipTask1.mPairs.mSharedPM = pairManager; //bipTask1.mPairs.mDelayedPairs.reserve(10000); if(bipTask1.isThereWorkToDo()) { bipTask1.mID = 0; bipTask1.setContinuation(continuation); bipTask1.removeReference(); } } else #endif doCompleteBoxPruning_Leaf( pairManager, nbUpdated, updatedDynamicBoxes_X, updatedDynamicBoxes_YZ, mDBM.getRemap_Updated()); } } } void ABP::Region_prepareOverlaps() { PX_PROFILE_ZONE("ABP - Region_prepareOverlaps", mContextID); if( !mDBM.isThereWorkToDo() && !mKBM.isThereWorkToDo() && !mSBM.isThereWorkToDo() ) return; if(mSBM.isThereWorkToDo()) mSBM.prepareData(mRS, mShared.mABP_Objects, mShared.mABP_Objects_Capacity, mMM, mContextID); mDBM.prepareData(mRS, mShared.mABP_Objects, mShared.mABP_Objects_Capacity, mMM, mContextID); mKBM.prepareData(mRS, mShared.mABP_Objects, mShared.mABP_Objects_Capacity, mMM, mContextID); mRS.reset(); } // Finds static-vs-dynamic and 
// …dynamic-vs-dynamic overlaps

// Dispatches one full broad-phase query round between two box managers:
// - an optional "complete" (self) box-pruning pass on the dynamic manager (mDBM vs mDBM),
// - an optional bipartite pass (mDBM vs mSBM), split into active-vs-active,
//   active-vs-sleeping and sleeping-vs-active sub-passes so that two sleeping
//   boxes are never re-tested against each other.
// When ABP_MT2 is defined and 'continuation' is non-null, each sub-pass is
// delegated to one of the provided tasks; otherwise everything runs inline.
static void findAllOverlaps(
#ifdef ABP_MT2
	ABP_CompleteBoxPruningStartTask& completeBoxPruningTask,
	ABP_CompleteBoxPruningTask& bipTask0, ABP_CompleteBoxPruningTask& bipTask1, ABP_CompleteBoxPruningTask& bipTask2, ABP_CompleteBoxPruningTask& bipTask3, ABP_CompleteBoxPruningTask& bipTask4,
#endif
	ABP_MM& memoryManager, ABP_PairManager& pairManager, const StaticManager& mSBM, const DynamicManager& mDBM, bool doComplete, bool doBipartite, PxBaseTask* continuation, PxU64 contextID)
{
	const PxU32 nbUpdatedBoxesDynamic = mDBM.getNbUpdatedBoxes();

	// PT: find dynamics-vs-dynamics overlaps
	if(doComplete)
		doCompleteBoxPruning_(
#ifdef ABP_MT2
			completeBoxPruningTask, bipTask3, bipTask4,
#endif
			memoryManager, &pairManager, mDBM, continuation, contextID);

	// PT: find dynamics-vs-statics overlaps
	if(doBipartite)
	{
		const PxU32 nbUpdatedBoxesStatic = mSBM.getNbUpdatedBoxes();
		const PxU32 nbNonUpdatedBoxesStatic = mSBM.getNbNonUpdatedBoxes();
		const PxU32 nbNonUpdatedBoxesDynamic = mDBM.getNbNonUpdatedBoxes();

		// PT: in previous versions we did active-dynamics-vs-all-statics here.
		if(nbUpdatedBoxesDynamic)
		{
			if(nbUpdatedBoxesStatic)
			{
				// PT: active static vs active dynamic
#ifdef ABP_MT2
				if(continuation)
				{
					bipTask0.mCounter	= nbUpdatedBoxesDynamic;
					bipTask0.mBoxListX	= mDBM.getUpdatedBoxes().getBoxes_X();
					bipTask0.mBoxListYZ	= mDBM.getUpdatedBoxes().getBoxes_YZ();
					bipTask0.mRemap		= mDBM.getRemap_Updated();
					bipTask0.mType		= 1;
					bipTask0.mCounter4	= nbUpdatedBoxesStatic;
					bipTask0.mBoxListX4	= mSBM.getUpdatedBoxes().getBoxes_X();
					bipTask0.mBoxListYZ4	= mSBM.getUpdatedBoxes().getBoxes_YZ();
					bipTask0.mRemap4	= mSBM.getRemap_Updated();
					bipTask0.mPairs.mSharedPM = &pairManager;
					//bipTask0.mPairs.mDelayedPairs.reserve(10000);
					if(bipTask0.isThereWorkToDo())
					{
						bipTask0.mID = 0;
						bipTask0.setContinuation(continuation);
						bipTask0.removeReference();
					}
				}
				else
#endif
					doBipartiteBoxPruning_Leaf(&pairManager, nbUpdatedBoxesDynamic, nbUpdatedBoxesStatic, mDBM.getUpdatedBoxes(), mSBM.getUpdatedBoxes(), mDBM.getRemap_Updated(), mSBM.getRemap_Updated());
			}

			if(nbNonUpdatedBoxesStatic)
			{
				// PT: sleeping static vs active dynamic
#ifdef ABP_MT2
				if(continuation)
				{
					bipTask1.mCounter	= nbUpdatedBoxesDynamic;
					bipTask1.mBoxListX	= mDBM.getUpdatedBoxes().getBoxes_X();
					bipTask1.mBoxListYZ	= mDBM.getUpdatedBoxes().getBoxes_YZ();
					bipTask1.mRemap		= mDBM.getRemap_Updated();
					bipTask1.mType		= 1;
					bipTask1.mCounter4	= nbNonUpdatedBoxesStatic;
					bipTask1.mBoxListX4	= mSBM.getSleepingBoxes().getBoxes_X();
					bipTask1.mBoxListYZ4	= mSBM.getSleepingBoxes().getBoxes_YZ();
					bipTask1.mRemap4	= mSBM.getRemap_Sleeping();
					bipTask1.mPairs.mSharedPM = &pairManager;
					//bipTask1.mPairs.mDelayedPairs.reserve(10000);
					if(bipTask1.isThereWorkToDo())
					{
						bipTask1.mID = 0;
						bipTask1.setContinuation(continuation);
						bipTask1.removeReference();
					}
				}
				else
#endif
					doBipartiteBoxPruning_Leaf(&pairManager, nbUpdatedBoxesDynamic, nbNonUpdatedBoxesStatic, mDBM.getUpdatedBoxes(), mSBM.getSleepingBoxes(), mDBM.getRemap_Updated(), mSBM.getRemap_Sleeping());
			}
		}

		if(nbUpdatedBoxesStatic && nbNonUpdatedBoxesDynamic)
		{
			// PT: active static vs sleeping dynamic
#ifdef ABP_MT2
			if(continuation)
			{
				bipTask2.mCounter	= nbNonUpdatedBoxesDynamic;
				bipTask2.mBoxListX	= mDBM.getSleepingBoxes().getBoxes_X();
				bipTask2.mBoxListYZ	= mDBM.getSleepingBoxes().getBoxes_YZ();
				bipTask2.mRemap		= mDBM.getRemap_Sleeping();
				bipTask2.mType		= 1;
				bipTask2.mCounter4	= nbUpdatedBoxesStatic;
				bipTask2.mBoxListX4	= mSBM.getUpdatedBoxes().getBoxes_X();
				bipTask2.mBoxListYZ4	= mSBM.getUpdatedBoxes().getBoxes_YZ();
				bipTask2.mRemap4	= mSBM.getRemap_Updated();
				bipTask2.mPairs.mSharedPM = &pairManager;
				//bipTask2.mPairs.mDelayedPairs.reserve(10000);
				if(bipTask2.isThereWorkToDo())
				{
					bipTask2.mID = 0;
					bipTask2.setContinuation(continuation);
					bipTask2.removeReference();
				}
			}
			else
#endif
				doBipartiteBoxPruning_Leaf(&pairManager, nbNonUpdatedBoxesDynamic, nbUpdatedBoxesStatic, mDBM.getSleepingBoxes(), mSBM.getUpdatedBoxes(), mDBM.getRemap_Sleeping(), mSBM.getRemap_Updated());
		}
	}
}

///////////////////////////////////////////////////////////////////////////

// Constructor: sets up the three box managers (static / dynamic / kinematic)
// and, in MT builds, propagates the profiling context ID to all internal tasks.
ABP::ABP(PxU64 contextID) :
	mSBM		(FilterType::STATIC),
	mDBM		(FilterType::DYNAMIC),
	mKBM		(FilterType::KINEMATIC),
	mContextID	(contextID)
#ifdef ABP_MT2
	,mTask0		(ABP_TASK_0)
	,mTask1		(ABP_TASK_1)
#endif
{
#ifdef ABP_MT2
	mTask0.setContextId(mContextID);
	mTask1.setContextId(mContextID);
	mCompleteBoxPruningTask0.setContextId(mContextID);
	mCompleteBoxPruningTask1.setContextId(mContextID);
	for(PxU32 k=0; k<9; k++)
	{
		mCompleteBoxPruningTask0.mTasks[k].setContextId(mContextID);
		mCompleteBoxPruningTask1.mTasks[k].setContextId(mContextID);
	}
	for(PxU32 k=0; k<NB_BIP_TASKS; k++)
		mBipTasks[k].setContextId(mContextID);
#endif
}

ABP::~ABP()
{
	reset();
}

// Releases transient per-update buffers.
void ABP::freeBuffers()
{
	mShared.mRemovedObjects.empty();
}

// Preallocates the shared object array and the pair-manager memory.
// 'mUpdated' is only tracked in debug builds.
void ABP::preallocate(PxU32 nbObjects, PxU32 maxNbOverlaps)
{
	if(nbObjects)
	{
		PX_DELETE_ARRAY(mShared.mABP_Objects);
		ABP_Object* objects = PX_NEW(ABP_Object)[nbObjects];
		mShared.mABP_Objects = objects;
		mShared.mABP_Objects_Capacity = nbObjects;
#if PX_DEBUG
		for(PxU32 i=0;i<nbObjects;i++)
objects[i].mUpdated = false;
#endif
	}

	// PT: TODO: here we should preallocate the box arrays but we don't know how many of them will be static / dynamic...
	mPairManager.reserveMemory(maxNbOverlaps);
}

// Batch-adds static objects. 'maxID' is the largest user ID in the batch,
// used to grow the shared object array if needed.
void ABP::addStaticObjects(const BpHandle* userID_, PxU32 nb, PxU32 maxID)
{
	mShared.checkResize(maxID);
	mSBM.addObjects(userID_, nb, NULL);
}

// Batch-adds dynamic objects. Dynamics also touch the updated-objects bitmap,
// hence the extra resize and the non-null shared data passed to the manager.
void ABP::addDynamicObjects(const BpHandle* userID_, PxU32 nb, PxU32 maxID)
{
	mShared.checkResize(maxID);
	mShared.mUpdatedObjects.checkResize(maxID);
	mDBM.addObjects(userID_, nb, &mShared);
}

// Batch-adds kinematic objects (same bookkeeping as dynamics, different manager).
void ABP::addKinematicObjects(const BpHandle* userID_, PxU32 nb, PxU32 maxID)
{
	mShared.checkResize(maxID);
	mShared.mUpdatedObjects.checkResize(maxID);
	mKBM.addObjects(userID_, nb, &mShared);
}

// Removes a single object: marks it both 'updated' and 'removed' (so that
// computeCreatedDeletedPairs can emit lost pairs for it), then routes the
// removal to the box manager matching the object's filter type.
void ABP::removeObject(BpHandle userID)
{
	mShared.mUpdatedObjects.setBitChecked(userID);
	mShared.mRemovedObjects.setBitChecked(userID);

	PX_ASSERT(userID<mShared.mABP_Objects_Capacity);
	ABPEntry& object = mShared.mABP_Objects[userID];

	// PT: TODO better
	BoxManager* bm;
	const FilterType::Enum objectType = object.getType();
	if(objectType==FilterType::STATIC)
	{
		bm = &mSBM;
	}
	else if(objectType==FilterType::KINEMATIC)
	{
		bm = &mKBM;
	}
	else
	{
		bm = &mDBM;
	}

	bm->removeObject(object, userID);

	object.invalidateIndex();
#if PX_DEBUG
	object.mUpdated = false;
#endif
}

// Marks a single object as moved this frame and forwards the update to the
// box manager matching the object's filter type.
void ABP::updateObject(BpHandle userID)
{
	mShared.mUpdatedObjects.setBitChecked(userID);

	PX_ASSERT(userID<mShared.mABP_Objects_Capacity);
	ABPEntry& object = mShared.mABP_Objects[userID];

	// PT: TODO better
	BoxManager* bm;
	const FilterType::Enum objectType = object.getType();
	if(objectType==FilterType::STATIC)
	{
		bm = &mSBM;
	}
	else if(objectType==FilterType::KINEMATIC)
	{
		bm = &mKBM;
	}
	else
	{
		bm = &mDBM;
	}

	bm->updateObject(object, userID);
}

// PT: TODO: replace bits with timestamps?
// Diffs the current set of active pairs against the previous frame, producing
// the created/deleted pair lists reported to the client.
void ABP_PairManager::computeCreatedDeletedPairs(PxArray<BroadPhasePair>& createdPairs, PxArray<BroadPhasePair>& deletedPairs, const BitArray& updated, const BitArray& removed)
{
	// PT: parse all currently active pairs.
// …The goal here is to generate the found/lost pairs, compared to previous frame.

	// PT: TODO: MT?

	PxU32 i=0;
	PxU32 nbActivePairs = mNbActivePairs;
	while(i<nbActivePairs)
	{
		InternalPair& p = mActivePairs[i];

		if(p.isNew())
		{
			// New pair

			// PT: 'isNew' is set to true in the 'addPair' function. In this case the pair did not previously
			// exist in the structure, and thus we must report the new pair to the client code.
			//
			// PT: group-based filtering is not needed here, since it has already been done in 'addPair'
			const PxU32 id0 = p.getId0();
			const PxU32 id1 = p.getId1();
			PX_ASSERT(id0!=INVALID_ID);
			PX_ASSERT(id1!=INVALID_ID);
			//createdPairs.pushBack(BroadPhasePair(id0, id1));
			BroadPhasePair* newPair = Cm::reserveContainerMemory(createdPairs, 1);
			newPair->mVolA = id0;
			newPair->mVolB = id1;

			// PT: TODO: replace this with bitmaps?
			p.clearNew();
			p.clearUpdated();

			i++;
		}
		else if(p.isUpdated())
		{
			// Persistent pair

			// PT: this pair already existed in the structure, and has been found again this frame. Since
			// MBP reports "all pairs" each frame (as opposed to SAP), this happens quite often, for each
			// active persistent pair.
			p.clearUpdated();
			i++;
		}
		else
		{
			// Lost pair

			// PT: if the pair is not new and not 'updated', it might be a lost (separated) pair. But this
			// is not always the case since we now handle "sleeping" objects directly within MBP. A pair
			// of sleeping objects does not generate an 'addPair' call, so it ends up in this codepath.
			// Nonetheless the sleeping pair should not be deleted. We can only delete pairs involving
			// objects that have been actually moved during the frame. This is the only case in which
			// a pair can indeed become 'lost'.
			const PxU32 id0 = p.getId0();
			const PxU32 id1 = p.getId1();
			PX_ASSERT(id0!=INVALID_ID);
			PX_ASSERT(id1!=INVALID_ID);

			// PT: if none of the involved objects have been updated, the pair is just sleeping: keep it and skip it.
			if(updated.isSetChecked(id0) || updated.isSetChecked(id1))
			{
				// PT: by design (for better or worse) we do not report pairs to the client when
				// one of the involved objects has been deleted. The pair must still be deleted
				// from the MBP structure though.
				if(!removed.isSetChecked(id0) && !removed.isSetChecked(id1))
				{
					// PT: doing the group-based filtering here is useless. The pair should not have
					// been added in the first place.
					//deletedPairs.pushBack(BroadPhasePair(id0, id1));
					BroadPhasePair* lostPair = Cm::reserveContainerMemory(deletedPairs, 1);
					lostPair->mVolA = id0;
					lostPair->mVolB = id1;
				}

				// NOTE: removePair swaps the last pair into slot 'i', so 'i' is NOT
				// incremented here - the swapped-in pair is examined next iteration.
				const PxU32 hashValue = hash(id0, id1) & mMask;
				removePair(id0, id1, hashValue, i);
				nbActivePairs--;
			}
			else i++;
		}
	}
	shrinkMemory();
}

// Runs all broad-phase passes for this frame: dynamic-vs-dynamic/static,
// kinematic-vs-static and kinematic-vs-kinematic (both LUT-filtered), and
// kinematic-vs-dynamic.
void ABP::findOverlaps(PxBaseTask* continuation, const Bp::FilterGroup::Enum* PX_RESTRICT groups, const bool* PX_RESTRICT lut)
{
	PX_PROFILE_ZONE("ABP - findOverlaps", mContextID);

	mPairManager.mGroups = groups;
	mPairManager.mLUT = lut;

	if(!gPrepareOverlapsFlag)
		Region_prepareOverlaps();

	bool doKineKine = true;
	bool doStaticKine = true;
	{
		doStaticKine = lut[Bp::FilterType::KINEMATIC*Bp::FilterType::COUNT + Bp::FilterType::STATIC];
		doKineKine = lut[Bp::FilterType::KINEMATIC*Bp::FilterType::COUNT + Bp::FilterType::KINEMATIC];
	}

	// Static-vs-dynamic (bipartite) and dynamic-vs-dynamic (complete)
	findAllOverlaps(
#ifdef ABP_MT2
		mCompleteBoxPruningTask0, mBipTasks[0], mBipTasks[1], mBipTasks[2], mBipTasks[3], mBipTasks[4],
#endif
		mMM, mPairManager, mSBM, mDBM, true, true, continuation, mContextID);

	// Static-vs-kinematics (bipartite) and kinematics-vs-kinematics (complete)
	findAllOverlaps(
#ifdef ABP_MT2
		mCompleteBoxPruningTask1, mBipTasks[5], mBipTasks[6], mBipTasks[7], mBipTasks[8], mBipTasks[9],
#endif
		mMM, mPairManager, mSBM, mKBM, doKineKine, doStaticKine, continuation, mContextID);

	// Kinematics-vs-dynamics (bipartite only)
	if(1)
	{
		findAllOverlaps(
#ifdef ABP_MT2
			mCompleteBoxPruningTask1, mBipTasks[10], mBipTasks[11], mBipTasks[12], mBipTasks[13], mBipTasks[14],
#endif
			mMM,
mPairManager, mKBM, mDBM, false, true, continuation, mContextID);
	}
	else
	{
		// Reference codepath kept around: the same kinematic-vs-dynamic passes
		// performed manually, without going through findAllOverlaps().
		const PxU32 nbUpdatedDynamics = mDBM.getNbUpdatedBoxes();
		const PxU32 nbNonUpdatedDynamics = mDBM.getNbNonUpdatedBoxes();
		const PxU32 nbUpdatedKinematics = mKBM.getNbUpdatedBoxes();
		const PxU32 nbNonUpdatedKinematics = mKBM.getNbNonUpdatedBoxes();

		if(nbUpdatedDynamics)
		{
			// Active dynamics vs active kinematics
			if(nbUpdatedKinematics)
			{
				doBipartiteBoxPruning_Leaf(&mPairManager, nbUpdatedDynamics, nbUpdatedKinematics, mDBM.getUpdatedBoxes(), mKBM.getUpdatedBoxes(), mDBM.getRemap_Updated(), mKBM.getRemap_Updated());
			}

			// Active dynamics vs inactive kinematics
			if(nbNonUpdatedKinematics)
			{
				doBipartiteBoxPruning_Leaf(&mPairManager, nbUpdatedDynamics, nbNonUpdatedKinematics, mDBM.getUpdatedBoxes(), mKBM.getSleepingBoxes(), mDBM.getRemap_Updated(), mKBM.getRemap_Sleeping());
			}
		}

		if(nbUpdatedKinematics && nbNonUpdatedDynamics)
		{
			// Inactive dynamics vs active kinematics
			doBipartiteBoxPruning_Leaf(&mPairManager, nbNonUpdatedDynamics, nbUpdatedKinematics, mDBM.getSleepingBoxes(), mKBM.getUpdatedBoxes(), mDBM.getRemap_Sleeping(), mKBM.getRemap_Updated());
		}
	}
}

// Generates the created/deleted pair reports and clears the per-frame update
// bitmap. Returns the number of currently active pairs.
PxU32 ABP::finalize(PxArray<BroadPhasePair>& createdPairs, PxArray<BroadPhasePair>& deletedPairs)
{
	PX_PROFILE_ZONE("ABP - finalize", mContextID);
	{
		PX_PROFILE_ZONE("computeCreatedDeletedPairs", mContextID);
		mPairManager.computeCreatedDeletedPairs(createdPairs, deletedPairs, mShared.mUpdatedObjects, mShared.mRemovedObjects);
	}
	mShared.mUpdatedObjects.clearAll();

	return mPairManager.mNbActivePairs;
}

#ifdef ABP_MT2
// Merges the per-task delayed pairs (produced by the MT codepath) back into
// the shared pair manager.
void ABP::addDelayedPairs()
{
	PX_PROFILE_ZONE("ABP - addDelayedPairs", mContextID);

	mCompleteBoxPruningTask0.addDelayedPairs();
	mCompleteBoxPruningTask1.addDelayedPairs();

	PxU32 nbDelayedPairs = 0;
	for(PxU32 k=0; k<NB_BIP_TASKS; k++)
		nbDelayedPairs += mBipTasks[k].mPairs.mDelayedPairs.size();

	if(nbDelayedPairs)
	{
		{
			PX_PROFILE_ZONE("ABP - resizeForNewPairs", mContextID);
			mPairManager.resizeForNewPairs(nbDelayedPairs);
		}

		for(PxU32 k=0; k<NB_BIP_TASKS; k++)
			mPairManager.addDelayedPairs(mBipTasks[k].mPairs.mDelayedPairs);
	}
}

// Same as addDelayedPairs() but also appends the new pairs directly to the
// 'createdPairs' report array.
void ABP::addDelayedPairs2(PxArray<BroadPhasePair>& createdPairs)
{
	PX_PROFILE_ZONE("ABP - addDelayedPairs", mContextID);

	mCompleteBoxPruningTask0.addDelayedPairs2(createdPairs);
	mCompleteBoxPruningTask1.addDelayedPairs2(createdPairs);

	PxU32 nbDelayedPairs = 0;
	for(PxU32 k=0; k<NB_BIP_TASKS; k++)
		nbDelayedPairs += mBipTasks[k].mPairs.mDelayedPairs.size();

	if(nbDelayedPairs)
	{
		{
			PX_PROFILE_ZONE("ABP - resizeForNewPairs", mContextID);
			mPairManager.resizeForNewPairs(nbDelayedPairs);
		}

		for(PxU32 k=0; k<NB_BIP_TASKS; k++)
			mPairManager.addDelayedPairs2(createdPairs, mBipTasks[k].mPairs.mDelayedPairs);
	}
}
#endif

// Drops all objects and pairs, returning the structure to its initial state.
void ABP::reset()
{
	mSBM.reset();
	mDBM.reset();
	mKBM.reset();

	PX_DELETE_ARRAY(mShared.mABP_Objects);
	mShared.mABP_Objects_Capacity = 0;
	mPairManager.purge();

	mShared.mUpdatedObjects.empty();
	mShared.mRemovedObjects.empty();
}

// PT: TODO: is it really ok to use "transient" data in this function?
void ABP::shiftOrigin(const PxVec3& shift, const PxBounds3* /*boundsArray*/, const PxReal* /*contactDistances*/)
{
	PX_UNUSED(shift);
	// PT: unused because the bounds were pre-shifted before calling this function
	// PT: the AABB manager marks all objects as updated when we shift so the stuff below may not be necessary
}

// Points the three box managers at this frame's bounds/contact-distance arrays.
void ABP::setTransientData(const PxBounds3* bounds, const PxReal* contactDistance)
{
	mSBM.setSourceData(bounds, contactDistance);
	mDBM.setSourceData(bounds, contactDistance);
	mKBM.setSourceData(bounds, contactDistance);
}

///////////////////////////////////////////////////////////////////////////////

}

// Below is the PhysX wrapper = link between AABBManager and ABP

using namespace internalABP;

#define DEFAULT_CREATED_DELETED_PAIRS_CAPACITY 1024

// Wrapper constructor: creates the internal ABP, preallocates for the expected
// object/overlap counts and reserves the pair report buffers.
BroadPhaseABP::BroadPhaseABP(PxU32 maxNbBroadPhaseOverlaps, PxU32 maxNbStaticShapes, PxU32 maxNbDynamicShapes, PxU64 contextID, bool enableMT) :
	mNbAdded		(0),
	mNbUpdated		(0),
	mNbRemoved		(0),
	mCreatedHandles	(NULL),
mUpdatedHandles	(NULL),
	mRemovedHandles	(NULL),
	mGroups			(NULL),
	mFilter			(NULL),
	mContextID		(contextID),
	mEnableMT		(enableMT)
{
	mABP = PX_NEW(ABP)(contextID);

	const PxU32 nbObjects = maxNbStaticShapes + maxNbDynamicShapes;
	mABP->preallocate(nbObjects, maxNbBroadPhaseOverlaps);

	mCreated.reserve(DEFAULT_CREATED_DELETED_PAIRS_CAPACITY);
	mDeleted.reserve(DEFAULT_CREATED_DELETED_PAIRS_CAPACITY);
}

BroadPhaseABP::~BroadPhaseABP()
{
	PX_DELETE(mABP);
}

// Main per-frame entry point: caches the update data, then either chains the
// two internal tasks (MT path) or runs remove/add/update + findOverlaps +
// finalize inline (single-threaded path).
void BroadPhaseABP::update(PxcScratchAllocator* scratchAllocator, const BroadPhaseUpdateData& updateData, PxBaseTask* continuation)
{
	PX_PROFILE_ZONE("BroadPhaseABP - update", mContextID);

	PX_CHECK_AND_RETURN(scratchAllocator, "BroadPhaseABP::update - scratchAllocator must be non-NULL \n");

	{
		PX_PROFILE_ZONE("BroadPhaseABP - setup", mContextID);

		mABP->mMM.mScratchAllocator = scratchAllocator;

		mABP->setTransientData(updateData.getAABBs(), updateData.getContactDistance());

		const PxU32 newCapacity = updateData.getCapacity();
		mABP->mShared.checkResize(newCapacity);

#if PX_CHECKED
		// PT: WARNING: this must be done after the allocateMappingArray call
		if(!BroadPhaseUpdateData::isValid(updateData, *this, false, mContextID))
		{
			PX_CHECK_MSG(false, "Illegal BroadPhaseUpdateData \n");
			return;
		}
#endif

		// Cache the raw handle lists; they are consumed by removeObjects/addObjects/updateObjects.
		mGroups = updateData.getGroups();
		mFilter = &updateData.getFilter();

		mNbAdded = updateData.getNumCreatedHandles();
		mNbUpdated = updateData.getNumUpdatedHandles();
		mNbRemoved = updateData.getNumRemovedHandles();
		mCreatedHandles = updateData.getCreatedHandles();
		mUpdatedHandles = updateData.getUpdatedHandles();
		mRemovedHandles = updateData.getRemovedHandles();
	}

	// PT: run single-threaded if forced to do so
	if(!mEnableMT)
		continuation = NULL;

#ifdef ABP_MT2
	if(continuation)
	{
		// MT path: task0 (setup + overlaps) then task1 (finalize + delayed pairs).
		mABP->mTask1.mBP = this;
		mABP->mTask1.setContinuation(continuation);

		mABP->mTask0.mBP = this;
		mABP->mTask0.setContinuation(&mABP->mTask1);

		mABP->mTask1.removeReference();
		mABP->mTask0.removeReference();
	}
	else
#endif
	{
		{
			PX_PROFILE_ZONE("BroadPhaseABP - setUpdateData", mContextID);

			removeObjects();
			addObjects();
			updateObjects();

			PX_ASSERT(!mCreated.size());
			PX_ASSERT(!mDeleted.size());

			if(gPrepareOverlapsFlag)
				mABP->Region_prepareOverlaps();
		}
		{
			PX_PROFILE_ZONE("BroadPhaseABP - update", mContextID);
			mABP->findOverlaps(continuation, mGroups, mFilter->getLUT());
		}
		{
			PX_PROFILE_ZONE("BroadPhaseABP - postUpdate", mContextID);
			mABP->finalize(mCreated, mDeleted);
		}
	}
}

#ifdef ABP_MT2
// Task body for the two internal tasks: ABP_TASK_0 performs the object-list
// updates and kicks off the overlap queries; ABP_TASK_1 finalizes and merges
// the delayed pairs produced by the worker tasks.
void ABP_InternalTask::run()
{
	PX_SIMD_GUARD

	internalABP::ABP* abp = mBP->mABP;

	if(mID==ABP_TASK_0)
	{
		{
			PX_PROFILE_ZONE("ABP_InternalTask - setUpdateData", mContextID);

			mBP->removeObjects();
			mBP->addObjects();
			mBP->updateObjects();

			PX_ASSERT(!mBP->mCreated.size());
			PX_ASSERT(!mBP->mDeleted.size());

			if(gPrepareOverlapsFlag)
				abp->Region_prepareOverlaps();
		}
		{
			PX_PROFILE_ZONE("ABP_InternalTask - update", mContextID);

			// Clear leftover delayed pairs from the previous frame.
			for(PxU32 k=0;k<9;k++)
			{
				abp->mCompleteBoxPruningTask0.mTasks[k].mPairs.mDelayedPairs.resetOrClear();
				abp->mCompleteBoxPruningTask1.mTasks[k].mPairs.mDelayedPairs.resetOrClear();
			}
			for(PxU32 k=0;k<NB_BIP_TASKS;k++)
				abp->mBipTasks[k].mPairs.mDelayedPairs.resetOrClear();

			abp->findOverlaps(getContinuation(), mBP->mGroups, mBP->mFilter->getLUT());
		}
	}
	else if(mID==ABP_TASK_1)
	{
		//abp->addDelayedPairs();
		//abp->finalize(mBP->mCreated, mBP->mDeleted);
		abp->finalize(mBP->mCreated, mBP->mDeleted);
		abp->addDelayedPairs2(mBP->mCreated);
	}
}
#endif

// Forwards this frame's removed-handles list to the internal ABP.
void BroadPhaseABP::removeObjects()
{
	PX_PROFILE_ZONE("BroadPhaseABP - removeObjects", mContextID);

	PxU32 nbRemoved = mNbRemoved;
	const BpHandle* removed = mRemovedHandles;
	if(!nbRemoved || !removed)
		return;

	while(nbRemoved--)
	{
		const BpHandle index = *removed++;
		PX_ASSERT(index+1<mABP->mShared.mABP_Objects_Capacity);	// PT: we allocated one more box on purpose
		mABP->removeObject(index);
	}
}

// Forwards this frame's updated-handles list to the internal ABP.
void BroadPhaseABP::updateObjects()
{
	PX_PROFILE_ZONE("BroadPhaseABP - updateObjects", mContextID);

	PxU32 nbUpdated = mNbUpdated;
	const BpHandle* updated = mUpdatedHandles;
	if(!nbUpdated || !updated)
		return;
while(nbUpdated--)
	{
		const BpHandle index = *updated++;
		PX_ASSERT(index+1<mABP->mShared.mABP_Objects_Capacity);	// PT: we allocated one more box on purpose
		mABP->updateObject(index);
	}
}

// Consumes the created-handles list, sorting the new objects into per-type
// batches (static / dynamic+aggregate / kinematic) that are flushed to the
// internal ABP in groups of ABP_BATCHING entries.
void BroadPhaseABP::addObjects()
{
	PX_PROFILE_ZONE("BroadPhaseABP - addObjects", mContextID);

	PxU32 nbAdded = mNbAdded;
	const BpHandle* created = mCreatedHandles;
	if(!nbAdded || !created)
		return;

	const Bp::FilterGroup::Enum* PX_RESTRICT groups = mGroups;

	// Small fixed-size accumulator that flushes itself when full.
	struct Batch
	{
		PX_FORCE_INLINE	Batch() : mNb(0), mMaxIndex(0)	{}
		PxU32		mNb;
		PxU32		mMaxIndex;
		BpHandle	mIndices[ABP_BATCHING];

		PX_FORCE_INLINE void add(const BpHandle index, internalABP::ABP* PX_RESTRICT abp, FilterType::Enum type)
		{
			PxU32 nb = mNb;
			mMaxIndex = PxMax(mMaxIndex, index);
			mIndices[nb++] = index;
			if(nb==ABP_BATCHING)
			{
				mNb = 0;
				// PT: TODO: we could use a function ptr here
				if(type==FilterType::STATIC)
					abp->addStaticObjects(mIndices, ABP_BATCHING, mMaxIndex);
				else if(type==FilterType::KINEMATIC)
					abp->addKinematicObjects(mIndices, ABP_BATCHING, mMaxIndex);
				else
				{
					PX_ASSERT(type==FilterType::DYNAMIC || type==FilterType::AGGREGATE);
					abp->addDynamicObjects(mIndices, ABP_BATCHING, mMaxIndex);
				}
				mMaxIndex = 0;
			}
			else
				mNb = nb;
		}
	};

	Batch statics;
	Batch dynamics;
	Batch kinematics;

	// Aggregates are routed to the dynamic batch.
	Batch* batches[FilterType::COUNT] = {NULL};
	batches[FilterType::STATIC] = &statics;
	batches[FilterType::DYNAMIC] = &dynamics;
	batches[FilterType::AGGREGATE] = &dynamics;
	batches[FilterType::KINEMATIC] = &kinematics;

	while(nbAdded--)
	{
		const BpHandle index = *created++;
		PX_ASSERT(index+1<mABP->mShared.mABP_Objects_Capacity);	// PT: we allocated one more box on purpose

		FilterType::Enum type = FilterType::Enum(groups[index] & BP_FILTERING_TYPE_MASK);
		if(!batches[type])
			type = FilterType::DYNAMIC;
		batches[type]->add(index, mABP, type);
	}

	// Flush the partially-filled batches.
	if(statics.mNb)
		mABP->addStaticObjects(statics.mIndices, statics.mNb, statics.mMaxIndex);
	if(kinematics.mNb)
		mABP->addKinematicObjects(kinematics.mIndices, kinematics.mNb, kinematics.mMaxIndex);
	if(dynamics.mNb)
		mABP->addDynamicObjects(dynamics.mIndices, dynamics.mNb, dynamics.mMaxIndex);
}

// Returns the pairs created during the last update.
const BroadPhasePair* BroadPhaseABP::getCreatedPairs(PxU32& nbCreatedPairs) const
{
	nbCreatedPairs = mCreated.size();
	return mCreated.begin();
}

// Returns the pairs deleted during the last update.
const BroadPhasePair* BroadPhaseABP::getDeletedPairs(PxU32& nbDeletedPairs) const
{
	nbDeletedPairs = mDeleted.size();
	return mDeleted.begin();
}

// Shrinks a pair report buffer back to its default capacity if it grew past it.
static void freeBuffer(PxArray<BroadPhasePair>& buffer)
{
	const PxU32 size = buffer.size();
	if(size>DEFAULT_CREATED_DELETED_PAIRS_CAPACITY)
	{
		buffer.reset();
		buffer.reserve(DEFAULT_CREATED_DELETED_PAIRS_CAPACITY);
	}
	else
	{
		buffer.clear();
	}
}

void BroadPhaseABP::freeBuffers()
{
	PX_PROFILE_ZONE("BroadPhaseABP - freeBuffers", mContextID);
	mABP->freeBuffers();
	freeBuffer(mCreated);
	freeBuffer(mDeleted);
}

#if PX_CHECKED
// Validates the update data against the current object states: created objects
// must not already exist; updated/removed objects must exist.
bool BroadPhaseABP::isValid(const BroadPhaseUpdateData& updateData) const
{
	const PxU32 nbObjects = mABP->mShared.mABP_Objects_Capacity;
	PX_UNUSED(nbObjects);
	const ABP_Object* PX_RESTRICT objects = mABP->mShared.mABP_Objects;

	const BpHandle* created = updateData.getCreatedHandles();
	if(created)
	{
		PxU32 nbToGo = updateData.getNumCreatedHandles();
		while(nbToGo--)
		{
			const BpHandle index = *created++;
			PX_ASSERT(index<nbObjects);

			if(objects[index].isValid())
				return false;	// This object has been added already
		}
	}

	const BpHandle* updated = updateData.getUpdatedHandles();
	if(updated)
	{
		PxU32 nbToGo = updateData.getNumUpdatedHandles();
		while(nbToGo--)
		{
			const BpHandle index = *updated++;
			PX_ASSERT(index<nbObjects);

			if(!objects[index].isValid())
				return false;	// This object has been removed already, or never been added
		}
	}

	const BpHandle* removed = updateData.getRemovedHandles();
	if(removed)
	{
		PxU32 nbToGo = updateData.getNumRemovedHandles();
		while(nbToGo--)
		{
			const BpHandle index = *removed++;
			PX_ASSERT(index<nbObjects);

			if(!objects[index].isValid())
				return false;	// This object has been removed already, or never been added
		}
	}
	return true;
}
#endif

// Forwards the origin shift to the internal ABP (bounds are pre-shifted by the caller).
void BroadPhaseABP::shiftOrigin(const PxVec3& shift, const PxBounds3* boundsArray, const PxReal* contactDistances)
{
	mABP->shiftOrigin(shift, boundsArray, contactDistances);
}
123,703
C++
27.582255
186
0.690056
NVIDIA-Omniverse/PhysX/physx/source/lowlevelaabb/src/BpBroadPhaseABP.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef BP_BROADPHASE_ABP_H
#define BP_BROADPHASE_ABP_H

#include "foundation/PxArray.h"
#include "BpBroadPhase.h"
#include "PxPhysXConfig.h"
#include "BpBroadPhaseUpdate.h"

// When defined, the ABP update can be spread over multiple tasks (the "PABP" flavour).
#define ABP_MT2

namespace internalABP{ class ABP; }

namespace physx
{
namespace Bp
{
	// PhysX-side wrapper linking the AABB manager to the internal ABP
	// broad-phase implementation (internalABP::ABP).
	class BroadPhaseABP : public BroadPhase
	{
		PX_NOCOPY(BroadPhaseABP)
		public:
									BroadPhaseABP(PxU32 maxNbBroadPhaseOverlaps, PxU32 maxNbStaticShapes, PxU32 maxNbDynamicShapes, PxU64 contextID, bool enableMT);
		virtual						~BroadPhaseABP();

	// BroadPhase
		// ePABP is reported when the MT flag is enabled; same class either way.
		virtual	PxBroadPhaseType::Enum	getType()	const	PX_OVERRIDE	{ return mEnableMT ? PxBroadPhaseType::ePABP : PxBroadPhaseType::eABP;	}
		virtual	void					release()			PX_OVERRIDE	{ PX_DELETE_THIS;	}
		virtual	void					update(PxcScratchAllocator* scratchAllocator, const BroadPhaseUpdateData& updateData, physx::PxBaseTask* continuation)	PX_OVERRIDE;
		virtual	void					preBroadPhase(const Bp::BroadPhaseUpdateData&)	PX_OVERRIDE	{}
		virtual	void					fetchBroadPhaseResults()	PX_OVERRIDE	{}
		virtual	const BroadPhasePair*	getCreatedPairs(PxU32&)	const	PX_OVERRIDE;
		virtual	const BroadPhasePair*	getDeletedPairs(PxU32&)	const	PX_OVERRIDE;
		virtual	void					freeBuffers()	PX_OVERRIDE;
		virtual	void					shiftOrigin(const PxVec3& shift, const PxBounds3* boundsArray, const PxReal* contactDistances)	PX_OVERRIDE;
#if PX_CHECKED
		virtual	bool					isValid(const BroadPhaseUpdateData& updateData)	const	PX_OVERRIDE;
#endif
	//~BroadPhase

				internalABP::ABP*		mABP;				// Owned internal implementation

				// PT: TODO: aggregate
				// Per-frame update data cached from BroadPhaseUpdateData in update():
				PxU32					mNbAdded;			// number of created handles
				PxU32					mNbUpdated;			// number of updated handles
				PxU32					mNbRemoved;			// number of removed handles
				const BpHandle*			mCreatedHandles;	// borrowed pointers, valid for the frame
				const BpHandle*			mUpdatedHandles;
				const BpHandle*			mRemovedHandles;

				PxArray<BroadPhasePair>	mCreated;			// pairs found during the last update
				PxArray<BroadPhasePair>	mDeleted;			// pairs lost during the last update

				const Bp::FilterGroup::Enum*	mGroups;	// per-object filter groups (borrowed)
				const BpFilter*			mFilter;			// filtering LUT owner (borrowed)

				const PxU64				mContextID;			// profiling context
				const bool				mEnableMT;			// true => multi-threaded update (ePABP)

				void					addObjects();
				void					removeObjects();
				void					updateObjects();
	};

} //namespace Bp

} //namespace physx

#endif // BP_BROADPHASE_ABP_H
3,907
C
38.474747
151
0.723061
NVIDIA-Omniverse/PhysX/physx/source/lowlevelaabb/src/BpAABBManagerBase.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "BpAABBManagerBase.h"
#include "BpBroadPhase.h"

using namespace physx;
using namespace Bp;

// Base-class constructor: wires the broad-phase, bounds and contact-distance
// arrays together and pre-sizes every per-shape array for 'maxNbShapes'.
AABBManagerBase::AABBManagerBase(BroadPhase& bp, BoundsArray& boundsArray, PxFloatArrayPinned& contactDistance, PxU32 maxNbAggregates, PxU32 maxNbShapes, PxVirtualAllocator& allocator, PxU64 contextID, PxPairFilteringMode::Enum kineKineFilteringMode, PxPairFilteringMode::Enum staticKineFilteringMode) :
	mAddedHandleMap		(allocator),
	mRemovedHandleMap	(allocator),
	mChangedHandleMap	(allocator),
	mGroups				(allocator),
	mContactDistance	(contactDistance),
	mVolumeData			(allocator),
	mFilters			(kineKineFilteringMode == PxPairFilteringMode::eKILL, staticKineFilteringMode == PxPairFilteringMode::eKILL),
	mAddedHandles		(allocator),
	mUpdatedHandles		(allocator),
	mRemovedHandles		(allocator),
	mBroadPhase			(bp),
	mBoundsArray		(boundsArray),
	mUsedSize			(0),
	mNbAggregates		(0),
#ifdef BP_USE_AGGREGATE_GROUP_TAIL
	mAggregateGroupTide	(PxU32(Bp::FilterGroup::eAGGREGATE_BASE)),
#endif
	mContextID			(contextID),
	mOriginShifted		(false)
{
	PX_UNUSED(maxNbAggregates);	// PT: TODO: use it or remove it
	reserveShapeSpace(PxMax(maxNbShapes, 1u));

	//	mCreatedOverlaps.reserve(16000);
}

// Grows every per-shape array to (at least) the next power of two of 'nbTotalBounds'.
void AABBManagerBase::reserveShapeSpace(PxU32 nbTotalBounds)
{
	nbTotalBounds = PxNextPowerOfTwo(nbTotalBounds);
	mGroups.resize(nbTotalBounds, Bp::FilterGroup::eINVALID);
	mVolumeData.resize(nbTotalBounds);	//KS - must be initialized so that userData is NULL for SQ-only shapes
	mContactDistance.resizeUninitialized(nbTotalBounds);
	mAddedHandleMap.resize(nbTotalBounds);
	mRemovedHandleMap.resize(nbTotalBounds);
}

// Ensures 'index' is addressable and resets its entry to the invalid state.
void AABBManagerBase::reserveSpaceForBounds(BoundsIndex index)
{
	if ((index + 1) >= mVolumeData.size())
		reserveShapeSpace(index + 1);

	resetEntry(index); //KS - make sure this entry is flagged as invalid
}

void AABBManagerBase::freeBuffers()
{
	// PT: TODO: investigate if we need more stuff here
	mBroadPhase.freeBuffers();
}

// Forwards the origin shift to the broad-phase and records that it happened.
void AABBManagerBase::shiftOrigin(const PxVec3& shift)
{
	mBroadPhase.shiftOrigin(shift, mBoundsArray.begin(), mContactDistance.begin());
	mOriginShifted = true;
}
3,855
C++
40.021276
122
0.769131
NVIDIA-Omniverse/PhysX/physx/source/lowlevelaabb/src/BpBroadPhase.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "BpBroadPhase.h"
#include "BpBroadPhaseSap.h"
#include "BpBroadPhaseMBP.h"
#include "BpBroadPhaseABP.h"

using namespace physx;
using namespace Bp;

// Factory: instantiates the broad-phase implementation matching the requested
// PxBroadPhaseType. ePABP is the multi-threaded flavour of eABP (same class,
// MT flag enabled). Asserts and returns NULL for unsupported types.
BroadPhase* BroadPhase::create(const PxBroadPhaseType::Enum bpType, const PxU32 maxNbRegions, const PxU32 maxNbBroadPhaseOverlaps, const PxU32 maxNbStaticShapes, const PxU32 maxNbDynamicShapes, PxU64 contextID)
{
	switch(bpType)
	{
		case PxBroadPhaseType::eABP:
			return PX_NEW(BroadPhaseABP)(maxNbBroadPhaseOverlaps, maxNbStaticShapes, maxNbDynamicShapes, contextID, false);

		case PxBroadPhaseType::ePABP:
			return PX_NEW(BroadPhaseABP)(maxNbBroadPhaseOverlaps, maxNbStaticShapes, maxNbDynamicShapes, contextID, true);

		case PxBroadPhaseType::eMBP:
			return PX_NEW(BroadPhaseMBP)(maxNbRegions, maxNbBroadPhaseOverlaps, maxNbStaticShapes, maxNbDynamicShapes, contextID);

		case PxBroadPhaseType::eSAP:
			return PX_NEW(BroadPhaseSap)(maxNbBroadPhaseOverlaps, maxNbStaticShapes, maxNbDynamicShapes, contextID);

		default:
			PX_ASSERT(0);
			return NULL;
	}
}
2,736
C++
45.38983
120
0.781798
NVIDIA-Omniverse/PhysX/physx/source/lowlevelaabb/src/BpBroadPhaseSapAux.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "BpBroadPhaseSapAux.h"
#include "PxcScratchAllocator.h"

namespace physx
{
namespace Bp
{

// Exchanges two broad-phase handles.
PX_FORCE_INLINE void PxBpHandleSwap(BpHandle& a, BpHandle& b)
{
	const BpHandle tmp = b;
	b = a;
	a = tmp;
}

// Orders a handle pair so that id0 <= id1 on return.
PX_FORCE_INLINE void Sort(BpHandle& id0, BpHandle& id1)
{
	if(id0 > id1)
		PxBpHandleSwap(id0, id1);
}

// True if the stored pair p does not hold exactly (id0, id1).
PX_FORCE_INLINE bool DifferentPair(const BroadPhasePair& p, BpHandle id0, BpHandle id1)
{
	return (p.mVolA != id0) || (p.mVolB != id1);
}

// 32-bit integer mixing function (shift/add/xor avalanche).
PX_FORCE_INLINE int Hash32Bits_1(int key)
{
	key += ~(key << 15);
	key ^= (key >> 10);
	key += (key << 3);
	key ^= (key >> 6);
	key += ~(key << 11);
	key ^= (key >> 16);
	return key;
}

// Hashes an (ordered) handle pair by packing both 16-bit-ranged ids into one
// 32-bit word and mixing it.
PX_FORCE_INLINE PxU32 Hash(BpHandle id0, BpHandle id1)
{
	const PxU32 packed = PxU32(id0) | (PxU32(id1) << 16);
	return PxU32(Hash32Bits_1(int(packed)));
}

///////////////////////////////////////////////////////////////////////////////

// Starts out empty; storage is acquired later via init().
SapPairManager::SapPairManager() :
	mHashTable				(NULL),
	mNext					(NULL),
	mHashSize				(0),
	mHashCapacity			(0),
	mMinAllowedHashCapacity	(0),
	mActivePairs			(NULL),
	mActivePairStates		(NULL),
	mNbActivePairs			(0),
	mActivePairsCapacity	(0),
	mMask					(0)
{
}

///////////////////////////////////////////////////////////////////////////////

// All buffers must have been freed (via release) before destruction.
SapPairManager::~SapPairManager()
{
	PX_ASSERT(NULL==mHashTable);
	PX_ASSERT(NULL==mNext);
	PX_ASSERT(NULL==mActivePairs);
	PX_ASSERT(NULL==mActivePairStates);
}

///////////////////////////////////////////////////////////////////////////////

// Allocates all four parallel arrays at the requested capacity. The same
// value also becomes the floor below which shrinkMemory will not go.
void SapPairManager::init(const PxU32 size)
{
	const PxU32 handleBytes = ALIGN_SIZE_16(sizeof(BpHandle)*size);
	mHashTable = reinterpret_cast<BpHandle*>(PX_ALLOC(handleBytes, "BpHandle"));
	mNext = reinterpret_cast<BpHandle*>(PX_ALLOC(handleBytes, "BpHandle"));
	mActivePairs = reinterpret_cast<BroadPhasePair*>(PX_ALLOC(ALIGN_SIZE_16(sizeof(BroadPhasePair)*size), "BroadPhasePair"));
	mActivePairStates = reinterpret_cast<PxU8*>(PX_ALLOC(ALIGN_SIZE_16(sizeof(PxU8)*size), "BroadPhaseContextSap ActivePairStates"));
	mHashCapacity = size;
	mMinAllowedHashCapacity = size;
	mActivePairsCapacity = size;
}
///////////////////////////////////////////////////////////////////////////////

// Frees all four parallel arrays and resets every counter to the empty state.
void SapPairManager::release()
{
	PX_FREE(mHashTable);
	PX_FREE(mNext);
	PX_FREE(mActivePairs);
	PX_FREE(mActivePairStates);
	mHashSize = 0;
	mHashCapacity = 0;
	mMinAllowedHashCapacity = 0;
	mNbActivePairs = 0;
	mActivePairsCapacity = 0;
	mMask = 0;
}

///////////////////////////////////////////////////////////////////////////////

// Public lookup: hashes the (unordered) id pair and walks the collision chain.
// Returns NULL when the pair is not tracked.
const BroadPhasePair* SapPairManager::FindPair(BpHandle id0, BpHandle id1) const
{
	if(0==mHashSize)
		return NULL;	// Nothing has been allocated yet

	// Order the ids
	Sort(id0, id1);

	// Compute hash value for this pair
	PxU32 HashValue = Hash(id0, id1) & mMask;
	PX_ASSERT(HashValue<mHashCapacity);

	// Look for it in the table
	PX_ASSERT(HashValue<mHashCapacity);
	PxU32 Offset = mHashTable[HashValue];
	PX_ASSERT(BP_INVALID_BP_HANDLE==Offset || Offset<mActivePairsCapacity);
	while(Offset!=BP_INVALID_BP_HANDLE && DifferentPair(mActivePairs[Offset], id0, id1))
	{
		PX_ASSERT(mActivePairs[Offset].mVolA!=BP_INVALID_BP_HANDLE);
		PX_ASSERT(Offset<mHashCapacity);
		Offset = mNext[Offset];		// Better to have a separate array for this
		PX_ASSERT(BP_INVALID_BP_HANDLE==Offset || Offset<mActivePairsCapacity);
	}
	if(Offset==BP_INVALID_BP_HANDLE)
		return NULL;
	PX_ASSERT(Offset<mNbActivePairs);
	// Match mActivePairs[Offset] => the pair is persistent
	PX_ASSERT(Offset<mActivePairsCapacity);
	return &mActivePairs[Offset];
}

///////////////////////////////////////////////////////////////////////////////

// Internal version saving hash computation
// Callers pass an already-masked hash value and pre-ordered ids.
PX_FORCE_INLINE BroadPhasePair* SapPairManager::FindPair(BpHandle id0, BpHandle id1, PxU32 hash_value) const
{
	if(0==mHashSize)
		return NULL;	// Nothing has been allocated yet

	// Look for it in the table
	PX_ASSERT(hash_value<mHashCapacity);
	PxU32 Offset = mHashTable[hash_value];
	PX_ASSERT(BP_INVALID_BP_HANDLE==Offset || Offset<mActivePairsCapacity);
	while(Offset!=BP_INVALID_BP_HANDLE && DifferentPair(mActivePairs[Offset], id0, id1))
	{
		PX_ASSERT(mActivePairs[Offset].mVolA!=BP_INVALID_BP_HANDLE);
		PX_ASSERT(Offset<mHashCapacity);
		Offset = mNext[Offset];		// Better to have a separate array for this
		PX_ASSERT(BP_INVALID_BP_HANDLE==Offset || Offset<mActivePairsCapacity);
	}
	if(Offset==BP_INVALID_BP_HANDLE)
		return NULL;
	PX_ASSERT(Offset<mNbActivePairs);
	// Match mActivePairs[Offset] => the pair is persistent
	PX_ASSERT(Offset<mActivePairsCapacity);
	return &mActivePairs[Offset];
}

///////////////////////////////////////////////////////////////////////////////

// Inserts (id0, id1) if not already tracked, growing the hash table (next
// power of two) when full. Returns the existing pair for a duplicate add,
// so AddPair acts as find-or-insert.
const BroadPhasePair* SapPairManager::AddPair(BpHandle id0, BpHandle id1, const PxU8 state)
{
	// Order the ids
	Sort(id0, id1);

	PxU32 HashValue = Hash(id0, id1) & mMask;

	BroadPhasePair* P = FindPair(id0, id1, HashValue);
	if(P)
	{
		return P;	// Persistent pair
	}

	// This is a new pair
	if(mNbActivePairs >= mHashSize)
	{
		// Get more entries
		mHashSize = PxNextPowerOfTwo(mNbActivePairs+1);
		mMask = mHashSize-1;

		// Only reallocate when the new size exceeds the existing capacity.
		reallocPairs(mHashSize>mHashCapacity);

		// Recompute hash value with new hash size
		HashValue = Hash(id0, id1) & mMask;
	}

	// Append to the dense pair array and link into the bucket chain.
	PX_ASSERT(mNbActivePairs<mActivePairsCapacity);
	BroadPhasePair* p = &mActivePairs[mNbActivePairs];
	p->mVolA = id0;	// ### CMOVs would be nice here
	p->mVolB = id1;
	mActivePairStates[mNbActivePairs]=state;

	PX_ASSERT(mNbActivePairs<mHashSize);
	PX_ASSERT(mNbActivePairs<mHashCapacity);
	PX_ASSERT(HashValue<mHashCapacity);
	mNext[mNbActivePairs] = mHashTable[HashValue];
	mHashTable[HashValue] = BpHandle(mNbActivePairs++);
	return p;
}

///////////////////////////////////////////////////////////////////////////////

// Internal removal: unlinks pair_index from its bucket chain, then keeps the
// pair array dense by moving the last pair into the freed slot (relinking it
// in its own bucket). The ids are unused; only hash_value/pair_index matter.
void SapPairManager::RemovePair(BpHandle /*id0*/, BpHandle /*id1*/, PxU32 hash_value, PxU32 pair_index)
{
	// Walk the hash table to fix mNext
	{
		PX_ASSERT(hash_value<mHashCapacity);
		PxU32 Offset = mHashTable[hash_value];
		PX_ASSERT(Offset!=BP_INVALID_BP_HANDLE);

		PxU32 Previous=BP_INVALID_BP_HANDLE;
		while(Offset!=pair_index)
		{
			Previous = Offset;
			PX_ASSERT(Offset<mHashCapacity);
			Offset = mNext[Offset];
		}

		// Let us go/jump us
		if(Previous!=BP_INVALID_BP_HANDLE)
		{
			PX_ASSERT(Previous<mHashCapacity);
			PX_ASSERT(pair_index<mHashCapacity);
			PX_ASSERT(mNext[Previous]==pair_index);
			mNext[Previous] = mNext[pair_index];
		}
		// else we were the first
		else
		{
			PX_ASSERT(hash_value<mHashCapacity);
			PX_ASSERT(pair_index<mHashCapacity);
			mHashTable[hash_value] = mNext[pair_index];
		}
	}
	// we're now free to reuse mNext[PairIndex] without breaking the list
#if PX_DEBUG
	PX_ASSERT(pair_index<mHashCapacity);
	mNext[pair_index]=BP_INVALID_BP_HANDLE;
#endif
	// Invalidate entry

	// Fill holes
	{
		// 1) Remove last pair
		const PxU32 LastPairIndex = mNbActivePairs-1;
		if(LastPairIndex==pair_index)
		{
			// Removing the last entry: just shrink, no hole to fill.
			mNbActivePairs--;
		}
		else
		{
			PX_ASSERT(LastPairIndex<mActivePairsCapacity);
			const BroadPhasePair* Last = &mActivePairs[LastPairIndex];
			const PxU32 LastHashValue = Hash(Last->mVolA, Last->mVolB) & mMask;

			// Walk the hash table to fix mNext
			PX_ASSERT(LastHashValue<mHashCapacity);
			PxU32 Offset = mHashTable[LastHashValue];
			PX_ASSERT(Offset!=BP_INVALID_BP_HANDLE);

			PxU32 Previous=BP_INVALID_BP_HANDLE;
			while(Offset!=LastPairIndex)
			{
				Previous = Offset;
				PX_ASSERT(Offset<mHashCapacity);
				Offset = mNext[Offset];
			}

			// Let us go/jump us
			if(Previous!=BP_INVALID_BP_HANDLE)
			{
				PX_ASSERT(Previous<mHashCapacity);
				PX_ASSERT(LastPairIndex<mHashCapacity);
				PX_ASSERT(mNext[Previous]==LastPairIndex);
				mNext[Previous] = mNext[LastPairIndex];
			}
			// else we were the first
			else
			{
				PX_ASSERT(LastHashValue<mHashCapacity);
				PX_ASSERT(LastPairIndex<mHashCapacity);
				mHashTable[LastHashValue] = mNext[LastPairIndex];
			}
			// we're now free to reuse mNext[LastPairIndex] without breaking the list
#if PX_DEBUG
			PX_ASSERT(LastPairIndex<mHashCapacity);
			mNext[LastPairIndex]=BP_INVALID_BP_HANDLE;
#endif
			// Don't invalidate entry since we're going to shrink the array

			// 2) Re-insert in free slot
			PX_ASSERT(pair_index<mActivePairsCapacity);
			PX_ASSERT(LastPairIndex<mActivePairsCapacity);
			mActivePairs[pair_index] = mActivePairs[LastPairIndex];
			mActivePairStates[pair_index] = mActivePairStates[LastPairIndex];
#if PX_DEBUG
			PX_ASSERT(pair_index<mHashCapacity);
			PX_ASSERT(mNext[pair_index]==BP_INVALID_BP_HANDLE);
#endif
			PX_ASSERT(pair_index<mHashCapacity);
			PX_ASSERT(LastHashValue<mHashCapacity);
			mNext[pair_index] = mHashTable[LastHashValue];
			mHashTable[LastHashValue] = BpHandle(pair_index);

			mNbActivePairs--;
		}
	}
}

// Public removal by ids. Returns false when the pair is not tracked;
// may shrink the hash table afterwards.
bool SapPairManager::RemovePair(BpHandle id0, BpHandle id1)
{
	// Order the ids
	Sort(id0, id1);

	const PxU32 HashValue = Hash(id0, id1) & mMask;
	const BroadPhasePair* P = FindPair(id0, id1, HashValue);
	if(!P)
		return false;
	PX_ASSERT(P->mVolA==id0);
	PX_ASSERT(P->mVolB==id1);

	RemovePair(id0, id1, HashValue, GetPairIndex(P));

	shrinkMemory();
	return true;
}

// Removes every pair that references a deleted AABB. Note: i is only advanced
// when no removal happened, because RemovePair moves the last pair into slot i.
bool SapPairManager::RemovePairs(const PxBitMap& removedAABBs)
{
	PxU32 i=0;
	while(i<mNbActivePairs)
	{
		const BpHandle id0 = mActivePairs[i].mVolA;
		const BpHandle id1 = mActivePairs[i].mVolB;
		if(removedAABBs.test(id0) || removedAABBs.test(id1))
		{
			const PxU32 HashValue = Hash(id0, id1) & mMask;
			RemovePair(id0, id1, HashValue, i);
		}
		else i++;
	}
	return true;
}

// Shrinks the hash table toward the smallest power of two that holds the
// active pairs, but never below mMinAllowedHashCapacity (set by init()).
void SapPairManager::shrinkMemory()
{
	//Compute the hash size given the current number of active pairs.
	const PxU32 correctHashSize = PxNextPowerOfTwo(mNbActivePairs);

	//If we have the correct hash size then no action required.
	if(correctHashSize==mHashSize || (correctHashSize < mMinAllowedHashCapacity && mHashSize == mMinAllowedHashCapacity))
		return;

	//The hash size can be reduced so take action.
	//Don't let the hash size fall below a threshold value.
	PxU32 newHashSize = correctHashSize;
	if(newHashSize < mMinAllowedHashCapacity)
	{
		newHashSize = mMinAllowedHashCapacity;
	}
	mHashSize = newHashSize;
	mMask = newHashSize-1;

	// Only reallocate when above the floor or when usage dropped to <= 1/4
	// of the allocated capacity; otherwise just rebuild in place.
	reallocPairs(
		(newHashSize > mMinAllowedHashCapacity) ||
		(mHashSize <= (mHashCapacity >> 2)) ||
		(mHashSize <= (mActivePairsCapacity >> 2)));
}

// Rebuilds the hash table for the current mHashSize/mMask. When allocRequired
// is true, fresh arrays of mHashSize entries are allocated and the active
// pairs copied over; otherwise the existing arrays are re-linked in place.
void SapPairManager::reallocPairs(const bool allocRequired)
{
	if(allocRequired)
	{
		PX_FREE(mHashTable);
		mHashCapacity=mHashSize;
		mActivePairsCapacity=mHashSize;
		mHashTable = reinterpret_cast<BpHandle*>(PX_ALLOC(mHashSize*sizeof(BpHandle), "BpHandle"));

		for(PxU32 i=0;i<mHashSize;i++)
		{
			mHashTable[i] = BP_INVALID_BP_HANDLE;
		}

		// Get some bytes for new entries
		BroadPhasePair* NewPairs = reinterpret_cast<BroadPhasePair*>(PX_ALLOC(mHashSize * sizeof(BroadPhasePair), "BroadPhasePair"));
		PX_ASSERT(NewPairs);
		BpHandle* NewNext = reinterpret_cast<BpHandle*>(PX_ALLOC(mHashSize * sizeof(BpHandle), "BpHandle"));
		PX_ASSERT(NewNext);
		PxU8* NewPairStates = reinterpret_cast<PxU8*>(PX_ALLOC(mHashSize * sizeof(PxU8), "SapPairStates"));
		PX_ASSERT(NewPairStates);

		// Copy old data if needed
		if(mNbActivePairs)
		{
			PxMemCopy(NewPairs, mActivePairs, mNbActivePairs*sizeof(BroadPhasePair));
			PxMemCopy(NewPairStates, mActivePairStates, mNbActivePairs*sizeof(PxU8));
		}

		// ### check it's actually needed... probably only for pairs whose hash value was cut by the and
		// yeah, since Hash(id0, id1) is a constant
		// However it might not be needed to recompute them => only less efficient but still ok
		for(PxU32 i=0;i<mNbActivePairs;i++)
		{
			const PxU32 HashValue = Hash(mActivePairs[i].mVolA, mActivePairs[i].mVolB) & mMask;	// New hash value with new mask
			NewNext[i] = mHashTable[HashValue];
			PX_ASSERT(HashValue<mHashCapacity);
			mHashTable[HashValue] = BpHandle(i);
		}

		// Delete old data
		PX_FREE(mNext);
		PX_FREE(mActivePairs);
		PX_FREE(mActivePairStates);

		// Assign new pointer
		mActivePairs = NewPairs;
		mActivePairStates = NewPairStates;
		mNext = NewNext;
	}
	else
	{
		// In-place rebuild: clear the buckets and re-link every active pair
		// under the (possibly changed) mask.
		for(PxU32 i=0;i<mHashSize;i++)
		{
			mHashTable[i] = BP_INVALID_BP_HANDLE;
		}

		// ### check it's actually needed... probably only for pairs whose hash value was cut by the and
		// yeah, since Hash(id0, id1) is a constant
		// However it might not be needed to recompute them => only less efficient but still ok
		for(PxU32 i=0;i<mNbActivePairs;i++)
		{
			const PxU32 HashValue = Hash(mActivePairs[i].mVolA, mActivePairs[i].mVolB) & mMask;	// New hash value with new mask
			mNext[i] = mHashTable[HashValue];
			PX_ASSERT(HashValue<mHashCapacity);
			mHashTable[HashValue] = BpHandle(i);
		}
	}
}

// Doubles the capacity of a created/deleted pair list, preserving contents.
void resizeCreatedDeleted(BroadPhasePair*& pairs, PxU32& maxNumPairs)
{
	PX_ASSERT(pairs);
	PX_ASSERT(maxNumPairs>0);
	const PxU32 newMaxNumPairs=2*maxNumPairs;
	BroadPhasePair* newPairs=reinterpret_cast<BroadPhasePair*>(PX_ALLOC(sizeof(BroadPhasePair)*newMaxNumPairs, "BroadPhasePair"));
	PxMemCopy(newPairs, pairs, sizeof(BroadPhasePair)*maxNumPairs);
	PX_FREE(pairs);
	pairs=newPairs;
	maxNumPairs=newMaxNumPairs;
}

// Converts the per-frame touched-pair list (dataArray, indices into the pair
// manager) into created/deleted pair reports, growing the output lists from
// the scratch allocator as needed. Pairs flagged both New and Removed in the
// same frame are appended after numDeletedPairs (reported via
// numActualDeletedPairs) so they can still be physically removed later.
void ComputeCreatedDeletedPairsLists
(const Bp::FilterGroup::Enum* PX_RESTRICT boxGroups,
 const BpHandle* PX_RESTRICT dataArray, const PxU32 dataArraySize,
 PxcScratchAllocator* scratchAllocator,
 BroadPhasePair*& createdPairsList, PxU32& numCreatedPairs, PxU32& maxNumCreatedPairs,
 BroadPhasePair*& deletedPairsList, PxU32& numDeletedPairs, PxU32& maxNumDeletedPairs,
 PxU32& numActualDeletedPairs,
 SapPairManager& pairManager)
{
#if BP_SAP_TEST_GROUP_ID_CREATEUPDATE
	PX_UNUSED(boxGroups);
#endif

	for(PxU32 i=0;i<dataArraySize;i++)
	{
		const PxU32 ID = dataArray[i];
		PX_ASSERT(ID<pairManager.mNbActivePairs);

		const BroadPhasePair* PX_RESTRICT UP = pairManager.mActivePairs + ID;
		PX_ASSERT(pairManager.IsInArray(UP));

		if(pairManager.IsRemoved(UP))
		{
			if(!pairManager.IsNew(UP))
			{
				// No need to call "ClearInArray" in this case, since the pair will get removed anyway
				if(numDeletedPairs==maxNumDeletedPairs)
				{
					BroadPhasePair* newDeletedPairsList = reinterpret_cast<BroadPhasePair*>(scratchAllocator->alloc(sizeof(BroadPhasePair)*2*maxNumDeletedPairs, true));
					PxMemCopy(newDeletedPairsList, deletedPairsList, sizeof(BroadPhasePair)*maxNumDeletedPairs);
					scratchAllocator->free(deletedPairsList);
					deletedPairsList = newDeletedPairsList;
					maxNumDeletedPairs = 2*maxNumDeletedPairs;
				}
				PX_ASSERT(numDeletedPairs<maxNumDeletedPairs);
				//PX_ASSERT((uintptr_t)UP->mUserData != 0xcdcdcdcd);
				deletedPairsList[numDeletedPairs++] = BroadPhasePair(UP->mVolA,UP->mVolB/*, ID*/);
			}
		}
		else
		{
			pairManager.ClearInArray(UP);
			// Add => already there... Might want to create user data, though
			if(pairManager.IsNew(UP))
			{
#if !BP_SAP_TEST_GROUP_ID_CREATEUPDATE
				if(groupFiltering(boxGroups[UP->mVolA], boxGroups[UP->mVolB]))
#endif
				{
					if(numCreatedPairs==maxNumCreatedPairs)
					{
						BroadPhasePair* newCreatedPairsList = reinterpret_cast<BroadPhasePair*>(scratchAllocator->alloc(sizeof(BroadPhasePair)*2*maxNumCreatedPairs, true));
						PxMemCopy(newCreatedPairsList, createdPairsList, sizeof(BroadPhasePair)*maxNumCreatedPairs);
						scratchAllocator->free(createdPairsList);
						createdPairsList = newCreatedPairsList;
						maxNumCreatedPairs = 2*maxNumCreatedPairs;
					}
					PX_ASSERT(numCreatedPairs<maxNumCreatedPairs);
					createdPairsList[numCreatedPairs++] = BroadPhasePair(UP->mVolA,UP->mVolB/*, ID*/);
				}
				pairManager.ClearNew(UP);
			}
		}
	}

	//Record pairs that are to be deleted because they were simultaneously created and removed
	//from different axis sorts.
	numActualDeletedPairs=numDeletedPairs;
	for(PxU32 i=0;i<dataArraySize;i++)
	{
		const PxU32 ID = dataArray[i];
		PX_ASSERT(ID<pairManager.mNbActivePairs);
		const BroadPhasePair* PX_RESTRICT UP = pairManager.mActivePairs + ID;
		if(pairManager.IsRemoved(UP) && pairManager.IsNew(UP))
		{
			PX_ASSERT(pairManager.IsInArray(UP));

			if(numActualDeletedPairs==maxNumDeletedPairs)
			{
				BroadPhasePair* newDeletedPairsList = reinterpret_cast<BroadPhasePair*>(scratchAllocator->alloc(sizeof(BroadPhasePair)*2*maxNumDeletedPairs, true));
				PxMemCopy(newDeletedPairsList, deletedPairsList, sizeof(BroadPhasePair)*maxNumDeletedPairs);
				scratchAllocator->free(deletedPairsList);
				deletedPairsList = newDeletedPairsList;
				maxNumDeletedPairs = 2*maxNumDeletedPairs;
			}
			PX_ASSERT(numActualDeletedPairs<=maxNumDeletedPairs);
			deletedPairsList[numActualDeletedPairs++] = BroadPhasePair(UP->mVolA,UP->mVolB/*, ID*/); //KS - should we even get here????
		}
	}

	// // #### try batch removal here
	// for(PxU32 i=0;i<numActualDeletedPairs;i++)
	// {
	// 	const BpHandle id0 = deletedPairsList[i].mVolA;
	// 	const BpHandle id1 = deletedPairsList[i].mVolB;
	//#if PX_DEBUG
	// 	const bool Status = pairManager.RemovePair(id0, id1);
	// 	PX_ASSERT(Status);
	//#else
	// 	pairManager.RemovePair(id0, id1);
	//#endif
	// }

	//Only report deleted pairs from different groups.
#if !BP_SAP_TEST_GROUP_ID_CREATEUPDATE
	for(PxU32 i=0;i<numDeletedPairs;i++)
	{
		const PxU32 id0 = deletedPairsList[i].mVolA;
		const PxU32 id1 = deletedPairsList[i].mVolB;
		if(!groupFiltering(boxGroups[id0], boxGroups[id1]))
		{
			// Compact in place: overwrite slot i with the last non-filtered entry.
			while((numDeletedPairs-1) > i && boxGroups[deletedPairsList[numDeletedPairs-1].mVolA] == boxGroups[deletedPairsList[numDeletedPairs-1].mVolB])
			{
				numDeletedPairs--;
			}
			deletedPairsList[i]=deletedPairsList[numDeletedPairs-1];
			numDeletedPairs--;
		}
	}
#endif
}

// Optional instrumentation counters for the pruning loops below; the macros
// compile to nothing unless PRINT_STATS is defined.
//#define PRINT_STATS
#ifdef PRINT_STATS
	#include <stdio.h>
	static PxU32 gNbIter = 0;
	static PxU32 gNbTests = 0;
	static PxU32 gNbPairs = 0;
	#define START_STATS				gNbIter = gNbTests = gNbPairs = 0;
	#define INCREASE_STATS_NB_ITER	gNbIter++;
	#define INCREASE_STATS_NB_TESTS	gNbTests++;
	#define INCREASE_STATS_NB_PAIRS	gNbPairs++;
	#define DUMP_STATS				printf("%d %d %d\n", gNbIter, gNbTests, gNbPairs);
#else
	#define START_STATS
	#define INCREASE_STATS_NB_ITER
	#define INCREASE_STATS_NB_TESTS
	#define INCREASE_STATS_NB_PAIRS
	#define DUMP_STATS
#endif

// Doubles the DataArray buffer using the scratch allocator, preserving contents.
void DataArray::Resize(PxcScratchAllocator* scratchAllocator)
{
	BpHandle* newDataArray = reinterpret_cast<BpHandle*>(scratchAllocator->alloc(sizeof(BpHandle)*mCapacity*2, true));
	PxMemCopy(newDataArray, mData, mCapacity*sizeof(BpHandle));
	scratchAllocator->free(mData);
	mData = newDataArray;
	mCapacity *= 2;
}

// Y/Z interval overlap test for two boxes whose X intervals already overlap.
// Branchless '|' is used deliberately instead of '||'.
static PX_FORCE_INLINE int intersect2D(const BoxYZ& a, const BoxYZ& b)
{
	const bool b0 = b.mMaxY < a.mMinY;
	const bool b1 = a.mMaxY < b.mMinY;
	const bool b2 = b.mMaxZ < a.mMinZ;
	const bool b3 = a.mMaxZ < b.mMinZ;
//	const bool b4 = b0 || b1 || b2 || b3;
	const bool b4 = b0 | b1 | b2 | b3;
	return !b4;
}

// Records an overlap in the pair manager and, for pairs not seen this frame,
// queues the pair index into dataArray and marks the pair as New.
void addPair(const BpHandle id0, const BpHandle id1, PxcScratchAllocator* scratchAllocator, SapPairManager& pairManager, DataArray& dataArray)
{
	const BroadPhasePair* UP = reinterpret_cast<const BroadPhasePair*>(pairManager.AddPair(id0, id1, SapPairManager::PAIR_UNKNOWN));

	//If the hash table has reached its limit then we're unable to add a new pair.
	if(NULL==UP)
		return;

	PX_ASSERT(UP);

	if(pairManager.IsUnknown(UP))
	{
		pairManager.ClearState(UP);
		pairManager.SetInArray(UP);
		dataArray.AddData(pairManager.GetPairIndex(UP), scratchAllocator);
		pairManager.SetNew(UP);
	}
	pairManager.ClearRemoved(UP);
}

// Marks a tracked pair as Removed and queues it into dataArray once.
// Does nothing if the pair was never tracked.
void removePair(BpHandle id0, BpHandle id1, PxcScratchAllocator* scratchAllocator, SapPairManager& pairManager, DataArray& dataArray)
{
	const BroadPhasePair* UP = reinterpret_cast<const BroadPhasePair*>(pairManager.FindPair(id0, id1));
	if(UP)
	{
		if(!pairManager.IsInArray(UP))
		{
			pairManager.SetInArray(UP);
			dataArray.AddData(pairManager.GetPairIndex(UP), scratchAllocator);
		}
		pairManager.SetRemoved(UP);
	}
}

// Bundles the per-pass state needed by the static addPair() below, including
// the remap tables from sorted indices back to original box ids.
struct AddPairParams
{
	AddPairParams(const PxU32* remap0, const PxU32* remap1, PxcScratchAllocator* alloc, SapPairManager* pm, DataArray* da) :
		mRemap0				(remap0),
		mRemap1				(remap1),
		mScratchAllocator	(alloc),
		mPairManager		(pm),
		mDataArray			(da)
	{
	}

	const PxU32*			mRemap0;
	const PxU32*			mRemap1;
	PxcScratchAllocator*	mScratchAllocator;
	SapPairManager*			mPairManager;
	DataArray*				mDataArray;
};

// Same logic as the free addPair() above, but takes sorted indices and
// translates them back to box ids through the remap tables.
static void addPair(const AddPairParams* PX_RESTRICT params, const BpHandle id0_, const BpHandle id1_)
{
	SapPairManager& pairManager = *params->mPairManager;

	const BroadPhasePair* UP = reinterpret_cast<const BroadPhasePair*>(pairManager.AddPair(params->mRemap0[id0_], params->mRemap1[id1_], SapPairManager::PAIR_UNKNOWN));

	//If the hash table has reached its limit then we're unable to add a new pair.
	if(NULL==UP)
		return;

	PX_ASSERT(UP);

	if(pairManager.IsUnknown(UP))
	{
		pairManager.ClearState(UP);
		pairManager.SetInArray(UP);
		params->mDataArray->AddData(pairManager.GetPairIndex(UP), params->mScratchAllocator);
		pairManager.SetNew(UP);
	}
	pairManager.ClearRemoved(UP);
}

// Builds the SoA working set for box pruning: X intervals, Y/Z intervals,
// filter groups and a remap back to original box ids, all in X-sorted order.
// Note the axis permutation (0,2,1): "Y"/"Z" here are world axes 2 and 1.
// PT: TODO: use SIMD
AuxData::AuxData(PxU32 nb, const SapBox1D*const* PX_RESTRICT boxes, const BpHandle* PX_RESTRICT indicesSorted, const Bp::FilterGroup::Enum* PX_RESTRICT groupIds)
{
	// PT: TODO: use scratch allocator / etc
	BoxX* PX_RESTRICT boxX = reinterpret_cast<BoxX*>(PX_ALLOC(sizeof(BoxX)*(nb+1), "mBoxX"));
	BoxYZ* PX_RESTRICT boxYZ = reinterpret_cast<BoxYZ*>(PX_ALLOC(sizeof(BoxYZ)*nb, "mBoxYZ"));
	Bp::FilterGroup::Enum* PX_RESTRICT groups = reinterpret_cast<Bp::FilterGroup::Enum*>(PX_ALLOC(sizeof(Bp::FilterGroup::Enum)*nb, "mGroups"));
	PxU32* PX_RESTRICT remap = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32)*nb, "mRemap"));

	mBoxX = boxX;
	mBoxYZ = boxYZ;
	mGroups = groups;
	mRemap = remap;
	mNb = nb;

	const PxU32 axis0 = 0;
	const PxU32 axis1 = 2;
	const PxU32 axis2 = 1;

	const SapBox1D* PX_RESTRICT boxes0 = boxes[axis0];
	const SapBox1D* PX_RESTRICT boxes1 = boxes[axis1];
	const SapBox1D* PX_RESTRICT boxes2 = boxes[axis2];

	for(PxU32 i=0;i<nb;i++)
	{
		const PxU32 boxID = indicesSorted[i];
		groups[i] = groupIds[boxID];
		remap[i] = boxID;

		const SapBox1D& currentBoxX = boxes0[boxID];
		boxX[i].mMinX = currentBoxX.mMinMax[0];
		boxX[i].mMaxX = currentBoxX.mMinMax[1];

		const SapBox1D& currentBoxY = boxes1[boxID];
		boxYZ[i].mMinY = currentBoxY.mMinMax[0];
		boxYZ[i].mMaxY = currentBoxY.mMinMax[1];

		const SapBox1D& currentBoxZ = boxes2[boxID];
		boxYZ[i].mMinZ = currentBoxZ.mMinMax[0];
		boxYZ[i].mMaxZ = currentBoxZ.mMinMax[1];
	}
	// Sentinel entry so the inner scan in the pruning loops can run without
	// an explicit bounds check.
	boxX[nb].mMinX = 0xffffffff;
}

AuxData::~AuxData()
{
	PX_FREE(mRemap);
	PX_FREE(mGroups);
	PX_FREE(mBoxYZ);
	PX_FREE(mBoxX);
}

// Complete box pruning: finds all overlapping pairs within a single X-sorted
// set (sweep on X, 2D test on Y/Z) and records them through addPair().
void performBoxPruningNewNew(	const AuxData* PX_RESTRICT auxData, PxcScratchAllocator* scratchAllocator, const bool* lut,
								SapPairManager& pairManager, BpHandle*& dataArray, PxU32& dataArraySize, PxU32& dataArrayCapacity)
{
	const PxU32 nb = auxData->mNb;
	if(!nb)
		return;

	DataArray da(dataArray, dataArraySize, dataArrayCapacity);

	START_STATS
	{
		BoxX* boxX = auxData->mBoxX;
		BoxYZ* boxYZ = auxData->mBoxYZ;
		Bp::FilterGroup::Enum* groups = auxData->mGroups;
		PxU32* remap = auxData->mRemap;

		AddPairParams params(remap, remap, scratchAllocator, &pairManager, &da);

		PxU32 runningIndex = 0;
		PxU32 index0 = 0;

		while(runningIndex<nb && index0<nb)
		{
#if BP_SAP_TEST_GROUP_ID_CREATEUPDATE
			const Bp::FilterGroup::Enum group0 = groups[index0];
#endif
			const BoxX& boxX0 = boxX[index0];

			const BpHandle minLimit = boxX0.mMinX;
			while(boxX[runningIndex++].mMinX<minLimit);

			// Scan candidates whose X-min lies inside box0's X interval;
			// the sentinel written by AuxData's ctor terminates the loop.
			const BpHandle maxLimit = boxX0.mMaxX;
			PxU32 index1 = runningIndex;
			while(boxX[index1].mMinX <= maxLimit)
			{
				INCREASE_STATS_NB_ITER
#if BP_SAP_TEST_GROUP_ID_CREATEUPDATE
				if(groupFiltering(group0, groups[index1], lut))
#endif
				{
					INCREASE_STATS_NB_TESTS
					if(intersect2D(boxYZ[index0], boxYZ[index1]))
/*					__m128i b = _mm_loadu_si128(reinterpret_cast<const __m128i*>(&boxYZ[index0].mMinY));
					b = _mm_shuffle_epi32(b, 78);
					const __m128i a = _mm_loadu_si128(reinterpret_cast<const __m128i*>(&boxYZ[index1].mMinY));
					const __m128i d = _mm_cmpgt_epi32(a, b);
					const int mask = _mm_movemask_epi8(d);
					if(mask==0x0000ff00)*/
					{
						INCREASE_STATS_NB_PAIRS
						addPair(&params, index0, index1);
					}
				}
				index1++;
			}
			index0++;
		}
	}
	DUMP_STATS

	dataArray = da.mData;
	dataArraySize = da.mSize;
	dataArrayCapacity = da.mCapacity;
}

// One direction of bipartite pruning between two X-sorted sets. The codepath
// template parameter selects '<' vs '<=' when advancing runningIndex so that
// pairs with equal X-min are reported by exactly one of the two passes.
template<int codepath>
static void bipartitePruning(
	const PxU32 nb0, const BoxX* PX_RESTRICT boxX0, const BoxYZ* PX_RESTRICT boxYZ0, const PxU32* PX_RESTRICT remap0, const Bp::FilterGroup::Enum* PX_RESTRICT groups0,
	const PxU32 nb1, const BoxX* PX_RESTRICT boxX1, const BoxYZ* PX_RESTRICT boxYZ1, const PxU32* PX_RESTRICT remap1, const Bp::FilterGroup::Enum* PX_RESTRICT groups1,
	const bool* lut, PxcScratchAllocator* scratchAllocator, SapPairManager& pairManager, DataArray& dataArray)
{
	AddPairParams params(remap0, remap1, scratchAllocator, &pairManager, &dataArray);

	PxU32 runningIndex = 0;
	PxU32 index0 = 0;

	while(runningIndex<nb1 && index0<nb0)
	{
#if BP_SAP_TEST_GROUP_ID_CREATEUPDATE
		const Bp::FilterGroup::Enum group0 = groups0[index0];
#endif
		const BpHandle minLimit = boxX0[index0].mMinX;
		if(!codepath)
		{
			while(boxX1[runningIndex].mMinX<minLimit)
				runningIndex++;
		}
		else
		{
			while(boxX1[runningIndex].mMinX<=minLimit)
				runningIndex++;
		}

		const BpHandle maxLimit = boxX0[index0].mMaxX;
		PxU32 index1 = runningIndex;
		while(boxX1[index1].mMinX <= maxLimit)
		{
			INCREASE_STATS_NB_ITER
#if BP_SAP_TEST_GROUP_ID_CREATEUPDATE
			if(groupFiltering(group0, groups1[index1], lut))
#endif
			{
				INCREASE_STATS_NB_TESTS
				if(intersect2D(boxYZ0[index0], boxYZ1[index1]))
				{
					INCREASE_STATS_NB_PAIRS
					addPair(&params, index0, index1);
				}
			}
			index1++;
		}
		index0++;
	}
}

// Bipartite box pruning: finds all overlaps between two X-sorted sets by
// running bipartitePruning in both directions (with complementary codepaths).
void performBoxPruningNewOld(	const AuxData* PX_RESTRICT auxData0, const AuxData* PX_RESTRICT auxData1, PxcScratchAllocator* scratchAllocator, const bool* lut,
								SapPairManager& pairManager, BpHandle*& dataArray, PxU32& dataArraySize, PxU32& dataArrayCapacity)
{
	const PxU32 nb0 = auxData0->mNb;
	const PxU32 nb1 = auxData1->mNb;
	if(!nb0 || !nb1)
		return;

	DataArray da(dataArray, dataArraySize, dataArrayCapacity);

	START_STATS
	{
		const BoxX* boxX0 = auxData0->mBoxX;
		const BoxYZ* boxYZ0 = auxData0->mBoxYZ;
		const Bp::FilterGroup::Enum* groups0 = auxData0->mGroups;
		const PxU32* remap0 = auxData0->mRemap;

		const BoxX* boxX1 = auxData1->mBoxX;
		const BoxYZ* boxYZ1 = auxData1->mBoxYZ;
		const Bp::FilterGroup::Enum* groups1 = auxData1->mGroups;
		const PxU32* remap1 = auxData1->mRemap;

		bipartitePruning<0>(nb0, boxX0, boxYZ0, remap0, groups0, nb1, boxX1, boxYZ1, remap1, groups1, lut, scratchAllocator, pairManager, da);
		bipartitePruning<1>(nb1, boxX1, boxYZ1, remap1, groups1, nb0, boxX0, boxYZ0, remap0, groups0, lut, scratchAllocator, pairManager, da);
	}
	DUMP_STATS

	dataArray = da.mData;
	dataArraySize = da.mSize;
	dataArrayCapacity = da.mCapacity;
}

} //namespace Bp

} //namespace physx
28,634
C++
30.398026
165
0.707446
NVIDIA-Omniverse/PhysX/physx/source/lowlevelaabb/src/BpBroadPhaseShared.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "BpBroadPhaseShared.h"
#include "foundation/PxMemory.h"
#include "foundation/PxBitUtils.h"

using namespace physx;
using namespace Bp;

#define MBP_ALLOC(x)	PX_ALLOC(x, "MBP")
#define MBP_FREE(x)		PX_FREE(x)

// Fills nb consecutive dwords with the same value (used below to reset the
// hash table to INVALID_ID).
static PX_FORCE_INLINE void storeDwords(PxU32* dest, PxU32 nb, PxU32 value)
{
	while(nb--)
		*dest++ = value;
}

///////////////////////////////////////////////////////////////////////////////

// Starts empty: no memory is owned until reserveMemory()/reallocPairs() runs.
PairManagerData::PairManagerData() :
	mHashSize		(0),
	mMask			(0),
	mNbActivePairs	(0),
	mHashTable		(NULL),
	mNext			(NULL),
	mActivePairs	(NULL),
	mReservedMemory	(0)
{
}

///////////////////////////////////////////////////////////////////////////////

PairManagerData::~PairManagerData()
{
	purge();
}

///////////////////////////////////////////////////////////////////////////////

// Releases all allocations and resets the manager to its empty state.
void PairManagerData::purge()
{
	MBP_FREE(mNext);
	MBP_FREE(mActivePairs);
	MBP_FREE(mHashTable);
	mHashSize		= 0;
	mMask			= 0;
	mNbActivePairs	= 0;
}

///////////////////////////////////////////////////////////////////////////////

// (Re)allocates the hash table, the dense pair array and the collision-chain
// array for the current mHashSize, then re-links every active pair.
void PairManagerData::reallocPairs()
{
	MBP_FREE(mHashTable);
	mHashTable = reinterpret_cast<PxU32*>(MBP_ALLOC(mHashSize*sizeof(PxU32)));
	storeDwords(mHashTable, mHashSize, INVALID_ID);

	// Get some bytes for new entries
	InternalPair* newPairs = reinterpret_cast<InternalPair*>(MBP_ALLOC(mHashSize * sizeof(InternalPair)));
	PX_ASSERT(newPairs);
	PxU32* newNext = reinterpret_cast<PxU32*>(MBP_ALLOC(mHashSize * sizeof(PxU32)));
	PX_ASSERT(newNext);

	// Copy old data if needed
	if(mNbActivePairs)
		PxMemCopy(newPairs, mActivePairs, mNbActivePairs*sizeof(InternalPair));

	// Rebuild all collision chains. hash(id0, id1) itself is constant per pair,
	// but the bucket (hash & mMask) changes whenever mMask changes, so every
	// pair must be re-linked under the new mask. (Re-linking all of them —
	// rather than only those whose bucket actually changed — is merely less
	// efficient, never incorrect.)
	for(PxU32 i=0;i<mNbActivePairs;i++)
	{
		const PxU32 hashValue = hash(mActivePairs[i].getId0(), mActivePairs[i].getId1()) & mMask;	// New hash value with new mask
		newNext[i] = mHashTable[hashValue];
		mHashTable[hashValue] = i;
	}

	// Delete old data
	MBP_FREE(mNext);
	MBP_FREE(mActivePairs);

	// Assign new pointer
	mActivePairs = newPairs;
	mNext = newNext;
}

///////////////////////////////////////////////////////////////////////////////

// Shrinks the tables to the smallest power of two that still holds all active
// pairs, unless the caller explicitly reserved a larger size.
void PairManagerData::shrinkMemory()
{
	// Check correct memory against actually used memory
	const PxU32 correctHashSize = PxNextPowerOfTwo(mNbActivePairs);

	if(mHashSize==correctHashSize)
		return;

	if(mReservedMemory && correctHashSize < mReservedMemory)
		return;

	// Reduce memory used
	mHashSize = correctHashSize;
	mMask = mHashSize-1;

	reallocPairs();
}

///////////////////////////////////////////////////////////////////////////////

// Pre-sizes the tables for memSize pairs (rounded up to a power of two) so
// that subsequent insertions don't trigger incremental growth; the reserved
// size also acts as a floor for shrinkMemory().
void PairManagerData::reserveMemory(PxU32 memSize)
{
	if(!memSize)
		return;

	if(!PxIsPowerOfTwo(memSize))
		memSize = PxNextPowerOfTwo(memSize);

	mHashSize = memSize;
	mMask = mHashSize-1;

	mReservedMemory = memSize;

	reallocPairs();
}

///////////////////////////////////////////////////////////////////////////////

// Grows the tables so one more pair fits, and returns the insertion bucket
// recomputed with the new mask.
PX_NOINLINE PxU32 PairManagerData::growPairs(PxU32 fullHashValue)
{
	// Get more entries
	mHashSize = PxNextPowerOfTwo(mNbActivePairs+1);
	mMask = mHashSize-1;

	reallocPairs();

	// Recompute hash value with new hash size
	return fullHashValue & mMask;
}

///////////////////////////////////////////////////////////////////////////////

// Removes the pair stored at pairIndex. hashValue must be the pair's bucket
// (already masked). To keep mActivePairs dense, the last active pair is moved
// into the freed slot and both affected collision chains are patched.
void PairManagerData::removePair(PxU32 /*id0*/, PxU32 /*id1*/, PxU32 hashValue, PxU32 pairIndex)
{
	// Walk the hash table to fix mNext
	{
		PxU32 offset = mHashTable[hashValue];
		PX_ASSERT(offset!=INVALID_ID);

		PxU32 previous=INVALID_ID;
		while(offset!=pairIndex)
		{
			previous = offset;
			offset = mNext[offset];
		}

		// Unlink pairIndex from its bucket's chain
		if(previous!=INVALID_ID)
		{
			PX_ASSERT(mNext[previous]==pairIndex);
			mNext[previous] = mNext[pairIndex];
		}
		// else we were the first
		else mHashTable[hashValue] = mNext[pairIndex];

		// we're now free to reuse mNext[pairIndex] without breaking the list
	}

#if PX_DEBUG
	mNext[pairIndex]=INVALID_ID;	// Invalidate entry (debug only)
#endif

	// Fill holes
	{
		// 1) Remove last pair
		const PxU32 lastPairIndex = mNbActivePairs-1;
		if(lastPairIndex==pairIndex)
		{
			// Removing the last slot: nothing to move.
			mNbActivePairs--;
		}
		else
		{
			const InternalPair* last = &mActivePairs[lastPairIndex];
			const PxU32 lastHashValue = hash(last->getId0(), last->getId1()) & mMask;

			// Walk the hash table to fix mNext
			PxU32 offset = mHashTable[lastHashValue];
			PX_ASSERT(offset!=INVALID_ID);

			PxU32 previous=INVALID_ID;
			while(offset!=lastPairIndex)
			{
				previous = offset;
				offset = mNext[offset];
			}

			// Unlink lastPairIndex from its bucket's chain
			if(previous!=INVALID_ID)
			{
				PX_ASSERT(mNext[previous]==lastPairIndex);
				mNext[previous] = mNext[lastPairIndex];
			}
			// else we were the first
			else mHashTable[lastHashValue] = mNext[lastPairIndex];
			// we're now free to reuse mNext[lastPairIndex] without breaking the list

#if PX_DEBUG
			mNext[lastPairIndex]=INVALID_ID;
#endif

			// Don't invalidate entry since we're going to shrink the array

			// 2) Re-insert in free slot
			mActivePairs[pairIndex] = mActivePairs[lastPairIndex];
#if PX_DEBUG
			PX_ASSERT(mNext[pairIndex]==INVALID_ID);
#endif
			mNext[pairIndex] = mHashTable[lastHashValue];
			mHashTable[lastHashValue] = pairIndex;

			mNbActivePairs--;
		}
	}
}
7,051
C++
27.666667
124
0.65863
NVIDIA-Omniverse/PhysX/physx/source/lowlevelaabb/src/BpBroadPhaseSapAux.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef BP_BROADPHASE_SAP_AUX_H
#define BP_BROADPHASE_SAP_AUX_H

#include "foundation/PxAssert.h"
#include "foundation/PxIntrinsics.h"
#include "foundation/PxUserAllocated.h"
#include "BpBroadPhase.h"
#include "BpBroadPhaseIntegerAABB.h"
#include "foundation/PxBitMap.h"

namespace physx
{
class PxcScratchAllocator;

namespace Bp
{
// Rounds size up to the next multiple of 16.
#define ALIGN_SIZE_16(size) ((unsigned(size)+15)&(unsigned(~15)))

// Two sentinel entries (min/max) bracket each sorted axis array.
#define NUM_SENTINELS 2

#define BP_SAP_USE_PREFETCH 1//prefetch in batchUpdate

#define BP_SAP_USE_OVERLAP_TEST_ON_REMOVES 1// "Useless" but faster overall because seriously reduces number of calls (from ~10000 to ~3 sometimes!)

//Set 1 to test for group ids in batchCreate/batchUpdate so we can avoid group id test in ComputeCreatedDeletedPairsLists
//Set 0 to neglect group id test in batchCreate/batchUpdate and delay test until ComputeCreatedDeletedPairsLists
#define BP_SAP_TEST_GROUP_ID_CREATEUPDATE 1

#define MAX_BP_HANDLE			0x3fffffff
#define PX_REMOVED_BP_HANDLE	0x3ffffffd

// Writes the "minimum" sentinel endpoint (smallest sortable value + handle).
PX_FORCE_INLINE void setMinSentinel(ValType& v, BpHandle& d)
{
	v = 0x00000000;//0x00800000; //0x00800000 is -FLT_MAX but setting it to 0 means we don't crash when we get a value outside the float range.
	d = (BP_INVALID_BP_HANDLE & ~1);
}

// Writes the "maximum" sentinel endpoint (largest sortable value + handle).
PX_FORCE_INLINE void setMaxSentinel(ValType& v, BpHandle& d)
{
	v = 0xffffffff;//0xff7fffff; //0xff7fffff is +FLT_MAX but setting it to 0xffffffff means we don't crash when we get a value outside the float range.
	d = BP_INVALID_BP_HANDLE;
}

// Endpoint payload layout: bit 0 = is-max flag, bits 1..N = owner box id.
PX_FORCE_INLINE BpHandle setData(PxU32 owner_box_id, const bool is_max)
{
	BpHandle d = BpHandle(owner_box_id<<1);
	if(is_max)
		d |= 1;
	return d;
}

// True for the two sentinel endpoints written by setMin/MaxSentinel above.
PX_FORCE_INLINE bool isSentinel(const BpHandle& d)
{
	return (d&~1)==(BP_INVALID_BP_HANDLE & ~1);
}

// Non-zero when the endpoint is a box maximum (bit 0 of the payload).
PX_FORCE_INLINE BpHandle isMax(const BpHandle& d)
{
	return BpHandle(d & 1);
}

// Extracts the owning box id from an endpoint payload.
PX_FORCE_INLINE BpHandle getOwner(const BpHandle& d)
{
	return BpHandle(d>>1);
}

// One box projected on a single axis: indices of its min/max endpoints.
class SapBox1D
{
public:
	PX_FORCE_INLINE SapBox1D()	{}
	PX_FORCE_INLINE ~SapBox1D()	{}

	BpHandle mMinMax[2];//mMinMax[0]=min, mMinMax[1]=max
};

// Hash-based manager for active broad-phase pairs: a dense pair array plus a
// power-of-two hash table with mNext collision chains, and a per-pair state
// byte (PAIR_* flags below) tracking each pair's lifecycle within a frame.
class SapPairManager
{
public:
						SapPairManager();
						~SapPairManager();

	// Allocates the initial tables for 'size' pairs; release() frees them.
	void				init(const PxU32 size);
	void				release();

	void				shrinkMemory();

	const BroadPhasePair*	AddPair		(BpHandle id0, BpHandle id1, const PxU8 state);
	bool					RemovePair	(BpHandle id0, BpHandle id1);
	bool					RemovePairs	(const PxBitMap& removedAABBs);
	const BroadPhasePair*	FindPair	(BpHandle id0, BpHandle id1)	const;

	// Index of 'pair' within the dense mActivePairs array.
	PX_FORCE_INLINE	PxU32	GetPairIndex(const BroadPhasePair* PX_RESTRICT pair)	const
	{
		return (PxU32((size_t(pair) - size_t(mActivePairs)))/sizeof(BroadPhasePair));
	}

	BpHandle*			mHashTable;				// bucket -> first pair index
	BpHandle*			mNext;					// collision chains
	PxU32				mHashSize;
	PxU32				mHashCapacity;
	PxU32				mMinAllowedHashCapacity;
	BroadPhasePair*		mActivePairs;			// dense pair array
	PxU8*				mActivePairStates;		// PAIR_* flags, parallel to mActivePairs
	PxU32				mNbActivePairs;
	PxU32				mActivePairsCapacity;
	PxU32				mMask;					// mHashSize-1

	BroadPhasePair*		FindPair	(BpHandle id0, BpHandle id1, PxU32 hash_value) const;
	void				RemovePair	(BpHandle id0, BpHandle id1, PxU32 hash_value, PxU32 pair_index);
	void				reallocPairs(const bool allocRequired);

	enum
	{
		PAIR_INARRAY=1,
		PAIR_REMOVED=2,
		PAIR_NEW=4,
		PAIR_UNKNOWN=8
	};

	PX_FORCE_INLINE	bool	IsInArray(const BroadPhasePair* PX_RESTRICT pair)	const
	{
		const PxU8 state=mActivePairStates[pair-mActivePairs];
		return state & PAIR_INARRAY ? true : false;
	}
	PX_FORCE_INLINE	bool	IsRemoved(const BroadPhasePair* PX_RESTRICT pair)	const
	{
		const PxU8 state=mActivePairStates[pair-mActivePairs];
		return state & PAIR_REMOVED ? true : false;
	}
	PX_FORCE_INLINE	bool	IsNew(const BroadPhasePair* PX_RESTRICT pair)		const
	{
		const PxU8 state=mActivePairStates[pair-mActivePairs];
		return state & PAIR_NEW ? true : false;
	}
	PX_FORCE_INLINE	bool	IsUnknown(const BroadPhasePair* PX_RESTRICT pair)	const
	{
		const PxU8 state=mActivePairStates[pair-mActivePairs];
		return state & PAIR_UNKNOWN ? true : false;
	}

	PX_FORCE_INLINE	void	ClearState(const BroadPhasePair* PX_RESTRICT pair)
	{
		mActivePairStates[pair-mActivePairs]=0;
	}

	PX_FORCE_INLINE	void	SetInArray(const BroadPhasePair* PX_RESTRICT pair)
	{
		mActivePairStates[pair-mActivePairs] |= PAIR_INARRAY;
	}
	PX_FORCE_INLINE	void	SetRemoved(const BroadPhasePair* PX_RESTRICT pair)
	{
		mActivePairStates[pair-mActivePairs] |= PAIR_REMOVED;
	}
	PX_FORCE_INLINE	void	SetNew(const BroadPhasePair* PX_RESTRICT pair)
	{
		mActivePairStates[pair-mActivePairs] |= PAIR_NEW;
	}

	PX_FORCE_INLINE	void	ClearInArray(const BroadPhasePair* PX_RESTRICT pair)
	{
		mActivePairStates[pair-mActivePairs] &= ~PAIR_INARRAY;
	}
	PX_FORCE_INLINE	void	ClearRemoved(const BroadPhasePair* PX_RESTRICT pair)
	{
		mActivePairStates[pair-mActivePairs] &= ~PAIR_REMOVED;
	}
	PX_FORCE_INLINE	void	ClearNew(const BroadPhasePair* PX_RESTRICT pair)
	{
		mActivePairStates[pair-mActivePairs] &= ~PAIR_NEW;
	}
};

// Growable handle buffer backed by the scratch allocator.
struct DataArray
{
	DataArray(BpHandle* data, PxU32 size, PxU32 capacity) : mData(data), mSize(size), mCapacity(capacity)	{}

	BpHandle*	mData;
	PxU32		mSize;
	PxU32		mCapacity;

	PX_NOINLINE	void	Resize(PxcScratchAllocator* scratchAllocator);

	// Appends one handle, growing the buffer first if it is full.
	PX_FORCE_INLINE	void	AddData(const PxU32 data, PxcScratchAllocator* scratchAllocator)
	{
		if(mSize==mCapacity)
			Resize(scratchAllocator);
		PX_ASSERT(mSize<mCapacity);
		mData[mSize++] = BpHandle(data);
	}
};

void addPair(const BpHandle id0, const BpHandle id1, PxcScratchAllocator* scratchAllocator, SapPairManager& pairManager, DataArray& dataArray);
void removePair(BpHandle id0, BpHandle id1, PxcScratchAllocator* scratchAllocator, SapPairManager& pairManager, DataArray& dataArray);

void ComputeCreatedDeletedPairsLists(
	const Bp::FilterGroup::Enum* PX_RESTRICT boxGroups,
	const BpHandle* PX_RESTRICT dataArray,
	const PxU32 dataArraySize,
	PxcScratchAllocator* scratchAllocator,
	BroadPhasePair*& createdPairsList,
	PxU32& numCreatedPairs,
	PxU32& maxNumCreatdPairs,
	BroadPhasePair*& deletedPairsList,
	PxU32& numDeletedPairs,
	PxU32& maxNumDeletedPairs,
	PxU32& numActualDeletedPairs,
	SapPairManager& pairManager);

// X-axis bounds of one box, stored separately for the sweep along X.
struct BoxX
{
	PxU32	mMinX;
	PxU32	mMaxX;
};

// Y/Z-axis bounds of one box, tested only after an X overlap is found.
struct BoxYZ
{
	PxU32	mMinY;
	PxU32	mMinZ;
	PxU32	mMaxY;
	PxU32	mMaxZ;
};

// Per-set working data for the box-pruning kernels: split X / YZ bounds,
// filter groups and a remap back to original box indices.
struct AuxData
{
	AuxData(PxU32 nb, const SapBox1D*const* PX_RESTRICT boxes, const BpHandle* PX_RESTRICT indicesSorted, const Bp::FilterGroup::Enum* PX_RESTRICT groupIds);
	~AuxData();

	BoxX*					mBoxX;
	BoxYZ*					mBoxYZ;
	Bp::FilterGroup::Enum*	mGroups;
	PxU32*					mRemap;
	PxU32					mNb;
};

void performBoxPruningNewNew(	const AuxData* PX_RESTRICT auxData, PxcScratchAllocator* scratchAllocator, const bool* lut,
								SapPairManager& pairManager, BpHandle*& dataArray, PxU32& dataArraySize, PxU32& dataArrayCapacity);
void performBoxPruningNewOld(	const AuxData* PX_RESTRICT auxData0, const AuxData* PX_RESTRICT auxData1, PxcScratchAllocator* scratchAllocator, const bool* lut,
								SapPairManager& pairManager, BpHandle*& dataArray, PxU32& dataArraySize, PxU32& dataArrayCapacity);

// 2D interval-overlap test on two axes at once (strict inequalities).
PX_FORCE_INLINE bool Intersect2D_Handle(const BpHandle bDir1Min, const BpHandle bDir1Max, const BpHandle bDir2Min, const BpHandle bDir2Max,
										const BpHandle cDir1Min, const BpHandle cDir1Max, const BpHandle cDir2Min, const BpHandle cDir2Max)
{
	return (bDir1Max > cDir1Min && cDir1Max > bDir1Min &&
			bDir2Max > cDir2Min && cDir2Max > bDir2Min);
}

} //namespace Bp

} //namespace physx

#endif //BP_BROADPHASE_SAP_AUX_H
9,128
C
32.076087
155
0.750767
NVIDIA-Omniverse/PhysX/physx/source/lowlevelaabb/src/BpAABBManager.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "BpAABBManager.h"

#define NB_SENTINELS	6

#include "foundation/PxHashSet.h"
#include "CmUtils.h"
#include "CmFlushPool.h"
#include "CmVisualization.h"
#include "CmRadixSort.h"
#include "BpBroadPhaseMBPCommon.h"
#include "BpBroadPhase.h"
#include "BpBroadPhaseShared.h"
#include "foundation/PxSort.h"
#include "foundation/PxVecMath.h"
#include "GuInternal.h"
#include "common/PxProfileZone.h"

using namespace physx;
using namespace Bp;
using namespace Cm;
using namespace aos;

static const bool gSingleThreaded = false;

// SIMD overlap kernels are only compiled for Intel targets with SIMD enabled.
#if PX_INTEL_FAMILY && !defined(PX_SIMD_DISABLED)
	#define ABP_SIMD_OVERLAP
#endif

// The YZ bounds layout differs between the SIMD and scalar overlap tests.
#ifdef ABP_SIMD_OVERLAP
	typedef AABB_YZn	AABB_YZ;
#else
	typedef AABB_YZr	AABB_YZ;
#endif

#ifdef ABP_SIMD_OVERLAP
	static const bool gUseRegularBPKernel = false;	// false to use "version 13" in box pruning series
	static const bool gUnrollLoop = true;			// true to use "version 14" in box pruning series
#else
	// PT: tested on Switch, for some reason the regular version is fastest there
	static const bool gUseRegularBPKernel = true;	// false to use "version 13" in box pruning series
	static const bool gUnrollLoop = false;			// true to use "version 14" in box pruning series
#endif

namespace physx
{
namespace Bp
{
// Hash for shape-level pairs: packs the two ids into one dword first.
static PX_FORCE_INLINE uint32_t PxComputeHash(const Pair& p)
{
	return PxU32(physx::PxComputeHash( (p.mID0&0xffff)|(p.mID1<<16)) );
}

// Hash for aggregate-level pairs, same packing scheme.
static PX_FORCE_INLINE uint32_t PxComputeHash(const AggPair& p)
{
	return PxU32(physx::PxComputeHash( (p.mIndex0&0xffff)|(p.mIndex1<<16)) );
}

// A pair is dead as soon as either of its shapes has been invalidated
// (its filter group set to eINVALID).
static PX_FORCE_INLINE bool shouldPairBeDeleted(const PxPinnedArray<Bp::FilterGroup::Enum>& groups, ShapeHandle h0, ShapeHandle h1)
{
	PX_ASSERT(h0<groups.size());
	PX_ASSERT(h1<groups.size());
	return (groups[h0]==Bp::FilterGroup::eINVALID) || (groups[h1]==Bp::FilterGroup::eINVALID);
}

///

typedef PxU32 InflatedType;

// A collection of bounds entries treated as a single broad-phase entry, with
// its own per-member inflated bounds for internal pruning.
// PT: TODO: revisit/optimize all that stuff once it works
class Aggregate : public PxUserAllocated
{
	public:
//						Aggregate(BoundsIndex index, bool selfCollisions);
						Aggregate(BoundsIndex index, PxAggregateFilterHint filterHint);
						~Aggregate();

		BoundsIndex		mIndex;				// this aggregate's own bounds index
	private:
		PxArray<BoundsIndex>	mAggregated;	// PT: TODO: replace with linked list?
	public:
		PersistentSelfCollisionPairs*	mSelfCollisionPairs;
		PxU32			mDirtyIndex;		// PT: index in mDirtyAggregates
	private:
		AABB_Xi*		mInflatedBoundsX;	// per-member X bounds
		AABB_YZ*		mInflatedBoundsYZ;	// per-member YZ bounds
		PxU32			mAllocatedSize;

	public:
		PX_FORCE_INLINE	PxU32				getNbAggregated()		const	{ return mAggregated.size();					}
		PX_FORCE_INLINE	BoundsIndex			getAggregated(PxU32 i)	const	{ return mAggregated[i];						}
		PX_FORCE_INLINE	const BoundsIndex*	getIndices()			const	{ return mAggregated.begin();					}
		PX_FORCE_INLINE	void				addAggregated(BoundsIndex i)	{ mAggregated.pushBack(i);						}
		PX_FORCE_INLINE	bool				removeAggregated(BoundsIndex i)	{ return mAggregated.findAndReplaceWithLast(i);	}	// PT: TODO: optimize?

		PX_FORCE_INLINE	const PxBounds3&	getMergedBounds()		const	{ return mBounds;								}

		PX_FORCE_INLINE	void				resetDirtyState()				{ mDirtyIndex = PX_INVALID_U32;					}
		PX_FORCE_INLINE	bool				isDirty()				const	{ return mDirtyIndex != PX_INVALID_U32;			}
		// Registers this aggregate in the dirty list exactly once.
		PX_FORCE_INLINE	void				markAsDirty(PxArray<Aggregate*>& dirtyAggregates)
											{
												if(!isDirty())
												{
													mDirtyIndex = dirtyAggregates.size();
													dirtyAggregates.pushBack(this);
												}
											}

						void				allocateBounds();
						void				computeBounds(const PxBounds3* PX_RESTRICT bounds, const float* PX_RESTRICT contactDistances)	/*PX_RESTRICT*/;

		PX_FORCE_INLINE	const AABB_Xi*		getBoundsX()			const	{ return mInflatedBoundsX;						}
		PX_FORCE_INLINE	const AABB_YZ*		getBoundsYZ()			const	{ return mInflatedBoundsYZ;						}

		// Lazily re-sorts the per-member bounds before they are consumed.
		PX_FORCE_INLINE	void				getSortedMinBounds()
											{
												if(mDirtySort)
													sortBounds();
											}

		PX_FORCE_INLINE	PxAggregateFilterHint	getFilterHint()		const	{ return mFilterHint;							}
	private:
						PxBounds3			mBounds;		// merged bounds of all aggregated entries
				PxAggregateFilterHint		mFilterHint;
						bool				mDirtySort;		// true => sortBounds() needed before use

						void				sortBounds();
		PX_NOCOPY(Aggregate)
};

///

namespace
{
#define MBP_ALLOC(x)		PX_ALLOC(x, "MBP")
#define MBP_ALLOC_TMP(x)	PX_ALLOC(x, "MBP_TMP")
#define MBP_FREE(x)			PX_FREE(x)

struct MBPEntry;
struct RegionHandle;
struct MBP_Object;

// Thin wrapper over PairManagerData used as the pair container here.
class MBP_PairManager : public PairManagerData
{
	public:
									MBP_PairManager()	{}
									~MBP_PairManager()	{}

		PX_FORCE_INLINE	InternalPair*	addPair(PxU32 id0, PxU32 id1);
};

///////////////////////////////////////////////////////////////////////////////

PX_FORCE_INLINE InternalPair* MBP_PairManager::addPair(PxU32 id0, PxU32 id1)
{
	PX_ASSERT(id0!=INVALID_ID);
	PX_ASSERT(id1!=INVALID_ID);
	return addPairInternal(id0, id1);
}

///////////////////////////////////////////////////////////////////////////////

}

typedef MBP_PairManager PairArray;

///

// Base class for persistent pair caches between broad-phase entries; derived
// classes implement findOverlaps() for their specific entry kinds.
class PersistentPairs : public PxUserAllocated
{
	public:
									PersistentPairs() : mTimestamp(PX_INVALID_U32), mShouldBeDeleted(false)	{}
	virtual							~PersistentPairs()														{}

	virtual			bool			update(AABBManager& /*manager*/, BpCacheData* /*data*/ = NULL)	{ return false;	}

	PX_FORCE_INLINE	void			updatePairs(PxU32 timestamp, const PxBounds3* bounds, const float* contactDistances, const Bp::FilterGroup::Enum* groups,
												const bool* lut, VolumeData* volumeData, PxArray<AABBOverlap>* createdOverlaps, PxArray<AABBOverlap>* destroyedOverlaps);
					void			outputDeletedOverlaps(PxArray<AABBOverlap>* overlaps, const VolumeData* volumeData);
	private:
	virtual			void			findOverlaps(PairArray& pairs, const PxBounds3* PX_RESTRICT bounds, const float* PX_RESTRICT contactDistances, const Bp::FilterGroup::Enum* PX_RESTRICT groups, const bool* PX_RESTRICT lut)	= 0;
	protected:
					PxU32			mTimestamp;		// last update timestamp (PX_INVALID_U32 = never updated)
					MBP_PairManager	mPM;			// cached pairs
	public:
					bool			mShouldBeDeleted;
};

/////

#define PosXType2	PxU32

// Platform-specific SIMD overlap test macros (raw SSE on Intel, PxVecMath
// elsewhere). The "9c"/"14a" names refer to versions in the box pruning series.
#if PX_INTEL_FAMILY
	#define SIMD_OVERLAP_TEST_14a(box)	_mm_movemask_ps(_mm_cmpngt_ps(b, _mm_load_ps(box)))==15

	#define SIMD_OVERLAP_INIT_9c(box)	\
		__m128 b = _mm_shuffle_ps(_mm_load_ps(&box.mMinY), _mm_load_ps(&box.mMinY), 78);\
		const float Coeff = -1.0f;\
		b = _mm_mul_ps(b, _mm_load1_ps(&Coeff));

	#define SIMD_OVERLAP_TEST_9c(box)						\
		const __m128 a = _mm_load_ps(&box.mMinY);			\
		const __m128 d = _mm_cmpge_ps(a, b);				\
		if(_mm_movemask_ps(d)==15)
#else
	#define SIMD_OVERLAP_TEST_14a(box)	BAllEqFFFF(V4IsGrtr(b, V4LoadA(box)))
	#define SIMD_OVERLAP_INIT_9c(box)	\
		Vec4V b = V4PermZWXY(V4LoadA(&box.mMinY));	\
		b = V4Mul(b, V4Load(-1.0f));

	#define SIMD_OVERLAP_TEST_9c(box)	\
		const Vec4V a = V4LoadA(&box.mMinY);	\
		const Vec4V d = V4IsGrtrOrEq(a, b);		\
		if(BAllEqTTTT(d))
#endif

#define CODEALIGN16	//_asm	align 16

#ifdef ABP_SIMD_OVERLAP
	#define SIMD_OVERLAP_PRELOAD_BOX0	SIMD_OVERLAP_INIT_9c(box0)
	#define SIMD_OVERLAP_TEST(x)		SIMD_OVERLAP_TEST_9c(x)
#else
	#define SIMD_OVERLAP_PRELOAD_BOX0
#endif

#ifndef ABP_SIMD_OVERLAP
// Scalar YZ overlap test used when the SIMD path is disabled.
// Returns non-zero if boxes a and b overlap on both Y and Z.
static PX_FORCE_INLINE int intersect2D(const AABB_YZ& a, const AABB_YZ& b)
{
	const bool b0 = b.mMaxY < a.mMinY;
	const bool b1 = a.mMaxY < b.mMinY;
	const bool b2 = b.mMaxZ < a.mMinZ;
	const bool b3 = a.mMaxZ < b.mMinZ;
//	const bool b4 = b0 || b1 || b2 || b3;
	// Non-short-circuiting | keeps this branch-free.
	const bool b4 = b0 | b1 | b2 | b3;
	return !b4;
}
#endif

#ifdef ABP_SIMD_OVERLAP
	#define ABP_OVERLAP_TEST(x)	SIMD_OVERLAP_TEST(x)
#else
	#define ABP_OVERLAP_TEST(x)	if(intersect2D(box0, x))
#endif

//#define BIP_VERSION_1

// Pair-output functor for the bipartite (two-list) pruning kernel: remaps local
// box indices to bounds indices, applies group filtering, then records the pair.
struct outputPair_Bipartite
{
#ifdef BIP_VERSION_1
	outputPair_Bipartite(PairArray* pairManager, Aggregate* aggregate0, Aggregate* aggregate1, const Bp::FilterGroup::Enum* groups, const bool* lut) :
#else
	outputPair_Bipartite(PairArray* pairManager, const BoundsIndex* remap0, const BoundsIndex* remap1, const Bp::FilterGroup::Enum* groups, const bool* lut) :
#endif
		mPairManager	(pairManager),
#ifdef BIP_VERSION_1
		mAggregate0		(aggregate0),
		mAggregate1		(aggregate1),
#else
		mRemap0			(remap0),
		mRemap1			(remap1),
#endif
		mGroups			(groups),
		mLUT			(lut)
	{
	}

	PX_FORCE_INLINE	void	outputPair(PxU32 index0, PxU32 index1)
	{
#ifdef BIP_VERSION_1
		const PxU32 aggIndex0 = mAggregate0->getAggregated(index0);
		const PxU32 aggIndex1 = mAggregate1->getAggregated(index1);
#else
		const PxU32 aggIndex0 = mRemap0[index0];
		const PxU32 aggIndex1 = mRemap1[index1];
#endif
		if(groupFiltering(mGroups[aggIndex0], mGroups[aggIndex1], mLUT))
			mPairManager->addPair(aggIndex0, aggIndex1);
	}

	PairArray*				mPairManager;
#ifdef BIP_VERSION_1
	Aggregate*				mAggregate0;
	Aggregate*				mAggregate1;
#else
	const BoundsIndex*		mRemap0;
	const BoundsIndex*		mRemap1;
#endif
	const Bp::FilterGroup::Enum*	mGroups;
	const bool*						mLUT;
};

// One-axis bipartite box pruning between two X-sorted box lists. The codepath
// template parameter toggles strict/non-strict min-X comparison; the leaf runs
// the kernel twice with swapped lists so boxes with equal min X are not
// reported twice.
template<int codepath>
static void boxPruningKernel(
	PairArray* PX_RESTRICT pairManager, const bool* PX_RESTRICT lut,
#ifdef BIP_VERSION_1
	Aggregate* PX_RESTRICT aggregate0, Aggregate* PX_RESTRICT aggregate1,
#else
	PxU32 nb0, const BoundsIndex* PX_RESTRICT remap0, const AABB_Xi* PX_RESTRICT boxes0_X, const AABB_YZ* PX_RESTRICT boxes0_YZ,
	PxU32 nb1, const BoundsIndex* PX_RESTRICT remap1, const AABB_Xi* PX_RESTRICT boxes1_X, const AABB_YZ* PX_RESTRICT boxes1_YZ,
#endif
	const Bp::FilterGroup::Enum* PX_RESTRICT groups)
{
#ifdef BIP_VERSION_1
	outputPair_Bipartite pm(pairManager, aggregate0, aggregate1, groups, lut);
	const PxU32 nb0 = aggregate0->getNbAggregated();
	const PxU32 nb1 = aggregate1->getNbAggregated();
	const AABB_Xi* PX_RESTRICT boxes0_X = aggregate0->getBoundsX();
	const AABB_YZ* PX_RESTRICT boxes0_YZ = aggregate0->getBoundsYZ();
	const AABB_Xi* PX_RESTRICT boxes1_X = aggregate1->getBoundsX();
	const AABB_YZ* PX_RESTRICT boxes1_YZ = aggregate1->getBoundsYZ();
#else
//	outputPair_Bipartite pm(pairManager, aggregate0->getIndices(), aggregate1->getIndices(), groups, lut);
	outputPair_Bipartite pm(pairManager, remap0, remap1, groups, lut);
#endif

	PxU32 index0 = 0;
	PxU32 runningIndex1 = 0;

	while(runningIndex1<nb1 && index0<nb0)
	{
		const AABB_Xi& box0_X = boxes0_X[index0];
		const PosXType2 maxLimit = box0_X.mMaxX;
		const PosXType2 minLimit = box0_X.mMinX;

		// Advance list 1 past boxes that cannot overlap box0 on X
		// (strict vs non-strict depending on codepath, see header comment).
		if(!codepath)
		{
			while(boxes1_X[runningIndex1].mMinX<minLimit)
				runningIndex1++;
		}
		else
		{
			while(boxes1_X[runningIndex1].mMinX<=minLimit)
				runningIndex1++;
		}

		const AABB_YZ& box0 = boxes0_YZ[index0];
		SIMD_OVERLAP_PRELOAD_BOX0

		if(gUseRegularBPKernel)
		{
			// Reference kernel: plain scan until list 1's min X exceeds box0's max X.
			PxU32 index1 = runningIndex1;
			while(boxes1_X[index1].mMinX<=maxLimit)
			{
				ABP_OVERLAP_TEST(boxes1_YZ[index1])
				{
					pm.outputPair(index0, index1);
				}
				index1++;
			}
		}
		else
		{
			// Optimized kernels: walk byte offsets into the X/YZ arrays (8 bytes per
			// box, hence the >>3 when converting an offset back to an index).
			PxU32 Offset = 0;
			const char* const CurrentBoxListYZ = reinterpret_cast<const char*>(&boxes1_YZ[runningIndex1]);
			const char* const CurrentBoxListX = reinterpret_cast<const char*>(&boxes1_X[runningIndex1]);

			if(!gUnrollLoop)
			{
				while(*reinterpret_cast<const PosXType2*>(CurrentBoxListX + Offset)<=maxLimit)
				{
					const float* box = reinterpret_cast<const float*>(CurrentBoxListYZ + Offset*2);
#ifdef ABP_SIMD_OVERLAP
					if(SIMD_OVERLAP_TEST_14a(box))
#else
					if(intersect2D(box0, *reinterpret_cast<const AABB_YZ*>(box)))
#endif
					{
						const PxU32 Index1 = PxU32(CurrentBoxListX + Offset - reinterpret_cast<const char*>(boxes1_X))>>3;
						pm.outputPair(index0, Index1);
					}
					Offset += 8;
				}
			}
			else
			{
				// Unrolled kernel: 5 boxes per iteration, gotos keep the hot loop
				// branch-light; sentinel boxes at the end of the X array stop the scan.
#define BIP_VERSION4
#ifdef BIP_VERSION4
	#ifdef ABP_SIMD_OVERLAP
		#define BLOCK4(x, label)	{const float* box = reinterpret_cast<const float*>(CurrentBoxListYZ + Offset*2 + x*2);	\
									if(SIMD_OVERLAP_TEST_14a(box))	\
										goto label;	}
	#else
		#define BLOCK4(x, label)	{const float* box = reinterpret_cast<const float*>(CurrentBoxListYZ + Offset*2 + x*2);	\
									if(intersect2D(box0, *reinterpret_cast<const AABB_YZ*>(box)))	\
										goto label;	}
	#endif
				goto StartLoop4;
				CODEALIGN16
FoundOverlap3:
				Offset += 8;
				CODEALIGN16
FoundOverlap2:
				Offset += 8;
				CODEALIGN16
FoundOverlap1:
				Offset += 8;
				CODEALIGN16
FoundOverlap0:
				Offset += 8;
				CODEALIGN16
FoundOverlap:
				{
					const PxU32 Index1 = PxU32(CurrentBoxListX + Offset - 8 - reinterpret_cast<const char*>(boxes1_X))>>3;
					pm.outputPair(index0, Index1);
				}
				CODEALIGN16
StartLoop4:
				while(*reinterpret_cast<const PosXType2*>(CurrentBoxListX + Offset + 8*5)<=maxLimit)
				{
					BLOCK4(0, FoundOverlap0)
					BLOCK4(8, FoundOverlap1)
					BLOCK4(16, FoundOverlap2)
					BLOCK4(24, FoundOverlap3)
					Offset += 40;
					BLOCK4(-8, FoundOverlap)
				}
#undef BLOCK4
#endif

				// Tail loop for the last few boxes. Note: each BLOCK deliberately
				// leaves a brace open; the braces are closed after the BLOCK chain.
	#ifdef ABP_SIMD_OVERLAP
		#define BLOCK	if(*reinterpret_cast<const PosXType2*>(CurrentBoxListX + Offset)<=maxLimit)	\
						{if(SIMD_OVERLAP_TEST_14a(reinterpret_cast<const float*>(CurrentBoxListYZ + Offset*2)))	\
							goto OverlapFound;	\
						Offset += 8;
	#else
		#define BLOCK	if(*reinterpret_cast<const PosXType2*>(CurrentBoxListX + Offset)<=maxLimit)	\
						{if(intersect2D(box0, *reinterpret_cast<const AABB_YZ*>(CurrentBoxListYZ + Offset*2)))	\
							goto OverlapFound;	\
						Offset += 8;
	#endif
				goto LoopStart;
				CODEALIGN16
OverlapFound:
				{
					const PxU32 Index1 = PxU32(CurrentBoxListX + Offset - reinterpret_cast<const char*>(boxes1_X))>>3;
					pm.outputPair(index0, Index1);
				}
				Offset += 8;
				CODEALIGN16
LoopStart:
				BLOCK
					BLOCK
						BLOCK
						}
					}
					goto LoopStart;
				}
#undef BLOCK
			}
		}
		index0++;
	}
}

// Runs the bipartite kernel in both directions so each cross pair is found exactly once.
static PX_FORCE_INLINE void doBipartiteBoxPruning_Leaf(PairArray* PX_RESTRICT pairManager, const bool* PX_RESTRICT lut, Aggregate* PX_RESTRICT aggregate0, Aggregate* PX_RESTRICT aggregate1, const Bp::FilterGroup::Enum* PX_RESTRICT groups)
{
#ifdef BIP_VERSION_1
	boxPruningKernel<0>(pairManager, lut, aggregate0, aggregate1, groups);
	boxPruningKernel<1>(pairManager, lut, aggregate1, aggregate0, groups);
#else
	const PxU32 nb0 = aggregate0->getNbAggregated();
	const PxU32 nb1 = aggregate1->getNbAggregated();
	const BoundsIndex* PX_RESTRICT remap0 = aggregate0->getIndices();
	const BoundsIndex* PX_RESTRICT remap1 = aggregate1->getIndices();
	const AABB_Xi* PX_RESTRICT boxes0_X = aggregate0->getBoundsX();
	const AABB_YZ* PX_RESTRICT boxes0_YZ = aggregate0->getBoundsYZ();
	const AABB_Xi* PX_RESTRICT boxes1_X = aggregate1->getBoundsX();
	const AABB_YZ* PX_RESTRICT boxes1_YZ = aggregate1->getBoundsYZ();

	boxPruningKernel<0>(pairManager, lut, nb0, remap0, boxes0_X, boxes0_YZ, nb1, remap1, boxes1_X, boxes1_YZ, groups);
	boxPruningKernel<1>(pairManager, lut, nb1, remap1, boxes1_X, boxes1_YZ, nb0, remap0, boxes0_X, boxes0_YZ, groups);
#endif
}

// Pair-output functor for the complete (single-list, self-collision) pruning kernel.
struct outputPair_Complete
{
	outputPair_Complete(PairArray* pairManager, Aggregate* aggregate, const Bp::FilterGroup::Enum* groups, const bool* lut) :
		mPairManager	(pairManager),
		mAggregate		(aggregate),
		mGroups			(groups),
		mLUT			(lut)
	{
	}

	PX_FORCE_INLINE	void	outputPair(PxU32 index0, PxU32 index1)
	{
		const PxU32 aggIndex0 = mAggregate->getAggregated(index0);
		const PxU32 aggIndex1 = mAggregate->getAggregated(index1);
		if(groupFiltering(mGroups[aggIndex0], mGroups[aggIndex1], mLUT))
			mPairManager->addPair(aggIndex0, aggIndex1);
	}

	PairArray*						mPairManager;
	Aggregate*						mAggregate;
	const Bp::FilterGroup::Enum*	mGroups;
	const bool*						mLUT;
};

// One-axis "complete box pruning": reports overlaps between the boxes of a single
// aggregate (self-collisions). Boxes must be sorted along X beforehand.
static void doCompleteBoxPruning_Leaf(PairArray* PX_RESTRICT pairManager, const bool* PX_RESTRICT lut, Aggregate* PX_RESTRICT aggregate, const Bp::FilterGroup::Enum* PX_RESTRICT groups)
{
	outputPair_Complete pm(pairManager, aggregate, groups, lut);

	const PxU32 nb = aggregate->getNbAggregated();
	const AABB_Xi* PX_RESTRICT boxes_X = aggregate->getBoundsX();
	const AABB_YZ* PX_RESTRICT boxes_YZ = aggregate->getBoundsYZ();

	PxU32 index0 = 0;
	PxU32 runningIndex = 0;

	while(runningIndex<nb && index0<nb)
	{
		const AABB_Xi& box0_X = boxes_X[index0];
		const PosXType2 maxLimit = box0_X.mMaxX;
		const PosXType2 minLimit = box0_X.mMinX;

		// Skip boxes whose min X is below box0's (post-increment also skips box0 itself).
		while(boxes_X[runningIndex++].mMinX<minLimit);

		const AABB_YZ& box0 = boxes_YZ[index0];
		SIMD_OVERLAP_PRELOAD_BOX0

		if(gUseRegularBPKernel)
		{
			// Reference kernel: plain scan until min X exceeds box0's max X.
			PxU32 index1 = runningIndex;
			while(boxes_X[index1].mMinX<=maxLimit)
			{
				ABP_OVERLAP_TEST(boxes_YZ[index1])
				{
					pm.outputPair(index0, index1);
				}
				index1++;
			}
		}
		else
		{
			// Optimized kernels: byte offsets, 8 bytes per box (hence the >>3 below).
			PxU32 Offset = 0;
			const char* const CurrentBoxListYZ = reinterpret_cast<const char*>(&boxes_YZ[runningIndex]);
			const char* const CurrentBoxListX = reinterpret_cast<const char*>(&boxes_X[runningIndex]);

			if(!gUnrollLoop)
			{
				while(*reinterpret_cast<const PosXType2*>(CurrentBoxListX + Offset)<=maxLimit)
				{
					const float* box = reinterpret_cast<const float*>(CurrentBoxListYZ + Offset*2);
#ifdef ABP_SIMD_OVERLAP
					if(SIMD_OVERLAP_TEST_14a(box))
#else
					if(intersect2D(box0, *reinterpret_cast<const AABB_YZ*>(box)))
#endif
					{
						const PxU32 Index = PxU32(CurrentBoxListX + Offset - reinterpret_cast<const char*>(boxes_X))>>3;
						pm.outputPair(index0, Index);
					}
					Offset += 8;
				}
			}
			else
			{
				// Unrolled kernels; see boxPruningKernel() for the bipartite equivalents.
#define VERSION4c
#ifdef VERSION4c
#define VERSION3	// Enable this as our safe loop
	#ifdef ABP_SIMD_OVERLAP
		#define BLOCK4(x, label)	{const float* box = reinterpret_cast<const float*>(CurrentBoxListYZ + Offset*2 + x*2);	\
									if(SIMD_OVERLAP_TEST_14a(box))	\
										goto label;	}
	#else
		#define BLOCK4(x, label)	{const AABB_YZ* box = reinterpret_cast<const AABB_YZ*>(CurrentBoxListYZ + Offset*2 + x*2);	\
									if(intersect2D(box0, *box))	\
										goto label;	}
	#endif
				goto StartLoop4;
				CODEALIGN16
FoundOverlap3:
				Offset += 8;
				CODEALIGN16
FoundOverlap2:
				Offset += 8;
				CODEALIGN16
FoundOverlap1:
				Offset += 8;
				CODEALIGN16
FoundOverlap0:
				Offset += 8;
				CODEALIGN16
FoundOverlap:
				{
					const PxU32 Index = PxU32(CurrentBoxListX + Offset - 8 - reinterpret_cast<const char*>(boxes_X))>>3;
					pm.outputPair(index0, Index);
				}
				CODEALIGN16
StartLoop4:
				while(*reinterpret_cast<const PosXType2*>(CurrentBoxListX + Offset + 8*5)<=maxLimit)
				{
					BLOCK4(0, FoundOverlap0)
					BLOCK4(8, FoundOverlap1)
					BLOCK4(16, FoundOverlap2)
					BLOCK4(24, FoundOverlap3)
					Offset += 40;
					BLOCK4(-8, FoundOverlap)
				}
#endif

#define VERSION3
#ifdef VERSION3
				// Tail loop: each BLOCK deliberately leaves a brace open, closed below.
	#ifdef ABP_SIMD_OVERLAP
		#define BLOCK	if(*reinterpret_cast<const PosXType2*>(CurrentBoxListX + Offset)<=maxLimit)	\
						{if(SIMD_OVERLAP_TEST_14a(reinterpret_cast<const float*>(CurrentBoxListYZ + Offset*2)))	\
							goto BeforeLoop;	\
						Offset += 8;
	#else
		#define BLOCK	if(*reinterpret_cast<const PosXType2*>(CurrentBoxListX + Offset)<=maxLimit)	\
						{if(intersect2D(box0, *reinterpret_cast<const AABB_YZ*>(CurrentBoxListYZ + Offset*2)))	\
							goto BeforeLoop;	\
						Offset += 8;
	#endif
				goto StartLoop;
				CODEALIGN16
BeforeLoop:
				{
					const PxU32 Index = PxU32(CurrentBoxListX + Offset - reinterpret_cast<const char*>(boxes_X))>>3;
					pm.outputPair(index0, Index);
					Offset += 8;
				}
				CODEALIGN16
StartLoop:
				BLOCK
					BLOCK
						BLOCK
							BLOCK
								BLOCK
								}
							}
						}
					}
					goto StartLoop;
				}
#endif
			}
		}
		index0++;
	}
}

/////

// Persistent pair between a standalone actor and an aggregate.
class PersistentActorAggregatePair : public PersistentPairs
{
	public:
					PersistentActorAggregatePair(Aggregate* aggregate, ShapeHandle actorHandle);
	virtual			~PersistentActorAggregatePair()	{}

	virtual	void	findOverlaps(PairArray& pairs, const PxBounds3* PX_RESTRICT bounds, const float* PX_RESTRICT contactDistances, const
Bp::FilterGroup::Enum* PX_RESTRICT groups, const bool* PX_RESTRICT lut);
	virtual	bool	update(AABBManager& manager, BpCacheData* data);

			ShapeHandle	mAggregateHandle;	// Aggregate's bounds index
			ShapeHandle	mActorHandle;		// Actor's bounds index
			Aggregate*	mAggregate;			// Not owned
};

PersistentActorAggregatePair::PersistentActorAggregatePair(Aggregate* aggregate, ShapeHandle actorHandle) :
	mAggregateHandle	(aggregate->mIndex),
	mActorHandle		(actorHandle),
	mAggregate			(aggregate)
{
}

// Finds overlaps between the single actor's inflated bounds and the aggregate's
// aggregated bounds, via the bipartite pruning kernel run in both directions.
void PersistentActorAggregatePair::findOverlaps(PairArray& pairs, const PxBounds3* PX_RESTRICT bounds, const float* PX_RESTRICT contactDistances, const Bp::FilterGroup::Enum* PX_RESTRICT groups, const bool* PX_RESTRICT lut)
{
	if(0)
	{
		// Disabled reference path: wrap the actor in a temporary one-object aggregate.
		Aggregate singleActor(INVALID_ID, false);
		singleActor.addAggregated(mActorHandle);
		singleActor.allocateBounds();
		singleActor.computeBounds(bounds, contactDistances);
		singleActor.getSortedMinBounds();

		mAggregate->getSortedMinBounds();
		doBipartiteBoxPruning_Leaf(&pairs, lut, &singleActor, mAggregate, groups);
	}
	else
	{
		// Optimized path: build the actor's inflated bounds on the stack, no heap allocation.
		mAggregate->getSortedMinBounds();

		const PxU32 nb0 = mAggregate->getNbAggregated();
		const BoundsIndex* PX_RESTRICT remap0 = mAggregate->getIndices();
		const AABB_Xi* PX_RESTRICT boxes0_X = mAggregate->getBoundsX();
		const AABB_YZ* PX_RESTRICT boxes0_YZ = mAggregate->getBoundsYZ();

		PX_ALIGN(16, AABB_Xi inflatedBoundsX[1+NB_SENTINELS]);
		PX_ALIGN(16, AABB_YZ inflatedBoundsYZ);

		// Compute bounds
		{
			PX_ALIGN(16, PxVec4) boxMin;
			PX_ALIGN(16, PxVec4) boxMax;

			const BoundsIndex actorHandle = mActorHandle;
			const PxBounds3& b = bounds[actorHandle];
			// Inflate the actor's bounds by its contact distance.
			const Vec4V offsetV = V4Load(contactDistances[actorHandle]);
			const Vec4V minimumV = V4Sub(V4LoadU(&b.minimum.x), offsetV);
			const Vec4V maximumV = V4Add(V4LoadU(&b.maximum.x), offsetV);
			V4StoreA(minimumV, &boxMin.x);
			V4StoreA(maximumV, &boxMax.x);
			inflatedBoundsX[0].initFromPxVec4(boxMin, boxMax);
			inflatedBoundsYZ.initFromPxVec4(boxMin, boxMax);
			// Sentinels so the pruning kernels can over-read the X array safely.
			for(PxU32 i=0;i<NB_SENTINELS;i++)
				inflatedBoundsX[1+i].initSentinel();
		}

		const PxU32 nb1 = 1;
		const BoundsIndex* PX_RESTRICT remap1 = &mActorHandle;
		const AABB_Xi* PX_RESTRICT boxes1_X = inflatedBoundsX;
		const AABB_YZ* PX_RESTRICT boxes1_YZ = &inflatedBoundsYZ;

		// Both directions, so all cross pairs are reported exactly once.
		boxPruningKernel<0>(&pairs, lut, nb0, remap0, boxes0_X, boxes0_YZ, nb1, remap1, boxes1_X, boxes1_YZ, groups);
		boxPruningKernel<1>(&pairs, lut, nb1, remap1, boxes1_X, boxes1_YZ, nb0, remap0, boxes0_X, boxes0_YZ, groups);
	}
}

// Returns true if this actor-vs-aggregate pair should be deleted by the caller.
bool PersistentActorAggregatePair::update(AABBManager& manager, BpCacheData* data)
{
	if(mShouldBeDeleted || shouldPairBeDeleted(manager.mGroups, mAggregateHandle, mActorHandle))
		return true;

	if(!mAggregate->getNbAggregated())	// PT: needed with lazy empty actors
		return true;

	// Recompute overlaps if the aggregate changed or the actor itself moved.
	if(mAggregate->isDirty() || manager.mChangedHandleMap.boundedTest(mActorHandle))
		manager.updatePairs(*this, data);

	return false;
}

/////

// Persistent pair between two aggregates.
class PersistentAggregateAggregatePair : public PersistentPairs
{
	public:
					PersistentAggregateAggregatePair(Aggregate* aggregate0, Aggregate* aggregate1);
	virtual			~PersistentAggregateAggregatePair()	{}

	virtual	void	findOverlaps(PairArray& pairs, const PxBounds3* PX_RESTRICT bounds, const float* PX_RESTRICT contactDistances, const Bp::FilterGroup::Enum* PX_RESTRICT groups, const bool* PX_RESTRICT lut);
	virtual	bool	update(AABBManager& manager, BpCacheData*);

			ShapeHandle	mAggregateHandle0;
			ShapeHandle	mAggregateHandle1;
			Aggregate*	mAggregate0;	// Not owned
			Aggregate*	mAggregate1;	// Not owned
};

PersistentAggregateAggregatePair::PersistentAggregateAggregatePair(Aggregate* aggregate0, Aggregate* aggregate1) :
	mAggregateHandle0	(aggregate0->mIndex),
	mAggregateHandle1	(aggregate1->mIndex),
	mAggregate0			(aggregate0),
	mAggregate1			(aggregate1)
{
}

// Finds overlaps between the two aggregates' aggregated bounds.
void PersistentAggregateAggregatePair::findOverlaps(PairArray& pairs, const PxBounds3* PX_RESTRICT /*bounds*/, const float* PX_RESTRICT /*contactDistances*/, const Bp::FilterGroup::Enum* PX_RESTRICT groups, const bool* PX_RESTRICT lut)
{
	mAggregate0->getSortedMinBounds();
	mAggregate1->getSortedMinBounds();
	doBipartiteBoxPruning_Leaf(&pairs, lut, mAggregate0, mAggregate1, groups);
}

bool
PersistentAggregateAggregatePair::update(AABBManager& manager, BpCacheData* data)
{
	// Returns true if the pair should be deleted by the caller.
	if(mShouldBeDeleted || shouldPairBeDeleted(manager.mGroups, mAggregateHandle0, mAggregateHandle1))
		return true;

	if(!mAggregate0->getNbAggregated() || !mAggregate1->getNbAggregated())	// PT: needed with lazy empty actors
		return true;

	// Only recompute overlaps when at least one of the two aggregates changed.
	if(mAggregate0->isDirty() || mAggregate1->isDirty())
		manager.updatePairs(*this, data);

	return false;
}

/////

// Persistent set of self-collision pairs within a single aggregate.
class PersistentSelfCollisionPairs : public PersistentPairs
{
	public:
					PersistentSelfCollisionPairs(Aggregate* aggregate);
	virtual			~PersistentSelfCollisionPairs()	{}

	virtual	void	findOverlaps(PairArray& pairs, const PxBounds3* PX_RESTRICT bounds, const float* PX_RESTRICT contactDistances, const Bp::FilterGroup::Enum* PX_RESTRICT groups, const bool* PX_RESTRICT lut);

			Aggregate*	mAggregate;	// Owner aggregate (not owned by this object)
};

PersistentSelfCollisionPairs::PersistentSelfCollisionPairs(Aggregate* aggregate) :
	mAggregate	(aggregate)
{
}

// Self-collisions: complete box pruning over the aggregate's own (X-sorted) bounds.
void PersistentSelfCollisionPairs::findOverlaps(PairArray& pairs, const PxBounds3* PX_RESTRICT/*bounds*/, const float* PX_RESTRICT/*contactDistances*/, const Bp::FilterGroup::Enum* PX_RESTRICT groups, const bool* PX_RESTRICT lut)
{
	mAggregate->getSortedMinBounds();
	doCompleteBoxPruning_Leaf(&pairs, lut, mAggregate, groups);
}

/////

// Aggregate ctor. Bounds buffers are allocated lazily (see allocateBounds()).
// A self-collision pair set is only created when the filter hint requests it.
Aggregate::Aggregate(BoundsIndex index, PxAggregateFilterHint filterHint) :
	mIndex				(index),
	mInflatedBoundsX	(NULL),
	mInflatedBoundsYZ	(NULL),
	mAllocatedSize		(0),
	mFilterHint			(filterHint),
	mDirtySort			(false)
{
	resetDirtyState();
	const PxU32 selfCollisions = PxGetAggregateSelfCollisionBit(filterHint);
	mSelfCollisionPairs = selfCollisions ?
PX_NEW(PersistentSelfCollisionPairs)(this) : NULL;
}

Aggregate::~Aggregate()
{
	// Bounds buffers are raw allocations; the self-collision set was PX_NEW'ed.
	PX_FREE(mInflatedBoundsYZ);
	PX_FREE(mInflatedBoundsX);
	PX_DELETE(mSelfCollisionPairs);
}

// Sorts the aggregated bounds along increasing min X, keeping mAggregated,
// mInflatedBoundsX and mInflatedBoundsYZ in sync. Early-outs if already sorted.
void Aggregate::sortBounds()
{
	mDirtySort = false;

	const PxU32 nbObjects = getNbAggregated();
	if(nbObjects<2)
		return;

	{
		PX_ALLOCA(minPosBounds, InflatedType, nbObjects+1);

		// Single pass: gather the sort keys and detect the already-sorted case.
		bool alreadySorted = true;
		InflatedType previousB = mInflatedBoundsX[0].mMinX;
		minPosBounds[0] = previousB;
		for(PxU32 i=1;i<nbObjects;i++)
		{
			const InflatedType minB = mInflatedBoundsX[i].mMinX;
			if(minB<previousB)
				alreadySorted = false;
			previousB = minB;
			minPosBounds[i] = minB;
		}
		if(alreadySorted)
			return;

		{
			Cm::RadixSortBuffered mRS;
			minPosBounds[nbObjects] = 0xffffffff;
			mRS.Sort(minPosBounds, nbObjects+1, /*RadixHint::*/RADIX_UNSIGNED);

			if(0)
			{
				// Disabled variant: reorders in place through temporary copies of both arrays.
				PxArray<PxU32> copy = mAggregated;
				AABB_Xi* boundsXCopy = PX_ALLOCATE(AABB_Xi, nbObjects, "mInflatedBounds");
				AABB_YZ* boundsYZCopy = PX_ALLOCATE(AABB_YZ, nbObjects, "mInflatedBounds");
				PxMemCopy(boundsXCopy, mInflatedBoundsX, nbObjects*sizeof(AABB_Xi));
				PxMemCopy(boundsYZCopy, mInflatedBoundsYZ, nbObjects*sizeof(AABB_YZ));
				const PxU32* Sorted = mRS.GetRanks();
				for(PxU32 i=0;i<nbObjects;i++)
				{
					const PxU32 sortedIndex = Sorted[i];
					mAggregated[i] = copy[sortedIndex];
					mInflatedBoundsX[i] = boundsXCopy[sortedIndex];
					mInflatedBoundsYZ[i] = boundsYZCopy[sortedIndex];
				}
				PX_FREE(boundsYZCopy);
				PX_FREE(boundsXCopy);
			}
			else
			{
				// Active variant: writes sorted bounds to fresh buffers, then swaps them in.
				PxArray<PxU32> copy = mAggregated;	// PT: TODO: revisit this, avoid the copy like we do for the other buffers
				AABB_Xi* sortedBoundsX = PX_ALLOCATE(AABB_Xi, (nbObjects+NB_SENTINELS), "mInflatedBounds");
				AABB_YZ* sortedBoundsYZ = PX_ALLOCATE(AABB_YZ, (nbObjects), "mInflatedBounds");
				const PxU32* Sorted = mRS.GetRanks();
				for(PxU32 i=0;i<nbObjects;i++)
				{
					const PxU32 sortedIndex = Sorted[i];
					mAggregated[i] = copy[sortedIndex];
					sortedBoundsX[i] = mInflatedBoundsX[sortedIndex];
					sortedBoundsYZ[i] = mInflatedBoundsYZ[sortedIndex];
				}
				for(PxU32 i=0;i<NB_SENTINELS;i++)
sortedBoundsX[nbObjects+i].initSentinel();

				mAllocatedSize = nbObjects;
				PX_FREE(mInflatedBoundsYZ);
				PX_FREE(mInflatedBoundsX);
				mInflatedBoundsX = sortedBoundsX;
				mInflatedBoundsYZ = sortedBoundsYZ;
			}
		}
	}
}

// (Re)allocates the split bounds buffers when the number of aggregated shapes changed.
void Aggregate::allocateBounds()
{
	const PxU32 size = getNbAggregated();
	if(size!=mAllocatedSize)
	{
		mAllocatedSize = size;
		PX_FREE(mInflatedBoundsYZ);
		PX_FREE(mInflatedBoundsX);
		// X buffer gets extra slots for the sentinel entries used by the pruning kernels.
		mInflatedBoundsX = PX_ALLOCATE(AABB_Xi, (size+NB_SENTINELS), "mInflatedBounds");
		mInflatedBoundsYZ = PX_ALLOCATE(AABB_YZ, (size), "mInflatedBounds");
	}
}

// Recomputes the per-shape inflated bounds (inflated by contact distance) and the
// merged bounds of the whole aggregate. Requires at least one aggregated shape.
void Aggregate::computeBounds(const PxBounds3* PX_RESTRICT bounds, const float* PX_RESTRICT contactDistances) /*PX_RESTRICT*/
{
//	PX_PROFILE_ZONE("Aggregate::computeBounds",0);

	const PxU32 size = getNbAggregated();
	PX_ASSERT(size);

	// PT: TODO: delay the conversion to integers until we sort (i.e. really need) the aggregated bounds?

	PX_ALIGN(16, PxVec4) boxMin;
	PX_ALIGN(16, PxVec4) boxMax;

	const PxU32 lookAhead = 4;	// prefetch distance, in objects
	Vec4V minimumV;
	Vec4V maximumV;
	{
		// First object: also prefetch the next few bounds/contact distances.
		const BoundsIndex index0 = getAggregated(0);
		const PxU32 last = PxMin(lookAhead, size-1);
		for(PxU32 i=1;i<=last;i++)
		{
			const BoundsIndex index = getAggregated(i);
			PxPrefetchLine(bounds + index, 0);
			PxPrefetchLine(contactDistances + index, 0);
		}

		const PxBounds3& b = bounds[index0];
		const Vec4V offsetV = V4Load(contactDistances[index0]);
		minimumV = V4Sub(V4LoadU(&b.minimum.x), offsetV);
		maximumV = V4Add(V4LoadU(&b.maximum.x), offsetV);
		V4StoreA(minimumV, &boxMin.x);
		V4StoreA(maximumV, &boxMax.x);
		mInflatedBoundsX[0].initFromPxVec4(boxMin, boxMax);
		mInflatedBoundsYZ[0].initFromPxVec4(boxMin, boxMax);
	}

	// Remaining objects: inflate, store split bounds, and merge into the running min/max.
	for(PxU32 i=1;i<size;i++)
	{
		const BoundsIndex index = getAggregated(i);
		if(i+lookAhead<size)
		{
			const BoundsIndex nextIndex = getAggregated(i+lookAhead);
			PxPrefetchLine(bounds + nextIndex, 0);
			PxPrefetchLine(contactDistances + nextIndex, 0);
		}

		const PxBounds3& b = bounds[index];
		const Vec4V offsetV = V4Load(contactDistances[index]);
		const Vec4V aggregatedBoundsMinV = V4Sub(V4LoadU(&b.minimum.x),
offsetV);
		const Vec4V aggregatedBoundsMaxV = V4Add(V4LoadU(&b.maximum.x), offsetV);
		minimumV = V4Min(minimumV, aggregatedBoundsMinV);
		maximumV = V4Max(maximumV, aggregatedBoundsMaxV);
		V4StoreA(aggregatedBoundsMinV, &boxMin.x);
		V4StoreA(aggregatedBoundsMaxV, &boxMax.x);
		mInflatedBoundsX[i].initFromPxVec4(boxMin, boxMax);
		mInflatedBoundsYZ[i].initFromPxVec4(boxMin, boxMax);
	}
	// Store the merged bounds of the whole aggregate.
	StoreBounds(mBounds, minimumV, maximumV);
//	StoreBounds(boundsArray.begin()[mIndex], minimumV, maximumV);
//	boundsArray.setChangedState();

/*	if(0)
	{
		const PxBounds3& previousBounds = boundsArray.getBounds(mIndex);
		if(previousBounds.minimum==aggregateBounds.minimum && previousBounds.maximum==aggregateBounds.maximum)
		{
			// PT: same bounds as before
			printf("SAME BOUNDS\n");
		}
	}*/

	// Close the X array with sentinels so the pruning kernels can over-read safely.
	for(PxU32 i=0;i<NB_SENTINELS;i++)
		mInflatedBoundsX[size+i].initSentinel();
	mDirtySort = true;	// bounds changed => current X-order may be stale
}

/////

// Marks all free slots of the aggregates array in a bitmap. The free-list is
// intrusive: each free slot stores the index of the next free slot, cast to a pointer.
static void buildFreeBitmap(PxBitMap& bitmap, PxU32 currentFree, const PxArray<Aggregate*>& aggregates)
{
	const PxU32 N = aggregates.size();

	bitmap.resizeAndClear(N);

	while(currentFree!=PX_INVALID_U32)
	{
		bitmap.set(currentFree);
		currentFree = PxU32(size_t(aggregates[currentFree]));
	}
}

#if PX_VC
#pragma warning(disable: 4355 )	// "this" used in base member initializer list
#endif

AABBManager::AABBManager(	BroadPhase& bp, BoundsArray& boundsArray, PxFloatArrayPinned& contactDistance,
							PxU32 maxNbAggregates, PxU32 maxNbShapes, PxVirtualAllocator& allocator, PxU64 contextID,
							PxPairFilteringMode::Enum kineKineFilteringMode, PxPairFilteringMode::Enum staticKineFilteringMode) :
	AABBManagerBase			(bp, boundsArray, contactDistance, maxNbAggregates, maxNbShapes, allocator, contextID, kineKineFilteringMode, staticKineFilteringMode),
	mPostBroadPhase2		(contextID, *this),
	mPostBroadPhase3		(contextID, this, "AABBManager::postBroadPhaseStage3"),
	mPreBpUpdateTask		(contextID),
	mTimestamp				(0),
	mFirstFreeAggregate		(PX_INVALID_U32),
	mOutOfBoundsObjects		("AABBManager::mOutOfBoundsObjects"),
	mOutOfBoundsAggregates
("AABBManager::mOutOfBoundsAggregates")
{
}

// Deletes every persistent pair object stored in an aggregate-pair map.
static void releasePairs(AggPairMap& map)
{
	for(AggPairMap::Iterator iter = map.getIterator(); !iter.done(); ++iter)
		PX_DELETE(iter->second);
}

void AABBManager::destroy()
{
	releasePairs(mActorAggregatePairs);
	releasePairs(mAggregateAggregatePairs);

	{
		// The free-list is stored inside mAggregates itself, so first build a bitmap
		// of free slots and only delete the live aggregates.
		PxBitMap bitmap;
		buildFreeBitmap(bitmap, mFirstFreeAggregate, mAggregates);

		const PxU32 nb = mAggregates.size();
		for(PxU32 i=0;i<nb;i++)
		{
			if(bitmap.test(i))
				continue;
			Aggregate* a = mAggregates[i];
			PX_DELETE(a);
		}
	}

	// Drain and destroy the per-thread broadphase cache pool.
	BpCacheData* entry = static_cast<BpCacheData*>(mBpThreadContextPool.pop());
	while (entry)
	{
		entry->~BpCacheData();
		PX_FREE(entry);
		entry = static_cast<BpCacheData*>(mBpThreadContextPool.pop());
	}

	PX_DELETE_THIS;
}

// Unlinks an aggregate from the dirty array (swap-with-last) and fixes up the
// dirty index of the aggregate that was moved into its slot.
static void removeAggregateFromDirtyArray(Aggregate* aggregate, PxArray<Aggregate*>& dirtyAggregates)
{
	// PT: TODO: do this lazily like for interactions?
	if(aggregate->isDirty())
	{
		const PxU32 dirtyIndex = aggregate->mDirtyIndex;
		PX_ASSERT(dirtyAggregates[dirtyIndex]==aggregate);
		dirtyAggregates.replaceWithLast(dirtyIndex);
		if(dirtyIndex<dirtyAggregates.size())
			dirtyAggregates[dirtyIndex]->mDirtyIndex = dirtyIndex;
		aggregate->resetDirtyState();
	}
	else
	{
		PX_ASSERT(!dirtyAggregates.findAndReplaceWithLast(aggregate));
	}
}

// PT: userData = Sc::ElementSim
// Registers a bounds entry, either as a standalone actor or as a member of an aggregate.
bool AABBManager::addBounds(BoundsIndex index, PxReal contactDistance, Bp::FilterGroup::Enum group, void* userData, AggregateHandle aggregateHandle, ElementType::Enum volumeType)
{
//	PX_ASSERT(checkID(index));

	initEntry(index, contactDistance, group, userData, volumeType);

	if(aggregateHandle==PX_INVALID_U32)
	{
		// Standalone actor: goes straight to the broadphase.
		mVolumeData[index].setSingleActor();
		addBPEntry(index);
	}
	else
	{
#if PX_CHECKED
		if(aggregateHandle>=mAggregates.size())
			return PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "AABBManager::addBounds - aggregateId out of bounds\n");

/*		{
			PxU32 firstFreeAggregate = mFirstFreeAggregate;
			while(firstFreeAggregate!=PX_INVALID_U32)
			{
				if(firstFreeAggregate==aggregateHandle)
				{
PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "AABBManager::destroyAggregate - aggregate has already been removed\n");
					return BP_INVALID_BP_HANDLE;
				}
				firstFreeAggregate = PxU32(size_t(mAggregates[firstFreeAggregate]));
			}
		}*/
#endif
		mVolumeData[index].setAggregated(aggregateHandle);

		Aggregate* aggregate = getAggregateFromHandle(aggregateHandle);
		{
			// PT: schedule the aggregate for BP insertion here, if we just added its first shape
			if(!aggregate->getNbAggregated())
				addBPEntry(aggregate->mIndex);

			aggregate->addAggregated(index);

			// PT: new actor added to aggregate => mark dirty to recompute bounds later
			aggregate->markAsDirty(mDirtyAggregates);
		}
	}

	// PT: TODO: remove or use this return value. Currently useless since always true. Gives birth to unreachable code in callers.
	return true;
}

// Unregisters a bounds entry. Returns true only when the entry was removed from
// the broadphase itself (single actors).
bool AABBManager::removeBounds(BoundsIndex index)
{
	// PT: TODO: shouldn't it be compared to mUsedSize?
	PX_ASSERT(index < mVolumeData.size());

	bool res = false;
	if(mVolumeData[index].isSingleActor())
	{
		res = removeBPEntry(index);
	}
	else
	{
		PX_ASSERT(mVolumeData[index].isAggregated());

		const AggregateHandle aggregateHandle = mVolumeData[index].getAggregateOwner();
		Aggregate* aggregate = getAggregateFromHandle(aggregateHandle);
		bool status = aggregate->removeAggregated(index);
		(void)status;
//		PX_ASSERT(status);	// PT: can be false when >128 shapes

		// PT: remove empty aggregates, otherwise the BP will crash with empty bounds
		if(!aggregate->getNbAggregated())
		{
			removeBPEntry(aggregate->mIndex);
			removeAggregateFromDirtyArray(aggregate, mDirtyAggregates);
		}
		else
			aggregate->markAsDirty(mDirtyAggregates);	// PT: actor removed from aggregate => mark dirty to recompute bounds later
	}

	resetEntry(index);
	return res;
}

// PT: TODO: the userData is actually a PxAggregate pointer. Maybe we could expose/use that.
// Creates a new aggregate entry. Slots of mAggregates are recycled through an
// intrusive free-list: a free slot stores the next free index cast to a pointer.
AggregateHandle AABBManager::createAggregate(BoundsIndex index, Bp::FilterGroup::Enum group, void* userData, PxU32 /*maxNumShapes*/, PxAggregateFilterHint filterHint)
{
//	PX_ASSERT(checkID(index));

	Aggregate* aggregate = PX_NEW(Aggregate)(index, filterHint);

	AggregateHandle handle;
	if(mFirstFreeAggregate==PX_INVALID_U32)
	{
		handle = mAggregates.size();
		mAggregates.pushBack(aggregate);
	}
	else
	{
		// Pop a recycled slot from the free-list.
		handle = mFirstFreeAggregate;
		mFirstFreeAggregate = PxU32(size_t(mAggregates[mFirstFreeAggregate]));
		mAggregates[handle] = aggregate;
	}

#ifdef BP_USE_AGGREGATE_GROUP_TAIL
/*	PxU32 id = index;
	id<<=2;
	id|=FilterType::AGGREGATE;
	initEntry(index, 0.0f, Bp::FilterGroup::Enum(id), userData);
*/
	initEntry(index, 0.0f, getAggregateGroup(), userData);
	PX_UNUSED(group);
#else
	initEntry(index, 0.0f, group, userData);
#endif

	mVolumeData[index].setAggregate(handle);

	mBoundsArray.setBounds(PxBounds3::empty(), index);

	mNbAggregates++;

	// PT: we don't add empty aggregates to mAddedHandleMap yet, since they make the BP crash.
	return handle;
}

// Destroys an aggregate and pushes its slot back on the free-list. Outputs the
// aggregate's bounds index and filter group so the caller can clean up.
bool AABBManager::destroyAggregate(BoundsIndex& index_, Bp::FilterGroup::Enum& group_, AggregateHandle aggregateHandle)
{
#if PX_CHECKED
	if(aggregateHandle>=mAggregates.size())
		return PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "AABBManager::destroyAggregate - aggregateId out of bounds\n");

	{
		// Walk the free-list to detect double-destroy.
		PxU32 firstFreeAggregate = mFirstFreeAggregate;
		while(firstFreeAggregate!=PX_INVALID_U32)
		{
			if(firstFreeAggregate==aggregateHandle)
				return PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "AABBManager::destroyAggregate - aggregate has already been removed\n");
			firstFreeAggregate = PxU32(size_t(mAggregates[firstFreeAggregate]));
		}
	}
#endif

	Aggregate* aggregate = getAggregateFromHandle(aggregateHandle);

#if PX_CHECKED
	if(aggregate->getNbAggregated())
		return PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "AABBManager::destroyAggregate - aggregate still has bounds that needs removed\n");
#endif

	const BoundsIndex index = aggregate->mIndex;
	removeAggregateFromDirtyArray(aggregate, mDirtyAggregates);

	if(mAddedHandleMap.test(index))			// PT: if object had been added this frame...
		mAddedHandleMap.reset(index);		// PT: ...then simply revert the previous operation locally (it hasn't been passed to the BP yet).
	else if(aggregate->getNbAggregated())	// PT: else we need to remove it from the BP if it has been added there. If there's no aggregated
		mRemovedHandleMap.set(index);		// PT: shapes then the aggregate has never been added, or already removed.

	PX_DELETE(aggregate);

	// Push the slot back onto the intrusive free-list.
	mAggregates[aggregateHandle] = reinterpret_cast<Aggregate*>(size_t(mFirstFreeAggregate));
	mFirstFreeAggregate = aggregateHandle;

	// PT: TODO: shouldn't it be compared to mUsedSize?
PX_ASSERT(index < mVolumeData.size());
	index_ = index;
	group_ = mGroups[index];
#ifdef BP_USE_AGGREGATE_GROUP_TAIL
	releaseAggregateGroup(mGroups[index]);
#endif
	resetEntry(index);

	PX_ASSERT(mNbAggregates);
	mNbAggregates--;

	return true;
}

// Re-enqueues every valid object for a broadphase update after an origin shift,
// recomputing aggregate bounds on the spot.
void AABBManager::handleOriginShift()
{
	mOriginShifted = false;

	// PT: TODO: isn't the following loop potentially updating removed objects?
	// PT: TODO: check that aggregates code is correct here
	for(PxU32 i=0; i<mUsedSize; i++)
	{
		if(mGroups[i] == Bp::FilterGroup::eINVALID)
			continue;

		{
			if(mVolumeData[i].isSingleActor())
			{
				// Just-added objects are already in mAddedHandleMap; don't also mark them updated.
				if(!mAddedHandleMap.test(i))
					mUpdatedHandles.pushBack(i);	// PT: TODO: BoundsIndex-to-ShapeHandle confusion here
			}
			else if(mVolumeData[i].isAggregate())
			{
				const AggregateHandle aggregateHandle = mVolumeData[i].getAggregate();
				Aggregate* aggregate = getAggregateFromHandle(aggregateHandle);
				if(aggregate->getNbAggregated())
				{
					// Recompute the aggregate's merged bounds immediately for the shifted origin.
					aggregate->markAsDirty(mDirtyAggregates);
					aggregate->allocateBounds();
					aggregate->computeBounds(mBoundsArray.begin(), mContactDistance.begin());
					mBoundsArray.begin()[aggregate->mIndex] = aggregate->getMergedBounds();

					if(!mAddedHandleMap.test(i))
						mUpdatedHandles.pushBack(i);	// PT: TODO: BoundsIndex-to-ShapeHandle confusion here
				}
			}
		}
	}
}

// Worker task: computes bounds for a sub-range of the dirty aggregates array,
// prefetching the next aggregate while processing the current one.
void AggregateBoundsComputationTask::runInternal()
{
	const BoundsArray& boundArray = mManager->getBoundsArray();
	const float* contactDistances = mManager->getContactDistances();

	PxU32 size = mNbToGo;
	Aggregate** currentAggregate = mAggregates + mStart;
	while(size--)
	{
		if(size)
		{
			Aggregate* nextAggregate = *(currentAggregate+1);
			PxPrefetchLine(nextAggregate, 0);
			PxPrefetchLine(nextAggregate, 64);
		}

		(*currentAggregate)->computeBounds(boundArray.begin(), contactDistances);
		currentAggregate++;
	}
}

void PreBpUpdateTask::runInternal()
{
	mManager->preBpUpdate_CPU(mNumCpuTasks);
}

// Splits the dirty-aggregates bounds computation across numCpuTasks worker tasks.
void AABBManager::startAggregateBoundsComputationTasks(PxU32 nbToGo, PxU32 numCpuTasks, Cm::FlushPool& flushPool)
{
	const PxU32 nbAggregatesPerTask = nbToGo > numCpuTasks ?
nbToGo / numCpuTasks : nbToGo; // PT: TODO: better load balancing PxU32 start = 0; while(nbToGo) { AggregateBoundsComputationTask* T = PX_PLACEMENT_NEW(flushPool.allocate(sizeof(AggregateBoundsComputationTask)), AggregateBoundsComputationTask(mContextID)); const PxU32 nb = nbToGo < nbAggregatesPerTask ? nbToGo : nbAggregatesPerTask; T->Init(this, start, nb, mDirtyAggregates.begin()); start += nb; nbToGo -= nb; T->setContinuation(&mPreBpUpdateTask); T->removeReference(); } } void AABBManager::updateBPFirstPass(PxU32 numCpuTasks, Cm::FlushPool& flushPool, bool /*hasContactDistanceUpdated*/, PxBaseTask* continuation) { PX_PROFILE_ZONE("AABBManager::updateBPFirstPass", mContextID); const bool singleThreaded = gSingleThreaded || numCpuTasks<2; if(!singleThreaded) { PX_ASSERT(numCpuTasks); mPreBpUpdateTask.Init(this, numCpuTasks); mPreBpUpdateTask.setContinuation(continuation); } // Add { PX_PROFILE_ZONE("AABBManager::updateBPFirstPass - add", mContextID); mAddedHandles.resetOrClear(); const PxU32* bits = mAddedHandleMap.getWords(); if(bits) { // PT: ### bitmap iterator pattern const PxU32 lastSetBit = mAddedHandleMap.findLast(); for(PxU32 w = 0; w <= lastSetBit >> 5; ++w) { for(PxU32 b = bits[w]; b; b &= b-1) { const BoundsIndex handle = PxU32(w<<5|PxLowestSetBit(b)); PX_ASSERT(!mVolumeData[handle].isAggregated()); mAddedHandles.pushBack(handle); // PT: TODO: BoundsIndex-to-ShapeHandle confusion here } } } } // Update { PX_PROFILE_ZONE("AABBManager::updateBPFirstPass - update", mContextID); mUpdatedHandles.resetOrClear(); if(!mOriginShifted) { // PT: TODO: // - intercept calls marking aggregateD shapes dirty, in order to mark their aggregates dirty at the same time. That way we don't discover // them while parsing the map, i.e. the map is already fully complete when the parsing begins (no need to parse twice etc). 
// - once this is done, aggregateD shapes can be ignored during parsing (since we only needed them to go to their aggregates) // - we can then handle aggregates while parsing the map, i.e. there's no need for sorting anymore. // - there will be some thoughts to do about the dirty aggregates coming from the added map parsing: we still need to compute their bounds, // but we don't want to add these to mUpdatedHandles (since they're already in mAddedHandles) // - we still need the set of dirty aggregates post broadphase, but we don't want to re-parse the full map for self-collisions. So we may still // need to put dirty aggregates in an array, but that might be simplified now // - the 'isDirty' checks to updatePairs can use the update map though - but the boundedTest is probably more expensive than the current test // PT: TODO: another idea: just output all aggregate handles by default then have a pass on mUpdatedHandles to remove them if that wasn't actually necessary // ...or just drop the artificial requirement for aggregates... { PX_PROFILE_ZONE("AABBManager::updateBPFirstPass - update - bitmap iteration", mContextID); const PxU32* bits = mChangedHandleMap.getWords(); if(bits) { // PT: ### bitmap iterator pattern const PxU32 lastSetBit = mChangedHandleMap.findLast(); for(PxU32 w = 0; w <= lastSetBit >> 5; ++w) { for(PxU32 b = bits[w]; b; b &= b-1) { const BoundsIndex handle = PxU32(w<<5|PxLowestSetBit(b)); PX_ASSERT(!mRemovedHandleMap.test(handle)); // a handle may only be updated and deleted if it was just added. 
PX_ASSERT(!mVolumeData[handle].isAggregate()); // PT: make sure changedShapes doesn't contain aggregates if(mAddedHandleMap.test(handle)) // just-inserted handles may also be marked updated, so skip them continue; if(mVolumeData[handle].isSingleActor()) { PX_ASSERT(mGroups[handle] != Bp::FilterGroup::eINVALID); mUpdatedHandles.pushBack(handle); // PT: TODO: BoundsIndex-to-ShapeHandle confusion here } else { PX_ASSERT(mVolumeData[handle].isAggregated()); const AggregateHandle aggregateHandle = mVolumeData[handle].getAggregateOwner(); Aggregate* aggregate = getAggregateFromHandle(aggregateHandle); // PT: an actor from the aggregate has been updated => mark dirty to recompute bounds later // PT: we don't recompute the bounds right away since multiple actors from the same aggregate may have changed. aggregate->markAsDirty(mDirtyAggregates); } } } } } const PxU32 size = mDirtyAggregates.size(); if(size) { PX_PROFILE_ZONE("AABBManager::updateBPFirstPass - update - dirty iteration", mContextID); for(PxU32 i=0;i<size;i++) { Aggregate* aggregate = mDirtyAggregates[i]; if(i!=size-1) { Aggregate* nextAggregate = mDirtyAggregates[i]; PxPrefetchLine(nextAggregate, 0); } aggregate->allocateBounds(); if(singleThreaded) { aggregate->computeBounds(mBoundsArray.begin(), mContactDistance.begin()); mBoundsArray.begin()[aggregate->mIndex] = aggregate->getMergedBounds(); } // PT: Can happen when an aggregate has been created and then its actors have been changed (with e.g. setLocalPose) // before a BP call. 
if(!mAddedHandleMap.test(aggregate->mIndex)) mUpdatedHandles.pushBack(aggregate->mIndex); // PT: TODO: BoundsIndex-to-ShapeHandle confusion here } if(!singleThreaded) startAggregateBoundsComputationTasks(size, numCpuTasks, flushPool); // PT: we're already sorted if no dirty-aggregates are involved { PX_PROFILE_ZONE("AABBManager::updateAABBsAndBP - update - sort", mContextID); // PT: TODO: remove this PxSort(mUpdatedHandles.begin(), mUpdatedHandles.size()); } } } else { handleOriginShift(); } } // Remove { PX_PROFILE_ZONE("AABBManager::updateBPFirstPass - remove", mContextID); mRemovedHandles.resetOrClear(); const PxU32* bits = mRemovedHandleMap.getWords(); if(bits) { // PT: ### bitmap iterator pattern const PxU32 lastSetBit = mRemovedHandleMap.findLast(); for(PxU32 w = 0; w <= lastSetBit >> 5; ++w) { for(PxU32 b = bits[w]; b; b &= b-1) { const BoundsIndex handle = PxU32(w<<5|PxLowestSetBit(b)); PX_ASSERT(!mVolumeData[handle].isAggregated()); mRemovedHandles.pushBack(handle); // PT: TODO: BoundsIndex-to-ShapeHandle confusion here } } } } ///// // PT: TODO: do we need to run these threads when we origin-shifted everything before? if(singleThreaded) preBpUpdate_CPU(numCpuTasks); else mPreBpUpdateTask.removeReference(); } // PT: previously known as AABBManager::updateAABBsAndBP void AABBManager::updateBPSecondPass(PxcScratchAllocator* scratchAllocator, PxBaseTask* continuation) { PX_PROFILE_ZONE("AABBManager::updateBPSecondPass", mContextID); // PT: TODO: do we need to run these threads when we origin-shifted everything before? //finalizeUpdate(numCpuTasks, scratchAllocator, continuation); // PT: code below used to be "finalizeUpdate" // PT: TODO: move to base? 
	// Package everything the broadphase needs for this frame: added/updated/removed handle
	// lists, bounds, filter groups and contact distances.
	const BroadPhaseUpdateData updateData(mAddedHandles.begin(), mAddedHandles.size(),
		mUpdatedHandles.begin(), mUpdatedHandles.size(),
		mRemovedHandles.begin(), mRemovedHandles.size(),
		mBoundsArray.begin(), mGroups.begin(), mContactDistance.begin(), mBoundsArray.getCapacity(), mFilters,
		// PT: TODO: this could also be removed now. The key to understanding the refactorings is that none of the two bools below are actually used by the CPU versions.
		mBoundsArray.hasChanged(), false);
	PX_ASSERT(updateData.isValid(false));

	const bool b = updateData.getNumCreatedHandles() || updateData.getNumRemovedHandles();

	//KS - skip broad phase if there are no updated shapes.
	// PT: BP UPDATE CALL
	if(b || updateData.getNumUpdatedHandles())
		mBroadPhase.update(scratchAllocator, updateData, continuation);
}

// In the multi-threaded path, writes back the merged bounds computed by the parallel
// aggregate-bounds tasks into the bounds array (the single-threaded path already did this
// inline in updateBPFirstPass, hence the !singleThreaded guard).
void AABBManager::preBpUpdate_CPU(PxU32 numCpuTasks)
{
	PX_PROFILE_ZONE("AABBManager::preBpUpdate", mContextID);

	const bool singleThreaded = gSingleThreaded || numCpuTasks<2;
	if (!singleThreaded)
	{
		const PxU32 size = mDirtyAggregates.size();
		for (PxU32 i = 0; i<size; i++)
		{
			Aggregate* aggregate = mDirtyAggregates[i];
			mBoundsArray.begin()[aggregate->mIndex] = aggregate->getMergedBounds();
		}
	}
}

// Appends one overlap record to the array selected by the pair's dominant volume type.
// Stores the raw bounds-index IDs (not userData pointers) in mUserData0/1.
static PX_FORCE_INLINE void outputOverlap(PxArray<AABBOverlap>* overlaps, const VolumeData* volumeData, PxU32 id0, PxU32 id1)
{
	// overlaps.pushBack(AABBOverlap(volumeData[id0].userData, volumeData[id1].userData, handle));
	const ElementType::Enum volumeType = PxMax(volumeData[id0].getVolumeType(), volumeData[id1].getVolumeType());
	//overlaps[volumeType].pushBack(AABBOverlap(reinterpret_cast<void*>(size_t(id0)), reinterpret_cast<void*>(size_t(id1))));
	AABBOverlap* overlap = Cm::reserveContainerMemory(overlaps[volumeType], 1);
	// PT: we don't convert to pointers right away because we need the IDs in postBpStage3
	overlap->mUserData0 = reinterpret_cast<void*>(size_t(id0));
	overlap->mUserData1 = reinterpret_cast<void*>(size_t(id1));
	// PT: note that overlap->mPairUserData remains uninitialized here
}
static PX_FORCE_INLINE void createOverlap(PxArray<AABBOverlap>* overlaps, const VolumeData* volumeData, PxU32 id0, PxU32 id1) { outputOverlap(overlaps, volumeData, id0, id1); } static PX_FORCE_INLINE void deleteOverlap(PxArray<AABBOverlap>* overlaps, const VolumeData* volumeData, PxU32 id0, PxU32 id1) { // PX_ASSERT(volumeData[id0].getUserData()); // PX_ASSERT(volumeData[id1].getUserData()); if (volumeData[id0].getUserData() && volumeData[id1].getUserData()) // PT: TODO: no idea if this is the right thing to do or if it's normal to get null ptrs here outputOverlap(overlaps, volumeData, id0, id1); } void PersistentPairs::outputDeletedOverlaps(PxArray<AABBOverlap>* overlaps, const VolumeData* volumeData) { const PxU32 nbActivePairs = mPM.mNbActivePairs; for(PxU32 i=0;i<nbActivePairs;i++) { const InternalPair& p = mPM.mActivePairs[i]; deleteOverlap(overlaps, volumeData, p.getId0(), p.getId1()); } } PX_FORCE_INLINE void PersistentPairs::updatePairs( PxU32 timestamp, const PxBounds3* bounds, const float* contactDistances, const Bp::FilterGroup::Enum* groups, const bool* lut, VolumeData* volumeData, PxArray<AABBOverlap>* createdOverlaps, PxArray<AABBOverlap>* destroyedOverlaps) { if(mTimestamp==timestamp) return; mTimestamp = timestamp; findOverlaps(mPM, bounds, contactDistances, groups, lut); PxU32 i=0; PxU32 nbActivePairs = mPM.mNbActivePairs; while(i<nbActivePairs) { InternalPair& p = mPM.mActivePairs[i]; const PxU32 id0 = p.getId0(); const PxU32 id1 = p.getId1(); if(p.isNew()) { createOverlap(createdOverlaps, volumeData, id0, id1); p.clearNew(); p.clearUpdated(); i++; } else if(p.isUpdated()) { p.clearUpdated(); i++; } else { deleteOverlap(destroyedOverlaps, volumeData, id0, id1); const PxU32 hashValue = hash(id0, id1) & mPM.mMask; mPM.removePair(id0, id1, hashValue, i); nbActivePairs--; } } mPM.shrinkMemory(); } void AABBManager::updatePairs(PersistentPairs& p, BpCacheData* data) { if (data) p.updatePairs(mTimestamp, mBoundsArray.begin(), mContactDistance.begin(), 
mGroups.begin(), mFilters.getLUT(), mVolumeData.begin(), data->mCreatedPairs, data->mDeletedPairs); else p.updatePairs(mTimestamp, mBoundsArray.begin(), mContactDistance.begin(), mGroups.begin(), mFilters.getLUT(), mVolumeData.begin(), mCreatedOverlaps, mDestroyedOverlaps); } static PX_FORCE_INLINE Bp::FilterType::Enum convertFilterType(PxAggregateType::Enum agType) { if(agType==PxAggregateType::eGENERIC) return Bp::FilterType::DYNAMIC; else if(agType==PxAggregateType::eSTATIC) return Bp::FilterType::STATIC; PX_ASSERT(agType==PxAggregateType::eKINEMATIC); return Bp::FilterType::KINEMATIC; } PersistentActorAggregatePair* AABBManager::createPersistentActorAggregatePair(ShapeHandle volA, ShapeHandle volB) { ShapeHandle actorHandle; ShapeHandle aggregateHandle; if(mVolumeData[volA].isAggregate()) { aggregateHandle = volA; actorHandle = volB; } else { PX_ASSERT(mVolumeData[volB].isAggregate()); aggregateHandle = volB; actorHandle = volA; } const AggregateHandle h = mVolumeData[aggregateHandle].getAggregate(); Aggregate* aggregate = getAggregateFromHandle(h); PX_ASSERT(aggregate->mIndex==aggregateHandle); // Single-aggregate filtering { const PxAggregateType::Enum agType = PxGetAggregateType(aggregate->getFilterHint()); const int t0 = convertFilterType(agType); const int t1 = mGroups[actorHandle] & BP_FILTERING_TYPE_MASK; // PT: from "groupFiltering" function if(!mFilters.mLUT[t0][t1]) return NULL; } return PX_NEW(PersistentActorAggregatePair)(aggregate, actorHandle); // PT: TODO: use a pool or something } PersistentAggregateAggregatePair* AABBManager::createPersistentAggregateAggregatePair(ShapeHandle volA, ShapeHandle volB) { PX_ASSERT(mVolumeData[volA].isAggregate() && mVolumeData[volB].isAggregate()); const AggregateHandle h0 = mVolumeData[volA].getAggregate(); const AggregateHandle h1 = mVolumeData[volB].getAggregate(); Aggregate* aggregate0 = getAggregateFromHandle(h0); Aggregate* aggregate1 = getAggregateFromHandle(h1); PX_ASSERT(aggregate0->mIndex==volA); 
PX_ASSERT(aggregate1->mIndex==volB); // Aggregate-aggregate filtering { const PxAggregateType::Enum agType0 = PxGetAggregateType(aggregate0->getFilterHint()); const PxAggregateType::Enum agType1 = PxGetAggregateType(aggregate1->getFilterHint()); const Bp::FilterType::Enum t0 = convertFilterType(agType0); const Bp::FilterType::Enum t1 = convertFilterType(agType1); if(!mFilters.mLUT[t0][t1]) return NULL; } return PX_NEW(PersistentAggregateAggregatePair)(aggregate0, aggregate1); // PT: TODO: use a pool or something } void AABBManager::processBPCreatedPair(const BroadPhasePair& pair) { PX_ASSERT(!mVolumeData[pair.mVolA].isAggregated()); PX_ASSERT(!mVolumeData[pair.mVolB].isAggregated()); const bool isSingleActorA = mVolumeData[pair.mVolA].isSingleActor(); const bool isSingleActorB = mVolumeData[pair.mVolB].isSingleActor(); if(isSingleActorA && isSingleActorB) { createOverlap(mCreatedOverlaps, mVolumeData.begin(), pair.mVolA, pair.mVolB); // PT: regular actor-actor pair return; } // PT: TODO: check if this is needed ShapeHandle volA = pair.mVolA; ShapeHandle volB = pair.mVolB; if(volB<volA) PxSwap(volA, volB); PersistentPairs* newPair; AggPairMap* pairMap; if(!isSingleActorA && !isSingleActorB) { pairMap = &mAggregateAggregatePairs; // PT: aggregate-aggregate pair newPair = createPersistentAggregateAggregatePair(volA, volB); } else { pairMap = &mActorAggregatePairs; // PT: actor-aggregate pair newPair = createPersistentActorAggregatePair(volA, volB); } if(newPair) { bool status = pairMap->insert(AggPair(volA, volB), newPair); PX_UNUSED(status); PX_ASSERT(status); updatePairs(*newPair); } } void AABBManager::processBPDeletedPair(const BroadPhasePair& pair) { PX_ASSERT(!mVolumeData[pair.mVolA].isAggregated()); PX_ASSERT(!mVolumeData[pair.mVolB].isAggregated()); const bool isSingleActorA = mVolumeData[pair.mVolA].isSingleActor(); const bool isSingleActorB = mVolumeData[pair.mVolB].isSingleActor(); if(isSingleActorA && isSingleActorB) { deleteOverlap(mDestroyedOverlaps, 
mVolumeData.begin(), pair.mVolA, pair.mVolB); // PT: regular actor-actor pair return; } // PT: TODO: check if this is needed ShapeHandle volA = pair.mVolA; ShapeHandle volB = pair.mVolB; if(volB<volA) PxSwap(volA, volB); AggPairMap* pairMap; if(!isSingleActorA && !isSingleActorB) pairMap = &mAggregateAggregatePairs; // PT: aggregate-aggregate pair else pairMap = &mActorAggregatePairs; // PT: actor-aggregate pair const AggPairMap::Entry* e = pairMap->find(AggPair(volA, volB)); if(e && e->second) { PersistentPairs* p = e->second; p->outputDeletedOverlaps(mDestroyedOverlaps, mVolumeData.begin()); p->mShouldBeDeleted = true; } } struct CreatedPairHandler { static PX_FORCE_INLINE void processPair(AABBManager& manager, const BroadPhasePair& pair) { manager.processBPCreatedPair(pair); } }; struct DeletedPairHandler { static PX_FORCE_INLINE void processPair(AABBManager& manager, const BroadPhasePair& pair) { manager.processBPDeletedPair(pair); } }; template<class FunctionT> static void processBPPairs(PxU32 nbPairs, const BroadPhasePair* pairs, AABBManager& manager) { // PT: TODO: figure out this ShapeHandle/BpHandle thing. Is it ok to use "BP_INVALID_BP_HANDLE" for a "ShapeHandle"? #if PX_DEBUG ShapeHandle previousA = BP_INVALID_BP_HANDLE; ShapeHandle previousB = BP_INVALID_BP_HANDLE; #endif while(nbPairs--) { PX_ASSERT(pairs->mVolA!=BP_INVALID_BP_HANDLE); PX_ASSERT(pairs->mVolB!=BP_INVALID_BP_HANDLE); #if PX_DEBUG // PT: TODO: why is that test needed now? GPU broadphase? PX_ASSERT(pairs->mVolA != previousA || pairs->mVolB != previousB); #endif //if(pairs->mVolA != previousA || pairs->mVolB != previousB) { #if PX_DEBUG previousA = pairs->mVolA; previousB = pairs->mVolB; #endif FunctionT::processPair(manager, *pairs); } pairs++; } } static void processAggregatePairs(AggPairMap& map, AABBManager& manager) { // PT: TODO: hmmm we have a list of dirty aggregates but we don't have a list of dirty pairs. 
// PT: not sure how the 3.4 trunk solves this but let's just iterate all pairs for now // PT: atm we reuse this loop to delete removed interactions // PT: TODO: in fact we could handle all the "lost pairs" stuff right there with extra aabb-abb tests // PT: TODO: replace with decent hash map - or remove the hashmap entirely and use a linear array PxArray<AggPair> removedEntries; for(AggPairMap::Iterator iter = map.getIterator(); !iter.done(); ++iter) { PersistentPairs* p = iter->second; if(p->update(manager)) { removedEntries.pushBack(iter->first); PX_DELETE(p); } } for(PxU32 i=0;i<removedEntries.size();i++) { bool status = map.erase(removedEntries[i]); PX_ASSERT(status); PX_UNUSED(status); } } struct PairData { PxArray<AABBOverlap>* mArray; PxU32 mStartIdx; PxU32 mCount; PairData() : mArray(NULL), mStartIdx(0), mCount(0) { } }; class ProcessAggPairsBase : public Cm::Task { public: static const PxU32 MaxPairs = 16; PairData mCreatedPairs[2]; PairData mDestroyedPairs[2]; ProcessAggPairsBase(PxU64 contextID) : Cm::Task(contextID) { } void setCache(BpCacheData& data) { for (PxU32 i = 0; i < 2; ++i) { mCreatedPairs[i].mArray = &data.mCreatedPairs[i]; mCreatedPairs[i].mStartIdx = data.mCreatedPairs[i].size(); mDestroyedPairs[i].mArray = &data.mDeletedPairs[i]; mDestroyedPairs[i].mStartIdx = data.mDeletedPairs[i].size(); } } void updateCounters() { for (PxU32 i = 0; i < 2; ++i) { mCreatedPairs[i].mCount = mCreatedPairs[i].mArray->size() - mCreatedPairs[i].mStartIdx; mDestroyedPairs[i].mCount = mDestroyedPairs[i].mArray->size() - mDestroyedPairs[i].mStartIdx; } } }; class ProcessAggPairsParallelTask : public ProcessAggPairsBase { public: PersistentPairs* mPersistentPairs[MaxPairs]; Bp::AggPair mAggPairs[MaxPairs]; PxU32 mNbPairs; AABBManager* mManager; AggPairMap* mMap; PxMutex* mMutex; const char* mName; ProcessAggPairsParallelTask(PxU64 contextID, PxMutex* mutex, AABBManager* manager, AggPairMap* map, const char* name) : ProcessAggPairsBase(contextID), mNbPairs(0), 
mManager(manager), mMap(map), mMutex(mutex), mName(name) { } void runInternal() { BpCacheData* data = mManager->getBpCacheData(); setCache(*data); PxInlineArray<AggPair, MaxPairs> removedEntries; for (PxU32 i = 0; i < mNbPairs; ++i) { if(mPersistentPairs[i]->update(*mManager, data)) { removedEntries.pushBack(mAggPairs[i]); PX_DELETE(mPersistentPairs[i]); } } updateCounters(); mManager->putBpCacheData(data); if (removedEntries.size()) { PxMutex::ScopedLock lock(*mMutex); for (PxU32 i = 0; i < removedEntries.size(); i++) { bool status = mMap->erase(removedEntries[i]); PX_ASSERT(status); PX_UNUSED(status); } } } virtual const char* getName() const { return mName; } }; class SortAggregateBoundsParallel : public Cm::Task { public: static const PxU32 MaxPairs = 16; Aggregate** mAggregates; PxU32 mNbAggs; SortAggregateBoundsParallel(PxU64 contextID, Aggregate** aggs, PxU32 nbAggs) : Cm::Task(contextID), mAggregates(aggs), mNbAggs(nbAggs) { } void runInternal() { PX_PROFILE_ZONE("SortBounds", mContextID); for (PxU32 i = 0; i < mNbAggs; i++) { Aggregate* aggregate = mAggregates[i]; aggregate->getSortedMinBounds(); } } virtual const char* getName() const { return "SortAggregateBoundsParallel"; } }; class ProcessSelfCollisionPairsParallel : public ProcessAggPairsBase { public: Aggregate** mAggregates; PxU32 mNbAggs; AABBManager* mManager; ProcessSelfCollisionPairsParallel(PxU64 contextID, Aggregate** aggs, PxU32 nbAggs, AABBManager* manager) : ProcessAggPairsBase(contextID), mAggregates(aggs), mNbAggs(nbAggs), mManager(manager) { } void runInternal() { BpCacheData* data = mManager->getBpCacheData(); setCache(*data); PX_PROFILE_ZONE("ProcessSelfCollisionPairs", mContextID); for (PxU32 i = 0; i < mNbAggs; i++) { Aggregate* aggregate = mAggregates[i]; // PT: TODO: don't add filtered ones to this class at all! 
if(aggregate->mSelfCollisionPairs && PxGetAggregateType(aggregate->getFilterHint())!=PxAggregateType::eSTATIC) mManager->updatePairs(*aggregate->mSelfCollisionPairs, data); } updateCounters(); mManager->putBpCacheData(data); } virtual const char* getName() const { return "ProcessSelfCollisionPairsParallel"; } }; static void processAggregatePairsParallel(AggPairMap& map, AABBManager& manager, Cm::FlushPool& flushPool, PxBaseTask* continuation, const char* taskName, PxArray<ProcessAggPairsBase*>& pairTasks) { // PT: TODO: hmmm we have a list of dirty aggregates but we don't have a list of dirty pairs. // PT: not sure how the 3.4 trunk solves this but let's just iterate all pairs for now // PT: atm we reuse this loop to delete removed interactions // PT: TODO: in fact we could handle all the "lost pairs" stuff right there with extra aabb-abb tests // PT: TODO: replace with decent hash map - or remove the hashmap entirely and use a linear array manager.mMapLock.lock(); ProcessAggPairsParallelTask* task = PX_PLACEMENT_NEW(flushPool.allocate(sizeof(ProcessAggPairsParallelTask)), ProcessAggPairsParallelTask)(0, &manager.mMapLock, &manager, &map, taskName); PxU32 startIdx = pairTasks.size(); for (AggPairMap::Iterator iter = map.getIterator(); !iter.done(); ++iter) { task->mAggPairs[task->mNbPairs] = iter->first; task->mPersistentPairs[task->mNbPairs++] = iter->second; if (task->mNbPairs == ProcessAggPairsParallelTask::MaxPairs) { pairTasks.pushBack(task); task->setContinuation(continuation); task = PX_PLACEMENT_NEW(flushPool.allocate(sizeof(ProcessAggPairsParallelTask)), ProcessAggPairsParallelTask)(0, &manager.mMapLock, &manager, &map, taskName); } } manager.mMapLock.unlock(); for (PxU32 i = startIdx; i < pairTasks.size(); ++i) { pairTasks[i]->removeReference(); } if (task->mNbPairs) { pairTasks.pushBack(task); task->setContinuation(continuation); task->removeReference(); //task->runInternal(); } } void AABBManager::postBroadPhase(PxBaseTask* continuation, Cm::FlushPool& 
flushPool) { PX_PROFILE_ZONE("AABBManager::postBroadPhase", mContextID); //KS - There is a continuation task for discrete broad phase, but not for CCD broad phase. PostBroadPhase for CCD broad phase runs in-line. //This probably should be revisited but it means that we can't use the parallel codepath within CCD. if (continuation) { mPostBroadPhase3.setContinuation(continuation); mPostBroadPhase2.setContinuation(&mPostBroadPhase3); } mTimestamp++; // PT: TODO: consider merging mCreatedOverlaps & mDestroyedOverlaps // PT: TODO: revisit memory management of mCreatedOverlaps & mDestroyedOverlaps // PT: this is now only used for CPU BPs so I think the fetchBroadPhaseResults call is useless here #ifdef REMOVED //KS - if we ran broad phase, fetch the results now if (mAddedHandles.size() != 0 || mUpdatedHandles.size() != 0 || mRemovedHandles.size() != 0) { PX_PROFILE_ZONE("AABBManager::postBroadPhase - fetchResults", mContextID); mBroadPhase.fetchBroadPhaseResults(); } #endif for(PxU32 i=0; i<ElementType::eCOUNT; i++) { mCreatedOverlaps[i].resetOrClear(); mDestroyedOverlaps[i].resetOrClear(); } { PX_PROFILE_ZONE("AABBManager::postBroadPhase - process deleted pairs", mContextID); PxU32 nbDeletedPairs; const BroadPhasePair* deletedPairs = mBroadPhase.getDeletedPairs(nbDeletedPairs); processBPPairs<DeletedPairHandler>(nbDeletedPairs, deletedPairs, *this); } { //If there is a continuation task, then this is not part of CCD, so we can trigger bounds to be recomputed in parallel before pair generation runs during //stage 2. 
if (continuation) { const PxU32 size = mDirtyAggregates.size(); for (PxU32 i = 0; i < size; i += SortAggregateBoundsParallel::MaxPairs) { const PxU32 nbToProcess = PxMin(size - i, SortAggregateBoundsParallel::MaxPairs); SortAggregateBoundsParallel* task = PX_PLACEMENT_NEW(flushPool.allocate(sizeof(SortAggregateBoundsParallel)), SortAggregateBoundsParallel) (mContextID, &mDirtyAggregates[i], nbToProcess); task->setContinuation(&mPostBroadPhase2); task->removeReference(); } } } if (continuation) { mPostBroadPhase2.setFlushPool(&flushPool); mPostBroadPhase3.removeReference(); mPostBroadPhase2.removeReference(); } else { postBpStage2(NULL, flushPool); postBpStage3(NULL); } } void AABBManager::reallocateChangedAABBMgActorHandleMap(const PxU32 size) { mChangedHandleMap.resizeAndClear(size); } void PostBroadPhaseStage2Task::runInternal() { mManager.postBpStage2(mCont, *mFlushPool); } void AABBManager::postBpStage2(PxBaseTask* continuation, Cm::FlushPool& flushPool) { { const PxU32 size = mDirtyAggregates.size(); for (PxU32 i = 0; i < size; i += ProcessSelfCollisionPairsParallel::MaxPairs) { const PxU32 nbToProcess = PxMin(size - i, ProcessSelfCollisionPairsParallel::MaxPairs); ProcessSelfCollisionPairsParallel* task = PX_PLACEMENT_NEW(flushPool.allocate(sizeof(ProcessSelfCollisionPairsParallel)), ProcessSelfCollisionPairsParallel) (mContextID, &mDirtyAggregates[i], nbToProcess, this); if (continuation) { task->setContinuation(continuation); task->removeReference(); } else task->runInternal(); mAggPairTasks.pushBack(task); } } { if (continuation) processAggregatePairsParallel(mAggregateAggregatePairs, *this, flushPool, continuation, "AggAggPairs", mAggPairTasks); else processAggregatePairs(mAggregateAggregatePairs, *this); } { if (continuation) processAggregatePairsParallel(mActorAggregatePairs, *this, flushPool, continuation, "AggActorPairs", mAggPairTasks); else processAggregatePairs(mActorAggregatePairs, *this); } } void AABBManager::postBpStage3(PxBaseTask*) { { 
PX_PROFILE_ZONE("SimpleAABBManager::postBroadPhase - aggregate self-collisions", mContextID); const PxU32 size = mDirtyAggregates.size(); for (PxU32 i = 0; i < size; i++) { Aggregate* aggregate = mDirtyAggregates[i]; aggregate->resetDirtyState(); } mDirtyAggregates.resetOrClear(); } { PX_PROFILE_ZONE("SimpleAABBManager::postBroadPhase - append pairs", mContextID); for (PxU32 a = 0; a < mAggPairTasks.size(); ++a) { ProcessAggPairsBase* task = mAggPairTasks[a]; for (PxU32 t = 0; t < 2; t++) { for (PxU32 i = 0, startIdx = task->mCreatedPairs[t].mStartIdx; i < task->mCreatedPairs[t].mCount; ++i) { mCreatedOverlaps[t].pushBack((*task->mCreatedPairs[t].mArray)[i + startIdx]); } for (PxU32 i = 0, startIdx = task->mDestroyedPairs[t].mStartIdx; i < task->mDestroyedPairs[t].mCount; ++i) { mDestroyedOverlaps[t].pushBack((*task->mDestroyedPairs[t].mArray)[i + startIdx]); } } } mAggPairTasks.forceSize_Unsafe(0); resetBpCacheData(); } { PX_PROFILE_ZONE("AABBManager::postBroadPhase - process created pairs", mContextID); PxU32 nbCreatedPairs; const BroadPhasePair* createdPairs = mBroadPhase.getCreatedPairs(nbCreatedPairs); processBPPairs<CreatedPairHandler>(nbCreatedPairs, createdPairs, *this); } // PT: TODO: revisit this // Filter out pairs in mDestroyedOverlaps that already exist in mCreatedOverlaps. This should be done better using bitmaps // and some imposed ordering on previous operations. Later. // We could also have a dedicated function "reinsertBroadPhase()", which would preserve the existing interactions at Sc-level. 
if(1) { PX_PROFILE_ZONE("AABBManager::postBroadPhase - post-process", mContextID); PxU32 totalCreatedOverlaps = 0; for (PxU32 idx=0; idx<ElementType::eCOUNT; idx++) totalCreatedOverlaps += mCreatedOverlaps[idx].size(); mCreatedPairsTmp.clear(); mCreatedPairsTmp.reserve(totalCreatedOverlaps); // PT: so this is where we convert the userData IDs to pointers // I don't remember why we need the mCreatedPairs hashset / this filtering pass // PT: TODO: why do we need to convert to ptrs at all anyway? for(PxU32 idx=0; idx<ElementType::eCOUNT; idx++) { { const PxU32 nbDestroyedOverlaps = mDestroyedOverlaps[idx].size(); PxU32 size = mCreatedOverlaps[idx].size(); AABBOverlap* overlaps = mCreatedOverlaps[idx].begin(); while(size--) { const PxU32 id0 = PxU32(size_t(overlaps->mUserData0)); const PxU32 id1 = PxU32(size_t(overlaps->mUserData1)); overlaps->mUserData0 = mVolumeData[id0].getUserData(); overlaps->mUserData1 = mVolumeData[id1].getUserData(); overlaps++; if(nbDestroyedOverlaps) mCreatedPairsTmp.insert(Pair(id0, id1)); } } { AABBOverlap* overlapsSrc = mDestroyedOverlaps[idx].begin(); AABBOverlap* overlapsDst = overlapsSrc; PxU32 size = mDestroyedOverlaps[idx].size(); PxU32 newSize = 0; while(size--) { const PxU32 id0 = PxU32(size_t(overlapsSrc->mUserData0)); const PxU32 id1 = PxU32(size_t(overlapsSrc->mUserData1)); overlapsSrc++; if(!mCreatedPairsTmp.contains(Pair(id0, id1))) { overlapsDst->mUserData0 = mVolumeData[id0].getUserData(); overlapsDst->mUserData1 = mVolumeData[id1].getUserData(); overlapsDst++; newSize++; } } mDestroyedOverlaps[idx].forceSize_Unsafe(newSize); } } } // Handle out-of-bounds objects { PX_PROFILE_ZONE("AABBManager::postBroadPhase - out-of-bounds", mContextID); PxU32 nbObjects = mBroadPhase.getNbOutOfBoundsObjects(); const PxU32* objects = mBroadPhase.getOutOfBoundsObjects(); while(nbObjects--) { const PxU32 index = *objects++; if(!mRemovedHandleMap.test(index)) { if(mVolumeData[index].isSingleActor()) 
mOutOfBoundsObjects.pushBack(mVolumeData[index].getUserData()); else { PX_ASSERT(mVolumeData[index].isAggregate()); mOutOfBoundsAggregates.pushBack(mVolumeData[index].getUserData()); } } } } { PX_PROFILE_ZONE("AABBManager::postBroadPhase - clear", mContextID); mAddedHandleMap.clear(); mRemovedHandleMap.clear(); } } BpCacheData* AABBManager::getBpCacheData() { BpCacheData* rv = static_cast<BpCacheData*>(mBpThreadContextPool.pop()); if (rv == NULL) { rv = PX_PLACEMENT_NEW(PX_ALLOC(sizeof(BpCacheData), "BpCacheData"), BpCacheData)(); } return rv; } void AABBManager::putBpCacheData(BpCacheData* data) { mBpThreadContextPool.push(*data); } void AABBManager::resetBpCacheData() { PxInlineArray<BpCacheData*, 16> bpCache; BpCacheData* entry = static_cast<BpCacheData*>(mBpThreadContextPool.pop()); while (entry) { entry->reset(); bpCache.pushBack(entry); entry = static_cast<BpCacheData*>(mBpThreadContextPool.pop()); } //Now reinsert back into queue... for (PxU32 i = 0; i < bpCache.size(); ++i) { mBpThreadContextPool.push(*bpCache[i]); } } bool AABBManager::getOutOfBoundsObjects(OutOfBoundsData& data) { data.mNbOutOfBoundsObjects = mOutOfBoundsObjects.size(); data.mOutOfBoundsObjects = mOutOfBoundsObjects.begin(); data.mNbOutOfBoundsAggregates = mOutOfBoundsAggregates.size(); data.mOutOfBoundsAggregates = mOutOfBoundsAggregates.begin(); return data.mNbOutOfBoundsObjects || data.mNbOutOfBoundsAggregates; } void AABBManager::clearOutOfBoundsObjects() { mOutOfBoundsObjects.clear(); mOutOfBoundsAggregates.clear(); } // PT: disabled this by default, since it bypasses all per-shape/per-actor visualization flags //static const bool gVisualizeAggregateElems = false; void AABBManager::visualize(PxRenderOutput& out) { const PxTransform idt = PxTransform(PxIdentity); out << idt; PxBitMap bitmap; buildFreeBitmap(bitmap, mFirstFreeAggregate, mAggregates); const PxU32 N = mAggregates.size(); for(PxU32 i=0;i<N;i++) { if(bitmap.test(i)) continue; Aggregate* aggregate = mAggregates[i]; 
if(aggregate->getNbAggregated()) { out << PxU32(PxDebugColor::eARGB_GREEN); const PxBounds3& b = mBoundsArray.getBounds(aggregate->mIndex); renderOutputDebugBox(out, b); } } /* const PxU32 N = mAggregateManager.getAggregatesCapacity(); for(PxU32 i=0;i<N;i++) { const Aggregate* aggregate = mAggregateManager.getAggregate(i); if(aggregate->nbElems) { if(!mAggregatesUpdated.isInList(BpHandle(i))) out << PxU32(PxDebugColor::eARGB_GREEN); else out << PxU32(PxDebugColor::eARGB_RED); PxBounds3 decoded; const IntegerAABB& iaabb = mBPElems.getAABB(aggregate->bpElemId); iaabb.decode(decoded); out << DebugBox(decoded, true); if(gVisualizeAggregateElems) { PxU32 elem = aggregate->elemHeadID; while(BP_INVALID_BP_HANDLE!=elem) { out << PxU32(PxDebugColor::eARGB_CYAN); const IntegerAABB elemBounds = mAggregateElems.getAABB(elem); elemBounds.decode(decoded); out << DebugBox(decoded, true); elem = mAggregateElems.getNextId(elem); } } } }*/ } } //namespace Bp } //namespace physx
76,745
C++
30.543773
235
0.719917
NVIDIA-Omniverse/PhysX/physx/source/lowlevelaabb/include/BpFiltering.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef BP_FILTERING_H #define BP_FILTERING_H #include "PxvConfig.h" #include "foundation/PxAssert.h" namespace physx { namespace Bp { #define BP_USE_AGGREGATE_GROUP_TAIL #define BP_FILTERING_TYPE_SHIFT_BIT 3 #define BP_FILTERING_TYPE_MASK 7 /* \brief AABBManager volumes with the same filter group value are guaranteed never to generate an overlap pair. \note To ensure that static pairs never overlap, add static shapes with eSTATICS. The value eDYNAMICS_BASE provides a minimum recommended group value for dynamic shapes. If dynamics shapes are assigned group values greater than or equal to eDYNAMICS_BASE then they are allowed to generate broadphase overlaps with statics, and other dynamic shapes provided they have different group values. @see AABBManager::createVolume */ struct FilterGroup { enum Enum { eSTATICS = 0, eDYNAMICS_BASE = 1, #ifdef BP_USE_AGGREGATE_GROUP_TAIL eAGGREGATE_BASE = 0xfffffffe, #endif eINVALID = 0xffffffff }; }; struct FilterType { enum Enum { STATIC = 0, KINEMATIC = 1, DYNAMIC = 2, AGGREGATE = 3, SOFTBODY = 4, PARTICLESYSTEM = 5, FEMCLOTH = 6, HAIRSYSTEM = 7, COUNT = 8 }; }; PX_FORCE_INLINE Bp::FilterGroup::Enum getFilterGroup_Statics() { return Bp::FilterGroup::eSTATICS; } PX_FORCE_INLINE Bp::FilterGroup::Enum getFilterGroup_Dynamics(PxU32 rigidId, bool isKinematic) { const PxU32 group = rigidId + Bp::FilterGroup::eDYNAMICS_BASE; const PxU32 type = isKinematic ? FilterType::KINEMATIC : FilterType::DYNAMIC; return Bp::FilterGroup::Enum((group<< BP_FILTERING_TYPE_SHIFT_BIT)|type); } PX_FORCE_INLINE Bp::FilterGroup::Enum getFilterGroup(bool isStatic, PxU32 rigidId, bool isKinematic) { return isStatic ? 
getFilterGroup_Statics() : getFilterGroup_Dynamics(rigidId, isKinematic); } PX_FORCE_INLINE bool groupFiltering(const Bp::FilterGroup::Enum group0, const Bp::FilterGroup::Enum group1, const bool* PX_RESTRICT lut) { /* const int g0 = group0 & ~3; const int g1 = group1 & ~3; if(g0==g1) return false;*/ if(group0==group1) { PX_ASSERT((group0 & ~BP_FILTERING_TYPE_MASK)==(group1 & ~BP_FILTERING_TYPE_MASK)); return false; } const int type0 = group0 & BP_FILTERING_TYPE_MASK; const int type1 = group1 & BP_FILTERING_TYPE_MASK; return lut[type0*Bp::FilterType::COUNT+type1]; } class BpFilter { public: BpFilter(bool discardKineKine, bool discardStaticKine); ~BpFilter(); PX_FORCE_INLINE const bool* getLUT() const { return &mLUT[0][0]; } bool mLUT[Bp::FilterType::COUNT][Bp::FilterType::COUNT]; }; } } #endif
4,338
C
31.871212
137
0.729599
NVIDIA-Omniverse/PhysX/physx/source/lowlevelaabb/include/BpAABBManagerTasks.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef BP_AABB_MANAGER_TASKS_H #define BP_AABB_MANAGER_TASKS_H #include "foundation/PxUserAllocated.h" #include "CmTask.h" namespace physx { class PxcScratchAllocator; namespace Bp { class AABBManager; class Aggregate; class AggregateBoundsComputationTask : public Cm::Task, public PxUserAllocated { public: AggregateBoundsComputationTask(PxU64 contextId) : Cm::Task (contextId), mManager (NULL), mStart (0), mNbToGo (0), mAggregates (NULL) {} ~AggregateBoundsComputationTask() {} virtual const char* getName() const { return "AggregateBoundsComputationTask"; } virtual void runInternal(); void Init(AABBManager* manager, PxU32 start, PxU32 nb, Aggregate** aggregates) { mManager = manager; mStart = start; mNbToGo = nb; mAggregates = aggregates; } private: AABBManager* mManager; PxU32 mStart; PxU32 mNbToGo; Aggregate** mAggregates; AggregateBoundsComputationTask& operator=(const AggregateBoundsComputationTask&); }; class PreBpUpdateTask : public Cm::Task, public PxUserAllocated { public: PreBpUpdateTask(PxU64 contextId) : Cm::Task(contextId), mManager(NULL), mNumCpuTasks(0) {} ~PreBpUpdateTask() {} virtual const char* getName() const { return "PreBpUpdateTask"; } virtual void runInternal(); void Init(AABBManager* manager, PxU32 numCpuTasks) { mManager = manager; mNumCpuTasks = numCpuTasks; } private: AABBManager * mManager; PxU32 mNumCpuTasks; PreBpUpdateTask& operator=(const PreBpUpdateTask&); }; } } //namespace physx #endif // BP_AABB_MANAGER_TASKS_H
3,449
C
32.495145
86
0.709191
NVIDIA-Omniverse/PhysX/physx/source/lowlevelaabb/include/BpBroadPhase.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef BP_BROADPHASE_H #define BP_BROADPHASE_H #include "foundation/PxUserAllocated.h" #include "PxBroadPhase.h" #include "BpBroadPhaseUpdate.h" namespace physx { class PxcScratchAllocator; class PxBaseTask; namespace Bp { class BroadPhaseUpdateData; /** \brief Base broad phase class. Functions only relevant to MBP. 
*/ class BroadPhaseBase : public PxBroadPhaseRegions, public PxUserAllocated { public: BroadPhaseBase() {} virtual ~BroadPhaseBase() {} /** \brief Gets broad-phase caps. \param caps [out] Broad-phase caps */ virtual void getCaps(PxBroadPhaseCaps& caps) const { caps.mMaxNbRegions = 0; } // PxBroadPhaseRegions virtual PxU32 getNbRegions() const PX_OVERRIDE { return 0; } virtual PxU32 getRegions(PxBroadPhaseRegionInfo*, PxU32, PxU32) const PX_OVERRIDE { return 0; } virtual PxU32 addRegion(const PxBroadPhaseRegion&, bool, const PxBounds3*, const PxReal*) PX_OVERRIDE { return 0xffffffff;} virtual bool removeRegion(PxU32) PX_OVERRIDE { return false; } virtual PxU32 getNbOutOfBoundsObjects() const PX_OVERRIDE { return 0; } virtual const PxU32* getOutOfBoundsObjects() const PX_OVERRIDE { return NULL; } //~PxBroadPhaseRegions }; /* \brief Structure used to report created and deleted broadphase pairs \note The indices mVolA and mVolB correspond to the bounds indices BroadPhaseUpdateData::mCreated used by BroadPhase::update @see BroadPhase::getCreatedPairs, BroadPhase::getDeletedPairs */ struct BroadPhasePair { BroadPhasePair(ShapeHandle volA, ShapeHandle volB) : mVolA (PxMin(volA, volB)), mVolB (PxMax(volA, volB)) { } BroadPhasePair() : mVolA (BP_INVALID_BP_HANDLE), mVolB (BP_INVALID_BP_HANDLE) { } ShapeHandle mVolA; // NB: mVolA < mVolB ShapeHandle mVolB; }; class BroadPhase : public BroadPhaseBase { public: /** \brief Instantiate a BroadPhase instance. \param[in] bpType - the bp type (either mbp or sap). This is typically specified in PxSceneDesc. \param[in] maxNbRegions is the expected maximum number of broad-phase regions. \param[in] maxNbBroadPhaseOverlaps is the expected maximum number of broad-phase overlaps. \param[in] maxNbStaticShapes is the expected maximum number of static shapes. \param[in] maxNbDynamicShapes is the expected maximum number of dynamic shapes. 
\param[in] contextID is the context ID parameter sent to the profiler \return The instantiated BroadPhase. \note maxNbRegions is only used if mbp is the chosen broadphase (PxBroadPhaseType::eMBP) \note maxNbRegions, maxNbBroadPhaseOverlaps, maxNbStaticShapes and maxNbDynamicShapes are typically specified in PxSceneLimits */ static BroadPhase* create( const PxBroadPhaseType::Enum bpType, const PxU32 maxNbRegions, const PxU32 maxNbBroadPhaseOverlaps, const PxU32 maxNbStaticShapes, const PxU32 maxNbDynamicShapes, PxU64 contextID); virtual PxBroadPhaseType::Enum getType() const = 0; /** \brief Shutdown of the broadphase. */ virtual void release() = 0; /** \brief Updates the broadphase and computes the lists of created/deleted pairs. \param[in] scratchAllocator - a PxcScratchAllocator instance used for temporary memory allocations. This must be non-null. \param[in] updateData a description of changes to the collection of aabbs since the last broadphase update. The changes detail the indices of the bounds that have been added/updated/removed as well as an array of all bound coordinates and an array of group ids used to filter pairs with the same id. @see BroadPhaseUpdateData \param[in] continuation the task that is in the queue to be executed immediately after the broadphase has completed its update. NULL is not supported. \note In PX_CHECKED and PX_DEBUG build configurations illegal input data (that does not conform to the BroadPhaseUpdateData specifications) triggers a special code-path that entirely bypasses the broadphase and issues a warning message to the error stream. No guarantees can be made about the correctness/consistency of broadphase behavior with illegal input data in PX_RELEASE and PX_PROFILE configs because validity checks are not active in these builds. */ virtual void update(PxcScratchAllocator* scratchAllocator, const BroadPhaseUpdateData& updateData, physx::PxBaseTask* continuation) = 0; /** \brief prepare broad phase data. 
*/ virtual void preBroadPhase(const Bp::BroadPhaseUpdateData& updateData) = 0; /** \brief Fetch the results of any asynchronous broad phase work. */ virtual void fetchBroadPhaseResults() = 0; /* \brief Get created pairs. Note that each overlap pair is reported only on the frame when the overlap first occurs. The overlap persists until the pair appears in the list of deleted pairs or either of the bounds in the pair is removed from the broadphase. A created overlap must involve at least one of the bounds of the overlap pair appearing in either the created or updated list. It is impossible for the same pair to appear simultaneously in the list of created and deleted overlap pairs. An overlap is defined as a pair of bounds that overlap on all three axes; that is when maxA > minB and maxB > minA for all three axes. \param nbCreatedPairs [out] The number of created aabb overlap pairs computed in the execution of update() that has just completed. \return The array of created aabb overlap pairs computed in the execution of update() that has just completed. */ virtual const BroadPhasePair* getCreatedPairs(PxU32& nbCreatedPairs) const = 0; /** \brief Get deleted pairs. Note that a deleted pair can only be reported if that pair has already appeared in the list of created pairs in an earlier update. A lost overlap occurs when a pair of bounds previously overlapped on all three axes but have now separated on at least one axis. A lost overlap must involve at least one of the bounds of the lost overlap pair appearing in the updated list. Lost overlaps arising from removal of bounds from the broadphase do not appear in the list of deleted pairs. It is impossible for the same pair to appear simultaneously in the list of created and deleted pairs. \param nbDeletedPairs [out] The number of deleted overlap pairs computed in the execution of update() that has just completed. \return The array of deleted overlap pairs computed in the execution of update() that has just completed. 
*/ virtual const BroadPhasePair* getDeletedPairs(PxU32& nbDeletedPairs) const = 0; /** \brief After the broadphase has completed its update() function and the created/deleted pairs have been queried with getCreatedPairs/getDeletedPairs it is desirable to free any memory that was temporarily acquired for the update but is is no longer required post-update. This can be achieved with the function freeBuffers(). */ virtual void freeBuffers() = 0; /** \brief Adjust internal structures after all bounds have been adjusted due to a scene origin shift. */ virtual void shiftOrigin(const PxVec3& shift, const PxBounds3* boundsArray, const PxReal* contactDistances) = 0; #if PX_CHECKED /** \brief Test that the created/updated/removed lists obey the rules that 1. object ids can only feature in the created list if they have never been previously added or if they were previously removed. 2. object ids can only be added to the updated list if they have been previously added without being removed. 3. objects ids can only be added to the removed list if they have been previously added without being removed. */ virtual bool isValid(const BroadPhaseUpdateData& updateData) const = 0; #endif }; } //namespace Bp } //namespace physx #endif //BP_BROADPHASE_H
9,389
C
42.674418
151
0.768985
NVIDIA-Omniverse/PhysX/physx/source/lowlevelaabb/include/BpAABBManagerBase.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef BP_AABBMANAGER_BASE_H #define BP_AABBMANAGER_BASE_H #include "foundation/PxPinnedArray.h" #include "foundation/PxBitMap.h" #include "foundation/PxSList.h" #include "foundation/PxBitUtils.h" #include "BpVolumeData.h" #include "BpBroadPhaseUpdate.h" #include "GuBounds.h" #include "PxFiltering.h" #include "PxAggregate.h" namespace physx { class PxcScratchAllocator; class PxRenderOutput; class PxBaseTask; namespace Cm { class FlushPool; } namespace Bp { typedef PxU32 BoundsIndex; //typedef PxU32 ActorHandle; /** \brief Changes to the configuration of overlap pairs are reported as void* pairs. \note Each void* in the pair corresponds to the void* passed to AABBManager::createVolume. @see AABBManager::createVolume, AABBManager::getCreatedOverlaps, AABBManager::getDestroyedOverlaps */ struct AABBOverlap { PX_FORCE_INLINE AABBOverlap() {} PX_FORCE_INLINE AABBOverlap(void* userData0, void* userData1/*, ActorHandle pairHandle*/) : mUserData0(userData0), mUserData1(userData1)/*, mPairHandle(pairHandle)*/ { // PT: TODO: why is this forbidden? PX_ASSERT(userData0 != userData1); } // PT: these will eventually be the userData pointers passed to addBounds(), i.e. Sc::ElementSim pointers in PhysX. This may not be // necessary at all, since in the current design the bounds indices themselves come from BP clients (they're not handles managed by the BP). // So there's a 1:1 mapping between bounds-indices (which are effectively edlement IDs in PhysX) and the userData pointers (Sc::ElementSim). // Thus we could just return bounds indices directly to users - at least in the context of PhysX, maybe the standalone BP is different. void* mUserData0; void* mUserData1; // PT: TODO: not sure what happened there but mPairUserData is not used inside the BP itself so we need to revisit this. 
/* union { ActorHandle mPairHandle; //For created pairs, this is the index into the pair in the pair manager void* mUserData; //For deleted pairs, this is the user data written by the application to the pair };*/ void* mPairUserData; //For deleted pairs, this is the user data written by the application to the pair }; struct BpCacheData : public PxSListEntry { PxArray<AABBOverlap> mCreatedPairs[2]; PxArray<AABBOverlap> mDeletedPairs[2]; void reset() { mCreatedPairs[0].resizeUninitialized(0); mCreatedPairs[1].resizeUninitialized(0); mDeletedPairs[0].resizeUninitialized(0); mDeletedPairs[1].resizeUninitialized(0); } }; typedef PxPinnedArray<Bp::FilterGroup::Enum> GroupsArrayPinned; typedef PxPinnedArray<VolumeData> VolumeDataArrayPinned; typedef PxPinnedArray<ShapeHandle> ShapeHandleArrayPinned; class BoundsArray : public PxUserAllocated { PX_NOCOPY(BoundsArray) public: BoundsArray(PxVirtualAllocator& allocator) : mBounds(allocator) {} PX_FORCE_INLINE void initEntry(PxU32 index) { index++; // PT: always pretend we need one more entry, to make sure reading the last used entry will be SIMD-safe. 
const PxU32 oldCapacity = mBounds.capacity(); if (index >= oldCapacity) { const PxU32 newCapacity = PxNextPowerOfTwo(index); mBounds.reserve(newCapacity); mBounds.forceSize_Unsafe(newCapacity); } } PX_FORCE_INLINE void updateBounds(const PxTransform& transform, const PxGeometry& geom, PxU32 index) { Gu::computeBounds(mBounds[index], geom, transform, 0.0f, 1.0f); mHasAnythingChanged = true; } PX_FORCE_INLINE void setBounds(const PxBounds3& bounds, PxU32 index) { // PX_CHECK_AND_RETURN(bounds.isValid() && !bounds.isEmpty(), "BoundsArray::setBounds - illegal bounds\n"); mBounds[index] = bounds; mHasAnythingChanged = true; } PX_FORCE_INLINE const PxBounds3* begin() const { return mBounds.begin(); } PX_FORCE_INLINE PxBounds3* begin() { return mBounds.begin(); } PX_FORCE_INLINE PxBoundsArrayPinned& getBounds() { return mBounds; } PX_FORCE_INLINE const PxBounds3& getBounds(PxU32 index) const { return mBounds[index]; } PX_FORCE_INLINE PxU32 getCapacity() const { return mBounds.size(); } PX_FORCE_INLINE bool hasChanged() const { return mHasAnythingChanged; } PX_FORCE_INLINE void resetChangedState() { mHasAnythingChanged = false; } PX_FORCE_INLINE void setChangedState() { mHasAnythingChanged = true; } void shiftOrigin(const PxVec3& shift) { // we shift some potential NaNs here because we don't know what's active, but should be harmless const PxU32 nbBounds = mBounds.size(); for(PxU32 i=0; i<nbBounds; i++) { mBounds[i].minimum -= shift; mBounds[i].maximum -= shift; } mHasAnythingChanged = true; } private: PxBoundsArrayPinned mBounds; bool mHasAnythingChanged; }; /** \brief A structure responsible for: * storing an aabb representation for each active shape in the related scene * managing the creation/removal of aabb representations when their related shapes are created/removed * updating all aabbs that require an update due to modification of shape geometry or transform * updating the aabb of all aggregates from the union of the aabbs of all shapes that make up each aggregate 
* computing and reporting the incremental changes to the set of overlapping aabb pairs */ class AABBManagerBase : public PxUserAllocated { PX_NOCOPY(AABBManagerBase) public: AABBManagerBase(BroadPhase& bp, BoundsArray& boundsArray, PxFloatArrayPinned& contactDistance, PxU32 maxNbAggregates, PxU32 maxNbShapes, PxVirtualAllocator& allocator, PxU64 contextID, PxPairFilteringMode::Enum kineKineFilteringMode, PxPairFilteringMode::Enum staticKineFilteringMode); virtual ~AABBManagerBase() {} virtual void destroy() = 0; virtual AggregateHandle createAggregate(BoundsIndex index, Bp::FilterGroup::Enum group, void* userData, PxU32 maxNumShapes, PxAggregateFilterHint filterHint) = 0; virtual bool destroyAggregate(BoundsIndex& index, Bp::FilterGroup::Enum& group, AggregateHandle aggregateHandle) = 0; virtual bool addBounds(BoundsIndex index, PxReal contactDistance, Bp::FilterGroup::Enum group, void* userdata, AggregateHandle aggregateHandle, ElementType::Enum volumeType) = 0; virtual bool removeBounds(BoundsIndex index) = 0; void reserveSpaceForBounds(BoundsIndex index); PX_FORCE_INLINE PxIntBool isMarkedForRemove(BoundsIndex index) const { return mRemovedHandleMap.boundedTest(index); } // PX_FORCE_INLINE PxIntBool isMarkedForAdd(BoundsIndex index) const { return mAddedHandleMap.boundedTest(index); } PX_FORCE_INLINE BroadPhase* getBroadPhase() const { return &mBroadPhase; } PX_FORCE_INLINE BoundsArray& getBoundsArray() { return mBoundsArray; } PX_FORCE_INLINE PxU32 getNbActiveAggregates() const { return mNbAggregates; } PX_FORCE_INLINE const float* getContactDistances() const { return mContactDistance.begin(); } PX_FORCE_INLINE PxBitMapPinned& getChangedAABBMgActorHandleMap() { return mChangedHandleMap; } PX_FORCE_INLINE void* getUserData(const BoundsIndex index) const { return (index<mVolumeData.size()) ? 
mVolumeData[index].getUserData() : NULL; } void setContactDistance(BoundsIndex handle, PxReal offset) { // PT: this works even for aggregated shapes, since the corresponding bit will also be set in the 'updated' map. mContactDistance.begin()[handle] = offset; setPersistentStateChanged(); mChangedHandleMap.growAndSet(handle); } void setBPGroup(BoundsIndex index, Bp::FilterGroup::Enum group) { PX_ASSERT((index + 1) < mVolumeData.size()); PX_ASSERT(group != Bp::FilterGroup::eINVALID); // PT: we use group == Bp::FilterGroup::eINVALID to mark removed/invalid entries mGroups[index] = group; } virtual void updateBPFirstPass(PxU32 numCpuTasks, Cm::FlushPool& flushPool, bool hasContactDistanceUpdated, PxBaseTask* continuation) = 0; virtual void updateBPSecondPass(PxcScratchAllocator* scratchAllocator, PxBaseTask* continuation) = 0; virtual void postBroadPhase(PxBaseTask*, Cm::FlushPool& flushPool) = 0; virtual void reallocateChangedAABBMgActorHandleMap(const PxU32 size) = 0; AABBOverlap* getCreatedOverlaps(ElementType::Enum type, PxU32& count) { PX_ASSERT(type < ElementType::eCOUNT); count = mCreatedOverlaps[type].size(); return mCreatedOverlaps[type].begin(); } AABBOverlap* getDestroyedOverlaps(ElementType::Enum type, PxU32& count) { PX_ASSERT(type < ElementType::eCOUNT); count = mDestroyedOverlaps[type].size(); return mDestroyedOverlaps[type].begin(); } void freeBuffers(); struct OutOfBoundsData { PxU32 mNbOutOfBoundsObjects; PxU32 mNbOutOfBoundsAggregates; void** mOutOfBoundsObjects; void** mOutOfBoundsAggregates; }; virtual bool getOutOfBoundsObjects(OutOfBoundsData&) { return false; } virtual void clearOutOfBoundsObjects() {} void shiftOrigin(const PxVec3& shift); virtual void visualize(PxRenderOutput& out) = 0; virtual void releaseDeferredAggregateIds() = 0; virtual void setGPUStateChanged() {} virtual void setPersistentStateChanged() {} protected: void reserveShapeSpace(PxU32 nbShapes); // PT: we have bitmaps here probably to quickly handle added/removed objects 
during same frame. // PT: TODO: consider replacing with plain arrays (easier to parse, already existing below, etc) PxBitMapPinned mAddedHandleMap; // PT: indexed by BoundsIndex PxBitMapPinned mRemovedHandleMap; // PT: indexed by BoundsIndex PxBitMapPinned mChangedHandleMap; //Returns true if the bounds was pending insert, false otherwise PX_FORCE_INLINE bool removeBPEntry(BoundsIndex index) // PT: only for objects passed to the BP { if (mAddedHandleMap.test(index)) // PT: if object had been added this frame... { mAddedHandleMap.reset(index); // PT: ...then simply revert the previous operation locally (it hasn't been passed to the BP yet). return true; } else mRemovedHandleMap.set(index); // PT: else we need to remove it from the BP return false; } PX_FORCE_INLINE void addBPEntry(BoundsIndex index) { if (mRemovedHandleMap.test(index)) mRemovedHandleMap.reset(index); else mAddedHandleMap.set(index); } //ML: we create mGroups and mContactDistance in the AABBManager constructor. PxArray will take PxVirtualAllocator as a parameter. Therefore, if GPU BP is using, //we will passed a pinned host memory allocator, otherwise, we will just pass a normal allocator. GroupsArrayPinned mGroups; // NOTE: we stick Bp::FilterGroup::eINVALID in this slot to indicate that the entry is invalid (removed or never inserted.) PxFloatArrayPinned& mContactDistance; VolumeDataArrayPinned mVolumeData; BpFilter mFilters; PX_FORCE_INLINE void initEntry(BoundsIndex index, PxReal contactDistance, Bp::FilterGroup::Enum group, void* userData) { if ((index + 1) >= mVolumeData.size()) reserveShapeSpace(index + 1); // PT: TODO: why is this needed at all? Why aren't size() and capacity() enough? 
mUsedSize = PxMax(index + 1, mUsedSize); PX_ASSERT(group != Bp::FilterGroup::eINVALID); // PT: we use group == Bp::FilterGroup::eINVALID to mark removed/invalid entries mGroups[index] = group; mContactDistance.begin()[index] = contactDistance; mVolumeData[index].setUserData(userData); } PX_FORCE_INLINE void initEntry(BoundsIndex index, PxReal contactDistance, Bp::FilterGroup::Enum group, void* userData, ElementType::Enum volumeType) { initEntry(index, contactDistance, group, userData); mVolumeData[index].setVolumeType(volumeType); // PT: must be done after setUserData } PX_FORCE_INLINE void resetEntry(BoundsIndex index) { mGroups[index] = Bp::FilterGroup::eINVALID; mContactDistance.begin()[index] = 0.0f; mVolumeData[index].reset(); } // PT: TODO: remove confusion between BoundsIndex and ShapeHandle here! ShapeHandleArrayPinned mAddedHandles; ShapeHandleArrayPinned mUpdatedHandles; // PT: TODO: only on CPU ShapeHandleArrayPinned mRemovedHandles; BroadPhase& mBroadPhase; BoundsArray& mBoundsArray; PxArray<AABBOverlap> mCreatedOverlaps[ElementType::eCOUNT]; PxArray<AABBOverlap> mDestroyedOverlaps[ElementType::eCOUNT]; PxU32 mUsedSize; // highest used value + 1 PxU32 mNbAggregates; #ifdef BP_USE_AGGREGATE_GROUP_TAIL // PT: TODO: even in the 3.4 trunk this stuff is a clumsy mess: groups are "BpHandle" suddenly passed // to BroadPhaseUpdateData as "ShapeHandle". //Free aggregate group ids. 
PxU32 mAggregateGroupTide; PxArray<Bp::FilterGroup::Enum> mFreeAggregateGroups; // PT: TODO: remove this useless array #endif PxU64 mContextID; bool mOriginShifted; #ifdef BP_USE_AGGREGATE_GROUP_TAIL PX_FORCE_INLINE void releaseAggregateGroup(const Bp::FilterGroup::Enum group) { PX_ASSERT(group != Bp::FilterGroup::eINVALID); mFreeAggregateGroups.pushBack(group); } PX_FORCE_INLINE Bp::FilterGroup::Enum getAggregateGroup() { PxU32 id; if (mFreeAggregateGroups.size()) id = mFreeAggregateGroups.popBack(); else { id = mAggregateGroupTide--; id <<= BP_FILTERING_TYPE_SHIFT_BIT; id |= FilterType::AGGREGATE; } const Bp::FilterGroup::Enum group = Bp::FilterGroup::Enum(id); PX_ASSERT(group != Bp::FilterGroup::eINVALID); return group; } #endif }; } //namespace Bp } //namespace physx #endif //BP_AABBMANAGER_BASE_H
16,588
C
43.714286
186
0.67796
NVIDIA-Omniverse/PhysX/physx/source/lowlevelaabb/include/BpAABBManager.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef BP_AABBMANAGER_H #define BP_AABBMANAGER_H #include "foundation/PxHashSet.h" #include "foundation/PxHashMap.h" #include "BpAABBManagerTasks.h" #include "BpAABBManagerBase.h" namespace physx { namespace Cm { class FlushPool; } namespace Bp { struct BroadPhasePair; class Aggregate; class PersistentPairs; class PersistentActorAggregatePair; class PersistentAggregateAggregatePair; class PersistentSelfCollisionPairs; struct AggPair { PX_FORCE_INLINE AggPair() {} PX_FORCE_INLINE AggPair(ShapeHandle index0, ShapeHandle index1) : mIndex0(index0), mIndex1(index1) {} ShapeHandle mIndex0; ShapeHandle mIndex1; PX_FORCE_INLINE bool operator==(const AggPair& p) const { return (p.mIndex0 == mIndex0) && (p.mIndex1 == mIndex1); } }; typedef PxCoalescedHashMap<AggPair, PersistentPairs*> AggPairMap; // PT: TODO: isn't there a generic pair structure somewhere? refactor with AggPair anyway struct Pair { PX_FORCE_INLINE Pair(PxU32 id0, PxU32 id1) : mID0(id0), mID1(id1) {} PX_FORCE_INLINE Pair(){} PX_FORCE_INLINE bool operator<(const Pair& p) const { const PxU64 value0 = *reinterpret_cast<const PxU64*>(this); const PxU64 value1 = *reinterpret_cast<const PxU64*>(&p); return value0 < value1; } PX_FORCE_INLINE bool operator==(const Pair& p) const { return (p.mID0 == mID0) && (p.mID1 == mID1); } PX_FORCE_INLINE bool operator!=(const Pair& p) const { return (p.mID0 != mID0) || (p.mID1 != mID1); } PxU32 mID0; PxU32 mID1; }; class AABBManager; class PostBroadPhaseStage2Task : public Cm::Task { Cm::FlushPool* mFlushPool; AABBManager& mManager; PX_NOCOPY(PostBroadPhaseStage2Task) public: PostBroadPhaseStage2Task(PxU64 contextID, AABBManager& manager) : Cm::Task(contextID), mFlushPool(NULL), mManager(manager) { } virtual const char* getName() const { return "PostBroadPhaseStage2Task"; } void setFlushPool(Cm::FlushPool* pool) { mFlushPool = pool; } virtual void runInternal(); }; class ProcessAggPairsBase; /** \brief A structure responsible for: * storing an aabb representation for each 
active shape in the related scene * managing the creation/removal of aabb representations when their related shapes are created/removed * updating all aabbs that require an update due to modification of shape geometry or transform * updating the aabb of all aggregates from the union of the aabbs of all shapes that make up each aggregate * computing and reporting the incremental changes to the set of overlapping aabb pairs */ class AABBManager : public AABBManagerBase { PX_NOCOPY(AABBManager) public: AABBManager(BroadPhase& bp, BoundsArray& boundsArray, PxFloatArrayPinned& contactDistance, PxU32 maxNbAggregates, PxU32 maxNbShapes, PxVirtualAllocator& allocator, PxU64 contextID, PxPairFilteringMode::Enum kineKineFilteringMode, PxPairFilteringMode::Enum staticKineFilteringMode); virtual ~AABBManager() {} // AABBManagerBase virtual void destroy() PX_OVERRIDE; virtual AggregateHandle createAggregate(BoundsIndex index, Bp::FilterGroup::Enum group, void* userData, PxU32 maxNumShapes, PxAggregateFilterHint filterHint) PX_OVERRIDE; virtual bool destroyAggregate(BoundsIndex& index, Bp::FilterGroup::Enum& group, AggregateHandle aggregateHandle) PX_OVERRIDE; virtual bool addBounds(BoundsIndex index, PxReal contactDistance, Bp::FilterGroup::Enum group, void* userdata, AggregateHandle aggregateHandle, ElementType::Enum volumeType) PX_OVERRIDE; virtual bool removeBounds(BoundsIndex index) PX_OVERRIDE; virtual void updateBPFirstPass(PxU32 numCpuTasks, Cm::FlushPool& flushPool, bool hasContactDistanceUpdated, PxBaseTask* continuation) PX_OVERRIDE; virtual void updateBPSecondPass(PxcScratchAllocator* scratchAllocator, PxBaseTask* continuation) PX_OVERRIDE; virtual void postBroadPhase(PxBaseTask*, Cm::FlushPool& flushPool) PX_OVERRIDE; virtual void reallocateChangedAABBMgActorHandleMap(const PxU32 size) PX_OVERRIDE; virtual bool getOutOfBoundsObjects(OutOfBoundsData& data) PX_OVERRIDE; virtual void clearOutOfBoundsObjects() PX_OVERRIDE; virtual void visualize(PxRenderOutput& out) 
PX_OVERRIDE; virtual void releaseDeferredAggregateIds() PX_OVERRIDE{} //~AABBManagerBase void preBpUpdate_CPU(PxU32 numCpuTasks); // PT: TODO: what is that BpCacheData for? BpCacheData* getBpCacheData(); void putBpCacheData(BpCacheData*); void resetBpCacheData(); PxMutex mMapLock; private: //void reserveShapeSpace(PxU32 nbShapes); void postBpStage2(PxBaseTask*, Cm::FlushPool&); void postBpStage3(PxBaseTask*); PostBroadPhaseStage2Task mPostBroadPhase2; Cm::DelegateTask<AABBManager, &AABBManager::postBpStage3> mPostBroadPhase3; PreBpUpdateTask mPreBpUpdateTask; PxU32 mTimestamp; PxU32 mFirstFreeAggregate; PxArray<Aggregate*> mAggregates; // PT: indexed by AggregateHandle PxArray<Aggregate*> mDirtyAggregates; AggPairMap mActorAggregatePairs; AggPairMap mAggregateAggregatePairs; PxArray<ProcessAggPairsBase*> mAggPairTasks; PxHashSet<Pair> mCreatedPairsTmp; // PT: temp hashset for dubious post filtering, persistent to minimize allocs PxSList mBpThreadContextPool; PxArray<void*> mOutOfBoundsObjects; PxArray<void*> mOutOfBoundsAggregates; PX_FORCE_INLINE Aggregate* getAggregateFromHandle(AggregateHandle handle) { PX_ASSERT(handle<mAggregates.size()); return mAggregates[handle]; } void startAggregateBoundsComputationTasks(PxU32 nbToGo, PxU32 numCpuTasks, Cm::FlushPool& flushPool); PersistentActorAggregatePair* createPersistentActorAggregatePair(ShapeHandle volA, ShapeHandle volB); PersistentAggregateAggregatePair* createPersistentAggregateAggregatePair(ShapeHandle volA, ShapeHandle volB); void updatePairs(PersistentPairs& p, BpCacheData* data = NULL); void handleOriginShift(); public: void processBPCreatedPair(const BroadPhasePair& pair); void processBPDeletedPair(const BroadPhasePair& pair); friend class PersistentActorAggregatePair; friend class PersistentAggregateAggregatePair; friend class ProcessSelfCollisionPairsParallel; friend class PostBroadPhaseStage2Task; }; } //namespace Bp } //namespace physx #endif //BP_AABBMANAGER_H
8,406
C
38.285047
196
0.730193
NVIDIA-Omniverse/PhysX/physx/source/lowlevelaabb/include/BpBroadPhaseUpdate.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef BP_BROADPHASE_UPDATE_H #define BP_BROADPHASE_UPDATE_H #include "BpFiltering.h" #include "foundation/PxBounds3.h" #include "foundation/PxUnionCast.h" namespace physx { namespace Bp { typedef PxU32 ShapeHandle; typedef PxU32 BpHandle; #define BP_INVALID_BP_HANDLE 0x3fffffff class BroadPhase; class BroadPhaseUpdateData { public: /** \brief A structure detailing the changes to the collection of aabbs, whose overlaps are computed in the broadphase. The structure consists of per-object arrays of object bounds and object groups, and three arrays that index into the per-object arrays, denoting the bounds which are to be created, updated and removed in the broad phase. * each entry in the object arrays represents the same shape or aggregate from frame to frame. * each entry in an index array must be less than the capacity of the per-object arrays. * no index value may appear in more than one index array, and may not occur more than once in that array. An index value is said to be "in use" if it has appeared in a created list in a previous update, and has not since occurred in a removed list. \param[in] created an array of indices describing the bounds that must be inserted into the broadphase. Each index in the array must not be in use. \param[in] updated an array of indices (referencing the boxBounds and boxGroups arrays) describing the bounds that have moved since the last broadphase update. Each index in the array must be in use, and each object whose index is in use and whose AABB has changed must appear in the update list. \param[in] removed an array of indices describing the bounds that must be removed from the broad phase. Each index in the array must be in use. \param[in] boxBounds an array of bounds coordinates for the AABBs to be processed by the broadphase. 
An entry is valid if its values are integer bitwise representations of floating point numbers that satisfy max>min in each dimension, along with a further rule that minima(maxima) must have even(odd) values. Each entry whose index is either in use or appears in the created array must be valid. An entry whose index is either not in use or appears in the removed array need not be valid. \param[in] boxGroups an array of group ids, one for each bound, used for pair filtering. Bounds with the same group id will not be reported as overlap pairs by the broad phase. Zero is reserved for static bounds. Entries in this array are immutable: the only way to change the group of an object is to remove it from the broad phase and reinsert it at a different index (recall that each index must appear at most once in the created/updated/removed lists). \param[in] boxesCapacity the length of the boxBounds and boxGroups arrays. @see BroadPhase::update */ BroadPhaseUpdateData( const ShapeHandle* created, PxU32 createdSize, const ShapeHandle* updated, PxU32 updatedSize, const ShapeHandle* removed, PxU32 removedSize, const PxBounds3* boxBounds, const Bp::FilterGroup::Enum* boxGroups, const PxReal* boxContactDistances, PxU32 boxesCapacity, const BpFilter& filter, bool stateChanged, bool gpuStateChanged ) : mCreated (created), mCreatedSize (createdSize), mUpdated (updated), mUpdatedSize (updatedSize), mRemoved (removed), mRemovedSize (removedSize), mBoxBounds (boxBounds), mBoxGroups (boxGroups), mBoxDistances (boxContactDistances), mBoxesCapacity (boxesCapacity), mFilter (filter), mStateChanged (stateChanged), mGpuStateChanged(gpuStateChanged) { } BroadPhaseUpdateData(const BroadPhaseUpdateData& other) : mCreated (other.mCreated), mCreatedSize (other.mCreatedSize), mUpdated (other.mUpdated), mUpdatedSize (other.mUpdatedSize), mRemoved (other.mRemoved), mRemovedSize (other.mRemovedSize), mBoxBounds (other.mBoxBounds), mBoxGroups (other.mBoxGroups), mBoxDistances (other.mBoxDistances), 
mBoxesCapacity (other.mBoxesCapacity), mFilter (other.mFilter), mStateChanged (other.mStateChanged), mGpuStateChanged(other.mGpuStateChanged) { } BroadPhaseUpdateData& operator=(const BroadPhaseUpdateData& other); PX_FORCE_INLINE const ShapeHandle* getCreatedHandles() const { return mCreated; } PX_FORCE_INLINE PxU32 getNumCreatedHandles() const { return mCreatedSize; } PX_FORCE_INLINE const ShapeHandle* getUpdatedHandles() const { return mUpdated; } PX_FORCE_INLINE PxU32 getNumUpdatedHandles() const { return mUpdatedSize; } PX_FORCE_INLINE const ShapeHandle* getRemovedHandles() const { return mRemoved; } PX_FORCE_INLINE PxU32 getNumRemovedHandles() const { return mRemovedSize; } PX_FORCE_INLINE const PxBounds3* getAABBs() const { return mBoxBounds; } PX_FORCE_INLINE const Bp::FilterGroup::Enum* getGroups() const { return mBoxGroups; } PX_FORCE_INLINE const PxReal* getContactDistance() const { return mBoxDistances; } PX_FORCE_INLINE PxU32 getCapacity() const { return mBoxesCapacity; } PX_FORCE_INLINE const BpFilter& getFilter() const { return mFilter; } PX_FORCE_INLINE bool getStateChanged() const { return mStateChanged; } PX_FORCE_INLINE bool getGpuStateChanged() const { return mGpuStateChanged; } #if PX_CHECKED static bool isValid(const BroadPhaseUpdateData& updateData, const BroadPhase& bp, const bool skipBoundValidation, PxU64 contextID); bool isValid(const bool skipBoundValidation) const; #endif private: const ShapeHandle* mCreated; const PxU32 mCreatedSize; const ShapeHandle* mUpdated; const PxU32 mUpdatedSize; const ShapeHandle* mRemoved; const PxU32 mRemovedSize; const PxBounds3* mBoxBounds; const Bp::FilterGroup::Enum* mBoxGroups; const PxReal* mBoxDistances; const PxU32 mBoxesCapacity; const BpFilter& mFilter; const bool mStateChanged; const bool mGpuStateChanged; }; } //namespace Bp } //namespace physx #endif
7,699
C
40.621621
135
0.756072
NVIDIA-Omniverse/PhysX/physx/source/lowlevelaabb/include/BpVolumeData.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef BP_VOLUME_DATA_H #define BP_VOLUME_DATA_H #include "PxvConfig.h" #include "foundation/PxAssert.h" namespace physx { namespace Bp { typedef PxU32 AggregateHandle; // PT: currently an index in mAggregates array struct ElementType { enum Enum { eSHAPE = 0, eTRIGGER, eCOUNT }; }; PX_COMPILE_TIME_ASSERT(ElementType::eCOUNT <= 4); // 2 bits reserved for type #define PX_CUDA_INLINE PX_CUDA_CALLABLE PX_FORCE_INLINE struct VolumeData { PX_CUDA_INLINE void reset() { mAggregate = PX_INVALID_U32; mUserData = NULL; } PX_CUDA_INLINE void setSingleActor() { mAggregate = PX_INVALID_U32; } PX_CUDA_INLINE bool isSingleActor() const { return mAggregate == PX_INVALID_U32; } PX_CUDA_INLINE void setUserData(void* userData) { // PX_ASSERT(!(size_t(userData) & 3)); mUserData = userData; } PX_CUDA_INLINE void* getUserData() const { return reinterpret_cast<void*>(size_t(mUserData)& (~size_t(3))); } PX_CUDA_INLINE void setVolumeType(ElementType::Enum volumeType) { PX_ASSERT(volumeType < 2); mUserData = reinterpret_cast<void*>(size_t(getUserData()) | size_t(volumeType)); } PX_CUDA_INLINE ElementType::Enum getVolumeType() const { return ElementType::Enum(size_t(mUserData) & 3); } PX_CUDA_INLINE void setAggregate(AggregateHandle handle) { PX_ASSERT(handle != PX_INVALID_U32); mAggregate = (handle << 1) | 1; } PX_CUDA_INLINE bool isAggregate() const { return !isSingleActor() && ((mAggregate & 1) != 0); } PX_CUDA_INLINE void setAggregated(AggregateHandle handle) { PX_ASSERT(handle != PX_INVALID_U32); mAggregate = (handle << 1) | 0; } PX_CUDA_INLINE bool isAggregated() const { return !isSingleActor() && ((mAggregate & 1) == 0); } PX_CUDA_INLINE AggregateHandle getAggregateOwner() const { return mAggregate >> 1; } PX_CUDA_INLINE AggregateHandle getAggregate() const { return mAggregate >> 1; } private: void* mUserData; // PT: in PhysX this is an Sc::ElementSim ptr // PT: TODO: consider moving this to a separate array, which wouldn't be allocated at all for people not using aggregates. 
// PT: current encoding: // aggregate == PX_INVALID_U32 => single actor // aggregate != PX_INVALID_U32 => aggregate index<<1|LSB. LSB==1 for aggregates, LSB==0 for aggregated actors. AggregateHandle mAggregate; }; } } #endif
4,447
C
34.870967
125
0.656173
NVIDIA-Omniverse/PhysX/physx/source/foundation/FdAssert.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "foundation/PxAssert.h" #include "foundation/PxString.h" #include <stdio.h> #if PX_WINDOWS_FAMILY #include <crtdbg.h> #elif PX_SWITCH #include "foundation/switch/PxSwitchAbort.h" #endif void physx::PxAssert(const char* expr, const char* file, int line, bool& ignore) { PX_UNUSED(ignore); // is used only in debug windows config char buffer[1024]; #if PX_WINDOWS_FAMILY sprintf_s(buffer, "%s(%d) : Assertion failed: %s\n", file, line, expr); #else sprintf(buffer, "%s(%d) : Assertion failed: %s\n", file, line, expr); #endif physx::PxPrintString(buffer); #if PX_WINDOWS_FAMILY&& PX_DEBUG && PX_DEBUG_CRT // _CrtDbgReport returns -1 on error, 1 on 'retry', 0 otherwise including 'ignore'. // Hitting 'abort' will terminate the process immediately. int result = _CrtDbgReport(_CRT_ASSERT, file, line, NULL, "%s", buffer); int mode = _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_REPORT_MODE); ignore = _CRTDBG_MODE_WNDW == mode && result == 0; if(ignore) return; __debugbreak(); #elif PX_WINDOWS_FAMILY&& PX_CHECKED __debugbreak(); #elif PX_SWITCH abort(buffer); #else abort(); #endif }
2,801
C++
41.454545
84
0.740807
NVIDIA-Omniverse/PhysX/physx/source/foundation/FdMathUtils.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "foundation/PxSIMDHelpers.h" #include "foundation/PxMathUtils.h" #include "foundation/PxVec4.h" #include "foundation/PxAssert.h" #include "foundation/PxBasicTemplates.h" #include "foundation/PxUtilities.h" #include "foundation/PxTransform.h" using namespace physx; using namespace physx::intrinsics; PX_FOUNDATION_API PxTransform physx::PxTransformFromPlaneEquation(const PxPlane& plane) { PxPlane p = plane; p.normalize(); // special case handling for axis aligned planes const PxReal halfsqrt2 = 0.707106781f; PxQuat q; if(2 == (p.n.x == 0.0f) + (p.n.y == 0.0f) + (p.n.z == 0.0f)) // special handling for axis aligned planes { if(p.n.x > 0) q = PxQuat(PxIdentity); else if(p.n.x < 0) q = PxQuat(0, 0, 1.0f, 0); else q = PxQuat(0.0f, -p.n.z, p.n.y, 1.0f) * halfsqrt2; } else q = PxShortestRotation(PxVec3(1.f,0,0), p.n); return PxTransform(-p.n * p.d, q); } PX_FOUNDATION_API PxTransform physx::PxTransformFromSegment(const PxVec3& p0, const PxVec3& p1, PxReal* halfHeight) { const PxVec3 axis = p1-p0; const PxReal height = axis.magnitude(); if(halfHeight) *halfHeight = height/2; return PxTransform((p1+p0) * 0.5f, height<1e-6f ? PxQuat(PxIdentity) : PxShortestRotation(PxVec3(1.f,0,0), axis/height)); } PX_FOUNDATION_API PxQuat physx::PxShortestRotation(const PxVec3& v0, const PxVec3& v1) { const PxReal d = v0.dot(v1); const PxVec3 cross = v0.cross(v1); const PxQuat q = d > -1 ? PxQuat(cross.x, cross.y, cross.z, 1 + d) : PxAbs(v0.x) < 0.1f ? 
PxQuat(0.0f, v0.z, -v0.y, 0.0f) : PxQuat(v0.y, -v0.x, 0.0f, 0.0f); return q.getNormalized(); } // indexed rotation around axis, with sine and cosine of half-angle static PxQuat indexedRotation(PxU32 axis, PxReal s, PxReal c) { PxReal v[3] = { 0, 0, 0 }; v[axis] = s; return PxQuat(v[0], v[1], v[2], c); } PX_FOUNDATION_API PxVec3 physx::PxDiagonalize(const PxMat33& m, PxQuat& massFrame) { // jacobi rotation using quaternions (from an idea of Stan Melax, with fix for precision issues) const PxU32 MAX_ITERS = 24; PxQuat q(PxIdentity); PxMat33 d; for(PxU32 i = 0; i < MAX_ITERS; i++) { // PT: removed for now, it makes one UT fail because the error is slightly above the threshold //const PxMat33Padded axes(q); const PxMat33 axes(q); d = axes.getTranspose() * m * axes; const PxReal d0 = PxAbs(d[1][2]), d1 = PxAbs(d[0][2]), d2 = PxAbs(d[0][1]); const PxU32 a = PxU32(d0 > d1 && d0 > d2 ? 0 : d1 > d2 ? 1 : 2); // rotation axis index, from largest off-diagonal // element const PxU32 a1 = PxGetNextIndex3(a), a2 = PxGetNextIndex3(a1); if(d[a1][a2] == 0.0f || PxAbs(d[a1][a1] - d[a2][a2]) > 2e6f * PxAbs(2.0f * d[a1][a2])) break; PxReal w = (d[a1][a1] - d[a2][a2]) / (2.0f * d[a1][a2]); // cot(2 * phi), where phi is the rotation angle PxReal absw = PxAbs(w); PxQuat r; if(absw > 1000) r = indexedRotation(a, 1 / (4 * w), 1.f); // h will be very close to 1, so use small angle approx instead else { const PxReal t = 1 / (absw + PxSqrt(w * w + 1)); // absolute value of tan phi const PxReal h = 1 / PxSqrt(t * t + 1); // absolute value of cos phi PX_ASSERT(h != 1); // |w|<1000 guarantees this with typical IEEE754 machine eps (approx 6e-8) r = indexedRotation(a, PxSqrt((1 - h) / 2) * PxSign(w), PxSqrt((1 + h) / 2)); } q = (q * r).getNormalized(); } massFrame = q; return PxVec3(d.column0.x, d.column1.y, d.column2.z); } /** \brief computes a oriented bounding box around the scaled basis. \param basis Input = skewed basis, Output = (normalized) orthogonal basis. \return Bounding box extent. 
*/ PxVec3 physx::PxOptimizeBoundingBox(PxMat33& basis) { PxVec3* PX_RESTRICT vec = &basis[0]; // PT: don't copy vectors if not needed... // PT: since we store the magnitudes to memory, we can avoid the FCMPs afterwards PxVec3 magnitude(vec[0].magnitudeSquared(), vec[1].magnitudeSquared(), vec[2].magnitudeSquared()); // find indices sorted by magnitude unsigned int i = magnitude[1] > magnitude[0] ? 1 : 0u; unsigned int j = magnitude[2] > magnitude[1 - i] ? 2 : 1 - i; const unsigned int k = 3 - i - j; if(magnitude[i] < magnitude[j]) PxSwap(i, j); PX_ASSERT(magnitude[i] >= magnitude[j] && magnitude[i] >= magnitude[k] && magnitude[j] >= magnitude[k]); // ortho-normalize basis PxReal invSqrt = PxRecipSqrt(magnitude[i]); magnitude[i] *= invSqrt; vec[i] *= invSqrt; // normalize the first axis PxReal dotij = vec[i].dot(vec[j]); PxReal dotik = vec[i].dot(vec[k]); magnitude[i] += PxAbs(dotij) + PxAbs(dotik); // elongate the axis by projection of the other two vec[j] -= vec[i] * dotij; // orthogonize the two remaining axii relative to vec[i] vec[k] -= vec[i] * dotik; magnitude[j] = vec[j].normalize(); PxReal dotjk = vec[j].dot(vec[k]); magnitude[j] += PxAbs(dotjk); // elongate the axis by projection of the other one vec[k] -= vec[j] * dotjk; // orthogonize vec[k] relative to vec[j] magnitude[k] = vec[k].normalize(); return magnitude; } void physx::PxIntegrateTransform(const PxTransform& curTrans, const PxVec3& linvel, const PxVec3& angvel, PxReal timeStep, PxTransform& result) { result.p = curTrans.p + linvel * timeStep; // from void DynamicsContext::integrateAtomPose(PxsRigidBody* atom, Cm::BitMap &shapeChangedMap) const: // Integrate the rotation using closed form quaternion integrator PxReal w = angvel.magnitudeSquared(); if (w != 0.0f) { w = PxSqrt(w); if (w != 0.0f) { const PxReal v = timeStep * w * 0.5f; const PxReal q = PxCos(v); const PxReal s = PxSin(v) / w; const PxVec3 pqr = angvel * s; const PxQuat quatVel(pqr.x, pqr.y, pqr.z, 0); PxQuat out; // need to have 
temporary, otherwise we may overwrite input if &curTrans == &result. out = quatVel * curTrans.q; out.x += curTrans.q.x * q; out.y += curTrans.q.y * q; out.z += curTrans.q.z * q; out.w += curTrans.q.w * q; result.q = out; return; } } // orientation stays the same - convert from quat to matrix: result.q = curTrans.q; }
7,798
C++
35.962085
122
0.673891
NVIDIA-Omniverse/PhysX/physx/source/foundation/FdTempAllocator.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "foundation/PxMath.h" #include "foundation/PxIntrinsics.h" #include "foundation/PxBitUtils.h" #include "foundation/PxArray.h" #include "foundation/PxMutex.h" #include "foundation/PxAtomic.h" #include "foundation/PxTempAllocator.h" #include "FdFoundation.h" #if PX_VC #pragma warning(disable : 4706) // assignment within conditional expression #endif namespace physx { namespace { typedef PxTempAllocatorChunk Chunk; typedef PxArray<Chunk*, PxAllocator> AllocFreeTable; PX_INLINE Foundation::AllocFreeTable& getFreeTable() { return getFoundation().getTempAllocFreeTable(); } PX_INLINE Foundation::Mutex& getMutex() { return getFoundation().getTempAllocMutex(); } const PxU32 sMinIndex = 8; // 256B min const PxU32 sMaxIndex = 17; // 128kB max } void* PxTempAllocator::allocate(size_t size, const char* filename, PxI32 line) { if(!size) return 0; PxU32 index = PxMax(PxHighestSetBit(PxU32(size) + sizeof(Chunk) - 1), sMinIndex); Chunk* chunk = 0; if(index < sMaxIndex) { Foundation::Mutex::ScopedLock lock(getMutex()); // find chunk up to 16x bigger than necessary Chunk** it = getFreeTable().begin() + index - sMinIndex; Chunk** end = PxMin(it + 3, getFreeTable().end()); while(it < end && !(*it)) ++it; if(it < end) { // pop top off freelist chunk = *it; *it = chunk->mNext; index = PxU32(it - getFreeTable().begin() + sMinIndex); } else // create new chunk chunk = reinterpret_cast<Chunk*>(PxAllocator().allocate(size_t(2 << index), filename, line)); } else { // too big for temp allocation, forward to base allocator chunk = reinterpret_cast<Chunk*>(PxAllocator().allocate(size + sizeof(Chunk), filename, line)); } chunk->mIndex = index; void* ret = chunk + 1; PX_ASSERT((size_t(ret) & 0xf) == 0); // SDK types require at minimum 16 byte alignment. 
return ret; } void PxTempAllocator::deallocate(void* ptr) { if(!ptr) return; Chunk* chunk = reinterpret_cast<Chunk*>(ptr) - 1; PxU32 index = chunk->mIndex; if(index >= sMaxIndex) return PxAllocator().deallocate(chunk); Foundation::Mutex::ScopedLock lock(getMutex()); index -= sMinIndex; if(getFreeTable().size() <= index) getFreeTable().resize(index + 1); chunk->mNext = getFreeTable()[index]; getFreeTable()[index] = chunk; } } // namespace physx
3,997
C++
30.984
97
0.725794
NVIDIA-Omniverse/PhysX/physx/source/foundation/FdAllocator.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
1,696
C++
57.517239
74
0.767099
NVIDIA-Omniverse/PhysX/physx/source/foundation/FdFoundation.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_FOUNDATION_PSFOUNDATION_H
#define PX_FOUNDATION_PSFOUNDATION_H

#include "foundation/PxErrors.h"
#include "foundation/PxProfiler.h"
#include "foundation/PxFoundation.h"
#include "foundation/PxAllocator.h"
#include "foundation/PxHashMap.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxBroadcast.h"
#include "foundation/PxTempAllocator.h"
#include "foundation/PxMutex.h"

#include <stdarg.h>

namespace physx
{

#if PX_VC
#pragma warning(push)
#pragma warning(disable : 4251) // class needs to have dll-interface to be used by clients of class
#endif

// Concrete, ref-counted singleton implementation of PxFoundation. It owns the
// broadcasting error/allocator wrappers, the temp-allocator free table, and
// the mutexes guarding them. Instances are created/destroyed exclusively
// through the static factory functions below.
class PX_FOUNDATION_API Foundation : public PxFoundation, public PxUserAllocated
{
	PX_NOCOPY(Foundation)

  public:
	typedef PxMutexT<PxAllocator> Mutex;
	typedef PxArray<PxTempAllocatorChunk*, PxAllocator> AllocFreeTable;

  public:
	// factory
	// note, you MUST eventually call release if createInstance returned true!
	static Foundation* createInstance(PxU32 version, PxErrorCallback& errc, PxAllocatorCallback& alloc);
	static void setInstance(Foundation& foundation);
	void release();
	static void incRefCount(); // this call requires a foundation object to exist already
	static void decRefCount(); // this call requires a foundation object to exist already
	static PxU32 getRefCount();

	// Begin Errors
	virtual PxErrorCallback& getErrorCallback()
	{
		return mErrorCallback;
	} // Return the user's error callback
	PxErrorCallback& getInternalErrorCallback()
	{
		return mBroadcastingError;
	} // Return the broadcasting error callback

	virtual void registerErrorCallback(PxErrorCallback& listener);
	virtual void deregisterErrorCallback(PxErrorCallback& listener);

	// Error-mask filter: only codes present in the mask are reported.
	virtual void setErrorLevel(PxErrorCode::Enum mask)
	{
		mErrorMask = mask;
	}
	virtual PxErrorCode::Enum getErrorLevel() const
	{
		return mErrorMask;
	}

	virtual bool error(PxErrorCode::Enum, const char* file, int line, const char* messageFmt, ...); // Report errors with the
	                                                                                                // broadcasting
	virtual bool error(PxErrorCode::Enum, const char* file, int line, const char* messageFmt,
	                   va_list); // error callback

	static PxU32 getWarnOnceTimestamp();

	// End errors

	// Begin Allocations
	virtual PxAllocatorCallback& getAllocatorCallback()
	{
		return mAllocatorCallback;
	} // Return the user's allocator callback
	PxAllocatorCallback& getBroadcastAllocator()
	{
		return mBroadcastingAllocator;
	} // Return the broadcasting allocator

	virtual void registerAllocationListener(physx::PxAllocationListener& listener);
	virtual void deregisterAllocationListener(physx::PxAllocationListener& listener);

	virtual bool getReportAllocationNames() const
	{
		return mReportAllocationNames;
	}
	virtual void setReportAllocationNames(bool value)
	{
		mReportAllocationNames = value;
	}

	// Accessors used by PxTempAllocator (see FdAllocator.cpp); callers must
	// lock getTempAllocMutex() before touching the free table.
	PX_INLINE AllocFreeTable& getTempAllocFreeTable()
	{
		return mTempAllocFreeTable;
	}
	PX_INLINE Mutex& getTempAllocMutex()
	{
		return mTempAllocMutex;
	}
	// End allocations

	//private:
	static void destroyInstance();

	Foundation(PxErrorCallback& errc, PxAllocatorCallback& alloc);
	~Foundation();

	// init order is tricky here: the mutexes require the allocator, the allocator may require the error stream
	PxAllocatorCallback& mAllocatorCallback;
	PxErrorCallback& mErrorCallback;

	PxBroadcastingAllocator mBroadcastingAllocator;
	PxBroadcastingErrorCallback mBroadcastingError;

	bool mReportAllocationNames;

	PxErrorCode::Enum mErrorMask;
	Mutex mErrorMutex;

	AllocFreeTable mTempAllocFreeTable;
	Mutex mTempAllocMutex;

	Mutex mListenerMutex;

	PxU32 mRefCount;
	// NOTE: spelling "Timestap" is kept as-is; it is part of the existing ABI/name.
	static PxU32 mWarnOnceTimestap;
};
#if PX_VC
#pragma warning(pop)
#endif

// Convenience accessor for the singleton; asserts that it exists (see .cpp).
Foundation& getFoundation();

} // namespace physx

#endif
5,433
C
31.345238
123
0.767532
NVIDIA-Omniverse/PhysX/physx/source/foundation/FdFoundation.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "foundation/PxProfiler.h" #include "foundation/PxErrorCallback.h" #include "foundation/PxString.h" #include "foundation/PxAllocator.h" #include "foundation/PxPhysicsVersion.h" #include "FdFoundation.h" using namespace physx; static PxProfilerCallback* gProfilerCallback = NULL; static Foundation* gInstance = NULL; Foundation& physx::getFoundation() { PX_ASSERT(gInstance); return *gInstance; } Foundation::Foundation(PxErrorCallback& errc, PxAllocatorCallback& alloc) : mAllocatorCallback (alloc), mErrorCallback (errc), mBroadcastingAllocator (alloc, errc), mBroadcastingError (errc), #if PX_CHECKED mReportAllocationNames (true), #else mReportAllocationNames (false), #endif mErrorMask (PxErrorCode::Enum(~0)), mErrorMutex ("Foundation::mErrorMutex"), mTempAllocMutex ("Foundation::mTempAllocMutex"), mRefCount (0) { } Foundation::~Foundation() { // deallocate temp buffer allocations PxAllocator alloc; for(PxU32 i = 0; i < mTempAllocFreeTable.size(); ++i) { for(PxTempAllocatorChunk* ptr = mTempAllocFreeTable[i]; ptr;) { PxTempAllocatorChunk* next = ptr->mNext; alloc.deallocate(ptr); ptr = next; } } mTempAllocFreeTable.reset(); } void Foundation::setInstance(Foundation& foundation) { gInstance = &foundation; } PxU32 Foundation::getWarnOnceTimestamp() { PX_ASSERT(gInstance); return mWarnOnceTimestap; } bool Foundation::error(PxErrorCode::Enum c, const char* file, int line, const char* messageFmt, ...) { va_list va; va_start(va, messageFmt); error(c, file, line, messageFmt, va); va_end(va); return false; } bool Foundation::error(PxErrorCode::Enum e, const char* file, int line, const char* messageFmt, va_list va) { PX_ASSERT(messageFmt); if(e & mErrorMask) { // this function is reentrant but user's error callback may not be, so... Mutex::ScopedLock lock(mErrorMutex); // using a static fixed size buffer here because: // 1. vsnprintf return values differ between platforms // 2. va_start is only usable in functions with ellipses // 3. ellipses (...) 
cannot be passed to called function // which would be necessary to dynamically grow the buffer here static const size_t bufSize = 1024; char stringBuffer[bufSize]; Pxvsnprintf(stringBuffer, bufSize, messageFmt, va); mBroadcastingError.reportError(e, stringBuffer, file, line); } return false; } Foundation* Foundation::createInstance(PxU32 version, PxErrorCallback& errc, PxAllocatorCallback& alloc) { if(version != PX_PHYSICS_VERSION) { char* buffer = new char[256]; Pxsnprintf(buffer, 256, "Wrong version: physics version is 0x%08x, tried to create 0x%08x", PX_PHYSICS_VERSION, version); errc.reportError(PxErrorCode::eINVALID_PARAMETER, buffer, PX_FL); return 0; } if(!gInstance) { // if we don't assign this here, the Foundation object can't create member // subobjects which require the allocator gInstance = reinterpret_cast<Foundation*>(alloc.allocate(sizeof(Foundation), "Foundation", PX_FL)); if(gInstance) { PX_PLACEMENT_NEW(gInstance, Foundation)(errc, alloc); PX_ASSERT(gInstance->mRefCount == 0); gInstance->mRefCount = 1; // skip 0 which marks uninitialized timestaps in PX_WARN_ONCE mWarnOnceTimestap = (mWarnOnceTimestap == PX_MAX_U32) ? 1 : mWarnOnceTimestap + 1; return gInstance; } else { errc.reportError(PxErrorCode::eINTERNAL_ERROR, "Memory allocation for foundation object failed.", PX_FL); } } else { errc.reportError(PxErrorCode::eINVALID_OPERATION, "Foundation object exists already. Only one instance per process can be created.", PX_FL); } return 0; } void Foundation::destroyInstance() { PX_ASSERT(gInstance); if(gInstance->mRefCount == 1) { PxAllocatorCallback& alloc = gInstance->getAllocatorCallback(); gInstance->~Foundation(); alloc.deallocate(gInstance); gInstance = NULL; } else { gInstance->error(PxErrorCode::eINVALID_OPERATION, PX_FL, "Foundation destruction failed due to pending module references. 
Close/release all depending modules first."); } } void Foundation::incRefCount() { PX_ASSERT(gInstance); if(gInstance->mRefCount > 0) gInstance->mRefCount++; else gInstance->error(PxErrorCode::eINVALID_OPERATION, PX_FL, "Foundation: Invalid registration detected."); } void Foundation::decRefCount() { PX_ASSERT(gInstance); if(gInstance->mRefCount > 0) gInstance->mRefCount--; else gInstance->error(PxErrorCode::eINVALID_OPERATION, PX_FL, "Foundation: Invalid deregistration detected."); } void Foundation::release() { Foundation::destroyInstance(); } PxU32 Foundation::getRefCount() { return gInstance->mRefCount; } PxU32 Foundation::mWarnOnceTimestap = 0; void Foundation::registerAllocationListener(PxAllocationListener& listener) { Mutex::ScopedLock lock(mListenerMutex); mBroadcastingAllocator.registerListener(listener); } void Foundation::deregisterAllocationListener(PxAllocationListener& listener) { Mutex::ScopedLock lock(mListenerMutex); mBroadcastingAllocator.deregisterListener(listener); } void Foundation::registerErrorCallback(PxErrorCallback& callback) { Mutex::ScopedLock lock(mListenerMutex); mBroadcastingError.registerListener(callback); } void Foundation::deregisterErrorCallback(PxErrorCallback& callback) { Mutex::ScopedLock lock(mListenerMutex); mBroadcastingError.deregisterListener(callback); } PxFoundation* PxCreateFoundation(PxU32 version, PxAllocatorCallback& allocator, PxErrorCallback& errorCallback) { return Foundation::createInstance(version, errorCallback, allocator); } void PxSetFoundationInstance(PxFoundation& foundation) { Foundation::setInstance(static_cast<Foundation&>(foundation)); } PxAllocatorCallback* PxGetAllocatorCallback() { return &gInstance->getAllocatorCallback(); } PxAllocatorCallback* PxGetBroadcastAllocator(bool* reportAllocationNames) { PX_ASSERT(gInstance); if(reportAllocationNames) *reportAllocationNames = gInstance->mReportAllocationNames; return &gInstance->getBroadcastAllocator(); } PxErrorCallback* PX_CALL_CONV 
PxGetErrorCallback() { return &gInstance->getErrorCallback(); } PxErrorCallback* PX_CALL_CONV PxGetBroadcastError() { return &gInstance->getInternalErrorCallback(); } PxFoundation& PxGetFoundation() { PX_ASSERT(gInstance); return *gInstance; } PxFoundation* PxIsFoundationValid() { return gInstance; } PxProfilerCallback* PxGetProfilerCallback() { return gProfilerCallback; } void PxSetProfilerCallback(PxProfilerCallback* profiler) { gProfilerCallback = profiler; } PxU32 PxGetWarnOnceTimeStamp() { return Foundation::getWarnOnceTimestamp(); } void PxDecFoundationRefCount() { Foundation::decRefCount(); } void PxIncFoundationRefCount() { Foundation::incRefCount(); }
8,462
C++
26.3
142
0.75455
NVIDIA-Omniverse/PhysX/physx/source/foundation/FdString.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "foundation/PxString.h" #include <stdarg.h> #include <stdio.h> #include <string.h> #if PX_WINDOWS_FAMILY #pragma warning(push) #pragma warning(disable : 4996) // unsafe string functions #endif #if PX_APPLE_FAMILY #pragma clang diagnostic push // error : format string is not a string literal #pragma clang diagnostic ignored "-Wformat-nonliteral" #endif namespace physx { // cross-platform implementations int32_t Pxstrcmp(const char* str1, const char* str2) { return (str1 && str2) ? ::strcmp(str1, str2) : -1; } int32_t Pxstrncmp(const char* str1, const char* str2, size_t count) { return ::strncmp(str1, str2, count); } int32_t Pxsnprintf(char* dst, size_t dstSize, const char* format, ...) { va_list arg; va_start(arg, format); int32_t r = Pxvsnprintf(dst, dstSize, format, arg); va_end(arg); return r; } int32_t Pxsscanf(const char* buffer, const char* format, ...) { va_list arg; va_start(arg, format); #if (PX_VC < 12) && !PX_LINUX int32_t r = ::sscanf(buffer, format, arg); #else int32_t r = ::vsscanf(buffer, format, arg); #endif va_end(arg); return r; } size_t Pxstrlcpy(char* dst, size_t dstSize, const char* src) { size_t i = 0; if(dst && dstSize) { for(; i + 1 < dstSize && src[i]; i++) // copy up to dstSize-1 bytes dst[i] = src[i]; dst[i] = 0; // always null-terminate } while(src[i]) // read any remaining characters in the src string to get the length i++; return i; } size_t Pxstrlcat(char* dst, size_t dstSize, const char* src) { size_t i = 0, s = 0; if(dst && dstSize) { s = strlen(dst); for(; i + s + 1 < dstSize && src[i]; i++) // copy until total is at most dstSize-1 dst[i + s] = src[i]; dst[i + s] = 0; // always null-terminate } while(src[i]) // read any remaining characters in the src string to get the length i++; return i + s; } void Pxstrlwr(char* str) { for(; *str; str++) if(*str >= 'A' && *str <= 'Z') *str += 32; } void Pxstrupr(char* str) { for(; *str; str++) if(*str >= 'a' && *str <= 'z') *str -= 32; } int32_t Pxvsnprintf(char* dst, size_t dstSize, 
const char* src, va_list arg) { #if PX_VC // MSVC is not C99-compliant... int32_t result = dst ? ::vsnprintf(dst, dstSize, src, arg) : -1; if(dst && (result == int32_t(dstSize) || result < 0)) dst[dstSize - 1] = 0; // string was truncated or there wasn't room for the NULL if(result < 0) result = _vscprintf(src, arg); // work out how long the answer would have been. #else int32_t result = ::vsnprintf(dst, dstSize, src, arg); #endif return result; } int32_t Pxstricmp(const char* str, const char* str1) { #if PX_VC return (::_stricmp(str, str1)); #else return (::strcasecmp(str, str1)); #endif } int32_t Pxstrnicmp(const char* str, const char* str1, size_t n) { #if PX_VC return (::_strnicmp(str, str1, n)); #else return (::strncasecmp(str, str1, n)); #endif } }//namespace physx #if PX_APPLE_FAMILY #pragma clang diagnostic pop #endif #if PX_WINDOWS_FAMILY #pragma warning(pop) #endif
4,674
C++
26.339181
84
0.68849
NVIDIA-Omniverse/PhysX/physx/source/foundation/unix/FdUnixTime.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #include "foundation/PxTime.h" #include <time.h> #include <sys/time.h> #if PX_APPLE_FAMILY #include <mach/mach_time.h> #endif // Use real-time high-precision timer. 
#if !PX_APPLE_FAMILY #define CLOCKID CLOCK_REALTIME #endif namespace physx { static const PxCounterFrequencyToTensOfNanos gCounterFreq = PxTime::getCounterFrequency(); const PxCounterFrequencyToTensOfNanos& PxTime::getBootCounterFrequency() { return gCounterFreq; } static PxTime::Second getTimeSeconds() { static struct timeval _tv; gettimeofday(&_tv, NULL); return double(_tv.tv_sec) + double(_tv.tv_usec) * 0.000001; } PxTime::PxTime() { mLastTime = getTimeSeconds(); } PxTime::Second PxTime::getElapsedSeconds() { PxTime::Second curTime = getTimeSeconds(); PxTime::Second diff = curTime - mLastTime; mLastTime = curTime; return diff; } PxTime::Second PxTime::peekElapsedSeconds() { PxTime::Second curTime = getTimeSeconds(); PxTime::Second diff = curTime - mLastTime; return diff; } PxTime::Second PxTime::getLastTime() const { return mLastTime; } #if PX_APPLE_FAMILY PxCounterFrequencyToTensOfNanos PxTime::getCounterFrequency() { mach_timebase_info_data_t info; mach_timebase_info(&info); // mach_absolute_time * (info.numer/info.denom) is in units of nano seconds return PxCounterFrequencyToTensOfNanos(info.numer, info.denom * 10); } uint64_t PxTime::getCurrentCounterValue() { return mach_absolute_time(); } #else PxCounterFrequencyToTensOfNanos PxTime::getCounterFrequency() { return PxCounterFrequencyToTensOfNanos(1, 10); } uint64_t PxTime::getCurrentCounterValue() { struct timespec mCurrTimeInt; clock_gettime(CLOCKID, &mCurrTimeInt); // Convert to nanos as this doesn't cause a large divide here return (static_cast<uint64_t>(mCurrTimeInt.tv_sec) * 1000000000) + (static_cast<uint64_t>(mCurrTimeInt.tv_nsec)); } #endif } // namespace physx
3,556
C++
29.663793
114
0.760405
NVIDIA-Omniverse/PhysX/physx/source/foundation/unix/FdUnixPrintString.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #include "foundation/PxString.h" #include <stdio.h> namespace physx { void PxPrintString(const char* str) { puts(str); } } // namespace physx
1,842
C++
43.951218
74
0.762215
NVIDIA-Omniverse/PhysX/physx/source/foundation/unix/FdUnixFPU.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #include "foundation/PxFPU.h" #if !defined(__CYGWIN__) #include <fenv.h> PX_COMPILE_TIME_ASSERT(8 * sizeof(uint32_t) >= sizeof(fenv_t)); #endif #if PX_OSX // osx defines SIMD as standard for floating point operations. 
#include <xmmintrin.h>
#endif

// RAII guard: the constructor captures the current floating-point environment
// and installs the SDK's preferred state; the destructor restores the capture.
physx::PxFPUGuard::PxFPUGuard()
{
#if defined(__CYGWIN__)
#pragma message "FPUGuard::FPUGuard() is not implemented"
#elif PX_OSX
	// save the current SSE control/status register
	mControlWords[0] = _mm_getcsr();
	// set default (disable exceptions: _MM_MASK_MASK) and FTZ (_MM_FLUSH_ZERO_ON), DAZ (_MM_DENORMALS_ZERO_ON: (1<<6))
	_mm_setcsr(_MM_MASK_MASK | _MM_FLUSH_ZERO_ON | (1 << 6));
#elif defined(__EMSCRIPTEN__)
	// not supported
#else
	// mControlWords must be big enough to hold a whole fenv_t snapshot
	PX_COMPILE_TIME_ASSERT(sizeof(fenv_t) <= sizeof(mControlWords));

	fegetenv(reinterpret_cast<fenv_t*>(mControlWords));
	fesetenv(FE_DFL_ENV);

#if PX_LINUX
	// need to explicitly disable exceptions because fesetenv does not modify
	// the sse control word on 32bit linux (64bit is fine, but do it here just be sure)
	fedisableexcept(FE_ALL_EXCEPT);
#endif
#endif
}

physx::PxFPUGuard::~PxFPUGuard()
{
#if defined(__CYGWIN__)
#pragma message "PxFPUGuard::~PxFPUGuard() is not implemented"
#elif PX_OSX
	// restore control word and clear exception flags
	// (setting exception state flags cause exceptions on the first following fp operation)
	_mm_setcsr(mControlWords[0] & ~_MM_EXCEPT_MASK);
#elif defined(__EMSCRIPTEN__)
	// not supported
#else
	// restore the environment captured in the constructor
	fesetenv(reinterpret_cast<fenv_t*>(mControlWords));
#endif
}

// Turn on trapping for invalid/div-by-zero/overflow FP operations
// (Linux and OSX only; a no-op elsewhere).
PX_FOUNDATION_API void physx::PxEnableFPExceptions()
{
#if PX_LINUX && !defined(__EMSCRIPTEN__)
	feclearexcept(FE_ALL_EXCEPT);
	feenableexcept(FE_INVALID | FE_DIVBYZERO | FE_OVERFLOW);
#elif PX_OSX
	// clear any pending exceptions
	// (setting exception state flags cause exceptions on the first following fp operation)
	uint32_t control = _mm_getcsr() & ~_MM_EXCEPT_MASK;

	// enable all fp exceptions except inexact and underflow (common, benign)
	// note: denorm has to be disabled as well because underflow can create denorms
	_mm_setcsr((control & ~_MM_MASK_MASK) | _MM_MASK_INEXACT | _MM_MASK_UNDERFLOW | _MM_MASK_DENORM);
#endif
}

// Mask all FP exceptions again (inverse of PxEnableFPExceptions).
PX_FOUNDATION_API void physx::PxDisableFPExceptions()
{
#if PX_LINUX && !defined(__EMSCRIPTEN__)
	fedisableexcept(FE_ALL_EXCEPT);
#elif PX_OSX
	// clear any pending exceptions
	// (setting exception state flags cause exceptions on the first following fp operation)
	uint32_t control = _mm_getcsr() & ~_MM_EXCEPT_MASK;
	_mm_setcsr(control | _MM_MASK_MASK);
#endif
}
4,152
C++
37.453703
116
0.740848
NVIDIA-Omniverse/PhysX/physx/source/foundation/unix/FdUnixSync.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "foundation/PxAssert.h" #include "foundation/PxUserAllocated.h" #include "foundation/PxSync.h" #include <errno.h> #include <stdio.h> #include <pthread.h> #include <time.h> #include <sys/time.h> namespace physx { namespace { class SyncImpl { public: pthread_mutex_t mutex; pthread_cond_t cond; volatile int setCounter; volatile bool is_set; }; SyncImpl* getSync(PxSyncImpl* impl) { return reinterpret_cast<SyncImpl*>(impl); } } uint32_t PxSyncImpl::getSize() { return sizeof(SyncImpl); } struct PxUnixScopeLock { PxUnixScopeLock(pthread_mutex_t& m) : mMutex(m) { pthread_mutex_lock(&mMutex); } ~PxUnixScopeLock() { pthread_mutex_unlock(&mMutex); } private: pthread_mutex_t& mMutex; }; PxSyncImpl::PxSyncImpl() { int status = pthread_mutex_init(&getSync(this)->mutex, 0); PX_ASSERT(!status); status = pthread_cond_init(&getSync(this)->cond, 0); PX_ASSERT(!status); PX_UNUSED(status); getSync(this)->is_set = false; getSync(this)->setCounter = 0; } PxSyncImpl::~PxSyncImpl() { pthread_cond_destroy(&getSync(this)->cond); pthread_mutex_destroy(&getSync(this)->mutex); } void PxSyncImpl::reset() { PxUnixScopeLock lock(getSync(this)->mutex); getSync(this)->is_set = false; } void PxSyncImpl::set() { PxUnixScopeLock lock(getSync(this)->mutex); if(!getSync(this)->is_set) { getSync(this)->is_set = true; getSync(this)->setCounter++; pthread_cond_broadcast(&getSync(this)->cond); } } bool PxSyncImpl::wait(uint32_t ms) { PxUnixScopeLock lock(getSync(this)->mutex); int lastSetCounter = getSync(this)->setCounter; if(!getSync(this)->is_set) { if(ms == uint32_t(-1)) { // have to loop here and check is_set since pthread_cond_wait can return successfully // even if it was not signaled by pthread_cond_broadcast (OS efficiency design decision) int status = 0; while(!status && !getSync(this)->is_set && (lastSetCounter == getSync(this)->setCounter)) status = pthread_cond_wait(&getSync(this)->cond, &getSync(this)->mutex); PX_ASSERT((!status && getSync(this)->is_set) || (lastSetCounter != 
getSync(this)->setCounter)); } else { timespec ts; timeval tp; gettimeofday(&tp, NULL); uint32_t sec = ms / 1000; uint32_t usec = (ms - 1000 * sec) * 1000; // sschirm: taking into account that us might accumulate to a second // otherwise the pthread_cond_timedwait complains on osx. usec = tp.tv_usec + usec; uint32_t div_sec = usec / 1000000; uint32_t rem_usec = usec - div_sec * 1000000; ts.tv_sec = tp.tv_sec + sec + div_sec; ts.tv_nsec = rem_usec * 1000; // have to loop here and check is_set since pthread_cond_timedwait can return successfully // even if it was not signaled by pthread_cond_broadcast (OS efficiency design decision) int status = 0; while(!status && !getSync(this)->is_set && (lastSetCounter == getSync(this)->setCounter)) status = pthread_cond_timedwait(&getSync(this)->cond, &getSync(this)->mutex, &ts); PX_ASSERT((!status && getSync(this)->is_set) || (status == ETIMEDOUT) || (lastSetCounter != getSync(this)->setCounter)); } } return getSync(this)->is_set || (lastSetCounter != getSync(this)->setCounter); } } // namespace physx
4,906
C++
29.66875
98
0.710355
NVIDIA-Omniverse/PhysX/physx/source/foundation/unix/FdUnixThread.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "foundation/PxAssert.h" #include "foundation/PxErrorCallback.h" #include "foundation/PxAtomic.h" #include "foundation/PxThread.h" #include <math.h> #if !PX_APPLE_FAMILY && !defined(__CYGWIN__) && !PX_EMSCRIPTEN #include <bits/local_lim.h> // PTHREAD_STACK_MIN #endif #include <stdio.h> #include <pthread.h> #include <unistd.h> #include <sys/syscall.h> #if !PX_APPLE_FAMILY && !PX_EMSCRIPTEN #include <asm/unistd.h> #include <sys/resource.h> #endif #if PX_APPLE_FAMILY #include <sys/types.h> #include <sys/sysctl.h> #include <TargetConditionals.h> #include <pthread.h> #endif #define PxSpinLockPause() asm("nop") namespace physx { namespace { typedef enum { ePxThreadNotStarted, ePxThreadStarted, ePxThreadStopped } PxThreadState; class ThreadImpl { public: PxThreadImpl::ExecuteFn fn; void* arg; volatile int32_t quitNow; volatile int32_t threadStarted; volatile int32_t state; pthread_t thread; pid_t tid; uint32_t affinityMask; const char* name; }; ThreadImpl* getThread(PxThreadImpl* impl) { return reinterpret_cast<ThreadImpl*>(impl); } static void setTid(ThreadImpl& threadImpl) { // query TID // AM: TODO: neither of the below are implemented #if PX_APPLE_FAMILY threadImpl.tid = syscall(SYS_gettid); #elif PX_EMSCRIPTEN threadImpl.tid = pthread_self(); #else threadImpl.tid = syscall(__NR_gettid); #endif // notify/unblock parent thread PxAtomicCompareExchange(&(threadImpl.threadStarted), 1, 0); } void* PxThreadStart(void* arg) { ThreadImpl* impl = getThread(reinterpret_cast<PxThreadImpl*>(arg)); impl->state = ePxThreadStarted; // run setTid in thread's context setTid(*impl); // then run either the passed in function or execute from the derived class (Runnable). 
if(impl->fn) (*impl->fn)(impl->arg); else if(impl->arg) (reinterpret_cast<PxRunnable*>(impl->arg))->execute(); return 0; } } uint32_t PxThreadImpl::getSize() { return sizeof(ThreadImpl); } PxThreadImpl::Id PxThreadImpl::getId() { return Id(pthread_self()); } PxThreadImpl::PxThreadImpl() { getThread(this)->thread = 0; getThread(this)->tid = 0; getThread(this)->state = ePxThreadNotStarted; getThread(this)->quitNow = 0; getThread(this)->threadStarted = 0; getThread(this)->fn = NULL; getThread(this)->arg = NULL; getThread(this)->affinityMask = 0; getThread(this)->name = "set my name before starting me"; } PxThreadImpl::PxThreadImpl(PxThreadImpl::ExecuteFn fn, void* arg, const char* name) { getThread(this)->thread = 0; getThread(this)->tid = 0; getThread(this)->state = ePxThreadNotStarted; getThread(this)->quitNow = 0; getThread(this)->threadStarted = 0; getThread(this)->fn = fn; getThread(this)->arg = arg; getThread(this)->affinityMask = 0; getThread(this)->name = name; start(0, NULL); } PxThreadImpl::~PxThreadImpl() { if(getThread(this)->state == ePxThreadStarted) kill(); } void PxThreadImpl::start(uint32_t stackSize, PxRunnable* runnable) { if(getThread(this)->state != ePxThreadNotStarted) return; if(stackSize == 0) stackSize = getDefaultStackSize(); #if defined(PTHREAD_STACK_MIN) if(stackSize < PTHREAD_STACK_MIN) { PxGetFoundation().error(PxErrorCode::eDEBUG_WARNING, __FILE__, __LINE__, "PxThreadImpl::start(): stack size was set below PTHREAD_STACK_MIN"); stackSize = PTHREAD_STACK_MIN; } #endif if(runnable && !getThread(this)->arg && !getThread(this)->fn) getThread(this)->arg = runnable; pthread_attr_t attr; int status = pthread_attr_init(&attr); PX_ASSERT(!status); PX_UNUSED(status); status = pthread_attr_setstacksize(&attr, stackSize); PX_ASSERT(!status); status = pthread_create(&getThread(this)->thread, &attr, PxThreadStart, this); PX_ASSERT(!status); // wait for thread to startup and write out TID // otherwise TID dependent calls like setAffinity will fail. 
while(PxAtomicCompareExchange(&(getThread(this)->threadStarted), 1, 1) == 0) yield(); // here we are sure that getThread(this)->state >= ePxThreadStarted status = pthread_attr_destroy(&attr); PX_ASSERT(!status); // apply stored affinity mask if(getThread(this)->affinityMask) setAffinityMask(getThread(this)->affinityMask); if (getThread(this)->name) setName(getThread(this)->name); } void PxThreadImpl::signalQuit() { PxAtomicIncrement(&(getThread(this)->quitNow)); } bool PxThreadImpl::waitForQuit() { if(getThread(this)->state == ePxThreadNotStarted) return false; // works also with a stopped/exited thread if the handle is still valid pthread_join(getThread(this)->thread, NULL); getThread(this)->state = ePxThreadStopped; return true; } bool PxThreadImpl::quitIsSignalled() { return PxAtomicCompareExchange(&(getThread(this)->quitNow), 0, 0) != 0; } #if defined(PX_GCC_FAMILY) __attribute__((noreturn)) #endif void PxThreadImpl::quit() { getThread(this)->state = ePxThreadStopped; pthread_exit(0); } void PxThreadImpl::kill() { if(getThread(this)->state == ePxThreadStarted) pthread_cancel(getThread(this)->thread); getThread(this)->state = ePxThreadStopped; } void PxThreadImpl::sleep(uint32_t ms) { timespec sleepTime; uint32_t remainder = ms % 1000; sleepTime.tv_sec = ms - remainder; sleepTime.tv_nsec = remainder * 1000000L; while(nanosleep(&sleepTime, &sleepTime) == -1) continue; } void PxThreadImpl::yield() { sched_yield(); } void PxThreadImpl::yieldProcessor() { #if (PX_ARM || PX_A64) __asm__ __volatile__("yield"); #else __asm__ __volatile__("pause"); #endif } uint32_t PxThreadImpl::setAffinityMask(uint32_t mask) { // Same as windows impl if mask is zero if(!mask) return 0; getThread(this)->affinityMask = mask; uint64_t prevMask = 0; if(getThread(this)->state == ePxThreadStarted) { #if PX_EMSCRIPTEN // not supported #elif !PX_APPLE_FAMILY // Apple doesn't support syscall with getaffinity and setaffinity int32_t errGet = syscall(__NR_sched_getaffinity, getThread(this)->tid, 
sizeof(prevMask), &prevMask); if(errGet < 0) return 0; int32_t errSet = syscall(__NR_sched_setaffinity, getThread(this)->tid, sizeof(mask), &mask); if(errSet != 0) return 0; #endif } return uint32_t(prevMask); } void PxThreadImpl::setName(const char* name) { getThread(this)->name = name; if (getThread(this)->state == ePxThreadStarted) { // not implemented because most unix APIs expect setName() // to be called from the thread's context. Example see next comment: // this works only with the current thread and can rename // the main process if used in the wrong context: // prctl(PR_SET_NAME, reinterpret_cast<unsigned long>(name) ,0,0,0); PX_UNUSED(name); } } #if !PX_APPLE_FAMILY static PxThreadPriority::Enum convertPriorityFromLinux(uint32_t inPrio, int policy) { PX_COMPILE_TIME_ASSERT(PxThreadPriority::eLOW > PxThreadPriority::eHIGH); PX_COMPILE_TIME_ASSERT(PxThreadPriority::eHIGH == 0); int maxL = sched_get_priority_max(policy); int minL = sched_get_priority_min(policy); int rangeL = maxL - minL; int rangeNv = PxThreadPriority::eLOW - PxThreadPriority::eHIGH; // case for default scheduler policy if(rangeL == 0) return PxThreadPriority::eNORMAL; float floatPrio = (float(maxL - inPrio) * float(rangeNv)) / float(rangeL); return PxThreadPriority::Enum(int(roundf(floatPrio))); } static int convertPriorityToLinux(PxThreadPriority::Enum inPrio, int policy) { int maxL = sched_get_priority_max(policy); int minL = sched_get_priority_min(policy); int rangeL = maxL - minL; int rangeNv = PxThreadPriority::eLOW - PxThreadPriority::eHIGH; // case for default scheduler policy if(rangeL == 0) return 0; float floatPrio = (float(PxThreadPriority::eLOW - inPrio) * float(rangeL)) / float(rangeNv); return minL + int(roundf(floatPrio)); } #endif void PxThreadImpl::setPriority(PxThreadPriority::Enum val) { PX_UNUSED(val); #if !PX_APPLE_FAMILY int policy; sched_param s_param; pthread_getschedparam(getThread(this)->thread, &policy, &s_param); s_param.sched_priority = 
convertPriorityToLinux(val, policy); pthread_setschedparam(getThread(this)->thread, policy, &s_param); #endif } PxThreadPriority::Enum PxThreadImpl::getPriority(Id pthread) { PX_UNUSED(pthread); #if !PX_APPLE_FAMILY int policy; sched_param s_param; int ret = pthread_getschedparam(pthread_t(pthread), &policy, &s_param); if(ret == 0) return convertPriorityFromLinux(s_param.sched_priority, policy); else return PxThreadPriority::eNORMAL; #else return PxThreadPriority::eNORMAL; #endif } uint32_t PxThreadImpl::getNbPhysicalCores() { #if PX_APPLE_FAMILY int count; size_t size = sizeof(count); return sysctlbyname("hw.physicalcpu", &count, &size, NULL, 0) ? 0 : count; #else // Linux exposes CPU topology using /sys/devices/system/cpu // https://www.kernel.org/doc/Documentation/cputopology.txt if(FILE* f = fopen("/sys/devices/system/cpu/possible", "r")) { int minIndex, maxIndex; int n = fscanf(f, "%d-%d", &minIndex, &maxIndex); fclose(f); if(n == 2) return (maxIndex - minIndex) + 1; else if(n == 1) return minIndex + 1; } // For non-Linux kernels this fallback is possibly the best we can do // but will report logical (hyper-threaded) counts int n = sysconf(_SC_NPROCESSORS_CONF); if(n < 0) return 0; else return n; #endif } PxU32 PxTlsAlloc() { pthread_key_t key; int status = pthread_key_create(&key, NULL); PX_ASSERT(!status); PX_UNUSED(status); return PxU32(key); } void PxTlsFree(PxU32 index) { int status = pthread_key_delete(pthread_key_t(index)); PX_ASSERT(!status); PX_UNUSED(status); } void* PxTlsGet(PxU32 index) { return reinterpret_cast<void*>(pthread_getspecific(pthread_key_t(index))); } size_t PxTlsGetValue(PxU32 index) { return reinterpret_cast<size_t>(pthread_getspecific(pthread_key_t(index))); } PxU32 PxTlsSet(PxU32 index, void* value) { int status = pthread_setspecific(pthread_key_t(index), value); PX_ASSERT(!status); return !status; } PxU32 PxTlsSetValue(PxU32 index, size_t value) { int status = pthread_setspecific(pthread_key_t(index), 
reinterpret_cast<void*>(value)); PX_ASSERT(!status); return !status; } // DM: On Linux x86-32, without implementation-specific restrictions // the default stack size for a new thread should be 2 megabytes (kernel.org). // NOTE: take care of this value on other architectures! PxU32 PxThreadImpl::getDefaultStackSize() { return 1 << 21; } } // namespace physx
12,042
C++
24.788009
102
0.72189
NVIDIA-Omniverse/PhysX/physx/source/foundation/unix/FdUnixMutex.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "foundation/PxAssert.h" #include "foundation/PxErrorCallback.h" #include "foundation/PxUserAllocated.h" #include "foundation/PxMutex.h" #include "foundation/PxAtomic.h" #include "foundation/PxThread.h" #include <pthread.h> namespace physx { #if PX_LINUX #include <sched.h> static int gMutexProtocol = PTHREAD_PRIO_INHERIT; PX_FORCE_INLINE bool isLegalProtocol(const int mutexProtocol) { return ( (PTHREAD_PRIO_NONE == mutexProtocol) || (PTHREAD_PRIO_INHERIT == mutexProtocol) || ((PTHREAD_PRIO_PROTECT == mutexProtocol) && ((sched_getscheduler(0) == SCHED_FIFO) || (sched_getscheduler(0) == SCHED_RR))) ); } bool PxSetMutexProtocol(const int mutexProtocol) { if(isLegalProtocol(mutexProtocol)) { gMutexProtocol = mutexProtocol; return true; } return false; } int PxGetMutexProtocol() { return gMutexProtocol; } #endif //PX_LINUX namespace { struct MutexUnixImpl { pthread_mutex_t lock; PxThread::Id owner; }; MutexUnixImpl* getMutex(PxMutexImpl* impl) { return reinterpret_cast<MutexUnixImpl*>(impl); } } PxMutexImpl::PxMutexImpl() { pthread_mutexattr_t attr; pthread_mutexattr_init(&attr); pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE); #if PX_LINUX pthread_mutexattr_setprotocol(&attr, gMutexProtocol); pthread_mutexattr_setprioceiling(&attr, 0); #endif pthread_mutex_init(&getMutex(this)->lock, &attr); pthread_mutexattr_destroy(&attr); } PxMutexImpl::~PxMutexImpl() { pthread_mutex_destroy(&getMutex(this)->lock); } void PxMutexImpl::lock() { int err = pthread_mutex_lock(&getMutex(this)->lock); PX_ASSERT(!err); PX_UNUSED(err); #if PX_DEBUG getMutex(this)->owner = PxThread::getId(); #endif } bool PxMutexImpl::trylock() { bool success = !pthread_mutex_trylock(&getMutex(this)->lock); #if PX_DEBUG if(success) getMutex(this)->owner = PxThread::getId(); #endif return success; } void PxMutexImpl::unlock() { #if PX_DEBUG if(getMutex(this)->owner != PxThread::getId()) { PxGetFoundation().error(PxErrorCode::eINVALID_OPERATION, __FILE__, __LINE__, "Mutex must be unlocked 
only by thread that has already acquired lock"); return; } #endif int err = pthread_mutex_unlock(&getMutex(this)->lock); PX_ASSERT(!err); PX_UNUSED(err); } uint32_t PxMutexImpl::getSize() { return sizeof(MutexUnixImpl); } class ReadWriteLockImpl { public: PxMutex mutex; volatile int readerCounter; }; PxReadWriteLock::PxReadWriteLock() { mImpl = reinterpret_cast<ReadWriteLockImpl*>(PX_ALLOC(sizeof(ReadWriteLockImpl), "ReadWriteLockImpl")); PX_PLACEMENT_NEW(mImpl, ReadWriteLockImpl); mImpl->readerCounter = 0; } PxReadWriteLock::~PxReadWriteLock() { mImpl->~ReadWriteLockImpl(); PX_FREE(mImpl); } void PxReadWriteLock::lockReader(bool takeLock) { if(takeLock) mImpl->mutex.lock(); PxAtomicIncrement(&mImpl->readerCounter); if(takeLock) mImpl->mutex.unlock(); } void PxReadWriteLock::lockWriter() { mImpl->mutex.lock(); // spin lock until no readers while(mImpl->readerCounter); } void PxReadWriteLock::unlockReader() { PxAtomicDecrement(&mImpl->readerCounter); } void PxReadWriteLock::unlockWriter() { mImpl->mutex.unlock(); } } // namespace physx
4,878
C++
23.395
126
0.736367
NVIDIA-Omniverse/PhysX/physx/source/foundation/unix/FdUnixAtomic.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #include "foundation/PxAtomic.h" #if ! 
PX_EMSCRIPTEN #define PAUSE() asm("nop") #else #define PAUSE() #endif namespace physx { void* PxAtomicCompareExchangePointer(volatile void** dest, void* exch, void* comp) { return __sync_val_compare_and_swap(const_cast<void**>(dest), comp, exch); } PxI32 PxAtomicCompareExchange(volatile PxI32* dest, PxI32 exch, PxI32 comp) { return __sync_val_compare_and_swap(dest, comp, exch); } PxI32 PxAtomicIncrement(volatile PxI32* val) { return __sync_add_and_fetch(val, 1); } PxI32 PxAtomicDecrement(volatile PxI32* val) { return __sync_sub_and_fetch(val, 1); } PxI32 PxAtomicAdd(volatile PxI32* val, PxI32 delta) { return __sync_add_and_fetch(val, delta); } PxI32 PxAtomicMax(volatile PxI32* val, PxI32 val2) { PxI32 oldVal, newVal; do { PAUSE(); oldVal = *val; if(val2 > oldVal) newVal = val2; else newVal = oldVal; } while(PxAtomicCompareExchange(val, newVal, oldVal) != oldVal); return *val; } PxI32 PxAtomicExchange(volatile PxI32* val, PxI32 val2) { PxI32 newVal, oldVal; do { PAUSE(); oldVal = *val; newVal = val2; } while(PxAtomicCompareExchange(val, newVal, oldVal) != oldVal); return oldVal; } } // namespace physx
2,904
C++
28.343434
82
0.737948
NVIDIA-Omniverse/PhysX/physx/source/foundation/unix/FdUnixSList.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "foundation/PxAllocator.h" #include "foundation/PxAtomic.h" #include "foundation/PxSList.h" #include "foundation/PxThread.h" #include <pthread.h> #if PX_EMSCRIPTEN #define USE_MUTEX #endif namespace physx { namespace { #if defined(USE_MUTEX) class ScopedMutexLock { pthread_mutex_t& mMutex; public: PX_INLINE ScopedMutexLock(pthread_mutex_t& mutex) : mMutex(mutex) { pthread_mutex_lock(&mMutex); } PX_INLINE ~ScopedMutexLock() { pthread_mutex_unlock(&mMutex); } }; typedef ScopedMutexLock ScopedLock; #else struct ScopedSpinLock { PX_FORCE_INLINE ScopedSpinLock(volatile int32_t& lock) : mLock(lock) { while(__sync_lock_test_and_set(&mLock, 1)) { // spinning without atomics is usually // causing less bus traffic. -> only one // CPU is modifying the cache line. while(lock) PxSpinLockPause(); } } PX_FORCE_INLINE ~ScopedSpinLock() { __sync_lock_release(&mLock); } private: volatile int32_t& mLock; }; typedef ScopedSpinLock ScopedLock; #endif struct SListDetail { PxSListEntry* head; #if defined(USE_MUTEX) pthread_mutex_t lock; #else volatile int32_t lock; #endif }; template <typename T> SListDetail* getDetail(T* impl) { return reinterpret_cast<SListDetail*>(impl); } } PxSListImpl::PxSListImpl() { getDetail(this)->head = NULL; #if defined(USE_MUTEX) pthread_mutex_init(&getDetail(this)->lock, NULL); #else getDetail(this)->lock = 0; // 0 == unlocked #endif } PxSListImpl::~PxSListImpl() { #if defined(USE_MUTEX) pthread_mutex_destroy(&getDetail(this)->lock); #endif } void PxSListImpl::push(PxSListEntry* entry) { ScopedLock lock(getDetail(this)->lock); entry->mNext = getDetail(this)->head; getDetail(this)->head = entry; } PxSListEntry* PxSListImpl::pop() { ScopedLock lock(getDetail(this)->lock); PxSListEntry* result = getDetail(this)->head; if(result != NULL) getDetail(this)->head = result->mNext; return result; } PxSListEntry* PxSListImpl::flush() { ScopedLock lock(getDetail(this)->lock); PxSListEntry* result = getDetail(this)->head; getDetail(this)->head = NULL; return 
result; } uint32_t PxSListImpl::getSize() { return sizeof(SListDetail); } } // namespace physx
3,873
C++
24.320261
74
0.73638
NVIDIA-Omniverse/PhysX/physx/source/foundation/unix/FdUnixSocket.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "foundation/PxIntrinsics.h"
#include "foundation/PxMathIntrinsics.h"
#include "foundation/PxSocket.h"

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/poll.h>
#include <sys/time.h>
#include <netdb.h>
#include <arpa/inet.h>
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>

// POSIX sockets use -1 as the invalid descriptor; mirror the Winsock name
// so the shared code below reads the same on both platforms.
#define INVALID_SOCKET -1

#ifndef SOMAXCONN
#define SOMAXCONN 5
#endif

namespace physx
{

const uint32_t PxSocket::DEFAULT_BUFFER_SIZE = 32768;

// Unbuffered TCP socket implementation on top of BSD sockets.
// Every write()/read() goes straight to send()/recv(); BufferedSocketImpl
// below layers a fixed-size staging buffer on top of this class.
class SocketImpl
{
  public:
	SocketImpl(bool isBlocking);
	virtual ~SocketImpl();

	// Resolve `host`, then attempt a TCP connect with a `timeout` (ms) budget.
	bool connect(const char* host, uint16_t port, uint32_t timeout);
	// Bind + listen on `port`; puts the object into listen mode.
	bool listen(uint16_t port);
	// Accept one pending connection; `block` controls whether accept() waits.
	bool accept(bool block);
	// Close both the data and listen sockets and reset all state.
	void disconnect();

	void setBlocking(bool blocking);

	virtual uint32_t write(const uint8_t* data, uint32_t length);
	virtual bool flush();
	uint32_t read(uint8_t* data, uint32_t length);

	PX_FORCE_INLINE bool isBlocking() const
	{
		return mIsBlocking;
	}
	PX_FORCE_INLINE bool isConnected() const
	{
		return mIsConnected;
	}
	PX_FORCE_INLINE const char* getHost() const
	{
		return mHost;
	}
	PX_FORCE_INLINE uint16_t getPort() const
	{
		return mPort;
	}

  protected:
	// True when a failed send/recv is only a would-block condition on a
	// non-blocking socket (i.e. not a real error worth disconnecting for).
	bool nonBlockingTimeout() const;

	int32_t mSocket;       // data socket, INVALID_SOCKET when closed
	int32_t mListenSocket; // listen socket, INVALID_SOCKET when not listening
	const char* mHost;     // NOTE(review): stores the caller's pointer, not a copy — caller must keep it alive
	uint16_t mPort;
	bool mIsConnected;
	bool mIsBlocking;
	bool mListenMode;
};

void socketSetBlockingInternal(int32_t socket, bool blocking);

SocketImpl::SocketImpl(bool isBlocking)
: mSocket(INVALID_SOCKET)
, mListenSocket(INVALID_SOCKET)
, mHost(NULL)
, mPort(0)
, mIsConnected(false)
, mIsBlocking(isBlocking)
, mListenMode(false)
{
}

SocketImpl::~SocketImpl()
{
}

bool SocketImpl::connect(const char* host, uint16_t port, uint32_t timeout)
{
	sockaddr_in socketAddress;
	intrinsics::memSet(&socketAddress, 0, sizeof(sockaddr_in));
	socketAddress.sin_family = AF_INET;
	socketAddress.sin_port = htons(port);

	// get host: try a name lookup first, then fall back to treating `host`
	// as a dotted-quad address string.
	hostent* hp = gethostbyname(host);
	if(!hp)
	{
		in_addr a;
		a.s_addr = inet_addr(host);
		hp = gethostbyaddr(reinterpret_cast<const char*>(&a), sizeof(in_addr), AF_INET);
		if(!hp)
			return false;
	}
	intrinsics::memCopy(&socketAddress.sin_addr, hp->h_addr_list[0], hp->h_length);

	// connect
	mSocket = socket(AF_INET, SOCK_STREAM, 0);
	if(mSocket == INVALID_SOCKET)
		return false;

	// Temporarily non-blocking so the connect itself can be bounded by
	// `timeout` via poll() below; the user-requested mode is restored after.
	socketSetBlockingInternal(mSocket, false);

	int connectRet = ::connect(mSocket, reinterpret_cast<sockaddr*>(&socketAddress), sizeof(socketAddress));
	if(connectRet < 0)
	{
		// EINPROGRESS is the expected result for a non-blocking connect;
		// anything else is a hard failure.
		if(errno != EINPROGRESS)
		{
			disconnect();
			return false;
		}

		// Setup poll function call to monitor the connect call.
		// By querying for POLLOUT we're checking if the socket is
		// ready for writing.
		pollfd pfd;
		pfd.fd = mSocket;
		pfd.events = POLLOUT;
		const int pollResult = ::poll(&pfd, 1, timeout /*milliseconds*/);

		const bool pollTimeout = (pollResult == 0);
		const bool pollError = (pollResult < 0); // an error inside poll happened. Can check error with `errno` variable.
		if(pollTimeout || pollError)
		{
			disconnect();
			return false;
		}
		else
		{
			PX_ASSERT(pollResult == 1);
			// check that event was precisely POLLOUT and not anything else (e.g., errors, hang-up)
			bool test = (pfd.revents & POLLOUT) && !(pfd.revents & (~POLLOUT));
			if(!test)
			{
				disconnect();
				return false;
			}
		}

		// check if we are really connected, above code seems to return
		// true if host is a unix machine even if the connection was
		// not accepted.
		// zero-length recv: probes the socket's error state without
		// consuming any data.
		char buffer;
		if(recv(mSocket, &buffer, 0, 0) < 0)
		{
			if(errno != EWOULDBLOCK)
			{
				disconnect();
				return false;
			}
		}
	}

	// restore the blocking mode the user asked for
	socketSetBlockingInternal(mSocket, mIsBlocking);

#if PX_APPLE_FAMILY
	// Apple platforms have no MSG_NOSIGNAL; suppress SIGPIPE per-socket instead.
	int noSigPipe = 1;
	setsockopt(mSocket, SOL_SOCKET, SO_NOSIGPIPE, &noSigPipe, sizeof(int));
#endif

	mIsConnected = true;
	mPort = port;
	mHost = host;
	return true;
}

bool SocketImpl::listen(uint16_t port)
{
	mListenSocket = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
	if(mListenSocket == INVALID_SOCKET)
		return false;

	// enable address reuse: "Address already in use" error message
	int yes = 1;
	if(setsockopt(mListenSocket, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(int)) == -1)
		return false;

	mListenMode = true;

	sockaddr_in addr;
	addr.sin_family = AF_INET;
	addr.sin_port = htons(port);
	addr.sin_addr.s_addr = INADDR_ANY;
	intrinsics::memSet(addr.sin_zero, '\0', sizeof addr.sin_zero);

	return bind(mListenSocket, reinterpret_cast<sockaddr*>(&addr), sizeof(addr)) != -1 &&
	       ::listen(mListenSocket, SOMAXCONN) != -1;
}

bool SocketImpl::accept(bool block)
{
	// only valid after listen() and before a connection is established
	if(mIsConnected || !mListenMode)
		return false;

	// set the listen socket to be non-blocking.
	socketSetBlockingInternal(mListenSocket, block);
	int32_t clientSocket = ::accept(mListenSocket, 0, 0);
	if(clientSocket == INVALID_SOCKET)
		return false;

	mSocket = clientSocket;
	mIsConnected = true;
	socketSetBlockingInternal(mSocket, mIsBlocking); // force the mode to whatever the user set

	return mIsConnected;
}

void SocketImpl::disconnect()
{
	if(mListenSocket != INVALID_SOCKET)
	{
		close(mListenSocket);
		mListenSocket = INVALID_SOCKET;
	}
	if(mSocket != INVALID_SOCKET)
	{
		if(mIsConnected)
		{
			// switch back to blocking so shutdown() can complete cleanly
			socketSetBlockingInternal(mSocket, true);
			shutdown(mSocket, SHUT_RDWR);
		}
		close(mSocket);
		mSocket = INVALID_SOCKET;
	}
	mIsConnected = false;
	mListenMode = false;
	mPort = 0;
	mHost = NULL;
}

bool SocketImpl::nonBlockingTimeout() const
{
	// NOTE(review): checks EWOULDBLOCK only; on platforms where EAGAIN differs
	// from EWOULDBLOCK this may treat EAGAIN as a hard error — confirm targets.
	return !mIsBlocking && errno == EWOULDBLOCK;
}

// Toggle O_NONBLOCK on `socket` while preserving its other fcntl flags.
void socketSetBlockingInternal(int32_t socket, bool blocking)
{
	int mode = fcntl(socket, F_GETFL, 0);
	if(!blocking)
		mode |= O_NONBLOCK;
	else
		mode &= ~O_NONBLOCK;
	fcntl(socket, F_SETFL, mode);
}

// should be cross-platform from here down

void SocketImpl::setBlocking(bool blocking)
{
	if(blocking != mIsBlocking)
	{
		mIsBlocking = blocking;
		if(isConnected())
			socketSetBlockingInternal(mSocket, blocking);
	}
}

bool SocketImpl::flush()
{
	// unbuffered implementation: nothing to flush
	return true;
}

uint32_t SocketImpl::write(const uint8_t* data, uint32_t length)
{
	if(length == 0)
		return 0;

	int sent = send(mSocket, reinterpret_cast<const char*>(data), int32_t(length), 0);

	// a real error (not a would-block on a non-blocking socket) drops the connection
	if(sent <= 0 && !nonBlockingTimeout())
		disconnect();

	return uint32_t(sent > 0 ? sent : 0);
}

uint32_t SocketImpl::read(uint8_t* data, uint32_t length)
{
	if(length == 0)
		return 0;

	int32_t received = recv(mSocket, reinterpret_cast<char*>(data), int32_t(length), 0);

	// a real error (not a would-block on a non-blocking socket) drops the connection
	if(received <= 0 && !nonBlockingTimeout())
		disconnect();

	return uint32_t(received > 0 ? received : 0);
}

// Buffering layer: writes are staged in a DEFAULT_BUFFER_SIZE buffer and
// pushed to the underlying socket only when the buffer fills or flush() runs.
class BufferedSocketImpl : public SocketImpl
{
  public:
	BufferedSocketImpl(bool isBlocking) : SocketImpl(isBlocking), mBufferPos(0)
	{
	}
	virtual ~BufferedSocketImpl()
	{
	}
	bool flush();
	uint32_t write(const uint8_t* data, uint32_t length);

  private:
	uint32_t mBufferPos; // number of staged bytes in mBuffer
	uint8_t mBuffer[PxSocket::DEFAULT_BUFFER_SIZE];
};

bool BufferedSocketImpl::flush()
{
	uint32_t totalBytesWritten = 0;

	// keep pushing until everything staged is out or the connection drops
	while(totalBytesWritten < mBufferPos && mIsConnected)
		totalBytesWritten += int32_t(SocketImpl::write(mBuffer + totalBytesWritten, mBufferPos - totalBytesWritten));

	bool ret = (totalBytesWritten == mBufferPos);
	mBufferPos = 0; // buffer is considered drained even on failure

	return ret;
}

uint32_t BufferedSocketImpl::write(const uint8_t* data, uint32_t length)
{
	uint32_t bytesWritten = 0;

	// fill and drain the staging buffer as many times as needed
	while(mBufferPos + length >= PxSocket::DEFAULT_BUFFER_SIZE)
	{
		uint32_t currentChunk = PxSocket::DEFAULT_BUFFER_SIZE - mBufferPos;
		intrinsics::memCopy(mBuffer + mBufferPos, data + bytesWritten, currentChunk);
		bytesWritten += uint32_t(currentChunk); // for the user, this is consumed even if we fail to shove it down a
		// non-blocking socket

		uint32_t sent = SocketImpl::write(mBuffer, PxSocket::DEFAULT_BUFFER_SIZE);
		mBufferPos = PxSocket::DEFAULT_BUFFER_SIZE - sent;

		if(sent < PxSocket::DEFAULT_BUFFER_SIZE) // non-blocking or error
		{
			// partial send: slide the unsent tail to the front of the buffer
			if(sent) // we can reasonably hope this is rare
				intrinsics::memMove(mBuffer, mBuffer + sent, mBufferPos);

			return bytesWritten;
		}
		length -= currentChunk;
	}

	// remainder fits in the buffer; just stage it
	if(length > 0)
	{
		intrinsics::memCopy(mBuffer + mBufferPos, data + bytesWritten, length);
		bytesWritten += length;
		mBufferPos += length;
	}

	return bytesWritten;
}

PxSocket::PxSocket(bool inIsBuffering, bool isBlocking)
{
	// placement-new into PX_ALLOC storage; released in ~PxSocket via PX_FREE
	if(inIsBuffering)
	{
		void* mem = PX_ALLOC(sizeof(BufferedSocketImpl), "BufferedSocketImpl");
		mImpl = PX_PLACEMENT_NEW(mem, BufferedSocketImpl)(isBlocking);
	}
	else
	{
		void* mem = PX_ALLOC(sizeof(SocketImpl), "SocketImpl");
		mImpl = PX_PLACEMENT_NEW(mem, SocketImpl)(isBlocking);
	}
}

PxSocket::~PxSocket()
{
	mImpl->flush();
	mImpl->disconnect();
	mImpl->~SocketImpl(); // virtual dtor handles the buffered case
	PX_FREE(mImpl);
}

bool PxSocket::connect(const char* host, uint16_t port, uint32_t timeout)
{
	return mImpl->connect(host, port, timeout);
}

bool PxSocket::listen(uint16_t port)
{
	return mImpl->listen(port);
}

bool PxSocket::accept(bool block)
{
	return mImpl->accept(block);
}

void PxSocket::disconnect()
{
	mImpl->disconnect();
}

bool PxSocket::isConnected() const
{
	return mImpl->isConnected();
}

const char* PxSocket::getHost() const
{
	return mImpl->getHost();
}

uint16_t PxSocket::getPort() const
{
	return mImpl->getPort();
}

bool PxSocket::flush()
{
	if(!mImpl->isConnected())
		return false;
	return mImpl->flush();
}

uint32_t PxSocket::write(const uint8_t* data, uint32_t length)
{
	if(!mImpl->isConnected())
		return 0;
	return mImpl->write(data, length);
}

uint32_t PxSocket::read(uint8_t* data, uint32_t length)
{
	if(!mImpl->isConnected())
		return 0;
	return mImpl->read(data, length);
}

void PxSocket::setBlocking(bool blocking)
{
	mImpl->setBlocking(blocking);
}

bool PxSocket::isBlocking() const
{
	return mImpl->isBlocking();
}

} // namespace physx
11,327
C++
22.6
115
0.712369
NVIDIA-Omniverse/PhysX/physx/source/foundation/windows/FdWindowsFPU.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "foundation/PxFPU.h"
#include "float.h"
#include "foundation/PxIntrinsics.h"

// Mask covering every control-word field we save/restore.
// On x64/ARM only the SSE/NEON-relevant fields exist; x86 additionally has
// the x87 infinity-control and precision-control fields.
// Parenthesized so the macro expands safely inside larger expressions
// (e.g. `~_MCW_ALL` would silently miscompute without the parentheses).
#if PX_X64 || PX_ARM || PX_A64
#define _MCW_ALL (_MCW_DN | _MCW_EM | _MCW_RC)
#else
#define _MCW_ALL (_MCW_DN | _MCW_EM | _MCW_IC | _MCW_RC | _MCW_PC)
#endif

// RAII guard: saves the current FP control state on construction, switches to
// the default state plus denormals-flush-to-zero, and restores on destruction.
physx::PxFPUGuard::PxFPUGuard()
{
	// default plus FTZ and DAZ
#if PX_X64 || PX_ARM || PX_A64
	// query current control word state
	_controlfp_s(mControlWords, 0, 0);
	// set both x87 and sse units to default + DAZ
	unsigned int cw;
	_controlfp_s(&cw, _CW_DEFAULT | _DN_FLUSH, _MCW_ALL);
#else
	// query current control word state (x87 and SSE words separately on x86)
	__control87_2(0, 0, mControlWords, mControlWords + 1);
	// set both x87 and sse units to default + DAZ
	unsigned int x87, sse;
	__control87_2(_CW_DEFAULT | _DN_FLUSH, _MCW_ALL, &x87, &sse);
#endif
}

physx::PxFPUGuard::~PxFPUGuard()
{
	// clear pending FP exception flags before changing the mask,
	// otherwise restoring could immediately raise a stale exception
	_clearfp();

#if PX_X64 || PX_ARM || PX_A64
	// reset FP state
	unsigned int cw;
	_controlfp_s(&cw, *mControlWords, _MCW_ALL);
#else
	// reset FP state: restore x87 and SSE words independently
	unsigned int x87, sse;
	__control87_2(mControlWords[0], _MCW_ALL, &x87, 0);
	__control87_2(mControlWords[1], _MCW_ALL, 0, &sse);
#endif
}

void physx::PxEnableFPExceptions()
{
	// clear any pending exceptions
	_clearfp();

	// enable all fp exceptions except inexact and underflow (common, benign)
	_controlfp_s(NULL, uint32_t(~_MCW_EM) | _EM_INEXACT | _EM_UNDERFLOW, _MCW_EM);
}

void physx::PxDisableFPExceptions()
{
	// mask (disable) every FP exception
	_controlfp_s(NULL, _MCW_EM, _MCW_EM);
}
3,121
C++
34.078651
79
0.721243
NVIDIA-Omniverse/PhysX/physx/source/foundation/windows/FdWindowsPrintString.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "foundation/PxString.h"
#include <stdio.h>
#include "foundation/windows/PxWindowsInclude.h"
#include <string.h>
#include <stdarg.h>

/**
\brief Print \p str (plus a trailing newline) to stdout and to the Windows
debugger output window.

\param str NUL-terminated message; may contain '%' characters, which is why
printf-style formatting must be avoided here.
*/
void physx::PxPrintString(const char* str)
{
	puts(str); // do not use printf here, since str can contain multiple % signs that will not be printed

	// mirror the message to the debugger (OutputDebugStringA adds no newline itself)
	OutputDebugStringA(str);
	OutputDebugStringA("\n");
}
2,061
C++
46.953487
102
0.762737
NVIDIA-Omniverse/PhysX/physx/source/foundation/windows/FdWindowsSList.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "foundation/windows/PxWindowsInclude.h"
#include "foundation/PxAllocator.h"
#include "foundation/PxSList.h"

using namespace physx;

// The opaque PxSListImpl storage is reinterpreted as a Win32 SLIST_HEADER;
// getSize() below reports sizeof(SLIST_HEADER) so callers allocate enough room.
// NOTE(review): Win32 interlocked SLists require specially aligned
// header/entry storage — presumably guaranteed by the allocating caller; confirm.
template <typename T>
static PX_FORCE_INLINE SLIST_HEADER* getDetail(T* impl)
{
	return reinterpret_cast<SLIST_HEADER*>(impl);
}

// Lock-free singly-linked list built on the Win32 interlocked SList API.
PxSListImpl::PxSListImpl()
{
	InitializeSListHead(getDetail(this));
}

PxSListImpl::~PxSListImpl()
{
}

// Atomically push `entry` onto the front of the list.
void PxSListImpl::push(PxSListEntry* entry)
{
	InterlockedPushEntrySList(getDetail(this), reinterpret_cast<SLIST_ENTRY*>(entry));
}

// Atomically pop the front entry; returns NULL when the list is empty.
PxSListEntry* PxSListImpl::pop()
{
	return reinterpret_cast<PxSListEntry*>(InterlockedPopEntrySList(getDetail(this)));
}

// Atomically detach the whole chain, leaving the list empty;
// returns the head of the detached chain (NULL if it was empty).
PxSListEntry* PxSListImpl::flush()
{
	return reinterpret_cast<PxSListEntry*>(InterlockedFlushSList(getDetail(this)));
}

// Bytes of storage a PxSListImpl needs (the underlying SLIST_HEADER).
uint32_t PxSListImpl::getSize()
{
	return sizeof(SLIST_HEADER);
}
2,514
C++
35.449275
83
0.768099
NVIDIA-Omniverse/PhysX/physx/source/foundation/windows/FdWindowsMutex.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "foundation/windows/PxWindowsInclude.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxMutex.h"
#include "foundation/PxErrorCallback.h"
#include "foundation/PxThread.h"

namespace physx
{

namespace
{
// Backing storage for the opaque PxMutexImpl: a Win32 critical section plus
// (debug builds only) the id of the thread currently holding the lock.
struct MutexWinImpl
{
	CRITICAL_SECTION mLock;
	PxThread::Id mOwner;
};
}

// Reinterpret the opaque impl pointer as its concrete Windows storage.
static PX_FORCE_INLINE MutexWinImpl* getMutex(PxMutexImpl* impl)
{
	return reinterpret_cast<MutexWinImpl*>(impl);
}

PxMutexImpl::PxMutexImpl()
{
	InitializeCriticalSection(&getMutex(this)->mLock);
	getMutex(this)->mOwner = 0;
}

PxMutexImpl::~PxMutexImpl()
{
	DeleteCriticalSection(&getMutex(this)->mLock);
}

// Acquire the critical section (recursive, per Win32 semantics).
void PxMutexImpl::lock()
{
	EnterCriticalSection(&getMutex(this)->mLock);
#if PX_DEBUG
	// mOwner is only maintained in debug builds, for the unlock() ownership check
	getMutex(this)->mOwner = PxThread::getId();
#endif
}

// Try to acquire without blocking; returns true on success.
bool PxMutexImpl::trylock()
{
	bool success = TryEnterCriticalSection(&getMutex(this)->mLock) != 0;
#if PX_DEBUG
	if(success)
		getMutex(this)->mOwner = PxThread::getId();
#endif
	return success;
}

void PxMutexImpl::unlock()
{
#if PX_DEBUG
	// ensure we are already holding the lock
	// NOTE(review): mOwner is not cleared on release, so this check only
	// catches unlocks from a *different* thread, not double-unlocks — confirm intended.
	if(getMutex(this)->mOwner != PxThread::getId())
	{
		PxGetFoundation().error(PxErrorCode::eINVALID_OPERATION, PX_FL,
		                        "Mutex must be unlocked only by thread that has already acquired lock");
		return;
	}
#endif

	LeaveCriticalSection(&getMutex(this)->mLock);
}

// Bytes of storage a PxMutexImpl needs.
uint32_t PxMutexImpl::getSize()
{
	return sizeof(MutexWinImpl);
}

// Writer-preference read/write lock: readers are counted atomically while
// holding the mutex briefly; a writer holds the mutex and spins until the
// reader count drains.
class ReadWriteLockImpl
{
	PX_NOCOPY(ReadWriteLockImpl)
  public:
	ReadWriteLockImpl()
	{
	}
	PxMutex mutex;
	volatile LONG readerCount; // handle recursive writer locking
};

PxReadWriteLock::PxReadWriteLock()
{
	mImpl = reinterpret_cast<ReadWriteLockImpl*>(PX_ALLOC(sizeof(ReadWriteLockImpl), "ReadWriteLockImpl"));
	PX_PLACEMENT_NEW(mImpl, ReadWriteLockImpl);
	mImpl->readerCount = 0;
}

PxReadWriteLock::~PxReadWriteLock()
{
	mImpl->~ReadWriteLockImpl();
	PX_FREE(mImpl);
}

// Register a reader. `takeLock` lets a thread that already holds the mutex
// (e.g. a writer taking a nested read lock) skip re-acquiring it.
void PxReadWriteLock::lockReader(bool takeLock)
{
	if(takeLock)
		mImpl->mutex.lock();

	InterlockedIncrement(&mImpl->readerCount);

	if(takeLock)
		mImpl->mutex.unlock();
}

// Acquire exclusive (writer) access: hold the mutex to block new readers,
// then wait for in-flight readers to finish.
void PxReadWriteLock::lockWriter()
{
	mImpl->mutex.lock();

	// spin lock until no readers
	// NOTE(review): busy-wait with no pause/yield — acceptable only if reader
	// sections are very short; confirm against usage.
	while(mImpl->readerCount);
}

void PxReadWriteLock::unlockReader()
{
	InterlockedDecrement(&mImpl->readerCount);
}

void PxReadWriteLock::unlockWriter()
{
	mImpl->mutex.unlock();
}

} // namespace physx
4,013
C++
24.896774
138
0.751807
NVIDIA-Omniverse/PhysX/physx/source/foundation/windows/FdWindowsSync.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "foundation/windows/PxWindowsInclude.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxSync.h"

using namespace physx;

// The opaque PxSyncImpl storage holds a single Win32 event HANDLE.
static PX_FORCE_INLINE HANDLE& getSync(PxSyncImpl* impl)
{
	return *reinterpret_cast<HANDLE*>(impl);
}

// Bytes of storage a PxSyncImpl needs.
uint32_t PxSyncImpl::getSize()
{
	return sizeof(HANDLE);
}

PxSyncImpl::PxSyncImpl()
{
	// manual-reset event, initially non-signaled: stays signaled after set()
	// until reset() is called, releasing every waiter
	getSync(this) = CreateEvent(0, true, false, 0);
}

PxSyncImpl::~PxSyncImpl()
{
	CloseHandle(getSync(this));
}

// Return the event to the non-signaled state.
void PxSyncImpl::reset()
{
	ResetEvent(getSync(this));
}

// Signal the event, releasing all current and future waiters until reset().
void PxSyncImpl::set()
{
	SetEvent(getSync(this));
}

/**
\brief Block until the event is signaled or \p milliseconds elapse.

\param milliseconds timeout in ms; uint32_t(-1) (all bits set) waits forever.
\return true if the event was signaled, false on timeout (or wait failure).
*/
bool PxSyncImpl::wait(uint32_t milliseconds)
{
	// compare against an explicit unsigned all-ones value instead of the
	// signed literal -1, avoiding an implicit signed/unsigned conversion
	if(milliseconds == uint32_t(-1))
		milliseconds = INFINITE;
	return WaitForSingleObject(getSync(this), milliseconds) == WAIT_OBJECT_0;
}
2,433
C++
32.342465
74
0.755857
NVIDIA-Omniverse/PhysX/physx/source/foundation/windows/FdWindowsAtomic.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "foundation/windows/PxWindowsInclude.h"
#include "foundation/PxAtomic.h"

namespace physx
{

// Atomically store val2 into *val; returns the previous value.
PxI32 PxAtomicExchange(volatile PxI32* val, PxI32 val2)
{
	return (PxI32)InterlockedExchange((volatile LONG*)val, (LONG)val2);
}

// Atomically: if(*dest == comp) *dest = exch; returns the previous *dest.
PxI32 PxAtomicCompareExchange(volatile PxI32* dest, PxI32 exch, PxI32 comp)
{
	return (PxI32)InterlockedCompareExchange((volatile LONG*)dest, exch, comp);
}

// Pointer-sized compare-and-swap; returns the previous *dest.
void* PxAtomicCompareExchangePointer(volatile void** dest, void* exch, void* comp)
{
	return InterlockedCompareExchangePointer((volatile PVOID*)dest, exch, comp);
}

// Atomically increment; returns the NEW value.
PxI32 PxAtomicIncrement(volatile PxI32* val)
{
	return (PxI32)InterlockedIncrement((volatile LONG*)val);
}

// Atomically decrement; returns the NEW value.
PxI32 PxAtomicDecrement(volatile PxI32* val)
{
	return (PxI32)InterlockedDecrement((volatile LONG*)val);
}

// Atomically add delta; returns the NEW value.
PxI32 PxAtomicAdd(volatile PxI32* val, PxI32 delta)
{
	// InterlockedExchangeAdd performs the add in a single wait-free operation
	// (the previous CAS retry loop could livelock under contention) and
	// returns the OLD value, so add delta once more to report the new one.
	return (PxI32)InterlockedExchangeAdd((volatile LONG*)val, (LONG)delta) + delta;
}

// Atomically raise *val to max(*val, val2); returns the NEW value.
PxI32 PxAtomicMax(volatile PxI32* val, PxI32 val2)
{
	// No single interlocked max primitive exists, so use a CAS retry loop:
	// recompute the candidate from a fresh snapshot until the CAS lands.
	LONG newValue, oldValue;
	do
	{
		oldValue = *val;
		newValue = val2 > oldValue ? val2 : oldValue;
	} while(InterlockedCompareExchange((volatile LONG*)val, newValue, oldValue) != oldValue);

	return newValue;
}

} // namespace physx
3,054
C++
32.944444
90
0.759005
NVIDIA-Omniverse/PhysX/physx/source/foundation/windows/FdWindowsTime.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "foundation/PxTime.h"
#include "foundation/windows/PxWindowsInclude.h"

using namespace physx;

// Raw QueryPerformanceCounter reading (ticks).
static int64_t getTimeTicks()
{
	LARGE_INTEGER a;
	QueryPerformanceCounter(&a);
	return a.QuadPart;
}

// Duration of one performance-counter tick, in seconds.
static double getTickDuration()
{
	LARGE_INTEGER a;
	QueryPerformanceFrequency(&a);
	// use a double literal: the whole computation is double precision,
	// so a 1.0f literal only invited an unnecessary float->double promotion
	return 1.0 / double(a.QuadPart);
}

// Cached at static-init time; QPC frequency is fixed for the session.
static double sTickDuration = getTickDuration();

static const PxCounterFrequencyToTensOfNanos gCounterFreq = PxTime::getCounterFrequency();

const PxCounterFrequencyToTensOfNanos& PxTime::getBootCounterFrequency()
{
	return gCounterFreq;
}

// Conversion factor from performance-counter ticks to tens of nanoseconds.
PxCounterFrequencyToTensOfNanos PxTime::getCounterFrequency()
{
	LARGE_INTEGER freq;
	QueryPerformanceFrequency(&freq);
	return PxCounterFrequencyToTensOfNanos(PxTime::sNumTensOfNanoSecondsInASecond, (uint64_t)freq.QuadPart);
}

uint64_t PxTime::getCurrentCounterValue()
{
	LARGE_INTEGER ticks;
	QueryPerformanceCounter(&ticks);
	return (uint64_t)ticks.QuadPart;
}

PxTime::PxTime() : mTickCount(0)
{
	// prime mTickCount so the first getElapsedSeconds() measures from now
	getElapsedSeconds();
}

// Seconds since the previous call (resets the reference point).
PxTime::Second PxTime::getElapsedSeconds()
{
	int64_t lastTickCount = mTickCount;
	mTickCount = getTimeTicks();
	return (mTickCount - lastTickCount) * sTickDuration;
}

// Seconds since the previous getElapsedSeconds() WITHOUT resetting it.
PxTime::Second PxTime::peekElapsedSeconds()
{
	return (getTimeTicks() - mTickCount) * sTickDuration;
}

// Last recorded reference point, converted to seconds.
PxTime::Second PxTime::getLastTime() const
{
	return mTickCount * sTickDuration;
}
3,051
C++
31.817204
105
0.773517
NVIDIA-Omniverse/PhysX/physx/source/foundation/windows/FdWindowsSocket.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "foundation/PxMathIntrinsics.h"
#include "foundation/windows/PxWindowsInclude.h"
#include "foundation/PxSocket.h"
#include "foundation/PxThread.h"
#include "foundation/PxArray.h"

#include <Winsock2.h>
#pragma comment(lib, "Ws2_32")

namespace physx
{

const uint32_t PxSocket::DEFAULT_BUFFER_SIZE = 32768;

// Windows (Winsock2) implementation of the PxSocket TCP transport.
// One instance is either a client connection (connect) or a listener that
// accepts a single client (listen/accept). Write/read are unbuffered here;
// BufferedSocketImpl below adds client-side write buffering.
class SocketImpl
{
  public:
	SocketImpl(bool isBlocking);
	virtual ~SocketImpl();

	bool connect(const char* host, uint16_t port, uint32_t timeout);
	bool listen(uint16_t port);
	bool accept(bool block);
	void disconnect();

	void setBlocking(bool blocking);

	virtual uint32_t write(const uint8_t* data, uint32_t length);
	virtual bool flush();
	uint32_t read(uint8_t* data, uint32_t length);

	PX_FORCE_INLINE bool isBlocking() const
	{
		return mIsBlocking;
	}
	PX_FORCE_INLINE bool isConnected() const
	{
		return mIsConnected;
	}
	PX_FORCE_INLINE const char* getHost() const
	{
		return mHost;
	}
	PX_FORCE_INLINE uint16_t getPort() const
	{
		return mPort;
	}

  protected:
	bool nonBlockingTimeout() const;
	void setBlockingInternal(SOCKET socket, bool blocking);

	// mutable: const read/write helpers may drop the connection on error
	mutable SOCKET mSocket;
	SOCKET mListenSocket;
	const char* mHost; // not owned; caller's string passed to connect()
	uint16_t mPort;
	mutable bool mIsConnected;
	bool mIsBlocking;
	bool mListenMode;
	bool mSocketLayerIntialized; // true when WSAStartup succeeded
};

SocketImpl::SocketImpl(bool isBlocking)
: mSocket(INVALID_SOCKET)
, mListenSocket(INVALID_SOCKET)
, mPort(0)
, mHost(NULL)
, mIsConnected(false)
, mIsBlocking(isBlocking)
, mListenMode(false)
, mSocketLayerIntialized(false)
{
	// initialize Winsock 2.2; all entry points bail out early if this failed
	WORD vreq;
	WSADATA wsaData;
	vreq = MAKEWORD(2, 2);
	mSocketLayerIntialized = (WSAStartup(vreq, &wsaData) == 0);
}

SocketImpl::~SocketImpl()
{
	if(mSocketLayerIntialized)
		WSACleanup();
}

// Switch a raw SOCKET between blocking (mode 0) and non-blocking (mode 1).
void SocketImpl::setBlockingInternal(SOCKET socket, bool blocking)
{
	uint32_t mode = uint32_t(blocking ? 0 : 1);
	ioctlsocket(socket, FIONBIO, (u_long*)&mode);
}

// Connect to host:port with a millisecond timeout. The socket is temporarily
// made non-blocking so the connect can be bounded via WSAPoll, then restored
// to the user-selected blocking mode on success.
bool SocketImpl::connect(const char* host, uint16_t port, uint32_t timeout)
{
	if(!mSocketLayerIntialized)
		return false;

	sockaddr_in socketAddress;
	hostent* hp;
	intrinsics::memSet(&socketAddress, 0, sizeof(sockaddr_in));
	socketAddress.sin_family = AF_INET;
	socketAddress.sin_port = htons(port);

	// get host: try name resolution first, then fall back to treating the
	// string as a dotted-quad address
	hp = gethostbyname(host);
	if(!hp)
	{
		in_addr a;
		a.s_addr = inet_addr(host);
		hp = gethostbyaddr((const char*)&a, sizeof(in_addr), AF_INET);
		if(!hp)
			return false;
	}
	intrinsics::memCopy(&socketAddress.sin_addr, hp->h_addr_list[0], (uint32_t)hp->h_length);

	// connect
	mSocket = socket(PF_INET, SOCK_STREAM, 0);
	if(mSocket == INVALID_SOCKET)
		return false;

	// non-blocking so the in-progress connect can be polled with a timeout
	setBlockingInternal(mSocket, false);

	::connect(mSocket, (sockaddr*)&socketAddress, sizeof(socketAddress));

	// Setup poll function call to monitor the connect call.
	// By querying for POLLOUT we're checking if the socket is
	// ready for writing.
	WSAPOLLFD pfd;
	pfd.fd = mSocket;
	pfd.events = POLLOUT;
	const int pollResult = WSAPoll(&pfd, 1, timeout /*milliseconds*/);

	const bool pollTimeout = (pollResult == 0);
	const bool pollError = (pollResult == SOCKET_ERROR); // an error inside poll happened. Can check error with `WSAGetLastError`.
	if(pollTimeout || pollError)
	{
		disconnect();
		return false;
	}
	else
	{
		PX_ASSERT(pollResult == 1);
		// check that event was precisely POLLOUT and not anything else (e.g., errors, hang-up)
		bool test = (pfd.revents & POLLOUT) && !(pfd.revents & (~POLLOUT));
		if(!test)
		{
			disconnect();
			return false;
		}
	}

	// restore the user-requested blocking mode now that we are connected
	setBlockingInternal(mSocket, mIsBlocking);
	mIsConnected = true;
	mPort = port;
	mHost = host;
	return true;
}

// Create a listening socket bound to INADDR_ANY:port.
bool SocketImpl::listen(uint16_t port)
{
	if(!mSocketLayerIntialized)
		return false;

	mListenSocket = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
	if(mListenSocket == INVALID_SOCKET)
		return false;

	mListenMode = true;

	sockaddr_in addr = { 0 };
	addr.sin_family = AF_INET;
	addr.sin_port = htons(port);
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	return bind(mListenSocket, (sockaddr*)&addr, sizeof(addr)) == 0 && ::listen(mListenSocket, SOMAXCONN) == 0;
}

// Accept one pending client. 'block' controls whether this call waits;
// the accepted data socket then takes on the user-selected blocking mode.
bool SocketImpl::accept(bool block)
{
	if(mIsConnected || !mListenMode)
		return false;

	// set the listen socket to be non-blocking.
	setBlockingInternal(mListenSocket, block);
	SOCKET clientSocket = ::accept(mListenSocket, 0, 0);
	if(clientSocket == INVALID_SOCKET)
		return false;

	mSocket = clientSocket;
	mIsConnected = true;
	setBlockingInternal(mSocket, mIsBlocking); // force the mode to whatever the user set

	return mIsConnected;
}

// Close both the data and listen sockets (graceful shutdown on the data
// socket first) and reset all connection state.
void SocketImpl::disconnect()
{
	if(mListenSocket != INVALID_SOCKET)
	{
		closesocket(mListenSocket);
		mListenSocket = INVALID_SOCKET;
	}
	if(mSocket != INVALID_SOCKET)
	{
		WSASendDisconnect(mSocket, NULL);
		closesocket(mSocket);
		mSocket = INVALID_SOCKET;
	}
	mIsConnected = false;
	mListenMode = false;
	mPort = 0;
	mHost = NULL;
}

// True when a failed send/recv is just a would-block on a non-blocking
// socket (i.e. not a real disconnection).
bool SocketImpl::nonBlockingTimeout() const
{
	return !mIsBlocking && WSAGetLastError() == WSAEWOULDBLOCK;
}

// should be cross-platform from here down

void SocketImpl::setBlocking(bool blocking)
{
	if(blocking != mIsBlocking)
	{
		mIsBlocking = blocking;
		if(isConnected())
			setBlockingInternal(mSocket, blocking);
	}
}

// unbuffered implementation: nothing to flush
bool SocketImpl::flush()
{
	return true;
}

// Send up to 'length' bytes; returns the number actually sent (0 on
// would-block). Any other failure disconnects the socket.
uint32_t SocketImpl::write(const uint8_t* data, uint32_t length)
{
	if(length == 0)
		return 0;

	int sent = send(mSocket, (const char*)data, (int32_t)length, 0);

	if(sent <= 0 && !nonBlockingTimeout())
		disconnect();

	return uint32_t(sent > 0 ? sent : 0);
}

// Receive up to 'length' bytes; returns the number actually read (0 on
// would-block). Any other failure disconnects the socket.
uint32_t SocketImpl::read(uint8_t* data, uint32_t length)
{
	if(length == 0)
		return 0;

	int32_t received = recv(mSocket, (char*)data, (int32_t)length, 0);

	if(received <= 0 && !nonBlockingTimeout())
		disconnect();

	return uint32_t(received > 0 ? received : 0);
}

// Adds a fixed-size write buffer in front of SocketImpl: small writes are
// coalesced and only pushed to the OS when the buffer fills or on flush().
class BufferedSocketImpl : public SocketImpl
{
  public:
	BufferedSocketImpl(bool isBlocking) : SocketImpl(isBlocking), mBufferPos(0)
	{
	}
	virtual ~BufferedSocketImpl()
	{
	}
	bool flush();
	uint32_t write(const uint8_t* data, uint32_t length);

  private:
	uint32_t mBufferPos; // number of valid bytes currently buffered
	uint8_t mBuffer[PxSocket::DEFAULT_BUFFER_SIZE];
};

// Push all buffered bytes to the socket; returns true only if everything
// was sent. The buffer is emptied either way.
bool BufferedSocketImpl::flush()
{
	uint32_t totalBytesWritten = 0;

	while(totalBytesWritten < mBufferPos && mIsConnected)
		totalBytesWritten += (int32_t)SocketImpl::write(mBuffer + totalBytesWritten, mBufferPos - totalBytesWritten);

	bool ret = (totalBytesWritten == mBufferPos);
	mBufferPos = 0;
	return ret;
}

// Append to the buffer, spilling full buffers to the socket as needed.
// From the caller's perspective all 'length' bytes are consumed even when a
// non-blocking socket could not take a whole buffer (the remainder stays
// buffered for a later spill).
uint32_t BufferedSocketImpl::write(const uint8_t* data, uint32_t length)
{
	uint32_t bytesWritten = 0;
	while(mBufferPos + length >= PxSocket::DEFAULT_BUFFER_SIZE)
	{
		uint32_t currentChunk = PxSocket::DEFAULT_BUFFER_SIZE - mBufferPos;
		intrinsics::memCopy(mBuffer + mBufferPos, data + bytesWritten, currentChunk);
		bytesWritten += (uint32_t)currentChunk;
		// for the user, this is consumed even if we fail to shove it down a
		// non-blocking socket

		uint32_t sent = SocketImpl::write(mBuffer, PxSocket::DEFAULT_BUFFER_SIZE);
		mBufferPos = PxSocket::DEFAULT_BUFFER_SIZE - sent;

		if(sent < PxSocket::DEFAULT_BUFFER_SIZE) // non-blocking or error
		{
			if(sent) // we can reasonably hope this is rare
				intrinsics::memMove(mBuffer, mBuffer + sent, mBufferPos);
			return bytesWritten;
		}
		length -= currentChunk;
	}

	if(length > 0)
	{
		// remainder fits in the buffer; just stash it
		intrinsics::memCopy(mBuffer + mBufferPos, data + bytesWritten, length);
		bytesWritten += length;
		mBufferPos += length;
	}
	return bytesWritten;
}

// Public facade: owns either a buffered or an unbuffered implementation.
PxSocket::PxSocket(bool inIsBuffering, bool isBlocking)
{
	if(inIsBuffering)
	{
		void* mem = PX_ALLOC(sizeof(BufferedSocketImpl), "BufferedSocketImpl");
		mImpl = PX_PLACEMENT_NEW(mem, BufferedSocketImpl)(isBlocking);
	}
	else
	{
		void* mem = PX_ALLOC(sizeof(SocketImpl), "SocketImpl");
		mImpl = PX_PLACEMENT_NEW(mem, SocketImpl)(isBlocking);
	}
}

PxSocket::~PxSocket()
{
	// best-effort drain of buffered data before closing; virtual dtor via
	// explicit call because the storage was placement-new'd
	mImpl->flush();
	mImpl->disconnect();
	mImpl->~SocketImpl();
	PX_FREE(mImpl);
}

bool PxSocket::connect(const char* host, uint16_t port, uint32_t timeout)
{
	return mImpl->connect(host, port, timeout);
}

bool PxSocket::listen(uint16_t port)
{
	return mImpl->listen(port);
}

bool PxSocket::accept(bool block)
{
	return mImpl->accept(block);
}

void PxSocket::disconnect()
{
	mImpl->disconnect();
}

bool PxSocket::isConnected() const
{
	return mImpl->isConnected();
}

const char* PxSocket::getHost() const
{
	return mImpl->getHost();
}

uint16_t PxSocket::getPort() const
{
	return mImpl->getPort();
}

bool PxSocket::flush()
{
	if(!mImpl->isConnected())
		return false;
	return mImpl->flush();
}

uint32_t PxSocket::write(const uint8_t* data, uint32_t length)
{
	if(!mImpl->isConnected())
		return 0;
	return mImpl->write(data, length);
}

uint32_t PxSocket::read(uint8_t* data, uint32_t length)
{
	if(!mImpl->isConnected())
		return 0;
	return mImpl->read(data, length);
}

void PxSocket::setBlocking(bool blocking)
{
	mImpl->setBlocking(blocking);
}

bool PxSocket::isBlocking() const
{
	return mImpl->isBlocking();
}

} // namespace physx
10,560
C++
23.056948
127
0.720833
NVIDIA-Omniverse/PhysX/physx/source/foundation/windows/FdWindowsThread.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "foundation/windows/PxWindowsInclude.h"
#include "foundation/PxErrorCallback.h"
#include "foundation/PxAssert.h"
#include "foundation/PxThread.h"
#include "foundation/PxAlloca.h"

// an exception for setting the thread name in Microsoft debuggers
#define NS_MS_VC_EXCEPTION 0x406D1388

namespace physx
{

namespace
{

#if PX_VC
#pragma warning(disable : 4061) // enumerator 'identifier' in switch of enum 'enumeration' is not handled
#pragma warning(disable : 4191) //'operator/operation' : unsafe conversion from 'type of expression' to 'type required'
#endif

// struct for naming a thread in the debugger
#pragma pack(push, 8)

typedef struct tagTHREADNAME_INFO
{
	DWORD dwType;     // Must be 0x1000.
	LPCSTR szName;    // Pointer to name (in user addr space).
	DWORD dwThreadID; // Thread ID (-1=caller thread).
	DWORD dwFlags;    // Reserved for future use, must be zero.
} THREADNAME_INFO;

#pragma pack(pop)

// Private per-thread state. PxThreadImpl's storage is reinterpreted as this
// struct (see getThread/getSize), keeping the public header free of Win32 types.
class ThreadImpl
{
  public:
	enum State
	{
		NotStarted,
		Started,
		Stopped
	};

	HANDLE thread;
	LONG quitNow; // Should be 32bit aligned on SMP systems.
	State state;
	DWORD threadID;

	PxThreadImpl::ExecuteFn fn;
	void* arg;
	uint32_t affinityMask;
	const char* name; // not owned; pointer supplied by caller
};

// Reinterpret the opaque public object as its private state.
static PX_FORCE_INLINE ThreadImpl* getThread(PxThreadImpl* impl)
{
	return reinterpret_cast<ThreadImpl*>(impl);
}

// OS thread entry point: dispatches to either the raw function pointer or a
// PxRunnable passed through 'arg'.
static DWORD WINAPI PxThreadStart(LPVOID arg)
{
	ThreadImpl* impl = getThread((PxThreadImpl*)arg);

	// run either the passed in function or execute from the derived class (Runnable).
	if(impl->fn)
		(*impl->fn)(impl->arg);
	else if(impl->arg)
		((PxRunnable*)impl->arg)->execute();
	return 0;
}

// cache physical thread count
static uint32_t gPhysicalCoreCount = 0;
}

// Size of the private state so callers can reserve storage for it.
uint32_t PxThreadImpl::getSize()
{
	return sizeof(ThreadImpl);
}

PxThreadImpl::Id PxThreadImpl::getId()
{
	return static_cast<Id>(GetCurrentThreadId());
}

// fwd GetLogicalProcessorInformation()
typedef BOOL(WINAPI* LPFN_GLPI)(PSYSTEM_LOGICAL_PROCESSOR_INFORMATION, PDWORD);

// Count physical (not logical) cores via GetLogicalProcessorInformation.
// The result is computed once and cached in gPhysicalCoreCount; returns 0 on
// any failure.
uint32_t PxThreadImpl::getNbPhysicalCores()
{
	if(!gPhysicalCoreCount)
	{
		// modified example code from: http://msdn.microsoft.com/en-us/library/ms683194
		LPFN_GLPI glpi;
		PSYSTEM_LOGICAL_PROCESSOR_INFORMATION buffer = NULL;
		PSYSTEM_LOGICAL_PROCESSOR_INFORMATION ptr = NULL;
		DWORD returnLength = 0;
		DWORD processorCoreCount = 0;
		DWORD byteOffset = 0;

		// resolved dynamically so older OS versions without the API still run
		glpi = (LPFN_GLPI)GetProcAddress(GetModuleHandle(TEXT("kernel32")), "GetLogicalProcessorInformation");
		if(NULL == glpi)
		{
			// GetLogicalProcessorInformation not supported on OS < XP Service Pack 3
			return 0;
		}

		DWORD rc = (DWORD)glpi(NULL, &returnLength);
		PX_ASSERT(rc == FALSE);
		PX_UNUSED(rc);

		// first query reports required buffer space
		if(GetLastError() == ERROR_INSUFFICIENT_BUFFER)
		{
			// stack allocation; valid for the remainder of this function
			buffer = (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION)PxAlloca(returnLength);
		}
		else
		{
			PxGetFoundation().error(PxErrorCode::eINTERNAL_ERROR, PX_FL, "Error querying buffer size for number of physical processors");
			return 0;
		}

		// retrieve data
		rc = (DWORD)glpi(buffer, &returnLength);
		if(rc != TRUE)
		{
			PxGetFoundation().error(PxErrorCode::eINTERNAL_ERROR, PX_FL, "Error querying number of physical processors");
			return 0;
		}

		ptr = buffer;

		// walk the record array, counting RelationProcessorCore entries
		while(byteOffset + sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION) <= returnLength)
		{
			switch(ptr->Relationship)
			{
			case RelationProcessorCore:
				processorCoreCount++;
				break;
			default:
				break;
			}

			byteOffset += sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION);
			ptr++;
		}

		gPhysicalCoreCount = processorCoreCount;
	}

	return gPhysicalCoreCount;
}

// Default construction: thread is not started; start() must be called.
PxThreadImpl::PxThreadImpl()
{
	getThread(this)->thread = NULL;
	getThread(this)->state = ThreadImpl::NotStarted;
	getThread(this)->quitNow = 0;
	getThread(this)->fn = NULL;
	getThread(this)->arg = NULL;
	getThread(this)->affinityMask = 0;
	getThread(this)->name = NULL;
}

// Convenience constructor: initializes state and immediately starts the
// thread running 'fn(arg)' with the default stack size.
PxThreadImpl::PxThreadImpl(ExecuteFn fn, void* arg, const char* name)
{
	getThread(this)->thread = NULL;
	getThread(this)->state = ThreadImpl::NotStarted;
	getThread(this)->quitNow = 0;
	getThread(this)->fn = fn;
	getThread(this)->arg = arg;
	getThread(this)->affinityMask = 0;
	getThread(this)->name = name;

	start(0, NULL);
}

PxThreadImpl::~PxThreadImpl()
{
	// forcibly terminate if still running, then release the OS handle
	if(getThread(this)->state == ThreadImpl::Started)
		kill();
	CloseHandle(getThread(this)->thread);
}

// Start the thread (no-op if already started). The thread is created
// suspended so affinity and name can be applied before it runs.
void PxThreadImpl::start(uint32_t stackSize, PxRunnable* runnable)
{
	if(getThread(this)->state != ThreadImpl::NotStarted)
		return;
	getThread(this)->state = ThreadImpl::Started;

	// a runnable only takes effect if no fn/arg was set at construction
	if(runnable && !getThread(this)->arg && !getThread(this)->fn)
		getThread(this)->arg = runnable;

	getThread(this)->thread = CreateThread(NULL, stackSize, PxThreadStart, (LPVOID) this, CREATE_SUSPENDED, &getThread(this)->threadID);
	if(!getThread(this)->thread)
	{
		PxGetFoundation().error(PxErrorCode::eINTERNAL_ERROR, PX_FL, "FdWindowsThread::start: Failed to create thread.");
		getThread(this)->state = ThreadImpl::NotStarted;
		return;
	}

	// set affinity, set name and resume
	if(getThread(this)->affinityMask)
		setAffinityMask(getThread(this)->affinityMask);

	if (getThread(this)->name)
		setName(getThread(this)->name);

	DWORD rc = ResumeThread(getThread(this)->thread);
	if(rc == DWORD(-1))
	{
		PxGetFoundation().error(PxErrorCode::eINTERNAL_ERROR, PX_FL, "FdWindowsThread::start: Failed to resume thread.");
		getThread(this)->state = ThreadImpl::NotStarted;
		return;
	}
}

// Request cooperative shutdown; the worker observes it via quitIsSignalled().
void PxThreadImpl::signalQuit()
{
	InterlockedIncrement(&(getThread(this)->quitNow));
}

// Block until the thread exits; returns false if it was never started.
bool PxThreadImpl::waitForQuit()
{
	if(getThread(this)->state == ThreadImpl::NotStarted)
		return false;

	WaitForSingleObject(getThread(this)->thread, INFINITE);
	getThread(this)->state = ThreadImpl::Stopped;
	return true;
}
bool PxThreadImpl::quitIsSignalled()
{
	// atomic read of quitNow (compare-exchange with no-op values)
	return InterlockedCompareExchange(&(getThread(this)->quitNow), 0, 0) != 0;
}

// Called from the worker thread itself to terminate execution.
void PxThreadImpl::quit()
{
	getThread(this)->state = ThreadImpl::Stopped;
	ExitThread(0);
}

// Forcibly terminate the thread (unsafe for threads holding locks/resources).
void PxThreadImpl::kill()
{
	if(getThread(this)->state == ThreadImpl::Started)
		TerminateThread(getThread(this)->thread, 0);
	getThread(this)->state = ThreadImpl::Stopped;
}

void PxThreadImpl::sleep(uint32_t ms)
{
	Sleep(ms);
}

void PxThreadImpl::yield()
{
	SwitchToThread();
}

void PxThreadImpl::yieldProcessor()
{
	YieldProcessor();
}

// Store the affinity mask; if the thread is already running, apply it now and
// return the previous mask (0 on failure or when deferred).
uint32_t PxThreadImpl::setAffinityMask(uint32_t mask)
{
	if(mask)
	{
		// store affinity
		getThread(this)->affinityMask = mask;

		// if thread already started apply immediately
		if(getThread(this)->state == ThreadImpl::Started)
		{
			uint32_t err = uint32_t(SetThreadAffinityMask(getThread(this)->thread, mask));
			return err;
		}
	}

	return 0;
}

// Record the name and, if the thread is running, publish it to an attached
// Microsoft debugger via the well-known 0x406D1388 SEH exception.
void PxThreadImpl::setName(const char* name)
{
	getThread(this)->name = name;

	if (getThread(this)->state == ThreadImpl::Started)
	{
		THREADNAME_INFO info;
		info.dwType = 0x1000;
		info.szName = name;
		info.dwThreadID = getThread(this)->threadID;
		info.dwFlags = 0;

		// C++ Exceptions are disabled for this project, but SEH is not (and cannot be)
		// http://stackoverflow.com/questions/943087/what-exactly-will-happen-if-i-disable-c-exceptions-in-a-project
		__try
		{
			RaiseException(NS_MS_VC_EXCEPTION, 0, sizeof(info) / sizeof(ULONG_PTR), (ULONG_PTR*)&info);
		}
		__except (EXCEPTION_EXECUTE_HANDLER)
		{
			// this runs if not attached to a debugger (thus not really naming the thread)
		}
	}
}

// Map PxThreadPriority to the Win32 priority levels.
void PxThreadImpl::setPriority(PxThreadPriority::Enum prio)
{
	BOOL rc = false;
	switch(prio)
	{
	case PxThreadPriority::eHIGH:
		rc = SetThreadPriority(getThread(this)->thread, THREAD_PRIORITY_HIGHEST);
		break;
	case PxThreadPriority::eABOVE_NORMAL:
		rc = SetThreadPriority(getThread(this)->thread, THREAD_PRIORITY_ABOVE_NORMAL);
		break;
	case PxThreadPriority::eNORMAL:
		rc = SetThreadPriority(getThread(this)->thread, THREAD_PRIORITY_NORMAL);
		break;
	case PxThreadPriority::eBELOW_NORMAL:
		rc = SetThreadPriority(getThread(this)->thread, THREAD_PRIORITY_BELOW_NORMAL);
		break;
	case PxThreadPriority::eLOW:
		rc = SetThreadPriority(getThread(this)->thread, THREAD_PRIORITY_LOWEST);
		break;
	default:
		break;
	}
	if(!rc)
	{
		PxGetFoundation().error(PxErrorCode::eINTERNAL_ERROR, PX_FL, "FdWindowsThread::setPriority: Failed to set thread priority.");
	}
}

// Map a Win32 priority back to PxThreadPriority by descending thresholds.
PxThreadPriority::Enum PxThreadImpl::getPriority(Id threadId)
{
	PxThreadPriority::Enum retval = PxThreadPriority::eLOW;
	int priority = GetThreadPriority((HANDLE)threadId);
	PX_COMPILE_TIME_ASSERT(THREAD_PRIORITY_HIGHEST > THREAD_PRIORITY_ABOVE_NORMAL);
	if(priority >= THREAD_PRIORITY_HIGHEST)
		retval = PxThreadPriority::eHIGH;
	else if(priority >= THREAD_PRIORITY_ABOVE_NORMAL)
		retval = PxThreadPriority::eABOVE_NORMAL;
	else if(priority >= THREAD_PRIORITY_NORMAL)
		retval = PxThreadPriority::eNORMAL;
	else if(priority >= THREAD_PRIORITY_BELOW_NORMAL)
		retval = PxThreadPriority::eBELOW_NORMAL;
	return retval;
}

// Thin wrappers over Win32 thread-local storage.
PxU32 PxTlsAlloc()
{
	DWORD rv = ::TlsAlloc();
	PX_ASSERT(rv != TLS_OUT_OF_INDEXES);
	return (PxU32)rv;
}

void PxTlsFree(PxU32 index)
{
	::TlsFree(index);
}

void* PxTlsGet(PxU32 index)
{
	return ::TlsGetValue(index);
}

size_t PxTlsGetValue(PxU32 index)
{
	return size_t(::TlsGetValue(index));
}

PxU32 PxTlsSet(PxU32 index, void* value)
{
	return PxU32(::TlsSetValue(index, value));
}

PxU32 PxTlsSetValue(PxU32 index, size_t value)
{
	return PxU32(::TlsSetValue(index, reinterpret_cast<void*>(value)));
}

// Default thread stack size: 1 MiB.
PxU32 PxThreadImpl::getDefaultStackSize()
{
	return 1048576;
};

} // namespace physx
11,129
C++
25.374408
128
0.729176
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/ExtGjkQueryExt.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "extensions/PxGjkQueryExt.h"
#include "geometry/PxSphereGeometry.h"
#include "geometry/PxBoxGeometry.h"
#include "geometry/PxPlaneGeometry.h"
#include "geometry/PxCapsuleGeometry.h"
#include "geometry/PxConvexMeshGeometry.h"
#include "foundation/PxAllocator.h"
#include "geomutils/PxContactBuffer.h"

using namespace physx;

///////////////////////////////////////////////////////////////////////////////
// SphereSupport: the support shape is the sphere center; the radius is
// applied entirely as the GJK margin.

PxGjkQueryExt::SphereSupport::SphereSupport() : radius(0.0f)
{
}

PxGjkQueryExt::SphereSupport::SphereSupport(PxReal _radius) : radius(_radius)
{
}

PxGjkQueryExt::SphereSupport::SphereSupport(const PxSphereGeometry& geom) : radius(geom.radius)
{
}

PxReal PxGjkQueryExt::SphereSupport::getMargin() const
{
	return radius;
}

PxVec3 PxGjkQueryExt::SphereSupport::supportLocal(const PxVec3& /*dir*/) const
{
	// a sphere's core shape is a single point at the origin
	return PxVec3(0.0f);
}

///////////////////////////////////////////////////////////////////////////////
// CapsuleSupport: a segment along the local X axis; the radius is the margin.

PxGjkQueryExt::CapsuleSupport::CapsuleSupport() : radius(0.0f), halfHeight(0.0f)
{
}

PxGjkQueryExt::CapsuleSupport::CapsuleSupport(PxReal _radius, PxReal _halfHeight) : radius(_radius), halfHeight(_halfHeight)
{
}

// Bug fix: halfHeight was previously left uninitialized when constructing
// from a PxCapsuleGeometry, so supportLocal() read garbage. Initialize it
// from geom.halfHeight, matching the (radius, halfHeight) constructor.
PxGjkQueryExt::CapsuleSupport::CapsuleSupport(const PxCapsuleGeometry& geom) : radius(geom.radius), halfHeight(geom.halfHeight)
{
}

PxReal PxGjkQueryExt::CapsuleSupport::getMargin() const
{
	return radius;
}

PxVec3 PxGjkQueryExt::CapsuleSupport::supportLocal(const PxVec3& dir) const
{
	// pick the segment endpoint on the side the direction points to
	return PxVec3(PxSign2(dir.x) * halfHeight, 0.0f, 0.0f);
}

///////////////////////////////////////////////////////////////////////////////
// BoxSupport: a box with an optional extra margin.

PxGjkQueryExt::BoxSupport::BoxSupport() : halfExtents(0.0f), margin(0.0f)
{
}

PxGjkQueryExt::BoxSupport::BoxSupport(const PxVec3& _halfExtents, PxReal _margin) : halfExtents(_halfExtents), margin(_margin)
{
}

PxGjkQueryExt::BoxSupport::BoxSupport(const PxBoxGeometry& box, PxReal _margin) : halfExtents(box.halfExtents), margin(_margin)
{
}

PxReal PxGjkQueryExt::BoxSupport::getMargin() const
{
	return margin;
}

PxVec3 PxGjkQueryExt::BoxSupport::supportLocal(const PxVec3& dir) const
{
	// the supporting vertex is the corner whose signs match the direction
	const PxVec3 d = dir.getNormalized();
	return PxVec3(PxSign2(d.x) * halfExtents.x, PxSign2(d.y) * halfExtents.y, PxSign2(d.z) * halfExtents.z);
}

///////////////////////////////////////////////////////////////////////////////
// ConvexMeshSupport: supports a (possibly scaled) convex mesh by brute-force
// scanning its vertices for the one with maximal dot product.

PxGjkQueryExt::ConvexMeshSupport::ConvexMeshSupport() :
	convexMesh		(NULL),
	scale			(0.0f),
	scaleRotation	(0.0f),
	margin			(0.0f)
{
}

PxGjkQueryExt::ConvexMeshSupport::ConvexMeshSupport(const PxConvexMesh& _convexMesh, const PxVec3& _scale, const PxQuat& _scaleRotation, PxReal _margin) :
	convexMesh		(&_convexMesh),
	scale			(_scale),
	scaleRotation	(_scaleRotation),
	margin			(_margin)
{
}

PxGjkQueryExt::ConvexMeshSupport::ConvexMeshSupport(const PxConvexMeshGeometry& _convexMesh, PxReal _margin) :
	convexMesh		(_convexMesh.convexMesh),
	scale			(_convexMesh.scale.scale),
	scaleRotation	(_convexMesh.scale.rotation),
	margin			(_convexMesh.convexMesh ? _margin : _margin)
{
}

PxReal PxGjkQueryExt::ConvexMeshSupport::getMargin() const
{
	// scale the margin by the smallest scale component so it never exceeds
	// the scaled geometry
	return margin * scale.minElement();
}

PxVec3 PxGjkQueryExt::ConvexMeshSupport::supportLocal(const PxVec3& dir) const
{
	if (convexMesh == NULL)
		return PxVec3(0.0f);

	// map the query direction into the unscaled mesh frame
	PxVec3 d = scaleRotation.rotateInv(scaleRotation.rotate(dir).multiply(PxVec3(1.0f / scale.x, 1.0f / scale.y, 1.0f / scale.z)));
	const PxVec3* verts = convexMesh->getVertices();
	int count = int(convexMesh->getNbVertices());
	float maxDot = -FLT_MAX;
	int index = -1;
	// linear scan: convex meshes are small (<=255 verts), so this is fine
	for (int i = 0; i < count; ++i)
	{
		float dot = verts[i].dot(d);
		if (dot > maxDot)
		{
			maxDot = dot;
			index = i;
		}
	}

	if (index == -1)
		return PxVec3(0);

	// map the supporting vertex back into the scaled frame
	return scaleRotation.rotateInv(scaleRotation.rotate(verts[index]).multiply(scale));
}

///////////////////////////////////////////////////////////////////////////////
// ConvexGeomSupport: type-erased wrapper selecting one of the supports above
// based on the runtime geometry type.

PxGjkQueryExt::ConvexGeomSupport::ConvexGeomSupport() : mType(PxGeometryType::eINVALID)
{
}

PxGjkQueryExt::ConvexGeomSupport::ConvexGeomSupport(const PxGeometry& geom, PxReal margin)
{
	mType = PxGeometryType::eINVALID;
	switch (geom.getType())
	{
	case PxGeometryType::eSPHERE:
	{
		mType = PxGeometryType::eSPHERE;
		const PxSphereGeometry& sphere = static_cast<const PxSphereGeometry&>(geom);
		// fold the extra margin into the sphere radius
		PX_PLACEMENT_NEW(&mSupport, SphereSupport(sphere.radius + margin));
		break;
	}
	case PxGeometryType::eCAPSULE:
	{
		mType = PxGeometryType::eCAPSULE;
		const PxCapsuleGeometry& capsule = static_cast<const PxCapsuleGeometry&>(geom);
		// fold the extra margin into the capsule radius
		PX_PLACEMENT_NEW(&mSupport, CapsuleSupport(capsule.radius + margin, capsule.halfHeight));
		break;
	}
	case PxGeometryType::eBOX:
	{
		mType = PxGeometryType::eBOX;
		PX_PLACEMENT_NEW(&mSupport, BoxSupport(static_cast<const PxBoxGeometry&>(geom), margin));
		break;
	}
	case PxGeometryType::eCONVEXMESH:
	{
		mType = PxGeometryType::eCONVEXMESH;
		PX_PLACEMENT_NEW(&mSupport, ConvexMeshSupport(static_cast<const PxConvexMeshGeometry&>(geom), margin));
		break;
	}
	default:
		// unsupported geometry type: wrapper stays invalid
		break;
	}
}

PxGjkQueryExt::ConvexGeomSupport::~ConvexGeomSupport()
{
	// placement-new'd storage requires an explicit (virtual) destructor call
	if (isValid())
		reinterpret_cast<Support&>(mSupport).~Support();
}

bool PxGjkQueryExt::ConvexGeomSupport::isValid() const
{
	return mType != PxGeometryType::eINVALID;
}

PxReal PxGjkQueryExt::ConvexGeomSupport::getMargin() const
{
	return isValid() ? reinterpret_cast<const Support&>(mSupport).getMargin() : 0.0f;
}

PxVec3 PxGjkQueryExt::ConvexGeomSupport::supportLocal(const PxVec3& dir) const
{
	return isValid() ? reinterpret_cast<const Support&>(mSupport).supportLocal(dir) : PxVec3(0.0f);
}

///////////////////////////////////////////////////////////////////////////////

// Runs a GJK proximity query between two support shapes and, on overlap or
// proximity within contactDistance, appends a single contact (midpoint of the
// closest points, separating axis as normal) to contactBuffer.
// Returns false when the shapes are farther apart than contactDistance.
bool PxGjkQueryExt::generateContacts(const PxGjkQuery::Support& a, const PxGjkQuery::Support& b, const PxTransform& poseA, const PxTransform& poseB, PxReal contactDistance, PxReal toleranceLength, PxContactBuffer& contactBuffer)
{
	PxVec3 pointA, pointB, separatingAxis;
	PxReal separation;
	if (!PxGjkQuery::proximityInfo(a, b, poseA, poseB, contactDistance, toleranceLength, pointA, pointB, separatingAxis, separation))
		return false;

	PxContactPoint contact;
	contact.point = (pointA + pointB) * 0.5f; // VR: should I make it just pointB?
	contact.normal = separatingAxis;
	contact.separation = separation;
	contactBuffer.contact(contact);

	return true;
}
7,879
C++
30.646586
228
0.706435
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/ExtBroadPhase.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "foundation/PxBounds3.h"
#include "foundation/PxErrorCallback.h"
#include "foundation/PxFoundation.h"
#include "extensions/PxBroadPhaseExt.h"

using namespace physx;

// Subdivides globalBounds into an nbSubdiv x nbSubdiv grid of broad-phase
// regions in the plane orthogonal to upAxis; each region spans the full
// bounds extent along upAxis. Writes nbSubdiv*nbSubdiv boxes into 'regions'
// (caller-allocated) and returns the number written.
//
// regions      : output array, must hold at least nbSubdiv*nbSubdiv entries
// globalBounds : world bounds to subdivide (must be valid)
// nbSubdiv     : subdivision count per planar axis
// upAxis       : 0, 1 or 2 (x, y or z)
PxU32 PxBroadPhaseExt::createRegionsFromWorldBounds(PxBounds3* regions, const PxBounds3& globalBounds, PxU32 nbSubdiv, PxU32 upAxis)
{
	PX_CHECK_MSG(globalBounds.isValid(), "PxBroadPhaseExt::createRegionsFromWorldBounds(): invalid bounds provided!");
	PX_CHECK_MSG(upAxis<3, "PxBroadPhaseExt::createRegionsFromWorldBounds(): invalid up-axis provided!");

	const PxVec3& min = globalBounds.minimum;
	const PxVec3& max = globalBounds.maximum;
	// cell size along each axis
	const float dx = (max.x - min.x) / float(nbSubdiv);
	const float dy = (max.y - min.y) / float(nbSubdiv);
	const float dz = (max.z - min.z) / float(nbSubdiv);

	PxU32 nbRegions = 0;
	// Robustness: initialize so an out-of-range upAxis in release builds
	// (where PX_CHECK_MSG is compiled out) cannot emit uninitialized bounds.
	PxVec3 currentMin(0.0f), currentMax(0.0f);
	for(PxU32 j=0;j<nbSubdiv;j++)
	{
		for(PxU32 i=0;i<nbSubdiv;i++)
		{
			// grid cell (i, j) spans the full bounds along the up axis
			if(upAxis==0)
			{
				currentMin = PxVec3(min.x, min.y + dy * float(i), min.z + dz * float(j));
				currentMax = PxVec3(max.x, min.y + dy * float(i+1), min.z + dz * float(j+1));
			}
			else if(upAxis==1)
			{
				currentMin = PxVec3(min.x + dx * float(i), min.y, min.z + dz * float(j));
				currentMax = PxVec3(min.x + dx * float(i+1), max.y, min.z + dz * float(j+1));
			}
			else if(upAxis==2)
			{
				currentMin = PxVec3(min.x + dx * float(i), min.y + dy * float(j), min.z);
				currentMax = PxVec3(min.x + dx * float(i+1), min.y + dy * float(j+1), max.z);
			}

			regions[nbRegions++] = PxBounds3(currentMin, currentMax);
		}
	}
	return nbRegions;
}
3,266
C++
43.148648
132
0.717085
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/ExtSampling.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. 
#include "extensions/PxSamplingExt.h" #include "GuSDF.h" #include "foundation/PxQuat.h" #include "foundation/PxHashSet.h" #include "GuDistancePointTriangle.h" #include "extensions/PxRemeshingExt.h" #include "geometry/PxGeometryQuery.h" #include "PxQueryReport.h" #include "foundation/PxHashMap.h" #include "CmRandom.h" #include "GuAABBTreeNode.h" #include "GuAABBTree.h" #include "GuAABBTreeBounds.h" #include "GuWindingNumber.h" #include "foundation/PxMathUtils.h" #include "foundation/PxSort.h" namespace physx { namespace { using namespace Gu; using namespace Cm; static const PxI32 neighborEdges[3][2] = { { 0, 1 }, { 2, 0 }, { 1, 2 } }; struct Int3 { PxI32 x; PxI32 y; PxI32 z; Int3(PxI32 x_, PxI32 y_, PxI32 z_) : x(x_), y(y_), z(z_) {} Int3() : x(0), y(0), z(0) {} }; struct ActiveSample { PxI32 mIndex; PxArray<PxI32> mNearbyTriangles; PxArray<PxReal> mCumulativeTriangleAreas; ActiveSample() : mIndex(-1) {} ActiveSample(PxI32 index, const PxArray<PxI32>& nearbyTriangles, const PxArray<PxReal>& cumulativeTriangleAreas) { mIndex = index; mNearbyTriangles = nearbyTriangles; mCumulativeTriangleAreas = cumulativeTriangleAreas; } }; struct PointWithNormal { PxVec3 mPoint; PxVec3 mNormal; PointWithNormal() {} PointWithNormal(const PxVec3& point, const PxVec3& normal) : mPoint(point), mNormal(normal) { } }; struct IndexWithNormal { PxI32 mIndex; PxVec3 mNormal; IndexWithNormal(PxI32 index, const PxVec3& normal) : mIndex(index), mNormal(normal) { } }; void getBoundsFromPoints(const PxVec3* points, const PxU32 numPoints, PxVec3& outMinExtents, PxVec3& outMaxExtents) { PxVec3 minExtents(FLT_MAX); PxVec3 maxExtents(-FLT_MAX); // calculate face bounds for (PxU32 i = 0; i < numPoints; ++i) { const PxVec3& a = points[i]; minExtents = a.minimum(minExtents); maxExtents = a.maximum(maxExtents); } outMinExtents = minExtents; outMaxExtents = maxExtents; } PX_FORCE_INLINE PxReal triArea(const PxVec3& a, const PxVec3& b, const PxVec3& c) { return 0.5f * (b - a).cross(c - a).magnitude(); } 
PX_FORCE_INLINE PxReal triArea(const PxU32* tri, const PxVec3* points) { return triArea(points[tri[0]], points[tri[1]], points[tri[2]]); } PX_FORCE_INLINE PxU64 edgeKey(PxI32 a, PxI32 b) { if (a < b) return ((PxU64(a)) << 32) | (PxU64(b)); else return ((PxU64(b)) << 32) | (PxU64(a)); } void buildTriangleAdjacency(const PxU32* tris, PxU32 numTriangles, PxArray<PxI32>& result) { PxU32 l = 4 * numTriangles; //4 elements per triangle - waste one entry per triangle to get a power of 2 which allows for bit shift usage instead of modulo result.clear(); result.resize(l, -1); for (PxU32 i = 3; i < l; i += 4) result[i] = -2; PxHashMap<PxU64, PxU32> edges; for (PxU32 i = 0; i < numTriangles; ++i) { const PxU32* tri = &tris[3 * i]; for (PxU32 j = 0; j < 3; ++j) { const PxU64 edge = edgeKey(tri[neighborEdges[j][0]], tri[neighborEdges[j][1]]); const PxPair<const PxU64, PxU32>* it = edges.find(edge); if (it) { result[4u * i + j] = it->second; result[it->second] = 4u * i + j; } else { PxU32 v = 4u * i + j; edges.insert(edge, v); } } } } void collectTrianglesInSphere(const PxVec3& center, PxReal radius, PxI32 startTri, const PxU32* triangles, const PxVec3* points, const PxArray<PxI32>& adj, PxHashSet<PxI32>& result) { PxArray<PxI32> stack; stack.pushBack(startTri); result.clear(); result.insert(startTri); while (stack.size() > 0) { PxI32 tri = stack.popBack() * 4; for (PxI32 i = 0; i < 3; ++i) { PxI32 n = adj[tri + i] >> 2; if (n >= 0 && !result.contains(n)) { const PxU32* t = &triangles[3 * n]; if (Gu::distancePointTriangleSquared(center, points[t[0]], points[t[1]] - points[t[0]], points[t[2]] - points[t[0]]) < radius * radius) { result.insert(n); stack.pushBack(n); } } } } } void createActiveSample(const PxArray<PxReal>& triangleAreaBuffer, PxI32 sampleIndex, const PxVec3& sample, PxReal radius, PxI32 startTri, const PxU32* triangles, const PxVec3* points, const PxArray<PxI32>& adj, ActiveSample& result) { PxHashSet<PxI32> nearbyTriangles; collectTrianglesInSphere(sample, radius, 
startTri, triangles, points, adj, nearbyTriangles); result.mNearbyTriangles.clear(); result.mNearbyTriangles.reserve(nearbyTriangles.size()); //for (PxI32 t : nearbyTriangles) for (PxHashSet<PxI32>::Iterator iter = nearbyTriangles.getIterator(); !iter.done(); ++iter) result.mNearbyTriangles.pushBack(*iter); result.mCumulativeTriangleAreas.clear(); result.mCumulativeTriangleAreas.resize(nearbyTriangles.size()); result.mCumulativeTriangleAreas[0] = triangleAreaBuffer[result.mNearbyTriangles[0]]; for (PxU32 i = 1; i < nearbyTriangles.size(); ++i) result.mCumulativeTriangleAreas[i] = result.mCumulativeTriangleAreas[i - 1] + triangleAreaBuffer[result.mNearbyTriangles[i]]; result.mIndex = sampleIndex; } //Returns the index of the element with value <= v PxU32 binarySearch(const PxArray<PxReal>& sorted, PxReal v) { PxU32 low = 0; PxU32 up = PxU32(sorted.size()); while (up - low > 1) { PxU32 middle = (up + low) >> 1; PxReal m = sorted[middle]; if (v <= m) up = middle; else low = middle; } return low; } PxVec3 randomPointOnTriangle(BasicRandom& rnd, const PxU32* tri, const PxVec3* points, PxVec3* barycentricCoordinates = NULL) { while (true) { PxReal a = rnd.rand(0.0f, 1.0f); PxReal b = rnd.rand(0.0f, 1.0f); PxReal sum = a + b; if (sum > 1) continue; PxReal c = 1 - a - b; if (barycentricCoordinates) (*barycentricCoordinates) = PxVec3(a, b, c); return points[tri[0]] * a + points[tri[1]] * b + points[tri[2]] * c; } } bool samplePointInBallOnSurface(BasicRandom& rnd, const PxArray<PxReal>& cumulativeAreas, const PxVec3* points, const PxU32* triangles, const PxArray<PxI32>& nearbyTriangles, const PxVec3& point, PxReal radius, PxVec3& sample, PxI32& triId, PxI32 numAttempts = 30, PxVec3* barycentricCoordinates = NULL) { triId = -1; //Use variable upper bound as described in http://extremelearning.com.au/an-improved-version-of-bridsons-algorithm-n-for-poisson-disc-sampling/ PxReal step = radius / numAttempts; PxReal rUpper = radius + step; PxVec3 fallback; PxReal fallbackDist = 
FLT_MAX; PxI32 fallbackId = -1; PxVec3 fallbackBary; for (PxI32 i = 0; i < numAttempts; ++i) { PxReal totalArea = cumulativeAreas[cumulativeAreas.size() - 1]; PxReal r = rnd.rand(0.0f, 1.0f) * totalArea; PxI32 id; id = binarySearch(cumulativeAreas, r); triId = nearbyTriangles[id]; sample = randomPointOnTriangle(rnd, &triangles[3 * triId], points, barycentricCoordinates); const PxReal dist2 = (sample - point).magnitudeSquared(); if (dist2 > radius * radius && dist2 < rUpper * rUpper) return true; if (dist2 > radius * radius && dist2 < 4 * radius * radius && dist2 < fallbackDist) { fallbackDist = dist2; fallbackId = triId; fallback = sample; if (barycentricCoordinates) fallbackBary = *barycentricCoordinates; } rUpper += step; } if (fallbackId >= 0) { sample = fallback; triId = fallbackId; if (barycentricCoordinates) *barycentricCoordinates = fallbackBary; return true; } return false; } } class PoissonSamplerShared { private: struct SparseGridNode { PxI32 mPointIndex; PxI32 mExcessStartIndex; PxI32 mExcessEndIndex; SparseGridNode(PxI32 pointIndex_) : mPointIndex(pointIndex_), mExcessStartIndex(0), mExcessEndIndex(-1) { } SparseGridNode() : mPointIndex(-1), mExcessStartIndex(0), mExcessEndIndex(-1) { } }; //Returns true if successful. False if too many cells are required (overflow) bool rebuildSparseGrid(); public: PoissonSamplerShared() : maxNumSamples(0), currentSamplingRadius(0.0f) {} //Returns true if successful. 
False if too many cells are required (overflow) bool setSamplingRadius(PxReal r); void addSamples(const PxArray<PxVec3>& samples); PxU32 removeSamples(const PxArray<PxVec3>& samples); PxI32 findSample(const PxVec3& p); PxReal minDistanceToOtherSamplesSquared(const PxVec3& p) const; const PxArray<PxVec3>& getSamples() const { return result; } bool addPointToSparseGrid(const PxVec3& p, PxI32 pointIndex); protected: bool postAddPointToSparseGrid(const PxVec3& p, PxI32 pointIndex); bool preAddPointToSparseGrid(const PxVec3& p, PxI32 pointIndex); public: //Input PxI32 numSampleAttemptsAroundPoint; PxVec3 size; PxVec3 min; PxU32 maxNumSamples; //Intermediate data PxReal currentSamplingRadius; Int3 resolution; PxReal cellSize; PxArray<PxU32> occupiedCellBits; PxHashMap<PxI32, SparseGridNode> sparseGrid3D; PxArray<PxI32> excessList; Cm::BasicRandom rnd; bool gridResolutionValid = false; //Output PxArray<PxVec3> result; PX_NOCOPY(PoissonSamplerShared) }; struct PointInVolumeTester { virtual bool pointInVolume(const PxVec3& p) const = 0; virtual ~PointInVolumeTester() {} }; PX_FORCE_INLINE bool pointInSphere(const PxVec3& p, const PxVec3& sphereCenter, PxReal sphereRadius) { return (p - sphereCenter).magnitudeSquared() < sphereRadius * sphereRadius; } struct AlwaysInsideTester : public PointInVolumeTester { AlwaysInsideTester() {} virtual bool pointInVolume(const PxVec3&) const { return true; } virtual ~AlwaysInsideTester() {} }; struct PointInSphereTester : public PointInVolumeTester { PxVec3 mCenter; PxReal mRadius; PointInSphereTester(const PxVec3& center, const PxReal radius) : mCenter(center), mRadius(radius) {} virtual bool pointInVolume(const PxVec3& p) const { return pointInSphere(p, mCenter, mRadius); } virtual ~PointInSphereTester() {} }; struct PointInOBBTester : public PointInVolumeTester { PxVec3 mBoxCenter; PxVec3 mBoxAxisAlignedExtents; PxQuat mBoxOrientation; PointInOBBTester(const PxVec3& boxCenter, const PxVec3& boxAxisAlignedExtents, const PxQuat 
boxOrientation) : mBoxCenter(boxCenter), mBoxAxisAlignedExtents(boxAxisAlignedExtents), mBoxOrientation(boxOrientation) {} virtual bool pointInVolume(const PxVec3& p) const { PxVec3 localPoint = mBoxOrientation.rotateInv(p - mBoxCenter); return localPoint.x >= -mBoxAxisAlignedExtents.x && localPoint.x <= mBoxAxisAlignedExtents.x && localPoint.y >= -mBoxAxisAlignedExtents.y && localPoint.y <= mBoxAxisAlignedExtents.y && localPoint.z >= -mBoxAxisAlignedExtents.z && localPoint.z <= mBoxAxisAlignedExtents.z; } virtual ~PointInOBBTester() {} }; class TriangleMeshPoissonSampler : public PxTriangleMeshPoissonSampler { public: TriangleMeshPoissonSampler(const PxU32* tris, PxU32 numTris, const PxVec3* pts_, PxU32 numPts, PxReal r, PxI32 numSampleAttemptsAroundPoint_ = 30, PxU32 maxNumSamples_ = 0); virtual void addSamplesInVolume(const PointInVolumeTester& pointInVolume, const PxVec3& sphereCenter, PxReal sphereRadius, bool createVolumeSamples); virtual void addSamplesInSphere(const PxVec3& sphereCenter, PxReal sphereRadius, bool createVolumeSamples); virtual void addSamplesInBox(const PxBounds3& axisAlignedBox, const PxQuat& boxOrientation, bool createVolumeSamples); virtual const PxArray<PxI32>& getSampleTriangleIds() const { return triangleIds; } virtual const PxArray<PxVec3>& getSampleBarycentrics() const { return barycentricCoordinates; } virtual bool setSamplingRadius(PxReal samplingRadius) { return poissonSamplerShared.setSamplingRadius(samplingRadius); } virtual void addSamples(const PxArray<PxVec3>& samples) { poissonSamplerShared.addSamples(samples); } void createVolumeSamples(const PointInVolumeTester& pointInVolume, const PxVec3& sphereCenter, PxReal sphereRadius, PxReal randomScale, PxReal r, bool addToSparseGrid = true); virtual PxU32 removeSamples(const PxArray<PxVec3>& samples) { return poissonSamplerShared.removeSamples(samples); } virtual const PxArray<PxVec3>& getSamples() const { return poissonSamplerShared.result; } virtual bool 
isPointInTriangleMesh(const PxVec3& p); virtual ~TriangleMeshPoissonSampler() { } public: bool pointInMesh(const PxVec3& p); PoissonSamplerShared poissonSamplerShared; //Input const PxVec3* originalPoints; const PxU32* originalTriangles; const PxU32 numOriginalTriangles; PxVec3 max; PxArray<PxVec3> points; PxArray<PxU32> triangles; PxArray<PxU32> triangleMap; PxArray<PxReal> triangleAreaBuffer; PxArray<PxI32> adj; PxArray<Gu::BVHNode> tree; PxHashMap<PxU32, Gu::ClusterApproximation> clusters; //Intermediate data PxArray<ActiveSample> activeSamples; //Output PxArray<PxI32> triangleIds; PxArray<PxVec3> barycentricCoordinates; PX_NOCOPY(TriangleMeshPoissonSampler) }; class ShapePoissonSampler : public PxPoissonSampler { public: ShapePoissonSampler(const PxGeometry& geometry_, const PxTransform& transform_, const PxBounds3& worldBounds_, PxReal r, PxI32 numSampleAttemptsAroundPoint_ = 30, PxU32 maxNumSamples_ = 0); virtual void addSamplesInVolume(const PointInVolumeTester& pointInVolume, const PxVec3& sphereCenter, PxReal sphereRadius, bool createVolumeSamples); virtual void addSamplesInSphere(const PxVec3& sphereCenter, PxReal sphereRadius, bool createVolumeSamples); virtual void addSamplesInBox(const PxBounds3& axisAlignedBox, const PxQuat& boxOrientation, bool createVolumeSamples); virtual bool setSamplingRadius(PxReal samplingRadius) { return poissonSamplerShared.setSamplingRadius(samplingRadius); } virtual void addSamples(const PxArray<PxVec3>& samples) { poissonSamplerShared.addSamples(samples); } void createVolumeSamples(const PointInVolumeTester& pointInVolume, const PxVec3& sphereCenter, PxReal sphereRadius, PxReal randomScale, PxReal r, bool addToSparseGrid = true); virtual PxU32 removeSamples(const PxArray<PxVec3>& samples) { return poissonSamplerShared.removeSamples(samples); } virtual const PxArray<PxVec3>& getSamples() const { return poissonSamplerShared.result; } virtual ~ShapePoissonSampler() { } public: PoissonSamplerShared poissonSamplerShared; 
//Input const PxGeometry& shape; const PxTransform actorGlobalPose; //Intermediate data PxArray<IndexWithNormal> activeSamples; }; bool TriangleMeshPoissonSampler::pointInMesh(const PxVec3& p) { return Gu::computeWindingNumber(tree.begin(), p, clusters, originalTriangles, originalPoints) > 0.5f; } PxPoissonSampler* PxCreateShapeSampler(const PxGeometry& geometry, const PxTransform& transform, const PxBounds3& worldBounds, PxReal r, PxI32 numSampleAttemptsAroundPoint) { return PX_NEW(ShapePoissonSampler)(geometry, transform, worldBounds, r, numSampleAttemptsAroundPoint); } PxTriangleMeshPoissonSampler* PxCreateTriangleMeshSampler(const PxU32* tris, PxU32 numTris, const PxVec3* pts, PxU32 numPts, PxReal r, PxI32 numSampleAttemptsAroundPoint) { return PX_NEW(TriangleMeshPoissonSampler)(tris, numTris, pts, numPts, r, numSampleAttemptsAroundPoint); } PxVec3 computeBarycentricCoordinates(PxVec3 p, const PxVec3& a, PxVec3 b, PxVec3 c) { PxVec4 bary; computeBarycentric(a, b, c, p, bary); return PxVec3(bary.x, bary.y, bary.z); } ShapePoissonSampler::ShapePoissonSampler(const PxGeometry& shape_, const PxTransform& actorGlobalPose_, const PxBounds3& worldBounds_, PxReal r, PxI32 numSampleAttemptsAroundPoint_, PxU32 maxNumSamples_) : shape(shape_), actorGlobalPose(actorGlobalPose_) { poissonSamplerShared.size = worldBounds_.maximum - worldBounds_.minimum; poissonSamplerShared.min = worldBounds_.minimum; poissonSamplerShared.numSampleAttemptsAroundPoint = numSampleAttemptsAroundPoint_; poissonSamplerShared.maxNumSamples = maxNumSamples_; setSamplingRadius(r); } TriangleMeshPoissonSampler::TriangleMeshPoissonSampler(const PxU32* tris, PxU32 numTris, const PxVec3* pts_, PxU32 numPts, PxReal r, PxI32 numSampleAttemptsAroundPoint_, PxU32 maxNumSamples_) : originalPoints(pts_), originalTriangles(tris), numOriginalTriangles(numTris) { poissonSamplerShared.currentSamplingRadius = 0.0f; poissonSamplerShared.numSampleAttemptsAroundPoint = numSampleAttemptsAroundPoint_; 
poissonSamplerShared.maxNumSamples = maxNumSamples_; getBoundsFromPoints(originalPoints, numPts, poissonSamplerShared.min, max); poissonSamplerShared.size = max - poissonSamplerShared.min; points.assign(originalPoints, originalPoints + numPts); triangles.assign(tris, tris + 3 * numTris); PxRemeshingExt::limitMaxEdgeLength(triangles, points, 2.0f * r, 100, &triangleMap, PxMax(10000u, 4 * numTris)); PxU32 numTriangles = triangles.size() / 3; triangleAreaBuffer.resize(numTriangles); for (PxU32 i = 0; i < numTriangles; ++i) triangleAreaBuffer[i] = triArea(&triangles[3 * i], points.begin()); buildTriangleAdjacency(triangles.begin(), numTriangles, adj); setSamplingRadius(r); } void PoissonSamplerShared::addSamples(const PxArray<PxVec3>& samples) { if (samples.size() > 0) { for (PxU32 i = 0; i < samples.size(); ++i) { result.pushBack(samples[i]); } rebuildSparseGrid(); } } PxI32 PoissonSamplerShared::findSample(const PxVec3& p) { PxI32 x = PxI32((p.x - min.x) / cellSize); PxI32 y = PxI32((p.y - min.y) / cellSize); PxI32 z = PxI32((p.z - min.z) / cellSize); if (x >= resolution.x) x = resolution.x - 1; if (y >= resolution.y) y = resolution.y - 1; if (z >= resolution.z) z = resolution.z - 1; PxReal minDist = FLT_MAX; PxI32 index = -1; for (PxI32 oX = -1; oX <= 1; ++oX) { for (PxI32 oY = -1; oY <= 1; ++oY) { for (PxI32 oZ = -1; oZ <= 1; ++oZ) { const PxI32 xx = x + oX; const PxI32 yy = y + oY; const PxI32 zz = z + oZ; if (xx >= 0 && xx < resolution.x && yy >= 0 && yy < resolution.y && zz >= 0 && zz < resolution.z) { PxI32 cellIndex = xx + resolution.x * yy + (resolution.x * resolution.y) * zz; if ((occupiedCellBits[cellIndex >> 5] & (1u << (cellIndex & 31))) != 0) { const PxPair<const PxI32, SparseGridNode>* it = sparseGrid3D.find(cellIndex); if (it) { const PxReal dist2 = (result[it->second.mPointIndex] - p).magnitudeSquared(); if (dist2 < minDist) { minDist = dist2; index = it->second.mPointIndex; } if (it->second.mExcessStartIndex >= 0) { for (PxI32 i = 
it->second.mExcessStartIndex; i < it->second.mExcessEndIndex; ++i) { const PxReal dist2_ = (result[excessList[i]] - p).magnitudeSquared(); if (dist2_ < minDist) { minDist = dist2_; index = it->second.mPointIndex; } } } if (minDist == 0.0f) { return index; } } } } } } } return -1; } PxU32 PoissonSamplerShared::removeSamples(const PxArray<PxVec3>& samples) { if (samples.size() > 0) { PxArray<PxI32> samplesToRemove; samplesToRemove.reserve(samples.size()); for (PxU32 i = 0; i < samples.size(); ++i) { PxI32 index = findSample(samples[i]); PX_ASSERT(samples[i] == result[index]); if (index >= 0) samplesToRemove.pushBack(index); } PxSort(samplesToRemove.begin(), samplesToRemove.size()); PxI32 counter = 0; for (PxI32 i = PxI32(samplesToRemove.size()) - 1; i >= 0; --i) { result[samplesToRemove[i]] = result[result.size() - 1 - counter]; ++counter; } result.removeRange(result.size() - counter, counter); rebuildSparseGrid(); return samplesToRemove.size(); } return 0; } bool PoissonSamplerShared::setSamplingRadius(PxReal r) { if (r != currentSamplingRadius) { currentSamplingRadius = r; return rebuildSparseGrid(); } return gridResolutionValid; } bool PoissonSamplerShared::rebuildSparseGrid() { const PxReal dimension = 3.0f; cellSize = (currentSamplingRadius / PxSqrt(dimension)) * 0.9999f; const PxF64 cellsX = PxF64(size.x) / PxF64(cellSize); const PxF64 cellsY = PxF64(size.y) / PxF64(cellSize); const PxF64 cellsZ = PxF64(size.z) / PxF64(cellSize); resolution = Int3(PxMax(1, PxI32(ceil(cellsX))), PxMax(1, PxI32(ceil(cellsY))), PxMax(1, PxI32(ceil(cellsZ)))); const PxF64 numCellsDbl = PxF64(resolution.x) * PxF64(resolution.y) * PxI64(resolution.z); if (numCellsDbl >= (1u << 31) || cellsX >= (1u << 31) || cellsY >= (1u << 31) || cellsZ >= (1u << 31)) { gridResolutionValid = false; PxGetFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, PX_FL, "Internal grid resolution of sampler too high. 
Either a smaller mesh or a bigger radius must be used."); return false; } gridResolutionValid = true; PxU32 numCells = PxU32(resolution.x * resolution.y * resolution.z); occupiedCellBits.clear(); occupiedCellBits.resize((numCells + 32 - 1) / 32, 0); sparseGrid3D.clear(); for (PxU32 i = 0; i < result.size(); ++i) preAddPointToSparseGrid(result[i], i); PxI32 cumulativeSum = 0; for (PxHashMap<PxI32, SparseGridNode>::Iterator iter = sparseGrid3D.getIterator(); !iter.done(); ++iter) { if (iter->second.mExcessStartIndex > 0) { PxI32 start = cumulativeSum; cumulativeSum += iter->second.mExcessStartIndex; iter->second.mExcessStartIndex = start; iter->second.mExcessEndIndex = start - 1; } else { iter->second.mExcessStartIndex = -1; } } excessList.resize(cumulativeSum); for (PxU32 i = 0; i < result.size(); ++i) postAddPointToSparseGrid(result[i], i); return true; } bool PoissonSamplerShared::postAddPointToSparseGrid(const PxVec3& p, PxI32 pointIndex) { PxI32 x = PxI32((p.x - min.x) / cellSize); PxI32 y = PxI32((p.y - min.y) / cellSize); PxI32 z = PxI32((p.z - min.z) / cellSize); if (x >= resolution.x) x = resolution.x - 1; if (y >= resolution.y) y = resolution.y - 1; if (z >= resolution.z) z = resolution.z - 1; PxI32 cellIndex = x + resolution.x * y + (resolution.x * resolution.y) * z; SparseGridNode& n = sparseGrid3D[cellIndex]; if (n.mExcessStartIndex < 0) { PX_ASSERT(n.mPointIndex == pointIndex); return true; } if (n.mExcessEndIndex < n.mExcessStartIndex) { PX_ASSERT(n.mPointIndex == pointIndex); n.mExcessEndIndex++; return true; } else { excessList[n.mExcessEndIndex] = pointIndex; n.mExcessEndIndex++; return true; } } bool PoissonSamplerShared::preAddPointToSparseGrid(const PxVec3& p, PxI32 pointIndex) { PxI32 x = PxI32((p.x - min.x) / cellSize); PxI32 y = PxI32((p.y - min.y) / cellSize); PxI32 z = PxI32((p.z - min.z) / cellSize); if (x >= resolution.x) x = resolution.x - 1; if (y >= resolution.y) y = resolution.y - 1; if (z >= resolution.z) z = resolution.z - 1; PxI32 
cellIndex = x + resolution.x * y + (resolution.x * resolution.y) * z; if ((occupiedCellBits[cellIndex >> 5] & (1u << (cellIndex & 31))) != 0) { SparseGridNode& n = sparseGrid3D[cellIndex]; n.mExcessStartIndex++; return true; } else { sparseGrid3D.insert(cellIndex, SparseGridNode(pointIndex)); occupiedCellBits[cellIndex >> 5] ^= (1u << (cellIndex & 31)); return true; } } bool PoissonSamplerShared::addPointToSparseGrid(const PxVec3& p, PxI32 pointIndex) { PxI32 x = PxI32((p.x - min.x) / cellSize); PxI32 y = PxI32((p.y - min.y) / cellSize); PxI32 z = PxI32((p.z - min.z) / cellSize); if (x >= resolution.x) x = resolution.x - 1; if (y >= resolution.y) y = resolution.y - 1; if (z >= resolution.z) z = resolution.z - 1; PxI32 cellIndex = x + resolution.x * y + (resolution.x * resolution.y) * z; //if (sparseGrid3D.ContainsKey(cellIndex)) // return false; sparseGrid3D.insert(cellIndex, pointIndex); occupiedCellBits[cellIndex >> 5] ^= (1u << (cellIndex & 31)); return true; } PxReal PoissonSamplerShared::minDistanceToOtherSamplesSquared(const PxVec3& p) const { PxI32 x = PxI32((p.x - min.x) / cellSize); PxI32 y = PxI32((p.y - min.y) / cellSize); PxI32 z = PxI32((p.z - min.z) / cellSize); if (x >= resolution.x) x = resolution.x - 1; if (y >= resolution.y) y = resolution.y - 1; if (z >= resolution.z) z = resolution.z - 1; PxReal minDist = FLT_MAX; for (PxI32 oX = -2; oX <= 2; ++oX) { for (PxI32 oY = -2; oY <= 2; ++oY) { for (PxI32 oZ = -2; oZ <= 2; ++oZ) { const PxI32 xx = x + oX; const PxI32 yy = y + oY; const PxI32 zz = z + oZ; if (xx >= 0 && xx < resolution.x && yy >= 0 && yy < resolution.y && zz >= 0 && zz < resolution.z) { PxI32 cellIndex = xx + resolution.x * yy + (resolution.x * resolution.y) * zz; if ((occupiedCellBits[cellIndex >> 5] & (1u << (cellIndex & 31))) != 0) { const PxPair<const PxI32, SparseGridNode>* it = sparseGrid3D.find(cellIndex); if (it) { const PxReal dist2 = (result[it->second.mPointIndex] - p).magnitudeSquared(); if (dist2 < minDist) minDist = dist2; 
if (it->second.mExcessStartIndex >= 0) { for (PxI32 i = it->second.mExcessStartIndex; i < it->second.mExcessEndIndex; ++i) { const PxReal dist2_ = (result[excessList[i]] - p).magnitudeSquared(); if (dist2_ < minDist) minDist = dist2_; } } } } } } } } return minDist; } void buildTree(const PxU32* triangles, const PxU32 numTriangles, const PxVec3* points, PxArray<Gu::BVHNode>& tree, PxF32 enlargement = 1e-4f) { //Computes a bounding box for every triangle in triangles Gu::AABBTreeBounds boxes; boxes.init(numTriangles); for (PxU32 i = 0; i < numTriangles; ++i) { const PxU32* tri = &triangles[3 * i]; PxBounds3 box = PxBounds3::empty(); box.include(points[tri[0]]); box.include(points[tri[1]]); box.include(points[tri[2]]); box.fattenFast(enlargement); boxes.getBounds()[i] = box; } Gu::buildAABBTree(numTriangles, boxes, tree); } bool TriangleMeshPoissonSampler::isPointInTriangleMesh(const PxVec3& p) { if (tree.size() == 0) { //Lazy initialization buildTree(originalTriangles, numOriginalTriangles, originalPoints, tree); Gu::precomputeClusterInformation(tree.begin(), originalTriangles, numOriginalTriangles, originalPoints, clusters); } return pointInMesh(p); } void TriangleMeshPoissonSampler::addSamplesInBox(const PxBounds3& axisAlignedBox, const PxQuat& boxOrientation, bool createVolumeSamples) { PointInOBBTester pointInOBB(axisAlignedBox.getCenter(), axisAlignedBox.getExtents(), boxOrientation); addSamplesInVolume(pointInOBB, axisAlignedBox.getCenter(), axisAlignedBox.getExtents().magnitude(), createVolumeSamples); } void TriangleMeshPoissonSampler::addSamplesInSphere(const PxVec3& sphereCenter, PxReal sphereRadius, bool createVolumeSamples) { PointInSphereTester pointInSphere(sphereCenter, sphereRadius); addSamplesInVolume(pointInSphere, sphereCenter, sphereRadius, createVolumeSamples); } //Ideally the sphere center is located on the mesh's surface void TriangleMeshPoissonSampler::addSamplesInVolume(const PointInVolumeTester& pointInVolume, const PxVec3& sphereCenter, 
PxReal sphereRadius, bool volumeSamples) { PxArray<PxU32> localActiveSamples; for (PxU32 i = 0; i < activeSamples.size();) { if (activeSamples[i].mIndex >= PxI32(poissonSamplerShared.result.size())) { activeSamples[i] = activeSamples[activeSamples.size() - 1]; activeSamples.remove(activeSamples.size() - 1); continue; } if (pointInSphere(poissonSamplerShared.result[activeSamples[i].mIndex], sphereCenter, sphereRadius)) localActiveSamples.pushBack(i); ++i; } if (localActiveSamples.size() == 0) { const PxReal r = poissonSamplerShared.currentSamplingRadius; //Try to find a seed sample for (PxU32 i = 0; i < triangles.size(); i += 3) { PxVec3 p = (1.0f / 3.0f) * (points[triangles[i]] + points[triangles[i + 1]] + points[triangles[i + 2]]); PxReal triRadius = PxSqrt(PxMax((p - points[triangles[i]]).magnitudeSquared(), PxMax((p - points[triangles[i + 1]]).magnitudeSquared(), (p - points[triangles[i + 2]]).magnitudeSquared()))); PxReal sum = triRadius + sphereRadius; if ((p - sphereCenter).magnitudeSquared() < sum * sum) { bool success = false; for (PxI32 j = 0; j < 30; ++j) { PxVec3 sample = randomPointOnTriangle(poissonSamplerShared.rnd, triangles.begin() + i, points.begin()); if (poissonSamplerShared.minDistanceToOtherSamplesSquared(sample) > r * r) { if (pointInVolume.pointInVolume(sample)) { PxI32 newSampleId = PxI32(poissonSamplerShared.result.size()); PxU32 sampleTriId = i / 3; ActiveSample as; createActiveSample(triangleAreaBuffer, newSampleId, sample, 2 * r, sampleTriId, triangles.begin(), points.begin(), adj, as); localActiveSamples.pushBack(activeSamples.size()); activeSamples.pushBack(as); poissonSamplerShared.result.pushBack(sample); triangleIds.pushBack(triangleMap[sampleTriId]); { const PxU32 triId = triangleMap[sampleTriId]; const PxU32* origTri = &originalTriangles[3 * triId]; barycentricCoordinates.pushBack(computeBarycentricCoordinates(sample, originalPoints[origTri[0]], originalPoints[origTri[1]], originalPoints[origTri[2]])); } 
poissonSamplerShared.addPointToSparseGrid(sample, newSampleId); success = true; if (poissonSamplerShared.maxNumSamples > 0 && poissonSamplerShared.result.size() >= poissonSamplerShared.maxNumSamples) return; break; } } } if (success) break; } } } //Start poisson sampling while (localActiveSamples.size() > 0) { const PxReal r = poissonSamplerShared.currentSamplingRadius; PxI32 localSampleIndex = poissonSamplerShared.rnd.rand32() % localActiveSamples.size(); PxI32 selectedActiveSample = localActiveSamples[localSampleIndex]; const ActiveSample& s = activeSamples[selectedActiveSample]; bool successDist = false; bool success = false; for (PxI32 i = 0; i < poissonSamplerShared.numSampleAttemptsAroundPoint; ++i) { PxI32 sampleTriId; PxVec3 barycentricCoordinate; PxVec3 sample; if (samplePointInBallOnSurface(poissonSamplerShared.rnd, s.mCumulativeTriangleAreas, points.begin(), triangles.begin(), s.mNearbyTriangles, poissonSamplerShared.result[s.mIndex], r, sample, sampleTriId, 30, &barycentricCoordinate)) { if (poissonSamplerShared.minDistanceToOtherSamplesSquared(sample) > r * r) { successDist = true; if (pointInVolume.pointInVolume(sample)) { PxI32 newSampleId = PxI32(poissonSamplerShared.result.size()); ActiveSample as; createActiveSample(triangleAreaBuffer, newSampleId, sample, 2 * r, sampleTriId, triangles.begin(), points.begin(), adj, as); localActiveSamples.pushBack(activeSamples.size()); activeSamples.pushBack(as); poissonSamplerShared.result.pushBack(sample); triangleIds.pushBack(triangleMap[sampleTriId]); { const PxU32 triId = triangleMap[sampleTriId]; const PxU32* origTri = &originalTriangles[3 * triId]; barycentricCoordinates.pushBack(computeBarycentricCoordinates(sample, originalPoints[origTri[0]], originalPoints[origTri[1]], originalPoints[origTri[2]])); } poissonSamplerShared.addPointToSparseGrid(sample, newSampleId); success = true; if (poissonSamplerShared.maxNumSamples > 0 && poissonSamplerShared.result.size() >= poissonSamplerShared.maxNumSamples) 
return; break; } } } } if (!successDist) { PxU32 oldId = activeSamples.size() - 1; activeSamples[selectedActiveSample] = activeSamples[activeSamples.size() - 1]; activeSamples.remove(activeSamples.size() - 1); for (PxU32 i = 0; i < localActiveSamples.size(); ++i) { if (localActiveSamples[i] == oldId) { localActiveSamples[i] = selectedActiveSample; break; } } } if (!success) { localActiveSamples[localSampleIndex] = localActiveSamples[localActiveSamples.size() - 1]; localActiveSamples.remove(localActiveSamples.size() - 1); } } if (volumeSamples) { PxReal randomScale = 0.1f; //Relative to particleSpacing PxReal r = (1.0f + 2.0f * randomScale) * 1.001f * poissonSamplerShared.currentSamplingRadius; createVolumeSamples(pointInVolume, sphereCenter, sphereRadius, randomScale, r); } } void TriangleMeshPoissonSampler::createVolumeSamples(const PointInVolumeTester& pointInVolume, const PxVec3& sphereCenter, PxReal sphereRadius, PxReal randomScale, PxReal r, bool addToSparseGrid) { if (tree.size() == 0) { //Lazy initialization buildTree(originalTriangles, numOriginalTriangles, originalPoints, tree); Gu::precomputeClusterInformation(tree.begin(), originalTriangles, numOriginalTriangles, originalPoints, clusters); } PxVec3 sphereBoxMin = PxVec3(sphereCenter.x - sphereRadius, sphereCenter.y - sphereRadius, sphereCenter.z - sphereRadius) - poissonSamplerShared.min; PxVec3 sphereBoxMax = PxVec3(sphereCenter.x + sphereRadius, sphereCenter.y + sphereRadius, sphereCenter.z + sphereRadius) - poissonSamplerShared.min; Int3 start(PxI32(PxFloor(sphereBoxMin.x / r)), PxI32(PxFloor(sphereBoxMin.y / r)), PxI32(PxFloor(sphereBoxMin.z / r))); Int3 end(PxI32(PxCeil(sphereBoxMax.x / r)), PxI32(PxCeil(sphereBoxMax.y / r)), PxI32(PxCeil(sphereBoxMax.z / r))); for (PxI32 x = start.x; x < end.x; ++x) { for (PxI32 y = start.y; y < end.y; ++y) { for (PxI32 z = start.z; z < end.z; ++z) { PxVec3 p = poissonSamplerShared.min + PxVec3(x * r, y * r, z * r); p += 
PxVec3(poissonSamplerShared.rnd.randomFloat32(-randomScale, randomScale) * poissonSamplerShared.currentSamplingRadius, poissonSamplerShared.rnd.randomFloat32(-randomScale, randomScale) * poissonSamplerShared.currentSamplingRadius, poissonSamplerShared.rnd.randomFloat32(-randomScale, randomScale) * poissonSamplerShared.currentSamplingRadius); if (pointInVolume.pointInVolume(p)) { if (poissonSamplerShared.minDistanceToOtherSamplesSquared(p) > poissonSamplerShared.currentSamplingRadius * poissonSamplerShared.currentSamplingRadius && pointInMesh(p)) { PxI32 newSampleId = PxI32(poissonSamplerShared.result.size()); poissonSamplerShared.result.pushBack(p); if (addToSparseGrid) poissonSamplerShared.addPointToSparseGrid(p, newSampleId); triangleIds.pushBack(-1); barycentricCoordinates.pushBack(PxVec3(0.0f)); if (poissonSamplerShared.maxNumSamples > 0 && poissonSamplerShared.result.size() >= poissonSamplerShared.maxNumSamples) return; } } } } } } PxU32 PX_FORCE_INLINE raycast(const PxGeometry& geometry, const PxTransform& transform, const PxVec3& rayOrigin, const PxVec3& rayDir, PxReal maxDist, PxHitFlags hitFlags, PxU32 maxHits, PxRaycastHit* rayHits) { return PxGeometryQuery::raycast( rayOrigin, rayDir, geometry, transform, maxDist, hitFlags, maxHits, rayHits); } bool PX_FORCE_INLINE pointInShape(const PxGeometry& geometry, const PxTransform& transform, const PxVec3& p) { //This is a bit a work-around solution: When a raycast starts inside of a shape, it returns the ray origin as hit point PxRaycastHit hit; return PxGeometryQuery::raycast(p, PxVec3(1.0f, 0.0f, 0.0f), geometry, transform, 1.0f, PxHitFlag::ePOSITION, 1, &hit) > 0 && hit.position == p; } bool projectionCallback(PxReal targetDistance, const PxGeometry& geometry, const PxTransform& transform, PxReal boundingBoxDiagonalLength, const PxVec3& p, const PxVec3& n, PointWithNormal& result) { PxRaycastHit hitPos; PxU32 numHitsPos = raycast(geometry, transform, p - boundingBoxDiagonalLength * n, n, 2 * 
boundingBoxDiagonalLength, PxHitFlag::eMESH_BOTH_SIDES | PxHitFlag::ePOSITION | PxHitFlag::eNORMAL, 1, &hitPos); PxRaycastHit hitNeg; PxU32 numHitsNeg = raycast(geometry, transform, p + boundingBoxDiagonalLength * n, -n, 2 * boundingBoxDiagonalLength, PxHitFlag::eMESH_BOTH_SIDES | PxHitFlag::ePOSITION | PxHitFlag::eNORMAL, 1, &hitNeg); targetDistance *= 0.5f; if (numHitsPos && numHitsNeg) { if (PxAbs((hitPos.position - p).magnitude() - targetDistance) < PxAbs((hitNeg.position - p).magnitude() - targetDistance)) result = PointWithNormal(hitPos.position, hitPos.normal); else result = PointWithNormal(hitNeg.position, hitNeg.normal); return true; } else if (numHitsPos) { result = PointWithNormal(hitPos.position, hitPos.normal); return true; } else if (numHitsNeg) { result = PointWithNormal(hitNeg.position, hitNeg.normal); return true; } return false; } PxVec3 randomDirection(BasicRandom& rnd) { PxVec3 dir; do { dir = PxVec3((rnd.rand(0.0f, 1.0f) - 0.5f), (rnd.rand(0.0f, 1.0f) - 0.5f), (rnd.rand(0.0f, 1.0f) - 0.5f)); } while (dir.magnitudeSquared() < 1e-8f); return dir.getNormalized(); } PxVec3 getPerpendicularVectorNormalized(const PxVec3& dir) { PxReal x = PxAbs(dir.x); PxReal y = PxAbs(dir.y); PxReal z = PxAbs(dir.z); if (x >= y && x >= z) return dir.cross(PxVec3(0, 1, 0)).getNormalized(); else if (y >= x && y >= z) return dir.cross(PxVec3(0, 0, 1)).getNormalized(); else return dir.cross(PxVec3(1, 0, 0)).getNormalized(); } PxVec3 randomPointOnDisc(BasicRandom& rnd, const PxVec3& point, const PxVec3& normal, PxReal radius, PxReal rUpper) { //TODO: Use better random number generator PxReal r = radius + rnd.rand(0.0f, 1.0f) * (rUpper - radius); PxVec3 x = getPerpendicularVectorNormalized(normal); PxVec3 y = normal.cross(x).getNormalized(); PxReal angle = rnd.rand(0.0f, 1.0f) * (2.0f * 3.1415926535898f); return point + x * (r * PxCos(angle)) + y * (r * PxSin(angle)); } bool samplePointInBallOnSurface(BasicRandom& rnd, const PxGeometry& shape, const PxTransform& 
actorGlobalPose, const PxReal diagonalLength, const PxVec3& point, const PxVec3& normal, PxReal radius, PointWithNormal& sample, PxI32 numAttempts = 30) { for (PxI32 i = 0; i < numAttempts; ++i) { PxVec3 p = randomPointOnDisc(rnd, point, normal, radius, 2 * radius); //Distort the direction of the normal a bit PxVec3 n = normal; do { n.x += 0.5f * (rnd.rand(0.0f, 1.0f) - 0.5f); n.y += 0.5f * (rnd.rand(0.0f, 1.0f) - 0.5f); n.z += 0.5f * (rnd.rand(0.0f, 1.0f) - 0.5f); } while (n.magnitudeSquared() < 1e-8f); n.normalize(); if (projectionCallback(radius, shape, actorGlobalPose, diagonalLength, p, n, sample)) { PxReal d2 = (sample.mPoint - point).magnitudeSquared(); if (d2 >= radius * radius && d2 < 4 * radius * radius) return true; } } return false; } void ShapePoissonSampler::addSamplesInBox(const PxBounds3& axisAlignedBox, const PxQuat& boxOrientation, bool createVolumeSamples) { PointInOBBTester pointInOBB(axisAlignedBox.getCenter(), axisAlignedBox.getExtents(), boxOrientation); addSamplesInVolume(pointInOBB, axisAlignedBox.getCenter(), axisAlignedBox.getExtents().magnitude(), createVolumeSamples); } void ShapePoissonSampler::addSamplesInSphere(const PxVec3& sphereCenter, PxReal sphereRadius, bool createVolumeSamples) { PointInSphereTester pointInSphere(sphereCenter, sphereRadius); addSamplesInVolume(pointInSphere, sphereCenter, sphereRadius, createVolumeSamples); } void ShapePoissonSampler::addSamplesInVolume(const PointInVolumeTester& pointInVolume, const PxVec3& sphereCenter, PxReal sphereRadius, bool volumeSamples) { PxReal boundingBoxDiagonalLength = poissonSamplerShared.size.magnitude(); PxArray<PxU32> localActiveSamples; for (PxU32 i = 0; i < activeSamples.size();) { if (activeSamples[i].mIndex >= PxI32(poissonSamplerShared.result.size())) { activeSamples[i] = activeSamples[activeSamples.size() - 1]; activeSamples.remove(activeSamples.size() - 1); continue; } if (pointInSphere(poissonSamplerShared.result[activeSamples[i].mIndex], sphereCenter, sphereRadius)) 
localActiveSamples.pushBack(i); ++i; } if (localActiveSamples.size() == 0) { PxVec3 center = poissonSamplerShared.min + 0.5f * poissonSamplerShared.size;; PointWithNormal sample; PxVec3 arbitrarySeedPointOnSurface; PxVec3 reference = sphereCenter - center; reference.normalizeSafe(); for (PxI32 i = 0; i < 1000; ++i) { PxVec3 dir = /*reference + 0.5f**/randomDirection(poissonSamplerShared.rnd); dir.normalize(); if (projectionCallback(poissonSamplerShared.currentSamplingRadius, shape, actorGlobalPose, boundingBoxDiagonalLength, sphereCenter, dir, sample)) { if (poissonSamplerShared.minDistanceToOtherSamplesSquared(sample.mPoint) > poissonSamplerShared.currentSamplingRadius * poissonSamplerShared.currentSamplingRadius) { if (pointInVolume.pointInVolume(sample.mPoint)) { PxI32 newSampleId = PxI32(poissonSamplerShared.result.size()); localActiveSamples.pushBack(activeSamples.size()); activeSamples.pushBack(IndexWithNormal(newSampleId, sample.mNormal)); poissonSamplerShared.result.pushBack(sample.mPoint); poissonSamplerShared.addPointToSparseGrid(sample.mPoint, newSampleId); if (poissonSamplerShared.maxNumSamples > 0 && poissonSamplerShared.result.size() >= poissonSamplerShared.maxNumSamples) return; break; } } } } } while (localActiveSamples.size() > 0) { PxI32 localSampleIndex = poissonSamplerShared.rnd.rand32() % localActiveSamples.size(); PxI32 selectedActiveSample = localActiveSamples[localSampleIndex]; const IndexWithNormal& s = activeSamples[selectedActiveSample]; bool successDist = false; bool success = false; for (PxI32 i = 0; i < poissonSamplerShared.numSampleAttemptsAroundPoint; ++i) { PointWithNormal sample; if (samplePointInBallOnSurface(poissonSamplerShared.rnd, shape, actorGlobalPose, boundingBoxDiagonalLength, poissonSamplerShared.result[s.mIndex], s.mNormal, poissonSamplerShared.currentSamplingRadius, sample)) { if (poissonSamplerShared.minDistanceToOtherSamplesSquared(sample.mPoint) > poissonSamplerShared.currentSamplingRadius * 
poissonSamplerShared.currentSamplingRadius) { successDist = true; if (pointInVolume.pointInVolume(sample.mPoint)) { successDist = true; PxI32 newSampleId = PxI32(poissonSamplerShared.result.size()); localActiveSamples.pushBack(activeSamples.size()); activeSamples.pushBack(IndexWithNormal(newSampleId, sample.mNormal)); poissonSamplerShared.result.pushBack(sample.mPoint); poissonSamplerShared.addPointToSparseGrid(sample.mPoint, newSampleId); success = true; if (poissonSamplerShared.maxNumSamples > 0 && poissonSamplerShared.result.size() >= poissonSamplerShared.maxNumSamples) return; break; } } } } if (!successDist) { PxU32 oldId = activeSamples.size() - 1; activeSamples[selectedActiveSample] = activeSamples[activeSamples.size() - 1]; activeSamples.remove(activeSamples.size() - 1); for (PxU32 i = 0; i < localActiveSamples.size(); ++i) { if (localActiveSamples[i] == oldId) { localActiveSamples[i] = selectedActiveSample; break; } } } if (!success) { localActiveSamples[localSampleIndex] = localActiveSamples[localActiveSamples.size() - 1]; localActiveSamples.remove(localActiveSamples.size() - 1); } } if (volumeSamples) { PxReal randomScale = 0.1f; //Relative to particleSpacing PxReal r = (1.0f + 2.0f * randomScale) * 1.001f * poissonSamplerShared.currentSamplingRadius; createVolumeSamples(pointInVolume, sphereCenter, sphereRadius, randomScale, r); } } void ShapePoissonSampler::createVolumeSamples(const PointInVolumeTester& pointInVolume, const PxVec3& sphereCenter, PxReal sphereRadius, PxReal randomScale, PxReal r, bool addToSparseGrid) { PxVec3 sphereBoxMin = PxVec3(sphereCenter.x - sphereRadius, sphereCenter.y - sphereRadius, sphereCenter.z - sphereRadius) - poissonSamplerShared.min; PxVec3 sphereBoxMax = PxVec3(sphereCenter.x + sphereRadius, sphereCenter.y + sphereRadius, sphereCenter.z + sphereRadius) - poissonSamplerShared.min; Int3 start(PxI32(PxFloor(sphereBoxMin.x / r)), PxI32(PxFloor(sphereBoxMin.y / r)), PxI32(PxFloor(sphereBoxMin.z / r))); Int3 
end(PxI32(PxCeil(sphereBoxMax.x / r)), PxI32(PxCeil(sphereBoxMax.y / r)), PxI32(PxCeil(sphereBoxMax.z / r))); for (PxI32 x = start.x; x < end.x; ++x) { for (PxI32 y = start.y; y < end.y; ++y) { for (PxI32 z = start.z; z < end.z; ++z) { PxVec3 p = poissonSamplerShared.min + PxVec3(x * r, y * r, z * r); p += PxVec3(poissonSamplerShared.rnd.randomFloat32(-randomScale, randomScale) * poissonSamplerShared.currentSamplingRadius, poissonSamplerShared.rnd.randomFloat32(-randomScale, randomScale) * poissonSamplerShared.currentSamplingRadius, poissonSamplerShared.rnd.randomFloat32(-randomScale, randomScale) * poissonSamplerShared.currentSamplingRadius); if (pointInVolume.pointInVolume(p) && pointInShape(shape, actorGlobalPose, p)) { if (poissonSamplerShared.minDistanceToOtherSamplesSquared(p) > poissonSamplerShared.currentSamplingRadius * poissonSamplerShared.currentSamplingRadius) { PxI32 newSampleId = PxI32(poissonSamplerShared.result.size()); poissonSamplerShared.result.pushBack(p); if (addToSparseGrid) poissonSamplerShared.addPointToSparseGrid(p, newSampleId); if (poissonSamplerShared.maxNumSamples > 0 && poissonSamplerShared.result.size() >= poissonSamplerShared.maxNumSamples) return; } } } } } } //Use for triangle meshes //https://www.cs.ubc.ca/~rbridson/docs/bridson-siggraph07-poissondisk.pdf bool PxSamplingExt::poissonSample(const PxSimpleTriangleMesh& mesh, PxReal r, PxArray<PxVec3>& result, PxReal rVolume, PxArray<PxI32>* triangleIds, PxArray<PxVec3>* barycentricCoordinates, const PxBounds3* axisAlignedBox, const PxQuat* boxOrientation, PxU32 maxNumSamples, PxU32 numSampleAttemptsAroundPoint) { TriangleMeshPoissonSampler sampler(reinterpret_cast<const PxU32*>(mesh.triangles.data), mesh.triangles.count, reinterpret_cast<const PxVec3*>(mesh.points.data), mesh.points.count, r, numSampleAttemptsAroundPoint, maxNumSamples); if (!sampler.poissonSamplerShared.gridResolutionValid) { return false; } PxVec3 center = 0.5f*(sampler.max + sampler.poissonSamplerShared.min); 
PxReal boundingSphereRadius = 1.001f * (sampler.max - sampler.poissonSamplerShared.min).magnitude() * 0.5f; if (axisAlignedBox == NULL || boxOrientation == NULL) { sampler.addSamplesInSphere(center, boundingSphereRadius, false); if (rVolume > 0.0f) { AlwaysInsideTester tester; sampler.createVolumeSamples(tester, center, boundingSphereRadius, 0.1f, rVolume, false); } } else { sampler.addSamplesInBox(*axisAlignedBox, *boxOrientation, false); if (rVolume > 0.0f) { PointInOBBTester tester(axisAlignedBox->getCenter(), axisAlignedBox->getExtents(), *boxOrientation); sampler.createVolumeSamples(tester, center, boundingSphereRadius, 0.1f, rVolume, false); } } result = sampler.getSamples(); if (triangleIds) *triangleIds = sampler.getSampleTriangleIds(); if (barycentricCoordinates) *barycentricCoordinates = sampler.getSampleBarycentrics(); return true; } bool PxSamplingExt::poissonSample(const PxGeometry& geometry, const PxTransform& transform, const PxBounds3& worldBounds, PxReal r, PxArray<PxVec3>& result, PxReal rVolume, const PxBounds3* axisAlignedBox, const PxQuat* boxOrientation, PxU32 maxNumSamples, PxU32 numSampleAttemptsAroundPoint) { PxVec3 center = worldBounds.getCenter(); ShapePoissonSampler sampler(geometry, transform, worldBounds, r, numSampleAttemptsAroundPoint, maxNumSamples); PxReal boundingSphereRadius = 1.001f * worldBounds.getExtents().magnitude(); if (!sampler.poissonSamplerShared.gridResolutionValid) return false; if (axisAlignedBox == NULL || boxOrientation == NULL) { sampler.addSamplesInSphere(center, worldBounds.getExtents().magnitude() * 1.001f, false); if (rVolume > 0.0f) { AlwaysInsideTester tester; sampler.createVolumeSamples(tester, center, boundingSphereRadius, 0.1f, rVolume, false); } } else { sampler.addSamplesInBox(*axisAlignedBox, *boxOrientation, false); if (rVolume > 0.0f) { PointInOBBTester tester(axisAlignedBox->getCenter(), axisAlignedBox->getExtents(), *boxOrientation); sampler.createVolumeSamples(tester, center, boundingSphereRadius, 
0.1f, rVolume, false); } } result = sampler.getSamples(); return true; } }
50,931
C++
33.251513
235
0.700457
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/ExtDistanceJoint.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef EXT_DISTANCE_JOINT_H #define EXT_DISTANCE_JOINT_H #include "common/PxTolerancesScale.h" #include "extensions/PxDistanceJoint.h" #include "ExtJoint.h" #include "foundation/PxUserAllocated.h" #include "CmUtils.h" namespace physx { struct PxDistanceJointGeneratedValues; namespace Ext { struct DistanceJointData : public JointData { PxReal minDistance; PxReal maxDistance; PxReal tolerance; PxReal stiffness; PxReal damping; PxDistanceJointFlags jointFlags; }; typedef JointT<PxDistanceJoint, DistanceJointData, PxDistanceJointGeneratedValues> DistanceJointT; class DistanceJoint : public DistanceJointT { public: // PX_SERIALIZATION DistanceJoint(PxBaseFlags baseFlags) : DistanceJointT(baseFlags) {} void resolveReferences(PxDeserializationContext& context); static DistanceJoint* createObject(PxU8*& address, PxDeserializationContext& context) { return createJointObject<DistanceJoint>(address, context); } static void getBinaryMetaData(PxOutputStream& stream); //~PX_SERIALIZATION DistanceJoint(const PxTolerancesScale& scale, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1); // PxDistanceJoint virtual PxReal getDistance() const PX_OVERRIDE; virtual void setMinDistance(PxReal distance) PX_OVERRIDE; virtual PxReal getMinDistance() const PX_OVERRIDE; virtual void setMaxDistance(PxReal distance) PX_OVERRIDE; virtual PxReal getMaxDistance() const PX_OVERRIDE; virtual void setTolerance(PxReal tolerance) PX_OVERRIDE; virtual PxReal getTolerance() const PX_OVERRIDE; virtual void setStiffness(PxReal spring) PX_OVERRIDE; virtual PxReal getStiffness() const PX_OVERRIDE; virtual void setDamping(PxReal damping) PX_OVERRIDE; virtual PxReal getDamping() const PX_OVERRIDE; virtual void setDistanceJointFlags(PxDistanceJointFlags flags) PX_OVERRIDE; virtual void setDistanceJointFlag(PxDistanceJointFlag::Enum flag, bool value) PX_OVERRIDE; virtual PxDistanceJointFlags getDistanceJointFlags() const PX_OVERRIDE; //~PxDistanceJoint // 
PxConstraintConnector virtual PxConstraintSolverPrep getPrep() const PX_OVERRIDE; #if PX_SUPPORT_OMNI_PVD virtual void updateOmniPvdProperties() const PX_OVERRIDE; #endif //~PxConstraintConnector }; } // namespace Ext } // namespace physx #endif
4,130
C
42.03125
164
0.765617
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/ExtDefaultCpuDispatcher.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "ExtDefaultCpuDispatcher.h" #include "ExtCpuWorkerThread.h" #include "ExtTaskQueueHelper.h" #include "foundation/PxString.h" using namespace physx; PxDefaultCpuDispatcher* physx::PxDefaultCpuDispatcherCreate(PxU32 numThreads, PxU32* affinityMasks, PxDefaultCpuDispatcherWaitForWorkMode::Enum mode, PxU32 yieldProcessorCount) { return PX_NEW(Ext::DefaultCpuDispatcher)(numThreads, affinityMasks, mode, yieldProcessorCount); } #if !PX_SWITCH void Ext::DefaultCpuDispatcher::getAffinityMasks(PxU32* affinityMasks, PxU32 threadCount) { for(PxU32 i=0; i < threadCount; i++) { affinityMasks[i] = 0; } } #endif Ext::DefaultCpuDispatcher::DefaultCpuDispatcher(PxU32 numThreads, PxU32* affinityMasks, PxDefaultCpuDispatcherWaitForWorkMode::Enum mode, PxU32 yieldProcessorCount) : mQueueEntryPool(EXT_TASK_QUEUE_ENTRY_POOL_SIZE, "QueueEntryPool"), mNumThreads(numThreads), mShuttingDown(false) #if PX_PROFILE ,mRunProfiled(true) #else ,mRunProfiled(false) #endif , mWaitForWorkMode(mode) , mYieldProcessorCount(yieldProcessorCount) { PX_CHECK_MSG((((PxDefaultCpuDispatcherWaitForWorkMode::eYIELD_PROCESSOR == mWaitForWorkMode) && (mYieldProcessorCount > 0)) || (((PxDefaultCpuDispatcherWaitForWorkMode::eYIELD_THREAD == mWaitForWorkMode) || (PxDefaultCpuDispatcherWaitForWorkMode::eWAIT_FOR_WORK == mWaitForWorkMode)) && (0 == mYieldProcessorCount))), "Illegal yield processor count for chosen execute mode"); PxU32* defaultAffinityMasks = NULL; if(!affinityMasks) { defaultAffinityMasks = PX_ALLOCATE(PxU32, numThreads, "ThreadAffinityMasks"); getAffinityMasks(defaultAffinityMasks, numThreads); affinityMasks = defaultAffinityMasks; } // initialize threads first, then start mWorkerThreads = PX_ALLOCATE(CpuWorkerThread, numThreads, "CpuWorkerThread"); const PxU32 nameLength = 32; mThreadNames = PX_ALLOCATE(PxU8, nameLength * numThreads, "CpuWorkerThreadName"); if (mWorkerThreads) { for(PxU32 i = 0; i < numThreads; ++i) { PX_PLACEMENT_NEW(mWorkerThreads+i, CpuWorkerThread)(); 
mWorkerThreads[i].initialize(this); } for(PxU32 i = 0; i < numThreads; ++i) { if (mThreadNames) { char* threadName = reinterpret_cast<char*>(mThreadNames + (i*nameLength)); Pxsnprintf(threadName, nameLength, "PxWorker%02d", i); mWorkerThreads[i].setName(threadName); } mWorkerThreads[i].setAffinityMask(affinityMasks[i]); mWorkerThreads[i].start(PxThread::getDefaultStackSize()); } PX_FREE(defaultAffinityMasks); } else { mNumThreads = 0; } } Ext::DefaultCpuDispatcher::~DefaultCpuDispatcher() { for(PxU32 i = 0; i < mNumThreads; ++i) mWorkerThreads[i].signalQuit(); mShuttingDown = true; if(PxDefaultCpuDispatcherWaitForWorkMode::eWAIT_FOR_WORK == mWaitForWorkMode) mWorkReady.set(); for(PxU32 i = 0; i < mNumThreads; ++i) mWorkerThreads[i].waitForQuit(); for(PxU32 i = 0; i < mNumThreads; ++i) mWorkerThreads[i].~CpuWorkerThread(); PX_FREE(mWorkerThreads); PX_FREE(mThreadNames); } void Ext::DefaultCpuDispatcher::release() { PX_DELETE_THIS; } void Ext::DefaultCpuDispatcher::submitTask(PxBaseTask& task) { if(!mNumThreads) { // no worker threads, run directly runTask(task); task.release(); return; } // TODO: Could use TLS to make this more efficient const PxThread::Id currentThread = PxThread::getId(); const PxU32 nbThreads = mNumThreads; for(PxU32 i=0; i<nbThreads; ++i) { if(mWorkerThreads[i].tryAcceptJobToLocalQueue(task, currentThread)) { if(PxDefaultCpuDispatcherWaitForWorkMode::eWAIT_FOR_WORK == mWaitForWorkMode) { return mWorkReady.set(); } else { PX_ASSERT(PxDefaultCpuDispatcherWaitForWorkMode::eYIELD_PROCESSOR == mWaitForWorkMode || PxDefaultCpuDispatcherWaitForWorkMode::eYIELD_THREAD == mWaitForWorkMode); return; } } } SharedQueueEntry* entry = mQueueEntryPool.getEntry(&task); if(entry) { mJobList.push(*entry); if(PxDefaultCpuDispatcherWaitForWorkMode::eWAIT_FOR_WORK == mWaitForWorkMode) mWorkReady.set(); } } PxBaseTask* Ext::DefaultCpuDispatcher::fetchNextTask() { PxBaseTask* task = getJob(); if(!task) task = stealJob(); return task; } PxBaseTask* 
Ext::DefaultCpuDispatcher::getJob() { return TaskQueueHelper::fetchTask(mJobList, mQueueEntryPool); } PxBaseTask* Ext::DefaultCpuDispatcher::stealJob() { const PxU32 nbThreads = mNumThreads; for(PxU32 i=0; i<nbThreads; ++i) { PxBaseTask* ret = mWorkerThreads[i].giveUpJob(); if(ret) return ret; } return NULL; } void Ext::DefaultCpuDispatcher::resetWakeSignal() { PX_ASSERT(PxDefaultCpuDispatcherWaitForWorkMode::eWAIT_FOR_WORK == mWaitForWorkMode); mWorkReady.reset(); // The code below is necessary to avoid deadlocks on shut down. // A thread usually loops as follows: // while quit is not signaled // 1) reset wake signal // 2) fetch work // 3) if work -> process // 4) else -> wait for wake signal // // If a thread reaches 1) after the thread pool signaled wake up, // the wake up sync gets reset and all other threads which have not // passed 4) already will wait forever. // The code below makes sure that on shutdown, the wake up signal gets // sent again after it was reset // if (mShuttingDown) mWorkReady.set(); }
6,923
C++
30.761468
253
0.744764
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/ExtSceneQueryExt.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "extensions/PxSceneQueryExt.h"
#include "geometry/PxGeometryHelpers.h"
#include "foundation/PxAllocatorCallback.h"
#include "CmUtils.h"
#include "foundation/PxAllocator.h"

using namespace physx;

// Boolean raycast: reports only whether anything is hit. eANY_HIT is forced on
// both the filter data and the hit flags so the query can stop at the first
// intersection found.
bool PxSceneQueryExt::raycastAny(const PxScene& scene,
								 const PxVec3& origin, const PxVec3& unitDir, const PxReal distance,
								 PxSceneQueryHit& hit,
								 const PxSceneQueryFilterData& filterData,
								 PxSceneQueryFilterCallback* filterCall,
								 const PxSceneQueryCache* cache)
{
	PxSceneQueryFilterData anyHitFd = filterData;
	anyHitFd.flags |= PxQueryFlag::eANY_HIT;

	PxRaycastBuffer results;
	scene.raycast(origin, unitDir, distance, results, PxHitFlag::eANY_HIT, anyHitFd, filterCall, cache);

	hit = results.block;
	return results.hasBlock;
}

// Raycast returning only the closest blocking hit. Returns false when nothing
// blocks the ray.
bool PxSceneQueryExt::raycastSingle(const PxScene& scene,
									const PxVec3& origin, const PxVec3& unitDir, const PxReal distance,
									PxSceneQueryFlags outputFlags,
									PxRaycastHit& hit,
									const PxSceneQueryFilterData& filterData,
									PxSceneQueryFilterCallback* filterCall,
									const PxSceneQueryCache* cache)
{
	PxQueryFilterData fd = filterData;

	PxRaycastBuffer results;
	scene.raycast(origin, unitDir, distance, results, outputFlags, fd, filterCall, cache);

	hit = results.block;
	return results.hasBlock;
}

// Raycast reporting all touching hits plus (optionally) the closest blocking
// hit, written into the user-supplied hitBuffer.
// Returns the total number of hits written, or -1 on overflow (in which case
// the last touch is overwritten by the blocking hit).
PxI32 PxSceneQueryExt::raycastMultiple(const PxScene& scene,
									   const PxVec3& origin, const PxVec3& unitDir, const PxReal distance,
									   PxSceneQueryFlags outputFlags,
									   PxRaycastHit* hitBuffer, PxU32 hitBufferSize, bool& blockingHit,
									   const PxSceneQueryFilterData& filterData,
									   PxSceneQueryFilterCallback* filterCall,
									   const PxSceneQueryCache* cache)
{
	PxRaycastBuffer results(hitBuffer, hitBufferSize);
	PxQueryFilterData fd = filterData;
	scene.raycast(origin, unitDir, distance, results, outputFlags, fd, filterCall, cache);

	blockingHit = results.hasBlock;
	if(!blockingHit)
		return PxI32(results.nbTouches);	// touches only

	if(results.nbTouches >= hitBufferSize)
	{
		// Overflow: make room for the blocking hit by dropping the last touch.
		hitBuffer[hitBufferSize-1] = results.block;
		return -1;
	}

	// Append the blocking hit after the recorded touches.
	hitBuffer[results.nbTouches] = results.block;
	return PxI32(results.nbTouches+1);
}

// Boolean sweep: reports only whether the swept geometry hits anything.
bool PxSceneQueryExt::sweepAny(const PxScene& scene,
							   const PxGeometry& geometry, const PxTransform& pose,
							   const PxVec3& unitDir, const PxReal distance,
							   PxSceneQueryFlags queryFlags,
							   PxSceneQueryHit& hit,
							   const PxSceneQueryFilterData& filterData,
							   PxSceneQueryFilterCallback* filterCall,
							   const PxSceneQueryCache* cache,
							   PxReal inflation)
{
	PxSceneQueryFilterData anyHitFd = filterData;
	anyHitFd.flags |= PxQueryFlag::eANY_HIT;

	PxSweepBuffer results;
	scene.sweep(geometry, pose, unitDir, distance, results, queryFlags|PxHitFlag::eANY_HIT, anyHitFd, filterCall, cache, inflation);

	hit = results.block;
	return results.hasBlock;
}

// Sweep returning only the closest blocking hit. Returns false when nothing
// blocks the sweep.
bool PxSceneQueryExt::sweepSingle(const PxScene& scene,
								  const PxGeometry& geometry, const PxTransform& pose,
								  const PxVec3& unitDir, const PxReal distance,
								  PxSceneQueryFlags outputFlags,
								  PxSweepHit& hit,
								  const PxSceneQueryFilterData& filterData,
								  PxSceneQueryFilterCallback* filterCall,
								  const PxSceneQueryCache* cache,
								  PxReal inflation)
{
	PxQueryFilterData fd = filterData;

	PxSweepBuffer results;
	scene.sweep(geometry, pose, unitDir, distance, results, outputFlags, fd, filterCall, cache, inflation);

	hit = results.block;
	return results.hasBlock;
}

// Sweep reporting all touching hits plus (optionally) the closest blocking
// hit. Same output contract as raycastMultiple: -1 signals overflow with the
// blocking hit replacing the last touch.
PxI32 PxSceneQueryExt::sweepMultiple(const PxScene& scene,
									 const PxGeometry& geometry, const PxTransform& pose,
									 const PxVec3& unitDir, const PxReal distance,
									 PxSceneQueryFlags outputFlags,
									 PxSweepHit* hitBuffer, PxU32 hitBufferSize, bool& blockingHit,
									 const PxSceneQueryFilterData& filterData,
									 PxSceneQueryFilterCallback* filterCall,
									 const PxSceneQueryCache* cache,
									 PxReal inflation)
{
	PxSweepBuffer results(hitBuffer, hitBufferSize);
	PxQueryFilterData fd = filterData;
	scene.sweep(geometry, pose, unitDir, distance, results, outputFlags, fd, filterCall, cache, inflation);

	blockingHit = results.hasBlock;
	if(!blockingHit)
		return PxI32(results.nbTouches);	// touches only

	if(results.nbTouches >= hitBufferSize)
	{
		// Overflow: make room for the blocking hit by dropping the last touch.
		hitBuffer[hitBufferSize-1] = results.block;
		return -1;
	}

	// Append the blocking hit after the recorded touches.
	hitBuffer[results.nbTouches] = results.block;
	return PxI32(results.nbTouches+1);
}
// Overlap query reporting all touching hits into the user buffer. eNO_BLOCK is
// forced so every hit is reported as a touch; the block-handling branch below
// is kept for safety if a filter callback still returns eBLOCK.
// Returns the number of hits written, or -1 on overflow (last touch dropped).
PxI32 PxSceneQueryExt::overlapMultiple(const PxScene& scene,
							const PxGeometry& geometry, const PxTransform& pose,
							PxOverlapHit* hitBuffer, PxU32 hitBufferSize,
							const PxSceneQueryFilterData& filterData,
							PxSceneQueryFilterCallback* filterCall)
{
	PxQueryFilterData fd1 = filterData;
	fd1.flags |= PxQueryFlag::eNO_BLOCK;

	PxOverlapBuffer buf(hitBuffer, hitBufferSize);
	scene.overlap(geometry, pose, buf, fd1, filterCall);

	if(buf.hasBlock)
	{
		if(buf.nbTouches < hitBufferSize)
		{
			hitBuffer[buf.nbTouches] = buf.block;
			return PxI32(buf.nbTouches+1);
		}
		else // overflow, drop the last touch
		{
			hitBuffer[hitBufferSize-1] = buf.block;
			return -1;
		}
	}
	else // no block
		return PxI32(buf.nbTouches);
}

// Boolean overlap: reports only whether any shape overlaps the given geometry.
// eANY_HIT allows early-out; eNO_BLOCK keeps filter semantics consistent with
// overlapMultiple.
bool PxSceneQueryExt::overlapAny(const PxScene& scene,
							const PxGeometry& geometry, const PxTransform& pose,
							PxOverlapHit& hit,
							const PxSceneQueryFilterData& filterData,
							PxSceneQueryFilterCallback* filterCall)
{
	PxSceneQueryFilterData fdAny = filterData;
	fdAny.flags |= (PxQueryFlag::eANY_HIT | PxQueryFlag::eNO_BLOCK);

	PxOverlapBuffer buf;
	scene.overlap(geometry, pose, buf, fdAny, filterCall);

	hit = buf.block;
	return buf.hasBlock;
}

// Plain-data records capturing the parameters of one deferred query of each
// type; filled in by ExtBatchQuery::raycast/sweep/overlap and replayed by
// execute().
namespace
{
	struct Raycast
	{
		PxVec3 origin;
		PxVec3 unitDir;
		PxReal distance;
		PxHitFlags hitFlags;
		PxQueryFilterData filterData;
		const PxQueryCache* cache;
	};
	struct Sweep
	{
		PxGeometryHolder geometry;	// holder: owns a copy of the swept geometry
		PxTransform pose;
		PxVec3 unitDir;
		PxReal distance;
		PxHitFlags hitFlags;
		PxQueryFilterData filterData;
		const PxQueryCache* cache;
		PxReal inflation;
	};
	struct Overlap
	{
		PxGeometryHolder geometry;
		PxTransform pose;
		PxQueryFilterData filterData;
		const PxQueryCache* cache;
	};
}

// Hit buffer that detects touch-buffer overflow: processTouches() refuses a
// second batch of touches (returns false -> query aborts), and finalizeQuery()
// restores the touch count saved before the second batch while recording that
// an overflow happened.
template<typename HitType>
struct NpOverflowBuffer : PxHitBuffer<HitType>
{
	bool overflow;			// true when the query produced more touches than fit
	bool processCalled;		// processTouches() already seen once
	PxU32 saveNbTouches;	// touch count at the time of the first processTouches()

	NpOverflowBuffer(HitType* hits, PxU32 count) : PxHitBuffer<HitType>(hits, count), overflow(false), processCalled(false), saveNbTouches(0)
	{
	}

	virtual PxAgain processTouches(const HitType* /*hits*/, PxU32 /*count*/)
	{
		if (processCalled)
			return false;	// second call -> overflow; abort the query
		saveNbTouches = this->nbTouches;
		processCalled = true;
		return true;
	}

	virtual void finalizeQuery()
	{
		if (processCalled)
		{
			overflow = (this->nbTouches > 0);
			this->nbTouches = saveNbTouches;
		}
	}
};

// Batched scene-query front end: records raycast/sweep/overlap requests into
// preallocated arrays and runs them all in execute().
class ExtBatchQuery : public PxBatchQueryExt
{
	PX_NOCOPY(ExtBatchQuery)
public:
	// All arrays are caller-owned, preallocated storage; see PxCreateBatchQueryExt.
	ExtBatchQuery(
		const PxScene& scene, PxQueryFilterCallback* queryFilterCallback,
		PxRaycastBuffer* raycastBuffers, Raycast* raycastQueries, const PxU32 maxNbRaycasts, PxRaycastHit* raycastTouches, const PxU32 maxNbRaycastTouches,
		PxSweepBuffer* sweepBuffers, Sweep* sweepQueries, const PxU32 maxNbSweeps, PxSweepHit* sweepTouches, const PxU32 maxNbSweepTouches,
		PxOverlapBuffer* overlapBuffers, Overlap* overlapQueries, const PxU32 maxNbOverlaps, PxOverlapHit* overlapTouches, const PxU32 maxNbOverlapTouches);

	~ExtBatchQuery() {}

	virtual void release();

	virtual PxRaycastBuffer* raycast(
		const PxVec3& origin, const PxVec3& unitDir, const PxReal distance, const PxU16 maxNbTouches,
		PxHitFlags hitFlags = PxHitFlags(PxHitFlag::eDEFAULT),
		const PxQueryFilterData& filterData = PxQueryFilterData(),
		const PxQueryCache* cache = NULL);

	virtual PxSweepBuffer* sweep(
		const PxGeometry& geometry, const PxTransform& pose,
		const PxVec3& unitDir, const PxReal distance, const PxU16 maxNbTouches,
		PxHitFlags hitFlags = PxHitFlags(PxHitFlag::eDEFAULT),
		const PxQueryFilterData& filterData = PxQueryFilterData(),
		const PxQueryCache* cache = NULL,
		const PxReal inflation = 0.f);

	virtual PxOverlapBuffer* overlap(
		const PxGeometry& geometry, const PxTransform& pose, PxU16 maxNbTouches = 0,
		const PxQueryFilterData& filterData = PxQueryFilterData(),
		const PxQueryCache* cache = NULL);

	virtual void execute();

private:

	// Bookkeeping for one query type: result buffers, recorded query
	// parameters, and a shared touch array handed out first-come-first-served.
	template<typename HitType, typename QueryType>
	struct Query
	{
		PxHitBuffer<HitType>* mBuffers;	// one result buffer per recorded query
		QueryType* mQueries;			// recorded query parameters
		PxU32 mMaxNbBuffers;
		HitType* mTouches;				// shared touch storage for all queries of this type
		PxU32 mMaxNbTouches;
		PxU32 mBufferTide;				// number of queries recorded so far

		Query()
			: mBuffers(NULL),
			  mQueries(NULL),
			  mMaxNbBuffers(0),
			  mTouches(NULL),
			  mMaxNbTouches(0),
			  mBufferTide(0)
		{
		}

		Query(PxHitBuffer<HitType>* buffers, QueryType* queries, const PxU32 maxNbBuffers, HitType* touches, const PxU32 maxNbTouches)
			: mBuffers(buffers),
			  mQueries(queries),
			  mMaxNbBuffers(maxNbBuffers),
			  mTouches(touches),
			  mMaxNbTouches(maxNbTouches),
			  mBufferTide(0)
		{
			for (PxU32 i = 0; i < mMaxNbBuffers; i++)
			{
				mBuffers[i].hasBlock = false;
				mBuffers[i].nbTouches = 0;
			}
		}

		// Records one query and returns its result buffer, or NULL when the
		// preallocated query array is exhausted. nbTouches is set to a
		// 0xffffffff sentinel that execute() asserts on (query not yet run).
		PxHitBuffer<HitType>* addQuery(const QueryType& query, const PxU32 maxNbTouches)
		{
			if ((mBufferTide + 1) > mMaxNbBuffers)
			{
				//Ran out of queries.
				return NULL;
			}

			PxHitBuffer<HitType>* buffer = mBuffers + mBufferTide;
			buffer->touches = NULL;
			buffer->maxNbTouches = maxNbTouches;
			buffer->hasBlock = false;
			buffer->nbTouches = 0xffffffff;

			mQueries[mBufferTide] = query;

			mBufferTide++;

			return buffer;
		}

		// Overload set: replays one recorded query of the matching type.
		static void performQuery(const PxScene& scene, const Raycast& query, NpOverflowBuffer<PxRaycastHit>& hitBuffer, PxQueryFilterCallback* qfcb)
		{
			scene.raycast(
				query.origin, query.unitDir, query.distance,
				hitBuffer,
				query.hitFlags,
				query.filterData, qfcb,
				query.cache);
		}

		static void performQuery(const PxScene& scene, const Sweep& query, NpOverflowBuffer<PxSweepHit>& hitBuffer, PxQueryFilterCallback* qfcb)
		{
			scene.sweep(
				query.geometry.any(), query.pose, query.unitDir, query.distance,
				hitBuffer,
				query.hitFlags,
				query.filterData, qfcb,
				query.cache,
				query.inflation);
		}

		static void performQuery(const PxScene& scene, const Overlap& query, NpOverflowBuffer<PxOverlapHit>& hitBuffer, PxQueryFilterCallback* qfcb)
		{
			scene.overlap(
				query.geometry.any(), query.pose,
				hitBuffer,
				query.filterData, qfcb,
				query.cache);
		}

		// Runs every recorded query, carving each one's touch storage out of
		// the shared mTouches array ("touchesTide" tracks consumption). A query
		// that overflows its slice gets maxNbTouches reset to the 0xffffffff
		// sentinel so the caller can detect truncation. Resets mBufferTide so
		// the batch can be refilled.
		void execute(const PxScene& scene, PxQueryFilterCallback* qfcb)
		{
			PxU32 touchesTide = 0;
			for (PxU32 i = 0; i < mBufferTide; i++)
			{
				PX_ASSERT(0xffffffff == mBuffers[i].nbTouches);
				PX_ASSERT(0xffffffff != mBuffers[i].maxNbTouches);
				PX_ASSERT(!mBuffers[i].touches);

				bool noTouchesRemaining = false;
				if (mBuffers[i].maxNbTouches > 0)
				{
					if (touchesTide >= mMaxNbTouches)
					{
						//No resources left.
						mBuffers[i].maxNbTouches = 0;
						mBuffers[i].touches = NULL;
						noTouchesRemaining = true;
					}
					else if ((touchesTide + mBuffers[i].maxNbTouches) > mMaxNbTouches)
					{
						//Some resources left but not enough to match requested number.
						//This might be enough but it depends on the number of hits generated by the query.
						mBuffers[i].maxNbTouches = mMaxNbTouches - touchesTide;
						mBuffers[i].touches = mTouches + touchesTide;
					}
					else
					{
						//Enough resources left to match request.
						mBuffers[i].touches = mTouches + touchesTide;
					}
				}

				bool overflow = false;
				{
					PX_ALIGN(16, NpOverflowBuffer<HitType> overflowBuffer)(mBuffers[i].touches, mBuffers[i].maxNbTouches);
					performQuery(scene, mQueries[i], overflowBuffer, qfcb);
					overflow = overflowBuffer.overflow || noTouchesRemaining;
					mBuffers[i].hasBlock = overflowBuffer.hasBlock;
					mBuffers[i].block = overflowBuffer.block;
					mBuffers[i].nbTouches = overflowBuffer.nbTouches;
				}

				if(overflow)
				{
					mBuffers[i].maxNbTouches = 0xffffffff;
				}

				touchesTide += mBuffers[i].nbTouches;
			}

			mBufferTide = 0;
		}
	};

	const PxScene& mScene;
	PxQueryFilterCallback* mQueryFilterCallback;

	Query<PxRaycastHit, Raycast> mRaycasts;
	Query<PxSweepHit, Sweep> mSweeps;
	Query<PxOverlapHit, Overlap> mOverlaps;
};

// Describes the storage configuration for one query type: either counts only
// (storage to be allocated internally) or caller-supplied buffers.
template<typename HitType>
class ExtBatchQueryDesc
{
public:
	ExtBatchQueryDesc(const PxU32 maxNbResults, const PxU32 maxNbTouches)
		: mResults(NULL),
		  mMaxNbResults(maxNbResults),
		  mTouches(NULL),
		  mMaxNbTouches(maxNbTouches)
	{
	}
	ExtBatchQueryDesc(PxHitBuffer<HitType>* results, const PxU32 maxNbResults, HitType* touches, PxU32 maxNbTouches)
		: mResults(results),
		  mMaxNbResults(maxNbResults),
		  mTouches(touches),
		  mMaxNbTouches(maxNbTouches)
	{
	}

	PX_FORCE_INLINE PxHitBuffer<HitType>* getResults() const { return mResults; }
	PX_FORCE_INLINE PxU32 getNbResults() const { return mMaxNbResults; }
	PX_FORCE_INLINE HitType* getTouches() const { return mTouches; }
	PX_FORCE_INLINE PxU32 getNbTouches() const { return mMaxNbTouches; }

private:
	PxHitBuffer<HitType>* mResults;
	PxU32 mMaxNbResults;
	HitType* mTouches;
PxU32 mMaxNbTouches; }; template <typename HitType, typename QueryType> PxU32 computeByteSize(const ExtBatchQueryDesc<HitType>& queryDesc) { PxU32 byteSize = 0; if (queryDesc.getNbResults() > 0) { byteSize += sizeof(QueryType)*queryDesc.getNbResults(); if (!queryDesc.getResults()) { byteSize += sizeof(PxHitBuffer<HitType>)*queryDesc.getNbResults() + sizeof(HitType)*queryDesc.getNbTouches(); } } return byteSize; } template <typename HitType, typename QueryType> PxU8* parseDesc (PxU8* bufIn, const ExtBatchQueryDesc<HitType>& queryDesc, PxHitBuffer<HitType>*& results, QueryType*& queries, PxU32& maxBufferSize, HitType*& touches, PxU32& maxNbTouches) { PxU8* bufOut = bufIn; results = queryDesc.getResults(); queries = NULL; maxBufferSize = queryDesc.getNbResults(); touches = queryDesc.getTouches(); maxNbTouches = queryDesc.getNbTouches(); if (maxBufferSize > 0) { queries = reinterpret_cast<QueryType*>(bufOut); bufOut += sizeof(QueryType)*maxBufferSize; if (!results) { results = reinterpret_cast<PxHitBuffer<HitType>*>(bufOut); for (PxU32 i = 0; i < maxBufferSize; i++) { PX_PLACEMENT_NEW(results + i, PxHitBuffer<HitType>); } bufOut += sizeof(PxHitBuffer<HitType>)*maxBufferSize; if (maxNbTouches > 0) { touches = reinterpret_cast<HitType*>(bufOut); bufOut += sizeof(HitType)*maxNbTouches; } } } return bufOut; } PxBatchQueryExt* create (const PxScene& scene, PxQueryFilterCallback* queryFilterCallback, const ExtBatchQueryDesc<PxRaycastHit>& raycastDesc, const ExtBatchQueryDesc<PxSweepHit>& sweepDesc, const ExtBatchQueryDesc<PxOverlapHit>& overlapDesc) { const PxU32 byteSize = sizeof(ExtBatchQuery) + computeByteSize<PxRaycastHit, Raycast>(raycastDesc) + computeByteSize<PxSweepHit, Sweep>(sweepDesc) + computeByteSize<PxOverlapHit, Overlap>(overlapDesc); PxAllocatorCallback& allocator = *PxGetAllocatorCallback(); PxU8* buf = reinterpret_cast<PxU8*>(allocator.allocate(byteSize, "NpBatchQueryExt", PX_FL)); PX_CHECK_AND_RETURN_NULL(buf, "PxCreateBatchQueryExt - alllocation failed"); 
ExtBatchQuery* bq = reinterpret_cast<ExtBatchQuery*>(buf); buf += sizeof(ExtBatchQuery); PxHitBuffer<PxRaycastHit>* raycastBuffers = NULL; Raycast* raycastQueries = NULL; PxU32 maxNbRaycasts = 0; PxRaycastHit* raycastTouches = NULL; PxU32 maxNbRaycastTouches = 0; buf = parseDesc<PxRaycastHit, Raycast>(buf, raycastDesc, raycastBuffers, raycastQueries, maxNbRaycasts, raycastTouches, maxNbRaycastTouches); PxHitBuffer<PxSweepHit>* sweepBuffers = NULL; Sweep* sweepQueries = NULL; PxU32 maxNbSweeps = 0; PxSweepHit* sweepTouches = NULL; PxU32 maxNbSweepTouches = 0; buf = parseDesc<PxSweepHit, Sweep>(buf, sweepDesc, sweepBuffers, sweepQueries, maxNbSweeps, sweepTouches, maxNbSweepTouches); PxHitBuffer<PxOverlapHit>* overlapBuffers = NULL; Overlap* overlapQueries = NULL; PxU32 maxNbOverlaps = 0; PxOverlapHit* overlapTouches = NULL; PxU32 maxNbOverlapTouches = 0; buf = parseDesc<PxOverlapHit, Overlap>(buf, overlapDesc, overlapBuffers, overlapQueries, maxNbOverlaps, overlapTouches, maxNbOverlapTouches); PX_ASSERT((reinterpret_cast<PxU8*>(bq) + byteSize) == buf); PX_PLACEMENT_NEW(bq, ExtBatchQuery)( scene, queryFilterCallback, raycastBuffers, raycastQueries, maxNbRaycasts, raycastTouches, maxNbRaycastTouches, sweepBuffers, sweepQueries, maxNbSweeps, sweepTouches, maxNbSweepTouches, overlapBuffers, overlapQueries, maxNbOverlaps, overlapTouches, maxNbOverlapTouches); return bq; } PxBatchQueryExt* physx::PxCreateBatchQueryExt( const PxScene& scene, PxQueryFilterCallback* queryFilterCallback, const PxU32 maxNbRaycasts, const PxU32 maxNbRaycastTouches, const PxU32 maxNbSweeps, const PxU32 maxNbSweepTouches, const PxU32 maxNbOverlaps, const PxU32 maxNbOverlapTouches) { PX_CHECK_AND_RETURN_NULL(!((0 != maxNbRaycastTouches) && (0 == maxNbRaycasts)), "PxCreateBatchQueryExt - maxNbRaycastTouches is non-zero but maxNbRaycasts is zero"); PX_CHECK_AND_RETURN_NULL(!((0 != maxNbSweepTouches) && (0 == maxNbSweeps)), "PxCreateBatchQueryExt - maxNbSweepTouches is non-zero but maxNbSweeps is 
zero"); PX_CHECK_AND_RETURN_NULL(!((0 != maxNbOverlapTouches) && (0 == maxNbOverlaps)), "PxCreateBatchQueryExt - maxNbOverlaps is non-zero but maxNbOverlaps is zero"); return create(scene, queryFilterCallback, ExtBatchQueryDesc<PxRaycastHit>(maxNbRaycasts, maxNbRaycastTouches), ExtBatchQueryDesc<PxSweepHit>(maxNbSweeps, maxNbSweepTouches), ExtBatchQueryDesc<PxOverlapHit>(maxNbOverlaps, maxNbOverlapTouches)); } PxBatchQueryExt* physx::PxCreateBatchQueryExt( const PxScene& scene, PxQueryFilterCallback* queryFilterCallback, PxRaycastBuffer* raycastBuffers, const PxU32 maxNbRaycasts, PxRaycastHit* raycastTouches, const PxU32 maxNbRaycastTouches, PxSweepBuffer* sweepBuffers, const PxU32 maxNbSweeps, PxSweepHit* sweepTouches, const PxU32 maxNbSweepTouches, PxOverlapBuffer* overlapBuffers, const PxU32 maxNbOverlaps, PxOverlapHit* overlapTouches, const PxU32 maxNbOverlapTouches) { PX_CHECK_AND_RETURN_NULL(!(!raycastTouches && (maxNbRaycastTouches != 0)), "PxCreateBatchQueryExt - maxNbRaycastTouches > 0 but raycastTouches is NULL"); PX_CHECK_AND_RETURN_NULL(!(!raycastBuffers && (maxNbRaycasts != 0)), "PxCreateBatchQueryExt - maxNbRaycasts > 0 but raycastBuffers is NULL"); PX_CHECK_AND_RETURN_NULL(!(!raycastBuffers && raycastTouches), "PxCreateBatchQueryExt - raycastBuffers is NULL but raycastTouches is non-NULL"); PX_CHECK_AND_RETURN_NULL(!(!sweepTouches && (maxNbSweepTouches != 0)), "PxCreateBatchQueryExt - maxNbSweepTouches > 0 but sweepTouches is NULL"); PX_CHECK_AND_RETURN_NULL(!(!sweepBuffers && (maxNbSweeps != 0)), "PxCreateBatchQueryExt - maxNbSweeps > 0 but sweepBuffers is NULL"); PX_CHECK_AND_RETURN_NULL(!(!sweepBuffers && sweepTouches), "PxCreateBatchQueryExt - sweepBuffers is NULL but sweepTouches is non-NULL"); PX_CHECK_AND_RETURN_NULL(!(!overlapTouches && (maxNbOverlapTouches != 0)), "PxCreateBatchQueryExt - maxNbOverlapTouches > 0 but overlapTouches is NULL"); PX_CHECK_AND_RETURN_NULL(!(!overlapBuffers && (maxNbOverlaps != 0)), "PxCreateBatchQueryExt - 
maxNbOverlaps > 0 but overlapBuffers is NULL"); PX_CHECK_AND_RETURN_NULL(!(!overlapBuffers && overlapTouches), "PxCreateBatchQueryExt - overlapBuffers is NULL but overlapTouches is non-NULL"); return create(scene, queryFilterCallback, ExtBatchQueryDesc<PxRaycastHit>(raycastBuffers, maxNbRaycasts, raycastTouches, maxNbRaycastTouches), ExtBatchQueryDesc<PxSweepHit>(sweepBuffers, maxNbSweeps, sweepTouches, maxNbSweepTouches), ExtBatchQueryDesc<PxOverlapHit>(overlapBuffers, maxNbOverlaps, overlapTouches, maxNbOverlapTouches)); } ExtBatchQuery::ExtBatchQuery (const PxScene& scene, PxQueryFilterCallback* queryFilterCallback, PxRaycastBuffer* raycastBuffers, Raycast* raycastQueries, const PxU32 maxNbRaycasts, PxRaycastHit* raycastTouches, const PxU32 maxNbRaycastTouches, PxSweepBuffer* sweepBuffers, Sweep* sweepQueries, const PxU32 maxNbSweeps, PxSweepHit* sweepTouches, const PxU32 maxNbSweepTouches, PxOverlapBuffer* overlapBuffers, Overlap* overlapQueries, const PxU32 maxNbOverlaps, PxOverlapHit* overlapTouches, const PxU32 maxNbOverlapTouches) : mScene(scene), mQueryFilterCallback(queryFilterCallback) { typedef Query<PxRaycastHit, Raycast> QueryRaycast; typedef Query<PxSweepHit, Sweep> QuerySweep; typedef Query<PxOverlapHit, Overlap> QueryOverlap; PX_PLACEMENT_NEW(&mRaycasts, QueryRaycast)(raycastBuffers, raycastQueries, maxNbRaycasts, raycastTouches, maxNbRaycastTouches); PX_PLACEMENT_NEW(&mSweeps, QuerySweep)(sweepBuffers, sweepQueries, maxNbSweeps, sweepTouches, maxNbSweepTouches); PX_PLACEMENT_NEW(&mOverlaps, QueryOverlap)(overlapBuffers, overlapQueries, maxNbOverlaps, overlapTouches, maxNbOverlapTouches); } void ExtBatchQuery::release() { PxGetAllocatorCallback()->deallocate(this); } PxRaycastBuffer* ExtBatchQuery::raycast (const PxVec3& origin, const PxVec3& unitDir, const PxReal distance, const PxU16 maxNbTouches, PxHitFlags hitFlags, const PxQueryFilterData& filterData, const PxQueryCache* cache) { const PxQueryFilterData qfd(filterData.data, filterData.flags | 
PxQueryFlag::eBATCH_QUERY_LEGACY_BEHAVIOUR);
	// Record the query parameters; it is executed later by execute().
	const Raycast raycast = { origin, unitDir, distance, hitFlags, qfd, cache };
	PxRaycastBuffer* buffer = mRaycasts.addQuery(raycast, maxNbTouches);
	PX_CHECK_MSG(buffer, "PxBatchQueryExt::raycast - number of raycast() calls exceeds maxNbRaycasts. query discarded");
	return buffer;
}

// Records a sweep for later execution. Returns the buffer that will receive
// the results, or NULL when the batch is full.
PxSweepBuffer* ExtBatchQuery::sweep
(const PxGeometry& geometry, const PxTransform& pose,
	const PxVec3& unitDir, const PxReal distance, const PxU16 maxNbTouches,
	PxHitFlags hitFlags,
	const PxQueryFilterData& filterData,
	const PxQueryCache* cache,
	const PxReal inflation)
{
	// eBATCH_QUERY_LEGACY_BEHAVIOUR preserves the filtering semantics of the
	// legacy PxBatchQuery interface.
	const PxQueryFilterData qfd(filterData.data, filterData.flags | PxQueryFlag::eBATCH_QUERY_LEGACY_BEHAVIOUR);
	const Sweep sweep = { geometry, pose, unitDir, distance, hitFlags, qfd, cache, inflation};
	PxSweepBuffer* buffer = mSweeps.addQuery(sweep, maxNbTouches);
	PX_CHECK_MSG(buffer, "PxBatchQueryExt::sweep - number of sweep() calls exceeds maxNbSweeps. query discarded");
	return buffer;
}

// Records an overlap for later execution. Returns the buffer that will receive
// the results, or NULL when the batch is full.
PxOverlapBuffer* ExtBatchQuery::overlap
(const PxGeometry& geometry, const PxTransform& pose, PxU16 maxNbTouches,
	const PxQueryFilterData& filterData,
	const PxQueryCache* cache)
{
	const PxQueryFilterData qfd(filterData.data, filterData.flags | PxQueryFlag::eBATCH_QUERY_LEGACY_BEHAVIOUR);
	const Overlap overlap = { geometry, pose, qfd, cache};
	PxOverlapBuffer* buffer = mOverlaps.addQuery(overlap, maxNbTouches);
	PX_CHECK_MSG(buffer, "PxBatchQueryExt::overlap - number of overlap() calls exceeds maxNbOverlaps. query discarded");
	return buffer;
}

// Runs every recorded query (raycasts, then sweeps, then overlaps) and resets
// the batch so it can be refilled.
void ExtBatchQuery::execute()
{
	mRaycasts.execute(mScene, mQueryFilterCallback);
	mSweeps.execute(mScene, mQueryFilterCallback);
	mOverlaps.execute(mScene, mQueryFilterCallback);
}
24,701
C++
34.440459
152
0.751265
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/ExtCustomSceneQuerySystem.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "extensions/PxCustomSceneQuerySystem.h"
#include "extensions/PxShapeExt.h"
#include "foundation/PxAlloca.h"
#include "foundation/PxHashMap.h"
#include "foundation/PxUserAllocated.h"
#include "geometry/PxBVH.h"
#include "GuActorShapeMap.h"
#include "ExtSqQuery.h"
#include "SqFactory.h"
#include "PxRigidActor.h"
#include "PxPruningStructure.h"

using namespace physx;
using namespace Sq;
using namespace Gu;

// PT: this customized version uses:
// - a modified version of Sq::PrunerManager, named Sq::ExtPrunerManager, located in ExtSqManager.cpp
// - a modified version of Sq::SceneQueries, named Sq::ExtSceneQueries, located in ExtSqQuery.cpp
//
// Sq::PrunerManager and Sq::SceneQueries live in the SceneQuery lib, and are used by PhysX internally
// to implement the regular SQ system.
//
// Sq::ExtPrunerManager and Sq::ExtSceneQueries live in the Extensions lib, and are not used by the
// regular PhysX SQ system. They are examples of how the default code can be customized.

// Maps the public secondary-pruner enum to the internal companion-pruner enum.
// eLAST (and any unmapped value) falls back to "no companion pruner".
static CompanionPrunerType getCompanionType(PxDynamicTreeSecondaryPruner::Enum type)
{
	switch(type)
	{
		case PxDynamicTreeSecondaryPruner::eNONE:			return COMPANION_PRUNER_NONE;
		case PxDynamicTreeSecondaryPruner::eBUCKET:			return COMPANION_PRUNER_BUCKET;
		case PxDynamicTreeSecondaryPruner::eINCREMENTAL:	return COMPANION_PRUNER_INCREMENTAL;
		case PxDynamicTreeSecondaryPruner::eBVH:			return COMPANION_PRUNER_AABB_TREE;
		case PxDynamicTreeSecondaryPruner::eLAST:			return COMPANION_PRUNER_NONE;
	}
	return COMPANION_PRUNER_NONE;
}

// Maps the public BVH build-strategy enum to the internal one. eLAST (and any
// unmapped value) falls back to the "fast" splatter-points strategy.
static BVHBuildStrategy getBuildStrategy(PxBVHBuildStrategy::Enum bs)
{
	switch(bs)
	{
		case PxBVHBuildStrategy::eFAST:		return BVH_SPLATTER_POINTS;
		case PxBVHBuildStrategy::eDEFAULT:	return BVH_SPLATTER_POINTS_SPLIT_GEOM_CENTER;
		case PxBVHBuildStrategy::eSAH:		return BVH_SAH;
		case PxBVHBuildStrategy::eLAST:		return BVH_SPLATTER_POINTS;
	}
	return BVH_SPLATTER_POINTS;
}

// Factory for the pruner backing one pruning-structure type. Returns NULL for
// eLAST / unknown types.
static Pruner* create(PxPruningStructureType::Enum type, PxU64 contextID, PxDynamicTreeSecondaryPruner::Enum secondaryType, PxBVHBuildStrategy::Enum buildStrategy, PxU32 nbObjectsPerNode)
{
//	if(0)
//		return createIncrementalPruner(contextID);

	const CompanionPrunerType cpType = getCompanionType(secondaryType);
	const BVHBuildStrategy bs = getBuildStrategy(buildStrategy);

	Pruner* pruner = NULL;
	switch(type)
	{
		case PxPruningStructureType::eNONE:
		{
			pruner = createBucketPruner(contextID);
			break;
		}
		case PxPruningStructureType::eDYNAMIC_AABB_TREE:
		{
			pruner = createAABBPruner(contextID, true, cpType, bs, nbObjectsPerNode);
			break;
		}
		case PxPruningStructureType::eSTATIC_AABB_TREE:
		{
			pruner = createAABBPruner(contextID, false, cpType, bs, nbObjectsPerNode);
			break;
		}
		case PxPruningStructureType::eLAST:
			break;
	}
	return pruner;
}

#define EXT_PRUNER_EPSILON	0.005f

// PT: in this external implementation we'll use Px pointers instead of Np pointers in the payload.
static PX_FORCE_INLINE void setPayload(PrunerPayload& pp, const PxShape* shape, const PxRigidActor* actor)
{
	pp.data[0] = size_t(shape);
	pp.data[1] = size_t(actor);
}

// Recovers the shape pointer stored in payload.data[0] by setPayload().
static PX_FORCE_INLINE PxShape* getShapeFromPayload(const PrunerPayload& payload)
{
	return reinterpret_cast<PxShape*>(payload.data[0]);
}

// Recovers the actor pointer stored in payload.data[1] by setPayload().
static PX_FORCE_INLINE PxRigidActor* getActorFromPayload(const PrunerPayload& payload)
{
	return reinterpret_cast<PxRigidActor*>(payload.data[1]);
}

// "Dynamic" here means anything that is not a rigid static actor.
static PX_FORCE_INLINE bool isDynamicActor(const PxRigidActor& actor)
{
	const PxType actorType = actor.getConcreteType();
	return actorType != PxConcreteType::eRIGID_STATIC;
}

///////////////////////////////////////////////////////////////////////////////

// ActorShapeData packs a 32-bit compound id (high bits) with a 32-bit pruner
// handle (low bits).
static PX_FORCE_INLINE ActorShapeData	createActorShapeData(PrunerHandle h, PrunerCompoundId id)	{ return (ActorShapeData(id) << 32) | ActorShapeData(h);	}
static PX_FORCE_INLINE PrunerHandle		getPrunerHandle(ActorShapeData data)						{ return PrunerHandle(data);								}
static PX_FORCE_INLINE PrunerCompoundId	getCompoundID(ActorShapeData data)							{ return PrunerCompoundId(data >> 32);						}

///////////////////////////////////////////////////////////////////////////////

namespace
{
	// Bridges the internal query code to the user-provided adapter: resolves
	// payloads back to Px shapes/actors and forwards pruner selection to the
	// user adapter.
	class ExtSqAdapter : public ExtQueryAdapter
	{
		PX_NOCOPY(ExtSqAdapter)
	public:
		ExtSqAdapter(const PxCustomSceneQuerySystemAdapter& adapter) : mUserAdapter(adapter)/*, mFilterData(NULL)*/	{}
		virtual	~ExtSqAdapter()	{}

		// Adapter
		virtual const PxGeometry&	getGeometry(const PrunerPayload& payload)	const;
		//~Adapter

		// ExtQueryAdapter
		virtual	PrunerHandle		findPrunerHandle(const PxQueryCache& cache, PrunerCompoundId& compoundId, PxU32& prunerIndex)	const;
		virtual	void				getFilterData(const PrunerPayload& payload, PxFilterData& filterData)	const;
		virtual	void				getActorShape(const PrunerPayload& payload, PxActorShape& actorShape)	const;
		virtual	bool				processPruner(PxU32 prunerIndex, const PxQueryThreadContext* context, const PxQueryFilterData& filterData, PxQueryFilterCallback* filterCall)	const;
		//~ExtQueryAdapter

		const PxCustomSceneQuerySystemAdapter&	mUserAdapter;	// user callbacks (pruner selection etc.)
		ActorShapeMap							mDatabase;		// (actor, shape) -> packed ActorShapeData
		const PxQueryFilterData*				mFilterData;
	};
}

const PxGeometry& ExtSqAdapter::getGeometry(const PrunerPayload& payload) const
{
	PxShape* shape = getShapeFromPayload(payload);
	return shape->getGeometry();
}

// Looks up the pruner handle and compound id recorded for the cached
// actor/shape pair, and asks the user adapter which pruner owns it.
PrunerHandle ExtSqAdapter::findPrunerHandle(const PxQueryCache& cache, PrunerCompoundId& compoundId, PxU32& prunerIndex) const
{
	const PxU32 actorIndex = cache.actor->getInternalActorIndex();
	PX_ASSERT(actorIndex!=0xffffffff);

	const ActorShapeData actorShapeData = mDatabase.find(actorIndex, cache.actor, cache.shape);

	compoundId = getCompoundID(actorShapeData);

	prunerIndex = mUserAdapter.getPrunerIndex(*cache.actor, *cache.shape);

	return getPrunerHandle(actorShapeData);
}

void ExtSqAdapter::getFilterData(const PrunerPayload& payload, PxFilterData& filterData) const
{
	PxShape* shape = getShapeFromPayload(payload);
	filterData = shape->getQueryFilterData();
}

void ExtSqAdapter::getActorShape(const PrunerPayload& payload, PxActorShape& actorShape) const
{
	actorShape.actor = getActorFromPayload(payload);
	actorShape.shape = getShapeFromPayload(payload);
}

// Delegates the "should this pruner be queried?" decision to the user adapter.
bool ExtSqAdapter::processPruner(PxU32 prunerIndex, const PxQueryThreadContext* context, const PxQueryFilterData& filterData, PxQueryFilterCallback* filterCall) const
{
	return mUserAdapter.processPruner(prunerIndex, context, filterData, filterCall);
}

///////////////////////////////////////////////////////////////////////////////

namespace
{
	// Reference-counted implementation of PxCustomSceneQuerySystem built on
	// ExtSceneQueries; most calls forward to the underlying SQ() object.
	// NOTE(review): this class is cut off at the end of the chunk; the
	// remainder is kept verbatim.
	class CustomPxSQ : public PxCustomSceneQuerySystem, public PxUserAllocated
	{
	public:
		CustomPxSQ(const PxCustomSceneQuerySystemAdapter& adapter, ExtPVDCapture* pvd, PxU64 contextID, PxSceneQueryUpdateMode::Enum mode, bool usesTreeOfPruners) :
			mExtAdapter			(adapter),
			mQueries			(pvd, contextID, EXT_PRUNER_EPSILON, mExtAdapter, usesTreeOfPruners),
			mUpdateMode			(mode),
			mRefCount			(1)	{}
		virtual	~CustomPxSQ()	{}

		virtual	void		release();
		virtual	void		acquireReference();
		virtual	void		preallocate(PxU32 prunerIndex, PxU32 nbShapes)	{ SQ().preallocate(prunerIndex, nbShapes);	}
		virtual	void		addSQShape(	const PxRigidActor& actor, const PxShape& shape, const PxBounds3& bounds,
										const PxTransform& transform, const PxSQCompoundHandle* compoundHandle, bool hasPruningStructure);
		virtual	void		removeSQShape(const PxRigidActor& actor, const PxShape& shape);
		virtual	void		updateSQShape(const PxRigidActor& actor, const PxShape& shape, const PxTransform& transform);
		virtual	PxSQCompoundHandle	addSQCompound(const PxRigidActor& actor, const PxShape** shapes, const PxBVH& pxbvh, const PxTransform* transforms);
		virtual	void		removeSQCompound(PxSQCompoundHandle compoundHandle);
		virtual	void		updateSQCompound(PxSQCompoundHandle compoundHandle, const PxTransform& compoundTransform);
		virtual	void		flushUpdates()	{ SQ().flushUpdates();	}
		virtual	void		flushMemory()	{ SQ().flushMemory();	}
		virtual	void		visualize(PxU32 prunerIndex, PxRenderOutput& out)	const	{ SQ().visualize(prunerIndex, out);	}
		virtual	void		shiftOrigin(const PxVec3& shift)	{ SQ().shiftOrigin(shift);	}
		virtual	PxSQBuildStepHandle
prepareSceneQueryBuildStep(PxU32 prunerIndex); virtual void sceneQueryBuildStep(PxSQBuildStepHandle handle); virtual void finalizeUpdates(); virtual void setDynamicTreeRebuildRateHint(PxU32 dynTreeRebuildRateHint) { SQ().setDynamicTreeRebuildRateHint(dynTreeRebuildRateHint); } virtual PxU32 getDynamicTreeRebuildRateHint() const { return SQ().getDynamicTreeRebuildRateHint(); } virtual void forceRebuildDynamicTree(PxU32 prunerIndex) { SQ().forceRebuildDynamicTree(prunerIndex); } virtual PxSceneQueryUpdateMode::Enum getUpdateMode() const { return mUpdateMode; } virtual void setUpdateMode(PxSceneQueryUpdateMode::Enum mode) { mUpdateMode = mode; } virtual PxU32 getStaticTimestamp() const { return SQ().getStaticTimestamp(); } virtual void merge(const PxPruningStructure& pxps); virtual bool raycast(const PxVec3& origin, const PxVec3& unitDir, const PxReal distance, PxRaycastCallback& hitCall, PxHitFlags hitFlags, const PxQueryFilterData& filterData, PxQueryFilterCallback* filterCall, const PxQueryCache* cache, PxGeometryQueryFlags flags) const; virtual bool sweep( const PxGeometry& geometry, const PxTransform& pose, const PxVec3& unitDir, const PxReal distance, PxSweepCallback& hitCall, PxHitFlags hitFlags, const PxQueryFilterData& filterData, PxQueryFilterCallback* filterCall, const PxQueryCache* cache, const PxReal inflation, PxGeometryQueryFlags flags) const; virtual bool overlap(const PxGeometry& geometry, const PxTransform& transform, PxOverlapCallback& hitCall, const PxQueryFilterData& filterData, PxQueryFilterCallback* filterCall, const PxQueryCache* cache, PxGeometryQueryFlags flags) const; virtual PxSQPrunerHandle getHandle(const PxRigidActor& actor, const PxShape& shape, PxU32& prunerIndex) const; virtual void sync(PxU32 prunerIndex, const PxSQPrunerHandle* handles, const PxU32* indices, const PxBounds3* bounds, const PxTransform32* transforms, PxU32 count, const PxBitMap& ignoredIndices); virtual PxU32 addPruner(PxPruningStructureType::Enum primaryType, 
PxDynamicTreeSecondaryPruner::Enum secondaryType, PxU32 preallocated); virtual PxU32 startCustomBuildstep(); virtual void customBuildstep(PxU32 index); virtual void finishCustomBuildstep(); PX_FORCE_INLINE ExtPrunerManager& SQ() { return mQueries.mSQManager; } PX_FORCE_INLINE const ExtPrunerManager& SQ() const { return mQueries.mSQManager; } ExtSqAdapter mExtAdapter; ExtSceneQueries mQueries; PxSceneQueryUpdateMode::Enum mUpdateMode; PxU32 mRefCount; }; } /////////////////////////////////////////////////////////////////////////////// void addExternalSQ(PxSceneQuerySystem* added); void removeExternalSQ(PxSceneQuerySystem* removed); void CustomPxSQ::release() { mRefCount--; if(!mRefCount) { removeExternalSQ(this); PX_DELETE_THIS; } } void CustomPxSQ::acquireReference() { mRefCount++; } void CustomPxSQ::addSQShape(const PxRigidActor& actor, const PxShape& shape, const PxBounds3& bounds, const PxTransform& transform, const PxSQCompoundHandle* compoundHandle, bool hasPruningStructure) { PrunerPayload payload; setPayload(payload, &shape, &actor); const bool isDynamic = isDynamicActor(actor); const PxU32 prunerIndex = mExtAdapter.mUserAdapter.getPrunerIndex(actor, shape); const PrunerCompoundId cid = compoundHandle ? 
PrunerCompoundId(*compoundHandle) : INVALID_COMPOUND_ID; const PrunerHandle shapeHandle = SQ().addPrunerShape(payload, prunerIndex, isDynamic, cid, bounds, transform, hasPruningStructure); const PxU32 actorIndex = actor.getInternalActorIndex(); PX_ASSERT(actorIndex!=0xffffffff); mExtAdapter.mDatabase.add(actorIndex, &actor, &shape, createActorShapeData(shapeHandle, cid)); } namespace { struct DatabaseCleaner : PrunerPayloadRemovalCallback { DatabaseCleaner(ExtSqAdapter& adapter) : mAdapter(adapter){} virtual void invoke(PxU32 nbRemoved, const PrunerPayload* removed) { PxU32 actorIndex = 0xffffffff; const PxRigidActor* cachedActor = NULL; while(nbRemoved--) { const PrunerPayload& payload = *removed++; const PxRigidActor* actor = getActorFromPayload(payload); if(actor!=cachedActor) { actorIndex = actor->getInternalActorIndex(); cachedActor = actor; } PX_ASSERT(actorIndex!=0xffffffff); bool status = mAdapter.mDatabase.remove(actorIndex, actor, getShapeFromPayload(payload), NULL); PX_ASSERT(status); PX_UNUSED(status); } } ExtSqAdapter& mAdapter; PX_NOCOPY(DatabaseCleaner) }; } void CustomPxSQ::removeSQShape(const PxRigidActor& actor, const PxShape& shape) { const bool isDynamic = isDynamicActor(actor); const PxU32 prunerIndex = mExtAdapter.mUserAdapter.getPrunerIndex(actor, shape); const PxU32 actorIndex = actor.getInternalActorIndex(); PX_ASSERT(actorIndex!=0xffffffff); ActorShapeData actorShapeData; mExtAdapter.mDatabase.remove(actorIndex, &actor, &shape, &actorShapeData); const PrunerHandle shapeHandle = getPrunerHandle(actorShapeData); const PrunerCompoundId compoundId = getCompoundID(actorShapeData); SQ().removePrunerShape(prunerIndex, isDynamic, compoundId, shapeHandle, NULL); } void CustomPxSQ::updateSQShape(const PxRigidActor& actor, const PxShape& shape, const PxTransform& transform) { const bool isDynamic = isDynamicActor(actor); const PxU32 prunerIndex = mExtAdapter.mUserAdapter.getPrunerIndex(actor, shape); const PxU32 actorIndex = 
actor.getInternalActorIndex(); PX_ASSERT(actorIndex!=0xffffffff); const ActorShapeData actorShapeData = mExtAdapter.mDatabase.find(actorIndex, &actor, &shape); const PrunerHandle shapeHandle = getPrunerHandle(actorShapeData); const PrunerCompoundId cid = getCompoundID(actorShapeData); SQ().markForUpdate(prunerIndex, isDynamic, cid, shapeHandle, transform); } PxSQCompoundHandle CustomPxSQ::addSQCompound(const PxRigidActor& actor, const PxShape** shapes, const PxBVH& bvh, const PxTransform* transforms) { const PxU32 numSqShapes = bvh.getNbBounds(); PX_ALLOCA(payloads, PrunerPayload, numSqShapes); for(PxU32 i=0; i<numSqShapes; i++) setPayload(payloads[i], shapes[i], &actor); const PxU32 actorIndex = actor.getInternalActorIndex(); PX_ASSERT(actorIndex!=0xffffffff); PX_ALLOCA(shapeHandles, PrunerHandle, numSqShapes); SQ().addCompoundShape(bvh, actorIndex, actor.getGlobalPose(), shapeHandles, payloads, transforms, isDynamicActor(actor)); for(PxU32 i=0; i<numSqShapes; i++) { // PT: TODO: actorIndex is now redundant! 
mExtAdapter.mDatabase.add(actorIndex, &actor, shapes[i], createActorShapeData(shapeHandles[i], actorIndex)); } return PxSQCompoundHandle(actorIndex); } void CustomPxSQ::removeSQCompound(PxSQCompoundHandle compoundHandle) { DatabaseCleaner cleaner(mExtAdapter); SQ().removeCompoundActor(PrunerCompoundId(compoundHandle), &cleaner); } void CustomPxSQ::updateSQCompound(PxSQCompoundHandle compoundHandle, const PxTransform& compoundTransform) { SQ().updateCompoundActor(PrunerCompoundId(compoundHandle), compoundTransform); } PxSQBuildStepHandle CustomPxSQ::prepareSceneQueryBuildStep(PxU32 prunerIndex) { return SQ().prepareSceneQueriesUpdate(prunerIndex); } void CustomPxSQ::sceneQueryBuildStep(PxSQBuildStepHandle handle) { SQ().sceneQueryBuildStep(handle); } void CustomPxSQ::finalizeUpdates() { switch(mUpdateMode) { case PxSceneQueryUpdateMode::eBUILD_ENABLED_COMMIT_ENABLED: SQ().afterSync(true, true); break; case PxSceneQueryUpdateMode::eBUILD_ENABLED_COMMIT_DISABLED: SQ().afterSync(true, false); break; case PxSceneQueryUpdateMode::eBUILD_DISABLED_COMMIT_DISABLED: SQ().afterSync(false, false); break; } } void CustomPxSQ::merge(const PxPruningStructure& /*pxps*/) { PX_ASSERT(!"Not supported by this custom SQ system"); // PT: PxPruningStructure only knows about the regular static/dynamic pruners, so it is not // compatible with this custom version. 
} bool CustomPxSQ::raycast( const PxVec3& origin, const PxVec3& unitDir, const PxReal distance, PxRaycastCallback& hitCall, PxHitFlags hitFlags, const PxQueryFilterData& filterData, PxQueryFilterCallback* filterCall, const PxQueryCache* cache, PxGeometryQueryFlags flags) const { return mQueries._raycast(origin, unitDir, distance, hitCall, hitFlags, filterData, filterCall, cache, flags); } bool CustomPxSQ::sweep( const PxGeometry& geometry, const PxTransform& pose, const PxVec3& unitDir, const PxReal distance, PxSweepCallback& hitCall, PxHitFlags hitFlags, const PxQueryFilterData& filterData, PxQueryFilterCallback* filterCall, const PxQueryCache* cache, const PxReal inflation, PxGeometryQueryFlags flags) const { return mQueries._sweep(geometry, pose, unitDir, distance, hitCall, hitFlags, filterData, filterCall, cache, inflation, flags); } bool CustomPxSQ::overlap( const PxGeometry& geometry, const PxTransform& transform, PxOverlapCallback& hitCall, const PxQueryFilterData& filterData, PxQueryFilterCallback* filterCall, const PxQueryCache* cache, PxGeometryQueryFlags flags) const { return mQueries._overlap( geometry, transform, hitCall, filterData, filterCall, cache, flags); } PxSQPrunerHandle CustomPxSQ::getHandle(const PxRigidActor& actor, const PxShape& shape, PxU32& prunerIndex) const { const PxU32 actorIndex = actor.getInternalActorIndex(); PX_ASSERT(actorIndex!=0xffffffff); const ActorShapeData actorShapeData = mExtAdapter.mDatabase.find(actorIndex, &actor, &shape); prunerIndex = mExtAdapter.mUserAdapter.getPrunerIndex(actor, shape); return PxSQPrunerHandle(getPrunerHandle(actorShapeData)); } void CustomPxSQ::sync(PxU32 prunerIndex, const PxSQPrunerHandle* handles, const PxU32* indices, const PxBounds3* bounds, const PxTransform32* transforms, PxU32 count, const PxBitMap& ignoredIndices) { SQ().sync(prunerIndex, handles, indices, bounds, transforms, count, ignoredIndices); } PxU32 CustomPxSQ::addPruner(PxPruningStructureType::Enum primaryType, 
PxDynamicTreeSecondaryPruner::Enum secondaryType, PxU32 preallocated) { Pruner* pruner = create(primaryType, mQueries.getContextId(), secondaryType, PxBVHBuildStrategy::eFAST, 4); return mQueries.mSQManager.addPruner(pruner, preallocated); } PxU32 CustomPxSQ::startCustomBuildstep() { return SQ().startCustomBuildstep(); } void CustomPxSQ::customBuildstep(PxU32 index) { SQ().customBuildstep(index); } void CustomPxSQ::finishCustomBuildstep() { SQ().finishCustomBuildstep(); } /////////////////////////////////////////////////////////////////////////////// PxCustomSceneQuerySystem* physx::PxCreateCustomSceneQuerySystem(PxSceneQueryUpdateMode::Enum sceneQueryUpdateMode, PxU64 contextID, const PxCustomSceneQuerySystemAdapter& adapter, bool usesTreeOfPruners) { ExtPVDCapture* pvd = NULL; CustomPxSQ* pxsq = PX_NEW(CustomPxSQ)(adapter, pvd, contextID, sceneQueryUpdateMode, usesTreeOfPruners); addExternalSQ(pxsq); return pxsq; } ///////////////////////////////////////////////////////////////////////////////
21,442
C++
40.880859
203
0.743821
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/ExtRigidActorExt.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "extensions/PxRigidActorExt.h" #include "foundation/PxFPU.h" #include "foundation/PxAllocator.h" #include "foundation/PxInlineArray.h" #include "geometry/PxGeometryQuery.h" #include "cooking/PxBVHDesc.h" #include "cooking/PxCooking.h" using namespace physx; PxBounds3* PxRigidActorExt::getRigidActorShapeLocalBoundsList(const PxRigidActor& actor, PxU32& numBounds) { const PxU32 numShapes = actor.getNbShapes(); if(numShapes == 0) return NULL; PxInlineArray<PxShape*, 64> shapes("PxShape*"); shapes.resize(numShapes); actor.getShapes(shapes.begin(), shapes.size()); PxU32 numSqShapes = 0; for(PxU32 i=0; i<numShapes; i++) { if(shapes[i]->getFlags() & PxShapeFlag::eSCENE_QUERY_SHAPE) numSqShapes++; } PxBounds3* bounds = PX_ALLOCATE(PxBounds3, numSqShapes, "PxBounds3"); numSqShapes = 0; { PX_SIMD_GUARD // PT: external guard because we use PxGeometryQueryFlag::Enum(0) below for(PxU32 i=0; i<numShapes; i++) { if(shapes[i]->getFlags() & PxShapeFlag::eSCENE_QUERY_SHAPE) PxGeometryQuery::computeGeomBounds(bounds[numSqShapes++], shapes[i]->getGeometry(), shapes[i]->getLocalPose(), 0.0f, 1.0f, PxGeometryQueryFlag::Enum(0)); } } numBounds = numSqShapes; return bounds; } PxBVH* PxRigidActorExt::createBVHFromActor(PxPhysics& physics, const PxRigidActor& actor) { PxU32 nbBounds = 0; PxBounds3* bounds = PxRigidActorExt::getRigidActorShapeLocalBoundsList(actor, nbBounds); PxBVHDesc bvhDesc; bvhDesc.bounds.count = nbBounds; bvhDesc.bounds.data = bounds; bvhDesc.bounds.stride = sizeof(PxBounds3); PxBVH* bvh = PxCreateBVH(bvhDesc, physics.getPhysicsInsertionCallback()); PX_FREE(bounds); return bvh; }
3,372
C++
36.477777
157
0.753559
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/ExtTriangleMeshExt.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "geometry/PxMeshQuery.h" #include "geometry/PxGeometryQuery.h" #include "geometry/PxTriangleMeshGeometry.h" #include "geometry/PxHeightFieldGeometry.h" #include "geometry/PxHeightField.h" #include "geometry/PxTriangleMesh.h" #include "extensions/PxTriangleMeshExt.h" #include "GuSDF.h" #include "GuTriangleMesh.h" #include "foundation/PxAllocator.h" using namespace physx; PxMeshOverlapUtil::PxMeshOverlapUtil() : mResultsMemory(mResults), mNbResults(0), mMaxNbResults(256) { } PxMeshOverlapUtil::~PxMeshOverlapUtil() { if(mResultsMemory != mResults) PX_FREE(mResultsMemory); } PxU32 PxMeshOverlapUtil::findOverlap(const PxGeometry& geom, const PxTransform& geomPose, const PxTriangleMeshGeometry& meshGeom, const PxTransform& meshPose) { bool overflow; PxU32 nbTouchedTris = PxMeshQuery::findOverlapTriangleMesh(geom, geomPose, meshGeom, meshPose, mResultsMemory, mMaxNbResults, 0, overflow); if(overflow) { const PxU32 maxNbTris = meshGeom.triangleMesh->getNbTriangles(); if(!maxNbTris) { mNbResults = 0; return 0; } if(mMaxNbResults<maxNbTris) { if(mResultsMemory != mResults) PX_FREE(mResultsMemory); mResultsMemory = PX_ALLOCATE(PxU32, maxNbTris, "PxMeshOverlapUtil::findOverlap"); mMaxNbResults = maxNbTris; } nbTouchedTris = PxMeshQuery::findOverlapTriangleMesh(geom, geomPose, meshGeom, meshPose, mResultsMemory, mMaxNbResults, 0, overflow); PX_ASSERT(nbTouchedTris); PX_ASSERT(!overflow); } mNbResults = nbTouchedTris; return nbTouchedTris; } PxU32 PxMeshOverlapUtil::findOverlap(const PxGeometry& geom, const PxTransform& geomPose, const PxHeightFieldGeometry& hfGeom, const PxTransform& hfPose) { bool overflow = true; PxU32 nbTouchedTris = PxMeshQuery::findOverlapHeightField(geom, geomPose, hfGeom, hfPose, mResultsMemory, mMaxNbResults, 0, overflow); if(overflow) { const PxU32 maxNbTris = hfGeom.heightField->getNbRows()*hfGeom.heightField->getNbColumns()*2; if(!maxNbTris) { mNbResults = 0; return 0; } if(mMaxNbResults<maxNbTris) { if(mResultsMemory != mResults) 
PX_FREE(mResultsMemory); mResultsMemory = PX_ALLOCATE(PxU32, maxNbTris, "PxMeshOverlapUtil::findOverlap"); mMaxNbResults = maxNbTris; } nbTouchedTris = PxMeshQuery::findOverlapHeightField(geom, geomPose, hfGeom, hfPose, mResultsMemory, mMaxNbResults, 0, overflow); PX_ASSERT(nbTouchedTris); PX_ASSERT(!overflow); } mNbResults = nbTouchedTris; return nbTouchedTris; } namespace { template<typename MeshGeometry> bool computeMeshPenetrationT(PxVec3& direction, PxReal& depth, const PxGeometry& geom, const PxTransform& geomPose, const MeshGeometry& meshGeom, const PxTransform& meshPose, PxU32 maxIter, PxU32* nbIterOut) { PxU32 nbIter = 0; PxTransform pose = geomPose; for (; nbIter < maxIter; nbIter++) { PxVec3 currentDir; PxF32 currentDepth; if (!PxGeometryQuery::computePenetration(currentDir, currentDepth, geom, pose, meshGeom, meshPose)) break; pose.p += currentDir * currentDepth; } if(nbIterOut) *nbIterOut = nbIter; PxVec3 diff = pose.p - geomPose.p; depth = diff.magnitude(); if (depth>0) direction = diff / depth; return nbIter!=0; } } bool physx::PxComputeTriangleMeshPenetration(PxVec3& direction, PxReal& depth, const PxGeometry& geom, const PxTransform& geomPose, const PxTriangleMeshGeometry& meshGeom, const PxTransform& meshPose, PxU32 maxIter, PxU32* nbIter) { return computeMeshPenetrationT(direction, depth, geom, geomPose, meshGeom, meshPose, maxIter, nbIter); } bool physx::PxComputeHeightFieldPenetration(PxVec3& direction, PxReal& depth, const PxGeometry& geom, const PxTransform& geomPose, const PxHeightFieldGeometry& hfGeom, const PxTransform& meshPose, PxU32 maxIter, PxU32* nbIter) { return computeMeshPenetrationT(direction, depth, geom, geomPose, hfGeom, meshPose, maxIter, nbIter); } bool physx::PxExtractIsosurfaceFromSDF(const PxTriangleMesh& triangleMesh, PxArray<PxVec3>& isosurfaceVertices, PxArray<PxU32>& isosurfaceTriangleIndices) { PxU32 dimX, dimY, dimZ; triangleMesh.getSDFDimensions(dimX, dimY, dimZ); if (dimX == 0 || dimY == 0 || dimZ == 0) return 
false; const Gu::TriangleMesh* guTriangleMesh = static_cast<const Gu::TriangleMesh*>(&triangleMesh); const Gu::SDF& sdf = guTriangleMesh->getSdfDataFast(); extractIsosurfaceFromSDF(sdf, isosurfaceVertices, isosurfaceTriangleIndices); return true; }
6,322
C++
32.278947
158
0.737583
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/ExtPvd.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
// suppress LNK4221 #include "foundation/PxPreprocessor.h" PX_DUMMY_SYMBOL #if PX_SUPPORT_PVD #include "ExtPvd.h" #include "PxExtensionMetaDataObjects.h" #include "ExtD6Joint.h" #include "ExtFixedJoint.h" #include "ExtSphericalJoint.h" #include "ExtDistanceJoint.h" #include "ExtRevoluteJoint.h" #include "ExtPrismaticJoint.h" #include "ExtJointMetaDataExtensions.h" #include "PvdMetaDataPropertyVisitor.h" #include "PvdMetaDataDefineProperties.h" namespace physx { namespace Ext { using namespace physx::Vd; template<typename TObjType, typename TOperator> inline void visitPvdInstanceProperties( TOperator inOperator ) { PxClassInfoTraits<TObjType>().Info.visitInstanceProperties( makePvdPropertyFilter( inOperator ), 0 ); } template<typename TObjType, typename TOperator> inline void visitPvdProperties( TOperator inOperator ) { PvdPropertyFilter<TOperator> theFilter( makePvdPropertyFilter( inOperator ) ); PxU32 thePropCount = PxClassInfoTraits<TObjType>().Info.visitBaseProperties( theFilter ); PxClassInfoTraits<TObjType>().Info.visitInstanceProperties( theFilter, thePropCount ); } Pvd::PvdNameSpace::PvdNameSpace(physx::pvdsdk::PvdDataStream& conn, const char* /*name*/) : mConnection(conn) { } Pvd::PvdNameSpace::~PvdNameSpace() { } void Pvd::releasePvdInstance(physx::pvdsdk::PvdDataStream& pvdConnection, const PxConstraint& c, const PxJoint& joint) { if(!pvdConnection.isConnected()) return; //remove from scene and from any attached actors. 
PxRigidActor* actor0, *actor1; c.getActors( actor0, actor1 ); PxScene* scene = c.getScene(); if(scene) pvdConnection.removeObjectRef( scene, "Joints", &joint ); if ( actor0 && actor0->getScene() ) pvdConnection.removeObjectRef( actor0, "Joints", &joint ); if ( actor1 && actor1->getScene()) pvdConnection.removeObjectRef( actor1, "Joints", &joint ); pvdConnection.destroyInstance(&joint); } template<typename TObjType> void registerProperties( PvdDataStream& inStream ) { inStream.createClass<TObjType>(); PvdPropertyDefinitionHelper& theHelper( inStream.getPropertyDefinitionHelper() ); PvdClassInfoDefine theDefinitionObj( theHelper, getPvdNamespacedNameForType<TObjType>() ); visitPvdInstanceProperties<TObjType>( theDefinitionObj ); } template<typename TObjType, typename TValueStructType> void registerPropertiesAndValueStruct( PvdDataStream& inStream ) { inStream.createClass<TObjType>(); inStream.deriveClass<PxJoint,TObjType>(); PvdPropertyDefinitionHelper& theHelper( inStream.getPropertyDefinitionHelper() ); { PvdClassInfoDefine theDefinitionObj( theHelper, getPvdNamespacedNameForType<TObjType>() ); visitPvdInstanceProperties<TObjType>( theDefinitionObj ); } { PvdClassInfoValueStructDefine theDefinitionObj( theHelper ); visitPvdProperties<TObjType>( theDefinitionObj ); theHelper.addPropertyMessage<TObjType,TValueStructType>(); } } void Pvd::sendClassDescriptions(physx::pvdsdk::PvdDataStream& inStream) { if (inStream.isClassExist<PxJoint>()) return; { //PxJoint registerProperties<PxJoint>( inStream ); inStream.createProperty<PxJoint,ObjectRef>( "Parent", "parents" ); registerPropertiesAndValueStruct<PxDistanceJoint,PxDistanceJointGeneratedValues>( inStream); registerPropertiesAndValueStruct<PxContactJoint, PxContactJointGeneratedValues>(inStream); registerPropertiesAndValueStruct<PxFixedJoint,PxFixedJointGeneratedValues>( inStream); registerPropertiesAndValueStruct<PxPrismaticJoint,PxPrismaticJointGeneratedValues>( inStream); 
registerPropertiesAndValueStruct<PxSphericalJoint,PxSphericalJointGeneratedValues>( inStream); registerPropertiesAndValueStruct<PxRevoluteJoint,PxRevoluteJointGeneratedValues>( inStream); registerPropertiesAndValueStruct<PxD6Joint,PxD6JointGeneratedValues>( inStream); registerPropertiesAndValueStruct<PxGearJoint,PxGearJointGeneratedValues>( inStream); registerPropertiesAndValueStruct<PxRackAndPinionJoint,PxRackAndPinionJointGeneratedValues>( inStream); } } void Pvd::setActors( physx::pvdsdk::PvdDataStream& inStream, const PxJoint& inJoint, const PxConstraint& c, const PxActor* newActor0, const PxActor* newActor1 ) { PxRigidActor* actor0, *actor1; c.getActors( actor0, actor1 ); if ( actor0 ) inStream.removeObjectRef( actor0, "Joints", &inJoint ); if ( actor1 ) inStream.removeObjectRef( actor1, "Joints", &inJoint ); if ( newActor0 && newActor0->getScene()) inStream.pushBackObjectRef( newActor0, "Joints", &inJoint ); if ( newActor1 && newActor1->getScene()) inStream.pushBackObjectRef( newActor1, "Joints", &inJoint ); inStream.setPropertyValue( &inJoint, "Actors.actor0", reinterpret_cast<const void*>(newActor0) ); inStream.setPropertyValue( &inJoint, "Actors.actor1", reinterpret_cast<const void*>(newActor1) ); const void* parent = newActor0 ? newActor0 : newActor1; inStream.setPropertyValue( &inJoint, "Parent", parent ); if((newActor0 && !newActor0->getScene()) || (newActor1 && !newActor1->getScene())) { inStream.removeObjectRef( c.getScene(), "Joints", &inJoint ); } } } } #endif // PX_SUPPORT_PVD
6,837
C++
40.192771
161
0.771098
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/ExtRackAndPinionJoint.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "ExtRackAndPinionJoint.h"
#include "ExtConstraintHelper.h"
#include "extensions/PxRevoluteJoint.h"
#include "extensions/PxPrismaticJoint.h"
#include "PxArticulationJointReducedCoordinate.h"
//#include <stdio.h>
#include "omnipvd/ExtOmniPvdSetData.h"

using namespace physx;
using namespace Ext;

PX_IMPLEMENT_OUTPUT_ERROR

// Rack-and-pinion joint: couples the rotation of a hinge (pinion) joint to the
// translation of a prismatic (rack) joint through a fixed ratio. The joint data
// (ratio, tracked rack position 'px' and virtual pinion angle 'vangle') is stored
// in the shared RackAndPinionJointData block consumed by the solver-prep shader below.
RackAndPinionJoint::RackAndPinionJoint(const PxTolerancesScale& /*scale*/, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1) :
	RackAndPinionJointT(PxJointConcreteType::eRACK_AND_PINION, actor0, localFrame0, actor1, localFrame1, "RackAndPinionJointData")
{
	RackAndPinionJointData* data = static_cast<RackAndPinionJointData*>(mData);

	// No coupled joints until setJoints() is called; identity ratio by default.
	data->hingeJoint = NULL;
	data->prismaticJoint = NULL;
	data->ratio = 1.0f;
	data->px = 0.0f;
	data->vangle = 0.0f;

	resetError();
}

// Sets the rotation-to-translation coupling ratio and restarts error tracking,
// since previously accumulated angles are meaningless under a new ratio.
void RackAndPinionJoint::setRatio(float ratio)
{
	RackAndPinionJointData* data = reinterpret_cast<RackAndPinionJointData*>(mData);
	data->ratio = ratio;
	resetError();
	markDirty();

	OMNI_PVD_SET(OMNI_PVD_CONTEXT_HANDLE, PxRackAndPinionJoint, ratio, static_cast<PxRackAndPinionJoint&>(*this), ratio)
}

// Returns the current coupling ratio.
float RackAndPinionJoint::getRatio() const
{
	RackAndPinionJointData* data = reinterpret_cast<RackAndPinionJointData*>(mData);
	return data->ratio;
}

// Convenience setter: derives the ratio from tooth counts and rack length.
// Returns false (after reporting an error) on invalid parameters.
bool RackAndPinionJoint::setData(PxU32 nbRackTeeth, PxU32 nbPinionTeeth, float rackLength)
{
	if(!nbRackTeeth)
		return outputError<PxErrorCode::eINVALID_PARAMETER>(__LINE__, "PxRackAndPinionJoint::setData: nbRackTeeth cannot be zero.");

	if(!nbPinionTeeth)
		return outputError<PxErrorCode::eINVALID_PARAMETER>(__LINE__, "PxRackAndPinionJoint::setData: nbPinionTeeth cannot be zero.");

	if(rackLength<=0.0f)
		return outputError<PxErrorCode::eINVALID_PARAMETER>(__LINE__, "PxRackAndPinionJoint::setData: rackLength must be positive.");

	RackAndPinionJointData* data = reinterpret_cast<RackAndPinionJointData*>(mData);
	// One full rack traversal rotates the pinion nbRackTeeth/nbPinionTeeth times.
	data->ratio = (PxTwoPi*nbRackTeeth)/(rackLength*nbPinionTeeth);
	resetError();
	markDirty();
	return true;
}

// Registers the hinge (pinion) and prismatic (rack) joints to couple. Each can be
// a classic joint (revolute/prismatic or a D6 acting as one) or an articulation
// reduced-coordinate joint of the matching type. Returns false on a type mismatch.
bool RackAndPinionJoint::setJoints(const PxBase* hinge, const PxBase* prismatic)
{
	RackAndPinionJointData* data = static_cast<RackAndPinionJointData*>(mData);

	if(hinge)
	{
		const PxType type0 = hinge->getConcreteType();
		if(type0 == PxConcreteType::eARTICULATION_JOINT_REDUCED_COORDINATE)
		{
			const PxArticulationJointReducedCoordinate* joint0 = static_cast<const PxArticulationJointReducedCoordinate*>(hinge);
			const PxArticulationJointType::Enum artiJointType0 = joint0->getJointType();
			if(artiJointType0 != PxArticulationJointType::eREVOLUTE && artiJointType0 != PxArticulationJointType::eREVOLUTE_UNWRAPPED)
				return outputError<PxErrorCode::eINVALID_PARAMETER>(__LINE__, "PxRackAndPinionJoint::setJoints: passed joint must be a revolute joint.");
		}
		else
		{
			if(type0 != PxJointConcreteType::eREVOLUTE && type0 != PxJointConcreteType::eD6)
				return outputError<PxErrorCode::eINVALID_PARAMETER>(__LINE__, "PxRackAndPinionJoint::setJoints: passed hinge joint must be either a revolute joint or a D6 joint.");
		}
	}

	if(prismatic)
	{
		const PxType type1 = prismatic->getConcreteType();
		if(type1 == PxConcreteType::eARTICULATION_JOINT_REDUCED_COORDINATE)
		{
			const PxArticulationJointReducedCoordinate* joint1 = static_cast<const PxArticulationJointReducedCoordinate*>(prismatic);
			const PxArticulationJointType::Enum artiJointType1 = joint1->getJointType();
			if(artiJointType1 != PxArticulationJointType::ePRISMATIC)
				return outputError<PxErrorCode::eINVALID_PARAMETER>(__LINE__, "PxRackAndPinionJoint::setJoints: passed joint must be a prismatic joint.");
		}
		else
		{
			if(type1 != PxJointConcreteType::ePRISMATIC && type1 != PxJointConcreteType::eD6)
				return outputError<PxErrorCode::eINVALID_PARAMETER>(__LINE__, "PxRackAndPinionJoint::setJoints: passed prismatic joint must be either a prismatic joint or a D6 joint.");
		}
	}

	data->hingeJoint = hinge;
	data->prismaticJoint = prismatic;

	resetError();
	markDirty();

#if PX_SUPPORT_OMNI_PVD
	const PxBase* joints[] = { hinge, prismatic };
	PxU32 jointCount = sizeof(joints) / sizeof(joints[0]);
	OMNI_PVD_SET_ARRAY(OMNI_PVD_CONTEXT_HANDLE, PxRackAndPinionJoint, joints, static_cast<PxRackAndPinionJoint&>(*this), joints, jointCount)
#endif

	return true;
}

// Returns the currently coupled hinge and prismatic joints (either may be NULL).
void RackAndPinionJoint::getJoints(const PxBase*& hinge, const PxBase*& prismatic) const
{
	const RackAndPinionJointData* data = static_cast<const RackAndPinionJointData*>(mData);
	hinge = data->hingeJoint;
	prismatic = data->prismaticJoint;
}

// Shortest signed angular difference (angle1 - angle0), wrapped to (-Pi, Pi].
static float angleDiff(float angle0, float angle1)
{
	const float diff = fmodf( angle1 - angle0 + PxPi, PxTwoPi) - PxPi;
	return diff < -PxPi ? diff + PxTwoPi : diff;
}

// Re-reads the current hinge angle and prismatic position from the coupled joints,
// accumulates an unwrapped "virtual" pinion angle across frames, and writes the
// signed values into the constraint data block for the solver-prep shader.
void RackAndPinionJoint::updateError()
{
	RackAndPinionJointData* data = static_cast<RackAndPinionJointData*>(mData);

	if(!data->hingeJoint || !data->prismaticJoint)
		return;

	PxRigidActor* rackActor0;
	PxRigidActor* rackActor1;
	getActors(rackActor0, rackActor1);

	float Angle0 = 0.0f;
	float Sign0 = 0.0f;
	{
		PxRigidActor* hingeActor0;
		PxRigidActor* hingeActor1;
		const PxType type = data->hingeJoint->getConcreteType();
		if(type == PxConcreteType::eARTICULATION_JOINT_REDUCED_COORDINATE)
		{
			const PxArticulationJointReducedCoordinate* artiHingeJoint = static_cast<const PxArticulationJointReducedCoordinate*>(data->hingeJoint);
			hingeActor0 = &artiHingeJoint->getParentArticulationLink();
			hingeActor1 = &artiHingeJoint->getChildArticulationLink();
			Angle0 = artiHingeJoint->getJointPosition(PxArticulationAxis::eTWIST);
		}
		else
		{
			const PxJoint* hingeJoint = static_cast<const PxJoint*>(data->hingeJoint);
			hingeJoint->getActors(hingeActor0, hingeActor1);

			if(type == PxJointConcreteType::eREVOLUTE)
				Angle0 = static_cast<const PxRevoluteJoint*>(hingeJoint)->getAngle();
			else if (type == PxJointConcreteType::eD6)
				Angle0 = static_cast<const PxD6Joint*>(hingeJoint)->getTwistAngle();
		}

		// The sign depends on which of the hinge's actors this joint shares.
		if(rackActor0 == hingeActor0 || rackActor1 == hingeActor0)
			Sign0 = -1.0f;
		else if (rackActor0 == hingeActor1 || rackActor1 == hingeActor1)
			Sign0 = 1.0f;
		else
			PX_ASSERT(0);
	}

	if(!mInitDone)
	{
		// First update: seed the persistent angle so the first delta is zero.
		mInitDone = true;
		mPersistentAngle0 = Angle0;
	}

	// Accumulate the wrapped per-frame delta into an unwrapped virtual angle.
	const float travelThisFrame0 = angleDiff(Angle0, mPersistentAngle0);
	mVirtualAngle0 += travelThisFrame0;
//	printf("mVirtualAngle0: %f\n", mVirtualAngle0);

	mPersistentAngle0 = Angle0;

	float px = 0.0f;
	float Sign1 = 0.0f;
	{
		PxRigidActor* prismaticActor0;
		PxRigidActor* prismaticActor1;
		const PxType type = data->prismaticJoint->getConcreteType();
		if(type == PxConcreteType::eARTICULATION_JOINT_REDUCED_COORDINATE)
		{
			const PxArticulationJointReducedCoordinate* artiPrismaticJoint = static_cast<const PxArticulationJointReducedCoordinate*>(data->prismaticJoint);
			prismaticActor0 = &artiPrismaticJoint->getParentArticulationLink();
			prismaticActor1 = &artiPrismaticJoint->getChildArticulationLink();
			px = artiPrismaticJoint->getJointPosition(PxArticulationAxis::eX);
		}
		else
		{
			const PxJoint* prismaticJoint = static_cast<const PxJoint*>(data->prismaticJoint);
			prismaticJoint->getActors(prismaticActor0, prismaticActor1);

			if(type==PxJointConcreteType::ePRISMATIC)
				px = static_cast<const PxPrismaticJoint*>(prismaticJoint)->getPosition();
			else if(type==PxJointConcreteType::eD6)
				px = prismaticJoint->getRelativeTransform().p.x;
		}

		// Same shared-actor sign convention as for the hinge above.
		if(rackActor0 == prismaticActor0 || rackActor1 == prismaticActor0)
			Sign1 = -1.0f;
		else if(rackActor0 == prismaticActor1 || rackActor1 == prismaticActor1)
			Sign1 = 1.0f;
		else
			PX_ASSERT(0);
	}
//	printf("px: %f\n", px);

	data->px = Sign1*px;
	data->vangle = Sign0*mVirtualAngle0;
	markDirty();
}

// Clears the accumulated angle tracking; the next updateError() re-seeds it.
void RackAndPinionJoint::resetError()
{
	mVirtualAngle0 = 0.0f;
	mPersistentAngle0 = 0.0f;
	mInitDone = false;
}

// Debug visualization shader: draws the two joint frames (the axis-drawing code
// below is intentionally compiled out with if(0)).
static void RackAndPinionJointVisualize(PxConstraintVisualizer& viz, const void* constantBlock, const PxTransform& body0Transform, const PxTransform& body1Transform, PxU32 flags)
{
	if(flags & PxConstraintVisualizationFlag::eLOCAL_FRAMES)
	{
		const RackAndPinionJointData& data = *reinterpret_cast<const RackAndPinionJointData*>(constantBlock);

		// Visualize joint frames
		PxTransform32 cA2w, cB2w;
		joint::computeJointFrames(cA2w, cB2w, data, body0Transform, body1Transform);
		viz.visualizeJointFrames(cA2w, cB2w);
	}

	if(0)
	{
		const RackAndPinionJointData& data = *reinterpret_cast<const RackAndPinionJointData*>(constantBlock);

		if(0)
		{
			PxTransform32 cA2w, cB2w;
			joint::computeJointFrames(cA2w, cB2w, data, body0Transform, body1Transform);

			const PxVec3 gearAxis0 = cA2w.q.getBasisVector0();
			const PxVec3 rackPrismaticAxis = cB2w.q.getBasisVector0();

			viz.visualizeLine(cA2w.p, cA2w.p + gearAxis0, 0xff0000ff);
			viz.visualizeLine(cB2w.p, cB2w.p + rackPrismaticAxis, 0xff0000ff);
		}
	}
}

//TAG:solverprepshader
// Solver-prep shader: emits the single 1D constraint that couples pinion rotation
// (angular0 about the gear axis) to rack translation (linear1 along the prismatic
// axis, scaled by the ratio). Returns the number of constraints written (1).
static PxU32 RackAndPinionJointSolverPrep(Px1DConstraint* constraints,
	PxVec3p& body0WorldOffset,
	PxU32 /*maxConstraints*/,
	PxConstraintInvMassScale& invMassScale,
	const void* constantBlock,
	const PxTransform& bA2w,
	const PxTransform& bB2w,
	bool /*useExtendedLimits*/,
	PxVec3p& cA2wOut, PxVec3p& cB2wOut)
{
	const RackAndPinionJointData& data = *reinterpret_cast<const RackAndPinionJointData*>(constantBlock);

	PxTransform32 cA2w, cB2w;
	joint::ConstraintHelper ch(constraints, invMassScale, cA2w, cB2w, body0WorldOffset, data, bA2w, bB2w);

	cA2wOut = cB2w.p;
	cB2wOut = cB2w.p;

	// By convention the X basis vector of each joint frame is the working axis.
	const PxVec3 gearAxis = cA2w.q.getBasisVector0();
	const PxVec3 rackPrismaticAxis = cB2w.q.getBasisVector0();

	// PT: this optional bit of code tries to fix the ratio for cases where the "same" rack is moved e.g. above or below a gear.
	// In that case the rack would move in one direction or another depending on its position compared to the gear, and to avoid
	// having to use a negative ratio in one of these cases this code tries to compute the proper sign and handle both cases the
	// same way from the user's perspective. This created unexpected issues in ill-defined cases where e.g. the gear and the rack
	// completely overlap, and we end up with a +0 or -0 for "dp" in the code below. So now this code disables itself in these
	// cases but it would probably be better to disable it entirely. We don't do it though since it could break existing scenes.
	// We might want to revisit these decisions at some point.
	float Coeff = 1.0f;
	const float epsilon = 0.001f;
	const PxVec3 delta = cB2w.p - cA2w.p;
	if(delta.magnitudeSquared()>epsilon*epsilon)
	{
		const PxVec3 velocity = gearAxis.cross(delta);
		if(velocity.magnitudeSquared()>epsilon*epsilon)
		{
			const float dp = velocity.dot(rackPrismaticAxis);
			Coeff = fabsf(dp)>epsilon ? PxSign(dp) : 1.0f;
		}
	}

	Px1DConstraint& con = constraints[0];
	con.linear0 = PxVec3(0.0f);
	con.linear1 = rackPrismaticAxis * data.ratio*Coeff;
	con.angular0 = gearAxis;
	con.angular1 = PxVec3(0.0f);
	// Drift between tracked rack position and virtual pinion angle (see updateError).
	con.geometricError = -Coeff*data.px*data.ratio - data.vangle;
	con.minImpulse = -PX_MAX_F32;
	con.maxImpulse = PX_MAX_F32;
	con.velocityTarget = 0.f;
	con.forInternalUse = 0.f;
	con.solveHint = 0;
	con.flags = Px1DConstraintFlag::eOUTPUT_FORCE|Px1DConstraintFlag::eANGULAR_CONSTRAINT;
	con.mods.bounce.restitution = 0.0f;
	con.mods.bounce.velocityThreshold = 0.0f;
	return 1;
}

///////////////////////////////////////////////////////////////////////////////

// Shader table shared by all rack-and-pinion joints. eALWAYS_UPDATE is required
// because updateError() must run every frame to track the coupled joints.
static PxConstraintShaderTable gRackAndPinionJointShaders = { RackAndPinionJointSolverPrep, RackAndPinionJointVisualize, PxConstraintFlag::eALWAYS_UPDATE };

PxConstraintSolverPrep RackAndPinionJoint::getPrep() const { return gRackAndPinionJointShaders.solverPrep; }

// Public factory function. Validates inputs and creates the joint instance.
PxRackAndPinionJoint* physx::PxRackAndPinionJointCreate(PxPhysics& physics, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1)
{
	PX_CHECK_AND_RETURN_NULL(localFrame0.isSane(), "PxRackAndPinionJointCreate: local frame 0 is not a valid transform");
	PX_CHECK_AND_RETURN_NULL(localFrame1.isSane(), "PxRackAndPinionJointCreate: local frame 1 is not a valid transform");
	PX_CHECK_AND_RETURN_NULL((actor0 && actor0->is<PxRigidBody>()) || (actor1 && actor1->is<PxRigidBody>()), "PxRackAndPinionJointCreate: at least one actor must be dynamic");
	PX_CHECK_AND_RETURN_NULL(actor0 != actor1, "PxRackAndPinionJointCreate: actors must be different");

	return createJointT<RackAndPinionJoint, RackAndPinionJointData>(physics, actor0, localFrame0, actor1, localFrame1, gRackAndPinionJointShaders);
}

// PX_SERIALIZATION
// Fixes up the constraint pointer and the serialized references to the coupled joints.
void RackAndPinionJoint::resolveReferences(PxDeserializationContext& context)
{
	mPxConstraint = resolveConstraintPtr(context, mPxConstraint, this, gRackAndPinionJointShaders);

	RackAndPinionJointData* data = static_cast<RackAndPinionJointData*>(mData);
	context.translatePxBase(data->hingeJoint);
	context.translatePxBase(data->prismaticJoint);
}
//~PX_SERIALIZATION

#if PX_SUPPORT_OMNI_PVD
// Registers a newly created joint with OmniPVD and pushes its initial attributes.
template<>
void physx::Ext::omniPvdInitJoint<RackAndPinionJoint>(RackAndPinionJoint& joint)
{
	OMNI_PVD_WRITE_SCOPE_BEGIN(pvdWriter, pvdRegData)

	PxRackAndPinionJoint& j = static_cast<PxRackAndPinionJoint&>(joint);
	OMNI_PVD_CREATE_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxRackAndPinionJoint, j);
	omniPvdSetBaseJointParams(static_cast<PxJoint&>(joint), PxJointConcreteType::eRACK_AND_PINION);

	OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxRackAndPinionJoint, ratio, j, joint.getRatio())

	OMNI_PVD_WRITE_SCOPE_END
}
#endif
15,195
C++
37.568528
184
0.760777
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/ExtTaskQueueHelper.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef EXT_TASK_QUEUE_HELPER_H #define EXT_TASK_QUEUE_HELPER_H #include "task/PxTask.h" #include "ExtSharedQueueEntryPool.h" namespace physx { #define EXT_TASK_QUEUE_ENTRY_POOL_SIZE 128 namespace Ext { class TaskQueueHelper { public: static PxBaseTask* fetchTask(PxSList& taskQueue, Ext::SharedQueueEntryPool<>& entryPool) { SharedQueueEntry* entry = static_cast<SharedQueueEntry*>(taskQueue.pop()); if (entry) { PxBaseTask* task = reinterpret_cast<PxBaseTask*>(entry->mObjectRef); entryPool.putEntry(*entry); return task; } else return NULL; } }; } // namespace Ext } #endif
2,323
C
35.312499
90
0.749892
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/ExtD6JointCreate.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "foundation/PxMathUtils.h"
#include "extensions/PxD6JointCreate.h"
#include "extensions/PxD6Joint.h"
#include "extensions/PxFixedJoint.h"
#include "extensions/PxRevoluteJoint.h"
#include "extensions/PxSphericalJoint.h"
#include "extensions/PxPrismaticJoint.h"
#include "extensions/PxDistanceJoint.h"
#include "PxPhysics.h"

using namespace physx;

// Reference axis: each factory rotates the joint frames so the joint's working
// axis maps onto the D6/native joint's X axis.
static const PxVec3 gX(1.0f, 0.0f, 0.0f);

// Creates a fixed joint, either as a fully locked D6 joint (useD6=true) or as a
// native PxFixedJoint.
PxJoint* physx::PxD6JointCreate_Fixed(PxPhysics& physics, PxRigidActor* actor0, const PxVec3& localPos0, PxRigidActor* actor1, const PxVec3& localPos1, bool useD6)
{
	const PxTransform jointFrame0(localPos0);
	const PxTransform jointFrame1(localPos1);

	if(useD6)
		// PT: by default all D6 axes are locked, i.e. it is a fixed joint.
		return PxD6JointCreate(physics, actor0, jointFrame0, actor1, jointFrame1);
	else
		return PxFixedJointCreate(physics, actor0, jointFrame0, actor1, jointFrame1);
}

// Creates a distance joint limited to maxDist, either as a D6 joint with all
// rotations free and all translations distance-limited, or as a native
// PxDistanceJoint with its max-distance flag enabled.
PxJoint* physx::PxD6JointCreate_Distance(PxPhysics& physics, PxRigidActor* actor0, const PxVec3& localPos0, PxRigidActor* actor1, const PxVec3& localPos1, float maxDist, bool useD6)
{
	const PxTransform localFrame0(localPos0);
	const PxTransform localFrame1(localPos1);

	if(useD6)
	{
		PxD6Joint* j = PxD6JointCreate(physics, actor0, localFrame0, actor1, localFrame1);
		j->setMotion(PxD6Axis::eTWIST, PxD6Motion::eFREE);
		j->setMotion(PxD6Axis::eSWING1, PxD6Motion::eFREE);
		j->setMotion(PxD6Axis::eSWING2, PxD6Motion::eFREE);
		j->setMotion(PxD6Axis::eX, PxD6Motion::eLIMITED);
		j->setMotion(PxD6Axis::eY, PxD6Motion::eLIMITED);
		j->setMotion(PxD6Axis::eZ, PxD6Motion::eLIMITED);
		j->setDistanceLimit(PxJointLinearLimit(maxDist));
		return j;
	}
	else
	{
		PxDistanceJoint* j = PxDistanceJointCreate(physics, actor0, localFrame0, actor1, localFrame1);
		j->setDistanceJointFlag(PxDistanceJointFlag::eMAX_DISTANCE_ENABLED, true);
		j->setMaxDistance(maxDist);
		return j;
	}
}

// Creates a prismatic joint sliding along 'axis' with translation limits
// [minLimit, maxLimit]. minLimit==maxLimit locks the axis; minLimit>maxLimit
// leaves it free (no limit).
PxJoint* physx::PxD6JointCreate_Prismatic(PxPhysics& physics, PxRigidActor* actor0, const PxVec3& localPos0, PxRigidActor* actor1, const PxVec3& localPos1, const PxVec3& axis, float minLimit, float maxLimit, bool useD6)
{
	// Rotate the frames so the requested axis becomes the joint's X axis.
	const PxQuat q = PxShortestRotation(gX, axis);

	const PxTransform localFrame0(localPos0, q);
	const PxTransform localFrame1(localPos1, q);

	const PxJointLinearLimitPair limit(PxTolerancesScale(), minLimit, maxLimit);

	if(useD6)
	{
		PxD6Joint* j = PxD6JointCreate(physics, actor0, localFrame0, actor1, localFrame1);
		j->setMotion(PxD6Axis::eX, PxD6Motion::eFREE);
		if(minLimit==maxLimit)
			j->setMotion(PxD6Axis::eX, PxD6Motion::eLOCKED);
		else if(minLimit>maxLimit)
			j->setMotion(PxD6Axis::eX, PxD6Motion::eFREE);
		else// if(minLimit<maxLimit)
		{
			j->setMotion(PxD6Axis::eX, PxD6Motion::eLIMITED);
			j->setLinearLimit(PxD6Axis::eX, limit);
		}
		return j;
	}
	else
	{
		PxPrismaticJoint* j = PxPrismaticJointCreate(physics, actor0, localFrame0, actor1, localFrame1);
		if(minLimit<maxLimit)
		{
			j->setPrismaticJointFlag(PxPrismaticJointFlag::eLIMIT_ENABLED, true);
			j->setLimit(limit);
		}
		return j;
	}
}

// Creates a revolute (hinge) joint about 'axis' with twist limits
// [minLimit, maxLimit]. minLimit==maxLimit locks the twist; minLimit>maxLimit
// leaves it free (no limit).
PxJoint* physx::PxD6JointCreate_Revolute(PxPhysics& physics, PxRigidActor* actor0, const PxVec3& localPos0, PxRigidActor* actor1, const PxVec3& localPos1, const PxVec3& axis, float minLimit, float maxLimit, bool useD6)
{
	// Rotate the frames so the requested axis becomes the joint's X (twist) axis.
	const PxQuat q = PxShortestRotation(gX, axis);

	const PxTransform localFrame0(localPos0, q);
	const PxTransform localFrame1(localPos1, q);

	const PxJointAngularLimitPair limit(minLimit, maxLimit);

	if(useD6)
	{
		PxD6Joint* j = PxD6JointCreate(physics, actor0, localFrame0, actor1, localFrame1);
		if(minLimit==maxLimit)
			j->setMotion(PxD6Axis::eTWIST, PxD6Motion::eLOCKED);
		else if(minLimit>maxLimit)
			j->setMotion(PxD6Axis::eTWIST, PxD6Motion::eFREE);
		else// if(minLimit<maxLimit)
		{
			j->setMotion(PxD6Axis::eTWIST, PxD6Motion::eLIMITED);
			j->setTwistLimit(limit);
		}
		return j;
	}
	else
	{
		PxRevoluteJoint* j = PxRevoluteJointCreate(physics, actor0, localFrame0, actor1, localFrame1);
		if(minLimit<maxLimit)
		{
			j->setRevoluteJointFlag(PxRevoluteJointFlag::eLIMIT_ENABLED, true);
			j->setLimit(limit);
		}
		return j;
	}
}

// Creates a spherical (ball-socket) joint whose primary axis is 'axis', with a
// symmetric swing cone of half-angles limit1/limit2. Non-positive limits mean
// unconstrained swing.
PxJoint* physx::PxD6JointCreate_Spherical(PxPhysics& physics, PxRigidActor* actor0, const PxVec3& localPos0, PxRigidActor* actor1, const PxVec3& localPos1, const PxVec3& axis, float limit1, float limit2, bool useD6)
{
	// Rotate the frames so the requested axis becomes the joint's X axis.
	const PxQuat q = PxShortestRotation(gX, axis);

	const PxTransform localFrame0(localPos0, q);
	const PxTransform localFrame1(localPos1, q);

	const PxJointLimitCone limit(limit1, limit2);

	if(useD6)
	{
		PxD6Joint* j = PxD6JointCreate(physics, actor0, localFrame0, actor1, localFrame1);
		j->setMotion(PxD6Axis::eTWIST, PxD6Motion::eFREE);
		if(limit1>0.0f && limit2>0.0f)
		{
			j->setMotion(PxD6Axis::eSWING1, PxD6Motion::eLIMITED);
			j->setMotion(PxD6Axis::eSWING2, PxD6Motion::eLIMITED);
			j->setSwingLimit(limit);
		}
		else
		{
			j->setMotion(PxD6Axis::eSWING1, PxD6Motion::eFREE);
			j->setMotion(PxD6Axis::eSWING2, PxD6Motion::eFREE);
		}
		return j;
	}
	else
	{
		PxSphericalJoint* j = PxSphericalJointCreate(physics, actor0, localFrame0, actor1, localFrame1);
		if(limit1>0.0f && limit2>0.0f)
		{
			j->setSphericalJointFlag(PxSphericalJointFlag::eLIMIT_ENABLED, true);
			j->setLimitCone(limit);
		}
		return j;
	}
}

// Creates a spherical joint with an ASYMMETRIC swing cone [minLimit1,maxLimit1] x
// [minLimit2,maxLimit2]. Since PxJointLimitCone is symmetric, the asymmetric
// range is re-centered: the joint frame of actor0 is pre-rotated by the range
// midpoints (returned via apiroty/apirotz so callers can compensate) and the
// cone uses the half-ranges.
PxJoint* physx::PxD6JointCreate_GenericCone(float& apiroty, float& apirotz, PxPhysics& physics, PxRigidActor* actor0, const PxVec3& localPos0, PxRigidActor* actor1, const PxVec3& localPos1, float minLimit1, float maxLimit1, float minLimit2, float maxLimit2, bool useD6)
{
	const float DesiredMinSwingY = minLimit1;
	const float DesiredMaxSwingY = maxLimit1;
	const float DesiredMinSwingZ = minLimit2;
	const float DesiredMaxSwingZ = maxLimit2;

	// Half-range becomes the symmetric cone extent; midpoint becomes a frame rotation.
	const float APIMaxY = (DesiredMaxSwingY - DesiredMinSwingY)*0.5f;
	const float APIMaxZ = (DesiredMaxSwingZ - DesiredMinSwingZ)*0.5f;
	const float APIRotY = (DesiredMaxSwingY + DesiredMinSwingY)*0.5f;
	const float APIRotZ = (DesiredMaxSwingZ + DesiredMinSwingZ)*0.5f;
	apiroty = APIRotY;
	apirotz = APIRotZ;

	const PxQuat RotY = PxGetRotYQuat(APIRotY);
	const PxQuat RotZ = PxGetRotZQuat(APIRotZ);
	const PxQuat Rot = RotY * RotZ;

	const PxTransform localFrame0(localPos0, Rot);
	const PxTransform localFrame1(localPos1);

	const PxJointLimitCone limit(APIMaxY, APIMaxZ);

	if(useD6)
	{
		PxD6Joint* j = PxD6JointCreate(physics, actor0, localFrame0, actor1, localFrame1);
		j->setMotion(PxD6Axis::eTWIST, PxD6Motion::eFREE);
		j->setMotion(PxD6Axis::eSWING1, PxD6Motion::eLIMITED);
		j->setMotion(PxD6Axis::eSWING2, PxD6Motion::eLIMITED);
		j->setSwingLimit(limit);
		return j;
	}
	else
	{
		PxSphericalJoint* j = PxSphericalJointCreate(physics, actor0, localFrame0, actor1, localFrame1);
		j->setSphericalJointFlag(PxSphericalJointFlag::eLIMIT_ENABLED, true);
		j->setLimitCone(limit);
		return j;
	}
}

// Creates a D6 joint with a pyramid-shaped (independently limited per-axis)
// swing limit about 'axis'. Falls back to free swing if the limits are invalid.
// D6-only: there is no native joint equivalent.
PxJoint* physx::PxD6JointCreate_Pyramid(PxPhysics& physics, PxRigidActor* actor0, const PxVec3& localPos0, PxRigidActor* actor1, const PxVec3& localPos1, const PxVec3& axis,
										float minLimit1, float maxLimit1, float minLimit2, float maxLimit2)
{
	// Rotate the frames so the requested axis becomes the joint's X axis.
	const PxQuat q = PxShortestRotation(gX, axis);

	const PxTransform localFrame0(localPos0, q);
	const PxTransform localFrame1(localPos1, q);

	const PxJointLimitPyramid limit(minLimit1, maxLimit1, minLimit2, maxLimit2);

	PxD6Joint* j = PxD6JointCreate(physics, actor0, localFrame0, actor1, localFrame1);
	j->setMotion(PxD6Axis::eTWIST, PxD6Motion::eFREE);
	if(limit.isValid())
	{
		j->setMotion(PxD6Axis::eSWING1, PxD6Motion::eLIMITED);
		j->setMotion(PxD6Axis::eSWING2, PxD6Motion::eLIMITED);
		j->setPyramidSwingLimit(limit);
	}
	else
	{
		j->setMotion(PxD6Axis::eSWING1, PxD6Motion::eFREE);
		j->setMotion(PxD6Axis::eSWING2, PxD6Motion::eFREE);
	}
	return j;
}
9,426
C++
36.557769
269
0.756525
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/ExtRaycastCCD.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "geometry/PxBoxGeometry.h" #include "geometry/PxSphereGeometry.h" #include "geometry/PxCapsuleGeometry.h" #include "geometry/PxConvexMeshGeometry.h" #include "geometry/PxConvexMesh.h" #include "extensions/PxShapeExt.h" #include "extensions/PxRaycastCCD.h" #include "PxScene.h" #include "PxRigidDynamic.h" #include "foundation/PxArray.h" using namespace physx; namespace physx { class RaycastCCDManagerInternal { PX_NOCOPY(RaycastCCDManagerInternal) public: RaycastCCDManagerInternal(PxScene* scene) : mScene(scene) {} ~RaycastCCDManagerInternal(){} bool registerRaycastCCDObject(PxRigidDynamic* actor, PxShape* shape); bool unregisterRaycastCCDObject(PxRigidDynamic* actor, PxShape* shape); void doRaycastCCD(bool doDynamicDynamicCCD); struct CCDObject { PX_FORCE_INLINE CCDObject(PxRigidDynamic* actor, PxShape* shape, const PxVec3& witness) : mActor(actor), mShape(shape), mWitness(witness) {} PxRigidDynamic* mActor; PxShape* mShape; PxVec3 mWitness; }; private: PxScene* mScene; physx::PxArray<CCDObject> mObjects; }; } static PxVec3 getShapeCenter(PxShape* shape, const PxTransform& pose) { PxVec3 offset(0.0f); const PxGeometry& geom = shape->getGeometry(); if(geom.getType()==PxGeometryType::eCONVEXMESH) { const PxConvexMeshGeometry& geometry = static_cast<const PxConvexMeshGeometry&>(geom); PxReal mass; PxMat33 localInertia; PxVec3 localCenterOfMass; geometry.convexMesh->getMassInformation(mass, localInertia, localCenterOfMass); offset += localCenterOfMass; } return pose.transform(offset); } static PX_FORCE_INLINE PxVec3 getShapeCenter(PxRigidActor* actor, PxShape* shape) { const PxTransform pose = PxShapeExt::getGlobalPose(*shape, *actor); return getShapeCenter(shape, pose); } static PxReal computeInternalRadius(PxRigidActor* actor, PxShape* shape, const PxVec3& dir) { const PxBounds3 bounds = PxShapeExt::getWorldBounds(*shape, *actor); const PxReal diagonal = (bounds.maximum - bounds.minimum).magnitude(); const PxReal offsetFromOrigin = diagonal * 2.0f; PxTransform 
pose = PxShapeExt::getGlobalPose(*shape, *actor); PxReal internalRadius = 0.0f; const PxReal length = offsetFromOrigin*2.0f; const PxGeometry& geom = shape->getGeometry(); switch(geom.getType()) { case PxGeometryType::eSPHERE: { const PxSphereGeometry& geometry = static_cast<const PxSphereGeometry&>(geom); internalRadius = geometry.radius; } break; case PxGeometryType::eBOX: case PxGeometryType::eCAPSULE: { pose.p = PxVec3(0.0f); const PxVec3 virtualOrigin = pose.p + dir * offsetFromOrigin; PxRaycastHit hit; PxU32 nbHits = PxGeometryQuery::raycast(virtualOrigin, -dir, shape->getGeometry(), pose, length, PxHitFlags(0), 1, &hit); PX_UNUSED(nbHits); PX_ASSERT(nbHits); internalRadius = offsetFromOrigin - hit.distance; } break; case PxGeometryType::eCONVEXMESH: { PxVec3 shapeCenter = getShapeCenter(shape, pose); shapeCenter -= pose.p; pose.p = PxVec3(0.0f); const PxVec3 virtualOrigin = shapeCenter + dir * offsetFromOrigin; PxRaycastHit hit; PxU32 nbHits = PxGeometryQuery::raycast(virtualOrigin, -dir, shape->getGeometry(), pose, length, PxHitFlags(0), 1, &hit); PX_UNUSED(nbHits); PX_ASSERT(nbHits); internalRadius = offsetFromOrigin - hit.distance; } break; default: break; } return internalRadius; } class CCDRaycastFilterCallback : public PxQueryFilterCallback { public: CCDRaycastFilterCallback(PxRigidActor* actor, PxShape* shape) : mActor(actor), mShape(shape){} PxRigidActor* mActor; PxShape* mShape; virtual PxQueryHitType::Enum preFilter(const PxFilterData&, const PxShape* shape, const PxRigidActor* actor, PxHitFlags&) { if(mActor==actor && mShape==shape) return PxQueryHitType::eNONE; return PxQueryHitType::eBLOCK; } virtual PxQueryHitType::Enum postFilter(const PxFilterData&, const PxQueryHit&, const PxShape*, const PxRigidActor*) { return PxQueryHitType::eNONE; } }; static bool CCDRaycast(PxScene* scene, PxRigidActor* actor, PxShape* shape, const PxVec3& origin, const PxVec3& unitDir, const PxReal distance, PxRaycastHit& hit, bool dyna_dyna) { const PxQueryFlags 
qf(dyna_dyna ? PxQueryFlags(PxQueryFlag::eSTATIC|PxQueryFlag::eDYNAMIC|PxQueryFlag::ePREFILTER) : PxQueryFlags(PxQueryFlag::eSTATIC)); // (continued from previous dataset row: tail of CCDRaycast) — prefilter only needed when dynamics are included, to skip self-hits
	const PxQueryFilterData filterData(PxFilterData(), qf);
	// Filter callback rejecting the raycasting actor/shape itself.
	CCDRaycastFilterCallback CB(actor, shape);
	PxRaycastBuffer buf1;
	scene->raycast(origin, unitDir, distance, buf1, PxHitFlags(0), filterData, &CB);
	hit = buf1.block;
	return buf1.hasBlock;
}

// Returns the actor cast to PxRigidDynamic if it is eligible for raycast CCD,
// NULL otherwise. Eligibility: rigid dynamic, exactly one shape, not kinematic.
static PxRigidDynamic* canDoCCD(PxRigidActor& actor, PxShape* /*shape*/)
{
	if(actor.getConcreteType()!=PxConcreteType::eRIGID_DYNAMIC)
		return NULL;	// PT: no need to do it for statics

	PxRigidDynamic* dyna = static_cast<PxRigidDynamic*>(&actor);

	const PxU32 nbShapes = dyna->getNbShapes();
	if(nbShapes!=1)
		return NULL;	// PT: only works with simple actors for now

	if(dyna->getRigidBodyFlags() & PxRigidBodyFlag::eKINEMATIC)
		return NULL;	// PT: no need to do it for kinematics

	return dyna;
}

// Core raycast-CCD step for one object: casts a ray from the previous frame's
// witness point (object.mWitness) to the new shape center; on a hit within the
// shape's internal radius, pulls the actor back along the ray and re-teleports
// it via setGlobalPose. Returns true if the CCD witness should be updated to
// the new shape center, false if the old witness must be kept.
static bool doRaycastCCD(PxScene* scene, const RaycastCCDManagerInternal::CCDObject& object, PxTransform& newPose, PxVec3& newShapeCenter, bool dyna_dyna)
{
	PxRigidDynamic* dyna = canDoCCD(*object.mActor, object.mShape);
	if(!dyna)
		return true;	// ineligible objects just update their witness

	bool updateCCDWitness = true;

	// Offset between the pose origin and the shape center (preserved when the center is moved back).
	const PxVec3 offset = newPose.p - newShapeCenter;

	const PxVec3& origin = object.mWitness;
	const PxVec3& dest = newShapeCenter;

	PxVec3 dir = dest - origin;
	const PxReal length = dir.magnitude();
	if(length!=0.0f)	// no motion => nothing to sweep
	{
		dir /= length;

		const PxReal internalRadius = computeInternalRadius(object.mActor, object.mShape, dir);

		PxRaycastHit hit;
		if(internalRadius!=0.0f && CCDRaycast(scene, object.mActor, object.mShape, origin, dir, length, hit, dyna_dyna))
		{
			updateCCDWitness = false;

			// Keep the shape slightly embedded (75% of the internal radius) rather than flush with the surface.
			const PxReal radiusLimit = internalRadius * 0.75f;
			if(hit.distance>radiusLimit)
			{
				// Far enough: move the center back to radiusLimit before the hit.
				newShapeCenter = origin + dir * (hit.distance - radiusLimit);
			}
			else
			{
				// Too close: against another dynamic, let the solver handle it; against statics, snap back to the witness.
				if(hit.actor->getConcreteType()==PxConcreteType::eRIGID_DYNAMIC)
					return true;
				newShapeCenter = origin;
			}

			// Rebuild the actor pose from the corrected shape center.
			newPose.p = offset + newShapeCenter;
			const PxTransform shapeLocalPose = object.mShape->getLocalPose();
			const PxTransform
inverseShapeLocalPose = shapeLocalPose.getInverse(); // (continued from previous dataset row: tail of static doRaycastCCD)
			// Convert the corrected shape pose back to an actor pose and teleport the actor.
			const PxTransform newGlobalPose = newPose * inverseShapeLocalPose;
			dyna->setGlobalPose(newGlobalPose);
		}
	}
	return updateCCDWitness;
}

// Registers (actor, shape) for raycast CCD; the initial witness point is the
// shape's current center. Returns false on null inputs.
bool RaycastCCDManagerInternal::registerRaycastCCDObject(PxRigidDynamic* actor, PxShape* shape)
{
	if(!actor || !shape)
		return false;

	mObjects.pushBack(CCDObject(actor, shape, getShapeCenter(actor, shape)));
	return true;
}

// Unregisters (actor, shape). Uses swap-with-last removal, so registration
// order is not preserved. Returns false if the pair was not found.
bool RaycastCCDManagerInternal::unregisterRaycastCCDObject(PxRigidDynamic* actor, PxShape* shape)
{
	if(!actor || !shape)
		return false;

	const PxU32 nbObjects = mObjects.size();
	for(PxU32 i=0;i<nbObjects;i++)
	{
		if(mObjects[i].mActor==actor && mObjects[i].mShape==shape)
		{
			mObjects[i] = mObjects[nbObjects-1];	// swap with last, then pop
			mObjects.popBack();
			return true;
		}
	}
	return false;
}

// Runs one raycast-CCD pass over all registered objects. Sleeping actors are
// skipped; for the others, the witness point is refreshed when the static
// helper ::doRaycastCCD says it is safe to do so.
void RaycastCCDManagerInternal::doRaycastCCD(bool doDynamicDynamicCCD)
{
	const PxU32 nbObjects = mObjects.size();
	for(PxU32 i=0;i<nbObjects;i++)
	{
		CCDObject& object = mObjects[i];

		if(object.mActor->isSleeping())
			continue;

		PxTransform newPose = PxShapeExt::getGlobalPose(*object.mShape, *object.mActor);
		PxVec3 newShapeCenter = getShapeCenter(object.mShape, newPose);

		if(::doRaycastCCD(mScene, object, newPose, newShapeCenter, doDynamicDynamicCCD))
			object.mWitness = newShapeCenter;
	}
}

// --- Public pimpl wrappers: RaycastCCDManager owns a RaycastCCDManagerInternal ---

RaycastCCDManager::RaycastCCDManager(PxScene* scene)
{
	mImpl = new RaycastCCDManagerInternal(scene);
}

RaycastCCDManager::~RaycastCCDManager()
{
	delete mImpl;
}

bool RaycastCCDManager::registerRaycastCCDObject(PxRigidDynamic* actor, PxShape* shape)
{
	return mImpl->registerRaycastCCDObject(actor, shape);
}

bool RaycastCCDManager::unregisterRaycastCCDObject(PxRigidDynamic* actor, PxShape* shape)
{
	return mImpl->unregisterRaycastCCDObject(actor, shape);
}

void RaycastCCDManager::doRaycastCCD(bool doDynamicDynamicCCD)
{
	mImpl->doRaycastCCD(doDynamicDynamicCCD);
}
9,900
C++
29.940625
178
0.751212
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/ExtCpuWorkerThread.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef EXT_CPU_WORKER_THREAD_H
#define EXT_CPU_WORKER_THREAD_H

#include "foundation/PxThread.h"
#include "ExtDefaultCpuDispatcher.h"
#include "ExtSharedQueueEntryPool.h"

namespace physx
{
namespace Ext
{
	class DefaultCpuDispatcher;

#if PX_VC
#pragma warning(push)
#pragma warning(disable:4324)	// Padding was added at the end of a structure because of a __declspec(align) value. Because of the SList member I assume
#endif

	// Worker thread owned by a DefaultCpuDispatcher. Each worker keeps a local
	// lock-free job list (PxSList) fed through a shared queue-entry pool, and
	// records the thread id of the task submitter so work can be kept local.
	class CpuWorkerThread : public PxThread
	{
	public:
												CpuWorkerThread();
												~CpuWorkerThread();

		// Binds this worker to its owning dispatcher (must be called before the thread runs).
				void							initialize(DefaultCpuDispatcher* ownerDispatcher);
		// Thread entry point (PxThread override).
				void							execute();
		// Tries to enqueue 'task' on this worker's local job list; see .cpp for the exact acceptance rule.
				bool							tryAcceptJobToLocalQueue(PxBaseTask& task, PxThread::Id taskSubmitionThread);
		// Pops a task back out of the local job list (NULL if none) — used for work stealing/give-up.
				PxBaseTask*						giveUpJob();
				PxThread::Id					getWorkerThreadId() const { return mThreadId; }

	protected:
				SharedQueueEntryPool<>			mQueueEntryPool;	// pool backing the local job list's entries
				DefaultCpuDispatcher*			mOwner;				// owning dispatcher (non-owning back-pointer)
				PxSList							mLocalJobList;		// lock-free list of tasks accepted by this worker
				PxThread::Id					mThreadId;			// OS thread id of this worker once running
	};

#if PX_VC
#pragma warning(pop)
#endif

} // namespace Ext
}

#endif
2,732
C++
35.44
114
0.752196
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/ExtD6Joint.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "common/PxRenderBuffer.h" #include "ExtD6Joint.h" #include "ExtConstraintHelper.h" #include "CmConeLimitHelper.h" #include "omnipvd/ExtOmniPvdSetData.h" using namespace physx; using namespace Ext; D6Joint::D6Joint(const PxTolerancesScale& scale, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1) : D6JointT (PxJointConcreteType::eD6, actor0, localFrame0, actor1, localFrame1, "D6JointData"), mRecomputeMotion(true) { D6JointData* data = static_cast<D6JointData*>(mData); for(PxU32 i=0;i<6;i++) data->motion[i] = PxD6Motion::eLOCKED; data->twistLimit = PxJointAngularLimitPair(-PxPi/2, PxPi/2); data->swingLimit = PxJointLimitCone(PxPi/2, PxPi/2); data->pyramidSwingLimit = PxJointLimitPyramid(-PxPi/2, PxPi/2, -PxPi/2, PxPi/2); data->distanceLimit = PxJointLinearLimit(PX_MAX_F32); data->distanceMinDist = 1e-6f*scale.length; data->linearLimitX = PxJointLinearLimitPair(scale); data->linearLimitY = PxJointLinearLimitPair(scale); data->linearLimitZ = PxJointLinearLimitPair(scale); for(PxU32 i=0;i<PxD6Drive::eCOUNT;i++) data->drive[i] = PxD6JointDrive(); data->drivePosition = PxTransform(PxIdentity); data->driveLinearVelocity = PxVec3(0.0f); data->driveAngularVelocity = PxVec3(0.0f); data->mUseDistanceLimit = false; data->mUseNewLinearLimits = false; data->mUseConeLimit = false; data->mUsePyramidLimits = false; } PxD6Motion::Enum D6Joint::getMotion(PxD6Axis::Enum index) const { return data().motion[index]; } void D6Joint::setMotion(PxD6Axis::Enum index, PxD6Motion::Enum t) { data().motion[index] = t; mRecomputeMotion = true; markDirty(); #if PX_SUPPORT_OMNI_PVD PxD6Motion::Enum motions[PxD6Axis::eCOUNT]; for (PxU32 i = 0; i < PxD6Axis::eCOUNT; ++i) motions[i] = getMotion(PxD6Axis::Enum(i)); OMNI_PVD_SET_ARRAY(OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, motions, static_cast<PxD6Joint&>(*this), motions, PxD6Axis::eCOUNT) #endif } PxReal D6Joint::getTwistAngle() const { return getTwistAngle_Internal(); } PxReal 
D6Joint::getSwingYAngle() const { return getSwingYAngle_Internal(); } PxReal D6Joint::getSwingZAngle() const { return getSwingZAngle_Internal(); } PxD6JointDrive D6Joint::getDrive(PxD6Drive::Enum index) const { return data().drive[index]; } void D6Joint::setDrive(PxD6Drive::Enum index, const PxD6JointDrive& d) { PX_CHECK_AND_RETURN(d.isValid(), "PxD6Joint::setDrive: drive is invalid"); data().drive[index] = d; mRecomputeMotion = true; markDirty(); #if PX_SUPPORT_OMNI_PVD OMNI_PVD_WRITE_SCOPE_BEGIN(pvdWriter, pvdRegData) const PxD6Joint& j = static_cast<const PxD6Joint&>(*this); PxReal forceLimit[PxD6Axis::eCOUNT]; for (PxU32 i = 0; i < PxD6Axis::eCOUNT; ++i) forceLimit[i] = getDrive(PxD6Drive::Enum(i)).forceLimit; OMNI_PVD_SET_ARRAY_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, driveForceLimit, j, forceLimit, PxD6Axis::eCOUNT) PxD6JointDriveFlags flags[PxD6Axis::eCOUNT]; for (PxU32 i = 0; i < PxD6Axis::eCOUNT; ++i) flags[i] = getDrive(PxD6Drive::Enum(i)).flags; OMNI_PVD_SET_ARRAY_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, driveFlags, j, flags, PxD6Axis::eCOUNT) PxReal stiffness[PxD6Axis::eCOUNT]; for (PxU32 i = 0; i < PxD6Axis::eCOUNT; ++i) stiffness[i] = getDrive(PxD6Drive::Enum(i)).stiffness; OMNI_PVD_SET_ARRAY_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, driveStiffness, j, stiffness, PxD6Axis::eCOUNT) PxReal damping[PxD6Axis::eCOUNT]; for (PxU32 i = 0; i < PxD6Axis::eCOUNT; ++i) damping[i] = getDrive(PxD6Drive::Enum(i)).damping; OMNI_PVD_SET_ARRAY_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, driveDamping, j, damping, PxD6Axis::eCOUNT) OMNI_PVD_WRITE_SCOPE_END #endif } void D6Joint::setDistanceLimit(const PxJointLinearLimit& l) { PX_CHECK_AND_RETURN(l.isValid(), "PxD6Joint::setDistanceLimit: limit invalid"); data().distanceLimit = l; data().mUseDistanceLimit = true; markDirty(); #if PX_SUPPORT_OMNI_PVD OMNI_PVD_WRITE_SCOPE_BEGIN(pvdWriter, pvdRegData) const PxD6Joint& j = 
static_cast<const PxD6Joint&>(*this); OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, distanceLimitValue, j, l.value) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, distanceLimitRestitution, j, l.restitution) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, distanceLimitBounceThreshold, j, l.bounceThreshold) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, distanceLimitStiffness, j, l.stiffness) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, distanceLimitDamping, j, l.damping) OMNI_PVD_WRITE_SCOPE_END #endif } PxJointLinearLimit D6Joint::getDistanceLimit() const { return data().distanceLimit; } void D6Joint::setLinearLimit(PxD6Axis::Enum axis, const PxJointLinearLimitPair& limit) { PX_CHECK_AND_RETURN(axis>=PxD6Axis::eX && axis<=PxD6Axis::eZ, "PxD6Joint::setLinearLimit: invalid axis value"); PX_CHECK_AND_RETURN(limit.isValid(), "PxD6Joint::setLinearLimit: limit invalid"); D6JointData& d = data(); if(axis==PxD6Axis::eX) d.linearLimitX = limit; else if(axis==PxD6Axis::eY) d.linearLimitY = limit; else if(axis==PxD6Axis::eZ) d.linearLimitZ = limit; else return; d.mUseNewLinearLimits = true; markDirty(); #if PX_SUPPORT_OMNI_PVD OMNI_PVD_WRITE_SCOPE_BEGIN(pvdWriter, pvdRegData) const PxD6Joint& j = static_cast<const PxD6Joint&>(*this); const PxU32 valueCount = 3; PxReal values[valueCount]; for (PxU32 i = 0; i < valueCount; ++i) values[i] = getLinearLimit(PxD6Axis::Enum(i)).lower; OMNI_PVD_SET_ARRAY_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, linearLimitLower, j, values, valueCount) for (PxU32 i = 0; i < valueCount; ++i) values[i] = getLinearLimit(PxD6Axis::Enum(i)).upper; OMNI_PVD_SET_ARRAY_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, linearLimitUpper, j, values, valueCount) for (PxU32 i = 0; i < valueCount; ++i) values[i] = 
getLinearLimit(PxD6Axis::Enum(i)).restitution; OMNI_PVD_SET_ARRAY_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, linearLimitRestitution, j, values, valueCount) for (PxU32 i = 0; i < valueCount; ++i) values[i] = getLinearLimit(PxD6Axis::Enum(i)).bounceThreshold; OMNI_PVD_SET_ARRAY_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, linearLimitBounceThreshold, j, values, valueCount) for (PxU32 i = 0; i < valueCount; ++i) values[i] = getLinearLimit(PxD6Axis::Enum(i)).stiffness; OMNI_PVD_SET_ARRAY_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, linearLimitStiffness, j, values, valueCount) for (PxU32 i = 0; i < valueCount; ++i) values[i] = getLinearLimit(PxD6Axis::Enum(i)).damping; OMNI_PVD_SET_ARRAY_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, linearLimitDamping, j, values, valueCount) OMNI_PVD_WRITE_SCOPE_END #endif } PxJointLinearLimitPair D6Joint::getLinearLimit(PxD6Axis::Enum axis) const { PX_CHECK_AND_RETURN_VAL(axis>=PxD6Axis::eX && axis<=PxD6Axis::eZ, "PxD6Joint::getLinearLimit: invalid axis value", PxJointLinearLimitPair(PxTolerancesScale(), 0.0f, 0.0f)); const D6JointData& d = data(); if(axis==PxD6Axis::eX) return d.linearLimitX; else if(axis==PxD6Axis::eY) return d.linearLimitY; else if(axis==PxD6Axis::eZ) return d.linearLimitZ; return PxJointLinearLimitPair(PxTolerancesScale(), 0.0f, 0.0f); } PxJointAngularLimitPair D6Joint::getTwistLimit() const { return data().twistLimit; } void D6Joint::setTwistLimit(const PxJointAngularLimitPair& l) { PX_CHECK_AND_RETURN(l.isValid(), "PxD6Joint::setTwistLimit: limit invalid"); // PT: the tangent version is not compatible with the double-cover feature, since the potential limit extent in that case is 4*PI. // i.e. we'd potentially take the tangent of something equal to PI/2. So the tangent stuff makes the limits less accurate, and it // also reduces the available angular range for the joint. All that for questionable performance gains. 
PX_CHECK_AND_RETURN(l.lower>-PxTwoPi && l.upper<PxTwoPi , "PxD6Joint::twist limit must be strictly between -2*PI and 2*PI"); data().twistLimit = l; markDirty(); #if PX_SUPPORT_OMNI_PVD OMNI_PVD_WRITE_SCOPE_BEGIN(pvdWriter, pvdRegData) const PxD6Joint& j = static_cast<const PxD6Joint&>(*this); OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, twistLimitLower, j, l.lower) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, twistLimitUpper, j, l.upper) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, twistLimitRestitution, j, l.restitution) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, twistLimitBounceThreshold, j, l.bounceThreshold) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, twistLimitStiffness, j, l.stiffness) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, twistLimitDamping, j, l.damping) OMNI_PVD_WRITE_SCOPE_END #endif } PxJointLimitPyramid D6Joint::getPyramidSwingLimit() const { return data().pyramidSwingLimit; } void D6Joint::setPyramidSwingLimit(const PxJointLimitPyramid& l) { PX_CHECK_AND_RETURN(l.isValid(), "PxD6Joint::setPyramidSwingLimit: limit invalid"); data().pyramidSwingLimit = l; data().mUsePyramidLimits = true; markDirty(); #if PX_SUPPORT_OMNI_PVD OMNI_PVD_WRITE_SCOPE_BEGIN(pvdWriter, pvdRegData) const PxD6Joint& j = static_cast<const PxD6Joint&>(*this); OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, pyramidSwingLimitYAngleMin, j, l.yAngleMin) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, pyramidSwingLimitYAngleMax, j, l.yAngleMax) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, pyramidSwingLimitZAngleMin, j, l.zAngleMin) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, pyramidSwingLimitZAngleMax, j, l.zAngleMax) 
OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, pyramidSwingLimitRestitution, j, l.restitution) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, pyramidSwingLimitBounceThreshold, j, l.bounceThreshold) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, pyramidSwingLimitStiffness, j, l.stiffness) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, pyramidSwingLimitDamping, j, l.damping) OMNI_PVD_WRITE_SCOPE_END #endif } PxJointLimitCone D6Joint::getSwingLimit() const { return data().swingLimit; } void D6Joint::setSwingLimit(const PxJointLimitCone& l) { PX_CHECK_AND_RETURN(l.isValid(), "PxD6Joint::setSwingLimit: limit invalid"); data().swingLimit = l; data().mUseConeLimit = true; markDirty(); #if PX_SUPPORT_OMNI_PVD OMNI_PVD_WRITE_SCOPE_BEGIN(pvdWriter, pvdRegData) const PxD6Joint& j = static_cast<const PxD6Joint&>(*this); OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, swingLimitYAngle, j, l.yAngle) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, swingLimitZAngle, j, l.zAngle) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, swingLimitRestitution, j, l.restitution) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, swingLimitBounceThreshold, j, l.bounceThreshold) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, swingLimitStiffness, j, l.stiffness) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, swingLimitDamping, j, l.damping) OMNI_PVD_WRITE_SCOPE_END #endif } PxTransform D6Joint::getDrivePosition() const { return data().drivePosition; } void D6Joint::setDrivePosition(const PxTransform& pose, bool autowake) { PX_CHECK_AND_RETURN(pose.isSane(), "PxD6Joint::setDrivePosition: pose invalid"); data().drivePosition = pose.getNormalized(); if(autowake) 
wakeUpActors(); markDirty(); OMNI_PVD_SET(OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, drivePosition, static_cast<PxD6Joint&>(*this), pose) } void D6Joint::getDriveVelocity(PxVec3& linear, PxVec3& angular) const { linear = data().driveLinearVelocity; angular = data().driveAngularVelocity; } void D6Joint::setDriveVelocity(const PxVec3& linear, const PxVec3& angular, bool autowake) { PX_CHECK_AND_RETURN(linear.isFinite() && angular.isFinite(), "PxD6Joint::setDriveVelocity: velocity invalid"); data().driveLinearVelocity = linear; data().driveAngularVelocity = angular; if(autowake) wakeUpActors(); markDirty(); #if PX_SUPPORT_OMNI_PVD OMNI_PVD_WRITE_SCOPE_BEGIN(pvdWriter, pvdRegData) const PxD6Joint& j = static_cast<const PxD6Joint&>(*this); OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, driveLinVelocity, j, linear) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, driveAngVelocity, j, angular) OMNI_PVD_WRITE_SCOPE_END #endif } void* D6Joint::prepareData() { D6JointData& d = data(); if(mRecomputeMotion) { mRecomputeMotion = false; d.driving = 0; d.limited = 0; d.locked = 0; for(PxU32 i=0;i<PxD6Axis::eCOUNT;i++) { if(d.motion[i] == PxD6Motion::eLIMITED) d.limited |= 1<<i; else if(d.motion[i] == PxD6Motion::eLOCKED) d.locked |= 1<<i; } // a linear direction isn't driven if it's locked if(active(PxD6Drive::eX) && d.motion[PxD6Axis::eX]!=PxD6Motion::eLOCKED) d.driving |= 1<< PxD6Drive::eX; if(active(PxD6Drive::eY) && d.motion[PxD6Axis::eY]!=PxD6Motion::eLOCKED) d.driving |= 1<< PxD6Drive::eY; if(active(PxD6Drive::eZ) && d.motion[PxD6Axis::eZ]!=PxD6Motion::eLOCKED) d.driving |= 1<< PxD6Drive::eZ; // SLERP drive requires all angular dofs unlocked, and inhibits swing/twist const bool swing1Locked = d.motion[PxD6Axis::eSWING1] == PxD6Motion::eLOCKED; const bool swing2Locked = d.motion[PxD6Axis::eSWING2] == PxD6Motion::eLOCKED; const bool twistLocked = d.motion[PxD6Axis::eTWIST] == PxD6Motion::eLOCKED; 
if(active(PxD6Drive::eSLERP) && !swing1Locked && !swing2Locked && !twistLocked) d.driving |= 1<<PxD6Drive::eSLERP; else { if(active(PxD6Drive::eTWIST) && !twistLocked) d.driving |= 1<<PxD6Drive::eTWIST; if(active(PxD6Drive::eSWING) && (!swing1Locked || !swing2Locked)) d.driving |= 1<< PxD6Drive::eSWING; } } this->D6JointT::prepareData(); return mData; } static PX_FORCE_INLINE PxReal computePhi(const PxQuat& q) { PxQuat twist = q; twist.normalize(); PxReal angle = twist.getAngle(); if(twist.x<0.0f) angle = -angle; return angle; } static void visualizeAngularLimit(PxConstraintVisualizer& viz, const PxTransform& t, float swingLimitYZ) { viz.visualizeAngularLimit(t, -swingLimitYZ, swingLimitYZ); } static void visualizeDoubleCone(PxConstraintVisualizer& viz, const PxTransform& t, float swingLimitYZ) { viz.visualizeDoubleCone(t, swingLimitYZ); } static void visualizeCone(PxConstraintVisualizer& viz, const D6JointData& data, const PxTransform& cA2w) { viz.visualizeLimitCone(cA2w, PxTan(data.swingLimit.zAngle / 4), PxTan(data.swingLimit.yAngle / 4)); } static void visualizeLine(PxConstraintVisualizer& viz, const PxVec3& origin, const PxVec3& axis, const PxJointLinearLimitPair& limit) { const PxVec3 p0 = origin + axis * limit.lower; const PxVec3 p1 = origin + axis * limit.upper; viz.visualizeLine(p0, p1, PxU32(PxDebugColor::eARGB_YELLOW)); } static void visualizeQuad(PxConstraintVisualizer& viz, const PxVec3& origin, const PxVec3& axis0, const PxJointLinearLimitPair& limit0, const PxVec3& axis1, const PxJointLinearLimitPair& limit1) { const PxU32 color = PxU32(PxDebugColor::eARGB_YELLOW); const PxVec3 l0 = axis0 * limit0.lower; const PxVec3 u0 = axis0 * limit0.upper; const PxVec3 l1 = axis1 * limit1.lower; const PxVec3 u1 = axis1 * limit1.upper; const PxVec3 p0 = origin + l0 + l1; const PxVec3 p1 = origin + u0 + l1; const PxVec3 p2 = origin + u0 + u1; const PxVec3 p3 = origin + l0 + u1; viz.visualizeLine(p0, p1, color); viz.visualizeLine(p1, p2, color); 
viz.visualizeLine(p2, p3, color); viz.visualizeLine(p3, p0, color); } static void visualizeBox(PxConstraintVisualizer& viz, const PxVec3& origin, const PxVec3& axis0, const PxJointLinearLimitPair& limit0, const PxVec3& axis1, const PxJointLinearLimitPair& limit1, const PxVec3& axis2, const PxJointLinearLimitPair& limit2) { const PxU32 color = PxU32(PxDebugColor::eARGB_YELLOW); const PxVec3 l0 = axis0 * limit0.lower; const PxVec3 u0 = axis0 * limit0.upper; const PxVec3 l1 = axis1 * limit1.lower; const PxVec3 u1 = axis1 * limit1.upper; const PxVec3 l2 = axis2 * limit2.lower; const PxVec3 u2 = axis2 * limit2.upper; const PxVec3 p0 = origin + l0 + l1 + l2; const PxVec3 p1 = origin + u0 + l1 + l2; const PxVec3 p2 = origin + u0 + u1 + l2; const PxVec3 p3 = origin + l0 + u1 + l2; const PxVec3 p0b = origin + l0 + l1 + u2; const PxVec3 p1b = origin + u0 + l1 + u2; const PxVec3 p2b = origin + u0 + u1 + u2; const PxVec3 p3b = origin + l0 + u1 + u2; viz.visualizeLine(p0, p1, color); viz.visualizeLine(p1, p2, color); viz.visualizeLine(p2, p3, color); viz.visualizeLine(p3, p0, color); viz.visualizeLine(p0b, p1b, color); viz.visualizeLine(p1b, p2b, color); viz.visualizeLine(p2b, p3b, color); viz.visualizeLine(p3b, p0b, color); viz.visualizeLine(p0, p0b, color); viz.visualizeLine(p1, p1b, color); viz.visualizeLine(p2, p2b, color); viz.visualizeLine(p3, p3b, color); } static float computeLimitedDistance(const D6JointData& data, const PxTransform& cB2cA, const PxMat33& cA2w_m, PxVec3& _limitDir) { PxVec3 limitDir(0.0f); for(PxU32 i = 0; i<3; i++) { if(data.limited & (1 << (PxD6Axis::eX + i))) limitDir += cA2w_m[i] * cB2cA.p[i]; } _limitDir = limitDir; return limitDir.magnitude(); } static void drawPyramid(PxConstraintVisualizer& viz, const D6JointData& data, const PxTransform& cA2w, const PxQuat& /*swing*/, bool /*useY*/, bool /*useZ*/) { struct Local { static void drawArc(PxConstraintVisualizer& _viz, const PxTransform& _cA2w, float ymin, float ymax, float zmin, float zmax, PxU32 
color) { // PT: we use 32 segments for the cone case, so that's 32/4 segments per arc in the pyramid case const PxU32 nb = 32/4; PxVec3 prev(0.0f); for(PxU32 i=0;i<nb;i++) { const float coeff = float(i)/float(nb-1); const float y = coeff*ymax + (1.0f-coeff)*ymin; const float z = coeff*zmax + (1.0f-coeff)*zmin; const float r = 1.0f; PxMat33 my; PxSetRotZ(my, z); PxMat33 mz; PxSetRotY(mz, y); const PxVec3 p0 = (my*mz).transform(PxVec3(r, 0.0f, 0.0f)); const PxVec3 p0w = _cA2w.transform(p0); _viz.visualizeLine(_cA2w.p, p0w, color); if(i) _viz.visualizeLine(prev, p0w, color); prev = p0w; } } }; const PxJointLimitPyramid& l = data.pyramidSwingLimit; const PxU32 color = PxU32(PxDebugColor::eARGB_YELLOW); Local::drawArc(viz, cA2w, l.yAngleMin, l.yAngleMin, l.zAngleMin, l.zAngleMax, color); Local::drawArc(viz, cA2w, l.yAngleMax, l.yAngleMax, l.zAngleMin, l.zAngleMax, color); Local::drawArc(viz, cA2w, l.yAngleMin, l.yAngleMax, l.zAngleMin, l.zAngleMin, color); Local::drawArc(viz, cA2w, l.yAngleMin, l.yAngleMax, l.zAngleMax, l.zAngleMax, color); } static void D6JointVisualize(PxConstraintVisualizer& viz, const void* constantBlock, const PxTransform& body0Transform, const PxTransform& body1Transform, PxU32 flags) { const PxU32 SWING1_FLAG = 1<<PxD6Axis::eSWING1, SWING2_FLAG = 1<<PxD6Axis::eSWING2, TWIST_FLAG = 1<<PxD6Axis::eTWIST; const PxU32 ANGULAR_MASK = SWING1_FLAG | SWING2_FLAG | TWIST_FLAG; const PxU32 LINEAR_MASK = 1<<PxD6Axis::eX | 1<<PxD6Axis::eY | 1<<PxD6Axis::eZ; PX_UNUSED(ANGULAR_MASK); PX_UNUSED(LINEAR_MASK); const D6JointData& data = *reinterpret_cast<const D6JointData*>(constantBlock); PxTransform32 cA2w, cB2w; joint::computeJointFrames(cA2w, cB2w, data, body0Transform, body1Transform); if(flags & PxConstraintVisualizationFlag::eLOCAL_FRAMES) viz.visualizeJointFrames(cA2w, cB2w); if(flags & PxConstraintVisualizationFlag::eLIMITS) { const PxTransform cB2cA = cA2w.transformInv(cB2w); const PxMat33Padded cA2w_m(cA2w.q); const PxMat33Padded cB2w_m(cB2w.q); 
if(data.mUseNewLinearLimits) { switch(data.limited) { case 1<<PxD6Axis::eX: visualizeLine(viz, cA2w.p, cA2w_m.column0, data.linearLimitX); break; case 1<<PxD6Axis::eY: visualizeLine(viz, cA2w.p, cA2w_m.column1, data.linearLimitY); break; case 1<<PxD6Axis::eZ: visualizeLine(viz, cA2w.p, cA2w_m.column2, data.linearLimitZ); break; case 1<<PxD6Axis::eX|1<<PxD6Axis::eY: visualizeQuad(viz, cA2w.p, cA2w_m.column0, data.linearLimitX, cA2w_m.column1, data.linearLimitY); break; case 1<<PxD6Axis::eX|1<<PxD6Axis::eZ: visualizeQuad(viz, cA2w.p, cA2w_m.column0, data.linearLimitX, cA2w_m.column2, data.linearLimitZ); break; case 1<<PxD6Axis::eY|1<<PxD6Axis::eZ: visualizeQuad(viz, cA2w.p, cA2w_m.column1, data.linearLimitY, cA2w_m.column2, data.linearLimitZ); break; case 1<<PxD6Axis::eX|1<<PxD6Axis::eY|1<<PxD6Axis::eZ: visualizeBox(viz, cA2w.p, cA2w_m.column0, data.linearLimitX, cA2w_m.column1, data.linearLimitY, cA2w_m.column2, data.linearLimitZ); break; } } if(data.mUseDistanceLimit) // PT: old linear/distance limit { PxVec3 limitDir; const float distance = computeLimitedDistance(data, cB2cA, cA2w_m, limitDir); // visualise only if some of the axis is limited if(distance > data.distanceMinDist) { PxU32 color = 0x00ff00; if(distance>data.distanceLimit.value) color = 0xff0000; viz.visualizeLine(cA2w.p, cB2w.p, color); } } PxQuat swing, twist; PxSeparateSwingTwist(cB2cA.q, swing, twist); if(data.limited&TWIST_FLAG) viz.visualizeAngularLimit(cA2w, data.twistLimit.lower, data.twistLimit.upper); const bool swing1Limited = (data.limited & SWING1_FLAG)!=0, swing2Limited = (data.limited & SWING2_FLAG)!=0; if(swing1Limited && swing2Limited) { if(data.mUseConeLimit) visualizeCone(viz, data, cA2w); if(data.mUsePyramidLimits) drawPyramid(viz, data, cA2w, swing, true, true); } else if(swing1Limited ^ swing2Limited) { const PxTransform yToX(PxVec3(0.0f), PxQuat(-PxPi/2.0f, PxVec3(0.0f, 0.0f, 1.0f))); const PxTransform zToX(PxVec3(0.0f), PxQuat(PxPi/2.0f, PxVec3(0.0f, 1.0f, 0.0f))); 
if(swing1Limited) { if(data.locked & SWING2_FLAG) { if(data.mUsePyramidLimits) drawPyramid(viz, data, cA2w, swing, true, false); else // PT:: tag: scalar transform*transform visualizeAngularLimit(viz, cA2w * yToX, data.swingLimit.yAngle); // PT: swing Y limited, swing Z locked } else if(!data.mUsePyramidLimits) // PT:: tag: scalar transform*transform visualizeDoubleCone(viz, cA2w * zToX, data.swingLimit.yAngle); // PT: swing Y limited, swing Z free } else { if(data.locked & SWING1_FLAG) { if(data.mUsePyramidLimits) drawPyramid(viz, data, cA2w, swing, false, true); else // PT:: tag: scalar transform*transform visualizeAngularLimit(viz, cA2w * zToX, data.swingLimit.zAngle); // PT: swing Z limited, swing Y locked } else if(!data.mUsePyramidLimits) // PT:: tag: scalar transform*transform visualizeDoubleCone(viz, cA2w * yToX, data.swingLimit.zAngle); // PT: swing Z limited, swing Y free } } } } static PX_FORCE_INLINE void setupSingleSwingLimit(joint::ConstraintHelper& ch, const D6JointData& data, const PxVec3& axis, float swingYZ, float swingW, float swingLimitYZ) { ch.anglePair(computeSwingAngle(swingYZ, swingW), -swingLimitYZ, swingLimitYZ, axis, data.swingLimit); } static PX_FORCE_INLINE void setupDualConeSwingLimits(joint::ConstraintHelper& ch, const D6JointData& data, const PxVec3& axis, float sin, float swingLimitYZ) { ch.anglePair(PxAsin(sin), -swingLimitYZ, swingLimitYZ, axis.getNormalized(), data.swingLimit); } static void setupConeSwingLimits(joint::ConstraintHelper& ch, const D6JointData& data, const PxQuat& swing, const PxTransform& cA2w) { PxVec3 axis; PxReal error; const Cm::ConeLimitHelperTanLess coneHelper(data.swingLimit.yAngle, data.swingLimit.zAngle); coneHelper.getLimit(swing, axis, error); ch.angularLimit(cA2w.rotate(axis), error, data.swingLimit); } static void setupPyramidSwingLimits(joint::ConstraintHelper& ch, const D6JointData& data, const PxQuat& swing, const PxTransform& cA2w, bool useY, bool useZ) { const PxQuat q = cA2w.q * swing; const 
PxJointLimitPyramid& l = data.pyramidSwingLimit; if(useY) ch.anglePair(computeSwingAngle(swing.y, swing.w), l.yAngleMin, l.yAngleMax, q.getBasisVector1(), l); if(useZ) ch.anglePair(computeSwingAngle(swing.z, swing.w), l.zAngleMin, l.zAngleMax, q.getBasisVector2(), l); } static void setupLinearLimit(joint::ConstraintHelper& ch, const PxJointLinearLimitPair& limit, const float origin, const PxVec3& axis) { ch.linearLimit(axis, origin, limit.upper, limit); ch.linearLimit(-axis, -origin, -limit.lower, limit); } //TAG:solverprepshader static PxU32 D6JointSolverPrep(Px1DConstraint* constraints, PxVec3p& body0WorldOffset, PxU32 /*maxConstraints*/, PxConstraintInvMassScale& invMassScale, const void* constantBlock, const PxTransform& bA2w, const PxTransform& bB2w, bool useExtendedLimits, PxVec3p& cA2wOut, PxVec3p& cB2wOut) { const D6JointData& data = *reinterpret_cast<const D6JointData*>(constantBlock); PxTransform32 cA2w, cB2w; joint::ConstraintHelper ch(constraints, invMassScale, cA2w, cB2w, body0WorldOffset, data, bA2w, bB2w); const PxU32 SWING1_FLAG = 1<<PxD6Axis::eSWING1; const PxU32 SWING2_FLAG = 1<<PxD6Axis::eSWING2; const PxU32 TWIST_FLAG = 1<<PxD6Axis::eTWIST; const PxU32 ANGULAR_MASK = SWING1_FLAG | SWING2_FLAG | TWIST_FLAG; const PxU32 LINEAR_MASK = 1<<PxD6Axis::eX | 1<<PxD6Axis::eY | 1<<PxD6Axis::eZ; const PxD6JointDrive* drives = data.drive; PxU32 locked = data.locked; const PxU32 limited = data.limited; const PxU32 driving = data.driving; // PT: it is a mistake to use the neighborhood operator since it // prevents us from using the quat's double-cover feature. 
if(!useExtendedLimits) joint::applyNeighborhoodOperator(cA2w, cB2w); const PxTransform cB2cA = cA2w.transformInv(cB2w); PX_ASSERT(data.c2b[0].isValid()); PX_ASSERT(data.c2b[1].isValid()); PX_ASSERT(cA2w.isValid()); PX_ASSERT(cB2w.isValid()); PX_ASSERT(cB2cA.isValid()); const PxMat33Padded cA2w_m(cA2w.q); const PxMat33Padded cB2w_m(cB2w.q); // handy for swing computation const PxVec3& bX = cB2w_m.column0; const PxVec3& aY = cA2w_m.column1; const PxVec3& aZ = cA2w_m.column2; if(driving & ((1<<PxD6Drive::eX)|(1<<PxD6Drive::eY)|(1<<PxD6Drive::eZ))) { // TODO: make drive unilateral if we are outside the limit const PxVec3 posErr = data.drivePosition.p - cB2cA.p; for(PxU32 i=0; i<3; i++) { // -driveVelocity because velTarget is child (body1) - parent (body0) and Jacobian is 1 for body0 and -1 for parent if(driving & (1<<(PxD6Drive::eX+i))) ch.linear(cA2w_m[i], -data.driveLinearVelocity[i], posErr[i], drives[PxD6Drive::eX+i]); } } if(driving & ((1<<PxD6Drive::eSLERP)|(1<<PxD6Drive::eSWING)|(1<<PxD6Drive::eTWIST))) { const PxQuat d2cA_q = cB2cA.q.dot(data.drivePosition.q)>0.0f ? 
data.drivePosition.q : -data.drivePosition.q; const PxVec3& v = data.driveAngularVelocity; const PxQuat delta = d2cA_q.getConjugate() * cB2cA.q; if(driving & (1<<PxD6Drive::eSLERP)) { const PxVec3 velTarget = -cA2w.rotate(data.driveAngularVelocity); PxVec3 axis[3] = { PxVec3(1.0f, 0.0f, 0.0f), PxVec3(0.0f, 1.0f, 0.0f), PxVec3(0.0f, 0.0f, 1.0f) }; if(drives[PxD6Drive::eSLERP].stiffness!=0.0f) joint::computeJacobianAxes(axis, cA2w.q * d2cA_q, cB2w.q); // converges faster if there is only velocity drive for(PxU32 i=0; i<3; i++) ch.angular(axis[i], axis[i].dot(velTarget), -delta.getImaginaryPart()[i], drives[PxD6Drive::eSLERP], PxConstraintSolveHint::eSLERP_SPRING); } else { if(driving & (1<<PxD6Drive::eTWIST)) ch.angular(cA2w_m.column0, v.x, -2.0f * delta.x, drives[PxD6Drive::eTWIST]); if(driving & (1<<PxD6Drive::eSWING)) { const PxVec3 err = delta.getBasisVector0(); if(!(locked & SWING1_FLAG)) ch.angular(aY, v.y, err.z, drives[PxD6Drive::eSWING]); if(!(locked & SWING2_FLAG)) ch.angular(aZ, v.z, -err.y, drives[PxD6Drive::eSWING]); } } } if(limited & ANGULAR_MASK) { PxQuat swing, twist; PxSeparateSwingTwist(cB2cA.q, swing, twist); // swing limits: if just one is limited: if the other is free, we support // (-pi/2, +pi/2) limit, using tan of the half-angle as the error measure parameter. // If the other is locked, we support (-pi, +pi) limits using the tan of the quarter-angle // Notation: th == PxTanHalf, tq = tanQuarter if(limited & SWING1_FLAG && limited & SWING2_FLAG) { if(data.mUseConeLimit) setupConeSwingLimits(ch, data, swing, cA2w); // PT: no else here by design, we want to allow creating both the cone & the pyramid at the same time, // which can be useful to make the cone more robust against large velocities. 
if(data.mUsePyramidLimits) setupPyramidSwingLimits(ch, data, swing, cA2w, true, true); } else { if(limited & SWING1_FLAG) { if(locked & SWING2_FLAG) { if(data.mUsePyramidLimits) setupPyramidSwingLimits(ch, data, swing, cA2w, true, false); else setupSingleSwingLimit(ch, data, aY, swing.y, swing.w, data.swingLimit.yAngle); // PT: swing Y limited, swing Z locked } else { if(!data.mUsePyramidLimits) setupDualConeSwingLimits(ch, data, aZ.cross(bX), -aZ.dot(bX), data.swingLimit.yAngle); // PT: swing Y limited, swing Z free else PxGetFoundation().error(PxErrorCode::eINVALID_OPERATION, PX_FL, "D6JointSolverPrep: invalid joint setup. Double pyramid mode not supported."); } } if(limited & SWING2_FLAG) { if(locked & SWING1_FLAG) { if(data.mUsePyramidLimits) setupPyramidSwingLimits(ch, data, swing, cA2w, false, true); else setupSingleSwingLimit(ch, data, aZ, swing.z, swing.w, data.swingLimit.zAngle); // PT: swing Z limited, swing Y locked } else if(!data.mUsePyramidLimits) setupDualConeSwingLimits(ch, data, -aY.cross(bX), aY.dot(bX), data.swingLimit.zAngle); // PT: swing Z limited, swing Y free else PxGetFoundation().error(PxErrorCode::eINVALID_OPERATION, PX_FL, "D6JointSolverPrep: invalid joint setup. 
Double pyramid mode not supported."); } } if(limited & TWIST_FLAG) ch.anglePair(computePhi(twist), data.twistLimit.lower, data.twistLimit.upper, cB2w_m.column0, data.twistLimit); } if(limited & LINEAR_MASK) { if(data.mUseDistanceLimit) // PT: old linear/distance limit { PxVec3 limitDir; const float distance = computeLimitedDistance(data, cB2cA, cA2w_m, limitDir); if(distance > data.distanceMinDist) ch.linearLimit(limitDir * (1.0f/distance), distance, data.distanceLimit.value, data.distanceLimit); } if(data.mUseNewLinearLimits) // PT: new asymmetric linear limits { const PxVec3& bOriginInA = cB2cA.p; // PT: TODO: we check that the DOFs are not "locked" to be consistent with the prismatic joint, but it // doesn't look like this case is possible, since it would be caught by the "isValid" check when setting // the limits. And in fact the "distance" linear limit above doesn't do this check. if((limited & (1<<PxD6Axis::eX)) && data.linearLimitX.lower <= data.linearLimitX.upper) setupLinearLimit(ch, data.linearLimitX, bOriginInA.x, cA2w_m.column0); if((limited & (1<<PxD6Axis::eY)) && data.linearLimitY.lower <= data.linearLimitY.upper) setupLinearLimit(ch, data.linearLimitY, bOriginInA.y, cA2w_m.column1); if((limited & (1<<PxD6Axis::eZ)) && data.linearLimitZ.lower <= data.linearLimitZ.upper) setupLinearLimit(ch, data.linearLimitZ, bOriginInA.z, cA2w_m.column2); } } // we handle specially the case of just one swing dof locked const PxU32 angularLocked = locked & ANGULAR_MASK; if(angularLocked == SWING1_FLAG) { ch.angularHard(bX.cross(aZ), -bX.dot(aZ)); locked &= ~SWING1_FLAG; } else if(angularLocked == SWING2_FLAG) { locked &= ~SWING2_FLAG; ch.angularHard(bX.cross(aY), -bX.dot(aY)); } // PT: TODO: cA2w_m has already been computed above, no need to recompute it within prepareLockedAxes PxVec3 ra, rb; ch.prepareLockedAxes(cA2w.q, cB2w.q, cB2cA.p, locked&7, locked>>3, ra, rb); cA2wOut = ra + bA2w.p; cB2wOut = rb + bB2w.p; /*cA2wOut = cA2w.p; cB2wOut = cB2w.p;*/ // PT: TODO: 
check the number cannot be too high now return ch.getCount(); } /////////////////////////////////////////////////////////////////////////////// static PxConstraintShaderTable gD6JointShaders = { D6JointSolverPrep, D6JointVisualize, /*PxConstraintFlag::Enum(0)*/PxConstraintFlag::eGPU_COMPATIBLE }; PxConstraintSolverPrep D6Joint::getPrep() const { return gD6JointShaders.solverPrep; } PxD6Joint* physx::PxD6JointCreate(PxPhysics& physics, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1) { PX_CHECK_AND_RETURN_NULL(localFrame0.isSane(), "PxD6JointCreate: local frame 0 is not a valid transform"); PX_CHECK_AND_RETURN_NULL(localFrame1.isSane(), "PxD6JointCreate: local frame 1 is not a valid transform"); PX_CHECK_AND_RETURN_NULL(actor0 != actor1, "PxD6JointCreate: actors must be different"); PX_CHECK_AND_RETURN_NULL((actor0 && actor0->is<PxRigidBody>()) || (actor1 && actor1->is<PxRigidBody>()), "PxD6JointCreate: at least one actor must be dynamic"); return createJointT<D6Joint, D6JointData>(physics, actor0, localFrame0, actor1, localFrame1, gD6JointShaders); } // PX_SERIALIZATION void D6Joint::resolveReferences(PxDeserializationContext& context) { mPxConstraint = resolveConstraintPtr(context, mPxConstraint, this, gD6JointShaders); } //~PX_SERIALIZATION #if PX_SUPPORT_OMNI_PVD void D6Joint::updateOmniPvdProperties() const { OMNI_PVD_WRITE_SCOPE_BEGIN(pvdWriter, pvdRegData) const PxD6Joint& j = static_cast<const PxD6Joint&>(*this); OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, twistAngle, j, getTwistAngle()) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, swingYAngle, j, getSwingYAngle()) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, swingZAngle, j, getSwingZAngle()) OMNI_PVD_WRITE_SCOPE_END } template<> void physx::Ext::omniPvdInitJoint<D6Joint>(D6Joint& joint) { OMNI_PVD_WRITE_SCOPE_BEGIN(pvdWriter, pvdRegData) 
const PxD6Joint& j = static_cast<const PxD6Joint&>(joint); OMNI_PVD_CREATE_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, j); omniPvdSetBaseJointParams(static_cast<PxJoint&>(joint), PxJointConcreteType::eD6); PxD6Motion::Enum motions[PxD6Axis::eCOUNT]; for (PxU32 i = 0; i < PxD6Axis::eCOUNT; ++i) motions[i] = joint.getMotion(PxD6Axis::Enum(i)); OMNI_PVD_SET_ARRAY_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, motions, j, motions, PxD6Axis::eCOUNT) PxReal forceLimit[PxD6Axis::eCOUNT]; for (PxU32 i = 0; i < PxD6Axis::eCOUNT; ++i) forceLimit[i] = joint.getDrive(PxD6Drive::Enum(i)).forceLimit; OMNI_PVD_SET_ARRAY_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, driveForceLimit, j, forceLimit, PxD6Axis::eCOUNT) PxD6JointDriveFlags flags[PxD6Axis::eCOUNT]; for (PxU32 i = 0; i < PxD6Axis::eCOUNT; ++i) { flags[i] = joint.getDrive(PxD6Drive::Enum(i)).flags; } OMNI_PVD_SET_ARRAY_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, driveFlags, j, flags, PxD6Axis::eCOUNT) PxReal stiffness[PxD6Axis::eCOUNT]; for (PxU32 i = 0; i < PxD6Axis::eCOUNT; ++i) { stiffness[i] = joint.getDrive(PxD6Drive::Enum(i)).stiffness; } OMNI_PVD_SET_ARRAY_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, driveStiffness, j, stiffness, PxD6Axis::eCOUNT) PxReal damping[PxD6Axis::eCOUNT]; for (PxU32 i = 0; i < PxD6Axis::eCOUNT; ++i) { damping[i] = joint.getDrive(PxD6Drive::Enum(i)).damping; } OMNI_PVD_SET_ARRAY_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, driveDamping, j, damping, PxD6Axis::eCOUNT) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, drivePosition, j, joint.getDrivePosition()) PxVec3 driveLinVel, driveAngVel; joint.getDriveVelocity(driveLinVel, driveAngVel); OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, driveLinVelocity, j, driveLinVel) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, 
PxD6Joint, driveAngVelocity, j, driveAngVel) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, twistAngle, j, joint.getTwistAngle()) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, swingYAngle, j, joint.getSwingYAngle()) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxD6Joint, swingZAngle, j, joint.getSwingZAngle()) OMNI_PVD_WRITE_SCOPE_END } #endif
39,185
C++
37.79802
173
0.728544
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/ExtSqManager.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef EXT_SQ_MANAGER_H
#define EXT_SQ_MANAGER_H

#include "common/PxPhysXCommonConfig.h"
#include "foundation/PxBitMap.h"
#include "foundation/PxArray.h"

#include "SqPruner.h"
#include "SqManager.h"
#include "foundation/PxHashSet.h"

namespace physx
{
namespace Sq
{
	class CompoundPruner;
}
}

#include "foundation/PxMutex.h"

namespace physx
{
class PxRenderOutput;
class PxBVH;
class PxSceneLimits;

namespace Gu
{
	class BVH;
}

namespace Sq
{
	// PT: this is a customized version of physx::Sq::PrunerManager that supports more than 2 hardcoded pruners.
	// It might not be possible to support the whole PxSceneQuerySystem API with an arbitrary number of pruners.
	//
	// Owns a dynamic array of PrunerExt entries (one per registered pruner) plus a single
	// compound-pruner slot (mCompoundPrunerExt), and optionally builds a BVH over all pruners
	// ("tree of pruners") when mUsesTreeOfPruners is set at construction time.
	class ExtPrunerManager : public PxUserAllocated
	{
	public:
		// contextID: profiling/logging context forwarded to sub-systems.
		// inflation: bounds-inflation epsilon (stored in mInflation, see SQ_PRUNER_EPSILON note below).
		// adapter: payload-to-object translation layer, held by reference — must outlive this manager.
		// usesTreeOfPruners: enables building a BVH over the pruners (see createTreeOfPruners()).
		ExtPrunerManager(PxU64 contextID, float inflation, const Adapter& adapter, bool usesTreeOfPruners);
		~ExtPrunerManager();

		// Registers an externally created pruner and returns its index; 'preallocated' is a
		// shape-count hint. Ownership semantics are not visible here — presumably the manager
		// takes ownership of 'pruner' (it stores raw PrunerExt pointers); TODO confirm in the .cpp.
		PxU32 addPruner(Gu::Pruner* pruner, PxU32 preallocated);

		// Shape management. 'prunerIndex' selects the target pruner; 'compoundId' routes the
		// shape to the compound pruner when applicable.
		Gu::PrunerHandle addPrunerShape(const Gu::PrunerPayload& payload, PxU32 prunerIndex, bool dynamic, PrunerCompoundId compoundId, const PxBounds3& bounds, const PxTransform& transform, bool hasPruningStructure=false);
		void addCompoundShape(const PxBVH& bvh, PrunerCompoundId compoundId, const PxTransform& compoundTransform, Gu::PrunerHandle* prunerHandle, const Gu::PrunerPayload* payloads, const PxTransform* transforms, bool isDynamic);
		void markForUpdate(PxU32 prunerIndex, bool dynamic, PrunerCompoundId compoundId, Gu::PrunerHandle shapeHandle, const PxTransform& transform);
		void removePrunerShape(PxU32 prunerIndex, bool dynamic, PrunerCompoundId compoundId, Gu::PrunerHandle shapeHandle, Gu::PrunerPayloadRemovalCallback* removalCallback);

		// Accessors. getPruner() indexes mPrunerExt without bounds checking — callers must pass
		// an index previously returned by addPruner().
		PX_FORCE_INLINE PxU32 getNbPruners() const { return mPrunerExt.size(); }
		PX_FORCE_INLINE const Gu::Pruner* getPruner(PxU32 index) const { return mPrunerExt[index]->mPruner; }
		PX_FORCE_INLINE Gu::Pruner* getPruner(PxU32 index) { return mPrunerExt[index]->mPruner; }
		PX_FORCE_INLINE const CompoundPruner* getCompoundPruner() const { return mCompoundPrunerExt.mPruner; }
		PX_FORCE_INLINE PxU64 getContextId() const { return mContextID; }

		// Incremental-build / update pipeline. Exact semantics live in the .cpp; these mirror
		// the physx::Sq::PrunerManager API the class is derived from (see class comment).
		void preallocate(PxU32 prunerIndex, PxU32 nbShapes);
		void setDynamicTreeRebuildRateHint(PxU32 dynTreeRebuildRateHint);
		PX_FORCE_INLINE PxU32 getDynamicTreeRebuildRateHint() const { return mRebuildRateHint; }
		void flushUpdates();
		void forceRebuildDynamicTree(PxU32 prunerIndex);
		void updateCompoundActor(PrunerCompoundId compoundId, const PxTransform& compoundTransform);
		void removeCompoundActor(PrunerCompoundId compoundId, Gu::PrunerPayloadRemovalCallback* removalCallback);
		void* prepareSceneQueriesUpdate(PxU32 prunerIndex);
		void sceneQueryBuildStep(void* handle);
		// Batched bounds/transform synchronization; 'ignoredIndices' marks entries to skip.
		void sync(PxU32 prunerIndex, const Gu::PrunerHandle* handles, const PxU32* boundsIndices, const PxBounds3* bounds, const PxTransform32* transforms, PxU32 count, const PxBitMap& ignoredIndices);
		void afterSync(bool buildStep, bool commit);
		void shiftOrigin(const PxVec3& shift);
		void visualize(PxU32 prunerIndex, PxRenderOutput& out) const;
		void flushMemory();
		// mStaticTimestamp is bumped by invalidateStaticTimestamp() below; clients can poll it
		// to detect changes to static geometry.
		PX_FORCE_INLINE PxU32 getStaticTimestamp() const { return mStaticTimestamp; }
		PX_FORCE_INLINE const Adapter& getAdapter() const { return mAdapter; }
		PX_FORCE_INLINE const Gu::BVH* getTreeOfPruners() const { return mTreeOfPruners; }

		// Custom (user-driven) build-step protocol: start → per-index customBuildstep → finish.
		PxU32 startCustomBuildstep();
		void customBuildstep(PxU32 index);
		void finishCustomBuildstep();

		void createTreeOfPruners();
	private:
		const Adapter& mAdapter;				// payload translation layer, not owned
		PxArray<PrunerExt*> mPrunerExt;			// one entry per registered pruner
		CompoundPrunerExt mCompoundPrunerExt;	// dedicated compound-shape pruner slot
		Gu::BVH* mTreeOfPruners;				// optional BVH over all pruners, may be null
		const PxU64 mContextID;
		PxU32 mStaticTimestamp;					// see getStaticTimestamp()/invalidateStaticTimestamp()
		PxU32 mRebuildRateHint;					// see setDynamicTreeRebuildRateHint()
		const float mInflation; // SQ_PRUNER_EPSILON
		PxMutex mSQLock; // to make sure only one query updates the dirty pruner structure if multiple queries run in parallel
		volatile bool mPrunerNeedsUpdating;
		volatile bool mTimestampNeedsUpdating;
		const bool mUsesTreeOfPruners;			// set once at construction
		void flushShapes();
		PX_FORCE_INLINE void invalidateStaticTimestamp() { mStaticTimestamp++; }
		PX_NOCOPY(ExtPrunerManager)
	};
}
}
#endif
6,262
C
43.735714
233
0.724529
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/ExtConvexMeshExt.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "foundation/PxPlane.h"
#include "geometry/PxConvexMeshGeometry.h"
#include "geometry/PxConvexMesh.h"
#include "extensions/PxConvexMeshExt.h"

using namespace physx;

static const PxReal gEpsilon = .01f;

PxU32 physx::PxFindFaceIndex(const PxConvexMeshGeometry& convexGeom, const PxTransform& pose, const PxVec3& impactPos, const PxVec3& unitDir)
{
	PX_ASSERT(unitDir.isFinite());
	PX_ASSERT(unitDir.isNormalized());
	PX_ASSERT(impactPos.isFinite());
	PX_ASSERT(pose.isFinite());

	// Nudge the impact point slightly back along the sweep direction, then move
	// everything into the shape's local frame.
	const PxVec3 probePoint = impactPos - unitDir * gEpsilon;
	const PxVec3 pointInShape = pose.transformInv(probePoint);
	const PxVec3 dirInShape = pose.rotateInv(unitDir);

	// Build the shape-space -> vertex-space scale matrix: R^T * diag(1/s) * R.
	const PxMeshScale& meshScale = convexGeom.scale;
	const PxMat33 scaleRot(meshScale.rotation);
	const PxMat33 invScale = PxMat33::createDiagonal(PxVec3(1.0f / meshScale.scale.x, 1.0f / meshScale.scale.y, 1.0f / meshScale.scale.z));
	const PxMat33 shapeToVertex = scaleRot.getTranspose() * invScale * scaleRot;

	// Scan every hull face and keep the one whose (bias-adjusted) signed distance
	// to the probe point is smallest among the faces the point lies in front of.
	const PxU32 polyCount = convexGeom.convexMesh->getNbPolygons();
	PxU32 bestPoly = 0;
	PxReal bestDist = PX_MAX_REAL;
	for (PxU32 i = 0; i < polyCount; i++)
	{
		PxHullPolygon poly;
		convexGeom.convexMesh->getPolygonData(i, poly);

		// Bring the hull plane into shape space and renormalize it.
		const PxVec3 planeVec = shapeToVertex.transformTranspose(PxVec3(poly.mPlane[0], poly.mPlane[1], poly.mPlane[2]));
		const PxReal invLen = 1.0f / planeVec.magnitude();
		PxPlane facePlane;
		facePlane.n = planeVec * invLen;
		facePlane.d = poly.mPlane[3] * invLen;

		PxReal dist = facePlane.distance(pointInShape);
		if (dist >= 0.0f)	// ignore faces the probe point is behind
		{
			// Bias the distance by how well the face normal aligns with the sweep direction.
			const PxReal bias = facePlane.n.dot(dirInShape) * gEpsilon;
			dist += bias;

			if (dist < bestDist)
			{
				bestPoly = i;
				bestDist = dist;
			}
		}
	}
	return bestPoly;
}
3,545
C++
38.4
136
0.750353
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/ExtPrismaticJoint.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "ExtPrismaticJoint.h" #include "ExtConstraintHelper.h" #include "omnipvd/ExtOmniPvdSetData.h" using namespace physx; using namespace Ext; PrismaticJoint::PrismaticJoint(const PxTolerancesScale& scale, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1) : PrismaticJointT(PxJointConcreteType::ePRISMATIC, actor0, localFrame0, actor1, localFrame1, "PrismaticJointData") { PrismaticJointData* data = static_cast<PrismaticJointData*>(mData); data->limit = PxJointLinearLimitPair(scale); data->jointFlags = PxPrismaticJointFlags(); } PxPrismaticJointFlags PrismaticJoint::getPrismaticJointFlags(void) const { return data().jointFlags; } void PrismaticJoint::setPrismaticJointFlags(PxPrismaticJointFlags flags) { data().jointFlags = flags; markDirty(); OMNI_PVD_SET(OMNI_PVD_CONTEXT_HANDLE, PxPrismaticJoint, jointFlags, static_cast<PxPrismaticJoint&>(*this), flags) } void PrismaticJoint::setPrismaticJointFlag(PxPrismaticJointFlag::Enum flag, bool value) { if(value) data().jointFlags |= flag; else data().jointFlags &= ~flag; markDirty(); OMNI_PVD_SET(OMNI_PVD_CONTEXT_HANDLE, PxPrismaticJoint, jointFlags, static_cast<PxPrismaticJoint&>(*this), getPrismaticJointFlags()) } PxJointLinearLimitPair PrismaticJoint::getLimit() const { return data().limit; } void PrismaticJoint::setLimit(const PxJointLinearLimitPair& limit) { PX_CHECK_AND_RETURN(limit.isValid(), "PxPrismaticJoint::setLimit: invalid parameter"); data().limit = limit; markDirty(); #if PX_SUPPORT_OMNI_PVD OMNI_PVD_WRITE_SCOPE_BEGIN(pvdWriter, pvdRegData) PxPrismaticJoint& j = static_cast<PxPrismaticJoint&>(*this); OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxPrismaticJoint, limitLower, j, limit.lower) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxPrismaticJoint, limitUpper, j, limit.upper) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxPrismaticJoint, limitRestitution, j, 
limit.restitution) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxPrismaticJoint, limitBounceThreshold, j, limit.bounceThreshold) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxPrismaticJoint, limitStiffness, j, limit.stiffness) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxPrismaticJoint, limitDamping, j, limit.damping) OMNI_PVD_WRITE_SCOPE_END #endif } static void PrismaticJointVisualize(PxConstraintVisualizer& viz, const void* constantBlock, const PxTransform& body0Transform, const PxTransform& body1Transform, PxU32 flags) { const PrismaticJointData& data = *reinterpret_cast<const PrismaticJointData*>(constantBlock); PxTransform32 cA2w, cB2w; joint::computeJointFrames(cA2w, cB2w, data, body0Transform, body1Transform); if(flags & PxConstraintVisualizationFlag::eLOCAL_FRAMES) viz.visualizeJointFrames(cA2w, cB2w); if((flags & PxConstraintVisualizationFlag::eLIMITS) && (data.jointFlags & PxPrismaticJointFlag::eLIMIT_ENABLED)) { viz.visualizeLinearLimit(cA2w, cB2w, data.limit.lower); viz.visualizeLinearLimit(cA2w, cB2w, data.limit.upper); } } //TAG:solverprepshader static PxU32 PrismaticJointSolverPrep(Px1DConstraint* constraints, PxVec3p& body0WorldOffset, PxU32 /*maxConstraints*/, PxConstraintInvMassScale& invMassScale, const void* constantBlock, const PxTransform& bA2w, const PxTransform& bB2w, bool /*useExtendedLimits*/, PxVec3p& cA2wOut, PxVec3p& cB2wOut) { const PrismaticJointData& data = *reinterpret_cast<const PrismaticJointData*>(constantBlock); PxTransform32 cA2w, cB2w; joint::ConstraintHelper ch(constraints, invMassScale, cA2w, cB2w, body0WorldOffset, data, bA2w, bB2w); joint::applyNeighborhoodOperator(cA2w, cB2w); const bool limitEnabled = data.jointFlags & PxPrismaticJointFlag::eLIMIT_ENABLED; const PxJointLinearLimitPair& limit = data.limit; const bool limitIsLocked = limitEnabled && limit.lower >= limit.upper; const PxVec3 bOriginInA = cA2w.transformInv(cB2w.p); PxVec3 ra, rb, 
axis; ch.prepareLockedAxes(cA2w.q, cB2w.q, bOriginInA, limitIsLocked ? 7ul : 6ul, 7ul, ra, rb, &axis); cA2wOut = ra + bA2w.p; cB2wOut = rb + bB2w.p; if(limitEnabled && !limitIsLocked) { const PxReal ordinate = bOriginInA.x; ch.linearLimit(axis, ordinate, limit.upper, limit); ch.linearLimit(-axis, -ordinate, -limit.lower, limit); } return ch.getCount(); } /////////////////////////////////////////////////////////////////////////////// static PxConstraintShaderTable gPrismaticJointShaders = { PrismaticJointSolverPrep, PrismaticJointVisualize, PxConstraintFlag::Enum(0) }; PxConstraintSolverPrep PrismaticJoint::getPrep() const { return gPrismaticJointShaders.solverPrep; } PxPrismaticJoint* physx::PxPrismaticJointCreate(PxPhysics& physics, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1) { PX_CHECK_AND_RETURN_NULL(localFrame0.isSane(), "PxPrismaticJointCreate: local frame 0 is not a valid transform"); PX_CHECK_AND_RETURN_NULL(localFrame1.isSane(), "PxPrismaticJointCreate: local frame 1 is not a valid transform"); PX_CHECK_AND_RETURN_NULL((actor0 && actor0->is<PxRigidBody>()) || (actor1 && actor1->is<PxRigidBody>()), "PxPrismaticJointCreate: at least one actor must be dynamic"); PX_CHECK_AND_RETURN_NULL(actor0 != actor1, "PxPrismaticJointCreate: actors must be different"); return createJointT<PrismaticJoint, PrismaticJointData>(physics, actor0, localFrame0, actor1, localFrame1, gPrismaticJointShaders); } // PX_SERIALIZATION void PrismaticJoint::resolveReferences(PxDeserializationContext& context) { mPxConstraint = resolveConstraintPtr(context, mPxConstraint, this, gPrismaticJointShaders); } //~PX_SERIALIZATION #if PX_SUPPORT_OMNI_PVD void PrismaticJoint::updateOmniPvdProperties() const { OMNI_PVD_WRITE_SCOPE_BEGIN(pvdWriter, pvdRegData) const PxPrismaticJoint& j = static_cast<const PxPrismaticJoint&>(*this); OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxPrismaticJoint, position, j, 
getPosition()) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxPrismaticJoint, velocity, j, getVelocity()) OMNI_PVD_WRITE_SCOPE_END } template<> void physx::Ext::omniPvdInitJoint<PrismaticJoint>(PrismaticJoint& joint) { OMNI_PVD_WRITE_SCOPE_BEGIN(pvdWriter, pvdRegData) PxPrismaticJoint& j = static_cast<PxPrismaticJoint&>(joint); OMNI_PVD_CREATE_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxPrismaticJoint, j); omniPvdSetBaseJointParams(static_cast<PxJoint&>(joint), PxJointConcreteType::ePRISMATIC); PxJointLinearLimitPair limit = joint.getLimit(); OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxPrismaticJoint, limitLower, j, limit.lower) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxPrismaticJoint, limitUpper, j, limit.upper) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxPrismaticJoint, limitRestitution, j, limit.restitution) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxPrismaticJoint, limitBounceThreshold, j, limit.bounceThreshold) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxPrismaticJoint, limitStiffness, j, limit.stiffness) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxPrismaticJoint, limitDamping, j, limit.damping) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxPrismaticJoint, position, j, joint.getPosition()) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxPrismaticJoint, velocity, j, joint.getVelocity()) OMNI_PVD_WRITE_SCOPE_END } #endif
9,429
C++
43.904762
175
0.778768
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/ExtConstraintHelper.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef EXT_CONSTRAINT_HELPER_H #define EXT_CONSTRAINT_HELPER_H #include "foundation/PxAssert.h" #include "foundation/PxTransform.h" #include "foundation/PxMat33.h" #include "foundation/PxSIMDHelpers.h" #include "extensions/PxD6Joint.h" #include "ExtJointData.h" #include "foundation/PxVecMath.h" namespace physx { namespace Ext { namespace joint { PX_FORCE_INLINE void applyNeighborhoodOperator(const PxTransform32& cA2w, PxTransform32& cB2w) { if(cA2w.q.dot(cB2w.q)<0.0f) // minimum dist quat (equiv to flipping cB2bB.q, which we don't use anywhere) cB2w.q = -cB2w.q; } PX_INLINE void computeJointFrames(PxTransform32& cA2w, PxTransform32& cB2w, const JointData& data, const PxTransform& bA2w, const PxTransform& bB2w) { PX_ASSERT(bA2w.isValid() && bB2w.isValid()); aos::transformMultiply<false, true>(cA2w, bA2w, data.c2b[0]); aos::transformMultiply<false, true>(cB2w, bB2w, data.c2b[1]); PX_ASSERT(cA2w.isValid() && cB2w.isValid()); } PX_INLINE void computeJacobianAxes(PxVec3 row[3], const PxQuat& qa, const PxQuat& qb) { // Compute jacobian matrix for (qa* qb) [[* means conjugate in this expr]] // d/dt (qa* qb) = 1/2 L(qa*) R(qb) (omega_b - omega_a) // result is L(qa*) R(qb), where L(q) and R(q) are left/right q multiply matrix const PxReal wa = qa.w, wb = qb.w; const PxVec3 va(qa.x,qa.y,qa.z), vb(qb.x,qb.y,qb.z); const PxVec3 c = vb*wa + va*wb; const PxReal d0 = wa*wb; const PxReal d1 = va.dot(vb); const PxReal d = d0 - d1; row[0] = (va * vb.x + vb * va.x + PxVec3(d, c.z, -c.y)) * 0.5f; row[1] = (va * vb.y + vb * va.y + PxVec3(-c.z, d, c.x)) * 0.5f; row[2] = (va * vb.z + vb * va.z + PxVec3(c.y, -c.x, d)) * 0.5f; if((d0 + d1) != 0.0f) // check if relative rotation is 180 degrees which can lead to singular matrix return; else { row[0].x += PX_EPS_F32; row[1].y += PX_EPS_F32; row[2].z += PX_EPS_F32; } } PX_FORCE_INLINE Px1DConstraint* _linear(const PxVec3& axis, const PxVec3& ra, const PxVec3& rb, PxReal posErr, PxConstraintSolveHint::Enum hint, Px1DConstraint* c) { 
c->solveHint = PxU16(hint); c->linear0 = axis; c->angular0 = ra.cross(axis); c->linear1 = axis; c->angular1 = rb.cross(axis); c->geometricError = posErr; PX_ASSERT(c->linear0.isFinite()); PX_ASSERT(c->linear1.isFinite()); PX_ASSERT(c->angular0.isFinite()); PX_ASSERT(c->angular1.isFinite()); return c; } PX_FORCE_INLINE Px1DConstraint* _angular(const PxVec3& axis, PxReal posErr, PxConstraintSolveHint::Enum hint, Px1DConstraint* c) { c->solveHint = PxU16(hint); c->linear0 = PxVec3(0.0f); c->angular0 = axis; c->linear1 = PxVec3(0.0f); c->angular1 = axis; c->geometricError = posErr; c->flags |= Px1DConstraintFlag::eANGULAR_CONSTRAINT; return c; } class ConstraintHelper { Px1DConstraint* mConstraints; Px1DConstraint* mCurrent; PX_ALIGN(16, PxVec3p mRa); PX_ALIGN(16, PxVec3p mRb); PX_ALIGN(16, PxVec3p mCA2w); PX_ALIGN(16, PxVec3p mCB2w); public: ConstraintHelper(Px1DConstraint* c, const PxVec3& ra, const PxVec3& rb) : mConstraints(c), mCurrent(c), mRa(ra), mRb(rb) {} /*PX_NOINLINE*/ ConstraintHelper(Px1DConstraint* c, PxConstraintInvMassScale& invMassScale, PxTransform32& cA2w, PxTransform32& cB2w, PxVec3p& body0WorldOffset, const JointData& data, const PxTransform& bA2w, const PxTransform& bB2w) : mConstraints(c), mCurrent(c) { using namespace aos; V4StoreA(V4LoadA(&data.invMassScale.linear0), &invMassScale.linear0); //invMassScale = data.invMassScale; computeJointFrames(cA2w, cB2w, data, bA2w, bB2w); if(1) { const Vec4V cB2wV = V4LoadA(&cB2w.p.x); const Vec4V raV = V4Sub(cB2wV, V4LoadU(&bA2w.p.x)); // const PxVec3 ra = cB2w.p - bA2w.p; V4StoreU(raV, &body0WorldOffset.x); // body0WorldOffset = ra; V4StoreA(raV, &mRa.x); // mRa = ra; V4StoreA(V4Sub(cB2wV, V4LoadU(&bB2w.p.x)), &mRb.x); // mRb = cB2w.p - bB2w.p; V4StoreA(V4LoadA(&cA2w.p.x), &mCA2w.x); // mCA2w = cA2w.p; V4StoreA(cB2wV, &mCB2w.x); // mCB2w = cB2w.p; } else { const PxVec3 ra = cB2w.p - bA2w.p; body0WorldOffset = ra; mRa = ra; mRb = cB2w.p - bB2w.p; mCA2w = cA2w.p; mCB2w = cB2w.p; } } PX_FORCE_INLINE const 
PxVec3& getRa() const { return mRa; } PX_FORCE_INLINE const PxVec3& getRb() const { return mRb; } // hard linear & angular PX_FORCE_INLINE void linearHard(const PxVec3& axis, PxReal posErr) { Px1DConstraint* c = linear(axis, posErr, PxConstraintSolveHint::eEQUALITY); c->flags |= Px1DConstraintFlag::eOUTPUT_FORCE; } PX_FORCE_INLINE void angularHard(const PxVec3& axis, PxReal posErr) { Px1DConstraint* c = angular(axis, posErr, PxConstraintSolveHint::eEQUALITY); c->flags |= Px1DConstraintFlag::eOUTPUT_FORCE; } // limited linear & angular PX_FORCE_INLINE void linearLimit(const PxVec3& axis, PxReal ordinate, PxReal limitValue, const PxJointLimitParameters& limit) { if(!limit.isSoft() || ordinate > limitValue) addLimit(linear(axis, limitValue - ordinate, PxConstraintSolveHint::eNONE), limit); } PX_FORCE_INLINE void angularLimit(const PxVec3& axis, PxReal ordinate, PxReal limitValue, const PxJointLimitParameters& limit) { if(!limit.isSoft() || ordinate > limitValue) addLimit(angular(axis, limitValue - ordinate, PxConstraintSolveHint::eNONE), limit); } PX_FORCE_INLINE void angularLimit(const PxVec3& axis, PxReal error, const PxJointLimitParameters& limit) { addLimit(angular(axis, error, PxConstraintSolveHint::eNONE), limit); } PX_FORCE_INLINE void anglePair(PxReal angle, PxReal lower, PxReal upper, const PxVec3& axis, const PxJointLimitParameters& limit) { PX_ASSERT(lower<upper); const bool softLimit = limit.isSoft(); if(!softLimit || angle < lower) angularLimit(-axis, -(lower - angle), limit); if(!softLimit || angle > upper) angularLimit(axis, (upper - angle), limit); } // driven linear & angular PX_FORCE_INLINE void linear(const PxVec3& axis, PxReal velTarget, PxReal error, const PxD6JointDrive& drive) { addDrive(linear(axis, error, PxConstraintSolveHint::eNONE), velTarget, drive); } PX_FORCE_INLINE void angular(const PxVec3& axis, PxReal velTarget, PxReal error, const PxD6JointDrive& drive, PxConstraintSolveHint::Enum hint = PxConstraintSolveHint::eNONE) { 
addDrive(angular(axis, error, hint), velTarget, drive); } PX_FORCE_INLINE PxU32 getCount() const { return PxU32(mCurrent - mConstraints); } void prepareLockedAxes(const PxQuat& qA, const PxQuat& qB, const PxVec3& cB2cAp, PxU32 lin, PxU32 ang, PxVec3& raOut, PxVec3& rbOut, PxVec3* axis=NULL) { Px1DConstraint* current = mCurrent; PxVec3 errorVector(0.0f); PxVec3 ra = mRa; PxVec3 rb = mRb; if(lin) { const PxMat33Padded axes(qA); if(axis) *axis = axes.column0; if(lin&1) errorVector -= axes.column0 * cB2cAp.x; if(lin&2) errorVector -= axes.column1 * cB2cAp.y; if(lin&4) errorVector -= axes.column2 * cB2cAp.z; ra += errorVector; if(lin&1) _linear(axes.column0, ra, rb, -cB2cAp.x, PxConstraintSolveHint::eEQUALITY, current++); if(lin&2) _linear(axes.column1, ra, rb, -cB2cAp.y, PxConstraintSolveHint::eEQUALITY, current++); if(lin&4) _linear(axes.column2, ra, rb, -cB2cAp.z, PxConstraintSolveHint::eEQUALITY, current++); } if (ang) { const PxQuat qB2qA = qA.getConjugate() * qB; PxVec3 row[3]; computeJacobianAxes(row, qA, qB); if (ang & 1) _angular(row[0], -qB2qA.x, PxConstraintSolveHint::eEQUALITY, current++); if (ang & 2) _angular(row[1], -qB2qA.y, PxConstraintSolveHint::eEQUALITY, current++); if (ang & 4) _angular(row[2], -qB2qA.z, PxConstraintSolveHint::eEQUALITY, current++); } raOut = ra; rbOut = rb; for(Px1DConstraint* front = mCurrent; front < current; front++) front->flags |= Px1DConstraintFlag::eOUTPUT_FORCE; mCurrent = current; } PX_FORCE_INLINE Px1DConstraint* getConstraintRow() { return mCurrent++; } private: PX_FORCE_INLINE Px1DConstraint* linear(const PxVec3& axis, PxReal posErr, PxConstraintSolveHint::Enum hint) { return _linear(axis, mRa, mRb, posErr, hint, mCurrent++); } PX_FORCE_INLINE Px1DConstraint* angular(const PxVec3& axis, PxReal posErr, PxConstraintSolveHint::Enum hint) { return _angular(axis, posErr, hint, mCurrent++); } void addLimit(Px1DConstraint* c, const PxJointLimitParameters& limit) { PxU16 flags = PxU16(c->flags | 
Px1DConstraintFlag::eOUTPUT_FORCE); if(limit.isSoft()) { flags |= Px1DConstraintFlag::eSPRING; c->mods.spring.stiffness = limit.stiffness; c->mods.spring.damping = limit.damping; } else { c->solveHint = PxConstraintSolveHint::eINEQUALITY; c->mods.bounce.restitution = limit.restitution; c->mods.bounce.velocityThreshold = limit.bounceThreshold; if(c->geometricError>0.0f) flags |= Px1DConstraintFlag::eKEEPBIAS; if(limit.restitution>0.0f) flags |= Px1DConstraintFlag::eRESTITUTION; } c->flags = flags; c->minImpulse = 0.0f; } void addDrive(Px1DConstraint* c, PxReal velTarget, const PxD6JointDrive& drive) { c->velocityTarget = velTarget; PxU16 flags = PxU16(c->flags | Px1DConstraintFlag::eSPRING | Px1DConstraintFlag::eHAS_DRIVE_LIMIT); if(drive.flags & PxD6JointDriveFlag::eACCELERATION) flags |= Px1DConstraintFlag::eACCELERATION_SPRING; c->flags = flags; c->mods.spring.stiffness = drive.stiffness; c->mods.spring.damping = drive.damping; c->minImpulse = -drive.forceLimit; c->maxImpulse = drive.forceLimit; PX_ASSERT(c->linear0.isFinite()); PX_ASSERT(c->angular0.isFinite()); } }; } } // namespace } #endif
11,712
C
33.551622
177
0.676571
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/ExtDefaultCpuDispatcher.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef EXT_DEFAULT_CPU_DISPATCHER_H #define EXT_DEFAULT_CPU_DISPATCHER_H #include "common/PxProfileZone.h" #include "task/PxTask.h" #include "extensions/PxDefaultCpuDispatcher.h" #include "foundation/PxUserAllocated.h" #include "foundation/PxSync.h" #include "foundation/PxSList.h" #include "ExtSharedQueueEntryPool.h" namespace physx { namespace Ext { class CpuWorkerThread; #if PX_VC #pragma warning(push) #pragma warning(disable:4324) // Padding was added at the end of a structure because of a __declspec(align) value. #endif // Because of the SList member I assume class DefaultCpuDispatcher : public PxDefaultCpuDispatcher, public PxUserAllocated { friend class TaskQueueHelper; PX_NOCOPY(DefaultCpuDispatcher) private: ~DefaultCpuDispatcher(); public: DefaultCpuDispatcher(PxU32 numThreads, PxU32* affinityMasks, PxDefaultCpuDispatcherWaitForWorkMode::Enum mode = PxDefaultCpuDispatcherWaitForWorkMode::eWAIT_FOR_WORK, PxU32 yieldProcessorCount = 0); // PxCpuDispatcher virtual void submitTask(PxBaseTask& task) PX_OVERRIDE; virtual PxU32 getWorkerCount() const PX_OVERRIDE { return mNumThreads; } //~PxCpuDispatcher // PxDefaultCpuDispatcher virtual void release() PX_OVERRIDE; virtual void setRunProfiled(bool runProfiled) PX_OVERRIDE { mRunProfiled = runProfiled; } virtual bool getRunProfiled() const PX_OVERRIDE { return mRunProfiled; } //~PxDefaultCpuDispatcher PxBaseTask* getJob(); PxBaseTask* stealJob(); PxBaseTask* fetchNextTask(); PX_FORCE_INLINE void runTask(PxBaseTask& task) { if(mRunProfiled) { PX_PROFILE_ZONE(task.getName(), task.getContextId()); task.run(); } else task.run(); } void waitForWork() { PX_ASSERT(PxDefaultCpuDispatcherWaitForWorkMode::eWAIT_FOR_WORK == mWaitForWorkMode); mWorkReady.wait(); } void resetWakeSignal(); static void getAffinityMasks(PxU32* affinityMasks, PxU32 threadCount); PX_FORCE_INLINE PxDefaultCpuDispatcherWaitForWorkMode::Enum getWaitForWorkMode() const { return mWaitForWorkMode; } PX_FORCE_INLINE PxU32 getYieldProcessorCount() const { 
return mYieldProcessorCount; } protected: CpuWorkerThread* mWorkerThreads; SharedQueueEntryPool<> mQueueEntryPool; PxSList mJobList; PxSync mWorkReady; PxU8* mThreadNames; PxU32 mNumThreads; bool mShuttingDown; bool mRunProfiled; const PxDefaultCpuDispatcherWaitForWorkMode::Enum mWaitForWorkMode; const PxU32 mYieldProcessorCount; }; #if PX_VC #pragma warning(pop) #endif } // namespace Ext } #endif
4,717
C
39.672413
216
0.687301
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/ExtContactJoint.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "ExtContactJoint.h" #include "omnipvd/ExtOmniPvdSetData.h" using namespace physx; using namespace Ext; ContactJoint::ContactJoint(const PxTolerancesScale& scale, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1) : ContactJointT(PxJointConcreteType::eCONTACT, actor0, localFrame0, actor1, localFrame1, "ContactJointData") { PX_UNUSED(scale); ContactJointData* data = static_cast<ContactJointData*>(mData); data->contact = PxVec3(0.f); data->normal = PxVec3(0.f); data->penetration = 0.f; data->restitution = 0.f; data->bounceThreshold = 0.f; } PxVec3 ContactJoint::getContact() const { return data().contact; } void ContactJoint::setContact(const PxVec3& contact) { PX_CHECK_AND_RETURN(contact.isFinite(), "PxContactJoint::setContact: invalid parameter"); data().contact = contact; markDirty(); OMNI_PVD_SET(OMNI_PVD_CONTEXT_HANDLE, PxContactJoint, point, static_cast<PxContactJoint&>(*this), getContact()) } PxVec3 ContactJoint::getContactNormal() const { return data().normal; } void ContactJoint::setContactNormal(const PxVec3& normal) { PX_CHECK_AND_RETURN(normal.isFinite(), "PxContactJoint::setContactNormal: invalid parameter"); data().normal = normal; markDirty(); OMNI_PVD_SET(OMNI_PVD_CONTEXT_HANDLE, PxContactJoint, normal, static_cast<PxContactJoint&>(*this), getContactNormal()) } PxReal ContactJoint::getPenetration() const { return data().penetration; } void ContactJoint::setPenetration(PxReal penetration) { PX_CHECK_AND_RETURN(PxIsFinite(penetration), "ContactJoint::setPenetration: invalid parameter"); data().penetration = penetration; markDirty(); OMNI_PVD_SET(OMNI_PVD_CONTEXT_HANDLE, PxContactJoint, penetration, static_cast<PxContactJoint&>(*this), getPenetration()) } PxReal ContactJoint::getRestitution() const { return data().restitution; } void ContactJoint::setRestitution(const PxReal restitution) { PX_CHECK_AND_RETURN(PxIsFinite(restitution) && restitution >= 0.f && restitution <= 1.f, 
"ContactJoint::setRestitution: invalid parameter"); data().restitution = restitution; markDirty(); OMNI_PVD_SET(OMNI_PVD_CONTEXT_HANDLE, PxContactJoint, restitution, static_cast<PxContactJoint&>(*this), getRestitution()) } PxReal ContactJoint::getBounceThreshold() const { return data().bounceThreshold; } void ContactJoint::setBounceThreshold(const PxReal bounceThreshold) { PX_CHECK_AND_RETURN(PxIsFinite(bounceThreshold) && bounceThreshold > 0.f, "ContactJoint::setBounceThreshold: invalid parameter"); data().bounceThreshold = bounceThreshold; markDirty(); OMNI_PVD_SET(OMNI_PVD_CONTEXT_HANDLE, PxContactJoint, bounceThreshold, static_cast<PxContactJoint&>(*this), getBounceThreshold()) } void ContactJoint::computeJacobians(PxJacobianRow* jacobian) const { const PxVec3 cp = data().contact; const PxVec3 normal = data().normal; PxRigidActor* actor0, *actor1; this->getActors(actor0, actor1); PxVec3 raXn(0.f), rbXn(0.f); if (actor0 && actor0->is<PxRigidBody>()) { PxRigidBody* dyn = actor0->is<PxRigidBody>(); PxTransform cmassPose = dyn->getGlobalPose() * dyn->getCMassLocalPose(); raXn = (cp - cmassPose.p).cross(normal); } if (actor1 && actor1->is<PxRigidBody>()) { PxRigidBody* dyn = actor1->is<PxRigidBody>(); PxTransform cmassPose = dyn->getGlobalPose() * dyn->getCMassLocalPose(); rbXn = (cp - cmassPose.p).cross(normal); } jacobian->linear0 = normal; jacobian->angular0 = raXn; jacobian->linear1 = -normal; jacobian->angular1 = -rbXn; } PxU32 ContactJoint::getNbJacobianRows() const { return 1; } static void ContactJointVisualize(PxConstraintVisualizer& /*viz*/, const void* /*constantBlock*/, const PxTransform& /*body0Transform*/, const PxTransform& /*body1Transform*/, PxU32 /*flags*/) { //TODO } //TAG:solverprepshader static PxU32 ContactJointSolverPrep(Px1DConstraint* constraints, PxVec3p& body0WorldOffset, PxU32 /*maxConstraints*/, PxConstraintInvMassScale& /*invMassScale*/, const void* constantBlock, const PxTransform& bA2w, const PxTransform& bB2w, bool, PxVec3p& cA2wOut, 
PxVec3p& cB2wOut) { const ContactJointData& data = *reinterpret_cast<const ContactJointData*>(constantBlock); const PxVec3& contact = data.contact; const PxVec3& normal = data.normal; cA2wOut = contact; cB2wOut = contact; const PxVec3 ra = contact - bA2w.p; const PxVec3 rb = contact - bB2w.p; body0WorldOffset = PxVec3(0.f); Px1DConstraint& con = constraints[0]; con.linear0 = normal; con.linear1 = normal; con.angular0 = ra.cross(normal); con.angular1 = rb.cross(normal); con.geometricError = data.penetration; con.minImpulse = 0.f; con.maxImpulse = PX_MAX_F32; con.velocityTarget = 0.f; con.forInternalUse = 0.f; con.solveHint = 0; con.flags = Px1DConstraintFlag::eOUTPUT_FORCE; con.mods.bounce.restitution = data.restitution; con.mods.bounce.velocityThreshold = data.bounceThreshold; return 1; } /////////////////////////////////////////////////////////////////////////////// static PxConstraintShaderTable gContactJointShaders = { ContactJointSolverPrep, ContactJointVisualize, PxConstraintFlag::Enum(0) }; PxConstraintSolverPrep ContactJoint::getPrep() const { return gContactJointShaders.solverPrep; } PxContactJoint* physx::PxContactJointCreate(PxPhysics& physics, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1) { PX_CHECK_AND_RETURN_NULL(localFrame0.isSane(), "PxContactJointCreate: local frame 0 is not a valid transform"); PX_CHECK_AND_RETURN_NULL(localFrame1.isSane(), "PxContactJointCreate: local frame 1 is not a valid transform"); PX_CHECK_AND_RETURN_NULL(actor0 != actor1, "PxContactJointCreate: actors must be different"); PX_CHECK_AND_RETURN_NULL((actor0 && actor0->is<PxRigidBody>()) || (actor1 && actor1->is<PxRigidBody>()), "PxContactJointCreate: at least one actor must be dynamic"); return createJointT<ContactJoint, ContactJointData>(physics, actor0, localFrame0, actor1, localFrame1, gContactJointShaders); } // PX_SERIALIZATION void ContactJoint::resolveReferences(PxDeserializationContext& context) { 
mPxConstraint = resolveConstraintPtr(context, mPxConstraint, this, gContactJointShaders); } //~PX_SERIALIZATION #if PX_SUPPORT_OMNI_PVD template<> void physx::Ext::omniPvdInitJoint<ContactJoint>(ContactJoint& joint) { OMNI_PVD_WRITE_SCOPE_BEGIN(pvdWriter, pvdRegData) PxContactJoint& j = static_cast<PxContactJoint&>(joint); OMNI_PVD_CREATE_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxContactJoint, j); omniPvdSetBaseJointParams(static_cast<PxJoint&>(joint), PxJointConcreteType::eCONTACT); OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxContactJoint, point, j, joint.getContact()) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxContactJoint, normal, j, joint.getContactNormal()) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxContactJoint, penetration, j, joint.getPenetration()) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxContactJoint, restitution, j, joint.getRestitution()) OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, OMNI_PVD_CONTEXT_HANDLE, PxContactJoint, bounceThreshold, j, joint.getBounceThreshold()) OMNI_PVD_WRITE_SCOPE_END } #endif
8,998
C++
35.28629
192
0.757279
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/ExtPlatform.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PLATFORM_H #define PLATFORM_H #include <assert.h> #include "foundation/Px.h" #include "foundation/PxThread.h" #endif
1,824
C
49.694443
74
0.765351
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/ExtRigidBodyExt.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "geometry/PxBoxGeometry.h" #include "geometry/PxSphereGeometry.h" #include "geometry/PxCapsuleGeometry.h" #include "geometry/PxPlaneGeometry.h" #include "geometry/PxConvexMeshGeometry.h" #include "geometry/PxTriangleMeshGeometry.h" #include "geometry/PxHeightFieldGeometry.h" #include "geometry/PxGeometryHelpers.h" #include "geometry/PxConvexMesh.h" #include "geometry/PxTriangleMesh.h" #include "extensions/PxRigidBodyExt.h" #include "extensions/PxShapeExt.h" #include "extensions/PxMassProperties.h" #include "PxShape.h" #include "PxScene.h" #include "PxRigidDynamic.h" #include "PxRigidStatic.h" #include "ExtInertiaTensor.h" #include "foundation/PxAllocator.h" #include "foundation/PxSIMDHelpers.h" #include "CmUtils.h" using namespace physx; using namespace Cm; static bool computeMassAndDiagInertia(Ext::InertiaTensorComputer& inertiaComp, PxVec3& diagTensor, PxQuat& orient, PxReal& massOut, PxVec3& coM, bool lockCOM, const PxRigidBody& body, const char* errorStr) { // The inertia tensor and center of mass is relative to the actor at this point. Transform to the // body frame directly if CoM is specified, else use computed center of mass if (lockCOM) { inertiaComp.translate(-coM); // base the tensor on user's desired center of mass. } else { //get center of mass - has to be done BEFORE centering. coM = inertiaComp.getCenterOfMass(); //the computed result now needs to be centered around the computed center of mass: inertiaComp.center(); } // The inertia matrix is now based on the body's center of mass desc.massLocalPose.p massOut = inertiaComp.getMass(); diagTensor = PxDiagonalize(inertiaComp.getInertia(), orient); if ((diagTensor.x > 0.0f) && (diagTensor.y > 0.0f) && (diagTensor.z > 0.0f)) return true; else { PxGetFoundation().error(PxErrorCode::eDEBUG_WARNING, PX_FL, "%s: inertia tensor has negative components (ill-conditioned input expected). 
Approximation for inertia tensor will be used instead.", errorStr); // keep center of mass but use the AABB as a crude approximation for the inertia tensor PxBounds3 bounds = body.getWorldBounds(); const PxTransform pose = body.getGlobalPose(); bounds = PxBounds3::transformFast(pose.getInverse(), bounds); Ext::InertiaTensorComputer it(false); it.setBox(bounds.getExtents()); it.scaleDensity(massOut / it.getMass()); const PxMat33 inertia = it.getInertia(); diagTensor = PxVec3(inertia.column0.x, inertia.column1.y, inertia.column2.z); orient = PxQuat(PxIdentity); return true; } } static bool computeMassAndInertia(Ext::InertiaTensorComputer& inertiaComp, bool multipleMassOrDensity, PxRigidBody& body, const PxReal* densities, const PxReal* masses, PxU32 densityOrMassCount, bool includeNonSimShapes) { PX_ASSERT(!densities || !masses); PX_ASSERT((densities || masses) && (densityOrMassCount > 0)); PxInlineArray<PxShape*, 16> shapes("PxShape*"); shapes.resize(body.getNbShapes()); body.getShapes(shapes.begin(), shapes.size()); PxU32 validShapeIndex = 0; PxReal currentMassOrDensity; const PxReal* massOrDensityArray; if (densities) { massOrDensityArray = densities; currentMassOrDensity = densities[0]; } else { massOrDensityArray = masses; currentMassOrDensity = masses[0]; } if (!PxIsFinite(currentMassOrDensity)) return PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "computeMassAndInertia: Provided mass or density has no valid value"); for(PxU32 i=0; i < shapes.size(); i++) { if ((!(shapes[i]->getFlags() & PxShapeFlag::eSIMULATION_SHAPE)) && (!includeNonSimShapes)) continue; if (multipleMassOrDensity) { if (validShapeIndex < densityOrMassCount) { currentMassOrDensity = massOrDensityArray[validShapeIndex]; if (!PxIsFinite(currentMassOrDensity)) return PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "computeMassAndInertia: Provided mass or density has no valid value"); } else return PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, 
"computeMassAndInertia: Not enough mass/density values provided for all (simulation) shapes"); } Ext::InertiaTensorComputer it(false); const PxGeometry& geom = shapes[i]->getGeometry(); switch(geom.getType()) { case PxGeometryType::eSPHERE : { const PxSphereGeometry& g = static_cast<const PxSphereGeometry&>(geom); PxTransform temp(shapes[i]->getLocalPose()); it.setSphere(g.radius, &temp); } break; case PxGeometryType::eBOX : { const PxBoxGeometry& g = static_cast<const PxBoxGeometry&>(geom); PxTransform temp(shapes[i]->getLocalPose()); it.setBox(g.halfExtents, &temp); } break; case PxGeometryType::eCAPSULE : { const PxCapsuleGeometry& g = static_cast<const PxCapsuleGeometry&>(geom); PxTransform temp(shapes[i]->getLocalPose()); it.setCapsule(0, g.radius, g.halfHeight, &temp); } break; case PxGeometryType::eCONVEXMESH : { const PxConvexMeshGeometry& g = static_cast<const PxConvexMeshGeometry&>(geom); PxConvexMesh& convMesh = *g.convexMesh; PxReal convMass; PxMat33 convInertia; PxVec3 convCoM; convMesh.getMassInformation(convMass, convInertia, convCoM); if (!g.scale.isIdentity()) { //scale the mass properties convMass *= (g.scale.scale.x * g.scale.scale.y * g.scale.scale.z); convCoM = g.scale.transform(convCoM); convInertia = PxMassProperties::scaleInertia(convInertia, g.scale.rotation, g.scale.scale); } it = Ext::InertiaTensorComputer(convInertia, convCoM, convMass); it.transform(shapes[i]->getLocalPose()); } break; case PxGeometryType::eCUSTOM: { PxMassProperties mp(shapes[i]->getGeometry()); it = Ext::InertiaTensorComputer(mp.inertiaTensor, mp.centerOfMass, mp.mass); it.transform(shapes[i]->getLocalPose()); } break; case PxGeometryType::eTRIANGLEMESH: { const PxTriangleMeshGeometry& g = static_cast<const PxTriangleMeshGeometry&>(geom); PxReal mass; PxMat33 inertia; PxVec3 centerOfMass; g.triangleMesh->getMassInformation(mass, inertia, centerOfMass); if (!g.scale.isIdentity()) { //scale the mass properties mass *= (g.scale.scale.x * g.scale.scale.y * 
g.scale.scale.z); centerOfMass = g.scale.transform(centerOfMass); inertia = PxMassProperties::scaleInertia(inertia, g.scale.rotation, g.scale.scale); } it = Ext::InertiaTensorComputer(inertia, centerOfMass, mass); it.transform(shapes[i]->getLocalPose()); } break; default: { return PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "computeMassAndInertia: Dynamic actor with illegal collision shapes"); } } if (densities) it.scaleDensity(currentMassOrDensity); else if (multipleMassOrDensity) // mass per shape -> need to scale density per shape it.scaleDensity(currentMassOrDensity / it.getMass()); inertiaComp.add(it); validShapeIndex++; } if (validShapeIndex && masses && (!multipleMassOrDensity)) // at least one simulation shape and single mass for all shapes -> scale density at the end inertiaComp.scaleDensity(currentMassOrDensity / inertiaComp.getMass()); return true; } static bool updateMassAndInertia(bool multipleMassOrDensity, PxRigidBody& body, const PxReal* densities, PxU32 densityCount, const PxVec3* massLocalPose, bool includeNonSimShapes) { bool success; // default values in case there were no shapes PxReal massOut = 1.0f; PxVec3 diagTensor(1.0f); PxQuat orient(PxIdentity); bool lockCom = massLocalPose != NULL; PxVec3 com = lockCom ? 
*massLocalPose : PxVec3(0); const char* errorStr = "PxRigidBodyExt::updateMassAndInertia"; if (densities && densityCount) { Ext::InertiaTensorComputer inertiaComp(true); if(computeMassAndInertia(inertiaComp, multipleMassOrDensity, body, densities, NULL, densityCount, includeNonSimShapes)) { if(inertiaComp.getMass()!=0 && computeMassAndDiagInertia(inertiaComp, diagTensor, orient, massOut, com, lockCom, body, errorStr)) success = true; else success = false; // body with no shapes provided or computeMassAndDiagInertia() failed } else { PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "%s: Mass and inertia computation failed, setting mass to 1 and inertia to (1,1,1)", errorStr); success = false; } } else { PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "%s: No density specified, setting mass to 1 and inertia to (1,1,1)", errorStr); success = false; } PX_ASSERT(orient.isFinite()); PX_ASSERT(diagTensor.isFinite()); PX_ASSERT(PxIsFinite(massOut)); body.setMass(massOut); body.setMassSpaceInertiaTensor(diagTensor); body.setCMassLocalPose(PxTransform(com, orient)); return success; } bool PxRigidBodyExt::updateMassAndInertia(PxRigidBody& body, const PxReal* densities, PxU32 densityCount, const PxVec3* massLocalPose, bool includeNonSimShapes) { return ::updateMassAndInertia(true, body, densities, densityCount, massLocalPose, includeNonSimShapes); } bool PxRigidBodyExt::updateMassAndInertia(PxRigidBody& body, PxReal density, const PxVec3* massLocalPose, bool includeNonSimShapes) { return ::updateMassAndInertia(false, body, &density, 1, massLocalPose, includeNonSimShapes); } static bool setMassAndUpdateInertia(bool multipleMassOrDensity, PxRigidBody& body, const PxReal* masses, PxU32 massCount, const PxVec3* massLocalPose, bool includeNonSimShapes) { bool success; // default values in case there were no shapes PxReal massOut = 1.0f; PxVec3 diagTensor(1.0f); PxQuat orient(PxIdentity); bool lockCom = massLocalPose != NULL; PxVec3 com = lockCom ? 
*massLocalPose : PxVec3(0); const char* errorStr = "PxRigidBodyExt::setMassAndUpdateInertia"; if(masses && massCount) { Ext::InertiaTensorComputer inertiaComp(true); if(computeMassAndInertia(inertiaComp, multipleMassOrDensity, body, NULL, masses, massCount, includeNonSimShapes)) { success = true; if (inertiaComp.getMass()!=0 && !computeMassAndDiagInertia(inertiaComp, diagTensor, orient, massOut, com, lockCom, body, errorStr)) success = false; // computeMassAndDiagInertia() failed (mass zero?) if (massCount == 1) massOut = masses[0]; // to cover special case where body has no simulation shape } else { PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "%s: Mass and inertia computation failed, setting mass to 1 and inertia to (1,1,1)", errorStr); success = false; } } else { PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "%s: No mass specified, setting mass to 1 and inertia to (1,1,1)", errorStr); success = false; } PX_ASSERT(orient.isFinite()); PX_ASSERT(diagTensor.isFinite()); body.setMass(massOut); body.setMassSpaceInertiaTensor(diagTensor); body.setCMassLocalPose(PxTransform(com, orient)); return success; } bool PxRigidBodyExt::setMassAndUpdateInertia(PxRigidBody& body, const PxReal* masses, PxU32 massCount, const PxVec3* massLocalPose, bool includeNonSimShapes) { return ::setMassAndUpdateInertia(true, body, masses, massCount, massLocalPose, includeNonSimShapes); } bool PxRigidBodyExt::setMassAndUpdateInertia(PxRigidBody& body, PxReal mass, const PxVec3* massLocalPose, bool includeNonSimShapes) { return ::setMassAndUpdateInertia(false, body, &mass, 1, massLocalPose, includeNonSimShapes); } PxMassProperties PxRigidBodyExt::computeMassPropertiesFromShapes(const PxShape* const* shapes, PxU32 shapeCount) { PxInlineArray<PxMassProperties, 16> massProps; massProps.reserve(shapeCount); PxInlineArray<PxTransform, 16> localTransforms; localTransforms.reserve(shapeCount); for(PxU32 shapeIdx=0; shapeIdx < shapeCount; shapeIdx++) { const PxShape* 
shape = shapes[shapeIdx]; PxMassProperties mp(shape->getGeometry()); massProps.pushBack(mp); localTransforms.pushBack(shape->getLocalPose()); } return PxMassProperties::sum(massProps.begin(), localTransforms.begin(), shapeCount); } PX_INLINE void addForceAtPosInternal(PxRigidBody& body, const PxVec3& force, const PxVec3& pos, PxForceMode::Enum mode, bool wakeup) { if(mode == PxForceMode::eACCELERATION || mode == PxForceMode::eVELOCITY_CHANGE) { PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "PxRigidBodyExt::addForce methods do not support eACCELERATION or eVELOCITY_CHANGE modes"); return; } const PxTransform globalPose = body.getGlobalPose(); const PxVec3 centerOfMass = globalPose.transform(body.getCMassLocalPose().p); const PxVec3 torque = (pos - centerOfMass).cross(force); body.addForce(force, mode, wakeup); body.addTorque(torque, mode, wakeup); } void PxRigidBodyExt::addForceAtPos(PxRigidBody& body, const PxVec3& force, const PxVec3& pos, PxForceMode::Enum mode, bool wakeup) { addForceAtPosInternal(body, force, pos, mode, wakeup); } void PxRigidBodyExt::addForceAtLocalPos(PxRigidBody& body, const PxVec3& force, const PxVec3& pos, PxForceMode::Enum mode, bool wakeup) { //transform pos to world space const PxVec3 globalForcePos = body.getGlobalPose().transform(pos); addForceAtPosInternal(body, force, globalForcePos, mode, wakeup); } void PxRigidBodyExt::addLocalForceAtPos(PxRigidBody& body, const PxVec3& force, const PxVec3& pos, PxForceMode::Enum mode, bool wakeup) { const PxVec3 globalForce = body.getGlobalPose().rotate(force); addForceAtPosInternal(body, globalForce, pos, mode, wakeup); } void PxRigidBodyExt::addLocalForceAtLocalPos(PxRigidBody& body, const PxVec3& force, const PxVec3& pos, PxForceMode::Enum mode, bool wakeup) { const PxTransform globalPose = body.getGlobalPose(); const PxVec3 globalForcePos = globalPose.transform(pos); const PxVec3 globalForce = globalPose.rotate(force); addForceAtPosInternal(body, globalForce, globalForcePos, 
mode, wakeup); } PX_INLINE PxVec3 getVelocityAtPosInternal(const PxRigidBody& body, const PxVec3& point) { PxVec3 velocity = body.getLinearVelocity(); velocity += body.getAngularVelocity().cross(point); return velocity; } PxVec3 PxRigidBodyExt::getVelocityAtPos(const PxRigidBody& body, const PxVec3& point) { const PxTransform globalPose = body.getGlobalPose(); const PxVec3 centerOfMass = globalPose.transform(body.getCMassLocalPose().p); const PxVec3 rpoint = point - centerOfMass; return getVelocityAtPosInternal(body, rpoint); } PxVec3 PxRigidBodyExt::getLocalVelocityAtLocalPos(const PxRigidBody& body, const PxVec3& point) { const PxTransform globalPose = body.getGlobalPose(); const PxVec3 centerOfMass = globalPose.transform(body.getCMassLocalPose().p); const PxVec3 rpoint = globalPose.transform(point) - centerOfMass; return getVelocityAtPosInternal(body, rpoint); } PxVec3 PxRigidBodyExt::getVelocityAtOffset(const PxRigidBody& body, const PxVec3& point) { const PxTransform globalPose = body.getGlobalPose(); const PxVec3 centerOfMass = globalPose.rotate(body.getCMassLocalPose().p); const PxVec3 rpoint = point - centerOfMass; return getVelocityAtPosInternal(body, rpoint); } void PxRigidBodyExt::computeVelocityDeltaFromImpulse(const PxRigidBody& body, const PxTransform& globalPose, const PxVec3& point, const PxVec3& impulse, const PxReal invMassScale, const PxReal invInertiaScale, PxVec3& linearVelocityChange, PxVec3& angularVelocityChange) { const PxVec3 centerOfMass = globalPose.transform(body.getCMassLocalPose().p); const PxReal invMass = body.getInvMass() * invMassScale; const PxVec3 invInertiaMS = body.getMassSpaceInvInertiaTensor() * invInertiaScale; PxMat33 invInertia; transformInertiaTensor(invInertiaMS, PxMat33Padded(globalPose.q), invInertia); linearVelocityChange = impulse * invMass; const PxVec3 rXI = (point - centerOfMass).cross(impulse); angularVelocityChange = invInertia * rXI; } void PxRigidBodyExt::computeLinearAngularImpulse(const PxRigidBody& body, 
const PxTransform& globalPose, const PxVec3& point, const PxVec3& impulse, const PxReal invMassScale, const PxReal invInertiaScale, PxVec3& linearImpulse, PxVec3& angularImpulse) { const PxVec3 centerOfMass = globalPose.transform(body.getCMassLocalPose().p); linearImpulse = impulse * invMassScale; angularImpulse = (point - centerOfMass).cross(impulse) * invInertiaScale; } void PxRigidBodyExt::computeVelocityDeltaFromImpulse(const PxRigidBody& body, const PxVec3& impulsiveForce, const PxVec3& impulsiveTorque, PxVec3& deltaLinearVelocity, PxVec3& deltaAngularVelocity) { { const PxF32 recipMass = body.getInvMass(); deltaLinearVelocity = impulsiveForce*recipMass; } { const PxTransform globalPose = body.getGlobalPose(); const PxTransform cmLocalPose = body.getCMassLocalPose(); // PT:: tag: scalar transform*transform const PxTransform body2World = globalPose*cmLocalPose; const PxMat33Padded M(body2World.q); const PxVec3 recipInertiaBodySpace = body.getMassSpaceInvInertiaTensor(); PxMat33 recipInertiaWorldSpace; const float axx = recipInertiaBodySpace.x*M(0,0), axy = recipInertiaBodySpace.x*M(1,0), axz = recipInertiaBodySpace.x*M(2,0); const float byx = recipInertiaBodySpace.y*M(0,1), byy = recipInertiaBodySpace.y*M(1,1), byz = recipInertiaBodySpace.y*M(2,1); const float czx = recipInertiaBodySpace.z*M(0,2), czy = recipInertiaBodySpace.z*M(1,2), czz = recipInertiaBodySpace.z*M(2,2); recipInertiaWorldSpace(0,0) = axx*M(0,0) + byx*M(0,1) + czx*M(0,2); recipInertiaWorldSpace(1,1) = axy*M(1,0) + byy*M(1,1) + czy*M(1,2); recipInertiaWorldSpace(2,2) = axz*M(2,0) + byz*M(2,1) + czz*M(2,2); recipInertiaWorldSpace(0,1) = recipInertiaWorldSpace(1,0) = axx*M(1,0) + byx*M(1,1) + czx*M(1,2); recipInertiaWorldSpace(0,2) = recipInertiaWorldSpace(2,0) = axx*M(2,0) + byx*M(2,1) + czx*M(2,2); recipInertiaWorldSpace(1,2) = recipInertiaWorldSpace(2,1) = axy*M(2,0) + byy*M(2,1) + czy*M(2,2); deltaAngularVelocity = recipInertiaWorldSpace*(impulsiveTorque); } } 
//================================================================================= // Single closest hit compound sweep bool PxRigidBodyExt::linearSweepSingle( PxRigidBody& body, PxScene& scene, const PxVec3& unitDir, const PxReal distance, PxHitFlags outputFlags, PxSweepHit& closestHit, PxU32& shapeIndex, const PxQueryFilterData& filterData, PxQueryFilterCallback* filterCall, const PxQueryCache* cache, const PxReal inflation) { shapeIndex = 0xFFFFffff; PxReal closestDist = distance; PxU32 nbShapes = body.getNbShapes(); for(PxU32 i=0; i < nbShapes; i++) { PxShape* shape = NULL; body.getShapes(&shape, 1, i); PX_ASSERT(shape != NULL); PxTransform pose = PxShapeExt::getGlobalPose(*shape, body); PxQueryFilterData fd; fd.flags = filterData.flags; PxU32 or4 = (filterData.data.word0 | filterData.data.word1 | filterData.data.word2 | filterData.data.word3); fd.data = or4 ? filterData.data : shape->getQueryFilterData(); PxSweepBuffer subHit; // touching hits are not allowed to be returned from the filters scene.sweep(shape->getGeometry(), pose, unitDir, distance, subHit, outputFlags, fd, filterCall, cache, inflation); if (subHit.hasBlock && subHit.block.distance < closestDist) { closestDist = subHit.block.distance; closestHit = subHit.block; shapeIndex = i; } } return (shapeIndex != 0xFFFFffff); } //================================================================================= // Multiple hits compound sweep // AP: we might be able to improve the return results API but no time for it in 3.3 PxU32 PxRigidBodyExt::linearSweepMultiple( PxRigidBody& body, PxScene& scene, const PxVec3& unitDir, const PxReal distance, PxHitFlags outputFlags, PxSweepHit* hitBuffer, PxU32* hitShapeIndices, PxU32 hitBufferSize, PxSweepHit& block, PxI32& blockingHitShapeIndex, bool& overflow, const PxQueryFilterData& filterData, PxQueryFilterCallback* filterCall, const PxQueryCache* cache, const PxReal inflation) { overflow = false; blockingHitShapeIndex = -1; for (PxU32 i = 0; i < hitBufferSize; 
i++) hitShapeIndices[i] = 0xFFFFffff; PxI32 sumNbResults = 0; PxU32 nbShapes = body.getNbShapes(); PxF32 shrunkMaxDistance = distance; for(PxU32 i=0; i < nbShapes; i++) { PxShape* shape = NULL; body.getShapes(&shape, 1, i); PX_ASSERT(shape != NULL); PxTransform pose = PxShapeExt::getGlobalPose(*shape, body); PxQueryFilterData fd; fd.flags = filterData.flags; PxU32 or4 = (filterData.data.word0 | filterData.data.word1 | filterData.data.word2 | filterData.data.word3); fd.data = or4 ? filterData.data : shape->getQueryFilterData(); PxU32 bufSizeLeft = hitBufferSize-sumNbResults; PxSweepHit extraHit; PxSweepBuffer buffer(bufSizeLeft == 0 ? &extraHit : hitBuffer+sumNbResults, bufSizeLeft == 0 ? 1 : hitBufferSize-sumNbResults); scene.sweep(shape->getGeometry(), pose, unitDir, shrunkMaxDistance, buffer, outputFlags, fd, filterCall, cache, inflation); // Check and abort on overflow. Assume overflow if result count is bufSize. PxU32 nbNewResults = buffer.getNbTouches(); overflow |= (nbNewResults >= bufSizeLeft); if (bufSizeLeft == 0) // this is for when we used the extraHit buffer nbNewResults = 0; // set hitShapeIndices for each new non-blocking hit for (PxU32 j = 0; j < nbNewResults; j++) if (sumNbResults + PxU32(j) < hitBufferSize) hitShapeIndices[sumNbResults+j] = i; if (buffer.hasBlock) // there's a blocking hit in the most recent sweepMultiple results { // overwrite the return result blocking hit with the new blocking hit if under if (blockingHitShapeIndex == -1 || buffer.block.distance < block.distance) { blockingHitShapeIndex = PxI32(i); block = buffer.block; } // Remove all the old touching hits below the new maxDist // sumNbResults is not updated yet at this point // and represents the count accumulated so far excluding the very last query PxI32 nbNewResultsSigned = PxI32(nbNewResults); // need a signed version, see nbNewResultsSigned-- below for (PxI32 j = sumNbResults-1; j >= 0; j--) // iterate over "old" hits (up to shapeIndex-1) if (buffer.block.distance < 
hitBuffer[j].distance) { // overwrite with last "new" hit PxI32 sourceIndex = PxI32(sumNbResults)+nbNewResultsSigned-1; PX_ASSERT(sourceIndex >= j); hitBuffer[j] = hitBuffer[sourceIndex]; hitShapeIndices[j] = hitShapeIndices[sourceIndex]; nbNewResultsSigned--; // can get negative, that means we are shifting the last results array } sumNbResults += nbNewResultsSigned; } else // if there was no new blocking hit we don't need to do anything special, simply append all results to touch array sumNbResults += nbNewResults; PX_ASSERT(sumNbResults >= 0 && sumNbResults <= PxI32(hitBufferSize)); } return PxU32(sumNbResults); }
24,451
C++
37.689873
220
0.741401
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/ExtPvd.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef EXT_PVD_H #define EXT_PVD_H #if PX_SUPPORT_PVD #include "extensions/PxJoint.h" #include "foundation/PxUserAllocated.h" #include "PxPvdDataStream.h" #include "PvdTypeNames.h" #include "PxPvdObjectModelBaseTypes.h" #if PX_LINUX && PX_CLANG #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wreserved-identifier" #endif #include "PxExtensionMetaDataObjects.h" #if PX_LINUX && PX_CLANG #pragma clang diagnostic pop #endif namespace physx { class PxJoint; class PxD6Joint; class PxDistanceJoint; class PxFixedJoint; class PxPrismaticJoint; class PxRevoluteJoint; class PxSphericalJoint; class PxContactJoint; class PxGearJoint; class PxRackAndPinionJoint; } #define JOINT_GROUP 3 namespace physx { namespace pvdsdk { #define DEFINE_NATIVE_PVD_PHYSX3_TYPE_MAP( type ) DEFINE_PVD_TYPE_NAME_MAP( physx::type, "physx3", #type ) DEFINE_NATIVE_PVD_PHYSX3_TYPE_MAP(PxJoint) DEFINE_NATIVE_PVD_PHYSX3_TYPE_MAP(PxJointGeneratedValues) DEFINE_NATIVE_PVD_PHYSX3_TYPE_MAP(PxFixedJoint) DEFINE_NATIVE_PVD_PHYSX3_TYPE_MAP(PxFixedJointGeneratedValues) DEFINE_NATIVE_PVD_PHYSX3_TYPE_MAP(PxDistanceJoint) DEFINE_NATIVE_PVD_PHYSX3_TYPE_MAP(PxDistanceJointGeneratedValues) DEFINE_NATIVE_PVD_PHYSX3_TYPE_MAP(PxContactJoint) DEFINE_NATIVE_PVD_PHYSX3_TYPE_MAP(PxContactJointGeneratedValues) DEFINE_NATIVE_PVD_PHYSX3_TYPE_MAP(PxPrismaticJoint) DEFINE_NATIVE_PVD_PHYSX3_TYPE_MAP(PxPrismaticJointGeneratedValues) DEFINE_NATIVE_PVD_PHYSX3_TYPE_MAP(PxRevoluteJoint) DEFINE_NATIVE_PVD_PHYSX3_TYPE_MAP(PxRevoluteJointGeneratedValues) DEFINE_NATIVE_PVD_PHYSX3_TYPE_MAP(PxSphericalJoint) DEFINE_NATIVE_PVD_PHYSX3_TYPE_MAP(PxSphericalJointGeneratedValues) DEFINE_NATIVE_PVD_PHYSX3_TYPE_MAP(PxD6Joint) DEFINE_NATIVE_PVD_PHYSX3_TYPE_MAP(PxD6JointGeneratedValues) DEFINE_NATIVE_PVD_PHYSX3_TYPE_MAP(PxGearJoint) DEFINE_NATIVE_PVD_PHYSX3_TYPE_MAP(PxGearJointGeneratedValues) DEFINE_NATIVE_PVD_PHYSX3_TYPE_MAP(PxRackAndPinionJoint) DEFINE_NATIVE_PVD_PHYSX3_TYPE_MAP(PxRackAndPinionJointGeneratedValues) #undef 
DEFINE_NATIVE_PVD_PHYSX3_TYPE_MAP } //pvdsdk } // physx namespace physx { namespace Ext { using namespace physx::pvdsdk; class Pvd: public physx::PxUserAllocated { Pvd& operator=(const Pvd&); public: class PvdNameSpace { public: PvdNameSpace(PvdDataStream& conn, const char* name); ~PvdNameSpace(); private: PvdNameSpace& operator=(const PvdNameSpace&); PvdDataStream& mConnection; }; static void setActors( PvdDataStream& PvdDataStream, const PxJoint& inJoint, const PxConstraint& c, const PxActor* newActor0, const PxActor* newActor1 ); template<typename TObjType> static void createInstance( PvdDataStream& inStream, const PxConstraint& c, const TObjType& inSource ) { inStream.createInstance( &inSource ); inStream.pushBackObjectRef( c.getScene(), "Joints", &inSource ); class ConstraintUpdateCmd : public PvdDataStream::PvdCommand { ConstraintUpdateCmd &operator=(const ConstraintUpdateCmd&) { PX_ASSERT(0); return *this; } //PX_NOCOPY doesn't work for local classes public: const PxConstraint& mConstraint; const PxJoint& mJoint; PxRigidActor* actor0, *actor1; ConstraintUpdateCmd(const PxConstraint& constraint, const PxJoint& joint):PvdDataStream::PvdCommand(), mConstraint(constraint), mJoint(joint) { mConstraint.getActors( actor0, actor1 ); } //Assigned is needed for copying ConstraintUpdateCmd(const ConstraintUpdateCmd& cmd) :PvdDataStream::PvdCommand(), mConstraint(cmd.mConstraint), mJoint(cmd.mJoint) { } virtual bool canRun(PvdInstanceDataStream &inStream_ ) { PX_ASSERT(inStream_.isInstanceValid(&mJoint)); //When run this command, the constraint maybe buffer removed return ((actor0 == NULL) || inStream_.isInstanceValid(actor0)) && ((actor1 == NULL) || inStream_.isInstanceValid(actor1)); } virtual void run( PvdInstanceDataStream &inStream_ ) { //When run this command, the constraint maybe buffer removed if(!inStream_.isInstanceValid(&mJoint)) return; PxRigidActor* actor0_, *actor1_; mConstraint.getActors( actor0_, actor1_ ); if ( actor0_ && 
(inStream_.isInstanceValid(actor0_)) ) inStream_.pushBackObjectRef( actor0_, "Joints", &mJoint ); if ( actor1_ && (inStream_.isInstanceValid(actor1_)) ) inStream_.pushBackObjectRef( actor1_, "Joints", &mJoint ); const void* parent = actor0_ ? actor0_ : actor1_; inStream_.setPropertyValue( &mJoint, "Parent", parent ); } }; ConstraintUpdateCmd* cmd = PX_PLACEMENT_NEW(inStream.allocateMemForCmd(sizeof(ConstraintUpdateCmd)), ConstraintUpdateCmd)(c, inSource); if(cmd->canRun( inStream )) cmd->run( inStream ); else inStream.pushPvdCommand( *cmd ); } template<typename jointtype, typename structValue> static void updatePvdProperties(PvdDataStream& pvdConnection, const jointtype& joint) { structValue theValueStruct( &joint ); pvdConnection.setPropertyMessage( &joint, theValueStruct ); } template<typename jointtype> static void simUpdate(PvdDataStream& /*pvdConnection*/, const jointtype& /*joint*/) {} template<typename jointtype> static void createPvdInstance(PvdDataStream& pvdConnection, const PxConstraint& c, const jointtype& joint) { createInstance<jointtype>( pvdConnection, c, joint ); } static void releasePvdInstance(PvdDataStream& pvdConnection, const PxConstraint& c, const PxJoint& joint); static void sendClassDescriptions(PvdDataStream& pvdConnection); }; } // ext } // physx #endif // PX_SUPPORT_PVD #endif // EXT_PVD_H
7,347
C
35.019608
145
0.75051
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/ExtParticleExt.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#include "extensions/PxParticleExt.h"
#include "foundation/PxUserAllocated.h"
#include "PxScene.h"
#include "PxPhysics.h"
#include "PxRigidBody.h"
#include "cudamanager/PxCudaContextManager.h"
#include "cudamanager/PxCudaContext.h"

namespace physx
{
namespace ExtGpu
{

// Uploads the host-side particle data described by 'desc' (positions, velocities,
// phases and particle volumes) into the device buffers owned by 'particleBuffer',
// then updates the buffer's active-particle and volume counts.
// Blocks until the copies complete (streamSynchronize on stream 0), so the caller
// may free or reuse the host arrays in 'desc' immediately after this returns.
// Compiled to a no-op when GPU support is disabled.
void PxDmaDataToDevice(PxCudaContextManager* cudaContextManager, PxParticleBuffer* particleBuffer, const PxParticleBufferDesc& desc)
{
#if PX_SUPPORT_GPU_PHYSX
	cudaContextManager->acquireContext();

	// Device-side destination pointers owned by the particle buffer.
	PxVec4* posInvMass = particleBuffer->getPositionInvMasses();
	PxVec4* velocities = particleBuffer->getVelocities();
	PxU32* phases = particleBuffer->getPhases();
	PxParticleVolume* volumes = particleBuffer->getParticleVolumes();

	PxCudaContext* cudaContext = cudaContextManager->getCudaContext();

	//KS - TODO - use an event to wait for this
	cudaContext->memcpyHtoDAsync(CUdeviceptr(posInvMass), desc.positions, desc.numActiveParticles * sizeof(PxVec4), 0);
	cudaContext->memcpyHtoDAsync(CUdeviceptr(velocities), desc.velocities, desc.numActiveParticles * sizeof(PxVec4), 0);
	cudaContext->memcpyHtoDAsync(CUdeviceptr(phases), desc.phases, desc.numActiveParticles * sizeof(PxU32), 0);
	cudaContext->memcpyHtoDAsync(CUdeviceptr(volumes), desc.volumes, desc.numVolumes * sizeof(PxParticleVolume), 0);

	particleBuffer->setNbActiveParticles(desc.numActiveParticles);
	particleBuffer->setNbParticleVolumes(desc.numVolumes);

	// Synchronous wait: the async copies above read from host memory the caller owns.
	cudaContext->streamSynchronize(0);

	cudaContextManager->releaseContext();
#else
	PX_UNUSED(cudaContextManager);
	PX_UNUSED(particleBuffer);
	PX_UNUSED(desc);
#endif
}

// Creates a PxParticleBuffer sized per 'desc' (maxParticles/maxVolumes) and
// populates it with the host data in 'desc' via PxDmaDataToDevice.
PxParticleBuffer* PxCreateAndPopulateParticleBuffer(const PxParticleBufferDesc& desc, PxCudaContextManager* cudaContextManager)
{
	PxParticleBuffer* particleBuffer = PxGetPhysics().createParticleBuffer(desc.maxParticles, desc.maxVolumes, cudaContextManager);
	PxDmaDataToDevice(cudaContextManager, particleBuffer, desc);
	return particleBuffer;
}

PxParticleAndDiffuseBuffer* PxCreateAndPopulateParticleAndDiffuseBuffer(const PxParticleAndDiffuseBufferDesc& desc,
PxCudaContextManager* cudaContextManager) { PxParticleAndDiffuseBuffer* particleBuffer = PxGetPhysics().createParticleAndDiffuseBuffer(desc.maxParticles, desc.maxVolumes, desc.maxDiffuseParticles, cudaContextManager); PxDmaDataToDevice(cudaContextManager, particleBuffer, desc); particleBuffer->setMaxActiveDiffuseParticles(desc.maxActiveDiffuseParticles); return particleBuffer; } PxParticleClothBuffer* PxCreateAndPopulateParticleClothBuffer(const PxParticleBufferDesc& desc, const PxParticleClothDesc& clothDesc, PxPartitionedParticleCloth& output, PxCudaContextManager* cudaContextManager) { #if PX_SUPPORT_GPU_PHYSX cudaContextManager->acquireContext(); PxParticleClothBuffer* clothBuffer = PxGetPhysics().createParticleClothBuffer(desc.maxParticles, desc.maxVolumes, clothDesc.nbCloths, clothDesc.nbTriangles, clothDesc.nbSprings, cudaContextManager); PxVec4* posInvMass = clothBuffer->getPositionInvMasses(); PxVec4* velocities = clothBuffer->getVelocities(); PxU32* phases = clothBuffer->getPhases(); PxParticleVolume* volumes = clothBuffer->getParticleVolumes(); PxU32* triangles = clothBuffer->getTriangles(); PxVec4* restPositions = clothBuffer->getRestPositions(); PxCudaContext* cudaContext = cudaContextManager->getCudaContext(); cudaContext->memcpyHtoDAsync(CUdeviceptr(posInvMass), desc.positions, desc.numActiveParticles * sizeof(PxVec4), 0); cudaContext->memcpyHtoDAsync(CUdeviceptr(velocities), desc.velocities, desc.numActiveParticles * sizeof(PxVec4), 0); cudaContext->memcpyHtoDAsync(CUdeviceptr(phases), desc.phases, desc.numActiveParticles * sizeof(PxU32), 0); cudaContext->memcpyHtoDAsync(CUdeviceptr(volumes), desc.volumes, desc.numVolumes * sizeof(PxParticleVolume), 0); cudaContext->memcpyHtoDAsync(CUdeviceptr(triangles), clothDesc.triangles, clothDesc.nbTriangles * sizeof(PxU32) * 3, 0); cudaContext->memcpyHtoDAsync(CUdeviceptr(restPositions), clothDesc.restPositions, desc.numActiveParticles * sizeof(PxVec4), 0); 
clothBuffer->setNbActiveParticles(desc.numActiveParticles); clothBuffer->setNbParticleVolumes(desc.numVolumes); clothBuffer->setNbTriangles(clothDesc.nbTriangles); clothBuffer->setCloths(output); cudaContext->streamSynchronize(0); cudaContextManager->releaseContext(); return clothBuffer; #else PX_UNUSED(desc); PX_UNUSED(clothDesc); PX_UNUSED(output); PX_UNUSED(cudaContextManager); return NULL; #endif } PxParticleRigidBuffer* PxCreateAndPopulateParticleRigidBuffer(const PxParticleBufferDesc& desc, const PxParticleRigidDesc& rigidDesc, PxCudaContextManager* cudaContextManager) { #if PX_SUPPORT_GPU_PHYSX cudaContextManager->acquireContext(); PxParticleRigidBuffer* rigidBuffer = PxGetPhysics().createParticleRigidBuffer(desc.maxParticles, desc.maxVolumes, rigidDesc.maxRigids, cudaContextManager); PxVec4* posInvMassd = rigidBuffer->getPositionInvMasses(); PxVec4* velocitiesd = rigidBuffer->getVelocities(); PxU32* phasesd = rigidBuffer->getPhases(); PxParticleVolume* volumesd = rigidBuffer->getParticleVolumes(); PxU32* rigidOffsetsd = rigidBuffer->getRigidOffsets(); PxReal* rigidCoefficientsd = rigidBuffer->getRigidCoefficients(); PxVec4* rigidTranslationsd = rigidBuffer->getRigidTranslations(); PxVec4* rigidRotationsd = rigidBuffer->getRigidRotations(); PxVec4* rigidLocalPositionsd = rigidBuffer->getRigidLocalPositions(); PxVec4* rigidLocalNormalsd = rigidBuffer->getRigidLocalNormals(); PxCudaContext* cudaContext = cudaContextManager->getCudaContext(); const PxU32 numRigids = rigidDesc.numActiveRigids; const PxU32 numActiveParticles = desc.numActiveParticles; cudaContext->memcpyHtoDAsync(CUdeviceptr(posInvMassd), desc.positions, numActiveParticles * sizeof(PxVec4), 0); cudaContext->memcpyHtoDAsync(CUdeviceptr(velocitiesd), desc.velocities, numActiveParticles * sizeof(PxVec4), 0); cudaContext->memcpyHtoDAsync(CUdeviceptr(phasesd), desc.phases, numActiveParticles * sizeof(PxU32), 0); cudaContext->memcpyHtoDAsync(CUdeviceptr(volumesd), desc.volumes, desc.numVolumes * 
sizeof(PxParticleVolume), 0); cudaContext->memcpyHtoDAsync(CUdeviceptr(rigidOffsetsd), rigidDesc.rigidOffsets, sizeof(PxU32) * (numRigids + 1), 0); cudaContext->memcpyHtoDAsync(CUdeviceptr(rigidCoefficientsd), rigidDesc.rigidCoefficients, sizeof(PxReal) * numRigids, 0); cudaContext->memcpyHtoDAsync(CUdeviceptr(rigidTranslationsd), rigidDesc.rigidTranslations, sizeof(PxVec4) * numRigids, 0); cudaContext->memcpyHtoDAsync(CUdeviceptr(rigidRotationsd), rigidDesc.rigidRotations, sizeof(PxQuat) * numRigids, 0); cudaContext->memcpyHtoDAsync(CUdeviceptr(rigidLocalPositionsd), rigidDesc.rigidLocalPositions, sizeof(PxVec4) * desc.numActiveParticles, 0); cudaContext->memcpyHtoDAsync(CUdeviceptr(rigidLocalNormalsd), rigidDesc.rigidLocalNormals, sizeof(PxVec4) * desc.numActiveParticles, 0); rigidBuffer->setNbActiveParticles(numActiveParticles); rigidBuffer->setNbRigids(numRigids); rigidBuffer->setNbParticleVolumes(desc.numVolumes); cudaContext->streamSynchronize(0); cudaContextManager->releaseContext(); return rigidBuffer; #else PX_UNUSED(desc); PX_UNUSED(rigidDesc); PX_UNUSED(cudaContextManager); return NULL; #endif } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// PxParticleAttachmentBuffer::PxParticleAttachmentBuffer(PxParticleBuffer& particleBuffer, PxParticleSystem& particleSystem) : mParticleBuffer(particleBuffer), mDeviceAttachments(NULL), mDeviceFilters(NULL), mNumDeviceAttachments(0), mNumDeviceFilters(0), mCudaContextManager(particleSystem.getCudaContextManager()), mParticleSystem(particleSystem), mDirty(false) { } PxParticleAttachmentBuffer::~PxParticleAttachmentBuffer() { #if PX_SUPPORT_GPU_PHYSX mCudaContextManager->acquireContext(); PxCudaContext* cudaContext = mCudaContextManager->getCudaContext(); if (mDeviceAttachments) cudaContext->memFree((CUdeviceptr)mDeviceAttachments); if (mDeviceFilters) cudaContext->memFree((CUdeviceptr)mDeviceFilters); mDeviceAttachments = NULL; 
mDeviceFilters = NULL; mCudaContextManager->releaseContext(); #endif } void PxParticleAttachmentBuffer::copyToDevice(CUstream stream) { #if PX_SUPPORT_GPU_PHYSX mCudaContextManager->acquireContext(); PxCudaContext* cudaContext = mCudaContextManager->getCudaContext(); if (mAttachments.size() > mNumDeviceAttachments) { if (mDeviceAttachments) cudaContext->memFree((CUdeviceptr)mDeviceAttachments); cudaContext->memAlloc((CUdeviceptr*)&mDeviceAttachments, sizeof(PxParticleRigidAttachment)*mAttachments.size()); mNumDeviceAttachments = mAttachments.size(); } if (mFilters.size() > mNumDeviceFilters) { if (mDeviceFilters) cudaContext->memFree((CUdeviceptr)mDeviceFilters); cudaContext->memAlloc((CUdeviceptr*)&mDeviceFilters, sizeof(PxParticleRigidFilterPair)*mFilters.size()); mNumDeviceFilters = mFilters.size(); } if (mAttachments.size()) cudaContext->memcpyHtoDAsync((CUdeviceptr)mDeviceAttachments, mAttachments.begin(), sizeof(PxParticleRigidAttachment)*mAttachments.size(), stream); if (mFilters.size()) cudaContext->memcpyHtoDAsync((CUdeviceptr)mDeviceFilters, mFilters.begin(), sizeof(PxParticleRigidFilterPair)*mFilters.size(), stream); mParticleBuffer.setRigidAttachments(mDeviceAttachments, mAttachments.size()); mParticleBuffer.setRigidFilters(mDeviceFilters, mFilters.size()); mDirty = true; for (PxU32 i = 0; i < mNewReferencedBodies.size(); ++i) { if (mReferencedBodies[mNewReferencedBodies[i]] > 0) mParticleSystem.addRigidAttachment(mNewReferencedBodies[i]); } for (PxU32 i = 0; i < mDestroyedRefrencedBodies.size(); ++i) { if (mReferencedBodies[mDestroyedRefrencedBodies[i]] == 0) mParticleSystem.removeRigidAttachment(mDestroyedRefrencedBodies[i]); } mNewReferencedBodies.resize(0); mDestroyedRefrencedBodies.resize(0); mCudaContextManager->releaseContext(); #else PX_UNUSED(stream); #endif } void PxParticleAttachmentBuffer::addRigidAttachment(PxRigidActor* rigidActor, const PxU32 particleID, const PxVec3& localPose, PxConeLimitedConstraint* coneLimit) { 
PX_CHECK_AND_RETURN(coneLimit == NULL || coneLimit->isValid(), "PxParticleAttachmentBuffer::addRigidAttachment: PxConeLimitedConstraint needs to be valid if specified."); PX_ASSERT(particleID < mParticleBuffer.getNbActiveParticles()); PxParticleRigidAttachment attachment(PxConeLimitedConstraint(), PxVec4(0.0f)); if (rigidActor == NULL) { PxGetFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, PX_FL, "PxParticleAttachmentBuffer::addRigidAttachment: rigidActor cannot be NULL."); return; } if (coneLimit) { attachment.mConeLimitParams.axisAngle = PxVec4(coneLimit->mAxis, coneLimit->mAngle); attachment.mConeLimitParams.lowHighLimits = PxVec4(coneLimit->mLowLimit, coneLimit->mHighLimit, 0.f, 0.f); } if (rigidActor->getType() == PxActorType::eRIGID_STATIC) { // attachments to rigid static work in global space attachment.mLocalPose0 = PxVec4(static_cast<PxRigidBody*>(rigidActor)->getGlobalPose().transform(localPose), 0.0f); attachment.mID0 = PxNodeIndex().getInd(); } else { // others use body space. PxRigidBody* rigid = static_cast<PxRigidBody*>(rigidActor); PxTransform body2Actor = rigid->getCMassLocalPose(); attachment.mLocalPose0 = PxVec4(body2Actor.transformInv(localPose), 0.f); attachment.mID0 = rigid->getInternalIslandNodeIndex().getInd(); } attachment.mID1 = particleID; //Insert in order... PxU32 l = 0, r = PxU32(mAttachments.size()); while (l < r) //If difference is just 1, we've found an item... 
{ PxU32 index = (l + r) / 2; if (attachment < mAttachments[index]) r = index; else if (attachment > mAttachments[index]) l = index + 1; else l = r = index; //This is a match so insert before l } mAttachments.insert(); for (PxU32 i = mAttachments.size()-1; i > l; --i) { mAttachments[i] = mAttachments[i - 1]; } mAttachments[l] = attachment; mDirty = true; if (rigidActor) { PxU32& refCount = mReferencedBodies[rigidActor]; if (refCount == 0) mNewReferencedBodies.pushBack(rigidActor); refCount++; } } bool PxParticleAttachmentBuffer::removeRigidAttachment(PxRigidActor* rigidActor, const PxU32 particleID) { PX_ASSERT(particleID < mParticleBuffer.getNbActiveParticles()); if (rigidActor == NULL) { PxGetFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, PX_FL, "PxParticleAttachmentBuffer::removeRigidAttachment: rigidActor cannot be NULL."); return false; } if (rigidActor) { PxU32& refCount = mReferencedBodies[rigidActor]; refCount--; if (refCount == 0) mDestroyedRefrencedBodies.pushBack(rigidActor); } PxParticleRigidFilterPair attachment; attachment.mID0 = rigidActor->getType() != PxActorType::eRIGID_STATIC ? static_cast<PxRigidBody*>(rigidActor)->getInternalIslandNodeIndex().getInd() : PxNodeIndex().getInd(); attachment.mID1 = particleID; PxU32 l = 0, r = PxU32(mAttachments.size()); while (l < r) //If difference is just 1, we've found an item... { PxU32 index = (l + r) / 2; if (attachment < mAttachments[index]) r = index; else if (attachment > mAttachments[index]) l = index + 1; else l = r = index; //This is a match so insert before l } if (mAttachments[l] == attachment) { mDirty = true; //Remove mAttachments.remove(l); return true; } return false; } void PxParticleAttachmentBuffer::addRigidFilter(PxRigidActor* rigidActor, const PxU32 particleID) { PX_ASSERT(particleID < mParticleBuffer.getNbActiveParticles()); PxParticleRigidFilterPair attachment; attachment.mID0 = rigidActor->getType() != PxActorType::eRIGID_STATIC ? 
static_cast<PxRigidBody*>(rigidActor)->getInternalIslandNodeIndex().getInd() : PxNodeIndex().getInd(); attachment.mID1 = particleID; //Insert in order... PxU32 l = 0, r = PxU32(mFilters.size()); while (l < r) //If difference is just 1, we've found an item... { PxU32 index = (l + r) / 2; if (attachment < mFilters[index]) r = index; else if (attachment > mFilters[index]) l = index + 1; else l = r = index; //This is a match so insert before l } mFilters.insert(); for (PxU32 i = mFilters.size() - 1; i > l; --i) { mFilters[i] = mFilters[i - 1]; } mFilters[l] = attachment; mDirty = true; } bool PxParticleAttachmentBuffer::removeRigidFilter(PxRigidActor* rigidActor, const PxU32 particleID) { PX_ASSERT(particleID < mParticleBuffer.getNbActiveParticles()); PxParticleRigidFilterPair attachment; attachment.mID0 = rigidActor->getType() != PxActorType::eRIGID_STATIC ? static_cast<PxRigidBody*>(rigidActor)->getInternalIslandNodeIndex().getInd() : PxNodeIndex().getInd(); attachment.mID1 = particleID; PxU32 l = 0, r = PxU32(mFilters.size()); while (l < r) //If difference is just 1, we've found an item... 
{ PxU32 index = (l + r) / 2; if (attachment < mFilters[index]) r = index; else if (attachment > mFilters[index]) l = index + 1; else l = r = index; //This is a match so insert before l } if (mFilters[l] == attachment) { mDirty = true; //Remove mFilters.remove(l); return true; } return false; } PxParticleAttachmentBuffer* PxCreateParticleAttachmentBuffer(PxParticleBuffer& buffer, PxParticleSystem& particleSystem) { return PX_NEW(PxParticleAttachmentBuffer)(buffer, particleSystem); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// struct ParticleClothBuffersImpl : public PxParticleClothBufferHelper, public PxUserAllocated { ParticleClothBuffersImpl(const PxU32 maxCloths, const PxU32 maxTriangles, const PxU32 maxSprings, const PxU32 maxParticles, PxCudaContextManager* cudaContextManager) : mCudaContextManager(cudaContextManager) { mMaxParticles = maxParticles; mClothDesc.nbParticles = 0; mMaxTriangles = maxTriangles; mClothDesc.nbTriangles = 0; mMaxSprings = maxSprings; mClothDesc.nbSprings = 0; mMaxCloths = maxCloths; mClothDesc.nbCloths = 0; #if PX_SUPPORT_GPU_PHYSX mClothDesc.restPositions = mCudaContextManager->allocPinnedHostBuffer<PxVec4>(maxParticles); mClothDesc.triangles = mCudaContextManager->allocPinnedHostBuffer<PxU32>(maxTriangles * 3); mClothDesc.springs = mCudaContextManager->allocPinnedHostBuffer<PxParticleSpring>(maxSprings); mClothDesc.cloths = mCudaContextManager->allocPinnedHostBuffer<PxParticleCloth>(maxCloths); #endif } void release() { #if PX_SUPPORT_GPU_PHYSX mCudaContextManager->freePinnedHostBuffer(mClothDesc.cloths); mCudaContextManager->freePinnedHostBuffer(mClothDesc.restPositions); mCudaContextManager->freePinnedHostBuffer(mClothDesc.triangles); mCudaContextManager->freePinnedHostBuffer(mClothDesc.springs); #endif PX_DELETE_THIS; } PxU32 getMaxCloths() const { return mMaxCloths; } PxU32 getNumCloths() const { return mClothDesc.nbCloths; } PxU32 
getMaxSprings() const { return mMaxSprings; } PxU32 getNumSprings() const { return mClothDesc.nbSprings; } PxU32 getMaxTriangles() const { return mMaxTriangles; } PxU32 getNumTriangles() const { return mClothDesc.nbTriangles; } PxU32 getMaxParticles() const { return mMaxParticles; } PxU32 getNumParticles() const { return mClothDesc.nbParticles; } void addCloth(const PxParticleCloth& particleCloth, const PxU32* triangles, const PxU32 numTriangles, const PxParticleSpring* springs, const PxU32 numSprings, const PxVec4* restPositions, const PxU32 numParticles) { if (mClothDesc.nbCloths + 1 > mMaxCloths) { PxGetFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, PX_FL, "PxParticleClothBufferHelper::addCloth: exceeding maximal number of cloths that can be added."); return; } if (mClothDesc.nbSprings + numSprings > mMaxSprings) { PxGetFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, PX_FL, "PxParticleClothBufferHelper::addCloth: exceeding maximal number of springs that can be added."); return; } if (mClothDesc.nbTriangles + numTriangles > mMaxTriangles) { PxGetFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, PX_FL, "PxParticleClothBufferHelper::addCloth: exceeding maximal number of triangles that can be added."); return; } if (mClothDesc.nbParticles + numParticles > mMaxParticles) { PxGetFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, PX_FL, "PxParticleClothBufferHelper::addCloth: exceeding maximal number of particles that can be added."); return; } mClothDesc.cloths[mClothDesc.nbCloths] = particleCloth; mClothDesc.nbCloths += 1; for (PxU32 i = 0; i < numSprings; ++i) { PxParticleSpring& dst = mClothDesc.springs[mClothDesc.nbSprings + i]; dst = springs[i]; dst.ind0 += mClothDesc.nbParticles; dst.ind1 += mClothDesc.nbParticles; } mClothDesc.nbSprings += numSprings; for (PxU32 i = 0; i < numTriangles*3; ++i) { PxU32& dst = mClothDesc.triangles[mClothDesc.nbTriangles*3 + i]; dst = triangles[i] + mClothDesc.nbParticles; } 
mClothDesc.nbTriangles += numTriangles; PxMemCopy(mClothDesc.restPositions + mClothDesc.nbParticles, restPositions, sizeof(PxVec4)*numParticles); mClothDesc.nbParticles += numParticles; } void addCloth(const PxReal blendScale, const PxReal restVolume, const PxReal pressure, const PxU32* triangles, const PxU32 numTriangles, const PxParticleSpring* springs, const PxU32 numSprings, const PxVec4* restPositions, const PxU32 numParticles) { PX_UNUSED(blendScale); PxParticleCloth particleCloth; //particleCloth.clothBlendScale = blendScale; particleCloth.restVolume = restVolume; particleCloth.pressure = pressure; particleCloth.startVertexIndex = mClothDesc.nbParticles; particleCloth.numVertices = numParticles; particleCloth.startTriangleIndex = mClothDesc.nbTriangles * 3; particleCloth.numTriangles = numTriangles; addCloth(particleCloth, triangles, numTriangles, springs, numSprings, restPositions, numParticles); } PxParticleClothDesc& getParticleClothDesc() { return mClothDesc; } PxU32 mMaxCloths; PxU32 mMaxSprings; PxU32 mMaxTriangles; PxU32 mMaxParticles; PxParticleClothDesc mClothDesc; PxCudaContextManager* mCudaContextManager; }; /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// struct ParticleVolumeBuffersImpl : public PxParticleVolumeBufferHelper, public PxUserAllocated { ParticleVolumeBuffersImpl(PxU32 maxVolumes, PxU32 maxTriangles, PxCudaContextManager* cudaContextManager) { mMaxVolumes = maxVolumes; mNumVolumes = 0; mMaxTriangles = maxTriangles; mNumTriangles = 0; mParticleVolumeMeshes = reinterpret_cast<PxParticleVolumeMesh*>(PX_ALLOC(sizeof(PxParticleVolumeMesh) * maxVolumes, "ParticleVolumeBuffersImpl::mParticleVolumeMeshes")); mTriangles = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32) * maxTriangles * 3, "ParticleVolumeBuffersImpl::mTriangles")); #if PX_SUPPORT_GPU_PHYSX mParticleVolumes = cudaContextManager->allocPinnedHostBuffer<PxParticleVolume>(maxVolumes); 
mCudaContextManager = cudaContextManager; #else PX_UNUSED(cudaContextManager); #endif } void release() { #if PX_SUPPORT_GPU_PHYSX mCudaContextManager->freePinnedHostBuffer(mParticleVolumes); #endif PX_FREE(mParticleVolumeMeshes); PX_FREE(mTriangles); PX_DELETE_THIS; } virtual PxU32 getMaxVolumes() const { return mMaxVolumes; } virtual PxU32 getNumVolumes() const { return mNumVolumes; } virtual PxU32 getMaxTriangles() const { return mMaxTriangles; } virtual PxU32 getNumTriangles() const { return mNumTriangles; } virtual PxParticleVolume* getParticleVolumes() { return mParticleVolumes; } virtual PxParticleVolumeMesh* getParticleVolumeMeshes() { return mParticleVolumeMeshes; } virtual PxU32* getTriangles() { return mTriangles; } virtual void addVolume(const PxParticleVolume& volume, const PxParticleVolumeMesh& volumeMesh, const PxU32* triangles, const PxU32 numTriangles) { if (mNumVolumes < mMaxVolumes && mNumTriangles + numTriangles <= mMaxTriangles) { PX_ASSERT(volumeMesh.startIndex == mNumTriangles); mParticleVolumes[mNumVolumes] = volume; mParticleVolumeMeshes[mNumVolumes] = volumeMesh; mNumVolumes++; for (PxU32 i = 0; i < numTriangles*3; ++i) { mTriangles[mNumTriangles*3 + i] = triangles[i] + volumeMesh.startIndex; } mNumTriangles += numTriangles; } } virtual void addVolume(const PxU32 particleOffset, const PxU32 numParticles, const PxU32* triangles, const PxU32 numTriangles) { if (mNumVolumes < mMaxVolumes && mNumTriangles + numTriangles <= mMaxTriangles) { PxParticleVolume particleVolume; particleVolume.bound.setEmpty(); particleVolume.particleIndicesOffset = particleOffset; particleVolume.numParticles = numParticles; PxParticleVolumeMesh particleVolumeMesh; particleVolumeMesh.startIndex = mNumTriangles; particleVolumeMesh.count = numTriangles; mParticleVolumes[mNumVolumes] = particleVolume; mParticleVolumeMeshes[mNumVolumes] = particleVolumeMesh; mNumVolumes++; for (PxU32 i = 0; i < numTriangles*3; ++i) { mTriangles[mNumTriangles*3 + i] = triangles[i] + 
particleOffset; } mNumTriangles += numTriangles; } } PxU32 mMaxVolumes; PxU32 mNumVolumes; PxU32 mMaxTriangles; PxU32 mNumTriangles; PxParticleVolume* mParticleVolumes; PxParticleVolumeMesh* mParticleVolumeMeshes; PxU32* mTriangles; PxCudaContextManager* mCudaContextManager; }; /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// struct ParticleRigidBuffersImpl : public PxParticleRigidBufferHelper, public PxUserAllocated { ParticleRigidBuffersImpl(PxU32 maxRigids, PxU32 maxParticles, PxCudaContextManager* cudaContextManager) : mCudaContextManager(cudaContextManager) { mRigidDesc.maxRigids = maxRigids; mRigidDesc.numActiveRigids = 0; mMaxParticles = maxParticles; mNumParticles = 0; #if PX_SUPPORT_GPU_PHYSX mCudaContextManager->allocPinnedHostBuffer<PxU32>(mRigidDesc.rigidOffsets, maxRigids + 1); mCudaContextManager->allocPinnedHostBuffer<PxReal>(mRigidDesc.rigidCoefficients, maxRigids); mCudaContextManager->allocPinnedHostBuffer<PxVec4>(mRigidDesc.rigidTranslations, maxRigids); mCudaContextManager->allocPinnedHostBuffer<PxQuat>(mRigidDesc.rigidRotations, maxRigids); mCudaContextManager->allocPinnedHostBuffer<PxVec4>(mRigidDesc.rigidLocalPositions, maxParticles); mCudaContextManager->allocPinnedHostBuffer<PxVec4>(mRigidDesc.rigidLocalNormals, maxParticles); #endif } void release() { #if PX_SUPPORT_GPU_PHYSX mCudaContextManager->freePinnedHostBuffer(mRigidDesc.rigidOffsets); mCudaContextManager->freePinnedHostBuffer(mRigidDesc.rigidCoefficients); mCudaContextManager->freePinnedHostBuffer(mRigidDesc.rigidTranslations); mCudaContextManager->freePinnedHostBuffer(mRigidDesc.rigidRotations); mCudaContextManager->freePinnedHostBuffer(mRigidDesc.rigidLocalPositions); mCudaContextManager->freePinnedHostBuffer(mRigidDesc.rigidLocalNormals); #endif PX_DELETE_THIS; } virtual PxU32 getMaxRigids() const { return mRigidDesc.maxRigids; } virtual PxU32 getNumRigids() const { return 
mRigidDesc.numActiveRigids; } virtual PxU32 getMaxParticles() const { return mMaxParticles; } virtual PxU32 getNumParticles() const { return mNumParticles; } void addRigid(const PxVec3& translation, const PxQuat& rotation, const PxReal coefficient, const PxVec4* localPositions, const PxVec4* localNormals, PxU32 numParticles) { PX_ASSERT(numParticles > 0); const PxU32 numRigids = mRigidDesc.numActiveRigids; if (numParticles > 0 && numRigids < mRigidDesc.maxRigids && mNumParticles + numParticles <= mMaxParticles) { mRigidDesc.rigidOffsets[numRigids] = mNumParticles; mRigidDesc.rigidOffsets[numRigids + 1] = mNumParticles + numParticles; mRigidDesc.rigidTranslations[numRigids] = PxVec4(translation, 0.0f); mRigidDesc.rigidRotations[numRigids] = rotation; mRigidDesc.rigidCoefficients[numRigids] = coefficient; PxMemCopy(mRigidDesc.rigidLocalPositions + mNumParticles, localPositions, numParticles * sizeof(PxVec4)); PxMemCopy(mRigidDesc.rigidLocalNormals + mNumParticles, localNormals, numParticles * sizeof(PxVec4)); mRigidDesc.numActiveRigids += 1; mNumParticles += numParticles; } } PxParticleRigidDesc& getParticleRigidDesc() { return mRigidDesc; } PxU32 mMaxParticles; PxU32 mNumParticles; PxParticleRigidDesc mRigidDesc; PxCudaContextManager* mCudaContextManager; }; /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// PxParticleVolumeBufferHelper* PxCreateParticleVolumeBufferHelper(PxU32 maxVolumes, PxU32 maxTriangles, PxCudaContextManager* cudaContextManager) { PxParticleVolumeBufferHelper* ret = NULL; #if PX_SUPPORT_GPU_PHYSX ret = PX_NEW(ParticleVolumeBuffersImpl)(maxVolumes, maxTriangles, cudaContextManager); #else PX_UNUSED(maxVolumes); PX_UNUSED(maxTriangles); PX_UNUSED(cudaContextManager); #endif return ret; } PxParticleClothBufferHelper* PxCreateParticleClothBufferHelper(const PxU32 maxCloths, const PxU32 maxTriangles, const PxU32 maxSprings, const PxU32 maxParticles, 
PxCudaContextManager* cudaContextManager) { PxParticleClothBufferHelper* ret = NULL; #if PX_SUPPORT_GPU_PHYSX ret = PX_NEW(ParticleClothBuffersImpl)(maxCloths, maxTriangles, maxSprings, maxParticles, cudaContextManager); #else PX_UNUSED(maxCloths); PX_UNUSED(maxTriangles); PX_UNUSED(maxSprings); PX_UNUSED(maxParticles); PX_UNUSED(cudaContextManager); #endif return ret; } PxParticleRigidBufferHelper* PxCreateParticleRigidBufferHelper(const PxU32 maxRigids, const PxU32 maxParticles, PxCudaContextManager* cudaContextManager) { PxParticleRigidBufferHelper* ret = NULL; #if PX_SUPPORT_GPU_PHYSX ret = PX_NEW(ParticleRigidBuffersImpl)(maxRigids, maxParticles, cudaContextManager); #else PX_UNUSED(maxRigids); PX_UNUSED(maxParticles); PX_UNUSED(cudaContextManager); #endif return ret; } } //namespace ExtGpu } //namespace physx
29,866
C++
35.378806
211
0.762004
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/ExtSimpleFactory.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
// ExtSimpleFactory: convenience helpers to create, clone and scale rigid
// actors in one call. Public contracts live in extensions/PxSimpleFactory.h.

#include "foundation/PxMathUtils.h"
#include "foundation/PxQuat.h"
#include "geometry/PxSphereGeometry.h"
#include "geometry/PxBoxGeometry.h"
#include "geometry/PxCapsuleGeometry.h"
#include "geometry/PxConvexMeshGeometry.h"
#include "geometry/PxPlaneGeometry.h"
#include "extensions/PxRigidBodyExt.h"
#include "extensions/PxSimpleFactory.h"
#include "PxPhysics.h"
#include "PxScene.h"
#include "PxRigidStatic.h"
#include "PxRigidDynamic.h"
#include "PxShape.h"
#include "foundation/PxUtilities.h"
#include "foundation/PxInlineArray.h"

using namespace physx;

// True for geometry types that may be simulated on a non-kinematic dynamic
// body: box, sphere, capsule, convex mesh. Planes, triangle meshes and
// heightfields are restricted to static or kinematic actors.
static bool isDynamicGeometry(PxGeometryType::Enum type)
{
	return type == PxGeometryType::eBOX
		|| type == PxGeometryType::eSPHERE
		|| type == PxGeometryType::eCAPSULE
		|| type == PxGeometryType::eCONVEXMESH;
}

namespace physx
{

// Creates a dynamic rigid body at 'transform', attaches 'shape', and derives
// mass/inertia from 'density'. Returns NULL if the transform is invalid, the
// shape cannot be attached, or the mass properties cannot be computed.
PxRigidDynamic* PxCreateDynamic(PxPhysics& sdk, const PxTransform& transform, PxShape& shape, PxReal density)
{
	PX_CHECK_AND_RETURN_NULL(transform.isValid(), "PxCreateDynamic: transform is not valid.");

	PxRigidDynamic* actor = sdk.createRigidDynamic(transform);
	if(actor)
	{
		if(!actor->attachShape(shape))
		{
			actor->release();
			return NULL;
		}
		if(!PxRigidBodyExt::updateMassAndInertia(*actor, density))
		{
			actor->release();
			return NULL;
		}
	}
	return actor;
}

// Convenience overload: builds an exclusive shape from geometry/material
// (posed at 'shapeOffset') and forwards to the PxShape overload above.
// Returns NULL for non-dynamic geometry types or non-positive density.
PxRigidDynamic* PxCreateDynamic(PxPhysics& sdk, const PxTransform& transform, const PxGeometry& geometry, PxMaterial& material, PxReal density, const PxTransform& shapeOffset)
{
	PX_CHECK_AND_RETURN_NULL(transform.isValid(), "PxCreateDynamic: transform is not valid.");
	PX_CHECK_AND_RETURN_NULL(shapeOffset.isValid(), "PxCreateDynamic: shapeOffset is not valid.");

	if(!isDynamicGeometry(geometry.getType()) || density <= 0.0f)
		return NULL;

	PxShape* shape = sdk.createShape(geometry, material, true);
	if(!shape)
		return NULL;
	shape->setLocalPose(shapeOffset);

	// 'shape' is known non-NULL here (checked above), so call directly.
	PxRigidDynamic* body = PxCreateDynamic(sdk, transform, *shape, density);
	shape->release();	// the actor (if created) now holds its own reference
	return body;
}

// Creates a kinematic rigid body and attaches 'shape'. For dynamic-capable
// geometry, mass/inertia come from 'density'; otherwise the shape is demoted
// to a non-simulation shape and unit mass properties are assigned.
PxRigidDynamic* PxCreateKinematic(PxPhysics& sdk, const PxTransform& transform, PxShape& shape, PxReal density)
{
	PX_CHECK_AND_RETURN_NULL(transform.isValid(), "PxCreateKinematic: transform is not valid.");

	bool isDynGeom = isDynamicGeometry(shape.getGeometry().getType());
	if(isDynGeom && density <= 0.0f)
		return NULL;

	PxRigidDynamic* actor = sdk.createRigidDynamic(transform);
	if(actor)
	{
		actor->setRigidBodyFlag(PxRigidBodyFlag::eKINEMATIC, true);

		// Non-dynamic geometry (mesh/heightfield/plane) cannot take part in
		// simulation on a dynamic actor, so disable the simulation flag.
		if(!isDynGeom)
			shape.setFlag(PxShapeFlag::eSIMULATION_SHAPE, false);

		actor->attachShape(shape);

		if(isDynGeom)
			PxRigidBodyExt::updateMassAndInertia(*actor, density);
		else
		{
			// Placeholder mass properties; a kinematic body's motion is
			// driven by targets, not by forces.
			actor->setMass(1.f);
			actor->setMassSpaceInertiaTensor(PxVec3(1.f,1.f,1.f));
		}
	}
	return actor;
}

// Convenience overload: builds an exclusive shape from geometry/material
// (posed at 'shapeOffset') and forwards to the PxShape overload above.
PxRigidDynamic* PxCreateKinematic(PxPhysics& sdk, const PxTransform& transform, const PxGeometry& geometry, PxMaterial& material, PxReal density, const PxTransform& shapeOffset)
{
	PX_CHECK_AND_RETURN_NULL(transform.isValid(), "PxCreateKinematic: transform is not valid.");
	PX_CHECK_AND_RETURN_NULL(shapeOffset.isValid(), "PxCreateKinematic: shapeOffset is not valid.");

	bool isDynGeom = isDynamicGeometry(geometry.getType());
	if(isDynGeom && density <= 0.0f)
		return NULL;

	PxShape* shape = sdk.createShape(geometry, material, true);
	if(!shape)
		return NULL;
	shape->setLocalPose(shapeOffset);

	PxRigidDynamic* body = PxCreateKinematic(sdk, transform, *shape, density);
	shape->release();	// the actor (if created) now holds its own reference
	return body;
}

// Creates a static rigid actor at 'transform' and attaches 'shape'.
PxRigidStatic* PxCreateStatic(PxPhysics& sdk, const PxTransform& transform, PxShape& shape)
{
	PX_CHECK_AND_RETURN_NULL(transform.isValid(), "PxCreateStatic: transform is not valid.");

	PxRigidStatic* s = sdk.createRigidStatic(transform);
	if(s)
		s->attachShape(shape);
	return s;
}

// Convenience overload: builds an exclusive shape from geometry/material
// (posed at 'shapeOffset') and forwards to the PxShape overload above.
PxRigidStatic* PxCreateStatic(PxPhysics& sdk, const PxTransform& transform, const PxGeometry& geometry, PxMaterial& material, const PxTransform& shapeOffset)
{
	PX_CHECK_AND_RETURN_NULL(transform.isValid(), "PxCreateStatic: transform is not valid.");
	PX_CHECK_AND_RETURN_NULL(shapeOffset.isValid(), "PxCreateStatic: shapeOffset is not valid.");

	PxShape* shape = sdk.createShape(geometry, material, true);
	if(!shape)
		return NULL;
	shape->setLocalPose(shapeOffset);

	PxRigidStatic* s = PxCreateStatic(sdk, transform, *shape);
	shape->release();	// the actor (if created) now holds its own reference
	return s;
}

// Creates a static actor carrying a plane shape from the plane equation.
// The plane normal must be normalized; returns NULL otherwise.
PxRigidStatic* PxCreatePlane(PxPhysics& sdk, const PxPlane& plane, PxMaterial& material)
{
	PX_CHECK_AND_RETURN_NULL(plane.n.isFinite(), "PxCreatePlane: plane normal is not valid.");

	if (!plane.n.isNormalized())
		return NULL;

	return PxCreateStatic(sdk, PxTransformFromPlaneEquation(plane), PxPlaneGeometry(), material);
}

// Creates a new shape duplicating 'from': same geometry, materials, poses,
// offsets, filter data and torsional-friction settings. Returns NULL if the
// underlying shape creation fails.
PxShape* PxCloneShape(PxPhysics& physics, const PxShape& from, bool isExclusive)
{
	PxInlineArray<PxMaterial*, 64> materials;
	PxU16 materialCount = from.getNbMaterials();
	materials.resize(materialCount);
	from.getMaterials(materials.begin(), materialCount);

	PxShape* to = physics.createShape(from.getGeometry(), materials.begin(), materialCount, isExclusive, from.getFlags());
	if(!to)	// guard against failed creation, consistent with the factories above
		return NULL;

	to->setLocalPose(from.getLocalPose());
	to->setContactOffset(from.getContactOffset());
	to->setRestOffset(from.getRestOffset());
	to->setSimulationFilterData(from.getSimulationFilterData());
	to->setQueryFilterData(from.getQueryFilterData());
	to->setTorsionalPatchRadius(from.getTorsionalPatchRadius());
	to->setMinTorsionalPatchRadius(from.getMinTorsionalPatchRadius());
	return to;
}

// Copies the actor-level state shared by static and dynamic actors: shapes
// (shared shapes are re-attached, exclusive ones are cloned), actor flags,
// owner client and dominance group.
static void copyStaticProperties(PxPhysics& physics, PxRigidActor& to, const PxRigidActor& from)
{
	PxU32 shapeCount = from.getNbShapes();

	PxInlineArray<PxShape*, 64> shapes;
	shapes.resize(shapeCount);
	from.getShapes(shapes.begin(), shapeCount);

	for(PxU32 i = 0; i < shapeCount; i++)
	{
		PxShape* s = shapes[i];
		if(!s->isExclusive())
			to.attachShape(*s);
		else
		{
			PxShape* newShape = physx::PxCloneShape(physics, *s, true);
			to.attachShape(*newShape);
			newShape->release();	// actor now owns the clone's reference
		}
	}

	to.setActorFlags(from.getActorFlags());
	to.setOwnerClient(from.getOwnerClient());
	to.setDominanceGroup(from.getDominanceGroup());
}

// Clones a rigid actor's static properties into a new static actor placed at
// 'transform'. Returns NULL if actor creation fails.
PxRigidStatic* PxCloneStatic(PxPhysics& physicsSDK, const PxTransform& transform, const PxRigidActor& from)
{
	PxRigidStatic* to = physicsSDK.createRigidStatic(transform);
	if(!to)
		return NULL;

	copyStaticProperties(physicsSDK, *to, from);
	return to;
}

// Clones a dynamic actor into a new one at 'transform', copying shapes plus
// all body state: mass properties, velocities, damping, solver/sleep/CCD
// settings, kinematic target (if any) and lock flags.
PxRigidDynamic* PxCloneDynamic(PxPhysics& physicsSDK, const PxTransform& transform, const PxRigidDynamic& from)
{
	PxRigidDynamic* to = physicsSDK.createRigidDynamic(transform);
	if(!to)
		return NULL;

	copyStaticProperties(physicsSDK, *to, from);

	to->setRigidBodyFlags(from.getRigidBodyFlags());

	to->setMass(from.getMass());
	to->setMassSpaceInertiaTensor(from.getMassSpaceInertiaTensor());
	to->setCMassLocalPose(from.getCMassLocalPose());

	to->setLinearVelocity(from.getLinearVelocity());
	to->setAngularVelocity(from.getAngularVelocity());

	to->setLinearDamping(from.getLinearDamping());
	to->setAngularDamping(from.getAngularDamping());

	PxU32 posIters, velIters;
	from.getSolverIterationCounts(posIters, velIters);
	to->setSolverIterationCounts(posIters, velIters);

	to->setMaxLinearVelocity(from.getMaxLinearVelocity());
	to->setMaxAngularVelocity(from.getMaxAngularVelocity());
	to->setMaxDepenetrationVelocity(from.getMaxDepenetrationVelocity());
	to->setSleepThreshold(from.getSleepThreshold());
	to->setStabilizationThreshold(from.getStabilizationThreshold());
	to->setMinCCDAdvanceCoefficient(from.getMinCCDAdvanceCoefficient());
	to->setContactReportThreshold(from.getContactReportThreshold());
	to->setMaxContactImpulse(from.getMaxContactImpulse());

	PxTransform target;
	if (from.getKinematicTarget(target))
		to->setKinematicTarget(target);

	to->setRigidDynamicLockFlags(from.getRigidDynamicLockFlags());

	return to;
}

// Scales only the translational part of a transform; the rotation is
// unaffected by uniform scaling.
static PxTransform scalePosition(const PxTransform& t, PxReal scale)
{
	return PxTransform(t.p*scale, t.q);
}

// Uniformly scales an actor in place: shape local poses and geometry sizes
// are multiplied by 'scale'; when 'scaleMassProps' is set on a dynamic actor,
// mass scales by scale^3 and the inertia tensor by scale^5 (mass * length^2).
void PxScaleRigidActor(PxRigidActor& actor, PxReal scale, bool scaleMassProps)
{
	PX_CHECK_AND_RETURN(scale > 0, "PxScaleRigidActor requires that the scale parameter is greater than zero");

	PxInlineArray<PxShape*, 64> shapes;
	shapes.resize(actor.getNbShapes());
	actor.getShapes(shapes.begin(), shapes.size());

	for(PxU32 i=0;i<shapes.size();i++)
	{
		shapes[i]->setLocalPose(scalePosition(shapes[i]->getLocalPose(), scale));
		PxGeometryHolder h(shapes[i]->getGeometry());

		switch(h.getType())
		{
		case PxGeometryType::eSPHERE:
			h.sphere().radius *= scale;
			break;
		case PxGeometryType::ePLANE:
			break;	// a plane is infinite; nothing to scale
		case PxGeometryType::eCAPSULE:
			h.capsule().halfHeight *= scale;
			h.capsule().radius *= scale;
			break;
		case PxGeometryType::eBOX:
			h.box().halfExtents *= scale;
			break;
		case PxGeometryType::eCONVEXMESH:
			h.convexMesh().scale.scale *= scale;
			break;
		case PxGeometryType::eTRIANGLEMESH:
			h.triangleMesh().scale.scale *= scale;
			break;
		case PxGeometryType::eHEIGHTFIELD:
			h.heightField().heightScale *= scale;
			h.heightField().rowScale *= scale;
			h.heightField().columnScale *= scale;
			break;
		default:
			PX_ASSERT(0);
		}
		shapes[i]->setGeometry(h.any());
	}

	if(!scaleMassProps)
		return;

	PxRigidDynamic* dynamic = (&actor)->is<PxRigidDynamic>();
	if(!dynamic)
		return;

	PxReal scale3 = scale*scale*scale;
	dynamic->setMass(dynamic->getMass()*scale3);
	// Inertia ~ mass * length^2 => scale^3 * scale^2 = scale^5.
	dynamic->setMassSpaceInertiaTensor(dynamic->getMassSpaceInertiaTensor()*scale3*scale*scale);
	dynamic->setCMassLocalPose(scalePosition(dynamic->getCMassLocalPose(), scale));
}

}
11,482
C++
29.703208
119
0.73637
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/ExtParticleClothCooker.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
// ExtParticleClothCooker: builds the per-particle constraint set (stretch,
// shear and bending constraints) plus a triangle index buffer for a particle
// cloth from an input triangle mesh. Tokens unchanged; comments added.

#include "extensions/PxParticleClothCooker.h"
#include "foundation/PxArray.h"
#include "foundation/PxSort.h"
#include "foundation/PxHashMap.h"
#include "foundation/PxHashSet.h"
#include "GuInternal.h"

namespace physx
{
namespace ExtGpu
{

namespace
{

// Undirected mesh edge. Vertex indices are stored sorted (a < b) so that the
// two half-edges of a shared edge hash/compare equal. Each edge remembers up
// to two incident triangles and, per triangle, whether it was that triangle's
// longest edge (a quad diagonal candidate after triangulation).
struct Edge
{
	Edge(PxU32 v0, PxU32 v1, PxU32 triangle0, PxU32 triangle1, bool isLongest0, bool isLongest1)
	{
		a = PxMin(v0, v1);
		b = PxMax(v0, v1);
		triangleA = triangle0;
		triangleB = triangle1;
		longestA = isLongest0;
		longestB = isLongest1;
		PX_ASSERT_WITH_MESSAGE(a != b, "PxCreateInflatableFromMesh encountered a degenerate edge inside a triangle.");
	}
	PxU32 a, b;					// sorted vertex indices, a < b
	PxU32 triangleA, triangleB;	// incident triangles; 0xffffffff = not set
	bool longestA, longestB;	//true if it is the longest edge of triangle

	bool operator<(const Edge& other) const
	{
		if(a == other.a)
			return b < other.b;
		return a < other.a;
	}
	bool operator==(const Edge& other) const
	{
		if(a == other.a)
			return b == other.b;
		return false;
	}
};

// Hash functor for Edge; XOR of the two vertex hashes is symmetric, which is
// fine here since (a, b) is already stored in sorted order.
class EdgeHash
{
public:
	uint32_t operator()(const Edge& e) const
	{
		return PxComputeHash(e.a) ^ PxComputeHash(e.b);
	}
	bool equal(const Edge& e0, const Edge& e1) const
	{
		return e0.a == e1.a && e0.b == e1.b;
	}
};

// Hash set of unique edges. Inserting a duplicate edge merges the second
// triangle's data (triangle index + longest flag) into the existing entry.
class EdgeSet : public PxHashSet<Edge, EdgeHash>
{
public:
	typedef PxHashSet<Edge, EdgeHash> Base;
	typedef Base::Iterator Iterator;

	void insert(const Edge& newEdge)
	{
		PX_ASSERT(newEdge.a < newEdge.b);
		bool exists;
		Edge* edge = mBase.create(newEdge, exists);
		if (!exists)
		{
			PX_PLACEMENT_NEW(edge, Edge)(newEdge);
		}
		else
		{
			// A manifold mesh edge is shared by at most two triangles.
			PX_ASSERT_WITH_MESSAGE(edge->triangleB == 0xffffffff, "Edge with more than 2 triangles found in PxCreateInflatableFromMesh");
			edge->triangleB = newEdge.triangleA; //Add triangle info from duplicate to Unique edge
			edge->longestB = newEdge.longestA;
		}
	}
};

// In-place-capable inclusive prefix sum: dest[i] = sum(begin[0..i]).
template<typename T>
void partialSum(T* begin, T* end, T* dest)
{
	*dest = *begin;
	dest++;
	begin++;
	while(begin!=end)
	{
		*dest = dest[-1] + *begin;
		dest++;
		begin++;
	}
}

/*
outAdjacencies is a list of adjacent vertices in outAdjacencies
outAdjacencyIndices is a list of indices to quickly find adjacent vertices in outAdjacencies.
all the adjacent vertices to vertex V are stored in outAdjacencies starting at outAdjacencyIndices[V]
and ending at outAdjacencyIndices[V+1], so the first vertex is
outAdjacencies[outAdjacencyIndices[V]], and the last one is outAdjacencies[outAdjacencyIndices[V+1]-1].
(This is a CSR-style layout built with a counting pass, a prefix sum, and a
decrementing fill pass. With ignoreDiagonals set, edges flagged as the longest
edge of both incident triangles - i.e. quad diagonals - are excluded.)
*/
void gatherAdjacencies(PxArray<PxU32>& outAdjacencyIndices, PxArray<PxU32>& outAdjacencies,
	PxU32 vertexCount, PxArray<Edge> const& inUniqueEdges, bool ignoreDiagonals = false
)
{
	PX_ASSERT(outAdjacencyIndices.size() == 0);
	PX_ASSERT(outAdjacencies.size() == 0);
	outAdjacencyIndices.resize(vertexCount+1, 0);

	//calculate valency
	for(PxU32 i = 0; i < inUniqueEdges.size(); i++)
	{
		const Edge& edge = inUniqueEdges[i];
		if(ignoreDiagonals && edge.longestA && edge.longestB)
			continue;
		outAdjacencyIndices[edge.a]++;
		outAdjacencyIndices[edge.b]++;
	}

	partialSum(outAdjacencyIndices.begin(), outAdjacencyIndices.end(), outAdjacencyIndices.begin());
	// NOTE(review): after the prefix sum the last two entries should already
	// be equal (no counts are ever recorded at index vertexCount), so this
	// assignment looks redundant but is harmless - confirm before removing.
	outAdjacencyIndices.back() = outAdjacencyIndices[vertexCount-1];
	outAdjacencies.resize(outAdjacencyIndices.back(),0xffffffff);

	// Fill pass: decrement each vertex's slot index and write the neighbor,
	// rewinding outAdjacencyIndices back to each vertex's range start.
	for(PxU32 i = 0; i < inUniqueEdges.size(); i++)
	{
		const Edge& edge = inUniqueEdges[i];
		if(ignoreDiagonals && edge.longestA && edge.longestB)
			continue;
		outAdjacencyIndices[edge.a]--;
		outAdjacencies[outAdjacencyIndices[edge.a]]=edge.b;
		outAdjacencyIndices[edge.b]--;
		outAdjacencies[outAdjacencyIndices[edge.b]] = edge.a;
	}
}

// Returns aA if vA > vB, else aB (argmax-style selector).
template <typename T, typename A>
A MaxArg(T const& vA, T const& vB, A const& aA, A const& aB)
{
	if(vA > vB)
		return aA;
	return aB;
}

// Returns the vertex of the triangle starting at 'triangleIndex' (an index
// into the flat index buffer) that is neither a nor b.
PxU32 GetOppositeVertex(PxU32* inTriangleIndices, PxU32 triangleIndex, PxU32 a, PxU32 b)
{
	for(int i = 0; i<3; i++)
	{
		if(inTriangleIndices[triangleIndex+i] != a && inTriangleIndices[triangleIndex + i] !=b)
			return inTriangleIndices[triangleIndex + i];
	}
	PX_ASSERT_WITH_MESSAGE(0, "Degenerate Triangle found in PxCreateInflatableFromMesh");
	return 0;
}

// Given a quad diagonal (an edge shared by two triangles), builds the other
// diagonal of that quad from the two opposite vertices. The result carries no
// triangle info (0xffffffff) and is flagged longest on both sides.
Edge GetAlternateDiagonal(Edge const& edge, PxU32* inTriangleIndices)
{
	PxU32 vA = GetOppositeVertex(inTriangleIndices, edge.triangleA, edge.a, edge.b);
	PxU32 vB = GetOppositeVertex(inTriangleIndices, edge.triangleB, edge.a, edge.b);
	bool longestA = true;
	bool longestB = true;
	PxU32 tA = 0xffffffff;
	PxU32 tB = 0xffffffff;
	return Edge(vA, vB, tA, tB, longestA, longestB);
}

} //namespace

// Implementation of the cloth cooker interface. Holds non-owning views of the
// caller's vertex/index buffers and produces the constraint and triangle
// buffers on cookConstraints().
class PxParticleClothCookerImpl : public PxParticleClothCooker, public PxUserAllocated
{
public:
	PxParticleClothCookerImpl(PxU32 vertexCount, physx::PxVec4* inVertices, PxU32 triangleIndexCount, PxU32* inTriangleIndices,
		PxU32 constraintTypeFlags, PxVec3 verticalDirection, PxReal bendingConstraintMaxAngle)
		: mVertexCount(vertexCount)
		, mVertices(inVertices)
		, mTriangleIndexCount(triangleIndexCount)
		, mTriangleIndices(inTriangleIndices)
		, mConstraintTypeFlags(constraintTypeFlags)
		, mVerticalDirection(verticalDirection)
		, mBendingConstraintMaxAngle(bendingConstraintMaxAngle)
	{
	}

	virtual void release()
	{
		PX_DELETE_THIS;
	}

	/**
	\brief generate the constraint and triangle per vertex information.
	*/
	virtual void cookConstraints(const PxParticleClothConstraint* constraints, const PxU32 numConstraints);

	virtual PxU32* getTriangleIndices() { return mTriangleIndexBuffer.begin(); }
	virtual PxU32 getTriangleIndicesCount() { return mTriangleIndexBuffer.size(); }

	virtual PxParticleClothConstraint* getConstraints() { return mConstraintBuffer.begin(); }
	virtual PxU32 getConstraintCount() { return mConstraintBuffer.size(); }

	/**
	\brief Computes the volume of a closed mesh and the contraintScale. Expects vertices in local space - 'close' to origin.
	*/
	virtual void calculateMeshVolume();
	virtual float getMeshVolume() {return mMeshVolume;}

private:
	PxArray<PxU32> mTriangleIndexBuffer;
	PxArray<PxParticleClothConstraint> mConstraintBuffer;

	PxU32 mVertexCount;
	physx::PxVec4* mVertices; //we don't own this
	PxU32 mTriangleIndexCount;
	PxU32* mTriangleIndices; //we don't own this
	PxU32 mConstraintTypeFlags;
	PxVec3 mVerticalDirection;
	float mBendingConstraintMaxAngle;
	float mMeshVolume;

	// Appends one triangle's indices to mTriangleIndexBuffer and bumps the
	// per-vertex triangle counts.
	void addTriangle(PxArray<PxU32>& trianglesPerVertex, PxU32 triangleIndex)
	{
		for(int j = 0; j < 3; j++)
		{
			PxU32 vertexIndex = mTriangleIndices[triangleIndex + j];
			mTriangleIndexBuffer.pushBack(vertexIndex);
			trianglesPerVertex[vertexIndex]++;
		}
	}
};

void PxParticleClothCookerImpl::cookConstraints(const PxParticleClothConstraint* constraints, const PxU32 numConstraints)
{
	EdgeSet edgeSet;
	edgeSet.reserve(mTriangleIndexCount);

	PxArray<PxU32> trianglesPerVertex;
	trianglesPerVertex.resize(mVertexCount, 0);
	mTriangleIndexBuffer.clear();
	mTriangleIndexBuffer.reserve(mTriangleIndexCount);
	mConstraintBuffer.clear();
	mConstraintBuffer.reserve(mVertexCount*12);

	//Add all edges to Edges
	for(PxU32 i = 0; i<mTriangleIndexCount; i+=3)
	{
		//Get vertex indices
		PxU32 v0 = mTriangleIndices[i + 0];
		PxU32 v1 = mTriangleIndices[i + 1];
		PxU32 v2 = mTriangleIndices[i + 2];

		//Get vertex points
		PxVec3 p0 = mVertices[v0].getXYZ();
		PxVec3 p1 = mVertices[v1].getXYZ();
		PxVec3 p2 = mVertices[v2].getXYZ();

		//check which edge is the longest
		float len0 = (p0 - p1).magnitude();
		float len1 = (p1 - p2).magnitude();
		float len2 = (p2 - p0).magnitude();
		int longest = MaxArg(len0, PxMax(len1,len2), 0, MaxArg(len1, len2, 1, 2));

		//Store edges
		edgeSet.insert(Edge(v0, v1, i, 0xffffffff, longest == 0, false));
		edgeSet.insert(Edge(v1, v2, i, 0xffffffff, longest == 1, false));
		edgeSet.insert(Edge(v2, v0, i, 0xffffffff, longest == 2, false));

		//Add triangle to mTriangleIndexBuffer and increment trianglesPerVertex values
		addTriangle(trianglesPerVertex,i);
	}

	if (constraints)
	{
		//skip constraints cooking if provided by user
		mConstraintBuffer.assign(constraints, constraints + numConstraints);
		return;
	}

	// The counts were only needed while filling the triangle buffer.
	trianglesPerVertex.clear();
	trianglesPerVertex.shrink();

	PxArray<Edge> uniqueEdges;
	uniqueEdges.reserve(mTriangleIndexCount); //over allocate to avoid resizes
	for (EdgeSet::Iterator iter = edgeSet.getIterator(); !iter.done(); ++iter)
	{
		const Edge& e = *iter;
		uniqueEdges.pushBack(e);
	}

	//Maximum angle before it is a horizontal constraint
	const float cosAngle45 = cosf(45.0f / 360.0f * PxTwoPi);

	//Add all horizontal, vertical and shearing constraints
	PxU32 constraintCount = uniqueEdges.size(); //we are going to push back more edges, but we don't need to process them
	for(PxU32 i = 0; i < constraintCount; i++)
	{
		const Edge& edge = uniqueEdges[i];
		PxParticleClothConstraint c;
		c.particleIndexA = edge.a;
		c.particleIndexB = edge.b;
		//Get vertices's
		PxVec3 vA = mVertices[c.particleIndexA].getXYZ();
		PxVec3 vB = mVertices[c.particleIndexB].getXYZ();
		//Calculate rest length
		c.length = (vA - vB).magnitude();
		// An edge that is the longest edge of both incident triangles is
		// treated as a quad diagonal => shearing constraint pair.
		if(edge.longestA && edge.longestB && (mConstraintTypeFlags & PxParticleClothConstraint::eTYPE_DIAGONAL_CONSTRAINT))
		{
			//Shearing constraint
			c.constraintType = c.eTYPE_DIAGONAL_CONSTRAINT;
			//add constraint
			mConstraintBuffer.pushBack(c);

			//We only have one of the quad diagonals in a triangle mesh, get the other one here
			const Edge alternateEdge = GetAlternateDiagonal(edge, mTriangleIndices);
			c.particleIndexA = alternateEdge.a;
			c.particleIndexB = alternateEdge.b;
			//Get vertices's
			PxVec3 vA2 = mVertices[c.particleIndexA].getXYZ();
			PxVec3 vB2 = mVertices[c.particleIndexB].getXYZ();
			//Calculate rest length
			c.length = (vA2 - vB2).magnitude();
			//add constraint
			mConstraintBuffer.pushBack(c);

			if (mConstraintTypeFlags & PxParticleClothConstraint::eTYPE_DIAGONAL_BENDING_CONSTRAINT)
			{
				if (!edgeSet.contains(alternateEdge))
				{
					edgeSet.insert(alternateEdge);
					uniqueEdges.pushBack(alternateEdge); //Add edge for bending constraint step
				}
			}
		}
		else
		{
			//horizontal/vertical constraint
			PxVec3 dir = (vA - vB) / c.length;
			if(mVerticalDirection.dot(dir)> cosAngle45)
				c.constraintType = c.eTYPE_VERTICAL_CONSTRAINT;
			else
				c.constraintType = c.eTYPE_HORIZONTAL_CONSTRAINT;

			if(mConstraintTypeFlags & c.constraintType)
			{
				//add constraint
				mConstraintBuffer.pushBack(c);
			}
		}
	}

	if(!(mConstraintTypeFlags & PxParticleClothConstraint::eTYPE_BENDING_CONSTRAINT))
		return;

	//Get adjacency information needed for the bending constraints
	PxArray<PxU32> adjacencyIndices;
	PxArray<PxU32> adjacencies;
	gatherAdjacencies(adjacencyIndices, adjacencies, mVertexCount, uniqueEdges, !(mConstraintTypeFlags & PxParticleClothConstraint::eTYPE_DIAGONAL_BENDING_CONSTRAINT));

	//Maximum angle we consider to be parallel for the bending constraints
	const float maxCosAngle = PxCos(mBendingConstraintMaxAngle);

	for(PxU32 i = 0; i<mVertexCount; i++)
	{
		//For each vertex, find all adjacent vertex pairs, and add bending constraints for pairs that form roughly a straight line
		PxVec3 center = mVertices[i].getXYZ();
		for(PxU32 adjA = adjacencyIndices[i]; PxI32(adjA) < PxI32(adjacencyIndices[i+1])-1; adjA++)
		{
			PxVec3 a = mVertices[adjacencies[adjA]].getXYZ();
			PxVec3 dir1 = (a-center).getNormalized();

			float bestCosAngle = -1.0f;
			PxU32 bestAdjB = 0xffffffff;

			//Choose the most parallel adjB
			for(PxU32 adjB = adjA+1; adjB < adjacencyIndices[i + 1]; adjB++)
			{
				PxVec3 b = mVertices[adjacencies[adjB]].getXYZ();
				PxVec3 dir2 = (b - center).getNormalized();
				float cosAngleAbs = PxAbs(dir1.dot(dir2));
				if(cosAngleAbs > bestCosAngle)
				{
					bestCosAngle = cosAngleAbs;
					bestAdjB = adjB;
				}
			}

			//Check if the lines a-center and center-b are roughly parallel
			if(bestCosAngle > maxCosAngle)
			{
				//Add bending constraint
				PxParticleClothConstraint c;
				c.particleIndexA = adjacencies[adjA];
				c.particleIndexB = adjacencies[bestAdjB];
				PX_ASSERT(c.particleIndexA != c.particleIndexB);
				//Get vertices's
				PxVec3 vA = mVertices[c.particleIndexA].getXYZ();
				PxVec3 vB = mVertices[c.particleIndexB].getXYZ();
				//Calculate rest length
				c.length = (vA - vB).magnitude();
				c.constraintType = c.eTYPE_BENDING_CONSTRAINT;
				//add constraint
				mConstraintBuffer.pushBack(c);
			}
		}
	}
}

void PxParticleClothCookerImpl::calculateMeshVolume()
{
	// the physx api takes volume*6 now.
	mMeshVolume = Gu::computeTriangleMeshVolume(mVertices, mTriangleIndices, mTriangleIndexCount / 3) * 6.0f;
}

} // namespace ExtGpu

// Public factory: allocates the cooker implementation. The caller releases it
// via PxParticleClothCooker::release().
ExtGpu::PxParticleClothCooker* PxCreateParticleClothCooker(PxU32 vertexCount, PxVec4* inVertices, PxU32 triangleIndexCount, PxU32* inTriangleIndices,
	PxU32 constraintTypeFlags, PxVec3 verticalDirection, PxReal bendingConstraintMaxAngle)
{
	return PX_NEW(ExtGpu::PxParticleClothCookerImpl)(vertexCount, inVertices, triangleIndexCount, inTriangleIndices, constraintTypeFlags, verticalDirection, bendingConstraintMaxAngle);
}

} // namespace physx
14,643
C++
30.492473
165
0.7381