NVIDIA-Omniverse/PhysX/physx/include/PxScene.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_SCENE_H #define PX_SCENE_H /** \addtogroup physics @{ */ #include "PxSceneQuerySystem.h" #include "PxSceneDesc.h" #include "PxVisualizationParameter.h" #include "PxSimulationStatistics.h" #include "PxClient.h" #include "task/PxTask.h" #include "PxArticulationFlag.h" #include "PxSoftBodyFlag.h" #include "PxHairSystemFlag.h" #include "PxActorData.h" #include "PxParticleSystemFlag.h" #include "PxParticleSolverType.h" #include "cudamanager/PxCudaTypes.h" #include "pvd/PxPvdSceneClient.h" #if !PX_DOXYGEN namespace physx { #endif class PxCollection; class PxConstraint; class PxSimulationEventCallback; class PxPhysics; class PxAggregate; class PxRenderBuffer; class PxArticulationReducedCoordinate; class PxParticleSystem; struct PxContactPairHeader; typedef PxU8 PxDominanceGroup; class PxPvdSceneClient; class PxSoftBody; class PxFEMCloth; class PxHairSystem; /** \brief Expresses the dominance relationship of a contact. For the time being only three settings are permitted: (1, 1), (0, 1), and (1, 0). @see getDominanceGroup() PxDominanceGroup PxScene::setDominanceGroupPair() */ struct PxDominanceGroupPair { PxDominanceGroupPair(PxU8 a, PxU8 b) : dominance0(a), dominance1(b) {} PxU8 dominance0; PxU8 dominance1; }; /** \brief Identifies each type of actor for retrieving actors from a scene. \note #PxArticulationLink objects are not supported. Use the #PxArticulationReducedCoordinate object to retrieve all its links. @see PxScene::getActors(), PxScene::getNbActors() */ struct PxActorTypeFlag { enum Enum { /** \brief A static rigid body @see PxRigidStatic */ eRIGID_STATIC = (1 << 0), /** \brief A dynamic rigid body @see PxRigidDynamic */ eRIGID_DYNAMIC = (1 << 1) }; }; /** \brief Collection of set bits defined in PxActorTypeFlag. 
@see PxActorTypeFlag */ typedef PxFlags<PxActorTypeFlag::Enum,PxU16> PxActorTypeFlags; PX_FLAGS_OPERATORS(PxActorTypeFlag::Enum,PxU16) class PxActor; /** \brief Broad-phase callback to receive broad-phase related events. Each broadphase callback object is associated with a PxClientID. It is possible to register different callbacks for different clients. The callback functions are called this way: - for shapes/actors, the callback assigned to the actors' clients are used - for aggregates, the callbacks assigned to clients from aggregated actors are used \note SDK state should not be modified from within the callbacks. In particular objects should not be created or destroyed. If state modification is needed then the changes should be stored to a buffer and performed after the simulation step. <b>Threading:</b> It is not necessary to make this class thread safe as it will only be called in the context of the user thread. @see PxSceneDesc PxScene.setBroadPhaseCallback() PxScene.getBroadPhaseCallback() */ class PxBroadPhaseCallback { public: virtual ~PxBroadPhaseCallback() {} /** \brief Out-of-bounds notification. This function is called when an object leaves the broad-phase. \param[in] shape Shape that left the broad-phase bounds \param[in] actor Owner actor */ virtual void onObjectOutOfBounds(PxShape& shape, PxActor& actor) = 0; /** \brief Out-of-bounds notification. This function is called when an aggregate leaves the broad-phase. \param[in] aggregate Aggregate that left the broad-phase bounds */ virtual void onObjectOutOfBounds(PxAggregate& aggregate) = 0; }; /** \brief A scene is a collection of bodies and constraints which can interact. The scene simulates the behavior of these objects over time. Several scenes may exist at the same time, but each body or constraint is specific to a scene -- they may not be shared. @see PxSceneDesc PxPhysics.createScene() release() */ class PxScene : public PxSceneSQSystem { protected: /************************************************************************************************/ /** @name Basics */ //@{ PxScene() : userData(NULL) {} virtual ~PxScene() {} public: /** \brief Deletes the scene. Removes any actors and constraint shaders from this scene (if the user hasn't already done so). Be sure to not keep a reference to this object after calling release. Avoid release calls while the scene is simulating (in between simulate() and fetchResults() calls). @see PxPhysics.createScene() */ virtual void release() = 0; /** \brief Sets a scene flag. You can only set one flag at a time. \note Not all flags are mutable and changing some will result in an error. Please check #PxSceneFlag to see which flags can be changed. @see PxSceneFlag */ virtual void setFlag(PxSceneFlag::Enum flag, bool value) = 0; /** \brief Get the scene flags. \return The scene flags. See #PxSceneFlag @see PxSceneFlag */ virtual PxSceneFlags getFlags() const = 0; /** \brief Set new scene limits. \note Increase the maximum capacity of various data structures in the scene. The new capacities will be at least as large as required to deal with the objects currently in the scene. Further, these values are for preallocation and do not represent hard limits. \param[in] limits Scene limits. @see PxSceneLimits */ virtual void setLimits(const PxSceneLimits& limits) = 0; /** \brief Get current scene limits. \return Current scene limits. @see PxSceneLimits */ virtual PxSceneLimits getLimits() const = 0; /** \brief Call this method to retrieve the Physics SDK. 
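	// A minimal sketch of a broad-phase callback, based on the PxBroadPhaseCallback interface declared
	// above. The class name is an illustrative assumption, not part of the SDK; the callback is
	// registered with PxScene::setBroadPhaseCallback() (documented further below).
	class MyBroadPhaseCallback : public PxBroadPhaseCallback
	{
	public:
		virtual void onObjectOutOfBounds(PxShape& shape, PxActor& actor)
		{
			// Do not create or destroy SDK objects here; only record the event for later processing.
			PX_UNUSED(shape);
			PX_UNUSED(actor);
		}
		virtual void onObjectOutOfBounds(PxAggregate& aggregate)
		{
			PX_UNUSED(aggregate);
		}
	};
	// Usage (assuming "scene" is a valid PxScene* and "gCallback" outlives the scene):
	//   scene->setBroadPhaseCallback(&gCallback);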
\return The physics SDK this scene is associated with. @see PxPhysics */ virtual PxPhysics& getPhysics() = 0; /** \brief Retrieves the scene's internal timestamp, increased each time a simulation step is completed. \return scene timestamp */ virtual PxU32 getTimestamp() const = 0; /** \brief Sets a name string for the Scene that can be retrieved with getName(). This is for debugging and is not used by the SDK. The string is not copied by the SDK, only the pointer is stored. \param[in] name String to set the objects name to. <b>Default:</b> NULL @see getName() */ virtual void setName(const char* name) = 0; /** \brief Retrieves the name string set with setName(). \return Name string associated with the Scene. @see setName() */ virtual const char* getName() const = 0; //@} /************************************************************************************************/ /** @name Add/Remove Articulations */ //@{ /** \brief Adds an articulation to this scene. \note If the articulation is already assigned to a scene (see #PxArticulationReducedCoordinate::getScene), the call is ignored and an error is issued. \param[in] articulation The articulation to add to the scene. \return True if success @see PxArticulationReducedCoordinate */ virtual bool addArticulation(PxArticulationReducedCoordinate& articulation) = 0; /** \brief Removes an articulation from this scene. \note If the articulation is not part of this scene (see #PxArticulationReducedCoordinate::getScene), the call is ignored and an error is issued. \note If the articulation is in an aggregate it will be removed from the aggregate. \param[in] articulation The articulation to remove from the scene. \param[in] wakeOnLostTouch Specifies whether touching objects from the previous frame should get woken up in the next frame. Only applies to PxArticulationReducedCoordinate and PxRigidActor types. @see PxArticulationReducedCoordinate, PxAggregate */ virtual void removeArticulation(PxArticulationReducedCoordinate& articulation, bool wakeOnLostTouch = true) = 0; //@} /************************************************************************************************/ /** @name Add/Remove Actors */ //@{ /** \brief Adds an actor to this scene. \note If the actor is already assigned to a scene (see #PxActor::getScene), the call is ignored and an error is issued. \note If the actor has an invalid constraint, in checked builds the call is ignored and an error is issued. \note You can not add individual articulation links (see #PxArticulationLink) to the scene. Use #addArticulation() instead. \note If the actor is a PxRigidActor then each assigned PxConstraint object will get added to the scene automatically if it connects to another actor that is part of the scene already. \note When a BVH is provided the actor shapes are grouped together. The scene query pruning structure inside PhysX SDK will store/update one bound per actor. The scene queries against such an actor will query actor bounds and then make a local space query against the provided BVH, which is in actor's local space. \param[in] actor Actor to add to scene. \param[in] bvh BVH for actor shapes. \return True if success @see PxActor, PxConstraint::isValid(), PxBVH */ virtual bool addActor(PxActor& actor, const PxBVH* bvh = NULL) = 0; /** \brief Adds actors to this scene. Only supports actors of type PxRigidStatic and PxRigidDynamic. \note This method only supports actors of type PxRigidStatic and PxRigidDynamic. For other actors, use addActor() instead. 
For articulation links, use addArticulation(). \note If one of the actors is already assigned to a scene (see #PxActor::getScene), the call is ignored and an error is issued. \note If an actor in the array contains an invalid constraint, in checked builds the call is ignored and an error is issued. \note If an actor in the array is a PxRigidActor then each assigned PxConstraint object will get added to the scene automatically if it connects to another actor that is part of the scene already. \note this method is optimized for high performance. \param[in] actors Array of actors to add to scene. \param[in] nbActors Number of actors in the array. \return True if success @see PxActor, PxConstraint::isValid() */ virtual bool addActors(PxActor*const* actors, PxU32 nbActors) = 0; /** \brief Adds a pruning structure together with its actors to this scene. Only supports actors of type PxRigidStatic and PxRigidDynamic. \note This method only supports actors of type PxRigidStatic and PxRigidDynamic. For other actors, use addActor() instead. For articulation links, use addArticulation(). \note If an actor in the pruning structure contains an invalid constraint, in checked builds the call is ignored and an error is issued. \note For all actors in the pruning structure each assigned PxConstraint object will get added to the scene automatically if it connects to another actor that is part of the scene already. \note This method is optimized for high performance. \note Merging a PxPruningStructure into an active scene query optimization AABB tree might unbalance the tree. A typical use case for PxPruningStructure is a large world scenario where blocks of closely positioned actors get streamed in. The merge process finds the best node in the active scene query optimization AABB tree and inserts the PxPruningStructure. Therefore using PxPruningStructure for actors scattered throughout the world will result in an unbalanced tree. \param[in] pruningStructure Pruning structure for a set of actors. \return True if success @see PxPhysics::createPruningStructure, PxPruningStructure */ virtual bool addActors(const PxPruningStructure& pruningStructure) = 0; /** \brief Removes an actor from this scene. \note If the actor is not part of this scene (see #PxActor::getScene), the call is ignored and an error is issued. \note You can not remove individual articulation links (see #PxArticulationLink) from the scene. Use #removeArticulation() instead. \note If the actor is a PxRigidActor then all assigned PxConstraint objects will get removed from the scene automatically. \note If the actor is in an aggregate it will be removed from the aggregate. \param[in] actor Actor to remove from scene. \param[in] wakeOnLostTouch Specifies whether touching objects from the previous frame should get woken up in the next frame. Only applies to PxArticulationReducedCoordinate and PxRigidActor types. @see PxActor, PxAggregate */ virtual void removeActor(PxActor& actor, bool wakeOnLostTouch = true) = 0; /** \brief Removes actors from this scene. Only supports actors of type PxRigidStatic and PxRigidDynamic. \note This method only supports actors of type PxRigidStatic and PxRigidDynamic. For other actors, use removeActor() instead. For articulation links, use removeArticulation(). \note If some actor is not part of this scene (see #PxActor::getScene), the actor remove is ignored and an error is issued. \note You can not remove individual articulation links (see #PxArticulationLink) from the scene. Use #removeArticulation() instead. 
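	// A minimal usage sketch for addActor()/addActors(), assuming "physics", "material" and "scene" are
	// an already-initialized PxPhysics&, PxMaterial& and PxScene*, and "actor0"/"actor1" stand in for
	// previously created rigid actors. PxRigidActorExt is part of the PhysX extensions library.
	PxRigidDynamic* box = physics.createRigidDynamic(PxTransform(PxVec3(0.0f, 10.0f, 0.0f)));
	PxRigidActorExt::createExclusiveShape(*box, PxBoxGeometry(1.0f, 1.0f, 1.0f), material);
	scene->addActor(*box);                   // single actor

	PxActor* batch[2] = { actor0, actor1 };  // batched path, optimized for adding many actors at once
	scene->addActors(batch, 2);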
	\note If the actor is a PxRigidActor then all assigned PxConstraint objects will get removed from the scene automatically.

	\param[in] actors Array of actors to remove from the scene.
	\param[in] nbActors Number of actors in the array.
	\param[in] wakeOnLostTouch Specifies whether touching objects from the previous frame should get woken up in the next frame. Only applies to PxArticulationReducedCoordinate and PxRigidActor types.

	@see PxActor
	*/
	virtual void removeActors(PxActor*const* actors, PxU32 nbActors, bool wakeOnLostTouch = true) = 0;

	/**
	\brief Adds an aggregate to this scene.

	\note If the aggregate is already assigned to a scene (see #PxAggregate::getScene), the call is ignored and an error is issued.
	\note If the aggregate contains an actor with an invalid constraint, in checked builds the call is ignored and an error is issued.
	\note If the aggregate already contains actors, those actors are added to the scene as well.

	\param[in] aggregate Aggregate to add to scene.
	\return True if success

	@see PxAggregate, PxConstraint::isValid()
	*/
	virtual bool addAggregate(PxAggregate& aggregate) = 0;

	/**
	\brief Removes an aggregate from this scene.

	\note If the aggregate is not part of this scene (see #PxAggregate::getScene), the call is ignored and an error is issued.
	\note If the aggregate contains actors, those actors are removed from the scene as well.

	\param[in] aggregate Aggregate to remove from scene.
	\param[in] wakeOnLostTouch Specifies whether touching objects from the previous frame should get woken up in the next frame. Only applies to PxArticulationReducedCoordinate and PxRigidActor types.

	@see PxAggregate
	*/
	virtual void removeAggregate(PxAggregate& aggregate, bool wakeOnLostTouch = true) = 0;

	/**
	\brief Adds objects in the collection to this scene.

	This function adds the following types of objects to this scene: PxRigidActor (except PxArticulationLink), PxAggregate, PxArticulationReducedCoordinate.
	This method is typically used after deserializing the collection in order to populate the scene with deserialized objects.

	\note If the collection contains an actor with an invalid constraint, in checked builds the call is ignored and an error is issued.

	\param[in] collection Objects to add to this scene. See #PxCollection
	\return True if success

	@see PxCollection, PxConstraint::isValid()
	*/
	virtual bool addCollection(const PxCollection& collection) = 0;
	//@}
	/************************************************************************************************/

	/** @name Contained Object Retrieval
	*/
	//@{

	/**
	\brief Retrieve the number of actors of certain types in the scene. For supported types, see PxActorTypeFlags.

	\param[in] types Combination of actor types.
	\return the number of actors.

	@see getActors()
	*/
	virtual PxU32 getNbActors(PxActorTypeFlags types) const = 0;

	/**
	\brief Retrieve an array of all the actors of certain types in the scene. For supported types, see PxActorTypeFlags.

	\param[in] types Combination of actor types to retrieve.
	\param[out] userBuffer The buffer to receive actor pointers.
	\param[in] bufferSize Size of provided user buffer.
	\param[in] startIndex Index of first actor pointer to be retrieved
	\return Number of actors written to the buffer.

	@see getNbActors()
	*/
	virtual PxU32 getActors(PxActorTypeFlags types, PxActor** userBuffer, PxU32 bufferSize, PxU32 startIndex=0) const = 0;

	/**
	\brief Queries the PxScene for a list of the PxActors whose transforms have been updated during the previous simulation step.
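	// A minimal sketch of the getNbActors()/getActors() retrieval pattern documented above, assuming
	// "scene" is a valid PxScene* and <vector> is available.
	const PxActorTypeFlags types = PxActorTypeFlag::eRIGID_STATIC | PxActorTypeFlag::eRIGID_DYNAMIC;
	const PxU32 nbActors = scene->getNbActors(types);
	std::vector<PxActor*> actors(nbActors);
	if(nbActors)
		scene->getActors(types, actors.data(), nbActors);  // fills the user buffer, starting at index 0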
Only includes actors of type PxRigidDynamic and PxArticulationLink. \note PxSceneFlag::eENABLE_ACTIVE_ACTORS must be set. \note Do not use this method while the simulation is running. Calls to this method while the simulation is running will be ignored and NULL will be returned. \param[out] nbActorsOut The number of actors returned. \return A pointer to the list of active PxActors generated during the last call to fetchResults(). @see PxActor */ virtual PxActor** getActiveActors(PxU32& nbActorsOut) = 0; /** \brief Retrieve the number of soft bodies in the scene. \return the number of soft bodies. @see getActors() */ virtual PxU32 getNbSoftBodies() const = 0; /** \brief Retrieve an array of all the soft bodies in the scene. \param[out] userBuffer The buffer to receive actor pointers. \param[in] bufferSize Size of provided user buffer. \param[in] startIndex Index of first actor pointer to be retrieved \return Number of actors written to the buffer. @see getNbActors() */ virtual PxU32 getSoftBodies(PxSoftBody** userBuffer, PxU32 bufferSize, PxU32 startIndex = 0) const = 0; /** \brief Retrieve the number of particle systems of the requested type in the scene. \param[in] type The particle system type. See PxParticleSolverType. Only one type can be requested per function call. \return the number particle systems. See getParticleSystems(), PxParticleSolverType */ virtual PxU32 getNbParticleSystems(PxParticleSolverType::Enum type) const = 0; /** \brief Retrieve an array of all the particle systems of the requested type in the scene. \param[in] type The particle system type. See PxParticleSolverType. Only one type can be requested per function call. \param[out] userBuffer The buffer to receive particle system pointers. \param[in] bufferSize Size of provided user buffer. \param[in] startIndex Index of first particle system pointer to be retrieved \return Number of particle systems written to the buffer. See getNbParticleSystems(), PxParticleSolverType */ virtual PxU32 getParticleSystems(PxParticleSolverType::Enum type, PxParticleSystem** userBuffer, PxU32 bufferSize, PxU32 startIndex = 0) const = 0; /** \brief Retrieve the number of FEM cloths in the scene. \warning Feature under development, only for internal usage. \return the number of FEM cloths. See getFEMCloths() */ virtual PxU32 getNbFEMCloths() const = 0; /** \brief Retrieve an array of all the FEM cloths in the scene. \warning Feature under development, only for internal usage. \param[out] userBuffer The buffer to write the FEM cloth pointers to \param[in] bufferSize Size of the provided user buffer \param[in] startIndex Index of first FEM cloth pointer to be retrieved \return Number of FEM cloths written to the buffer */ virtual PxU32 getFEMCloths(PxFEMCloth** userBuffer, PxU32 bufferSize, PxU32 startIndex = 0) const = 0; /** \brief Retrieve the number of hair systems in the scene. \warning Feature under development, only for internal usage. \return the number of hair systems @see getActors() */ virtual PxU32 getNbHairSystems() const = 0; /** \brief Retrieve an array of all the hair systems in the scene. \warning Feature under development, only for internal usage. 
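	// A minimal sketch of the active-actors pattern documented above, assuming "scene" is a valid
	// PxScene* created with PxSceneFlag::eENABLE_ACTIVE_ACTORS, and that this runs after fetchResults().
	// Note the returned list may also contain articulation links; this sketch only handles rigid dynamics.
	PxU32 nbActive = 0;
	PxActor** active = scene->getActiveActors(nbActive);
	for(PxU32 i = 0; i < nbActive; i++)
	{
		if(PxRigidDynamic* rigid = active[i]->is<PxRigidDynamic>())
		{
			const PxTransform pose = rigid->getGlobalPose();
			PX_UNUSED(pose);  // e.g. push the new pose to the render object stored in actor->userData
		}
	}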
\param[out] userBuffer The buffer to write the actor pointers to \param[in] bufferSize Size of the provided user buffer \param[in] startIndex Index of first actor pointer to be retrieved \return Number of actors written to the buffer */ virtual PxU32 getHairSystems(PxHairSystem** userBuffer, PxU32 bufferSize, PxU32 startIndex = 0) const = 0; /** \brief Returns the number of articulations in the scene. \return the number of articulations in this scene. @see getArticulations() */ virtual PxU32 getNbArticulations() const = 0; /** \brief Retrieve all the articulations in the scene. \param[out] userBuffer The buffer to receive articulations pointers. \param[in] bufferSize Size of provided user buffer. \param[in] startIndex Index of first articulations pointer to be retrieved \return Number of articulations written to the buffer. @see getNbArticulations() */ virtual PxU32 getArticulations(PxArticulationReducedCoordinate** userBuffer, PxU32 bufferSize, PxU32 startIndex=0) const = 0; /** \brief Returns the number of constraint shaders in the scene. \return the number of constraint shaders in this scene. @see getConstraints() */ virtual PxU32 getNbConstraints() const = 0; /** \brief Retrieve all the constraint shaders in the scene. \param[out] userBuffer The buffer to receive constraint shader pointers. \param[in] bufferSize Size of provided user buffer. \param[in] startIndex Index of first constraint pointer to be retrieved \return Number of constraint shaders written to the buffer. @see getNbConstraints() */ virtual PxU32 getConstraints(PxConstraint** userBuffer, PxU32 bufferSize, PxU32 startIndex=0) const = 0; /** \brief Returns the number of aggregates in the scene. \return the number of aggregates in this scene. @see getAggregates() */ virtual PxU32 getNbAggregates() const = 0; /** \brief Retrieve all the aggregates in the scene. \param[out] userBuffer The buffer to receive aggregates pointers. \param[in] bufferSize Size of provided user buffer. \param[in] startIndex Index of first aggregate pointer to be retrieved \return Number of aggregates written to the buffer. @see getNbAggregates() */ virtual PxU32 getAggregates(PxAggregate** userBuffer, PxU32 bufferSize, PxU32 startIndex=0) const = 0; //@} /************************************************************************************************/ /** @name Dominance */ //@{ /** \brief Specifies the dominance behavior of contacts between two actors with two certain dominance groups. It is possible to assign each actor to a dominance groups using #PxActor::setDominanceGroup(). With dominance groups one can have all contacts created between actors act in one direction only. This is useful, for example, if you want an object to push debris out of its way and be unaffected,while still responding physically to forces and collisions with non-debris objects. Whenever a contact between two actors (a0, a1) needs to be solved, the groups (g0, g1) of both actors are retrieved. Then the PxDominanceGroupPair setting for this group pair is retrieved with getDominanceGroupPair(g0, g1). In the contact, PxDominanceGroupPair::dominance0 becomes the dominance setting for a0, and PxDominanceGroupPair::dominance1 becomes the dominance setting for a1. A dominanceN setting of 1.0f, the default, will permit aN to be pushed or pulled by a(1-N) through the contact. A dominanceN setting of 0.0f, will however prevent aN to be pushed by a(1-N) via the contact. Thus, a PxDominanceGroupPair of (1.0f, 0.0f) makes the interaction one-way. 
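	// A minimal sketch of a one-way ("debris") interaction using the dominance API described above,
	// assuming "scene" is a valid PxScene* and "characterActor"/"debrisActor" are actors already in it;
	// the group numbers are arbitrary example values.
	const PxDominanceGroup characterGroup = 1;
	const PxDominanceGroup debrisGroup    = 2;
	characterActor->setDominanceGroup(characterGroup);
	debrisActor->setDominanceGroup(debrisGroup);
	// dominance0 applies to the first group, dominance1 to the second: the character (0) cannot be
	// pushed by debris, while debris (1) still responds fully to the character.
	scene->setDominanceGroupPair(characterGroup, debrisGroup, PxDominanceGroupPair(0, 1));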
The matrix sampled by getDominanceGroupPair(g1, g2) is initialised by default such that: if g1 == g2, then (1.0f, 1.0f) is returned if g1 < g2, then (0.0f, 1.0f) is returned if g1 > g2, then (1.0f, 0.0f) is returned In other words, we permit actors in higher groups to be pushed around by actors in lower groups by default. These settings should cover most applications, and in fact not overriding these settings may likely result in higher performance. It is not possible to make the matrix asymetric, or to change the diagonal. In other words: * it is not possible to change (g1, g2) if (g1==g2) * if you set (g1, g2) to X, then (g2, g1) will implicitly and automatically be set to ~X, where: ~(1.0f, 1.0f) is (1.0f, 1.0f) ~(0.0f, 1.0f) is (1.0f, 0.0f) ~(1.0f, 0.0f) is (0.0f, 1.0f) These two restrictions are to make sure that contacts between two actors will always evaluate to the same dominance setting, regardless of the order of the actors. Dominance settings are currently specified as floats 0.0f or 1.0f because in the future we may permit arbitrary fractional settings to express 'partly-one-way' interactions. <b>Sleeping:</b> Does <b>NOT</b> wake actors up automatically. @see getDominanceGroupPair() PxDominanceGroup PxDominanceGroupPair PxActor::setDominanceGroup() PxActor::getDominanceGroup() */ virtual void setDominanceGroupPair( PxDominanceGroup group1, PxDominanceGroup group2, const PxDominanceGroupPair& dominance) = 0; /** \brief Samples the dominance matrix. @see setDominanceGroupPair() PxDominanceGroup PxDominanceGroupPair PxActor::setDominanceGroup() PxActor::getDominanceGroup() */ virtual PxDominanceGroupPair getDominanceGroupPair(PxDominanceGroup group1, PxDominanceGroup group2) const = 0; //@} /************************************************************************************************/ /** @name Dispatcher */ //@{ /** \brief Return the cpu dispatcher that was set in PxSceneDesc::cpuDispatcher when creating the scene with PxPhysics::createScene @see PxSceneDesc::cpuDispatcher, PxPhysics::createScene */ virtual PxCpuDispatcher* getCpuDispatcher() const = 0; /** \brief Return the CUDA context manager that was set in PxSceneDesc::cudaContextManager when creating the scene with PxPhysics::createScene <b>Platform specific:</b> Applies to PC GPU only. @see PxSceneDesc::cudaContextManager, PxPhysics::createScene */ virtual PxCudaContextManager* getCudaContextManager() const = 0; //@} /************************************************************************************************/ /** @name Multiclient */ //@{ /** \brief Reserves a new client ID. PX_DEFAULT_CLIENT is always available as the default clientID. Additional clients are returned by this function. Clients cannot be released once created. An error is reported when more than a supported number of clients (currently 128) are created. @see PxClientID */ virtual PxClientID createClient() = 0; //@} /************************************************************************************************/ /** @name Callbacks */ //@{ /** \brief Sets a user notify object which receives special simulation events when they occur. \note Do not set the callback while the simulation is running. Calls to this method while the simulation is running will be ignored. \param[in] callback User notification callback. See #PxSimulationEventCallback. 
@see PxSimulationEventCallback getSimulationEventCallback */ virtual void setSimulationEventCallback(PxSimulationEventCallback* callback) = 0; /** \brief Retrieves the simulationEventCallback pointer set with setSimulationEventCallback(). \return The current user notify pointer. See #PxSimulationEventCallback. @see PxSimulationEventCallback setSimulationEventCallback() */ virtual PxSimulationEventCallback* getSimulationEventCallback() const = 0; /** \brief Sets a user callback object, which receives callbacks on all contacts generated for specified actors. \note Do not set the callback while the simulation is running. Calls to this method while the simulation is running will be ignored. \param[in] callback Asynchronous user contact modification callback. See #PxContactModifyCallback. */ virtual void setContactModifyCallback(PxContactModifyCallback* callback) = 0; /** \brief Sets a user callback object, which receives callbacks on all CCD contacts generated for specified actors. \note Do not set the callback while the simulation is running. Calls to this method while the simulation is running will be ignored. \param[in] callback Asynchronous user contact modification callback. See #PxCCDContactModifyCallback. */ virtual void setCCDContactModifyCallback(PxCCDContactModifyCallback* callback) = 0; /** \brief Retrieves the PxContactModifyCallback pointer set with setContactModifyCallback(). \return The current user contact modify callback pointer. See #PxContactModifyCallback. @see PxContactModifyCallback setContactModifyCallback() */ virtual PxContactModifyCallback* getContactModifyCallback() const = 0; /** \brief Retrieves the PxCCDContactModifyCallback pointer set with setContactModifyCallback(). \return The current user contact modify callback pointer. See #PxContactModifyCallback. @see PxContactModifyCallback setContactModifyCallback() */ virtual PxCCDContactModifyCallback* getCCDContactModifyCallback() const = 0; /** \brief Sets a broad-phase user callback object. \note Do not set the callback while the simulation is running. Calls to this method while the simulation is running will be ignored. \param[in] callback Asynchronous broad-phase callback. See #PxBroadPhaseCallback. */ virtual void setBroadPhaseCallback(PxBroadPhaseCallback* callback) = 0; /** \brief Retrieves the PxBroadPhaseCallback pointer set with setBroadPhaseCallback(). \return The current broad-phase callback pointer. See #PxBroadPhaseCallback. @see PxBroadPhaseCallback setBroadPhaseCallback() */ virtual PxBroadPhaseCallback* getBroadPhaseCallback() const = 0; //@} /************************************************************************************************/ /** @name Collision Filtering */ //@{ /** \brief Sets the shared global filter data which will get passed into the filter shader. \note It is the user's responsibility to ensure that changing the shared global filter data does not change the filter output value for existing pairs. If the filter output for existing pairs does change nonetheless then such a change will not take effect until the pair gets refiltered. resetFiltering() can be used to explicitly refilter the pairs of specific objects. \note The provided data will get copied to internal buffers and this copy will be used for filtering calls. \note Do not use this method while the simulation is running. Calls to this method while the simulation is running will be ignored. \param[in] data The shared global filter shader data. \param[in] dataSize Size of the shared global filter shader data (in bytes). 
@see getFilterShaderData() PxSceneDesc.filterShaderData PxSimulationFilterShader */ virtual void setFilterShaderData(const void* data, PxU32 dataSize) = 0; /** \brief Gets the shared global filter data in use for this scene. \note The reference points to a copy of the original filter data specified in #PxSceneDesc.filterShaderData or provided by #setFilterShaderData(). \return Shared filter data for filter shader. @see getFilterShaderDataSize() setFilterShaderData() PxSceneDesc.filterShaderData PxSimulationFilterShader */ virtual const void* getFilterShaderData() const = 0; /** \brief Gets the size of the shared global filter data (#PxSceneDesc.filterShaderData) \return Size of shared filter data [bytes]. @see getFilterShaderData() PxSceneDesc.filterShaderDataSize PxSimulationFilterShader */ virtual PxU32 getFilterShaderDataSize() const = 0; /** \brief Gets the custom collision filter shader in use for this scene. \return Filter shader class that defines the collision pair filtering. @see PxSceneDesc.filterShader PxSimulationFilterShader */ virtual PxSimulationFilterShader getFilterShader() const = 0; /** \brief Gets the custom collision filter callback in use for this scene. \return Filter callback class that defines the collision pair filtering. @see PxSceneDesc.filterCallback PxSimulationFilterCallback */ virtual PxSimulationFilterCallback* getFilterCallback() const = 0; /** \brief Marks the object to reset interactions and re-run collision filters in the next simulation step. This call forces the object to remove all existing collision interactions, to search anew for existing contact pairs and to run the collision filters again for found collision pairs. \note The operation is supported for PxRigidActor objects only. \note All persistent state of existing interactions will be lost and can not be retrieved even if the same collison pair is found again in the next step. This will mean, for example, that you will not get notified about persistent contact for such an interaction (see #PxPairFlag::eNOTIFY_TOUCH_PERSISTS), the contact pair will be interpreted as newly found instead. \note Lost touch contact reports will be sent for every collision pair which includes this shape, if they have been requested through #PxPairFlag::eNOTIFY_TOUCH_LOST or #PxPairFlag::eNOTIFY_THRESHOLD_FORCE_LOST. \note This is an expensive operation, don't use it if you don't have to. \note Can be used to retrieve collision pairs that were killed by the collision filters (see #PxFilterFlag::eKILL) \note It is invalid to use this method if the actor has not been added to a scene already. \note It is invalid to use this method if PxActorFlag::eDISABLE_SIMULATION is set. \note Do not use this method while the simulation is running. <b>Sleeping:</b> Does wake up the actor. \param[in] actor The actor for which to re-evaluate interactions. \return True if success @see PxSimulationFilterShader PxSimulationFilterCallback */ virtual bool resetFiltering(PxActor& actor) = 0; /** \brief Marks the object to reset interactions and re-run collision filters for specified shapes in the next simulation step. This is a specialization of the resetFiltering(PxActor& actor) method and allows to reset interactions for specific shapes of a PxRigidActor. \note Do not use this method while the simulation is running. <b>Sleeping:</b> Does wake up the actor. \param[in] actor The actor for which to re-evaluate interactions. \param[in] shapes The shapes for which to re-evaluate interactions. \param[in] shapeCount Number of shapes in the list. 
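	// A minimal sketch of refiltering after a filter-data change, as described for setFilterShaderData()
	// and resetFiltering() above; "gFilterData" is a hypothetical application-defined POD struct and
	// "actor" a PxRigidActor already added to "scene".
	scene->setFilterShaderData(&gFilterData, sizeof(gFilterData));
	scene->resetFiltering(*actor);  // force the affected pairs to run the filter shader again next step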
@see PxSimulationFilterShader PxSimulationFilterCallback */ virtual bool resetFiltering(PxRigidActor& actor, PxShape*const* shapes, PxU32 shapeCount) = 0; /** \brief Gets the pair filtering mode for kinematic-kinematic pairs. \return Filtering mode for kinematic-kinematic pairs. @see PxPairFilteringMode PxSceneDesc */ virtual PxPairFilteringMode::Enum getKinematicKinematicFilteringMode() const = 0; /** \brief Gets the pair filtering mode for static-kinematic pairs. \return Filtering mode for static-kinematic pairs. @see PxPairFilteringMode PxSceneDesc */ virtual PxPairFilteringMode::Enum getStaticKinematicFilteringMode() const = 0; //@} /************************************************************************************************/ /** @name Simulation */ //@{ /** \brief Advances the simulation by an elapsedTime time. \note Large elapsedTime values can lead to instabilities. In such cases elapsedTime should be subdivided into smaller time intervals and simulate() should be called multiple times for each interval. Calls to simulate() should pair with calls to fetchResults(): Each fetchResults() invocation corresponds to exactly one simulate() invocation; calling simulate() twice without an intervening fetchResults() or fetchResults() twice without an intervening simulate() causes an error condition. scene->simulate(); ...do some processing until physics is computed... scene->fetchResults(); ...now results of run may be retrieved. \param[in] elapsedTime Amount of time to advance simulation by. The parameter has to be larger than 0, else the resulting behavior will be undefined. <b>Range:</b> (0, PX_MAX_F32) \param[in] completionTask if non-NULL, this task will have its refcount incremented in simulate(), then decremented when the scene is ready to have fetchResults called. So the task will not run until the application also calls removeReference(). \param[in] scratchMemBlock a memory region for physx to use for temporary data during simulation. This block may be reused by the application after fetchResults returns. Must be aligned on a 16-byte boundary \param[in] scratchMemBlockSize the size of the scratch memory block. Must be a multiple of 16K. \param[in] controlSimulation if true, the scene controls its PxTaskManager simulation state. Leave true unless the application is calling the PxTaskManager start/stopSimulation() methods itself. \return True if success @see fetchResults() checkResults() */ virtual bool simulate(PxReal elapsedTime, physx::PxBaseTask* completionTask = NULL, void* scratchMemBlock = 0, PxU32 scratchMemBlockSize = 0, bool controlSimulation = true) = 0; /** \brief Performs dynamics phase of the simulation pipeline. \note Calls to advance() should follow calls to fetchCollision(). An error message will be issued if this sequence is not followed. \param[in] completionTask if non-NULL, this task will have its refcount incremented in advance(), then decremented when the scene is ready to have fetchResults called. So the task will not run until the application also calls removeReference(). \return True if success */ virtual bool advance(physx::PxBaseTask* completionTask = 0) = 0; /** \brief Performs collision detection for the scene over elapsedTime \note Calls to collide() should be the first method called to simulate a frame. \param[in] elapsedTime Amount of time to advance simulation by. The parameter has to be larger than 0, else the resulting behavior will be undefined. 
	<b>Range:</b> (0, PX_MAX_F32)
	\param[in] completionTask if non-NULL, this task will have its refcount incremented in collide(), then decremented when the scene is ready to have fetchResults called. So the task will not run until the application also calls removeReference().
	\param[in] scratchMemBlock a memory region for physx to use for temporary data during simulation. This block may be reused by the application after fetchResults returns. Must be aligned on a 16-byte boundary
	\param[in] scratchMemBlockSize the size of the scratch memory block. Must be a multiple of 16K.
	\param[in] controlSimulation if true, the scene controls its PxTaskManager simulation state. Leave true unless the application is calling the PxTaskManager start/stopSimulation() methods itself.
	\return True if success
	*/
	virtual bool collide(PxReal elapsedTime, physx::PxBaseTask* completionTask = 0, void* scratchMemBlock = 0, PxU32 scratchMemBlockSize = 0, bool controlSimulation = true) = 0;

	/**
	\brief Checks whether the simulation run has completed.

	This does not cause the data available for reading to be updated with the results of the simulation, it is simply a status check.
	The block parameter lets the call either return immediately or block until the simulation run has completed.

	\param[in] block When set to true will block until the condition is met.
	\return True if the results are available.

	@see simulate() fetchResults()
	*/
	virtual bool checkResults(bool block = false) = 0;

	/**
	This method must be called after collide() and before advance(). It will wait for the collision phase to finish.
	If the user makes an illegal simulation call, the SDK will issue an error message.

	\param[in] block When set to true, the call blocks until the collision phase has finished running.
	*/
	virtual bool fetchCollision(bool block = false) = 0;

	/**
	This is the big brother to checkResults(); it basically does the following:

	\code
	if(checkResults(block))
	{
		fire appropriate callbacks
		swap buffers
		return true
	}
	else
		return false
	\endcode

	\param[in] block When set to true will block until results are available.
	\param[out] errorState Used to retrieve hardware error codes. A non zero value indicates an error.
	\return True if the results have been fetched.

	@see simulate() checkResults()
	*/
	virtual bool fetchResults(bool block = false, PxU32* errorState = 0) = 0;

	/**
	This call performs the first section of fetchResults, and returns a pointer to the contact streams output by the simulation.
	It can be used to process contact pairs in parallel, which is often a limiting factor for fetchResults() performance.

	After calling this function and processing the contact streams, call fetchResultsFinish(). Note that writes to the simulation
	are not permitted between the start of fetchResultsStart() and the end of fetchResultsFinish().

	\param[in] block When set to true will block until results are available.
	\param[out] contactPairs an array of pointers to contact pair headers
	\param[out] nbContactPairs the number of contact pairs
	\return True if the results have been fetched.

	@see simulate() checkResults() fetchResults() fetchResultsFinish()
	*/
	virtual bool fetchResultsStart(const PxContactPairHeader*& contactPairs, PxU32& nbContactPairs, bool block = false) = 0;

	/**
	This call processes all event callbacks in parallel. It takes a continuation task, which will be executed once all callbacks have been processed.

	This is a utility function to make it easier to process callbacks in parallel using the PhysX task system.
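	// A minimal sketch of the basic stepping pattern documented for simulate()/fetchResults() above:
	// exactly one fetchResults() per simulate(); the 1/60 s timestep is an example value.
	const PxReal dt = 1.0f / 60.0f;
	scene->simulate(dt);
	// ... perform other application work here while the simulation runs ...
	scene->fetchResults(true);  // block until results are available, then fire the event callbacks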
It can only be used in conjunction with fetchResultsStart(...) and fetchResultsFinish(...) \param[in] continuation The task that will be executed once all callbacks have been processed. */ virtual void processCallbacks(physx::PxBaseTask* continuation) = 0; /** This call performs the second section of fetchResults. It must be called after fetchResultsStart() returns and contact reports have been processed. Note that once fetchResultsFinish() has been called, the contact streams returned in fetchResultsStart() will be invalid. \param[out] errorState Used to retrieve hardware error codes. A non zero value indicates an error. @see simulate() checkResults() fetchResults() fetchResultsStart() */ virtual void fetchResultsFinish(PxU32* errorState = 0) = 0; /** This call performs the synchronization of particle system data copies. */ virtual void fetchResultsParticleSystem() = 0; /** \brief Clear internal buffers and free memory. This method can be used to clear buffers and free internal memory without having to destroy the scene. Can be useful if the physics data gets streamed in and a checkpoint with a clean state should be created. \note It is not allowed to call this method while the simulation is running. The call will fail. \param[in] sendPendingReports When set to true pending reports will be sent out before the buffers get cleaned up (for instance lost touch contact/trigger reports due to deleted objects). */ virtual void flushSimulation(bool sendPendingReports = false) = 0; /** \brief Sets a constant gravity for the entire scene. \note Do not use this method while the simulation is running. <b>Sleeping:</b> Does <b>NOT</b> wake the actor up automatically. \param[in] vec A new gravity vector(e.g. PxVec3(0.0f,-9.8f,0.0f) ) <b>Range:</b> force vector @see PxSceneDesc.gravity getGravity() */ virtual void setGravity(const PxVec3& vec) = 0; /** \brief Retrieves the current gravity setting. \return The current gravity for the scene. @see setGravity() PxSceneDesc.gravity */ virtual PxVec3 getGravity() const = 0; /** \brief Set the bounce threshold velocity. Collision speeds below this threshold will not cause a bounce. \note Do not use this method while the simulation is running. @see PxSceneDesc::bounceThresholdVelocity, getBounceThresholdVelocity */ virtual void setBounceThresholdVelocity(const PxReal t) = 0; /** \brief Return the bounce threshold velocity. @see PxSceneDesc.bounceThresholdVelocity, setBounceThresholdVelocity */ virtual PxReal getBounceThresholdVelocity() const = 0; /** \brief Sets the maximum number of CCD passes \note Do not use this method while the simulation is running. \param[in] ccdMaxPasses Maximum number of CCD passes @see PxSceneDesc.ccdMaxPasses getCCDMaxPasses() */ virtual void setCCDMaxPasses(PxU32 ccdMaxPasses) = 0; /** \brief Gets the maximum number of CCD passes. \return The maximum number of CCD passes. @see PxSceneDesc::ccdMaxPasses setCCDMaxPasses() */ virtual PxU32 getCCDMaxPasses() const = 0; /** \brief Set the maximum CCD separation. \note Do not use this method while the simulation is running. @see PxSceneDesc::ccdMaxSeparation, getCCDMaxSeparation */ virtual void setCCDMaxSeparation(const PxReal t) = 0; /** \brief Gets the maximum CCD separation. \return The maximum CCD separation. @see PxSceneDesc::ccdMaxSeparation setCCDMaxSeparation() */ virtual PxReal getCCDMaxSeparation() const = 0; /** \brief Set the CCD threshold. \note Do not use this method while the simulation is running. 
@see PxSceneDesc::ccdThreshold, getCCDThreshold */ virtual void setCCDThreshold(const PxReal t) = 0; /** \brief Gets the CCD threshold. \return The CCD threshold. @see PxSceneDesc::ccdThreshold setCCDThreshold() */ virtual PxReal getCCDThreshold() const = 0; /** \brief Set the max bias coefficient. \note Do not use this method while the simulation is running. @see PxSceneDesc::maxBiasCoefficient, getMaxBiasCoefficient */ virtual void setMaxBiasCoefficient(const PxReal t) = 0; /** \brief Gets the max bias coefficient. \return The max bias coefficient. @see PxSceneDesc::maxBiasCoefficient setMaxBiasCoefficient() */ virtual PxReal getMaxBiasCoefficient() const = 0; /** \brief Set the friction offset threshold. \note Do not use this method while the simulation is running. @see PxSceneDesc::frictionOffsetThreshold, getFrictionOffsetThreshold */ virtual void setFrictionOffsetThreshold(const PxReal t) = 0; /** \brief Gets the friction offset threshold. @see PxSceneDesc::frictionOffsetThreshold, setFrictionOffsetThreshold */ virtual PxReal getFrictionOffsetThreshold() const = 0; /** \brief Set the friction correlation distance. \note Do not use this method while the simulation is running. @see PxSceneDesc::frictionCorrelationDistance, getFrictionCorrelationDistance */ virtual void setFrictionCorrelationDistance(const PxReal t) = 0; /** \brief Gets the friction correlation distance. @see PxSceneDesc::frictionCorrelationDistance, setFrictionCorrelationDistance */ virtual PxReal getFrictionCorrelationDistance() const = 0; /** \brief Return the friction model. @see PxFrictionType, PxSceneDesc::frictionType */ virtual PxFrictionType::Enum getFrictionType() const = 0; /** \brief Return the solver model. @see PxSolverType, PxSceneDesc::solverType */ virtual PxSolverType::Enum getSolverType() const = 0; //@} /************************************************************************************************/ /** @name Visualization and Statistics */ //@{ /** \brief Function that lets you set debug visualization parameters. Returns false if the value passed is out of range for usage specified by the enum. \note Do not use this method while the simulation is running. \param[in] param Parameter to set. See #PxVisualizationParameter \param[in] value The value to set, see #PxVisualizationParameter for allowable values. Setting to zero disables visualization for the specified property, setting to a positive value usually enables visualization and defines the scale factor. \return False if the parameter is out of range. @see getVisualizationParameter PxVisualizationParameter getRenderBuffer() */ virtual bool setVisualizationParameter(PxVisualizationParameter::Enum param, PxReal value) = 0; /** \brief Function that lets you query debug visualization parameters. \param[in] paramEnum The Parameter to retrieve. \return The value of the parameter. @see setVisualizationParameter PxVisualizationParameter */ virtual PxReal getVisualizationParameter(PxVisualizationParameter::Enum paramEnum) const = 0; /** \brief Defines a box in world space to which visualization geometry will be (conservatively) culled. Use a non-empty culling box to enable the feature, and an empty culling box to disable it. \note Do not use this method while the simulation is running. \param[in] box the box to which the geometry will be culled. Empty box to disable the feature. 
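	// A minimal sketch of the debug-visualization workflow described above: enable the global scale
	// plus one property, step the scene, then read the render buffer (assumes "scene" is a valid PxScene*).
	scene->setVisualizationParameter(PxVisualizationParameter::eSCALE, 1.0f);
	scene->setVisualizationParameter(PxVisualizationParameter::eCOLLISION_SHAPES, 1.0f);
	// ... simulate() / fetchResults() ...
	const PxRenderBuffer& rb = scene->getRenderBuffer();
	for(PxU32 i = 0; i < rb.getNbLines(); i++)
	{
		const PxDebugLine& line = rb.getLines()[i];
		PX_UNUSED(line);  // draw line.pos0 -> line.pos1 with line.color0 in the application renderer
	}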
@see setVisualizationParameter getVisualizationCullingBox getRenderBuffer() */ virtual void setVisualizationCullingBox(const PxBounds3& box) = 0; /** \brief Retrieves the visualization culling box. \return the box to which the geometry will be culled. @see setVisualizationParameter setVisualizationCullingBox */ virtual PxBounds3 getVisualizationCullingBox() const = 0; /** \brief Retrieves the render buffer. This will contain the results of any active visualization for this scene. \note Do not use this method while the simulation is running. Calls to this method while the simulation is running will result in undefined behaviour. \return The render buffer. @see PxRenderBuffer */ virtual const PxRenderBuffer& getRenderBuffer() = 0; /** \brief Call this method to retrieve statistics for the current simulation step. \note Do not use this method while the simulation is running. Calls to this method while the simulation is running will be ignored. \param[out] stats Used to retrieve statistics for the current simulation step. @see PxSimulationStatistics */ virtual void getSimulationStatistics(PxSimulationStatistics& stats) const = 0; //@} /************************************************************************************************/ /** @name Broad-phase */ //@{ /** \brief Returns broad-phase type. \return Broad-phase type */ virtual PxBroadPhaseType::Enum getBroadPhaseType() const = 0; /** \brief Gets broad-phase caps. \param[out] caps Broad-phase caps \return True if success */ virtual bool getBroadPhaseCaps(PxBroadPhaseCaps& caps) const = 0; /** \brief Returns number of regions currently registered in the broad-phase. \return Number of regions */ virtual PxU32 getNbBroadPhaseRegions() const = 0; /** \brief Gets broad-phase regions. \param[out] userBuffer Returned broad-phase regions \param[in] bufferSize Size of userBuffer \param[in] startIndex Index of first desired region, in [0 ; getNbRegions()[ \return Number of written out regions */ virtual PxU32 getBroadPhaseRegions(PxBroadPhaseRegionInfo* userBuffer, PxU32 bufferSize, PxU32 startIndex=0) const = 0; /** \brief Adds a new broad-phase region. The bounds for the new region must be non-empty, otherwise an error occurs and the call is ignored. Note that by default, objects already existing in the SDK that might touch this region will not be automatically added to the region. In other words the newly created region will be empty, and will only be populated with new objects when they are added to the simulation, or with already existing objects when they are updated. It is nonetheless possible to override this default behavior and let the SDK populate the new region automatically with already existing objects overlapping the incoming region. This has a cost though, and it should only be used when the game can not guarantee that all objects within the new region will be added to the simulation after the region itself. Objects automatically move from one region to another during their lifetime. The system keeps tracks of what regions a given object is in. It is legal for an object to be in an arbitrary number of regions. 
	However if an object leaves all regions, or is created outside of all regions, several things happen:
	- collisions get disabled for this object
	- if a PxBroadPhaseCallback object is provided, an "out-of-bounds" event is generated via that callback
	- if a PxBroadPhaseCallback object is not provided, a warning/error message is sent to the error stream

	If an object goes out-of-bounds and the user deletes it during the same frame, neither the out-of-bounds event nor the error message is generated.

	\param[in] region User-provided region data
	\param[in] populateRegion Automatically populate new region with already existing objects overlapping it

	\return Handle for newly created region, or 0xffffffff in case of failure.

	\see PxBroadPhaseRegion PxBroadPhaseCallback
	*/
	virtual PxU32 addBroadPhaseRegion(const PxBroadPhaseRegion& region, bool populateRegion=false) = 0;

	/**
	\brief Removes a broad-phase region.

	If the region still contains objects, and if those objects do not overlap any region any more, they are not automatically
	removed from the simulation. Instead, the PxBroadPhaseCallback::onObjectOutOfBounds notification is used for each object.
	Users are responsible for removing the objects from the simulation if this is the desired behavior.

	If the handle is invalid, or if a valid handle is removed twice, an error message is sent to the error stream.

	\param[in] handle Region's handle, as returned by PxScene::addBroadPhaseRegion.
	\return True if success
	*/
	virtual bool removeBroadPhaseRegion(PxU32 handle) = 0;
	//@}

	/************************************************************************************************/

	/** @name Threads and Memory
	*/
	//@{

	/**
	\brief Get the task manager associated with this scene

	\return the task manager associated with the scene
	*/
	virtual PxTaskManager* getTaskManager() const = 0;

	/**
	\brief Lock the scene for reading from the calling thread.

	When the PxSceneFlag::eREQUIRE_RW_LOCK flag is enabled lockRead() must be called before any read calls are made on the scene.

	Multiple threads may read at the same time, but no threads may read while a thread is writing. If a call to lockRead() is made
	while another thread is holding a write lock then the calling thread will be blocked until the writing thread calls unlockWrite().

	\note Lock upgrading is *not* supported, meaning it is an error to call lockRead() followed by lockWrite().
	\note Recursive locking is supported but each lockRead() call must be paired with an unlockRead().

	\param file String representing the calling file, for debug purposes
	\param line The source file line number, for debug purposes
	*/
	virtual void lockRead(const char* file=NULL, PxU32 line=0) = 0;

	/**
	\brief Unlock the scene from reading.

	\note Each unlockRead() must be paired with a lockRead() from the same thread.
	*/
	virtual void unlockRead() = 0;

	/**
	\brief Lock the scene for writing from this thread.

	When the PxSceneFlag::eREQUIRE_RW_LOCK flag is enabled lockWrite() must be called before any write calls are made on the scene.

	Only one thread may write at a time and no threads may read while a thread is writing. If a call to lockWrite() is made and
	there are other threads reading then the calling thread will be blocked until the readers complete.

	Writers have priority. If a thread is blocked waiting to write then subsequent calls to lockRead() from other threads will be
	blocked until the writer completes.

	\note If multiple threads are waiting to write then the thread that is first granted access depends on OS scheduling.
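	// A minimal sketch of the read/write locking pattern described above, for a scene created with
	// PxSceneFlag::eREQUIRE_RW_LOCK; each lock call is paired with the matching unlock on the same thread.
	scene->lockRead(__FILE__, __LINE__);
	const PxVec3 g = scene->getGravity();  // read-only access
	scene->unlockRead();

	scene->lockWrite(__FILE__, __LINE__);
	scene->setGravity(PxVec3(0.0f, -9.81f, 0.0f));  // write access
	scene->unlockWrite();
	PX_UNUSED(g);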
\note Recursive locking is supported but each lockWrite() call must be paired with an unlockWrite(). \note If a thread has already locked the scene for writing then it may call lockRead(). \param file String representing the calling file, for debug purposes \param line The source file line number, for debug purposes */ virtual void lockWrite(const char* file=NULL, PxU32 line=0) = 0; /** \brief Unlock the scene from writing. \note Each unlockWrite() must be paired with a lockWrite() from the same thread. */ virtual void unlockWrite() = 0; /** \brief set the cache blocks that can be used during simulate(). Each frame the simulation requires memory to store contact, friction, and contact cache data. This memory is used in blocks of 16K. Each frame the blocks used by the previous frame are freed, and may be retrieved by the application using PxScene::flushSimulation() This call will force allocation of cache blocks if the numBlocks parameter is greater than the currently allocated number of blocks, and less than the max16KContactDataBlocks parameter specified at scene creation time. \note Do not use this method while the simulation is running. \param[in] numBlocks The number of blocks to allocate. @see PxSceneDesc.nbContactDataBlocks PxSceneDesc.maxNbContactDataBlocks flushSimulation() getNbContactDataBlocksUsed getMaxNbContactDataBlocksUsed */ virtual void setNbContactDataBlocks(PxU32 numBlocks) = 0; /** \brief get the number of cache blocks currently used by the scene This function may not be called while the scene is simulating \return the number of cache blocks currently used by the scene @see PxSceneDesc.nbContactDataBlocks PxSceneDesc.maxNbContactDataBlocks flushSimulation() setNbContactDataBlocks() getMaxNbContactDataBlocksUsed() */ virtual PxU32 getNbContactDataBlocksUsed() const = 0; /** \brief get the maximum number of cache blocks used by the scene This function may not be called while the scene is simulating \return the maximum number of cache blocks everused by the scene @see PxSceneDesc.nbContactDataBlocks PxSceneDesc.maxNbContactDataBlocks flushSimulation() setNbContactDataBlocks() getNbContactDataBlocksUsed() */ virtual PxU32 getMaxNbContactDataBlocksUsed() const = 0; /** \brief Return the value of PxSceneDesc::contactReportStreamBufferSize that was set when creating the scene with PxPhysics::createScene @see PxSceneDesc::contactReportStreamBufferSize, PxPhysics::createScene */ virtual PxU32 getContactReportStreamBufferSize() const = 0; /** \brief Sets the number of actors required to spawn a separate rigid body solver thread. \note Do not use this method while the simulation is running. \param[in] solverBatchSize Number of actors required to spawn a separate rigid body solver thread. @see PxSceneDesc.solverBatchSize getSolverBatchSize() */ virtual void setSolverBatchSize(PxU32 solverBatchSize) = 0; /** \brief Retrieves the number of actors required to spawn a separate rigid body solver thread. \return Current number of actors required to spawn a separate rigid body solver thread. @see PxSceneDesc.solverBatchSize setSolverBatchSize() */ virtual PxU32 getSolverBatchSize() const = 0; /** \brief Sets the number of articulations required to spawn a separate rigid body solver thread. \note Do not use this method while the simulation is running. \param[in] solverBatchSize Number of articulations required to spawn a separate rigid body solver thread. 
@see PxSceneDesc.solverBatchSize getSolverArticulationBatchSize() */ virtual void setSolverArticulationBatchSize(PxU32 solverBatchSize) = 0; /** \brief Retrieves the number of articulations required to spawn a separate rigid body solver thread. \return Current number of articulations required to spawn a separate rigid body solver thread. @see PxSceneDesc.solverBatchSize setSolverArticulationBatchSize() */ virtual PxU32 getSolverArticulationBatchSize() const = 0; //@} /** \brief Returns the wake counter reset value. \return Wake counter reset value @see PxSceneDesc.wakeCounterResetValue */ virtual PxReal getWakeCounterResetValue() const = 0; /** \brief Shift the scene origin by the specified vector. The poses of all objects in the scene and the corresponding data structures will get adjusted to reflect the new origin location (the shift vector will get subtracted from all object positions). \note It is the user's responsibility to keep track of the summed total origin shift and adjust all input/output to/from PhysX accordingly. \note Do not use this method while the simulation is running. Calls to this method while the simulation is running will be ignored. \note Make sure to propagate the origin shift to other dependent modules (for example, the character controller module etc.). \note This is an expensive operation and we recommend to use it only in the case where distance related precision issues may arise in areas far from the origin. \param[in] shift Translation vector to shift the origin by. */ virtual void shiftOrigin(const PxVec3& shift) = 0; /** \brief Returns the Pvd client associated with the scene. \return the client, NULL if no PVD supported. */ virtual PxPvdSceneClient* getScenePvdClient() = 0; /** \brief Copy GPU articulation data from the internal GPU buffer to a user-provided device buffer. \param[in] data User-provided gpu data buffer which should be sized appropriately for the particular data that is requested. Further details provided in the user guide. \param[in] index User-provided gpu index buffer. This buffer stores the articulation indices which the user wants to copy. \param[in] dataType Enum specifying the type of data the user wants to read back from the articulations. \param[in] nbCopyArticulations Number of articulations that data should be copied from. \param[in] copyEvent User-provided event for the articulation stream to signal when the data copy to the user buffer has completed. Defaults to NULL, which means that the function will wait for the copy to finish before returning. */ virtual void copyArticulationData(void* data, void* index, PxArticulationGpuDataType::Enum dataType, const PxU32 nbCopyArticulations, CUevent copyEvent = NULL) = 0; /** \brief Apply GPU articulation data from a user-provided device buffer to the internal GPU buffer. \param[in] data User-provided gpu data buffer which should be sized appropriately for the particular data that is requested. Further details provided in the user guide. \param[in] index User-provided gpu index buffer. This buffer stores the articulation indices which the user wants to write to. \param[in] dataType Enum specifying the type of data the user wants to write to the articulations. \param[in] nbUpdatedArticulations Number of articulations that data should be written to. \param[in] waitEvent User-provided event for the articulation stream to wait for data. Defaults to NULL, which means the function will execute immediately. 
\param[in] signalEvent User-provided event for the articulation stream to signal when the data read from the user buffer has completed. Defaults to NULL which means the function will wait for the copy to finish before returning. */ virtual void applyArticulationData(void* data, void* index, PxArticulationGpuDataType::Enum dataType, const PxU32 nbUpdatedArticulations, CUevent waitEvent = NULL, CUevent signalEvent = NULL) = 0; /** \brief Update link state for all articulations in the scene that have been updated using PxScene::applyArticulationData(). This function can be called by the user to propagate changes made to root transform/root velocity/joint position/joint velocities so that they are reflected in the link transform/velocity. Calling this function will perform the kinematic update for all the articulations in the scene that have outstanding changes to at least one of the properties mentioned above. Calling this function will clear output calculated by the simulation, specifically link accelerations, link incoming joint forces, and joint accelerations, for the articulations affected by the call. \note Calling this function is not mandatory, as it will be called internally at the start of the simulation step for any outstanding changes. \note This function has to be called if the user wants to obtain correct link transforms and velocities using PxScene::copyArticulationData() after setting joint positions, joint velocities, root link transform or root link velocity using PxScene::applyArticulationData(). \note This function only has an effect if PxSceneFlag::eENABLE_DIRECT_GPU_API is raised and the user has manipulated articulation state using PxScene::applyArticulationData(). \param[in] signalEvent User-provided event for the articulation stream to signal when the kinematic update has been completed. Defaults to NULL which means the function will wait for the operation to finish before returning. */ virtual void updateArticulationsKinematic(CUevent signalEvent = NULL) = 0; /** \brief Copy GPU softbody data from the internal GPU buffer to a user-provided device buffer. \param[in] data User-provided gpu buffer containing a pointer to another gpu buffer for every softbody to process \param[in] dataSizes The size of every buffer in bytes \param[in] softBodyIndices User-provided gpu index buffer. This buffer stores the softbody indices which the user wants to copy. \param[in] maxSize The largest size stored in dataSizes. Used internally to decide how many threads to launch for the copy process. \param[in] flag Flag defining which data the user wants to read back from the softbody system \param[in] nbCopySoftBodies The number of softbodies to be copied. \param[in] copyEvent User-provided event for the user to sync data. Defaults to NULL which means the function will wait for the copy to finish before returning. */ virtual void copySoftBodyData(void** data, void* dataSizes, void* softBodyIndices, PxSoftBodyGpuDataFlag::Enum flag, const PxU32 nbCopySoftBodies, const PxU32 maxSize, CUevent copyEvent = NULL) = 0; /** \brief Apply user-provided data to the internal softbody system. \param[in] data User-provided gpu buffer containing a pointer to another gpu buffer for every softbody to process \param[in] dataSizes The size of every buffer in bytes \param[in] softBodyIndices User-provided gpu index buffer. This buffer stores the updated softbody indices. \param[in] flag Flag defining which data the user wants to write to the softbody system \param[in] maxSize The largest size stored in dataSizes.
Used internally to decide how many threads to launch for the copy process. \param[in] nbUpdatedSoftBodies The number of updated softbodies \param[in] applyEvent User-provided event for the softbody stream to wait for data. \param[in] signalEvent User-provided event for the softbody stream to signal when the read from the user buffer has completed. Defaults to NULL which means the function will wait for the copy to finish before returning. */ virtual void applySoftBodyData(void** data, void* dataSizes, void* softBodyIndices, PxSoftBodyGpuDataFlag::Enum flag, const PxU32 nbUpdatedSoftBodies, const PxU32 maxSize, CUevent applyEvent = NULL, CUevent signalEvent = NULL) = 0; /** \brief Copy rigid body contact data from the internal GPU buffer to a user-provided device buffer. \note This function only reports contact data for actor pairs where both actors are either rigid bodies or articulations. \note The contact data contains pointers to internal state and is only valid until the next call to simulate(). \param[in] data User-provided gpu data buffer, which should be the size of PxGpuContactPair * numContactPairs \param[in] maxContactPairs The maximum number of pairs that the buffer can contain \param[in] numContactPairs The actual number of contact pairs that were written \param[in] copyEvent User-provided event for the user to sync data. Defaults to NULL which means the function will wait for the copy to finish before returning. */ virtual void copyContactData(void* data, const PxU32 maxContactPairs, void* numContactPairs, CUevent copyEvent = NULL) = 0; /** \brief Direct-GPU interface that copies the simulation state for a set of rigid bodies into a user-provided device buffer. \param[in] data User-provided gpu data buffer which has size (maxSrcIndex + 1) * sizeof(PxGpuBodyData), where maxSrcIndex is the largest index used in the PxGpuActorPairs provided with the index argument. Will contain the PxGpuBodyData for every requested body. \param[in] index User-provided gpu index buffer containing elements of PxGpuActorPair. This buffer stores pairs of indices: the PxNodeIndex corresponding to the rigid body and an index corresponding to the location in the user buffer where this value should be placed. There must be 1 PxGpuActorPair for each element of the data buffer. The total size of the buffer must be sizeof(PxGpuActorPair) * nbCopyActors. \param[in] nbCopyActors The number of rigid bodies to be copied. \param[in] copyEvent User-provided event that is recorded at the end of this function. Defaults to NULL which means the function will wait for the copy to finish before returning. \note This function only works if PxSceneFlag::eENABLE_DIRECT_GPU_API has been raised, the scene is using GPU dynamics, and the simulation has been warm-started by simulating for at least 1 simulation step. */ virtual void copyBodyData(PxGpuBodyData* data, PxGpuActorPair* index, const PxU32 nbCopyActors, CUevent copyEvent = NULL) = 0; /** \brief Direct-GPU interface to apply batched updates to simulation state for a set of rigid bodies from a device buffer. \param[in] data User-provided gpu data buffer which should be sized appropriately for the particular data that is requested. The data layout for PxActorCacheFlag::eFORCE and PxActorCacheFlag::eTORQUE is 1 PxVec4 per rigid body (4th component is unused). For PxActorCacheFlag::eACTOR_DATA the data layout is 1 PxGpuBodyData per rigid body.
The total size of the buffer must be sizeof(type) * (maxSrcIndex + 1), where maxSrcIndex is the largest source index used in the PxGpuActorPairs provided in the index array. \param[in] index User-provided PxGpuActorPair buffer. This buffer stores pairs of indices: the PxNodeIndex corresponding to the rigid body and an index (srcIndex) corresponding to the location in the user buffer that the value is located at. The total size of this buffer must be sizeof(PxGpuActorPair) * nbUpdatedActors. \param[in] flag Flag specifying which data the user wants to write to the rigid bodies. \param[in] nbUpdatedActors The number of updated rigid bodies. \param[in] waitEvent User-provided event for the rigid body stream to wait for data. Will be awaited at the start of this function. Defaults to NULL which means the operation will start immediately. \param[in] signalEvent User-provided event for the rigid body stream to signal when the read from the user buffer has completed. Defaults to NULL which means the function will wait for the copy to finish before returning. \note This function only works if PxSceneFlag::eENABLE_DIRECT_GPU_API has been raised, the scene is using GPU dynamics, and the simulation has been warm-started by simulating for at least 1 simulation step. \note The combined usage of this function and the object-oriented CPU interface is forbidden for all parameters that can be set through this function. Specifically, this includes: PxRigidDynamic::setGlobalPose(), PxRigidDynamic::setLinearVelocity(), PxRigidDynamic::setAngularVelocity(), PxRigidDynamic::addForce(), PxRigidDynamic::addTorque(), PxRigidDynamic::setForceAndTorque(). However, using the CPU interface to update simulation parameters like, for example, mass or angular damping is still supported. */ virtual void applyActorData(void* data, PxGpuActorPair* index, PxActorCacheFlag::Enum flag, const PxU32 nbUpdatedActors, CUevent waitEvent = NULL, CUevent signalEvent = NULL) = 0; /** \brief Evaluate sample point distances on sdf shapes \param[in] sdfShapeIds The shapes ids in a gpu buffer (must be triangle mesh shapes with SDFs) which specify the shapes from which the sdf information is taken \param[in] nbShapes The number of shapes \param[in] localSamplePointsConcatenated User-provided gpu buffer containing the sample point locations for every shape in the shapes local space. The buffer stride is maxPointCount. \param[in] samplePointCountPerShape Gpu buffer containing the number of sample points for every shape \param[in] maxPointCount The maximum value in the array samplePointCountPerShape \param[out] localGradientAndSDFConcatenated The gpu buffer where the evaluated distances and gradients in SDF local space get stored. It has the same structure as localSamplePointsConcatenated. \param[in] event User-provided event for the user to sync. Defaults to NULL which means the function will wait for the operation to finish before returning. */ virtual void evaluateSDFDistances(const PxU32* sdfShapeIds, const PxU32 nbShapes, const PxVec4* localSamplePointsConcatenated, const PxU32* samplePointCountPerShape, const PxU32 maxPointCount, PxVec4* localGradientAndSDFConcatenated, CUevent event = NULL) = 0; /** \brief Compute dense Jacobian matrices for specified articulations on the GPU. The size of Jacobians can vary by articulation, since it depends on the number of links, degrees-of-freedom, and whether the base is fixed. The size is determined using these formulas: nCols = (fixedBase ? 0 : 6) + dofCount nRows = (fixedBase ? 
0 : 6) + (linkCount - 1) * 6; The user must ensure that adequate space is provided for each Jacobian matrix. \param[in] indices User-provided gpu buffer of (index, data) pairs. The entries map a GPU articulation index to a GPU block of memory where the returned Jacobian will be stored. \param[in] nbIndices The number of (index, data) pairs provided. \param[in] computeEvent User-provided event for the user to sync data. Defaults to NULL which means the function will wait for the computation to finish before returning. */ virtual void computeDenseJacobians(const PxIndexDataPair* indices, PxU32 nbIndices, CUevent computeEvent = NULL) = 0; /** \brief Compute, on the GPU, the joint-space inertia matrices that map joint accelerations to joint forces: forces = M * accelerations. The size of the matrices can vary by articulation, since it depends on the number of links and degrees-of-freedom. The size is determined using this formula: sizeof(float) * dofCount * dofCount. The user must ensure that adequate space is provided for each mass matrix. \param[in] indices User-provided gpu buffer of (index, data) pairs. The entries map a GPU articulation index to a GPU block of memory where the returned matrix will be stored. \param[in] nbIndices The number of (index, data) pairs provided. \param[in] computeEvent User-provided event for the user to sync data. Defaults to NULL which means the function will wait for the computation to finish before returning. */ virtual void computeGeneralizedMassMatrices(const PxIndexDataPair* indices, PxU32 nbIndices, CUevent computeEvent = NULL) = 0; /** \brief Computes the joint DOF forces required to counteract gravitational forces for the given articulation pose. The size of the result can vary by articulation, since it depends on the number of links and degrees-of-freedom. The size is determined using this formula: sizeof(float) * dofCount. The user must ensure that adequate space is provided for each articulation. \param[in] indices User-provided gpu buffer of (index, data) pairs. The entries map a GPU articulation index to a GPU block of memory where the returned matrix will be stored. \param[in] nbIndices The number of (index, data) pairs provided. \param[in] computeEvent User-provided event for the user to sync data. Defaults to NULL which means the function will wait for the computation to finish before returning. */ virtual void computeGeneralizedGravityForces(const PxIndexDataPair* indices, PxU32 nbIndices, CUevent computeEvent = NULL) = 0; /** \brief Computes the joint DOF forces required to counteract Coriolis and centrifugal forces for the given articulation pose. The size of the result can vary by articulation, since it depends on the number of links and degrees-of-freedom. The size is determined using this formula: sizeof(float) * dofCount. The user must ensure that adequate space is provided for each articulation. \param[in] indices User-provided gpu buffer of (index, data) pairs. The entries map a GPU articulation index to a GPU block of memory where the returned matrix will be stored. \param[in] nbIndices The number of (index, data) pairs provided. \param[in] computeEvent User-provided event for the user to sync data. Defaults to NULL which means the function will wait for the computation to finish before returning.
*/ virtual void computeCoriolisAndCentrifugalForces(const PxIndexDataPair* indices, PxU32 nbIndices, CUevent computeEvent = NULL) = 0; virtual PxgDynamicsMemoryConfig getGpuDynamicsConfig() const = 0; /** \brief Apply user-provided data to particle buffers. This function should be used if the particle buffer flags are already on the device. Otherwise, use PxParticleBuffer::raiseFlags() from the CPU. This assumes the data has been changed directly in the PxParticleBuffer. \param[in] indices User-provided index buffer that indexes into the BufferIndexPair and flags list. \param[in] bufferIndexPair User-provided index pair buffer specifying the unique id and GPU particle system for each PxParticleBuffer. See PxGpuParticleBufferIndexPair. \param[in] flags Flags to mark what data needs to be updated. See PxParticleBufferFlags. \param[in] nbUpdatedBuffers The number of particle buffers to update. \param[in] waitEvent User-provided event for the particle stream to wait for data. Defaults to NULL which means the operation will start immediately. \param[in] signalEvent User-provided event for the particle stream to signal when the data read from the user buffer has completed. Defaults to NULL which means the function will wait for copy to finish before returning. */ virtual void applyParticleBufferData(const PxU32* indices, const PxGpuParticleBufferIndexPair* bufferIndexPair, const PxParticleBufferFlags* flags, PxU32 nbUpdatedBuffers, CUevent waitEvent = NULL, CUevent signalEvent = NULL) = 0; void* userData; //!< user can assign this to whatever, usually to create a 1:1 relationship with a user object. }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
79,934
C
41.31604
513
0.758989
NVIDIA-Omniverse/PhysX/physx/include/PxSceneQueryDesc.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_SCENE_QUERY_DESC_H #define PX_SCENE_QUERY_DESC_H /** \addtogroup physics @{ */ #include "PxPhysXConfig.h" #include "geometry/PxBVHBuildStrategy.h" #if !PX_DOXYGEN namespace physx { #endif class PxSceneQuerySystem; /** \brief Pruning structure used to accelerate scene queries. eNONE uses a simple data structure that consumes less memory than the alternatives, but generally has slower query performance. eDYNAMIC_AABB_TREE usually provides the fastest queries. However there is a constant per-frame management cost associated with this structure. How much work should be done per frame can be tuned via the #PxSceneQueryDesc::dynamicTreeRebuildRateHint parameter. eSTATIC_AABB_TREE is typically used for static objects. It is the same as the dynamic AABB tree, without the per-frame overhead. This can be a good choice for static objects, if no static objects are added, moved or removed after the scene has been created. If there is no such guarantee (e.g. when streaming parts of the world in and out), then the dynamic version is a better choice even for static objects. */ struct PxPruningStructureType { enum Enum { eNONE, //!< Using a simple data structure eDYNAMIC_AABB_TREE, //!< Using a dynamic AABB tree eSTATIC_AABB_TREE, //!< Using a static AABB tree eLAST }; }; /** \brief Secondary pruning structure used for newly added objects in dynamic trees. Dynamic trees (PxPruningStructureType::eDYNAMIC_AABB_TREE) are slowly rebuilt over several frames. A secondary pruning structure holds and manages objects added to the scene while this rebuild is in progress. eNONE ignores newly added objects. This means that for a number of frames (roughly defined by PxSceneQueryDesc::dynamicTreeRebuildRateHint) newly added objects will be ignored by scene queries. This can be acceptable when streaming large worlds, e.g. 
when the objects added at the boundaries of the game world don't immediately need to be visible from scene queries (it would be equivalent to streaming that data in a few frames later). The advantage of this approach is that there is no CPU cost associated with inserting the new objects in the scene query data structures, and no extra runtime cost when performing queries. eBUCKET uses a structure similar to PxPruningStructureType::eNONE. Insertion is fast but query cost can be high. eINCREMENTAL uses an incremental AABB-tree, with no direct PxPruningStructureType equivalent. Query time is fast but insertion cost can be high. eBVH uses a PxBVH structure. This usually offers the best overall performance. */ struct PxDynamicTreeSecondaryPruner { enum Enum { eNONE, //!< no secondary pruner, new objects aren't visible to SQ for a few frames eBUCKET , //!< bucket-based secondary pruner, faster updates, slower query time eINCREMENTAL, //!< incremental-BVH secondary pruner, faster query time, slower updates eBVH, //!< PxBVH-based secondary pruner, good overall performance eLAST }; }; /** \brief Scene query update mode. This enum controls what work is done when the scene query system is updated. The updates traditionally happen when PxScene::fetchResults is called. This function then calls PxSceneQuerySystem::finalizeUpdates, where the update mode is used. fetchResults/finalizeUpdates will sync changed bounds during simulation and update the scene query bounds in pruners; this work is mandatory. eBUILD_ENABLED_COMMIT_ENABLED allows the new AABB tree build step to execute during fetchResults/finalizeUpdates; additionally, the pruner commit is called, where any changes are applied. During commit, PhysX refits the dynamic scene query tree, and if a new tree was built and the build finished, the tree is swapped with the current AABB tree. eBUILD_ENABLED_COMMIT_DISABLED allows the new AABB tree build step to execute during fetchResults/finalizeUpdates. The pruner commit is not called; this means that the refit will then occur during the first scene query following fetchResults/finalizeUpdates, or may be forced by the method PxScene::flushQueryUpdates() / PxSceneQuerySystemBase::flushUpdates(). With eBUILD_DISABLED_COMMIT_DISABLED, no further scene query work is executed. The scene query update needs to be called manually; see PxScene::sceneQueriesUpdate (see that function's doc for the equivalent PxSceneQuerySystem sequence). It is recommended to call PxScene::sceneQueriesUpdate right after fetchResults/finalizeUpdates as the pruning structures are not updated otherwise. */ struct PxSceneQueryUpdateMode { enum Enum { eBUILD_ENABLED_COMMIT_ENABLED, //!< Both scene query build and commit are executed. eBUILD_ENABLED_COMMIT_DISABLED, //!< Scene query build only is executed. eBUILD_DISABLED_COMMIT_DISABLED //!< No work is done, no update of scene queries }; }; /** \brief Descriptor class for scene query system. See #PxSceneQuerySystem. */ class PxSceneQueryDesc { public: /** \brief Defines the structure used to store static objects (PxRigidStatic actors). There are usually a lot more static actors than dynamic actors in a scene, so they are stored in a separate structure. The idea is that when dynamic actors move each frame, the static structure remains untouched and does not need updating. <b>Default:</b> PxPruningStructureType::eDYNAMIC_AABB_TREE \note Only PxPruningStructureType::eSTATIC_AABB_TREE and PxPruningStructureType::eDYNAMIC_AABB_TREE are allowed here.
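\note A minimal configuration sketch (illustrative only; the same members are typically set on a PxSceneDesc, which derives from PxSceneQueryDesc):
\code
PxSceneQueryDesc sqDesc;
sqDesc.staticStructure  = PxPruningStructureType::eSTATIC_AABB_TREE;   // static actors are never added, moved or removed after creation
sqDesc.dynamicStructure = PxPruningStructureType::eDYNAMIC_AABB_TREE;  // default choice for moving objects
PX_ASSERT(sqDesc.isValid());
\endcode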
@see PxPruningStructureType PxSceneSQSystem.getStaticStructure() */ PxPruningStructureType::Enum staticStructure; /** \brief Defines the structure used to store dynamic objects (non-PxRigidStatic actors). <b>Default:</b> PxPruningStructureType::eDYNAMIC_AABB_TREE @see PxPruningStructureType PxSceneSQSystem.getDynamicStructure() */ PxPruningStructureType::Enum dynamicStructure; /** \brief Hint for how much work should be done per simulation frame to rebuild the pruning structures. This parameter gives a hint on the distribution of the workload for rebuilding the dynamic AABB tree pruning structure #PxPruningStructureType::eDYNAMIC_AABB_TREE. It specifies the desired number of simulation frames the rebuild process should take. Higher values will decrease the workload per frame but the pruning structure will get more and more outdated the longer the rebuild takes (which can make scene queries less efficient). \note Only used for #PxPruningStructureType::eDYNAMIC_AABB_TREE pruning structures. \note Both staticStructure & dynamicStructure can use a PxPruningStructureType::eDYNAMIC_AABB_TREE, in which case this parameter is used for both. \note This parameter gives only a hint. The rebuild process might still take more or less time depending on the number of objects involved. <b>Range:</b> [4, PX_MAX_U32)<br> <b>Default:</b> 100 @see PxSceneQuerySystemBase.setDynamicTreeRebuildRateHint() PxSceneQuerySystemBase.getDynamicTreeRebuildRateHint() */ PxU32 dynamicTreeRebuildRateHint; /** \brief Secondary pruner for dynamic tree. This is used for PxPruningStructureType::eDYNAMIC_AABB_TREE structures, to control how objects added to the system at runtime are managed. \note Both staticStructure & dynamicStructure can use a PxPruningStructureType::eDYNAMIC_AABB_TREE, in which case this parameter is used for both. <b>Default:</b> PxDynamicTreeSecondaryPruner::eINCREMENTAL @see PxDynamicTreeSecondaryPruner */ PxDynamicTreeSecondaryPruner::Enum dynamicTreeSecondaryPruner; /** \brief Build strategy for PxSceneQueryDesc::staticStructure. This parameter is used to refine / control the build strategy of PxSceneQueryDesc::staticStructure. This is only used with PxPruningStructureType::eDYNAMIC_AABB_TREE and PxPruningStructureType::eSTATIC_AABB_TREE. <b>Default:</b> PxBVHBuildStrategy::eFAST @see PxBVHBuildStrategy PxSceneQueryDesc::staticStructure */ PxBVHBuildStrategy::Enum staticBVHBuildStrategy; /** \brief Build strategy for PxSceneQueryDesc::dynamicStructure. This parameter is used to refine / control the build strategy of PxSceneQueryDesc::dynamicStructure. This is only used with PxPruningStructureType::eDYNAMIC_AABB_TREE and PxPruningStructureType::eSTATIC_AABB_TREE. <b>Default:</b> PxBVHBuildStrategy::eFAST @see PxBVHBuildStrategy PxSceneQueryDesc::dynamicStructure */ PxBVHBuildStrategy::Enum dynamicBVHBuildStrategy; /** \brief Number of objects per node for PxSceneQueryDesc::staticStructure. This parameter is used to refine / control the number of objects per node for PxSceneQueryDesc::staticStructure. This is only used with PxPruningStructureType::eDYNAMIC_AABB_TREE and PxPruningStructureType::eSTATIC_AABB_TREE. This parameter has an impact on how quickly the structure gets built, and on the per-frame cost of maintaining the structure. Increasing this value gives smaller AABB-trees that use less memory, are faster to build and update, but it can lead to slower queries. 
<b>Default:</b> 4 @see PxSceneQueryDesc::staticStructure */ PxU32 staticNbObjectsPerNode; /** \brief Number of objects per node for PxSceneQueryDesc::dynamicStructure. This parameter is used to refine / control the number of objects per node for PxSceneQueryDesc::dynamicStructure. This is only used with PxPruningStructureType::eDYNAMIC_AABB_TREE and PxPruningStructureType::eSTATIC_AABB_TREE. This parameter has an impact on how quickly the structure gets built, and on the per-frame cost of maintaining the structure. Increasing this value gives smaller AABB-trees that use less memory, are faster to build and update, but it can lead to slower queries. <b>Default:</b> 4 @see PxSceneQueryDesc::dynamicStructure */ PxU32 dynamicNbObjectsPerNode; /** \brief Defines the scene query update mode. <b>Default:</b> PxSceneQueryUpdateMode::eBUILD_ENABLED_COMMIT_ENABLED @see PxSceneQuerySystemBase.setUpdateMode() PxSceneQuerySystemBase.getUpdateMode() */ PxSceneQueryUpdateMode::Enum sceneQueryUpdateMode; public: /** \brief constructor sets to default. */ PX_INLINE PxSceneQueryDesc(); /** \brief (re)sets the structure to the default. */ PX_INLINE void setToDefault(); /** \brief Returns true if the descriptor is valid. \return true if the current settings are valid. */ PX_INLINE bool isValid() const; }; PX_INLINE PxSceneQueryDesc::PxSceneQueryDesc(): staticStructure (PxPruningStructureType::eDYNAMIC_AABB_TREE), dynamicStructure (PxPruningStructureType::eDYNAMIC_AABB_TREE), dynamicTreeRebuildRateHint (100), dynamicTreeSecondaryPruner (PxDynamicTreeSecondaryPruner::eINCREMENTAL), staticBVHBuildStrategy (PxBVHBuildStrategy::eFAST), dynamicBVHBuildStrategy (PxBVHBuildStrategy::eFAST), staticNbObjectsPerNode (4), dynamicNbObjectsPerNode (4), sceneQueryUpdateMode (PxSceneQueryUpdateMode::eBUILD_ENABLED_COMMIT_ENABLED) { } PX_INLINE void PxSceneQueryDesc::setToDefault() { *this = PxSceneQueryDesc(); } PX_INLINE bool PxSceneQueryDesc::isValid() const { if(staticStructure!=PxPruningStructureType::eSTATIC_AABB_TREE && staticStructure!=PxPruningStructureType::eDYNAMIC_AABB_TREE) return false; if(dynamicTreeRebuildRateHint < 4) return false; return true; } #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
12,911
C
38.2462
141
0.789482
NVIDIA-Omniverse/PhysX/physx/include/PxVisualizationParameter.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_VISUALIZATION_PARAMETER_H #define PX_VISUALIZATION_PARAMETER_H #include "foundation/PxPreprocessor.h" /** \addtogroup physics @{ */ #if !PX_DOXYGEN namespace physx { #endif /* NOTE: Parameters should NOT be conditionally compiled out. Even if a particular feature is not available. Otherwise the parameter values get shifted about and the numeric values change per platform. This causes problems when trying to serialize parameters. New parameters should also be added to the end of the list for this reason. Also make sure to update eNUM_VALUES, which should be one higher than the maximum value in the enum. */ /** \brief Debug visualization parameters. #PxVisualizationParameter::eSCALE is the master switch for enabling visualization, please read the corresponding documentation for further details. @see PxScene.setVisualizationParameter() PxScene.getVisualizationParameter() PxScene.getRenderBuffer() */ struct PxVisualizationParameter { enum Enum { /* RigidBody-related parameters */ /** \brief This overall visualization scale gets multiplied with the individual scales. Setting to zero ignores all visualizations. Default is 0. The below settings permit the debug visualization of various simulation properties. The setting is either zero, in which case the property is not drawn. Otherwise it is a scaling factor that determines the size of the visualization widgets. Only objects for which visualization is turned on using setFlag(eVISUALIZATION) are visualized (see #PxActorFlag::eVISUALIZATION, #PxShapeFlag::eVISUALIZATION, ...). Default is 0. Notes: - to see any visualization, you have to set PxVisualizationParameter::eSCALE to nonzero first. - the scale factor has been introduced because it's difficult (if not impossible) to come up with a good scale for 3D vectors. Normals are normalized and their length is always 1. 
But it doesn't mean we should render a line of length 1. Depending on your objects/scene, this might be completely invisible or extremely huge. That's why the scale factor is here, to let you tune the length until it's ok in your scene. - however, things like collision shapes aren't ambiguous. They are clearly defined for example by the triangles & polygons themselves, and there's no point in scaling that. So the visualization widgets are only scaled when it makes sense. <b>Range:</b> [0, PX_MAX_F32)<br> <b>Default:</b> 0 */ eSCALE, /** \brief Visualize the world axes. */ eWORLD_AXES, /* Body visualizations */ /** \brief Visualize a body's axes. @see PxActor.globalPose PxActor */ eBODY_AXES, /** \brief Visualize a body's mass axes. This visualization is also useful for visualizing the sleep state of bodies. Sleeping bodies are drawn in black, while awake bodies are drawn in white. If the body is sleeping and part of a sleeping group, it is drawn in red. @see PxBodyDesc.massLocalPose PxActor */ eBODY_MASS_AXES, /** \brief Visualize a body's linear velocity. @see PxBodyDesc.linearVelocity PxActor */ eBODY_LIN_VELOCITY, /** \brief Visualize a body's angular velocity. @see PxBodyDesc.angularVelocity PxActor */ eBODY_ANG_VELOCITY, /* Contact visualizations */ /** \brief Visualize contact points. Will enable contact information. */ eCONTACT_POINT, /** \brief Visualize contact normals. Will enable contact information. */ eCONTACT_NORMAL, /** \brief Visualize contact errors. Will enable contact information. */ eCONTACT_ERROR, /** \brief Visualize contact forces. Will enable contact information. */ eCONTACT_FORCE, /** \brief Visualize actor axes. @see PxRigidStatic PxRigidDynamic PxArticulationLink */ eACTOR_AXES, /** \brief Visualize bounds (AABBs in world space) */ eCOLLISION_AABBS, /** \brief Shape visualization @see PxShape */ eCOLLISION_SHAPES, /** \brief Shape axis visualization @see PxShape */ eCOLLISION_AXES, /** \brief Compound visualization (compound AABBs in world space) */ eCOLLISION_COMPOUNDS, /** \brief Mesh & convex face normals @see PxTriangleMesh PxConvexMesh */ eCOLLISION_FNORMALS, /** \brief Active edges for meshes @see PxTriangleMesh */ eCOLLISION_EDGES, /** \brief Static pruning structures */ eCOLLISION_STATIC, /** \brief Dynamic pruning structures */ eCOLLISION_DYNAMIC, /** \brief Joint local axes */ eJOINT_LOCAL_FRAMES, /** \brief Joint limits */ eJOINT_LIMITS, /** \brief Visualize culling box */ eCULL_BOX, /** \brief MBP regions */ eMBP_REGIONS, /** \brief Renders the simulation mesh instead of the collision mesh (only available for tetmeshes) */ eSIMULATION_MESH, /** \brief Renders the SDF of a mesh instead of the collision mesh (only available for triangle meshes with SDFs) */ eSDF, /** \brief This is not a parameter, it just records the current number of parameters (as maximum(PxVisualizationParameter)+1) for use in loops. */ eNUM_VALUES, eFORCE_DWORD = 0x7fffffff }; }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
6,909
C
26.312253
167
0.73151
NVIDIA-Omniverse/PhysX/physx/include/PxSceneDesc.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_SCENE_DESC_H #define PX_SCENE_DESC_H /** \addtogroup physics @{ */ #include "PxSceneQueryDesc.h" #include "PxPhysXConfig.h" #include "foundation/PxFlags.h" #include "foundation/PxBounds3.h" #include "foundation/PxBitUtils.h" #include "PxFiltering.h" #include "PxBroadPhase.h" #include "common/PxTolerancesScale.h" #include "task/PxTask.h" #if !PX_DOXYGEN namespace physx { #endif class PxBroadPhaseCallback; class PxCudaContextManager; /** \brief Enum for selecting the friction algorithm used for simulation. #PxFrictionType::ePATCH selects the patch friction model which typically leads to the most stable results at low solver iteration counts and is also quite inexpensive, as it uses only up to four scalar solver constraints per pair of touching objects. The patch friction model is the same basic strong friction algorithm as PhysX 3.2 and before. #PxFrictionType::eTWO_DIRECTIONAL is identical to the one directional model, but it applies friction in both tangent directions simultaneously. This hurts convergence a bit so it requires more solver iterations, but is more accurate. Like the one directional model, it is applied at every contact point, which makes it potentially more expensive than patch friction for scenarios with many contact points. #PxFrictionType::eFRICTION_COUNT is the total number of friction models supported by the SDK. */ struct PxFrictionType { enum Enum { ePATCH, //!< Select default patch-friction model. eONE_DIRECTIONAL PX_DEPRECATED, //!< @deprecated Please do not use any longer. eTWO_DIRECTIONAL, //!< Select two directional per-contact friction model. eFRICTION_COUNT //!< The total number of friction models supported by the SDK. }; }; /** \brief Enum for selecting the type of solver used for the simulation. #PxSolverType::ePGS selects the iterative sequential impulse solver.
This is the same kind of solver used in PhysX 3.4 and earlier releases. #PxSolverType::eTGS selects a non-linear iterative solver. This kind of solver can lead to improved convergence and handle large mass ratios, long chains and jointed systems better. It is slightly more expensive than the default solver and can introduce more energy to correct joint and contact errors. */ struct PxSolverType { enum Enum { ePGS, //!< Projected Gauss-Seidel iterative solver eTGS //!< Temporal Gauss-Seidel solver }; }; /** \brief Flags for configuring properties of the scene @see PxScene */ struct PxSceneFlag { enum Enum { /** \brief Enable Active Actors Notification. This flag enables the Active Actor Notification feature for a scene. This feature defaults to disabled. When disabled, the function PxScene::getActiveActors() will always return a NULL list. \note There may be a performance penalty for enabling the Active Actor Notification, hence this flag should only be enabled if the application intends to use the feature. <b>Default:</b> False */ eENABLE_ACTIVE_ACTORS = (1<<0), /** \brief Enables a second broad phase check after integration that makes it possible to prevent objects from tunneling through each other. PxPairFlag::eDETECT_CCD_CONTACT requires this flag to be specified. \note For this feature to be effective for bodies that can move at a significant velocity, the user should raise the flag PxRigidBodyFlag::eENABLE_CCD for them. \note This flag is not mutable, and must be set in PxSceneDesc at scene creation. <b>Default:</b> False @see PxRigidBodyFlag::eENABLE_CCD, PxPairFlag::eDETECT_CCD_CONTACT, eDISABLE_CCD_RESWEEP */ eENABLE_CCD = (1<<1), /** \brief Enables a simplified swept integration strategy, which sacrifices some accuracy for improved performance. This simplified swept integration approach makes certain assumptions about the motion of objects that are not made when using a full swept integration. These assumptions usually hold but there are cases where they could result in incorrect behavior between a set of fast-moving rigid bodies. A key issue is that fast-moving dynamic objects may tunnel through each other after a rebound. This will not happen if this mode is disabled. However, this approach will be potentially faster than a full swept integration because it will perform significantly fewer sweeps in non-trivial scenes involving many fast-moving objects. This approach should successfully resist objects passing through the static environment. PxPairFlag::eDETECT_CCD_CONTACT requires this flag to be specified. \note This scene flag requires eENABLE_CCD to be enabled as well. If it is not, this scene flag will do nothing. \note For this feature to be effective for bodies that can move at a significant velocity, the user should raise the flag PxRigidBodyFlag::eENABLE_CCD for them. \note This flag is not mutable, and must be set in PxSceneDesc at scene creation. <b>Default:</b> False @see PxRigidBodyFlag::eENABLE_CCD, PxPairFlag::eDETECT_CCD_CONTACT, eENABLE_CCD */ eDISABLE_CCD_RESWEEP = (1<<2), /** \brief Enable GJK-based distance collision detection system. \note This flag is not mutable, and must be set in PxSceneDesc at scene creation. <b>Default:</b> true */ eENABLE_PCM = (1 << 6), /** \brief Disable contact report buffer resize. Once the contact buffer is full, the rest of the contact reports will not be buffered and sent. \note This flag is not mutable, and must be set in PxSceneDesc at scene creation.
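\note A minimal sketch of raising a non-mutable scene flag at creation time (illustrative only; assumes a valid PxPhysics* named "physics" and a valid PxCpuDispatcher* named "dispatcher"; PxDefaultSimulationFilterShader comes from the PhysX extensions library):
\code
PxSceneDesc sceneDesc(physics->getTolerancesScale());
sceneDesc.cpuDispatcher = dispatcher;
sceneDesc.filterShader  = PxDefaultSimulationFilterShader;
sceneDesc.flags |= PxSceneFlag::eDISABLE_CONTACT_REPORT_BUFFER_RESIZE;  // non-mutable, must be raised before createScene()
PxScene* scene = physics->createScene(sceneDesc);
\endcode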
<b>Default:</b> false */ eDISABLE_CONTACT_REPORT_BUFFER_RESIZE = (1 << 7), /** \brief Disable contact cache. Contact caches are used internally to provide faster contact generation. You can disable all contact caches if memory usage for this feature becomes too high. \note This flag is not mutable, and must be set in PxSceneDesc at scene creation. <b>Default:</b> false */ eDISABLE_CONTACT_CACHE = (1 << 8), /** \brief Require scene-level locking. When set to true this requires that threads accessing the PxScene use the multi-threaded lock methods. \note This flag is not mutable, and must be set in PxSceneDesc at scene creation. @see PxScene::lockRead @see PxScene::unlockRead @see PxScene::lockWrite @see PxScene::unlockWrite <b>Default:</b> false */ eREQUIRE_RW_LOCK = (1 << 9), /** \brief Enables additional stabilization pass in solver. When set to true, this enables additional stabilization processing to improve the stability of complex interactions between large numbers of bodies. Note that this flag is not mutable and must be set in PxSceneDesc at scene creation. Also, this is an experimental feature which does result in some loss of momentum. */ eENABLE_STABILIZATION = (1 << 10), /** \brief Enables average points in contact manifolds. When set to true, this enables additional contacts to be generated per manifold to represent the average point in a manifold. This can stabilize stacking when only a small number of solver iterations is used. Note that this flag is not mutable and must be set in PxSceneDesc at scene creation. */ eENABLE_AVERAGE_POINT = (1 << 11), /** \brief Do not report kinematics in list of active actors. Since the target pose for kinematics is set by the user, an application can track the activity state directly and use this flag to prevent kinematics from being added to the list of active actors. \note This flag only has an effect in combination with eENABLE_ACTIVE_ACTORS. @see eENABLE_ACTIVE_ACTORS <b>Default:</b> false */ eEXCLUDE_KINEMATICS_FROM_ACTIVE_ACTORS = (1 << 12), /** \brief Enables the GPU dynamics pipeline. When set to true, if a CUDA ARCH 3.0 or above-enabled NVIDIA GPU is present and the CUDA context manager has been configured, this will run the GPU dynamics pipeline instead of the CPU dynamics pipeline. Note that this flag is not mutable and must be set in PxSceneDesc at scene creation. */ eENABLE_GPU_DYNAMICS = (1 << 13), /** \brief Provides improved determinism at the expense of performance. By default, PhysX provides limited determinism guarantees. Specifically, PhysX guarantees that the exact same scene (same actors created in the same order), simulated using the same time-stepping scheme, should provide the exact same behaviour. However, if additional actors are added to the simulation, this can affect the behaviour of the existing actors in the simulation, even if the set of new actors does not interact with the existing actors. This flag provides an additional level of determinism that guarantees that the simulation will not change if additional actors are added to the simulation, provided those actors do not interfere with the existing actors in the scene. Determinism is only guaranteed if the actors are inserted in a consistent order each run in a newly-created scene and simulated using a consistent time-stepping scheme. Note that this flag is not mutable and must be set at scene creation. Note that enabling this flag can have a negative impact on performance. Note that this feature is not currently supported on GPU.
<b>Default</b> false */ eENABLE_ENHANCED_DETERMINISM = (1<<14), /** \brief Controls processing friction in all solver iterations By default, PhysX processes friction only in the final 3 position iterations, and all velocity iterations. This flag enables friction processing in all position and velocity iterations. The default behaviour provides a good trade-off between performance and stability and is aimed primarily at game development. When simulating more complex frictional behaviour, such as grasping of complex geometries with a robotic manipulator, better results can be achieved by enabling friction in all solver iterations. \note This flag only has effect with the default solver. The TGS solver always performs friction per-iteration. */ eENABLE_FRICTION_EVERY_ITERATION = (1 << 15), /* \brief Enables the direct-GPU API. Raising this flag is only allowed if eENABLE_GPU_DYNAMICS is raised and PxBroadphaseType::eGPU is used. This is useful if your application only needs to communicate to the GPU via GPU buffers. Can be significantly faster. \note Enabling the direct-GPU API will disable the readback of simulation state from GPU to CPU. Simulation outputs can only be accessed using the direct-GPU API functions in PxScene (PxScene::copyBodyData(), PxScene::copyArticulationData(), PxScene::copySoftbodyData(), PxScene::copyContactData()), and reading state directly from the actor is not allowed. \note This flag is not mutable and must be set in PxSceneDesc at scene creation. */ eENABLE_DIRECT_GPU_API = (1 << 16), eMUTABLE_FLAGS = eENABLE_ACTIVE_ACTORS|eEXCLUDE_KINEMATICS_FROM_ACTIVE_ACTORS }; }; /** \brief collection of set bits defined in PxSceneFlag. @see PxSceneFlag */ typedef PxFlags<PxSceneFlag::Enum,PxU32> PxSceneFlags; PX_FLAGS_OPERATORS(PxSceneFlag::Enum,PxU32) class PxSimulationEventCallback; class PxContactModifyCallback; class PxCCDContactModifyCallback; class PxSimulationFilterCallback; /** \brief Class used to retrieve limits(e.g. maximum number of bodies) for a scene. The limits are used as a hint to the size of the scene, not as a hard limit (i.e. it will be possible to create more objects than specified in the scene limits). 0 indicates no limit. Using limits allows the SDK to preallocate various arrays, leading to less re-allocations and faster code at runtime. */ class PxSceneLimits { public: PxU32 maxNbActors; //!< Expected maximum number of actors PxU32 maxNbBodies; //!< Expected maximum number of dynamic rigid bodies PxU32 maxNbStaticShapes; //!< Expected maximum number of static shapes PxU32 maxNbDynamicShapes; //!< Expected maximum number of dynamic shapes PxU32 maxNbAggregates; //!< Expected maximum number of aggregates PxU32 maxNbConstraints; //!< Expected maximum number of constraint shaders PxU32 maxNbRegions; //!< Expected maximum number of broad-phase regions PxU32 maxNbBroadPhaseOverlaps; //!< Expected maximum number of broad-phase overlaps /** \brief constructor sets to default */ PX_INLINE PxSceneLimits(); /** \brief (re)sets the structure to the default */ PX_INLINE void setToDefault(); /** \brief Returns true if the descriptor is valid. \return true if the current settings are valid. 
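\note A configuration sketch that passes this validity check (illustrative only; the values are arbitrary example hints, not hard caps):
\code
PxSceneLimits limits;
limits.maxNbBodies        = 1024;   // expected number of dynamic rigid bodies
limits.maxNbStaticShapes  = 4096;
limits.maxNbDynamicShapes = 2048;
PX_ASSERT(limits.isValid());        // only fails if maxNbRegions exceeds 256
\endcode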
*/ PX_INLINE bool isValid() const; }; PX_INLINE PxSceneLimits::PxSceneLimits() : //constructor sets to default maxNbActors (0), maxNbBodies (0), maxNbStaticShapes (0), maxNbDynamicShapes (0), maxNbAggregates (0), maxNbConstraints (0), maxNbRegions (0), maxNbBroadPhaseOverlaps (0) { } PX_INLINE void PxSceneLimits::setToDefault() { *this = PxSceneLimits(); } PX_INLINE bool PxSceneLimits::isValid() const { if(maxNbRegions>256) // max number of regions is currently limited return false; return true; } //#if PX_SUPPORT_GPU_PHYSX /** \brief Sizes of pre-allocated buffers use for GPU dynamics */ struct PxgDynamicsMemoryConfig { PxU32 tempBufferCapacity; //!< Initial capacity of temp solver buffer allocated in pinned host memory. This buffer will grow if more memory is needed than specified here. PxU32 maxRigidContactCount; //!< Size of contact stream buffer allocated in pinned host memory. This is double-buffered so total allocation size = 2* contactStreamCapacity * sizeof(PxContact). PxU32 maxRigidPatchCount; //!< Size of the contact patch stream buffer allocated in pinned host memory. This is double-buffered so total allocation size = 2 * patchStreamCapacity * sizeof(PxContactPatch). PxU32 heapCapacity; //!< Initial capacity of the GPU and pinned host memory heaps. Additional memory will be allocated if more memory is required. PxU32 foundLostPairsCapacity; //!< Capacity of found and lost buffers allocated in GPU global memory. This is used for the found/lost pair reports in the BP. PxU32 foundLostAggregatePairsCapacity; //!<Capacity of found and lost buffers in aggregate system allocated in GPU global memory. This is used for the found/lost pair reports in AABB manager PxU32 totalAggregatePairsCapacity; //!<Capacity of total number of aggregate pairs allocated in GPU global memory. PxU32 maxSoftBodyContacts; PxU32 maxFemClothContacts; PxU32 maxParticleContacts; PxU32 collisionStackSize; PxU32 maxHairContacts; PxgDynamicsMemoryConfig() : tempBufferCapacity(16 * 1024 * 1024), maxRigidContactCount(1024 * 512), maxRigidPatchCount(1024 * 80), heapCapacity(64 * 1024 * 1024), foundLostPairsCapacity(256 * 1024), foundLostAggregatePairsCapacity(1024), totalAggregatePairsCapacity(1024), maxSoftBodyContacts(1 * 1024 * 1024), maxFemClothContacts(1 * 1024 * 1024), maxParticleContacts(1*1024*1024), collisionStackSize(64*1024*1024), maxHairContacts(1 * 1024 * 1024) { } PX_PHYSX_CORE_API bool isValid() const; }; PX_INLINE bool PxgDynamicsMemoryConfig::isValid() const { const bool isPowerOfTwo = PxIsPowerOfTwo(heapCapacity); return isPowerOfTwo; } //#endif /** \brief Descriptor class for scenes. See #PxScene. This struct must be initialized with the same PxTolerancesScale values used to initialize PxPhysics. @see PxScene PxPhysics.createScene PxTolerancesScale */ class PxSceneDesc : public PxSceneQueryDesc { public: /** \brief Gravity vector. <b>Range:</b> force vector<br> <b>Default:</b> Zero @see PxScene.setGravity() PxScene.getGravity() When setting gravity, you should probably also set bounce threshold. */ PxVec3 gravity; /** \brief Possible notification callback. <b>Default:</b> NULL @see PxSimulationEventCallback PxScene.setSimulationEventCallback() PxScene.getSimulationEventCallback() */ PxSimulationEventCallback* simulationEventCallback; /** \brief Possible asynchronous callback for contact modification. 
<b>Default:</b> NULL @see PxContactModifyCallback PxScene.setContactModifyCallback() PxScene.getContactModifyCallback() */ PxContactModifyCallback* contactModifyCallback; /** \brief Possible asynchronous callback for contact modification. <b>Default:</b> NULL @see PxContactModifyCallback PxScene.setContactModifyCallback() PxScene.getContactModifyCallback() */ PxCCDContactModifyCallback* ccdContactModifyCallback; /** \brief Shared global filter data which will get passed into the filter shader. \note The provided data will get copied to internal buffers and this copy will be used for filtering calls. <b>Default:</b> NULL @see PxSimulationFilterShader PxScene.setFilterShaderData() PxScene.getFilterShaderData() */ const void* filterShaderData; /** \brief Size (in bytes) of the shared global filter data #filterShaderData. <b>Default:</b> 0 @see PxSimulationFilterShader filterShaderData PxScene.getFilterShaderDataSize() */ PxU32 filterShaderDataSize; /** \brief The custom filter shader to use for collision filtering. \note This parameter is compulsory. If you don't want to define your own filter shader you can use the default shader #PxDefaultSimulationFilterShader which can be found in the PhysX extensions library. @see PxSimulationFilterShader PxScene.getFilterShader() */ PxSimulationFilterShader filterShader; /** \brief A custom collision filter callback which can be used to implement more complex filtering operations which need access to the simulation state, for example. <b>Default:</b> NULL @see PxSimulationFilterCallback PxScene.getFilterCallback() */ PxSimulationFilterCallback* filterCallback; /** \brief Filtering mode for kinematic-kinematic pairs in the broadphase. <b>Default:</b> PxPairFilteringMode::eDEFAULT @see PxPairFilteringMode PxScene.getKinematicKinematicFilteringMode() */ PxPairFilteringMode::Enum kineKineFilteringMode; /** \brief Filtering mode for static-kinematic pairs in the broadphase. <b>Default:</b> PxPairFilteringMode::eDEFAULT @see PxPairFilteringMode PxScene.getStaticKinematicFilteringMode() */ PxPairFilteringMode::Enum staticKineFilteringMode; /** \brief Selects the broad-phase algorithm to use. <b>Default:</b> PxBroadPhaseType::ePABP @see PxBroadPhaseType PxScene.getBroadPhaseType() */ PxBroadPhaseType::Enum broadPhaseType; /** \brief Broad-phase callback <b>Default:</b> NULL @see PxBroadPhaseCallback PxScene.getBroadPhaseCallback() PxScene.setBroadPhaseCallback() */ PxBroadPhaseCallback* broadPhaseCallback; /** \brief Expected scene limits. @see PxSceneLimits PxScene.getLimits() */ PxSceneLimits limits; /** \brief Selects the friction algorithm to use for simulation. \note frictionType cannot be modified after the first call to any of PxScene::simulate, PxScene::solve and PxScene::collide. <b>Default:</b> PxFrictionType::ePATCH @see PxFrictionType PxScene.setFrictionType(), PxScene.getFrictionType() */ PxFrictionType::Enum frictionType; /** \brief Selects the solver algorithm to use. <b>Default:</b> PxSolverType::ePGS @see PxSolverType PxScene.getSolverType() */ PxSolverType::Enum solverType; /** \brief A contact with a relative velocity below this will not bounce. A typical value for simulation stability is about 0.2 * gravity. <b>Range:</b> (0, PX_MAX_F32)<br> <b>Default:</b> 0.2 * PxTolerancesScale::speed @see PxMaterial PxScene.setBounceThresholdVelocity() PxScene.getBounceThresholdVelocity() */ PxReal bounceThresholdVelocity; /** \brief A threshold of contact separation distance used to decide if a contact point will experience friction forces.
\note If the separation distance of a contact point is greater than the threshold then the contact point will not experience friction forces. \note If the aggregated contact offset of a pair of shapes is large it might be desirable to neglect friction for contact points whose separation distance is sufficiently large that the shape surfaces are clearly separated. \note This parameter can be used to tune the separation distance of contact points at which friction starts to have an effect. <b>Range:</b> [0, PX_MAX_F32)<br> <b>Default:</b> 0.04 * PxTolerancesScale::length @see PxScene.setFrictionOffsetThreshold() PxScene.getFrictionOffsetThreshold() */ PxReal frictionOffsetThreshold; /** \brief Friction correlation distance used to decide whether contacts are close enough to be merged into a single friction anchor point or not. \note If the correlation distance is larger than the distance between contact points generated between a pair of shapes, some of the contacts may not experience frictional forces. \note This parameter can be used to tune the correlation distance used in the solver. Contact points can be merged into a single friction anchor if the distance between the contacts is smaller than correlation distance. <b>Range:</b> [0, PX_MAX_F32)<br> <b>Default:</b> 0.025f * PxTolerancesScale::length @see PxScene.setFrictionCorrelationDistance() PxScene.getFrictionCorrelationDistance() */ PxReal frictionCorrelationDistance; /** \brief Flags used to select scene options. <b>Default:</b> PxSceneFlag::eENABLE_PCM @see PxSceneFlag PxSceneFlags PxScene.getFlags() PxScene.setFlag() */ PxSceneFlags flags; /** \brief The CPU task dispatcher for the scene. @see PxCpuDispatcher, PxScene::getCpuDispatcher */ PxCpuDispatcher* cpuDispatcher; /** \brief The CUDA context manager for the scene. <b>Platform specific:</b> Applies to PC GPU only. @see PxCudaContextManager, PxScene::getCudaContextManager */ PxCudaContextManager* cudaContextManager; /** \brief Will be copied to PxScene::userData. <b>Default:</b> NULL */ void* userData; /** \brief Defines the number of actors required to spawn a separate rigid body solver island task chain. This parameter defines the minimum number of actors required to spawn a separate rigid body solver task chain. Setting a low value will potentially cause more task chains to be generated. This may result in the overhead of spawning tasks can become a limiting performance factor. Setting a high value will potentially cause fewer islands to be generated. This may reduce thread scaling (fewer task chains spawned) and may detrimentally affect performance if some bodies in the scene have large solver iteration counts because all constraints in a given island are solved by the maximum number of solver iterations requested by any body in the island. Note that a rigid body solver task chain is spawned as soon as either a sufficient number of rigid bodies or articulations are batched together. <b>Default:</b> 128 @see PxScene.setSolverBatchSize() PxScene.getSolverBatchSize() */ PxU32 solverBatchSize; /** \brief Defines the number of articulations required to spawn a separate rigid body solver island task chain. This parameter defines the minimum number of articulations required to spawn a separate rigid body solver task chain. Setting a low value will potentially cause more task chains to be generated. This may result in the overhead of spawning tasks can become a limiting performance factor. Setting a high value will potentially cause fewer islands to be generated. 
This may reduce thread scaling (fewer task chains spawned) and may detrimentally affect performance if some bodies in the scene have large solver iteration counts because all constraints in a given island are solved by the maximum number of solver iterations requested by any body in the island. Note that a rigid body solver task chain is spawned as soon as either a sufficient number of rigid bodies or articulations are batched together. <b>Default:</b> 16 @see PxScene.setSolverArticulationBatchSize() PxScene.getSolverArticulationBatchSize() */ PxU32 solverArticulationBatchSize; /** \brief Setting to define the number of 16K blocks that will be initially reserved to store contact, friction, and contact cache data. This is the number of 16K memory blocks that will be automatically allocated from the user allocator when the scene is instantiated. Further 16k memory blocks may be allocated during the simulation up to maxNbContactDataBlocks. \note This value cannot be larger than maxNbContactDataBlocks because that defines the maximum number of 16k blocks that can be allocated by the SDK. <b>Default:</b> 0 <b>Range:</b> [0, PX_MAX_U32]<br> @see PxPhysics::createScene PxScene::setNbContactDataBlocks */ PxU32 nbContactDataBlocks; /** \brief Setting to define the maximum number of 16K blocks that can be allocated to store contact, friction, and contact cache data. As the complexity of a scene increases, the SDK may require to allocate new 16k blocks in addition to the blocks it has already allocated. This variable controls the maximum number of blocks that the SDK can allocate. In the case that the scene is sufficiently complex that all the permitted 16K blocks are used, contacts will be dropped and a warning passed to the error stream. If a warning is reported to the error stream to indicate the number of 16K blocks is insufficient for the scene complexity then the choices are either (i) re-tune the number of 16K data blocks until a number is found that is sufficient for the scene complexity, (ii) to simplify the scene or (iii) to opt to not increase the memory requirements of physx and accept some dropped contacts. <b>Default:</b> 65536 <b>Range:</b> [0, PX_MAX_U32]<br> @see nbContactDataBlocks PxScene.setNbContactDataBlocks() */ PxU32 maxNbContactDataBlocks; /** \brief The maximum bias coefficient used in the constraint solver When geometric errors are found in the constraint solver, either as a result of shapes penetrating or joints becoming separated or violating limits, a bias is introduced in the solver position iterations to correct these errors. This bias is proportional to 1/dt, meaning that the bias becomes increasingly strong as the time-step passed to PxScene::simulate(...) becomes smaller. This coefficient allows the application to restrict how large the bias coefficient is, to reduce how violent error corrections are. This can improve simulation quality in cases where either variable time-steps or extremely small time-steps are used. <b>Default:</b> PX_MAX_F32 <b> Range</b> [0, PX_MAX_F32] <br> @see PxScene.setMaxBiasCoefficient() PxScene.getMaxBiasCoefficient() */ PxReal maxBiasCoefficient; /** \brief Size of the contact report stream (in bytes). The contact report stream buffer is used during the simulation to store all the contact reports. If the size is not sufficient, the buffer will grow by a factor of two. It is possible to disable the buffer growth by setting the flag PxSceneFlag::eDISABLE_CONTACT_REPORT_BUFFER_RESIZE. 
In that case the buffer will not grow but contact reports not stored in the buffer will not get sent in the contact report callbacks. <b>Default:</b> 8192 <b>Range:</b> (0, PX_MAX_U32]<br> @see PxScene.getContactReportStreamBufferSize() */ PxU32 contactReportStreamBufferSize; /** \brief Maximum number of CCD passes The CCD performs multiple passes, where each pass every object advances to its time of first impact. This value defines how many passes the CCD system should perform. \note The CCD system is a multi-pass best-effort conservative advancement approach. After the defined number of passes has been completed, any remaining time is dropped. \note This defines the maximum number of passes the CCD can perform. It may perform fewer if additional passes are not necessary. <b>Default:</b> 1 <b>Range:</b> [1, PX_MAX_U32]<br> @see PxScene.setCCDMaxPasses() PxScene.getCCDMaxPasses() */ PxU32 ccdMaxPasses; /** \brief CCD threshold CCD performs sweeps against shapes if and only if the relative motion of the shapes is fast-enough that a collision would be missed by the discrete contact generation. However, in some circumstances, e.g. when the environment is constructed from large convex shapes, this approach may produce undesired simulation artefacts. This parameter defines the minimum relative motion that would be required to force CCD between shapes. The smaller of this value and the sum of the thresholds calculated for the shapes involved will be used. \note It is not advisable to set this to a very small value as this may lead to CCD "jamming" and detrimentally effect performance. This value should be at least larger than the translation caused by a single frame's gravitational effect <b>Default:</b> PX_MAX_F32 <b>Range:</b> [Eps, PX_MAX_F32]<br> @see PxScene.setCCDThreshold() PxScene.getCCDThreshold() */ PxReal ccdThreshold; /** \brief A threshold for speculative CCD. Used to control whether bias, restitution or a combination of the two are used to resolve the contacts. \note This only has any effect on contacting pairs where one of the bodies has PxRigidBodyFlag::eENABLE_SPECULATIVE_CCD raised. <b>Range:</b> [0, PX_MAX_F32)<br> <b>Default:</b> 0.04 * PxTolerancesScale::length @see PxScene.setCCDMaxSeparation() PxScene.getCCDMaxSeparation() */ PxReal ccdMaxSeparation; /** \brief The wake counter reset value Calling wakeUp() on objects which support sleeping will set their wake counter value to the specified reset value. <b>Range:</b> (0, PX_MAX_F32)<br> <b>Default:</b> 0.4 (which corresponds to 20 frames for a time step of 0.02) @see PxRigidDynamic::wakeUp() PxArticulationReducedCoordinate::wakeUp() PxScene.getWakeCounterResetValue() */ PxReal wakeCounterResetValue; /** \brief The bounds used to sanity check user-set positions of actors and articulation links These bounds are used to check the position values of rigid actors inserted into the scene, and positions set for rigid actors already within the scene. <b>Range:</b> any valid PxBounds3 <br> <b>Default:</b> (-PX_MAX_BOUNDS_EXTENTS, PX_MAX_BOUNDS_EXTENTS) on each axis */ PxBounds3 sanityBounds; /** \brief The pre-allocations performed in the GPU dynamics pipeline. */ PxgDynamicsMemoryConfig gpuDynamicsConfig; /** \brief Limitation for the partitions in the GPU dynamics pipeline. This variable must be power of 2. A value greater than 32 is currently not supported. <b>Range:</b> (1, 32)<br> */ PxU32 gpuMaxNumPartitions; /** \brief Limitation for the number of static rigid body partitions in the GPU dynamics pipeline. 
<b>Range:</b> (1, 255)<br> <b>Default:</b> 16 */ PxU32 gpuMaxNumStaticPartitions; /** \brief Defines which compute version the GPU dynamics should target. DO NOT MODIFY */ PxU32 gpuComputeVersion; /** \brief Defines the size of a contact pool slab. Contact pairs and associated data are allocated using a pool allocator. Increasing the slab size can trade off some performance spikes when a large number of new contacts are found for an increase in overall memory usage. <b>Range:</b>(1, PX_MAX_U32)<br> <b>Default:</b> 256 */ PxU32 contactPairSlabSize; /** \brief The scene query sub-system for the scene. If left to NULL, PxScene will use its usual internal sub-system. If non-NULL, all SQ-related calls will be re-routed to the user-provided implementation. An external SQ implementation is available in the Extensions library (see PxCreateExternalSceneQuerySystem). This can also be fully re-implemented by users if needed. @see PxSceneQuerySystem */ PxSceneQuerySystem* sceneQuerySystem; private: /** \cond */ // For internal use only PxTolerancesScale tolerancesScale; /** \endcond */ public: /** \brief constructor sets to default. \param[in] scale scale values for the tolerances in the scene, these must be the same values passed into PxCreatePhysics(). The affected tolerances are bounceThresholdVelocity and frictionOffsetThreshold. @see PxCreatePhysics() PxTolerancesScale bounceThresholdVelocity frictionOffsetThreshold */ PX_INLINE PxSceneDesc(const PxTolerancesScale& scale); /** \brief (re)sets the structure to the default. \param[in] scale scale values for the tolerances in the scene, these must be the same values passed into PxCreatePhysics(). The affected tolerances are bounceThresholdVelocity and frictionOffsetThreshold. @see PxCreatePhysics() PxTolerancesScale bounceThresholdVelocity frictionOffsetThreshold */ PX_INLINE void setToDefault(const PxTolerancesScale& scale); /** \brief Returns true if the descriptor is valid. \return true if the current settings are valid. 
*/ PX_INLINE bool isValid() const; /** \cond */ // For internal use only PX_INLINE const PxTolerancesScale& getTolerancesScale() const { return tolerancesScale; } /** \endcond */ }; PX_INLINE PxSceneDesc::PxSceneDesc(const PxTolerancesScale& scale): gravity (PxVec3(0.0f)), simulationEventCallback (NULL), contactModifyCallback (NULL), ccdContactModifyCallback (NULL), filterShaderData (NULL), filterShaderDataSize (0), filterShader (NULL), filterCallback (NULL), kineKineFilteringMode (PxPairFilteringMode::eDEFAULT), staticKineFilteringMode (PxPairFilteringMode::eDEFAULT), broadPhaseType (PxBroadPhaseType::ePABP), broadPhaseCallback (NULL), frictionType (PxFrictionType::ePATCH), solverType (PxSolverType::ePGS), bounceThresholdVelocity (0.2f * scale.speed), frictionOffsetThreshold (0.04f * scale.length), frictionCorrelationDistance (0.025f * scale.length), flags (PxSceneFlag::eENABLE_PCM), cpuDispatcher (NULL), cudaContextManager (NULL), userData (NULL), solverBatchSize (128), solverArticulationBatchSize (16), nbContactDataBlocks (0), maxNbContactDataBlocks (1<<16), maxBiasCoefficient (PX_MAX_F32), contactReportStreamBufferSize (8192), ccdMaxPasses (1), ccdThreshold (PX_MAX_F32), ccdMaxSeparation (0.04f * scale.length), wakeCounterResetValue (20.0f*0.02f), sanityBounds (PxBounds3(PxVec3(-PX_MAX_BOUNDS_EXTENTS), PxVec3(PX_MAX_BOUNDS_EXTENTS))), gpuMaxNumPartitions (8), gpuMaxNumStaticPartitions (16), gpuComputeVersion (0), contactPairSlabSize (256), sceneQuerySystem (NULL), tolerancesScale (scale) { } PX_INLINE void PxSceneDesc::setToDefault(const PxTolerancesScale& scale) { *this = PxSceneDesc(scale); } PX_INLINE bool PxSceneDesc::isValid() const { if(!PxSceneQueryDesc::isValid()) return false; if(!filterShader) return false; if( ((filterShaderDataSize == 0) && (filterShaderData != NULL)) || ((filterShaderDataSize > 0) && (filterShaderData == NULL)) ) return false; if(!limits.isValid()) return false; if(bounceThresholdVelocity <= 0.0f) return false; if(frictionOffsetThreshold < 0.0f) return false; if(frictionCorrelationDistance <= 0) return false; if(maxBiasCoefficient < 0.0f) return false; if(!ccdMaxPasses) return false; if(ccdThreshold <= 0.0f) return false; if(ccdMaxSeparation < 0.0f) return false; if(!cpuDispatcher) return false; if(!contactReportStreamBufferSize) return false; if(maxNbContactDataBlocks < nbContactDataBlocks) return false; if(wakeCounterResetValue <= 0.0f) return false; if(!sanityBounds.isValid()) return false; #if PX_SUPPORT_GPU_PHYSX if(!PxIsPowerOfTwo(gpuMaxNumPartitions)) return false; if(gpuMaxNumPartitions > 32) return false; if (gpuMaxNumPartitions == 0) return false; if(!gpuDynamicsConfig.isValid()) return false; if (flags & PxSceneFlag::eENABLE_DIRECT_GPU_API) { if(!(flags & PxSceneFlag::eENABLE_GPU_DYNAMICS && broadPhaseType == PxBroadPhaseType::eGPU)) return false; } #endif if(contactPairSlabSize == 0) return false; return true; } #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
37,439
C
34.827751
302
0.76431
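The PxSceneDesc fields documented in PxScene.h above come together at scene-creation time. The following is a minimal usage sketch, not part of the header: it assumes a PxPhysics object already exists and that the PhysX extensions library is linked, supplying PxDefaultCpuDispatcherCreate and PxDefaultSimulationFilterShader (the default filter shader the descriptor documentation itself points to). The solver-type override and the helper name createDefaultScene are illustrative choices only.

#include "PxPhysicsAPI.h"

using namespace physx;

// Build a scene descriptor with the same tolerances scale used to create the
// PxPhysics instance, fill in the compulsory fields and create the scene.
// Returns NULL if the descriptor does not validate.
PxScene* createDefaultScene(PxPhysics& physics)
{
	PxSceneDesc desc(physics.getTolerancesScale());

	desc.gravity       = PxVec3(0.0f, -9.81f, 0.0f);       // default is zero gravity
	desc.cpuDispatcher = PxDefaultCpuDispatcherCreate(2);  // compulsory: CPU task dispatcher (extensions)
	desc.filterShader  = PxDefaultSimulationFilterShader;  // compulsory: collision filter shader (extensions)
	desc.solverType    = PxSolverType::eTGS;               // optional: switch from the ePGS default

	if(!desc.isValid())                                    // catches a missing dispatcher/filter shader, invalid ranges, ...
		return NULL;

	return physics.createScene(desc);
}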
NVIDIA-Omniverse/PhysX/physx/include/PxParticleSystemFlag.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_PARTICLE_SYSTEM_FLAG_H #define PX_PARTICLE_SYSTEM_FLAG_H #include "foundation/PxFlags.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief Identifies dirty particle buffers that need to be updated in the particle system. This flag can be used mark the device user buffers that are dirty and need to be written to the particle system. */ struct PxParticleBufferFlag { enum Enum { eNONE = 0, //!< No data specified eUPDATE_POSITION = 1 << 0, //!< Specifies the position (first 3 floats) and inverse mass (last float) data (array of PxVec4 * number of particles) eUPDATE_VELOCITY = 1 << 1, //!< Specifies the velocity (first 3 floats) data (array of PxVec4 * number of particles) eUPDATE_PHASE = 1 << 2, //!< Specifies the per-particle phase flag data (array of PxU32 * number of particles) eUPDATE_RESTPOSITION = 1 << 3, //!< Specifies the rest position (first 3 floats) data for cloth buffers eUPDATE_CLOTH = 1 << 5, //!< Specifies the cloth buffer (see PxParticleClothBuffer) eUPDATE_RIGID = 1 << 6, //!< Specifies the rigid buffer (see PxParticleRigidBuffer) eUPDATE_DIFFUSE_PARAM = 1 << 7, //!< Specifies the diffuse particle parameter buffer (see PxDiffuseParticleParams) eUPDATE_ATTACHMENTS = 1 << 8, //!< Specifies the attachments. eALL = eUPDATE_POSITION | eUPDATE_VELOCITY | eUPDATE_PHASE | eUPDATE_RESTPOSITION | eUPDATE_CLOTH | eUPDATE_RIGID | eUPDATE_DIFFUSE_PARAM | eUPDATE_ATTACHMENTS }; }; typedef PxFlags<PxParticleBufferFlag::Enum, PxU32> PxParticleBufferFlags; /** \brief A pair of particle buffer unique id and GPU particle system index. @see PxScene::applyParticleBufferData */ struct PxGpuParticleBufferIndexPair { PxU32 systemIndex; // gpu particle system index PxU32 bufferIndex; // particle buffer unique id }; /** \brief Identifies per-particle behavior for a PxParticleSystem. See #PxParticleSystem::createPhase(). 
*/ struct PxParticlePhaseFlag { enum Enum { eParticlePhaseGroupMask = 0x000fffff, //!< Bits [ 0, 19] represent the particle group for controlling collisions eParticlePhaseFlagsMask = 0xfff00000, //!< Bits [20, 31] hold flags about how the particle behaves eParticlePhaseSelfCollide = 1 << 20, //!< If set, this particle will interact with particles of the same group eParticlePhaseSelfCollideFilter = 1 << 21, //!< If set, this particle will ignore collisions with particles closer than the radius in the rest pose; this flag should not be specified unless valid rest positions have been specified using setRestParticles() eParticlePhaseFluid = 1 << 22 //!< If set, this particle will generate fluid density constraints for its overlapping neighbors }; }; typedef PxFlags<PxParticlePhaseFlag::Enum, PxU32> PxParticlePhaseFlags; #if !PX_DOXYGEN } // namespace physx #endif #endif
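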
4,530
C
43.421568
257
0.747461
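A brief sketch of how the dirty-buffer flag type declared in PxParticleSystemFlag.h is typically composed before handing device buffers back to the particle system. The helper name positionAndPhaseDirty is hypothetical; the particle system and buffers that would consume these flags are assumed to exist elsewhere and are not shown.

#include "PxParticleSystemFlag.h"

using namespace physx;

// Combine dirty-buffer flags: the particle system should re-read the
// position and phase data from the user's device buffers.
static PxParticleBufferFlags positionAndPhaseDirty()
{
	PxParticleBufferFlags flags(PxParticleBufferFlag::eUPDATE_POSITION);
	flags |= PxParticleBufferFlag::eUPDATE_PHASE;
	return flags;
}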
NVIDIA-Omniverse/PhysX/physx/include/PxContact.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_CONTACT_H #define PX_CONTACT_H /** \addtogroup physics @{ */ #include "foundation/PxVec3.h" #include "foundation/PxAssert.h" #include "PxConstraintDesc.h" #include "PxNodeIndex.h" #if !PX_DOXYGEN namespace physx { #endif #if PX_VC #pragma warning(push) #pragma warning(disable: 4324) // Padding was added at the end of a structure because of a __declspec(align) value. #endif #define PXC_CONTACT_NO_FACE_INDEX 0xffffffff class PxActor; /** \brief Header for a contact patch where all points share same material and normal */ PX_ALIGN_PREFIX(16) struct PxContactPatch { enum PxContactPatchFlags { eHAS_FACE_INDICES = 1, //!< Indicates this contact stream has face indices. eMODIFIABLE = 2, //!< Indicates this contact stream is modifiable. eFORCE_NO_RESPONSE = 4, //!< Indicates this contact stream is notify-only (no contact response). eHAS_MODIFIED_MASS_RATIOS = 8, //!< Indicates this contact stream has modified mass ratios eHAS_TARGET_VELOCITY = 16, //!< Indicates this contact stream has target velocities set eHAS_MAX_IMPULSE = 32, //!< Indicates this contact stream has max impulses set eREGENERATE_PATCHES = 64, //!< Indicates this contact stream needs patches re-generated. 
This is required if the application modified either the contact normal or the material properties eCOMPRESSED_MODIFIED_CONTACT = 128 }; /** \brief Modifiers for scaling the inertia of the involved bodies */ PX_ALIGN(16, PxConstraintInvMassScale mMassModification); /** \brief Contact normal */ PX_ALIGN(16, PxVec3 normal); /** \brief Restitution coefficient */ PxReal restitution; /** \brief Dynamic friction coefficient */ PxReal dynamicFriction; /** \brief Static friction coefficient */ PxReal staticFriction; /** \brief Damping coefficient (for compliant contacts) */ PxReal damping; /** \brief Index of the first contact in the patch */ PxU16 startContactIndex; /** \brief The number of contacts in this patch */ PxU8 nbContacts; /** \brief The combined material flag of two actors that come in contact @see PxMaterialFlag, PxCombineMode */ PxU8 materialFlags; /** \brief The PxContactPatchFlags for this patch */ PxU16 internalFlags; /** \brief Material index of first body */ PxU16 materialIndex0; /** \brief Material index of second body */ PxU16 materialIndex1; PxU16 pad[5]; } PX_ALIGN_SUFFIX(16); /** \brief Contact point data */ PX_ALIGN_PREFIX(16) struct PxContact { /** \brief Contact point in world space */ PxVec3 contact; /** \brief Separation value (negative implies penetration). */ PxReal separation; } PX_ALIGN_SUFFIX(16); /** \brief Contact point data with additional target and max impulse values */ PX_ALIGN_PREFIX(16) struct PxExtendedContact : public PxContact { /** \brief Target velocity */ PX_ALIGN(16, PxVec3 targetVelocity); /** \brief Maximum impulse */ PxReal maxImpulse; } PX_ALIGN_SUFFIX(16); /** \brief A modifiable contact point. This has additional fields per-contact to permit modification by user. \note Not all fields are currently exposed to the user. */ PX_ALIGN_PREFIX(16) struct PxModifiableContact : public PxExtendedContact { /** \brief Contact normal */ PX_ALIGN(16, PxVec3 normal); /** \brief Restitution coefficient */ PxReal restitution; /** \brief Material Flags */ PxU32 materialFlags; /** \brief Shape A's material index */ PxU16 materialIndex0; /** \brief Shape B's material index */ PxU16 materialIndex1; /** \brief static friction coefficient */ PxReal staticFriction; /** \brief dynamic friction coefficient */ PxReal dynamicFriction; } PX_ALIGN_SUFFIX(16); /** \brief A class to iterate over a compressed contact stream. This supports read-only access to the various contact formats. */ struct PxContactStreamIterator { enum StreamFormat { eSIMPLE_STREAM, eMODIFIABLE_STREAM, eCOMPRESSED_MODIFIABLE_STREAM }; /** \brief Utility zero vector to optimize functions returning zero vectors when a certain flag isn't set. \note This allows us to return by reference instead of having to return by value. Returning by value will go via memory (registers -> stack -> registers), which can cause performance issues on certain platforms. */ PxVec3 zero; /** \brief The patch headers. */ const PxContactPatch* patch; /** \brief The contacts */ const PxContact* contact; /** \brief The contact triangle face index */ const PxU32* faceIndice; /** \brief The total number of patches in this contact stream */ PxU32 totalPatches; /** \brief The total number of contact points in this stream */ PxU32 totalContacts; /** \brief The current contact index */ PxU32 nextContactIndex; /** \brief The current patch Index */ PxU32 nextPatchIndex; /** \brief Size of contact patch header \note This varies whether the patch is modifiable or not. 
*/ PxU32 contactPatchHeaderSize; /** \brief Contact point size \note This varies whether the patch has feature indices or is modifiable. */ PxU32 contactPointSize; /** \brief The stream format */ StreamFormat mStreamFormat; /** \brief Indicates whether this stream is notify-only or not. */ PxU32 forceNoResponse; /** \brief Internal helper for stepping the contact stream iterator */ bool pointStepped; /** \brief Specifies if this contactPatch has face indices (handled as bool) @see faceIndice */ PxU32 hasFaceIndices; /** \brief Constructor */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxContactStreamIterator(const PxU8* contactPatches, const PxU8* contactPoints, const PxU32* contactFaceIndices, PxU32 nbPatches, PxU32 nbContacts) : zero(0.f) { bool modify = false; bool compressedModify = false; bool response = false; bool indices = false; PxU32 pointSize = 0; PxU32 patchHeaderSize = sizeof(PxContactPatch); const PxContactPatch* patches = reinterpret_cast<const PxContactPatch*>(contactPatches); if(patches) { modify = (patches->internalFlags & PxContactPatch::eMODIFIABLE) != 0; compressedModify = (patches->internalFlags & PxContactPatch::eCOMPRESSED_MODIFIED_CONTACT) != 0; indices = (patches->internalFlags & PxContactPatch::eHAS_FACE_INDICES) != 0; patch = patches; contact = reinterpret_cast<const PxContact*>(contactPoints); faceIndice = contactFaceIndices; pointSize = compressedModify ? sizeof(PxExtendedContact) : modify ? sizeof(PxModifiableContact) : sizeof(PxContact); response = (patch->internalFlags & PxContactPatch::eFORCE_NO_RESPONSE) == 0; } mStreamFormat = compressedModify ? eCOMPRESSED_MODIFIABLE_STREAM : modify ? eMODIFIABLE_STREAM : eSIMPLE_STREAM; hasFaceIndices = PxU32(indices); forceNoResponse = PxU32(!response); contactPatchHeaderSize = patchHeaderSize; contactPointSize = pointSize; nextPatchIndex = 0; nextContactIndex = 0; totalContacts = nbContacts; totalPatches = nbPatches; pointStepped = false; } /** \brief Returns whether there are more patches in this stream. \return Whether there are more patches in this stream. */ PX_CUDA_CALLABLE PX_FORCE_INLINE bool hasNextPatch() const { return nextPatchIndex < totalPatches; } /** \brief Returns the total contact count. \return Total contact count. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxU32 getTotalContactCount() const { return totalContacts; } /** \brief Returns the total patch count. \return Total patch count. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxU32 getTotalPatchCount() const { return totalPatches; } /** \brief Advances iterator to next contact patch. */ PX_CUDA_CALLABLE PX_INLINE void nextPatch() { PX_ASSERT(nextPatchIndex < totalPatches); if(nextPatchIndex) { if(nextContactIndex < patch->nbContacts) { PxU32 nbToStep = patch->nbContacts - this->nextContactIndex; contact = reinterpret_cast<const PxContact*>(reinterpret_cast<const PxU8*>(contact) + contactPointSize * nbToStep); } patch = reinterpret_cast<const PxContactPatch*>(reinterpret_cast<const PxU8*>(patch) + contactPatchHeaderSize); } nextPatchIndex++; nextContactIndex = 0; } /** \brief Returns if the current patch has more contacts. \return If there are more contacts in the current patch. */ PX_CUDA_CALLABLE PX_FORCE_INLINE bool hasNextContact() const { return nextContactIndex < (patch->nbContacts); } /** \brief Advances to the next contact in the patch. 
*/ PX_CUDA_CALLABLE PX_FORCE_INLINE void nextContact() { PX_ASSERT(nextContactIndex < patch->nbContacts); if(pointStepped) { contact = reinterpret_cast<const PxContact*>(reinterpret_cast<const PxU8*>(contact) + contactPointSize); faceIndice++; } nextContactIndex++; pointStepped = true; } /** \brief Gets the current contact's normal \return The current contact's normal. */ PX_CUDA_CALLABLE PX_FORCE_INLINE const PxVec3& getContactNormal() const { return getContactPatch().normal; } /** \brief Gets the inverse mass scale for body 0. \return The inverse mass scale for body 0. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal getInvMassScale0() const { return patch->mMassModification.linear0; } /** \brief Gets the inverse mass scale for body 1. \return The inverse mass scale for body 1. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal getInvMassScale1() const { return patch->mMassModification.linear1; } /** \brief Gets the inverse inertia scale for body 0. \return The inverse inertia scale for body 0. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal getInvInertiaScale0() const { return patch->mMassModification.angular0; } /** \brief Gets the inverse inertia scale for body 1. \return The inverse inertia scale for body 1. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal getInvInertiaScale1() const { return patch->mMassModification.angular1; } /** \brief Gets the contact's max impulse. \return The contact's max impulse. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal getMaxImpulse() const { return mStreamFormat != eSIMPLE_STREAM ? getExtendedContact().maxImpulse : PX_MAX_REAL; } /** \brief Gets the contact's target velocity. \return The contact's target velocity. */ PX_CUDA_CALLABLE PX_FORCE_INLINE const PxVec3& getTargetVel() const { return mStreamFormat != eSIMPLE_STREAM ? getExtendedContact().targetVelocity : zero; } /** \brief Gets the contact's contact point. \return The contact's contact point. */ PX_CUDA_CALLABLE PX_FORCE_INLINE const PxVec3& getContactPoint() const { return contact->contact; } /** \brief Gets the contact's separation. \return The contact's separation. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal getSeparation() const { return contact->separation; } /** \brief Gets the contact's face index for shape 0. \return The contact's face index for shape 0. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxU32 getFaceIndex0() const { return PXC_CONTACT_NO_FACE_INDEX; } /** \brief Gets the contact's face index for shape 1. \return The contact's face index for shape 1. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxU32 getFaceIndex1() const { return hasFaceIndices ? *faceIndice : PXC_CONTACT_NO_FACE_INDEX; } /** \brief Gets the contact's static friction coefficient. \return The contact's static friction coefficient. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal getStaticFriction() const { return getContactPatch().staticFriction; } /** \brief Gets the contact's dynamic friction coefficient. \return The contact's dynamic friction coefficient. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal getDynamicFriction() const { return getContactPatch().dynamicFriction; } /** \brief Gets the contact's restitution coefficient. \return The contact's restitution coefficient. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal getRestitution() const { return getContactPatch().restitution; } /** \brief Gets the contact's damping value. \return The contact's damping value. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal getDamping() const { return getContactPatch().damping; } /** \brief Gets the contact's material flags. \return The contact's material flags. 
*/ PX_CUDA_CALLABLE PX_FORCE_INLINE PxU32 getMaterialFlags() const { return getContactPatch().materialFlags; } /** \brief Gets the contact's material index for shape 0. \return The contact's material index for shape 0. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxU16 getMaterialIndex0() const { return PxU16(getContactPatch().materialIndex0); } /** \brief Gets the contact's material index for shape 1. \return The contact's material index for shape 1. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxU16 getMaterialIndex1() const { return PxU16(getContactPatch().materialIndex1); } /** \brief Advances the contact stream iterator to a specific contact index. \return True if advancing was possible */ bool advanceToIndex(const PxU32 initialIndex) { PX_ASSERT(this->nextPatchIndex == 0 && this->nextContactIndex == 0); PxU32 numToAdvance = initialIndex; if(numToAdvance == 0) { PX_ASSERT(hasNextPatch()); nextPatch(); return true; } while(numToAdvance) { while(hasNextPatch()) { nextPatch(); PxU32 patchSize = patch->nbContacts; if(numToAdvance <= patchSize) { contact = reinterpret_cast<const PxContact*>(reinterpret_cast<const PxU8*>(contact) + contactPointSize * numToAdvance); nextContactIndex += numToAdvance; return true; } else { numToAdvance -= patchSize; } } } return false; } private: /** \brief Internal helper */ PX_CUDA_CALLABLE PX_FORCE_INLINE const PxContactPatch& getContactPatch() const { return *static_cast<const PxContactPatch*>(patch); } PX_CUDA_CALLABLE PX_FORCE_INLINE const PxExtendedContact& getExtendedContact() const { PX_ASSERT(mStreamFormat == eMODIFIABLE_STREAM || mStreamFormat == eCOMPRESSED_MODIFIABLE_STREAM); return *static_cast<const PxExtendedContact*>(contact); } }; /** \brief Contains contact information for a contact reported by the direct-GPU contact report API. See PxScene::copyContactData(). */ struct PxGpuContactPair { PxU8* contactPatches; //!< Ptr to contact patches. Type: PxContactPatch*, size: nbPatches. PxU8* contactPoints; //!< Ptr to contact points. Type: PxContact*, size: nbContacts. PxReal* contactForces; //!< Ptr to contact forces. Size: nbContacts. PxU32 transformCacheRef0; //!< Ref to shape0's transform in transform cache. PxU32 transformCacheRef1; //!< Ref to shape1's transform in transform cache. PxNodeIndex nodeIndex0; //!< Unique Id for actor0 if the actor is dynamic. PxNodeIndex nodeIndex1; //!< Unique Id for actor1 if the actor is dynamic. PxActor* actor0; //!< Ptr to PxActor for actor0. PxActor* actor1; //!< Ptr to PxActor for actor1. PxU16 nbContacts; //!< Num contacts. PxU16 nbPatches; //!< Num patches. }; #if PX_VC #pragma warning(pop) #endif #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
16,701
C
24.153614
190
0.726483
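PxContactStreamIterator is the intended way to walk a compressed contact stream. The sketch below is illustrative only: the patch, point and face-index buffers would normally come from a contact report (for example a PxContactPair) rather than be constructed by hand, and dumpContacts is a hypothetical helper name.

#include "PxContact.h"
#include "foundation/PxPreprocessor.h"

using namespace physx;

// Walk every patch and every contact point in a compressed contact stream.
void dumpContacts(const PxU8* patches, const PxU8* points,
                  const PxU32* faceIndices, PxU32 nbPatches, PxU32 nbContacts)
{
	PxContactStreamIterator iter(patches, points, faceIndices, nbPatches, nbContacts);

	while(iter.hasNextPatch())
	{
		iter.nextPatch();
		while(iter.hasNextContact())
		{
			iter.nextContact();
			const PxVec3& position  = iter.getContactPoint();
			const PxVec3& normal    = iter.getContactNormal();
			const PxReal separation = iter.getSeparation();

			// Replace with real processing (logging, gameplay triggers, ...).
			PX_UNUSED(position);
			PX_UNUSED(normal);
			PX_UNUSED(separation);
		}
	}
}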
NVIDIA-Omniverse/PhysX/physx/include/PxArticulationLink.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_ARTICULATION_LINK_H #define PX_ARTICULATION_LINK_H /** \addtogroup physics @{ */ #include "PxPhysXConfig.h" #include "PxRigidBody.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief A component of an articulation that represents a rigid body. Articulation links have a restricted subset of the functionality of a PxRigidDynamic: - They may not be kinematic, and do not support contact-force thresholds. - Their velocity or global pose cannot be set directly, but must be set via the articulation-root and joint positions/velocities. - Sleep state and solver iteration counts are properties of the entire articulation rather than the individual links. @see PxArticulationReducedCoordinate, PxArticulationReducedCoordinate::createLink, PxArticulationJointReducedCoordinate, PxRigidBody */ class PxArticulationLink : public PxRigidBody { public: /** \brief Releases the link from the articulation. \note Only a leaf articulation link can be released. \note Releasing a link is not allowed while the articulation link is in a scene. In order to release a link, remove and then re-add the corresponding articulation to the scene. @see PxArticulationReducedCoordinate::createLink() */ virtual void release() = 0; /** \brief Gets the articulation that the link is a part of. \return The articulation. @see PxArticulationReducedCoordinate */ virtual PxArticulationReducedCoordinate& getArticulation() const = 0; /** \brief Gets the joint which connects this link to its parent. \return The joint connecting the link to the parent. NULL for the root link. @see PxArticulationJointReducedCoordinate */ virtual PxArticulationJointReducedCoordinate* getInboundJoint() const = 0; /** \brief Gets the number of degrees of freedom of the joint which connects this link to its parent. - The root link DOF-count is defined to be 0 regardless of PxArticulationFlag::eFIX_BASE. 
- The return value is only valid for articulations that are in a scene. \return The number of degrees of freedom, or 0xFFFFFFFF if the articulation is not in a scene. @see PxArticulationJointReducedCoordinate */ virtual PxU32 getInboundJointDof() const = 0; /** \brief Gets the number of child links. \return The number of child links. @see getChildren */ virtual PxU32 getNbChildren() const = 0; /** \brief Gets the low-level link index that may be used to index into members of PxArticulationCache. The return value is only valid for articulations that are in a scene. \return The low-level index, or 0xFFFFFFFF if the articulation is not in a scene. @see PxArticulationCache */ virtual PxU32 getLinkIndex() const = 0; /** \brief Retrieves the child links. \param[out] userBuffer The buffer to receive articulation link pointers. \param[in] bufferSize The size of the provided user buffer, use getNbChildren() for sizing. \param[in] startIndex The index of the first child pointer to be retrieved. \return The number of articulation links written to the buffer. @see getNbChildren */ virtual PxU32 getChildren(PxArticulationLink** userBuffer, PxU32 bufferSize, PxU32 startIndex = 0) const = 0; /** \brief Set the constraint-force-mixing scale term. The cfm scale term is a stabilization term that helps avoid instabilities with over-constrained configurations. It should be a small value that is multiplied by 1/mass internally to produce an additional bias added to the unit response term in the solver. \param[in] cfm The constraint-force-mixing scale term. <b>Default:</b> 0.025 <b>Range:</b> [0, 1] \note This call is not allowed while the simulation is running. @see getCfmScale */ virtual void setCfmScale(const PxReal cfm) = 0; /** \brief Get the constraint-force-mixing scale term. \return The constraint-force-mixing scale term. @see setCfmScale */ virtual PxReal getCfmScale() const = 0; /** \brief Get the linear velocity of the link. - For performance, prefer PxArticulationCache::linkVelocity to get link spatial velocities in a batch query. - When the articulation state is updated via non-cache API, use PxArticulationReducedCoordinate::updateKinematic before querying velocity. \return The linear velocity of the link. \note This call is not allowed while the simulation is running except in a split simulation during #PxScene::collide() and up to #PxScene::advance(), and in PxContactModifyCallback or in contact report callbacks. \note The linear velocity is reported with respect to the link's center of mass and not the actor frame origin. @see PxRigidBody::getCMassLocalPose */ virtual PxVec3 getLinearVelocity() const = 0; /** \brief Get the angular velocity of the link. - For performance, prefer PxArticulationCache::linkVelocity to get link spatial velocities in a batch query. - When the articulation state is updated via non-cache API, use PxArticulationReducedCoordinate::updateKinematic before querying velocity. \return The angular velocity of the link. \note This call is not allowed while the simulation is running except in a split simulation during #PxScene::collide() and up to #PxScene::advance(), and in PxContactModifyCallback or in contact report callbacks. */ virtual PxVec3 getAngularVelocity() const = 0; /** \brief Returns the string name of the dynamic type. \return The string name. 
*/ virtual const char* getConcreteTypeName() const { return "PxArticulationLink"; } protected: PX_INLINE PxArticulationLink(PxType concreteType, PxBaseFlags baseFlags) : PxRigidBody(concreteType, baseFlags) {} PX_INLINE PxArticulationLink(PxBaseFlags baseFlags) : PxRigidBody(baseFlags) {} virtual ~PxArticulationLink() {} virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxArticulationLink", PxRigidBody); } }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
7,641
C
35.917874
150
0.758539
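A small traversal sketch for the child-query API shown in PxArticulationLink.h. The fixed buffer size of 16 and the helper name visitChildren are assumptions made for brevity; production code would size the buffer from getNbChildren().

#include "PxArticulationLink.h"
#include "foundation/PxPreprocessor.h"

using namespace physx;

// Visit the immediate children of a link and query the joint that connects
// each child back to 'link'.
void visitChildren(const PxArticulationLink& link)
{
	PxArticulationLink* children[16];	// assumed upper bound, sketch only
	const PxU32 written = link.getChildren(children, 16);

	for(PxU32 i = 0; i < written; ++i)
	{
		// The inbound joint is NULL only for the root link, never for a child.
		PxArticulationJointReducedCoordinate* joint = children[i]->getInboundJoint();
		PX_UNUSED(joint);
	}
}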
NVIDIA-Omniverse/PhysX/physx/include/PxAttachment.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_ATTACHMENT_H #define PX_ATTACHMENT_H #include "PxConeLimitedConstraint.h" #include "PxFiltering.h" #include "PxNodeIndex.h" #include "foundation/PxVec4.h" /** \addtogroup physics @{ */ #if !PX_DOXYGEN namespace physx { #endif /** \brief Struct to specify attachment between a particle/vertex and a rigid */ struct PxParticleRigidAttachment : public PxParticleRigidFilterPair { PxParticleRigidAttachment() {} PxParticleRigidAttachment(const PxConeLimitedConstraint& coneLimitedConstraint, const PxVec4& localPose0): PxParticleRigidFilterPair(PxNodeIndex().getInd(), PxNodeIndex().getInd()), mLocalPose0(localPose0), mConeLimitParams(coneLimitedConstraint) { } PX_ALIGN(16, PxVec4 mLocalPose0); //!< local pose in body frame - except for statics, these are using world positions. PxConeLimitParams mConeLimitParams; //!< Parameters to specify cone constraints }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
2,651
C
36.885714
119
0.765749
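A sketch of filling in a PxParticleRigidAttachment record using the constructor declared above. The assumption that a default-constructed PxConeLimitedConstraint disables the cone limit, and the helper name makeAttachment, are illustrative only; setting the filter-pair node indices and uploading the record to a particle buffer are out of scope here.

#include "PxAttachment.h"

using namespace physx;

// Build an attachment record for a particle pinned at 'localPos' in the
// rigid body's frame.
PxParticleRigidAttachment makeAttachment(const PxVec3& localPos)
{
	const PxConeLimitedConstraint noConeLimit;   // sketch assumption: default values mean no cone limit
	const PxVec4 localPose0(localPos, 0.0f);     // w component left at zero for this sketch
	return PxParticleRigidAttachment(noConeLimit, localPose0);
}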
NVIDIA-Omniverse/PhysX/physx/include/PxParticleSystem.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_PARTICLE_SYSTEM_H #define PX_PARTICLE_SYSTEM_H /** \addtogroup physics @{ */ #include "foundation/PxSimpleTypes.h" #include "PxActor.h" #include "PxFiltering.h" #include "PxParticleSystemFlag.h" #include "foundation/PxArray.h" #include "cudamanager/PxCudaTypes.h" #if !PX_DOXYGEN namespace physx { #endif #if PX_VC #pragma warning(push) #pragma warning(disable : 4435) #endif class PxCudaContextManager; class PxGpuParticleSystem; class PxParticleAndDiffuseBuffer; class PxParticleBuffer; class PxParticleMaterial; /** \brief Container to hold a pair of corresponding device and host pointers. These pointers should point to GPU / CPU mirrors of the same data, but this is not enforced. */ template <typename Type> struct PxGpuMirroredPointer { Type* mDevicePtr; Type* mHostPtr; PxGpuMirroredPointer(Type* devicePtr, Type* hostPtr) : mDevicePtr(devicePtr), mHostPtr(hostPtr) { } }; /** \brief Particle system callback base class to schedule work that should be done before, while or after the particle system updates. A call to fetchResultsParticleSystem() on the PxScene will synchronize the work such that the caller knows that all tasks of this callback completed. */ class PxParticleSystemCallback { public: /** \brief Method gets called when dirty data from the particle system is uploated to the gpu \param[in] gpuParticleSystem Pointers to the particle systems gpu data available as host accessible pointer and as gpu accessible pointer \param[in] stream The stream on which all cuda kernel calls get scheduled for execution. A call to fetchResultsParticleSystem() on the PxScene will synchronize the work such that the caller knows that the task completed. 
*/ virtual void onBegin(const PxGpuMirroredPointer<PxGpuParticleSystem>& gpuParticleSystem, CUstream stream) = 0; /** \brief Method gets called when the simulation step of the particle system is performed \param[in] gpuParticleSystem Pointers to the particle systems gpu data available as host accessible pointer and as gpu accessible pointer \param[in] stream The stream on which all cuda kernel calls get scheduled for execution. A call to fetchResultsParticleSystem() on the PxScene will synchronize the work such that the caller knows that the task completed. */ virtual void onAdvance(const PxGpuMirroredPointer<PxGpuParticleSystem>& gpuParticleSystem, CUstream stream) = 0; /** \brief Method gets called after the particle system simulation step completed \param[in] gpuParticleSystem Pointers to the particle systems gpu data available as host accessible pointer and as gpu accessible pointer \param[in] stream The stream on which all cuda kernel calls get scheduled for execution. A call to fetchResultsParticleSystem() on the PxScene will synchronize the work such that the caller knows that the task completed. */ virtual void onPostSolve(const PxGpuMirroredPointer<PxGpuParticleSystem>& gpuParticleSystem, CUstream stream) = 0; /** \brief Destructor */ virtual ~PxParticleSystemCallback() {} }; /** \brief Special callback that forwards calls to arbitrarily many sub-callbacks */ class PxMultiCallback : public PxParticleSystemCallback { private: PxArray<PxParticleSystemCallback*> mCallbacks; public: PxMultiCallback() : mCallbacks(0) {} virtual void onPostSolve(const PxGpuMirroredPointer<PxGpuParticleSystem>& gpuParticleSystem, CUstream stream) PX_OVERRIDE { for (PxU32 i = 0; i < mCallbacks.size(); ++i) mCallbacks[i]->onPostSolve(gpuParticleSystem, stream); } virtual void onBegin(const PxGpuMirroredPointer<PxGpuParticleSystem>& gpuParticleSystem, CUstream stream) PX_OVERRIDE { for (PxU32 i = 0; i < mCallbacks.size(); ++i) mCallbacks[i]->onBegin(gpuParticleSystem, stream); } virtual void onAdvance(const PxGpuMirroredPointer<PxGpuParticleSystem>& gpuParticleSystem, CUstream stream) PX_OVERRIDE { for (PxU32 i = 0; i < mCallbacks.size(); ++i) mCallbacks[i]->onAdvance(gpuParticleSystem, stream); } /** \brief Adds a callback \param[in] callback The callback to add \return True if the callback was added */ bool addCallback(PxParticleSystemCallback* callback) { if (mCallbacks.find(callback) != mCallbacks.end()) return false; mCallbacks.pushBack(callback); return true; } /** \brief Removes a callback \param[in] callback The callback to remove \return True if the callback was removed */ bool removeCallback(const PxParticleSystemCallback* callback) { for (PxU32 i = 0; i < mCallbacks.size(); ++i) { if (mCallbacks[i] == callback) { mCallbacks.remove(i); return true; } } return false; } }; /** \brief Flags which control the behaviour of a particle system. See #PxParticleSystem::setParticleFlag(), #PxParticleSystem::setParticleFlags(), #PxParticleSystem::getParticleFlags() */ struct PxParticleFlag { enum Enum { eDISABLE_SELF_COLLISION = 1 << 0, //!< Disables particle self-collision eDISABLE_RIGID_COLLISION = 1 << 1, //!< Disables particle-rigid body collision eFULL_DIFFUSE_ADVECTION = 1 << 2 //!< Enables full advection of diffuse particles. By default, diffuse particles are advected only by particles in the cell they are contained. This flag enables full neighbourhood generation (more expensive). 
}; }; typedef PxFlags<PxParticleFlag::Enum, PxU32> PxParticleFlags; /** \brief The shared base class for all particle systems A particle system simulates a bunch of particles that interact with each other. The interactions can be simple collisions with friction (granular material) ore more complex like fluid interactions, cloth, inflatables etc. */ class PxParticleSystem : public PxActor { public: /** \brief Sets the solver iteration counts for the body. The solver iteration count determines how accurately joints and contacts are resolved. If you are having trouble with jointed bodies oscillating and behaving erratically, then setting a higher position iteration count may improve their stability. If intersecting bodies are being depenetrated too violently, increase the number of velocity iterations. More velocity iterations will drive the relative exit velocity of the intersecting objects closer to the correct value given the restitution. <b>Default:</b> 4 position iterations, 1 velocity iteration \param[in] minPositionIters Number of position iterations the solver should perform for this body. <b>Range:</b> [1,255] \param[in] minVelocityIters Number of velocity iterations the solver should perform for this body. <b>Range:</b> [1,255] See #getSolverIterationCounts() */ virtual void setSolverIterationCounts(PxU32 minPositionIters, PxU32 minVelocityIters = 1) = 0; /** \brief Retrieves the solver iteration counts. See #setSolverIterationCounts() */ virtual void getSolverIterationCounts(PxU32& minPositionIters, PxU32& minVelocityIters) const = 0; /** \brief Retrieves the collision filter settings. \return The filter data */ virtual PxFilterData getSimulationFilterData() const = 0; /** \brief Set collision filter settings Allows to control with which objects the particle system collides \param[in] data The filter data */ virtual void setSimulationFilterData(const PxFilterData& data) = 0; /** \brief Set particle flag Allows to control self collision etc. \param[in] flag The flag to set \param[in] val The new value of the flag */ virtual void setParticleFlag(PxParticleFlag::Enum flag, bool val) = 0; /** \brief Set particle flags Allows to control self collision etc. \param[in] flags The flags to set */ virtual void setParticleFlags(PxParticleFlags flags) = 0; /** \brief Retrieves the particle flags. \return The particle flags */ virtual PxParticleFlags getParticleFlags() const = 0; /** \brief Set the maximal depenetration velocity particles can reach Allows to limit the particles' maximal depenetration velocity to avoid that collision responses lead to very high particle velocities \param[in] maxDepenetrationVelocity The maximal depenetration velocity */ virtual void setMaxDepenetrationVelocity(PxReal maxDepenetrationVelocity) = 0; /** \brief Retrieves maximal depenetration velocity a particle can have. \return The maximal depenetration velocity */ virtual PxReal getMaxDepenetrationVelocity() = 0; /** \brief Set the maximal velocity particles can reach Allows to limit the particles' maximal velocity to control the maximal distance a particle can move per frame \param[in] maxVelocity The maximal velocity */ virtual void setMaxVelocity(PxReal maxVelocity) = 0; /** \brief Retrieves maximal velocity a particle can have. 
\return The maximal velocity */ virtual PxReal getMaxVelocity() = 0; /** \brief Return the cuda context manager \return The cuda context manager */ virtual PxCudaContextManager* getCudaContextManager() const = 0; /** \brief Set the rest offset for the collision between particles and rigids or soft bodies. A particle and a rigid or soft body will come to rest at a distance equal to the sum of their restOffset values. \param[in] restOffset <b>Range:</b> (0, contactOffset) */ virtual void setRestOffset(PxReal restOffset) = 0; /** \brief Return the rest offset \return the rest offset See #setRestOffset() */ virtual PxReal getRestOffset() const = 0; /** \brief Set the contact offset for the collision between particles and rigids or soft bodies The contact offset needs to be larger than the rest offset. Contact constraints are generated for a particle and a rigid or softbody below the distance equal to the sum of their contacOffset values. \param[in] contactOffset <b>Range:</b> (restOffset, PX_MAX_F32) */ virtual void setContactOffset(PxReal contactOffset) = 0; /** \brief Return the contact offset \return the contact offset See #setContactOffset() */ virtual PxReal getContactOffset() const = 0; /** \brief Set the contact offset for the interactions between particles The particle contact offset needs to be larger than the fluid rest offset and larger than the solid rest offset. Interactions for two particles are computed if their distance is below twice the particleContactOffset value. \param[in] particleContactOffset <b>Range:</b> (Max(solidRestOffset, fluidRestOffset), PX_MAX_F32) */ virtual void setParticleContactOffset(PxReal particleContactOffset) = 0; /** \brief Return the particle contact offset \return the particle contact offset See #setParticleContactOffset() */ virtual PxReal getParticleContactOffset() const = 0; /** \brief Set the solid rest offset Two solid particles (or a solid and a fluid particle) will come to rest at a distance equal to twice the solidRestOffset value. \param[in] solidRestOffset <b>Range:</b> (0, particleContactOffset) */ virtual void setSolidRestOffset(PxReal solidRestOffset) = 0; /** \brief Return the solid rest offset \return the solid rest offset See #setSolidRestOffset() */ virtual PxReal getSolidRestOffset() const = 0; /** \brief Creates a rigid attachment between a particle and a rigid actor. This method creates a symbolic attachment between the particle system and a rigid body for the purpose of island management. The actual attachments will be contained in the particle buffers. Be aware that destroying the rigid body before destroying the attachment is illegal and may cause a crash. The particle system keeps track of these attachments but the rigid body does not. \param[in] actor The rigid actor used for the attachment */ virtual void addRigidAttachment(PxRigidActor* actor) = 0; /** \brief Removes a rigid attachment between a particle and a rigid body. This method destroys a symbolic attachment between the particle system and a rigid body for the purpose of island management. Be aware that destroying the rigid body before destroying the attachment is illegal and may cause a crash. The particle system keeps track of these attachments but the rigid body does not. \param[in] actor The rigid body actor used for the attachment */ virtual void removeRigidAttachment(PxRigidActor* actor) = 0; /** \brief Enable continuous collision detection for particles \param[in] enable Boolean indicates whether continuous collision detection is enabled. 
*/ virtual void enableCCD(bool enable) = 0; /** \brief Creates a combined particle group index and phase flags value from a particle material and particle phase flags. \param[in] material A material instance to associate with the new particle group. \param[in] flags The particle phase flags. \return The combined particle group index and phase flags. See #PxParticlePhaseFlag */ virtual PxU32 createPhase(PxParticleMaterial* material, PxParticlePhaseFlags flags) = 0; /** \brief Returns number of particle materials referenced by particle phases \return The number of particle materials */ virtual PxU32 getNbParticleMaterials() const = 0; /** \brief Returns particle materials referenced by particle phases \return The particle materials */ virtual PxU32 getParticleMaterials(PxParticleMaterial** userBuffer, PxU32 bufferSize, PxU32 startIndex = 0) const = 0; /** \brief Sets a user notify object which receives special simulation events when they occur. \note Do not set the callback while the simulation is running. Calls to this method while the simulation is running will be ignored. \note A call to fetchResultsParticleSystem() on the PxScene will synchronize the work such that the caller knows that all work done in the callback has completed. \param[in] callback User notification callback. See #PxParticleSystemCallback, #getParticleSystemCallback() */ virtual void setParticleSystemCallback(PxParticleSystemCallback* callback) = 0; /** \brief Retrieves the particle system callback pointer set with setParticleSystemCallback(). \return The current user notify pointer. See #PxParticleSystemCallback, #setParticleSystemCallback() */ virtual PxParticleSystemCallback* getParticleSystemCallback() const = 0; /** \brief Add an existing particle buffer to the particle system. \param[in] particleBuffer a PxParticleBuffer*. See #PxParticleBuffer. */ virtual void addParticleBuffer(PxParticleBuffer* particleBuffer) = 0; /** \brief Remove particle buffer from the particle system. \param[in] particleBuffer a PxParticleBuffer*. See #PxParticleBuffer. */ virtual void removeParticleBuffer(PxParticleBuffer* particleBuffer) = 0; /** \brief Returns the GPU particle system index. \return The GPU index, if the particle system is in a scene and PxSceneFlag::eENABLE_DIRECT_GPU_API is set, or 0xFFFFFFFF otherwise. */ virtual PxU32 getGpuParticleSystemIndex() = 0; protected: virtual ~PxParticleSystem() {} PX_INLINE PxParticleSystem(PxType concreteType, PxBaseFlags baseFlags) : PxActor(concreteType, baseFlags) {} PX_INLINE PxParticleSystem(PxBaseFlags baseFlags) : PxActor(baseFlags) {} virtual bool isKindOf(const char* name) const PX_OVERRIDE { PX_IS_KIND_OF(name, "PxParticleSystem", PxActor); } }; #if PX_VC #pragma warning(pop) #endif #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
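A minimal usage sketch for the interface above, assuming an already created PxPBDParticleSystem, PxPBDMaterial and PxParticleBuffer (their factory functions appear further below in PxPhysics.h); the numeric values are illustrative, and the phase flag names are assumed from PxParticlePhase.h rather than shown in this file.

// Example (not part of the SDK header): configure a particle system and add it to a scene.
#include "PxPhysicsAPI.h"

void configureParticleSystem(physx::PxPBDParticleSystem& ps, physx::PxPBDMaterial& mat,
                             physx::PxParticleBuffer& buffer, physx::PxScene& scene)
{
	using namespace physx;

	ps.setSolverIterationCounts(8, 1);       // more position iterations for stiffer contact behavior
	ps.setRestOffset(0.01f);                 // rest distance against rigids/soft bodies
	ps.setContactOffset(0.015f);             // must be larger than the rest offset
	ps.setParticleContactOffset(0.02f);      // must exceed the solid/fluid rest offsets
	ps.setMaxDepenetrationVelocity(10.0f);   // clamp violent depenetration responses

	// Combine a material with phase flags; the returned phase value is written into
	// the particle buffer's per-particle phase array by the application.
	const PxU32 phase = ps.createPhase(&mat, PxParticlePhaseFlags(
		PxParticlePhaseFlag::eParticlePhaseFluid | PxParticlePhaseFlag::eParticlePhaseSelfCollide));
	PX_UNUSED(phase);

	ps.addParticleBuffer(&buffer);           // attach the particle data
	scene.addActor(ps);                      // a particle system is a PxActor
}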
17,510
C
33.675247
243
0.742833
NVIDIA-Omniverse/PhysX/physx/include/PxLineStripSkinning.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_LINE_STRIP_SKINNING_H #define PX_LINE_STRIP_SKINNING_H #include "cudamanager/PxCudaContext.h" #include "cudamanager/PxCudaContextManager.h" #include "foundation/PxSimpleTypes.h" #include "foundation/PxVec4.h" #include "foundation/PxMat44.h" #if !PX_DOXYGEN namespace physx { #endif #if PX_SUPPORT_GPU_PHYSX class PxHairSystemDesc; /** \brief Specifies the position and binding of a point relative to a line strip such that the point moves with the line during simulation. */ struct PxLineStripSkinnedVertex { PxVec3 mPosition; //!< The position of the vertex that should move with a line strip. Expressed in the same coordinate system as the line strip points when computing the skinning. PxU32 mSegmentId; //!< The id of the vertex at the beginning of the line segment to which this point is connected PxReal mSegmentLocation; //!< Parameter in the range 0...1 that specifies the location on the segment where the base coordinate system gets evaluated at runtime to update the skinned position. PxLineStripSkinnedVertex() : mPosition(0.0f), mSegmentId(0), mSegmentLocation(0.0f) { } PxLineStripSkinnedVertex(const PxVec3& position, PxU32 segmentId, PxReal segmentLocation) : mPosition(position), mSegmentId(segmentId), mSegmentLocation(segmentLocation) { } }; /** \brief Utility class to embed high resolution line strips into low resolution line strips. No internal data is cached except a reference to the cuda context manager. Therefore, a single instance of this class may be used for different hair systems. */ class PxLineStripSkinning { public: /** \brief Computes the skinning information used to update the skinned points during simulation \note All computations in this function are happening on the CPU, therefore, all supplied pointers must point to CPU-accessible memory. 
\param[in] simVertices The simulated vertices in the state in which the skinning information shall be computed. These vertices will eventually drive the skinned vertices \param[in] simStrandPastEndIndices The index after the last strand vertex for the simulation strands (strand = line strip) \param[in] nbLineStrips The number of line strips \param[in] skinnedVertices The positions and segment locations of the skinned vertices in the initial frame. They will keep their position relative to the line strip segment during simulation. \param[in] nbSkinnedVertices The total number of skinned vertices \param[out] skinnedVertexInterpolationData Must provide space for one entry per skinned vertex. Contains the location of the skinned vertex relative to the interpolated base frame. The w component encodes the location of the base frame along the line segment. \param[out] skinningInfoRootStrandDirections Must provide space for one entry per line strip. Contains the direction of the first line segment per line strip during the calculation of the skinning information. \param[out] skinningInfoStrandStartIndices Must provide space for one entry per line strip. Contains the index of the first skinning vertex in the buffer for every line strip. The skinned vertices must be sorted such that vertices attached to the same line strip are adjacent to each other in the buffer. \param[out] skinningInfoReorderMap Can be NULL if the skinning vertices are already sorted per strand segment id. Must provide space for one entry per skinned vertex. Contains a reorder map since the skinning algorithm needs to process the skinned vertices in a specific order. \param[in] transform Optional transform that gets applied to the simVertices before computing the skinning information \param[in] catmullRomAlpha Optional parameter in the range 0...1 that allows to control the curve interpolation. */ virtual void initializeInterpolatedVertices(const PxVec4* simVertices, const PxU32* simStrandPastEndIndices, PxU32 nbLineStrips, const PxLineStripSkinnedVertex* skinnedVertices, PxU32 nbSkinnedVertices, PxVec4* skinnedVertexInterpolationData, PxVec3* skinningInfoRootStrandDirections, PxU32* skinningInfoStrandStartIndices, PxU32* skinningInfoReorderMap = NULL, const PxMat44& transform = PxMat44(PxIdentity), PxReal catmullRomAlpha = 0.5f) = 0; /** \brief Evaluates and updates the skinned positions based on the interpolation data on the GPU. All input and output arrays must be accessible on the GPU. \param[in] simVerticesD The simulation vertices (device pointer) according to which the skinned positions shall be computed. \param[in] simStrandPastEndIndicesD Device pointer containing the index after the last strand vertex for the simulation strands (strand = line strip) \param[in] nbSimStrands The number of line strips \param[in] skinnedVertexInterpolationDataD Device pointer containing the location of the skinned vertex relative to the interpolated base frame. The w component encodes the location of the base frame along the line segment. \param[in] skinningInfoStrandStartIndicesD Device pointer containing the index of the first skinning vertex in the buffer for every line strip. The skinned vertices must be sorted such that vertices attached to the same line strip are adjacent to each other in the buffer. \param[in] skinningInfoRootStrandDirectionsD Device pointer containing the direction of the first line segment per line strip during the calculation of the skinning information. 
\param[in] skinningInfoReorderMapD Can be NULL if the skinning vertices are already sorted per strand segment id. Device pointer containing the reorder map for the skinned vertices. \param[in] nbSkinnedVertices The total number of skinned vertices \param[out] resultD The skinned vertex positions where the updated positions will be written \param[in] stream The CUDA stream on which the kernel call gets scheduled \param[in] inputTransformD Optional device buffer holding a transform that gets applied to the simVertices before computing the skinning information \param[in] outputTransformD Optional device buffer holding a transform that gets applied to the result vertices \param[in] catmullRomAlpha Optional parameter in the range 0...1 that allows control over the curve interpolation. */ virtual void evaluateInterpolatedVertices(const PxVec4* simVerticesD, const PxU32* simStrandPastEndIndicesD, PxU32 nbSimStrands, const PxVec4* skinnedVertexInterpolationDataD, const PxU32* skinningInfoStrandStartIndicesD, const PxVec3* skinningInfoRootStrandDirectionsD, const PxU32* skinningInfoReorderMapD, PxU32 nbSkinnedVertices, PxVec3* resultD, CUstream stream, const PxMat44* inputTransformD = NULL, const PxMat44* outputTransformD = NULL, PxReal catmullRomAlpha = 0.5f) = 0; /** \brief Destructor */ virtual ~PxLineStripSkinning() {} }; #endif #if !PX_DOXYGEN } // namespace physx #endif #endif
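A sketch of the intended two-phase flow, assuming the caller already owns a PxLineStripSkinning instance (its factory is not part of this header) and has allocated matching host buffers and device buffers (suffix "D"); all parameter names below are illustrative.

// Example (not part of the SDK header): one-time CPU setup followed by a per-frame GPU update.
#include "PxLineStripSkinning.h"

void setupSkinning(physx::PxLineStripSkinning& skinning,
                   const physx::PxVec4* simVertices, const physx::PxU32* strandPastEndIndices, physx::PxU32 nbStrands,
                   const physx::PxLineStripSkinnedVertex* skinnedVertices, physx::PxU32 nbSkinnedVertices,
                   physx::PxVec4* interpolationData,   // out: one entry per skinned vertex
                   physx::PxVec3* rootDirections,      // out: one entry per strand
                   physx::PxU32* strandStartIndices,   // out: one entry per strand
                   physx::PxU32* reorderMap)           // out: one entry per skinned vertex
{
	// All pointers here must be CPU-accessible; the computation runs on the host.
	skinning.initializeInterpolatedVertices(simVertices, strandPastEndIndices, nbStrands,
		skinnedVertices, nbSkinnedVertices, interpolationData, rootDirections, strandStartIndices, reorderMap);
}

void updateSkinning(physx::PxLineStripSkinning& skinning,
                    const physx::PxVec4* simVerticesD, const physx::PxU32* strandPastEndIndicesD, physx::PxU32 nbStrands,
                    const physx::PxVec4* interpolationDataD, const physx::PxU32* strandStartIndicesD,
                    const physx::PxVec3* rootDirectionsD, const physx::PxU32* reorderMapD,
                    physx::PxU32 nbSkinnedVertices, physx::PxVec3* resultD, CUstream stream)
{
	// All pointers here must be device memory; the kernel is scheduled on 'stream'.
	skinning.evaluateInterpolatedVertices(simVerticesD, strandPastEndIndicesD, nbStrands,
		interpolationDataD, strandStartIndicesD, rootDirectionsD, reorderMapD,
		nbSkinnedVertices, resultD, stream);
}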
8,527
C
65.108527
306
0.794887
NVIDIA-Omniverse/PhysX/physx/include/PxFEMClothMaterial.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_FEM_CLOTH_MATERIAL_H #define PX_FEM_CLOTH_MATERIAL_H /** \addtogroup physics @{ */ #include "PxFEMMaterial.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief Material class to represent a set of FEM cloth material properties. @see PxPhysics.createFEMClothMaterial */ class PxFEMClothMaterial : public PxFEMMaterial { public: /** \brief Sets material thickness \param[in] thickness Material thickness. @see getThickness */ virtual void setThickness(PxReal thickness) = 0; /** \brief Retrieves the material thickness. \return The thickness. @see setThickness() */ virtual PxReal getThickness() const = 0; virtual const char* getConcreteTypeName() const { return "PxFEMClothMaterial"; } protected: PX_INLINE PxFEMClothMaterial(PxType concreteType, PxBaseFlags baseFlags) : PxFEMMaterial(concreteType, baseFlags) {} PX_INLINE PxFEMClothMaterial(PxBaseFlags baseFlags) : PxFEMMaterial(baseFlags) {} virtual ~PxFEMClothMaterial() {} virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxFEMClothMaterial", PxFEMMaterial); } }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
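A short sketch of creating and configuring this material through PxPhysics::createFEMClothMaterial(), which is declared further below in PxPhysics.h (a feature marked as under development there); the numeric values are illustrative only.

// Example (not part of the SDK header): create a cloth material and set its thickness.
#include "PxPhysicsAPI.h"

physx::PxFEMClothMaterial* makeClothMaterial(physx::PxPhysics& physics)
{
	// Young's modulus, Poisson's ratio, dynamic friction; see PxPhysics::createFEMClothMaterial().
	physx::PxFEMClothMaterial* mat = physics.createFEMClothMaterial(1.0e6f, 0.45f, 0.5f);
	if (mat)
		mat->setThickness(0.002f);   // 2 mm cloth
	return mat;
}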
2,880
C
33.710843
122
0.744444
NVIDIA-Omniverse/PhysX/physx/include/PxSoftBodyFlag.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_SOFT_BODY_FLAG_H #define PX_SOFT_BODY_FLAG_H #include "PxPhysXConfig.h" #include "foundation/PxFlags.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief Identifies the buffers of a PxSoftBody. @see PxSoftBody::markDirty() */ struct PxSoftBodyDataFlag { enum Enum { eNONE = 0, ePOSITION_INVMASS = 1 << 0, //!< The collision mesh's positions eSIM_POSITION_INVMASS = 1 << 1, //!< The simulation mesh's positions and inverse masses eSIM_VELOCITY = 1 << 2, //!< The simulation mesh's velocities eREST_POSITION_INVMASS = 1 << 3, //!< The collision mesh's rest positions eALL = ePOSITION_INVMASS | eSIM_POSITION_INVMASS | eSIM_VELOCITY | eREST_POSITION_INVMASS }; }; typedef PxFlags<PxSoftBodyDataFlag::Enum, PxU32> PxSoftBodyDataFlags; /** \brief These flags determine what data is read or written when using PxScene::copySoftBodyData() or PxScene::applySoftBodyData. 
@see PxScene::copySoftBodyData, PxScene::applySoftBodyData */ class PxSoftBodyGpuDataFlag { public: enum Enum { eTET_INDICES = 0, //!< The collision mesh tetrahedron indices (quadruples of int32) eTET_REST_POSES = 1, //!< The collision mesh tetrahedron rest poses (float 3x3 matrices) eTET_ROTATIONS = 2, //!< The collision mesh tetrahedron orientations (quaternions, quadruples of float) eTET_POSITION_INV_MASS = 3, //!< The collision mesh vertex positions and their inverted mass in the 4th component (quadruples of float) eSIM_TET_INDICES = 4, //!< The simulation mesh tetrahedron indices (quadruples of int32) eSIM_TET_ROTATIONS = 5, //!< The simulation mesh tetrahedron orientations (quaternions, quadruples of float) eSIM_VELOCITY_INV_MASS = 6, //!< The simulation mesh vertex velocities and their inverted mass in the 4th component (quadruples of float) eSIM_POSITION_INV_MASS = 7 //!< The simulation mesh vertex positions and their inverted mass in the 4th component (quadruples of float) }; }; #if !PX_DOXYGEN } #endif #endif
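A small sketch of composing the dirty-buffer flags defined above; it assumes that PxSoftBody::markDirty(), which the comment above references, accepts a PxSoftBodyDataFlags mask.

// Example (not part of the SDK header): flag host-modified soft body buffers for re-upload.
#include "PxPhysicsAPI.h"

void markPositionsDirty(physx::PxSoftBody& softBody)
{
	using namespace physx;
	PxSoftBodyDataFlags dirty(PxSoftBodyDataFlag::ePOSITION_INVMASS);
	dirty |= PxSoftBodyDataFlag::eSIM_POSITION_INVMASS;
	softBody.markDirty(dirty);   // assumed signature: markDirty(PxSoftBodyDataFlags)
}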
3,704
C
40.629213
139
0.741361
NVIDIA-Omniverse/PhysX/physx/include/PxDeletionListener.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_DELETION_LISTENER_H #define PX_DELETION_LISTENER_H /** \addtogroup physics @{ */ #include "PxPhysXConfig.h" #include "common/PxBase.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief Flags specifying deletion event types. @see PxDeletionListener::onRelease PxPhysics.registerDeletionListener() */ struct PxDeletionEventFlag { enum Enum { eUSER_RELEASE = (1<<0), //!< The user has called release on an object. eMEMORY_RELEASE = (1<<1) //!< The destructor of an object has been called and the memory has been released. }; }; /** \brief Collection of set bits defined in PxDeletionEventFlag. @see PxDeletionEventFlag */ typedef PxFlags<PxDeletionEventFlag::Enum,PxU8> PxDeletionEventFlags; PX_FLAGS_OPERATORS(PxDeletionEventFlag::Enum,PxU8) /** \brief interface to get notification on object deletion */ class PxDeletionListener { public: /** \brief Notification if an object or its memory gets released If release() gets called on a PxBase object, an eUSER_RELEASE event will get fired immediately. The object state can be queried in the callback but it is not allowed to change the state. Furthermore, when reading from the object it is the user's responsibility to make sure that no other thread is writing at the same time to the object (this includes the simulation itself, i.e., #PxScene::fetchResults() must not get called at the same time). Calling release() on a PxBase object does not necessarily trigger its destructor immediately. For example, the object can be shared and might still be referenced by other objects or the simulation might still be running and accessing the object state. In such cases the destructor will be called as soon as it is safe to do so. After the destruction of the object and its memory, an eMEMORY_RELEASE event will get fired. In this case it is not allowed to dereference the object pointer in the callback. 
\param[in] observed The object for which the deletion event gets fired. \param[in] userData The user data pointer of the object for which the deletion event gets fired. Not available for all object types in which case it will be set to 0. \param[in] deletionEvent The type of deletion event. Do not dereference the object pointer argument if the event is eMEMORY_RELEASE. */ virtual void onRelease(const PxBase* observed, void* userData, PxDeletionEventFlag::Enum deletionEvent) = 0; protected: PxDeletionListener() {} virtual ~PxDeletionListener() {} }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
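A minimal listener implementation for the interface above; registration goes through PxPhysics::registerDeletionListener(), as referenced in the comments, whose exact signature is not shown in this file.

// Example (not part of the SDK header): log user-release events without touching freed memory.
#include "PxPhysicsAPI.h"
#include <cstdio>

class LoggingDeletionListener : public physx::PxDeletionListener
{
public:
	virtual void onRelease(const physx::PxBase* observed, void* userData,
	                       physx::PxDeletionEventFlag::Enum deletionEvent) PX_OVERRIDE
	{
		PX_UNUSED(userData);
		if (deletionEvent == physx::PxDeletionEventFlag::eUSER_RELEASE)
		{
			// The object is still alive here; it may be inspected but not modified.
			printf("released: %s\n", observed->getConcreteTypeName());
		}
		// For eMEMORY_RELEASE the pointer must not be dereferenced.
	}
};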
4,229
C
39.285714
167
0.763774
NVIDIA-Omniverse/PhysX/physx/include/PxConstraint.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_CONSTRAINT_H #define PX_CONSTRAINT_H /** \addtogroup physics @{ */ #include "PxPhysXConfig.h" #include "PxConstraintDesc.h" #include "common/PxBase.h" #if !PX_DOXYGEN namespace physx { #endif class PxRigidActor; class PxScene; class PxConstraintConnector; /** \brief constraint flags \note eBROKEN is a read only flag */ struct PxConstraintFlag { enum Enum { eBROKEN = 1<<0, //!< whether the constraint is broken eCOLLISION_ENABLED = 1<<3, //!< whether contacts should be generated between the objects this constraint constrains eVISUALIZATION = 1<<4, //!< whether this constraint should be visualized, if constraint visualization is turned on eDRIVE_LIMITS_ARE_FORCES = 1<<5, //!< limits for drive strength are forces rather than impulses eIMPROVED_SLERP = 1<<7, //!< perform preprocessing for improved accuracy on D6 Slerp Drive (this flag will be removed in a future release when preprocessing is no longer required) eDISABLE_PREPROCESSING = 1<<8, //!< suppress constraint preprocessing, intended for use with rowResponseThreshold. May result in worse solver accuracy for ill-conditioned constraints. eENABLE_EXTENDED_LIMITS = 1<<9, //!< enables extended limit ranges for angular limits (e.g., limit values > PxPi or < -PxPi) eGPU_COMPATIBLE = 1<<10, //!< the constraint type is supported by gpu dynamics eALWAYS_UPDATE = 1<<11, //!< updates the constraint each frame eDISABLE_CONSTRAINT = 1<<12 //!< disables the constraint. SolverPrep functions won't be called for this constraint. 
}; }; /** \brief constraint flags @see PxConstraintFlag */ typedef PxFlags<PxConstraintFlag::Enum, PxU16> PxConstraintFlags; PX_FLAGS_OPERATORS(PxConstraintFlag::Enum, PxU16) /** \brief a table of function pointers for a constraint @see PxConstraint */ struct PxConstraintShaderTable { PxConstraintSolverPrep solverPrep; //!< solver constraint generation function PxConstraintVisualize visualize; //!< constraint visualization function PxConstraintFlag::Enum flag; //!< constraint flags }; /** \brief A plugin class for implementing constraints @see PxPhysics.createConstraint */ class PxConstraint : public PxBase { public: /** \brief Releases a PxConstraint instance. \note This call does not wake up the connected rigid bodies. @see PxPhysics.createConstraint, PxBase.release() */ virtual void release() = 0; /** \brief Retrieves the scene which this constraint belongs to. \return Owner Scene. NULL if not part of a scene. @see PxScene */ virtual PxScene* getScene() const = 0; /** \brief Retrieves the actors for this constraint. \param[out] actor0 a reference to the pointer for the first actor \param[out] actor1 a reference to the pointer for the second actor @see PxActor */ virtual void getActors(PxRigidActor*& actor0, PxRigidActor*& actor1) const = 0; /** \brief Sets the actors for this constraint. \param[in] actor0 a reference to the pointer for the first actor \param[in] actor1 a reference to the pointer for the second actor @see PxActor */ virtual void setActors(PxRigidActor* actor0, PxRigidActor* actor1) = 0; /** \brief Notify the scene that the constraint shader data has been updated by the application */ virtual void markDirty() = 0; /** \brief Retrieve the flags for this constraint \return the constraint flags @see PxConstraintFlags */ virtual PxConstraintFlags getFlags() const = 0; /** \brief Set the flags for this constraint \param[in] flags the new constraint flags default: PxConstraintFlag::eDRIVE_LIMITS_ARE_FORCES @see PxConstraintFlags */ virtual void setFlags(PxConstraintFlags flags) = 0; /** \brief Set a flag for this constraint \param[in] flag the constraint flag \param[in] value the new value of the flag @see PxConstraintFlags */ virtual void setFlag(PxConstraintFlag::Enum flag, bool value) = 0; /** \brief Retrieve the constraint force most recently applied to maintain this constraint. \note It is not allowed to use this method while the simulation is running (except during PxScene::collide(), in PxContactModifyCallback or in contact report callbacks). \param[out] linear the constraint force \param[out] angular the constraint torque */ virtual void getForce(PxVec3& linear, PxVec3& angular) const = 0; /** \brief whether the constraint is valid. A constraint is valid if it has at least one dynamic rigid body or articulation link. A constraint that is not valid may not be inserted into a scene, and therefore a static actor to which an invalid constraint is attached may not be inserted into a scene. Invalid constraints arise only when an actor to which the constraint is attached has been deleted. */ virtual bool isValid() const = 0; /** \brief Set the break force and torque thresholds for this constraint. If either the force or torque measured at the constraint exceed these thresholds the constraint will break. 
\param[in] linear the linear break threshold \param[in] angular the angular break threshold */ virtual void setBreakForce(PxReal linear, PxReal angular) = 0; /** \brief Retrieve the constraint break force and torque thresholds \param[out] linear the linear break threshold \param[out] angular the angular break threshold */ virtual void getBreakForce(PxReal& linear, PxReal& angular) const = 0; /** \brief Set the minimum response threshold for a constraint row When using mass modification for a joint or infinite inertia for a jointed body, very stiff solver constraints can be generated which can destabilize simulation. Setting this value to a small positive value (e.g. 1e-8) will cause constraint rows to be ignored if very large changes in impulses would generate only small changes in velocity. When setting this value, also set PxConstraintFlag::eDISABLE_PREPROCESSING. The solver accuracy for this joint may be reduced. \param[in] threshold the minimum response threshold @see PxConstraintFlag::eDISABLE_PREPROCESSING */ virtual void setMinResponseThreshold(PxReal threshold) = 0; /** \brief Retrieve the minimum response threshold for a constraint row \return the minimum response threshold for a constraint row */ virtual PxReal getMinResponseThreshold() const = 0; /** \brief Fetch external owner of the constraint. Provides a reference to the external owner of a constraint and a unique owner type ID. \param[out] typeID Unique type identifier of the external object. \return Reference to the external object which owns the constraint. @see PxConstraintConnector.getExternalReference() */ virtual void* getExternalReference(PxU32& typeID) = 0; /** \brief Set the constraint functions for this constraint \param[in] connector the constraint connector object by which the SDK communicates with the constraint. \param[in] shaders the shader table for the constraint @see PxConstraintConnector PxConstraintSolverPrep PxConstraintVisualize */ virtual void setConstraintFunctions(PxConstraintConnector& connector, const PxConstraintShaderTable& shaders) = 0; virtual const char* getConcreteTypeName() const PX_OVERRIDE { return "PxConstraint"; } void* userData; //!< user can assign this to whatever, usually to create a 1:1 relationship with a user object. protected: PX_INLINE PxConstraint(PxType concreteType, PxBaseFlags baseFlags) : PxBase(concreteType, baseFlags), userData(NULL) {} PX_INLINE PxConstraint(PxBaseFlags baseFlags) : PxBase(baseFlags), userData(NULL) {} virtual ~PxConstraint() {} virtual bool isKindOf(const char* name) const PX_OVERRIDE { PX_IS_KIND_OF(name, "PxConstraint", PxBase); } }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
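A short sketch of driving the flag and breakage API above; the constraint reference is assumed to come from elsewhere (typically the joint that owns it), and the threshold values are illustrative.

// Example (not part of the SDK header): make a constraint breakable and poll its state.
#include "PxPhysicsAPI.h"

void configureBreakable(physx::PxConstraint& constraint)
{
	using namespace physx;
	constraint.setBreakForce(1000.0f, 500.0f);                       // linear / angular thresholds
	constraint.setFlag(PxConstraintFlag::eCOLLISION_ENABLED, true);  // let the jointed bodies collide
	constraint.setFlag(PxConstraintFlag::eVISUALIZATION, true);
}

void pollConstraint(const physx::PxConstraint& constraint)
{
	using namespace physx;
	PxVec3 force, torque;
	constraint.getForce(force, torque);   // not allowed while the simulation is running
	if (constraint.getFlags().isSet(PxConstraintFlag::eBROKEN))
	{
		// The thresholds were exceeded; react here (eBROKEN is a read-only flag).
	}
}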
9,559
C
34.276753
187
0.74558
NVIDIA-Omniverse/PhysX/physx/include/PxFEMSoftBodyMaterial.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_FEM_SOFT_BODY_MATERIAL_H #define PX_FEM_SOFT_BODY_MATERIAL_H /** \addtogroup physics @{ */ #include "PxFEMMaterial.h" #if !PX_DOXYGEN namespace physx { #endif struct PxFEMSoftBodyMaterialModel { enum Enum { eCO_ROTATIONAL, //!< Default model. Well suited for high stiffness. Does need tetrahedra with good shapes (no extreme slivers) in the rest pose. eNEO_HOOKEAN //!< Well suited for lower stiffness. Robust to any tetrahedron shape. }; }; class PxScene; /** \brief Material class to represent a set of softbody FEM material properties. @see PxPhysics.createFEMSoftBodyMaterial */ class PxFEMSoftBodyMaterial : public PxFEMMaterial { public: /** \brief Sets material velocity damping term \param[in] damping Material velocity damping term. <b>Range:</b> [0, PX_MAX_F32)<br> @see getDamping */ virtual void setDamping(PxReal damping) = 0; /** \brief Retrieves velocity damping \return The velocity damping. @see setDamping() */ virtual PxReal getDamping() const = 0; /** \brief Sets material damping scale. A scale of 1 corresponds to default damping, a value of 0 will only apply damping to certain motions leading to special effects that look similar to water filled softbodies. \param[in] scale Damping scale term. <b>Default:</b> 1 <b>Range:</b> [0, 1] @see getDampingScale */ virtual void setDampingScale(PxReal scale) = 0; /** \brief Retrieves material damping scale. \return The damping scale term. @see setDamping() */ virtual PxReal getDampingScale() const = 0; /** \brief Sets the material model. \param[in] model The material model @see getMaterialModel */ virtual void setMaterialModel(PxFEMSoftBodyMaterialModel::Enum model) = 0; /** \brief Retrieves the material model. \return The material model. 
@see setMaterialModel() */ virtual PxFEMSoftBodyMaterialModel::Enum getMaterialModel() const = 0; virtual const char* getConcreteTypeName() const { return "PxFEMSoftBodyMaterial"; } protected: PX_INLINE PxFEMSoftBodyMaterial(PxType concreteType, PxBaseFlags baseFlags) : PxFEMMaterial(concreteType, baseFlags) {} PX_INLINE PxFEMSoftBodyMaterial(PxBaseFlags baseFlags) : PxFEMMaterial(baseFlags) {} virtual ~PxFEMSoftBodyMaterial() {} virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxFEMSoftBodyMaterial", PxFEMMaterial); } }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
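A short configuration sketch using PxPhysics::createFEMSoftBodyMaterial(), which is declared further below in PxPhysics.h; the numeric values are illustrative only.

// Example (not part of the SDK header): create and tune a soft body material.
#include "PxPhysicsAPI.h"

physx::PxFEMSoftBodyMaterial* makeSoftBodyMaterial(physx::PxPhysics& physics)
{
	// Young's modulus, Poisson's ratio, dynamic friction; see PxPhysics::createFEMSoftBodyMaterial().
	physx::PxFEMSoftBodyMaterial* mat = physics.createFEMSoftBodyMaterial(5.0e5f, 0.45f, 0.5f);
	if (mat)
	{
		mat->setDamping(0.05f);
		mat->setDampingScale(1.0f);   // 1 = regular damping; 0 gives the water-filled look described above
		mat->setMaterialModel(physx::PxFEMSoftBodyMaterialModel::eCO_ROTATIONAL);   // stiff, well-shaped tets
	}
	return mat;
}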
4,189
C
31.992126
211
0.736453
NVIDIA-Omniverse/PhysX/physx/include/PxPhysics.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_PHYSICS_H #define PX_PHYSICS_H /** \addtogroup physics @{ */ #include "PxPhysXConfig.h" #include "PxDeletionListener.h" #include "foundation/PxTransform.h" #include "PxShape.h" #include "PxAggregate.h" #include "PxParticleSystem.h" #include "foundation/PxPreprocessor.h" #if PX_ENABLE_FEATURES_UNDER_CONSTRUCTION #include "PxFEMCloth.h" #include "PxHairSystem.h" #endif #if !PX_DOXYGEN namespace physx { #endif class PxScene; class PxSceneDesc; class PxTolerancesScale; class PxPvd; class PxOmniPvd; class PxInsertionCallback; class PxRigidActor; class PxConstraintConnector; struct PxConstraintShaderTable; class PxGeometry; class PxFoundation; class PxPruningStructure; class PxBVH; class PxParticleClothBuffer; class PxParticleRigidBuffer; class PxSoftBodyMesh; /** \brief Abstract singleton factory class used for instancing objects in the Physics SDK. In addition you can use PxPhysics to set global parameters which will effect all scenes and create objects that can be shared across multiple scenes. You can get an instance of this class by calling PxCreatePhysics(). @see PxCreatePhysics() PxScene */ class PxPhysics { public: /** @name Basics */ //@{ virtual ~PxPhysics() {} /** \brief Destroys the instance it is called on. Use this release method to destroy an instance of this class. Be sure to not keep a reference to this object after calling release. Avoid release calls while a scene is simulating (in between simulate() and fetchResults() calls). Note that this must be called once for each prior call to PxCreatePhysics, as there is a reference counter. Also note that you mustn't destroy the PxFoundation instance (holding the allocator, error callback etc.) until after the reference count reaches 0 and the SDK is actually removed. 
Releasing an SDK will also release any objects created through it (scenes, triangle meshes, convex meshes, heightfields, shapes etc.), provided the user hasn't already done so. \note Releasing the PxPhysics instance is a prerequisite to releasing the PxFoundation instance. @see PxCreatePhysics() PxFoundation */ virtual void release() = 0; /** \brief Retrieves the Foundation instance. \return A reference to the Foundation object. */ virtual PxFoundation& getFoundation() = 0; /** \brief Retrieves the PxOmniPvd instance if there is one registered with PxPhysics. \return A pointer to a PxOmniPvd object. */ virtual PxOmniPvd* getOmniPvd() = 0; /** \brief Creates an aggregate with the specified maximum size and filtering hint. The previous API used "bool enableSelfCollision" which should now silently evaluates to a PxAggregateType::eGENERIC aggregate with its self-collision bit. Use PxAggregateType::eSTATIC or PxAggregateType::eKINEMATIC for aggregates that will only contain static or kinematic actors. This provides faster filtering when used in combination with PxPairFilteringMode. \param [in] maxActor The maximum number of actors that may be placed in the aggregate. \param [in] maxShape The maximum number of shapes that may be placed in the aggregate. \param [in] filterHint The aggregate's filtering hint. \return The new aggregate. @see PxAggregate PxAggregateFilterHint PxAggregateType PxPairFilteringMode */ virtual PxAggregate* createAggregate(PxU32 maxActor, PxU32 maxShape, PxAggregateFilterHint filterHint) = 0; /** \brief Returns the simulation tolerance parameters. \return The current simulation tolerance parameters. */ virtual const PxTolerancesScale& getTolerancesScale() const = 0; //@} /** @name Meshes */ //@{ /** \brief Creates a triangle mesh object. This can then be instanced into #PxShape objects. \param [in] stream The triangle mesh stream. \return The new triangle mesh. @see PxTriangleMesh PxMeshPreprocessingFlag PxTriangleMesh.release() PxInputStream PxTriangleMeshFlag */ virtual PxTriangleMesh* createTriangleMesh(PxInputStream& stream) = 0; /** \brief Return the number of triangle meshes that currently exist. \return Number of triangle meshes. @see getTriangleMeshes() */ virtual PxU32 getNbTriangleMeshes() const = 0; /** \brief Writes the array of triangle mesh pointers to a user buffer. Returns the number of pointers written. The ordering of the triangle meshes in the array is not specified. \param [out] userBuffer The buffer to receive triangle mesh pointers. \param [in] bufferSize The number of triangle mesh pointers which can be stored in the buffer. \param [in] startIndex Index of first mesh pointer to be retrieved. \return The number of triangle mesh pointers written to userBuffer, this should be less or equal to bufferSize. @see getNbTriangleMeshes() PxTriangleMesh */ virtual PxU32 getTriangleMeshes(PxTriangleMesh** userBuffer, PxU32 bufferSize, PxU32 startIndex = 0) const = 0; //@} /** @name Tetrahedron Meshes */ //@{ /** \brief Creates a tetrahedron mesh object. This can then be instanced into #PxShape objects. \param[in] stream The tetrahedron mesh stream. \return The new tetrahedron mesh. @see PxTetrahedronMesh PxMeshPreprocessingFlag PxTetrahedronMesh.release() PxInputStream PxTriangleMeshFlag */ virtual PxTetrahedronMesh* createTetrahedronMesh(PxInputStream& stream) = 0; /** \brief Creates a softbody mesh object. \param[in] stream The softbody mesh stream. \return The new softbody mesh. 
@see createTetrahedronMesh */ virtual PxSoftBodyMesh* createSoftBodyMesh(PxInputStream& stream) = 0; /** \brief Return the number of tetrahedron meshes that currently exist. \return Number of tetrahedron meshes. @see getTetrahedronMeshes() */ virtual PxU32 getNbTetrahedronMeshes() const = 0; /** \brief Writes the array of tetrahedron mesh pointers to a user buffer. Returns the number of pointers written. The ordering of the tetrahedron meshes in the array is not specified. \param[out] userBuffer The buffer to receive tetrahedron mesh pointers. \param[in] bufferSize The number of tetrahedron mesh pointers which can be stored in the buffer. \param[in] startIndex Index of first mesh pointer to be retrieved. \return The number of tetrahedron mesh pointers written to userBuffer, this should be less or equal to bufferSize. @see getNbTetrahedronMeshes() PxTetrahedronMesh */ virtual PxU32 getTetrahedronMeshes(PxTetrahedronMesh** userBuffer, PxU32 bufferSize, PxU32 startIndex = 0) const = 0; /** \brief Creates a heightfield object from previously cooked stream. This can then be instanced into #PxShape objects. \param [in] stream The heightfield mesh stream. \return The new heightfield. @see PxHeightField PxHeightField.release() PxInputStream */ virtual PxHeightField* createHeightField(PxInputStream& stream) = 0; /** \brief Return the number of heightfields that currently exist. \return Number of heightfields. @see getHeightFields() */ virtual PxU32 getNbHeightFields() const = 0; /** \brief Writes the array of heightfield pointers to a user buffer. Returns the number of pointers written. The ordering of the heightfields in the array is not specified. \param [out] userBuffer The buffer to receive heightfield pointers. \param [in] bufferSize The number of heightfield pointers which can be stored in the buffer. \param [in] startIndex Index of first heightfield pointer to be retrieved. \return The number of heightfield pointers written to userBuffer, this should be less or equal to bufferSize. @see getNbHeightFields() PxHeightField */ virtual PxU32 getHeightFields(PxHeightField** userBuffer, PxU32 bufferSize, PxU32 startIndex = 0) const = 0; /** \brief Creates a convex mesh object. This can then be instanced into #PxShape objects. \param [in] stream The stream to load the convex mesh from. \return The new convex mesh. @see PxConvexMesh PxConvexMesh.release() PxInputStream createTriangleMesh() PxConvexMeshGeometry PxShape */ virtual PxConvexMesh* createConvexMesh(PxInputStream& stream) = 0; /** \brief Return the number of convex meshes that currently exist. \return Number of convex meshes. @see getConvexMeshes() */ virtual PxU32 getNbConvexMeshes() const = 0; /** \brief Writes the array of convex mesh pointers to a user buffer. Returns the number of pointers written. The ordering of the convex meshes in the array is not specified. \param [out] userBuffer The buffer to receive convex mesh pointers. \param [in] bufferSize The number of convex mesh pointers which can be stored in the buffer. \param [in] startIndex Index of first convex mesh pointer to be retrieved. \return The number of convex mesh pointers written to userBuffer, this should be less or equal to bufferSize. @see getNbConvexMeshes() PxConvexMesh */ virtual PxU32 getConvexMeshes(PxConvexMesh** userBuffer, PxU32 bufferSize, PxU32 startIndex = 0) const = 0; /** \brief Creates a bounding volume hierarchy. \param [in] stream The stream to load the BVH from. \return The new BVH. 
@see PxBVH PxInputStream */ virtual PxBVH* createBVH(PxInputStream& stream) = 0; /** \brief Return the number of bounding volume hierarchies that currently exist. \return Number of bounding volume hierarchies. @see PxBVH getBVHs() */ virtual PxU32 getNbBVHs() const = 0; /** \brief Writes the array of bounding volume hierarchy pointers to a user buffer. Returns the number of pointers written. The ordering of the BVHs in the array is not specified. \param [out] userBuffer The buffer to receive BVH pointers. \param [in] bufferSize The number of BVH pointers which can be stored in the buffer. \param [in] startIndex Index of first BVH pointer to be retrieved. \return The number of BVH pointers written to userBuffer, this should be less or equal to bufferSize. @see getNbBVHs() PxBVH */ virtual PxU32 getBVHs(PxBVH** userBuffer, PxU32 bufferSize, PxU32 startIndex = 0) const = 0; //@} /** @name Scenes */ //@{ /** \brief Creates a scene. \note Every scene uses a Thread Local Storage slot. This imposes a platform specific limit on the number of scenes that can be created. \param [in] sceneDesc Scene descriptor. See #PxSceneDesc \return The new scene object. @see PxScene PxScene.release() PxSceneDesc */ virtual PxScene* createScene(const PxSceneDesc& sceneDesc) = 0; /** \brief Gets number of created scenes. \return The number of scenes created. @see getScenes() */ virtual PxU32 getNbScenes() const = 0; /** \brief Writes the array of scene pointers to a user buffer. Returns the number of pointers written. The ordering of the scene pointers in the array is not specified. \param [out] userBuffer The buffer to receive scene pointers. \param [in] bufferSize The number of scene pointers which can be stored in the buffer. \param [in] startIndex Index of first scene pointer to be retrieved. \return The number of scene pointers written to userBuffer, this should be less or equal to bufferSize. @see getNbScenes() PxScene */ virtual PxU32 getScenes(PxScene** userBuffer, PxU32 bufferSize, PxU32 startIndex = 0) const = 0; //@} /** @name Actors */ //@{ /** \brief Creates a static rigid actor with the specified pose and all other fields initialized to their default values. \param [in] pose The initial pose of the actor. Must be a valid transform. @see PxRigidStatic */ virtual PxRigidStatic* createRigidStatic(const PxTransform& pose) = 0; /** \brief Creates a dynamic rigid actor with the specified pose and all other fields initialized to their default values. \param [in] pose The initial pose of the actor. Must be a valid transform. @see PxRigidDynamic */ virtual PxRigidDynamic* createRigidDynamic(const PxTransform& pose) = 0; /** \brief Creates a pruning structure from actors. \note Every provided actor needs at least one shape with the eSCENE_QUERY_SHAPE flag set. \note Both static and dynamic actors can be provided. \note It is not allowed to pass in actors which are already part of a scene. \note Articulation links cannot be provided. \param [in] actors Array of actors to add to the pruning structure. Must be non NULL. \param [in] nbActors Number of actors in the array. Must be >0. \return Pruning structure created from given actors, or NULL if any of the actors did not comply with the above requirements. @see PxActor PxPruningStructure */ virtual PxPruningStructure* createPruningStructure(PxRigidActor*const* actors, PxU32 nbActors) = 0; //@} /** @name Shapes */ //@{ /** \brief Creates a shape which may be attached to multiple actors The shape will be created with a reference count of 1. 
\param [in] geometry The geometry for the shape \param [in] material The material for the shape \param [in] isExclusive Whether this shape is exclusive to a single actor or may be shared \param [in] shapeFlags The PxShapeFlags to be set \return The shape \note Shared shapes are not mutable when they are attached to an actor @see PxShape */ PX_FORCE_INLINE PxShape* createShape( const PxGeometry& geometry, const PxMaterial& material, bool isExclusive = false, PxShapeFlags shapeFlags = PxShapeFlag::eVISUALIZATION | PxShapeFlag::eSCENE_QUERY_SHAPE | PxShapeFlag::eSIMULATION_SHAPE) { PxMaterial* materialPtr = const_cast<PxMaterial*>(&material); return createShape(geometry, &materialPtr, 1, isExclusive, shapeFlags); } /** \brief Creates a shape which may be attached to one or more softbody actors The shape will be created with a reference count of 1. \param [in] geometry The geometry for the shape \param [in] material The material for the shape \param [in] isExclusive Whether this shape is exclusive to a single actor or may be shared \param [in] shapeFlags The PxShapeFlags to be set \return The shape \note Shared shapes are not mutable when they are attached to an actor @see PxShape */ PX_FORCE_INLINE PxShape* createShape( const PxGeometry& geometry, const PxFEMSoftBodyMaterial& material, bool isExclusive = false, PxShapeFlags shapeFlags = PxShapeFlag::eVISUALIZATION | PxShapeFlag::eSCENE_QUERY_SHAPE | PxShapeFlag::eSIMULATION_SHAPE) { PxFEMSoftBodyMaterial* materialPtr = const_cast<PxFEMSoftBodyMaterial*>(&material); return createShape(geometry, &materialPtr, 1, isExclusive, shapeFlags); } #if PX_ENABLE_FEATURES_UNDER_CONSTRUCTION /** \brief Creates a shape which may be attached to one or more FEMCloth actors The shape will be created with a reference count of 1. \param [in] geometry The geometry for the shape \param [in] material The material for the shape \param [in] isExclusive Whether this shape is exclusive to a single actor or may be shared \param [in] shapeFlags The PxShapeFlags to be set \return The shape \note Shared shapes are not mutable when they are attached to an actor @see PxShape */ PX_FORCE_INLINE PxShape* createShape( const PxGeometry& geometry, const PxFEMClothMaterial& material, bool isExclusive = false, PxShapeFlags shapeFlags = PxShapeFlag::eVISUALIZATION | PxShapeFlag::eSCENE_QUERY_SHAPE | PxShapeFlag::eSIMULATION_SHAPE) { PxFEMClothMaterial* materialPtr = const_cast<PxFEMClothMaterial*>(&material); return createShape(geometry, &materialPtr, 1, isExclusive, shapeFlags); } #endif /** \brief Creates a shape which may be attached to multiple actors The shape will be created with a reference count of 1. \param [in] geometry The geometry for the shape \param [in] materials The materials for the shape \param [in] materialCount The number of materials \param [in] isExclusive Whether this shape is exclusive to a single actor or may be shared \param [in] shapeFlags The PxShapeFlags to be set \return The shape \note Shared shapes are not mutable when they are attached to an actor \note Shapes created from *SDF* triangle-mesh geometries do not support more than one material. 
@see PxShape */ virtual PxShape* createShape( const PxGeometry& geometry, PxMaterial*const * materials, PxU16 materialCount, bool isExclusive = false, PxShapeFlags shapeFlags = PxShapeFlag::eVISUALIZATION | PxShapeFlag::eSCENE_QUERY_SHAPE | PxShapeFlag::eSIMULATION_SHAPE) = 0; virtual PxShape* createShape( const PxGeometry& geometry, PxFEMSoftBodyMaterial*const * materials, PxU16 materialCount, bool isExclusive = false, PxShapeFlags shapeFlags = PxShapeFlag::eVISUALIZATION | PxShapeFlag::eSCENE_QUERY_SHAPE | PxShapeFlag::eSIMULATION_SHAPE) = 0; virtual PxShape* createShape( const PxGeometry& geometry, PxFEMClothMaterial*const * materials, PxU16 materialCount, bool isExclusive = false, PxShapeFlags shapeFlags = PxShapeFlag::eVISUALIZATION | PxShapeFlag::eSCENE_QUERY_SHAPE | PxShapeFlag::eSIMULATION_SHAPE) = 0; /** \brief Return the number of shapes that currently exist. \return Number of shapes. @see getShapes() */ virtual PxU32 getNbShapes() const = 0; /** \brief Writes the array of shape pointers to a user buffer. Returns the number of pointers written. The ordering of the shapes in the array is not specified. \param [out] userBuffer The buffer to receive shape pointers. \param [in] bufferSize The number of shape pointers which can be stored in the buffer. \param [in] startIndex Index of first shape pointer to be retrieved \return The number of shape pointers written to userBuffer, this should be less or equal to bufferSize. @see getNbShapes() PxShape */ virtual PxU32 getShapes(PxShape** userBuffer, PxU32 bufferSize, PxU32 startIndex = 0) const = 0; //@} /** @name Constraints and Articulations */ //@{ /** \brief Creates a constraint shader. \note A constraint shader will get added automatically to the scene the two linked actors belong to. Either, but not both, of actor0 and actor1 may be NULL to denote attachment to the world. \param [in] actor0 The first actor \param [in] actor1 The second actor \param [in] connector The connector object, which the SDK uses to communicate with the infrastructure for the constraint \param [in] shaders The shader functions for the constraint \param [in] dataSize The size of the data block for the shader \return The new constraint shader. @see PxConstraint */ virtual PxConstraint* createConstraint(PxRigidActor* actor0, PxRigidActor* actor1, PxConstraintConnector& connector, const PxConstraintShaderTable& shaders, PxU32 dataSize) = 0; /** \brief Creates a reduced-coordinate articulation with all fields initialized to their default values. \return the new articulation @see PxArticulationReducedCoordinate */ virtual PxArticulationReducedCoordinate* createArticulationReducedCoordinate() = 0; /** \brief Creates a FEM-based cloth with all fields initialized to their default values. \warning Feature under development, only for internal usage. \param[in] cudaContextManager The PxCudaContextManager this instance is tied to. \return the new FEM-cloth @see PxFEMCloth */ virtual PxFEMCloth* createFEMCloth(PxCudaContextManager& cudaContextManager) = 0; /** \brief Creates a FEM-based soft body with all fields initialized to their default values. \param[in] cudaContextManager The PxCudaContextManager this instance is tied to. \return the new soft body @see PxSoftBody */ virtual PxSoftBody* createSoftBody(PxCudaContextManager& cudaContextManager) = 0; /** \brief Creates a hair system with all fields initialized to their default values. \warning Feature under development, only for internal usage. 
\param[in] cudaContextManager The PxCudaContextManager this instance is tied to. \return the new hair system @see PxHairSystem */ virtual PxHairSystem* createHairSystem(PxCudaContextManager& cudaContextManager) = 0; /** \brief Creates a particle system with a position-based dynamics (PBD) solver. A PBD particle system can be used to simulate particle systems with fluid and granular particles. It also allows simulating cloth using mass-spring constraints and rigid bodies by shape matching the bodies with particles. \param[in] cudaContextManager The PxCudaContextManager this instance is tied to. \param[in] maxNeighborhood The maximum number of particles considered in neighborhood-based particle interaction calculations (e.g. fluid density constraints). \return the new particle system @see PxPBDParticleSystem */ virtual PxPBDParticleSystem* createPBDParticleSystem(PxCudaContextManager& cudaContextManager, PxU32 maxNeighborhood = 96) = 0; /** \brief Creates a particle system with a fluid-implicit particle solver (FLIP). \warning Feature under development, only for internal usage. \param[in] cudaContextManager The PxCudaContextManager this instance is tied to. \return the new particle system @see PxFLIPParticleSystem */ virtual PxFLIPParticleSystem* createFLIPParticleSystem(PxCudaContextManager& cudaContextManager) = 0; /** \brief Creates a particle system with a material-point-method solver (MPM). \warning Feature under development, only for internal usage. A MPM particle system can be used to simulate fluid dynamics and deformable body effects using particles. \param[in] cudaContextManager The PxCudaContextManager this instance is tied to. \return the new particle system @see PxMPMParticleSystem */ virtual PxMPMParticleSystem* createMPMParticleSystem(PxCudaContextManager& cudaContextManager) = 0; /** \brief Create particle buffer to simulate fluid/granular material. \param[in] maxParticles The maximum number of particles in this buffer. \param[in] maxVolumes The maximum number of volumes in this buffer. See PxParticleVolume. \param[in] cudaContextManager The PxCudaContextManager this buffer is tied to. \return PxParticleBuffer instance @see PxParticleBuffer */ virtual PxParticleBuffer* createParticleBuffer(PxU32 maxParticles, PxU32 maxVolumes, PxCudaContextManager* cudaContextManager) = 0; /** \brief Create a particle buffer for fluid dynamics with diffuse particles. Diffuse particles are used to simulate fluid effects such as foam, spray and bubbles. \param[in] maxParticles The maximum number of particles in this buffer. \param[in] maxVolumes The maximum number of volumes in this buffer. See #PxParticleVolume. \param[in] maxDiffuseParticles The max number of diffuse particles int this buffer. \param[in] cudaContextManager The PxCudaContextManager this buffer is tied to. \return PxParticleAndDiffuseBuffer instance @see PxParticleAndDiffuseBuffer, PxDiffuseParticleParams */ virtual PxParticleAndDiffuseBuffer* createParticleAndDiffuseBuffer(PxU32 maxParticles, PxU32 maxVolumes, PxU32 maxDiffuseParticles, PxCudaContextManager* cudaContextManager) = 0; /** \brief Create a particle buffer to simulate particle cloth. \param[in] maxParticles The maximum number of particles in this buffer. \param[in] maxNumVolumes The maximum number of volumes in this buffer. See #PxParticleVolume. \param[in] maxNumCloths The maximum number of cloths in this buffer. See #PxParticleCloth. \param[in] maxNumTriangles The maximum number of triangles for aerodynamics. 
\param[in] maxNumSprings The maximum number of springs to connect particles. See #PxParticleSpring. \param[in] cudaContextManager The PxCudaContextManager this buffer is tied to. \return PxParticleClothBuffer instance @see PxParticleClothBuffer */ virtual PxParticleClothBuffer* createParticleClothBuffer(PxU32 maxParticles, PxU32 maxNumVolumes, PxU32 maxNumCloths, PxU32 maxNumTriangles, PxU32 maxNumSprings, PxCudaContextManager* cudaContextManager) = 0; /** \brief Create a particle buffer to simulate rigid bodies using shape matching with particles. \param[in] maxParticles The maximum number of particles in this buffer. \param[in] maxNumVolumes The maximum number of volumes in this buffer. See #PxParticleVolume. \param[in] maxNumRigids The maximum number of rigid bodies this buffer is used to simulate. \param[in] cudaContextManager The PxCudaContextManager this buffer is tied to. \return PxParticleRigidBuffer instance @see PxParticleRigidBuffer */ virtual PxParticleRigidBuffer* createParticleRigidBuffer(PxU32 maxParticles, PxU32 maxNumVolumes, PxU32 maxNumRigids, PxCudaContextManager* cudaContextManager) = 0; //@} /** @name Materials */ //@{ /** \brief Creates a new rigid body material with certain default properties. \return The new rigid body material. \param [in] staticFriction The coefficient of static friction \param [in] dynamicFriction The coefficient of dynamic friction \param [in] restitution The coefficient of restitution @see PxMaterial */ virtual PxMaterial* createMaterial(PxReal staticFriction, PxReal dynamicFriction, PxReal restitution) = 0; /** \brief Return the number of rigid body materials that currently exist. \return Number of rigid body materials. @see getMaterials() */ virtual PxU32 getNbMaterials() const = 0; /** \brief Writes the array of rigid body material pointers to a user buffer. Returns the number of pointers written. The ordering of the materials in the array is not specified. \param [out] userBuffer The buffer to receive material pointers. \param [in] bufferSize The number of material pointers which can be stored in the buffer. \param [in] startIndex Index of first material pointer to be retrieved. \return The number of material pointers written to userBuffer, this should be less or equal to bufferSize. @see getNbMaterials() PxMaterial */ virtual PxU32 getMaterials(PxMaterial** userBuffer, PxU32 bufferSize, PxU32 startIndex = 0) const = 0; /** \brief Creates a new FEM soft body material with certain default properties. \return The new FEM material. \param [in] youngs The young's modulus \param [in] poissons The poissons's ratio \param [in] dynamicFriction The dynamic friction coefficient @see PxFEMSoftBodyMaterial */ virtual PxFEMSoftBodyMaterial* createFEMSoftBodyMaterial(PxReal youngs, PxReal poissons, PxReal dynamicFriction) = 0; /** \brief Return the number of FEM soft body materials that currently exist. \return Number of FEM materials. @see getFEMSoftBodyMaterials() */ virtual PxU32 getNbFEMSoftBodyMaterials() const = 0; /** \brief Writes the array of FEM soft body material pointers to a user buffer. Returns the number of pointers written. The ordering of the materials in the array is not specified. \param [out] userBuffer The buffer to receive material pointers. \param [in] bufferSize The number of material pointers which can be stored in the buffer. \param [in] startIndex Index of first material pointer to be retrieved. \return The number of material pointers written to userBuffer, this should be less or equal to bufferSize. 
@see getNbFEMSoftBodyMaterials() PxFEMSoftBodyMaterial */ virtual PxU32 getFEMSoftBodyMaterials(PxFEMSoftBodyMaterial** userBuffer, PxU32 bufferSize, PxU32 startIndex = 0) const = 0; /** \brief Creates a new FEM cloth material with certain default properties. \warning Feature under development, only for internal usage. \return The new FEM material. \param [in] youngs The young's modulus \param [in] poissons The poissons's ratio \param [in] dynamicFriction The dynamic friction coefficient \param [in] thickness The cloth's thickness @see PxFEMClothMaterial */ virtual PxFEMClothMaterial* createFEMClothMaterial(PxReal youngs, PxReal poissons, PxReal dynamicFriction, PxReal thickness = 0.001f) = 0; /** \brief Return the number of FEM cloth materials that currently exist. \return Number of FEM cloth materials. @see getFEMClothMaterials() */ virtual PxU32 getNbFEMClothMaterials() const = 0; /** \brief Writes the array of FEM cloth material pointers to a user buffer. Returns the number of pointers written. The ordering of the materials in the array is not specified. \param [out] userBuffer The buffer to receive material pointers. \param [in] bufferSize The number of material pointers which can be stored in the buffer. \param [in] startIndex Index of first material pointer to be retrieved. \return The number of material pointers written to userBuffer, this should be less or equal to bufferSize. @see getNbFEMClothMaterials() PxFEMClothMaterial */ virtual PxU32 getFEMClothMaterials(PxFEMClothMaterial** userBuffer, PxU32 bufferSize, PxU32 startIndex = 0) const = 0; /** \brief Creates a new PBD material with certain default properties. \param [in] friction The friction parameter \param [in] damping The velocity damping parameter \param [in] adhesion The adhesion parameter \param [in] viscosity The viscosity parameter \param [in] vorticityConfinement The vorticity confinement coefficient \param [in] surfaceTension The surface tension coefficient \param [in] cohesion The cohesion parameter \param [in] lift The lift parameter \param [in] drag The drag parameter \param [in] cflCoefficient The Courant-Friedrichs-Lewy(cfl) coefficient \param [in] gravityScale The gravity scale \return The new PBD material. @see PxPBDMaterial */ virtual PxPBDMaterial* createPBDMaterial(PxReal friction, PxReal damping, PxReal adhesion, PxReal viscosity, PxReal vorticityConfinement, PxReal surfaceTension, PxReal cohesion, PxReal lift, PxReal drag, PxReal cflCoefficient = 1.f, PxReal gravityScale = 1.f) = 0; /** \brief Return the number of PBD materials that currently exist. \return Number of PBD materials. @see getPBDMaterials() */ virtual PxU32 getNbPBDMaterials() const = 0; /** \brief Writes the array of PBD material pointers to a user buffer. Returns the number of pointers written. The ordering of the materials in the array is not specified. \param [out] userBuffer The buffer to receive material pointers. \param [in] bufferSize The number of material pointers which can be stored in the buffer. \param [in] startIndex Index of first material pointer to be retrieved. \return The number of material pointers written to userBuffer, this should be less or equal to bufferSize. @see getNbPBDMaterials() PxPBDMaterial */ virtual PxU32 getPBDMaterials(PxPBDMaterial** userBuffer, PxU32 bufferSize, PxU32 startIndex = 0) const = 0; /** \brief Creates a new FLIP material with certain default properties. \warning Feature under development, only for internal usage. 
\param [in] friction The friction parameter \param [in] damping The velocity damping parameter \param [in] adhesion The maximum velocity magnitude of particles \param [in] viscosity The viscosity parameter \param [in] gravityScale The gravity scale \return The new FLIP material. @see PxFLIPMaterial */ virtual PxFLIPMaterial* createFLIPMaterial(PxReal friction, PxReal damping, PxReal adhesion, PxReal viscosity, PxReal gravityScale = 1.f) = 0; /** \brief Return the number of FLIP materials that currently exist. \warning Feature under development, only for internal usage. \return Number of FLIP materials. @see getFLIPMaterials() */ virtual PxU32 getNbFLIPMaterials() const = 0; /** \brief Writes the array of FLIP material pointers to a user buffer. \warning Feature under development, only for internal usage. Returns the number of pointers written. The ordering of the materials in the array is not specified. \param [out] userBuffer The buffer to receive material pointers. \param [in] bufferSize The number of material pointers which can be stored in the buffer. \param [in] startIndex Index of first material pointer to be retrieved. \return The number of material pointers written to userBuffer, this should be less or equal to bufferSize. @see getNbFLIPMaterials() PxFLIPMaterial */ virtual PxU32 getFLIPMaterials(PxFLIPMaterial** userBuffer, PxU32 bufferSize, PxU32 startIndex = 0) const = 0; /** \brief Creates a new MPM material with certain default properties. \warning Feature under development, only for internal usage. \param [in] friction The friction parameter \param [in] damping The velocity damping parameter \param [in] adhesion The maximum velocity magnitude of particles \param [in] isPlastic True if plastic \param [in] youngsModulus The Young's modulus \param [in] poissons The Poissons's ratio \param [in] hardening The hardening parameter \param [in] criticalCompression The critical compression parameter \param [in] criticalStretch The critical stretch parameter \param [in] tensileDamageSensitivity The tensile damage sensitivity parameter \param [in] compressiveDamageSensitivity The compressive damage sensitivity parameter \param [in] attractiveForceResidual The attractive force residual parameter \param [in] gravityScale The gravity scale \return The new MPM material. @see PxMPMMaterial */ virtual PxMPMMaterial* createMPMMaterial(PxReal friction, PxReal damping, PxReal adhesion, bool isPlastic, PxReal youngsModulus, PxReal poissons, PxReal hardening, PxReal criticalCompression, PxReal criticalStretch, PxReal tensileDamageSensitivity, PxReal compressiveDamageSensitivity, PxReal attractiveForceResidual, PxReal gravityScale = 1.0f) = 0; /** \brief Return the number of MPM materials that currently exist. \warning Feature under development, only for internal usage. \return Number of MPM materials. @see getMPMMaterials() */ virtual PxU32 getNbMPMMaterials() const = 0; /** \brief Writes the array of MPM material pointers to a user buffer. \warning Feature under development, only for internal usage. Returns the number of pointers written. The ordering of the materials in the array is not specified. \param [out] userBuffer The buffer to receive material pointers. \param [in] bufferSize The number of material pointers which can be stored in the buffer. \param [in] startIndex Index of first material pointer to be retrieved. \return The number of material pointers written to userBuffer, this should be less or equal to bufferSize. 
@see getNbMPMMaterials() PxMPMMaterial */ virtual PxU32 getMPMMaterials(PxMPMMaterial** userBuffer, PxU32 bufferSize, PxU32 startIndex = 0) const = 0; //@} /** @name Deletion Listeners */ //@{ /** \brief Register a deletion listener. Listeners will be called whenever an object is deleted. It is illegal to register or unregister a deletion listener while deletions are being processed. \note By default a registered listener will receive events from all objects. Set the restrictedObjectSet parameter to true on registration and use #registerDeletionListenerObjects to restrict the received events to specific objects. \note The deletion events are only supported on core PhysX objects. In general, objects in extension modules do not provide this functionality, however, in the case of PxJoint objects, the underlying PxConstraint will send the events. \param [in] observer Observer object to send notifications to. \param [in] deletionEvents The deletion event types to get notified of. \param [in] restrictedObjectSet If false, the deletion listener will get events from all objects, else the objects to receive events from have to be specified explicitly through #registerDeletionListenerObjects. @see PxDeletionListener unregisterDeletionListener */ virtual void registerDeletionListener(PxDeletionListener& observer, const PxDeletionEventFlags& deletionEvents, bool restrictedObjectSet = false) = 0; /** \brief Unregister a deletion listener. It is illegal to register or unregister a deletion listener while deletions are being processed. \param [in] observer Observer object to stop sending notifications to. @see PxDeletionListener registerDeletionListener */ virtual void unregisterDeletionListener(PxDeletionListener& observer) = 0; /** \brief Register specific objects for deletion events. This method allows for a deletion listener to limit deletion events to specific objects only. \note It is illegal to register or unregister objects while deletions are being processed. \note The deletion listener has to be registered through #registerDeletionListener() and configured to support restricted object sets prior to this method being used. \param [in] observer Observer object to send notifications to. \param [in] observables List of objects for which to receive deletion events. Only PhysX core objects are supported. In the case of PxJoint objects, the underlying PxConstraint can be used to get the events. \param [in] observableCount Size of the observables list. @see PxDeletionListener unregisterDeletionListenerObjects */ virtual void registerDeletionListenerObjects(PxDeletionListener& observer, const PxBase* const* observables, PxU32 observableCount) = 0; /** \brief Unregister specific objects for deletion events. This method allows to clear previously registered objects for a deletion listener (see #registerDeletionListenerObjects()). \note It is illegal to register or unregister objects while deletions are being processed. \note The deletion listener has to be registered through #registerDeletionListener() and configured to support restricted object sets prior to this method being used. \param [in] observer Observer object to stop sending notifications to. \param [in] observables List of objects for which to not receive deletion events anymore. \param [in] observableCount Size of the observables list. 
@see PxDeletionListener registerDeletionListenerObjects */ virtual void unregisterDeletionListenerObjects(PxDeletionListener& observer, const PxBase* const* observables, PxU32 observableCount) = 0; /** \brief Gets PxPhysics object insertion interface. The insertion interface is needed for PxCreateTriangleMesh, PxCooking::createTriangleMesh etc., this allows runtime mesh creation. @see PxCreateTriangleMesh PxCreateHeightField PxCreateTetrahedronMesh PxCreateBVH PxCooking::createTriangleMesh PxCooking::createHeightfield PxCooking::createTetrahedronMesh PxCooking::createBVH */ virtual PxInsertionCallback& getPhysicsInsertionCallback() = 0; //@} }; #if !PX_DOXYGEN } // namespace physx #endif /** \brief Creates an instance of the physics SDK. Creates an instance of this class. May not be a class member to avoid name mangling. Pass the constant #PX_PHYSICS_VERSION as the argument. There may be only one instance of this class per process. Calling this method after an instance has been created already will result in an error message and NULL will be returned. \param version Version number we are expecting (should be #PX_PHYSICS_VERSION) \param foundation Foundation instance (see PxFoundation) \param scale values used to determine default tolerances for objects at creation time \param trackOutstandingAllocations true if you want to track memory allocations so a debugger connection partway through your physics simulation will get an accurate map of everything that has been allocated so far. This could have a memory and performance impact on your simulation hence it defaults to off. \param pvd When pvd points to a valid PxPvd instance (PhysX Visual Debugger), a connection to the specified PxPvd instance is created. If pvd is NULL no connection will be attempted. \param omniPvd When omniPvd points to a valid PxOmniPvd instance PhysX will sample its internal structures to the defined OmniPvd output streams set in the PxOmniPvd object. \return PxPhysics instance on success, NULL if operation failed @see PxPhysics */ PX_C_EXPORT PX_PHYSX_CORE_API physx::PxPhysics* PxCreatePhysics(physx::PxU32 version, physx::PxFoundation& foundation, const physx::PxTolerancesScale& scale, bool trackOutstandingAllocations = false, physx::PxPvd* pvd = NULL, physx::PxOmniPvd* omniPvd = NULL); /** \brief Retrieves the Physics SDK after it has been created. Before using this function the user must call #PxCreatePhysics(). \note The behavior of this method is undefined if the Physics SDK instance has not been created already. */ #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wreturn-type-c-linkage" #endif PX_C_EXPORT PX_PHYSX_CORE_API physx::PxPhysics& PX_CALL_CONV PxGetPhysics(); #ifdef __clang__ #pragma clang diagnostic pop #endif /** @} */ #endif
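// --- Usage sketch (not part of the SDK header) -------------------------------
// A minimal illustration of the buffered-retrieval convention shared by the
// getNbShapes()/getShapes(), getNbMaterials()/getMaterials() and the other
// getNb*/get* pairs documented above: query the total count, then page through
// the pointers with a fixed-size buffer and the startIndex parameter. Assumes
// the umbrella include PxPhysicsAPI.h and an already-created PxPhysics instance.
#include "PxPhysicsAPI.h"

static void visitAllMaterials(physx::PxPhysics& physics)
{
	using namespace physx;

	const PxU32 total = physics.getNbMaterials();
	PxMaterial* buffer[32];

	for(PxU32 start = 0; start < total; start += 32)
	{
		// The return value is the number of pointers actually written,
		// which is at most the buffer size.
		const PxU32 written = physics.getMaterials(buffer, 32, start);
		for(PxU32 i = 0; i < written; i++)
			PX_UNUSED(buffer[i]);	// inspect each material here
	}
}
// -----------------------------------------------------------------------------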
42,189
C
36.43567
351
0.770627
NVIDIA-Omniverse/PhysX/physx/include/PxImmediateMode.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_IMMEDIATE_MODE_H #define PX_IMMEDIATE_MODE_H /** \addtogroup immediatemode @{ */ #include "PxPhysXConfig.h" #include "foundation/PxMemory.h" #include "solver/PxSolverDefs.h" #include "collision/PxCollisionDefs.h" #include "PxArticulationReducedCoordinate.h" #if !PX_DOXYGEN namespace physx { #endif class PxCudaContextManager; class PxBaseTask; class PxGeometry; #if !PX_DOXYGEN namespace immediate { #endif typedef void* PxArticulationHandle; /** \brief Structure to store linear and angular components of spatial vector */ struct PxSpatialVector { PxVec3 top; PxReal pad0; PxVec3 bottom; PxReal pad1; }; /** \brief Structure to store rigid body properties */ struct PxRigidBodyData { PX_ALIGN(16, PxVec3 linearVelocity); //!< 12 Linear velocity PxReal invMass; //!< 16 Inverse mass PxVec3 angularVelocity; //!< 28 Angular velocity PxReal maxDepenetrationVelocity; //!< 32 Maximum de-penetration velocity PxVec3 invInertia; //!< 44 Mass-space inverse interia diagonal vector PxReal maxContactImpulse; //!< 48 Maximum permissable contact impulse PxTransform body2World; //!< 76 World space transform PxReal linearDamping; //!< 80 Linear damping coefficient PxReal angularDamping; //!< 84 Angular damping coefficient PxReal maxLinearVelocitySq; //!< 88 Squared maximum linear velocity PxReal maxAngularVelocitySq; //!< 92 Squared maximum angular velocity PxU32 pad; //!< 96 Padding for 16-byte alignment }; /** \brief Callback class to record contact points produced by immediate::PxGenerateContacts */ class PxContactRecorder { public: /** \brief Method to record new contacts \param [in] contactPoints The contact points produced \param [in] nbContacts The number of contact points produced \param [in] index The index of this pair. 
This is an index from 0-N-1 identifying which pair this relates to from within the array of pairs passed to PxGenerateContacts \return a boolean to indicate if this callback successfully stored the contacts or not. */ virtual bool recordContacts(const PxContactPoint* contactPoints, PxU32 nbContacts, PxU32 index) = 0; virtual ~PxContactRecorder(){} }; /** \brief Constructs a PxSolverBodyData structure based on rigid body properties. Applies gravity, damping and clamps maximum velocity. \param [in] inRigidData The array rigid body properties \param [out] outSolverBodyData The array of solverBodyData produced to represent these bodies \param [in] nbBodies The total number of solver bodies to create \param [in] gravity The gravity vector \param [in] dt The timestep \param [in] gyroscopicForces Indicates whether gyroscopic forces should be integrated */ PX_C_EXPORT PX_PHYSX_CORE_API void PxConstructSolverBodies(const PxRigidBodyData* inRigidData, PxSolverBodyData* outSolverBodyData, PxU32 nbBodies, const PxVec3& gravity, PxReal dt, bool gyroscopicForces = false); /** \brief Constructs a PxSolverBodyData structure for a static body at a given pose. \param [in] globalPose The pose of this static actor \param [out] solverBodyData The solver body representation of this static actor */ PX_C_EXPORT PX_PHYSX_CORE_API void PxConstructStaticSolverBody(const PxTransform& globalPose, PxSolverBodyData& solverBodyData); /** \brief Groups together sets of independent PxSolverConstraintDesc objects to be solved using SIMD SOA approach. \param [in] solverConstraintDescs The set of solver constraint descs to batch \param [in] nbConstraints The number of constraints to batch \param [in,out] solverBodies The array of solver bodies that the constraints reference. Some fields in these structures are written to as scratch memory for the batching. \param [in] nbBodies The number of bodies \param [out] outBatchHeaders The batch headers produced by this batching process. This array must have at least 1 entry per input constraint \param [out] outOrderedConstraintDescs A reordered copy of the constraint descs. This array is referenced by the constraint batches. This array must have at least 1 entry per input constraint. \param [in,out] articulations The array of articulations that the constraints reference. Some fields in these structures are written to as scratch memory for the batching. \param [in] nbArticulations The number of articulations \return The total number of batches produced. This should be less than or equal to nbConstraints. \note This method considers all bodies within the range [0, nbBodies-1] to be valid dynamic bodies. A given dynamic body can only be referenced in a batch once. Static or kinematic bodies can be referenced multiple times within a batch safely because constraints do not affect their velocities. The batching will implicitly consider any bodies outside of the range [0, nbBodies-1] to be infinite mass (static or kinematic). This means that either appending static/kinematic to the end of the array of bodies or placing static/kinematic bodies at before the start body pointer will ensure that the minimum number of batches are produced. 
*/ PX_C_EXPORT PX_PHYSX_CORE_API PxU32 PxBatchConstraints( const PxSolverConstraintDesc* solverConstraintDescs, PxU32 nbConstraints, PxSolverBody* solverBodies, PxU32 nbBodies, PxConstraintBatchHeader* outBatchHeaders, PxSolverConstraintDesc* outOrderedConstraintDescs, PxArticulationHandle* articulations=NULL, PxU32 nbArticulations=0); /** \brief Creates a set of contact constraint blocks. Note that, depending the results of PxBatchConstraints, each batchHeader may refer to up to 4 solverConstraintDescs. This function will allocate both constraint and friction patch data via the PxConstraintAllocator provided. Constraint data is only valid until PxSolveConstraints has completed. Friction data is to be retained and provided by the application for friction correlation. \param [in] batchHeaders Array of batch headers to process \param [in] nbHeaders The total number of headers \param [in] contactDescs An array of contact descs defining the pair and contact properties of each respective contacting pair \param [in] allocator An allocator callback to allocate constraint and friction memory \param [in] invDt The inverse timestep \param [in] bounceThreshold The bounce threshold. Relative velocities below this will be solved by bias only. Relative velocities above this will be solved by restitution. If restitution is zero then these pairs will always be solved by bias. \param [in] frictionOffsetThreshold The friction offset threshold. Contacts whose separations are below this threshold can generate friction constraints. \param [in] correlationDistance The correlation distance used by friction correlation to identify whether a friction patch is broken on the grounds of relation separation. \param [out] Z Temporary buffer for impulse propagation. \return a boolean to define if this method was successful or not. */ PX_C_EXPORT PX_PHYSX_CORE_API bool PxCreateContactConstraints(PxConstraintBatchHeader* batchHeaders, PxU32 nbHeaders, PxSolverContactDesc* contactDescs, PxConstraintAllocator& allocator, PxReal invDt, PxReal bounceThreshold, PxReal frictionOffsetThreshold, PxReal correlationDistance, PxSpatialVector* Z = NULL); /** \brief Creates a set of joint constraint blocks. Note that, depending the results of PxBatchConstraints, the batchHeader may refer to up to 4 solverConstraintDescs \param [in] batchHeaders The array of batch headers to be processed. \param [in] nbHeaders The total number of batch headers to process. \param [in] jointDescs An array of constraint prep descs defining the properties of the constraints being created. \param [in] allocator An allocator callback to allocate constraint data. \param [in] dt The timestep. \param [in] invDt The inverse timestep. \param [out] Z Temporary buffer for impulse propagation. \return a boolean indicating if this method was successful or not. */ PX_C_EXPORT PX_PHYSX_CORE_API bool PxCreateJointConstraints(PxConstraintBatchHeader* batchHeaders, PxU32 nbHeaders, PxSolverConstraintPrepDesc* jointDescs, PxConstraintAllocator& allocator, PxSpatialVector* Z, PxReal dt, PxReal invDt); /** \brief Creates a set of joint constraint blocks. This function runs joint shaders defined inside PxConstraint** param, fills in joint row information in jointDescs and then calls PxCreateJointConstraints. \param [in] batchHeaders The set of batchHeaders to be processed. \param [in] nbBatchHeaders The number of batch headers to process. \param [in] constraints The set of constraints to be used to produce constraint rows. 
\param [in,out] jointDescs An array of constraint prep descs defining the properties of the constraints being created. \param [in] allocator An allocator callback to allocate constraint data. \param [in] dt The timestep. \param [in] invDt The inverse timestep. \param [out] Z Temporary buffer for impulse propagation. \return a boolean indicating if this method was successful or not. @see PxCreateJointConstraints */ PX_C_EXPORT PX_PHYSX_CORE_API bool PxCreateJointConstraintsWithShaders(PxConstraintBatchHeader* batchHeaders, PxU32 nbBatchHeaders, PxConstraint** constraints, PxSolverConstraintPrepDesc* jointDescs, PxConstraintAllocator& allocator, PxReal dt, PxReal invDt, PxSpatialVector* Z = NULL); struct PxImmediateConstraint { PxConstraintSolverPrep prep; const void* constantBlock; }; /** \brief Creates a set of joint constraint blocks. This function runs joint shaders defined inside PxImmediateConstraint* param, fills in joint row information in jointDescs and then calls PxCreateJointConstraints. \param [in] batchHeaders The set of batchHeaders to be processed. \param [in] nbBatchHeaders The number of batch headers to process. \param [in] constraints The set of constraints to be used to produce constraint rows. \param [in,out] jointDescs An array of constraint prep descs defining the properties of the constraints being created. \param [in] allocator An allocator callback to allocate constraint data. \param [in] dt The timestep. \param [in] invDt The inverse timestep. \param [out] Z Temporary buffer for impulse propagation. \return a boolean indicating if this method was successful or not. @see PxCreateJointConstraints */ PX_C_EXPORT PX_PHYSX_CORE_API bool PxCreateJointConstraintsWithImmediateShaders(PxConstraintBatchHeader* batchHeaders, PxU32 nbBatchHeaders, PxImmediateConstraint* constraints, PxSolverConstraintPrepDesc* jointDescs, PxConstraintAllocator& allocator, PxReal dt, PxReal invDt, PxSpatialVector* Z = NULL); /** \brief Iteratively solves the set of constraints defined by the provided PxConstraintBatchHeader and PxSolverConstraintDesc structures. Updates deltaVelocities inside the PxSolverBody structures. Produces resulting linear and angular motion velocities. \param [in] batchHeaders The set of batch headers to be solved \param [in] nbBatchHeaders The total number of batch headers to be solved \param [in] solverConstraintDescs The reordererd set of solver constraint descs referenced by the batch headers \param [in,out] solverBodies The set of solver bodies the bodies reference \param [out] linearMotionVelocity The resulting linear motion velocity \param [out] angularMotionVelocity The resulting angular motion velocity. \param [in] nbSolverBodies The total number of solver bodies \param [in] nbPositionIterations The number of position iterations to run \param [in] nbVelocityIterations The number of velocity iterations to run \param [in] dt Timestep. Only needed if articulations are sent to the function. \param [in] invDt Inverse timestep. Only needed if articulations are sent to the function. \param [in] nbSolverArticulations Number of articulations to solve constraints for. \param [in] solverArticulations Array of articulations to solve constraints for. 
\param [out] Z Temporary buffer for impulse propagation \param [out] deltaV Temporary buffer for velocity change */ PX_C_EXPORT PX_PHYSX_CORE_API void PxSolveConstraints(const PxConstraintBatchHeader* batchHeaders, PxU32 nbBatchHeaders, const PxSolverConstraintDesc* solverConstraintDescs, const PxSolverBody* solverBodies, PxVec3* linearMotionVelocity, PxVec3* angularMotionVelocity, PxU32 nbSolverBodies, PxU32 nbPositionIterations, PxU32 nbVelocityIterations, float dt=0.0f, float invDt=0.0f, PxU32 nbSolverArticulations=0, PxArticulationHandle* solverArticulations=NULL, PxSpatialVector* Z = NULL, PxSpatialVector* deltaV = NULL); /** \brief Integrates a rigid body, returning the new velocities and transforms. After this function has been called, solverBodyData stores all the body's velocity data. \param [in,out] solverBodyData The array of solver body data to be integrated \param [in] solverBody The bodies' linear and angular velocities \param [in] linearMotionVelocity The bodies' linear motion velocity array \param [in] angularMotionState The bodies' angular motion velocity array \param [in] nbBodiesToIntegrate The total number of bodies to integrate \param [in] dt The timestep */ PX_C_EXPORT PX_PHYSX_CORE_API void PxIntegrateSolverBodies(PxSolverBodyData* solverBodyData, PxSolverBody* solverBody, const PxVec3* linearMotionVelocity, const PxVec3* angularMotionState, PxU32 nbBodiesToIntegrate, PxReal dt); /** \brief Performs contact generation for a given pair of geometries at the specified poses. Produced contacts are stored in the provided contact recorder. Information is cached in PxCache structure to accelerate future contact generation between pairs. This cache data is valid only as long as the memory provided by PxCacheAllocator has not been released/re-used. Recommendation is to retain that data for a single simulation frame, discarding cached data after 2 frames. If the cached memory has been released/re-used prior to the corresponding pair having contact generation performed again, it is the application's responsibility to reset the PxCache. \param [in] geom0 Array of geometries to perform collision detection on. \param [in] geom1 Array of geometries to perform collision detection on \param [in] pose0 Array of poses associated with the corresponding entry in the geom0 array \param [in] pose1 Array of poses associated with the corresponding entry in the geom1 array \param [in,out] contactCache Array of contact caches associated with each pair geom0[i] + geom1[i] \param [in] nbPairs The total number of pairs to process \param [out] contactRecorder A callback that is called to record contacts for each pair that detects contacts \param [in] contactDistance The distance at which contacts begin to be generated between the pairs \param [in] meshContactMargin The mesh contact margin. \param [in] toleranceLength The toleranceLength. Used for scaling distance-based thresholds internally to produce appropriate results given simulations in different units \param [in] allocator A callback to allocate memory for the contact cache \return a boolean indicating if the function was successful or not. 
*/ PX_C_EXPORT PX_PHYSX_CORE_API bool PxGenerateContacts( const PxGeometry* const * geom0, const PxGeometry* const * geom1, const PxTransform* pose0, const PxTransform* pose1, PxCache* contactCache, PxU32 nbPairs, PxContactRecorder& contactRecorder, PxReal contactDistance, PxReal meshContactMargin, PxReal toleranceLength, PxCacheAllocator& allocator); struct PxArticulationJointDataRC { PxTransform parentPose; PxTransform childPose; PxArticulationMotion::Enum motion[PxArticulationAxis::eCOUNT]; PxArticulationLimit limits[PxArticulationAxis::eCOUNT]; PxArticulationDrive drives[PxArticulationAxis::eCOUNT]; PxReal targetPos[PxArticulationAxis::eCOUNT]; PxReal targetVel[PxArticulationAxis::eCOUNT]; PxReal armature[PxArticulationAxis::eCOUNT]; PxReal jointPos[PxArticulationAxis::eCOUNT]; PxReal jointVel[PxArticulationAxis::eCOUNT]; PxReal frictionCoefficient; PxReal maxJointVelocity; PxArticulationJointType::Enum type; void initData() { parentPose = PxTransform(PxIdentity); childPose = PxTransform(PxIdentity); frictionCoefficient = 0.05f; maxJointVelocity = 100.0f; type = PxArticulationJointType::eUNDEFINED; // For root for(PxU32 i=0;i<PxArticulationAxis::eCOUNT;i++) { motion[i] = PxArticulationMotion::eLOCKED; limits[i] = PxArticulationLimit(0.0f, 0.0f); drives[i] = PxArticulationDrive(0.0f, 0.0f, 0.0f); armature[i] = 0.0f; jointPos[i] = 0.0f; jointVel[i] = 0.0f; } PxMemSet(targetPos, 0xff, sizeof(PxReal)*PxArticulationAxis::eCOUNT); PxMemSet(targetVel, 0xff, sizeof(PxReal)*PxArticulationAxis::eCOUNT); } }; struct PxArticulationDataRC { PxArticulationFlags flags; }; struct PxArticulationLinkMutableDataRC { PxVec3 inverseInertia; float inverseMass; float linearDamping; float angularDamping; float maxLinearVelocitySq; float maxAngularVelocitySq; float cfmScale; bool disableGravity; void initData() { inverseInertia = PxVec3(1.0f); inverseMass = 1.0f; linearDamping = 0.05f; angularDamping = 0.05f; maxLinearVelocitySq = 100.0f * 100.0f; maxAngularVelocitySq = 50.0f * 50.0f; cfmScale = 0.025f; disableGravity = false; } }; struct PxArticulationLinkDerivedDataRC { PxTransform pose; PxVec3 linearVelocity; PxVec3 angularVelocity; }; struct PxArticulationLinkDataRC : PxArticulationLinkMutableDataRC { PxArticulationLinkDataRC() { PxArticulationLinkDataRC::initData(); } void initData() { pose = PxTransform(PxIdentity); PxArticulationLinkMutableDataRC::initData(); inboundJoint.initData(); } PxArticulationJointDataRC inboundJoint; PxTransform pose; }; typedef void* PxArticulationCookie; struct PxArticulationLinkCookie { PxArticulationCookie articulation; PxU32 linkId; }; struct PxCreateArticulationLinkCookie : PxArticulationLinkCookie { PX_FORCE_INLINE PxCreateArticulationLinkCookie(PxArticulationCookie art=NULL, PxU32 id=0xffffffff) { articulation = art; linkId = id; } }; struct PxArticulationLinkHandle { PX_FORCE_INLINE PxArticulationLinkHandle(PxArticulationHandle art=NULL, PxU32 id=0xffffffff) : articulation(art), linkId(id) {} PxArticulationHandle articulation; PxU32 linkId; }; /** \brief Begin creation of an immediate-mode reduced-coordinate articulation. Returned cookie must be used to add links to the articulation, and to complete creating the articulation. The cookie is a temporary ID for the articulation, only valid until PxEndCreateArticulationRC is called. 
\param [in] data Articulation data \return Articulation cookie @see PxAddArticulationLink PxEndCreateArticulationRC */ PX_C_EXPORT PX_PHYSX_CORE_API PxArticulationCookie PxBeginCreateArticulationRC(const PxArticulationDataRC& data); /** \brief Add a link to the articulation. All links must be added before the articulation is completed. It is not possible to add a new link at runtime. Returned cookie is a temporary ID for the link, only valid until PxEndCreateArticulationRC is called. \param [in] articulation Cookie value returned by PxBeginCreateArticulationRC \param [in] parent Parent for the new link, or NULL if this is the root link \param [in] data Link data \return Link cookie @see PxBeginCreateArticulationRC PxEndCreateArticulationRC */ PX_C_EXPORT PX_PHYSX_CORE_API PxArticulationLinkCookie PxAddArticulationLink(PxArticulationCookie articulation, const PxArticulationLinkCookie* parent, const PxArticulationLinkDataRC& data); /** \brief End creation of an immediate-mode reduced-coordinate articulation. This call completes the creation of the articulation. All involved cookies become unsafe to use after that point. The links are actually created in this function, and it returns the actual link handles to users. The given buffer should be large enough to contain as many links as created between the PxBeginCreateArticulationRC & PxEndCreateArticulationRC calls, i.e. if N calls were made to PxAddArticulationLink, the buffer should be large enough to contain N handles. \param [in] articulation Cookie value returned by PxBeginCreateArticulationRC \param [out] linkHandles Articulation link handles of all created articulation links \param [in] bufferSize Size of linkHandles buffer. Must match internal expected number of articulation links. \return Articulation handle, or NULL if creation failed @see PxAddArticulationLink PxEndCreateArticulationRC */ PX_C_EXPORT PX_PHYSX_CORE_API PxArticulationHandle PxEndCreateArticulationRC(PxArticulationCookie articulation, PxArticulationLinkHandle* linkHandles, PxU32 bufferSize); /** \brief Releases an immediate-mode reduced-coordinate articulation. \param [in] articulation Articulation handle @see PxCreateFeatherstoneArticulation */ PX_C_EXPORT PX_PHYSX_CORE_API void PxReleaseArticulation(PxArticulationHandle articulation); /** \brief Creates an articulation cache. \param [in] articulation Articulation handle \return Articulation cache @see PxReleaseArticulationCache */ PX_C_EXPORT PX_PHYSX_CORE_API PxArticulationCache* PxCreateArticulationCache(PxArticulationHandle articulation); /** \brief Copy the internal data of the articulation to the cache \param[in] articulation Articulation handle. \param[in] cache Articulation data \param[in] flag Indicates which values of the articulation system are copied to the cache @see createCache PxApplyArticulationCache */ PX_C_EXPORT PX_PHYSX_CORE_API void PxCopyInternalStateToArticulationCache(PxArticulationHandle articulation, PxArticulationCache& cache, PxArticulationCacheFlags flag); /** \brief Apply the user defined data in the cache to the articulation system \param[in] articulation Articulation handle. \param[in] cache Articulation data. 
\param[in] flag Defines which values in the cache will be applied to the articulation @see createCache PxCopyInternalStateToArticulationCache */ PX_C_EXPORT PX_PHYSX_CORE_API void PxApplyArticulationCache(PxArticulationHandle articulation, PxArticulationCache& cache, PxArticulationCacheFlags flag); /** \brief Release an articulation cache \param[in] cache The cache to release @see PxCreateArticulationCache PxCopyInternalStateToArticulationCache PxCopyInternalStateToArticulationCache */ PX_C_EXPORT PX_PHYSX_CORE_API void PxReleaseArticulationCache(PxArticulationCache& cache); /** \brief Retrieves non-mutable link data from a link handle. The data here is computed by the articulation code but cannot be directly changed by users. \param [in] link Link handle \param [out] data Link data \return True if success @see PxGetAllLinkData */ PX_C_EXPORT PX_PHYSX_CORE_API bool PxGetLinkData(const PxArticulationLinkHandle& link, PxArticulationLinkDerivedDataRC& data); /** \brief Retrieves non-mutable link data from an articulation handle (all links). The data here is computed by the articulation code but cannot be directly changed by users. \param [in] articulation Articulation handle \param [out] data Link data for N links, or NULL to just retrieve the number of links. \return Number of links in the articulation = number of link data structure written to the data array. @see PxGetLinkData */ PX_C_EXPORT PX_PHYSX_CORE_API PxU32 PxGetAllLinkData(const PxArticulationHandle articulation, PxArticulationLinkDerivedDataRC* data); /** \brief Retrieves mutable link data from a link handle. \param [in] link Link handle \param [out] data Data for this link \return True if success @see PxSetMutableLinkData */ PX_C_EXPORT PX_PHYSX_CORE_API bool PxGetMutableLinkData(const PxArticulationLinkHandle& link, PxArticulationLinkMutableDataRC& data); /** \brief Sets mutable link data for given link. \param [in] link Link handle \param [in] data Data for this link \return True if success @see PxGetMutableLinkData */ PX_C_EXPORT PX_PHYSX_CORE_API bool PxSetMutableLinkData(const PxArticulationLinkHandle& link, const PxArticulationLinkMutableDataRC& data); /** \brief Retrieves joint data from a link handle. \param [in] link Link handle \param [out] data Joint data for this link \return True if success @see PxSetJointData */ PX_C_EXPORT PX_PHYSX_CORE_API bool PxGetJointData(const PxArticulationLinkHandle& link, PxArticulationJointDataRC& data); /** \brief Sets joint data for given link. \param [in] link Link handle \param [in] data Joint data for this link \return True if success @see PxGetJointData */ PX_C_EXPORT PX_PHYSX_CORE_API bool PxSetJointData(const PxArticulationLinkHandle& link, const PxArticulationJointDataRC& data); /** \brief Computes unconstrained velocities for a given articulation. \param [in] articulation Articulation handle \param [in] gravity Gravity vector \param [in] dt Timestep \param [in] invLengthScale 1/lengthScale from PxTolerancesScale. */ PX_C_EXPORT PX_PHYSX_CORE_API void PxComputeUnconstrainedVelocities(PxArticulationHandle articulation, const PxVec3& gravity, PxReal dt, PxReal invLengthScale); /** \brief Updates bodies for a given articulation. \param [in] articulation Articulation handle \param [in] dt Timestep */ PX_C_EXPORT PX_PHYSX_CORE_API void PxUpdateArticulationBodies(PxArticulationHandle articulation, PxReal dt); /** \brief Computes unconstrained velocities for a given articulation. 
\param [in] articulation Articulation handle \param [in] gravity Gravity vector \param [in] dt Timestep/numPosIterations \param [in] totalDt Timestep \param [in] invDt 1/(Timestep/numPosIterations) \param [in] invTotalDt 1/Timestep \param [in] invLengthScale 1/lengthScale from PxTolerancesScale. */ PX_C_EXPORT PX_PHYSX_CORE_API void PxComputeUnconstrainedVelocitiesTGS( PxArticulationHandle articulation, const PxVec3& gravity, PxReal dt, PxReal totalDt, PxReal invDt, PxReal invTotalDt, PxReal invLengthScale); /** \brief Updates bodies for a given articulation. \param [in] articulation Articulation handle \param [in] dt Timestep */ PX_C_EXPORT PX_PHYSX_CORE_API void PxUpdateArticulationBodiesTGS(PxArticulationHandle articulation, PxReal dt); /** \brief Constructs a PxSolverBodyData structure based on rigid body properties. Applies gravity, damping and clamps maximum velocity. \param [in] inRigidData The array rigid body properties \param [out] outSolverBodyVel The array of PxTGSSolverBodyVel structures produced to represent these bodies \param [out] outSolverBodyTxInertia The array of PxTGSSolverBodyTxInertia produced to represent these bodies \param [out] outSolverBodyData The array of PxTGSolverBodyData produced to represent these bodies \param [in] nbBodies The total number of solver bodies to create \param [in] gravity The gravity vector \param [in] dt The timestep \param [in] gyroscopicForces Indicates whether gyroscopic forces should be integrated */ PX_C_EXPORT PX_PHYSX_CORE_API void PxConstructSolverBodiesTGS(const PxRigidBodyData* inRigidData, PxTGSSolverBodyVel* outSolverBodyVel, PxTGSSolverBodyTxInertia* outSolverBodyTxInertia, PxTGSSolverBodyData* outSolverBodyData, PxU32 nbBodies, const PxVec3& gravity, PxReal dt, bool gyroscopicForces = false); /** \brief Constructs a PxSolverBodyData structure for a static body at a given pose. \param [in] globalPose The pose of this static actor \param [out] solverBodyVel The velocity component of this body (will be zero) \param [out] solverBodyTxInertia The intertia and transform delta component of this body (will be zero) \param [out] solverBodyData The solver body representation of this static actor */ PX_C_EXPORT PX_PHYSX_CORE_API void PxConstructStaticSolverBodyTGS(const PxTransform& globalPose, PxTGSSolverBodyVel& solverBodyVel, PxTGSSolverBodyTxInertia& solverBodyTxInertia, PxTGSSolverBodyData& solverBodyData); /** \brief Groups together sets of independent PxSolverConstraintDesc objects to be solved using SIMD SOA approach. \param [in] solverConstraintDescs The set of solver constraint descs to batch \param [in] nbConstraints The number of constraints to batch \param [in,out] solverBodies The array of solver bodies that the constraints reference. Some fields in these structures are written to as scratch memory for the batching. \param [in] nbBodies The number of bodies \param [out] outBatchHeaders The batch headers produced by this batching process. This array must have at least 1 entry per input constraint \param [out] outOrderedConstraintDescs A reordered copy of the constraint descs. This array is referenced by the constraint batches. This array must have at least 1 entry per input constraint. \param [in,out] articulations The array of articulations that the constraints reference. Some fields in these structures are written to as scratch memory for the batching. \param [in] nbArticulations The number of articulations \return The total number of batches produced. This should be less than or equal to nbConstraints. 
\note This method considers all bodies within the range [0, nbBodies-1] to be valid dynamic bodies. A given dynamic body can only be referenced in a batch once. Static or kinematic bodies can be referenced multiple times within a batch safely because constraints do not affect their velocities. The batching will implicitly consider any bodies outside of the range [0, nbBodies-1] to be infinite mass (static or kinematic). This means that either appending static/kinematic to the end of the array of bodies or placing static/kinematic bodies at before the start body pointer will ensure that the minimum number of batches are produced. */ PX_C_EXPORT PX_PHYSX_CORE_API PxU32 PxBatchConstraintsTGS( const PxSolverConstraintDesc* solverConstraintDescs, PxU32 nbConstraints, PxTGSSolverBodyVel* solverBodies, PxU32 nbBodies, PxConstraintBatchHeader* outBatchHeaders, PxSolverConstraintDesc* outOrderedConstraintDescs, PxArticulationHandle* articulations = NULL, PxU32 nbArticulations = 0); /** \brief Creates a set of contact constraint blocks. Note that, depending the results of PxBatchConstraints, each batchHeader may refer to up to 4 solverConstraintDescs. This function will allocate both constraint and friction patch data via the PxConstraintAllocator provided. Constraint data is only valid until PxSolveConstraints has completed. Friction data is to be retained and provided by the application for friction correlation. \param [in] batchHeaders Array of batch headers to process \param [in] nbHeaders The total number of headers \param [in] contactDescs An array of contact descs defining the pair and contact properties of each respective contacting pair \param [in] allocator An allocator callback to allocate constraint and friction memory \param [in] invDt The inverse timestep/nbPositionIterations \param [in] invTotalDt The inverse time-step \param [in] bounceThreshold The bounce threshold. Relative velocities below this will be solved by bias only. Relative velocities above this will be solved by restitution. If restitution is zero then these pairs will always be solved by bias. \param [in] frictionOffsetThreshold The friction offset threshold. Contacts whose separations are below this threshold can generate friction constraints. \param [in] correlationDistance The correlation distance used by friction correlation to identify whether a friction patch is broken on the grounds of relation separation. \return a boolean to define if this method was successful or not. */ PX_C_EXPORT PX_PHYSX_CORE_API bool PxCreateContactConstraintsTGS( PxConstraintBatchHeader* batchHeaders, PxU32 nbHeaders, PxTGSSolverContactDesc* contactDescs, PxConstraintAllocator& allocator, PxReal invDt, PxReal invTotalDt, PxReal bounceThreshold, PxReal frictionOffsetThreshold, PxReal correlationDistance); /** \brief Creates a set of joint constraint blocks. 
Note that, depending the results of PxBatchConstraints, the batchHeader may refer to up to 4 solverConstraintDescs \param [in] batchHeaders The array of batch headers to be processed \param [in] nbHeaders The total number of batch headers to process \param [in] jointDescs An array of constraint prep descs defining the properties of the constraints being created \param [in] allocator An allocator callback to allocate constraint data \param [in] dt The total time-step/nbPositionIterations \param [in] totalDt The total time-step \param [in] invDt The inverse (timestep/nbPositionIterations) \param [in] invTotalDt The inverse total time-step \param [in] lengthScale PxToleranceScale::length, i.e. a meter in simulation units \return a boolean indicating if this method was successful or not. */ PX_C_EXPORT PX_PHYSX_CORE_API bool PxCreateJointConstraintsTGS( PxConstraintBatchHeader* batchHeaders, PxU32 nbHeaders, PxTGSSolverConstraintPrepDesc* jointDescs, PxConstraintAllocator& allocator, PxReal dt, PxReal totalDt, PxReal invDt, PxReal invTotalDt, PxReal lengthScale); /** \brief Creates a set of joint constraint blocks. This function runs joint shaders defined inside PxConstraint** param, fills in joint row information in jointDescs and then calls PxCreateJointConstraints. \param [in] batchHeaders The set of batchHeaders to be processed \param [in] nbBatchHeaders The number of batch headers to process. \param [in] constraints The set of constraints to be used to produce constraint rows \param [in,out] jointDescs An array of constraint prep descs defining the properties of the constraints being created \param [in] allocator An allocator callback to allocate constraint data \param [in] dt The total time-step/nbPositionIterations \param [in] totalDt The total time-step \param [in] invDt The inverse (timestep/nbPositionIterations) \param [in] invTotalDt The inverse total time-step \param [in] lengthScale PxToleranceScale::length, i.e. a meter in simulation units \return a boolean indicating if this method was successful or not. @see PxCreateJointConstraints */ PX_C_EXPORT PX_PHYSX_CORE_API bool PxCreateJointConstraintsWithShadersTGS( PxConstraintBatchHeader* batchHeaders, PxU32 nbBatchHeaders, PxConstraint** constraints, PxTGSSolverConstraintPrepDesc* jointDescs, PxConstraintAllocator& allocator, PxReal dt, PxReal totalDt, PxReal invDt, PxReal invTotalDt, PxReal lengthScale); /** \brief Creates a set of joint constraint blocks. This function runs joint shaders defined inside PxImmediateConstraint* param, fills in joint row information in jointDescs and then calls PxCreateJointConstraints. \param [in] batchHeaders The set of batchHeaders to be processed \param [in] nbBatchHeaders The number of batch headers to process. \param [in] constraints The set of constraints to be used to produce constraint rows \param [in,out] jointDescs An array of constraint prep descs defining the properties of the constraints being created \param [in] allocator An allocator callback to allocate constraint data \param [in] dt The total time-step/nbPositionIterations \param [in] totalDt The total time-step \param [in] invDt The inverse (timestep/nbPositionIterations) \param [in] invTotalDt The inverse total time-step \param [in] lengthScale PxToleranceScale::length, i.e. a meter in simulation units \return a boolean indicating if this method was successful or not. 
@see PxCreateJointConstraints */ PX_C_EXPORT PX_PHYSX_CORE_API bool PxCreateJointConstraintsWithImmediateShadersTGS(PxConstraintBatchHeader* batchHeaders, PxU32 nbBatchHeaders, PxImmediateConstraint* constraints, PxTGSSolverConstraintPrepDesc* jointDescs, PxConstraintAllocator& allocator, PxReal dt, PxReal totalDt, PxReal invDt, PxReal invTotalDt, PxReal lengthScale); /** \brief Iteratively solves the set of constraints defined by the provided PxConstraintBatchHeader and PxSolverConstraintDesc structures. Updates deltaVelocities inside the PxSolverBody structures. Produces resulting linear and angular motion velocities. \param [in] batchHeaders The set of batch headers to be solved \param [in] nbBatchHeaders The total number of batch headers to be solved \param [in] solverConstraintDescs The reordererd set of solver constraint descs referenced by the batch headers \param [in,out] solverBodies The set of solver bodies the bodies reference \param [in,out] txInertias The set of solver body TxInertias the bodies reference \param [in] nbSolverBodies The total number of solver bodies \param [in] nbPositionIterations The number of position iterations to run \param [in] nbVelocityIterations The number of velocity iterations to run \param [in] dt time-step/nbPositionIterations \param [in] invDt 1/(time-step/nbPositionIterations) \param [in] nbSolverArticulations Number of articulations to solve constraints for. \param [in] solverArticulations Array of articulations to solve constraints for. \param [out] Z Temporary buffer for impulse propagation (only if articulations are used, size should be at least as large as the maximum number of links in any articulations being simulated) \param [out] deltaV Temporary buffer for velocity change (only if articulations are used, size should be at least as large as the maximum number of links in any articulations being simulated) */ PX_C_EXPORT PX_PHYSX_CORE_API void PxSolveConstraintsTGS( const PxConstraintBatchHeader* batchHeaders, PxU32 nbBatchHeaders, const PxSolverConstraintDesc* solverConstraintDescs, PxTGSSolverBodyVel* solverBodies, PxTGSSolverBodyTxInertia* txInertias, PxU32 nbSolverBodies, PxU32 nbPositionIterations, PxU32 nbVelocityIterations, float dt, float invDt, PxU32 nbSolverArticulations = 0, PxArticulationHandle* solverArticulations = NULL, PxSpatialVector* Z = NULL, PxSpatialVector* deltaV = NULL); /** \brief Integrates a rigid body, returning the new velocities and transforms. After this function has been called, solverBody stores all the body's velocity data. \param [in,out] solverBody The array of solver bodies to be integrated \param [in] txInertia The delta pose and inertia terms \param [in,out] poses The original poses of the bodies. Updated to be the new poses of the bodies \param [in] nbBodiesToIntegrate The total number of bodies to integrate \param [in] dt The timestep */ PX_C_EXPORT PX_PHYSX_CORE_API void PxIntegrateSolverBodiesTGS(PxTGSSolverBodyVel* solverBody, PxTGSSolverBodyTxInertia* txInertia, PxTransform* poses, PxU32 nbBodiesToIntegrate, PxReal dt); #if !PX_DOXYGEN } #endif #if !PX_DOXYGEN } #endif /** @} */ #endif
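// --- Usage sketch (not part of the SDK header) -------------------------------
// A minimal illustration of the immediate-mode articulation creation protocol
// described above: begin creation to obtain a cookie, add links (root first,
// children referencing their parent's link cookie), then end creation to get
// the real handles. The two-link layout and the joint setup shown (a single
// revolute twist axis) are illustrative assumptions, not prescribed by the API.
#include "PxImmediateMode.h"

static physx::immediate::PxArticulationHandle createTwoLinkArticulation(
	physx::immediate::PxArticulationLinkHandle linkHandles[2])
{
	using namespace physx;
	using namespace physx::immediate;

	PxArticulationDataRC artData;
	artData.flags = PxArticulationFlags();		// no articulation flags set

	PxArticulationCookie cookie = PxBeginCreateArticulationRC(artData);

	// Root link: the default constructor of PxArticulationLinkDataRC calls initData().
	PxArticulationLinkDataRC rootData;
	PxArticulationLinkCookie root = PxAddArticulationLink(cookie, NULL, rootData);

	// Child link attached to the root by a revolute joint, free about the twist axis.
	PxArticulationLinkDataRC childData;
	childData.inboundJoint.type = PxArticulationJointType::eREVOLUTE;
	childData.inboundJoint.motion[PxArticulationAxis::eTWIST] = PxArticulationMotion::eFREE;
	PxAddArticulationLink(cookie, &root, childData);

	// All cookies become invalid here; linkHandles receives the actual link handles.
	return PxEndCreateArticulationRC(cookie, linkHandles, 2);
}
// -----------------------------------------------------------------------------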
40,881
C
54.02288
308
0.784325
NVIDIA-Omniverse/PhysX/physx/include/PxFLIPMaterial.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_FLIP_MATERIAL_H #define PX_FLIP_MATERIAL_H /** \addtogroup physics @{ */ #include "PxParticleMaterial.h" #if !PX_DOXYGEN namespace physx { #endif class PxScene; /** \brief Material class to represent a set of FLIP particle material properties. @see #PxPhysics.createFLIPMaterial() */ class PxFLIPMaterial : public PxParticleMaterial { public: /** \brief Sets viscosity \param[in] viscosity Viscosity. <b>Range:</b> [0, PX_MAX_F32) @see #getViscosity() */ virtual void setViscosity(PxReal viscosity) = 0; /** \brief Retrieves the viscosity value. \return The viscosity value. @see #setViscosity() */ virtual PxReal getViscosity() const = 0; virtual const char* getConcreteTypeName() const { return "PxFLIPMaterial"; } protected: PX_INLINE PxFLIPMaterial(PxType concreteType, PxBaseFlags baseFlags) : PxParticleMaterial(concreteType, baseFlags) {} PX_INLINE PxFLIPMaterial(PxBaseFlags baseFlags) : PxParticleMaterial(baseFlags) {} virtual ~PxFLIPMaterial() {} virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxFLIPMaterial", PxParticleMaterial); } }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
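// --- Usage sketch (not part of the SDK header) -------------------------------
// A minimal illustration of creating and tuning a FLIP material through the
// PxPhysics factory documented earlier (createFLIPMaterial) together with the
// setViscosity() accessor declared above. Parameter values are illustrative
// assumptions only; the FLIP feature is flagged as under development and for
// internal usage. Assumes the umbrella include PxPhysicsAPI.h and an
// already-created PxPhysics instance.
#include "PxPhysicsAPI.h"

static physx::PxFLIPMaterial* createWaterLikeFLIPMaterial(physx::PxPhysics& physics)
{
	using namespace physx;

	// Arguments: friction, damping, adhesion, viscosity (gravityScale keeps its default of 1).
	PxFLIPMaterial* material = physics.createFLIPMaterial(0.05f, 0.0f, 0.0f, 0.01f);
	if(material)
		material->setViscosity(0.02f);	// viscosity can also be adjusted after creation
	return material;
}
// -----------------------------------------------------------------------------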
2,911
C
33.666666
121
0.742357
NVIDIA-Omniverse/PhysX/physx/include/PxConstraintDesc.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_CONSTRAINT_DESC_H #define PX_CONSTRAINT_DESC_H /** \addtogroup physics @{ */ #include "PxPhysXConfig.h" #include "foundation/PxFlags.h" #include "foundation/PxVec3.h" #include "common/PxBase.h" #if !PX_DOXYGEN namespace physx { namespace pvdsdk { #endif class PvdDataStream; #if !PX_DOXYGEN }} #endif #if !PX_DOXYGEN namespace physx { #endif /** \brief Constraint row flags These flags configure the post-processing of constraint rows and the behavior of the solver while solving constraints */ struct Px1DConstraintFlag { PX_CUDA_CALLABLE Px1DConstraintFlag(){} enum Type { eSPRING = 1<<0, //!< whether the constraint is a spring. Mutually exclusive with eRESTITUTION. If set, eKEEPBIAS is ignored. eACCELERATION_SPRING = 1<<1, //!< whether the constraint is a force or acceleration spring. Only valid if eSPRING is set. eRESTITUTION = 1<<2, //!< whether the restitution model should be applied to generate the target velocity. Mutually exclusive with eSPRING. If restitution causes a bounces, eKEEPBIAS is ignored eKEEPBIAS = 1<<3, //!< whether to keep the error term when solving for velocity. Ignored if restitution generates bounce, or eSPRING is set. 
eOUTPUT_FORCE = 1<<4, //!< whether to accumulate the force value from this constraint in the force total that is reported for the constraint and tested for breakage eHAS_DRIVE_LIMIT = 1<<5, //!< whether the constraint has a drive force limit (which will be scaled by dt unless PxConstraintFlag::eLIMITS_ARE_FORCES is set) eANGULAR_CONSTRAINT = 1<<6, //!< whether this is an angular or linear constraint eDEPRECATED_DRIVE_ROW = 1<<7 //!< whether the constraint's geometric error should drive the target velocity }; }; typedef PxFlags<Px1DConstraintFlag::Type, PxU16> Px1DConstraintFlags; PX_FLAGS_OPERATORS(Px1DConstraintFlag::Type, PxU16) /** \brief Constraint type hints which the solver uses to optimize constraint handling */ struct PxConstraintSolveHint { enum Enum { eNONE = 0, //!< no special properties eACCELERATION1 = 256, //!< a group of acceleration drive constraints with the same stiffness and drive parameters eSLERP_SPRING = 258, //!< temporary special value to identify SLERP drive rows eACCELERATION2 = 512, //!< a group of acceleration drive constraints with the same stiffness and drive parameters eACCELERATION3 = 768, //!< a group of acceleration drive constraints with the same stiffness and drive parameters eROTATIONAL_EQUALITY = 1024, //!< rotational equality constraints with no force limit and no velocity target eROTATIONAL_INEQUALITY = 1025, //!< rotational inequality constraints with (0, PX_MAX_FLT) force limits eEQUALITY = 2048, //!< equality constraints with no force limit and no velocity target eINEQUALITY = 2049 //!< inequality constraints with (0, PX_MAX_FLT) force limits }; }; /** \brief A one-dimensional constraint A constraint is expressed as a set of 1-dimensional constraint rows which define the required constraint on the objects' velocities. Each constraint is either a hard constraint or a spring. We define the velocity at the constraint to be the quantity v = body0vel.dot(lin0,ang0) - body1vel.dot(lin1, ang1) For a hard constraint, the solver attempts to generate 1. a set of velocities for the objects which, when integrated, respect the constraint errors: v + (geometricError / timestep) = velocityTarget 2. a set of velocities for the objects which respect the constraints: v = velocityTarget Hard constraints support restitution: if the impact velocity exceeds the bounce threshold, then the target velocity of the constraint will be set to restitution * -v Alternatively, the solver can attempt to resolve the velocity constraint as an implicit spring: F = stiffness * -geometricError + damping * (velocityTarget - v) where F is the constraint force or acceleration. Springs are fully implicit: that is, the force or acceleration is a function of the position and velocity after the solve. All constraints support limits on the minimum or maximum impulse applied. 
*/ PX_ALIGN_PREFIX(16) struct Px1DConstraint { PxVec3 linear0; //!< linear component of velocity jacobian in world space PxReal geometricError; //!< geometric error of the constraint along this axis PxVec3 angular0; //!< angular component of velocity jacobian in world space PxReal velocityTarget; //!< velocity target for the constraint along this axis PxVec3 linear1; //!< linear component of velocity jacobian in world space PxReal minImpulse; //!< minimum impulse the solver may apply to enforce this constraint PxVec3 angular1; //!< angular component of velocity jacobian in world space PxReal maxImpulse; //!< maximum impulse the solver may apply to enforce this constraint union { struct SpringModifiers { PxReal stiffness; //!< spring parameter, for spring constraints PxReal damping; //!< damping parameter, for spring constraints } spring; struct RestitutionModifiers { PxReal restitution; //!< restitution parameter for determining additional "bounce" PxReal velocityThreshold; //!< minimum impact velocity for bounce } bounce; } mods; PxReal forInternalUse; //!< for internal use only PxU16 flags; //!< a set of Px1DConstraintFlags PxU16 solveHint; //!< constraint optimization hint, should be an element of PxConstraintSolveHint } PX_ALIGN_SUFFIX(16); /** \brief Flags for determining which components of the constraint should be visualized. @see PxConstraintVisualize */ struct PxConstraintVisualizationFlag { enum Enum { eLOCAL_FRAMES = 1, //!< visualize constraint frames eLIMITS = 2 //!< visualize constraint limits }; }; /** \brief Struct for specifying mass scaling for a pair of rigids */ PX_ALIGN_PREFIX(16) struct PxConstraintInvMassScale { PxReal linear0; //!< multiplier for inverse mass of body0 PxReal angular0; //!< multiplier for inverse MoI of body0 PxReal linear1; //!< multiplier for inverse mass of body1 PxReal angular1; //!< multiplier for inverse MoI of body1 PX_CUDA_CALLABLE PX_FORCE_INLINE PxConstraintInvMassScale(){} PX_CUDA_CALLABLE PX_FORCE_INLINE PxConstraintInvMassScale(PxReal lin0, PxReal ang0, PxReal lin1, PxReal ang1) : linear0(lin0), angular0(ang0), linear1(lin1), angular1(ang1){} } PX_ALIGN_SUFFIX(16); /** \brief Solver constraint generation shader This function is called by the constraint solver framework. The function must be reentrant, since it may be called simultaneously from multiple threads, and should access only the arguments passed into it. Developers writing custom constraints are encouraged to read the documentation in the user guide and the implementation code in PhysXExtensions. \param[out] constraints An array of solver constraint rows to be filled in \param[out] bodyAWorldOffset The origin point (offset from the position vector of bodyA's center of mass) at which the constraint is resolved. This value does not affect how constraints are solved, only the constraint force reported. \param[in] maxConstraints The size of the constraint buffer. 
At most this many constraint rows may be written \param[out] invMassScale The inverse mass and inertia scales for the constraint \param[in] constantBlock The constant data block \param[in] bodyAToWorld The center of mass frame of the first constrained body (the identity transform if the first actor is static, or if a NULL actor pointer was provided for it) \param[in] bodyBToWorld The center of mass frame of the second constrained body (the identity transform if the second actor is static, or if a NULL actor pointer was provided for it) \param[in] useExtendedLimits Enables limit ranges outside of (-PI, PI) \param[out] cAtW The world space location of body A's joint frame (position only) \param[out] cBtW The world space location of body B's joint frame (position only) \return the number of constraint rows written. */ typedef PxU32 (*PxConstraintSolverPrep)(Px1DConstraint* constraints, PxVec3p& bodyAWorldOffset, PxU32 maxConstraints, PxConstraintInvMassScale& invMassScale, const void* constantBlock, const PxTransform& bodyAToWorld, const PxTransform& bodyBToWorld, bool useExtendedLimits, PxVec3p& cAtW, PxVec3p& cBtW); /** \brief API used to visualize details about a constraint. */ class PxConstraintVisualizer { protected: virtual ~PxConstraintVisualizer(){} public: /** \brief Visualize joint frames \param[in] parent Parent transformation \param[in] child Child transformation */ virtual void visualizeJointFrames(const PxTransform& parent, const PxTransform& child) = 0; /** \brief Visualize joint linear limit \param[in] t0 Base transformation \param[in] t1 End transformation \param[in] value Distance */ virtual void visualizeLinearLimit(const PxTransform& t0, const PxTransform& t1, PxReal value) = 0; /** \brief Visualize joint angular limit \param[in] t0 Transformation for the visualization \param[in] lower Lower limit angle \param[in] upper Upper limit angle */ virtual void visualizeAngularLimit(const PxTransform& t0, PxReal lower, PxReal upper) = 0; /** \brief Visualize limit cone \param[in] t Transformation for the visualization \param[in] tanQSwingY Tangent of the quarter Y angle \param[in] tanQSwingZ Tangent of the quarter Z angle */ virtual void visualizeLimitCone(const PxTransform& t, PxReal tanQSwingY, PxReal tanQSwingZ) = 0; /** \brief Visualize joint double cone \param[in] t Transformation for the visualization \param[in] angle Limit angle */ virtual void visualizeDoubleCone(const PxTransform& t, PxReal angle) = 0; /** \brief Visualize line \param[in] p0 Start position \param[in] p1 End position \param[in] color Color */ virtual void visualizeLine(const PxVec3& p0, const PxVec3& p1, PxU32 color) = 0; }; /** \brief Solver constraint visualization function This function is called by the constraint post-solver framework to visualize the constraint \param[out] visualizer The render buffer to render to \param[in] constantBlock The constant data block \param[in] body0Transform The center of mass frame of the first constrained body (the identity if the actor is static, or a NULL pointer was provided for it) \param[in] body1Transform The center of mass frame of the second constrained body (the identity if the actor is static, or a NULL pointer was provided for it) \param[in] flags The visualization flags (PxConstraintVisualizationFlag) @see PxRenderBuffer */ typedef void (*PxConstraintVisualize)(PxConstraintVisualizer& visualizer, const void* constantBlock, const PxTransform& body0Transform, const PxTransform& body1Transform, PxU32 flags); /** \brief Flags for determining how PVD should
serialize a constraint update @see PxConstraintConnector::updatePvdProperties, PvdSceneClient::updateConstraint */ struct PxPvdUpdateType { enum Enum { CREATE_INSTANCE, //!< triggers createPvdInstance call, creates an instance of a constraint RELEASE_INSTANCE, //!< triggers releasePvdInstance call, releases an instance of a constraint UPDATE_ALL_PROPERTIES, //!< triggers updatePvdProperties call, updates all properties of a constraint UPDATE_SIM_PROPERTIES //!< triggers simUpdate call, updates all simulation properties of a constraint }; }; /** \brief This class connects a custom constraint to the SDK This class connects a custom constraint to the SDK, and functions are called by the SDK to query the custom implementation for specific information to pass on to the application or inform the constraint when the application makes calls into the SDK which will update the custom constraint's internal implementation */ class PxConstraintConnector { public: /** \brief Pre-simulation data preparation when the constraint is marked dirty, this function is called at the start of the simulation step for the SDK to copy the constraint data block. */ virtual void* prepareData() = 0; /** \brief this function is called by the SDK to update PVD's view of it */ virtual bool updatePvdProperties(physx::pvdsdk::PvdDataStream& pvdConnection, const PxConstraint* c, PxPvdUpdateType::Enum updateType) const = 0; /** \brief this function is called by the SDK to update OmniPVD's view of it */ virtual void updateOmniPvdProperties() const = 0; /** \brief Constraint release callback When the SDK deletes a PxConstraint object this function is called by the SDK. In general custom constraints should not be deleted directly by applications: rather, the constraint should respond to a release() request by calling PxConstraint::release(), then wait for this call to release its own resources. This function is also called when a PxConstraint object is deleted on cleanup due to destruction of the PxPhysics object. */ virtual void onConstraintRelease() = 0; /** \brief Center-of-mass shift callback This function is called by the SDK when the CoM of one of the actors is moved. Since the API specifies constraint positions relative to actors, and the constraint shader functions are supplied with coordinates relative to bodies, some synchronization is usually required when the application moves an object's center of mass. */ virtual void onComShift(PxU32 actor) = 0; /** \brief Origin shift callback This function is called by the SDK when the scene origin gets shifted and allows to adjust custom data which contains world space transforms. \note If the adjustments affect constraint shader data, it is necessary to call PxConstraint::markDirty() to make sure that the data gets synced at the beginning of the next simulation step. \param[in] shift Translation vector the origin is shifted by. @see PxScene.shiftOrigin() */ virtual void onOriginShift(const PxVec3& shift) = 0; /** \brief Fetches external data for a constraint. This function is used by the SDK to acquire a reference to the owner of a constraint and a unique owner type ID. This information will be passed on when a breakable constraint breaks or when #PxConstraint::getExternalReference() is called. \param[out] typeID Unique type identifier of the external object. The value 0xffffffff is reserved and should not be used. 
Furthermore, if the PhysX extensions library is used, some other IDs are reserved already (see PxConstraintExtIDs) \return Reference to the external object which owns the constraint. @see PxConstraintInfo PxSimulationEventCallback.onConstraintBreak() */ virtual void* getExternalReference(PxU32& typeID) = 0; /** \brief Obtain a reference to a PxBase interface if the constraint has one. If the constraint does not implement the PxBase interface, it should return NULL. */ virtual PxBase* getSerializable() = 0; /** \brief Obtain the shader function pointer used to prep rows for this constraint */ virtual PxConstraintSolverPrep getPrep() const = 0; /** \brief Obtain the pointer to the constraint's constant data */ virtual const void* getConstantBlock() const = 0; /** \brief Let the connector know it has been connected to a constraint. */ virtual void connectToConstraint(PxConstraint*) {} /** \brief virtual destructor */ virtual ~PxConstraintConnector() {} }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
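The PxConstraintSolverPrep typedef and Px1DConstraint struct above are easiest to understand with a concrete row in hand. The sketch below fills a single hard equality row that removes relative motion along the world X axis between two anchor points; MyConstraintData and the function name are hypothetical, and a production shader would normally emit several rows and handle limits, springs and breakage.

struct MyConstraintData { PxVec3 localAnchorA; PxVec3 localAnchorB; };  // hypothetical constant block layout

static PxU32 myConstraintPrep(Px1DConstraint* constraints, PxVec3p& bodyAWorldOffset, PxU32 maxConstraints,
                              PxConstraintInvMassScale& invMassScale, const void* constantBlock,
                              const PxTransform& bodyAToWorld, const PxTransform& bodyBToWorld,
                              bool /*useExtendedLimits*/, PxVec3p& cAtW, PxVec3p& cBtW)
{
    if (maxConstraints < 1)
        return 0;

    const MyConstraintData& data = *static_cast<const MyConstraintData*>(constantBlock);
    const PxVec3 anchorA = bodyAToWorld.transform(data.localAnchorA);
    const PxVec3 anchorB = bodyBToWorld.transform(data.localAnchorB);

    invMassScale = PxConstraintInvMassScale(1.0f, 1.0f, 1.0f, 1.0f); // no mass scaling
    bodyAWorldOffset = PxVec3p(anchorA - bodyAToWorld.p);            // where the reported constraint force acts
    cAtW = PxVec3p(anchorA);
    cBtW = PxVec3p(anchorB);

    Px1DConstraint& row = constraints[0];
    row.linear0  = PxVec3(1.0f, 0.0f, 0.0f);    // jacobian: +x on body0
    row.angular0 = PxVec3(0.0f);
    row.linear1  = PxVec3(1.0f, 0.0f, 0.0f);    // jacobian: +x on body1 (subtracted by the solver)
    row.angular1 = PxVec3(0.0f);
    row.geometricError = anchorA.x - anchorB.x; // positional error to remove
    row.velocityTarget = 0.0f;
    row.minImpulse = -PX_MAX_F32;               // hard, bilateral constraint
    row.maxImpulse =  PX_MAX_F32;
    row.flags      = 0;
    row.solveHint  = PxConstraintSolveHint::eEQUALITY;
    return 1;                                   // one row written
}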
17,167
C
38.832947
238
0.759189
NVIDIA-Omniverse/PhysX/physx/include/PxForceMode.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_FORCE_MODE_H #define PX_FORCE_MODE_H #include "foundation/PxPreprocessor.h" /** \addtogroup physics @{ */ #if !PX_DOXYGEN namespace physx { #endif /** \brief Parameter to addForce() and addTorque() calls, determines the exact operation that is carried out. @see PxRigidBody.addForce() PxRigidBody.addTorque() */ struct PxForceMode { enum Enum { eFORCE, //!< parameter has unit of mass * length / time^2, i.e., a force eIMPULSE, //!< parameter has unit of mass * length / time, i.e., force * time eVELOCITY_CHANGE, //!< parameter has unit of length / time, i.e., the effect is mass independent: a velocity change. eACCELERATION //!< parameter has unit of length/ time^2, i.e., an acceleration. It gets treated just like a force except the mass is not divided out before integration. }; }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
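A brief sketch showing how the force modes above change the interpretation of the same vector argument; the body is assumed to be a dynamic rigid body that has already been added to a scene, and the helper name is illustrative.

static void pushBody(PxRigidBody& body)
{
    body.addForce(PxVec3(0.0f, 0.0f, 10.0f), PxForceMode::eFORCE);            // mass * length / time^2, applied over the step
    body.addForce(PxVec3(0.0f, 5.0f, 0.0f), PxForceMode::eIMPULSE);           // force * time, an instantaneous kick
    body.addTorque(PxVec3(0.0f, 1.0f, 0.0f), PxForceMode::eVELOCITY_CHANGE);  // mass-independent change in angular velocity
}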
2,582
C
38.738461
171
0.74206
NVIDIA-Omniverse/PhysX/physx/include/PxArticulationFlag.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_ARTICULATION_FLAG_H #define PX_ARTICULATION_FLAG_H #include "PxPhysXConfig.h" #include "foundation/PxFlags.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief A description of the types of articulation data that may be directly written to and read from the GPU using the functions PxScene::copyArticulationData() and PxScene::applyArticulationData(). Types that are read-only may only be used in conjunction with PxScene::copyArticulationData(). Types that are write-only may only be used in conjunction with PxScene::applyArticulationData(). A subset of data types may be used in conjunction with both PxScene::copyArticulationData() and PxScene::applyArticulationData().
@see PxArticulationCache, PxScene::copyArticulationData(), PxScene::applyArticulationData() */ class PxArticulationGpuDataType { public: enum Enum { eJOINT_POSITION = 0, //!< The joint positions, read and write, see PxScene::copyArticulationData(), PxScene::applyArticulationData() eJOINT_VELOCITY, //!< The joint velocities, read and write, see PxScene::copyArticulationData(), PxScene::applyArticulationData() eJOINT_ACCELERATION, //!< The joint accelerations, read only, see PxScene::copyArticulationData() eJOINT_FORCE, //!< The applied joint forces, write only, see PxScene::applyArticulationData() eJOINT_SOLVER_FORCE, //!< @deprecated The computed joint constraint solver forces, read only, see PxScene::copyArticulationData() eJOINT_TARGET_VELOCITY, //!< The velocity targets for the joint drives, write only, see PxScene::applyArticulationData() eJOINT_TARGET_POSITION, //!< The position targets for the joint drives, write only, see PxScene::applyArticulationData() eSENSOR_FORCE, //!< @deprecated The spatial sensor forces, read only, see PxScene::copyArticulationData() eROOT_TRANSFORM, //!< The root link transform, read and write, see PxScene::copyArticulationData(), PxScene::applyArticulationData() eROOT_VELOCITY, //!< The root link velocity, read and write, see PxScene::copyArticulationData(), PxScene::applyArticulationData() eLINK_TRANSFORM, //!< The link transforms including root link, read only, see PxScene::copyArticulationData() eLINK_VELOCITY, //!< The link velocities including root link, read only, see PxScene::copyArticulationData() eLINK_ACCELERATION, //!< The link accelerations including root link, read only, see PxScene::copyArticulationData() eLINK_INCOMING_JOINT_FORCE, //!< The link incoming joint forces including root link, read only, see PxScene::copyArticulationData() eLINK_FORCE, //!< The forces to apply to links, write only, see PxScene::applyArticulationData() eLINK_TORQUE, //!< The torques to apply to links, write only, see PxScene::applyArticulationData() eFIXED_TENDON, //!< Fixed tendon data, write only, see PxScene::applyArticulationData() eFIXED_TENDON_JOINT, //!< Fixed tendon joint data, write only, see PxScene::applyArticulationData() eSPATIAL_TENDON, //!< Spatial tendon data, write only, see PxScene::applyArticulationData() eSPATIAL_TENDON_ATTACHMENT //!< Spatial tendon attachment data, write only, see PxScene::applyArticulationData() }; }; /** \brief These flags determine what data is read or written to the internal articulation data via cache. @see PxArticulationCache PxArticulationReducedCoordinate::copyInternalStateToCache PxArticulationReducedCoordinate::applyCache */ class PxArticulationCacheFlag { public: enum Enum { eVELOCITY = (1 << 0), //!< The joint velocities, see PxArticulationCache::jointVelocity. eACCELERATION = (1 << 1), //!< The joint accelerations, see PxArticulationCache::jointAcceleration. ePOSITION = (1 << 2), //!< The joint positions, see PxArticulationCache::jointPosition. eFORCE = (1 << 3), //!< The joint forces, see PxArticulationCache::jointForce. eLINK_VELOCITY = (1 << 4), //!< The link velocities, see PxArticulationCache::linkVelocity. eLINK_ACCELERATION = (1 << 5), //!< The link accelerations, see PxArticulationCache::linkAcceleration. eROOT_TRANSFORM = (1 << 6), //!< Root link transform, see PxArticulationCache::rootLinkData. eROOT_VELOCITIES = (1 << 7), //!< Root link velocities (read/write) and accelerations (read), see PxArticulationCache::rootLinkData. 
eSENSOR_FORCES = (1 << 8), //!< @deprecated The spatial sensor forces, see PxArticulationCache::sensorForces. eJOINT_SOLVER_FORCES = (1 << 9), //!< @deprecated Solver constraint joint forces, see PxArticulationCache::jointSolverForces. eLINK_INCOMING_JOINT_FORCE = (1 << 10), //!< Link incoming joint forces, see PxArticulationCache::linkIncomingJointForce. eJOINT_TARGET_POSITIONS = (1 << 11), //!< The joint target positions, see PxArticulationCache::jointTargetPositions. eJOINT_TARGET_VELOCITIES = (1 << 12), //!< The joint target velocities, see PxArticulationCache::jointTargetVelocities. eALL = (eVELOCITY | eACCELERATION | ePOSITION | eLINK_VELOCITY | eLINK_ACCELERATION | eROOT_TRANSFORM | eROOT_VELOCITIES) }; }; typedef PxFlags<PxArticulationCacheFlag::Enum, PxU32> PxArticulationCacheFlags; PX_FLAGS_OPERATORS(PxArticulationCacheFlag::Enum, PxU32) #if !PX_DOXYGEN } #endif #endif
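A short sketch of how the cache flags above are typically combined; it assumes an articulation that is already part of a scene, and the exact createCache/copyInternalStateToCache/applyCache signatures should be checked against PxArticulationReducedCoordinate.

static void roundTripJointState(PxArticulationReducedCoordinate& articulation)
{
    PxArticulationCache* cache = articulation.createCache();
    const PxArticulationCacheFlags flags = PxArticulationCacheFlag::ePOSITION | PxArticulationCacheFlag::eVELOCITY;
    articulation.copyInternalStateToCache(*cache, flags);   // read joint positions and velocities
    // ... inspect or edit cache->jointPosition and cache->jointVelocity here ...
    articulation.applyCache(*cache, flags);                 // write the (possibly modified) state back
    cache->release();
}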
6,977
C
61.303571
138
0.748316
NVIDIA-Omniverse/PhysX/physx/include/PxPBDMaterial.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_PBD_MATERIAL_H #define PX_PBD_MATERIAL_H /** \addtogroup physics @{ */ #include "PxParticleMaterial.h" #if !PX_DOXYGEN namespace physx { #endif class PxScene; /** \brief Material class to represent a set of PBD particle material properties. @see #PxPhysics.createPBDMaterial */ class PxPBDMaterial : public PxParticleMaterial { public: /** \brief Sets viscosity \param[in] viscosity Viscosity. <b>Range:</b> [0, PX_MAX_F32) @see #getViscosity() */ virtual void setViscosity(PxReal viscosity) = 0; /** \brief Retrieves the viscosity value. \return The viscosity value. @see #setViscosity() */ virtual PxReal getViscosity() const = 0; /** \brief Sets material vorticity confinement coefficient \param[in] vorticityConfinement Material vorticity confinement coefficient. <b>Range:</b> [0, PX_MAX_F32) @see #getVorticityConfinement() */ virtual void setVorticityConfinement(PxReal vorticityConfinement) = 0; /** \brief Retrieves the vorticity confinement coefficient. \return The vorticity confinement coefficient. @see #setVorticityConfinement() */ virtual PxReal getVorticityConfinement() const = 0; /** \brief Sets material surface tension coefficient \param[in] surfaceTension Material surface tension coefficient. <b>Range:</b> [0, PX_MAX_F32) @see #getSurfaceTension() */ virtual void setSurfaceTension(PxReal surfaceTension) = 0; /** \brief Retrieves the surface tension coefficient. \return The surface tension coefficient. @see #setSurfaceTension() */ virtual PxReal getSurfaceTension() const = 0; /** \brief Sets material cohesion coefficient \param[in] cohesion Material cohesion coefficient. <b>Range:</b> [0, PX_MAX_F32) @see #getCohesion() */ virtual void setCohesion(PxReal cohesion) = 0; /** \brief Retrieves the cohesion coefficient. \return The cohesion coefficient. 
@see #setCohesion() */ virtual PxReal getCohesion() const = 0; /** \brief Sets material lift coefficient \param[in] lift Material lift coefficient. <b>Range:</b> [0, PX_MAX_F32) @see #getLift() */ virtual void setLift(PxReal lift) = 0; /** \brief Retrieves the lift coefficient. \return The lift coefficient. @see #setLift() */ virtual PxReal getLift() const = 0; /** \brief Sets material drag coefficient \param[in] drag Material drag coefficient. <b>Range:</b> [0, PX_MAX_F32) @see #getDrag() */ virtual void setDrag(PxReal drag) = 0; /** \brief Retrieves the drag coefficient. \return The drag coefficient. @see #setDrag() */ virtual PxReal getDrag() const = 0; /** \brief Sets the CFL coefficient. \param[in] coefficient CFL coefficient. This coefficient scales the CFL term used to limit relative motion between fluid particles. <b>Range:</b> [1.f, PX_MAX_F32) */ virtual void setCFLCoefficient(PxReal coefficient) = 0; /** \brief Retrieves the CFL coefficient. \return The CFL coefficient. @see #setCFLCoefficient() */ virtual PxReal getCFLCoefficient() const = 0; /** \brief Sets material particle friction scale. This allows the application to scale up/down the frictional effect between particles independent of the friction coefficient, which also defines frictional behavior between the particle and rigid bodies/soft bodies/cloth etc. \param[in] scale particle friction scale. <b>Range:</b> [0, PX_MAX_F32) @see #getParticleFrictionScale() */ virtual void setParticleFrictionScale(PxReal scale) = 0; /** \brief Retrieves the particle friction scale. \return The particle friction scale. @see #setParticleFrictionScale() */ virtual PxReal getParticleFrictionScale() const = 0; /** \brief Sets material particle adhesion scale value. This is the adhesive value between particles defined as a scaled multiple of the adhesion parameter. \param[in] adhesion particle adhesion scale value. <b>Range:</b> [0, PX_MAX_F32) @see #getParticleAdhesionScale() */ virtual void setParticleAdhesionScale(PxReal adhesion) = 0; /** \brief Retrieves the particle adhesion scale value. \return The particle adhesion scale value. @see #setParticleAdhesionScale() */ virtual PxReal getParticleAdhesionScale() const = 0; virtual const char* getConcreteTypeName() const { return "PxPBDMaterial"; } protected: PX_INLINE PxPBDMaterial(PxType concreteType, PxBaseFlags baseFlags) : PxParticleMaterial(concreteType, baseFlags) {} PX_INLINE PxPBDMaterial(PxBaseFlags baseFlags) : PxParticleMaterial(baseFlags) {} virtual ~PxPBDMaterial() {} virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxPBDMaterial", PxParticleMaterial); } }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
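A small configuration sketch using only the PxPBDMaterial setters documented above; the values are illustrative rather than recommended, and the material is assumed to come from PxPhysics::createPBDMaterial().

static void configureSplashyFluid(PxPBDMaterial& material)
{
    material.setViscosity(0.01f);
    material.setSurfaceTension(0.005f);
    material.setCohesion(0.01f);
    material.setVorticityConfinement(0.5f);
    material.setParticleFrictionScale(1.0f);
    material.setCFLCoefficient(1.0f);   // valid range starts at 1.0
}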
6,508
C
28.721461
165
0.725722
NVIDIA-Omniverse/PhysX/physx/include/PxPruningStructure.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_PRUNING_STRUCTURE_H #define PX_PRUNING_STRUCTURE_H /** \addtogroup physics @{ */ #include "PxPhysXConfig.h" #include "common/PxBase.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief A precomputed pruning structure to accelerate scene queries against newly added actors. The pruning structure can be provided to #PxScene:: addActors() in which case it will get merged directly into the scene query optimization AABB tree, thus leading to improved performance when doing queries against the newly added actors. This applies to both static and dynamic actors. \note PxPruningStructure objects can be added to a collection and get serialized. \note Adding a PxPruningStructure object to a collection will also add the actors that were used to build the pruning structure. \note PxPruningStructure must be released before its rigid actors. \note PxRigidBody objects can be in one PxPruningStructure only. \note Changing the bounds of PxRigidBody objects assigned to a pruning structure that has not been added to a scene yet will invalidate the pruning structure. Same happens if shape scene query flags change or shape gets removed from an actor. @see PxScene::addActors PxCollection */ class PxPruningStructure : public PxBase { public: /** \brief Release this object. */ virtual void release() = 0; /** \brief Retrieve rigid actors in the pruning structure. You can retrieve the number of rigid actor pointers by calling #getNbRigidActors() \param[out] userBuffer The buffer to store the actor pointers. \param[in] bufferSize Size of provided user buffer. \param[in] startIndex Index of first actor pointer to be retrieved \return Number of rigid actor pointers written to the buffer. 
@see PxRigidActor */ virtual PxU32 getRigidActors(PxRigidActor** userBuffer, PxU32 bufferSize, PxU32 startIndex=0) const = 0; /** \brief Returns the number of rigid actors in the pruning structure. You can use #getRigidActors() to retrieve the rigid actor pointers. \return Number of rigid actors in the pruning structure. @see PxRigidActor */ virtual PxU32 getNbRigidActors() const = 0; /** \brief Gets the merge data for static actors This is mainly called by the PxSceneQuerySystem::merge() function to merge a PxPruningStructure with the internal data-structures of the scene-query system. \return Implementation-dependent merge data for static actors. @see PxSceneQuerySystem::merge() */ virtual const void* getStaticMergeData() const = 0; /** \brief Gets the merge data for dynamic actors This is mainly called by the PxSceneQuerySystem::merge() function to merge a PxPruningStructure with the internal data-structures of the scene-query system. \return Implementation-dependent merge data for dynamic actors. @see PxSceneQuerySystem::merge() */ virtual const void* getDynamicMergeData() const = 0; virtual const char* getConcreteTypeName() const { return "PxPruningStructure"; } protected: PX_INLINE PxPruningStructure(PxType concreteType, PxBaseFlags baseFlags) : PxBase(concreteType, baseFlags) {} PX_INLINE PxPruningStructure(PxBaseFlags baseFlags) : PxBase(baseFlags) {} virtual ~PxPruningStructure() {} virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxPruningStructure", PxBase); } }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
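A brief sketch of the intended workflow for PxPruningStructure above: query the baked actors if needed, then hand the whole structure to the scene so it is merged into the scene-query tree. It assumes PxScene::addActors() offers the pruning-structure overload referred to in the class description.

static void addPrecomputedActors(PxScene& scene, PxPruningStructure& pruningStructure)
{
    const PxU32 nbActors = pruningStructure.getNbRigidActors();   // actors baked into the structure
    PX_UNUSED(nbActors);
    scene.addActors(pruningStructure);                            // merges into the scene-query AABB tree
}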
5,082
C
37.218045
128
0.766234
NVIDIA-Omniverse/PhysX/physx/include/PxRigidActor.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_RIGID_ACTOR_H #define PX_RIGID_ACTOR_H /** \addtogroup physics @{ */ #include "PxActor.h" #include "PxShape.h" #if !PX_DOXYGEN namespace physx { #endif class PxConstraint; /** \brief PxRigidActor represents a base class shared between dynamic and static rigid bodies in the physics SDK. PxRigidActor objects specify the geometry of the object by defining a set of attached shapes (see #PxShape). @see PxActor */ class PxRigidActor : public PxActor { public: /** \brief Deletes the rigid actor object. Also releases any shapes associated with the actor. Releasing an actor will affect any objects that are connected to the actor (constraint shaders like joints etc.). Such connected objects will be deleted upon scene deletion, or explicitly by the user by calling release() on these objects. It is recommended to always remove all objects that reference actors before the actors themselves are removed. It is not possible to retrieve list of dead connected objects. <b>Sleeping:</b> This call will awaken any sleeping actors contacting the deleted actor (directly or indirectly). Calls #PxActor::release() so you might want to check the documentation of that method as well. @see PxActor::release() */ virtual void release() = 0; /** \brief Returns the internal actor index. \warning This is only defined for actors that have been added to a scene. \return The internal actor index, or 0xffffffff if the actor is not part of a scene. */ virtual PxU32 getInternalActorIndex() const = 0; /************************************************************************************************/ /** @name Global Pose Manipulation */ /** \brief Retrieves the actors world space transform. The getGlobalPose() method retrieves the actor's current actor space to world space transformation. 
\note It is not allowed to use this method while the simulation is running (except during PxScene::collide(), in PxContactModifyCallback or in contact report callbacks). \return Global pose of object. @see PxRigidDynamic.setGlobalPose() PxRigidStatic.setGlobalPose() */ virtual PxTransform getGlobalPose() const = 0; /** \brief Method for setting an actor's pose in the world. This method instantaneously changes the actor space to world space transformation. This method is mainly for dynamic rigid bodies (see #PxRigidDynamic). Calling this method on static actors is likely to result in a performance penalty, since internal optimization structures for static actors may need to be recomputed. In addition, moving static actors will not interact correctly with dynamic actors or joints. To directly control an actor's position and have it correctly interact with dynamic bodies and joints, create a dynamic body with the PxRigidBodyFlag::eKINEMATIC flag, then use the setKinematicTarget() commands to define its path. Even when moving dynamic actors, exercise restraint in making use of this method. Where possible, avoid: \li moving actors into other actors, thus causing overlap (an invalid physical state) \li moving an actor that is connected by a joint to another away from the other (thus causing joint error) \note It is not allowed to use this method if the actor is part of a #PxPruningStructure that has not been added to a scene yet. <b>Sleeping:</b> This call wakes dynamic actors if they are sleeping and the autowake parameter is true (default). \param[in] pose Transformation from the actors local frame to the global frame. <b>Range:</b> rigid body transform. \param[in] autowake whether to wake the object if it is dynamic. This parameter has no effect for static or kinematic actors. If true and the current wake counter value is smaller than #PxSceneDesc::wakeCounterResetValue it will get increased to the reset value. @see getGlobalPose() */ virtual void setGlobalPose(const PxTransform& pose, bool autowake = true) = 0; /************************************************************************************************/ /** @name Shapes */ /** \brief Attach a shape to an actor This call will increment the reference count of the shape. \note Mass properties of dynamic rigid actors will not automatically be recomputed to reflect the new mass distribution implied by the shape. Follow this call with a call to the PhysX extensions method #PxRigidBodyExt::updateMassAndInertia() to do that. Attaching a triangle mesh, heightfield or plane geometry shape configured as eSIMULATION_SHAPE is not supported for non-kinematic PxRigidDynamic instances. <b>Sleeping:</b> Does <b>NOT</b> wake the actor up automatically. \param[in] shape the shape to attach. \return True if success. */ virtual bool attachShape(PxShape& shape) = 0; /** \brief Detach a shape from an actor. This will also decrement the reference count of the PxShape, and if the reference count is zero, will cause it to be deleted. <b>Sleeping:</b> Does <b>NOT</b> wake the actor up automatically. \param[in] shape the shape to detach. \param[in] wakeOnLostTouch Specifies whether touching objects from the previous frame should get woken up in the next frame. Only applies to PxArticulationReducedCoordinate and PxRigidActor types. */ virtual void detachShape(PxShape& shape, bool wakeOnLostTouch = true) = 0; /** \brief Returns the number of shapes assigned to the actor. You can use #getShapes() to retrieve the shape pointers. 
\return Number of shapes associated with this actor. @see PxShape getShapes() */ virtual PxU32 getNbShapes() const = 0; /** \brief Retrieve all the shape pointers belonging to the actor. These are the shapes used by the actor for collision detection. You can retrieve the number of shape pointers by calling #getNbShapes() Note: Removing shapes with #PxShape::release() will invalidate the pointer of the released shape. \param[out] userBuffer The buffer to store the shape pointers. \param[in] bufferSize Size of provided user buffer. \param[in] startIndex Index of first shape pointer to be retrieved \return Number of shape pointers written to the buffer. @see PxShape getNbShapes() PxShape::release() */ virtual PxU32 getShapes(PxShape** userBuffer, PxU32 bufferSize, PxU32 startIndex=0) const = 0; /************************************************************************************************/ /** @name Constraints */ /** \brief Returns the number of constraint shaders attached to the actor. You can use #getConstraints() to retrieve the constraint shader pointers. \return Number of constraint shaders attached to this actor. @see PxConstraint getConstraints() */ virtual PxU32 getNbConstraints() const = 0; /** \brief Retrieve all the constraint shader pointers belonging to the actor. You can retrieve the number of constraint shader pointers by calling #getNbConstraints() Note: Removing constraint shaders with #PxConstraint::release() will invalidate the pointer of the released constraint. \param[out] userBuffer The buffer to store the constraint shader pointers. \param[in] bufferSize Size of provided user buffer. \param[in] startIndex Index of first constraint pointer to be retrieved \return Number of constraint shader pointers written to the buffer. @see PxConstraint getNbConstraints() PxConstraint::release() */ virtual PxU32 getConstraints(PxConstraint** userBuffer, PxU32 bufferSize, PxU32 startIndex=0) const = 0; protected: PX_INLINE PxRigidActor(PxType concreteType, PxBaseFlags baseFlags) : PxActor(concreteType, baseFlags) {} PX_INLINE PxRigidActor(PxBaseFlags baseFlags) : PxActor(baseFlags) {} virtual ~PxRigidActor() {} virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxRigidActor", PxActor); } }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
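A minimal sketch of the shape-management calls documented above for PxRigidActor; the physics, scene and material objects are assumed to exist already, and the geometry and pose are arbitrary.

static PxRigidDynamic* createSphereActor(PxPhysics& physics, PxScene& scene, PxMaterial& material)
{
    PxRigidDynamic* body = physics.createRigidDynamic(PxTransform(PxVec3(0.0f, 2.0f, 0.0f)));
    PxShape* shape = physics.createShape(PxSphereGeometry(0.5f), material);
    body->attachShape(*shape);   // increments the shape's reference count
    shape->release();            // the actor keeps its own reference
    // Note: mass properties are not recomputed automatically; the extensions
    // function PxRigidBodyExt::updateMassAndInertia() is the usual follow-up.
    scene.addActor(*body);
    return body;
}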
9,550
C
38.962343
263
0.739581
NVIDIA-Omniverse/PhysX/physx/include/PxAnisotropy.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_ANISOTROPY_H #define PX_ANISOTROPY_H /** \addtogroup extensions @{ */ #include "cudamanager/PxCudaContext.h" #include "cudamanager/PxCudaContextManager.h" #include "foundation/PxSimpleTypes.h" #include "foundation/PxVec4.h" #include "PxParticleSystem.h" #include "foundation/PxArray.h" #include "PxParticleGpu.h" #if !PX_DOXYGEN namespace physx { #endif #if PX_SUPPORT_GPU_PHYSX class PxgKernelLauncher; class PxParticleNeighborhoodProvider; /** \brief Computes anisotropy information for a particle system to improve rendering quality */ class PxAnisotropyGenerator { public: /** \brief Schedules the computation of anisotropy information on the specified cuda stream \param[in] gpuParticleSystem A gpu pointer to access particle system data \param[in] numParticles The number of particles \param[in] stream The stream on which the cuda call gets scheduled */ virtual void generateAnisotropy(PxGpuParticleSystem* gpuParticleSystem, PxU32 numParticles, CUstream stream) = 0; /** \brief Schedules the computation of anisotropy information on the specified cuda stream \param[in] particlePositionsGpu A gpu pointer containing the particle positions \param[in] neighborhoodProvider A neighborhood provider object that supports fast neighborhood queries \param[in] numParticles The number of particles \param[in] particleContactOffset The particle contact offset \param[in] stream The stream on which the cuda call gets scheduled */ virtual void generateAnisotropy(PxVec4* particlePositionsGpu, PxParticleNeighborhoodProvider& neighborhoodProvider, PxU32 numParticles, PxReal particleContactOffset, CUstream stream) = 0; /** \brief Set a host buffer that holds the anisotropy data after the timestep completed \param[in] anisotropy1 A host buffer holding the first row of the anisotropy matrix with memory for all particles already allocated \param[in] anisotropy2 A host
buffer holding the second row of the anisotropy matrix with memory for all particles already allocated \param[in] anisotropy3 A host buffer holding the third row of the anisotropy matrix with memory for all particles already allocated */ virtual void setResultBufferHost(PxVec4* anisotropy1, PxVec4* anisotropy2, PxVec4* anisotropy3) = 0; /** \brief Set a device buffer that holds the anisotropy data after the timestep completed \param[in] anisotropy1 A device buffer holding the first row of the anisotropy matrix with memory for all particles already allocated \param[in] anisotropy2 A device buffer holding the second row of the anisotropy matrix with memory for all particles already allocated \param[in] anisotropy3 A device buffer holding the third row of the anisotropy matrix with memory for all particles already allocated */ virtual void setResultBufferDevice(PxVec4* anisotropy1, PxVec4* anisotropy2, PxVec4* anisotropy3) = 0; /** \brief Sets the maximum value anisotropy can reach in any direction \param[in] maxAnisotropy The maximum anisotropy value */ virtual void setAnisotropyMax(float maxAnisotropy) = 0; /** \brief Sets the minimum value anisotropy can reach in any direction \param[in] minAnisotropy The minimum anisotropy value */ virtual void setAnisotropyMin(float minAnisotropy) = 0; /** \brief Sets the anisotropy scale \param[in] anisotropyScale The anisotropy scale */ virtual void setAnisotropyScale(float anisotropyScale) = 0; /** \brief Gets the maximal number of particles \return The maximal number of particles */ virtual PxU32 getMaxParticles() const = 0; /** \brief Sets the maximal number of particles \param[in] maxParticles The maximal number of particles */ virtual void setMaxParticles(PxU32 maxParticles) = 0; /** \brief Gets the device pointer for the anisotropy in x direction. Only available after calling setResultBufferHost or setResultBufferDevice \return The device pointer for the anisotropy x direction and scale (w component contains the scale) */ virtual PxVec4* getAnisotropy1DevicePointer() const = 0; /** \brief Gets the device pointer for the anisotropy in y direction. Only available after calling setResultBufferHost or setResultBufferDevice \return The device pointer for the anisotropy y direction and scale (w component contains the scale) */ virtual PxVec4* getAnisotropy2DevicePointer() const = 0; /** \brief Gets the device pointer for the anisotropy in z direction. Only available after calling setResultBufferHost or setResultBufferDevice \return The device pointer for the anisotropy z direction and scale (w component contains the scale) */ virtual PxVec4* getAnisotropy3DevicePointer() const = 0; /** \brief Enables or disables the anisotropy generator \param[in] enabled The boolean to set the generator to enabled or disabled */ virtual void setEnabled(bool enabled) = 0; /** \brief Queries whether the anisotropy generator is enabled \return True if enabled, false otherwise */ virtual bool isEnabled() const = 0; /** \brief Releases the instance and its data */ virtual void release() = 0; /** \brief Destructor */ virtual ~PxAnisotropyGenerator() {} }; /** \brief Default implementation of a particle system callback to trigger anisotropy calculations. A call to fetchResultsParticleSystem() on the PxScene will synchronize the work such that the caller knows that the post solve task completed.
*/ class PxAnisotropyCallback : public PxParticleSystemCallback { public: /** \brief Initializes the anisotropy callback \param[in] anistropyGenerator The anisotropy generator */ void initialize(PxAnisotropyGenerator* anistropyGenerator) { mAnistropyGenerator = anistropyGenerator; } virtual void onPostSolve(const PxGpuMirroredPointer<PxGpuParticleSystem>& gpuParticleSystem, CUstream stream) { if (mAnistropyGenerator) { mAnistropyGenerator->generateAnisotropy(gpuParticleSystem.mDevicePtr, gpuParticleSystem.mHostPtr->mCommonData.mMaxParticles, stream); } } virtual void onBegin(const PxGpuMirroredPointer<PxGpuParticleSystem>& /*gpuParticleSystem*/, CUstream /*stream*/) { } virtual void onAdvance(const PxGpuMirroredPointer<PxGpuParticleSystem>& /*gpuParticleSystem*/, CUstream /*stream*/) { } private: PxAnisotropyGenerator* mAnistropyGenerator; }; #endif #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
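A wiring sketch for the generator and callback above. The generator instance and the three host result buffers (each sized for at least getMaxParticles() entries) are assumed to have been created elsewhere, and the call that registers the callback on the particle system (setParticleSystemCallback) is an assumption about the particle-system API rather than something declared in this header.

static void enableAnisotropy(PxAnisotropyGenerator& generator, PxAnisotropyCallback& callback,
                             PxParticleSystem& particleSystem,
                             PxVec4* row0, PxVec4* row1, PxVec4* row2)
{
    generator.setAnisotropyMax(2.0f);
    generator.setAnisotropyMin(0.2f);
    generator.setResultBufferHost(row0, row1, row2);      // per-particle rows copied here after each step
    callback.initialize(&generator);
    particleSystem.setParticleSystemCallback(&callback);  // onPostSolve() then triggers generateAnisotropy()
}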
8,159
C
35.591928
189
0.769089
NVIDIA-Omniverse/PhysX/physx/include/PxArrayConverter.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_ARRAY_CONVERTER_H #define PX_ARRAY_CONVERTER_H #include "cudamanager/PxCudaContext.h" #include "cudamanager/PxCudaContextManager.h" #include "foundation/PxSimpleTypes.h" #include "foundation/PxVec4.h" #if !PX_DOXYGEN namespace physx { #endif #if PX_SUPPORT_GPU_PHYSX /** \brief Utility class to convert gpu arrays to a different memory layout */ class PxArrayConverter { public: /** \brief Helper function to merge two separate PxVec4 arrays into one interleaved PxVec3 array \param[in] verticesD The vertices device memory buffer \param[in] normalsD The normals device memory buffer \param[in] length The number of vertices and normals \param[out] interleavedResultBufferD The resulting interleaved buffer containing 2*length elements with the format vertex0, normal0, vertex1, normal1... \param[in] stream The cuda stream on which the conversion is processed */ virtual void interleaveGpuBuffers(const PxVec4* verticesD, const PxVec4* normalsD, PxU32 length, PxVec3* interleavedResultBufferD, CUstream stream) = 0; /** \brief Helper function to convert the hair system's strand representation to a line list. The conversion is done on the GPU. 
\param[in] verticesD The strand vertices device memory buffer \param[in] numVertices The total number of vertices \param[in] strandPastEndIndicesD One index per strand (device memory array) to find out where the next strand starts \param[in] numStrands The number of strands \param[out] resultD A device memory buffer with 2*numVertices capacity describing line segments, where line i extends from resultD[2*i] to resultD[2*i+1] \param[in] stream The cuda stream on which the conversion is processed */ virtual void extractLinesFromStrands(const PxVec4* verticesD, PxU32 numVertices, const PxU32* strandPastEndIndicesD, PxU32 numStrands, PxVec4* resultD, CUstream stream) = 0; /** \brief Helper function to convert the hair system's strand representation to a line list. The conversion is done on the GPU. \param[in] verticesD The strand vertices device memory buffer \param[in] numVertices The total number of vertices \param[in] strandPastEndIndicesD One index per strand (device memory array) to find out where the next strand starts \param[in] numStrands The number of strands \param[out] resultD A device memory buffer with 2*numVertices capacity describing line segments, where line i extends from resultD[2*i] to resultD[2*i+1] \param[in] stream The cuda stream on which the conversion is processed */ virtual void extractLinesFromStrands(const PxVec3* verticesD, PxU32 numVertices, const PxU32* strandPastEndIndicesD, PxU32 numStrands, PxVec3* resultD, CUstream stream) = 0; /** \brief Destructor */ virtual ~PxArrayConverter() {} }; #endif #if !PX_DOXYGEN } // namespace physx #endif #endif
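A hedged sketch of using the converter above. How a PxArrayConverter instance is obtained is outside the scope of this header, so 'converter' is assumed to exist, as are the CUDA device buffers and stream.
// Sketch: interleave device-resident positions and normals for a graphics vertex buffer.
// Assumptions: converter (PxArrayConverter*), verticesD/normalsD (PxVec4*, device memory),
// interleavedD (PxVec3*, device memory, capacity 2 * numVertices) and stream already exist.
const PxU32 numVertices = 4096;
converter->interleaveGpuBuffers(verticesD, normalsD, numVertices, interleavedD, stream);
// interleavedD now holds vertex0, normal0, vertex1, normal1, ... as documented above.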
4,542
C
44.888888
154
0.768384
NVIDIA-Omniverse/PhysX/physx/include/PxArticulationReducedCoordinate.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_ARTICULATION_RC_H #define PX_ARTICULATION_RC_H /** \addtogroup physics @{ */ #include "PxPhysXConfig.h" #include "common/PxBase.h" #include "foundation/PxVec3.h" #include "foundation/PxTransform.h" #include "solver/PxSolverDefs.h" #include "PxArticulationFlag.h" #include "PxArticulationTendon.h" #include "PxArticulationFlag.h" #if !PX_DOXYGEN namespace physx { #endif PX_ALIGN_PREFIX(16) /** \brief Data structure to represent spatial forces. */ struct PxSpatialForce { PxVec3 force; PxReal pad0; PxVec3 torque; PxReal pad1; } PX_ALIGN_SUFFIX(16); PX_ALIGN_PREFIX(16) /** \brief Data structure to represent spatial velocities. */ struct PxSpatialVelocity { PxVec3 linear; PxReal pad0; PxVec3 angular; PxReal pad1; } PX_ALIGN_SUFFIX(16); class PxConstraint; class PxScene; /** \brief Data structure used to access the root link state and acceleration. @see PxArticulationCache */ struct PxArticulationRootLinkData { PxTransform transform; //!< Actor transform // The velocities and accelerations below are with respect to the center of mass (COM) of the root link. The COM and actor frame origin may not coincide. PxVec3 worldLinVel; //!< Link linear velocity PxVec3 worldAngVel; //!< Link angular velocity PxVec3 worldLinAccel; //!< Link classical linear acceleration PxVec3 worldAngAccel; //!< Link angular acceleration }; /** \brief Data structure used to read and write internal articulation data. 
@see PxArticulationCacheFlag, PxArticulationReducedCoordinate::createCache, PxArticulationReducedCoordinate::applyCache, PxArticulationReducedCoordinate::copyInternalStateToCache */ class PxArticulationCache { public: PxArticulationCache() : externalForces (NULL), denseJacobian (NULL), massMatrix (NULL), jointVelocity (NULL), jointAcceleration (NULL), jointPosition (NULL), jointForce (NULL), jointSolverForces (NULL), jointTargetPositions (NULL), jointTargetVelocities (NULL), linkVelocity (NULL), linkAcceleration (NULL), linkIncomingJointForce (NULL), rootLinkData (NULL), sensorForces (NULL), coefficientMatrix (NULL), lambda (NULL), scratchMemory (NULL), scratchAllocator (NULL), version (0) {} /** \brief Releases an articulation cache. @see PxArticulationReducedCoordinate::createCache, PxArticulationReducedCoordinate::applyCache, PxArticulationReducedCoordinate::copyInternalStateToCache */ PX_PHYSX_CORE_API void release(); /** \brief External forces acting on the articulation links for inverse dynamics computation. - N = getNbLinks(). - Indexing follows the low-level link indices, see PxArticulationLink::getLinkIndex. - The forces are with respect to the center of mass of the link. @see PxArticulationReducedCoordinate::computeGeneralizedExternalForce */ PxSpatialForce* externalForces; /** \brief Dense Jacobian data. - N = nbRows * nbCols = (6 * getNbLinks()) * (6 + getDofs()) -> size includes possible floating-base DOFs regardless of PxArticulationFlag::eFIX_BASE flag. - The links, i.e. rows are in order of the low-level link indices (minus one if PxArticulationFlag::eFIX_BASE is true), see PxArticulationLink::getLinkIndex. The corresponding spatial velocities are stacked [vx; vy; vz; wx; wy; wz], where vx and wx refer to the linear and rotational velocity in world X. - The DOFs, i.e. column indices correspond to the low-level DOF indices, see PxArticulationCache::jointVelocity. @see PxArticulationReducedCoordinate::computeDenseJacobian */ PxReal* denseJacobian; /** \brief The generalized mass matrix that maps joint accelerations to joint forces. - N = getDofs() * getDofs(). - The indexing follows the internal DOF index order, see PxArticulationCache::jointVelocity. @see PxArticulationReducedCoordinate::computeGeneralizedMassMatrix */ PxReal* massMatrix; /** \brief The articulation joint DOF velocities. - N = getDofs(). - Read/write using PxArticulationCacheFlag::eVELOCITY. - The indexing follows the internal DOF index order. Therefore, the application should calculate the DOF data indices by summing the joint DOFs in the order of the links' low-level indices (see the manual Section "Cache Indexing" for a snippet for this calculation): \verbatim Low-level link index: | link 0 | link 1 | link 2 | link 3 | ... | <- PxArticulationLink::getLinkIndex() \endverbatim \verbatim Link inbound joint DOF: | 0 | 1 | 2 | 1 | ... | <- PxArticulationLink::getInboundJointDof() \endverbatim \verbatim Low-level DOF index: | - | 0 | 1, 2 | 3 | ... | \endverbatim The root link always has low-level index 0 and always has zero inbound joint DOFs. The link DOF indexing follows the order in PxArticulationAxis::Enum. For example, assume that low-level link 2 has an inbound spherical joint with two DOFs: eSWING1 and eSWING2. The corresponding low-level joint DOF indices are therefore 1 for eSWING1 and 2 for eSWING2. */ PxReal* jointVelocity; /** \brief The articulation joint DOF accelerations. - N = getDofs(). - Read using PxArticulationCacheFlag::eACCELERATION. 
- The indexing follows the internal DOF index order, see PxArticulationCache::jointVelocity. - Delta joint DOF velocities can be computed from acceleration * dt. */ PxReal* jointAcceleration; /** \brief The articulation joint DOF positions. - N = getDofs(). - Read/write using PxArticulationCacheFlag::ePOSITION. - The indexing follows the internal DOF index order, see PxArticulationCache::jointVelocity. - For spherical joints, the joint position for each axis on the joint must be in range [-Pi, Pi]. */ PxReal* jointPosition; /** \brief The articulation joint DOF forces. - N = getDofs(). - Read/Write using PxArticulationCacheFlag::eFORCE. - The indexing follows the internal DOF index order, see PxArticulationCache::jointVelocity. - Applied joint forces persist and are applied each frame until changed. */ PxReal* jointForce; /** @deprecated Use linkIncomingJointForce instead. \brief Solver constraint joint DOF forces. - N = getDofs(). - Read using PxArticulationCacheFlag::eJOINT_SOLVER_FORCES. - The indexing follows the internal DOF index order, see PxArticulationCache::jointVelocity. - Raise PxArticulationFlag::eCOMPUTE_JOINT_FORCES to enable reading the solver forces. */ PX_DEPRECATED PxReal* jointSolverForces; /** \brief The articulation joint drive target positions. - N = getDofs(). - Write using PxArticulationCacheFlag::eJOINT_TARGET_POSITIONS. - The indexing follows the internal DOF index order, see PxArticulationCache::jointVelocity. */ PxReal* jointTargetPositions; /** \brief The articulation joint drive target velocities. - N = getDofs(). - Write using PxArticulationCacheFlag::eJOINT_TARGET_VELOCITIES. - The indexing follows the internal DOF index order, see PxArticulationCache::jointVelocity. */ PxReal* jointTargetVelocities; /** \brief Link spatial velocity. - N = getNbLinks(). - Read using PxArticulationCacheFlag::eLINK_VELOCITY. - The indexing follows the internal link indexing, see PxArticulationLink::getLinkIndex. - The velocity is with respect to the link's center of mass. @see PxRigidBody::getCMassLocalPose */ PxSpatialVelocity* linkVelocity; /** \brief Link classical acceleration. - N = getNbLinks(). - Read using PxArticulationCacheFlag::eLINK_ACCELERATION. - The indexing follows the internal link indexing, see PxArticulationLink::getLinkIndex. - The acceleration is with respect to the link's center of mass. @see PxArticulationReducedCoordinate::getLinkAcceleration, PxRigidBody::getCMassLocalPose */ PxSpatialVelocity* linkAcceleration; /** \brief Link incoming joint force, i.e. the total force transmitted from the parent link to this link. - N = getNbLinks(). - Read using PxArticulationCacheFlag::eLINK_INCOMING_JOINT_FORCE. - The indexing follows the internal link indexing, see PxArticulationLink::getLinkIndex. - The force is reported in the child joint frame of the link's incoming joint. @see PxArticulationJointReducedCoordinate::getChildPose \note The root link reports a zero spatial force. */ PxSpatialForce* linkIncomingJointForce; /** \brief Root link transform, velocities, and accelerations. - N = 1. - Read/write using PxArticulationCacheFlag::eROOT_TRANSFORM and PxArticulationCacheFlag::eROOT_VELOCITIES (accelerations are read-only). @see PxArticulationRootLinkData */ PxArticulationRootLinkData* rootLinkData; /** @deprecated \brief Link sensor spatial forces. - N = getNbSensors(). - Read using PxArticulationCacheFlag::eSENSOR_FORCES. - For indexing, see PxArticulationSensor::getIndex. 
@see PxArticulationSensor */ PX_DEPRECATED PxSpatialForce* sensorForces; // Members and memory below here are not zeroed when zeroCache is called, and are not included in the size returned by PxArticulationReducedCoordinate::getCacheDataSize. /** \brief Constraint coefficient matrix. - N = getCoefficentMatrixSize(). - The user needs to allocate memory and set this member to the allocated memory. @see PxArticulationReducedCoordinate::computeCoefficientMatrix */ PxReal* coefficientMatrix; /** \brief Constraint lambda values (impulses applied by the respective constraints). - N = getNbLoopJoints(). - The user needs to allocate memory and set this member to the allocated memory. @see PxArticulationReducedCoordinate::computeLambda */ PxReal* lambda; void* scratchMemory; //!< The scratch memory is used for internal calculations. void* scratchAllocator; //!< The scratch allocator is used for internal calculations. PxU32 version; //!< The cache version used internally to check compatibility with the articulation, i.e. detect if the articulation configuration changed after the cache was created. }; /** @deprecated \brief Flags to configure the forces reported by articulation link sensors. @see PxArticulationSensor::setFlag */ struct PX_DEPRECATED PxArticulationSensorFlag { enum Enum { eFORWARD_DYNAMICS_FORCES = 1 << 0, //!< Raise to receive forces from forward dynamics. eCONSTRAINT_SOLVER_FORCES = 1 << 1, //!< Raise to receive forces from constraint solver. eWORLD_FRAME = 1 << 2 //!< Raise to receive forces in the world rotation frame, otherwise they will be reported in the sensor's local frame. }; }; typedef PX_DEPRECATED physx::PxFlags<PxArticulationSensorFlag::Enum, PxU8> PxArticulationSensorFlags; /** @deprecated \brief A force sensor that can be attached to articulation links to measure spatial force. @see PxArticulationReducedCoordinate::createSensor */ class PX_DEPRECATED PxArticulationSensor : public PxBase { public: /** \brief Releases the sensor. \note Releasing a sensor is not allowed while the articulation is in a scene. In order to release a sensor, remove and then re-add the articulation to the scene. */ virtual void release() = 0; /** \brief Returns the spatial force in the local frame of the sensor. \return The spatial force. \note This call is not allowed while the simulation is running except in a split simulation during #PxScene::collide() and up to #PxScene::advance(), and in PxContactModifyCallback or in contact report callbacks. @see setRelativePose, getRelativePose */ virtual PxSpatialForce getForces() const = 0; /** \brief Returns the relative pose between this sensor and the body frame of the link that the sensor is attached to. The link body frame is at the center of mass and aligned with the principal axes of inertia, see PxRigidBody::getCMassLocalPose. \return The transform link body frame -> sensor frame. @see setRelativePose */ virtual PxTransform getRelativePose() const = 0; /** \brief Sets the relative pose between this sensor and the body frame of the link that the sensor is attached to. The link body frame is at the center of mass and aligned with the principal axes of inertia, see PxRigidBody::getCMassLocalPose. \param[in] pose The transform link body frame -> sensor frame. \note Setting the sensor relative pose is not allowed while the articulation is in a scene. In order to set the pose, remove and then re-add the articulation to the scene. 
@see getRelativePose */ virtual void setRelativePose(const PxTransform& pose) = 0; /** \brief Returns the link that this sensor is attached to. \return A pointer to the link. */ virtual PxArticulationLink* getLink() const = 0; /** \brief Returns the index of this sensor inside the articulation. The return value is only valid for sensors attached to articulations that are in a scene. \return The low-level index, or 0xFFFFFFFF if the articulation is not in a scene. */ virtual PxU32 getIndex() const = 0; /** \brief Returns the articulation that this sensor is part of. \return A pointer to the articulation. */ virtual PxArticulationReducedCoordinate* getArticulation() const = 0; /** \brief Returns the sensor's flags. \return The current set of flags of the sensor. @see PxArticulationSensorFlag */ virtual PxArticulationSensorFlags getFlags() const = 0; /** \brief Sets a flag of the sensor. \param[in] flag The flag to set. \param[in] enabled The value to set the flag to. \note Setting the sensor flags is not allowed while the articulation is in a scene. In order to set the flags, remove and then re-add the articulation to the scene. @see PxArticulationSensorFlag */ virtual void setFlag(PxArticulationSensorFlag::Enum flag, bool enabled) = 0; /** \brief Returns the string name of the dynamic type. \return The string name. */ virtual const char* getConcreteTypeName() const { return "PxArticulationSensor"; } virtual ~PxArticulationSensor() {} void* userData; //!< user can assign this to whatever, usually to create a 1:1 relationship with a user object. protected: PX_INLINE PxArticulationSensor(PxType concreteType, PxBaseFlags baseFlags) : PxBase(concreteType, baseFlags) {} PX_INLINE PxArticulationSensor(PxBaseFlags baseFlags) : PxBase(baseFlags) {} }; /** \brief Flag that configures articulation-state updates by PxArticulationReducedCoordinate::updateKinematic. */ struct PxArticulationKinematicFlag { enum Enum { ePOSITION = 1 << 0, //!< Raise after any changes to the articulation root or joint positions using non-cache API calls. Updates links' positions and velocities. eVELOCITY = 1 << 1 //!< Raise after velocity-only changes to the articulation root or joints using non-cache API calls. Updates links' velocities. }; }; typedef physx::PxFlags<PxArticulationKinematicFlag::Enum, PxU8> PxArticulationKinematicFlags; PX_FLAGS_OPERATORS(PxArticulationKinematicFlag::Enum, PxU8) #if PX_VC #pragma warning(push) #pragma warning(disable : 4435) #endif /** \brief A tree structure of bodies connected by joints that is treated as a unit by the dynamics solver. Parametrized in reduced (joint) coordinates. @see PxArticulationJointReducedCoordinate, PxArticulationLink, PxPhysics::createArticulationReducedCoordinate */ class PxArticulationReducedCoordinate : public PxBase { public: /** \brief Returns the scene which this articulation belongs to. \return Owner Scene. NULL if not part of a scene. @see PxScene */ virtual PxScene* getScene() const = 0; /** \brief Sets the solver iteration counts for the articulation. The solver iteration count determines how accurately contacts, drives, and limits are resolved. Setting a higher position iteration count may therefore help in scenarios where the articulation is subject to many constraints; for example, a manipulator articulation with drives and joint limits that is grasping objects, or several such articulations interacting through contacts. 
Other situations where higher position iterations may improve simulation fidelity are: large mass ratios within the articulation or between the articulation and an object in contact with it; or strong drives in the articulation being used to manipulate a light object. If intersecting bodies are being depenetrated too violently, increase the number of velocity iterations. More velocity iterations will drive the relative exit velocity of the intersecting objects closer to the correct value given the restitution. \param[in] minPositionIters Number of position iterations the solver should perform for this articulation. <b>Range:</b> [1,255]. <b>Default:</b> 4. \param[in] minVelocityIters Number of velocity iterations the solver should perform for this articulation. <b>Range:</b> [0,255]. <b>Default:</b> 1 \note This call may not be made during simulation. @see getSolverIterationCounts() */ virtual void setSolverIterationCounts(PxU32 minPositionIters, PxU32 minVelocityIters = 1) = 0; /** \brief Returns the solver iteration counts. @see setSolverIterationCounts() */ virtual void getSolverIterationCounts(PxU32& minPositionIters, PxU32& minVelocityIters) const = 0; /** \brief Returns true if this articulation is sleeping. When an actor does not move for a period of time, it is no longer simulated in order to reduce computational cost. This state is called sleeping. However, because the object automatically wakes up when it is either touched by an awake object, or a sleep-affecting property is changed by the user, the entire sleep mechanism should be transparent to the user. An articulation can only go to sleep if all links are ready for sleeping. An articulation is guaranteed to be awake if at least one of the following holds: \li The wake counter of any link in the articulation is positive (see #setWakeCounter()). \li The mass-normalized energy of any link in the articulation is above a threshold (see #setSleepThreshold()). \li A non-zero force or torque has been applied to any joint or link. If an articulation is sleeping, the following state is guaranteed: \li The wake counter is zero. \li The linear and angular velocity of all links is zero. \li There is no force update pending. When an articulation gets inserted into a scene, it will be considered asleep if all the points above hold, else it will be treated as awake. If an articulation is asleep after the call to #PxScene::fetchResults() returns, it is guaranteed that the poses of the links were not changed. You can use this information to avoid updating the transforms of associated objects. \return True if the articulation is sleeping. \note This call may only be made on articulations that are in a scene, and may not be made during simulation, except in a split simulation in-between #PxScene::fetchCollision and #PxScene::advance. @see wakeUp() putToSleep() getSleepThreshold() setSleepThreshold() */ virtual bool isSleeping() const = 0; /** \brief Sets the mass-normalized energy threshold below which the articulation may go to sleep. The articulation will sleep if the energy of each link is below this threshold. \param[in] threshold Energy below which the articulation may go to sleep. <b>Range:</b> [0, PX_MAX_F32) \note This call may not be made during simulation. 
<b>Default:</b> 5e-5f * PxTolerancesScale::speed * PxTolerancesScale::speed; @see isSleeping() getSleepThreshold() wakeUp() putToSleep() */ virtual void setSleepThreshold(PxReal threshold) = 0; /** \brief Returns the mass-normalized energy below which the articulation may go to sleep. \return The energy threshold for sleeping. @see isSleeping() wakeUp() putToSleep() setSleepThreshold() */ virtual PxReal getSleepThreshold() const = 0; /** \brief Sets the mass-normalized kinetic energy threshold below which the articulation may participate in stabilization. Articulations whose kinetic energy divided by their mass is above this threshold will not participate in stabilization. This value has no effect if PxSceneFlag::eENABLE_STABILIZATION was not enabled on the PxSceneDesc. <b>Default:</b> 5e-6f * PxTolerancesScale::speed * PxTolerancesScale::speed \param[in] threshold Energy below which the articulation may participate in stabilization. <b>Range:</b> [0,inf) \note This call may not be made during simulation. @see getStabilizationThreshold() PxSceneFlag::eENABLE_STABILIZATION */ virtual void setStabilizationThreshold(PxReal threshold) = 0; /** \brief Returns the mass-normalized kinetic energy below which the articulation may participate in stabilization. Articulations whose kinetic energy divided by their mass is above this threshold will not participate in stabilization. \return The energy threshold for participating in stabilization. @see setStabilizationThreshold() PxSceneFlag::eENABLE_STABILIZATION */ virtual PxReal getStabilizationThreshold() const = 0; /** \brief Sets the wake counter for the articulation in seconds. - The wake counter value specifies a time threshold used to determine whether an articulation may be put to sleep. - The articulation will be put to sleep if all links have experienced a mass-normalised energy less than a threshold for at least a threshold time, as specified by the wake counter. - Passing in a positive value will wake up the articulation automatically. <b>Default:</b> 0.4s (which corresponds to 20 frames for a time step of 0.02s) \param[in] wakeCounterValue Wake counter value in seconds. <b>Range:</b> [0, PX_MAX_F32) \note This call may not be made during simulation, except in a split simulation in-between #PxScene::fetchCollision and #PxScene::advance. @see isSleeping() getWakeCounter() */ virtual void setWakeCounter(PxReal wakeCounterValue) = 0; /** \brief Returns the wake counter of the articulation in seconds. \return The wake counter of the articulation in seconds. \note This call may not be made during simulation, except in a split simulation in-between #PxScene::fetchCollision and #PxScene::advance. @see isSleeping() setWakeCounter() */ virtual PxReal getWakeCounter() const = 0; /** \brief Wakes up the articulation if it is sleeping. - The articulation will be woken up and might cause other touching objects to wake up as well during the next simulation step. - This will set the wake counter of the articulation to the value specified in #PxSceneDesc::wakeCounterResetValue. \note This call may only be made on articulations that are in a scene, and may not be made during simulation, except in a split simulation in-between #PxScene::fetchCollision and #PxScene::advance. @see isSleeping() putToSleep() */ virtual void wakeUp() = 0; /** \brief Forces the articulation to sleep. - The articulation will stay asleep during the next simulation step if not touched by another non-sleeping actor. 
- This will set any applied force, the velocity, and the wake counter of all bodies in the articulation to zero. \note This call may not be made during simulation, and may only be made on articulations that are in a scene. @see isSleeping() wakeUp() */ virtual void putToSleep() = 0; /** \brief Sets the limit on the magnitude of the linear velocity of the articulation's center of mass. - The limit acts on the linear velocity of the entire articulation. The velocity is calculated from the total momentum and the spatial inertia of the articulation. - The limit only applies to floating-base articulations. - A benefit of the COM velocity limit is that it is evenly applied to the whole articulation, which results in fewer visual artifacts compared to link rigid-body damping or joint-velocity limits. However, these per-link or per-degree-of-freedom limits may still help avoid numerical issues. \note This call may not be made during simulation. \param[in] maxLinearVelocity The maximal linear velocity magnitude. <b>Range:</b> [0, PX_MAX_F32); <b>Default:</b> 1e+6. @see setMaxCOMAngularVelocity, PxRigidBody::setLinearDamping, PxRigidBody::setAngularDamping, PxArticulationJointReducedCoordinate::setMaxJointVelocity */ PX_DEPRECATED virtual void setMaxCOMLinearVelocity(const PxReal maxLinearVelocity) = 0; /** \brief Gets the limit on the magnitude of the linear velocity of the articulation's center of mass. \return The maximal linear velocity magnitude. @see setMaxCOMLinearVelocity */ PX_DEPRECATED virtual PxReal getMaxCOMLinearVelocity() const = 0; /** \brief Sets the limit on the magnitude of the angular velocity at the articulation's center of mass. - The limit acts on the angular velocity of the entire articulation. The velocity is calculated from the total momentum and the spatial inertia of the articulation. - The limit only applies to floating-base articulations. - A benefit of the COM velocity limit is that it is evenly applied to the whole articulation, which results in fewer visual artifacts compared to link rigid-body damping or joint-velocity limits. However, these per-link or per-degree-of-freedom limits may still help avoid numerical issues. \note This call may not be made during simulation. \param[in] maxAngularVelocity The maximal angular velocity magnitude. <b>Range:</b> [0, PX_MAX_F32); <b>Default:</b> 1e+6 @see setMaxCOMLinearVelocity, PxRigidBody::setLinearDamping, PxRigidBody::setAngularDamping, PxArticulationJointReducedCoordinate::setMaxJointVelocity */ PX_DEPRECATED virtual void setMaxCOMAngularVelocity(const PxReal maxAngularVelocity) = 0; /** \brief Gets the limit on the magnitude of the angular velocity at the articulation's center of mass. \return The maximal angular velocity magnitude. @see setMaxCOMAngularVelocity */ PX_DEPRECATED virtual PxReal getMaxCOMAngularVelocity() const = 0; /** \brief Adds a link to the articulation with default attribute values. \param[in] parent The parent link in the articulation. Must be NULL if (and only if) this is the root link. \param[in] pose The initial pose of the new link. Must be a valid transform. \return The new link, or NULL if the link cannot be created. \note Creating a link is not allowed while the articulation is in a scene. In order to add a link, remove and then re-add the articulation to the scene. \note When the articulation is added to a scene, the root link adopts the specified pose. 
The pose of the root link is propagated through the ensemble of links from parent to child after accounting for each child's inbound joint frames and the joint positions set by PxArticulationJointReducedCoordinate::setJointPosition(). As a consequence, the pose of each non-root link is automatically overwritten when adding the articulation to the scene. @see PxArticulationLink */ virtual PxArticulationLink* createLink(PxArticulationLink* parent, const PxTransform& pose) = 0; /** \brief Releases the articulation, and all its links and corresponding joints. Attached sensors and tendons are released automatically when the articulation is released. \note This call may not be made during simulation. \note This call does not release any PxArticulationCache instance that has been instantiated using #createCache() */ virtual void release() = 0; /** \brief Returns the number of links in the articulation. \return The number of links. */ virtual PxU32 getNbLinks() const = 0; /** \brief Returns the set of links in the articulation in the order that they were added to the articulation using createLink. \param[in] userBuffer Buffer into which to write the array of articulation link pointers. \param[in] bufferSize The size of the buffer. If the buffer is not large enough to contain all the pointers to links, only as many as will fit are written. \param[in] startIndex Index of first link pointer to be retrieved. \return The number of links written into the buffer. @see PxArticulationLink */ virtual PxU32 getLinks(PxArticulationLink** userBuffer, PxU32 bufferSize, PxU32 startIndex = 0) const = 0; /** \brief Returns the number of shapes in the articulation. \return The number of shapes. */ virtual PxU32 getNbShapes() const = 0; /** \brief Sets a name string for the articulation that can be retrieved with getName(). This is for debugging and is not used by the SDK. The string is not copied by the SDK, only the pointer is stored. \param[in] name A pointer to a char buffer used to specify the name of the articulation. @see getName() */ virtual void setName(const char* name) = 0; /** \brief Returns the name string set with setName(). \return Name string associated with the articulation. @see setName() */ virtual const char* getName() const = 0; /** \brief Returns the axis-aligned bounding box enclosing the articulation. \param[in] inflation Scale factor for computed world bounds. Box extents are multiplied by this value. \return The articulation's bounding box. \note It is not allowed to use this method while the simulation is running, except in a split simulation during #PxScene::collide() and up to #PxScene::advance(), and in PxContactModifyCallback or in contact report callbacks. @see PxBounds3 */ virtual PxBounds3 getWorldBounds(float inflation = 1.01f) const = 0; /** \brief Returns the aggregate associated with the articulation. \return The aggregate associated with the articulation or NULL if the articulation does not belong to an aggregate. @see PxAggregate */ virtual PxAggregate* getAggregate() const = 0; /** \brief Sets flags on the articulation. \param[in] flags The articulation flags. \note This call may not be made during simulation. @see PxArticulationFlag */ virtual void setArticulationFlags(PxArticulationFlags flags) = 0; /** \brief Raises or clears a flag on the articulation. \param[in] flag The articulation flag. \param[in] value The value to set the flag to. \note This call may not be made during simulation. 
@see PxArticulationFlag */ virtual void setArticulationFlag(PxArticulationFlag::Enum flag, bool value) = 0; /** \brief Returns the articulation's flags. \return The articulation's flags. @see PxArticulationFlag */ virtual PxArticulationFlags getArticulationFlags() const = 0; /** \brief Returns the total number of joint degrees-of-freedom (DOFs) of the articulation. - The six DOFs of the base of a floating-base articulation are not included in this count. - Example: Both a fixed-base and a floating-base double-pendulum with two revolute joints will have getDofs() == 2. - The return value is only valid for articulations that are in a scene. \return The number of joint DOFs, or 0xFFFFFFFF if the articulation is not in a scene. */ virtual PxU32 getDofs() const = 0; /** \brief Creates an articulation cache that can be used to read and write internal articulation data. - When the structure of the articulation changes (e.g. adding a link or sensor) after the cache was created, the cache needs to be released and recreated. - Free the memory allocated for the cache by calling the release() method on the cache. - Caches can only be created by articulations that are in a scene. \return The cache, or NULL if the articulation is not in a scene. @see applyCache, copyInternalStateToCache */ virtual PxArticulationCache* createCache() const = 0; /** \brief Returns the size of the articulation cache in bytes. - The size does not include: the user-allocated memory for the coefficient matrix or lambda values; the scratch-related memory/members; and the cache version. See comment in #PxArticulationCache. - The return value is only valid for articulations that are in a scene. \return The byte size of the cache, or 0xFFFFFFFF if the articulation is not in a scene. @see PxArticulationCache */ virtual PxU32 getCacheDataSize() const = 0; /** \brief Zeroes all data in the articulation cache, except user-provided and scratch memory, and cache version. \note This call may only be made on articulations that are in a scene. @see PxArticulationCache */ virtual void zeroCache(PxArticulationCache& cache) const = 0; /** \brief Applies the data in the cache to the articulation. This call wakes the articulation if it is sleeping, and the autowake parameter is true (default) or: - a nonzero joint velocity is applied or - a nonzero joint force is applied or - a nonzero root velocity is applied \param[in] cache The articulation data. \param[in] flags Indicate which data in the cache to apply to the articulation. \param[in] autowake If true, the call wakes up the articulation and increases the wake counter to #PxSceneDesc::wakeCounterResetValue if the counter value is below the reset value. \note This call may only be made on articulations that are in a scene, and may not be made during simulation. @see PxArticulationCache, PxArticulationCacheFlags, createCache, copyInternalStateToCache, PxScene::applyArticulationData */ virtual void applyCache(PxArticulationCache& cache, const PxArticulationCacheFlags flags, bool autowake = true) = 0; /** \brief Copies internal data of the articulation to the cache. \param[in] cache The articulation data. \param[in] flags Indicate which data to copy from the articulation to the cache. \note This call may only be made on articulations that are in a scene, and may not be made during simulation. 
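For illustration, a minimal read-modify-write round trip (a sketch only; the articulation is assumed to be in a scene and error handling is omitted):
\verbatim
PxArticulationCache* cache = articulation->createCache();
articulation->copyInternalStateToCache(*cache, PxArticulationCacheFlags(PxArticulationCacheFlag::ePOSITION));
cache->jointPosition[0] += 0.1f; // first DOF in low-level link order, see PxArticulationCache::jointVelocity
articulation->applyCache(*cache, PxArticulationCacheFlags(PxArticulationCacheFlag::ePOSITION));
cache->release();
\endverbatim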
@see PxArticulationCache, PxArticulationCacheFlags, createCache, applyCache */ virtual void copyInternalStateToCache(PxArticulationCache& cache, const PxArticulationCacheFlags flags) const = 0; /** \brief Converts maximal-coordinate joint DOF data to reduced coordinates. - Indexing into the maximal joint DOF data is via the link's low-level index minus 1 (the root link is not included). - The reduced-coordinate data follows the cache indexing convention, see PxArticulationCache::jointVelocity. \param[in] maximum The maximal-coordinate joint DOF data with minimum array length N = (getNbLinks() - 1) * 6 \param[out] reduced The reduced-coordinate joint DOF data with minimum array length N = getDofs() \note The articulation must be in a scene. \note This can be used as a helper function to prepare per joint cache data such as PxArticulationCache::jointVelocity. @see unpackJointData */ virtual void packJointData(const PxReal* maximum, PxReal* reduced) const = 0; /** \brief Converts reduced-coordinate joint DOF data to maximal coordinates. - Indexing into the maximal joint DOF data is via the link's low-level index minus 1 (the root link is not included). - The reduced-coordinate data follows the cache indexing convention, see PxArticulationCache::jointVelocity. \param[in] reduced The reduced-coordinate joint DOF data with minimum array length N = getDofs(). \param[out] maximum The maximal-coordinate joint DOF data with minimum array length N = (getNbLinks() - 1) * 6. \note The articulation must be in a scene. @see packJointData */ virtual void unpackJointData(const PxReal* reduced, PxReal* maximum) const = 0; /** \brief Prepares common articulation data based on articulation pose for inverse dynamics calculations. Usage: -# Set articulation pose (joint positions and base transform) via articulation cache and applyCache(). -# Call commonInit. -# Call inverse dynamics computation method. \note This call may only be made on articulations that are in a scene, and may not be made during simulation. @see computeGeneralizedGravityForce, computeCoriolisAndCentrifugalForce */ virtual void commonInit() const = 0; /** \brief Computes the joint DOF forces required to counteract gravitational forces for the given articulation pose. - Inputs: Articulation pose (joint positions + base transform). - Outputs: Joint forces to counteract gravity (in cache). - The joint forces returned are determined purely by gravity for the articulation in the current joint and base pose, and joints at rest; i.e. external forces, joint velocities, and joint accelerations are set to zero. Joint drives are also not considered in the computation. - commonInit() must be called before the computation, and after setting the articulation pose via applyCache(). \param[out] cache Out: PxArticulationCache::jointForce. \note This call may only be made on articulations that are in a scene, and may not be made during simulation. @see commonInit */ virtual void computeGeneralizedGravityForce(PxArticulationCache& cache) const = 0; /** \brief Computes the joint DOF forces required to counteract Coriolis and centrifugal forces for the given articulation state. - Inputs: Articulation state (joint positions and velocities (in cache), and base transform and spatial velocity). - Outputs: Joint forces to counteract Coriolis and centrifugal forces (in cache). - The joint forces returned are determined purely by the articulation's state; i.e. external forces, gravity, and joint accelerations are set to zero. 
Joint drives and potential damping terms, such as link angular or linear damping, or joint friction, are also not considered in the computation. - Prior to the computation, update/set the base spatial velocity with PxArticulationCache::rootLinkData and applyCache(). - commonInit() must be called before the computation, and after setting the articulation pose via applyCache(). \param[in,out] cache In: PxArticulationCache::jointVelocity; Out: PxArticulationCache::jointForce. \note This call may only be made on articulations that are in a scene, and may not be made during simulation. @see commonInit */ virtual void computeCoriolisAndCentrifugalForce(PxArticulationCache& cache) const = 0; /** \brief Computes the joint DOF forces required to counteract external spatial forces applied to articulation links. - Inputs: External forces on links (in cache), articulation pose (joint positions + base transform). - Outputs: Joint forces to counteract the external forces (in cache). - Only the external spatial forces provided in the cache and the articulation pose are considered in the computation. - The external spatial forces are with respect to the links' centers of mass, and not the actor's origin. - commonInit() must be called before the computation, and after setting the articulation pose via applyCache(). \param[in,out] cache In: PxArticulationCache::externalForces; Out: PxArticulationCache::jointForce. \note This call may only be made on articulations that are in a scene, and may not be made during simulation. @see commonInit */ virtual void computeGeneralizedExternalForce(PxArticulationCache& cache) const = 0; /** \brief Computes the joint accelerations for the given articulation state and joint forces. - Inputs: Joint forces (in cache) and articulation state (joint positions and velocities (in cache), and base transform and spatial velocity). - Outputs: Joint accelerations (in cache). - The computation includes Coriolis terms and gravity. However, joint drives and potential damping terms are not considered in the computation (for example, linear link damping or joint friction). - Prior to the computation, update/set the base spatial velocity with PxArticulationCache::rootLinkData and applyCache(). - commonInit() must be called before the computation, and after setting the articulation pose via applyCache(). \param[in,out] cache In: PxArticulationCache::jointForce and PxArticulationCache::jointVelocity; Out: PxArticulationCache::jointAcceleration. \note This call may only be made on articulations that are in a scene, and may not be made during simulation. @see commonInit */ virtual void computeJointAcceleration(PxArticulationCache& cache) const = 0; /** \brief Computes the joint forces for the given articulation state and joint accelerations, not considering gravity. - Inputs: Joint accelerations (in cache) and articulation state (joint positions and velocities (in cache), and base transform and spatial velocity). - Outputs: Joint forces (in cache). - The computation includes Coriolis terms. However, joint drives and potential damping terms are not considered in the computation (for example, linear link damping or joint friction). - Prior to the computation, update/set the base spatial velocity with PxArticulationCache::rootLinkData and applyCache(). - commonInit() must be called before the computation, and after setting the articulation pose via applyCache(). 
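A sketched call sequence for these inverse-dynamics queries (the pose and velocities are assumed to have been applied via applyCache() beforehand):
\verbatim
articulation->commonInit();
articulation->computeGeneralizedGravityForce(*cache);     // writes cache->jointForce
// copy cache->jointForce out here if needed; the next call overwrites it
articulation->computeCoriolisAndCentrifugalForce(*cache); // also writes cache->jointForce
\endverbatim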
\param[in,out] cache In: PxArticulationCache::jointAcceleration and PxArticulationCache::jointVelocity; Out: PxArticulationCache::jointForce. \note This call may only be made on articulations that are in a scene, and may not be made during simulation. @see commonInit */ virtual void computeJointForce(PxArticulationCache& cache) const = 0; /** \brief Compute the dense Jacobian for the articulation in world space, including the DOFs of a potentially floating base. This computes the dense representation of an inherently sparse matrix. Multiplication with this matrix maps joint space velocities to world-space linear and angular (i.e. spatial) velocities of the centers of mass of the links. \param[out] cache Sets cache.denseJacobian matrix. The matrix is indexed [nCols * row + column]. \param[out] nRows Set to number of rows in matrix, which corresponds to nbLinks() * 6, minus 6 if PxArticulationFlag::eFIX_BASE is true. \param[out] nCols Set to number of columns in matrix, which corresponds to the number of joint DOFs, plus 6 in the case PxArticulationFlag::eFIX_BASE is false. \note This call may only be made on articulations that are in a scene, and may not be made during simulation. */ virtual void computeDenseJacobian(PxArticulationCache& cache, PxU32& nRows, PxU32& nCols) const = 0; /** \brief Computes the coefficient matrix for contact forces. - The matrix dimension is getCoefficientMatrixSize() = getDofs() * getNbLoopJoints(), and the DOF (column) indexing follows the internal DOF order, see PxArticulationCache::jointVelocity. - Each column in the matrix is the joint forces effected by a contact based on impulse strength 1. - The user must allocate memory for PxArticulationCache::coefficientMatrix where the required size of the PxReal array is equal to getCoefficientMatrixSize(). - commonInit() must be called before the computation, and after setting the articulation pose via applyCache(). \param[out] cache Out: PxArticulationCache::coefficientMatrix. \note This call may only be made on articulations that are in a scene, and may not be made during simulation. @see commonInit, getCoefficientMatrixSize */ virtual void computeCoefficientMatrix(PxArticulationCache& cache) const = 0; /** \brief Computes the lambda values when the test impulse is 1. - The user must allocate memory for PxArticulationCache::lambda where the required size of the PxReal array is equal to getNbLoopJoints(). - commonInit() must be called before the computation, and after setting the articulation pose via applyCache(). \param[out] cache Out: PxArticulationCache::lambda. \param[in] initialState The initial state of the articulation system. \param[in] jointTorque M(q)*qddot + C(q,qdot) + g(q) <- calculate by summing joint forces obtained with computeJointForce and computeGeneralizedGravityForce. \param[in] maxIter Maximum number of solver iterations to run. If the system converges, fewer iterations may be used. \return True if convergence was achieved within maxIter; False if convergence was not achieved or the operation failed otherwise. \note This call may only be made on articulations that are in a scene, and may not be made during simulation. @see commonInit, getNbLoopJoints */ virtual bool computeLambda(PxArticulationCache& cache, PxArticulationCache& initialState, const PxReal* const jointTorque, const PxU32 maxIter) const = 0; /** \brief Compute the joint-space inertia matrix that maps joint accelerations to joint forces: forces = M * accelerations. 
- Inputs: Articulation pose (joint positions and base transform). - Outputs: Mass matrix (in cache). commonInit() must be called before the computation, and after setting the articulation pose via applyCache(). \param[out] cache Out: PxArticulationCache::massMatrix. \note This call may only be made on articulations that are in a scene, and may not be made during simulation. @see commonInit */ virtual void computeGeneralizedMassMatrix(PxArticulationCache& cache) const = 0; /** \brief Adds a loop joint to the articulation system for inverse dynamics. \param[in] joint The joint to add. \note This call may not be made during simulation. @see PxContactJoint, PxFixedJoint, PxSphericalJoint, PxRevoluteJoint, PxPrismaticJoint, PxDistanceJoint, PxD6Joint */ virtual void addLoopJoint(PxConstraint* joint) = 0; /** \brief Removes a loop joint from the articulation for inverse dynamics. \note This call may not be made during simulation. \param[in] joint The joint to remove. */ virtual void removeLoopJoint(PxConstraint* joint) = 0; /** \brief Returns the number of loop joints in the articulation for inverse dynamics. \return The number of loop joints. */ virtual PxU32 getNbLoopJoints() const = 0; /** \brief Returns the set of loop constraints (i.e. joints) in the articulation. \param[in] userBuffer Target buffer for the constraint pointers. \param[in] bufferSize The size of the buffer. If this is not large enough to contain all the pointers to the constraints, only as many as will fit are written. Use getNbLoopJoints() to size the buffer for retrieving all constraints. \param[in] startIndex Index of first constraint pointer to be retrieved. \return The number of constraints written into the buffer. */ virtual PxU32 getLoopJoints(PxConstraint** userBuffer, PxU32 bufferSize, PxU32 startIndex = 0) const = 0; /** \brief Returns the required size of the coefficient matrix in the articulation. \return Size of the coefficient matrix (equal to getDofs() * getNbLoopJoints()). \note This call may only be made on articulations that are in a scene. @see computeCoefficientMatrix */ virtual PxU32 getCoefficientMatrixSize() const = 0; /** \brief Sets the root link transform in the world frame. - Use updateKinematic() after all state updates to the articulation via non-cache API such as this method, in order to update link states for the next simulation frame or querying. \param[in] pose The new root link transform. \param[in] autowake If true and the articulation is in a scene, the articulation will be woken up and the wake counter of each link will be reset to #PxSceneDesc::wakeCounterResetValue. \note This call may not be made during simulation. \note PxArticulationCache::rootLinkData similarly allows the root link pose to be updated and potentially offers better performance if the root link pose is to be updated along with other state variables. @see getRootGlobalPose, updateKinematic, PxArticulationCache, applyCache */ virtual void setRootGlobalPose(const PxTransform& pose, bool autowake = true) = 0; /** \brief Returns the root link transform (world to actor frame). \return The root link transform. \note This call is not allowed while the simulation is running except in a split simulation during #PxScene::collide() and up to #PxScene::advance(), and in PxContactModifyCallback or in contact report callbacks. \note PxArticulationCache::rootLinkData similarly allows the root link pose to be queried and potentially offers better performance if the root link pose is to be queried along with other state variables. 
@see setRootGlobalPose, PxArticulationCache, copyInternalStateToCache */ virtual PxTransform getRootGlobalPose() const = 0; /** \brief Sets the root link linear center-of-mass velocity. - The linear velocity is with respect to the link's center of mass and not the actor frame origin. - The articulation is woken up if the input velocity is nonzero (ignoring autowake) and the articulation is in a scene. - Use updateKinematic() after all state updates to the articulation via non-cache API such as this method, in order to update link states for the next simulation frame or querying. \param[in] linearVelocity The new root link center-of-mass linear velocity. \param[in] autowake If true and the articulation is in a scene, the call wakes up the articulation and increases the wake counter to #PxSceneDesc::wakeCounterResetValue if the counter value is below the reset value. \note This call may not be made during simulation, except in a split simulation in-between #PxScene::fetchCollision and #PxScene::advance. \note PxArticulationCache::rootLinkData similarly allows the root link linear velocity to be updated and potentially offers better performance if the root link linear velocity is to be updated along with other state variables. @see updateKinematic, getRootLinearVelocity, setRootAngularVelocity, getRootAngularVelocity, PxRigidBody::getCMassLocalPose, PxArticulationCache, applyCache */ virtual void setRootLinearVelocity(const PxVec3& linearVelocity, bool autowake = true) = 0; /** \brief Gets the root link center-of-mass linear velocity. - The linear velocity is with respect to the link's center of mass and not the actor frame origin. \return The root link center-of-mass linear velocity. \note This call is not allowed while the simulation is running except in a split simulation during #PxScene::collide() and up to #PxScene::advance(), and in PxContactModifyCallback or in contact report callbacks. \note PxArticulationCache::rootLinkData similarly allows the root link linear velocity to be queried and potentially offers better performance if the root link linear velocity is to be queried along with other state variables. @see setRootLinearVelocity, setRootAngularVelocity, getRootAngularVelocity, PxRigidBody::getCMassLocalPose, PxArticulationCache, applyCache */ virtual PxVec3 getRootLinearVelocity(void) const = 0; /** \brief Sets the root link angular velocity. - The articulation is woken up if the input velocity is nonzero (ignoring autowake) and the articulation is in a scene. - Use updateKinematic() after all state updates to the articulation via non-cache API such as this method, in order to update link states for the next simulation frame or querying. \param[in] angularVelocity The new root link angular velocity. \param[in] autowake If true and the articulation is in a scene, the call wakes up the articulation and increases the wake counter to #PxSceneDesc::wakeCounterResetValue if the counter value is below the reset value. \note This call may not be made during simulation, except in a split simulation in-between #PxScene::fetchCollision and #PxScene::advance. \note PxArticulationCache::rootLinkData similarly allows the root link angular velocity to be updated and potentially offers better performance if the root link angular velocity is to be updated along with other state variables. 
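For illustration, a sketched root-state teleport using the non-cache API (arbitrary values; the articulation is assumed to be in a scene and the simulation not running):
\verbatim
articulation->setRootGlobalPose(PxTransform(PxVec3(0.0f, 2.0f, 0.0f)));
articulation->setRootLinearVelocity(PxVec3(0.0f));
articulation->setRootAngularVelocity(PxVec3(0.0f));
articulation->updateKinematic(PxArticulationKinematicFlag::ePOSITION); // refreshes link poses and velocities
\endverbatim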
@see updateKinematic, getRootAngularVelocity, setRootLinearVelocity, getRootLinearVelocity, PxArticulationCache, applyCache */ virtual void setRootAngularVelocity(const PxVec3& angularVelocity, bool autowake = true) = 0; /** \brief Gets the root link angular velocity. \return The root link angular velocity. \note This call is not allowed while the simulation is running except in a split simulation during #PxScene::collide() and up to #PxScene::advance(), and in PxContactModifyCallback or in contact report callbacks. \note PxArticulationCache::rootLinkData similarly allows the root link angular velocity to be queried and potentially offers better performance if the root link angular velocity is to be queried along with other state variables. @see setRootAngularVelocity, setRootLinearVelocity, getRootLinearVelocity, PxArticulationCache, applyCache */ virtual PxVec3 getRootAngularVelocity(void) const = 0; /** \brief Returns the (classical) link acceleration in world space for the given low-level link index. - The returned acceleration is not a spatial, but a classical, i.e. body-fixed acceleration (https://en.wikipedia.org/wiki/Spatial_acceleration). - The (linear) acceleration is with respect to the link's center of mass and not the actor frame origin. \param[in] linkId The low-level link index, see PxArticulationLink::getLinkIndex. \return The link's center-of-mass classical acceleration, or 0 if the call is made before the articulation participated in a first simulation step. \note This call may only be made on articulations that are in a scene. It is not allowed to use this method while the simulation is running. The exceptions to this rule are a split simulation during #PxScene::collide() and up to #PxScene::advance(); in PxContactModifyCallback; and in contact report callbacks. @see PxArticulationLink::getLinkIndex, PxRigidBody::getCMassLocalPose */ virtual PxSpatialVelocity getLinkAcceleration(const PxU32 linkId) = 0; /** \brief Returns the GPU articulation index. \return The GPU index, or 0xFFFFFFFF if the articulation is not in a scene or PxSceneFlag::eENABLE_DIRECT_GPU_API is not set. */ virtual PxU32 getGpuArticulationIndex() = 0; /** \brief Creates a spatial tendon to attach to the articulation with default attribute values. \return The new spatial tendon. \note Creating a spatial tendon is not allowed while the articulation is in a scene. In order to add the tendon, remove and then re-add the articulation to the scene. \note The spatial tendon is released with PxArticulationReducedCoordinate::release() @see PxArticulationSpatialTendon */ virtual PxArticulationSpatialTendon* createSpatialTendon() = 0; /** \brief Creates a fixed tendon to attach to the articulation with default attribute values. \return The new fixed tendon. \note Creating a fixed tendon is not allowed while the articulation is in a scene. In order to add the tendon, remove and then re-add the articulation to the scene. \note The fixed tendon is released with PxArticulationReducedCoordinate::release() @see PxArticulationFixedTendon */ virtual PxArticulationFixedTendon* createFixedTendon() = 0; /** @deprecated \brief Creates a force sensor attached to a link of the articulation. \param[in] link The link to attach the sensor to. \param[in] relativePose The sensor frame's relative pose to the link's body frame, i.e. the transform body frame -> sensor frame. The link body frame is at the center of mass and aligned with the principal axes of inertia, see PxRigidBody::getCMassLocalPose. \return The new sensor. 
\note Creating a sensor is not allowed while the articulation is in a scene. In order to add the sensor, remove and then re-add the articulation to the scene. \note The sensor is released with PxArticulationReducedCoordinate::release() @see PxArticulationSensor */ virtual PX_DEPRECATED PxArticulationSensor* createSensor(PxArticulationLink* link, const PxTransform& relativePose) = 0; /** \brief Returns the spatial tendons attached to the articulation. The order of the tendons in the buffer is not necessarily identical to the order in which the tendons were added to the articulation. \param[in] userBuffer The buffer into which to write the array of pointers to the tendons. \param[in] bufferSize The size of the buffer. If this is not large enough to contain all the pointers to tendons, only as many as will fit are written. Use getNbSpatialTendons to size for all attached tendons. \param[in] startIndex Index of first tendon pointer to be retrieved. \return The number of tendons written into the buffer. @see PxArticulationSpatialTendon, getNbSpatialTendons */ virtual PxU32 getSpatialTendons(PxArticulationSpatialTendon** userBuffer, PxU32 bufferSize, PxU32 startIndex = 0) const = 0; /** \brief Returns the number of spatial tendons in the articulation. \return The number of tendons. */ virtual PxU32 getNbSpatialTendons() = 0; /** \brief Returns the fixed tendons attached to the articulation. The order of the tendons in the buffer is not necessarily identical to the order in which the tendons were added to the articulation. \param[in] userBuffer The buffer into which to write the array of pointers to the tendons. \param[in] bufferSize The size of the buffer. If this is not large enough to contain all the pointers to tendons, only as many as will fit are written. Use getNbFixedTendons to size for all attached tendons. \param[in] startIndex Index of first tendon pointer to be retrieved. \return The number of tendons written into the buffer. @see PxArticulationFixedTendon, getNbFixedTendons */ virtual PxU32 getFixedTendons(PxArticulationFixedTendon** userBuffer, PxU32 bufferSize, PxU32 startIndex = 0) const = 0; /** \brief Returns the number of fixed tendons in the articulation. \return The number of tendons. */ virtual PxU32 getNbFixedTendons() = 0; /** @deprecated \brief Returns the sensors attached to the articulation. The order of the sensors in the buffer is not necessarily identical to the order in which the sensors were added to the articulation. \param[in] userBuffer The buffer into which to write the array of pointers to the sensors. \param[in] bufferSize The size of the buffer. If this is not large enough to contain all the pointers to sensors, only as many as will fit are written. Use getNbSensors to size for all attached sensors. \param[in] startIndex Index of first sensor pointer to be retrieved. \return The number of sensors written into the buffer. @see PxArticulationSensor, getNbSensors */ virtual PX_DEPRECATED PxU32 getSensors(PxArticulationSensor** userBuffer, PxU32 bufferSize, PxU32 startIndex = 0) const = 0; /** @deprecated \brief Returns the number of sensors in the articulation. \return The number of sensors. */ virtual PX_DEPRECATED PxU32 getNbSensors() = 0; /** \brief Update link velocities and/or positions in the articulation. An alternative that potentially offers better performance is to use the PxArticulationCache API. 
If the application updates the root state (position and velocity) or joint state via any combination of the non-cache API calls - setRootGlobalPose(), setRootLinearVelocity(), setRootAngularVelocity() - PxArticulationJointReducedCoordinate::setJointPosition(), PxArticulationJointReducedCoordinate::setJointVelocity() the application needs to call this method after the state setting in order to update the link states for the next simulation frame or querying. Use - PxArticulationKinematicFlag::ePOSITION after any changes to the articulation root or joint positions using non-cache API calls. Updates links' positions and velocities. - PxArticulationKinematicFlag::eVELOCITY after velocity-only changes to the articulation root or joints using non-cache API calls. Updates links' velocities only. \note This call may only be made on articulations that are in a scene, and may not be made during simulation. @see PxArticulationKinematicFlags, PxArticulationCache, applyCache */ virtual void updateKinematic(PxArticulationKinematicFlags flags) = 0; virtual ~PxArticulationReducedCoordinate() {} void* userData; //!< user can assign this to whatever, usually to create a 1:1 relationship with a user object. protected: PX_INLINE PxArticulationReducedCoordinate(PxType concreteType, PxBaseFlags baseFlags) : PxBase(concreteType, baseFlags) {} PX_INLINE PxArticulationReducedCoordinate(PxBaseFlags baseFlags) : PxBase(baseFlags) {} }; #if PX_VC #pragma warning(pop) #endif #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
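The updateKinematic() requirement documented above is easiest to see as a short usage sketch. The snippet below is illustrative only: it assumes a PxArticulationReducedCoordinate* that is already in a scene and a simulation that is not currently running; the helper name teleportArticulation is hypothetical.

// Hedged sketch: `articulation` is assumed to already be in a scene and the
// simulation is assumed to not be running.
#include "PxPhysicsAPI.h"

using namespace physx;

void teleportArticulation(PxArticulationReducedCoordinate* articulation, const PxTransform& newPose)
{
    // Non-cache API writes to the root state...
    articulation->setRootGlobalPose(newPose);
    articulation->setRootLinearVelocity(PxVec3(0.0f));
    articulation->setRootAngularVelocity(PxVec3(0.0f));

    // ...must be followed by updateKinematic() so the link states are recomputed
    // before the next simulation step or query. ePOSITION updates both the link
    // positions and the link velocities.
    articulation->updateKinematic(PxArticulationKinematicFlag::ePOSITION);
}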
62,696
C
40.909759
191
0.755854
NVIDIA-Omniverse/PhysX/physx/include/PxParticleBuffer.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_PARTICLE_BUFFER_H #define PX_PARTICLE_BUFFER_H /** \addtogroup physics @{ */ #include "common/PxBase.h" #include "common/PxPhysXCommonConfig.h" #include "common/PxTypeInfo.h" #include "PxParticleSystemFlag.h" #include "foundation/PxBounds3.h" #include "foundation/PxSimpleTypes.h" #include "foundation/PxVec4.h" #if !PX_DOXYGEN namespace physx { #endif #if PX_VC #pragma warning(push) #pragma warning(disable : 4435) #endif class PxCudaContextManager; struct PxParticleRigidFilterPair; struct PxParticleRigidAttachment; /** \brief Particle volume structure. Used to track the bounding volume of a user-specified set of particles. The particles are required to be laid out contiguously within the same PxParticleBuffer. */ PX_ALIGN_PREFIX(16) struct PxParticleVolume { PxBounds3 bound; //!< The current bounds of the particles contained in this #PxParticleVolume. PxU32 particleIndicesOffset; //!< The index into the particle list of the #PxParticleBuffer for the first particle of this volume. PxU32 numParticles; //!< The number of particles contained in this #PxParticleVolume. } PX_ALIGN_SUFFIX(16); /** \brief The shared base class for all particle buffers, can be instantiated directly to simulate granular and fluid particles. See #PxPhysics::createParticleBuffer. A particle buffer is a container that specifies per-particle attributes of a set of particles that will be used during the simulation of a particle system. It exposes direct access to the underlying GPU buffers and is independent of the scene and particle system. Particle buffers can be added/removed from a particle system at any time between simulation steps, and transferred from one particle system to another. */ class PxParticleBuffer : public PxBase { public: /** \brief Get positions and inverse masses for this particle buffer. 
\return A pointer to a device buffer containing the positions and inverse mass packed as PxVec4(pos.x, pos.y, pos.z, inverseMass). */ virtual PxVec4* getPositionInvMasses() const = 0; /** \brief Get velocities for this particle buffer. \return A pointer to a device buffer containing the velocities packed as PxVec4(vel.x, vel.y, vel.z, 0.0f). */ virtual PxVec4* getVelocities() const = 0; /** \brief Get phases for this particle buffer. See #PxParticlePhase \return A pointer to a device buffer containing the per-particle phases for this particle buffer. */ virtual PxU32* getPhases() const = 0; /** \brief Get particle volumes for this particle buffer. See #PxParticleVolume \return A pointer to a device buffer containing the #PxParticleVolume s for this particle buffer. */ virtual PxParticleVolume* getParticleVolumes() const = 0; /** \brief Set the number of active particles for this particle buffer. \param[in] nbActiveParticles The number of active particles. The number of active particles can be <= PxParticleBuffer::getMaxParticles(). The particle system will simulate the first x particles in the #PxParticleBuffer, where x is the number of active particles. */ virtual void setNbActiveParticles(PxU32 nbActiveParticles) = 0; /** \brief Get the number of active particles for this particle buffer. \return The number of active particles. */ virtual PxU32 getNbActiveParticles() const = 0; /** \brief Get the maximum number particles this particle buffer can hold. The maximum number of particles is specified when creating a #PxParticleBuffer. See #PxPhysics::createParticleBuffer. \return The maximum number of particles. */ virtual PxU32 getMaxParticles() const = 0; /** \brief Get the number of particle volumes in this particle buffer. \return The number of #PxParticleVolume s for this particle buffer. */ virtual PxU32 getNbParticleVolumes() const = 0; /** \brief Set the number of #PxParticleVolume s for this particle buffer. \param[in] nbParticleVolumes The number of particle volumes in this particle buffer. */ virtual void setNbParticleVolumes(PxU32 nbParticleVolumes) = 0; /** \brief Get the maximum number of particle volumes this particle buffer can hold. See #PxParticleVolume. \return The maximum number of particle volumes this particle buffer can hold. */ virtual PxU32 getMaxParticleVolumes() const = 0; /** \brief Set the #PxParticleRigidFilterPair s for collision filtering of particles in this buffer with rigid bodies. See #PxParticleRigidFilterPair \param[in] filters A device buffer containing #PxParticleRigidFilterPair s. \param[in] nbFilters The number of particle-rigid body collision filtering pairs. */ virtual void setRigidFilters(PxParticleRigidFilterPair* filters, PxU32 nbFilters) = 0; /** \brief Set the particle-rigid body attachments for particles in this particle buffer. See #PxParticleRigidAttachment \param[in] attachments A device buffer containing #PxParticleRigidAttachment s. \param[in] nbAttachments The number of particle-rigid body attachments. */ virtual void setRigidAttachments(PxParticleRigidAttachment* attachments, PxU32 nbAttachments) = 0; /** \brief Get the start index for the first particle of this particle buffer in the complete list of particles of the particle system this buffer is used in. The return value is only correct if the particle buffer is assigned to a particle system and at least one call to simulate() has been performed. \return The index of the first particle in the complete particle list. 
*/ virtual PxU32 getFlatListStartIndex() const = 0; /** \brief Raise dirty flags on this particle buffer to communicate that the corresponding data has been updated by the user. \param[in] flags The flag corresponding to the data that is dirty. See #PxParticleBufferFlag. */ virtual void raiseFlags(PxParticleBufferFlag::Enum flags) = 0; /** \brief Release this buffer and deallocate all the memory. */ virtual void release() = 0; /** \brief Cleanup helper used in case a particle system is released before the particle buffers have been removed. */ virtual void onParticleSystemDestroy() = 0; /** \brief Reserved for internal use. */ virtual void setInternalData(void* data) = 0; /** \brief Index of this buffer in the particle system it is assigned to. */ PxU32 bufferIndex; /** \brief Unique index that does not change over the lifetime of a PxParticleBuffer. */ const PxU32 bufferUniqueId; protected: virtual ~PxParticleBuffer() { } PX_INLINE PxParticleBuffer(PxU32 uniqueId) : PxBase(PxConcreteType::ePARTICLE_BUFFER, PxBaseFlag::eOWNS_MEMORY | PxBaseFlag::eIS_RELEASABLE), bufferIndex(0xffffffff), bufferUniqueId(uniqueId){} PX_INLINE PxParticleBuffer(PxU32 uniqueId, PxType type) : PxBase(type, PxBaseFlag::eOWNS_MEMORY | PxBaseFlag::eIS_RELEASABLE), bufferIndex(0xffffffff), bufferUniqueId(uniqueId){} private: PX_NOCOPY(PxParticleBuffer) }; /** \brief Parameters to configure the behavior of diffuse particles */ class PxDiffuseParticleParams { public: /** \brief Construct parameters with default values. */ PX_INLINE PxDiffuseParticleParams() { threshold = 100.0f; lifetime = 5.0f; airDrag = 0.0f; bubbleDrag = 0.5f; buoyancy = 0.8f; kineticEnergyWeight = 0.01f; pressureWeight = 1.0f; divergenceWeight = 5.0f; collisionDecay = 0.5f; useAccurateVelocity = false; } /** \brief (re)sets the structure to the default. */ PX_INLINE void setToDefault() { *this = PxDiffuseParticleParams(); } PxReal threshold; //!< Particles with potential value greater than the threshold will spawn diffuse particles PxReal lifetime; //!< Diffuse particle will be removed after the specified lifetime PxReal airDrag; //!< Air drag force factor for spray particles PxReal bubbleDrag; //!< Fluid drag force factor for bubble particles PxReal buoyancy; //!< Buoyancy force factor for bubble particles PxReal kineticEnergyWeight; //!< Contribution from kinetic energy when deciding diffuse particle creation. PxReal pressureWeight; //!< Contribution from pressure when deciding diffuse particle creation. PxReal divergenceWeight; //!< Contribution from divergence when deciding diffuse particle creation. PxReal collisionDecay; //!< Decay factor of diffuse particles' lifetime after they collide with shapes. bool useAccurateVelocity; //!< If true, enables accurate velocity estimation when using PBD solver. }; /** \brief A particle buffer used to simulate diffuse particles. See #PxPhysics::createParticleAndDiffuseBuffer. */ class PxParticleAndDiffuseBuffer : public PxParticleBuffer { public: /** \brief Get a device buffer of positions and remaining lifetimes for the diffuse particles. \return A device buffer containing positions and lifetimes of diffuse particles packed as PxVec4(pos.x, pos.y, pos.z, lifetime). */ virtual PxVec4* getDiffusePositionLifeTime() const = 0; /** \brief Get number of currently active diffuse particles. \return The number of currently active diffuse particles. */ virtual PxU32 getNbActiveDiffuseParticles() const = 0; /** \brief Set the maximum possible number of diffuse particles for this buffer. 
\param[in] maxActiveDiffuseParticles the maximum number of active diffuse particles. \note Must be in the range [0, PxParticleAndDiffuseBuffer::getMaxDiffuseParticles()] */ virtual void setMaxActiveDiffuseParticles(PxU32 maxActiveDiffuseParticles) = 0; /** \brief Get maximum possible number of diffuse particles. \return The maximum possible number diffuse particles. */ virtual PxU32 getMaxDiffuseParticles() const = 0; /** \brief Set the parameters for diffuse particle simulation. \param[in] params The diffuse particle parameters. See #PxDiffuseParticleParams */ virtual void setDiffuseParticleParams(const PxDiffuseParticleParams& params) = 0; /** \brief Get the parameters currently used for diffuse particle simulation. \return A PxDiffuseParticleParams structure. */ virtual PxDiffuseParticleParams getDiffuseParticleParams() const = 0; protected: virtual ~PxParticleAndDiffuseBuffer() {} PX_INLINE PxParticleAndDiffuseBuffer(PxU32 uniqueId) : PxParticleBuffer(uniqueId, PxConcreteType::ePARTICLE_DIFFUSE_BUFFER){} private: PX_NOCOPY(PxParticleAndDiffuseBuffer) }; /** \brief Holds all the information for a spring constraint between two particles. Used for particle cloth simulation. */ struct PX_ALIGN_PREFIX(8) PxParticleSpring { PxU32 ind0; //!< particle index of first particle PxU32 ind1; //!< particle index of second particle PxReal length; //!< spring length PxReal stiffness; //!< spring stiffness PxReal damping; //!< spring damping factor PxReal pad; //!< padding bytes. } PX_ALIGN_SUFFIX(8); /** \brief Particle cloth structure. Holds information about a single piece of cloth that is part of a #PxParticleClothBuffer. */ struct PxParticleCloth { PxU32 startVertexIndex; //!< Index of the first particle of this cloth in the position/velocity buffers of the parent #PxParticleClothBuffer PxU32 numVertices; //!< The number of particles of this piece of cloth PxReal clothBlendScale; //!< Used internally. PxReal restVolume; //!< The rest volume of this piece of cloth, used for inflatable simulation. PxReal pressure; //!< The factor of the rest volume to specify the target volume for this piece of cloth, used for inflatable simulation. PxU32 startTriangleIndex; //!< The index of the first triangle of this piece of cloth in the triangle list. PxU32 numTriangles; //!< The number of triangles of this piece of cloth. bool operator <= (const PxParticleCloth& other) const { return startVertexIndex <= other.startVertexIndex; } bool operator >= (const PxParticleCloth& other) const { return startVertexIndex >= other.startVertexIndex; } bool operator < (const PxParticleCloth& other) const { return startVertexIndex < other.startVertexIndex; } bool operator > (const PxParticleCloth& other) const { return startVertexIndex > other.startVertexIndex; } bool operator == (const PxParticleCloth& other) const { return startVertexIndex == other.startVertexIndex; } }; /** \brief Structure to describe the set of particle cloths in the same #PxParticleClothBuffer. Used an input for the cloth preprocessing. */ struct PxParticleClothDesc { PxParticleClothDesc() : cloths(NULL), triangles(NULL), springs(NULL), restPositions(NULL), nbCloths(0), nbSprings(0), nbTriangles(0), nbParticles(0) { } PxParticleCloth* cloths; //!< List of PxParticleCloth s, describes the individual cloths. PxU32* triangles; //!< List of triangle indices, 3 consecutive PxU32 that map triangle vertices to particles PxParticleSpring* springs; //!< List of PxParticleSpring s. 
PxVec4* restPositions; //!< List of rest positions for all particles PxU32 nbCloths; //!< The number of cloths in described using this cloth descriptor PxU32 nbSprings; //!< The number of springs in this cloth descriptor PxU32 nbTriangles; //!< The number of triangles in this cloth descriptor PxU32 nbParticles; //!< The number of particles in this cloth descriptor }; /** \brief Structure to describe the output of the particle cloth preprocessing. Used as an input to specify cloth data for a #PxParticleClothBuffer. All the pointers point to pinned host memory. See #PxParticleClothPreProcessor */ struct PX_PHYSX_CORE_API PxPartitionedParticleCloth { PxU32* accumulatedSpringsPerPartitions; //!< The number of springs in each partition. Size: numPartitions. PxU32* accumulatedCopiesPerParticles; //!< Start index for each particle in the accumulation buffer. Size: numParticles. PxU32* remapOutput; //!< Index of the next copy of this particle in the next partition, or in the accumulation buffer. Size: numSprings * 2. PxParticleSpring* orderedSprings; //!< Springs ordered by partition. Size: numSprings. PxU32* sortedClothStartIndices; //!< The first particle index into the position buffer of the #PxParticleClothBuffer for each cloth. Cloths are sorted by start particle index. Size: numCloths. PxParticleCloth* cloths; //!< The #PxParticleCloth s sorted by start particle index. PxU32 remapOutputSize; //!< Size of remapOutput. PxU32 nbPartitions; //!< The number of partitions. PxU32 nbSprings; //!< The number of springs. PxU32 nbCloths; //!< The number of cloths. PxU32 maxSpringsPerPartition; //!< The maximum number of springs in a partition. PxCudaContextManager* mCudaManager; //!< A cuda context manager. PxPartitionedParticleCloth(); ~PxPartitionedParticleCloth(); /** \brief allocate all the buffers for this #PxPartitionedParticleCloth. \param[in] nbParticles the number of particles this #PxPartitionedParticleCloth will be generated for. \param[in] cudaManager a cuda context manager. */ void allocateBuffers(PxU32 nbParticles, PxCudaContextManager* cudaManager); }; /** \brief A particle buffer used to simulate particle cloth. See #PxPhysics::createParticleClothBuffer. */ class PxParticleClothBuffer : public PxParticleBuffer { public: /** \brief Get rest positions for this particle buffer. \return A pointer to a device buffer containing the rest positions packed as PxVec4(pos.x, pos.y, pos.z, 0.0f). */ virtual PxVec4* getRestPositions() = 0; /** \brief Get the triangle indices for this particle buffer. \return A pointer to a device buffer containing the triangle indices for this cloth buffer. */ virtual PxU32* getTriangles() const = 0; /** \brief Set the number of triangles for this particle buffer. \param[in] nbTriangles The number of triangles for this particle cloth buffer. */ virtual void setNbTriangles(PxU32 nbTriangles) = 0; /** \brief Get the number of triangles for this particle buffer. \return The number triangles for this cloth buffer. */ virtual PxU32 getNbTriangles() const = 0; /** \brief Get the number of springs in this particle buffer. \return The number of springs in this cloth buffer. */ virtual PxU32 getNbSprings() const = 0; /** \brief Get the springs for this particle buffer. \return A pointer to a device buffer containing the springs for this cloth buffer. */ virtual PxParticleSpring* getSprings() = 0; /** \brief Set cloths for this particle buffer. \param[in] cloths A pointer to a PxPartitionedParticleCloth. 
See #PxPartitionedParticleCloth, #PxParticleClothPreProcessor */ virtual void setCloths(PxPartitionedParticleCloth& cloths) = 0; protected: virtual ~PxParticleClothBuffer() {} PX_INLINE PxParticleClothBuffer(PxU32 uniqueId) : PxParticleBuffer(uniqueId, PxConcreteType::ePARTICLE_CLOTH_BUFFER) {} private: PX_NOCOPY(PxParticleClothBuffer) }; /** \brief A particle buffer used to simulate rigid bodies using shape matching with particles. See #PxPhysics::createParticleRigidBuffer. */ class PxParticleRigidBuffer : public PxParticleBuffer { public: /** \brief Get the particle indices of the first particle for each shape matched rigid body. \return A device buffer containing the list of particle start indices of each shape matched rigid body. */ virtual PxU32* getRigidOffsets() const = 0; /** \brief Get the stiffness coefficients for all shape matched rigid bodies in this buffer. Stiffness must be in the range [0, 1]. \return A device buffer containing the list of stiffness coefficients for each rigid body. */ virtual PxReal* getRigidCoefficients() const = 0; /** \brief Get the local position of each particle relative to the rigid body's center of mass. \return A pointer to a device buffer containing the local position for each particle. */ virtual PxVec4* getRigidLocalPositions() const = 0; /** \brief Get the world-space translations for all rigid bodies in this buffer. \return A pointer to a device buffer containing the world-space translations for all shape-matched rigid bodies in this buffer. */ virtual PxVec4* getRigidTranslations() const = 0; /** \brief Get the world-space rotation of every shape-matched rigid body in this buffer. Rotations are specified as quaternions. \return A pointer to a device buffer containing the world-space rotation for every shape-matched rigid body in this buffer. */ virtual PxVec4* getRigidRotations() const = 0; /** \brief Get the local space normals for each particle relative to the shape of the corresponding rigid body. The 4th component of every PxVec4 should be the negative signed distance of the particle inside its shape. \return A pointer to a device buffer containing the local-space normals for each particle. */ virtual PxVec4* getRigidLocalNormals() const = 0; /** \brief Set the number of shape matched rigid bodies in this buffer. \param[in] nbRigids The number of shape matched rigid bodies */ virtual void setNbRigids(PxU32 nbRigids) = 0; /** \brief Get the number of shape matched rigid bodies in this buffer. \return The number of shape matched rigid bodies in this buffer. */ virtual PxU32 getNbRigids() const = 0; protected: virtual ~PxParticleRigidBuffer() {} PX_INLINE PxParticleRigidBuffer(PxU32 uniqueId) : PxParticleBuffer(uniqueId, PxConcreteType::ePARTICLE_RIGID_BUFFER) {} private: PX_NOCOPY(PxParticleRigidBuffer) }; /** @brief Preprocessor to prepare particle cloths for simulation. Preprocessing is done by calling #PxParticleClothPreProcessor::partitionSprings() on an instance of this class. This will allocate the memory in the output object, partition the springs and fill all the members of the ouput object. The output can then be passed without any further modifications to #PxParticleClothBuffer::setCloths(). See #PxCreateParticleClothPreprocessor, #PxParticleClothDesc, #PxPartitionedParticleCloth */ class PxParticleClothPreProcessor { public: /** \brief Release this object and deallocate all the memory. */ virtual void release() = 0; /** \brief Partition the spring constraints for particle cloth simulation. 
\param[in] clothDesc Reference to a valid #PxParticleClothDesc. \param[in] output Reference to a #PxPartitionedParticleCloth object. This is the output of the preprocessing and should be passed to a #PxParticleClothBuffer. */ virtual void partitionSprings(const PxParticleClothDesc& clothDesc, PxPartitionedParticleCloth& output) = 0; protected: virtual ~PxParticleClothPreProcessor(){} }; #if PX_VC #pragma warning(pop) #endif #if !PX_DOXYGEN } // namespace physx #endif /** \brief Create a particle cloth preprocessor. \param[in] cudaContextManager A cuda context manager. See #PxParticleClothDesc, #PxPartitionedParticleCloth. */ PX_C_EXPORT PX_PHYSX_CORE_API physx::PxParticleClothPreProcessor* PX_CALL_CONV PxCreateParticleClothPreProcessor(physx::PxCudaContextManager* cudaContextManager); /** @} */ #endif
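For the cloth preprocessing path described above (PxParticleClothDesc -> PxParticleClothPreProcessor -> PxPartitionedParticleCloth -> PxParticleClothBuffer::setCloths), here is a hedged sketch. It assumes a PxCudaContextManager, a fully populated PxParticleClothDesc and a PxParticleClothBuffer created elsewhere via PxPhysics::createParticleClothBuffer; the helper name uploadClothData is hypothetical and error handling is omitted.

// Hedged sketch: `cudaContextManager`, `clothDesc` and `clothBuffer` are assumed
// to exist already.
#include "PxPhysicsAPI.h"

using namespace physx;

void uploadClothData(PxCudaContextManager* cudaContextManager,
                     const PxParticleClothDesc& clothDesc,
                     PxParticleClothBuffer* clothBuffer)
{
    PxParticleClothPreProcessor* preProcessor = PxCreateParticleClothPreProcessor(cudaContextManager);

    // partitionSprings() allocates the buffers of the output object and fills in
    // the partitioned spring data (see PxPartitionedParticleCloth above).
    PxPartitionedParticleCloth output;
    preProcessor->partitionSprings(clothDesc, output);

    // The output can be passed to the cloth buffer without further modification.
    clothBuffer->setCloths(output);

    preProcessor->release();
}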
22,588
C
36.091954
200
0.760005
NVIDIA-Omniverse/PhysX/physx/include/PxActorData.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_ACTOR_DATA_H #define PX_ACTOR_DATA_H /** \addtogroup physics @{ */ #include "foundation/PxVec4.h" #include "foundation/PxQuat.h" #include "foundation/PxFlags.h" #include "PxNodeIndex.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief Identifies each type of information for retrieving from actor. @see PxScene::applyActorData */ struct PxActorCacheFlag { enum Enum { eACTOR_DATA = (1 << 0), //include transform and velocity eFORCE = (1 << 2), eTORQUE = (1 << 3) }; }; /** \brief Collection of set bits defined in PxActorCacheFlag. @see PxActorCacheFlag */ typedef PxFlags<PxActorCacheFlag::Enum, PxU16> PxActorCacheFlags; PX_FLAGS_OPERATORS(PxActorCacheFlag::Enum, PxU16) /** \brief State of a body used when interfacing with the GPU rigid body pipeline @see PxScene.copyBodyData() */ PX_ALIGN_PREFIX(16) struct PxGpuBodyData { PxQuat quat; /*!< actor global pose quaternion in world frame */ PxVec4 pos; /*!< (x,y,z members): actor global pose position in world frame */ PxVec4 linVel; /*!< (x,y,z members): linear velocity at center of gravity in world frame */ PxVec4 angVel; /*!< (x,y,z members): angular velocity in world frame */ } PX_ALIGN_SUFFIX(16); /** \brief Pair correspondence used for matching array indices with body node indices */ PX_ALIGN_PREFIX(8) struct PxGpuActorPair { PxU32 srcIndex; //Defines which index in src array we read PxNodeIndex nodeIndex; //Defines which actor this entry in src array is updating } PX_ALIGN_SUFFIX(8); /** \brief Maps numeric index to a data pointer. @see PxScene::computeDenseJacobians(), PxScene::computeGeneralizedMassMatrices(), PxScene::computeGeneralizedGravityForces(), PxScene::computeCoriolisAndCentrifugalForces() */ struct PxIndexDataPair { PxU32 index; void* data; }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
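A minimal, hedged illustration of how these plain data types compose: combining PxActorCacheFlag bits through the generated flag operators, and interpreting one PxGpuBodyData entry as a regular rigid body pose. The helper name toGlobalPose is hypothetical and no specific PxScene entry point is implied.

// Hedged sketch only; the flag combination and the conversion are assumptions
// about typical usage, not calls into a documented pipeline function.
#include "PxPhysicsAPI.h"

using namespace physx;

// Request transform/velocity data together with applied forces.
static const PxActorCacheFlags gCacheFlags =
    PxActorCacheFlag::eACTOR_DATA | PxActorCacheFlag::eFORCE;

// Interpret one PxGpuBodyData entry as a rigid body global pose.
PxTransform toGlobalPose(const PxGpuBodyData& bodyData)
{
    return PxTransform(PxVec3(bodyData.pos.x, bodyData.pos.y, bodyData.pos.z), bodyData.quat);
}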
3,608
C
31.223214
173
0.732816
NVIDIA-Omniverse/PhysX/physx/include/PxShape.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_SHAPE_H #define PX_SHAPE_H /** \addtogroup physics @{ */ #include "PxPhysXConfig.h" #include "common/PxBase.h" #include "geometry/PxGeometry.h" #include "geometry/PxGeometryHelpers.h" #if !PX_DOXYGEN namespace physx { #endif class PxBoxGeometry; class PxSphereGeometry; class PxCapsuleGeometry; class PxPlaneGeometry; class PxConvexMeshGeometry; class PxTriangleMeshGeometry; class PxTetrahedronMeshGeometry; class PxHeightFieldGeometry; class PxParticleSystemGeometry; class PxHairSystemGeometry; class PxRigidActor; struct PxFilterData; class PxBaseMaterial; class PxMaterial; class PxFEMSoftBodyMaterial; class PxFEMClothMaterial; /** \brief Flags which affect the behavior of PxShapes. @see PxShape PxShape.setFlag() */ struct PxShapeFlag { enum Enum { /** \brief The shape will partake in collision in the physical simulation. \note It is illegal to raise the eSIMULATION_SHAPE and eTRIGGER_SHAPE flags. In the event that one of these flags is already raised the sdk will reject any attempt to raise the other. To raise the eSIMULATION_SHAPE first ensure that eTRIGGER_SHAPE is already lowered. \note This flag has no effect if simulation is disabled for the corresponding actor (see #PxActorFlag::eDISABLE_SIMULATION). @see PxSimulationEventCallback.onContact() PxScene.setSimulationEventCallback() PxShape.setFlag(), PxShape.setFlags() */ eSIMULATION_SHAPE = (1<<0), /** \brief The shape will partake in scene queries (ray casts, overlap tests, sweeps, ...). */ eSCENE_QUERY_SHAPE = (1<<1), /** \brief The shape is a trigger which can send reports whenever other shapes enter/leave its volume. \note Triangle meshes and heightfields can not be triggers. Shape creation will fail in these cases. \note Shapes marked as triggers do not collide with other objects. 
If an object should act both as a trigger shape and a collision shape then create a rigid body with two shapes, one being a trigger shape and the other a collision shape. It is illegal to raise the eTRIGGER_SHAPE and eSIMULATION_SHAPE flags on a single PxShape instance. In the event that one of these flags is already raised the sdk will reject any attempt to raise the other. To raise the eTRIGGER_SHAPE flag first ensure that eSIMULATION_SHAPE flag is already lowered. \note Trigger shapes will no longer send notification events for interactions with other trigger shapes. \note Shapes marked as triggers are allowed to participate in scene queries, provided the eSCENE_QUERY_SHAPE flag is set. \note This flag has no effect if simulation is disabled for the corresponding actor (see #PxActorFlag::eDISABLE_SIMULATION). @see PxSimulationEventCallback.onTrigger() PxScene.setSimulationEventCallback() PxShape.setFlag(), PxShape.setFlags() */ eTRIGGER_SHAPE = (1<<2), /** \brief Enable debug renderer for this shape @see PxScene.getRenderBuffer() PxRenderBuffer PxVisualizationParameter */ eVISUALIZATION = (1<<3) }; }; /** \brief collection of set bits defined in PxShapeFlag. @see PxShapeFlag */ typedef PxFlags<PxShapeFlag::Enum,PxU8> PxShapeFlags; PX_FLAGS_OPERATORS(PxShapeFlag::Enum,PxU8) /** \brief Abstract class for collision shapes. Shapes are shared, reference counted objects. An instance can be created by calling the createShape() method of the PxRigidActor class, or the createShape() method of the PxPhysics class. <h3>Visualizations</h3> \li PxVisualizationParameter::eCOLLISION_AABBS \li PxVisualizationParameter::eCOLLISION_SHAPES \li PxVisualizationParameter::eCOLLISION_AXES @see PxPhysics.createShape() PxRigidActor.createShape() PxBoxGeometry PxSphereGeometry PxCapsuleGeometry PxPlaneGeometry PxConvexMeshGeometry PxTriangleMeshGeometry PxHeightFieldGeometry */ class PxShape : public PxRefCounted { public: /** \brief Decrements the reference count of a shape and releases it if the new reference count is zero. Note that in releases prior to PhysX 3.3 this method did not have reference counting semantics and was used to destroy a shape created with PxActor::createShape(). In PhysX 3.3 and above, this usage is deprecated, instead, use PxRigidActor::detachShape() to detach a shape from an actor. If the shape to be detached was created with PxActor::createShape(), the actor holds the only counted reference, and so when the shape is detached it will also be destroyed. @see PxRigidActor::createShape() PxPhysics::createShape() PxRigidActor::attachShape() PxRigidActor::detachShape() */ virtual void release() = 0; /** \brief Adjust the geometry of the shape. \note The type of the passed in geometry must match the geometry type of the shape. \note It is not allowed to change the geometry type of a shape. \note This function does not guarantee correct/continuous behavior when objects are resting on top of old or new geometry. \param[in] geometry New geometry of the shape. @see PxGeometry PxGeometryType getGeometryType() */ virtual void setGeometry(const PxGeometry& geometry) = 0; /** \brief Retrieve a reference to the shape's geometry. \warning The returned reference has the same lifetime as the PxShape it comes from. \return Reference to internal PxGeometry object. @see PxGeometry PxGeometryType getGeometryType() setGeometry() */ virtual const PxGeometry& getGeometry() const = 0; /** \brief Retrieves the actor which this shape is associated with. 
\return The actor this shape is associated with, if it is an exclusive shape, else NULL @see PxRigidStatic, PxRigidDynamic, PxArticulationLink */ virtual PxRigidActor* getActor() const = 0; /************************************************************************************************/ /** @name Pose Manipulation */ //@{ /** \brief Sets the pose of the shape in actor space, i.e. relative to the actors to which they are attached. This transformation is identity by default. The local pose is an attribute of the shape, and so will apply to all actors to which the shape is attached. <b>Sleeping:</b> Does <b>NOT</b> wake the associated actor up automatically. <i>Note:</i> Does not automatically update the inertia properties of the owning actor (if applicable); use the PhysX extensions method #PxRigidBodyExt::updateMassAndInertia() to do this. <b>Default:</b> the identity transform \param[in] pose The new transform from the actor frame to the shape frame. <b>Range:</b> rigid body transform @see getLocalPose() */ virtual void setLocalPose(const PxTransform& pose) = 0; /** \brief Retrieves the pose of the shape in actor space, i.e. relative to the actor they are owned by. This transformation is identity by default. \return Pose of shape relative to the actor's frame. @see setLocalPose() */ virtual PxTransform getLocalPose() const = 0; //@} /************************************************************************************************/ /** @name Collision Filtering */ //@{ /** \brief Sets the user definable collision filter data. <b>Sleeping:</b> Does wake up the actor if the filter data change causes a formerly suppressed collision pair to be enabled. <b>Default:</b> (0,0,0,0) @see getSimulationFilterData() */ virtual void setSimulationFilterData(const PxFilterData& data) = 0; /** \brief Retrieves the shape's collision filter data. @see setSimulationFilterData() */ virtual PxFilterData getSimulationFilterData() const = 0; /** \brief Sets the user definable query filter data. <b>Default:</b> (0,0,0,0) @see getQueryFilterData() */ virtual void setQueryFilterData(const PxFilterData& data) = 0; /** \brief Retrieves the shape's Query filter data. @see setQueryFilterData() */ virtual PxFilterData getQueryFilterData() const = 0; //@} /************************************************************************************************/ /** \brief Assigns material(s) to the shape. Will remove existing materials from the shape. <b>Sleeping:</b> Does <b>NOT</b> wake the associated actor up automatically. \param[in] materials List of material pointers to assign to the shape. See #PxMaterial \param[in] materialCount The number of materials provided. @see PxPhysics.createMaterial() getMaterials() */ virtual void setMaterials(PxMaterial*const* materials, PxU16 materialCount) = 0; /** \brief Assigns FEM soft body material(s) to the shape. Will remove existing materials from the shape. <b>Sleeping:</b> Does <b>NOT</b> wake the associated actor up automatically. \param[in] materials List of material pointers to assign to the shape. See #PxFEMSoftBodyMaterial \param[in] materialCount The number of materials provided. @see PxPhysics.createFEMSoftBodyMaterial() getSoftBodyMaterials() */ virtual void setSoftBodyMaterials(PxFEMSoftBodyMaterial*const* materials, PxU16 materialCount) = 0; /** \brief Assigns FEM cloth material(s) to the shape. Will remove existing materials from the shape. \warning Feature under development, only for internal usage. 
<b>Sleeping:</b> Does <b>NOT</b> wake the associated actor up automatically. \param[in] materials List of material pointers to assign to the shape. See #PxFEMClothMaterial \param[in] materialCount The number of materials provided. @see PxPhysics.createFEMClothMaterial() getClothMaterials() */ virtual void setClothMaterials(PxFEMClothMaterial*const* materials, PxU16 materialCount) = 0; /** \brief Returns the number of materials assigned to the shape. You can use #getMaterials() to retrieve the material pointers. \return Number of materials associated with this shape. @see PxMaterial getMaterials() */ virtual PxU16 getNbMaterials() const = 0; /** \brief Retrieve all the material pointers associated with the shape. You can retrieve the number of material pointers by calling #getNbMaterials() Note: The returned data may contain invalid pointers if you release materials using #PxMaterial::release(). \param[out] userBuffer The buffer to store the material pointers. \param[in] bufferSize Size of provided user buffer. \param[in] startIndex Index of first material pointer to be retrieved \return Number of material pointers written to the buffer. @see PxMaterial getNbMaterials() PxMaterial::release() */ virtual PxU32 getMaterials(PxMaterial** userBuffer, PxU32 bufferSize, PxU32 startIndex=0) const = 0; /** \brief Retrieve all the FEM soft body material pointers associated with the shape. You can retrieve the number of material pointers by calling #getNbMaterials() Note: The returned data may contain invalid pointers if you release materials using #PxMaterial::release(). \param[out] userBuffer The buffer to store the material pointers. \param[in] bufferSize Size of provided user buffer. \param[in] startIndex Index of first material pointer to be retrieved \return Number of material pointers written to the buffer. @see PxFEMSoftBodyMaterial getNbMaterials() PxMaterial::release() */ virtual PxU32 getSoftBodyMaterials(PxFEMSoftBodyMaterial** userBuffer, PxU32 bufferSize, PxU32 startIndex = 0) const = 0; /** \brief Retrieve all the FEM cloth material pointers associated with the shape. \warning Feature under development, only for internal usage. You can retrieve the number of material pointers by calling #getNbMaterials() Note: The returned data may contain invalid pointers if you release materials using #PxMaterial::release(). \param[out] userBuffer The buffer to store the material pointers. \param[in] bufferSize Size of provided user buffer. \param[in] startIndex Index of first material pointer to be retrieved \return Number of material pointers written to the buffer. @see PxFEMClothMaterial getNbMaterials() PxMaterial::release() */ virtual PxU32 getClothMaterials(PxFEMClothMaterial** userBuffer, PxU32 bufferSize, PxU32 startIndex = 0) const = 0; /** \brief Retrieve material from given triangle index. The input index is the internal triangle index as used inside the SDK. This is the index returned to users by various SDK functions such as raycasts. This function is only useful for triangle meshes or heightfields, which have per-triangle materials. For other shapes or SDF triangle meshes, the function returns the single material associated with the shape, regardless of the index. \param[in] faceIndex The internal triangle index whose material you want to retrieve. \return Material from input triangle \note If faceIndex value of 0xFFFFffff is passed as an input for mesh and heightfield shapes, this function will issue a warning and return NULL. 
\note Scene queries set the value of PxQueryHit::faceIndex to 0xFFFFffff whenever it is undefined or does not apply. @see PxMaterial getNbMaterials() PxMaterial::release() */ virtual PxBaseMaterial* getMaterialFromInternalFaceIndex(PxU32 faceIndex) const = 0; /** \brief Sets the contact offset. Shapes whose distance is less than the sum of their contactOffset values will generate contacts. The contact offset must be positive and greater than the rest offset. Having a contactOffset greater than than the restOffset allows the collision detection system to predictively enforce the contact constraint even when the objects are slightly separated. This prevents jitter that would occur if the constraint were enforced only when shapes were within the rest distance. <b>Default:</b> 0.02f * PxTolerancesScale::length <b>Sleeping:</b> Does <b>NOT</b> wake the associated actor up automatically. \param[in] contactOffset <b>Range:</b> [maximum(0,restOffset), PX_MAX_F32) @see getContactOffset PxTolerancesScale setRestOffset */ virtual void setContactOffset(PxReal contactOffset) = 0; /** \brief Retrieves the contact offset. \return The contact offset of the shape. @see setContactOffset() */ virtual PxReal getContactOffset() const = 0; /** \brief Sets the rest offset. Two shapes will come to rest at a distance equal to the sum of their restOffset values. If the restOffset is 0, they should converge to touching exactly. Having a restOffset greater than zero is useful to have objects slide smoothly, so that they do not get hung up on irregularities of each others' surfaces. <b>Default:</b> 0.0f <b>Sleeping:</b> Does <b>NOT</b> wake the associated actor up automatically. \param[in] restOffset <b>Range:</b> (-PX_MAX_F32, contactOffset) @see getRestOffset setContactOffset */ virtual void setRestOffset(PxReal restOffset) = 0; /** \brief Retrieves the rest offset. \return The rest offset of the shape. @see setRestOffset() */ virtual PxReal getRestOffset() const = 0; /** \brief Sets the density used to interact with fluids. To be physically accurate, the density of a rigid body should be computed as its mass divided by its volume. To simplify tuning the interaction of fluid and rigid bodies, the density for fluid can differ from the real density. This allows to create floating bodies, even if they are supposed to sink with their mass and volume. <b>Default:</b> 800.0f \param[in] densityForFluid <b>Range:</b> (0, PX_MAX_F32) @see getDensityForFluid */ virtual void setDensityForFluid(PxReal densityForFluid) = 0; /** \brief Retrieves the density used to interact with fluids. \return The density of the body when interacting with fluid. @see setDensityForFluid() */ virtual PxReal getDensityForFluid() const = 0; /** \brief Sets torsional patch radius. This defines the radius of the contact patch used to apply torsional friction. If the radius is 0 (and minTorsionalPatchRadius is 0 too, see #setMinTorsionalPatchRadius), no torsional friction will be applied. If the radius is > 0, some torsional friction will be applied. This is proportional to the penetration depth so, if the shapes are separated or penetration is zero, no torsional friction will be applied. It is used to approximate rotational friction introduced by the compression of contacting surfaces. \note Will only be active, if the friction patch has a single anchor point only. This is for example the case, if a contact patch has a single contact point. \note Only supported in combination with solver type PxSolverType::eTGS. 
<b>Default:</b> 0.0 \param[in] radius <b>Range:</b> [0, PX_MAX_F32) */ virtual void setTorsionalPatchRadius(PxReal radius) = 0; /** \brief Gets torsional patch radius. See #setTorsionalPatchRadius for more info. \return The torsional patch radius of the shape. */ virtual PxReal getTorsionalPatchRadius() const = 0; /** \brief Sets minimum torsional patch radius. This defines the minimum radius of the contact patch used to apply torsional friction. If the radius is 0, the amount of torsional friction that will be applied will be entirely dependent on the value of torsionalPatchRadius. If the radius is > 0, some torsional friction will be applied regardless of the value of torsionalPatchRadius or the amount of penetration. \note Will only be active in certain cases, see #setTorsionalPatchRadius for details. <b>Default:</b> 0.0 \param[in] radius <b>Range:</b> [0, PX_MAX_F32) */ virtual void setMinTorsionalPatchRadius(PxReal radius) = 0; /** \brief Gets minimum torsional patch radius. See #setMinTorsionalPatchRadius for more info. \return The minimum torsional patch radius of the shape. */ virtual PxReal getMinTorsionalPatchRadius() const = 0; /** \brief Gets internal shape id The internal shape id can be used to reference a specific shape when processing data on the gpu. \return The shape id @see PxScene evaluateSDFDistances() */ virtual PxU32 getInternalShapeIndex() const = 0; /************************************************************************************************/ /** \brief Sets shape flags <b>Sleeping:</b> Does <b>NOT</b> wake the associated actor up automatically. \param[in] flag The shape flag to enable/disable. See #PxShapeFlag. \param[in] value True to set the flag. False to clear the flag specified in flag. <b>Default:</b> PxShapeFlag::eVISUALIZATION | PxShapeFlag::eSIMULATION_SHAPE | PxShapeFlag::eSCENE_QUERY_SHAPE @see PxShapeFlag getFlags() */ virtual void setFlag(PxShapeFlag::Enum flag, bool value) = 0; /** \brief Sets shape flags @see PxShapeFlag getFlags() */ virtual void setFlags(PxShapeFlags inFlags) = 0; /** \brief Retrieves shape flags. \return The values of the shape flags. @see PxShapeFlag setFlag() */ virtual PxShapeFlags getFlags() const = 0; /** \brief Returns true if the shape is exclusive to an actor. @see PxPhysics::createShape() */ virtual bool isExclusive() const = 0; /** \brief Sets a name string for the object that can be retrieved with #getName(). This is for debugging and is not used by the SDK. The string is not copied by the SDK, only the pointer is stored. <b>Default:</b> NULL \param[in] name The name string to set the objects name to. @see getName() */ virtual void setName(const char* name) = 0; /** \brief retrieves the name string set with setName(). \return The name associated with the shape. @see setName() */ virtual const char* getName() const = 0; virtual const char* getConcreteTypeName() const { return "PxShape"; } /************************************************************************************************/ void* userData; //!< user can assign this to whatever, usually to create a 1:1 relationship with a user object. protected: PX_INLINE PxShape(PxBaseFlags baseFlags) : PxRefCounted(baseFlags) {} PX_INLINE PxShape(PxType concreteType, PxBaseFlags baseFlags) : PxRefCounted(concreteType, baseFlags), userData(NULL) {} virtual ~PxShape() {} virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxShape", PxRefCounted); } }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
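As a usage sketch for the shape accessors documented above, the following assumes a PxPhysics, a PxMaterial and a PxRigidDynamic created elsewhere (PxCreatePhysics, PxPhysics::createMaterial, PxPhysics::createRigidDynamic); the helper name addBoxShape and the chosen values are illustrative only.

// Hedged sketch: `physics`, `body` and `material` are assumed to exist already.
#include "PxPhysicsAPI.h"

using namespace physx;

void addBoxShape(PxPhysics& physics, PxRigidDynamic& body, PxMaterial& material)
{
    // Exclusive shape (last argument true): owned by a single actor, not shared.
    PxShape* shape = physics.createShape(PxBoxGeometry(0.5f, 0.5f, 0.5f), material, true);

    shape->setLocalPose(PxTransform(PxVec3(0.0f, 0.5f, 0.0f))); // offset in the actor frame
    shape->setContactOffset(0.02f);                             // must stay above the rest offset
    shape->setRestOffset(0.0f);
    shape->setFlag(PxShapeFlag::eVISUALIZATION, true);

    const PxFilterData filterData(1, 0, 0, 0);                  // application-defined filter words
    shape->setSimulationFilterData(filterData);
    shape->setQueryFilterData(filterData);

    body.attachShape(*shape);
    shape->release();                                           // the actor keeps its own reference

    // Local pose changes do not update mass properties automatically (see setLocalPose above).
    PxRigidBodyExt::updateMassAndInertia(body, 10.0f);
}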
21,630
C
34.229642
146
0.741008
NVIDIA-Omniverse/PhysX/physx/include/PxQueryReport.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_QUERY_REPORT_H #define PX_QUERY_REPORT_H /** \addtogroup scenequery @{ */ #include "foundation/PxVec3.h" #include "foundation/PxFlags.h" #include "foundation/PxAssert.h" #include "geometry/PxGeometryHit.h" #include "geometry/PxGeometryQueryContext.h" #include "PxPhysXConfig.h" #if !PX_DOXYGEN namespace physx { #endif class PxShape; class PxRigidActor; /** \brief Combines a shape pointer and the actor the shape belongs to into one memory location. Serves as a base class for PxQueryHit. @see PxQueryHit */ struct PxActorShape { PX_INLINE PxActorShape() : actor(NULL), shape(NULL) {} PX_INLINE PxActorShape(PxRigidActor* a, PxShape* s) : actor(a), shape(s) {} PxRigidActor* actor; PxShape* shape; }; // Extends geom hits with Px object pointers struct PxRaycastHit : PxGeomRaycastHit, PxActorShape {}; struct PxOverlapHit : PxGeomOverlapHit, PxActorShape {}; struct PxSweepHit : PxGeomSweepHit, PxActorShape {}; /** \brief Describes query behavior after returning a partial query result via a callback. If callback returns true, traversal will continue and callback can be issued again. If callback returns false, traversal will stop, callback will not be issued again. @see PxHitCallback */ typedef bool PxAgain; /** \brief This callback class facilitates reporting scene query hits (intersections) to the user. User overrides the virtual processTouches function to receive hits in (possibly multiple) fixed size blocks. \note PxHitBuffer derives from this class and is used to receive touching hits in a fixed size buffer. \note Since the compiler doesn't look in template dependent base classes when looking for non-dependent names \note with some compilers it will be necessary to use "this->hasBlock" notation to access a parent variable \note in a child callback class. 
\note Pre-made typedef shorthands, such as ::PxRaycastCallback can be used for raycast, overlap and sweep queries. @see PxHitBuffer PxRaycastHit PxSweepHit PxOverlapHit PxRaycastCallback PxOverlapCallback PxSweepCallback */ template<typename HitType> struct PxHitCallback : PxQueryThreadContext { HitType block; //!< Holds the closest blocking hit result for the query. Invalid if hasBlock is false. bool hasBlock; //!< Set to true if there was a blocking hit during query. HitType* touches; //!< User specified buffer for touching hits. /** \brief Size of the user specified touching hits buffer. \note If set to 0 all hits will default to PxQueryHitType::eBLOCK, otherwise to PxQueryHitType::eTOUCH \note Hit type returned from pre-filter overrides this default */ PxU32 maxNbTouches; /** \brief Number of touching hits returned by the query. Used with PxHitBuffer. \note If true (PxAgain) is returned from the callback, nbTouches will be reset to 0. */ PxU32 nbTouches; /** \brief Initializes the class with user provided buffer. \param[in] aTouches Optional buffer for recording PxQueryHitType::eTOUCH type hits. \param[in] aMaxNbTouches Size of touch buffer. \note if aTouches is NULL and aMaxNbTouches is 0, only the closest blocking hit will be recorded by the query. \note If PxQueryFlag::eANY_HIT flag is used as a query parameter, hasBlock will be set to true and blockingHit will be used to receive the result. \note Both eTOUCH and eBLOCK hits will be registered as hasBlock=true and stored in PxHitCallback.block when eANY_HIT flag is used. @see PxHitCallback.hasBlock PxHitCallback.block */ PxHitCallback(HitType* aTouches, PxU32 aMaxNbTouches) : hasBlock(false), touches(aTouches), maxNbTouches(aMaxNbTouches), nbTouches(0) {} /** \brief virtual callback function used to communicate query results to the user. This callback will always be invoked with #touches as a buffer if #touches was specified as non-NULL. All reported touch hits are guaranteed to be closer than the closest blocking hit. \param[in] buffer Callback will report touch hits to the user in this buffer. This pointer will be the same as #touches. \param[in] nbHits Number of touch hits reported in buffer. This number will not exceed #maxNbTouches. \note There is a significant performance penalty in case multiple touch callbacks are issued (up to 2x) \note to avoid the penalty use a bigger buffer so that all touching hits can be reported in a single buffer. \note If true (again) is returned from the callback, nbTouches will be reset to 0, \note If false is returned, nbTouched will remain unchanged. \note By the time processTouches is first called, the globally closest blocking hit is already determined, \note values of hasBlock and block are final and all touch hits are guaranteed to be closer than the blocking hit. \note touches and maxNbTouches can be modified inside of processTouches callback. \return true to continue receiving callbacks in case there are more hits or false to stop. @see PxAgain PxRaycastHit PxSweepHit PxOverlapHit */ virtual PxAgain processTouches(const HitType* buffer, PxU32 nbHits) = 0; virtual void finalizeQuery() {} //!< Query finalization callback, called after the last processTouches callback. virtual ~PxHitCallback() {} /** \brief Returns true if any blocking or touching hits were encountered during a query. */ PX_FORCE_INLINE bool hasAnyHits() { return (hasBlock || (nbTouches > 0)); } }; /** \brief Returns scene query hits (intersections) to the user in a preallocated buffer. 
Will clip touch hits to maximum buffer capacity. When clipped, an arbitrary subset of touching hits will be discarded. Overflow does not trigger warnings or errors. block and hasBlock will be valid in finalizeQuery callback and after query completion. Touching hits are guaranteed to have closer or same distance ( <= condition) as the globally nearest blocking hit at the time any processTouches() callback is issued. \note Pre-made typedef shorthands, such as ::PxRaycastBuffer can be used for raycast, overlap and sweep queries. @see PxHitCallback @see PxRaycastBuffer PxOverlapBuffer PxSweepBuffer PxRaycastBufferN PxOverlapBufferN PxSweepBufferN */ template<typename HitType> struct PxHitBuffer : public PxHitCallback<HitType> { /** \brief Initializes the buffer with user memory. The buffer is initialized with 0 touch hits by default => query will only report a single closest blocking hit. Use PxQueryFlag::eANY_HIT to tell the query to abort and return any first hit encoutered as blocking. \param[in] aTouches Optional buffer for recording PxQueryHitType::eTOUCH type hits. \param[in] aMaxNbTouches Size of touch buffer. @see PxHitCallback */ PxHitBuffer(HitType* aTouches = NULL, PxU32 aMaxNbTouches = 0) : PxHitCallback<HitType>(aTouches, aMaxNbTouches) {} /** \brief Computes the number of any hits in this result, blocking or touching. */ PX_INLINE PxU32 getNbAnyHits() const { return getNbTouches() + PxU32(this->hasBlock); } /** \brief Convenience iterator used to access any hits in this result, blocking or touching. */ PX_INLINE const HitType& getAnyHit(const PxU32 index) const { PX_ASSERT(index < getNbTouches() + PxU32(this->hasBlock)); return index < getNbTouches() ? getTouches()[index] : this->block; } PX_INLINE PxU32 getNbTouches() const { return this->nbTouches; } PX_INLINE const HitType* getTouches() const { return this->touches; } PX_INLINE const HitType& getTouch(const PxU32 index) const { PX_ASSERT(index < getNbTouches()); return getTouches()[index]; } PX_INLINE PxU32 getMaxNbTouches() const { return this->maxNbTouches; } virtual ~PxHitBuffer() {} protected: // stops after the first callback virtual PxAgain processTouches(const HitType* buffer, PxU32 nbHits) { PX_UNUSED(buffer); PX_UNUSED(nbHits); return false; } }; /** \brief Raycast query callback. */ typedef PxHitCallback<PxRaycastHit> PxRaycastCallback; /** \brief Overlap query callback. */ typedef PxHitCallback<PxOverlapHit> PxOverlapCallback; /** \brief Sweep query callback. */ typedef PxHitCallback<PxSweepHit> PxSweepCallback; /** \brief Raycast query buffer. */ typedef PxHitBuffer<PxRaycastHit> PxRaycastBuffer; /** \brief Overlap query buffer. */ typedef PxHitBuffer<PxOverlapHit> PxOverlapBuffer; /** \brief Sweep query buffer. */ typedef PxHitBuffer<PxSweepHit> PxSweepBuffer; /** \brief Returns touching raycast hits to the user in a fixed size array embedded in the buffer class. **/ template <int N> struct PxRaycastBufferN : public PxHitBuffer<PxRaycastHit> { PxRaycastHit hits[N]; PxRaycastBufferN() : PxHitBuffer<PxRaycastHit>(hits, N) {} }; /** \brief Returns touching overlap hits to the user in a fixed size array embedded in the buffer class. **/ template <int N> struct PxOverlapBufferN : public PxHitBuffer<PxOverlapHit> { PxOverlapHit hits[N]; PxOverlapBufferN() : PxHitBuffer<PxOverlapHit>(hits, N) {} }; /** \brief Returns touching sweep hits to the user in a fixed size array embedded in the buffer class. 
**/ template <int N> struct PxSweepBufferN : public PxHitBuffer<PxSweepHit> { PxSweepHit hits[N]; PxSweepBufferN() : PxHitBuffer<PxSweepHit>(hits, N) {} }; /** \brief single hit cache for scene queries. If a cache object is supplied to a scene query, the cached actor/shape pair is checked for intersection first. \note Filters are not executed for the cached shape. \note If intersection is found, the hit is treated as blocking. \note Typically actor and shape from the last PxHitCallback.block query result is used as a cached actor/shape pair. \note Using past touching hits as cache will produce incorrect behavior since the cached hit will always be treated as blocking. \note Cache is only used if no touch buffer was provided, for single nearest blocking hit queries and queries using eANY_HIT flag. \note if non-zero touch buffer was provided, cache will be ignored \note It is the user's responsibility to ensure that the shape and actor are valid, so care must be taken when deleting shapes to invalidate cached references. The faceIndex field is an additional hint for a mesh or height field which is not currently used. @see PxScene.raycast */ struct PxQueryCache { /** \brief constructor sets to default */ PX_INLINE PxQueryCache() : shape(NULL), actor(NULL), faceIndex(0xffffffff) {} /** \brief constructor to set properties */ PX_INLINE PxQueryCache(PxShape* s, PxU32 findex) : shape(s), actor(NULL), faceIndex(findex) {} PxShape* shape; //!< Shape to test for intersection first PxRigidActor* actor; //!< Actor to which the shape belongs PxU32 faceIndex; //!< Triangle index to test first - NOT CURRENTLY SUPPORTED }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
12,332
C
42.122377
147
0.766137
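Illustrative usage sketch for the scene-query types declared in PxScene.h above (not part of the header itself): a raycast that collects up to 16 touching hits plus the closest blocking hit through the PxRaycastBufferN helper. Per the documentation above, a non-zero touch buffer makes unfiltered hits default to PxQueryHitType::eTOUCH. The PxScene::raycast overload taking an origin, unit direction, maximum distance and a PxRaycastCallback is assumed here; it is declared elsewhere in the same header. The helper function name is hypothetical.

#include "PxScene.h"

// Hypothetical helper, for illustration only.
void queryExample(physx::PxScene& scene, const physx::PxVec3& origin, const physx::PxVec3& unitDir)
{
    physx::PxRaycastBufferN<16> hit;              // touch buffer embedded in the object (maxNbTouches = 16)
    scene.raycast(origin, unitDir, 100.0f, hit);  // assumed overload: origin, unit direction, max distance, callback

    if (hit.hasBlock)
    {
        const physx::PxRaycastHit& blocking = hit.block;  // globally closest blocking hit
        (void)blocking;
    }

    // All touching hits are guaranteed to be at least as close as the blocking hit.
    for (physx::PxU32 i = 0; i < hit.getNbTouches(); ++i)
    {
        const physx::PxRaycastHit& touch = hit.getTouch(i);
        (void)touch;
    }
}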
NVIDIA-Omniverse/PhysX/physx/include/filebuf/PxFileBuf.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PSFILEBUFFER_PXFILEBUF_H #define PSFILEBUFFER_PXFILEBUF_H #include "foundation/PxSimpleTypes.h" /** \addtogroup foundation @{ */ #if !PX_DOXYGEN namespace physx { namespace general_PxIOStream2 { #endif PX_PUSH_PACK_DEFAULT /** \brief Callback class for data serialization. The user needs to supply an PxFileBuf implementation to a number of methods to allow the SDK to read or write chunks of binary data. This allows flexibility for the source/destination of the data. For example the PxFileBuf could store data in a file, memory buffer or custom file format. \note It is the users responsibility to ensure that the data is written to the appropriate offset. */ class PxFileBuf { public: enum EndianMode { ENDIAN_NONE = 0, // do no conversion for endian mode ENDIAN_BIG = 1, // always read/write data as natively big endian (Power PC, etc.) ENDIAN_LITTLE = 2 // always read/write data as natively little endian (Intel, etc.) Default Behavior! }; PxFileBuf(EndianMode mode=ENDIAN_LITTLE) { setEndianMode(mode); } virtual ~PxFileBuf(void) { } /** \brief Declares a constant to seek to the end of the stream. 
* * Does not support streams longer than 32 bits */ static const uint32_t STREAM_SEEK_END=0xFFFFFFFF; enum OpenMode { OPEN_FILE_NOT_FOUND, OPEN_READ_ONLY, // open file buffer stream for read only access OPEN_WRITE_ONLY, // open file buffer stream for write only access OPEN_READ_WRITE_NEW, // open a new file for both read/write access OPEN_READ_WRITE_EXISTING // open an existing file for both read/write access }; virtual OpenMode getOpenMode(void) const = 0; bool isOpen(void) const { return getOpenMode()!=OPEN_FILE_NOT_FOUND; } enum SeekType { SEEKABLE_NO = 0, SEEKABLE_READ = 0x1, SEEKABLE_WRITE = 0x2, SEEKABLE_READWRITE = 0x3 }; virtual SeekType isSeekable(void) const = 0; void setEndianMode(EndianMode e) { mEndianMode = e; if ( (e==ENDIAN_BIG && !isBigEndian() ) || (e==ENDIAN_LITTLE && isBigEndian() ) ) { mEndianSwap = true; } else { mEndianSwap = false; } } EndianMode getEndianMode(void) const { return mEndianMode; } virtual uint32_t getFileLength(void) const = 0; /** \brief Seeks the stream to a particular location for reading * * If the location passed exceeds the length of the stream, then it will seek to the end. * Returns the location it ended up at (useful if you seek to the end) to get the file position */ virtual uint32_t seekRead(uint32_t loc) = 0; /** \brief Seeks the stream to a particular location for writing * * If the location passed exceeds the length of the stream, then it will seek to the end. * Returns the location it ended up at (useful if you seek to the end) to get the file position */ virtual uint32_t seekWrite(uint32_t loc) = 0; /** \brief Reads from the stream into a buffer. \param[out] mem The buffer to read the stream into. \param[in] len The number of bytes to stream into the buffer \return Returns the actual number of bytes read. If not equal to the length requested, then reached end of stream. */ virtual uint32_t read(void *mem,uint32_t len) = 0; /** \brief Reads from the stream into a buffer but does not advance the read location. \param[out] mem The buffer to read the stream into. \param[in] len The number of bytes to stream into the buffer \return Returns the actual number of bytes read. If not equal to the length requested, then reached end of stream. */ virtual uint32_t peek(void *mem,uint32_t len) = 0; /** \brief Writes a buffer of memory to the stream \param[in] mem The address of a buffer of memory to send to the stream. \param[in] len The number of bytes to send to the stream. \return Returns the actual number of bytes sent to the stream. If not equal to the length specified, then the stream is full or unable to write for some reason. */ virtual uint32_t write(const void *mem,uint32_t len) = 0; /** \brief Reports the current stream location for read access. \return Returns the current stream read location. */ virtual uint32_t tellRead(void) const = 0; /** \brief Reports the current stream location for write access. \return Returns the current stream write location. */ virtual uint32_t tellWrite(void) const = 0; /** \brief Causes any temporarily cached data to be flushed to the stream. */ virtual void flush(void) = 0; /** \brief Close the stream.
*/ virtual void close(void) {} void release(void) { PX_DELETE_THIS; } static PX_INLINE bool isBigEndian() { int32_t i = 1; return *(reinterpret_cast<char*>(&i))==0; } PX_INLINE void swap2Bytes(void* _data) const { char *data = static_cast<char *>(_data); char one_byte; one_byte = data[0]; data[0] = data[1]; data[1] = one_byte; } PX_INLINE void swap4Bytes(void* _data) const { char *data = static_cast<char *>(_data); char one_byte; one_byte = data[0]; data[0] = data[3]; data[3] = one_byte; one_byte = data[1]; data[1] = data[2]; data[2] = one_byte; } PX_INLINE void swap8Bytes(void *_data) const { char *data = static_cast<char *>(_data); char one_byte; one_byte = data[0]; data[0] = data[7]; data[7] = one_byte; one_byte = data[1]; data[1] = data[6]; data[6] = one_byte; one_byte = data[2]; data[2] = data[5]; data[5] = one_byte; one_byte = data[3]; data[3] = data[4]; data[4] = one_byte; } PX_INLINE void storeDword(uint32_t v) { if ( mEndianSwap ) swap4Bytes(&v); write(&v,sizeof(v)); } PX_INLINE void storeFloat(float v) { if ( mEndianSwap ) swap4Bytes(&v); write(&v,sizeof(v)); } PX_INLINE void storeDouble(double v) { if ( mEndianSwap ) swap8Bytes(&v); write(&v,sizeof(v)); } PX_INLINE void storeByte(uint8_t b) { write(&b,sizeof(b)); } PX_INLINE void storeWord(uint16_t w) { if ( mEndianSwap ) swap2Bytes(&w); write(&w,sizeof(w)); } uint8_t readByte(void) { uint8_t v=0; read(&v,sizeof(v)); return v; } uint16_t readWord(void) { uint16_t v=0; read(&v,sizeof(v)); if ( mEndianSwap ) swap2Bytes(&v); return v; } uint32_t readDword(void) { uint32_t v=0; read(&v,sizeof(v)); if ( mEndianSwap ) swap4Bytes(&v); return v; } float readFloat(void) { float v=0; read(&v,sizeof(v)); if ( mEndianSwap ) swap4Bytes(&v); return v; } double readDouble(void) { double v=0; read(&v,sizeof(v)); if ( mEndianSwap ) swap8Bytes(&v); return v; } private: bool mEndianSwap; // whether or not the endian should be swapped on the current platform EndianMode mEndianMode; // the current endian mode behavior for the stream }; PX_POP_PACK #if !PX_DOXYGEN } // end of namespace using namespace general_PxIOStream2; namespace general_PxIOStream = general_PxIOStream2; } // end of namespace #endif /** @} */ #endif // PSFILEBUFFER_PXFILEBUF_H
8,643
C
24.498525
161
0.693162
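A minimal sketch, not part of the SDK, of an in-memory PxFileBuf implementation. It shows how the pure virtual read/write/seek interface above combines with the endian-aware storeDword/readDword helpers provided by the base class. The class name MemoryFileBuf is illustrative; physx::PxFileBuf resolves through the using-directive at the end of the header.

#include <vector>
#include <cstring>
#include <cstdint>
#include "filebuf/PxFileBuf.h"

// Hypothetical growable, seekable, in-memory stream for illustration only.
class MemoryFileBuf : public physx::PxFileBuf
{
public:
    MemoryFileBuf() : mReadPos(0), mWritePos(0) {}

    OpenMode getOpenMode(void) const   { return OPEN_READ_WRITE_NEW; }
    SeekType isSeekable(void) const    { return SEEKABLE_READWRITE; }
    uint32_t getFileLength(void) const { return uint32_t(mData.size()); }

    // Seeking past the end (including STREAM_SEEK_END) clamps to the end of the data.
    uint32_t seekRead(uint32_t loc)  { mReadPos  = loc < mData.size() ? loc : uint32_t(mData.size()); return mReadPos; }
    uint32_t seekWrite(uint32_t loc) { mWritePos = loc < mData.size() ? loc : uint32_t(mData.size()); return mWritePos; }

    uint32_t peek(void* mem, uint32_t len)
    {
        const uint32_t avail = uint32_t(mData.size()) - mReadPos;
        const uint32_t n = len < avail ? len : avail;
        if (n)
            std::memcpy(mem, &mData[mReadPos], n);
        return n;
    }

    uint32_t read(void* mem, uint32_t len)
    {
        const uint32_t n = peek(mem, len);
        mReadPos += n;
        return n;
    }

    uint32_t write(const void* mem, uint32_t len)
    {
        if (mWritePos + len > mData.size())
            mData.resize(mWritePos + len);
        if (len)
            std::memcpy(&mData[mWritePos], mem, len);
        mWritePos += len;
        return len;
    }

    uint32_t tellRead(void) const  { return mReadPos; }
    uint32_t tellWrite(void) const { return mWritePos; }
    void     flush(void)           {}

private:
    std::vector<uint8_t> mData;
    uint32_t mReadPos;
    uint32_t mWritePos;
};

// Usage: values written with storeDword/storeFloat come back through readDword/readFloat,
// with any endian conversion handled by the base class.
//   MemoryFileBuf buf;
//   buf.storeDword(42);
//   buf.storeFloat(1.5f);
//   buf.seekRead(0);
//   uint32_t i = buf.readDword();   // 42
//   float    f = buf.readFloat();   // 1.5f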
NVIDIA-Omniverse/PhysX/physx/include/cooking/PxConvexMeshDesc.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_CONVEX_MESH_DESC_H #define PX_CONVEX_MESH_DESC_H /** \addtogroup cooking @{ */ #include "foundation/PxVec3.h" #include "foundation/PxFlags.h" #include "common/PxCoreUtilityTypes.h" #include "geometry/PxConvexMesh.h" #include "PxSDFDesc.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief Flags which describe the format and behavior of a convex mesh. */ struct PxConvexFlag { enum Enum { /** Denotes the use of 16-bit vertex indices in PxConvexMeshDesc::triangles or PxConvexMeshDesc::polygons. (otherwise, 32-bit indices are assumed) @see #PxConvexMeshDesc.indices */ e16_BIT_INDICES = (1<<0), /** Automatically recomputes the hull from the vertices. If this flag is not set, you must provide the entire geometry manually. \note There are two different algorithms for hull computation, please see PxConvexMeshCookingType. @see PxConvexMeshCookingType */ eCOMPUTE_CONVEX = (1<<1), /** \brief Checks and removes almost zero-area triangles during convex hull computation. The rejected area size is specified in PxCookingParams::areaTestEpsilon \note This flag is only used in combination with eCOMPUTE_CONVEX. @see PxCookingParams PxCookingParams::areaTestEpsilon */ eCHECK_ZERO_AREA_TRIANGLES = (1<<2), /** \brief Quantizes the input vertices using the k-means clustering \note The input vertices are quantized to PxConvexMeshDesc::quantizedCount see http://en.wikipedia.org/wiki/K-means_clustering */ eQUANTIZE_INPUT = (1 << 3), /** \brief Disables the convex mesh validation to speed-up hull creation. Please use separate validation function in checked/debug builds. Creating a convex mesh with invalid input data without prior validation may result in undefined behavior. @see PxCooking::validateConvexMesh */ eDISABLE_MESH_VALIDATION = (1 << 4), /** \brief Enables plane shifting vertex limit algorithm. 
Plane shifting is an alternative algorithm for the case when the computed hull has more vertices than the specified vertex limit. The default algorithm computes the full hull, and an OBB around the input vertices. This OBB is then sliced with the hull planes until the vertex limit is reached. The default algorithm requires the vertex limit to be set to at least 8, and typically produces results of much better quality than plane shifting. When plane shifting is enabled, the hull computation stops when the vertex limit is reached. The hull planes are then shifted to contain all input vertices, and the new plane intersection points are then used to generate the final hull with the given vertex limit. Plane shifting may produce sharp edges to vertices very far away from the input cloud, and does not guarantee that all input vertices are inside the resulting hull. However, it can be used with a vertex limit as low as 4. */ ePLANE_SHIFTING = (1 << 5), /** \brief Inertia tensor computation is faster using SIMD code, but the precision is lower, which may result in incorrect inertia for very thin hulls. */ eFAST_INERTIA_COMPUTATION = (1 << 6), /** \brief Convex hulls are created with respect to GPU simulation limitations. Vertex limit and polygon limit are set to 64 and vertex limit per face is internally set to 32. \note Can be used only with eCOMPUTE_CONVEX flag. @deprecated since PhysX 5.2. Setting #PxCookingParams::buildGPUData to true always cooks GPU-compatible meshes. */ eGPU_COMPATIBLE PX_DEPRECATED = (1 << 7), /** \brief Convex hull input vertices are shifted to be around origin to provide better computation stability. It is recommended to provide input vertices around the origin, otherwise use this flag to improve numerical stability. \note Is used only with eCOMPUTE_CONVEX flag. */ eSHIFT_VERTICES = (1 << 8) }; }; /** \brief Collection of set bits defined in PxConvexFlag. @see PxConvexFlag */ typedef PxFlags<PxConvexFlag::Enum,PxU16> PxConvexFlags; PX_FLAGS_OPERATORS(PxConvexFlag::Enum,PxU16) /** \brief Descriptor class for #PxConvexMesh. \note The number of vertices and the number of convex polygons in a cooked convex mesh is limited to 255. \note The number of vertices and the number of convex polygons in a GPU compatible convex mesh is limited to 64, and the number of faces per vertex is limited to 32. @see PxConvexMesh PxConvexMeshGeometry PxShape PxPhysics.createConvexMesh() */ class PxConvexMeshDesc { public: /** \brief Vertex positions data in PxBoundedData format. <b>Default:</b> NULL */ PxBoundedData points; /** \brief Polygons data in PxBoundedData format. <p>Pointer to first polygon. </p> <b>Default:</b> NULL @see PxHullPolygon */ PxBoundedData polygons; /** \brief Polygon indices data in PxBoundedData format. <p>Pointer to first index.</p> <b>Default:</b> NULL <p>This is declared as a void pointer because it is actually either a PxU16 or a PxU32 pointer.</p> @see PxHullPolygon PxConvexFlag::e16_BIT_INDICES */ PxBoundedData indices; /** \brief Flag bits, combined from values of the enum ::PxConvexFlag <b>Default:</b> 0 */ PxConvexFlags flags; /** \brief Limits the number of vertices of the result convex mesh. Hard maximum limit is 255 and minimum limit is 4 if PxConvexFlag::ePLANE_SHIFTING is used, otherwise the minimum limit is 8. \note Please see PxConvexFlag::ePLANE_SHIFTING for an explanation of the algorithm. \note The maximum limit for GPU compatible convex meshes is 64.
@see PxConvexFlag::ePLANE_SHIFTING <b>Range:</b> [4, 255]<br> <b>Default:</b> 255 */ PxU16 vertexLimit; /** \brief Limits the number of polygons of the result convex mesh. Hard maximum limit is 255 and minimum limit is 4. \note The maximum limit for GPU compatible convex meshes is 64. <b>Range:</b> [4, 255]<br> <b>Default:</b> 255 */ PxU16 polygonLimit; /** \brief Maximum number of vertices after quantization. The quantization is done during the vertex cleaning phase. The quantization is applied when PxConvexFlag::eQUANTIZE_INPUT is specified. @see PxConvexFlag::eQUANTIZE_INPUT <b>Range:</b> [4, 65535]<br> <b>Default:</b> 255 */ PxU16 quantizedCount; /** \brief SDF descriptor. When this descriptor is set, signed distance field is calculated for this convex mesh. <b>Default:</b> NULL */ PxSDFDesc* sdfDesc; /** \brief constructor sets to default. */ PX_INLINE PxConvexMeshDesc(); /** \brief (re)sets the structure to the default. */ PX_INLINE void setToDefault(); /** \brief Returns true if the descriptor is valid. \return True if the current settings are valid */ PX_INLINE bool isValid() const; }; PX_INLINE PxConvexMeshDesc::PxConvexMeshDesc() //constructor sets to default : vertexLimit(255), polygonLimit(255), quantizedCount(255), sdfDesc(NULL) { } PX_INLINE void PxConvexMeshDesc::setToDefault() { *this = PxConvexMeshDesc(); } PX_INLINE bool PxConvexMeshDesc::isValid() const { // Check geometry if(points.count < 3 || //at least 1 trig's worth of points (points.count > 0xffff && flags & PxConvexFlag::e16_BIT_INDICES)) return false; if(!points.data) return false; if(points.stride < sizeof(PxVec3)) //should be at least one point's worth of data return false; if (quantizedCount < 4) return false; // Check topology if(polygons.data) { if(polygons.count < 4) // we require 2 neighbors for each vertex - 4 polygons at least return false; if(!indices.data) // indices must be provided together with polygons return false; PxU32 limit = (flags & PxConvexFlag::e16_BIT_INDICES) ? sizeof(PxU16) : sizeof(PxU32); if(indices.stride < limit) return false; limit = sizeof(PxHullPolygon); if(polygons.stride < limit) return false; } else { // We can compute the hull from the vertices if(!(flags & PxConvexFlag::eCOMPUTE_CONVEX)) return false; // If the mesh is convex and we're not allowed to compute the hull, // you have to provide it completely (geometry & topology). } if((flags & PxConvexFlag::ePLANE_SHIFTING) && vertexLimit < 4) { return false; } if (!(flags & PxConvexFlag::ePLANE_SHIFTING) && vertexLimit < 8) { return false; } if(vertexLimit > 255) { return false; } if (polygonLimit < 4) { return false; } if (polygonLimit > 255) { return false; } if (sdfDesc && !sdfDesc->isValid()) { return false; } return true; } #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
10,175
C
28.325648
126
0.733759
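A hedged usage sketch for the descriptor above (not part of the header): cooking a convex hull from a point cloud only, letting the cooker compute the hull via PxConvexFlag::eCOMPUTE_CONVEX. It relies on the standalone PxCreateConvexMesh convenience overload declared in cooking/PxCooking.h (shown later in this dump); the function name cookHullFromPoints is hypothetical, and the PxTolerancesScale is assumed to match the one used to create PxPhysics.

#include "cooking/PxConvexMeshDesc.h"
#include "cooking/PxCooking.h"

// Hypothetical helper, for illustration only.
physx::PxConvexMesh* cookHullFromPoints(const physx::PxVec3* verts, physx::PxU32 count,
                                         const physx::PxTolerancesScale& scale)
{
    physx::PxConvexMeshDesc desc;
    desc.points.count  = count;                    // at least 3 points required by isValid()
    desc.points.stride = sizeof(physx::PxVec3);
    desc.points.data   = verts;
    desc.flags         = physx::PxConvexFlag::eCOMPUTE_CONVEX;  // no polygons provided, so the hull is computed
    // Optional: clamp the hull size, e.g. desc.vertexLimit = 64 for GPU-friendly hulls.

    if (!desc.isValid())
        return NULL;

    physx::PxCookingParams params(scale);
    return PxCreateConvexMesh(params, desc);       // standalone-insertion convenience overload
}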
NVIDIA-Omniverse/PhysX/physx/include/cooking/PxCookingInternal.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_COOKING_INTERNAL_H #define PX_COOKING_INTERNAL_H /** \addtogroup cooking @{ */ #include "cooking/PxCooking.h" #if !PX_DOXYGEN namespace physx { #endif struct PxTriangleMeshInternalData; struct PxBVHInternalData; class PxTriangleMesh; class PxBVH; #if !PX_DOXYGEN } // namespace physx #endif PX_C_EXPORT PX_PHYSX_COOKING_API physx::PxTriangleMesh* PX_CALL_CONV PxCreateTriangleMeshInternal(const physx::PxTriangleMeshInternalData& data); PX_C_EXPORT PX_PHYSX_COOKING_API physx::PxBVH* PX_CALL_CONV PxCreateBVHInternal(const physx::PxBVHInternalData& data); /** @} */ #endif
2,293
C
39.245613
146
0.766681
NVIDIA-Omniverse/PhysX/physx/include/cooking/PxCooking.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_COOKING_H #define PX_COOKING_H /** \addtogroup cooking @{ */ #include "common/PxPhysXCommonConfig.h" #include "common/PxTolerancesScale.h" #include "cooking/Pxc.h" #include "cooking/PxConvexMeshDesc.h" #include "cooking/PxTriangleMeshDesc.h" #include "cooking/PxTetrahedronMeshDesc.h" #include "cooking/PxMidphaseDesc.h" #include "cooking/PxBVHDesc.h" #include "geometry/PxTriangleMesh.h" #include "geometry/PxTetrahedronMesh.h" #include "geometry/PxBVH.h" #if !PX_DOXYGEN namespace physx { #endif class PxInsertionCallback; class PxFoundation; class PxAllocatorCallback; class PxHeightFieldDesc; /** \brief Result from convex cooking. */ struct PxConvexMeshCookingResult { enum Enum { /** \brief Convex mesh cooking succeeded. */ eSUCCESS, /** \brief Convex mesh cooking failed, algorithm couldn't find 4 initial vertices without a small triangle. @see PxCookingParams::areaTestEpsilon PxConvexFlag::eCHECK_ZERO_AREA_TRIANGLES */ eZERO_AREA_TEST_FAILED, /** \brief Convex mesh cooking succeeded, but the algorithm has reached the 255 polygons limit. The produced hull does not contain all input vertices. Try to simplify the input vertices or try to use the eINFLATE_CONVEX or the eQUANTIZE_INPUT flags. @see PxConvexFlag::eINFLATE_CONVEX PxConvexFlag::eQUANTIZE_INPUT */ ePOLYGONS_LIMIT_REACHED, /** \brief Something unrecoverable happened. Check the error stream to find out what. */ eFAILURE, /** \brief Convex mesh cooking succeeded, but the algorithm could not make the mesh GPU compatible because the in-sphere radius is more than 100x smaller than the largest extent. Collision detection for any pair involving this convex mesh will fall back to CPU. */ eNON_GPU_COMPATIBLE }; }; /** \brief Enumeration for convex mesh cooking algorithms. 
*/ struct PxConvexMeshCookingType { enum Enum { /** \brief The Quickhull algorithm constructs the hull from the given input points. The resulting hull will only contain a subset of the input points. */ eQUICKHULL }; }; /** \brief Result from triangle mesh cooking */ struct PxTriangleMeshCookingResult { enum Enum { /** \brief Everything is A-OK. */ eSUCCESS = 0, /** \brief A triangle is too large for well-conditioned results. Tessellate the mesh for better behavior, see the user guide section on cooking for more details. */ eLARGE_TRIANGLE, /** \brief The mesh cleaning operation removed all triangles, resulting in an empty mesh. */ eEMPTY_MESH, /** \brief Something unrecoverable happened. Check the error stream to find out what. */ eFAILURE }; }; /** \brief Enum for the set of mesh pre-processing parameters. */ struct PxMeshPreprocessingFlag { enum Enum { /** \brief When set, mesh welding is performed. See PxCookingParams::meshWeldTolerance. Mesh cleaning must be enabled. */ eWELD_VERTICES = 1 << 0, /** \brief When set, mesh cleaning is disabled. This makes cooking faster. When mesh cleaning is disabled, mesh welding is also disabled. It is recommended to use only meshes that passed during validateTriangleMesh. */ eDISABLE_CLEAN_MESH = 1 << 1, /** \brief When set, active edges are not computed and just enabled for all edges. This makes cooking faster but contact generation slower. */ eDISABLE_ACTIVE_EDGES_PRECOMPUTE = 1 << 2, /** \brief When set, 32-bit indices will always be created regardless of triangle count. \note By default mesh will be created with 16-bit indices for triangle count <= 0xFFFF and 32-bit otherwise. */ eFORCE_32BIT_INDICES = 1 << 3, /** \brief When set, a list of triangles will be created for each associated vertex in the mesh. */ eENABLE_VERT_MAPPING = 1 << 4, /** \brief When set, inertia tensor is calculated for the mesh. */ eENABLE_INERTIA = 1 << 5 }; }; typedef PxFlags<PxMeshPreprocessingFlag::Enum,PxU32> PxMeshPreprocessingFlags; /** \brief Structure describing parameters affecting mesh cooking. @see PxSetCookingParams() PxGetCookingParams() */ struct PxCookingParams { /** \brief Zero-size area epsilon used in convex hull computation. If the area of a triangle of the hull is below this value, the triangle will be rejected. This test is done only if PxConvexFlag::eCHECK_ZERO_AREA_TRIANGLES is used. @see PxConvexFlag::eCHECK_ZERO_AREA_TRIANGLES <b>Default value:</b> 0.06f*PxTolerancesScale.length*PxTolerancesScale.length <b>Range:</b> (0.0f, PX_MAX_F32) */ float areaTestEpsilon; /** \brief Plane tolerance used in convex hull computation. The value is used during hull construction. When a new point is about to be added to the hull it gets dropped when the point is closer to the hull than the planeTolerance. The planeTolerance is increased according to the hull size. If 0.0f is set all points are accepted when the convex hull is created. This may lead to edge cases where the new points may be merged into an existing polygon and the polygons plane equation might slightly change therefore. This might lead to failures during polygon merging phase in the hull computation. It is recommended to use the default value, however if it is required that all points needs to be accepted or huge thin convexes are created, it might be required to lower the default value. \note The plane tolerance is used only within PxConvexMeshCookingType::eQUICKHULL algorithm. 
<b>Default value:</b> 0.0007f <b>Range:</b> <0.0f, PX_MAX_F32) */ float planeTolerance; /** \brief Convex hull creation algorithm. <b>Default value:</b> PxConvexMeshCookingType::eQUICKHULL @see PxConvexMeshCookingType */ PxConvexMeshCookingType::Enum convexMeshCookingType; /** \brief When true, the face remap table is not created. This saves a significant amount of memory, but the SDK will not be able to provide the remap information for internal mesh triangles returned by collisions, sweeps or raycast hits. <b>Default value:</b> false */ bool suppressTriangleMeshRemapTable; /** \brief When true, the triangle adjacency information is created. You can get the adjacency triangles for a given triangle from getTriangle. <b>Default value:</b> false */ bool buildTriangleAdjacencies; /** \brief When true, additional information required for GPU-accelerated rigid body simulation is created. This can increase memory usage and cooking times for convex meshes and triangle meshes. <b>Default value:</b> false */ bool buildGPUData; /** \brief Tolerance scale is used to check that cooked triangles are not too large. This check helps with simulation stability. \note The PxTolerancesScale values have to match the values used when creating a PxPhysics or PxScene instance. @see PxTolerancesScale */ PxTolerancesScale scale; /** \brief Mesh pre-processing parameters. Used to control options like whether the mesh cooking performs vertex welding before cooking. <b>Default value:</b> 0 */ PxMeshPreprocessingFlags meshPreprocessParams; /** \brief Mesh weld tolerance. If mesh welding is enabled, this controls the distance at which vertices are welded. If mesh welding is not enabled, this value defines the acceptance distance for mesh validation. Provided no two vertices are within this distance, the mesh is considered to be clean. If not, a warning will be emitted. Having a clean, welded mesh is required to achieve the best possible performance. The default vertex welding uses a snap-to-grid approach. This approach effectively truncates each vertex to integer values using meshWeldTolerance. Once these snapped vertices are produced, all vertices that snap to a given vertex on the grid are remapped to reference a single vertex. Following this, all triangles' indices are remapped to reference this subset of clean vertices. It should be noted that this process does not alter the positions of the vertices; the snap-to-grid is only performed to identify nearby vertices. The mesh validation approach also uses the same snap-to-grid approach to identify nearby vertices. If more than one vertex snaps to a given grid coordinate, we ensure that the distance between the vertices is at least meshWeldTolerance. If this is not the case, a warning is emitted. <b>Default value:</b> 0.0 */ PxReal meshWeldTolerance; /** \brief "Zero-area" epsilon used in mesh cleaning. This is similar to areaTestEpsilon, but for the mesh cleaning operation. If the area of a triangle is below this value, the triangle will be removed. This is only done when mesh cleaning is enabled, i.e. when PxMeshPreprocessingFlag::eDISABLE_CLEAN_MESH is not set. Default value is 0.0f to be consistent with previous PhysX versions, which only removed triangles whose area was exactly zero. @see PxMeshPreprocessingFlag::eDISABLE_CLEAN_MESH <b>Default value:</b> 0.0f <b>Range:</b> (0.0f, PX_MAX_F32) */ PxReal meshAreaMinLimit; /** \brief Maximum edge length. If an edge of a triangle is above this value, a warning is sent to the error stream.
This is only a check, corresponding triangles are not removed. This is only done when mesh cleaning is enabled, i.e. when PxMeshPreprocessingFlag::eDISABLE_CLEAN_MESH is not set. Default value is 500.0f to be consistent with previous PhysX versions. This value is internally multiplied by PxTolerancesScale::length before being used. Use 0.0f to disable the checks. @see PxMeshPreprocessingFlag::eDISABLE_CLEAN_MESH <b>Default value:</b> 500.0f <b>Range:</b> (0.0f, PX_MAX_F32) */ PxReal meshEdgeLengthMaxLimit; /** \brief Controls the desired midphase desc structure for triangle meshes. @see PxBVH33MidphaseDesc, PxBVH34MidphaseDesc, PxMidphaseDesc <b>Default value:</b> PxMeshMidPhase::eBVH34 */ PxMidphaseDesc midphaseDesc; /** \brief Vertex limit beyond which additional acceleration structures are computed for each convex mesh. Increase that limit to reduce memory usage. Computing the extra structures all the time does not guarantee optimal performance. There is a per-platform break-even point below which the extra structures actually hurt performance. <b>Default value:</b> 32 */ PxU32 gaussMapLimit; /** \brief Maximum mass ratio allowed on vertices touched by a single tet. If a any tetrahedron exceeds the mass ratio, the masses will get smoothed locally until the maximum mass ratio is matched. Value should not be below 1. Smoothing might not fully converge for values <1.5. The smaller the maximum allowed ratio, the better the stability during simulation. <b>Default value:</b> FLT_MAX */ PxReal maxWeightRatioInTet; PxCookingParams(const PxTolerancesScale& sc): areaTestEpsilon (0.06f*sc.length*sc.length), planeTolerance (0.0007f), convexMeshCookingType (PxConvexMeshCookingType::eQUICKHULL), suppressTriangleMeshRemapTable (false), buildTriangleAdjacencies (false), buildGPUData (false), scale (sc), meshPreprocessParams (0), meshWeldTolerance (0.0f), meshAreaMinLimit (0.0f), meshEdgeLengthMaxLimit (500.0f), gaussMapLimit (32), maxWeightRatioInTet (FLT_MAX) { } }; #if !PX_DOXYGEN } // namespace physx #endif // Immediate cooking /** \brief Gets standalone object insertion interface. This interface allows the creation of standalone objects that can exist without a PxPhysics or PxScene object. @see PxCreateTriangleMesh PxCreateHeightfield PxCreateTetrahedronMesh PxCreateBVH */ PX_C_EXPORT PX_PHYSX_COOKING_API physx::PxInsertionCallback* PxGetStandaloneInsertionCallback(); // ==== BVH ==== /** \brief Cooks a bounding volume hierarchy. The results are written to the stream. PxCookBVH() allows a BVH description to be cooked into a binary stream suitable for loading and performing BVH detection at runtime. \param[in] desc The BVH descriptor. \param[in] stream User stream to output the cooked data. \return true on success. @see PxBVH PxRigidActorExt::getRigidActorShapeLocalBoundsList */ PX_C_EXPORT PX_PHYSX_COOKING_API bool PxCookBVH(const physx::PxBVHDesc& desc, physx::PxOutputStream& stream); /** \brief Cooks and creates a bounding volume hierarchy without going through a stream. \note This method does the same as PxCookBVH, but the produced BVH is not stored into a stream but is either directly inserted in PxPhysics, or created as a standalone object. Use this method if you are unable to cook offline. \note PxInsertionCallback can be obtained through PxPhysics::getPhysicsInsertionCallback() or PxGetStandaloneInsertionCallback(). \param[in] desc The BVH descriptor. \param[in] insertionCallback The insertion interface. 
\return PxBVH pointer on success @see PxCookBVH() PxInsertionCallback */ PX_C_EXPORT PX_PHYSX_COOKING_API physx::PxBVH* PxCreateBVH(const physx::PxBVHDesc& desc, physx::PxInsertionCallback& insertionCallback); /** \brief Cooks and creates a bounding volume hierarchy without going through a stream. Convenience function for standalone objects. \note This method does the same as PxCookBVH, but the produced BVH is not stored into a stream but is either directly inserted in PxPhysics, or created as a standalone object. Use this method if you are unable to cook offline. \param[in] desc The BVH descriptor. \return PxBVH pointer on success @see PxCookBVH() PxInsertionCallback */ PX_FORCE_INLINE physx::PxBVH* PxCreateBVH(const physx::PxBVHDesc& desc) { return PxCreateBVH(desc, *PxGetStandaloneInsertionCallback()); } // ==== Heightfield ==== /** \brief Cooks a heightfield. The results are written to the stream. To create a heightfield object there is an option to precompute some of calculations done while loading the heightfield data. PxCookHeightField() allows a heightfield description to be cooked into a binary stream suitable for loading and performing collision detection at runtime. \param[in] desc The heightfield descriptor to read the HF from. \param[in] stream User stream to output the cooked data. \return true on success @see PxPhysics.createHeightField() */ PX_C_EXPORT PX_PHYSX_COOKING_API bool PxCookHeightField(const physx::PxHeightFieldDesc& desc, physx::PxOutputStream& stream); /** \brief Cooks and creates a heightfield mesh and inserts it into PxPhysics. \param[in] desc The heightfield descriptor to read the HF from. \param[in] insertionCallback The insertion interface from PxPhysics. \return PxHeightField pointer on success @see PxCookHeightField() PxPhysics.createHeightField() PxInsertionCallback */ PX_C_EXPORT PX_PHYSX_COOKING_API physx::PxHeightField* PxCreateHeightField(const physx::PxHeightFieldDesc& desc, physx::PxInsertionCallback& insertionCallback); /** \brief Cooks and creates a heightfield mesh and inserts it into PxPhysics. Convenience function for standalone objects. \param[in] desc The heightfield descriptor to read the HF from. \return PxHeightField pointer on success @see PxCookHeightField() PxPhysics.createHeightField() PxInsertionCallback */ PX_FORCE_INLINE physx::PxHeightField* PxCreateHeightField(const physx::PxHeightFieldDesc& desc) { return PxCreateHeightField(desc, *PxGetStandaloneInsertionCallback()); } // ==== Convex meshes ==== /** \brief Cooks a convex mesh. The results are written to the stream. To create a triangle mesh object it is necessary to first 'cook' the mesh data into a form which allows the SDK to perform efficient collision detection. PxCookConvexMesh() allows a mesh description to be cooked into a binary stream suitable for loading and performing collision detection at runtime. \note The number of vertices and the number of convex polygons in a cooked convex mesh is limited to 255. \note If those limits are exceeded in either the user-provided data or the final cooked mesh, an error is reported. \param[in] params The cooking parameters \param[in] desc The convex mesh descriptor to read the mesh from. \param[in] stream User stream to output the cooked data. \param[out] condition Result from convex mesh cooking. \return true on success. 
@see PxCookTriangleMesh() PxConvexMeshCookingResult::Enum */ PX_C_EXPORT PX_PHYSX_COOKING_API bool PxCookConvexMesh(const physx::PxCookingParams& params, const physx::PxConvexMeshDesc& desc, physx::PxOutputStream& stream, physx::PxConvexMeshCookingResult::Enum* condition=NULL); /** \brief Cooks and creates a convex mesh without going through a stream. \note This method does the same as PxCookConvexMesh, but the produced mesh is not stored into a stream but is either directly inserted in PxPhysics, or created as a standalone object. Use this method if you are unable to cook offline. \note PxInsertionCallback can be obtained through PxPhysics::getPhysicsInsertionCallback() or PxGetStandaloneInsertionCallback(). \param[in] params The cooking parameters \param[in] desc The convex mesh descriptor to read the mesh from. \param[in] insertionCallback The insertion interface from PxPhysics. \param[out] condition Result from convex mesh cooking. \return PxConvexMesh pointer on success @see PxCookConvexMesh() PxInsertionCallback */ PX_C_EXPORT PX_PHYSX_COOKING_API physx::PxConvexMesh* PxCreateConvexMesh(const physx::PxCookingParams& params, const physx::PxConvexMeshDesc& desc, physx::PxInsertionCallback& insertionCallback, physx::PxConvexMeshCookingResult::Enum* condition=NULL); /** \brief Cooks and creates a convex mesh without going through a stream. Convenience function for standalone objects. \note This method does the same as PxCookConvexMesh, but the produced mesh is not stored into a stream but is either directly inserted in PxPhysics, or created as a standalone object. Use this method if you are unable to cook offline. \param[in] params The cooking parameters \param[in] desc The convex mesh descriptor to read the mesh from. \return PxConvexMesh pointer on success @see PxCookConvexMesh() PxInsertionCallback */ PX_FORCE_INLINE physx::PxConvexMesh* PxCreateConvexMesh(const physx::PxCookingParams& params, const physx::PxConvexMeshDesc& desc) { return PxCreateConvexMesh(params, desc, *PxGetStandaloneInsertionCallback()); } /** \brief Verifies if the convex mesh is valid. Prints an error message for each inconsistency found. The convex mesh descriptor must contain an already created convex mesh - the vertices, indices and polygons must be provided. \note This function should be used if PxConvexFlag::eDISABLE_MESH_VALIDATION is planned to be used in release builds. \param[in] params The cooking parameters \param[in] desc The convex mesh descriptor to read the mesh from. \return true if all the validity conditions hold, false otherwise. @see PxCookConvexMesh() */ PX_C_EXPORT PX_PHYSX_COOKING_API bool PxValidateConvexMesh(const physx::PxCookingParams& params, const physx::PxConvexMeshDesc& desc); /** \brief Compute hull polygons from given vertices and triangles. Polygons are needed for PxConvexMeshDesc rather than triangles. Please note that the resulting polygons may have a different number of vertices. Some vertices may be removed. The output vertices, indices and polygons must be used to construct a hull. The provided PxAllocatorCallback is used to allocate the output arrays. It is the user's responsibility to deallocate those arrays. \param[in] params The cooking parameters \param[in] mesh Simple triangle mesh containing vertices and triangles used to compute polygons. \param[in] inCallback Memory allocator for out array allocations. \param[out] nbVerts Number of vertices used by polygons. \param[out] vertices Vertices array used by polygons. \param[out] nbIndices Number of indices used by polygons.
\param[out] indices Indices array used by polygons. \param[out] nbPolygons Number of created polygons. \param[out] hullPolygons Polygons array. \return true on success @see PxCookConvexMesh() PxConvexFlags PxConvexMeshDesc PxSimpleTriangleMesh */ PX_C_EXPORT PX_PHYSX_COOKING_API bool PxComputeHullPolygons(const physx::PxCookingParams& params, const physx::PxSimpleTriangleMesh& mesh, physx::PxAllocatorCallback& inCallback, physx::PxU32& nbVerts, physx::PxVec3*& vertices, physx::PxU32& nbIndices, physx::PxU32*& indices, physx::PxU32& nbPolygons, physx::PxHullPolygon*& hullPolygons); // ==== Triangle meshes ==== /** \brief Verifies if the triangle mesh is valid. Prints an error message for each inconsistency found. The following conditions are true for a valid triangle mesh: 1. There are no duplicate vertices (within specified vertexWeldTolerance. See PxCookingParams::meshWeldTolerance) 2. There are no large triangles (within specified PxTolerancesScale.) \param[in] params The cooking parameters \param[in] desc The triangle mesh descriptor to read the mesh from. \return true if all the validity conditions hold, false otherwise. @see PxCookTriangleMesh() */ PX_C_EXPORT PX_PHYSX_COOKING_API bool PxValidateTriangleMesh(const physx::PxCookingParams& params, const physx::PxTriangleMeshDesc& desc); /** \brief Cooks a triangle mesh. The results are written to the stream. To create a triangle mesh object it is necessary to first 'cook' the mesh data into a form which allows the SDK to perform efficient collision detection. PxCookTriangleMesh() allows a mesh description to be cooked into a binary stream suitable for loading and performing collision detection at runtime. \param[in] params The cooking parameters \param[in] desc The triangle mesh descriptor to read the mesh from. \param[in] stream User stream to output the cooked data. \param[out] condition Result from triangle mesh cooking. \return true on success @see PxCookConvexMesh() PxPhysics.createTriangleMesh() PxTriangleMeshCookingResult::Enum */ PX_C_EXPORT PX_PHYSX_COOKING_API bool PxCookTriangleMesh(const physx::PxCookingParams& params, const physx::PxTriangleMeshDesc& desc, physx::PxOutputStream& stream, physx::PxTriangleMeshCookingResult::Enum* condition=NULL); /** \brief Cooks and creates a triangle mesh without going through a stream. \note This method does the same as PxCookTriangleMesh, but the produced mesh is not stored into a stream but is either directly inserted in PxPhysics, or created as a standalone object. Use this method if you are unable to cook offline. \note PxInsertionCallback can be obtained through PxPhysics::getPhysicsInsertionCallback() or PxGetStandaloneInsertionCallback(). \param[in] params The cooking parameters \param[in] desc The triangle mesh descriptor to read the mesh from. \param[in] insertionCallback The insertion interface from PxPhysics. \param[out] condition Result from triangle mesh cooking. \return PxTriangleMesh pointer on success. @see PxCookTriangleMesh() PxPhysics.createTriangleMesh() PxInsertionCallback */ PX_C_EXPORT PX_PHYSX_COOKING_API physx::PxTriangleMesh* PxCreateTriangleMesh(const physx::PxCookingParams& params, const physx::PxTriangleMeshDesc& desc, physx::PxInsertionCallback& insertionCallback, physx::PxTriangleMeshCookingResult::Enum* condition=NULL); /** \brief Cooks and creates a triangle mesh without going through a stream. Convenience function for standalone objects. 
\note This method does the same as cookTriangleMesh, but the produced mesh is not stored into a stream but is either directly inserted in PxPhysics, or created as a standalone object. Use this method if you are unable to cook offline. \return PxTriangleMesh pointer on success. \param[in] params The cooking parameters \param[in] desc The triangle mesh descriptor to read the mesh from. @see PxCookTriangleMesh() PxPhysics.createTriangleMesh() PxInsertionCallback */ PX_FORCE_INLINE physx::PxTriangleMesh* PxCreateTriangleMesh(const physx::PxCookingParams& params, const physx::PxTriangleMeshDesc& desc) { return PxCreateTriangleMesh(params, desc, *PxGetStandaloneInsertionCallback()); } // ==== Tetrahedron & soft body meshes ==== /** \brief Cooks a tetrahedron mesh. The results are written to the stream. To create a tetrahedron mesh object it is necessary to first 'cook' the mesh data into a form which allows the SDK to perform efficient collision detection. PxCookTetrahedronMesh() allows a mesh description to be cooked into a binary stream suitable for loading and performing collision detection at runtime. \param[in] params The cooking parameters \param[in] meshDesc The tetrahedron mesh descriptor to read the mesh from. \param[in] stream User stream to output the cooked data. \return true on success @see PxCookConvexMesh() PxPhysics.createTetrahedronMesh() */ PX_C_EXPORT PX_PHYSX_COOKING_API bool PxCookTetrahedronMesh(const physx::PxCookingParams& params, const physx::PxTetrahedronMeshDesc& meshDesc, physx::PxOutputStream& stream); /** \brief Cooks and creates a tetrahedron mesh without going through a stream. \note This method does the same as PxCookTetrahedronMesh, but the produced mesh is not stored into a stream but is either directly inserted in PxPhysics, or created as a standalone object. Use this method if you are unable to cook offline. \note PxInsertionCallback can be obtained through PxPhysics::getPhysicsInsertionCallback() or PxGetStandaloneInsertionCallback(). \param[in] params The cooking parameters \param[in] meshDesc The tetrahedron mesh descriptor to read the mesh from. \param[in] insertionCallback The insertion interface from PxPhysics. \return PxTetrahedronMesh pointer on success. @see PxCookTetrahedronMesh() PxInsertionCallback */ PX_C_EXPORT PX_PHYSX_COOKING_API physx::PxTetrahedronMesh* PxCreateTetrahedronMesh(const physx::PxCookingParams& params, const physx::PxTetrahedronMeshDesc& meshDesc, physx::PxInsertionCallback& insertionCallback); /** \brief Cooks and creates a tetrahedron mesh without going through a stream. Convenience function for standalone objects. \note This method does the same as PxCookTetrahedronMesh, but the produced mesh is not stored into a stream but is either directly inserted in PxPhysics, or created as a standalone object. Use this method if you are unable to cook offline. \param[in] params The cooking parameters \param[in] meshDesc The tetrahedron mesh descriptor to read the mesh from. \return PxTetrahedronMesh pointer on success. @see PxCookTetrahedronMesh() PxInsertionCallback */ PX_FORCE_INLINE physx::PxTetrahedronMesh* PxCreateTetrahedronMesh(const physx::PxCookingParams& params, const physx::PxTetrahedronMeshDesc& meshDesc) { return PxCreateTetrahedronMesh(params, meshDesc, *PxGetStandaloneInsertionCallback()); } /** \brief Cooks a softbody mesh. The results are written to the stream. 
To create a softbody mesh object it is necessary to first 'cook' the mesh data into a form which allows the SDK to perform efficient collision detection and to store data used during the FEM calculations. PxCookSoftBodyMesh() allows a mesh description to be cooked into a binary stream suitable for loading and performing collision detection at runtime. \param[in] params The cooking parameters \param[in] simulationMeshDesc The tetrahedron mesh descriptor to read the simulation mesh from. \param[in] collisionMeshDesc The tetrahedron mesh descriptor to read the collision mesh from. \param[in] softbodyDataDesc The softbody data descriptor to read mapping information from. \param[in] stream User stream to output the cooked data. \return true on success @see PxCookConvexMesh() PxPhysics.createTriangleMesh() PxTriangleMeshCookingResult::Enum */ PX_C_EXPORT PX_PHYSX_COOKING_API bool PxCookSoftBodyMesh(const physx::PxCookingParams& params, const physx::PxTetrahedronMeshDesc& simulationMeshDesc, const physx::PxTetrahedronMeshDesc& collisionMeshDesc, const physx::PxSoftBodySimulationDataDesc& softbodyDataDesc, physx::PxOutputStream& stream); /** \brief Cooks and creates a softbody mesh without going through a stream. \note This method does the same as PxCookSoftBodyMesh, but the produced mesh is not stored into a stream but is either directly inserted in PxPhysics, or created as a standalone object. Use this method if you are unable to cook offline. \note PxInsertionCallback can be obtained through PxPhysics::getPhysicsInsertionCallback() or PxGetStandaloneInsertionCallback(). \param[in] params The cooking parameters \param[in] simulationMeshDesc The tetrahedron mesh descriptor to read the simulation mesh from. \param[in] collisionMeshDesc The tetrahedron mesh descriptor to read the collision mesh from. \param[in] softbodyDataDesc The softbody data descriptor to read mapping information from. \param[in] insertionCallback The insertion interface from PxPhysics. \return PxSoftBodyMesh pointer on success. @see PxCookTriangleMesh() PxPhysics.createTriangleMesh() PxInsertionCallback */ PX_C_EXPORT PX_PHYSX_COOKING_API physx::PxSoftBodyMesh* PxCreateSoftBodyMesh(const physx::PxCookingParams& params, const physx::PxTetrahedronMeshDesc& simulationMeshDesc, const physx::PxTetrahedronMeshDesc& collisionMeshDesc, const physx::PxSoftBodySimulationDataDesc& softbodyDataDesc, physx::PxInsertionCallback& insertionCallback); /** \brief Cooks and creates a softbody mesh without going through a stream. Convenience function for standalone objects. \note This method does the same as PxCookSoftBodyMesh, but the produced mesh is not stored into a stream but is either directly inserted in PxPhysics, or created as a standalone object. Use this method if you are unable to cook offline. \param[in] params The cooking parameters \param[in] simulationMeshDesc The tetrahedron mesh descriptor to read the simulation mesh from. \param[in] collisionMeshDesc The tetrahedron mesh descriptor to read the collision mesh from. \param[in] softbodyDataDesc The softbody data descriptor to read mapping information from. \return PxSoftBodyMesh pointer on success. 
@see PxCookTriangleMesh() PxPhysics.createTriangleMesh() PxInsertionCallback */ PX_FORCE_INLINE physx::PxSoftBodyMesh* PxCreateSoftBodyMesh(const physx::PxCookingParams& params, const physx::PxTetrahedronMeshDesc& simulationMeshDesc, const physx::PxTetrahedronMeshDesc& collisionMeshDesc, const physx::PxSoftBodySimulationDataDesc& softbodyDataDesc) { return PxCreateSoftBodyMesh(params, simulationMeshDesc, collisionMeshDesc, softbodyDataDesc, *PxGetStandaloneInsertionCallback()); } /** \brief Computes the mapping between collision and simulation mesh The softbody deformation is computed on the simulation mesh. To deform the collision mesh accordingly it needs to be specified how its vertices need to be placed and updated inside the deformation mesh. This method computes that embedding information. \param[in] params The cooking parameters \param[in] simulationMesh A tetrahedral mesh that defines the shape of the simulation mesh which is used to compute the body's deformation \param[in] collisionMesh A tetrahedral mesh that defines the shape of the collision mesh which is used for collision detection \param[in] collisionData A data container that contains acceleration structures and surface information of the collision mesh \param[in] vertexToTet Optional indices (array of integers) that specifies the index of the tetrahedron in the simulation mesh that contains a collision mesh vertex. If not provided, the embedding will be computed internally. If the simulation mesh is obtained from PxTetMaker::createVoxelTetrahedronMesh, then the vertexToTet map createVoxelTetrahedronMesh returned should be used here. \return PxCollisionMeshMappingData pointer that describes how the collision mesh is embedded into the simulation mesh @see PxTetMaker::createVoxelTetrahedronMesh */ PX_C_EXPORT PX_PHYSX_COOKING_API physx::PxCollisionMeshMappingData* PxComputeModelsMapping(const physx::PxCookingParams& params, physx::PxTetrahedronMeshData& simulationMesh, const physx::PxTetrahedronMeshData& collisionMesh, const physx::PxSoftBodyCollisionData& collisionData, const physx::PxBoundedData* vertexToTet = NULL); /** \brief Computes data to accelerate collision detection of tetrahedral meshes Computes data structures to speed up collision detection with tetrahedral meshes. \param[in] params The cooking parameters \param[in] collisionMeshDesc Raw tetrahedral mesh descriptor wich will be used for collision detection \return PxCollisionTetrahedronMeshData pointer that describes the collision mesh */ PX_C_EXPORT PX_PHYSX_COOKING_API physx::PxCollisionTetrahedronMeshData* PxComputeCollisionData(const physx::PxCookingParams& params, const physx::PxTetrahedronMeshDesc& collisionMeshDesc); /** \brief Computes data to accelerate collision detection of tetrahedral meshes Computes data to compute and store a softbody's deformation using FEM. 
\param[in] params The cooking parameters \param[in] simulationMeshDesc Raw tetrahedral mesh descriptor wich will be used for FEM simulation \return PxSimulationTetrahedronMeshData pointer that describes the simulation mesh */ PX_C_EXPORT PX_PHYSX_COOKING_API physx::PxSimulationTetrahedronMeshData* PxComputeSimulationData(const physx::PxCookingParams& params, const physx::PxTetrahedronMeshDesc& simulationMeshDesc); /** \brief Bundles all data required for softbody simulation Creates a container that provides everything to create a PxSoftBody \param[in] simulationMesh The geometry (tetrahedral mesh) to be used as simulation mesh \param[in] simulationData Additional non-tetmesh data that contains mass information etc. for the simulation mesh \param[in] collisionMesh The geometry (tetrahedral mesh) to be used for collision detection \param[in] collisionData Additional non-tetmesh data that contains surface information, acceleration structures etc. for the simulation mesh \param[in] mappingData Mapping that describes how the collision mesh's vertices are embedded into the simulation mesh \param[in] insertionCallback The insertion interface from PxPhysics. \return PxSoftBodyMesh pointer that represents a softbody mesh bundling all data (simulation mesh, collision mesh etc.) @see PxSoftBody createSoftBody() */ PX_C_EXPORT PX_PHYSX_COOKING_API physx::PxSoftBodyMesh* PxAssembleSoftBodyMesh(physx::PxTetrahedronMeshData& simulationMesh, physx::PxSoftBodySimulationData& simulationData, physx::PxTetrahedronMeshData& collisionMesh, physx::PxSoftBodyCollisionData& collisionData, physx::PxCollisionMeshMappingData& mappingData, physx::PxInsertionCallback& insertionCallback); /** \brief Bundles all data required for softbody simulation Creates a container that provides everything to create a PxSoftBody \param[in] simulationMesh Container that provides all information about the simulation mesh (geometry, mass distribution etc.) \param[in] collisionMesh Container that provides all information about the collision mesh (geometry, surface information, acceleration structures etc.) \param[in] mappingData Mapping that describes how the collision mesh's vertices are embedded into the simulation mesh \param[in] insertionCallback The insertion interface from PxPhysics. \return PxSoftBodyMesh pointer that represents a softbody mesh bundling all data (simulation mesh, collision mesh etc.) @see PxSoftBody createSoftBody() */ PX_C_EXPORT PX_PHYSX_COOKING_API physx::PxSoftBodyMesh* PxAssembleSoftBodyMesh_Sim(physx::PxSimulationTetrahedronMeshData& simulationMesh, physx::PxCollisionTetrahedronMeshData& collisionMesh, physx::PxCollisionMeshMappingData& mappingData, physx::PxInsertionCallback& insertionCallback); /** @} */ #endif
37,173
C
41.876586
259
0.788718
NVIDIA-Omniverse/PhysX/physx/include/cooking/PxTetrahedronMeshDesc.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_TETRAHEDRON_MESH_DESC_H #define PX_TETRAHEDRON_MESH_DESC_H /** \addtogroup cooking @{ */ #include "PxPhysXConfig.h" #include "foundation/PxVec3.h" #include "foundation/PxFlags.h" #include "common/PxCoreUtilityTypes.h" #include "geometry/PxSimpleTriangleMesh.h" #include "foundation/PxArray.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief Descriptor class for #PxTetrahedronMesh (contains only pure geometric data). @see PxTetrahedronMesh PxShape */ class PxTetrahedronMeshDesc { public: /** \brief Defines the tetrahedron structure of a mesh. */ enum PxMeshFormat { eTET_MESH, //!< Normal tetmesh with arbitrary tetrahedra eHEX_MESH //!< 6 tetrahedra in a row will form a hexahedron }; /** Optional pointer to first material index, or NULL. There are PxTetrahedronMesh::numTriangles indices in total. Caller may add materialIndexStride bytes to the pointer to access the next triangle. When a tetrahedron mesh collides with another object, a material is required at the collision point. If materialIndices is NULL, then the material of the PxShape instance is used. Otherwise, if the point of contact is on a tetrahedron with index i, then the material index is determined as: PxFEMMaterialTableIndex index = *(PxFEMMaterialTableIndex *)(((PxU8*)materialIndices) + materialIndexStride * i); If the contact point falls on a vertex or an edge, a tetrahedron adjacent to the vertex or edge is selected, and its index used to look up a material. The selection is arbitrary but consistent over time. <b>Default:</b> NULL @see materialIndexStride */ PxTypedStridedData<PxFEMMaterialTableIndex> materialIndices; /** \brief Pointer to first vertex point. */ PxBoundedData points; /** \brief Pointer to first tetrahedron. Caller may add tetrhedronStrideBytes bytes to the pointer to access the next tetrahedron. 
These are quadruplets of 0 based indices: vert0 vert1 vert2 vert3 vert0 vert1 vert2 vert3 vert0 vert1 vert2 vert3 ... where vertex is either a 32 or 16 bit unsigned integer. There are numTetrahedrons*4 indices. This is declared as a void pointer because it is actually either an PxU16 or a PxU32 pointer. */ PxBoundedData tetrahedrons; /** \brief Flags bits, combined from values of the enum ::PxMeshFlag */ PxMeshFlags flags; /** \brief Used for simulation meshes only. Defines if this tet mesh should be simulated as a tet mesh, or if a set of tetrahedra should be used to represent another shape, e.g. a hexahedral mesh constructed from 6 elements. */ PxU16 tetsPerElement; /** \brief Constructor to build an empty tetmesh description */ PxTetrahedronMeshDesc() { points.count = 0; points.stride = 0; points.data = NULL; tetrahedrons.count = 0; tetrahedrons.stride = 0; tetrahedrons.data = NULL; tetsPerElement = 1; } /** \brief Constructor to build a tetmeshdescription that links to the vertices and indices provided */ PxTetrahedronMeshDesc(physx::PxArray<physx::PxVec3>& meshVertices, physx::PxArray<physx::PxU32>& meshTetIndices, const PxTetrahedronMeshDesc::PxMeshFormat meshFormat = eTET_MESH, PxU16 numberOfTetsPerHexElement = 5) { points.count = meshVertices.size(); points.stride = sizeof(float) * 3; points.data = meshVertices.begin(); tetrahedrons.count = meshTetIndices.size() / 4; tetrahedrons.stride = sizeof(int) * 4; tetrahedrons.data = meshTetIndices.begin(); if (meshFormat == eTET_MESH) tetsPerElement = 1; else tetsPerElement = numberOfTetsPerHexElement; } PX_INLINE bool isValid() const { // Check geometry of the collision mesh if (points.count < 4) //at least 1 tetrahedron's worth of points return false; if ((!tetrahedrons.data) && (points.count % 4)) // Non-indexed mesh => we must ensure the geometry defines an implicit number of tetrahedrons // i.e. numVertices can't be divided by 4 return false; if (points.count > 0xffff && flags & PxMeshFlag::e16_BIT_INDICES) return false; if (!points.data) return false; if (points.stride < sizeof(PxVec3)) //should be at least one point's worth of data return false; //add more validity checks here if (materialIndices.data && materialIndices.stride < sizeof(PxFEMMaterialTableIndex)) return false; // The tetrahedrons pointer is not mandatory if (tetrahedrons.data) { // Indexed collision mesh PxU32 limit = (flags & PxMeshFlag::e16_BIT_INDICES) ? sizeof(PxU16) * 4 : sizeof(PxU32) * 4; if (tetrahedrons.stride < limit) return false; } //The model can only be either a tetmesh (1 tet per element), or have 5 or 6 tets per hex element, otherwise invalid. if (tetsPerElement != 1 && tetsPerElement != 5 && tetsPerElement != 6) return false; return true; } }; ///** //\brief Descriptor class for #PxSoftBodyMesh (contains only additional data used for softbody simulation). //@see PxSoftBodyMesh PxShape //*/ class PxSoftBodySimulationDataDesc { public: /** \brief Pointer to first index of tetrahedron that contains the vertex at the same location in the vertex buffer. if left unassigned it will be computed automatically. If a point is inside multiple tetrahedra (ambiguous case), the frist one found will be taken. 
*/ PxBoundedData vertexToTet; /** \brief Constructor to build an empty simulation description */ PxSoftBodySimulationDataDesc() { vertexToTet.count = 0; vertexToTet.stride = 0; vertexToTet.data = NULL; } /** \brief Constructor to build a simulation description with a defined vertex to tetrahedron mapping */ PxSoftBodySimulationDataDesc(physx::PxArray<physx::PxI32>& vertToTet) { vertexToTet.count = vertToTet.size(); vertexToTet.stride = sizeof(PxI32); vertexToTet.data = vertToTet.begin(); } PX_INLINE bool isValid() const { return true; } }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
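As a usage sketch (names illustrative), the array-based constructor above can describe a single tetrahedron; the descriptor only references the arrays, so they must outlive it until cooking is done. Leaving PxSoftBodySimulationDataDesc::vertexToTet empty lets the embedding be computed automatically.

#include "cooking/PxTetrahedronMeshDesc.h"

bool buildSingleTetDescs()
{
    // Four vertices of one tetrahedron.
    physx::PxArray<physx::PxVec3> verts;
    verts.pushBack(physx::PxVec3(0.0f, 0.0f, 0.0f));
    verts.pushBack(physx::PxVec3(1.0f, 0.0f, 0.0f));
    verts.pushBack(physx::PxVec3(0.0f, 1.0f, 0.0f));
    verts.pushBack(physx::PxVec3(0.0f, 0.0f, 1.0f));

    // One tetrahedron = four 32-bit indices.
    physx::PxArray<physx::PxU32> tets;
    tets.pushBack(0); tets.pushBack(1); tets.pushBack(2); tets.pushBack(3);

    physx::PxTetrahedronMeshDesc meshDesc(verts, tets);     // eTET_MESH, 1 tet per element
    physx::PxSoftBodySimulationDataDesc simDataDesc;        // vertexToTet left empty

    return meshDesc.isValid() && simDataDesc.isValid();
}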
7,718
C
31.846808
187
0.728686
NVIDIA-Omniverse/PhysX/physx/include/cooking/Pxc.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PXC_H #define PXC_H #include "foundation/Px.h" // define API function declaration #if !defined PX_PHYSX_STATIC_LIB #if PX_WINDOWS_FAMILY #if defined PX_PHYSX_COOKING_EXPORTS #define PX_PHYSX_COOKING_API __declspec(dllexport) #else #define PX_PHYSX_COOKING_API __declspec(dllimport) #endif #elif PX_UNIX_FAMILY #define PX_PHYSX_COOKING_API PX_UNIX_EXPORT #endif #endif #if !defined(PX_PHYSX_COOKING_API) #define PX_PHYSX_COOKING_API #endif #ifndef PX_C_EXPORT #define PX_C_EXPORT extern "C" #endif #endif
2,238
C
38.982142
74
0.756032
NVIDIA-Omniverse/PhysX/physx/include/cooking/PxBVH33MidphaseDesc.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_BVH_33_MIDPHASE_DESC_H #define PX_BVH_33_MIDPHASE_DESC_H /** \addtogroup cooking @{ */ #include "foundation/PxPreprocessor.h" #include "foundation/PxSimpleTypes.h" #if !PX_DOXYGEN namespace physx { #endif /** * \brief Enumeration for mesh cooking hints. * @deprecated */ struct PX_DEPRECATED PxMeshCookingHint { enum Enum { eSIM_PERFORMANCE = 0, //!< Default value. Favors higher quality hierarchy with higher runtime performance over cooking speed. eCOOKING_PERFORMANCE = 1 //!< Enables fast cooking path at the expense of somewhat lower quality hierarchy construction. }; }; /** \brief Structure describing parameters affecting BVH33 midphase mesh structure. @see PxCookingParams, PxMidphaseDesc @deprecated */ struct PX_DEPRECATED PxBVH33MidphaseDesc { /** \brief Controls the trade-off between mesh size and runtime performance. Using a value of 1.0 will produce a larger cooked mesh with generally higher runtime performance, using 0.0 will produce a smaller cooked mesh, with generally lower runtime performance. Values outside of [0,1] range will be clamped and cause a warning when any mesh gets cooked. <b>Default value:</b> 0.55 <b>Range:</b> [0.0f, 1.0f] */ PxF32 meshSizePerformanceTradeOff; /** \brief Mesh cooking hint. Used to specify mesh hierarchy construction preference. <b>Default value:</b> PxMeshCookingHint::eSIM_PERFORMANCE */ PxMeshCookingHint::Enum meshCookingHint; /** \brief Desc initialization to default value. */ void setToDefault() { meshSizePerformanceTradeOff = 0.55f; meshCookingHint = PxMeshCookingHint::eSIM_PERFORMANCE; } /** \brief Returns true if the descriptor is valid. \return true if the current settings are valid. */ bool isValid() const { if(meshSizePerformanceTradeOff < 0.0f || meshSizePerformanceTradeOff > 1.0f) return false; return true; } }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
3,653
C
31.052631
128
0.748152
NVIDIA-Omniverse/PhysX/physx/include/cooking/PxTriangleMeshDesc.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_TRIANGLE_MESH_DESC_H #define PX_TRIANGLE_MESH_DESC_H /** \addtogroup cooking @{ */ #include "PxPhysXConfig.h" #include "geometry/PxSimpleTriangleMesh.h" #include "PxSDFDesc.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief Descriptor class for #PxTriangleMesh. Note that this class is derived from PxSimpleTriangleMesh which contains the members that describe the basic mesh. The mesh data is *copied* when an PxTriangleMesh object is created from this descriptor. After the call the user may discard the triangle data. @see PxTriangleMesh PxTriangleMeshGeometry PxShape */ class PxTriangleMeshDesc : public PxSimpleTriangleMesh { public: /** Optional pointer to first material index, or NULL. There are PxSimpleTriangleMesh::numTriangles indices in total. Caller may add materialIndexStride bytes to the pointer to access the next triangle. When a triangle mesh collides with another object, a material is required at the collision point. If materialIndices is NULL, then the material of the PxShape instance is used. Otherwise, if the point of contact is on a triangle with index i, then the material index is determined as: PxMaterialTableIndex index = *(PxMaterialTableIndex *)(((PxU8*)materialIndices) + materialIndexStride * i); If the contact point falls on a vertex or an edge, a triangle adjacent to the vertex or edge is selected, and its index used to look up a material. The selection is arbitrary but consistent over time. <b>Default:</b> NULL @see materialIndexStride */ PxTypedStridedData<PxMaterialTableIndex> materialIndices; /** \brief SDF descriptor. When this descriptor is set, signed distance field is calculated for this convex mesh. <b>Default:</b> NULL */ PxSDFDesc* sdfDesc; /** \brief Constructor sets to default. */ PX_INLINE PxTriangleMeshDesc(); /** \brief (re)sets the structure to the default. 
*/ PX_INLINE void setToDefault(); /** \brief Returns true if the descriptor is valid. \return true if the current settings are valid */ PX_INLINE bool isValid() const; }; PX_INLINE PxTriangleMeshDesc::PxTriangleMeshDesc() //constructor sets to default { PxSimpleTriangleMesh::setToDefault(); sdfDesc = NULL; } PX_INLINE void PxTriangleMeshDesc::setToDefault() { *this = PxTriangleMeshDesc(); } PX_INLINE bool PxTriangleMeshDesc::isValid() const { if(points.count < 3) //at least 1 trig's worth of points return false; if ((!triangles.data) && (points.count%3)) // Non-indexed mesh => we must ensure the geometry defines an implicit number of triangles // i.e. numVertices can't be divided by 3 return false; //add more validity checks here if (materialIndices.data && materialIndices.stride < sizeof(PxMaterialTableIndex)) return false; if (sdfDesc && !sdfDesc->isValid()) return false; return PxSimpleTriangleMesh::isValid(); } #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
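A short sketch of filling the descriptor from user-owned buffers (parameter names are illustrative); since the data is only copied when the PxTriangleMesh is actually created, the buffers must stay valid until cooking happens.

#include "cooking/PxTriangleMeshDesc.h"

// Sketch: describe an indexed triangle mesh; materialIndices and sdfDesc stay
// NULL, so the shape's material is used and no signed distance field is built.
physx::PxTriangleMeshDesc makeTriangleMeshDesc(const physx::PxVec3* vertices, physx::PxU32 numVertices,
                                               const physx::PxU32* indices, physx::PxU32 numTriangles)
{
    physx::PxTriangleMeshDesc desc;
    desc.points.count     = numVertices;
    desc.points.stride    = sizeof(physx::PxVec3);
    desc.points.data      = vertices;
    desc.triangles.count  = numTriangles;
    desc.triangles.stride = 3 * sizeof(physx::PxU32);
    desc.triangles.data   = indices;
    // desc.isValid() should hold at this point.
    return desc;
}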
4,610
C
33.155555
177
0.757701
NVIDIA-Omniverse/PhysX/physx/include/cooking/PxBVH34MidphaseDesc.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_BVH_34_MIDPHASE_DESC_H #define PX_BVH_34_MIDPHASE_DESC_H /** \addtogroup cooking @{ */ #include "foundation/PxPreprocessor.h" #include "foundation/PxSimpleTypes.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief Desired build strategy for PxMeshMidPhase::eBVH34 */ struct PxBVH34BuildStrategy { enum Enum { eFAST = 0, //!< Fast build strategy. Fast build speed, good runtime performance in most cases. Recommended for runtime mesh cooking. eDEFAULT = 1, //!< Default build strategy. Medium build speed, good runtime performance in all cases. eSAH = 2, //!< SAH build strategy. Slower builds, slightly improved runtime performance in some cases. eLAST }; }; /** \brief Structure describing parameters affecting BVH34 midphase mesh structure. @see PxCookingParams, PxMidphaseDesc */ struct PxBVH34MidphaseDesc { /** \brief Mesh cooking hint for max primitives per leaf limit. Less primitives per leaf produces larger meshes with better runtime performance and worse cooking performance. More triangles per leaf results in faster cooking speed and smaller mesh sizes, but with worse runtime performance. <b>Default value:</b> 4 <b>Range:</b> <2, 15> */ PxU32 numPrimsPerLeaf; /** \brief Desired build strategy for the BVH <b>Default value:</b> eDEFAULT */ PxBVH34BuildStrategy::Enum buildStrategy; /** \brief Whether the tree should be quantized or not Quantized trees use up less memory, but the runtime dequantization (to retrieve the node bounds) might have a measurable performance cost. In most cases the cost is too small to matter, and using less memory is more important. Hence, the default is true. One important use case for non-quantized trees is deformable meshes. The refit function for the BVH is not supported for quantized trees. <b>Default value:</b> true */ bool quantized; /** \brief Desc initialization to default value. 
*/ void setToDefault() { numPrimsPerLeaf = 4; buildStrategy = PxBVH34BuildStrategy::eDEFAULT; quantized = true; } /** \brief Returns true if the descriptor is valid. \return true if the current settings are valid. */ bool isValid() const { if(numPrimsPerLeaf < 2 || numPrimsPerLeaf > 15) return false; return true; } }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
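A small configuration sketch: for a mesh whose BVH will later be refitted (e.g. a deformable mesh), the tree must not be quantized, per the note above. Values are illustrative.

#include "cooking/PxBVH34MidphaseDesc.h"

physx::PxBVH34MidphaseDesc makeRefittableBVH34Desc()
{
    physx::PxBVH34MidphaseDesc desc;
    desc.setToDefault();    // numPrimsPerLeaf = 4, eDEFAULT strategy, quantized = true
    desc.buildStrategy = physx::PxBVH34BuildStrategy::eFAST;   // favor cooking speed
    desc.quantized     = false;                                // refit is not supported on quantized trees
    // desc.isValid() should hold at this point.
    return desc;
}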
4,031
C
30.255814
135
0.745473
NVIDIA-Omniverse/PhysX/physx/include/cooking/PxSDFDesc.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_SDF_DESC_H #define PX_SDF_DESC_H /** \addtogroup cooking @{ */ #include "PxPhysXConfig.h" #include "geometry/PxSimpleTriangleMesh.h" #include "foundation/PxBounds3.h" #if !PX_DOXYGEN namespace physx { #endif class PxSDFBuilder; /** \brief A helper structure to define dimensions in 3D */ struct PxDim3 { PxU32 x, y, z; }; /** \brief Defines the number of bits per subgrid pixel */ class PxSdfBitsPerSubgridPixel { public: enum Enum { e8_BIT_PER_PIXEL = 1, //!< 8 bit per subgrid pixel (values will be stored as normalized integers) e16_BIT_PER_PIXEL = 2, //!< 16 bit per subgrid pixel (values will be stored as normalized integers) e32_BIT_PER_PIXEL = 4 //!< 32 bit per subgrid pixel (values will be stored as floats in world scale units) }; }; /** \brief A structure describing signed distance field for mesh. */ class PxSDFDesc { public: /** \brief Pointer to first sdf array element. */ PxBoundedData sdf; /** \brief Dimensions of sdf */ PxDim3 dims; /** \brief The Lower bound of the original mesh */ PxVec3 meshLower; /** \brief The spacing of each sdf voxel */ PxReal spacing; /** \brief The number of cells in a sparse subgrid block (full block has subgridSize^3 cells and (subgridSize+1)^3 samples). If set to zero, this indicates that only a dense background grid SDF is used without sparse blocks */ PxU32 subgridSize; /** \brief Enumeration that defines the number of bits per subgrid pixel (either 32, 16 or 8bits) */ PxSdfBitsPerSubgridPixel::Enum bitsPerSubgridPixel; /** \brief Number of subgrid blocks in the 3d texture. The full texture dimension will be sdfSubgrids3DTexBlockDim*(subgridSize+1). */ PxDim3 sdfSubgrids3DTexBlockDim; /** \brief The data to create the 3d texture containg the packed subgrid blocks. 
Stored as PxU8 to support multiple formats (8, 16 and 32 bits per pixel) */ PxBoundedData sdfSubgrids; /** \brief Array with start indices into the subgrid texture for every subgrid block. 10bits for z coordinate, 10bits for y and 10bits for x. Encoding is as follows: slot = (z << 20) | (y << 10) | x */ PxBoundedData sdfStartSlots; /** \brief The minimum value over all subgrid blocks. Used if normalized textures are used which is the case for 8 and 16bit formats */ PxReal subgridsMinSdfValue; /** \brief The maximum value over all subgrid blocks. Used if normalized textures are used which is the case for 8 and 16bit formats */ PxReal subgridsMaxSdfValue; /** \brief The bounds of the SDF. If left unassigned (empty), the bounds of the mesh will be used */ PxBounds3 sdfBounds; /** \brief Narrow band thickness as a fraction of the bounds diagonal length. Every subgrid block that overlaps with the narrow band around the mesh surface will be kept, providing high resolution around the mesh surface. The valid range of this parameter is (0, 1). The higher the value, the more subgrids will be created and the more memory will be required. */ PxReal narrowBandThicknessRelativeToSdfBoundsDiagonal; /** \brief The number of threads that are launched to compute the signed distance field */ PxU32 numThreadsForSdfConstruction; /** \brief Optional pointer to the geometry of the mesh that is used to compute the SDF. If it is not set, the geometry of the mesh that this descriptor is passed to during cooking will be used. The mesh data only needs to be available during cooking. It can be released once cooking has completed. */ PxSimpleTriangleMesh baseMesh; /** \brief Optional pointer to an instance of an SDF builder. This significantly speeds up the construction of the SDF since the default SDF builder will do almost all computations directly on the GPU. The user must release the instance of the sdfBuilder once cooking has completed. */ PxSDFBuilder* sdfBuilder; /** \brief Constructor */ PX_INLINE PxSDFDesc(); /** \brief Returns true if the descriptor is valid. \return true if the current settings are valid */ PX_INLINE bool isValid() const; }; PX_INLINE PxSDFDesc::PxSDFDesc() { sdf.data = NULL; dims.x = 0; dims.y = 0; dims.z = 0; spacing = 0; meshLower = PxVec3(PxZero); subgridSize = 0; subgridsMinSdfValue = 0.0f; subgridsMaxSdfValue = 0.0f; sdfBounds = PxBounds3::empty(); bitsPerSubgridPixel = PxSdfBitsPerSubgridPixel::e16_BIT_PER_PIXEL; narrowBandThicknessRelativeToSdfBoundsDiagonal = 0.01f; numThreadsForSdfConstruction = 1; sdfBuilder = NULL; } PX_INLINE bool PxSDFDesc::isValid() const { // Check validity of user's input (if any) if (sdf.data) { if (dims.x < 1 || dims.y < 1 || dims.z < 1) return false; if (!meshLower.isFinite()) return false; if (spacing <= 0) return false; } return true; } #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
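A configuration sketch: attaching a PxSDFDesc to a triangle mesh descriptor so that cooking also generates a sparse SDF. Only the generation parameters are set here, the sdf/subgrid buffers stay empty, and the numeric values are illustrative.

#include "cooking/PxTriangleMeshDesc.h"
#include "cooking/PxSDFDesc.h"

void requestSparseSdf(physx::PxTriangleMeshDesc& meshDesc, physx::PxSDFDesc& sdfDesc)
{
    sdfDesc.spacing             = 0.05f;    // voxel size in world units
    sdfDesc.subgridSize         = 6;        // sparse blocks of 6^3 cells
    sdfDesc.bitsPerSubgridPixel = physx::PxSdfBitsPerSubgridPixel::e16_BIT_PER_PIXEL;
    sdfDesc.narrowBandThicknessRelativeToSdfBoundsDiagonal = 0.01f;
    sdfDesc.numThreadsForSdfConstruction = 4;

    // sdfDesc must outlive meshDesc until the mesh is cooked.
    meshDesc.sdfDesc = &sdfDesc;
}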
6,608
C
30.322275
221
0.723517
NVIDIA-Omniverse/PhysX/physx/include/cooking/PxMidphaseDesc.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_MIDPHASE_DESC_H #define PX_MIDPHASE_DESC_H /** \addtogroup cooking @{ */ #include "geometry/PxTriangleMesh.h" #include "cooking/PxBVH33MidphaseDesc.h" #include "cooking/PxBVH34MidphaseDesc.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief Structure describing parameters affecting midphase mesh structure. @see PxCookingParams, PxBVH33MidphaseDesc, PxBVH34MidphaseDesc */ class PxMidphaseDesc { public: PX_FORCE_INLINE PxMidphaseDesc() { setToDefault(PxMeshMidPhase::eBVH34); } /** \brief Returns type of midphase mesh structure. \return PxMeshMidPhase::Enum @see PxMeshMidPhase::Enum */ PX_FORCE_INLINE PxMeshMidPhase::Enum getType() const { return mType; } /** \brief Midphase descriptors union @see PxBV33MidphaseDesc, PxBV34MidphaseDesc */ union { PxBVH33MidphaseDesc mBVH33Desc; PxBVH34MidphaseDesc mBVH34Desc; }; /** \brief Initialize the midphase mesh structure descriptor \param[in] type Midphase mesh structure descriptor @see PxBV33MidphaseDesc, PxBV34MidphaseDesc */ void setToDefault(PxMeshMidPhase::Enum type) { mType = type; if(type==PxMeshMidPhase::eBVH33) mBVH33Desc.setToDefault(); else if(type==PxMeshMidPhase::eBVH34) mBVH34Desc.setToDefault(); } /** \brief Returns true if the descriptor is valid. \return true if the current settings are valid. */ bool isValid() const { if(mType==PxMeshMidPhase::eBVH33) return mBVH33Desc.isValid(); else if(mType==PxMeshMidPhase::eBVH34) return mBVH34Desc.isValid(); return false; } /** \brief Assignment operator \return this */ PX_FORCE_INLINE PxMidphaseDesc& operator=(PxMeshMidPhase::Enum descType) { setToDefault(descType); return *this; } protected: PxMeshMidPhase::Enum mType; }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
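A usage sketch (assuming, as in the PhysX cooking API, that PxCookingParams carries a midphaseDesc member, which is not shown in this header): the assignment operator defaults the chosen union member, after which individual fields can be overridden.

#include "cooking/PxMidphaseDesc.h"

physx::PxMidphaseDesc chooseMidphase()
{
    physx::PxMidphaseDesc midphase;
    midphase = physx::PxMeshMidPhase::eBVH34;   // operator=, calls setToDefault(eBVH34)
    midphase.mBVH34Desc.numPrimsPerLeaf = 8;    // smaller cooked mesh, slower runtime queries

    // Assumed hand-off to the cooker (member not shown in this header):
    // physx::PxCookingParams params(scale);
    // params.midphaseDesc = midphase;
    return midphase;
}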
3,518
C
27.609756
75
0.749289
NVIDIA-Omniverse/PhysX/physx/include/cooking/PxBVHDesc.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_BVH_DESC_H #define PX_BVH_DESC_H /** \addtogroup cooking @{ */ #include "common/PxCoreUtilityTypes.h" #include "foundation/PxTransform.h" #include "foundation/PxBounds3.h" #include "geometry/PxBVHBuildStrategy.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief Descriptor class for #PxBVH. @see PxBVH */ class PxBVHDesc { public: PX_INLINE PxBVHDesc(); /** \brief Pointer to first bounding box. */ PxBoundedData bounds; /** \brief Bounds enlargement. Passed bounds are slightly enlarged before creating the BVH. This is done to avoid numerical issues when e.g. raycasts just graze the bounds. The performed operation is: extents = (bounds.maximum - bounds.minimum)/2 enlargedBounds.minimum = passedBounds.minimum - extents * enlargement enlargedBounds.maximum = passedBounds.maximum + extents * enlargement Users can pass pre-enlarged bounds to the BVH builder, in which case just set the enlargement value to zero. <b>Default value:</b> 0.01 */ float enlargement; /** \brief Max primitives per leaf limit. <b>Range:</b> [0, 16)<br> <b>Default value:</b> 4 */ PxU32 numPrimsPerLeaf; /** \brief Desired build strategy for the BVH <b>Default value:</b> eDEFAULT */ PxBVHBuildStrategy::Enum buildStrategy; /** \brief Initialize the BVH descriptor */ PX_INLINE void setToDefault(); /** \brief Returns true if the descriptor is valid. \return true if the current settings are valid.
*/ PX_INLINE bool isValid() const; protected: }; PX_INLINE PxBVHDesc::PxBVHDesc() : enlargement(0.01f), numPrimsPerLeaf(4), buildStrategy(PxBVHBuildStrategy::eDEFAULT) { } PX_INLINE void PxBVHDesc::setToDefault() { *this = PxBVHDesc(); } PX_INLINE bool PxBVHDesc::isValid() const { // Check BVH desc data if(!bounds.data) return false; if(bounds.stride < sizeof(PxBounds3)) //should be at least one bounds' worth of data return false; if(bounds.count == 0) return false; if(enlargement<0.0f) return false; if(numPrimsPerLeaf>=16) return false; return true; } #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
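A short sketch of describing a BVH over user-provided AABBs (names illustrative); since the boxes are assumed to be pre-padded by the caller, enlargement is set to zero as the field documentation above suggests.

#include "cooking/PxBVHDesc.h"

bool describeBvh(physx::PxBVHDesc& bvhDesc, const physx::PxBounds3* boxes, physx::PxU32 numBoxes)
{
    bvhDesc.bounds.count  = numBoxes;
    bvhDesc.bounds.stride = sizeof(physx::PxBounds3);
    bvhDesc.bounds.data   = boxes;          // caller-owned, must stay valid until cooking
    bvhDesc.enlargement   = 0.0f;           // bounds are already enlarged by the caller
    return bvhDesc.isValid();
}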
3,787
C
26.057143
118
0.738843
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxHashMap.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_HASHMAP_H #define PX_HASHMAP_H #include "foundation/PxHashInternals.h" // TODO: make this doxy-format // // This header defines two hash maps. Hash maps // * support custom initial table sizes (rounded up internally to power-of-2) // * support custom static allocator objects // * auto-resize, based on a load factor (i.e. a 64-entry .75 load factor hash will resize // when the 49th element is inserted) // * are based on open hashing // * have O(1) contains, erase // // Maps have STL-like copying semantics, and properly initialize and destruct copies of objects // // There are two forms of map: coalesced and uncoalesced. Coalesced maps keep the entries in the // initial segment of an array, so are fast to iterate over; however deletion is approximately // twice as expensive. // // HashMap<T>: // bool insert(const Key& k, const Value& v) O(1) amortized (exponential resize policy) // Value & operator[](const Key& k) O(1) for existing objects, else O(1) amortized // const Entry * find(const Key& k); O(1) // bool erase(const T& k); O(1) // uint32_t size(); constant // void reserve(uint32_t size); O(MAX(currentOccupancy,size)) // void clear(); O(currentOccupancy) (with zero constant for objects // without // destructors) // Iterator getIterator(); // // operator[] creates an entry if one does not exist, initializing with the default constructor. 
// CoalescedHashMap<T> does not support getIterator, but instead supports // const Key *getEntries(); // // Use of iterators: // // for(HashMap::Iterator iter = test.getIterator(); !iter.done(); ++iter) // myFunction(iter->first, iter->second); #if !PX_DOXYGEN namespace physx { #endif template <class Key, class Value, class HashFn = PxHash<Key>, class Allocator = PxAllocator> class PxHashMap : public physx::PxHashMapBase<Key, Value, HashFn, Allocator> { public: typedef physx::PxHashMapBase<Key, Value, HashFn, Allocator> HashMapBase; typedef typename HashMapBase::Iterator Iterator; PxHashMap(uint32_t initialTableSize = 64, float loadFactor = 0.75f) : HashMapBase(initialTableSize, loadFactor) { } PxHashMap(uint32_t initialTableSize, float loadFactor, const Allocator& alloc) : HashMapBase(initialTableSize, loadFactor, alloc) { } PxHashMap(const Allocator& alloc) : HashMapBase(64, 0.75f, alloc) { } Iterator getIterator() { return Iterator(HashMapBase::mBase); } }; template <class Key, class Value, class HashFn = PxHash<Key>, class Allocator = PxAllocator> class PxCoalescedHashMap : public physx::PxHashMapBase<Key, Value, HashFn, Allocator> { public: typedef physx::PxHashMapBase<Key, Value, HashFn, Allocator> HashMapBase; PxCoalescedHashMap(uint32_t initialTableSize = 64, float loadFactor = 0.75f) : HashMapBase(initialTableSize, loadFactor) { } const PxPair<const Key, Value>* getEntries() const { return HashMapBase::mBase.getEntries(); } }; #if !PX_DOXYGEN } // namespace physx #endif #endif
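A brief usage sketch of the operations listed in the summary comment above; key and value types are arbitrary illustrative choices.

#include "foundation/PxHashMap.h"

void hashMapExample()
{
    physx::PxHashMap<physx::PxU32, float> massPerId(16);   // initial size, rounded up to a power of 2
    massPerId.insert(7u, 1.5f);     // returns false if the key already exists
    massPerId[42u] = 3.0f;          // operator[] creates the entry if missing

    if (const physx::PxHashMap<physx::PxU32, float>::Entry* e = massPerId.find(7u))
        PX_UNUSED(e->second);       // e->first is the key, e->second the value

    for (physx::PxHashMap<physx::PxU32, float>::Iterator it = massPerId.getIterator(); !it.done(); ++it)
        it->second *= 2.0f;         // iterate and mutate values in place
}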
4,719
C
38.333333
112
0.729816
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxHash.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_HASH_H #define PX_HASH_H #include "foundation/PxBasicTemplates.h" #include "foundation/PxString.h" #if PX_VC #pragma warning(push) #pragma warning(disable : 4302) #endif #if PX_LINUX #include "foundation/PxSimpleTypes.h" #endif /*! 
Central definition of hash functions */ #if !PX_DOXYGEN namespace physx { #endif // Hash functions // Thomas Wang's 32 bit mix // http://www.cris.com/~Ttwang/tech/inthash.htm PX_FORCE_INLINE uint32_t PxComputeHash(const uint32_t key) { uint32_t k = key; k += ~(k << 15); k ^= (k >> 10); k += (k << 3); k ^= (k >> 6); k += ~(k << 11); k ^= (k >> 16); return uint32_t(k); } PX_FORCE_INLINE uint32_t PxComputeHash(const int32_t key) { return PxComputeHash(uint32_t(key)); } // Thomas Wang's 64 bit mix // http://www.cris.com/~Ttwang/tech/inthash.htm PX_FORCE_INLINE uint32_t PxComputeHash(const uint64_t key) { uint64_t k = key; k += ~(k << 32); k ^= (k >> 22); k += ~(k << 13); k ^= (k >> 8); k += (k << 3); k ^= (k >> 15); k += ~(k << 27); k ^= (k >> 31); return uint32_t(UINT32_MAX & k); } #if PX_APPLE_FAMILY // hash for size_t, to make gcc happy PX_INLINE uint32_t PxComputeHash(const size_t key) { #if PX_P64_FAMILY return PxComputeHash(uint64_t(key)); #else return PxComputeHash(uint32_t(key)); #endif } #endif // Hash function for pointers PX_INLINE uint32_t PxComputeHash(const void* ptr) { #if PX_P64_FAMILY return PxComputeHash(uint64_t(ptr)); #else return PxComputeHash(uint32_t(UINT32_MAX & size_t(ptr))); #endif } // Hash function for pairs template <typename F, typename S> PX_INLINE uint32_t PxComputeHash(const PxPair<F, S>& p) { uint32_t seed = 0x876543; uint32_t m = 1000007; return PxComputeHash(p.second) ^ (m * (PxComputeHash(p.first) ^ (m * seed))); } // hash object for hash map template parameter template <class Key> struct PxHash { uint32_t operator()(const Key& k) const { return PxComputeHash(k); } bool equal(const Key& k0, const Key& k1) const { return k0 == k1; } }; // specialization for strings template <> struct PxHash<const char*> { public: uint32_t operator()(const char* _string) const { // "DJB" string hash const uint8_t* string = reinterpret_cast<const uint8_t*>(_string); uint32_t h = 5381; for(const uint8_t* ptr = string; *ptr; ptr++) h = ((h << 5) + h) ^ uint32_t(*ptr); return h; } bool equal(const char* string0, const char* string1) const { return !Pxstrcmp(string0, string1); } }; #if !PX_DOXYGEN } // namespace physx #endif #if PX_VC #pragma warning(pop) #endif #endif
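A small sketch of using the hash functions and the PxHash functor directly (values arbitrary); note that the const char* specialization hashes and compares string contents, not pointer values.

#include "foundation/PxHash.h"

void hashExample()
{
    const physx::PxU32 h1 = physx::PxComputeHash(42u);      // Wang 32-bit mix
    const physx::PxU32 h2 = physx::PxComputeHash(physx::PxPair<physx::PxU32, physx::PxU32>(1u, 2u));

    physx::PxHash<const char*> strHash;
    const physx::PxU32 h3 = strHash("sphere");               // DJB string hash
    const bool same = strHash.equal("sphere", "sphere");     // true: compares contents via Pxstrcmp
    PX_UNUSED(h1); PX_UNUSED(h2); PX_UNUSED(h3); PX_UNUSED(same);
}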
4,232
C
24.810975
78
0.698724
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxFPU.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_FPU_H #define PX_FPU_H #include "foundation/PxIntrinsics.h" #include "foundation/PxAssert.h" #include "foundation/PxFoundationConfig.h" #define PX_IR(x) ((PxU32&)(x)) // integer representation of a floating-point value. #define PX_SIR(x) ((PxI32&)(x)) // signed integer representation of a floating-point value. #define PX_FR(x) ((PxReal&)(x)) // floating-point representation of a integer value. #define PX_FPU_GUARD PxFPUGuard scopedFpGuard; #define PX_SIMD_GUARD PxSIMDGuard scopedFpGuard; #define PX_SIMD_GUARD_CNDT(x) PxSIMDGuard scopedFpGuard(x); #if !PX_DOXYGEN namespace physx { #endif // sets the default SDK state for scalar and SIMD units class PX_FOUNDATION_API PxFPUGuard { public: PxFPUGuard(); // set fpu control word for PhysX ~PxFPUGuard(); // restore fpu control word private: PxU32 mControlWords[8]; }; // sets default SDK state for simd unit only, lighter weight than FPUGuard class PxSIMDGuard { public: PX_INLINE PxSIMDGuard(bool enable = true); // set simd control word for PhysX PX_INLINE ~PxSIMDGuard(); // restore simd control word private: #if !(PX_LINUX || PX_OSX) || (!PX_EMSCRIPTEN && PX_INTEL_FAMILY) PxU32 mControlWord; bool mEnabled; #endif }; /** \brief Enables floating point exceptions for the scalar and SIMD unit */ PX_FOUNDATION_API void PxEnableFPExceptions(); /** \brief Disables floating point exceptions for the scalar and SIMD unit */ PX_FOUNDATION_API void PxDisableFPExceptions(); #if !PX_DOXYGEN } // namespace physx #endif #if PX_WINDOWS_FAMILY #include "foundation/windows/PxWindowsFPU.h" #elif (PX_LINUX && PX_SSE2) || PX_OSX #include "foundation/unix/PxUnixFPU.h" #else PX_INLINE physx::PxSIMDGuard::PxSIMDGuard(bool) { } PX_INLINE physx::PxSIMDGuard::~PxSIMDGuard() { } #endif #endif
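A short sketch of the intended scoped usage: the guard macros declare a local guard object whose constructor sets the control word(s) PhysX expects and whose destructor restores the previous state. Because the macros expand to an unqualified PxSIMDGuard/PxFPUGuard declaration, the sketch brings the physx namespace into scope first.

#include "foundation/PxFPU.h"

using namespace physx;

void doSimdHeavyWork()
{
    PX_SIMD_GUARD   // declares a local PxSIMDGuard; lighter weight than PX_FPU_GUARD
    // ... run code that assumes the SDK's SIMD control-word settings ...
}   // previous control word restored when the guard goes out of scope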
3,478
C
33.79
91
0.751006
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxBitAndData.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_BIT_AND_DATA_H #define PX_BIT_AND_DATA_H #include "foundation/Px.h" /** \addtogroup foundation @{ */ #if !PX_DOXYGEN namespace physx { #endif template <typename storageType, storageType bitMask> class PxBitAndDataT { public: PX_FORCE_INLINE PxBitAndDataT(const PxEMPTY) { } PX_FORCE_INLINE PxBitAndDataT() : mData(0) { } PX_FORCE_INLINE PxBitAndDataT(storageType data, bool bit = false) { mData = bit ? storageType(data | bitMask) : data; } PX_CUDA_CALLABLE PX_FORCE_INLINE operator storageType() const { return storageType(mData & ~bitMask); } PX_CUDA_CALLABLE PX_FORCE_INLINE void setBit() { mData |= bitMask; } PX_CUDA_CALLABLE PX_FORCE_INLINE void clearBit() { mData &= ~bitMask; } PX_CUDA_CALLABLE PX_FORCE_INLINE storageType isBitSet() const { return storageType(mData & bitMask); } protected: storageType mData; }; typedef PxBitAndDataT<PxU8, 0x80> PxBitAndByte; typedef PxBitAndDataT<PxU16, 0x8000> PxBitAndWord; typedef PxBitAndDataT<PxU32, 0x80000000> PxBitAndDword; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
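A small sketch: PxBitAndByte packs a 7-bit payload and a single flag into one byte (mask 0x80 is the flag bit), as the typedefs above show for byte, word and dword widths.

#include "foundation/PxBitAndData.h"

void bitAndDataExample()
{
    physx::PxBitAndByte b(12, true);            // payload 12, flag set
    const physx::PxU8 payload = b;              // implicit conversion masks the flag off -> 12
    const bool flagged = (b.isBitSet() != 0);   // true
    b.clearBit();                               // payload unchanged, flag cleared
    PX_UNUSED(payload); PX_UNUSED(flagged);
}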
2,791
C
30.727272
74
0.747402
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxInlineAoS.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_INLINE_AOS_H #define PX_INLINE_AOS_H #include "foundation/PxPreprocessor.h" #if PX_WINDOWS #include "windows/PxWindowsTrigConstants.h" #include "windows/PxWindowsInlineAoS.h" #elif(PX_UNIX_FAMILY || PX_SWITCH) #include "unix/PxUnixTrigConstants.h" #include "unix/PxUnixInlineAoS.h" #else #error "Platform not supported!" #endif #endif
2,046
C
43.499999
74
0.76784
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxErrorCallback.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_ERROR_CALLBACK_H #define PX_ERROR_CALLBACK_H /** \addtogroup foundation @{ */ #include "foundation/PxErrors.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief User defined interface class. Used by the library to emit debug information. \note The SDK state should not be modified from within any error reporting functions. <b>Threading:</b> The SDK sequences its calls to the output stream using a mutex, so the class need not be implemented in a thread-safe manner if the SDK is the only client. */ class PxErrorCallback { public: virtual ~PxErrorCallback() { } /** \brief Reports an error code. \param code Error code, see #PxErrorCode \param message Message to display. \param file File error occured in. \param line Line number error occured on. */ virtual void reportError(PxErrorCode::Enum code, const char* message, const char* file, int line) = 0; }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
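A minimal implementation sketch: route all SDK messages to stderr. An object like this (together with an allocator callback) is typically handed to PxCreateFoundation; remember that SDK state must not be modified from inside reportError.

#include "foundation/PxErrorCallback.h"
#include <cstdio>

class StderrErrorCallback : public physx::PxErrorCallback
{
public:
    virtual void reportError(physx::PxErrorCode::Enum code, const char* message,
                             const char* file, int line)
    {
        // Keep the handler simple: one fprintf per reported error.
        fprintf(stderr, "[PhysX %d] %s (%s:%d)\n", int(code), message, file, line);
    }
};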
2,653
C
34.864864
103
0.752733
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxHashSet.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_HASHSET_H #define PX_HASHSET_H #include "foundation/PxHashInternals.h" // TODO: make this doxy-format // This header defines two hash sets. Hash sets // * support custom initial table sizes (rounded up internally to power-of-2) // * support custom static allocator objects // * auto-resize, based on a load factor (i.e. a 64-entry .75 load factor hash will resize // when the 49th element is inserted) // * are based on open hashing // // Sets have STL-like copying semantics, and properly initialize and destruct copies of objects // // There are two forms of set: coalesced and uncoalesced. Coalesced sets keep the entries in the // initial segment of an array, so are fast to iterate over; however deletion is approximately // twice as expensive. 
// // HashSet<T>: // bool insert(const T& k) amortized O(1) (exponential resize policy) // bool contains(const T& k) const; O(1) // bool erase(const T& k); O(1) // uint32_t size() const; constant // void reserve(uint32_t size); O(MAX(size, currentOccupancy)) // void clear(); O(currentOccupancy) (with zero constant for objects without // destructors) // Iterator getIterator(); // // Use of iterators: // // for(HashSet::Iterator iter = test.getIterator(); !iter.done(); ++iter) // myFunction(*iter); // // CoalescedHashSet<T> does not support getIterator, but instead supports // const Key *getEntries(); // // insertion into a set already containing the element fails returning false, as does // erasure of an element not in the set // #if !PX_DOXYGEN namespace physx { #endif template <class Key, class HashFn = PxHash<Key>, class Allocator = PxAllocator> class PxHashSet : public physx::PxHashSetBase<Key, HashFn, Allocator, false> { public: typedef physx::PxHashSetBase<Key, HashFn, Allocator, false> HashSetBase; typedef typename HashSetBase::Iterator Iterator; PxHashSet(uint32_t initialTableSize = 64, float loadFactor = 0.75f) : HashSetBase(initialTableSize, loadFactor) { } PxHashSet(uint32_t initialTableSize, float loadFactor, const Allocator& alloc) : HashSetBase(initialTableSize, loadFactor, alloc) { } PxHashSet(const Allocator& alloc) : HashSetBase(64, 0.75f, alloc) { } Iterator getIterator() { return Iterator(HashSetBase::mBase); } }; template <class Key, class HashFn = PxHash<Key>, class Allocator = PxAllocator> class PxCoalescedHashSet : public physx::PxHashSetBase<Key, HashFn, Allocator, true> { public: typedef typename physx::PxHashSetBase<Key, HashFn, Allocator, true> HashSetBase; PxCoalescedHashSet(uint32_t initialTableSize = 64, float loadFactor = 0.75f) : HashSetBase(initialTableSize, loadFactor) { } PxCoalescedHashSet(uint32_t initialTableSize, float loadFactor, const Allocator& alloc) : HashSetBase(initialTableSize, loadFactor, alloc) { } PxCoalescedHashSet(const Allocator& alloc) : HashSetBase(64, 0.75f, alloc) { } const Key* getEntries() const { return HashSetBase::mBase.getEntries(); } }; #if !PX_DOXYGEN } // namespace physx #endif #endif
4,787
C
36.116279
112
0.734071
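A short usage sketch for the PxHashSet API documented in the header comment above (insert/contains/erase plus the done()/++/* iterator protocol). It assumes the PhysX foundation has been created, since the default PxAllocator routes through the global broadcast allocator.

#include "foundation/PxHashSet.h"

void hashSetExample()
{
	physx::PxHashSet<physx::PxU32> set;   // default: 64-entry table, 0.75 load factor

	set.insert(3);
	set.insert(5);
	set.insert(5);                        // inserting an existing key returns false

	const bool hasThree = set.contains(3);   // true
	set.erase(3);                            // erasing a missing key would return false

	// Visit the remaining entries with the iterator protocol from the header comment.
	for(physx::PxHashSet<physx::PxU32>::Iterator it = set.getIterator(); !it.done(); ++it)
	{
		physx::PxU32 value = *it;
		(void)value;
	}
	(void)hasThree;
}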
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxPool.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_POOL_H #define PX_POOL_H #include "foundation/PxArray.h" #include "foundation/PxSort.h" #include "foundation/PxBasicTemplates.h" #include "foundation/PxInlineArray.h" #include "foundation/PxMemory.h" namespace physx { /*! Simple allocation pool */ template <class T, class Alloc = typename PxAllocatorTraits<T>::Type> class PxPoolBase : public PxUserAllocated, public Alloc { PX_NOCOPY(PxPoolBase) protected: PxPoolBase(const Alloc& alloc, uint32_t elementsPerSlab, uint32_t slabSize) : Alloc(alloc), mSlabs(alloc), mElementsPerSlab(elementsPerSlab), mUsed(0), mSlabSize(slabSize), mFreeElement(0) { mSlabs.reserve(64); #if PX_CLANG #if PX_LINUX #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wunused-local-typedef" #endif // PX_LINUX #endif // PX_CLANG PX_COMPILE_TIME_ASSERT(sizeof(T) >= sizeof(size_t)); #if PX_CLANG #if PX_LINUX #pragma clang diagnostic pop #endif #endif } public: ~PxPoolBase() { if(mUsed) disposeElements(); for(void** slabIt = mSlabs.begin(), *slabEnd = mSlabs.end(); slabIt != slabEnd; ++slabIt) Alloc::deallocate(*slabIt); } // Allocate space for single object PX_INLINE T* allocate() { if(mFreeElement == 0) allocateSlab(); T* p = reinterpret_cast<T*>(mFreeElement); mFreeElement = mFreeElement->mNext; mUsed++; PxMarkSerializedMemory(p, sizeof(T)); return p; } // Put space for a single element back in the lists PX_INLINE void deallocate(T* p) { if(p) { PX_ASSERT(mUsed); mUsed--; push(reinterpret_cast<FreeList*>(p)); } } PX_INLINE T* construct() { T* t = allocate(); return t ? PX_PLACEMENT_NEW(t, T()) : NULL; } template <class A1> PX_INLINE T* construct(A1& a) { T* t = allocate(); return t ? PX_PLACEMENT_NEW(t, T(a)) : NULL; } template <class A1, class A2> PX_INLINE T* construct(A1& a, A2& b) { T* t = allocate(); return t ? 
PX_PLACEMENT_NEW(t, T(a, b)) : NULL; } template <class A1, class A2, class A3> PX_INLINE T* construct(A1& a, A2& b, A3& c) { T* t = allocate(); return t ? PX_PLACEMENT_NEW(t, T(a, b, c)) : NULL; } template <class A1, class A2, class A3> PX_INLINE T* construct(A1* a, A2& b, A3& c) { T* t = allocate(); return t ? PX_PLACEMENT_NEW(t, T(a, b, c)) : NULL; } template <class A1, class A2, class A3, class A4> PX_INLINE T* construct(A1& a, A2& b, A3& c, A4& d) { T* t = allocate(); return t ? PX_PLACEMENT_NEW(t, T(a, b, c, d)) : NULL; } template <class A1, class A2, class A3, class A4, class A5> PX_INLINE T* construct(A1& a, A2& b, A3& c, A4& d, A5& e) { T* t = allocate(); return t ? PX_PLACEMENT_NEW(t, T(a, b, c, d, e)) : NULL; } template <class A1, class A2, class A3, class A4, class A5, class A6> PX_INLINE T* construct(A1& a, A2& b, A3& c, A4& d, A5& e, A6& f) { T* t = allocate(); return t ? PX_PLACEMENT_NEW(t, T(a, b, c, d, e, f)) : NULL; } template <class A1, class A2, class A3, class A4, class A5, class A6, class A7> PX_INLINE T* construct(A1& a, A2& b, A3& c, A4& d, A5& e, A6& f, A7& g) { T* t = allocate(); return t ? PX_PLACEMENT_NEW(t, T(a, b, c, d, e, f, g)) : NULL; } template <class A1, class A2, class A3, class A4, class A5, class A6, class A7, class A8> PX_INLINE T* construct(A1& a, A2& b, A3& c, A4& d, A5& e, A6& f, A7& g, A8& h) { T* t = allocate(); return t ? PX_PLACEMENT_NEW(t, T(a, b, c, d, e, f, g, h)) : NULL; } PX_INLINE void destroy(T* const p) { if(p) { p->~T(); deallocate(p); } } protected: struct FreeList { FreeList* mNext; }; // All the allocated slabs, sorted by pointer PxArray<void*, Alloc> mSlabs; uint32_t mElementsPerSlab; uint32_t mUsed; uint32_t mSlabSize; FreeList* mFreeElement; // Head of free-list // Helper function to get bitmap of allocated elements void push(FreeList* p) { p->mNext = mFreeElement; mFreeElement = p; } // Allocate a slab and segregate it into the freelist void allocateSlab() { T* slab = reinterpret_cast<T*>(Alloc::allocate(mSlabSize, PX_FL)); mSlabs.pushBack(slab); // Build a chain of nodes for the freelist T* it = slab + mElementsPerSlab; while(--it >= slab) push(reinterpret_cast<FreeList*>(it)); } /* Cleanup method. 
Go through all active slabs and call destructor for live objects, then free their memory */ void disposeElements() { PxArray<void*, Alloc> freeNodes(*this); while(mFreeElement) { freeNodes.pushBack(mFreeElement); mFreeElement = mFreeElement->mNext; } Alloc& alloc(*this); PxSort(freeNodes.begin(), freeNodes.size(), PxLess<void*>(), alloc); PxSort(mSlabs.begin(), mSlabs.size(), PxLess<void*>(), alloc); typename PxArray<void*, Alloc>::Iterator slabIt = mSlabs.begin(), slabEnd = mSlabs.end(); for(typename PxArray<void*, Alloc>::Iterator freeIt = freeNodes.begin(); slabIt != slabEnd; ++slabIt) { for(T* tIt = reinterpret_cast<T*>(*slabIt), *tEnd = tIt + mElementsPerSlab; tIt != tEnd; ++tIt) { if(freeIt != freeNodes.end() && *freeIt == tIt) ++freeIt; else tIt->~T(); } } } }; // original pool implementation template <class T, class Alloc = typename PxAllocatorTraits<T>::Type> class PxPool : public PxPoolBase<T, Alloc> { public: PxPool(const Alloc& alloc = Alloc(), uint32_t elementsPerSlab = 32) : PxPoolBase<T, Alloc>(alloc, elementsPerSlab, elementsPerSlab * sizeof(T)) { } }; // allows specification of the slab size instead of the occupancy template <class T, uint32_t slabSize, class Alloc = typename PxAllocatorTraits<T>::Type> class PxPool2 : public PxPoolBase<T, Alloc> { public: PxPool2(const Alloc& alloc = Alloc()) : PxPoolBase<T, Alloc>(alloc, slabSize / sizeof(T), slabSize) { } }; } // namespace physx #endif
7,420
C
26.485185
113
0.676146
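A brief sketch of the construct/destroy cycle on PxPool. The Particle type is illustrative; note that construct() takes its arguments by non-const reference, so they are passed as named lvalues here. The default allocator assumes an initialized foundation.

#include "foundation/PxPool.h"

// Illustrative element type; it must be at least sizeof(size_t) bytes, as enforced
// by the compile-time assert in PxPoolBase.
struct Particle
{
	Particle(float x, float y) : mX(x), mY(y) {}
	float mX, mY;
};

void poolExample()
{
	// 32 elements per slab by default; slab memory is only released when the pool is destroyed.
	physx::PxPool<Particle> pool;

	float x = 1.0f, y = 2.0f;
	Particle* p = pool.construct(x, y);   // allocate from the free list + placement-new
	// ... use p ...
	pool.destroy(p);                      // run the destructor and push the slot back on the free list
}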
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxAlloca.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_ALLOCA_H #define PX_ALLOCA_H #include "foundation/PxTempAllocator.h" #if !PX_DOXYGEN namespace physx { #endif template <typename T, typename Alloc = PxTempAllocator> class PxScopedPointer : private Alloc { public: ~PxScopedPointer() { if(mOwned) Alloc::deallocate(mPointer); } operator T*() const { return mPointer; } T* mPointer; bool mOwned; }; #if !PX_DOXYGEN } // namespace physx #endif // Don't use inline for alloca !!! #if PX_WINDOWS_FAMILY #include <malloc.h> #define PxAlloca(x) _alloca(x) #elif PX_LINUX #include <malloc.h> #define PxAlloca(x) alloca(x) #elif PX_APPLE_FAMILY #include <alloca.h> #define PxAlloca(x) alloca(x) #elif PX_SWITCH #include <malloc.h> #define PxAlloca(x) alloca(x) #endif #define PxAllocaAligned(x, alignment) ((size_t(PxAlloca(x + alignment)) + (alignment - 1)) & ~size_t(alignment - 1)) /*! Stack allocation for \c count instances of \c type. Falling back to temp allocator if using more than 1kB. */ #define PX_ALLOCA(var, type, count) \ physx::PxScopedPointer<type> var; \ { \ const uint32_t size = sizeof(type) * (count); \ var.mOwned = size > 1024; \ if(var.mOwned) \ var.mPointer = reinterpret_cast<type*>(physx::PxTempAllocator().allocate(size, PX_FL)); \ else \ var.mPointer = reinterpret_cast<type*>(PxAlloca(size)); \ } #endif
3,164
C
33.780219
116
0.701643
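A small sketch of the PX_ALLOCA macro defined above: the buffer lives on the stack while the request is 1 KB or less and otherwise falls back to PxTempAllocator, with PxScopedPointer releasing the heap path when the variable goes out of scope.

#include "foundation/PxAlloca.h"

void allocaExample(unsigned int count)
{
	// Scratch buffer for 'count' floats; indexing works through PxScopedPointer's
	// implicit conversion to float*.
	PX_ALLOCA(scratch, float, count);
	for(unsigned int i = 0; i < count; ++i)
		scratch[i] = 0.0f;
}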
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxAllocator.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_ALLOCATOR_H #define PX_ALLOCATOR_H #include "foundation/PxAllocatorCallback.h" #include "foundation/PxAssert.h" #include "foundation/PxFoundation.h" #include "foundation/Px.h" #if PX_VC #pragma warning(push) #pragma warning(disable : 4577) #endif #if PX_WINDOWS_FAMILY #include <exception> #if(_MSC_VER >= 1923) #include <typeinfo> #else #include <typeinfo.h> #endif #endif #if(PX_APPLE_FAMILY) #include <typeinfo> #endif #include <new> #if PX_VC #pragma warning(pop) #endif // PT: the rules are simple: // - PX_ALLOC/PX_ALLOCATE/PX_FREE is similar to malloc/free. Use that for POD/anything that doesn't need ctor/dtor. // - PX_NEW/PX_DELETE is similar to new/delete. Use that for anything that needs a ctor/dtor. // - Everything goes through the user allocator. // - Inherit from PxUserAllocated to PX_NEW something. Do it even on small classes, it's free. // - You cannot PX_NEW a POD. Use PX_ALLOC. #define PX_ALLOC(n, name) physx::PxAllocator().allocate(n, PX_FL) // PT: use this one to reduce the amount of visible reinterpret_cast #define PX_ALLOCATE(type, count, name) reinterpret_cast<type*>(PX_ALLOC(count*sizeof(type), name)) #define PX_FREE(x) \ if(x) \ { \ physx::PxAllocator().deallocate(x); \ x = NULL; \ } #define PX_FREE_THIS physx::PxAllocator().deallocate(this) #define PX_NEW(T) new (physx::PxReflectionAllocator<T>(), PX_FL) T #define PX_PLACEMENT_NEW(p, T) new (p) T #define PX_DELETE_THIS delete this #define PX_DELETE(x) if(x) { delete x; x = NULL; } #define PX_DELETE_ARRAY(x) if(x) { delete []x; x = NULL; } #define PX_RELEASE(x) if(x) { x->release(); x = NULL; } #if !PX_DOXYGEN namespace physx { #endif /** \brief Allocator used to access the global PxAllocatorCallback instance without providing additional information. 
*/ class PxAllocator { public: PX_FORCE_INLINE PxAllocator(const char* = NULL){} PX_FORCE_INLINE void* allocate(size_t size, const char* file, int line) { return size ? PxGetBroadcastAllocator()->allocate(size, "", file, line) : NULL; } PX_FORCE_INLINE void deallocate(void* ptr) { if(ptr) PxGetBroadcastAllocator()->deallocate(ptr); } }; /** * \brief Bootstrap allocator using malloc/free. * Don't use unless your objects get allocated before foundation is initialized. */ class PxRawAllocator { public: PxRawAllocator(const char* = 0) {} PX_FORCE_INLINE void* allocate(size_t size, const char*, int) { // malloc returns valid pointer for size==0, no need to check return ::malloc(size); } PX_FORCE_INLINE void deallocate(void* ptr) { // free(0) is guaranteed to have no side effect, no need to check ::free(ptr); } }; /** \brief Virtual allocator callback used to provide run-time defined allocators to foundation types like Array or Bitmap. This is used by VirtualAllocator */ class PxVirtualAllocatorCallback { public: PxVirtualAllocatorCallback() {} virtual ~PxVirtualAllocatorCallback() {} virtual void* allocate(const size_t size, const int group, const char* file, const int line) = 0; virtual void deallocate(void* ptr) = 0; }; /** \brief Virtual allocator to be used by foundation types to provide run-time defined allocators. Due to the fact that Array extends its allocator, rather than contains a reference/pointer to it, the VirtualAllocator must be a concrete type containing a pointer to a virtual callback. The callback may not be available at instantiation time, therefore methods are provided to set the callback later. */ class PxVirtualAllocator { public: PxVirtualAllocator(PxVirtualAllocatorCallback* callback = NULL, const int group = 0) : mCallback(callback), mGroup(group) {} PX_FORCE_INLINE void* allocate(const size_t size, const char* file, const int line) { PX_ASSERT(mCallback); if (size) return mCallback->allocate(size, mGroup, file, line); return NULL; } PX_FORCE_INLINE void deallocate(void* ptr) { PX_ASSERT(mCallback); if (ptr) mCallback->deallocate(ptr); } void setCallback(PxVirtualAllocatorCallback* callback) { mCallback = callback; } PxVirtualAllocatorCallback* getCallback() { return mCallback; } private: PxVirtualAllocatorCallback* mCallback; const int mGroup; PxVirtualAllocator& operator=(const PxVirtualAllocator&); }; /** \brief Allocator used to access the global PxAllocatorCallback instance using a static name derived from T. */ template <typename T> class PxReflectionAllocator { static const char* getName(bool reportAllocationNames) { if(!reportAllocationNames) return "<allocation names disabled>"; #if PX_GCC_FAMILY return __PRETTY_FUNCTION__; #else // name() calls malloc(), raw_name() wouldn't return typeid(T).name(); #endif } public: PxReflectionAllocator(const PxEMPTY) {} PxReflectionAllocator(const char* = 0) {} inline PxReflectionAllocator(const PxReflectionAllocator&) {} PX_FORCE_INLINE void* allocate(size_t size, const char* filename, int line) { if(!size) return NULL; bool reportAllocationNames; PxAllocatorCallback* cb = PxGetBroadcastAllocator(&reportAllocationNames); return cb->allocate(size, getName(reportAllocationNames), filename, line); } PX_FORCE_INLINE void deallocate(void* ptr) { if(ptr) PxGetBroadcastAllocator()->deallocate(ptr); } }; template <typename T> struct PxAllocatorTraits { typedef PxReflectionAllocator<T> Type; }; #if !PX_DOXYGEN } // namespace physx #endif #endif
7,239
C
28.672131
126
0.72289
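A short sketch of the allocation rules summarized in the comments above: PX_ALLOC/PX_ALLOCATE/PX_FREE for plain buffers, PX_NEW/PX_DELETE for types deriving from PxUserAllocated. MyObject is illustrative, and the foundation (global allocator) is assumed to exist.

#include "foundation/PxAllocator.h"
#include "foundation/PxUserAllocated.h"

// Classes created with PX_NEW should inherit from PxUserAllocated.
class MyObject : public physx::PxUserAllocated
{
public:
	MyObject(int value) : mValue(value) {}
	int mValue;
};

void allocatorExample()
{
	// POD-style buffer: PX_ALLOCATE wraps PX_ALLOC and does the cast for you.
	float* buffer = PX_ALLOCATE(float, 16, "exampleBuffer");
	PX_FREE(buffer);                 // also resets 'buffer' to NULL

	// Anything with a constructor/destructor goes through PX_NEW / PX_DELETE.
	MyObject* obj = PX_NEW(MyObject)(42);
	PX_DELETE(obj);                  // also resets 'obj' to NULL
}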
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxTime.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_TIME_H #define PX_TIME_H #include "foundation/PxSimpleTypes.h" #include "foundation/PxFoundationConfig.h" #if PX_LINUX #include <time.h> #endif #if !PX_DOXYGEN namespace physx { #endif struct PxCounterFrequencyToTensOfNanos { PxU64 mNumerator; PxU64 mDenominator; PxCounterFrequencyToTensOfNanos(PxU64 inNum, PxU64 inDenom) : mNumerator(inNum), mDenominator(inDenom) { } // quite slow. PxU64 toTensOfNanos(PxU64 inCounter) const { return (inCounter * mNumerator) / mDenominator; } }; class PX_FOUNDATION_API PxTime { public: typedef PxF64 Second; static const PxU64 sNumTensOfNanoSecondsInASecond = 100000000; // This is supposedly guaranteed to not change after system boot // regardless of processors, speedstep, etc. static const PxCounterFrequencyToTensOfNanos& getBootCounterFrequency(); static PxCounterFrequencyToTensOfNanos getCounterFrequency(); static PxU64 getCurrentCounterValue(); // SLOW!! // Thar be a 64 bit divide in thar! static PxU64 getCurrentTimeInTensOfNanoSeconds() { PxU64 ticks = getCurrentCounterValue(); return getBootCounterFrequency().toTensOfNanos(ticks); } PxTime(); Second getElapsedSeconds(); Second peekElapsedSeconds(); Second getLastTime() const; private: #if PX_LINUX || PX_APPLE_FAMILY Second mLastTime; #else PxI64 mTickCount; #endif }; #if !PX_DOXYGEN } // namespace physx #endif #endif
3,084
C
30.479592
103
0.766213
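A usage sketch for PxTime, assuming the PhysX foundation library is linked (the class is PX_FOUNDATION_API). The restart-versus-peek semantics noted in the comments are inferred from the method names.

#include "foundation/PxTime.h"

void timeExample()
{
	physx::PxTime timer;

	// ... do some work ...

	// getElapsedSeconds() reads the interval since construction (or the previous call)
	// and restarts it; peekElapsedSeconds() reads it without restarting.
	physx::PxTime::Second elapsed = timer.getElapsedSeconds();
	(void)elapsed;
}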
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxBitMap.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_BITMAP_H #define PX_BITMAP_H #include "foundation/PxAssert.h" #include "foundation/PxMath.h" #include "foundation/PxMemory.h" #include "foundation/PxAllocator.h" #include "foundation/PxUserAllocated.h" #include "foundation/PxIntrinsics.h" #include "foundation/PxBitUtils.h" #if !PX_DOXYGEN namespace physx { #endif /*! Hold a bitmap with operations to set,reset or test given bit. We inhibit copy to prevent unintentional copies. If a copy is desired copy() should be used or alternatively a copy constructor implemented. */ template<class PxAllocator> class PxBitMapBase : public PxUserAllocated { PX_NOCOPY(PxBitMapBase) public: // PX_SERIALIZATION /* todo: explicit */ PxBitMapBase(const PxEMPTY) { if(mMap) mWordCount |= PX_SIGN_BITMASK; } //~PX_SERIALIZATION PX_INLINE PxBitMapBase(const PxAllocator& allocator) : mMap(0), mWordCount(0), mAllocator(allocator) {} PX_INLINE PxBitMapBase() : mMap(0), mWordCount(0) {} PX_INLINE ~PxBitMapBase() { release(); } PX_INLINE void release() { if(mMap && !isInUserMemory()) mAllocator.deallocate(mMap); mMap = NULL; } PX_FORCE_INLINE PxAllocator& getAllocator() { return mAllocator; } PX_INLINE void growAndSet(PxU32 index) { extend(index + 1); mMap[index >> 5] |= 1 << (index & 31); } PX_INLINE void growAndReset(PxU32 index) { extend(index + 1); mMap[index >> 5] &= ~(1 << (index & 31)); } PX_INLINE PxIntBool boundedTest(PxU32 index) const { return PxIntBool(index >> 5 >= getWordCount() ? 
PxIntFalse : (mMap[index >> 5] & (1 << (index & 31)))); } PX_INLINE void boundedReset(PxU32 index) { if((index >> 5) < getWordCount()) mMap[index >> 5] &= ~(1 << (index & 31)); } // Special optimized versions, when you _know_ your index is in range PX_INLINE void set(PxU32 index) { PX_ASSERT(index<getWordCount() * 32); mMap[index >> 5] |= 1 << (index & 31); } PX_INLINE void reset(PxU32 index) { PX_ASSERT(index<getWordCount() * 32); mMap[index >> 5] &= ~(1 << (index & 31)); } PX_INLINE PxIntBool test(PxU32 index) const { PX_ASSERT(index<getWordCount() * 32); return PxIntBool(mMap[index >> 5] & (1 << (index & 31))); } // nibble == 4 bits PX_INLINE PxU32 getNibbleFast(PxU32 nibIndex) const { const PxU32 bitIndex = nibIndex << 2; PX_ASSERT(bitIndex < getWordCount() * 32); return (mMap[bitIndex >> 5] >> (bitIndex & 31)) & 0xf; } PX_INLINE void andNibbleFast(PxU32 nibIndex, PxU32 mask) { //TODO: there has to be a faster way... const PxU32 bitIndex = nibIndex << 2; const PxU32 shift = (bitIndex & 31); const PxU32 nibMask = (0xfu << shift); PX_ASSERT(bitIndex < getWordCount() * 32); mMap[bitIndex >> 5] &= ((mask << shift) | ~nibMask); } PX_INLINE void orNibbleFast(PxU32 nibIndex, PxU32 mask) { PX_ASSERT(!(mask & ~0xfu)); //check extra bits are not set const PxU32 bitIndex = nibIndex << 2; const PxU32 shift = bitIndex & 31; PX_ASSERT(bitIndex < getWordCount() * 32); mMap[bitIndex >> 5] |= (mask << shift); } void clear() { PxMemSet(mMap, 0, getWordCount() * sizeof(PxU32)); } void resizeAndClear(PxU32 newBitCount) { extendUninitialized(newBitCount); PxMemSet(mMap, 0, getWordCount() * sizeof(PxU32)); } void setEmpty() { mMap = NULL; mWordCount = 0; } void setWords(PxU32* map, PxU32 wordCount) { mMap = map; mWordCount = wordCount | PX_SIGN_BITMASK; } // !!! only sets /last/ bit to value void resize(PxU32 newBitCount, bool value = false) { PX_ASSERT(!value); // only new class supports this PX_UNUSED(value); extend(newBitCount); } PX_FORCE_INLINE PxU32 size() const { return getWordCount() * 32; } void copy(const PxBitMapBase& a) { extendUninitialized(a.getWordCount() << 5); PxMemCopy(mMap, a.mMap, a.getWordCount() * sizeof(PxU32)); if(getWordCount() > a.getWordCount()) PxMemSet(mMap + a.getWordCount(), 0, (getWordCount() - a.getWordCount()) * sizeof(PxU32)); } PX_INLINE PxU32 count() const { // NOTE: we can probably do this faster, since the last steps in PxBitCount can be defered to // the end of the seq. + 64/128bits at a time + native bit counting instructions(360 is fast non micro code). PxU32 count = 0; const PxU32 wordCount = getWordCount(); for(PxU32 i = 0; i<wordCount; i++) count += PxBitCount(mMap[i]); return count; } PX_INLINE PxU32 count(PxU32 start, PxU32 length) const { const PxU32 end = PxMin(getWordCount() << 5, start + length); PxU32 count = 0; for(PxU32 i = start; i<end; i++) count += (test(i) != 0); return count; } //! returns 0 if no bits set (!!!) 
PxU32 findLast() const { const PxU32 wordCount = getWordCount(); for(PxU32 i = wordCount; i-- > 0;) { if(mMap[i]) return (i << 5) + PxHighestSetBit(mMap[i]); } return PxU32(0); } // the obvious combiners and some used in the SDK struct OR { PX_INLINE PxU32 operator()(PxU32 a, PxU32 b) { return a | b; } }; struct AND { PX_INLINE PxU32 operator()(PxU32 a, PxU32 b) { return a&b; } }; struct XOR { PX_INLINE PxU32 operator()(PxU32 a, PxU32 b) { return a^b; } }; // we use auxiliary functions here so as not to generate combiners for every combination // of allocators template<class Combiner, class _> PX_INLINE void combineInPlace(const PxBitMapBase<_>& b) { combine1<Combiner>(b.mMap, b.getWordCount()); } template<class Combiner, class _1, class _2> PX_INLINE void combine(const PxBitMapBase<_1>& a, const PxBitMapBase<_2>& b) { combine2<Combiner>(a.mMap, a.getWordCount(), b.mMap, b.getWordCount()); } PX_FORCE_INLINE const PxU32* getWords() const { return mMap; } PX_FORCE_INLINE PxU32* getWords() { return mMap; } // PX_SERIALIZATION PX_FORCE_INLINE PxU32 getWordCount() const { return mWordCount & ~PX_SIGN_BITMASK; } // We need one bit to mark arrays that have been deserialized from a user-provided memory block. PX_FORCE_INLINE PxU32 isInUserMemory() const { return mWordCount & PX_SIGN_BITMASK; } //~PX_SERIALIZATION /*! Iterate over indices in a bitmap This iterator is good because it finds the set bit without looping over the cached bits upto 31 times. However it does require a variable shift. */ class Iterator { public: static const PxU32 DONE = 0xffffffff; PX_INLINE Iterator(const PxBitMapBase &map) : mBitMap(map) { reset(); } PX_INLINE Iterator& operator=(const Iterator& other) { PX_ASSERT(&mBitMap == &other.mBitMap); mBlock = other.mBlock; mIndex = other.mIndex; return *this; } PX_INLINE PxU32 getNext() { if(mBlock) { PxU32 block = mBlock; PxU32 index = mIndex; const PxU32 bitIndex = index << 5 | PxLowestSetBit(block); block &= block - 1; PxU32 wordCount = mBitMap.getWordCount(); while(!block && ++index < wordCount) block = mBitMap.mMap[index]; mBlock = block; mIndex = index; return bitIndex; } return DONE; } PX_INLINE void reset() { PxU32 index = 0; PxU32 block = 0; PxU32 wordCount = mBitMap.getWordCount(); while(index < wordCount && ((block = mBitMap.mMap[index]) == 0)) ++index; mBlock = block; mIndex = index; } private: PxU32 mBlock, mIndex; const PxBitMapBase& mBitMap; }; // DS: faster but less general: hasBits() must be true or getNext() is illegal so it is the calling code's responsibility to ensure that getNext() is not called illegally. 
class PxLoopIterator { PX_NOCOPY(PxLoopIterator) public: PX_FORCE_INLINE PxLoopIterator(const PxBitMapBase &map) : mMap(map.getWords()), mBlock(0), mIndex(-1), mWordCount(PxI32(map.getWordCount())) {} PX_FORCE_INLINE bool hasBits() { PX_ASSERT(mIndex<mWordCount); while (mBlock == 0) { if (++mIndex == mWordCount) return false; mBlock = mMap[mIndex]; } return true; } PX_FORCE_INLINE PxU32 getNext() { PX_ASSERT(mIndex<mWordCount && mBlock != 0); PxU32 result = PxU32(mIndex) << 5 | PxLowestSetBit(mBlock); // will assert if mask is zero mBlock &= (mBlock - 1); return result; } private: const PxU32*const mMap; PxU32 mBlock; // the word we're currently scanning PxI32 mIndex; // the index of the word we're currently looking at PxI32 mWordCount; }; //Class to iterate over the bitmap from a particular start location rather than the beginning of the list class PxCircularIterator { public: static const PxU32 DONE = 0xffffffff; PX_INLINE PxCircularIterator(const PxBitMapBase &map, PxU32 index) : mBitMap(map) { PxU32 localIndex = 0; PxU32 startIndex = 0; const PxU32 wordCount = mBitMap.getWordCount(); if((index << 5) < wordCount) { localIndex = index << 5; startIndex = localIndex; } PxU32 block = 0; if(localIndex < wordCount) { block = mBitMap.mMap[localIndex]; if(block == 0) { localIndex = (localIndex + 1) % wordCount; while(localIndex != startIndex && (block = mBitMap.mMap[localIndex]) == 0) localIndex = (localIndex + 1) % wordCount; } } mIndex = localIndex; mBlock = block; mStartIndex = startIndex; } PX_INLINE PxU32 getNext() { if(mBlock) { PxU32 index = mIndex; PxU32 block = mBlock; const PxU32 startIndex = mStartIndex; PxU32 bitIndex = index << 5 | PxLowestSetBit(block); block &= block - 1; PxU32 wordCount = mBitMap.getWordCount(); while (!block && (index = ((index + 1) % wordCount)) != startIndex) block = mBitMap.mMap[index]; mIndex = index; mBlock = block; return bitIndex; } return DONE; } private: PxU32 mBlock, mIndex; PxU32 mStartIndex; const PxBitMapBase& mBitMap; PX_NOCOPY(PxCircularIterator) }; protected: PxU32* mMap; //one bit per index PxU32 mWordCount; PxAllocator mAllocator; PxU8 mPadding[3]; // PT: "mAllocator" is empty but consumes 1 byte void extend(PxU32 size) { const PxU32 newWordCount = (size + 31) >> 5; if (newWordCount > getWordCount()) { PxU32* newMap = reinterpret_cast<PxU32*>(mAllocator.allocate(newWordCount * sizeof(PxU32), PX_FL)); if (mMap) { PxMemCopy(newMap, mMap, getWordCount() * sizeof(PxU32)); if (!isInUserMemory()) mAllocator.deallocate(mMap); } PxMemSet(newMap + getWordCount(), 0, (newWordCount - getWordCount()) * sizeof(PxU32)); mMap = newMap; // also resets the isInUserMemory bit mWordCount = newWordCount; } } void extendUninitialized(PxU32 size) { PxU32 newWordCount = (size + 31) >> 5; if (newWordCount > getWordCount()) { if (mMap && !isInUserMemory()) mAllocator.deallocate(mMap); // also resets the isInUserMemory bit mWordCount = newWordCount; mMap = reinterpret_cast<PxU32*>(mAllocator.allocate(mWordCount * sizeof(PxU32), PX_FL)); } } template<class Combiner> void combine1(const PxU32* words, PxU32 length) { extend(length << 5); PxU32 combineLength = PxMin(getWordCount(), length); for (PxU32 i = 0; i<combineLength; i++) mMap[i] = Combiner()(mMap[i], words[i]); } template<class Combiner> void combine2(const PxU32* words1, PxU32 length1, const PxU32* words2, PxU32 length2) { extendUninitialized(PxMax(length1, length2) << 5); PxU32 commonSize = PxMin(length1, length2); for (PxU32 i = 0; i<commonSize; i++) mMap[i] = Combiner()(words1[i], words2[i]); for (PxU32 i = 
commonSize; i<length1; i++) mMap[i] = Combiner()(words1[i], 0); for (PxU32 i = commonSize; i<length2; i++) mMap[i] = Combiner()(0, words2[i]); } friend class Iterator; }; typedef PxBitMapBase<PxAllocator> PxBitMap; typedef PxBitMapBase<PxVirtualAllocator> PxBitMapPinned; #if !PX_DOXYGEN } // namespace physx #endif #endif
13,711
C
26.478958
173
0.658085
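A short sketch of common PxBitMap operations (sized set/test, growing set, and the Iterator protocol that returns DONE when exhausted). The default PxAllocator assumes an initialized foundation.

#include "foundation/PxBitMap.h"

void bitMapExample()
{
	physx::PxBitMap map;

	map.resizeAndClear(128);     // room for 128 bits, all zero
	map.set(3);                  // set()/test() require the index to be in range
	map.growAndSet(200);         // grows the word array as needed

	if(map.test(3))
	{
		// Visit every set bit; getNext() returns Iterator::DONE when exhausted.
		physx::PxBitMap::Iterator it(map);
		for(physx::PxU32 index = it.getNext(); index != physx::PxBitMap::Iterator::DONE; index = it.getNext())
		{
			// yields 3, then 200
			(void)index;
		}
	}
}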
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxIntrinsics.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_INTRINSICS_H #define PX_INTRINSICS_H #include "foundation/PxPreprocessor.h" #if PX_WINDOWS_FAMILY #include "windows/PxWindowsIntrinsics.h" #elif(PX_LINUX || PX_APPLE_FAMILY) #include "unix/PxUnixIntrinsics.h" #elif PX_SWITCH #include "switch/PxSwitchIntrinsics.h" #else #error "Platform not supported!" #endif #if PX_WINDOWS_FAMILY #pragma intrinsic(memcmp) #pragma intrinsic(memcpy) #pragma intrinsic(memset) #endif #endif // #ifndef PX_INTRINSICS_H
2,162
C
41.411764
74
0.76827
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxSIMDHelpers.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_SIMD_HELPERS_H #define PX_SIMD_HELPERS_H #include "foundation/PxMat33.h" #include "foundation/PxVecMath.h" #include "foundation/PxTransform.h" #if !PX_DOXYGEN namespace physx { #endif //! A padded version of PxMat33, to safely load its data using SIMD class PxMat33Padded : public PxMat33 { public: explicit PX_FORCE_INLINE PxMat33Padded(const PxQuat& q) { using namespace aos; const QuatV qV = V4LoadU(&q.x); Vec3V column0V, column1V, column2V; QuatGetMat33V(qV, column0V, column1V, column2V); #if defined(PX_SIMD_DISABLED) || (PX_LINUX && (PX_ARM || PX_A64)) V3StoreU(column0V, column0); V3StoreU(column1V, column1); V3StoreU(column2V, column2); #else V4StoreU(column0V, &column0.x); V4StoreU(column1V, &column1.x); V4StoreU(column2V, &column2.x); #endif } PX_FORCE_INLINE ~PxMat33Padded() {} PX_FORCE_INLINE void operator=(const PxMat33& other) { column0 = other.column0; column1 = other.column1; column2 = other.column2; } PxU32 padding; }; #if !PX_DOXYGEN namespace aos { #endif PX_FORCE_INLINE void transformKernelVec4( const FloatVArg wa, const Vec4VArg va, const Vec4VArg pa, const FloatVArg wb, const Vec4VArg vb, const Vec4VArg pb, FloatV& wo, Vec4V& vo, Vec4V& po) { wo = FSub(FMul(wa, wb), V4Dot3(va, vb)); vo = V4ScaleAdd(va, wb, V4ScaleAdd(vb, wa, V4Cross(va, vb))); const Vec4V t1 = V4Scale(pb, FScaleAdd(wa, wa, FLoad(-0.5f))); const Vec4V t2 = V4ScaleAdd(V4Cross(va, pb), wa, t1); const Vec4V t3 = V4ScaleAdd(va, V4Dot3(va, pb), t2); po = V4ScaleAdd(t3, FLoad(2.0f), pa); } // PT: out = a * b template<const bool alignedInput, const bool alignedOutput> PX_FORCE_INLINE void transformMultiply(PxTransform& out, const PxTransform& a, const PxTransform& b) { PX_ASSERT(!alignedInput || (size_t(&a)&15) == 0); PX_ASSERT(!alignedInput || (size_t(&b)&15) == 0); const Vec4V aPos = alignedInput ? 
V4LoadA(&a.p.x) : V4LoadU(&a.p.x); const Vec4V aRot = alignedInput ? V4LoadA(&a.q.x) : V4LoadU(&a.q.x); const Vec4V bPos = alignedInput ? V4LoadA(&b.p.x) : V4LoadU(&b.p.x); const Vec4V bRot = alignedInput ? V4LoadA(&b.q.x) : V4LoadU(&b.q.x); Vec4V v, p; FloatV w; transformKernelVec4(V4GetW(aRot), aRot, aPos, V4GetW(bRot), bRot, bPos, w, v, p); if(alignedOutput) { PX_ASSERT((size_t(&out)&15) == 0); V4StoreA(p, &out.p.x); V4StoreA(V4SetW(v,w), &out.q.x); } else { V4StoreU(p, &out.p.x); V4StoreU(V4SetW(v,w), &out.q.x); } } // PT: out = a * b PX_FORCE_INLINE void transformMultiply(PxTransform32& out, const PxTransform32& a, const PxTransform32& b) { transformMultiply<true, true>(out, a, b); } #if !PX_DOXYGEN } // namespace aos #endif #if !PX_DOXYGEN } // namespace physx #endif #endif
4,485
C
32.229629
107
0.702564
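A small sketch showing PxMat33Padded being built from a quaternion, which is its main purpose: the extra padding word lets the columns be written with unaligned 4-wide SIMD stores. The 90-degree rotation is just example data.

#include "foundation/PxSIMDHelpers.h"

void simdHelperExample()
{
	// Quaternion for a ~90 degree rotation about Z.
	const physx::PxQuat q(1.5707963f, physx::PxVec3(0.0f, 0.0f, 1.0f));

	// Converts the quaternion to a 3x3 rotation matrix via the SIMD kernel above.
	const physx::PxMat33Padded rot(q);

	const physx::PxVec3 rotated = rot.transform(physx::PxVec3(1.0f, 0.0f, 0.0f));  // ~ (0, 1, 0)
	(void)rotated;
}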
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxInlineAllocator.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_INLINE_ALLOCATOR_H #define PX_INLINE_ALLOCATOR_H #include "foundation/PxUserAllocated.h" #if !PX_DOXYGEN namespace physx { #endif // this is used by the array class to allocate some space for a small number // of objects along with the metadata template <PxU32 N, typename BaseAllocator> class PxInlineAllocator : private BaseAllocator { public: PxInlineAllocator(const PxEMPTY v) : BaseAllocator(v) { } PxInlineAllocator(const BaseAllocator& alloc = BaseAllocator()) : BaseAllocator(alloc), mBufferUsed(false) { } PxInlineAllocator(const PxInlineAllocator& aloc) : BaseAllocator(aloc), mBufferUsed(false) { } void* allocate(PxU32 size, const char* filename, PxI32 line) { if(!mBufferUsed && size <= N) { mBufferUsed = true; return mBuffer; } return BaseAllocator::allocate(size, filename, line); } void deallocate(void* ptr) { if(ptr == mBuffer) mBufferUsed = false; else BaseAllocator::deallocate(ptr); } PX_FORCE_INLINE PxU8* getInlineBuffer() { return mBuffer; } PX_FORCE_INLINE bool isBufferUsed() const { return mBufferUsed; } protected: PxU8 mBuffer[N]; bool mBufferUsed; }; #if !PX_DOXYGEN } // namespace physx #endif #endif
2,905
C
30.247312
107
0.747676
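A sketch of the buffer/fallback behaviour of PxInlineAllocator when used directly (normally it sits behind PxInlineArray): the first allocation that fits in N bytes comes from the embedded buffer, everything else goes to the base allocator. Assumes an initialized foundation for the PxAllocator fallback.

#include "foundation/PxInlineAllocator.h"
#include "foundation/PxAllocator.h"

void inlineAllocatorExample()
{
	physx::PxInlineAllocator<64, physx::PxAllocator> alloc;

	void* small = alloc.allocate(32, PX_FL);    // served from the 64-byte inline buffer
	void* large = alloc.allocate(256, PX_FL);   // too big (and buffer in use): heap via PxAllocator

	alloc.deallocate(large);
	alloc.deallocate(small);                    // releases the inline buffer for reuse
}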
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxInlineArray.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_INLINE_ARRAY_H #define PX_INLINE_ARRAY_H #include "foundation/PxArray.h" #include "foundation/PxInlineAllocator.h" #if !PX_DOXYGEN namespace physx { #endif // array that pre-allocates for N elements template <typename T, uint32_t N, typename Alloc = typename PxAllocatorTraits<T>::Type> class PxInlineArray : public PxArray<T, PxInlineAllocator<N * sizeof(T), Alloc> > { typedef PxInlineAllocator<N * sizeof(T), Alloc> Allocator; public: PxInlineArray(const PxEMPTY v) : PxArray<T, Allocator>(v) { if(isInlined()) this->mData = reinterpret_cast<T*>(PxArray<T, Allocator>::getInlineBuffer()); } PX_INLINE bool isInlined() const { return Allocator::isBufferUsed(); } PX_INLINE explicit PxInlineArray(const Alloc& alloc = Alloc()) : PxArray<T, Allocator>(alloc) { this->mData = this->allocate(N); this->mCapacity = N; } }; #if !PX_DOXYGEN } // namespace physx #endif #endif
2,609
C
36.285714
94
0.748563
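A short sketch of PxInlineArray: up to N elements live in the embedded buffer, and the first pushBack beyond that spills to the heap through the base allocator, after which isInlined() reports false. Assumes an initialized foundation for the heap path.

#include "foundation/PxInlineArray.h"

void inlineArrayExample()
{
	physx::PxInlineArray<int, 8> verts;

	for(int i = 0; i < 8; ++i)
		verts.pushBack(i);              // still in the inline buffer

	bool inlined = verts.isInlined();   // true
	verts.pushBack(8);                  // forces a heap reallocation
	inlined = verts.isInlined();        // false from here on
	(void)inlined;
}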
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxBroadcast.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_BROADCAST_H #define PX_BROADCAST_H #include "foundation/PxInlineArray.h" #include "foundation/PxSimpleTypes.h" #include "foundation/PxErrorCallback.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief Abstract listener class that listens to allocation and deallocation events from the foundation memory system. <b>Threading:</b> All methods of this class should be thread safe as it can be called from the user thread or the physics processing thread(s). */ class PxAllocationListener { public: /** \brief callback when memory is allocated. \param size Size of the allocation in bytes. \param typeName Type this data is being allocated for. \param filename File the allocation came from. \param line the allocation came from. \param allocatedMemory memory that will be returned from the allocation. */ virtual void onAllocation(size_t size, const char* typeName, const char* filename, int line, void* allocatedMemory) = 0; /** \brief callback when memory is deallocated. \param allocatedMemory memory just before allocation. */ virtual void onDeallocation(void* allocatedMemory) = 0; protected: virtual ~PxAllocationListener() { } }; /** \brief Broadcast class implementation, registering listeners. <b>Threading:</b> All methods of this class should be thread safe as it can be called from the user thread or the physics processing thread(s). There is not internal locking */ template <class Listener, class Base> class PxBroadcast : public Base { public: static const uint32_t MAX_NB_LISTENERS = 16; /** \brief The default constructor. */ PxBroadcast() { } /** \brief Register new listener. \note It is NOT SAFE to register and deregister listeners while allocations may be taking place. moreover, there is no thread safety to registration/deregistration. \param listener Listener to register. 
*/ void registerListener(Listener& listener) { if(mListeners.size() < MAX_NB_LISTENERS) mListeners.pushBack(&listener); } /** \brief Deregister an existing listener. \note It is NOT SAFE to register and deregister listeners while allocations may be taking place. moreover, there is no thread safety to registration/deregistration. \param listener Listener to deregister. */ void deregisterListener(Listener& listener) { mListeners.findAndReplaceWithLast(&listener); } /** \brief Get number of registered listeners. \return Number of listeners. */ uint32_t getNbListeners() const { return mListeners.size(); } /** \brief Get an existing listener from given index. \param index Index of the listener. \return Listener on given index. */ Listener& getListener(uint32_t index) { PX_ASSERT(index <= mListeners.size()); return *mListeners[index]; } protected: virtual ~PxBroadcast() { } physx::PxInlineArray<Listener*, MAX_NB_LISTENERS, physx::PxAllocator> mListeners; }; /** \brief Abstract base class for an application defined memory allocator that allows an external listener to audit the memory allocations. */ class PxBroadcastingAllocator : public PxBroadcast<PxAllocationListener, PxAllocatorCallback> { PX_NOCOPY(PxBroadcastingAllocator) public: /** \brief The default constructor. */ PxBroadcastingAllocator(PxAllocatorCallback& allocator, PxErrorCallback& error) : mAllocator(allocator), mError(error) { mListeners.clear(); } /** \brief The default constructor. */ virtual ~PxBroadcastingAllocator() { mListeners.clear(); } /** \brief Allocates size bytes of memory, which must be 16-byte aligned. This method should never return NULL. If you run out of memory, then you should terminate the app or take some other appropriate action. <b>Threading:</b> This function should be thread safe as it can be called in the context of the user thread and physics processing thread(s). \param size Number of bytes to allocate. \param typeName Name of the datatype that is being allocated \param filename The source file which allocated the memory \param line The source line which allocated the memory \return The allocated block of memory. */ void* allocate(size_t size, const char* typeName, const char* filename, int line) { void* mem = mAllocator.allocate(size, typeName, filename, line); if(!mem) { mError.reportError(PxErrorCode::eABORT, "User allocator returned NULL.", PX_FL); return NULL; } if((size_t(mem) & 15)) { mError.reportError(PxErrorCode::eABORT, "Allocations must be 16-byte aligned.", PX_FL); return NULL; } for(uint32_t i = 0; i < mListeners.size(); i++) mListeners[i]->onAllocation(size, typeName, filename, line, mem); return mem; } /** \brief Frees memory previously allocated by allocate(). <b>Threading:</b> This function should be thread safe as it can be called in the context of the user thread and physics processing thread(s). \param ptr Memory to free. */ void deallocate(void* ptr) { for(uint32_t i = 0; i < mListeners.size(); i++) { mListeners[i]->onDeallocation(ptr); } mAllocator.deallocate(ptr); } private: PxAllocatorCallback& mAllocator; PxErrorCallback& mError; }; /** \brief Abstract base class for an application defined error callback that allows an external listener to report errors. */ class PxBroadcastingErrorCallback : public PxBroadcast<PxErrorCallback, PxErrorCallback> { PX_NOCOPY(PxBroadcastingErrorCallback) public: /** \brief The default constructor. */ PxBroadcastingErrorCallback(PxErrorCallback& errorCallback) { registerListener(errorCallback); } /** \brief The default destructor. 
*/ virtual ~PxBroadcastingErrorCallback() { mListeners.clear(); } /** \brief Reports an error code. \param code Error code, see #PxErrorCode \param message Message to display. \param file File error occurred in. \param line Line number error occurred on. */ void reportError(PxErrorCode::Enum code, const char* message, const char* file, int line) { for(uint32_t i = 0; i < mListeners.size(); i++) mListeners[i]->reportError(code, message, file, line); } }; #if !PX_DOXYGEN } // namespace physx #endif #endif
7,838
C
27.299639
119
0.74075
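A sketch of a PxAllocationListener implementation that could be registered on a PxBroadcastingAllocator via registerListener(). CountingListener is illustrative; a real listener must be thread safe, since the callbacks can fire from any thread that allocates.

#include "foundation/PxBroadcast.h"

// Hypothetical listener that tracks the number of live allocations.
// NOTE: a plain int is NOT thread safe; a production listener should use an atomic counter.
class CountingListener : public physx::PxAllocationListener
{
public:
	CountingListener() : mLiveAllocations(0) {}

	virtual void onAllocation(size_t size, const char* typeName, const char* filename,
	                          int line, void* allocatedMemory)
	{
		(void)size; (void)typeName; (void)filename; (void)line; (void)allocatedMemory;
		++mLiveAllocations;
	}

	virtual void onDeallocation(void* allocatedMemory)
	{
		(void)allocatedMemory;
		--mLiveAllocations;
	}

	int mLiveAllocations;
};

// Registration sketch (not thread safe, do it before allocations start), assuming
// 'broadcastingAllocator' is an existing PxBroadcastingAllocator instance:
//   CountingListener listener;
//   broadcastingAllocator.registerListener(listener);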
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxFlags.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_FLAGS_H #define PX_FLAGS_H /** \addtogroup foundation @{ */ #include "foundation/Px.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief Container for bitfield flag variables associated with a specific enum type. This allows for type safe manipulation for bitfields. <h3>Example</h3> // enum that defines each bit... struct MyEnum { enum Enum { eMAN = 1, eBEAR = 2, ePIG = 4, }; }; // implements some convenient global operators. 
PX_FLAGS_OPERATORS(MyEnum::Enum, uint8_t); PxFlags<MyEnum::Enum, uint8_t> myFlags; myFlags |= MyEnum::eMAN; myFlags |= MyEnum::eBEAR | MyEnum::ePIG; if(myFlags & MyEnum::eBEAR) { doSomething(); } */ template <typename enumtype, typename storagetype = uint32_t> class PxFlags { public: typedef storagetype InternalType; PX_CUDA_CALLABLE PX_INLINE explicit PxFlags(const PxEMPTY) { } PX_CUDA_CALLABLE PX_INLINE PxFlags(void); PX_CUDA_CALLABLE PX_INLINE PxFlags(enumtype e); PX_CUDA_CALLABLE PX_INLINE PxFlags(const PxFlags<enumtype, storagetype>& f); PX_CUDA_CALLABLE PX_INLINE explicit PxFlags(storagetype b); PX_CUDA_CALLABLE PX_INLINE bool operator==(enumtype e) const; PX_CUDA_CALLABLE PX_INLINE bool operator==(const PxFlags<enumtype, storagetype>& f) const; PX_CUDA_CALLABLE PX_INLINE bool operator==(bool b) const; PX_CUDA_CALLABLE PX_INLINE bool operator!=(enumtype e) const; PX_CUDA_CALLABLE PX_INLINE bool operator!=(const PxFlags<enumtype, storagetype>& f) const; PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& operator=(const PxFlags<enumtype, storagetype>& f); PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& operator=(enumtype e); PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& operator|=(enumtype e); PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& operator|=(const PxFlags<enumtype, storagetype>& f); PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator|(enumtype e) const; PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator|(const PxFlags<enumtype, storagetype>& f) const; PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& operator&=(enumtype e); PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& operator&=(const PxFlags<enumtype, storagetype>& f); PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator&(enumtype e) const; PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator&(const PxFlags<enumtype, storagetype>& f) const; PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& operator^=(enumtype e); PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& operator^=(const PxFlags<enumtype, storagetype>& f); PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator^(enumtype e) const; PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator^(const PxFlags<enumtype, storagetype>& f) const; PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator~(void) const; PX_CUDA_CALLABLE PX_INLINE operator bool(void) const; PX_CUDA_CALLABLE PX_INLINE operator uint8_t(void) const; PX_CUDA_CALLABLE PX_INLINE operator uint16_t(void) const; PX_CUDA_CALLABLE PX_INLINE operator uint32_t(void) const; PX_CUDA_CALLABLE PX_INLINE void clear(enumtype e); PX_CUDA_CALLABLE PX_INLINE void raise(enumtype e); PX_CUDA_CALLABLE PX_INLINE bool isSet(enumtype e) const; PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& setAll(enumtype e); public: friend PX_INLINE PxFlags<enumtype, storagetype> operator&(enumtype a, PxFlags<enumtype, storagetype>& b) { PxFlags<enumtype, storagetype> out; out.mBits = a & b.mBits; return out; } private: storagetype mBits; }; #if !PX_DOXYGEN #define PX_FLAGS_OPERATORS(enumtype, storagetype) \ PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator|(enumtype a, enumtype b) \ { \ PxFlags<enumtype, storagetype> r(a); \ r |= b; \ return r; \ } \ PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator&(enumtype a, enumtype b) \ { \ PxFlags<enumtype, storagetype> r(a); \ r &= b; \ return r; \ } \ PX_CUDA_CALLABLE PX_INLINE 
PxFlags<enumtype, storagetype> operator~(enumtype a) \ { \ return ~PxFlags<enumtype, storagetype>(a); \ } #define PX_FLAGS_TYPEDEF(x, y) \ typedef PxFlags<x::Enum, y> x##s; \ PX_FLAGS_OPERATORS(x::Enum, y) template <typename enumtype, typename storagetype> PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>::PxFlags(void) { mBits = 0; } template <typename enumtype, typename storagetype> PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>::PxFlags(enumtype e) { mBits = static_cast<storagetype>(e); } template <typename enumtype, typename storagetype> PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>::PxFlags(const PxFlags<enumtype, storagetype>& f) { mBits = f.mBits; } template <typename enumtype, typename storagetype> PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>::PxFlags(storagetype b) { mBits = b; } template <typename enumtype, typename storagetype> PX_CUDA_CALLABLE PX_INLINE bool PxFlags<enumtype, storagetype>::operator==(enumtype e) const { return mBits == static_cast<storagetype>(e); } template <typename enumtype, typename storagetype> PX_CUDA_CALLABLE PX_INLINE bool PxFlags<enumtype, storagetype>::operator==(const PxFlags<enumtype, storagetype>& f) const { return mBits == f.mBits; } template <typename enumtype, typename storagetype> PX_CUDA_CALLABLE PX_INLINE bool PxFlags<enumtype, storagetype>::operator==(bool b) const { return bool(*this) == b; } template <typename enumtype, typename storagetype> PX_CUDA_CALLABLE PX_INLINE bool PxFlags<enumtype, storagetype>::operator!=(enumtype e) const { return mBits != static_cast<storagetype>(e); } template <typename enumtype, typename storagetype> PX_CUDA_CALLABLE PX_INLINE bool PxFlags<enumtype, storagetype>::operator!=(const PxFlags<enumtype, storagetype>& f) const { return mBits != f.mBits; } template <typename enumtype, typename storagetype> PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>::operator=(enumtype e) { mBits = static_cast<storagetype>(e); return *this; } template <typename enumtype, typename storagetype> PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>::operator=(const PxFlags<enumtype, storagetype>& f) { mBits = f.mBits; return *this; } template <typename enumtype, typename storagetype> PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>::operator|=(enumtype e) { mBits |= static_cast<storagetype>(e); return *this; } template <typename enumtype, typename storagetype> PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>:: operator|=(const PxFlags<enumtype, storagetype>& f) { mBits |= f.mBits; return *this; } template <typename enumtype, typename storagetype> PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> PxFlags<enumtype, storagetype>::operator|(enumtype e) const { PxFlags<enumtype, storagetype> out(*this); out |= e; return out; } template <typename enumtype, typename storagetype> PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> PxFlags<enumtype, storagetype>:: operator|(const PxFlags<enumtype, storagetype>& f) const { PxFlags<enumtype, storagetype> out(*this); out |= f; return out; } template <typename enumtype, typename storagetype> PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>::operator&=(enumtype e) { mBits &= static_cast<storagetype>(e); return *this; } template <typename enumtype, typename storagetype> PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& 
PxFlags<enumtype, storagetype>:: operator&=(const PxFlags<enumtype, storagetype>& f) { mBits &= f.mBits; return *this; } template <typename enumtype, typename storagetype> PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> PxFlags<enumtype, storagetype>::operator&(enumtype e) const { PxFlags<enumtype, storagetype> out = *this; out.mBits &= static_cast<storagetype>(e); return out; } template <typename enumtype, typename storagetype> PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> PxFlags<enumtype, storagetype>:: operator&(const PxFlags<enumtype, storagetype>& f) const { PxFlags<enumtype, storagetype> out = *this; out.mBits &= f.mBits; return out; } template <typename enumtype, typename storagetype> PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>::operator^=(enumtype e) { mBits ^= static_cast<storagetype>(e); return *this; } template <typename enumtype, typename storagetype> PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>:: operator^=(const PxFlags<enumtype, storagetype>& f) { mBits ^= f.mBits; return *this; } template <typename enumtype, typename storagetype> PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> PxFlags<enumtype, storagetype>::operator^(enumtype e) const { PxFlags<enumtype, storagetype> out = *this; out.mBits ^= static_cast<storagetype>(e); return out; } template <typename enumtype, typename storagetype> PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> PxFlags<enumtype, storagetype>:: operator^(const PxFlags<enumtype, storagetype>& f) const { PxFlags<enumtype, storagetype> out = *this; out.mBits ^= f.mBits; return out; } template <typename enumtype, typename storagetype> PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> PxFlags<enumtype, storagetype>::operator~(void) const { PxFlags<enumtype, storagetype> out; out.mBits = storagetype(~mBits); return out; } template <typename enumtype, typename storagetype> PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>::operator bool(void) const { return mBits ? true : false; } template <typename enumtype, typename storagetype> PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>::operator uint8_t(void) const { return static_cast<uint8_t>(mBits); } template <typename enumtype, typename storagetype> PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>::operator uint16_t(void) const { return static_cast<uint16_t>(mBits); } template <typename enumtype, typename storagetype> PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>::operator uint32_t(void) const { return static_cast<uint32_t>(mBits); } template <typename enumtype, typename storagetype> PX_CUDA_CALLABLE PX_INLINE void PxFlags<enumtype, storagetype>::clear(enumtype e) { mBits &= ~static_cast<storagetype>(e); } template <typename enumtype, typename storagetype> PX_CUDA_CALLABLE PX_INLINE void PxFlags<enumtype, storagetype>::raise(enumtype e) { mBits |= static_cast<storagetype>(e); } template <typename enumtype, typename storagetype> PX_CUDA_CALLABLE PX_INLINE bool PxFlags<enumtype, storagetype>::isSet(enumtype e) const { return (mBits & static_cast<storagetype>(e)) == static_cast<storagetype>(e); } template <typename enumtype, typename storagetype> PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>::setAll(enumtype e) { mBits = static_cast<storagetype>(e); return *this; } } // namespace physx #endif //!PX_DOXYGEN /** @} */ #endif
13,283
C
33.59375
141
0.737258
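A short sketch of the PxFlags container above, using a made-up enum; MyStateFlag and the helper functions are illustrative only:

#include "foundation/PxFlags.h"

using namespace physx;

struct MyStateFlag
{
    enum Enum
    {
        eVISIBLE  = 1 << 0,
        eACTIVE   = 1 << 1,
        eSELECTED = 1 << 2
    };
};

// Generates the MyStateFlags typedef plus the type-safe |, & and ~ operators on the enum.
PX_FLAGS_TYPEDEF(MyStateFlag, PxU8)

bool isVisibleAndActive(MyStateFlags flags)
{
    return flags.isSet(MyStateFlag::eVISIBLE) && flags.isSet(MyStateFlag::eACTIVE);
}

void flagsExample()
{
    MyStateFlags flags(MyStateFlag::eVISIBLE | MyStateFlag::eACTIVE);
    flags.raise(MyStateFlag::eSELECTED);   // set a single bit
    flags.clear(MyStateFlag::eACTIVE);     // clear a single bit
    const bool anySet = bool(flags);       // operator bool(): true while any bit is set
    PX_UNUSED(anySet);
}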
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxVec2.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_VEC2_H #define PX_VEC2_H /** \addtogroup foundation @{ */ #include "foundation/PxMath.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief 2 Element vector class. This is a 2-dimensional vector class with public data members. */ template<class Type> class PxVec2T { public: /** \brief default constructor leaves data uninitialized. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T() { } /** \brief zero constructor. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T(PxZERO) : x(Type(0.0)), y(Type(0.0)) { } /** \brief Assigns scalar parameter to all elements. Useful to initialize to zero or one. \param[in] a Value to assign to elements. */ explicit PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T(Type a) : x(a), y(a) { } /** \brief Initializes from 2 scalar parameters. \param[in] nx Value to initialize X component. \param[in] ny Value to initialize Y component. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T(Type nx, Type ny) : x(nx), y(ny) { } /** \brief Copy ctor. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T(const PxVec2T& v) : x(v.x), y(v.y) { } // Operators /** \brief Assignment operator */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T& operator=(const PxVec2T& p) { x = p.x; y = p.y; return *this; } /** \brief element access */ PX_CUDA_CALLABLE PX_FORCE_INLINE Type& operator[](unsigned int index) { PX_ASSERT(index <= 1); return reinterpret_cast<Type*>(this)[index]; } /** \brief element access */ PX_CUDA_CALLABLE PX_FORCE_INLINE const Type& operator[](unsigned int index) const { PX_ASSERT(index <= 1); return reinterpret_cast<const Type*>(this)[index]; } /** \brief returns true if the two vectors are exactly equal. */ PX_CUDA_CALLABLE PX_FORCE_INLINE bool operator==(const PxVec2T& v) const { return x == v.x && y == v.y; } /** \brief returns true if the two vectors are not exactly equal. 
*/ PX_CUDA_CALLABLE PX_FORCE_INLINE bool operator!=(const PxVec2T& v) const { return x != v.x || y != v.y; } /** \brief tests for exact zero vector */ PX_CUDA_CALLABLE PX_FORCE_INLINE bool isZero() const { return x == Type(0.0) && y == Type(0.0); } /** \brief returns true if all 2 elems of the vector are finite (not NAN or INF, etc.) */ PX_CUDA_CALLABLE PX_INLINE bool isFinite() const { return PxIsFinite(x) && PxIsFinite(y); } /** \brief is normalized - used by API parameter validation */ PX_CUDA_CALLABLE PX_FORCE_INLINE bool isNormalized() const { const Type unitTolerance = Type(1e-4); return isFinite() && PxAbs(magnitude() - Type(1.0)) < unitTolerance; } /** \brief returns the squared magnitude Avoids calling PxSqrt()! */ PX_CUDA_CALLABLE PX_FORCE_INLINE Type magnitudeSquared() const { return x * x + y * y; } /** \brief returns the magnitude */ PX_CUDA_CALLABLE PX_FORCE_INLINE Type magnitude() const { return PxSqrt(magnitudeSquared()); } /** \brief negation */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T operator-() const { return PxVec2T(-x, -y); } /** \brief vector addition */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T operator+(const PxVec2T& v) const { return PxVec2T(x + v.x, y + v.y); } /** \brief vector difference */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T operator-(const PxVec2T& v) const { return PxVec2T(x - v.x, y - v.y); } /** \brief scalar post-multiplication */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T operator*(Type f) const { return PxVec2T(x * f, y * f); } /** \brief scalar division */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T operator/(Type f) const { f = Type(1.0) / f; return PxVec2T(x * f, y * f); } /** \brief vector addition */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T& operator+=(const PxVec2T& v) { x += v.x; y += v.y; return *this; } /** \brief vector difference */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T& operator-=(const PxVec2T& v) { x -= v.x; y -= v.y; return *this; } /** \brief scalar multiplication */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T& operator*=(Type f) { x *= f; y *= f; return *this; } /** \brief scalar division */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T& operator/=(Type f) { f = Type(1.0) / f; x *= f; y *= f; return *this; } /** \brief returns the scalar product of this and other. */ PX_CUDA_CALLABLE PX_FORCE_INLINE Type dot(const PxVec2T& v) const { return x * v.x + y * v.y; } /** returns a unit vector */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T getNormalized() const { const Type m = magnitudeSquared(); return m > Type(0.0) ? *this * PxRecipSqrt(m) : PxVec2T(Type(0)); } /** \brief normalizes the vector in place */ PX_CUDA_CALLABLE PX_FORCE_INLINE Type normalize() { const Type m = magnitude(); if(m > Type(0.0)) *this /= m; return m; } /** \brief a[i] * b[i], for all i. 
*/ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T multiply(const PxVec2T& a) const { return PxVec2T(x * a.x, y * a.y); } /** \brief element-wise minimum */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T minimum(const PxVec2T& v) const { return PxVec2T(PxMin(x, v.x), PxMin(y, v.y)); } /** \brief returns MIN(x, y); */ PX_CUDA_CALLABLE PX_FORCE_INLINE Type minElement() const { return PxMin(x, y); } /** \brief element-wise maximum */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T maximum(const PxVec2T& v) const { return PxVec2T(PxMax(x, v.x), PxMax(y, v.y)); } /** \brief returns MAX(x, y); */ PX_CUDA_CALLABLE PX_FORCE_INLINE Type maxElement() const { return PxMax(x, y); } Type x, y; }; template<class Type> PX_CUDA_CALLABLE static PX_FORCE_INLINE PxVec2T<Type> operator*(Type f, const PxVec2T<Type>& v) { return PxVec2T<Type>(f * v.x, f * v.y); } typedef PxVec2T<float> PxVec2; typedef PxVec2T<double> PxVec2d; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
7,543
C
20.554286
95
0.674135
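A few illustrative calls against the PxVec2T interface above; the values are only for demonstration:

#include "foundation/PxVec2.h"

void vec2Example()
{
    using namespace physx;

    const PxVec2 a(3.0f, 4.0f);
    const PxVec2 b(1.0f, 0.0f);

    const float  len = a.magnitude();       // 5
    const float  dot = a.dot(b);            // 3
    const PxVec2 dir = a.getNormalized();   // (0.6, 0.8)
    const PxVec2 lo  = a.minimum(b);        // component-wise minimum: (1, 0)
    PX_UNUSED(len); PX_UNUSED(dot); PX_UNUSED(dir); PX_UNUSED(lo);
}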
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxPinnedArray.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_PINNED_ARRAY_H #define PX_PINNED_ARRAY_H #include "foundation/PxArray.h" #include "foundation/PxAllocator.h" #include "foundation/PxBounds3.h" #if !PX_DOXYGEN namespace physx { #endif template<class T> using PxPinnedArray = PxArray<T, PxVirtualAllocator>; typedef PxArray<PxBounds3, PxVirtualAllocator> PxBoundsArrayPinned; typedef PxArray<PxReal, PxVirtualAllocator> PxFloatArrayPinned; typedef PxArray<PxU32, PxVirtualAllocator> PxInt32ArrayPinned; typedef PxArray<PxU16, PxVirtualAllocator> PxInt16ArrayPinned; typedef PxArray<PxU8, PxVirtualAllocator> PxInt8ArrayPinned; #if !PX_DOXYGEN } // namespace physx #endif #endif
2,353
C
41.799999
74
0.775181
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxBitUtils.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_BIT_UTILS_H #define PX_BIT_UTILS_H #include "foundation/PxMathIntrinsics.h" #include "foundation/PxAssert.h" #include "foundation/PxIntrinsics.h" #include "foundation/PxMathIntrinsics.h" #if !PX_DOXYGEN namespace physx { #endif PX_INLINE uint32_t PxBitCount(uint32_t v) { // from http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel uint32_t const w = v - ((v >> 1) & 0x55555555); uint32_t const x = (w & 0x33333333) + ((w >> 2) & 0x33333333); return (((x + (x >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24; } PX_INLINE bool PxIsPowerOfTwo(uint32_t x) { return x != 0 && (x & (x - 1)) == 0; } // "Next Largest Power of 2 // Given a binary integer value x, the next largest power of 2 can be computed by a SWAR algorithm // that recursively "folds" the upper bits into the lower bits. This process yields a bit vector with // the same most significant 1 as x, but all 1's below it. Adding 1 to that value yields the next // largest power of 2. For a 32-bit value:" PX_INLINE uint32_t PxNextPowerOfTwo(uint32_t x) { x |= (x >> 1); x |= (x >> 2); x |= (x >> 4); x |= (x >> 8); x |= (x >> 16); return x + 1; } /*! Return the index of the highest set bit. Not valid for zero arg. */ PX_INLINE uint32_t PxLowestSetBit(uint32_t x) { PX_ASSERT(x); return PxLowestSetBitUnsafe(x); } /*! Return the index of the highest set bit. Not valid for zero arg. */ PX_INLINE uint32_t PxHighestSetBit(uint32_t x) { PX_ASSERT(x); return PxHighestSetBitUnsafe(x); } // Helper function to approximate log2 of an integer value // assumes that the input is actually power of two. PX_INLINE uint32_t PxILog2(uint32_t num) { for(uint32_t i = 0; i < 32; i++) { num >>= 1; if(num == 0) return i; } PX_ASSERT(0); return uint32_t(-1); } #if !PX_DOXYGEN } // namespace physx #endif #endif
3,517
C
30.981818
101
0.71652
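A handful of illustrative values for the bit utilities above; the asserts simply document expected results:

#include "foundation/PxBitUtils.h"

void bitUtilsExample()
{
    using namespace physx;

    PX_ASSERT(PxBitCount(0xF0u) == 4);         // four bits set
    PX_ASSERT(PxIsPowerOfTwo(64u));
    PX_ASSERT(!PxIsPowerOfTwo(48u));
    PX_ASSERT(PxNextPowerOfTwo(37u) == 64);    // folds the high bit downwards, then adds one
    PX_ASSERT(PxHighestSetBit(0x100u) == 8);   // index of the highest set bit
    PX_ASSERT(PxILog2(64u) == 6);              // input is assumed to be a power of two
}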
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxUserAllocated.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_USER_ALLOCATED_H #define PX_USER_ALLOCATED_H #include "PxAllocator.h" #if !PX_DOXYGEN namespace physx { #endif /** Provides new and delete using a UserAllocator. Guarantees that 'delete x;' uses the UserAllocator too. */ class PxUserAllocated { public: // PX_SERIALIZATION PX_INLINE void* operator new(size_t, void* address) { return address; } //~PX_SERIALIZATION // Matching operator delete to the above operator new. Don't ask me // how this makes any sense - Nuernberger. 
PX_INLINE void operator delete(void*, void*) { } template <typename Alloc> PX_INLINE void* operator new(size_t size, Alloc alloc, const char* fileName, int line) { return alloc.allocate(size, fileName, line); } template <typename Alloc> PX_INLINE void* operator new(size_t size, size_t /*align*/, Alloc alloc, const char* fileName, int line) { // align is not respected, we have 16bit aligned allocator return alloc.allocate(size, fileName, line); } template <typename Alloc> PX_INLINE void* operator new [](size_t size, Alloc alloc, const char* fileName, int line) { return alloc.allocate(size, fileName, line); } template <typename Alloc> PX_INLINE void* operator new [](size_t size, size_t /*align*/, Alloc alloc, const char* fileName, int line) { // align is not respected, we have 16bit aligned allocator return alloc.allocate(size, fileName, line); } // placement delete template <typename Alloc> PX_INLINE void operator delete(void* ptr, Alloc alloc, const char* fileName, int line) { PX_UNUSED(fileName); PX_UNUSED(line); alloc.deallocate(ptr); } template <typename Alloc> PX_INLINE void operator delete [](void* ptr, Alloc alloc, const char* fileName, int line) { PX_UNUSED(fileName); PX_UNUSED(line); alloc.deallocate(ptr); } PX_INLINE void operator delete(void* ptr) { PxAllocator().deallocate(ptr); } PX_INLINE void operator delete [](void* ptr) { PxAllocator().deallocate(ptr); } }; #if !PX_DOXYGEN } // namespace physx #endif #endif
3,782
C
31.333333
109
0.722898
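A sketch of allocating a PxUserAllocated-derived object through the class-scope placement operator new shown above. MyTask and the call site are illustrative, and passing a PxAllocator instance assumes its allocate(size, file, line) signature matches what the template operator new expects:

#include "foundation/PxUserAllocated.h"

class MyTask : public physx::PxUserAllocated
{
public:
    int mValue;
};

void userAllocatedExample()
{
    using namespace physx;

    // Route the allocation through a PxAllocator instance; file and line feed any auditing hooks.
    MyTask* task = new (PxAllocator(), __FILE__, __LINE__) MyTask();
    task->mValue = 42;

    // PxUserAllocated overrides operator delete, so the memory goes back through PxAllocator too.
    delete task;
}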
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxVecQuat.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_VEC_QUAT_H #define PX_VEC_QUAT_H #if !PX_DOXYGEN namespace physx { #endif namespace aos { #ifndef PX_PIDIV2 #define PX_PIDIV2 1.570796327f #endif ////////////////////////////////// // QuatV ////////////////////////////////// PX_FORCE_INLINE QuatV QuatVLoadXYZW(const PxF32 x, const PxF32 y, const PxF32 z, const PxF32 w) { return V4LoadXYZW(x, y, z, w); } PX_FORCE_INLINE QuatV QuatVLoadU(const PxF32* v) { return V4LoadU(v); } PX_FORCE_INLINE QuatV QuatVLoadA(const PxF32* v) { return V4LoadA(v); } PX_FORCE_INLINE QuatV QuatV_From_RotationAxisAngle(const Vec3V u, const FloatV a) { // q = cos(a/2) + u*sin(a/2) const FloatV half = FLoad(0.5f); const FloatV hangle = FMul(a, half); const FloatV piByTwo(FLoad(PX_PIDIV2)); const FloatV PiByTwoMinHangle(FSub(piByTwo, hangle)); const Vec4V hangle2(Vec4V_From_Vec3V(V3Merge(hangle, PiByTwoMinHangle, hangle))); /*const FloatV sina = FSin(hangle); const FloatV cosa = FCos(hangle);*/ const Vec4V _sina = V4Sin(hangle2); const FloatV sina = V4GetX(_sina); const FloatV cosa = V4GetY(_sina); const Vec3V v = V3Scale(u, sina); // return V4Sel(BTTTF(), Vec4V_From_Vec3V(v), V4Splat(cosa)); return V4SetW(Vec4V_From_Vec3V(v), cosa); } // Normalize PX_FORCE_INLINE QuatV QuatNormalize(const QuatV q) { return V4Normalize(q); } PX_FORCE_INLINE FloatV QuatLength(const QuatV q) { return V4Length(q); } PX_FORCE_INLINE FloatV QuatLengthSq(const QuatV q) { return V4LengthSq(q); } PX_FORCE_INLINE FloatV QuatDot(const QuatV a, const QuatV b) // convert this PxQuat to a unit quaternion { return V4Dot(a, b); } PX_FORCE_INLINE QuatV QuatConjugate(const QuatV q) { return V4SetW(V4Neg(q), V4GetW(q)); } PX_FORCE_INLINE Vec3V QuatGetImaginaryPart(const QuatV q) { return Vec3V_From_Vec4V(q); } /** brief computes rotation of x-axis */ PX_FORCE_INLINE Vec3V QuatGetBasisVector0(const QuatV q) { /*const PxF32 x2 = x*2.0f; const PxF32 
w2 = w*2.0f; return PxVec3( (w * w2) - 1.0f + x*x2, (z * w2) + y*x2, (-y * w2) + z*x2);*/ const FloatV two = FLoad(2.f); const FloatV w = V4GetW(q); const Vec3V u = Vec3V_From_Vec4V(q); const FloatV x2 = FMul(V3GetX(u), two); const FloatV w2 = FMul(w, two); const Vec3V a = V3Scale(u, x2); const Vec3V tmp = V3Merge(w, V3GetZ(u), FNeg(V3GetY(u))); // const Vec3V b = V3Scale(tmp, w2); // const Vec3V ab = V3Add(a, b); const Vec3V ab = V3ScaleAdd(tmp, w2, a); return V3SetX(ab, FSub(V3GetX(ab), FOne())); } /** brief computes rotation of y-axis */ PX_FORCE_INLINE Vec3V QuatGetBasisVector1(const QuatV q) { /*const PxF32 y2 = y*2.0f; const PxF32 w2 = w*2.0f; return PxVec3( (-z * w2) + x*y2, (w * w2) - 1.0f + y*y2, (x * w2) + z*y2);*/ const FloatV two = FLoad(2.f); const FloatV w = V4GetW(q); const Vec3V u = Vec3V_From_Vec4V(q); const FloatV y2 = FMul(V3GetY(u), two); const FloatV w2 = FMul(w, two); const Vec3V a = V3Scale(u, y2); const Vec3V tmp = V3Merge(FNeg(V3GetZ(u)), w, V3GetX(u)); // const Vec3V b = V3Scale(tmp, w2); // const Vec3V ab = V3Add(a, b); const Vec3V ab = V3ScaleAdd(tmp, w2, a); return V3SetY(ab, FSub(V3GetY(ab), FOne())); } /** brief computes rotation of z-axis */ PX_FORCE_INLINE Vec3V QuatGetBasisVector2(const QuatV q) { /*const PxF32 z2 = z*2.0f; const PxF32 w2 = w*2.0f; return PxVec3( (y * w2) + x*z2, (-x * w2) + y*z2, (w * w2) - 1.0f + z*z2);*/ const FloatV two = FLoad(2.f); const FloatV w = V4GetW(q); const Vec3V u = Vec3V_From_Vec4V(q); const FloatV z2 = FMul(V3GetZ(u), two); const FloatV w2 = FMul(w, two); const Vec3V a = V3Scale(u, z2); const Vec3V tmp = V3Merge(V3GetY(u), FNeg(V3GetX(u)), w); /*const Vec3V b = V3Scale(tmp, w2); const Vec3V ab = V3Add(a, b);*/ const Vec3V ab = V3ScaleAdd(tmp, w2, a); return V3SetZ(ab, FSub(V3GetZ(ab), FOne())); } PX_FORCE_INLINE Vec3V QuatRotate(const QuatV q, const Vec3V v) { /* const PxVec3 qv(x,y,z); return (v*(w*w-0.5f) + (qv.cross(v))*w + qv*(qv.dot(v)))*2; */ const FloatV two = FLoad(2.f); // const FloatV half = FloatV_From_F32(0.5f); const FloatV nhalf = FLoad(-0.5f); const Vec3V u = Vec3V_From_Vec4V(q); const FloatV w = V4GetW(q); // const FloatV w2 = FSub(FMul(w, w), half); const FloatV w2 = FScaleAdd(w, w, nhalf); const Vec3V a = V3Scale(v, w2); // const Vec3V b = V3Scale(V3Cross(u, v), w); // const Vec3V c = V3Scale(u, V3Dot(u, v)); // return V3Scale(V3Add(V3Add(a, b), c), two); const Vec3V temp = V3ScaleAdd(V3Cross(u, v), w, a); return V3Scale(V3ScaleAdd(u, V3Dot(u, v), temp), two); } PX_FORCE_INLINE Vec3V QuatTransform(const QuatV q, const Vec3V p, const Vec3V v) { // p + q.rotate(v) const FloatV two = FLoad(2.f); // const FloatV half = FloatV_From_F32(0.5f); const FloatV nhalf = FLoad(-0.5f); const Vec3V u = Vec3V_From_Vec4V(q); const FloatV w = V4GetW(q); // const FloatV w2 = FSub(FMul(w, w), half); const FloatV w2 = FScaleAdd(w, w, nhalf); const Vec3V a = V3Scale(v, w2); /*const Vec3V b = V3Scale(V3Cross(u, v), w); const Vec3V c = V3Scale(u, V3Dot(u, v)); return V3ScaleAdd(V3Add(V3Add(a, b), c), two, p);*/ const Vec3V temp = V3ScaleAdd(V3Cross(u, v), w, a); const Vec3V z = V3ScaleAdd(u, V3Dot(u, v), temp); return V3ScaleAdd(z, two, p); } PX_FORCE_INLINE Vec3V QuatRotateInv(const QuatV q, const Vec3V v) { // const PxVec3 qv(x,y,z); // return (v*(w*w-0.5f) - (qv.cross(v))*w + qv*(qv.dot(v)))*2; const FloatV two = FLoad(2.f); const FloatV nhalf = FLoad(-0.5f); const Vec3V u = Vec3V_From_Vec4V(q); const FloatV w = V4GetW(q); const FloatV w2 = FScaleAdd(w, w, nhalf); const Vec3V a = V3Scale(v, w2); /*const Vec3V b = 
V3Scale(V3Cross(u, v), w); const Vec3V c = V3Scale(u, V3Dot(u, v)); return V3Scale(V3Add(V3Sub(a, b), c), two);*/ const Vec3V temp = V3NegScaleSub(V3Cross(u, v), w, a); return V3Scale(V3ScaleAdd(u, V3Dot(u, v), temp), two); } PX_FORCE_INLINE QuatV QuatMul(const QuatV a, const QuatV b) { const Vec4V imagA = a; const Vec4V imagB = b; const FloatV rA = V4GetW(a); const FloatV rB = V4GetW(b); const FloatV real = FSub(FMul(rA, rB), V4Dot3(imagA, imagB)); const Vec4V v0 = V4Scale(imagA, rB); const Vec4V v1 = V4Scale(imagB, rA); const Vec4V v2 = V4Cross(imagA, imagB); const Vec4V imag = V4Add(V4Add(v0, v1), v2); return V4SetW(imag, real); } PX_FORCE_INLINE QuatV QuatAdd(const QuatV a, const QuatV b) { return V4Add(a, b); } PX_FORCE_INLINE QuatV QuatNeg(const QuatV q) { return V4Neg(q); } PX_FORCE_INLINE QuatV QuatSub(const QuatV a, const QuatV b) { return V4Sub(a, b); } PX_FORCE_INLINE QuatV QuatScale(const QuatV a, const FloatV b) { return V4Scale(a, b); } PX_FORCE_INLINE QuatV QuatMerge(const FloatV* const floatVArray) { return V4Merge(floatVArray); } PX_FORCE_INLINE QuatV QuatMerge(const FloatVArg x, const FloatVArg y, const FloatVArg z, const FloatVArg w) { return V4Merge(x, y, z, w); } PX_FORCE_INLINE QuatV QuatIdentity() { return V4SetW(V4Zero(), FOne()); } PX_FORCE_INLINE bool isFiniteQuatV(const QuatV q) { return isFiniteVec4V(q); } #if PX_LINUX && PX_CLANG #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wbitwise-instead-of-logical" // bitwise intentionally chosen for performance #endif PX_FORCE_INLINE bool isValidQuatV(const QuatV q) { const FloatV unitTolerance = FLoad(1e-4f); const FloatV tmp = FAbs(FSub(QuatLength(q), FOne())); const BoolV con = FIsGrtr(unitTolerance, tmp); return isFiniteVec4V(q) & (BAllEqTTTT(con) == 1); } PX_FORCE_INLINE bool isSaneQuatV(const QuatV q) { const FloatV unitTolerance = FLoad(1e-2f); const FloatV tmp = FAbs(FSub(QuatLength(q), FOne())); const BoolV con = FIsGrtr(unitTolerance, tmp); return isFiniteVec4V(q) & (BAllEqTTTT(con) == 1); } #if PX_LINUX && PX_CLANG #pragma clang diagnostic pop #endif PX_FORCE_INLINE Mat33V QuatGetMat33V(const QuatVArg q) { // const FloatV two = FloatV_From_F32(2.f); // const FloatV one = FOne(); // const FloatV x = V4GetX(q); // const FloatV y = V4GetY(q); // const FloatV z = V4GetZ(q); // const Vec4V _q = V4Mul(q, two); // ////const FloatV w = V4GetW(q); // const Vec4V t0 = V4Mul(_q, x); // 2xx, 2xy, 2xz, 2xw // const Vec4V t1 = V4Mul(_q, y); // 2xy, 2yy, 2yz, 2yw // const Vec4V t2 = V4Mul(_q, z); // 2xz, 2yz, 2zz, 2zw ////const Vec4V t3 = V4Mul(_q, w); // 2xw, 2yw, 2zw, 2ww // const FloatV xx2 = V4GetX(t0); // const FloatV xy2 = V4GetY(t0); // const FloatV xz2 = V4GetZ(t0); // const FloatV xw2 = V4GetW(t0); // const FloatV yy2 = V4GetY(t1); // const FloatV yz2 = V4GetZ(t1); // const FloatV yw2 = V4GetW(t1); // const FloatV zz2 = V4GetZ(t2); // const FloatV zw2 = V4GetW(t2); ////const FloatV ww2 = V4GetW(t3); // const FloatV c00 = FSub(one, FAdd(yy2, zz2)); // const FloatV c01 = FSub(xy2, zw2); // const FloatV c02 = FAdd(xz2, yw2); // const FloatV c10 = FAdd(xy2, zw2); // const FloatV c11 = FSub(one, FAdd(xx2, zz2)); // const FloatV c12 = FSub(yz2, xw2); // const FloatV c20 = FSub(xz2, yw2); // const FloatV c21 = FAdd(yz2, xw2); // const FloatV c22 = FSub(one, FAdd(xx2, yy2)); // const Vec3V c0 = V3Merge(c00, c10, c20); // const Vec3V c1 = V3Merge(c01, c11, c21); // const Vec3V c2 = V3Merge(c02, c12, c22); // return Mat33V(c0, c1, c2); const FloatV one = FOne(); const FloatV x = V4GetX(q); const FloatV y = 
V4GetY(q); const FloatV z = V4GetZ(q); const FloatV w = V4GetW(q); const FloatV x2 = FAdd(x, x); const FloatV y2 = FAdd(y, y); const FloatV z2 = FAdd(z, z); const FloatV xx = FMul(x2, x); const FloatV yy = FMul(y2, y); const FloatV zz = FMul(z2, z); const FloatV xy = FMul(x2, y); const FloatV xz = FMul(x2, z); const FloatV xw = FMul(x2, w); const FloatV yz = FMul(y2, z); const FloatV yw = FMul(y2, w); const FloatV zw = FMul(z2, w); const FloatV v = FSub(one, xx); const Vec3V column0 = V3Merge(FSub(FSub(one, yy), zz), FAdd(xy, zw), FSub(xz, yw)); const Vec3V column1 = V3Merge(FSub(xy, zw), FSub(v, zz), FAdd(yz, xw)); const Vec3V column2 = V3Merge(FAdd(xz, yw), FSub(yz, xw), FSub(v, yy)); return Mat33V(column0, column1, column2); } PX_FORCE_INLINE QuatV Mat33GetQuatV(const Mat33V& a) { const FloatV one = FOne(); const FloatV zero = FZero(); const FloatV half = FLoad(0.5f); const FloatV two = FLoad(2.f); const FloatV scale = FLoad(0.25f); const FloatV a00 = V3GetX(a.col0); const FloatV a11 = V3GetY(a.col1); const FloatV a22 = V3GetZ(a.col2); const FloatV a21 = V3GetZ(a.col1); // row=2, col=1; const FloatV a12 = V3GetY(a.col2); // row=1, col=2; const FloatV a02 = V3GetX(a.col2); // row=0, col=2; const FloatV a20 = V3GetZ(a.col0); // row=2, col=0; const FloatV a10 = V3GetY(a.col0); // row=1, col=0; const FloatV a01 = V3GetX(a.col1); // row=0, col=1; const Vec3V vec0 = V3Merge(a21, a02, a10); const Vec3V vec1 = V3Merge(a12, a20, a01); const Vec3V v = V3Sub(vec0, vec1); const Vec3V g = V3Add(vec0, vec1); const FloatV trace = FAdd(a00, FAdd(a11, a22)); if(FAllGrtrOrEq(trace, zero)) { const FloatV h = FSqrt(FAdd(trace, one)); const FloatV w = FMul(half, h); const FloatV s = FMul(half, FRecip(h)); const Vec3V u = V3Scale(v, s); return V4SetW(Vec4V_From_Vec3V(u), w); } else { const FloatV ntrace = FNeg(trace); const Vec3V d = V3Merge(a00, a11, a22); const BoolV con0 = BAllTrue3(V3IsGrtrOrEq(V3Splat(a00), d)); const BoolV con1 = BAllTrue3(V3IsGrtrOrEq(V3Splat(a11), d)); const FloatV t0 = FAdd(one, FScaleAdd(a00, two, ntrace)); const FloatV t1 = FAdd(one, FScaleAdd(a11, two, ntrace)); const FloatV t2 = FAdd(one, FScaleAdd(a22, two, ntrace)); const FloatV t = FSel(con0, t0, FSel(con1, t1, t2)); const FloatV h = FMul(two, FSqrt(t)); const FloatV s = FRecip(h); const FloatV g0 = FMul(scale, h); const Vec3V vs = V3Scale(v, s); const Vec3V gs = V3Scale(g, s); const FloatV gsx = V3GetX(gs); const FloatV gsy = V3GetY(gs); const FloatV gsz = V3GetZ(gs); // vs.x= (a21 - a12)*s; vs.y=(a02 - a20)*s; vs.z=(a10 - a01)*s; // gs.x= (a21 + a12)*s; gs.y=(a02 + a20)*s; gs.z=(a10 + a01)*s; const Vec4V v0 = V4Merge(g0, gsz, gsy, V3GetX(vs)); const Vec4V v1 = V4Merge(gsz, g0, gsx, V3GetY(vs)); const Vec4V v2 = V4Merge(gsy, gsx, g0, V3GetZ(vs)); return V4Sel(con0, v0, V4Sel(con1, v1, v2)); } } } // namespace aos #if !PX_DOXYGEN } // namespace physx #endif #endif
14,038
C
28.680761
111
0.660707
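A sketch of rotating a vector with the SIMD quaternion helpers above; it assumes foundation/PxVecMath.h provides the aos types and the load/store helpers used here:

#include "foundation/PxVecMath.h"

void quatVExample()
{
    using namespace physx;
    using namespace physx::aos;

    const Vec3V  axis  = V3LoadU(PxVec3(0.0f, 0.0f, 1.0f));    // rotation axis: +Z (unit length)
    const FloatV angle = FLoad(PxPi * 0.5f);                    // 90 degrees

    const QuatV q = QuatV_From_RotationAxisAngle(axis, angle);
    const Vec3V v = V3LoadU(PxVec3(1.0f, 0.0f, 0.0f));
    const Vec3V r = QuatRotate(q, v);                           // approximately (0, 1, 0)

    PxVec3 result;
    V3StoreU(r, result);
    PX_UNUSED(result);
}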
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxPlane.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_PLANE_H #define PX_PLANE_H /** \addtogroup foundation @{ */ #include "foundation/PxTransform.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief Representation of a plane. 
Plane equation used: n.dot(v) + d = 0 */ class PxPlane { public: /** \brief Constructor */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxPlane() { } /** \brief Constructor from a normal and a distance */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxPlane(float nx, float ny, float nz, float distance) : n(nx, ny, nz), d(distance) { } /** \brief Constructor from a normal and a distance */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxPlane(const PxVec3& normal, float distance) : n(normal), d(distance) { } /** \brief Constructor from a point on the plane and a normal */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxPlane(const PxVec3& point, const PxVec3& normal) : n(normal), d(-point.dot(n)) // p satisfies normal.dot(p) + d = 0 { } /** \brief Constructor from three points */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxPlane(const PxVec3& p0, const PxVec3& p1, const PxVec3& p2) { n = (p1 - p0).cross(p2 - p0).getNormalized(); d = -p0.dot(n); } /** \brief returns true if the two planes are exactly equal */ PX_CUDA_CALLABLE PX_INLINE bool operator==(const PxPlane& p) const { return n == p.n && d == p.d; } PX_CUDA_CALLABLE PX_FORCE_INLINE float distance(const PxVec3& p) const { return p.dot(n) + d; } PX_CUDA_CALLABLE PX_FORCE_INLINE bool contains(const PxVec3& p) const { return PxAbs(distance(p)) < (1.0e-7f); } /** \brief projects p into the plane */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 project(const PxVec3& p) const { return p - n * distance(p); } /** \brief find an arbitrary point in the plane */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 pointInPlane() const { return -n * d; } /** \brief equivalent plane with unit normal */ PX_CUDA_CALLABLE PX_FORCE_INLINE void normalize() { float denom = 1.0f / n.magnitude(); n *= denom; d *= denom; } /** \brief transform plane */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxPlane transform(const PxTransform& pose) const { const PxVec3 transformedNormal = pose.rotate(n); return PxPlane(transformedNormal, d - pose.p.dot(transformedNormal)); } /** \brief inverse-transform plane */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxPlane inverseTransform(const PxTransform& pose) const { const PxVec3 transformedNormal = pose.rotateInv(n); return PxPlane(transformedNormal, d + pose.p.dot(n)); } PxVec3 n; //!< The normal to the plane float d; //!< The distance from the origin }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
4,338
C
25.619632
116
0.709083
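A small sketch of the PxPlane interface above; the three points and the query point are illustrative values only:

#include "foundation/PxPlane.h"

void planeExample()
{
    using namespace physx;

    // Plane through three points lying in the XZ plane; with this winding the normal is +Y and d = 0.
    const PxPlane ground(PxVec3(0.0f, 0.0f, 0.0f),
                         PxVec3(0.0f, 0.0f, 1.0f),
                         PxVec3(1.0f, 0.0f, 0.0f));

    const PxVec3 p(2.0f, 3.0f, -1.0f);
    const float  signedDist = ground.distance(p);   // n.dot(p) + d = 3
    const PxVec3 onPlane    = ground.project(p);    // (2, 0, -1)
    PX_UNUSED(signedDist);
    PX_UNUSED(onPlane);
}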
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxVecMathAoSScalarInline.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_VEC_MATH_AOS_SCALAR_INLINE_H #define PX_VEC_MATH_AOS_SCALAR_INLINE_H #if COMPILE_VECTOR_INTRINSICS #error Scalar version should not be included when using vector intrinsics. 
#endif #if PX_GCC_FAMILY #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wstrict-aliasing" #endif #if !PX_DOXYGEN namespace physx { #endif namespace aos { #define BOOL_TO_U32(b) PxU32(- PxI32(b)) #define TRUE_TO_U32 PxU32(-1) #define FALSE_TO_U32 PxU32(0) #define BOOL_TO_U16(b) PxU16(- PxI32(b)) #define PX_VECMATH_ASSERT_ENABLED 0 #if PX_VECMATH_ASSERT_ENABLED #define VECMATHAOS_ASSERT(x) { PX_ASSERT(x); } #else #define VECMATHAOS_ASSERT(x) #endif ///////////////////////////////////////////////////////////////////// ////INTERNAL USE ONLY AND TESTS ///////////////////////////////////////////////////////////////////// namespace internalScalarSimd { PX_FORCE_INLINE PxF32 FStore(const FloatV a) { return a.x; } PX_FORCE_INLINE bool hasZeroElementInFloatV(const FloatV a) { return (0 == a.x); } PX_FORCE_INLINE bool hasZeroElementInVec3V(const Vec3V a) { return (0 == a.x || 0 == a.y || 0 == a.z); } PX_FORCE_INLINE bool hasZeroElementInVec4V(const Vec4V a) { return (0 == a.x || 0 == a.y || 0 == a.z || 0 == a.w); } } namespace vecMathTests { // PT: this function returns an invalid Vec3V (W!=0.0f) just for unit-testing 'isValidVec3V' PX_FORCE_INLINE Vec3V getInvalidVec3V() { Vec3V tmp; tmp.x = tmp.y = tmp.z = 0.0f; tmp.pad = 1.0f; return tmp; } PX_FORCE_INLINE bool allElementsEqualFloatV(const FloatV a, const FloatV b) { return (a.x == b.x); } PX_FORCE_INLINE bool allElementsEqualVec3V(const Vec3V a, const Vec3V b) { return (a.x == b.x && a.y == b.y && a.z == b.z); } PX_FORCE_INLINE bool allElementsEqualVec4V(const Vec4V a, const Vec4V b) { return (a.x == b.x && a.y == b.y && a.z == b.z && a.w == b.w); } PX_FORCE_INLINE bool allElementsEqualBoolV(const BoolV a, const BoolV b) { return (a.ux == b.ux && a.uy == b.uy && a.uz == b.uz && a.uw == b.uw); } PX_FORCE_INLINE bool allElementsEqualVecU32V(const VecU32V a, const VecU32V b) { return (a.u32[0] == b.u32[0] && a.u32[1] == b.u32[1] && a.u32[2] == b.u32[2] && a.u32[3] == b.u32[3]); } PX_FORCE_INLINE bool allElementsEqualVecI32V(const VecI32V a, const VecI32V b) { return (a.i32[0] == b.i32[0] && a.i32[1] == b.i32[1] && a.i32[2] == b.i32[2] && a.i32[3] == b.i32[3]); } #define VECMATH_AOS_EPSILON (1e-3f) PX_FORCE_INLINE bool allElementsNearEqualFloatV(const FloatV a, const FloatV b) { const PxF32 cx = a.x - b.x; return (cx > -VECMATH_AOS_EPSILON && cx < VECMATH_AOS_EPSILON); } PX_FORCE_INLINE bool allElementsNearEqualVec3V(const Vec3V a, const Vec3V b) { const PxF32 cx = a.x - b.x; const PxF32 cy = a.y - b.y; const PxF32 cz = a.z - b.z; return (cx > -VECMATH_AOS_EPSILON && cx < VECMATH_AOS_EPSILON && cy > -VECMATH_AOS_EPSILON && cy < VECMATH_AOS_EPSILON && cz > -VECMATH_AOS_EPSILON && cz < VECMATH_AOS_EPSILON); } PX_FORCE_INLINE bool allElementsNearEqualVec4V(const Vec4V a, const Vec4V b) { const PxF32 cx = a.x - b.x; const PxF32 cy = a.y - b.y; const PxF32 cz = a.z - b.z; const PxF32 cw = a.w - b.w; return (cx > -VECMATH_AOS_EPSILON && cx < VECMATH_AOS_EPSILON && cy > -VECMATH_AOS_EPSILON && cy < VECMATH_AOS_EPSILON && cz > -VECMATH_AOS_EPSILON && cz < VECMATH_AOS_EPSILON && cw > -VECMATH_AOS_EPSILON && cw < VECMATH_AOS_EPSILON); } } /////////////////////////////////////////////////////// PX_FORCE_INLINE bool isValidVec3V(const Vec3V a) { return a.pad == 0.f; } PX_FORCE_INLINE bool isFiniteFloatV(const FloatV a) { return PxIsFinite(a.x); } PX_FORCE_INLINE bool isFiniteVec3V(const Vec3V a) { return PxIsFinite(a.x) && PxIsFinite(a.y) && PxIsFinite(a.z); } PX_FORCE_INLINE bool isFiniteVec4V(const Vec4V a) { return PxIsFinite(a.x) && 
PxIsFinite(a.y) && PxIsFinite(a.z) && PxIsFinite(a.w); } ///////////////////////////////////////////////////////////////////// ////VECTORISED FUNCTION IMPLEMENTATIONS ///////////////////////////////////////////////////////////////////// PX_FORCE_INLINE FloatV FLoad(const PxF32 f) { return FloatV(f); } PX_FORCE_INLINE Vec3V V3Load(const PxF32 f) { return Vec3V(f, f, f); } PX_FORCE_INLINE Vec4V V4Load(const PxF32 f) { return Vec4V(f, f, f, f); } PX_FORCE_INLINE BoolV BLoad(const bool f) { #if PX_ARM // SD: Android ARM builds fail if this is done with a cast. // Might also fail because of something else but the select // operator here seems to fix everything that failed in release builds. return f ? BTTTT() : BFFFF(); #else return BoolV(BOOL_TO_U32(f), BOOL_TO_U32(f), BOOL_TO_U32(f), BOOL_TO_U32(f)); #endif } PX_FORCE_INLINE Vec3V V3LoadA(const PxVec3& f) { return Vec3V(f.x, f.y, f.z); } PX_FORCE_INLINE Vec3V V3LoadU(const PxVec3& f) { return Vec3V(f.x, f.y, f.z); } PX_FORCE_INLINE Vec3V V3LoadUnsafeA(const PxVec3& f) { return Vec3V(f.x, f.y, f.z); } PX_FORCE_INLINE Vec3V V3LoadA(const PxF32* const f) { return Vec3V(f[0], f[1], f[2]); } PX_FORCE_INLINE Vec3V V3LoadU(const PxF32* const f) { return Vec3V(f[0], f[1], f[2]); } PX_FORCE_INLINE Vec3V Vec3V_From_Vec4V(Vec4V f) { return Vec3V(f.x, f.y, f.z); } PX_FORCE_INLINE Vec3V Vec3V_From_Vec4V_WUndefined(const Vec4V v) { return Vec3V(v.x, v.y, v.z); } PX_FORCE_INLINE Vec4V Vec4V_From_Vec3V(Vec3V f) { return Vec4V(f.x, f.y, f.z, 0.0f); } PX_FORCE_INLINE Vec4V Vec4V_From_FloatV(FloatV f) { return Vec4V(f.x, f.x, f.x, f.x); } PX_FORCE_INLINE Vec3V Vec3V_From_FloatV(FloatV f) { return Vec3V(f.x, f.x, f.x); } PX_FORCE_INLINE Vec3V Vec3V_From_FloatV_WUndefined(FloatV f) { return Vec3V(f.x, f.x, f.x); } PX_FORCE_INLINE Vec4V V4LoadA(const PxF32* const f) { return Vec4V(f[0], f[1], f[2], f[3]); } PX_FORCE_INLINE void V4StoreA(const Vec4V a, PxF32* f) { *reinterpret_cast<Vec4V*>(f) = a; } PX_FORCE_INLINE void V4StoreU(const Vec4V a, PxF32* f) { *reinterpret_cast<PxVec4*>(f) = *reinterpret_cast<const PxVec4*>(&a.x); } PX_FORCE_INLINE void BStoreA(const BoolV a, PxU32* f) { *reinterpret_cast<BoolV*>(f) = a; } PX_FORCE_INLINE void U4StoreA(const VecU32V uv, PxU32* u) { *reinterpret_cast<VecU32V*>(u) = uv; } PX_FORCE_INLINE void I4StoreA(const VecI32V iv, PxI32* i) { *reinterpret_cast<VecI32V*>(i) = iv; } PX_FORCE_INLINE Vec4V V4LoadU(const PxF32* const f) { return Vec4V(f[0], f[1], f[2], f[3]); } PX_FORCE_INLINE Vec4V Vec4V_From_PxVec3_WUndefined(const PxVec3& f) { return Vec4V(f[0], f[1], f[2], 0.0f); } PX_FORCE_INLINE BoolV BLoad(const bool* const f) { return BoolV(BOOL_TO_U32(f[0]), BOOL_TO_U32(f[1]), BOOL_TO_U32(f[2]), BOOL_TO_U32(f[3])); } PX_FORCE_INLINE void FStore(const FloatV a, PxF32* PX_RESTRICT f) { *f = a.x; } PX_FORCE_INLINE void V3StoreA(const Vec3V a, PxVec3& f) { f = PxVec3(a.x, a.y, a.z); } PX_FORCE_INLINE void V3StoreU(const Vec3V a, PxVec3& f) { f = PxVec3(a.x, a.y, a.z); } PX_FORCE_INLINE void Store_From_BoolV(const BoolV b, PxU32* b2) { *b2 = b.ux; } ////////////////////////// // FLOATV ////////////////////////// PX_FORCE_INLINE FloatV FZero() { return FLoad(0.0f); } PX_FORCE_INLINE FloatV FOne() { return FLoad(1.0f); } PX_FORCE_INLINE FloatV FHalf() { return FLoad(0.5f); } PX_FORCE_INLINE FloatV FEps() { return FLoad(PX_EPS_REAL); } PX_FORCE_INLINE FloatV FEps6() { return FLoad(1e-6f); } //! @cond PX_FORCE_INLINE FloatV FMax() { return FLoad(PX_MAX_REAL); } //! 
@endcond PX_FORCE_INLINE FloatV FNegMax() { return FLoad(-PX_MAX_REAL); } PX_FORCE_INLINE FloatV FNeg(const FloatV f) { return FloatV(-f.x); } PX_FORCE_INLINE FloatV FAdd(const FloatV a, const FloatV b) { return FloatV(a.x + b.x); } PX_FORCE_INLINE FloatV FSub(const FloatV a, const FloatV b) { return FloatV(a.x - b.x); } PX_FORCE_INLINE FloatV FMul(const FloatV a, const FloatV b) { return FloatV(a.x * b.x); } PX_FORCE_INLINE FloatV FDiv(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(b.x != 0.0f); return FloatV(a.x / b.x); } PX_FORCE_INLINE FloatV FDivFast(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(b.x != 0.0f); return FloatV(a.x / b.x); } PX_FORCE_INLINE FloatV FRecip(const FloatV a) { VECMATHAOS_ASSERT(a.x != 0.0f); return 1.0f / a.x; } PX_FORCE_INLINE FloatV FRecipFast(const FloatV a) { VECMATHAOS_ASSERT(a.x != 0.0f); return 1.0f / a.x; } PX_FORCE_INLINE FloatV FRsqrt(const FloatV a) { VECMATHAOS_ASSERT(a.x != 0.0f); return PxRecipSqrt(a.x); } PX_FORCE_INLINE FloatV FSqrt(const FloatV a) { return PxSqrt(a.x); } PX_FORCE_INLINE FloatV FRsqrtFast(const FloatV a) { VECMATHAOS_ASSERT(a.x != 0.0f); return PxRecipSqrt(a.x); } PX_FORCE_INLINE FloatV FScaleAdd(const FloatV a, const FloatV b, const FloatV c) { return FAdd(FMul(a, b), c); } PX_FORCE_INLINE FloatV FNegScaleSub(const FloatV a, const FloatV b, const FloatV c) { return FSub(c, FMul(a, b)); } PX_FORCE_INLINE FloatV FAbs(const FloatV a) { return FloatV(PxAbs(a.x)); } PX_FORCE_INLINE FloatV FSel(const BoolV c, const FloatV a, const FloatV b) { return FloatV(c.ux ? a.x : b.x); } PX_FORCE_INLINE BoolV FIsGrtr(const FloatV a, const FloatV b) { return BLoad(a.x > b.x); } PX_FORCE_INLINE BoolV FIsGrtrOrEq(const FloatV a, const FloatV b) { return BLoad(a.x >= b.x); } PX_FORCE_INLINE BoolV FIsEq(const FloatV a, const FloatV b) { return BLoad(a.x == b.x); } PX_FORCE_INLINE FloatV FMax(const FloatV a, const FloatV b) { return (a.x > b.x ? FloatV(a.x) : FloatV(b.x)); } PX_FORCE_INLINE FloatV FMin(const FloatV a, const FloatV b) { return (a.x > b.x ? 
FloatV(b.x) : FloatV(a.x)); } PX_FORCE_INLINE FloatV FClamp(const FloatV a, const FloatV minV, const FloatV maxV) { return FMax(FMin(a, maxV), minV); } PX_FORCE_INLINE PxU32 FAllGrtr(const FloatV a, const FloatV b) { return BOOL_TO_U32(a.x > b.x); } PX_FORCE_INLINE PxU32 FAllGrtrOrEq(const FloatV a, const FloatV b) { return BOOL_TO_U32(a.x >= b.x); } PX_FORCE_INLINE PxU32 FAllEq(const FloatV a, const FloatV b) { return BOOL_TO_U32(a.x == b.x); } PX_FORCE_INLINE FloatV FRound(const FloatV a) { return floorf(a.x + 0.5f); } PX_FORCE_INLINE FloatV FSin(const FloatV a) { return sinf(a.x); } PX_FORCE_INLINE FloatV FCos(const FloatV a) { return cosf(a.x); } PX_FORCE_INLINE PxU32 FOutOfBounds(const FloatV a, const FloatV min, const FloatV max) { return BOOL_TO_U32(a.x > max.x || a.x < min.x); } PX_FORCE_INLINE PxU32 FInBounds(const FloatV a, const FloatV min, const FloatV max) { return BOOL_TO_U32(a.x >= min.x && a.x <= max.x); } PX_FORCE_INLINE PxU32 FOutOfBounds(const FloatV a, const FloatV bounds) { return FOutOfBounds(a, FNeg(bounds), bounds); } PX_FORCE_INLINE PxU32 FInBounds(const FloatV a, const FloatV bounds) { return FInBounds(a, FNeg(bounds), bounds); } ///////////////////// // VEC3V ///////////////////// PX_FORCE_INLINE Vec3V V3Splat(const FloatV f) { return Vec3V(f.x, f.x, f.x); } PX_FORCE_INLINE Vec3V V3Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z) { return Vec3V(x.x, y.x, z.x); } PX_FORCE_INLINE Vec3V V3UnitX() { return Vec3V(1.0f, 0.0f, 0.0f); } PX_FORCE_INLINE Vec3V V3UnitY() { return Vec3V(0.0f, 1.0f, 0.0f); } PX_FORCE_INLINE Vec3V V3UnitZ() { return Vec3V(0.0f, 0.0f, 1.0f); } PX_FORCE_INLINE FloatV V3GetX(const Vec3V f) { return FloatV(f.x); } PX_FORCE_INLINE FloatV V3GetY(const Vec3V f) { return FloatV(f.y); } PX_FORCE_INLINE FloatV V3GetZ(const Vec3V f) { return FloatV(f.z); } PX_FORCE_INLINE Vec3V V3SetX(const Vec3V v, const FloatV f) { return Vec3V(f.x, v.y, v.z); } PX_FORCE_INLINE Vec3V V3SetY(const Vec3V v, const FloatV f) { return Vec3V(v.x, f.x, v.z); } PX_FORCE_INLINE Vec3V V3SetZ(const Vec3V v, const FloatV f) { return Vec3V(v.x, v.y, f.x); } PX_FORCE_INLINE Vec3V V3ColX(const Vec3V a, const Vec3V b, const Vec3V c) { return Vec3V(a.x, b.x, c.x); } PX_FORCE_INLINE Vec3V V3ColY(const Vec3V a, const Vec3V b, const Vec3V c) { return Vec3V(a.y, b.y, c.y); } PX_FORCE_INLINE Vec3V V3ColZ(const Vec3V a, const Vec3V b, const Vec3V c) { return Vec3V(a.z, b.z, c.z); } PX_FORCE_INLINE Vec3V V3Zero() { return V3Load(0.0f); } PX_FORCE_INLINE Vec3V V3One() { return V3Load(1.0f); } PX_FORCE_INLINE Vec3V V3Eps() { return V3Load(PX_EPS_REAL); } PX_FORCE_INLINE Vec3V V3Neg(const Vec3V c) { return Vec3V(-c.x, -c.y, -c.z); } PX_FORCE_INLINE Vec3V V3Add(const Vec3V a, const Vec3V b) { return Vec3V(a.x + b.x, a.y + b.y, a.z + b.z); } PX_FORCE_INLINE Vec3V V3Sub(const Vec3V a, const Vec3V b) { return Vec3V(a.x - b.x, a.y - b.y, a.z - b.z); } PX_FORCE_INLINE Vec3V V3Scale(const Vec3V a, const FloatV b) { return Vec3V(a.x * b.x, a.y * b.x, a.z * b.x); } PX_FORCE_INLINE Vec3V V3Mul(const Vec3V a, const Vec3V b) { return Vec3V(a.x * b.x, a.y * b.y, a.z * b.z); } PX_FORCE_INLINE Vec3V V3ScaleInv(const Vec3V a, const FloatV b) { const PxF32 bInv = 1.0f / b.x; return Vec3V(a.x * bInv, a.y * bInv, a.z * bInv); } PX_FORCE_INLINE Vec3V V3Div(const Vec3V a, const Vec3V b) { return Vec3V(a.x / b.x, a.y / b.y, a.z / b.z); } PX_FORCE_INLINE Vec3V V3ScaleInvFast(const Vec3V a, const FloatV b) { const PxF32 bInv = 1.0f / b.x; return Vec3V(a.x * bInv, a.y * bInv, a.z * bInv); } PX_FORCE_INLINE 
Vec3V V3DivFast(const Vec3V a, const Vec3V b) { return Vec3V(a.x / b.x, a.y / b.y, a.z / b.z); } PX_FORCE_INLINE Vec3V V3Recip(const Vec3V a) { return Vec3V(1.0f / a.x, 1.0f / a.y, 1.0f / a.z); } PX_FORCE_INLINE Vec3V V3RecipFast(const Vec3V a) { return Vec3V(1.0f / a.x, 1.0f / a.y, 1.0f / a.z); } PX_FORCE_INLINE Vec3V V3Rsqrt(const Vec3V a) { return Vec3V(PxRecipSqrt(a.x), PxRecipSqrt(a.y), PxRecipSqrt(a.z)); } PX_FORCE_INLINE Vec3V V3RsqrtFast(const Vec3V a) { return Vec3V(PxRecipSqrt(a.x), PxRecipSqrt(a.y), PxRecipSqrt(a.z)); } PX_FORCE_INLINE Vec3V V3ScaleAdd(const Vec3V a, const FloatV b, const Vec3V c) { return V3Add(V3Scale(a, b), c); } PX_FORCE_INLINE Vec3V V3NegScaleSub(const Vec3V a, const FloatV b, const Vec3V c) { return V3Sub(c, V3Scale(a, b)); } PX_FORCE_INLINE Vec3V V3MulAdd(const Vec3V a, const Vec3V b, const Vec3V c) { return V3Add(V3Mul(a, b), c); } PX_FORCE_INLINE Vec3V V3NegMulSub(const Vec3V a, const Vec3V b, const Vec3V c) { return V3Sub(c, V3Mul(a, b)); } PX_FORCE_INLINE FloatV V3Dot(const Vec3V a, const Vec3V b) { return FloatV(a.x * b.x + a.y * b.y + a.z * b.z); } PX_FORCE_INLINE VecCrossV V3PrepareCross(const Vec3VArg normal) { return normal; } PX_FORCE_INLINE Vec3V V3Cross(const Vec3V a, const Vec3V b) { return Vec3V(a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x); } PX_FORCE_INLINE FloatV V3Length(const Vec3V a) { return FloatV(PxSqrt(a.x * a.x + a.y * a.y + a.z * a.z)); } PX_FORCE_INLINE FloatV V3LengthSq(const Vec3V a) { return FloatV(a.x * a.x + a.y * a.y + a.z * a.z); } PX_FORCE_INLINE Vec3V V3Normalize(const Vec3V a) { VECMATHAOS_ASSERT(a.x != 0 || a.y != 0 || a.z != 0); const PxF32 lengthInv = 1.0f / PxSqrt(a.x * a.x + a.y * a.y + a.z * a.z); return Vec3V(a.x * lengthInv, a.y * lengthInv, a.z * lengthInv); } PX_FORCE_INLINE Vec3V V3NormalizeSafe(const Vec3V a, const Vec3V unsafeReturnValue) { const PxF32 length = PxSqrt(a.x * a.x + a.y * a.y + a.z * a.z); if(PX_EPS_REAL >= length) { return unsafeReturnValue; } else { const PxF32 lengthInv = 1.0f / length; return Vec3V(a.x * lengthInv, a.y * lengthInv, a.z * lengthInv); } } PX_FORCE_INLINE Vec3V V3NormalizeFast(const Vec3V a) { VECMATHAOS_ASSERT(a.x != 0 || a.y != 0 || a.z != 0); const PxF32 lengthInv = 1.0f / PxSqrt(a.x * a.x + a.y * a.y + a.z * a.z); return Vec3V(a.x * lengthInv, a.y * lengthInv, a.z * lengthInv); } PX_FORCE_INLINE Vec3V V3Sel(const BoolV c, const Vec3V a, const Vec3V b) { return Vec3V(c.ux ? a.x : b.x, c.uy ? a.y : b.y, c.uz ? a.z : b.z); } PX_FORCE_INLINE BoolV V3IsGrtr(const Vec3V a, const Vec3V b) { return BoolV(BOOL_TO_U32(a.x > b.x), BOOL_TO_U32(a.y > b.y), BOOL_TO_U32(a.z > b.z), FALSE_TO_U32); } PX_FORCE_INLINE BoolV V3IsGrtrOrEq(const Vec3V a, const Vec3V b) { return BoolV(BOOL_TO_U32(a.x >= b.x), BOOL_TO_U32(a.y >= b.y), BOOL_TO_U32(a.z >= b.z), TRUE_TO_U32); } PX_FORCE_INLINE BoolV V3IsEq(const Vec3V a, const Vec3V b) { return BoolV(BOOL_TO_U32(a.x == b.x), BOOL_TO_U32(a.y == b.y), BOOL_TO_U32(a.z == b.z), TRUE_TO_U32); } PX_FORCE_INLINE Vec3V V3Max(const Vec3V a, const Vec3V b) { return Vec3V(a.x > b.x ? a.x : b.x, a.y > b.y ? a.y : b.y, a.z > b.z ? a.z : b.z); } PX_FORCE_INLINE Vec3V V3Min(const Vec3V a, const Vec3V b) { return Vec3V(a.x < b.x ? a.x : b.x, a.y < b.y ? a.y : b.y, a.z < b.z ? a.z : b.z); } PX_FORCE_INLINE FloatV V3ExtractMax(const Vec3V a) { const PxF32 t0 = (a.x >= a.y) ? a.x : a.y; return t0 >= a.z ? t0 : a.z; } PX_FORCE_INLINE FloatV V3ExtractMin(const Vec3V a) { const PxF32 t0 = (a.x <= a.y) ? a.x : a.y; return t0 <= a.z ? 
t0 : a.z; } // return (a >= 0.0f) ? 1.0f : -1.0f; PX_FORCE_INLINE Vec3V V3Sign(const Vec3V a) { return Vec3V((a.x >= 0.f ? 1.f : -1.f), (a.y >= 0.f ? 1.f : -1.f), (a.z >= 0.f ? 1.f : -1.f)); } PX_FORCE_INLINE Vec3V V3Clamp(const Vec3V a, const Vec3V minV, const Vec3V maxV) { return V3Max(V3Min(a, maxV), minV); } PX_FORCE_INLINE Vec3V V3Abs(const Vec3V a) { return V3Max(a, V3Neg(a)); } PX_FORCE_INLINE PxU32 V3AllGrtr(const Vec3V a, const Vec3V b) { return BOOL_TO_U32((a.x > b.x) & (a.y > b.y) & (a.z > b.z)); } PX_FORCE_INLINE PxU32 V3AllGrtrOrEq(const Vec3V a, const Vec3V b) { return BOOL_TO_U32((a.x >= b.x) & (a.y >= b.y) & (a.z >= b.z)); } PX_FORCE_INLINE PxU32 V3AllEq(const Vec3V a, const Vec3V b) { return BOOL_TO_U32((a.x == b.x) & (a.y == b.y) & (a.z == b.z)); } PX_FORCE_INLINE Vec3V V3Round(const Vec3V a) { return Vec3V(floorf(a.x + 0.5f), floorf(a.y + 0.5f), floorf(a.z + 0.5f)); } PX_FORCE_INLINE Vec3V V3Sin(const Vec3V a) { return Vec3V(sinf(a.x), sinf(a.y), sinf(a.z)); } PX_FORCE_INLINE Vec3V V3Cos(const Vec3V a) { return Vec3V(cosf(a.x), cosf(a.y), cosf(a.z)); } PX_FORCE_INLINE Vec3V V3PermYZZ(const Vec3V a) { return Vec3V(a.y, a.z, a.z); } PX_FORCE_INLINE Vec3V V3PermXYX(const Vec3V a) { return Vec3V(a.x, a.y, a.x); } PX_FORCE_INLINE Vec3V V3PermYZX(const Vec3V a) { return Vec3V(a.y, a.z, a.x); } PX_FORCE_INLINE Vec3V V3PermZXY(const Vec3V a) { return Vec3V(a.z, a.x, a.y); } PX_FORCE_INLINE Vec3V V3PermZZY(const Vec3V a) { return Vec3V(a.z, a.z, a.y); } PX_FORCE_INLINE Vec3V V3PermYXX(const Vec3V a) { return Vec3V(a.y, a.x, a.x); } PX_FORCE_INLINE Vec3V V3Perm_Zero_1Z_0Y(const Vec3V v0, const Vec3V v1) { return Vec3V(0.0f, v1.z, v0.y); } PX_FORCE_INLINE Vec3V V3Perm_0Z_Zero_1X(const Vec3V v0, const Vec3V v1) { return Vec3V(v0.z, 0.0f, v1.x); } PX_FORCE_INLINE Vec3V V3Perm_1Y_0X_Zero(const Vec3V v0, const Vec3V v1) { return Vec3V(v1.y, v0.x, 0.0f); } PX_FORCE_INLINE FloatV V3SumElems(const Vec3V a) { return FloatV(a.x + a.y + a.z); } PX_FORCE_INLINE PxU32 V3OutOfBounds(const Vec3V a, const Vec3V min, const Vec3V max) { return BOOL_TO_U32(a.x > max.x || a.y > max.y || a.z > max.z || a.x < min.x || a.y < min.y || a.z < min.z); } PX_FORCE_INLINE PxU32 V3InBounds(const Vec3V a, const Vec3V min, const Vec3V max) { return BOOL_TO_U32(a.x <= max.x && a.y <= max.y && a.z <= max.z && a.x >= min.x && a.y >= min.y && a.z >= min.z); } PX_FORCE_INLINE PxU32 V3OutOfBounds(const Vec3V a, const Vec3V bounds) { return V3OutOfBounds(a, V3Neg(bounds), bounds); } PX_FORCE_INLINE PxU32 V3InBounds(const Vec3V a, const Vec3V bounds) { return V3InBounds(a, V3Neg(bounds), bounds); } PX_FORCE_INLINE void V3Transpose(Vec3V& col0, Vec3V& col1, Vec3V& col2) { const PxF32 t01 = col0.y, t02 = col0.z, t12 = col1.z; col0.y = col1.x; col0.z = col2.x; col1.z = col2.y; col1.x = t01; col2.x = t02; col2.y = t12; } ///////////////////////// // VEC4V ///////////////////////// PX_FORCE_INLINE Vec4V V4Splat(const FloatV f) { return Vec4V(f.x, f.x, f.x, f.x); } PX_FORCE_INLINE Vec4V V4Merge(const FloatV* const floatVArray) { return Vec4V(floatVArray[0].x, floatVArray[1].x, floatVArray[2].x, floatVArray[3].x); } PX_FORCE_INLINE Vec4V V4Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z, const FloatVArg w) { return Vec4V(x.x, y.x, z.x, w.x); } PX_FORCE_INLINE Vec4V V4MergeW(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w) { return Vec4V(x.w, y.w, z.w, w.w); } PX_FORCE_INLINE Vec4V V4MergeZ(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w) { return Vec4V(x.z, y.z, z.z, 
w.z); } PX_FORCE_INLINE Vec4V V4MergeY(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w) { return Vec4V(x.y, y.y, z.y, w.y); } PX_FORCE_INLINE Vec4V V4MergeX(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w) { return Vec4V(x.x, y.x, z.x, w.x); } PX_FORCE_INLINE Vec4V V4UnpackXY(const Vec4VArg a, const Vec4VArg b) { return Vec4V(a.x, b.x, a.y, b.y); } PX_FORCE_INLINE Vec4V V4UnpackZW(const Vec4VArg a, const Vec4VArg b) { return Vec4V(a.z, b.z, a.w, b.w); } PX_FORCE_INLINE Vec4V V4UnitX() { return Vec4V(1.0f, 0.0f, 0.0f, 0.0f); } PX_FORCE_INLINE Vec4V V4UnitY() { return Vec4V(0.0f, 1.0f, 0.0f, 0.0f); } PX_FORCE_INLINE Vec4V V4UnitZ() { return Vec4V(0.0f, 0.0f, 1.0f, 0.0f); } PX_FORCE_INLINE Vec4V V4UnitW() { return Vec4V(0.0f, 0.0f, 0.0f, 1.0f); } PX_FORCE_INLINE FloatV V4GetX(const Vec4V f) { return FloatV(f.x); } PX_FORCE_INLINE FloatV V4GetY(const Vec4V f) { return FloatV(f.y); } PX_FORCE_INLINE FloatV V4GetZ(const Vec4V f) { return FloatV(f.z); } PX_FORCE_INLINE FloatV V4GetW(const Vec4V f) { return FloatV(f.w); } PX_FORCE_INLINE Vec4V V4SetX(const Vec4V v, const FloatV f) { return Vec4V(f.x, v.y, v.z, v.w); } PX_FORCE_INLINE Vec4V V4SetY(const Vec4V v, const FloatV f) { return Vec4V(v.x, f.x, v.z, v.w); } PX_FORCE_INLINE Vec4V V4SetZ(const Vec4V v, const FloatV f) { return Vec4V(v.x, v.y, f.x, v.w); } PX_FORCE_INLINE Vec4V V4SetW(const Vec4V v, const FloatV f) { return Vec4V(v.x, v.y, v.z, f.x); } PX_FORCE_INLINE Vec4V V4SetW(const Vec3V v, const FloatV f) { return Vec4V(v.x, v.y, v.z, f.x); } PX_FORCE_INLINE Vec4V V4ClearW(const Vec4V v) { return Vec4V(v.x, v.y, v.z, 0.0f); } PX_FORCE_INLINE Vec4V V4PermYXWZ(const Vec4V v) { return Vec4V(v.y, v.x, v.w, v.z); } PX_FORCE_INLINE Vec4V V4PermXZXZ(const Vec4V v) { return Vec4V(v.x, v.z, v.x, v.z); } PX_FORCE_INLINE Vec4V V4PermYWYW(const Vec4V v) { return Vec4V(v.y, v.w, v.y, v.w); } PX_FORCE_INLINE Vec4V V4PermYZXW(const Vec4V v) { return Vec4V(v.y, v.z, v.x, v.w); } PX_FORCE_INLINE Vec4V V4PermZWXY(const Vec4V v) { return Vec4V(v.z, v.w, v.x, v.y); } template <PxU8 _x, PxU8 _y, PxU8 _z, PxU8 _w> PX_FORCE_INLINE Vec4V V4Perm(const Vec4V v) { const PxF32 f[4] = { v.x, v.y, v.z, v.w }; return Vec4V(f[_x], f[_y], f[_z], f[_w]); } PX_FORCE_INLINE Vec4V V4Zero() { return V4Load(0.0f); } PX_FORCE_INLINE Vec4V V4One() { return V4Load(1.0f); } PX_FORCE_INLINE Vec4V V4Eps() { return V4Load(PX_EPS_REAL); } PX_FORCE_INLINE Vec4V V4Neg(const Vec4V c) { return Vec4V(-c.x, -c.y, -c.z, -c.w); } PX_FORCE_INLINE Vec4V V4Add(const Vec4V a, const Vec4V b) { return Vec4V(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w); } PX_FORCE_INLINE Vec4V V4Sub(const Vec4V a, const Vec4V b) { return Vec4V(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w); } PX_FORCE_INLINE Vec4V V4Scale(const Vec4V a, const FloatV b) { return Vec4V(a.x * b.x, a.y * b.x, a.z * b.x, a.w * b.x); } PX_FORCE_INLINE Vec4V V4Mul(const Vec4V a, const Vec4V b) { return Vec4V(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w); } PX_FORCE_INLINE Vec4V V4ScaleInv(const Vec4V a, const FloatV b) { const PxF32 bInv = 1.0f / b.x; return Vec4V(a.x * bInv, a.y * bInv, a.z * bInv, a.w * bInv); } PX_FORCE_INLINE Vec4V V4Div(const Vec4V a, const Vec4V b) { VECMATHAOS_ASSERT(b.x != 0 && b.y != 0 && b.z != 0 && b.w != 0); return Vec4V(a.x / b.x, a.y / b.y, a.z / b.z, a.w / b.w); } PX_FORCE_INLINE Vec4V V4ScaleInvFast(const Vec4V a, const FloatV b) { const PxF32 bInv = 1.0f / b.x; return Vec4V(a.x * bInv, a.y * bInv, a.z * bInv, a.w * bInv); } PX_FORCE_INLINE Vec4V V4DivFast(const 
Vec4V a, const Vec4V b) { return Vec4V(a.x / b.x, a.y / b.y, a.z / b.z, a.w / b.w); } PX_FORCE_INLINE Vec4V V4Recip(const Vec4V a) { return Vec4V(1.0f / a.x, 1.0f / a.y, 1.0f / a.z, 1.0f / a.w); } PX_FORCE_INLINE Vec4V V4RecipFast(const Vec4V a) { return Vec4V(1.0f / a.x, 1.0f / a.y, 1.0f / a.z, 1.0f / a.w); } PX_FORCE_INLINE Vec4V V4Rsqrt(const Vec4V a) { return Vec4V(PxRecipSqrt(a.x), PxRecipSqrt(a.y), PxRecipSqrt(a.z), PxRecipSqrt(a.w)); } PX_FORCE_INLINE Vec4V V4RsqrtFast(const Vec4V a) { return Vec4V(PxRecipSqrt(a.x), PxRecipSqrt(a.y), PxRecipSqrt(a.z), PxRecipSqrt(a.w)); } PX_FORCE_INLINE Vec4V V4Sqrt(const Vec4V a) { return Vec4V(PxSqrt(a.x), PxSqrt(a.y), PxSqrt(a.z), PxSqrt(a.w)); } PX_FORCE_INLINE Vec4V V4ScaleAdd(const Vec4V a, const FloatV b, const Vec4V c) { return V4Add(V4Scale(a, b), c); } PX_FORCE_INLINE Vec4V V4NegScaleSub(const Vec4V a, const FloatV b, const Vec4V c) { return V4Sub(c, V4Scale(a, b)); } PX_FORCE_INLINE Vec4V V4MulAdd(const Vec4V a, const Vec4V b, const Vec4V c) { return V4Add(V4Mul(a, b), c); } PX_FORCE_INLINE Vec4V V4NegMulSub(const Vec4V a, const Vec4V b, const Vec4V c) { return V4Sub(c, V4Mul(a, b)); } PX_FORCE_INLINE FloatV V4SumElements(const Vec4V a) { return FloatV(a.x + a.y + a.z + a.w); } PX_FORCE_INLINE FloatV V4Dot(const Vec4V a, const Vec4V b) { return FloatV(a.x * b.x + a.y * b.y + a.z * b.z + a.w * b.w); } PX_FORCE_INLINE FloatV V4Dot3(const Vec4V a, const Vec4V b) { return FloatV(a.x * b.x + a.y * b.y + a.z * b.z); } PX_FORCE_INLINE Vec4V V4Cross(const Vec4V a, const Vec4V b) { return Vec4V(a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x, 0.0f); } PX_FORCE_INLINE FloatV V4Length(const Vec4V a) { return FloatV(PxSqrt(a.x * a.x + a.y * a.y + a.z * a.z + a.w * a.w)); } PX_FORCE_INLINE FloatV V4LengthSq(const Vec4V a) { return V4Dot(a, a); } PX_FORCE_INLINE Vec4V V4Normalize(const Vec4V a) { VECMATHAOS_ASSERT(0 != a.x || 0 != a.y || 0 != a.z || 0 != a.w); const FloatV length = FloatV(V4Length(a)); return V4ScaleInv(a, length); } PX_FORCE_INLINE Vec4V V4NormalizeSafe(const Vec4V a, const Vec4V unsafeReturnValue) { const FloatV length = FloatV(V4Length(a)); if(PX_EPS_REAL >= length.x) { return unsafeReturnValue; } else { return V4ScaleInv(a, length); } } PX_FORCE_INLINE Vec4V V4NormalizeFast(const Vec4V a) { VECMATHAOS_ASSERT(0 != a.x || 0 != a.y || 0 != a.z || 0 != a.w); const FloatV length = FloatV(V4Length(a)); return V4ScaleInv(a, length); } PX_FORCE_INLINE Vec4V V4Sel(const BoolV c, const Vec4V a, const Vec4V b) { return Vec4V(c.ux ? a.x : b.x, c.uy ? a.y : b.y, c.uz ? a.z : b.z, c.uw ? a.w : b.w); } PX_FORCE_INLINE BoolV V4IsGrtr(const Vec4V a, const Vec4V b) { return BoolV(BOOL_TO_U32(a.x > b.x), BOOL_TO_U32(a.y > b.y), BOOL_TO_U32(a.z > b.z), BOOL_TO_U32(a.w > b.w)); } PX_FORCE_INLINE BoolV V4IsGrtrOrEq(const Vec4V a, const Vec4V b) { return BoolV(BOOL_TO_U32(a.x >= b.x), BOOL_TO_U32(a.y >= b.y), BOOL_TO_U32(a.z >= b.z), BOOL_TO_U32(a.w >= b.w)); } PX_FORCE_INLINE BoolV V4IsEq(const Vec4V a, const Vec4V b) { return BoolV(BOOL_TO_U32(a.x == b.x), BOOL_TO_U32(a.y == b.y), BOOL_TO_U32(a.z == b.z), BOOL_TO_U32(a.w == b.w)); } PX_FORCE_INLINE Vec4V V4Max(const Vec4V a, const Vec4V b) { return Vec4V(a.x > b.x ? a.x : b.x, a.y > b.y ? a.y : b.y, a.z > b.z ? a.z : b.z, a.w > b.w ? a.w : b.w); } PX_FORCE_INLINE Vec4V V4Min(const Vec4V a, const Vec4V b) { return Vec4V(a.x < b.x ? a.x : b.x, a.y < b.y ? a.y : b.y, a.z < b.z ? a.z : b.z, a.w < b.w ? 
a.w : b.w); } PX_FORCE_INLINE FloatV V4ExtractMax(const Vec4V a) { const PxF32 t0 = (a.x >= a.y) ? a.x : a.y; const PxF32 t1 = (a.z >= a.w) ? a.z : a.w; return t0 >= t1 ? t0 : t1; } PX_FORCE_INLINE FloatV V4ExtractMin(const Vec4V a) { const PxF32 t0 = (a.x <= a.y) ? a.x : a.y; const PxF32 t1 = (a.z <= a.w) ? a.z : a.w; return t0 <= t1 ? t0 : t1; } PX_FORCE_INLINE Vec4V V4Clamp(const Vec4V a, const Vec4V minV, const Vec4V maxV) { return V4Max(V4Min(a, maxV), minV); } PX_FORCE_INLINE Vec4V V4Round(const Vec4V a) { return Vec4V(floorf(a.x + 0.5f), floorf(a.y + 0.5f), floorf(a.z + 0.5f), floorf(a.w + 0.5f)); } PX_FORCE_INLINE Vec4V V4Sin(const Vec4V a) { return Vec4V(sinf(a.x), sinf(a.y), sinf(a.z), sinf(a.w)); } PX_FORCE_INLINE Vec4V V4Cos(const Vec4V a) { return Vec4V(cosf(a.x), cosf(a.y), cosf(a.z), cosf(a.w)); } PX_FORCE_INLINE PxU32 V4AllGrtr(const Vec4V a, const Vec4V b) { return BOOL_TO_U32((a.x > b.x) & (a.y > b.y) & (a.z > b.z) & (a.w > b.w)); } PX_FORCE_INLINE PxU32 V4AllGrtrOrEq(const Vec4V a, const Vec4V b) { return BOOL_TO_U32((a.x >= b.x) & (a.y >= b.y) & (a.z >= b.z) & (a.w >= b.w)); } PX_FORCE_INLINE PxU32 V4AllGrtrOrEq3(const Vec4V a, const Vec4V b) { return BOOL_TO_U32((a.x >= b.x) & (a.y >= b.y) & (a.z >= b.z)); } PX_FORCE_INLINE PxU32 V4AllEq(const Vec4V a, const Vec4V b) { return BOOL_TO_U32((a.x == b.x) & (a.y == b.y) & (a.z == b.z) & (a.w == b.w)); } PX_FORCE_INLINE PxU32 V4AnyGrtr3(const Vec4V a, const Vec4V b) { return BOOL_TO_U32((a.x > b.x) | (a.y > b.y) | (a.z > b.z)); } PX_FORCE_INLINE void V4Transpose(Vec4V& col0, Vec4V& col1, Vec4V& col2, Vec4V& col3) { const PxF32 t01 = col0.y, t02 = col0.z, t03 = col0.w; const PxF32 t12 = col1.z, t13 = col1.w; const PxF32 t23 = col2.w; col0.y = col1.x; col0.z = col2.x; col0.w = col3.x; col1.z = col2.y; col1.w = col3.y; col2.w = col3.z; col1.x = t01; col2.x = t02; col3.x = t03; col2.y = t12; col3.y = t13; col3.z = t23; } PX_FORCE_INLINE BoolV BFFFF() { return BoolV(FALSE_TO_U32, FALSE_TO_U32, FALSE_TO_U32, FALSE_TO_U32); } PX_FORCE_INLINE BoolV BFFFT() { return BoolV(FALSE_TO_U32, FALSE_TO_U32, FALSE_TO_U32, TRUE_TO_U32); } PX_FORCE_INLINE BoolV BFFTF() { return BoolV(FALSE_TO_U32, FALSE_TO_U32, TRUE_TO_U32, FALSE_TO_U32); } PX_FORCE_INLINE BoolV BFFTT() { return BoolV(FALSE_TO_U32, FALSE_TO_U32, TRUE_TO_U32, TRUE_TO_U32); } PX_FORCE_INLINE BoolV BFTFF() { return BoolV(FALSE_TO_U32, TRUE_TO_U32, FALSE_TO_U32, FALSE_TO_U32); } PX_FORCE_INLINE BoolV BFTFT() { return BoolV(FALSE_TO_U32, TRUE_TO_U32, FALSE_TO_U32, TRUE_TO_U32); } PX_FORCE_INLINE BoolV BFTTF() { return BoolV(FALSE_TO_U32, TRUE_TO_U32, TRUE_TO_U32, FALSE_TO_U32); } PX_FORCE_INLINE BoolV BFTTT() { return BoolV(FALSE_TO_U32, TRUE_TO_U32, TRUE_TO_U32, TRUE_TO_U32); } PX_FORCE_INLINE BoolV BTFFF() { return BoolV(TRUE_TO_U32, FALSE_TO_U32, FALSE_TO_U32, FALSE_TO_U32); } PX_FORCE_INLINE BoolV BTFFT() { return BoolV(TRUE_TO_U32, FALSE_TO_U32, FALSE_TO_U32, TRUE_TO_U32); } PX_FORCE_INLINE BoolV BTFTF() { return BoolV(TRUE_TO_U32, FALSE_TO_U32, TRUE_TO_U32, FALSE_TO_U32); } PX_FORCE_INLINE BoolV BTFTT() { return BoolV(TRUE_TO_U32, FALSE_TO_U32, TRUE_TO_U32, TRUE_TO_U32); } PX_FORCE_INLINE BoolV BTTFF() { return BoolV(TRUE_TO_U32, TRUE_TO_U32, FALSE_TO_U32, FALSE_TO_U32); } PX_FORCE_INLINE BoolV BTTFT() { return BoolV(TRUE_TO_U32, TRUE_TO_U32, FALSE_TO_U32, TRUE_TO_U32); } PX_FORCE_INLINE BoolV BTTTF() { return BoolV(TRUE_TO_U32, TRUE_TO_U32, TRUE_TO_U32, FALSE_TO_U32); } PX_FORCE_INLINE BoolV BTTTT() { return BoolV(TRUE_TO_U32, TRUE_TO_U32, TRUE_TO_U32, TRUE_TO_U32); } 
PX_FORCE_INLINE BoolV BXMask() { return BTFFF(); } PX_FORCE_INLINE BoolV BYMask() { return BFTFF(); } PX_FORCE_INLINE BoolV BZMask() { return BFFTF(); } PX_FORCE_INLINE BoolV BWMask() { return BFFFT(); } PX_FORCE_INLINE BoolV BGetX(const BoolV a) { return BoolV(a.ux, a.ux, a.ux, a.ux); } PX_FORCE_INLINE BoolV BGetY(const BoolV a) { return BoolV(a.uy, a.uy, a.uy, a.uy); } PX_FORCE_INLINE BoolV BGetZ(const BoolV a) { return BoolV(a.uz, a.uz, a.uz, a.uz); } PX_FORCE_INLINE BoolV BGetW(const BoolV a) { return BoolV(a.uw, a.uw, a.uw, a.uw); } PX_FORCE_INLINE BoolV BSetX(const BoolV v, const BoolV f) { return BoolV(f.ux, v.uy, v.uz, v.uw); } PX_FORCE_INLINE BoolV BSetY(const BoolV v, const BoolV f) { return BoolV(v.ux, f.uy, v.uz, v.uw); } PX_FORCE_INLINE BoolV BSetZ(const BoolV v, const BoolV f) { return BoolV(v.ux, v.uy, f.uz, v.uw); } PX_FORCE_INLINE BoolV BSetW(const BoolV v, const BoolV f) { return BoolV(v.ux, v.uy, v.uz, f.uw); } template <int index> BoolV BSplatElement(BoolV a) { PxU32* b = reinterpret_cast<PxU32*>(&a); return BoolV(b[index], b[index], b[index], b[index]); } PX_FORCE_INLINE BoolV BAnd(const BoolV a, const BoolV b) { return BoolV(BOOL_TO_U32(a.ux && b.ux), BOOL_TO_U32(a.uy && b.uy), BOOL_TO_U32(a.uz && b.uz), BOOL_TO_U32(a.uw && b.uw)); } PX_FORCE_INLINE BoolV BAndNot(const BoolV a, const BoolV b) { return BoolV(a.ux & ~b.ux, a.uy & ~b.uy, a.uz & ~b.uz, a.uw & ~b.uw); } PX_FORCE_INLINE BoolV BNot(const BoolV a) { return BoolV(~a.ux, ~a.uy, ~a.uz, ~a.uw); } PX_FORCE_INLINE BoolV BOr(const BoolV a, const BoolV b) { return BoolV(BOOL_TO_U32(a.ux || b.ux), BOOL_TO_U32(a.uy || b.uy), BOOL_TO_U32(a.uz || b.uz), BOOL_TO_U32(a.uw || b.uw)); } PX_FORCE_INLINE PxU32 BAllEq(const BoolV a, const BoolV b) { return (a.ux == b.ux && a.uy == b.uy && a.uz == b.uz && a.uw == b.uw ? 1 : 0); } PX_FORCE_INLINE PxU32 BAllEqTTTT(const BoolV a) { return BAllEq(a, BTTTT()); } PX_FORCE_INLINE PxU32 BAllEqFFFF(const BoolV a) { return BAllEq(a, BFFFF()); } PX_FORCE_INLINE BoolV BAllTrue4(const BoolV a) { return (a.ux & a.uy & a.uz & a.uw) ? BTTTT() : BFFFF(); } PX_FORCE_INLINE BoolV BAnyTrue4(const BoolV a) { return (a.ux | a.uy | a.uz | a.uw) ? BTTTT() : BFFFF(); } PX_FORCE_INLINE BoolV BAllTrue3(const BoolV a) { return (a.ux & a.uy & a.uz) ? BTTTT() : BFFFF(); } PX_FORCE_INLINE BoolV BAnyTrue3(const BoolV a) { return (a.ux | a.uy | a.uz) ? 
BTTTT() : BFFFF(); } PX_FORCE_INLINE PxU32 BGetBitMask(const BoolV a) { return (a.ux & 1) | (a.uy & 2) | (a.uz & 4) | (a.uw & 8); } ////////////////////////////////// // MAT33V ////////////////////////////////// PX_FORCE_INLINE Vec3V M33MulV3(const Mat33V& a, const Vec3V b) { return Vec3V(a.col0.x * b.x + a.col1.x * b.y + a.col2.x * b.z, a.col0.y * b.x + a.col1.y * b.y + a.col2.y * b.z, a.col0.z * b.x + a.col1.z * b.y + a.col2.z * b.z); } PX_FORCE_INLINE Vec3V M33TrnspsMulV3(const Mat33V& a, const Vec3V b) { return Vec3V(a.col0.x * b.x + a.col0.y * b.y + a.col0.z * b.z, a.col1.x * b.x + a.col1.y * b.y + a.col1.z * b.z, a.col2.x * b.x + a.col2.y * b.y + a.col2.z * b.z); } PX_FORCE_INLINE Vec3V M33MulV3AddV3(const Mat33V& A, const Vec3V b, const Vec3V c) { const FloatV x = V3GetX(b); const FloatV y = V3GetY(b); const FloatV z = V3GetZ(b); Vec3V result = V3ScaleAdd(A.col0, x, c); result = V3ScaleAdd(A.col1, y, result); return V3ScaleAdd(A.col2, z, result); } PX_FORCE_INLINE Mat33V M33MulM33(const Mat33V& a, const Mat33V& b) { return Mat33V(M33MulV3(a, b.col0), M33MulV3(a, b.col1), M33MulV3(a, b.col2)); } PX_FORCE_INLINE Mat33V M33Add(const Mat33V& a, const Mat33V& b) { return Mat33V(V3Add(a.col0, b.col0), V3Add(a.col1, b.col1), V3Add(a.col2, b.col2)); } PX_FORCE_INLINE Mat33V M33Scale(const Mat33V& a, const FloatV& b) { return Mat33V(V3Scale(a.col0, b), V3Scale(a.col1, b), V3Scale(a.col2, b)); } PX_FORCE_INLINE Mat33V M33Sub(const Mat33V& a, const Mat33V& b) { return Mat33V(V3Sub(a.col0, b.col0), V3Sub(a.col1, b.col1), V3Sub(a.col2, b.col2)); } PX_FORCE_INLINE Mat33V M33Neg(const Mat33V& a) { return Mat33V(V3Neg(a.col0), V3Neg(a.col1), V3Neg(a.col2)); } PX_FORCE_INLINE Mat33V M33Abs(const Mat33V& a) { return Mat33V(V3Abs(a.col0), V3Abs(a.col1), V3Abs(a.col2)); } PX_FORCE_INLINE Mat33V M33Diagonal(const Vec3VArg d) { const Vec3V x = V3Mul(V3UnitX(), d); const Vec3V y = V3Mul(V3UnitY(), d); const Vec3V z = V3Mul(V3UnitZ(), d); return Mat33V(x, y, z); } PX_FORCE_INLINE Mat33V M33Inverse(const Mat33V& a) { const PxF32 det = a.col0.x * (a.col1.y * a.col2.z - a.col1.z * a.col2.y) - a.col1.x * (a.col0.y * a.col2.z - a.col2.y * a.col0.z) + a.col2.x * (a.col0.y * a.col1.z - a.col1.y * a.col0.z); const PxF32 invDet = 1.0f / det; Mat33V ret; ret.col0.x = invDet * (a.col1.y * a.col2.z - a.col2.y * a.col1.z); ret.col0.y = invDet * (a.col2.y * a.col0.z - a.col0.y * a.col2.z); ret.col0.z = invDet * (a.col0.y * a.col1.z - a.col1.y * a.col0.z); ret.col1.x = invDet * (a.col2.x * a.col1.z - a.col1.x * a.col2.z); ret.col1.y = invDet * (a.col0.x * a.col2.z - a.col2.x * a.col0.z); ret.col1.z = invDet * (a.col1.x * a.col0.z - a.col0.x * a.col1.z); ret.col2.x = invDet * (a.col1.x * a.col2.y - a.col2.x * a.col1.y); ret.col2.y = invDet * (a.col2.x * a.col0.y - a.col0.x * a.col2.y); ret.col2.z = invDet * (a.col0.x * a.col1.y - a.col1.x * a.col0.y); return ret; } PX_FORCE_INLINE Mat33V Mat33V_From_PxMat33(const PxMat33& m) { return Mat33V(V3LoadU(m.column0), V3LoadU(m.column1), V3LoadU(m.column2)); } PX_FORCE_INLINE void PxMat33_From_Mat33V(const Mat33V& m, PxMat33& out) { PX_ASSERT((size_t(&out) & 15) == 0); V3StoreU(m.col0, out.column0); V3StoreU(m.col1, out.column1); V3StoreU(m.col2, out.column2); } PX_FORCE_INLINE Mat33V M33Trnsps(const Mat33V& a) { return Mat33V(Vec3V(a.col0.x, a.col1.x, a.col2.x), Vec3V(a.col0.y, a.col1.y, a.col2.y), Vec3V(a.col0.z, a.col1.z, a.col2.z)); } PX_FORCE_INLINE Mat33V M33Identity() { return Mat33V(V3UnitX(), V3UnitY(), V3UnitZ()); } ////////////////////////////////// // MAT34V 
////////////////////////////////// PX_FORCE_INLINE Vec3V M34MulV3(const Mat34V& a, const Vec3V b) { return Vec3V(a.col0.x * b.x + a.col1.x * b.y + a.col2.x * b.z + a.col3.x, a.col0.y * b.x + a.col1.y * b.y + a.col2.y * b.z + a.col3.y, a.col0.z * b.x + a.col1.z * b.y + a.col2.z * b.z + a.col3.z); } PX_FORCE_INLINE Vec3V M34Mul33V3(const Mat34V& a, const Vec3V b) { return Vec3V(a.col0.x * b.x + a.col1.x * b.y + a.col2.x * b.z, a.col0.y * b.x + a.col1.y * b.y + a.col2.y * b.z, a.col0.z * b.x + a.col1.z * b.y + a.col2.z * b.z); } PX_FORCE_INLINE Vec3V M34TrnspsMul33V3(const Mat34V& a, const Vec3V b) { return Vec3V(a.col0.x * b.x + a.col0.y * b.y + a.col0.z * b.z, a.col1.x * b.x + a.col1.y * b.y + a.col1.z * b.z, a.col2.x * b.x + a.col2.y * b.y + a.col2.z * b.z); } PX_FORCE_INLINE Mat34V M34MulM34(const Mat34V& a, const Mat34V& b) { return Mat34V(M34Mul33V3(a, b.col0), M34Mul33V3(a, b.col1), M34Mul33V3(a, b.col2), M34MulV3(a, b.col3)); } PX_FORCE_INLINE Mat33V M34MulM33(const Mat34V& a, const Mat33V& b) { return Mat33V(M34Mul33V3(a, b.col0), M34Mul33V3(a, b.col1), M34Mul33V3(a, b.col2)); } PX_FORCE_INLINE Mat33V M34Mul33V3(const Mat34V& a, const Mat33V& b) { return Mat33V(M34Mul33V3(a, b.col0), M34Mul33V3(a, b.col1), M34Mul33V3(a, b.col2)); } PX_FORCE_INLINE Mat33V M34Mul33MM34(const Mat34V& a, const Mat34V& b) { return Mat33V(M34Mul33V3(a, b.col0), M34Mul33V3(a, b.col1), M34Mul33V3(a, b.col2)); } PX_FORCE_INLINE Mat34V M34Add(const Mat34V& a, const Mat34V& b) { return Mat34V(V3Add(a.col0, b.col0), V3Add(a.col1, b.col1), V3Add(a.col2, b.col2), V3Add(a.col3, b.col3)); } PX_FORCE_INLINE Mat33V M34Trnsps33(const Mat34V& a) { return Mat33V(Vec3V(a.col0.x, a.col1.x, a.col2.x), Vec3V(a.col0.y, a.col1.y, a.col2.y), Vec3V(a.col0.z, a.col1.z, a.col2.z)); } ////////////////////////////////// // MAT44V ////////////////////////////////// PX_FORCE_INLINE Vec4V M44MulV4(const Mat44V& a, const Vec4V b) { return Vec4V(a.col0.x * b.x + a.col1.x * b.y + a.col2.x * b.z + a.col3.x * b.w, a.col0.y * b.x + a.col1.y * b.y + a.col2.y * b.z + a.col3.y * b.w, a.col0.z * b.x + a.col1.z * b.y + a.col2.z * b.z + a.col3.z * b.w, a.col0.w * b.x + a.col1.w * b.y + a.col2.w * b.z + a.col3.w * b.w); } PX_FORCE_INLINE Vec4V M44TrnspsMulV4(const Mat44V& a, const Vec4V b) { return Vec4V(a.col0.x * b.x + a.col0.y * b.y + a.col0.z * b.z + a.col0.w * b.w, a.col1.x * b.x + a.col1.y * b.y + a.col1.z * b.z + a.col1.w * b.w, a.col2.x * b.x + a.col2.y * b.y + a.col2.z * b.z + a.col2.w * b.w, a.col3.x * b.x + a.col3.y * b.y + a.col3.z * b.z + a.col3.w * b.w); } PX_FORCE_INLINE Mat44V M44MulM44(const Mat44V& a, const Mat44V& b) { return Mat44V(M44MulV4(a, b.col0), M44MulV4(a, b.col1), M44MulV4(a, b.col2), M44MulV4(a, b.col3)); } PX_FORCE_INLINE Mat44V M44Add(const Mat44V& a, const Mat44V& b) { return Mat44V(V4Add(a.col0, b.col0), V4Add(a.col1, b.col1), V4Add(a.col2, b.col2), V4Add(a.col3, b.col3)); } PX_FORCE_INLINE Mat44V M44Inverse(const Mat44V& a) { PxF32 tmp[12]; PxF32 dst[16]; PxF32 det; const PxF32 src[16] = { a.col0.x, a.col0.y, a.col0.z, a.col0.w, a.col1.x, a.col1.y, a.col1.z, a.col1.w, a.col2.x, a.col2.y, a.col2.z, a.col2.w, a.col3.x, a.col3.y, a.col3.z, a.col3.w }; tmp[0] = src[10] * src[15]; tmp[1] = src[11] * src[14]; tmp[2] = src[9] * src[15]; tmp[3] = src[11] * src[13]; tmp[4] = src[9] * src[14]; tmp[5] = src[10] * src[13]; tmp[6] = src[8] * src[15]; tmp[7] = src[11] * src[12]; tmp[8] = src[8] * src[14]; tmp[9] = src[10] * src[12]; tmp[10] = src[8] * src[13]; tmp[11] = src[9] * src[12]; dst[0] = tmp[0] * src[5] + tmp[3] * 
src[6] + tmp[4] * src[7]; dst[0] -= tmp[1] * src[5] + tmp[2] * src[6] + tmp[5] * src[7]; dst[1] = tmp[1] * src[4] + tmp[6] * src[6] + tmp[9] * src[7]; dst[1] -= tmp[0] * src[4] + tmp[7] * src[6] + tmp[8] * src[7]; dst[2] = tmp[2] * src[4] + tmp[7] * src[5] + tmp[10] * src[7]; dst[2] -= tmp[3] * src[4] + tmp[6] * src[5] + tmp[11] * src[7]; dst[3] = tmp[5] * src[4] + tmp[8] * src[5] + tmp[11] * src[6]; dst[3] -= tmp[4] * src[4] + tmp[9] * src[5] + tmp[10] * src[6]; dst[4] = tmp[1] * src[1] + tmp[2] * src[2] + tmp[5] * src[3]; dst[4] -= tmp[0] * src[1] + tmp[3] * src[2] + tmp[4] * src[3]; dst[5] = tmp[0] * src[0] + tmp[7] * src[2] + tmp[8] * src[3]; dst[5] -= tmp[1] * src[0] + tmp[6] * src[2] + tmp[9] * src[3]; dst[6] = tmp[3] * src[0] + tmp[6] * src[1] + tmp[11] * src[3]; dst[6] -= tmp[2] * src[0] + tmp[7] * src[1] + tmp[10] * src[3]; dst[7] = tmp[4] * src[0] + tmp[9] * src[1] + tmp[10] * src[2]; dst[7] -= tmp[5] * src[0] + tmp[8] * src[1] + tmp[11] * src[2]; tmp[0] = src[2] * src[7]; tmp[1] = src[3] * src[6]; tmp[2] = src[1] * src[7]; tmp[3] = src[3] * src[5]; tmp[4] = src[1] * src[6]; tmp[5] = src[2] * src[5]; tmp[6] = src[0] * src[7]; tmp[7] = src[3] * src[4]; tmp[8] = src[0] * src[6]; tmp[9] = src[2] * src[4]; tmp[10] = src[0] * src[5]; tmp[11] = src[1] * src[4]; dst[8] = tmp[0] * src[13] + tmp[3] * src[14] + tmp[4] * src[15]; dst[8] -= tmp[1] * src[13] + tmp[2] * src[14] + tmp[5] * src[15]; dst[9] = tmp[1] * src[12] + tmp[6] * src[14] + tmp[9] * src[15]; dst[9] -= tmp[0] * src[12] + tmp[7] * src[14] + tmp[8] * src[15]; dst[10] = tmp[2] * src[12] + tmp[7] * src[13] + tmp[10] * src[15]; dst[10] -= tmp[3] * src[12] + tmp[6] * src[13] + tmp[11] * src[15]; dst[11] = tmp[5] * src[12] + tmp[8] * src[13] + tmp[11] * src[14]; dst[11] -= tmp[4] * src[12] + tmp[9] * src[13] + tmp[10] * src[14]; dst[12] = tmp[2] * src[10] + tmp[5] * src[11] + tmp[1] * src[9]; dst[12] -= tmp[4] * src[11] + tmp[0] * src[9] + tmp[3] * src[10]; dst[13] = tmp[8] * src[11] + tmp[0] * src[8] + tmp[7] * src[10]; dst[13] -= tmp[6] * src[10] + tmp[9] * src[11] + tmp[1] * src[8]; dst[14] = tmp[6] * src[9] + tmp[11] * src[11] + tmp[3] * src[8]; dst[14] -= tmp[10] * src[11] + tmp[2] * src[8] + tmp[7] * src[9]; dst[15] = tmp[10] * src[10] + tmp[4] * src[8] + tmp[9] * src[9]; dst[15] -= tmp[8] * src[9] + tmp[11] * src[10] + tmp[5] * src[8]; det = src[0] * dst[0] + src[1] * dst[1] + src[2] * dst[2] + src[3] * dst[3]; det = 1.0f / det; for(PxU32 j = 0; j < 16; j++) { dst[j] *= det; } return Mat44V(Vec4V(dst[0], dst[4], dst[8], dst[12]), Vec4V(dst[1], dst[5], dst[9], dst[13]), Vec4V(dst[2], dst[6], dst[10], dst[14]), Vec4V(dst[3], dst[7], dst[11], dst[15])); } PX_FORCE_INLINE Mat44V M44Trnsps(const Mat44V& a) { return Mat44V(Vec4V(a.col0.x, a.col1.x, a.col2.x, a.col3.x), Vec4V(a.col0.y, a.col1.y, a.col2.y, a.col3.y), Vec4V(a.col0.z, a.col1.z, a.col2.z, a.col3.z), Vec4V(a.col0.w, a.col1.w, a.col2.w, a.col3.w)); } PX_FORCE_INLINE Vec4V V4LoadXYZW(const PxF32& x, const PxF32& y, const PxF32& z, const PxF32& w) { return Vec4V(x, y, z, w); } /* PX_FORCE_INLINE VecU16V V4U32PK(VecU32V a, VecU32V b) { return VecU16V( PxU16(PxClamp<PxU32>((a).u32[0], 0, 0xFFFF)), PxU16(PxClamp<PxU32>((a).u32[1], 0, 0xFFFF)), PxU16(PxClamp<PxU32>((a).u32[2], 0, 0xFFFF)), PxU16(PxClamp<PxU32>((a).u32[3], 0, 0xFFFF)), PxU16(PxClamp<PxU32>((b).u32[0], 0, 0xFFFF)), PxU16(PxClamp<PxU32>((b).u32[1], 0, 0xFFFF)), PxU16(PxClamp<PxU32>((b).u32[2], 0, 0xFFFF)), PxU16(PxClamp<PxU32>((b).u32[3], 0, 0xFFFF))); } */ PX_FORCE_INLINE VecU32V V4U32Sel(const BoolV c, const 
VecU32V a, const VecU32V b) { return VecU32V(c.ux ? a.u32[0] : b.u32[0], c.uy ? a.u32[1] : b.u32[1], c.uz ? a.u32[2] : b.u32[2], c.uw ? a.u32[3] : b.u32[3]); } PX_FORCE_INLINE VecU32V V4U32or(VecU32V a, VecU32V b) { return VecU32V((a).u32[0] | (b).u32[0], (a).u32[1] | (b).u32[1], (a).u32[2] | (b).u32[2], (a).u32[3] | (b).u32[3]); } PX_FORCE_INLINE VecU32V V4U32xor(VecU32V a, VecU32V b) { return VecU32V((a).u32[0] ^ (b).u32[0], (a).u32[1] ^ (b).u32[1], (a).u32[2] ^ (b).u32[2], (a).u32[3] ^ (b).u32[3]); } PX_FORCE_INLINE VecU32V V4U32and(VecU32V a, VecU32V b) { return VecU32V((a).u32[0] & (b).u32[0], (a).u32[1] & (b).u32[1], (a).u32[2] & (b).u32[2], (a).u32[3] & (b).u32[3]); } PX_FORCE_INLINE VecU32V V4U32Andc(VecU32V a, VecU32V b) { return VecU32V((a).u32[0] & ~(b).u32[0], (a).u32[1] & ~(b).u32[1], (a).u32[2] & ~(b).u32[2], (a).u32[3] & ~(b).u32[3]); } /* PX_FORCE_INLINE VecU16V V4U16Or(VecU16V a, VecU16V b) { return VecU16V( (a).u16[0]|(b).u16[0], (a).u16[1]|(b).u16[1], (a).u16[2]|(b).u16[2], (a).u16[3]|(b).u16[3], (a).u16[4]|(b).u16[4], (a).u16[5]|(b).u16[5], (a).u16[6]|(b).u16[6], (a).u16[7]|(b).u16[7]); } */ /* PX_FORCE_INLINE VecU16V V4U16And(VecU16V a, VecU16V b) { return VecU16V( (a).u16[0]&(b).u16[0], (a).u16[1]&(b).u16[1], (a).u16[2]&(b).u16[2], (a).u16[3]&(b).u16[3], (a).u16[4]&(b).u16[4], (a).u16[5]&(b).u16[5], (a).u16[6]&(b).u16[6], (a).u16[7]&(b).u16[7]); } */ /* PX_FORCE_INLINE VecU16V V4U16Andc(VecU16V a, VecU16V b) { return VecU16V( (a).u16[0]&~(b).u16[0], (a).u16[1]&~(b).u16[1], (a).u16[2]&~(b).u16[2], (a).u16[3]&~(b).u16[3], (a).u16[4]&~(b).u16[4], (a).u16[5]&~(b).u16[5], (a).u16[6]&~(b).u16[6], (a).u16[7]&~(b).u16[7]); } */ /* template<int a> PX_FORCE_INLINE VecI32V V4ISplat() { return VecI32V(a, a, a, a); } template<PxU32 a> PX_FORCE_INLINE VecU32V V4USplat() { return VecU32V(a, a, a, a); } */ /* PX_FORCE_INLINE void V4U16StoreAligned(VecU16V val, VecU16V* address) { *address = val; } */ PX_FORCE_INLINE void V4U32StoreAligned(VecU32V val, VecU32V* address) { *address = val; } PX_FORCE_INLINE Vec4V V4Andc(const Vec4V a, const VecU32V b) { VecU32V r = V4U32Andc(*reinterpret_cast<const VecU32V*>(&a), b); return (*reinterpret_cast<const Vec4V*>(&r)); } PX_FORCE_INLINE VecU32V V4IsGrtrV32u(const Vec4V a, const Vec4V b) { return VecU32V(a.x > b.x ? 0xFFFFffff : 0, a.y > b.y ? 0xFFFFffff : 0, a.z > b.z ? 0xFFFFffff : 0, a.w > b.w ? 
0xFFFFffff : 0); } PX_FORCE_INLINE VecU16V V4U16LoadAligned(VecU16V* addr) { return *addr; } PX_FORCE_INLINE VecU16V V4U16LoadUnaligned(VecU16V* addr) { return *addr; } PX_FORCE_INLINE VecU16V V4U16CompareGt(VecU16V a, VecU16V b) { return VecU16V ( BOOL_TO_U16(a.u16[0] > b.u16[0]), BOOL_TO_U16(a.u16[1] > b.u16[1]), BOOL_TO_U16(a.u16[2] > b.u16[2]), BOOL_TO_U16(a.u16[3] > b.u16[3]), BOOL_TO_U16(a.u16[4] > b.u16[4]), BOOL_TO_U16(a.u16[5] > b.u16[5]), BOOL_TO_U16(a.u16[6] > b.u16[6]), BOOL_TO_U16(a.u16[7] > b.u16[7]) ); } PX_FORCE_INLINE VecU16V V4I16CompareGt(VecU16V a, VecU16V b) { return VecU16V ( BOOL_TO_U16(a.i16[0] > b.i16[0]), BOOL_TO_U16(a.i16[1] > b.i16[1]), BOOL_TO_U16(a.i16[2] > b.i16[2]), BOOL_TO_U16(a.i16[3] > b.i16[3]), BOOL_TO_U16(a.i16[4] > b.i16[4]), BOOL_TO_U16(a.i16[5] > b.i16[5]), BOOL_TO_U16(a.i16[6] > b.i16[6]), BOOL_TO_U16(a.i16[7] > b.i16[7]) ); } PX_FORCE_INLINE Vec4V Vec4V_From_VecU32V(VecU32V a) { return Vec4V(PxF32((a).u32[0]), PxF32((a).u32[1]), PxF32((a).u32[2]), PxF32((a).u32[3])); } PX_FORCE_INLINE Vec4V Vec4V_From_VecI32V(VecI32V a) { return Vec4V(PxF32((a).i32[0]), PxF32((a).i32[1]), PxF32((a).i32[2]), PxF32((a).i32[3])); } PX_FORCE_INLINE VecI32V VecI32V_From_Vec4V(Vec4V a) { float* data = reinterpret_cast<float*>(&a); return VecI32V(PxI32(data[0]), PxI32(data[1]), PxI32(data[2]), PxI32(data[3])); } PX_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecU32V(VecU32V a) { Vec4V b = *reinterpret_cast<Vec4V*>(&a); return b; } PX_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecI32V(VecI32V a) { Vec4V b = *reinterpret_cast<Vec4V*>(&a); return b; } PX_FORCE_INLINE VecU32V VecU32V_ReinterpretFrom_Vec4V(Vec4V a) { VecU32V b = *reinterpret_cast<VecU32V*>(&a); return b; } PX_FORCE_INLINE VecI32V VecI32V_ReinterpretFrom_Vec4V(Vec4V a) { VecI32V b = *reinterpret_cast<VecI32V*>(&a); return b; } template <int index> PX_FORCE_INLINE VecU32V V4U32SplatElement(VecU32V a) { return VecU32V((a).u32[index], (a).u32[index], (a).u32[index], (a).u32[index]); } template <int index> PX_FORCE_INLINE VecU32V V4U32SplatElement(BoolV a) { const PxU32 u = (&a.ux)[index]; return VecU32V(u, u, u, u); } template <int index> PX_FORCE_INLINE Vec4V V4SplatElement(Vec4V a) { float* data = reinterpret_cast<float*>(&a); return Vec4V(data[index], data[index], data[index], data[index]); } PX_FORCE_INLINE VecU32V U4LoadXYZW(PxU32 x, PxU32 y, PxU32 z, PxU32 w) { return VecU32V(x, y, z, w); } PX_FORCE_INLINE Vec4V V4Abs(const Vec4V a) { return V4Max(a, V4Neg(a)); } PX_FORCE_INLINE BoolV V4IsEqU32(const VecU32V a, const VecU32V b) { return BoolV(BOOL_TO_U32(a.u32[0] == b.u32[0]), BOOL_TO_U32(a.u32[1] == b.u32[1]), BOOL_TO_U32(a.u32[2] == b.u32[2]), BOOL_TO_U32(a.u32[3] == b.u32[3])); } PX_FORCE_INLINE VecU32V U4Load(const PxU32 i) { return VecU32V(i, i, i, i); } PX_FORCE_INLINE VecU32V U4LoadU(const PxU32* i) { return VecU32V(i[0], i[1], i[2], i[3]); } PX_FORCE_INLINE VecU32V U4LoadA(const PxU32* i) { return VecU32V(i[0], i[1], i[2], i[3]); } PX_FORCE_INLINE VecI32V I4LoadXYZW(const PxI32& x, const PxI32& y, const PxI32& z, const PxI32& w) { return VecI32V(x, y, z, w); } PX_FORCE_INLINE VecI32V I4Load(const PxI32 i) { return VecI32V(i, i, i, i); } PX_FORCE_INLINE VecI32V I4LoadU(const PxI32* i) { return VecI32V(i[0], i[1], i[2], i[3]); } PX_FORCE_INLINE VecI32V I4LoadA(const PxI32* i) { return VecI32V(i[0], i[1], i[2], i[3]); } PX_FORCE_INLINE VecI32V VecI32V_Add(const VecI32VArg a, const VecI32VArg b) { return VecI32V(a.i32[0] + b.i32[0], a.i32[1] + b.i32[1], a.i32[2] + b.i32[2], a.i32[3] + b.i32[3]); } 
PX_FORCE_INLINE VecI32V VecI32V_Sub(const VecI32VArg a, const VecI32VArg b) { return VecI32V(a.i32[0] - b.i32[0], a.i32[1] - b.i32[1], a.i32[2] - b.i32[2], a.i32[3] - b.i32[3]); } PX_FORCE_INLINE BoolV VecI32V_IsGrtr(const VecI32VArg a, const VecI32VArg b) { return BoolV(BOOL_TO_U32(a.i32[0] > b.i32[0]), BOOL_TO_U32(a.i32[1] > b.i32[1]), BOOL_TO_U32(a.i32[2] > b.i32[2]), BOOL_TO_U32(a.i32[3] > b.i32[3])); } PX_FORCE_INLINE BoolV VecI32V_IsEq(const VecI32VArg a, const VecI32VArg b) { return BoolV(BOOL_TO_U32(a.i32[0] == b.i32[0]), BOOL_TO_U32(a.i32[1] == b.i32[1]), BOOL_TO_U32(a.i32[2] == b.i32[2]), BOOL_TO_U32(a.i32[3] == b.i32[3])); } PX_FORCE_INLINE VecI32V V4I32Sel(const BoolV c, const VecI32V a, const VecI32V b) { return VecI32V(c.ux ? a.i32[0] : b.i32[0], c.uy ? a.i32[1] : b.i32[1], c.uz ? a.i32[2] : b.i32[2], c.uw ? a.i32[3] : b.i32[3]); } PX_FORCE_INLINE VecI32V VecI32V_Zero() { return VecI32V(0, 0, 0, 0); } PX_FORCE_INLINE VecI32V VecI32V_One() { return VecI32V(1, 1, 1, 1); } PX_FORCE_INLINE VecI32V VecI32V_Two() { return VecI32V(2, 2, 2, 2); } PX_FORCE_INLINE VecI32V VecI32V_MinusOne() { return VecI32V(-1, -1, -1, -1); } PX_FORCE_INLINE VecU32V U4Zero() { return VecU32V(0, 0, 0, 0); } PX_FORCE_INLINE VecU32V U4One() { return VecU32V(1, 1, 1, 1); } PX_FORCE_INLINE VecU32V U4Two() { return VecU32V(2, 2, 2, 2); } PX_FORCE_INLINE VecShiftV VecI32V_PrepareShift(const VecI32VArg shift) { return shift; } PX_FORCE_INLINE VecI32V VecI32V_LeftShift(const VecI32VArg a, const VecShiftVArg count) { return VecI32V(a.i32[0] << count.i32[0], a.i32[1] << count.i32[1], a.i32[2] << count.i32[2], a.i32[3] << count.i32[3]); } PX_FORCE_INLINE VecI32V VecI32V_RightShift(const VecI32VArg a, const VecShiftVArg count) { return VecI32V(a.i32[0] >> count.i32[0], a.i32[1] >> count.i32[1], a.i32[2] >> count.i32[2], a.i32[3] >> count.i32[3]); } PX_FORCE_INLINE VecI32V VecI32V_LeftShift(const VecI32VArg a, const PxU32 count) { return VecI32V(a.i32[0] << count, a.i32[1] << count, a.i32[2] << count, a.i32[3] << count); } PX_FORCE_INLINE VecI32V VecI32V_RightShift(const VecI32VArg a, const PxU32 count) { return VecI32V(a.i32[0] >> count, a.i32[1] >> count, a.i32[2] >> count, a.i32[3] >> count); } PX_FORCE_INLINE VecI32V VecI32V_And(const VecI32VArg a, const VecI32VArg b) { return VecI32V(a.i32[0] & b.i32[0], a.i32[1] & b.i32[1], a.i32[2] & b.i32[2], a.i32[3] & b.i32[3]); } PX_FORCE_INLINE VecI32V VecI32V_Or(const VecI32VArg a, const VecI32VArg b) { return VecI32V(a.i32[0] | b.i32[0], a.i32[1] | b.i32[1], a.i32[2] | b.i32[2], a.i32[3] | b.i32[3]); } PX_FORCE_INLINE VecI32V VecI32V_GetX(const VecI32VArg a) { return VecI32V(a.i32[0], a.i32[0], a.i32[0], a.i32[0]); } PX_FORCE_INLINE VecI32V VecI32V_GetY(const VecI32VArg a) { return VecI32V(a.i32[1], a.i32[1], a.i32[1], a.i32[1]); } PX_FORCE_INLINE VecI32V VecI32V_GetZ(const VecI32VArg a) { return VecI32V(a.i32[2], a.i32[2], a.i32[2], a.i32[2]); } PX_FORCE_INLINE VecI32V VecI32V_GetW(const VecI32VArg a) { return VecI32V(a.i32[3], a.i32[3], a.i32[3], a.i32[3]); } PX_FORCE_INLINE VecI32V VecI32V_Sel(const BoolV c, const VecI32VArg a, const VecI32VArg b) { return VecI32V(c.ux ? a.i32[0] : b.i32[0], c.uy ? a.i32[1] : b.i32[1], c.uz ? a.i32[2] : b.i32[2], c.uw ? 
a.i32[3] : b.i32[3]); } PX_FORCE_INLINE VecI32V VecI32V_Merge(const VecI32VArg a, const VecI32VArg b, const VecI32VArg c, const VecI32VArg d) { return VecI32V(a.i32[0], b.i32[0], c.i32[0], d.i32[0]); } PX_FORCE_INLINE void PxI32_From_VecI32V(const VecI32VArg a, PxI32* i) { *i = a.i32[0]; } PX_FORCE_INLINE VecI32V VecI32V_From_BoolV(const BoolVArg b) { return VecI32V(PxI32(b.ux), PxI32(b.uy), PxI32(b.uz), PxI32(b.uw)); } PX_FORCE_INLINE VecU32V VecU32V_From_BoolV(const BoolVArg b) { return VecU32V(b.ux, b.uy, b.uz, b.uw); } PX_FORCE_INLINE void QuatGetMat33V(const QuatVArg q, Vec3V& column0, Vec3V& column1, Vec3V& column2) { const FloatV one = FOne(); const FloatV x = V4GetX(q); const FloatV y = V4GetY(q); const FloatV z = V4GetZ(q); const FloatV w = V4GetW(q); const FloatV x2 = FAdd(x, x); const FloatV y2 = FAdd(y, y); const FloatV z2 = FAdd(z, z); const FloatV xx = FMul(x2, x); const FloatV yy = FMul(y2, y); const FloatV zz = FMul(z2, z); const FloatV xy = FMul(x2, y); const FloatV xz = FMul(x2, z); const FloatV xw = FMul(x2, w); const FloatV yz = FMul(y2, z); const FloatV yw = FMul(y2, w); const FloatV zw = FMul(z2, w); const FloatV v = FSub(one, xx); column0 = V3Merge(FSub(FSub(one, yy), zz), FAdd(xy, zw), FSub(xz, yw)); column1 = V3Merge(FSub(xy, zw), FSub(v, zz), FAdd(yz, xw)); column2 = V3Merge(FAdd(xz, yw), FSub(yz, xw), FSub(v, yy)); } // not used /* PX_FORCE_INLINE Vec4V V4LoadAligned(Vec4V* addr) { return *addr; } */ /* PX_FORCE_INLINE Vec4V V4LoadUnaligned(Vec4V* addr) { return *addr; } */ /* PX_FORCE_INLINE Vec4V V4Ceil(const Vec4V a) { return Vec4V(PxCeil(a.x), PxCeil(a.y), PxCeil(a.z), PxCeil(a.w)); } PX_FORCE_INLINE Vec4V V4Floor(const Vec4V a) { return Vec4V(PxFloor(a.x), PxFloor(a.y), PxFloor(a.z), PxFloor(a.w)); } */ /* PX_FORCE_INLINE VecU32V V4ConvertToU32VSaturate(const Vec4V a, PxU32 power) { PX_ASSERT(power == 0 && "Non-zero power not supported in convertToU32VSaturate"); PX_UNUSED(power); // prevent warning in release builds PxF32 ffffFFFFasFloat = PxF32(0xFFFF0000); return VecU32V( PxU32(PxClamp<PxF32>((a).x, 0.0f, ffffFFFFasFloat)), PxU32(PxClamp<PxF32>((a).y, 0.0f, ffffFFFFasFloat)), PxU32(PxClamp<PxF32>((a).z, 0.0f, ffffFFFFasFloat)), PxU32(PxClamp<PxF32>((a).w, 0.0f, ffffFFFFasFloat))); } */ } // namespace aos #if !PX_DOXYGEN } // namespace physx #endif #if PX_GCC_FAMILY #pragma GCC diagnostic pop #endif #endif
58,429
C
24.382276
154
0.626008
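Usage sketch for the scalar AoS vector math above (not part of the original header): it builds a triangle normal with the Vec3V helpers shown in that file. The function name triangleNormal is illustrative, and the foundation/PxVecMath.h include is assumed to be the public entry point that pulls in these inlines.

#include "foundation/PxVec3.h"
#include "foundation/PxVecMath.h"   // assumed public entry point for the aos vector math

using namespace physx;
using namespace physx::aos;

// Illustrative helper: unit normal of triangle (p0, p1, p2), returning 'fallback'
// for degenerate triangles. Only operations defined in the header above are used.
PxVec3 triangleNormal(const PxVec3& p0, const PxVec3& p1, const PxVec3& p2, const PxVec3& fallback)
{
	const Vec3V a = V3LoadU(p0);
	const Vec3V b = V3LoadU(p1);
	const Vec3V c = V3LoadU(p2);

	const Vec3V e0 = V3Sub(b, a);                                       // first edge
	const Vec3V e1 = V3Sub(c, a);                                       // second edge
	const Vec3V n  = V3NormalizeSafe(V3Cross(e0, e1), V3LoadU(fallback));

	PxVec3 out;
	V3StoreU(n, out);                                                   // copy back to a plain PxVec3
	return out;
}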
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxMutex.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_MUTEX_H #define PX_MUTEX_H #include "foundation/PxAllocator.h" /* * This <new> inclusion is a best known fix for gcc 4.4.1 error: * Creating object file for apex/src/PsAllocator.cpp ... * In file included from apex/include/PsFoundation.h:30, * from apex/src/PsAllocator.cpp:26: * apex/include/PsMutex.h: In constructor 'physx::PxMutexT<Alloc>::MutexT(const Alloc&)': * apex/include/PsMutex.h:92: error: no matching function for call to 'operator new(unsigned int, * physx::PxMutexImpl*&)' * <built-in>:0: note: candidates are: void* operator new(unsigned int) */ #include <new> #if !PX_DOXYGEN namespace physx { #endif class PX_FOUNDATION_API PxMutexImpl { public: /** The constructor for Mutex creates a mutex. It is initially unlocked. */ PxMutexImpl(); /** The destructor for Mutex deletes the mutex. */ ~PxMutexImpl(); /** Acquire (lock) the mutex. If the mutex is already locked by another thread, this method blocks until the mutex is unlocked. */ void lock(); /** Acquire (lock) the mutex. If the mutex is already locked by another thread, this method returns false without blocking. */ bool trylock(); /** Release (unlock) the mutex. */ void unlock(); /** Size of this class. */ static uint32_t getSize(); }; template <typename Alloc = PxReflectionAllocator<PxMutexImpl> > class PxMutexT : protected Alloc { PX_NOCOPY(PxMutexT) public: class ScopedLock { PxMutexT<Alloc>& mMutex; PX_NOCOPY(ScopedLock) public: PX_INLINE ScopedLock(PxMutexT<Alloc>& mutex) : mMutex(mutex) { mMutex.lock(); } PX_INLINE ~ScopedLock() { mMutex.unlock(); } }; /** The constructor for Mutex creates a mutex. It is initially unlocked. */ PxMutexT(const Alloc& alloc = Alloc()) : Alloc(alloc) { mImpl = reinterpret_cast<PxMutexImpl*>(Alloc::allocate(PxMutexImpl::getSize(), PX_FL)); PX_PLACEMENT_NEW(mImpl, PxMutexImpl)(); } /** The destructor for Mutex deletes the mutex. 
*/ ~PxMutexT() { mImpl->~PxMutexImpl(); Alloc::deallocate(mImpl); } /** Acquire (lock) the mutex. If the mutex is already locked by another thread, this method blocks until the mutex is unlocked. */ PX_FORCE_INLINE void lock() const { mImpl->lock(); } /** Acquire (lock) the mutex. If the mutex is already locked by another thread, this method returns false without blocking; it returns true if the lock was successfully acquired. */ PX_FORCE_INLINE bool trylock() const { return mImpl->trylock(); } /** Release (unlock) the mutex. The calling thread must have previously acquired the mutex via lock() or trylock(); otherwise the call is an error. */ PX_FORCE_INLINE void unlock() const { mImpl->unlock(); } private: PxMutexImpl* mImpl; }; class PX_FOUNDATION_API PxReadWriteLock { PX_NOCOPY(PxReadWriteLock) public: PxReadWriteLock(); ~PxReadWriteLock(); // "takeLock" can only be false if the thread already holds the mutex, e.g. if it already acquired the write lock void lockReader(bool takeLock); void lockWriter(); void unlockReader(); void unlockWriter(); private: class ReadWriteLockImpl* mImpl; }; typedef PxMutexT<> PxMutex; #if !PX_DOXYGEN } // namespace physx #endif #endif
4,872
C
25.483696
114
0.721059
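A minimal usage sketch for the PxMutexT/PxMutex class above. The Counter type is illustrative only, and it assumes the foundation (and therefore the default allocator used by PxMutex) has already been initialized.

#include "foundation/PxMutex.h"

using namespace physx;

// Illustrative shared counter guarded by a PxMutex.
struct Counter
{
	PxMutex mLock;
	int     mValue;

	Counter() : mValue(0) {}

	void increment()
	{
		PxMutex::ScopedLock guard(mLock);  // locks in the constructor, unlocks in the destructor
		++mValue;
	}

	bool tryIncrement()
	{
		if(!mLock.trylock())               // non-blocking attempt; false if another thread holds the lock
			return false;
		++mValue;
		mLock.unlock();
		return true;
	}
};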
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxAllocatorCallback.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_ALLOCATOR_CALLBACK_H #define PX_ALLOCATOR_CALLBACK_H /** \addtogroup foundation @{ */ #include "foundation/PxFoundationConfig.h" #include "foundation/Px.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief Abstract base class for an application defined memory allocator that can be used by the Nv library. \note The SDK state should not be modified from within any allocation/free function. <b>Threading:</b> All methods of this class should be thread safe as it can be called from the user thread or the physics processing thread(s). */ class PxAllocatorCallback { public: virtual ~PxAllocatorCallback() { } /** \brief Allocates size bytes of memory, which must be 16-byte aligned. This method should never return NULL. If you run out of memory, then you should terminate the app or take some other appropriate action. <b>Threading:</b> This function should be thread safe as it can be called in the context of the user thread and physics processing thread(s). \param size Number of bytes to allocate. \param typeName Name of the datatype that is being allocated \param filename The source file which allocated the memory \param line The source line which allocated the memory \return The allocated block of memory. */ virtual void* allocate(size_t size, const char* typeName, const char* filename, int line) = 0; /** \brief Frees memory previously allocated by allocate(). <b>Threading:</b> This function should be thread safe as it can be called in the context of the user thread and physics processing thread(s). \param ptr Memory to free. */ virtual void deallocate(void* ptr) = 0; }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
3,412
C
34.926315
108
0.754103
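A minimal PxAllocatorCallback sketch (the class name DefaultAllocator16 is illustrative, not part of the SDK): it satisfies the 16-byte alignment requirement documented above by over-allocating with std::malloc and stashing the original pointer just in front of the block handed back to the SDK.

#include "foundation/PxAllocatorCallback.h"
#include <cstdlib>

using namespace physx;

class DefaultAllocator16 : public PxAllocatorCallback
{
public:
	void* allocate(size_t size, const char* /*typeName*/, const char* /*filename*/, int /*line*/)
	{
		// Over-allocate so we can both align to 16 bytes and remember the raw pointer.
		void* raw = std::malloc(size + 16 + sizeof(void*));
		if(!raw)
			return NULL;                               // a real implementation should treat this as fatal
		size_t addr = reinterpret_cast<size_t>(raw) + sizeof(void*);
		addr = (addr + 15) & ~size_t(15);              // round up to a 16-byte boundary
		reinterpret_cast<void**>(addr)[-1] = raw;      // stash the block actually returned by malloc
		return reinterpret_cast<void*>(addr);
	}

	void deallocate(void* ptr)
	{
		if(ptr)
			std::free(reinterpret_cast<void**>(ptr)[-1]);
	}
};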
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxIO.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_IO_H #define PX_IO_H /** \addtogroup common @{ */ #include "foundation/PxSimpleTypes.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief Input stream class for I/O. The user needs to supply a PxInputStream implementation to a number of methods to allow the SDK to read data. */ class PxInputStream { public: /** \brief read from the stream. The number of bytes read may be less than the number requested. \param[in] dest the destination address to which the data will be read \param[in] count the number of bytes requested \return the number of bytes read from the stream. */ virtual uint32_t read(void* dest, uint32_t count) = 0; virtual ~PxInputStream() { } }; /** \brief Input data class for I/O which provides random read access. The user needs to supply a PxInputData implementation to a number of methods to allow the SDK to read data. */ class PxInputData : public PxInputStream { public: /** \brief return the length of the input data \return size in bytes of the input data */ virtual uint32_t getLength() const = 0; /** \brief seek to the given offset from the start of the data. \param[in] offset the offset to seek to. If greater than the length of the data, this call is equivalent to seek(length); */ virtual void seek(uint32_t offset) = 0; /** \brief return the current offset from the start of the data \return the offset to seek to. */ virtual uint32_t tell() const = 0; virtual ~PxInputData() { } }; /** \brief Output stream class for I/O. The user needs to supply a PxOutputStream implementation to a number of methods to allow the SDK to write data. */ class PxOutputStream { public: /** \brief write to the stream. The number of bytes written may be less than the number sent. 
\param[in] src the source address from which the data will be written \param[in] count the number of bytes to be written \return the number of bytes written to the stream by this call. */ virtual uint32_t write(const void* src, uint32_t count) = 0; virtual ~PxOutputStream() { } }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
3,836
C
26.604316
111
0.735401
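A small PxOutputStream sketch (MemoryOutputStream is an illustrative name, not an SDK class): it appends every write() call to an in-memory buffer, which is a common way to capture serialized or cooked data produced through this interface.

#include "foundation/PxIO.h"
#include <cstdint>
#include <vector>

using namespace physx;

class MemoryOutputStream : public PxOutputStream
{
public:
	virtual uint32_t write(const void* src, uint32_t count)
	{
		const uint8_t* bytes = static_cast<const uint8_t*>(src);
		mData.insert(mData.end(), bytes, bytes + count);  // append the incoming bytes
		return count;                                     // every byte was accepted
	}

	const uint8_t* data() const { return mData.empty() ? NULL : &mData[0]; }
	uint32_t       size() const { return uint32_t(mData.size()); }

private:
	std::vector<uint8_t> mData;
};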
NVIDIA-Omniverse/PhysX/physx/include/foundation/Px.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_H #define PX_H /** \addtogroup foundation @{ */ #include "foundation/PxSimpleTypes.h" /** files to always include */ #include <string.h> #include <stdlib.h> #if !PX_DOXYGEN namespace physx { #endif typedef uint32_t PxU32; class PxAllocatorCallback; class PxErrorCallback; struct PxErrorCode; class PxInputStream; class PxInputData; class PxOutputStream; template<class Type> class PxVec2T; typedef PxVec2T<float> PxVec2; template<class Type> class PxVec3T; typedef PxVec3T<float> PxVec3; template<class Type> class PxVec4T; typedef PxVec4T<float> PxVec4; template<class Type> class PxQuatT; typedef PxQuatT<float> PxQuat; template<class Type> class PxMat33T; typedef PxMat33T<float> PxMat33; template<class Type> class PxMat34T; typedef PxMat34T<float> PxMat34; template<class Type> class PxMat44T; typedef PxMat44T<float> PxMat44; template<class Type> class PxTransformT; typedef PxTransformT<float> PxTransform; class PxPlane; class PxBounds3; /** enum for empty constructor tag*/ enum PxEMPTY { PxEmpty }; /** enum for zero constructor tag for vectors and matrices */ enum PxZERO { PxZero }; /** enum for identity constructor flag for quaternions, transforms, and matrices */ enum PxIDENTITY { PxIdentity }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
3,004
C
26.568807
83
0.763316
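The PxEMPTY / PxZERO / PxIDENTITY tags declared above are consumed by constructor overloads in the math headers. A small illustrative sketch follows; it assumes the tag overloads exist in PxVec3.h, PxQuat.h, PxTransform.h and PxMat44.h, which is how the math types are typically initialized.

#include "foundation/PxVec3.h"
#include "foundation/PxQuat.h"
#include "foundation/PxTransform.h"
#include "foundation/PxMat44.h"

void tagExample()
{
	physx::PxVec3 v(physx::PxZero);          // (0, 0, 0)
	physx::PxQuat q(physx::PxIdentity);      // unit quaternion
	physx::PxTransform t(physx::PxIdentity); // identity pose
	physx::PxMat44 m(physx::PxIdentity);     // identity matrix
	(void)v; (void)q; (void)t; (void)m;
}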
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxPhysicsVersion.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_PHYSICS_VERSION_H #define PX_PHYSICS_VERSION_H /* VersionNumbers: The combination of these numbers uniquely identifies the API, and should be incremented when the SDK API changes. This may include changes to file formats. This header is included in the main SDK header files so that the entire SDK and everything that builds on it is completely rebuilt when this file changes. Thus, this file is not to include a frequently changing build number. See BuildNumber.h for that. Each of these three values should stay below 255 because sometimes they are stored in a byte. */ /** \addtogroup foundation @{ */ #define PX_PHYSICS_VERSION_MAJOR 5 #define PX_PHYSICS_VERSION_MINOR 3 #define PX_PHYSICS_VERSION_BUGFIX 1 /** The constant PX_PHYSICS_VERSION is used when creating certain PhysX module objects. This is to ensure that the application is using the same header version as the library was built with. */ #define PX_PHYSICS_VERSION ((PX_PHYSICS_VERSION_MAJOR<<24) + (PX_PHYSICS_VERSION_MINOR<<16) + (PX_PHYSICS_VERSION_BUGFIX<<8) + 0) #endif /** @} */
2,775
C
42.374999
129
0.765405
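A short sketch of how the packed constant decodes back into its components, mirroring the packing above (major<<24 | minor<<16 | bugfix<<8). In normal use the constant is simply passed verbatim to factory functions such as PxCreateFoundation / PxCreatePhysics so the headers and binaries can be checked against each other.

#include <cstdio>
#include "foundation/PxPhysicsVersion.h"

void printVersion()
{
	const unsigned v      = PX_PHYSICS_VERSION;
	const unsigned major  = (v >> 24) & 0xff;
	const unsigned minor  = (v >> 16) & 0xff;
	const unsigned bugfix = (v >> 8)  & 0xff;
	std::printf("PhysX %u.%u.%u\n", major, minor, bugfix); // e.g. "PhysX 5.3.1"
}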
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxMathIntrinsics.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_MATH_INTRINSICS_H #define PX_MATH_INTRINSICS_H #include "foundation/PxPreprocessor.h" #if PX_WINDOWS_FAMILY #include "foundation/windows/PxWindowsMathIntrinsics.h" #elif(PX_LINUX || PX_APPLE_FAMILY) #include "foundation/unix/PxUnixMathIntrinsics.h" #elif PX_SWITCH #include "foundation/switch/PxSwitchMathIntrinsics.h" #else #error "Platform not supported!" #endif /** Platform specific defines */ #if PX_WINDOWS_FAMILY #pragma intrinsic(abs) #pragma intrinsic(labs) #endif #endif
2,193
C
40.396226
74
0.770178
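The platform headers selected above expose a small set of helpers in the physx::intrinsics namespace; memSet and memCopy are the ones used elsewhere in this SDK (see PxHashInternals.h below). The sketch here assumes their signatures from that usage.

#include "foundation/PxMathIntrinsics.h"

void clearAndCopy(float* dst, const float* src, unsigned count)
{
	physx::intrinsics::memSet(dst, 0, count * sizeof(float));    // fill destination with zero bytes
	physx::intrinsics::memCopy(dst, src, count * sizeof(float)); // then copy src over it
}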
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxProfiler.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. #ifndef PX_PROFILER_H #define PX_PROFILER_H #include "foundation/Px.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief The pure virtual callback interface for general purpose instrumentation and profiling of GameWorks modules as well as applications */ class PxProfilerCallback { protected: virtual ~PxProfilerCallback() {} public: /************************************************************************************************************************** Instrumented profiling events ***************************************************************************************************************************/ /** \brief Mark the beginning of a nested profile block \param[in] eventName Event name. Must be a persistent const char * \param[in] detached True for cross thread events \param[in] contextId the context id of this zone. Zones with the same id belong to the same group. 0 is used for no specific group. \return Returns implementation-specific profiler data for this event */ virtual void* zoneStart(const char* eventName, bool detached, uint64_t contextId) = 0; /** \brief Mark the end of a nested profile block \param[in] profilerData The data returned by the corresponding zoneStart call (or NULL if not available) \param[in] eventName The name of the zone ending, must match the corresponding name passed with 'zoneStart'. Must be a persistent const char *. \param[in] detached True for cross thread events. Should match the value passed to zoneStart. \param[in] contextId The context of this zone. Should match the value passed to zoneStart. \note eventName plus contextId can be used to uniquely match up start and end of a zone. 
*/ virtual void zoneEnd(void* profilerData, const char* eventName, bool detached, uint64_t contextId) = 0; }; class PxProfileScoped { public: PX_FORCE_INLINE PxProfileScoped(PxProfilerCallback* callback, const char* eventName, bool detached, uint64_t contextId) : mCallback(callback), mProfilerData(NULL) { if(mCallback) { mEventName = eventName; mContextId = contextId; mDetached = detached; mProfilerData = mCallback->zoneStart(eventName, detached, contextId); } } PX_FORCE_INLINE ~PxProfileScoped() { if(mCallback) mCallback->zoneEnd(mProfilerData, mEventName, mDetached, mContextId); } PxProfilerCallback* mCallback; const char* mEventName; void* mProfilerData; uint64_t mContextId; bool mDetached; }; #if !PX_DOXYGEN } // namespace physx #endif #endif
4,065
C
38.096153
163
0.711439
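A minimal sketch of a PxProfilerCallback implementation together with the PxProfileScoped helper defined above; the printf-based "profiler" is illustrative only.

#include <cstdio>
#include "foundation/PxProfiler.h"

class PrintProfiler : public physx::PxProfilerCallback
{
public:
	void* zoneStart(const char* eventName, bool /*detached*/, uint64_t contextId) override
	{
		std::printf("begin %s (ctx %llu)\n", eventName, (unsigned long long)contextId);
		return NULL; // no per-zone payload needed
	}
	void zoneEnd(void* /*profilerData*/, const char* eventName, bool /*detached*/, uint64_t contextId) override
	{
		std::printf("end   %s (ctx %llu)\n", eventName, (unsigned long long)contextId);
	}
};

void profiledWork(physx::PxProfilerCallback* cb)
{
	physx::PxProfileScoped zone(cb, "profiledWork", false, 0); // event name must be a persistent literal
	// ... work to be measured ...
}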
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxVecMathAoSScalar.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_VEC_MATH_AOS_SCALAR_H #define PX_VEC_MATH_AOS_SCALAR_H #if COMPILE_VECTOR_INTRINSICS #error Scalar version should not be included when using vector intrinsics. 
#endif #if !PX_DOXYGEN namespace physx { #endif namespace aos { struct VecI16V; struct VecU16V; struct VecI32V; struct VecU32V; struct Vec4V; typedef Vec4V QuatV; PX_ALIGN_PREFIX(16) struct FloatV { PxF32 x; PxF32 pad[3]; FloatV() { } FloatV(const PxF32 _x) : x(_x) { } } PX_ALIGN_SUFFIX(16); PX_ALIGN_PREFIX(16) struct Vec4V { PxF32 x, y, z, w; Vec4V() { } Vec4V(const PxF32 _x, const PxF32 _y, const PxF32 _z, const PxF32 _w) : x(_x), y(_y), z(_z), w(_w) { } } PX_ALIGN_SUFFIX(16); PX_ALIGN_PREFIX(16) struct Vec3V { PxF32 x, y, z; PxF32 pad; Vec3V() { } Vec3V(const PxF32 _x, const PxF32 _y, const PxF32 _z) : x(_x), y(_y), z(_z), pad(0.0f) { } } PX_ALIGN_SUFFIX(16); PX_ALIGN_PREFIX(16) struct BoolV { PxU32 ux, uy, uz, uw; BoolV() { } BoolV(const PxU32 _x, const PxU32 _y, const PxU32 _z, const PxU32 _w) : ux(_x), uy(_y), uz(_z), uw(_w) { } } PX_ALIGN_SUFFIX(16); struct Mat33V { Mat33V() { } Mat33V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2) : col0(c0), col1(c1), col2(c2) { } Vec3V col0; Vec3V col1; Vec3V col2; }; struct Mat34V { Mat34V() { } Mat34V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2, const Vec3V& c3) : col0(c0), col1(c1), col2(c2), col3(c3) { } Vec3V col0; Vec3V col1; Vec3V col2; Vec3V col3; }; struct Mat43V { Mat43V() { } Mat43V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2) : col0(c0), col1(c1), col2(c2) { } Vec4V col0; Vec4V col1; Vec4V col2; }; struct Mat44V { Mat44V() { } Mat44V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2, const Vec4V& c3) : col0(c0), col1(c1), col2(c2), col3(c3) { } Vec4V col0; Vec4V col1; Vec4V col2; Vec4V col3; }; PX_ALIGN_PREFIX(16) struct VecU32V { PxU32 u32[4]; PX_FORCE_INLINE VecU32V() { } PX_FORCE_INLINE VecU32V(PxU32 a, PxU32 b, PxU32 c, PxU32 d) { u32[0] = a; u32[1] = b; u32[2] = c; u32[3] = d; } } PX_ALIGN_SUFFIX(16); PX_ALIGN_PREFIX(16) struct VecI32V { PxI32 i32[4]; PX_FORCE_INLINE VecI32V() { } PX_FORCE_INLINE VecI32V(PxI32 a, PxI32 b, PxI32 c, PxI32 d) { i32[0] = a; i32[1] = b; i32[2] = c; i32[3] = d; } } PX_ALIGN_SUFFIX(16); PX_ALIGN_PREFIX(16) struct VecI16V { PxI16 i16[8]; PX_FORCE_INLINE VecI16V() { } PX_FORCE_INLINE VecI16V(PxI16 a, PxI16 b, PxI16 c, PxI16 d, PxI16 e, PxI16 f, PxI16 g, PxI16 h) { i16[0] = a; i16[1] = b; i16[2] = c; i16[3] = d; i16[4] = e; i16[5] = f; i16[6] = g; i16[7] = h; } } PX_ALIGN_SUFFIX(16); PX_ALIGN_PREFIX(16) struct VecU16V { union { PxU16 u16[8]; PxI16 i16[8]; }; PX_FORCE_INLINE VecU16V() { } PX_FORCE_INLINE VecU16V(PxU16 a, PxU16 b, PxU16 c, PxU16 d, PxU16 e, PxU16 f, PxU16 g, PxU16 h) { u16[0] = a; u16[1] = b; u16[2] = c; u16[3] = d; u16[4] = e; u16[5] = f; u16[6] = g; u16[7] = h; } } PX_ALIGN_SUFFIX(16); #define FloatVArg FloatV & #define Vec3VArg Vec3V & #define Vec4VArg Vec4V & #define BoolVArg BoolV & #define VecU32VArg VecU32V & #define VecI32VArg VecI32V & #define VecU16VArg VecU16V & #define VecI16VArg VecI16V & #define QuatVArg QuatV & #define VecCrossV Vec3V typedef VecI32V VecShiftV; #define VecShiftVArg VecShiftV & } // namespace aos #if !PX_DOXYGEN } // namespace physx #endif #endif
5,084
C
19.178571
116
0.672109
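A small sketch exercising the scalar fallback types above; each struct is padded and aligned to 16 bytes so it stays layout-compatible with the SIMD versions of the same types. It assumes the header is reached through the usual vector-math include (PxVecMath.h) on a platform where COMPILE_VECTOR_INTRINSICS is 0, since the header errors out otherwise.

#include "foundation/PxVecMath.h" // assumed umbrella header that selects the scalar path

void scalarAosExample()
{
	using namespace physx::aos;
	FloatV f(1.0f);                    // one float plus 12 bytes of padding
	Vec3V v(1.0f, 2.0f, 3.0f);         // x, y, z plus a zeroed pad float
	Vec4V q(0.0f, 0.0f, 0.0f, 1.0f);   // full 4-lane value
	Mat33V m(Vec3V(1.0f, 0.0f, 0.0f),  // column-major 3x3 identity
	         Vec3V(0.0f, 1.0f, 0.0f),
	         Vec3V(0.0f, 0.0f, 1.0f));
	(void)f; (void)v; (void)q; (void)m;
}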
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxSocket.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_SOCKET_H #define PX_SOCKET_H #include "foundation/PxUserAllocated.h" #if !PX_DOXYGEN namespace physx { #endif /** Socket abstraction API */ class PX_FOUNDATION_API PxSocket : public PxUserAllocated { public: static const uint32_t DEFAULT_BUFFER_SIZE; PxSocket(bool inEnableBuffering = true, bool blocking = true); virtual ~PxSocket(); /*! Opens a network socket for input and/or output \param host Name of the host to connect to. This can be an IP, URL, etc \param port The port to connect to on the remote host \param timeout Timeout in ms until the connection must be established. \return True if the connection was successful, false otherwise */ bool connect(const char* host, uint16_t port, uint32_t timeout = 1000); /*! Opens a network socket for input and/or output as a server. Put the connection in listening mode \param port The port on which the socket listens */ bool listen(uint16_t port); /*! Accept a connection on a socket that is in listening mode \note This method only supports a single connection client. Additional clients that connect to the listening port will overwrite the existing socket handle. \param block whether or not the call should block \return whether a connection was established */ bool accept(bool block); /*! Disconnects an open socket */ void disconnect(); /*! Returns whether the socket is currently open (connected) or not. \return True if the socket is connected, false otherwise */ bool isConnected() const; /*! Returns the name of the connected host. This is the same as the string that was supplied to the connect call. \return The name of the connected host */ const char* getHost() const; /*! Returns the port of the connected host. This is the same as the port that was supplied to the connect call. \return The port of the connected host */ uint16_t getPort() const; /*! Flushes the output stream. 
Until the stream is flushed, there is no guarantee that the written data has actually reached the destination storage. Flush forces all buffered data to be sent to the output. \note flush always blocks. If the socket is in non-blocking mode, this will result in the thread spinning. \return True if the flush was successful, false otherwise */ bool flush(); /*! Writes data to the output stream. \param data Pointer to a block of data to write to the stream \param length Amount of data to write, in bytes \return Number of bytes actually written. This could be lower than length if the socket is non-blocking. */ uint32_t write(const uint8_t* data, uint32_t length); /*! Reads data from the input stream. \param data Pointer to a buffer where the read data will be stored. \param length Amount of data to read, in bytes. \return Number of bytes actually read. This could be lower than length if the stream end is encountered or the socket is non-blocking. */ uint32_t read(uint8_t* data, uint32_t length); /*! Sets blocking mode of the socket. Socket must be connected, otherwise calling this method won't have any effect. */ void setBlocking(bool blocking); /*! Returns whether read/write/flush calls to the socket are blocking. \return True if the socket is blocking. */ bool isBlocking() const; private: class SocketImpl* mImpl; }; #if !PX_DOXYGEN } // namespace physx #endif #endif
5,087
C
26.06383
98
0.744643
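A short usage sketch for the socket abstraction above; the host and port are placeholders and error handling is reduced to early returns.

#include "foundation/PxSocket.h"

void sendHello()
{
	physx::PxSocket socket;                      // buffered, blocking by default
	if(!socket.connect("127.0.0.1", 5425, 1000)) // placeholder host/port, 1 second timeout
		return;
	const char msg[] = "hello";
	socket.write(reinterpret_cast<const uint8_t*>(msg), sizeof(msg));
	socket.flush();                              // force buffered bytes onto the wire
	uint8_t reply[64];
	const uint32_t got = socket.read(reply, sizeof(reply)); // may return fewer bytes than requested
	(void)got;
	socket.disconnect();
}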
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxBasicTemplates.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_BASIC_TEMPLATES_H #define PX_BASIC_TEMPLATES_H #include "foundation/PxPreprocessor.h" #if !PX_DOXYGEN namespace physx { #endif template <typename A> struct PxEqual { bool operator()(const A& a, const A& b) const { return a == b; } }; template <typename A> struct PxLess { bool operator()(const A& a, const A& b) const { return a < b; } }; template <typename A> struct PxGreater { bool operator()(const A& a, const A& b) const { return a > b; } }; template <class F, class S> class PxPair { public: F first; S second; PX_CUDA_CALLABLE PX_INLINE PxPair() : first(F()), second(S()) { } PX_CUDA_CALLABLE PX_INLINE PxPair(const F& f, const S& s) : first(f), second(s) { } PX_CUDA_CALLABLE PX_INLINE PxPair(const PxPair& p) : first(p.first), second(p.second) { } PX_CUDA_CALLABLE PX_INLINE PxPair& operator=(const PxPair& p) { first = p.first; second = p.second; return *this; } PX_CUDA_CALLABLE PX_INLINE bool operator==(const PxPair& p) const { return first == p.first && second == p.second; } PX_CUDA_CALLABLE PX_INLINE bool operator<(const PxPair& p) const { if (first < p.first) return true; else return !(p.first < first) && (second < p.second); } }; template <unsigned int A> struct PxLogTwo { static const unsigned int value = PxLogTwo<(A >> 1)>::value + 1; }; template <> struct PxLogTwo<1> { static const unsigned int value = 0; }; template <typename T> struct PxUnConst { typedef T Type; }; template <typename T> struct PxUnConst<const T> { typedef T Type; }; template <typename T> T PxPointerOffset(void* p, ptrdiff_t offset) { return reinterpret_cast<T>(reinterpret_cast<char*>(p) + offset); } template <typename T> T PxPointerOffset(const void* p, ptrdiff_t offset) { return reinterpret_cast<T>(reinterpret_cast<const char*>(p) + offset); } template <class T> PX_CUDA_CALLABLE PX_INLINE void PxSwap(T& x, T& y) { const T tmp = x; x = y; y = tmp; } #if 
!PX_DOXYGEN } // namespace physx #endif #endif
3,782
C
24.910959
87
0.695664
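A few illustrative uses of the templates above: PxPair as a simple two-field aggregate with lexicographic comparison, PxLogTwo evaluated at compile time, and PxSwap on plain values.

#include "foundation/PxBasicTemplates.h"

void basicTemplatesExample()
{
	physx::PxPair<int, float> p(2, 3.5f);
	physx::PxPair<int, float> q(2, 4.0f);
	const bool less = p < q;                             // compares first, then second
	static_assert(physx::PxLogTwo<16>::value == 4, "");  // compile-time log2 of a power of two
	int a = 1, b = 2;
	physx::PxSwap(a, b);                                 // a == 2, b == 1
	(void)less;
}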
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxPreprocessor.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_PREPROCESSOR_H #define PX_PREPROCESSOR_H #include <stddef.h> /** \addtogroup foundation @{ */ #ifndef PX_ENABLE_FEATURES_UNDER_CONSTRUCTION #define PX_ENABLE_FEATURES_UNDER_CONSTRUCTION 0 #endif #define PX_STRINGIZE_HELPER(X) #X #define PX_STRINGIZE(X) PX_STRINGIZE_HELPER(X) #define PX_CONCAT_HELPER(X, Y) X##Y #define PX_CONCAT(X, Y) PX_CONCAT_HELPER(X, Y) /* The following preprocessor identifiers specify compiler, OS, and architecture. All definitions have a value of 1 or 0, use '#if' instead of '#ifdef'. 
*/ /** Compiler defines, see http://sourceforge.net/p/predef/wiki/Compilers/ */ #if defined(_MSC_VER) #if _MSC_VER >= 1920 #define PX_VC 16 #elif _MSC_VER >= 1910 #define PX_VC 15 #elif _MSC_VER >= 1900 #define PX_VC 14 #elif _MSC_VER >= 1800 #define PX_VC 12 #elif _MSC_VER >= 1700 #define PX_VC 11 #elif _MSC_VER >= 1600 #define PX_VC 10 #elif _MSC_VER >= 1500 #define PX_VC 9 #else #error "Unknown VC version" #endif #elif defined(__clang__) #define PX_CLANG 1 #if defined (__clang_major__) #define PX_CLANG_MAJOR __clang_major__ #elif defined (_clang_major) #define PX_CLANG_MAJOR _clang_major #else #define PX_CLANG_MAJOR 0 #endif #elif defined(__GNUC__) // note: __clang__ implies __GNUC__ #define PX_GCC 1 #else #error "Unknown compiler" #endif /** Operating system defines, see http://sourceforge.net/p/predef/wiki/OperatingSystems/ */ #if defined(_WIN64) #define PX_WIN64 1 #elif defined(_WIN32) // note: _M_PPC implies _WIN32 #define PX_WIN32 1 #elif defined(__linux__) || defined (__EMSCRIPTEN__) #define PX_LINUX 1 #elif defined(__APPLE__) #define PX_OSX 1 #elif defined(__NX__) #define PX_SWITCH 1 #else #error "Unknown operating system" #endif /** Architecture defines, see http://sourceforge.net/p/predef/wiki/Architectures/ */ #if defined(__x86_64__) || defined(_M_X64) #define PX_X64 1 #elif defined(__i386__) || defined(_M_IX86) || defined (__EMSCRIPTEN__) #define PX_X86 1 #elif defined(__arm64__) || defined(__aarch64__) || defined(_M_ARM64) #define PX_A64 1 #elif defined(__arm__) || defined(_M_ARM) #define PX_ARM 1 #elif defined(__ppc__) || defined(_M_PPC) || defined(__CELLOS_LV2__) #define PX_PPC 1 #else #error "Unknown architecture" #endif /** SIMD defines */ #if !defined(PX_SIMD_DISABLED) #if defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) || (defined (__EMSCRIPTEN__) && defined(__SSE2__)) #define PX_SSE2 1 #endif #if defined(_M_ARM) || defined(__ARM_NEON__) || defined(__ARM_NEON) #define PX_NEON 1 #endif #if defined(_M_PPC) || defined(__CELLOS_LV2__) #define PX_VMX 1 #endif #endif /** define anything not defined on this platform to 0 */ #ifndef PX_VC #define PX_VC 0 #endif #ifndef PX_CLANG #define PX_CLANG 0 #endif #ifndef PX_GCC #define PX_GCC 0 #endif #ifndef PX_WIN64 #define PX_WIN64 0 #endif #ifndef PX_WIN32 #define PX_WIN32 0 #endif #ifndef PX_LINUX #define PX_LINUX 0 #endif #ifndef PX_OSX #define PX_OSX 0 #endif #ifndef PX_SWITCH #define PX_SWITCH 0 #endif #ifndef PX_X64 #define PX_X64 0 #endif #ifndef PX_X86 #define PX_X86 0 #endif #ifndef PX_A64 #define PX_A64 0 #endif #ifndef PX_ARM #define PX_ARM 0 #endif #ifndef PX_PPC #define PX_PPC 0 #endif #ifndef PX_SSE2 #define PX_SSE2 0 #endif #ifndef PX_NEON #define PX_NEON 0 #endif #ifndef PX_VMX #define PX_VMX 0 #endif /* define anything not defined through the command line to 0 */ #ifndef PX_DEBUG #define PX_DEBUG 0 #endif #ifndef PX_CHECKED #define PX_CHECKED 0 #endif #ifndef PX_PROFILE #define PX_PROFILE 0 #endif #ifndef PX_DEBUG_CRT #define PX_DEBUG_CRT 0 #endif #ifndef PX_NVTX #define PX_NVTX 0 #endif #ifndef PX_DOXYGEN #define PX_DOXYGEN 0 #endif /** family shortcuts */ // compiler #define PX_GCC_FAMILY (PX_CLANG || PX_GCC) // os #define PX_WINDOWS_FAMILY (PX_WIN32 || PX_WIN64) #define PX_LINUX_FAMILY PX_LINUX #define PX_APPLE_FAMILY PX_OSX // equivalent to #if __APPLE__ #define PX_UNIX_FAMILY (PX_LINUX_FAMILY || PX_APPLE_FAMILY) // shortcut for unix/posix platforms #if defined(__EMSCRIPTEN__) #define PX_EMSCRIPTEN 1 #else #define PX_EMSCRIPTEN 0 #endif // architecture #define PX_INTEL_FAMILY 
(PX_X64 || PX_X86) #define PX_ARM_FAMILY (PX_ARM || PX_A64) #define PX_P64_FAMILY (PX_X64 || PX_A64) // shortcut for 64-bit architectures /** C++ standard library defines */ #if defined(_LIBCPP_VERSION) || PX_WIN64 || PX_WIN32 || PX_EMSCRIPTEN #define PX_LIBCPP 1 #else #define PX_LIBCPP 0 #endif // legacy define for PhysX #define PX_WINDOWS (PX_WINDOWS_FAMILY && !PX_ARM_FAMILY) /** Assert macro */ #ifndef PX_ENABLE_ASSERTS #if PX_DEBUG && !defined(__CUDACC__) #define PX_ENABLE_ASSERTS 1 #else #define PX_ENABLE_ASSERTS 0 #endif #endif /** DLL export macros */ #ifndef PX_C_EXPORT #if PX_WINDOWS_FAMILY || PX_LINUX #define PX_C_EXPORT extern "C" #else #define PX_C_EXPORT #endif #endif #if PX_UNIX_FAMILY&& __GNUC__ >= 4 #define PX_UNIX_EXPORT __attribute__((visibility("default"))) #else #define PX_UNIX_EXPORT #endif #if PX_WINDOWS_FAMILY #define PX_DLL_EXPORT __declspec(dllexport) #define PX_DLL_IMPORT __declspec(dllimport) #else #define PX_DLL_EXPORT PX_UNIX_EXPORT #define PX_DLL_IMPORT #endif /** Calling convention */ #ifndef PX_CALL_CONV #if PX_WINDOWS_FAMILY #define PX_CALL_CONV __cdecl #else #define PX_CALL_CONV #endif #endif /** Pack macros - disabled on SPU because they are not supported */ #if PX_VC #define PX_PUSH_PACK_DEFAULT __pragma(pack(push, 8)) #define PX_POP_PACK __pragma(pack(pop)) #elif PX_GCC_FAMILY #define PX_PUSH_PACK_DEFAULT _Pragma("pack(push, 8)") #define PX_POP_PACK _Pragma("pack(pop)") #else #define PX_PUSH_PACK_DEFAULT #define PX_POP_PACK #endif /** Inline macro */ #define PX_INLINE inline #if PX_WINDOWS_FAMILY #pragma inline_depth(255) #endif /** Force inline macro */ #if PX_VC #define PX_FORCE_INLINE __forceinline #elif PX_LINUX // Workaround; Fedora Core 3 do not agree with force inline and PxcPool #define PX_FORCE_INLINE inline #elif PX_GCC_FAMILY #define PX_FORCE_INLINE inline __attribute__((always_inline)) #else #define PX_FORCE_INLINE inline #endif /** Noinline macro */ #if PX_WINDOWS_FAMILY #define PX_NOINLINE __declspec(noinline) #elif PX_GCC_FAMILY #define PX_NOINLINE __attribute__((noinline)) #else #define PX_NOINLINE #endif /** Restrict macro */ #if defined(__CUDACC__) #define PX_RESTRICT __restrict__ #else #define PX_RESTRICT __restrict #endif /** Noalias macro */ #if PX_WINDOWS_FAMILY #define PX_NOALIAS __declspec(noalias) #else #define PX_NOALIAS #endif /** Override macro */ #if PX_WINDOWS_FAMILY #define PX_OVERRIDE override #else // PT: we don't really need to support it on all platforms, as long as // we compile the code on at least one platform that supports it. #define PX_OVERRIDE #endif /** Final macro */ #define PX_FINAL final /** Unused attribute macro. Only on GCC for now. */ #if PX_GCC_FAMILY #define PX_UNUSED_ATTRIBUTE __attribute__((unused)) #else #define PX_UNUSED_ATTRIBUTE #endif /** Alignment macros PX_ALIGN_PREFIX and PX_ALIGN_SUFFIX can be used for type alignment instead of aligning individual variables as follows: PX_ALIGN_PREFIX(16) struct A { ... } PX_ALIGN_SUFFIX(16); This declaration style is parsed correctly by Visual Assist. 
*/ #ifndef PX_ALIGN #if PX_WINDOWS_FAMILY #define PX_ALIGN(alignment, decl) __declspec(align(alignment)) decl #define PX_ALIGN_PREFIX(alignment) __declspec(align(alignment)) #define PX_ALIGN_SUFFIX(alignment) #elif PX_GCC_FAMILY #define PX_ALIGN(alignment, decl) decl __attribute__((aligned(alignment))) #define PX_ALIGN_PREFIX(alignment) #define PX_ALIGN_SUFFIX(alignment) __attribute__((aligned(alignment))) #elif defined __CUDACC__ #define PX_ALIGN(alignment, decl) __align__(alignment) decl #define PX_ALIGN_PREFIX(alignment) #define PX_ALIGN_SUFFIX(alignment) __align__(alignment)) #else #define PX_ALIGN(alignment, decl) #define PX_ALIGN_PREFIX(alignment) #define PX_ALIGN_SUFFIX(alignment) #endif #endif /** Deprecated macro - To deprecate a function: Place PX_DEPRECATED at the start of the function header (leftmost word). - To deprecate a 'typedef', a 'struct' or a 'class': Place PX_DEPRECATED directly after the keywords ('typedef', 'struct', 'class'). Use these macro definitions to create warnings for deprecated functions \#define PX_DEPRECATED __declspec(deprecated) // Microsoft \#define PX_DEPRECATED __attribute__((deprecated())) // GCC */ #define PX_DEPRECATED /** General defines */ #if PX_LINUX && PX_CLANG && !(defined __CUDACC__) #define PX_COMPILE_TIME_ASSERT(exp) \ _Pragma(" clang diagnostic push") \ _Pragma(" clang diagnostic ignored \"-Wc++98-compat\"") \ static_assert(exp, "") \ _Pragma(" clang diagnostic pop") #else #define PX_COMPILE_TIME_ASSERT(exp) static_assert(exp, "") #endif #if PX_GCC_FAMILY #define PX_OFFSET_OF(X, Y) __builtin_offsetof(X, Y) #else #define PX_OFFSET_OF(X, Y) offsetof(X, Y) #endif #define PX_OFFSETOF_BASE 0x100 // casting the null ptr takes a special-case code path, which we don't want #define PX_OFFSET_OF_RT(Class, Member) (reinterpret_cast<size_t>(&reinterpret_cast<Class*>(PX_OFFSETOF_BASE)->Member) - size_t(PX_OFFSETOF_BASE)) #if PX_WINDOWS_FAMILY // check that exactly one of NDEBUG and _DEBUG is defined #if !defined(NDEBUG) ^ defined(_DEBUG) #error Exactly one of NDEBUG and _DEBUG needs to be defined! #endif #endif // make sure PX_CHECKED is defined in all _DEBUG configurations as well #if !PX_CHECKED && PX_DEBUG #error PX_CHECKED must be defined when PX_DEBUG is defined #endif #ifdef __CUDACC__ #define PX_CUDA_CALLABLE __host__ __device__ #else #define PX_CUDA_CALLABLE #endif // avoid unreferenced parameter warning // preferred solution: omit the parameter's name from the declaration template <class T> PX_CUDA_CALLABLE PX_INLINE void PX_UNUSED(T const&) { } // Ensure that the application hasn't tweaked the pack value to less than 8, which would break // matching between the API headers and the binaries // This assert works on win32/win64, but may need further specialization on other platforms. // Some GCC compilers need the compiler flag -malign-double to be set. // Apparently the apple-clang-llvm compiler doesn't support malign-double. 
#if PX_APPLE_FAMILY || (PX_CLANG && !PX_ARM) struct PxPackValidation { char _; long a; }; #elif PX_CLANG && PX_ARM struct PxPackValidation { char _; double a; }; #else struct PxPackValidation { char _; long long a; }; #endif // clang (as of version 3.9) cannot align doubles on 8 byte boundary when compiling for Intel 32 bit target #if !PX_APPLE_FAMILY && !PX_EMSCRIPTEN && !(PX_CLANG && PX_X86) PX_COMPILE_TIME_ASSERT(PX_OFFSET_OF(PxPackValidation, a) == 8); #endif // use in a cpp file to suppress LNK4221 #if PX_VC #define PX_DUMMY_SYMBOL \ namespace \ { \ char PxDummySymbol; \ } #else #define PX_DUMMY_SYMBOL #endif #if PX_GCC_FAMILY #define PX_WEAK_SYMBOL __attribute__((weak)) // this is to support SIMD constant merging in template specialization #else #define PX_WEAK_SYMBOL #endif // Macro for avoiding default assignment and copy, because doing this by inheritance can increase class size on some // platforms. #define PX_NOCOPY(Class) \ protected: \ Class(const Class&); \ Class& operator=(const Class&); //#define DISABLE_CUDA_PHYSX #ifndef DISABLE_CUDA_PHYSX //CUDA is currently supported on x86_64 windows and linux, and ARM_64 linux #define PX_SUPPORT_GPU_PHYSX ((PX_X64 && (PX_WINDOWS_FAMILY || PX_LINUX)) || (PX_A64 && PX_LINUX)) #else #define PX_SUPPORT_GPU_PHYSX 0 #endif #ifndef PX_SUPPORT_EXTERN_TEMPLATE #define PX_SUPPORT_EXTERN_TEMPLATE (PX_VC != 11) #else #define PX_SUPPORT_EXTERN_TEMPLATE 0 #endif #define PX_FL __FILE__, __LINE__ /** @} */ #endif
13,549
C
23.908088
145
0.708466
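Illustrative uses of a few of the macros defined above; the struct and function names are placeholders.

#include "foundation/PxPreprocessor.h"

PX_ALIGN_PREFIX(16)
struct AlignedBlock
{
	float data[4];
} PX_ALIGN_SUFFIX(16);

PX_COMPILE_TIME_ASSERT(sizeof(AlignedBlock) == 16); // four floats, 16-byte aligned

PX_FORCE_INLINE float firstLane(const AlignedBlock& b, int unusedFlag)
{
	PX_UNUSED(unusedFlag); // silences the unreferenced-parameter warning portably
	return b.data[0];
}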
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxAlignedMalloc.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_ALIGNED_MALLOC_H #define PX_ALIGNED_MALLOC_H #include "PxUserAllocated.h" /*! Allocate aligned memory. Alignment must be a power of 2! -- should be templated by a base allocator */ #if !PX_DOXYGEN namespace physx { #endif /** Allocator, which is used to access the global PxAllocatorCallback instance (used for dynamic data types template instantiation), which can align memory */ // SCS: AlignedMalloc with 3 params not found, seems not used on PC either // disabled for now to avoid GCC error template <uint32_t N, typename BaseAllocator = PxAllocator> class PxAlignedAllocator : public BaseAllocator { public: PxAlignedAllocator(const BaseAllocator& base = BaseAllocator()) : BaseAllocator(base) { } void* allocate(size_t size, const char* file, int line) { size_t pad = N - 1 + sizeof(size_t); // store offset for delete. uint8_t* base = reinterpret_cast<uint8_t*>(BaseAllocator::allocate(size + pad, file, line)); if (!base) return NULL; uint8_t* ptr = reinterpret_cast<uint8_t*>(size_t(base + pad) & ~(size_t(N) - 1)); // aligned pointer, ensuring N // is a size_t // wide mask reinterpret_cast<size_t*>(ptr)[-1] = size_t(ptr - base); // store offset return ptr; } void deallocate(void* ptr) { if (ptr == NULL) return; uint8_t* base = reinterpret_cast<uint8_t*>(ptr) - reinterpret_cast<size_t*>(ptr)[-1]; BaseAllocator::deallocate(base); } }; #if !PX_DOXYGEN } // namespace physx #endif #endif
3,233
C
34.933333
115
0.7176
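A usage sketch for the aligned allocator above. It assumes the global allocator callback has already been installed (e.g. by creating the foundation object), since the default BaseAllocator forwards allocations to it.

#include "foundation/PxAlignedMalloc.h"

void alignedAllocExample()
{
	physx::PxAlignedAllocator<16> alloc; // 16-byte alignment, default base allocator
	void* p = alloc.allocate(64, __FILE__, __LINE__);
	// p is 16-byte aligned; the offset back to the raw allocation is stored in the
	// size_t slot just in front of p, which deallocate() reads back.
	alloc.deallocate(p);
}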
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxAtomic.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_ATOMIC_H #define PX_ATOMIC_H #include "foundation/PxFoundationConfig.h" #include "foundation/PxSimpleTypes.h" #if !PX_DOXYGEN namespace physx { #endif /* set *dest equal to val. Return the old value of *dest */ PX_FOUNDATION_API PxI32 PxAtomicExchange(volatile PxI32* dest, PxI32 val); /* if *dest == comp, replace with exch. Return original value of *dest */ PX_FOUNDATION_API PxI32 PxAtomicCompareExchange(volatile PxI32* dest, PxI32 exch, PxI32 comp); /* if *dest == comp, replace with exch. Return original value of *dest */ PX_FOUNDATION_API void* PxAtomicCompareExchangePointer(volatile void** dest, void* exch, void* comp); /* increment the specified location. Return the incremented value */ PX_FOUNDATION_API PxI32 PxAtomicIncrement(volatile PxI32* val); /* decrement the specified location. Return the decremented value */ PX_FOUNDATION_API PxI32 PxAtomicDecrement(volatile PxI32* val); /* add delta to *val. Return the new value */ PX_FOUNDATION_API PxI32 PxAtomicAdd(volatile PxI32* val, PxI32 delta); /* compute the maximum of dest and val. Return the new value */ PX_FOUNDATION_API PxI32 PxAtomicMax(volatile PxI32* val, PxI32 val2); #if !PX_DOXYGEN } // namespace physx #endif #endif
2,917
C
43.212121
101
0.763113
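A small sketch using the atomic helpers above: a shared counter, and a compare-exchange loop that takes a maximum by hand (PxAtomicMax already provides this; the loop is shown only to illustrate the CAS pattern).

#include "foundation/PxAtomic.h"

static volatile physx::PxI32 gCounter = 0;

void touch()
{
	physx::PxAtomicIncrement(&gCounter); // returns the incremented value
}

void storeMax(volatile physx::PxI32* dest, physx::PxI32 candidate)
{
	for(;;)
	{
		const physx::PxI32 old = *dest;
		if(candidate <= old)
			return; // nothing to do
		if(physx::PxAtomicCompareExchange(dest, candidate, old) == old)
			return; // swap succeeded
	}
}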
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxString.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_STRING_H #define PX_STRING_H #include "foundation/PxPreprocessor.h" #include "foundation/PxSimpleTypes.h" #include "foundation/PxFoundationConfig.h" #include <stdarg.h> #if !PX_DOXYGEN namespace physx { #endif // the following functions have C99 semantics. Note that C99 requires for snprintf and vsnprintf: // * the resulting string is always NULL-terminated regardless of truncation. // * in the case of truncation the return value is the number of characters that would have been created. 
PX_FOUNDATION_API int32_t Pxsscanf(const char* buffer, const char* format, ...); PX_FOUNDATION_API int32_t Pxstrcmp(const char* str1, const char* str2); PX_FOUNDATION_API int32_t Pxstrncmp(const char* str1, const char* str2, size_t count); PX_FOUNDATION_API int32_t Pxsnprintf(char* dst, size_t dstSize, const char* format, ...); PX_FOUNDATION_API int32_t Pxvsnprintf(char* dst, size_t dstSize, const char* src, va_list arg); // strlcat and strlcpy have BSD semantics: // * dstSize is always the size of the destination buffer // * the resulting string is always NULL-terminated regardless of truncation // * in the case of truncation the return value is the length of the string that would have been created PX_FOUNDATION_API size_t Pxstrlcat(char* dst, size_t dstSize, const char* src); PX_FOUNDATION_API size_t Pxstrlcpy(char* dst, size_t dstSize, const char* src); // case-insensitive string comparison PX_FOUNDATION_API int32_t Pxstricmp(const char* str1, const char* str2); PX_FOUNDATION_API int32_t Pxstrnicmp(const char* str1, const char* str2, size_t count); // in-place string case conversion PX_FOUNDATION_API void Pxstrlwr(char* str); PX_FOUNDATION_API void Pxstrupr(char* str); /** \brief Prints the string literally (does not consume % specifier), trying to make sure it's visible to the app programmer */ PX_FOUNDATION_API void PxPrintString(const char*); #if !PX_DOXYGEN } // namespace physx #endif #endif
3,633
C
44.424999
110
0.763832
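A short sketch of the C99/BSD truncation semantics documented above; the buffer size is deliberately small so the return values show what truncation looks like.

#include "foundation/PxString.h"

void stringExample()
{
	char buf[8];
	// snprintf semantics: always NULL-terminated, return value is the length
	// that would have been written without truncation.
	const int32_t wanted = physx::Pxsnprintf(buf, sizeof(buf), "value=%d", 123456);
	const bool truncated = wanted >= int32_t(sizeof(buf));
	// strlcpy semantics: dstSize is the full buffer size, return value is the
	// length of the source string.
	const size_t srcLen = physx::Pxstrlcpy(buf, sizeof(buf), "a long source string");
	(void)truncated; (void)srcLen;
}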
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxHashInternals.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_HASH_INTERNALS_H #define PX_HASH_INTERNALS_H #include "foundation/PxAllocator.h" #include "foundation/PxBitUtils.h" #include "foundation/PxMathIntrinsics.h" #include "foundation/PxBasicTemplates.h" #include "foundation/PxHash.h" #if PX_VC #pragma warning(push) #pragma warning(disable : 4127) // conditional expression is constant #endif #if !PX_DOXYGEN namespace physx { #endif template <class Entry, class Key, class HashFn, class GetKey, class PxAllocator, bool compacting> class PxHashBase : private PxAllocator { void init(uint32_t initialTableSize, float loadFactor) { mBuffer = NULL; mEntries = NULL; mEntriesNext = NULL; mHash = NULL; mEntriesCapacity = 0; mHashSize = 0; mLoadFactor = loadFactor; mFreeList = uint32_t(EOL); mTimestamp = 0; mEntriesCount = 0; if(initialTableSize) reserveInternal(initialTableSize); } public: typedef Entry EntryType; PxHashBase(uint32_t initialTableSize = 64, float loadFactor = 0.75f) : PxAllocator("hashBase") { init(initialTableSize, loadFactor); } PxHashBase(uint32_t initialTableSize, float loadFactor, const PxAllocator& alloc) : PxAllocator(alloc) { init(initialTableSize, loadFactor); } PxHashBase(const PxAllocator& alloc) : PxAllocator(alloc) { init(64, 0.75f); } ~PxHashBase() { destroy(); // No need to clear() if(mBuffer) PxAllocator::deallocate(mBuffer); } static const uint32_t EOL = 0xffffffff; PX_INLINE Entry* create(const Key& k, bool& exists) { uint32_t h = 0; if(mHashSize) { h = hash(k); uint32_t index = mHash[h]; while(index != EOL && !HashFn().equal(GetKey()(mEntries[index]), k)) index = mEntriesNext[index]; exists = index != EOL; if(exists) return mEntries + index; } else exists = false; if(freeListEmpty()) { grow(); h = hash(k); } uint32_t entryIndex = freeListGetNext(); mEntriesNext[entryIndex] = mHash[h]; mHash[h] = entryIndex; mEntriesCount++; mTimestamp++; return mEntries + entryIndex; } 
PX_INLINE const Entry* find(const Key& k) const { if(!mEntriesCount) return NULL; const uint32_t h = hash(k); uint32_t index = mHash[h]; while(index != EOL && !HashFn().equal(GetKey()(mEntries[index]), k)) index = mEntriesNext[index]; return index != EOL ? mEntries + index : NULL; } PX_INLINE bool erase(const Key& k, Entry& e) { if(!mEntriesCount) return false; const uint32_t h = hash(k); uint32_t* ptr = mHash + h; while(*ptr != EOL && !HashFn().equal(GetKey()(mEntries[*ptr]), k)) ptr = mEntriesNext + *ptr; if(*ptr == EOL) return false; PX_PLACEMENT_NEW(&e, Entry)(mEntries[*ptr]); return eraseInternal(ptr); } PX_INLINE bool erase(const Key& k) { if(!mEntriesCount) return false; const uint32_t h = hash(k); uint32_t* ptr = mHash + h; while(*ptr != EOL && !HashFn().equal(GetKey()(mEntries[*ptr]), k)) ptr = mEntriesNext + *ptr; if(*ptr == EOL) return false; return eraseInternal(ptr); } PX_INLINE uint32_t size() const { return mEntriesCount; } PX_INLINE uint32_t capacity() const { return mHashSize; } void clear() { if(!mHashSize || mEntriesCount == 0) return; destroy(); intrinsics::memSet(mHash, EOL, mHashSize * sizeof(uint32_t)); const uint32_t sizeMinus1 = mEntriesCapacity - 1; for(uint32_t i = 0; i < sizeMinus1; i++) { PxPrefetchLine(mEntriesNext + i, 128); mEntriesNext[i] = i + 1; } mEntriesNext[mEntriesCapacity - 1] = uint32_t(EOL); mFreeList = 0; mEntriesCount = 0; } void reserve(uint32_t size) { if(size > mHashSize) reserveInternal(size); } PX_INLINE const Entry* getEntries() const { return mEntries; } PX_INLINE Entry* insertUnique(const Key& k) { PX_ASSERT(find(k) == NULL); uint32_t h = hash(k); uint32_t entryIndex = freeListGetNext(); mEntriesNext[entryIndex] = mHash[h]; mHash[h] = entryIndex; mEntriesCount++; mTimestamp++; return mEntries + entryIndex; } private: void destroy() { for(uint32_t i = 0; i < mHashSize; i++) { for(uint32_t j = mHash[i]; j != EOL; j = mEntriesNext[j]) mEntries[j].~Entry(); } } template <typename HK, typename GK, class A, bool comp> PX_NOINLINE void copy(const PxHashBase<Entry, Key, HK, GK, A, comp>& other); // free list management - if we're coalescing, then we use mFreeList to hold // the top of the free list and it should always be equal to size(). Otherwise, // we build a free list in the next() pointers. 
PX_INLINE void freeListAdd(uint32_t index) { if(compacting) { mFreeList--; PX_ASSERT(mFreeList == mEntriesCount); } else { mEntriesNext[index] = mFreeList; mFreeList = index; } } PX_INLINE void freeListAdd(uint32_t start, uint32_t end) { if(!compacting) { for(uint32_t i = start; i < end - 1; i++) // add the new entries to the free list mEntriesNext[i] = i + 1; // link in old free list mEntriesNext[end - 1] = mFreeList; PX_ASSERT(mFreeList != end - 1); mFreeList = start; } else if(mFreeList == EOL) // don't reset the free ptr for the compacting hash unless it's empty mFreeList = start; } PX_INLINE uint32_t freeListGetNext() { PX_ASSERT(!freeListEmpty()); if(compacting) { PX_ASSERT(mFreeList == mEntriesCount); return mFreeList++; } else { uint32_t entryIndex = mFreeList; mFreeList = mEntriesNext[mFreeList]; return entryIndex; } } PX_INLINE bool freeListEmpty() const { if(compacting) return mEntriesCount == mEntriesCapacity; else return mFreeList == EOL; } PX_INLINE void replaceWithLast(uint32_t index) { PX_PLACEMENT_NEW(mEntries + index, Entry)(mEntries[mEntriesCount]); mEntries[mEntriesCount].~Entry(); mEntriesNext[index] = mEntriesNext[mEntriesCount]; uint32_t h = hash(GetKey()(mEntries[index])); uint32_t* ptr; for(ptr = mHash + h; *ptr != mEntriesCount; ptr = mEntriesNext + *ptr) PX_ASSERT(*ptr != EOL); *ptr = index; } PX_INLINE uint32_t hash(const Key& k, uint32_t hashSize) const { return HashFn()(k) & (hashSize - 1); } PX_INLINE uint32_t hash(const Key& k) const { return hash(k, mHashSize); } PX_INLINE bool eraseInternal(uint32_t* ptr) { const uint32_t index = *ptr; *ptr = mEntriesNext[index]; mEntries[index].~Entry(); mEntriesCount--; mTimestamp++; if (compacting && index != mEntriesCount) replaceWithLast(index); freeListAdd(index); return true; } PX_NOINLINE void reserveInternal(uint32_t size) { if(!PxIsPowerOfTwo(size)) size = PxNextPowerOfTwo(size); PX_ASSERT(!(size & (size - 1))); // decide whether iteration can be done on the entries directly bool resizeCompact = compacting || freeListEmpty(); // define new table sizes uint32_t oldEntriesCapacity = mEntriesCapacity; uint32_t newEntriesCapacity = uint32_t(float(size) * mLoadFactor); uint32_t newHashSize = size; // allocate new common buffer and setup pointers to new tables uint8_t* newBuffer; uint32_t* newHash; uint32_t* newEntriesNext; Entry* newEntries; { uint32_t newHashByteOffset = 0; uint32_t newEntriesNextBytesOffset = newHashByteOffset + newHashSize * sizeof(uint32_t); uint32_t newEntriesByteOffset = newEntriesNextBytesOffset + newEntriesCapacity * sizeof(uint32_t); newEntriesByteOffset += (16 - (newEntriesByteOffset & 15)) & 15; uint32_t newBufferByteSize = newEntriesByteOffset + newEntriesCapacity * sizeof(Entry); newBuffer = reinterpret_cast<uint8_t*>(PxAllocator::allocate(newBufferByteSize, PX_FL)); PX_ASSERT(newBuffer); newHash = reinterpret_cast<uint32_t*>(newBuffer + newHashByteOffset); newEntriesNext = reinterpret_cast<uint32_t*>(newBuffer + newEntriesNextBytesOffset); newEntries = reinterpret_cast<Entry*>(newBuffer + newEntriesByteOffset); } // initialize new hash table intrinsics::memSet(newHash, int32_t(EOL), newHashSize * sizeof(uint32_t)); // iterate over old entries, re-hash and create new entries if(resizeCompact) { // check that old free list is empty - we don't need to copy the next entries PX_ASSERT(compacting || mFreeList == EOL); for(uint32_t index = 0; index < mEntriesCount; ++index) { uint32_t h = hash(GetKey()(mEntries[index]), newHashSize); newEntriesNext[index] = newHash[h]; newHash[h] = index; 
PX_PLACEMENT_NEW(newEntries + index, Entry)(mEntries[index]); mEntries[index].~Entry(); } } else { // copy old free list, only required for non compact resizing intrinsics::memCopy(newEntriesNext, mEntriesNext, mEntriesCapacity * sizeof(uint32_t)); for(uint32_t bucket = 0; bucket < mHashSize; bucket++) { uint32_t index = mHash[bucket]; while(index != EOL) { uint32_t h = hash(GetKey()(mEntries[index]), newHashSize); newEntriesNext[index] = newHash[h]; PX_ASSERT(index != newHash[h]); newHash[h] = index; PX_PLACEMENT_NEW(newEntries + index, Entry)(mEntries[index]); mEntries[index].~Entry(); index = mEntriesNext[index]; } } } // swap buffer and pointers PxAllocator::deallocate(mBuffer); mBuffer = newBuffer; mHash = newHash; mHashSize = newHashSize; mEntriesNext = newEntriesNext; mEntries = newEntries; mEntriesCapacity = newEntriesCapacity; freeListAdd(oldEntriesCapacity, newEntriesCapacity); } void grow() { PX_ASSERT((mFreeList == EOL) || (compacting && (mEntriesCount == mEntriesCapacity))); uint32_t size = mHashSize == 0 ? 16 : mHashSize * 2; reserve(size); } uint8_t* mBuffer; Entry* mEntries; uint32_t* mEntriesNext; // same size as mEntries uint32_t* mHash; uint32_t mEntriesCapacity; uint32_t mHashSize; float mLoadFactor; uint32_t mFreeList; uint32_t mTimestamp; uint32_t mEntriesCount; // number of entries public: class Iter { public: PX_INLINE Iter(PxHashBase& b) : mBucket(0), mEntry(uint32_t(b.EOL)), mTimestamp(b.mTimestamp), mBase(b) { if(mBase.mEntriesCapacity > 0) { mEntry = mBase.mHash[0]; skip(); } } PX_INLINE void check() const { PX_ASSERT(mTimestamp == mBase.mTimestamp); } PX_INLINE const Entry& operator*() const { check(); return mBase.mEntries[mEntry]; } PX_INLINE Entry& operator*() { check(); return mBase.mEntries[mEntry]; } PX_INLINE const Entry* operator->() const { check(); return mBase.mEntries + mEntry; } PX_INLINE Entry* operator->() { check(); return mBase.mEntries + mEntry; } PX_INLINE Iter operator++() { check(); advance(); return *this; } PX_INLINE Iter operator++(int) { check(); Iter i = *this; advance(); return i; } PX_INLINE bool done() const { check(); return mEntry == mBase.EOL; } private: PX_INLINE void advance() { mEntry = mBase.mEntriesNext[mEntry]; skip(); } PX_INLINE void skip() { while(mEntry == mBase.EOL) { if(++mBucket == mBase.mHashSize) break; mEntry = mBase.mHash[mBucket]; } } Iter& operator=(const Iter&); uint32_t mBucket; uint32_t mEntry; uint32_t mTimestamp; PxHashBase& mBase; }; /*! 
Iterate over entries in a hash base and allow entry erase while iterating */ class PxEraseIterator { public: PX_INLINE PxEraseIterator(PxHashBase& b): mBase(b) { reset(); } PX_INLINE Entry* eraseCurrentGetNext(bool eraseCurrent) { if(eraseCurrent && mCurrentEntryIndexPtr) { mBase.eraseInternal(mCurrentEntryIndexPtr); // if next was valid return the same ptr, if next was EOL search new hash entry if(*mCurrentEntryIndexPtr != mBase.EOL) return mBase.mEntries + *mCurrentEntryIndexPtr; else return traverseHashEntries(); } // traverse mHash to find next entry if(mCurrentEntryIndexPtr == NULL) return traverseHashEntries(); const uint32_t index = *mCurrentEntryIndexPtr; if(mBase.mEntriesNext[index] == mBase.EOL) { return traverseHashEntries(); } else { mCurrentEntryIndexPtr = mBase.mEntriesNext + index; return mBase.mEntries + *mCurrentEntryIndexPtr; } } PX_INLINE void reset() { mCurrentHashIndex = 0; mCurrentEntryIndexPtr = NULL; } private: PX_INLINE Entry* traverseHashEntries() { mCurrentEntryIndexPtr = NULL; while (mCurrentEntryIndexPtr == NULL && mCurrentHashIndex < mBase.mHashSize) { if (mBase.mHash[mCurrentHashIndex] != mBase.EOL) { mCurrentEntryIndexPtr = mBase.mHash + mCurrentHashIndex; mCurrentHashIndex++; return mBase.mEntries + *mCurrentEntryIndexPtr; } else { mCurrentHashIndex++; } } return NULL; } PxEraseIterator& operator=(const PxEraseIterator&); private: uint32_t* mCurrentEntryIndexPtr; uint32_t mCurrentHashIndex; PxHashBase& mBase; }; }; template <class Entry, class Key, class HashFn, class GetKey, class PxAllocator, bool compacting> template <typename HK, typename GK, class A, bool comp> PX_NOINLINE void PxHashBase<Entry, Key, HashFn, GetKey, PxAllocator, compacting>::copy(const PxHashBase<Entry, Key, HK, GK, A, comp>& other) { reserve(other.mEntriesCount); for(uint32_t i = 0; i < other.mEntriesCount; i++) { for(uint32_t j = other.mHash[i]; j != EOL; j = other.mEntriesNext[j]) { const Entry& otherEntry = other.mEntries[j]; bool exists; Entry* newEntry = create(GK()(otherEntry), exists); PX_ASSERT(!exists); PX_PLACEMENT_NEW(newEntry, Entry)(otherEntry); } } } template <class Key, class HashFn, class PxAllocator = typename PxAllocatorTraits<Key>::Type, bool Coalesced = false> class PxHashSetBase { PX_NOCOPY(PxHashSetBase) public: struct GetKey { PX_INLINE const Key& operator()(const Key& e) { return e; } }; typedef PxHashBase<Key, Key, HashFn, GetKey, PxAllocator, Coalesced> BaseMap; typedef typename BaseMap::Iter Iterator; PxHashSetBase(uint32_t initialTableSize, float loadFactor, const PxAllocator& alloc) : mBase(initialTableSize, loadFactor, alloc) { } PxHashSetBase(const PxAllocator& alloc) : mBase(64, 0.75f, alloc) { } PxHashSetBase(uint32_t initialTableSize = 64, float loadFactor = 0.75f) : mBase(initialTableSize, loadFactor) { } bool insert(const Key& k) { bool exists; Key* e = mBase.create(k, exists); if(!exists) PX_PLACEMENT_NEW(e, Key)(k); return !exists; } PX_INLINE bool contains(const Key& k) const { return mBase.find(k) != 0; } PX_INLINE bool erase(const Key& k) { return mBase.erase(k); } PX_INLINE uint32_t size() const { return mBase.size(); } PX_INLINE uint32_t capacity() const { return mBase.capacity(); } PX_INLINE void reserve(uint32_t size) { mBase.reserve(size); } PX_INLINE void clear() { mBase.clear(); } protected: BaseMap mBase; }; template <class Key, class Value, class HashFn, class PxAllocator = typename PxAllocatorTraits<PxPair<const Key, Value> >::Type> class PxHashMapBase { PX_NOCOPY(PxHashMapBase) public: typedef PxPair<const Key, Value> Entry; struct 
GetKey { PX_INLINE const Key& operator()(const Entry& e) { return e.first; } }; typedef PxHashBase<Entry, Key, HashFn, GetKey, PxAllocator, true> BaseMap; typedef typename BaseMap::Iter Iterator; typedef typename BaseMap::PxEraseIterator EraseIterator; PxHashMapBase(uint32_t initialTableSize, float loadFactor, const PxAllocator& alloc) : mBase(initialTableSize, loadFactor, alloc) { } PxHashMapBase(const PxAllocator& alloc) : mBase(64, 0.75f, alloc) { } PxHashMapBase(uint32_t initialTableSize = 64, float loadFactor = 0.75f) : mBase(initialTableSize, loadFactor) { } bool insert(const Key /*&*/ k, const Value /*&*/ v) { bool exists; Entry* e = mBase.create(k, exists); if(!exists) PX_PLACEMENT_NEW(e, Entry)(k, v); return !exists; } Value& operator[](const Key& k) { bool exists; Entry* e = mBase.create(k, exists); if(!exists) PX_PLACEMENT_NEW(e, Entry)(k, Value()); return e->second; } PX_INLINE const Entry* find(const Key& k) const { return mBase.find(k); } PX_INLINE bool erase(const Key& k) { return mBase.erase(k); } PX_INLINE bool erase(const Key& k, Entry& e) { return mBase.erase(k, e); } PX_INLINE uint32_t size() const { return mBase.size(); } PX_INLINE uint32_t capacity() const { return mBase.capacity(); } PX_INLINE Iterator getIterator() { return Iterator(mBase); } PX_INLINE EraseIterator getEraseIterator() { return EraseIterator(mBase); } PX_INLINE void reserve(uint32_t size) { mBase.reserve(size); } PX_INLINE void clear() { mBase.clear(); } protected: BaseMap mBase; }; #if !PX_DOXYGEN } // namespace physx #endif #if PX_VC #pragma warning(pop) #endif #endif
18,386
C
22.186633
128
0.681279
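The PxHashBase machinery in the record above is normally consumed through the convenience templates built on PxHashSetBase/PxHashMapBase. A minimal usage sketch follows; it assumes the PxHashMap<Key, Value> wrapper from foundation/PxHashMap.h (not part of this record) with its default PxHash hasher, and the example function name is illustrative only.

// Hedged sketch: assumes physx::PxHashMap from foundation/PxHashMap.h, which wraps
// the PxHashMapBase shown above. Entry is PxPair<const Key, Value>.
#include "foundation/PxHashMap.h"

using namespace physx;

void hashMapExample()
{
    PxHashMap<PxU32, float> map;

    map.insert(1, 10.0f);            // returns false if the key already existed
    map[2] = 20.0f;                  // operator[] default-constructs missing entries

    if(const PxHashMap<PxU32, float>::Entry* e = map.find(2))
    {
        float value = e->second;     // key is e->first, payload is e->second
        PX_UNUSED(value);
    }

    // Iterate all entries; the iterator asserts if the map is mutated while iterating.
    for(PxHashMap<PxU32, float>::Iterator it = map.getIterator(); !it.done(); ++it)
    {
        // use it->first / it->second here
    }

    map.erase(1);
}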
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxSync.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_SYNC_H #define PX_SYNC_H #include "foundation/PxAllocator.h" #if !PX_DOXYGEN namespace physx { #endif /*! Implementation notes: * - Calling set() on an already signaled Sync does not change its state. * - Calling reset() on an already reset Sync does not change its state. * - Calling set() on a reset Sync wakes all waiting threads (potential for thread contention). * - Calling wait() on an already signaled Sync will return true immediately. * - NOTE: be careful when pulsing an event with set() followed by reset(), because a * thread that is not waiting on the event will miss the signal. */ class PX_FOUNDATION_API PxSyncImpl { public: static const uint32_t waitForever = 0xffffffff; PxSyncImpl(); ~PxSyncImpl(); /** Wait on the object for at most the given number of ms. Returns * true if the object is signaled. Sync::waitForever will block forever * or until the object is signaled. */ bool wait(uint32_t milliseconds = waitForever); /** Signal the synchronization object, waking all threads waiting on it */ void set(); /** Reset the synchronization object */ void reset(); /** Size of this class. */ static uint32_t getSize(); }; /*! Implementation notes: * - Calling set() on an already signaled Sync does not change its state. * - Calling reset() on an already reset Sync does not change its state. * - Calling set() on a reset Sync wakes all waiting threads (potential for thread contention). * - Calling wait() on an already signaled Sync will return true immediately. * - NOTE: be careful when pulsing an event with set() followed by reset(), because a * thread that is not waiting on the event will miss the signal. 
*/ template <typename Alloc = PxReflectionAllocator<PxSyncImpl> > class PxSyncT : protected Alloc { public: static const uint32_t waitForever = PxSyncImpl::waitForever; PxSyncT(const Alloc& alloc = Alloc()) : Alloc(alloc) { mImpl = reinterpret_cast<PxSyncImpl*>(Alloc::allocate(PxSyncImpl::getSize(), PX_FL)); PX_PLACEMENT_NEW(mImpl, PxSyncImpl)(); } ~PxSyncT() { mImpl->~PxSyncImpl(); Alloc::deallocate(mImpl); } /** Wait on the object for at most the given number of ms. Returns * true if the object is signaled. Sync::waitForever will block forever * or until the object is signaled. */ bool wait(uint32_t milliseconds = PxSyncImpl::waitForever) { return mImpl->wait(milliseconds); } /** Signal the synchronization object, waking all threads waiting on it */ void set() { mImpl->set(); } /** Reset the synchronization object */ void reset() { mImpl->reset(); } private: class PxSyncImpl* mImpl; }; typedef PxSyncT<> PxSync; #if !PX_DOXYGEN } // namespace physx #endif #endif
4,410
C
30.507143
94
0.73356
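A short sketch of the set/wait/reset protocol documented in the PxSync header above. The producer/consumer split and the global event are hypothetical; only the PxSync API itself is taken from the record.

// Hedged sketch of PxSync usage; gWorkReady and the thread functions are illustrative.
#include "foundation/PxSync.h"

using namespace physx;

PxSync gWorkReady;   // hypothetical shared event

void producerThread()
{
    // ... publish work to some shared queue ...
    gWorkReady.set();                    // wakes every thread currently waiting
}

void consumerThread()
{
    // wait() returns true once the event is signaled; a timeout in milliseconds
    // may be passed instead of PxSync::waitForever.
    if(gWorkReady.wait(PxSync::waitForever))
    {
        // ... consume the work ...
        gWorkReady.reset();              // re-arm; note the pulse caveat in the header
    }
}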
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxMemory.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_MEMORY_H #define PX_MEMORY_H /** \addtogroup foundation @{ */ #include "foundation/Px.h" #include "foundation/PxMathIntrinsics.h" #include "foundation/PxSimpleTypes.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief Sets the bytes of the provided buffer to zero. \param dest [out] Pointer to block of memory to set zero. \param count [in] Number of bytes to set to zero. \return Pointer to memory block (same as input) */ PX_FORCE_INLINE void* PxMemZero(void* dest, PxU32 count) { return physx::intrinsics::memZero(dest, count); } /** \brief Sets the bytes of the provided buffer to the specified value. \param dest [out] Pointer to block of memory to set to the specified value. \param c [in] Value to set the bytes of the block of memory to. \param count [in] Number of bytes to set to the specified value. \return Pointer to memory block (same as input) */ PX_FORCE_INLINE void* PxMemSet(void* dest, PxI32 c, PxU32 count) { return physx::intrinsics::memSet(dest, c, count); } /** \brief Copies the bytes of one memory block to another. The memory blocks must not overlap. \note Use #PxMemMove if memory blocks overlap. \param dest [out] Pointer to block of memory to copy to. \param src [in] Pointer to block of memory to copy from. \param count [in] Number of bytes to copy. \return Pointer to destination memory block */ PX_FORCE_INLINE void* PxMemCopy(void* dest, const void* src, PxU32 count) { return physx::intrinsics::memCopy(dest, src, count); } /** \brief Copies the bytes of one memory block to another. The memory blocks can overlap. \note Use #PxMemCopy if memory blocks do not overlap. \param dest [out] Pointer to block of memory to copy to. \param src [in] Pointer to block of memory to copy from. \param count [in] Number of bytes to copy. 
\return Pointer to destination memory block */ PX_FORCE_INLINE void* PxMemMove(void* dest, const void* src, PxU32 count) { return physx::intrinsics::memMove(dest, src, count); } /** Mark a specified amount of memory with 0xcd pattern. This is used to check that the meta data definition for serialized classes is complete in checked builds. \param ptr [out] Pointer to block of memory to initialize. \param byteSize [in] Number of bytes to initialize. */ PX_INLINE void PxMarkSerializedMemory(void* ptr, PxU32 byteSize) { #if PX_CHECKED PxMemSet(ptr, 0xcd, byteSize); #else PX_UNUSED(ptr); PX_UNUSED(byteSize); #endif } #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
4,244
C
32.164062
95
0.736805
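A quick illustration of the overlapping vs. non-overlapping copy distinction drawn in the PxMemory header above; the buffer sizes and the shift offset are arbitrary.

// Hedged sketch of the PxMemory helpers; buffers and sizes are illustrative.
#include "foundation/PxMemory.h"

using namespace physx;

void memoryExample()
{
    PxU8 src[64];
    PxU8 dst[64];

    PxMemSet(src, 0xAB, sizeof(src));        // fill with a byte pattern
    PxMemZero(dst, sizeof(dst));             // clear the destination
    PxMemCopy(dst, src, sizeof(src));        // blocks must not overlap

    // Shifting a buffer within itself overlaps, so PxMemMove is required.
    PxMemMove(src + 8, src, sizeof(src) - 8);
}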
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxSortInternals.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_SORT_INTERNALS_H #define PX_SORT_INTERNALS_H /** \addtogroup foundation @{ */ #include "foundation/PxAssert.h" #include "foundation/PxMathIntrinsics.h" #include "foundation/PxBasicTemplates.h" #include "foundation/PxUserAllocated.h" #if !PX_DOXYGEN namespace physx { #endif template <class T, class Predicate> PX_INLINE void PxMedian3(T* elements, int32_t first, int32_t last, Predicate& compare) { /* This creates sentinels because we know there is an element at the start minimum(or equal) than the pivot and an element at the end greater(or equal) than the pivot. Plus the median of 3 reduces the chance of degenerate behavour. 
*/ int32_t mid = (first + last) / 2; if(compare(elements[mid], elements[first])) PxSwap(elements[first], elements[mid]); if(compare(elements[last], elements[first])) PxSwap(elements[first], elements[last]); if(compare(elements[last], elements[mid])) PxSwap(elements[mid], elements[last]); // keep the pivot at last-1 PxSwap(elements[mid], elements[last - 1]); } template <class T, class Predicate> PX_INLINE int32_t PxPartition(T* elements, int32_t first, int32_t last, Predicate& compare) { PxMedian3(elements, first, last, compare); /* WARNING: using the line: T partValue = elements[last-1]; and changing the scan loops to: while(comparator.greater(partValue, elements[++i])); while(comparator.greater(elements[--j], partValue); triggers a compiler optimizer bug on xenon where it stores a double to the stack for partValue then loads it as a single...:-( */ int32_t i = first; // we know first is less than pivot(but i gets pre incremented) int32_t j = last - 1; // pivot is in last-1 (but j gets pre decremented) for(;;) { while(compare(elements[++i], elements[last - 1])) ; while(compare(elements[last - 1], elements[--j])) ; if(i >= j) break; PX_ASSERT(i <= last && j >= first); PxSwap(elements[i], elements[j]); } // put the pivot in place PX_ASSERT(i <= last && first <= (last - 1)); PxSwap(elements[i], elements[last - 1]); return i; } template <class T, class Predicate> PX_INLINE void PxSmallSort(T* elements, int32_t first, int32_t last, Predicate& compare) { // selection sort - could reduce to fsel on 360 with floats. for(int32_t i = first; i < last; i++) { int32_t m = i; for(int32_t j = i + 1; j <= last; j++) if(compare(elements[j], elements[m])) m = j; if(m != i) PxSwap(elements[m], elements[i]); } } template <class PxAllocator> class PxStack { PxAllocator mAllocator; uint32_t mSize, mCapacity; int32_t* mMemory; bool mRealloc; public: PxStack(int32_t* memory, uint32_t capacity, const PxAllocator& inAllocator) : mAllocator(inAllocator), mSize(0), mCapacity(capacity), mMemory(memory), mRealloc(false) { } ~PxStack() { if(mRealloc) mAllocator.deallocate(mMemory); } void grow() { mCapacity *= 2; int32_t* newMem = reinterpret_cast<int32_t*>(mAllocator.allocate(sizeof(int32_t) * mCapacity, PX_FL)); intrinsics::memCopy(newMem, mMemory, mSize * sizeof(int32_t)); if(mRealloc) mAllocator.deallocate(mMemory); mRealloc = true; mMemory = newMem; } PX_INLINE void push(int32_t start, int32_t end) { if(mSize >= mCapacity - 1) grow(); mMemory[mSize++] = start; mMemory[mSize++] = end; } PX_INLINE void pop(int32_t& start, int32_t& end) { PX_ASSERT(!empty()); end = mMemory[--mSize]; start = mMemory[--mSize]; } PX_INLINE bool empty() { return mSize == 0; } }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
5,256
C
27.112299
95
0.705289
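The helpers in the record above (PxMedian3, PxPartition, PxSmallSort, PxStack) are implementation details of PxSort in the next record, but PxSmallSort can be exercised on its own. The sketch below assumes PxLess from PxBasicTemplates.h (already included by the header) and notes that the last index is inclusive.

// Hedged sketch: direct use of the PxSmallSort selection sort shown above.
#include "foundation/PxSortInternals.h"

using namespace physx;

void smallSortExample()
{
    int values[5] = { 4, 1, 3, 5, 2 };

    // Sorts the inclusive index range [first, last]; the predicate is taken by reference.
    PxLess<int> compare;
    PxSmallSort(values, 0, 4, compare);

    // values is now { 1, 2, 3, 4, 5 }
}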
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxUnionCast.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_UNION_CAST_H #define PX_UNION_CAST_H #include "foundation/Px.h" /** \addtogroup foundation @{ */ #if !PX_DOXYGEN namespace physx { #endif // Needed for clang 7 #if PX_CLANG && PX_CLANG_MAJOR >= 7 #define USE_VOLATILE_UNION volatile #else #define USE_VOLATILE_UNION #endif template <class A, class B> PX_FORCE_INLINE A PxUnionCast(B b) { union AB { AB(B bb) : _b(bb) { } B _b; A _a; } USE_VOLATILE_UNION u(b); return u._a; } #undef USE_VOLATILE_UNION #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
2,247
C
29.378378
74
0.735648
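A typical type-punning use of PxUnionCast from the record above: reading the bit pattern of a float. Both template arguments should have the same size for the union trick to be meaningful; the function wrapper is illustrative.

// Hedged sketch of PxUnionCast usage.
#include "foundation/PxUnionCast.h"

using namespace physx;

PxU32 floatBits(float f)
{
    // Reinterpret the float's bits as an unsigned integer (0x3F800000 for 1.0f).
    return PxUnionCast<PxU32, float>(f);
}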
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxQuat.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_QUAT_H #define PX_QUAT_H /** \addtogroup foundation @{ */ #include "foundation/PxVec3.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief This is a quaternion class. For more information on quaternion mathematics consult a mathematics source on complex numbers. */ template<class Type> class PxQuatT { public: /** \brief Default constructor, does not do any initialization. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT() { } //! identity constructor PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT(PxIDENTITY) : x(Type(0.0)), y(Type(0.0)), z(Type(0.0)), w(Type(1.0)) { } /** \brief Constructor from a scalar: sets the real part w to the scalar value, and the imaginary parts (x,y,z) to zero */ explicit PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT(Type r) : x(Type(0.0)), y(Type(0.0)), z(Type(0.0)), w(r) { } /** \brief Constructor. Take note of the order of the elements! */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT(Type nx, Type ny, Type nz, Type nw) : x(nx), y(ny), z(nz), w(nw) { } /** \brief Creates from angle-axis representation. Axis must be normalized! Angle is in radians! <b>Unit:</b> Radians */ PX_CUDA_CALLABLE PX_INLINE PxQuatT(Type angleRadians, const PxVec3T<Type>& unitAxis) { PX_ASSERT(PxAbs(Type(1.0) - unitAxis.magnitude()) < Type(1e-3)); const Type a = angleRadians * Type(0.5); Type s; PxSinCos(a, s, w); x = unitAxis.x * s; y = unitAxis.y * s; z = unitAxis.z * s; } /** \brief Copy ctor. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT(const PxQuatT& v) : x(v.x), y(v.y), z(v.z), w(v.w) { } /** \brief Creates from orientation matrix. \param[in] m Rotation matrix to extract quaternion from. 
*/ PX_CUDA_CALLABLE PX_INLINE explicit PxQuatT(const PxMat33T<Type>& m); /* defined in PxMat33.h */ /** \brief returns true if quat is identity */ PX_CUDA_CALLABLE PX_FORCE_INLINE bool isIdentity() const { return x==Type(0.0) && y==Type(0.0) && z==Type(0.0) && w==Type(1.0); } /** \brief returns true if all elements are finite (not NAN or INF, etc.) */ PX_CUDA_CALLABLE bool isFinite() const { return PxIsFinite(x) && PxIsFinite(y) && PxIsFinite(z) && PxIsFinite(w); } /** \brief returns true if finite and magnitude is close to unit */ PX_CUDA_CALLABLE bool isUnit() const { const Type unitTolerance = Type(1e-3); return isFinite() && PxAbs(magnitude() - Type(1.0)) < unitTolerance; } /** \brief returns true if finite and magnitude is reasonably close to unit to allow for some accumulation of error vs isValid */ PX_CUDA_CALLABLE bool isSane() const { const Type unitTolerance = Type(1e-2); return isFinite() && PxAbs(magnitude() - Type(1.0)) < unitTolerance; } /** \brief returns true if the two quaternions are exactly equal */ PX_CUDA_CALLABLE PX_FORCE_INLINE bool operator==(const PxQuatT& q) const { return x == q.x && y == q.y && z == q.z && w == q.w; } /** \brief converts this quaternion to angle-axis representation */ PX_CUDA_CALLABLE PX_INLINE void toRadiansAndUnitAxis(Type& angle, PxVec3T<Type>& axis) const { const Type quatEpsilon = Type(1.0e-8); const Type s2 = x * x + y * y + z * z; if(s2 < quatEpsilon * quatEpsilon) // can't extract a sensible axis { angle = Type(0.0); axis = PxVec3T<Type>(Type(1.0), Type(0.0), Type(0.0)); } else { const Type s = PxRecipSqrt(s2); axis = PxVec3T<Type>(x, y, z) * s; angle = PxAbs(w) < quatEpsilon ? Type(PxPi) : PxAtan2(s2 * s, w) * Type(2.0); } } /** \brief Gets the angle between this quat and the identity quaternion. <b>Unit:</b> Radians */ PX_CUDA_CALLABLE PX_FORCE_INLINE Type getAngle() const { return PxAcos(w) * Type(2.0); } /** \brief Gets the angle between this quat and the argument <b>Unit:</b> Radians */ PX_CUDA_CALLABLE PX_FORCE_INLINE Type getAngle(const PxQuatT& q) const { return PxAcos(dot(q)) * Type(2.0); } /** \brief This is the squared 4D vector length, should be 1 for unit quaternions. */ PX_CUDA_CALLABLE PX_FORCE_INLINE Type magnitudeSquared() const { return x * x + y * y + z * z + w * w; } /** \brief returns the scalar product of this and other. */ PX_CUDA_CALLABLE PX_FORCE_INLINE Type dot(const PxQuatT& v) const { return x * v.x + y * v.y + z * v.z + w * v.w; } PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT getNormalized() const { const Type s = Type(1.0) / magnitude(); return PxQuatT(x * s, y * s, z * s, w * s); } PX_CUDA_CALLABLE PX_FORCE_INLINE Type magnitude() const { return PxSqrt(magnitudeSquared()); } // modifiers: /** \brief maps to the closest unit quaternion. */ PX_CUDA_CALLABLE PX_FORCE_INLINE Type normalize() // convert this PxQuatT to a unit quaternion { const Type mag = magnitude(); if(mag != Type(0.0)) { const Type imag = Type(1.0) / mag; x *= imag; y *= imag; z *= imag; w *= imag; } return mag; } /* \brief returns the conjugate. \note for unit quaternions, this is the inverse. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT getConjugate() const { return PxQuatT(-x, -y, -z, w); } /* \brief returns imaginary part. 
*/ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> getImaginaryPart() const { return PxVec3T<Type>(x, y, z); } /** brief computes rotation of x-axis */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> getBasisVector0() const { const Type x2 = x * Type(2.0); const Type w2 = w * Type(2.0); return PxVec3T<Type>((w * w2) - Type(1.0) + x * x2, (z * w2) + y * x2, (-y * w2) + z * x2); } /** brief computes rotation of y-axis */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> getBasisVector1() const { const Type y2 = y * Type(2.0); const Type w2 = w * Type(2.0); return PxVec3T<Type>((-z * w2) + x * y2, (w * w2) - Type(1.0) + y * y2, (x * w2) + z * y2); } /** brief computes rotation of z-axis */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> getBasisVector2() const { const Type z2 = z * Type(2.0); const Type w2 = w * Type(2.0); return PxVec3T<Type>((y * w2) + x * z2, (-x * w2) + y * z2, (w * w2) - Type(1.0) + z * z2); } /** rotates passed vec by this (assumed unitary) */ PX_CUDA_CALLABLE PX_FORCE_INLINE const PxVec3T<Type> rotate(const PxVec3T<Type>& v) const { const Type vx = Type(2.0) * v.x; const Type vy = Type(2.0) * v.y; const Type vz = Type(2.0) * v.z; const Type w2 = w * w - 0.5f; const Type dot2 = (x * vx + y * vy + z * vz); return PxVec3T<Type>((vx * w2 + (y * vz - z * vy) * w + x * dot2), (vy * w2 + (z * vx - x * vz) * w + y * dot2), (vz * w2 + (x * vy - y * vx) * w + z * dot2)); } /** inverse rotates passed vec by this (assumed unitary) */ PX_CUDA_CALLABLE PX_FORCE_INLINE const PxVec3T<Type> rotateInv(const PxVec3T<Type>& v) const { const Type vx = Type(2.0) * v.x; const Type vy = Type(2.0) * v.y; const Type vz = Type(2.0) * v.z; const Type w2 = w * w - 0.5f; const Type dot2 = (x * vx + y * vy + z * vz); return PxVec3T<Type>((vx * w2 - (y * vz - z * vy) * w + x * dot2), (vy * w2 - (z * vx - x * vz) * w + y * dot2), (vz * w2 - (x * vy - y * vx) * w + z * dot2)); } /** \brief Assignment operator */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT& operator=(const PxQuatT& p) { x = p.x; y = p.y; z = p.z; w = p.w; return *this; } PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT& operator*=(const PxQuatT& q) { const Type tx = w * q.x + q.w * x + y * q.z - q.y * z; const Type ty = w * q.y + q.w * y + z * q.x - q.z * x; const Type tz = w * q.z + q.w * z + x * q.y - q.x * y; w = w * q.w - q.x * x - y * q.y - q.z * z; x = tx; y = ty; z = tz; return *this; } PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT& operator+=(const PxQuatT& q) { x += q.x; y += q.y; z += q.z; w += q.w; return *this; } PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT& operator-=(const PxQuatT& q) { x -= q.x; y -= q.y; z -= q.z; w -= q.w; return *this; } PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT& operator*=(const Type s) { x *= s; y *= s; z *= s; w *= s; return *this; } /** quaternion multiplication */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT operator*(const PxQuatT& q) const { return PxQuatT(w * q.x + q.w * x + y * q.z - q.y * z, w * q.y + q.w * y + z * q.x - q.z * x, w * q.z + q.w * z + x * q.y - q.x * y, w * q.w - x * q.x - y * q.y - z * q.z); } /** quaternion addition */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT operator+(const PxQuatT& q) const { return PxQuatT(x + q.x, y + q.y, z + q.z, w + q.w); } /** quaternion subtraction */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT operator-() const { return PxQuatT(-x, -y, -z, -w); } PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT operator-(const PxQuatT& q) const { return PxQuatT(x - q.x, y - q.y, z - q.z, w - q.w); } PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT operator*(Type r) const { return PxQuatT(x * r, y * r, z * r, w * 
r); } /** the quaternion elements */ Type x, y, z, w; }; typedef PxQuatT<float> PxQuat; typedef PxQuatT<double> PxQuatd; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
10,768
C
25.459459
116
0.633265
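A small sketch of the angle-axis constructor and rotate() documented in the PxQuat record above. The axis must already be normalized and the angle is in radians; the function name is illustrative.

// Hedged sketch of basic PxQuat usage.
#include "foundation/PxQuat.h"

using namespace physx;

void quatExample()
{
    // 90 degree rotation about the (unit) Y axis.
    const PxQuat q(PxPi * 0.5f, PxVec3(0.0f, 1.0f, 0.0f));

    // rotate() assumes a unit quaternion; isUnit()/normalize() guard against drift.
    PX_ASSERT(q.isUnit());
    const PxVec3 rotated = q.rotate(PxVec3(1.0f, 0.0f, 0.0f));   // roughly (0, 0, -1)

    // Composition: applying q twice is q * q (180 degrees about Y here).
    const PxQuat q2 = q * q;
    PX_UNUSED(rotated);
    PX_UNUSED(q2);
}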
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxSort.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_SORT_H #define PX_SORT_H /** \addtogroup foundation @{ */ #include "foundation/PxSortInternals.h" #include "foundation/PxAlloca.h" #define PX_SORT_PARANOIA PX_DEBUG /** \brief Sorts an array of objects in ascending order, assuming that the predicate implements the < operator: @see PxLess, PxGreater */ #if PX_VC #pragma warning(push) #pragma warning(disable : 4706) // disable the warning that we did an assignment within a conditional expression, as // this was intentional. 
#endif #if !PX_DOXYGEN namespace physx { #endif template <class T, class Predicate, class PxAllocator> void PxSort(T* elements, uint32_t count, const Predicate& compare, const PxAllocator& inAllocator, const uint32_t initialStackSize = 32) { static const uint32_t SMALL_SORT_CUTOFF = 5; // must be >= 3 since we need 3 for median PX_ALLOCA(stackMem, int32_t, initialStackSize); PxStack<PxAllocator> stack(stackMem, initialStackSize, inAllocator); int32_t first = 0, last = int32_t(count - 1); if(last > first) { for(;;) { while(last > first) { PX_ASSERT(first >= 0 && last < int32_t(count)); if(uint32_t(last - first) < SMALL_SORT_CUTOFF) { PxSmallSort(elements, first, last, compare); break; } else { const int32_t partIndex = PxPartition(elements, first, last, compare); // push smaller sublist to minimize stack usage if((partIndex - first) < (last - partIndex)) { stack.push(first, partIndex - 1); first = partIndex + 1; } else { stack.push(partIndex + 1, last); last = partIndex - 1; } } } if(stack.empty()) break; stack.pop(first, last); } } #if PX_SORT_PARANOIA for(uint32_t i = 1; i < count; i++) PX_ASSERT(!compare(elements[i], elements[i - 1])); #endif } template <class T, class Predicate> void PxSort(T* elements, uint32_t count, const Predicate& compare) { PxSort(elements, count, compare, typename PxAllocatorTraits<T>::Type()); } template <class T> void PxSort(T* elements, uint32_t count) { PxSort(elements, count, PxLess<T>(), typename PxAllocatorTraits<T>::Type()); } #if !PX_DOXYGEN } // namespace physx #endif #if PX_VC #pragma warning(pop) #endif /** @} */ #endif
3,935
C
28.818182
116
0.708513
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxMathUtils.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_MATH_UTILS_H #define PX_MATH_UTILS_H /** \addtogroup common @{ */ #include "foundation/PxFoundationConfig.h" #include "foundation/Px.h" #include "foundation/PxVec4.h" #include "foundation/PxAssert.h" #include "foundation/PxPlane.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief finds the shortest rotation between two vectors. \param[in] from the vector to start from \param[in] target the vector to rotate to \return a rotation about an axis normal to the two vectors which takes one to the other via the shortest path */ PX_FOUNDATION_API PxQuat PxShortestRotation(const PxVec3& from, const PxVec3& target); /* \brief diagonalizes a 3x3 symmetric matrix y The returned matrix satisfies M = R * D * R', where R is the rotation matrix for the output quaternion, R' its transpose, and D the diagonal matrix If the matrix is not symmetric, the result is undefined. \param[in] m the matrix to diagonalize \param[out] axes a quaternion rotation which diagonalizes the matrix \return the vector diagonal of the diagonalized matrix. */ PX_FOUNDATION_API PxVec3 PxDiagonalize(const PxMat33& m, PxQuat& axes); /** \brief creates a transform from the endpoints of a segment, suitable for an actor transform for a PxCapsuleGeometry \param[in] p0 one end of major axis of the capsule \param[in] p1 the other end of the axis of the capsule \param[out] halfHeight the halfHeight of the capsule. This parameter is optional. 
\return A PxTransform which will transform the vector (1,0,0) to the capsule axis shrunk by the halfHeight */ PX_FOUNDATION_API PxTransform PxTransformFromSegment(const PxVec3& p0, const PxVec3& p1, PxReal* halfHeight = NULL); /** \brief creates a transform from a plane equation, suitable for an actor transform for a PxPlaneGeometry \param[in] plane the desired plane equation \return a PxTransform which will transform the plane PxPlane(1,0,0,0) to the specified plane */ PX_FOUNDATION_API PxTransform PxTransformFromPlaneEquation(const PxPlane& plane); /** \brief creates a plane equation from a transform, such as the actor transform for a PxPlaneGeometry \param[in] pose the transform \return the plane */ PX_INLINE PxPlane PxPlaneEquationFromTransform(const PxTransform& pose) { return PxPlane(1.0f, 0.0f, 0.0f, 0.0f).transform(pose); } /** \brief Spherical linear interpolation of two quaternions. \param[in] t is the interpolation parameter in range (0, 1) \param[in] left is the start of the interpolation \param[in] right is the end of the interpolation \return Returns left when t=0, right when t=1 and a linear interpolation of left and right when 0 < t < 1. Returns angle between -PI and PI in radians */ PX_CUDA_CALLABLE PX_INLINE PxQuat PxSlerp(const PxReal t, const PxQuat& left, const PxQuat& right) { const PxReal quatEpsilon = (PxReal(1.0e-8f)); PxReal cosine = left.dot(right); PxReal sign = PxReal(1); if (cosine < 0) { cosine = -cosine; sign = PxReal(-1); } PxReal sine = PxReal(1) - cosine * cosine; if (sine >= quatEpsilon * quatEpsilon) { sine = PxSqrt(sine); const PxReal angle = PxAtan2(sine, cosine); const PxReal i_sin_angle = PxReal(1) / sine; const PxReal leftw = PxSin(angle * (PxReal(1) - t)) * i_sin_angle; const PxReal rightw = PxSin(angle * t) * i_sin_angle * sign; return left * leftw + right * rightw; } return left; } /** \brief integrate transform. \param[in] curTrans The current transform \param[in] linvel Linear velocity \param[in] angvel Angular velocity \param[in] timeStep The time-step for integration \param[out] result The integrated transform */ PX_FOUNDATION_API void PxIntegrateTransform(const PxTransform& curTrans, const PxVec3& linvel, const PxVec3& angvel, PxReal timeStep, PxTransform& result); //! \brief Compute the exponent of a PxVec3 PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuat PxExp(const PxVec3& v) { const PxReal m = v.magnitudeSquared(); return m < 1e-24f ? PxQuat(PxIdentity) : PxQuat(PxSqrt(m), v * PxRecipSqrt(m)); } /** \brief computes a oriented bounding box around the scaled basis. \param basis Input = skewed basis, Output = (normalized) orthogonal basis. \return Bounding box extent. */ PX_FOUNDATION_API PxVec3 PxOptimizeBoundingBox(PxMat33& basis); /** \brief return Returns the log of a PxQuat */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 PxLog(const PxQuat& q) { const PxReal s = q.getImaginaryPart().magnitude(); if (s < 1e-12f) return PxVec3(0.0f); // force the half-angle to have magnitude <= pi/2 PxReal halfAngle = q.w < 0 ? PxAtan2(-s, -q.w) : PxAtan2(s, q.w); PX_ASSERT(halfAngle >= -PxPi / 2 && halfAngle <= PxPi / 2); return q.getImaginaryPart().getNormalized() * 2.f * halfAngle; } /** \brief return Returns 0 if v.x is largest element of v, 1 if v.y is largest element, 2 if v.z is largest element. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxU32 PxLargestAxis(const PxVec3& v) { PxU32 m = PxU32(v.y > v.x ? 1 : 0); return v.z > v[m] ? 2 : m; } /** \brief Compute tan(theta/2) given sin(theta) and cos(theta) as inputs. 
\param[in] sin has value sin(theta) \param[in] cos has value cos(theta) \return Returns tan(theta/2) */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal PxTanHalf(PxReal sin, PxReal cos) { // PT: avoids divide by zero for singularity. We return sqrt(FLT_MAX) instead of FLT_MAX // to make sure the calling code doesn't generate INF values when manipulating the returned value // (some joints multiply it by 4, etc). if (cos == -1.0f) return sin < 0.0f ? -sqrtf(FLT_MAX) : sqrtf(FLT_MAX); // PT: half-angle formula: tan(a/2) = sin(a)/(1+cos(a)) return sin / (1.0f + cos); } /** \brief Compute the closest point on an 2d ellipse to a given 2d point. \param[in] point is a 2d point in the y-z plane represented by (point.y, point.z) \param[in] radii are the radii of the ellipse (radii.y and radii.z) in the y-z plane. \return Returns the 2d position on the surface of the ellipse that is closest to point. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 PxEllipseClamp(const PxVec3& point, const PxVec3& radii) { // lagrange multiplier method with Newton/Halley hybrid root-finder. // see http://www.geometrictools.com/Documentation/DistancePointToEllipse2.pdf // for proof of Newton step robustness and initial estimate. // Halley converges much faster but sometimes overshoots - when that happens we take // a newton step instead // converges in 1-2 iterations where D&C works well, and it's good with 4 iterations // with any ellipse that isn't completely crazy const PxU32 MAX_ITERATIONS = 20; const PxReal convergenceThreshold = 1e-4f; // iteration requires first quadrant but we recover generality later PxVec3 q(0, PxAbs(point.y), PxAbs(point.z)); const PxReal tinyEps = 1e-6f; // very close to minor axis is numerically problematic but trivial if (radii.y >= radii.z) { if (q.z < tinyEps) return PxVec3(0, point.y > 0 ? radii.y : -radii.y, 0); } else { if (q.y < tinyEps) return PxVec3(0, 0, point.z > 0 ? radii.z : -radii.z); } PxVec3 denom, e2 = radii.multiply(radii), eq = radii.multiply(q); // we can use any initial guess which is > maximum(-e.y^2,-e.z^2) and for which f(t) is > 0. // this guess works well near the axes, but is weak along the diagonals. PxReal t = PxMax(eq.y - e2.y, eq.z - e2.z); for (PxU32 i = 0; i < MAX_ITERATIONS; i++) { denom = PxVec3(0, 1 / (t + e2.y), 1 / (t + e2.z)); PxVec3 denom2 = eq.multiply(denom); PxVec3 fv = denom2.multiply(denom2); PxReal f = fv.y + fv.z - 1; // although in exact arithmetic we are guaranteed f>0, we can get here // on the first iteration via catastrophic cancellation if the point is // very close to the origin. In that case we just behave as if f=0 if (f < convergenceThreshold) return e2.multiply(point).multiply(denom); PxReal df = fv.dot(denom) * -2.0f; t = t - f / df; } // we didn't converge, so clamp what we have PxVec3 r = e2.multiply(point).multiply(denom); return r * PxRecipSqrt(PxSqr(r.y / radii.y) + PxSqr(r.z / radii.z)); } /** \brief Compute from an input quaternion q a pair of quaternions (swing, twist) such that q = swing * twist with the caveats that swing.x = twist.y = twist.z = 0. \param[in] q is the quaternion to be decomposed into swing and twist quaternions. \param[out] swing is the swing component of the quaternion decomposition. \param[out] twist is the twist component of the quaternion decomposition. */ PX_CUDA_CALLABLE PX_FORCE_INLINE void PxSeparateSwingTwist(const PxQuat& q, PxQuat& swing, PxQuat& twist) { twist = q.x != 0.0f ? 
PxQuat(q.x, 0, 0, q.w).getNormalized() : PxQuat(PxIdentity); swing = q * twist.getConjugate(); } /** \brief Compute the angle between two non-unit vectors \param[in] v0 is one of the non-unit vectors \param[in] v1 is the other of the two non-unit vectors \return Returns the angle (in radians) between the two vector v0 and v1. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxF32 PxComputeAngle(const PxVec3& v0, const PxVec3& v1) { const PxF32 cos = v0.dot(v1); // |v0|*|v1|*Cos(Angle) const PxF32 sin = (v0.cross(v1)).magnitude(); // |v0|*|v1|*Sin(Angle) return PxAtan2(sin, cos); } /** \brief Compute two normalized vectors (right and up) that are perpendicular to an input normalized vector (dir). \param[in] dir is a normalized vector that is used to compute the perpendicular vectors. \param[out] right is the first of the two vectors perpendicular to dir \param[out] up is the second of the two vectors perpendicular to dir */ PX_CUDA_CALLABLE PX_INLINE void PxComputeBasisVectors(const PxVec3& dir, PxVec3& right, PxVec3& up) { // Derive two remaining vectors if (PxAbs(dir.y) <= 0.9999f) { right = PxVec3(dir.z, 0.0f, -dir.x); right.normalize(); // PT: normalize not needed for 'up' because dir & right are unit vectors, // and by construction the angle between them is 90 degrees (i.e. sin(angle)=1) up = PxVec3(dir.y * right.z, dir.z * right.x - dir.x * right.z, -dir.y * right.x); } else { right = PxVec3(1.0f, 0.0f, 0.0f); up = PxVec3(0.0f, dir.z, -dir.y); up.normalize(); } } /** \brief Compute three normalized vectors (dir, right and up) that are parallel to (dir) and perpendicular to (right, up) the normalized direction vector (p1 - p0)/||p1 - p0||. \param[in] p0 is used to compute the normalized vector dir = (p1 - p0)/||p1 - p0||. \param[in] p1 is used to compute the normalized vector dir = (p1 - p0)/||p1 - p0||. \param[out] dir is the normalized vector (p1 - p0)/||p1 - p0||. 
\param[out] right is the first of the two normalized vectors perpendicular to dir \param[out] up is the second of the two normalized vectors perpendicular to dir */ PX_INLINE void PxComputeBasisVectors(const PxVec3& p0, const PxVec3& p1, PxVec3& dir, PxVec3& right, PxVec3& up) { // Compute the new direction vector dir = p1 - p0; dir.normalize(); // Derive two remaining vectors PxComputeBasisVectors(dir, right, up); } /** \brief Compute (i+1)%3 */ PX_INLINE PxU32 PxGetNextIndex3(PxU32 i) { return (i + 1 + (i >> 1)) & 3; } PX_INLINE PX_CUDA_CALLABLE void computeBarycentric(const PxVec3& a, const PxVec3& b, const PxVec3& c, const PxVec3& d, const PxVec3& p, PxVec4& bary) { const PxVec3 ba = b - a; const PxVec3 ca = c - a; const PxVec3 da = d - a; const PxVec3 pa = p - a; const PxReal detBcd = ba.dot(ca.cross(da)); const PxReal detPcd = pa.dot(ca.cross(da)); bary.y = detPcd / detBcd; const PxReal detBpd = ba.dot(pa.cross(da)); bary.z = detBpd / detBcd; const PxReal detBcp = ba.dot(ca.cross(pa)); bary.w = detBcp / detBcd; bary.x = 1 - bary.y - bary.z - bary.w; } PX_INLINE PX_CUDA_CALLABLE void computeBarycentric(const PxVec3& a, const PxVec3& b, const PxVec3& c, const PxVec3& p, PxVec4& bary) { const PxVec3 v0 = b - a; const PxVec3 v1 = c - a; const PxVec3 v2 = p - a; const float d00 = v0.dot(v0); const float d01 = v0.dot(v1); const float d11 = v1.dot(v1); const float d20 = v2.dot(v0); const float d21 = v2.dot(v1); const float denom = d00 * d11 - d01 * d01; const float v = (d11 * d20 - d01 * d21) / denom; const float w = (d00 * d21 - d01 * d20) / denom; const float u = 1.f - v - w; bary.x = u; bary.y = v; bary.z = w; bary.w = 0.f; } // lerp struct Interpolation { PX_INLINE PX_CUDA_CALLABLE static float PxLerp(float a, float b, float t) { return a + t * (b - a); } PX_INLINE PX_CUDA_CALLABLE static PxReal PxBiLerp( const PxReal f00, const PxReal f10, const PxReal f01, const PxReal f11, const PxReal tx, const PxReal ty) { return PxLerp( PxLerp(f00, f10, tx), PxLerp(f01, f11, tx), ty); } PX_INLINE PX_CUDA_CALLABLE static PxReal PxTriLerp( const PxReal f000, const PxReal f100, const PxReal f010, const PxReal f110, const PxReal f001, const PxReal f101, const PxReal f011, const PxReal f111, const PxReal tx, const PxReal ty, const PxReal tz) { return PxLerp( PxBiLerp(f000, f100, f010, f110, tx, ty), PxBiLerp(f001, f101, f011, f111, tx, ty), tz); } PX_INLINE PX_CUDA_CALLABLE static PxU32 PxSDFIdx(PxU32 i, PxU32 j, PxU32 k, PxU32 nbX, PxU32 nbY) { return i + j * nbX + k * nbX*nbY; } PX_INLINE PX_CUDA_CALLABLE static PxReal PxSDFSampleImpl(const PxReal* PX_RESTRICT sdf, const PxVec3& localPos, const PxVec3& sdfBoxLower, const PxVec3& sdfBoxHigher, const PxReal sdfDx, const PxReal invSdfDx, const PxU32 dimX, const PxU32 dimY, const PxU32 dimZ, PxReal tolerance) { PxVec3 clampedGridPt = localPos.maximum(sdfBoxLower).minimum(sdfBoxHigher); const PxVec3 diff = (localPos - clampedGridPt); if (diff.magnitudeSquared() > tolerance*tolerance) return PX_MAX_F32; PxVec3 f = (clampedGridPt - sdfBoxLower) * invSdfDx; PxU32 i = PxU32(f.x); PxU32 j = PxU32(f.y); PxU32 k = PxU32(f.z); f -= PxVec3(PxReal(i), PxReal(j), PxReal(k)); if (i >= (dimX - 1)) { i = dimX - 2; clampedGridPt.x -= f.x * sdfDx; f.x = 1.f; } if (j >= (dimY - 1)) { j = dimY - 2; clampedGridPt.y -= f.y * sdfDx; f.y = 1.f; } if (k >= (dimZ - 1)) { k = dimZ - 2; clampedGridPt.z -= f.z * sdfDx; f.z = 1.f; } const PxReal s000 = sdf[Interpolation::PxSDFIdx(i, j, k, dimX, dimY)]; const PxReal s100 = sdf[Interpolation::PxSDFIdx(i + 1, j, k, dimX, dimY)]; const 
PxReal s010 = sdf[Interpolation::PxSDFIdx(i, j + 1, k, dimX, dimY)]; const PxReal s110 = sdf[Interpolation::PxSDFIdx(i + 1, j + 1, k, dimX, dimY)]; const PxReal s001 = sdf[Interpolation::PxSDFIdx(i, j, k + 1, dimX, dimY)]; const PxReal s101 = sdf[Interpolation::PxSDFIdx(i + 1, j, k + 1, dimX, dimY)]; const PxReal s011 = sdf[Interpolation::PxSDFIdx(i, j + 1, k + 1, dimX, dimY)]; const PxReal s111 = sdf[Interpolation::PxSDFIdx(i + 1, j + 1, k + 1, dimX, dimY)]; PxReal dist = PxTriLerp( s000, s100, s010, s110, s001, s101, s011, s111, f.x, f.y, f.z); dist += diff.magnitude(); return dist; } }; PX_INLINE PX_CUDA_CALLABLE PxReal PxSdfSample(const PxReal* PX_RESTRICT sdf, const PxVec3& localPos, const PxVec3& sdfBoxLower, const PxVec3& sdfBoxHigher, const PxReal sdfDx, const PxReal invSdfDx, const PxU32 dimX, const PxU32 dimY, const PxU32 dimZ, PxVec3& gradient, PxReal tolerance = PX_MAX_F32) { PxReal dist = Interpolation::PxSDFSampleImpl(sdf, localPos, sdfBoxLower, sdfBoxHigher, sdfDx, invSdfDx, dimX, dimY, dimZ, tolerance); if (dist < tolerance) { PxVec3 grad; grad.x = Interpolation::PxSDFSampleImpl(sdf, localPos + PxVec3(sdfDx, 0.f, 0.f), sdfBoxLower, sdfBoxHigher, sdfDx, invSdfDx, dimX, dimY, dimZ, tolerance) - Interpolation::PxSDFSampleImpl(sdf, localPos - PxVec3(sdfDx, 0.f, 0.f), sdfBoxLower, sdfBoxHigher, sdfDx, invSdfDx, dimX, dimY, dimZ, tolerance); grad.y = Interpolation::PxSDFSampleImpl(sdf, localPos + PxVec3(0.f, sdfDx, 0.f), sdfBoxLower, sdfBoxHigher, sdfDx, invSdfDx, dimX, dimY, dimZ, tolerance) - Interpolation::PxSDFSampleImpl(sdf, localPos - PxVec3(0.f, sdfDx, 0.f), sdfBoxLower, sdfBoxHigher, sdfDx, invSdfDx, dimX, dimY, dimZ, tolerance); grad.z = Interpolation::PxSDFSampleImpl(sdf, localPos + PxVec3(0.f, 0.f, sdfDx), sdfBoxLower, sdfBoxHigher, sdfDx, invSdfDx, dimX, dimY, dimZ, tolerance) - Interpolation::PxSDFSampleImpl(sdf, localPos - PxVec3(0.f, 0.f, sdfDx), sdfBoxLower, sdfBoxHigher, sdfDx, invSdfDx, dimX, dimY, dimZ, tolerance); gradient = grad; } return dist; } #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
18,062
C
33.145558
174
0.704186
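A brief sketch of two of the helpers in the PxMathUtils record above: PxSlerp between two unit quaternions and the swing/twist decomposition. The endpoint orientations are hypothetical, and PxQuat.h is included explicitly here for self-containment.

// Hedged sketch of PxSlerp / PxSeparateSwingTwist usage.
#include "foundation/PxMathUtils.h"
#include "foundation/PxQuat.h"

using namespace physx;

void slerpExample()
{
    const PxQuat from(PxIdentity);
    const PxQuat to(PxPi * 0.5f, PxVec3(0.0f, 0.0f, 1.0f));   // 90 degrees about Z

    // Halfway between the two orientations (t in (0, 1)).
    const PxQuat half = PxSlerp(0.5f, from, to);

    // Decompose into swing * twist about the X axis, as documented above.
    PxQuat swing, twist;
    PxSeparateSwingTwist(half, swing, twist);
    PX_UNUSED(swing);
    PX_UNUSED(twist);
}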
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxVec3.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_VEC3_H #define PX_VEC3_H /** \addtogroup foundation @{ */ #include "foundation/PxMath.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief 3 Element vector class. This is a 3-dimensional vector class with public data members. */ template<class Type> class PxVec3T { public: /** \brief default constructor leaves data uninitialized. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T() { } /** \brief zero constructor. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T(PxZERO) : x(Type(0.0)), y(Type(0.0)), z(Type(0.0)) { } /** \brief Assigns scalar parameter to all elements. Useful to initialize to zero or one. \param[in] a Value to assign to elements. */ explicit PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T(Type a) : x(a), y(a), z(a) { } /** \brief Initializes from 3 scalar parameters. \param[in] nx Value to initialize X component. \param[in] ny Value to initialize Y component. \param[in] nz Value to initialize Z component. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T(Type nx, Type ny, Type nz) : x(nx), y(ny), z(nz) { } /** \brief Copy ctor. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T(const PxVec3T& v) : x(v.x), y(v.y), z(v.z) { } // Operators /** \brief Assignment operator */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T& operator=(const PxVec3T& p) { x = p.x; y = p.y; z = p.z; return *this; } /** \brief element access */ PX_CUDA_CALLABLE PX_FORCE_INLINE Type& operator[](unsigned int index) { PX_ASSERT(index <= 2); return reinterpret_cast<Type*>(this)[index]; } /** \brief element access */ PX_CUDA_CALLABLE PX_FORCE_INLINE const Type& operator[](unsigned int index) const { PX_ASSERT(index <= 2); return reinterpret_cast<const Type*>(this)[index]; } /** \brief returns true if the two vectors are exactly equal. 
*/ PX_CUDA_CALLABLE PX_FORCE_INLINE bool operator==(const PxVec3T& v) const { return x == v.x && y == v.y && z == v.z; } /** \brief returns true if the two vectors are not exactly equal. */ PX_CUDA_CALLABLE PX_FORCE_INLINE bool operator!=(const PxVec3T& v) const { return x != v.x || y != v.y || z != v.z; } /** \brief tests for exact zero vector */ PX_CUDA_CALLABLE PX_FORCE_INLINE bool isZero() const { return x == Type(0.0) && y == Type(0.0) && z == Type(0.0); } /** \brief returns true if all 3 elems of the vector are finite (not NAN or INF, etc.) */ PX_CUDA_CALLABLE PX_INLINE bool isFinite() const { return PxIsFinite(x) && PxIsFinite(y) && PxIsFinite(z); } /** \brief is normalized - used by API parameter validation */ PX_CUDA_CALLABLE PX_FORCE_INLINE bool isNormalized() const { const float unitTolerance = Type(1e-4); // PT: do we need a different epsilon for float & double? return isFinite() && PxAbs(magnitude() - Type(1.0)) < unitTolerance; } /** \brief returns the squared magnitude Avoids calling PxSqrt()! */ PX_CUDA_CALLABLE PX_FORCE_INLINE Type magnitudeSquared() const { return x * x + y * y + z * z; } /** \brief returns the magnitude */ PX_CUDA_CALLABLE PX_FORCE_INLINE Type magnitude() const { return PxSqrt(magnitudeSquared()); } /** \brief negation */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T operator-() const { return PxVec3T(-x, -y, -z); } /** \brief vector addition */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T operator+(const PxVec3T& v) const { return PxVec3T(x + v.x, y + v.y, z + v.z); } /** \brief vector difference */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T operator-(const PxVec3T& v) const { return PxVec3T(x - v.x, y - v.y, z - v.z); } /** \brief scalar post-multiplication */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T operator*(Type f) const { return PxVec3T(x * f, y * f, z * f); } /** \brief scalar division */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T operator/(Type f) const { f = Type(1.0) / f; return PxVec3T(x * f, y * f, z * f); } /** \brief vector addition */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T& operator+=(const PxVec3T& v) { x += v.x; y += v.y; z += v.z; return *this; } /** \brief vector difference */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T& operator-=(const PxVec3T& v) { x -= v.x; y -= v.y; z -= v.z; return *this; } /** \brief scalar multiplication */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T& operator*=(Type f) { x *= f; y *= f; z *= f; return *this; } /** \brief scalar division */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T& operator/=(Type f) { f = Type(1.0) / f; x *= f; y *= f; z *= f; return *this; } /** \brief returns the scalar product of this and other. */ PX_CUDA_CALLABLE PX_FORCE_INLINE Type dot(const PxVec3T& v) const { return x * v.x + y * v.y + z * v.z; } /** \brief cross product */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T cross(const PxVec3T& v) const { return PxVec3T(y * v.z - z * v.y, z * v.x - x * v.z, x * v.y - y * v.x); } /** returns a unit vector */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T getNormalized() const { const Type m = magnitudeSquared(); return m > Type(0.0) ? *this * PxRecipSqrt(m) : PxVec3T(Type(0)); } /** \brief normalizes the vector in place */ PX_CUDA_CALLABLE PX_FORCE_INLINE Type normalize() { const Type m = magnitude(); if(m > Type(0.0)) *this /= m; return m; } /** \brief normalizes the vector in place. Does nothing if vector magnitude is under PX_NORMALIZATION_EPSILON. Returns vector magnitude if >= PX_NORMALIZATION_EPSILON and 0.0f otherwise. 
*/ PX_CUDA_CALLABLE PX_FORCE_INLINE Type normalizeSafe() { const Type mag = magnitude(); if(mag < PX_NORMALIZATION_EPSILON) // PT: do we need a different epsilon for float & double? return Type(0.0); *this *= Type(1.0) / mag; return mag; } /** \brief normalizes the vector in place. Asserts if vector magnitude is under PX_NORMALIZATION_EPSILON. returns vector magnitude. */ PX_CUDA_CALLABLE PX_FORCE_INLINE Type normalizeFast() { const Type mag = magnitude(); PX_ASSERT(mag >= PX_NORMALIZATION_EPSILON); // PT: do we need a different epsilon for float & double? *this *= Type(1.0) / mag; return mag; } /** \brief a[i] * b[i], for all i. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T multiply(const PxVec3T& a) const { return PxVec3T(x * a.x, y * a.y, z * a.z); } /** \brief element-wise minimum */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T minimum(const PxVec3T& v) const { return PxVec3T(PxMin(x, v.x), PxMin(y, v.y), PxMin(z, v.z)); } /** \brief returns MIN(x, y, z); */ PX_CUDA_CALLABLE PX_FORCE_INLINE Type minElement() const { return PxMin(x, PxMin(y, z)); } /** \brief element-wise maximum */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T maximum(const PxVec3T& v) const { return PxVec3T(PxMax(x, v.x), PxMax(y, v.y), PxMax(z, v.z)); } /** \brief returns MAX(x, y, z); */ PX_CUDA_CALLABLE PX_FORCE_INLINE Type maxElement() const { return PxMax(x, PxMax(y, z)); } /** \brief returns absolute values of components; */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T abs() const { return PxVec3T(PxAbs(x), PxAbs(y), PxAbs(z)); } Type x, y, z; }; template<class Type> PX_CUDA_CALLABLE static PX_FORCE_INLINE PxVec3T<Type> operator*(Type f, const PxVec3T<Type>& v) { return PxVec3T<Type>(f * v.x, f * v.y, f * v.z); } typedef PxVec3T<float> PxVec3; typedef PxVec3T<double> PxVec3d; //! A padded version of PxVec3, to safely load its data using SIMD class PxVec3Padded : public PxVec3 { public: PX_FORCE_INLINE PxVec3Padded() {} PX_FORCE_INLINE ~PxVec3Padded() {} PX_FORCE_INLINE PxVec3Padded(const PxVec3& p) : PxVec3(p) {} PX_FORCE_INLINE PxVec3Padded(float f) : PxVec3(f) {} /** \brief Assignment operator. To fix this: error: definition of implicit copy assignment operator for 'PxVec3Padded' is deprecated because it has a user-declared destructor [-Werror,-Wdeprecated] */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3Padded& operator=(const PxVec3Padded& p) { x = p.x; y = p.y; z = p.z; return *this; } PxU32 padding; }; PX_COMPILE_TIME_ASSERT(sizeof(PxVec3Padded) == 16); typedef PxVec3Padded PxVec3p; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
9,876
C
22.294811
153
0.669198
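A minimal usage sketch of the PxVec3 interface defined in the header above; it assumes only that the PhysX include directory is on the compiler's include path, and the main() driver is illustrative rather than part of the library.

#include "foundation/PxVec3.h"
#include <cstdio>

int main()
{
    using namespace physx;

    const PxVec3 a(1.0f, 2.0f, 3.0f);
    const PxVec3 b(4.0f, 5.0f, 6.0f);

    const float d = a.dot(b);        // 1*4 + 2*5 + 3*6 = 32
    const PxVec3 c = a.cross(b);     // (-3, 6, -3)

    PxVec3 n = a;
    const float len = n.normalize(); // normalizes in place, returns the original magnitude

    printf("dot=%f cross=(%f %f %f) len=%f normalized=%d\n",
           double(d), double(c.x), double(c.y), double(c.z), double(len), int(n.isNormalized()));
    return 0;
}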
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxBounds3.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_BOUNDS3_H #define PX_BOUNDS3_H /** \addtogroup foundation @{ */ #include "foundation/PxTransform.h" #include "foundation/PxMat33.h" #if !PX_DOXYGEN namespace physx { #endif // maximum extents defined such that floating point exceptions are avoided for standard use cases #define PX_MAX_BOUNDS_EXTENTS (PX_MAX_REAL * 0.25f) /** \brief Class representing 3D range or axis aligned bounding box. Stored as minimum and maximum extent corners. Alternate representation would be center and dimensions. May be empty or nonempty. For nonempty bounds, minimum <= maximum has to hold for all axes. Empty bounds have to be represented as minimum = PX_MAX_BOUNDS_EXTENTS and maximum = -PX_MAX_BOUNDS_EXTENTS for all axes. All other representations are invalid and the behavior is undefined. */ class PxBounds3 { public: /** \brief Default constructor, not performing any initialization for performance reason. \remark Use empty() function below to construct empty bounds. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3() { } /** \brief Construct from two bounding points */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3(const PxVec3& minimum, const PxVec3& maximum); PX_CUDA_CALLABLE PX_FORCE_INLINE void operator=(const PxBounds3& other) { minimum = other.minimum; maximum = other.maximum; } PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3(const PxBounds3& other) { minimum = other.minimum; maximum = other.maximum; } /** \brief Return empty bounds. */ static PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3 empty(); /** \brief returns the AABB containing v0 and v1. \param v0 first point included in the AABB. \param v1 second point included in the AABB. */ static PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3 boundsOfPoints(const PxVec3& v0, const PxVec3& v1); /** \brief returns the AABB from center and extents vectors. 
\param center Center vector \param extent Extents vector */ static PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3 centerExtents(const PxVec3& center, const PxVec3& extent); /** \brief Construct from center, extent, and (not necessarily orthogonal) basis */ static PX_CUDA_CALLABLE PX_INLINE PxBounds3 basisExtent(const PxVec3& center, const PxMat33& basis, const PxVec3& extent); /** \brief Construct from pose and extent */ static PX_CUDA_CALLABLE PX_INLINE PxBounds3 poseExtent(const PxTransform& pose, const PxVec3& extent); /** \brief gets the transformed bounds of the passed AABB (resulting in a bigger AABB). This version is safe to call for empty bounds. \param[in] matrix Transform to apply, can contain scaling as well \param[in] bounds The bounds to transform. */ static PX_CUDA_CALLABLE PX_INLINE PxBounds3 transformSafe(const PxMat33& matrix, const PxBounds3& bounds); /** \brief gets the transformed bounds of the passed AABB (resulting in a bigger AABB). Calling this method for empty bounds leads to undefined behavior. Use #transformSafe() instead. \param[in] matrix Transform to apply, can contain scaling as well \param[in] bounds The bounds to transform. */ static PX_CUDA_CALLABLE PX_INLINE PxBounds3 transformFast(const PxMat33& matrix, const PxBounds3& bounds); /** \brief gets the transformed bounds of the passed AABB (resulting in a bigger AABB). This version is safe to call for empty bounds. \param[in] transform Transform to apply, can contain scaling as well \param[in] bounds The bounds to transform. */ static PX_CUDA_CALLABLE PX_INLINE PxBounds3 transformSafe(const PxTransform& transform, const PxBounds3& bounds); /** \brief gets the transformed bounds of the passed AABB (resulting in a bigger AABB). Calling this method for empty bounds leads to undefined behavior. Use #transformSafe() instead. \param[in] transform Transform to apply, can contain scaling as well \param[in] bounds The bounds to transform. */ static PX_CUDA_CALLABLE PX_INLINE PxBounds3 transformFast(const PxTransform& transform, const PxBounds3& bounds); /** \brief Sets empty to true */ PX_CUDA_CALLABLE PX_FORCE_INLINE void setEmpty(); /** \brief Sets the bounds to maximum size [-PX_MAX_BOUNDS_EXTENTS, PX_MAX_BOUNDS_EXTENTS]. */ PX_CUDA_CALLABLE PX_FORCE_INLINE void setMaximal(); /** \brief expands the volume to include v \param v Point to expand to. */ PX_CUDA_CALLABLE PX_FORCE_INLINE void include(const PxVec3& v); /** \brief expands the volume to include b. \param b Bounds to perform union with. */ PX_CUDA_CALLABLE PX_FORCE_INLINE void include(const PxBounds3& b); PX_CUDA_CALLABLE PX_FORCE_INLINE bool isEmpty() const; /** \brief indicates whether the intersection of this and b is empty or not. \param b Bounds to test for intersection. */ PX_CUDA_CALLABLE PX_FORCE_INLINE bool intersects(const PxBounds3& b) const; /** \brief computes the 1D-intersection between two AABBs, on a given axis. \param a the other AABB \param axis the axis (0, 1, 2) */ PX_CUDA_CALLABLE PX_FORCE_INLINE bool intersects1D(const PxBounds3& a, uint32_t axis) const; /** \brief indicates if these bounds contain v. \param v Point to test against bounds. */ PX_CUDA_CALLABLE PX_FORCE_INLINE bool contains(const PxVec3& v) const; /** \brief checks a box is inside another box. \param box the other AABB */ PX_CUDA_CALLABLE PX_FORCE_INLINE bool isInside(const PxBounds3& box) const; /** \brief returns the center of this axis aligned box. 
*/ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 getCenter() const; /** \brief get component of the box's center along a given axis */ PX_CUDA_CALLABLE PX_FORCE_INLINE float getCenter(uint32_t axis) const; /** \brief get component of the box's extents along a given axis */ PX_CUDA_CALLABLE PX_FORCE_INLINE float getExtents(uint32_t axis) const; /** \brief returns the dimensions (width/height/depth) of this axis aligned box. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 getDimensions() const; /** \brief returns the extents, which are half of the width/height/depth. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 getExtents() const; /** \brief scales the AABB. This version is safe to call for empty bounds. \param scale Factor to scale AABB by. */ PX_CUDA_CALLABLE PX_FORCE_INLINE void scaleSafe(float scale); /** \brief scales the AABB. Calling this method for empty bounds leads to undefined behavior. Use #scaleSafe() instead. \param scale Factor to scale AABB by. */ PX_CUDA_CALLABLE PX_FORCE_INLINE void scaleFast(float scale); /** fattens the AABB in all 3 dimensions by the given distance. This version is safe to call for empty bounds. */ PX_CUDA_CALLABLE PX_FORCE_INLINE void fattenSafe(float distance); /** fattens the AABB in all 3 dimensions by the given distance. Calling this method for empty bounds leads to undefined behavior. Use #fattenSafe() instead. */ PX_CUDA_CALLABLE PX_FORCE_INLINE void fattenFast(float distance); /** checks that the AABB values are not NaN */ PX_CUDA_CALLABLE PX_FORCE_INLINE bool isFinite() const; /** checks that the AABB values describe a valid configuration. */ PX_CUDA_CALLABLE PX_FORCE_INLINE bool isValid() const; /** Finds the closest point in the box to the point p. If p is contained, this will be p, otherwise it will be the closest point on the surface of the box. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 closestPoint(const PxVec3& p) const; PxVec3 minimum, maximum; }; PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3::PxBounds3(const PxVec3& minimum_, const PxVec3& maximum_) : minimum(minimum_), maximum(maximum_) { } PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3 PxBounds3::empty() { return PxBounds3(PxVec3(PX_MAX_BOUNDS_EXTENTS), PxVec3(-PX_MAX_BOUNDS_EXTENTS)); } PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxBounds3::isFinite() const { return minimum.isFinite() && maximum.isFinite(); } PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3 PxBounds3::boundsOfPoints(const PxVec3& v0, const PxVec3& v1) { return PxBounds3(v0.minimum(v1), v0.maximum(v1)); } PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3 PxBounds3::centerExtents(const PxVec3& center, const PxVec3& extent) { return PxBounds3(center - extent, center + extent); } PX_CUDA_CALLABLE PX_INLINE PxBounds3 PxBounds3::basisExtent(const PxVec3& center, const PxMat33& basis, const PxVec3& extent) { // extended basis vectors const PxVec3 c0 = basis.column0 * extent.x; const PxVec3 c1 = basis.column1 * extent.y; const PxVec3 c2 = basis.column2 * extent.z; // find combination of base vectors that produces max. 
distance for each component = sum of abs() const PxVec3 w( PxAbs(c0.x) + PxAbs(c1.x) + PxAbs(c2.x), PxAbs(c0.y) + PxAbs(c1.y) + PxAbs(c2.y), PxAbs(c0.z) + PxAbs(c1.z) + PxAbs(c2.z)); return PxBounds3(center - w, center + w); } PX_CUDA_CALLABLE PX_INLINE PxBounds3 PxBounds3::poseExtent(const PxTransform& pose, const PxVec3& extent) { return basisExtent(pose.p, PxMat33(pose.q), extent); } PX_CUDA_CALLABLE PX_FORCE_INLINE void PxBounds3::setEmpty() { minimum = PxVec3(PX_MAX_BOUNDS_EXTENTS); maximum = PxVec3(-PX_MAX_BOUNDS_EXTENTS); } PX_CUDA_CALLABLE PX_FORCE_INLINE void PxBounds3::setMaximal() { minimum = PxVec3(-PX_MAX_BOUNDS_EXTENTS); maximum = PxVec3(PX_MAX_BOUNDS_EXTENTS); } PX_CUDA_CALLABLE PX_FORCE_INLINE void PxBounds3::include(const PxVec3& v) { PX_ASSERT(isValid()); minimum = minimum.minimum(v); maximum = maximum.maximum(v); } PX_CUDA_CALLABLE PX_FORCE_INLINE void PxBounds3::include(const PxBounds3& b) { PX_ASSERT(isValid()); minimum = minimum.minimum(b.minimum); maximum = maximum.maximum(b.maximum); } PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxBounds3::isEmpty() const { PX_ASSERT(isValid()); return minimum.x > maximum.x; } PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxBounds3::intersects(const PxBounds3& b) const { PX_ASSERT(isValid() && b.isValid()); return !(b.minimum.x > maximum.x || minimum.x > b.maximum.x || b.minimum.y > maximum.y || minimum.y > b.maximum.y || b.minimum.z > maximum.z || minimum.z > b.maximum.z); } PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxBounds3::intersects1D(const PxBounds3& a, uint32_t axis) const { PX_ASSERT(isValid() && a.isValid()); return maximum[axis] >= a.minimum[axis] && a.maximum[axis] >= minimum[axis]; } PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxBounds3::contains(const PxVec3& v) const { PX_ASSERT(isValid()); return !(v.x < minimum.x || v.x > maximum.x || v.y < minimum.y || v.y > maximum.y || v.z < minimum.z || v.z > maximum.z); } PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxBounds3::isInside(const PxBounds3& box) const { PX_ASSERT(isValid() && box.isValid()); if(box.minimum.x > minimum.x) return false; if(box.minimum.y > minimum.y) return false; if(box.minimum.z > minimum.z) return false; if(box.maximum.x < maximum.x) return false; if(box.maximum.y < maximum.y) return false; if(box.maximum.z < maximum.z) return false; return true; } PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 PxBounds3::getCenter() const { PX_ASSERT(isValid()); return (minimum + maximum) * 0.5f; } PX_CUDA_CALLABLE PX_FORCE_INLINE float PxBounds3::getCenter(uint32_t axis) const { PX_ASSERT(isValid()); return (minimum[axis] + maximum[axis]) * 0.5f; } PX_CUDA_CALLABLE PX_FORCE_INLINE float PxBounds3::getExtents(uint32_t axis) const { PX_ASSERT(isValid()); return (maximum[axis] - minimum[axis]) * 0.5f; } PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 PxBounds3::getDimensions() const { PX_ASSERT(isValid()); return maximum - minimum; } PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 PxBounds3::getExtents() const { PX_ASSERT(isValid()); return getDimensions() * 0.5f; } PX_CUDA_CALLABLE PX_FORCE_INLINE void PxBounds3::scaleSafe(float scale) { PX_ASSERT(isValid()); if(!isEmpty()) scaleFast(scale); } PX_CUDA_CALLABLE PX_FORCE_INLINE void PxBounds3::scaleFast(float scale) { PX_ASSERT(isValid()); *this = centerExtents(getCenter(), getExtents() * scale); } PX_CUDA_CALLABLE PX_FORCE_INLINE void PxBounds3::fattenSafe(float distance) { PX_ASSERT(isValid()); if(!isEmpty()) fattenFast(distance); } PX_CUDA_CALLABLE PX_FORCE_INLINE void PxBounds3::fattenFast(float distance) { PX_ASSERT(isValid()); minimum.x -= distance; minimum.y -= 
distance; minimum.z -= distance; maximum.x += distance; maximum.y += distance; maximum.z += distance; } PX_CUDA_CALLABLE PX_INLINE PxBounds3 PxBounds3::transformSafe(const PxMat33& matrix, const PxBounds3& bounds) { PX_ASSERT(bounds.isValid()); return !bounds.isEmpty() ? transformFast(matrix, bounds) : bounds; } PX_CUDA_CALLABLE PX_INLINE PxBounds3 PxBounds3::transformFast(const PxMat33& matrix, const PxBounds3& bounds) { PX_ASSERT(bounds.isValid()); return PxBounds3::basisExtent(matrix * bounds.getCenter(), matrix, bounds.getExtents()); } PX_CUDA_CALLABLE PX_INLINE PxBounds3 PxBounds3::transformSafe(const PxTransform& transform, const PxBounds3& bounds) { PX_ASSERT(bounds.isValid()); return !bounds.isEmpty() ? transformFast(transform, bounds) : bounds; } PX_CUDA_CALLABLE PX_INLINE PxBounds3 PxBounds3::transformFast(const PxTransform& transform, const PxBounds3& bounds) { PX_ASSERT(bounds.isValid()); return PxBounds3::basisExtent(transform.transform(bounds.getCenter()), PxMat33(transform.q), bounds.getExtents()); } PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxBounds3::isValid() const { return (isFinite() && (((minimum.x <= maximum.x) && (minimum.y <= maximum.y) && (minimum.z <= maximum.z)) || ((minimum.x == PX_MAX_BOUNDS_EXTENTS) && (minimum.y == PX_MAX_BOUNDS_EXTENTS) && (minimum.z == PX_MAX_BOUNDS_EXTENTS) && (maximum.x == -PX_MAX_BOUNDS_EXTENTS) && (maximum.y == -PX_MAX_BOUNDS_EXTENTS) && (maximum.z == -PX_MAX_BOUNDS_EXTENTS)))); } PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 PxBounds3::closestPoint(const PxVec3& p) const { return minimum.maximum(maximum.minimum(p)); } #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
15,601
C
30.079681
123
0.730274
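A short sketch of how the PxBounds3 API above is typically driven: start from empty(), grow with include(), then query. It assumes the same include path as before; main() is only for illustration.

#include "foundation/PxBounds3.h"
#include <cstdio>

int main()
{
    using namespace physx;

    PxBounds3 box = PxBounds3::empty();          // canonical empty representation
    box.include(PxVec3(-1.0f, 0.0f, 0.0f));      // grow to contain points
    box.include(PxVec3(2.0f, 3.0f, 4.0f));

    const PxBounds3 unitBox = PxBounds3::centerExtents(PxVec3(0.0f), PxVec3(1.0f));
    const PxVec3 c = box.getCenter();
    const PxVec3 e = box.getExtents();

    printf("intersects=%d center=(%f %f %f) extents=(%f %f %f)\n",
           int(box.intersects(unitBox)),
           double(c.x), double(c.y), double(c.z), double(e.x), double(e.y), double(e.z));

    box.fattenSafe(0.5f);                        // safe even if the bounds were still empty
    return 0;
}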
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxArray.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_ARRAY_H #define PX_ARRAY_H #include "foundation/PxAssert.h" #include "foundation/PxMathIntrinsics.h" #include "foundation/PxAllocator.h" #include "foundation/PxBasicTemplates.h" #include "foundation/PxMemory.h" namespace physx { /*! An array is a sequential container. Implementation note * entries between 0 and size are valid objects * we use inheritance to build this because the array is included inline in a lot of objects and we want the allocator to take no space if it's not stateful, which aggregation doesn't allow. Also, we want the metadata at the front for the inline case where the allocator contains some inline storage space */ template <class T, class Alloc = typename PxAllocatorTraits<T>::Type> class PxArray : protected Alloc { public: typedef T* Iterator; typedef const T* ConstIterator; explicit PxArray(const PxEMPTY v) : Alloc(v) { if(mData) mCapacity |= PX_SIGN_BITMASK; } /*! Default array constructor. Initialize an empty array */ PX_INLINE explicit PxArray(const Alloc& alloc = Alloc()) : Alloc(alloc), mData(0), mSize(0), mCapacity(0) { } /*! Initialize array with given capacity */ PX_INLINE explicit PxArray(uint32_t size, const T& a = T(), const Alloc& alloc = Alloc()) : Alloc(alloc), mData(0), mSize(0), mCapacity(0) { resize(size, a); } /*! Copy-constructor. Copy all entries from other array */ template <class A> PX_INLINE explicit PxArray(const PxArray<T, A>& other, const Alloc& alloc = Alloc()) : Alloc(alloc) { copy(other); } // This is necessary else the basic default copy constructor is used in the case of both arrays being of the same // template instance // The C++ standard clearly states that a template constructor is never a copy constructor [2]. In other words, // the presence of a template constructor does not suppress the implicit declaration of the copy constructor. 
// Also never make a copy constructor explicit, or copy-initialization* will no longer work. This is because // 'binding an rvalue to a const reference requires an accessible copy constructor' (http://gcc.gnu.org/bugs/) // *http://stackoverflow.com/questions/1051379/is-there-a-difference-in-c-between-copy-initialization-and-assignment-initializ PX_INLINE PxArray(const PxArray& other, const Alloc& alloc = Alloc()) : Alloc(alloc) { copy(other); } /*! Initialize array with given length */ PX_INLINE explicit PxArray(const T* first, const T* last, const Alloc& alloc = Alloc()) : Alloc(alloc), mSize(last < first ? 0 : uint32_t(last - first)), mCapacity(mSize) { mData = allocate(mSize); copy(mData, mData + mSize, first); } /*! Destructor */ PX_INLINE ~PxArray() { destroy(mData, mData + mSize); if(capacity() && !isInUserMemory()) deallocate(mData); } /*! Assignment operator. Copy content (deep-copy) */ template <class A> PX_INLINE PxArray& operator=(const PxArray<T, A>& rhs) { if(&rhs == this) return *this; clear(); reserve(rhs.mSize); copy(mData, mData + rhs.mSize, rhs.mData); mSize = rhs.mSize; return *this; } PX_INLINE PxArray& operator=(const PxArray& t) // Needs to be declared, see comment at copy-constructor { return operator=<Alloc>(t); } /*! Array indexing operator. \param i The index of the element that will be returned. \return The element i in the array. */ PX_FORCE_INLINE const T& operator[](uint32_t i) const { PX_ASSERT(i < mSize); return mData[i]; } /*! Array indexing operator. \param i The index of the element that will be returned. \return The element i in the array. */ PX_FORCE_INLINE T& operator[](uint32_t i) { PX_ASSERT(i < mSize); return mData[i]; } /*! Returns a pointer to the initial element of the array. \return a pointer to the initial element of the array. */ PX_FORCE_INLINE ConstIterator begin() const { return mData; } PX_FORCE_INLINE Iterator begin() { return mData; } /*! Returns an iterator beyond the last element of the array. Do not dereference. \return a pointer to the element beyond the last element of the array. */ PX_FORCE_INLINE ConstIterator end() const { return mData + mSize; } PX_FORCE_INLINE Iterator end() { return mData + mSize; } /*! Returns a reference to the first element of the array. Undefined if the array is empty. \return a reference to the first element of the array */ PX_FORCE_INLINE const T& front() const { PX_ASSERT(mSize); return mData[0]; } PX_FORCE_INLINE T& front() { PX_ASSERT(mSize); return mData[0]; } /*! Returns a reference to the last element of the array. Undefined if the array is empty \return a reference to the last element of the array */ PX_FORCE_INLINE const T& back() const { PX_ASSERT(mSize); return mData[mSize - 1]; } PX_FORCE_INLINE T& back() { PX_ASSERT(mSize); return mData[mSize - 1]; } /*! Returns the number of entries in the array. This can, and probably will, differ from the array capacity. \return The number of of entries in the array. */ PX_FORCE_INLINE uint32_t size() const { return mSize; } /*! Clears the array. */ PX_INLINE void clear() { destroy(mData, mData + mSize); mSize = 0; } /*! Returns whether the array is empty (i.e. whether its size is 0). \return true if the array is empty */ PX_FORCE_INLINE bool empty() const { return mSize == 0; } /*! Finds the first occurrence of an element in the array. \param a The element to find. 
*/ PX_INLINE Iterator find(const T& a) { uint32_t index; for(index = 0; index < mSize && mData[index] != a; index++) ; return mData + index; } PX_INLINE ConstIterator find(const T& a) const { uint32_t index; for(index = 0; index < mSize && mData[index] != a; index++) ; return mData + index; } ///////////////////////////////////////////////////////////////////////// /*! Adds one element to the end of the array. Operation is O(1). \param a The element that will be added to this array. */ ///////////////////////////////////////////////////////////////////////// PX_FORCE_INLINE T& pushBack(const T& a) { if(capacity() <= mSize) return growAndPushBack(a); PX_PLACEMENT_NEW(reinterpret_cast<void*>(mData + mSize), T)(a); return mData[mSize++]; } ///////////////////////////////////////////////////////////////////////// /*! Returns the element at the end of the array. Only legal if the array is non-empty. */ ///////////////////////////////////////////////////////////////////////// PX_INLINE T popBack() { PX_ASSERT(mSize); T t = mData[mSize - 1]; mData[--mSize].~T(); return t; } ///////////////////////////////////////////////////////////////////////// /*! Construct one element at the end of the array. Operation is O(1). */ ///////////////////////////////////////////////////////////////////////// PX_INLINE T& insert() { if(capacity() <= mSize) grow(capacityIncrement()); T* ptr = mData + mSize++; PX_PLACEMENT_NEW(ptr, T); // not 'T()' because PODs should not get default-initialized. return *ptr; } ///////////////////////////////////////////////////////////////////////// /*! Subtracts the element on position i from the array and replace it with the last element. Operation is O(1) \param i The position of the element that will be subtracted from this array. */ ///////////////////////////////////////////////////////////////////////// PX_INLINE void replaceWithLast(uint32_t i) { PX_ASSERT(i < mSize); mData[i] = mData[--mSize]; mData[mSize].~T(); } PX_INLINE void replaceWithLast(Iterator i) { replaceWithLast(static_cast<uint32_t>(i - mData)); } ///////////////////////////////////////////////////////////////////////// /*! Replaces the first occurrence of the element a with the last element Operation is O(n) \param a The position of the element that will be subtracted from this array. \return true if the element has been removed. */ ///////////////////////////////////////////////////////////////////////// PX_INLINE bool findAndReplaceWithLast(const T& a) { uint32_t index = 0; while(index < mSize && mData[index] != a) ++index; if(index == mSize) return false; replaceWithLast(index); return true; } ///////////////////////////////////////////////////////////////////////// /*! Subtracts the element on position i from the array. Shift the entire array one step. Operation is O(n) \param i The position of the element that will be subtracted from this array. */ ///////////////////////////////////////////////////////////////////////// PX_INLINE void remove(uint32_t i) { PX_ASSERT(i < mSize); T* it = mData + i; it->~T(); while (++i < mSize) { PX_PLACEMENT_NEW(it, T(mData[i])); ++it; it->~T(); } --mSize; } ///////////////////////////////////////////////////////////////////////// /*! Removes a range from the array. Shifts the array so order is maintained. Operation is O(n) \param begin The starting position of the element that will be subtracted from this array. \param count The number of elments that will be subtracted from this array. 
*/ ///////////////////////////////////////////////////////////////////////// PX_INLINE void removeRange(uint32_t begin, uint32_t count) { PX_ASSERT(begin < mSize); PX_ASSERT((begin + count) <= mSize); for(uint32_t i = 0; i < count; i++) mData[begin + i].~T(); // call the destructor on the ones being removed first. T* dest = &mData[begin]; // location we are copying the tail end objects to T* src = &mData[begin + count]; // start of tail objects uint32_t move_count = mSize - (begin + count); // compute remainder that needs to be copied down for(uint32_t i = 0; i < move_count; i++) { PX_PLACEMENT_NEW(dest, T(*src)); // copy the old one to the new location src->~T(); // call the destructor on the old location dest++; src++; } mSize -= count; } ////////////////////////////////////////////////////////////////////////// /*! Resize array */ ////////////////////////////////////////////////////////////////////////// PX_NOINLINE void resize(const uint32_t size, const T& a = T()); PX_NOINLINE void resizeUninitialized(const uint32_t size); ////////////////////////////////////////////////////////////////////////// /*! Resize array such that only as much memory is allocated to hold the existing elements */ ////////////////////////////////////////////////////////////////////////// PX_INLINE void shrink() { recreate(mSize); } ////////////////////////////////////////////////////////////////////////// /*! Deletes all array elements and frees memory. */ ////////////////////////////////////////////////////////////////////////// PX_INLINE void reset() { resize(0); shrink(); } ////////////////////////////////////////////////////////////////////////// /*! Resets or clears the array depending on occupancy. */ ////////////////////////////////////////////////////////////////////////// PX_INLINE void resetOrClear() { const PxU32 c = capacity(); const PxU32 s = size(); if(s>=c/2) clear(); else reset(); } ////////////////////////////////////////////////////////////////////////// /*! Ensure that the array has at least size capacity. */ ////////////////////////////////////////////////////////////////////////// PX_INLINE void reserve(const uint32_t capacity) { if(capacity > this->capacity()) grow(capacity); } ////////////////////////////////////////////////////////////////////////// /*! Query the capacity(allocated mem) for the array. */ ////////////////////////////////////////////////////////////////////////// PX_FORCE_INLINE uint32_t capacity() const { return mCapacity & ~PX_SIGN_BITMASK; } ////////////////////////////////////////////////////////////////////////// /*! Unsafe function to force the size of the array */ ////////////////////////////////////////////////////////////////////////// PX_FORCE_INLINE void forceSize_Unsafe(uint32_t size) { PX_ASSERT(size <= mCapacity); mSize = size; } ////////////////////////////////////////////////////////////////////////// /*! Swap contents of an array without allocating temporary storage */ ////////////////////////////////////////////////////////////////////////// PX_INLINE void swap(PxArray<T, Alloc>& other) { PxSwap(mData, other.mData); PxSwap(mSize, other.mSize); PxSwap(mCapacity, other.mCapacity); } ////////////////////////////////////////////////////////////////////////// /*! 
Assign a range of values to this vector (resizes to length of range) */ ////////////////////////////////////////////////////////////////////////// PX_INLINE void assign(const T* first, const T* last) { resizeUninitialized(uint32_t(last - first)); copy(begin(), end(), first); } // We need one bit to mark arrays that have been deserialized from a user-provided memory block. // For alignment & memory saving purpose we store that bit in the rarely used capacity member. PX_FORCE_INLINE uint32_t isInUserMemory() const { return mCapacity & PX_SIGN_BITMASK; } /// return reference to allocator PX_INLINE Alloc& getAllocator() { return *this; } protected: // constructor for where we don't own the memory PxArray(T* memory, uint32_t size, uint32_t capacity, const Alloc& alloc = Alloc()) : Alloc(alloc), mData(memory), mSize(size), mCapacity(capacity | PX_SIGN_BITMASK) { } template <class A> PX_NOINLINE void copy(const PxArray<T, A>& other); PX_INLINE T* allocate(uint32_t size) { if(size > 0) { T* p = reinterpret_cast<T*>(Alloc::allocate(sizeof(T) * size, PX_FL)); PxMarkSerializedMemory(p, sizeof(T) * size); return p; } return 0; } PX_INLINE void deallocate(void* mem) { Alloc::deallocate(mem); } static PX_INLINE void create(T* first, T* last, const T& a) { for(; first < last; ++first) ::PX_PLACEMENT_NEW(first, T(a)); } static PX_INLINE void copy(T* first, T* last, const T* src) { if(last <= first) return; for(; first < last; ++first, ++src) ::PX_PLACEMENT_NEW(first, T(*src)); } static PX_INLINE void destroy(T* first, T* last) { for(; first < last; ++first) first->~T(); } /*! Called when pushBack() needs to grow the array. \param a The element that will be added to this array. */ PX_NOINLINE T& growAndPushBack(const T& a); /*! Resizes the available memory for the array. \param capacity The number of entries that the set should be able to hold. */ PX_INLINE void grow(uint32_t capacity) { PX_ASSERT(this->capacity() < capacity); recreate(capacity); } /*! Creates a new memory block, copies all entries to the new block and destroys old entries. \param capacity The number of entries that the set should be able to hold. */ PX_NOINLINE void recreate(uint32_t capacity); // The idea here is to prevent accidental bugs with pushBack or insert. Unfortunately // it interacts badly with InlineArrays with smaller inline allocations. // TODO(dsequeira): policy template arg, this is exactly what they're for. PX_INLINE uint32_t capacityIncrement() const { const uint32_t capacity = this->capacity(); return capacity == 0 ? 
1 : capacity * 2; } T* mData; uint32_t mSize; uint32_t mCapacity; }; template <class T, class Alloc> PX_NOINLINE void PxArray<T, Alloc>::resize(const uint32_t size, const T& a) { reserve(size); create(mData + mSize, mData + size, a); destroy(mData + size, mData + mSize); mSize = size; } template <class T, class Alloc> template <class A> PX_NOINLINE void PxArray<T, Alloc>::copy(const PxArray<T, A>& other) { if(!other.empty()) { mData = allocate(mSize = mCapacity = other.size()); copy(mData, mData + mSize, other.begin()); } else { mData = NULL; mSize = 0; mCapacity = 0; } // mData = allocate(other.mSize); // mSize = other.mSize; // mCapacity = other.mSize; // copy(mData, mData + mSize, other.mData); } template <class T, class Alloc> PX_NOINLINE void PxArray<T, Alloc>::resizeUninitialized(const uint32_t size) { reserve(size); mSize = size; } template <class T, class Alloc> PX_NOINLINE T& PxArray<T, Alloc>::growAndPushBack(const T& a) { uint32_t capacity = capacityIncrement(); T* newData = allocate(capacity); PX_ASSERT((!capacity) || (newData && (newData != mData))); copy(newData, newData + mSize, mData); // inserting element before destroying old array // avoids referencing destroyed object when duplicating array element. PX_PLACEMENT_NEW(reinterpret_cast<void*>(newData + mSize), T)(a); destroy(mData, mData + mSize); if(!isInUserMemory()) deallocate(mData); mData = newData; mCapacity = capacity; return mData[mSize++]; } template <class T, class Alloc> PX_NOINLINE void PxArray<T, Alloc>::recreate(uint32_t capacity) { T* newData = allocate(capacity); PX_ASSERT((!capacity) || (newData && (newData != mData))); copy(newData, newData + mSize, mData); destroy(mData, mData + mSize); if(!isInUserMemory()) deallocate(mData); mData = newData; mCapacity = capacity; } template <class T, class Alloc> PX_INLINE void swap(PxArray<T, Alloc>& x, PxArray<T, Alloc>& y) { x.swap(y); } } // namespace physx #endif
18,941
C
25.235457
127
0.597645
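A hedged sketch of the PxArray container above. The function name arrayDemo is illustrative, and the sketch assumes the surrounding application has already initialized the PhysX foundation so the default allocator behind PxArray can serve requests.

#include "foundation/PxSimpleTypes.h"
#include "foundation/PxArray.h"
#include <cstdio>

void arrayDemo()
{
    physx::PxArray<physx::PxU32> values;
    values.reserve(8);                           // grow capacity up front

    for(physx::PxU32 i = 0; i < 5; ++i)
        values.pushBack(i * 10);                 // 0, 10, 20, 30, 40

    values.replaceWithLast(1);                   // O(1) unordered removal: 0, 40, 20, 30
    const physx::PxU32 last = values.popBack();  // removes and returns 30

    printf("size=%u capacity=%u last=%u contains20=%d\n",
           values.size(), values.capacity(), last,
           int(values.find(20u) != values.end()));
}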
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxStrideIterator.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_STRIDE_ITERATOR_H #define PX_STRIDE_ITERATOR_H #include "foundation/Px.h" #include "foundation/PxAssert.h" /** \addtogroup foundation @{ */ #if !PX_DOXYGEN namespace physx { #endif /** \brief Iterator class for iterating over arrays of data that may be interleaved with other data. This class is used for iterating over arrays of elements that may have a larger element to element offset, called the stride, than the size of the element itself (non-contiguous). The template parameter T denotes the type of the element accessed. The stride itself is stored as a member field so multiple instances of a PxStrideIterator class can have different strides. This is useful for cases were the stride depends on runtime configuration. The stride iterator can be used for index based access, e.g.: \code PxStrideIterator<PxVec3> strideArray(...); for (unsigned i = 0; i < 10; ++i) { PxVec3& vec = strideArray[i]; ... } \endcode or iteration by increment, e.g.: \code PxStrideIterator<PxVec3> strideBegin(...); PxStrideIterator<PxVec3> strideEnd(strideBegin + 10); for (PxStrideIterator<PxVec3> it = strideBegin; it < strideEnd; ++it) { PxVec3& vec = *it; ... } \endcode Two special cases: - A stride of sizeof(T) represents a regular c array of type T. - A stride of 0 can be used to describe re-occurrence of the same element multiple times. */ template <typename T> class PxStrideIterator { #if !PX_DOXYGEN template <typename X> struct StripConst { typedef X Type; }; template <typename X> struct StripConst<const X> { typedef X Type; }; #endif public: /** \brief Constructor. Optionally takes a pointer to an element and a stride. \param[in] ptr pointer to element, defaults to NULL. \param[in] stride stride for accessing consecutive elements, defaults to the size of one element. 
*/ explicit PX_INLINE PxStrideIterator(T* ptr = NULL, PxU32 stride = sizeof(T)) : mPtr(ptr), mStride(stride) { PX_ASSERT(mStride == 0 || sizeof(T) <= mStride); } /** \brief Copy constructor. \param[in] strideIterator PxStrideIterator to be copied. */ PX_INLINE PxStrideIterator(const PxStrideIterator<typename StripConst<T>::Type>& strideIterator) : mPtr(strideIterator.ptr()), mStride(strideIterator.stride()) { PX_ASSERT(mStride == 0 || sizeof(T) <= mStride); } /** \brief Get pointer to element. */ PX_INLINE T* ptr() const { return mPtr; } /** \brief Get stride. */ PX_INLINE PxU32 stride() const { return mStride; } /** \brief Indirection operator. */ PX_INLINE T& operator*() const { return *mPtr; } /** \brief Dereferencing operator. */ PX_INLINE T* operator->() const { return mPtr; } /** \brief Indexing operator. */ PX_INLINE T& operator[](unsigned int i) const { return *byteAdd(mPtr, i * stride()); } /** \brief Pre-increment operator. */ PX_INLINE PxStrideIterator& operator++() { mPtr = byteAdd(mPtr, stride()); return *this; } /** \brief Post-increment operator. */ PX_INLINE PxStrideIterator operator++(int) { PxStrideIterator tmp = *this; mPtr = byteAdd(mPtr, stride()); return tmp; } /** \brief Pre-decrement operator. */ PX_INLINE PxStrideIterator& operator--() { mPtr = byteSub(mPtr, stride()); return *this; } /** \brief Post-decrement operator. */ PX_INLINE PxStrideIterator operator--(int) { PxStrideIterator tmp = *this; mPtr = byteSub(mPtr, stride()); return tmp; } /** \brief Addition operator. */ PX_INLINE PxStrideIterator operator+(unsigned int i) const { return PxStrideIterator(byteAdd(mPtr, i * stride()), stride()); } /** \brief Subtraction operator. */ PX_INLINE PxStrideIterator operator-(unsigned int i) const { return PxStrideIterator(byteSub(mPtr, i * stride()), stride()); } /** \brief Addition compound assignment operator. */ PX_INLINE PxStrideIterator& operator+=(unsigned int i) { mPtr = byteAdd(mPtr, i * stride()); return *this; } /** \brief Subtraction compound assignment operator. */ PX_INLINE PxStrideIterator& operator-=(unsigned int i) { mPtr = byteSub(mPtr, i * stride()); return *this; } /** \brief Iterator difference. */ PX_INLINE int operator-(const PxStrideIterator& other) const { PX_ASSERT(isCompatible(other)); int byteDiff = static_cast<int>(reinterpret_cast<const PxU8*>(mPtr) - reinterpret_cast<const PxU8*>(other.mPtr)); return byteDiff / static_cast<int>(stride()); } /** \brief Equality operator. */ PX_INLINE bool operator==(const PxStrideIterator& other) const { PX_ASSERT(isCompatible(other)); return mPtr == other.mPtr; } /** \brief Inequality operator. */ PX_INLINE bool operator!=(const PxStrideIterator& other) const { PX_ASSERT(isCompatible(other)); return mPtr != other.mPtr; } /** \brief Less than operator. */ PX_INLINE bool operator<(const PxStrideIterator& other) const { PX_ASSERT(isCompatible(other)); return mPtr < other.mPtr; } /** \brief Greater than operator. */ PX_INLINE bool operator>(const PxStrideIterator& other) const { PX_ASSERT(isCompatible(other)); return mPtr > other.mPtr; } /** \brief Less or equal than operator. */ PX_INLINE bool operator<=(const PxStrideIterator& other) const { PX_ASSERT(isCompatible(other)); return mPtr <= other.mPtr; } /** \brief Greater or equal than operator. 
*/ PX_INLINE bool operator>=(const PxStrideIterator& other) const { PX_ASSERT(isCompatible(other)); return mPtr >= other.mPtr; } private: PX_INLINE static T* byteAdd(T* ptr, PxU32 bytes) { return const_cast<T*>(reinterpret_cast<const T*>(reinterpret_cast<const PxU8*>(ptr) + bytes)); } PX_INLINE static T* byteSub(T* ptr, PxU32 bytes) { return const_cast<T*>(reinterpret_cast<const T*>(reinterpret_cast<const PxU8*>(ptr) - bytes)); } PX_INLINE bool isCompatible(const PxStrideIterator& other) const { int byteDiff = static_cast<int>(reinterpret_cast<const PxU8*>(mPtr) - reinterpret_cast<const PxU8*>(other.mPtr)); return (stride() == other.stride()) && (abs(byteDiff) % stride() == 0); } T* mPtr; PxU32 mStride; }; /** \brief Addition operator. */ template <typename T> PX_INLINE PxStrideIterator<T> operator+(int i, PxStrideIterator<T> it) { it += i; return it; } /** \brief Stride iterator factory function which infers the iterator type. */ template <typename T> PX_INLINE PxStrideIterator<T> PxMakeIterator(T* ptr, PxU32 stride = sizeof(T)) { return PxStrideIterator<T>(ptr, stride); } /** \brief Stride iterator factory function which infers the iterator type. */ template <typename T> PX_INLINE PxStrideIterator<const T> PxMakeIterator(const T* ptr, PxU32 stride = sizeof(T)) { return PxStrideIterator<const T>(ptr, stride); } #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
8,531
C
23.101695
115
0.703552
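The stride iterator above is most useful over interleaved data. The Vertex layout and translatePositions function below are hypothetical and exist only to show PxMakeIterator with a stride equal to the full element size.

#include "foundation/PxVec3.h"
#include "foundation/PxStrideIterator.h"

struct Vertex            // hypothetical interleaved layout, not part of PhysX
{
    physx::PxVec3 position;
    physx::PxVec3 normal;
};

void translatePositions(Vertex* vertices, physx::PxU32 count, const physx::PxVec3& offset)
{
    if(count == 0)
        return;

    // The stride is sizeof(Vertex), not sizeof(PxVec3), so the iterator skips the normals.
    physx::PxStrideIterator<physx::PxVec3> positions =
        physx::PxMakeIterator(&vertices[0].position, physx::PxU32(sizeof(Vertex)));

    for(physx::PxU32 i = 0; i < count; ++i)
        positions[i] += offset;
}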
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxErrors.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_ERRORS_H #define PX_ERRORS_H /** \addtogroup foundation @{ */ #include "foundation/Px.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief Error codes These error codes are passed to #PxErrorCallback @see PxErrorCallback */ struct PxErrorCode { enum Enum { eNO_ERROR = 0, //! \brief An informational message. eDEBUG_INFO = 1, //! \brief a warning message for the user to help with debugging eDEBUG_WARNING = 2, //! \brief method called with invalid parameter(s) eINVALID_PARAMETER = 4, //! \brief method was called at a time when an operation is not possible eINVALID_OPERATION = 8, //! \brief method failed to allocate some memory eOUT_OF_MEMORY = 16, /** \brief The library failed for some reason. Possibly you have passed invalid values like NaNs, which are not checked for. */ eINTERNAL_ERROR = 32, //! \brief An unrecoverable error, execution should be halted and log output flushed eABORT = 64, //! \brief The SDK has determined that an operation may result in poor performance. ePERF_WARNING = 128, //! 
\brief A bit mask for including all errors eMASK_ALL = -1 }; }; #if PX_CHECKED #define PX_CHECK_MSG(exp, msg) (!!(exp) || (PxGetFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, PX_FL, msg), 0) ) #define PX_CHECK_AND_RETURN(exp, msg) { if(!(exp)) { PxGetFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, PX_FL, msg); return; } } #define PX_CHECK_AND_RETURN_NULL(exp, msg) { if(!(exp)) { PxGetFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, PX_FL, msg); return 0; } } #define PX_CHECK_AND_RETURN_VAL(exp, msg, r) { if(!(exp)) { PxGetFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, PX_FL, msg); return r; } } #else #define PX_CHECK_MSG(exp, msg) #define PX_CHECK_AND_RETURN(exp, msg) #define PX_CHECK_AND_RETURN_NULL(exp, msg) #define PX_CHECK_AND_RETURN_VAL(exp, msg, r) #endif // shortcut macros: // usage: PxGetFoundation().error(PX_WARN, "static friction %f is is lower than dynamic friction %d", sfr, dfr); #define PX_WARN ::physx::PxErrorCode::eDEBUG_WARNING, PX_FL #define PX_INFO ::physx::PxErrorCode::eDEBUG_INFO, PX_FL #if PX_DEBUG || PX_CHECKED #define PX_WARN_ONCE(string) \ { \ static PxU32 timestamp = 0; \ const PxU32 ts = PxGetWarnOnceTimeStamp(); \ if(timestamp != ts) \ { \ timestamp = ts; \ PxGetFoundation().error(PX_WARN, string); \ } \ } #define PX_WARN_ONCE_IF(condition, string) \ { \ if(condition) \ { \ PX_WARN_ONCE(string) \ } \ } #else #define PX_WARN_ONCE(string) ((void)0) #define PX_WARN_ONCE_IF(condition, string) ((void)0) #endif #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
4,739
C
34.639097
151
0.660477
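A sketch of how the checked-build macros above are meant to be used for parameter validation. safeInverse is an illustrative name, the foundation/PxFoundation.h include (which declares PxGetFoundation, used by the macros but not shown in this dump) is an assumption, and the check only expands to real code in PX_CHECKED builds.

#include "foundation/PxFoundation.h"   // assumed to declare PxGetFoundation(), needed by the macros
#include "foundation/PxErrors.h"

// Illustrative helper: in checked builds it reports an eINVALID_PARAMETER error
// and returns 0.0f when called with zero; otherwise it compiles to a plain division.
float safeInverse(float value)
{
    PX_CHECK_AND_RETURN_VAL(value != 0.0f, "safeInverse: value must be non-zero", 0.0f);
    return 1.0f / value;
}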
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxUtilities.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_UTILITIES_H #define PX_UTILITIES_H #include "foundation/PxVec3.h" #include "foundation/PxAssert.h" #include "foundation/PxIntrinsics.h" #include "foundation/PxBasicTemplates.h" #if !PX_DOXYGEN namespace physx { #endif PX_INLINE char PxLittleEndian() { int i = 1; return *(reinterpret_cast<char*>(&i)); } // PT: checked casts PX_CUDA_CALLABLE PX_FORCE_INLINE PxU32 PxTo32(PxU64 value) { PX_ASSERT(value <= 0xffffffff); return PxU32(value); } PX_CUDA_CALLABLE PX_FORCE_INLINE PxU16 PxTo16(PxU32 value) { PX_ASSERT(value <= 0xffff); return PxU16(value); } PX_CUDA_CALLABLE PX_FORCE_INLINE PxU8 PxTo8(PxU16 value) { PX_ASSERT(value <= 0xff); return PxU8(value); } PX_CUDA_CALLABLE PX_FORCE_INLINE PxU8 PxTo8(PxU32 value) { PX_ASSERT(value <= 0xff); return PxU8(value); } PX_CUDA_CALLABLE PX_FORCE_INLINE PxU8 PxTo8(PxI32 value) { PX_ASSERT(value <= 0xff); PX_ASSERT(value >= 0); return PxU8(value); } PX_CUDA_CALLABLE PX_FORCE_INLINE PxI8 PxToI8(PxU32 value) { PX_ASSERT(value <= 0x7f); return PxI8(value); } //! @cond /*! Get number of elements in array */ template <typename T, size_t N> char (&PxArraySizeHelper(T (&array)[N]))[N]; #define PX_ARRAY_SIZE(_array) (sizeof(physx::PxArraySizeHelper(_array))) //! @endcond /*! Sort two elements using operator< On return x will be the smaller of the two */ template <class T> PX_CUDA_CALLABLE PX_FORCE_INLINE void PxOrder(T& x, T& y) { if(y < x) PxSwap(x, y); } // most architectures can do predication on real comparisons, and on VMX, it matters PX_CUDA_CALLABLE PX_FORCE_INLINE void PxOrder(PxReal& x, PxReal& y) { PxReal newX = PxMin(x, y); PxReal newY = PxMax(x, y); x = newX; y = newY; } /*! 
Sort two elements using operator< and also keep order of any extra data */ template <class T, class E1> PX_CUDA_CALLABLE PX_FORCE_INLINE void PxOrder(T& x, T& y, E1& xe1, E1& ye1) { if(y < x) { swap(x, y); swap(xe1, ye1); } } #if PX_GCC_FAMILY && !PX_EMSCRIPTEN __attribute__((noreturn)) #endif PX_INLINE void PxDebugBreak() { #if PX_WINDOWS __debugbreak(); #elif PX_LINUX __builtin_trap(); #elif PX_GCC_FAMILY __builtin_trap(); #else PX_ASSERT(false); #endif } #if !PX_DOXYGEN } // namespace physx #endif #endif
3,911
C
25.612245
84
0.724367
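A small sketch of the checked casts and helpers above; main() is illustrative and nothing beyond the header shown is assumed.

#include "foundation/PxUtilities.h"
#include <cstdio>

int main()
{
    using namespace physx;

    const PxU64 big = 1000;            // known to fit into 32 bits
    const PxU32 narrow = PxTo32(big);  // asserts in debug builds if the value would truncate

    PxReal a = 3.0f, b = 1.0f;
    PxOrder(a, b);                     // afterwards a <= b

    const int table[] = { 1, 2, 3, 4 };
    printf("narrow=%u a=%f b=%f elements=%u littleEndian=%d\n",
           narrow, double(a), double(b), unsigned(PX_ARRAY_SIZE(table)), int(PxLittleEndian()));
    return 0;
}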
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxSimpleTypes.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_SIMPLE_TYPES_H #define PX_SIMPLE_TYPES_H /** \addtogroup foundation @{ */ // Platform specific types: // Design note: Its OK to use int for general loop variables and temps. 
#include "foundation/PxPreprocessor.h" #if PX_VC #pragma warning(push) #pragma warning(disable : 4668) // suppressing warning generated by Microsoft Visual Studio when including this standard // header #endif #if PX_LINUX #define __STDC_LIMIT_MACROS #endif #include <stdint.h> #if PX_VC #pragma warning(pop) #endif #if PX_VC // we could use inttypes.h starting with VC12 #define PX_PRIu64 "I64u" #else #if !PX_APPLE_FAMILY #define __STDC_FORMAT_MACROS #endif #include <inttypes.h> #define PX_PRIu64 PRIu64 #endif #if !PX_DOXYGEN namespace physx { #endif typedef int64_t PxI64; typedef uint64_t PxU64; typedef int32_t PxI32; typedef uint32_t PxU32; typedef int16_t PxI16; typedef uint16_t PxU16; typedef int8_t PxI8; typedef uint8_t PxU8; typedef float PxF32; typedef double PxF64; typedef float PxReal; // Int-as-bool type - has some uses for efficiency and with SIMD typedef PxI32 PxIntBool; static const PxIntBool PxIntFalse = 0; static const PxIntBool PxIntTrue = 1; #if !PX_DOXYGEN } // namespace physx #endif #define PX_SIGN_BITMASK 0x80000000 // Type ranges #define PX_MAX_F32 3.4028234663852885981170418348452e+38F // maximum possible float value #define PX_MAX_F64 DBL_MAX // maximum possible double value #define PX_EPS_F32 FLT_EPSILON // maximum relative error of float rounding #define PX_EPS_F64 DBL_EPSILON // maximum relative error of double rounding #define PX_MAX_REAL PX_MAX_F32 #define PX_EPS_REAL PX_EPS_F32 #define PX_NORMALIZATION_EPSILON float(1e-20f) // Legacy type ranges used by PhysX #define PX_MAX_I8 INT8_MAX #define PX_MIN_I8 INT8_MIN #define PX_MAX_U8 UINT8_MAX #define PX_MIN_U8 UINT8_MIN #define PX_MAX_I16 INT16_MAX #define PX_MIN_I16 INT16_MIN #define PX_MAX_U16 UINT16_MAX #define PX_MIN_U16 UINT16_MIN #define PX_MAX_I32 INT32_MAX #define PX_MIN_I32 INT32_MIN #define PX_MAX_U32 UINT32_MAX #define PX_MIN_U32 UINT32_MIN /** @} */ #endif
3,771
C
30.433333
120
0.760276
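A brief sketch of the fixed-width typedefs and the PX_PRIu64 format helper defined above; main() is illustrative only.

#include "foundation/PxSimpleTypes.h"
#include <cstdio>

int main()
{
    // PxU64 maps onto uint64_t, so the largest 32-bit value plus one still fits.
    physx::PxU64 big = PX_MAX_U32;
    physx::PxReal maxExtent = PX_MAX_F32 * 0.25f;  // the same expression PxBounds3 uses for PX_MAX_BOUNDS_EXTENTS

    printf("big+1=%" PX_PRIu64 " maxExtent=%e\n", big + 1, double(maxExtent));
    return 0;
}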
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxVec4.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_VEC4_H #define PX_VEC4_H /** \addtogroup foundation @{ */ #include "foundation/PxMath.h" #include "foundation/PxVec3.h" /** \brief 4 Element vector class. This is a 4-dimensional vector class with public data members. */ #if !PX_DOXYGEN namespace physx { #endif template<class Type> class PxVec4T { public: /** \brief default constructor leaves data uninitialized. */ PX_CUDA_CALLABLE PX_INLINE PxVec4T() { } /** \brief zero constructor. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec4T(PxZERO) : x(Type(0.0)), y(Type(0.0)), z(Type(0.0)), w(Type(0.0)) { } /** \brief Assigns scalar parameter to all elements. Useful to initialize to zero or one. \param[in] a Value to assign to elements. */ explicit PX_CUDA_CALLABLE PX_INLINE PxVec4T(Type a) : x(a), y(a), z(a), w(a) { } /** \brief Initializes from 3 scalar parameters. \param[in] nx Value to initialize X component. \param[in] ny Value to initialize Y component. \param[in] nz Value to initialize Z component. \param[in] nw Value to initialize W component. */ PX_CUDA_CALLABLE PX_INLINE PxVec4T(Type nx, Type ny, Type nz, Type nw) : x(nx), y(ny), z(nz), w(nw) { } /** \brief Initializes from 3 scalar parameters. \param[in] v Value to initialize the X, Y, and Z components. \param[in] nw Value to initialize W component. */ PX_CUDA_CALLABLE PX_INLINE PxVec4T(const PxVec3T<Type>& v, Type nw) : x(v.x), y(v.y), z(v.z), w(nw) { } /** \brief Initializes from an array of scalar parameters. \param[in] v Value to initialize with. */ explicit PX_CUDA_CALLABLE PX_INLINE PxVec4T(const Type v[]) : x(v[0]), y(v[1]), z(v[2]), w(v[3]) { } /** \brief Copy ctor. 
*/ PX_CUDA_CALLABLE PX_INLINE PxVec4T(const PxVec4T& v) : x(v.x), y(v.y), z(v.z), w(v.w) { } // Operators /** \brief Assignment operator */ PX_CUDA_CALLABLE PX_INLINE PxVec4T& operator=(const PxVec4T& p) { x = p.x; y = p.y; z = p.z; w = p.w; return *this; } /** \brief element access */ PX_CUDA_CALLABLE PX_INLINE Type& operator[](unsigned int index) { PX_ASSERT(index <= 3); return reinterpret_cast<Type*>(this)[index]; } /** \brief element access */ PX_CUDA_CALLABLE PX_INLINE const Type& operator[](unsigned int index) const { PX_ASSERT(index <= 3); return reinterpret_cast<const Type*>(this)[index]; } /** \brief returns true if the two vectors are exactly equal. */ PX_CUDA_CALLABLE PX_INLINE bool operator==(const PxVec4T& v) const { return x == v.x && y == v.y && z == v.z && w == v.w; } /** \brief returns true if the two vectors are not exactly equal. */ PX_CUDA_CALLABLE PX_INLINE bool operator!=(const PxVec4T& v) const { return x != v.x || y != v.y || z != v.z || w != v.w; } /** \brief tests for exact zero vector */ PX_CUDA_CALLABLE PX_INLINE bool isZero() const { return x == Type(0) && y == Type(0) && z == Type(0) && w == Type(0); } /** \brief returns true if all 3 elems of the vector are finite (not NAN or INF, etc.) */ PX_CUDA_CALLABLE PX_INLINE bool isFinite() const { return PxIsFinite(x) && PxIsFinite(y) && PxIsFinite(z) && PxIsFinite(w); } /** \brief is normalized - used by API parameter validation */ PX_CUDA_CALLABLE PX_INLINE bool isNormalized() const { const Type unitTolerance = Type(1e-4); return isFinite() && PxAbs(magnitude() - Type(1.0)) < unitTolerance; } /** \brief returns the squared magnitude Avoids calling PxSqrt()! */ PX_CUDA_CALLABLE PX_INLINE Type magnitudeSquared() const { return x * x + y * y + z * z + w * w; } /** \brief returns the magnitude */ PX_CUDA_CALLABLE PX_INLINE Type magnitude() const { return PxSqrt(magnitudeSquared()); } /** \brief negation */ PX_CUDA_CALLABLE PX_INLINE PxVec4T operator-() const { return PxVec4T(-x, -y, -z, -w); } /** \brief vector addition */ PX_CUDA_CALLABLE PX_INLINE PxVec4T operator+(const PxVec4T& v) const { return PxVec4T(x + v.x, y + v.y, z + v.z, w + v.w); } /** \brief vector difference */ PX_CUDA_CALLABLE PX_INLINE PxVec4T operator-(const PxVec4T& v) const { return PxVec4T(x - v.x, y - v.y, z - v.z, w - v.w); } /** \brief scalar post-multiplication */ PX_CUDA_CALLABLE PX_INLINE PxVec4T operator*(Type f) const { return PxVec4T(x * f, y * f, z * f, w * f); } /** \brief scalar division */ PX_CUDA_CALLABLE PX_INLINE PxVec4T operator/(Type f) const { f = Type(1.0) / f; return PxVec4T(x * f, y * f, z * f, w * f); } /** \brief vector addition */ PX_CUDA_CALLABLE PX_INLINE PxVec4T& operator+=(const PxVec4T& v) { x += v.x; y += v.y; z += v.z; w += v.w; return *this; } /** \brief vector difference */ PX_CUDA_CALLABLE PX_INLINE PxVec4T& operator-=(const PxVec4T& v) { x -= v.x; y -= v.y; z -= v.z; w -= v.w; return *this; } /** \brief scalar multiplication */ PX_CUDA_CALLABLE PX_INLINE PxVec4T& operator*=(Type f) { x *= f; y *= f; z *= f; w *= f; return *this; } /** \brief scalar division */ PX_CUDA_CALLABLE PX_INLINE PxVec4T& operator/=(Type f) { f = Type(1.0) / f; x *= f; y *= f; z *= f; w *= f; return *this; } /** \brief returns the scalar product of this and other. */ PX_CUDA_CALLABLE PX_INLINE Type dot(const PxVec4T& v) const { return x * v.x + y * v.y + z * v.z + w * v.w; } /** returns a unit vector */ PX_CUDA_CALLABLE PX_INLINE PxVec4T getNormalized() const { const Type m = magnitudeSquared(); return m > Type(0.0) ? 
*this * PxRecipSqrt(m) : PxVec4T(Type(0)); } /** \brief normalizes the vector in place */ PX_CUDA_CALLABLE PX_INLINE Type normalize() { const Type m = magnitude(); if(m > Type(0.0)) *this /= m; return m; } /** \brief a[i] * b[i], for all i. */ PX_CUDA_CALLABLE PX_INLINE PxVec4T multiply(const PxVec4T& a) const { return PxVec4T(x * a.x, y * a.y, z * a.z, w * a.w); } /** \brief element-wise minimum */ PX_CUDA_CALLABLE PX_INLINE PxVec4T minimum(const PxVec4T& v) const { return PxVec4T(PxMin(x, v.x), PxMin(y, v.y), PxMin(z, v.z), PxMin(w, v.w)); } /** \brief element-wise maximum */ PX_CUDA_CALLABLE PX_INLINE PxVec4T maximum(const PxVec4T& v) const { return PxVec4T(PxMax(x, v.x), PxMax(y, v.y), PxMax(z, v.z), PxMax(w, v.w)); } PX_CUDA_CALLABLE PX_INLINE PxVec3T<Type> getXYZ() const { return PxVec3T<Type>(x, y, z); } Type x, y, z, w; }; template<class Type> PX_CUDA_CALLABLE static PX_INLINE PxVec4T<Type> operator*(Type f, const PxVec4T<Type>& v) { return PxVec4T<Type>(f * v.x, f * v.y, f * v.z, f * v.w); } typedef PxVec4T<float> PxVec4; typedef PxVec4T<double> PxVec4d; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
8,347
C
21.623306
106
0.653049
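A short sketch of the PxVec4 typedef defined above, using only members declared in this header; the example() function itself is illustrative and not part of the SDK.

#include "foundation/PxVec4.h"
using namespace physx;

float example()
{
    PxVec4 a(1.0f, 2.0f, 3.0f, 4.0f);   // per-component constructor
    PxVec4 b(0.5f);                      // splat constructor: every component set to 0.5
    PxVec4 sum = a + b;                  // component-wise addition
    float  d   = a.dot(b);               // 4D dot product
    PxVec4 n   = sum.getNormalized();    // unit-length copy (zero vector if the magnitude is 0)
    PxVec3 xyz = a.getXYZ();             // drop w, keep x/y/z
    return d + n.magnitude() + xyz.x;
}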
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxThread.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_THREAD_H #define PX_THREAD_H #include "foundation/PxUserAllocated.h" // todo: these need to go somewhere else // PT: looks like this is still used on some platforms #if PX_WINDOWS_FAMILY #define PxSpinLockPause() __asm pause #elif PX_LINUX || PX_APPLE_FAMILY || PX_SWITCH #define PxSpinLockPause() asm("nop") #else #error "Platform not supported!" #endif #if !PX_DOXYGEN namespace physx { #endif struct PxThreadPriority { enum Enum { eHIGH = 0, //!< High priority eABOVE_NORMAL = 1, //!< Above Normal priority eNORMAL = 2, //!< Normal/default priority eBELOW_NORMAL = 3, //!< Below Normal priority eLOW = 4, //!< Low priority. eFORCE_DWORD = 0xffFFffFF }; }; class PxRunnable { public: PxRunnable() {} virtual ~PxRunnable() {} virtual void execute() {} }; class PX_FOUNDATION_API PxThreadImpl { public: typedef size_t Id; // space for a pointer or an integer typedef void* (*ExecuteFn)(void*); static PxU32 getDefaultStackSize(); static Id getId(); /** Construct (but do not start) the thread object. The OS thread object will not be created until start() is called. Executes in the context of the spawning thread. */ PxThreadImpl(); /** Construct and start the the thread, passing the given arg to the given fn. (pthread style) */ PxThreadImpl(ExecuteFn fn, void* arg, const char* name); /** Deallocate all resources associated with the thread. Should be called in the context of the spawning thread. */ ~PxThreadImpl(); /** Create the OS thread and start it running. Called in the context of the spawning thread. If an affinity mask has previously been set then it will be applied after the thread has been created. */ void start(PxU32 stackSize, PxRunnable* r); /** Violently kill the current thread. Blunt instrument, not recommended since it can leave all kinds of things unreleased (stack, memory, mutexes...) 
Should be called in the context of the spawning thread. */ void kill(); /** Stop the thread. Signals the spawned thread that it should stop, so the thread should check regularly */ void signalQuit(); /** Wait for a thread to stop. Should be called in the context of the spawning thread. Returns false if the thread has not been started. */ bool waitForQuit(); /** check whether the thread is signalled to quit. Called in the context of the spawned thread. */ bool quitIsSignalled(); /** Cleanly shut down this thread. Called in the context of the spawned thread. */ void quit(); /** Change the affinity mask for this thread. The mask is a platform specific value. On Windows, Linux, and Switch platforms, each set mask bit represents the index of a logical processor that the OS may schedule thread execution on. Bits outside the range of valid logical processors may be ignored or cause the function to return an error. On Apple platforms, this function has no effect. If the thread has not yet been started then the mask is stored and applied when the thread is started. If the thread has already been started then this method returns the previous affinity mask on success, otherwise it returns zero. */ PxU32 setAffinityMask(PxU32 mask); static PxThreadPriority::Enum getPriority(Id threadId); /** Set thread priority. */ void setPriority(PxThreadPriority::Enum prio); /** set the thread's name */ void setName(const char* name); /** Put the current thread to sleep for the given number of milliseconds */ static void sleep(PxU32 ms); /** Yield the current thread's slot on the CPU */ static void yield(); /** Inform the processor that we're in a busy wait to give it a chance to do something clever. yield() yields the thread, while yieldProcessor() aims to yield the processor */ static void yieldProcessor(); /** Return the number of physical cores (does not include hyper-threaded cores), returns 0 on failure */ static PxU32 getNbPhysicalCores(); /** Size of this class. */ static PxU32 getSize(); }; /** Thread abstraction API */ template <typename Alloc = PxReflectionAllocator<PxThreadImpl> > class PxThreadT : protected Alloc, public PxUserAllocated, public PxRunnable { public: typedef PxThreadImpl::Id Id; // space for a pointer or an integer /** Construct (but do not start) the thread object. Executes in the context of the spawning thread */ PxThreadT(const Alloc& alloc = Alloc()) : Alloc(alloc) { mImpl = reinterpret_cast<PxThreadImpl*>(Alloc::allocate(PxThreadImpl::getSize(), PX_FL)); PX_PLACEMENT_NEW(mImpl, PxThreadImpl)(); } /** Construct and start the the thread, passing the given arg to the given fn. (pthread style) */ PxThreadT(PxThreadImpl::ExecuteFn fn, void* arg, const char* name, const Alloc& alloc = Alloc()) : Alloc(alloc) { mImpl = reinterpret_cast<PxThreadImpl*>(Alloc::allocate(PxThreadImpl::getSize(), PX_FL)); PX_PLACEMENT_NEW(mImpl, PxThreadImpl)(fn, arg, name); } /** Deallocate all resources associated with the thread. Should be called in the context of the spawning thread. */ virtual ~PxThreadT() { mImpl->~PxThreadImpl(); Alloc::deallocate(mImpl); } /** start the thread running. Called in the context of the spawning thread. */ void start(PxU32 stackSize = PxThreadImpl::getDefaultStackSize()) { mImpl->start(stackSize, this); } /** Violently kill the current thread. Blunt instrument, not recommended since it can leave all kinds of things unreleased (stack, memory, mutexes...) Should be called in the context of the spawning thread. 
*/ void kill() { mImpl->kill(); } /** The virtual execute() method is the user defined function that will run in the new thread. Called in the context of the spawned thread. */ virtual void execute(void) { } /** stop the thread. Signals the spawned thread that it should stop, so the thread should check regularly */ void signalQuit() { mImpl->signalQuit(); } /** Wait for a thread to stop. Should be called in the context of the spawning thread. Returns false if the thread has not been started. */ bool waitForQuit() { return mImpl->waitForQuit(); } /** check whether the thread is signalled to quit. Called in the context of the spawned thread. */ bool quitIsSignalled() { return mImpl->quitIsSignalled(); } /** Cleanly shut down this thread. Called in the context of the spawned thread. */ void quit() { mImpl->quit(); } PxU32 setAffinityMask(PxU32 mask) { return mImpl->setAffinityMask(mask); } static PxThreadPriority::Enum getPriority(PxThreadImpl::Id threadId) { return PxThreadImpl::getPriority(threadId); } /** Set thread priority. */ void setPriority(PxThreadPriority::Enum prio) { mImpl->setPriority(prio); } /** set the thread's name */ void setName(const char* name) { mImpl->setName(name); } /** Put the current thread to sleep for the given number of milliseconds */ static void sleep(PxU32 ms) { PxThreadImpl::sleep(ms); } /** Yield the current thread's slot on the CPU */ static void yield() { PxThreadImpl::yield(); } /** Inform the processor that we're in a busy wait to give it a chance to do something clever yield() yields the thread, while yieldProcessor() aims to yield the processor */ static void yieldProcesor() { PxThreadImpl::yieldProcessor(); } static PxU32 getDefaultStackSize() { return PxThreadImpl::getDefaultStackSize(); } static PxThreadImpl::Id getId() { return PxThreadImpl::getId(); } static PxU32 getNbPhysicalCores() { return PxThreadImpl::getNbPhysicalCores(); } private: class PxThreadImpl* mImpl; }; typedef PxThreadT<> PxThread; PX_FOUNDATION_API PxU32 PxTlsAlloc(); PX_FOUNDATION_API void PxTlsFree(PxU32 index); PX_FOUNDATION_API void* PxTlsGet(PxU32 index); PX_FOUNDATION_API size_t PxTlsGetValue(PxU32 index); PX_FOUNDATION_API PxU32 PxTlsSet(PxU32 index, void* value); PX_FOUNDATION_API PxU32 PxTlsSetValue(PxU32 index, size_t value); #if !PX_DOXYGEN } // namespace physx #endif #endif
9,703
C
25.227027
112
0.725961
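A sketch of the usage pattern implied by the header above: derive from PxThread, override execute(), poll quitIsSignalled() in the worker loop, and stop the thread with signalQuit()/waitForQuit() from the spawning thread. The Worker class and its loop body are hypothetical.

#include "foundation/PxThread.h"
using namespace physx;

class Worker : public PxThread
{
public:
    virtual void execute()
    {
        setName("Worker");              // runs in the context of the spawned thread
        while(!quitIsSignalled())
        {
            // ... perform one slice of work here ...
            yield();                    // give up the CPU slot between slices
        }
        quit();                         // clean shutdown, see the header comment above
    }
};

void runWorker()
{
    Worker w;
    w.start();                          // uses PxThreadImpl::getDefaultStackSize()
    // ... the spawning thread does other work ...
    w.signalQuit();                     // ask the worker to stop
    w.waitForQuit();                    // block until the worker has shut down
}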
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxSList.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_SLIST_H #define PX_SLIST_H #include "foundation/Px.h" #include "foundation/PxAssert.h" #include "foundation/PxAlignedMalloc.h" #if PX_P64_FAMILY #define PX_SLIST_ALIGNMENT 16 #else #define PX_SLIST_ALIGNMENT 8 #endif #if !PX_DOXYGEN namespace physx { #endif #if PX_VC #pragma warning(push) #pragma warning(disable : 4324) // Padding was added at the end of a structure because of a __declspec(align) value. #endif PX_ALIGN_PREFIX(PX_SLIST_ALIGNMENT) class PxSListEntry { friend struct PxSListImpl; public: PxSListEntry() : mNext(NULL) { PX_ASSERT((size_t(this) & (PX_SLIST_ALIGNMENT - 1)) == 0); } // Only use on elements returned by SList::flush() // because the operation is not atomic. PxSListEntry* next() { return mNext; } private: PxSListEntry* mNext; }PX_ALIGN_SUFFIX(PX_SLIST_ALIGNMENT); #if PX_VC #pragma warning(pop) #endif // template-less implementation struct PX_FOUNDATION_API PxSListImpl { PxSListImpl(); ~PxSListImpl(); void push(PxSListEntry* entry); PxSListEntry* pop(); PxSListEntry* flush(); static uint32_t getSize(); }; template <typename Alloc = PxReflectionAllocator<PxSListImpl> > class PxSListT : protected Alloc { public: PxSListT(const Alloc& alloc = Alloc()) : Alloc(alloc) { mImpl = reinterpret_cast<PxSListImpl*>(Alloc::allocate(PxSListImpl::getSize(), PX_FL)); PX_ASSERT((size_t(mImpl) & (PX_SLIST_ALIGNMENT - 1)) == 0); PX_PLACEMENT_NEW(mImpl, PxSListImpl)(); } ~PxSListT() { mImpl->~PxSListImpl(); Alloc::deallocate(mImpl); } // pushes a new element to the list void push(PxSListEntry& entry) { mImpl->push(&entry); } // pops an element from the list PxSListEntry* pop() { return mImpl->pop(); } // removes all items from list, returns pointer to first element PxSListEntry* flush() { return mImpl->flush(); } private: PxSListImpl* mImpl; }; typedef PxSListT<> PxSList; #if !PX_DOXYGEN } // namespace physx #endif #endif
3,648
C
26.231343
117
0.730811
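A sketch of the lock-free PxSList defined above. Entries must respect PX_SLIST_ALIGNMENT; the Node type below is hypothetical and relies on the alignment declared on PxSListEntry being inherited by derived types.

#include "foundation/PxSList.h"
using namespace physx;

struct Node : public PxSListEntry       // embed the entry so the node can be linked
{
    int payload;
};

void example()
{
    PxSList list;
    Node nodes[4];                      // locals inherit PxSListEntry's alignment

    for(int i = 0; i < 4; ++i)
    {
        nodes[i].payload = i;
        list.push(nodes[i]);            // atomic LIFO push
    }

    PxSListEntry* e = list.pop();       // atomic pop, NULL when the list is empty
    int last = e ? static_cast<Node*>(e)->payload : -1;   // most recently pushed value
    (void)last;

    // flush() detaches all remaining entries at once; next() is only valid on a flushed chain
    for(PxSListEntry* it = list.flush(); it != NULL; it = it->next())
        static_cast<Node*>(it)->payload = 0;
}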