file_path
stringlengths
21
202
content
stringlengths
12
1.02M
size
int64
12
1.02M
lang
stringclasses
9 values
avg_line_length
float64
3.33
100
max_line_length
int64
10
993
alphanum_fraction
float64
0.27
0.93
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxHashSet.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_HASHSET_H #define PX_HASHSET_H #include "foundation/PxHashInternals.h" // TODO: make this doxy-format // This header defines two hash sets. 
// Hash sets
//  * support custom initial table sizes (rounded up internally to power-of-2)
//  * support custom static allocator objects
//  * auto-resize, based on a load factor (i.e. a 64-entry .75 load factor hash will resize
//    when the 49th element is inserted)
//  * are based on open hashing
//
// Sets have STL-like copying semantics, and properly initialize and destruct copies of objects
//
// There are two forms of set: coalesced and uncoalesced. Coalesced sets keep the entries in the
// initial segment of an array, so are fast to iterate over; however deletion is approximately
// twice as expensive.
//
// HashSet<T>:
//		bool		insert(const T& k)	 amortized O(1) (exponential resize policy)
//		bool		contains(const T& k) const;	 O(1)
//		bool		erase(const T& k);	 O(1)
//		uint32_t	size() const;		 constant
//		void		reserve(uint32_t size);	 O(MAX(size, currentOccupancy))
//		void		clear();			 O(currentOccupancy) (with zero constant for objects without destructors)
//		Iterator	getIterator();
//
// Use of iterators:
//
// for(HashSet::Iterator iter = test.getIterator(); !iter.done(); ++iter)
//		myFunction(*iter);
//
// CoalescedHashSet<T> does not support getIterator, but instead supports
//		const Key* getEntries();
//
// insertion into a set already containing the element fails returning false, as does
// erasure of an element not in the set
//

#if !PX_DOXYGEN
namespace physx
{
#endif

/**
\brief Unordered set with open hashing and iterator support.

All real work is delegated to PxHashSetBase (declared in PxHashInternals.h); this
class only selects the non-coalesced variant (last template argument = false) and
exposes the iterator.
*/
template <class Key, class HashFn = PxHash<Key>, class Allocator = PxAllocator>
class PxHashSet : public physx::PxHashSetBase<Key, HashFn, Allocator, false>
{
  public:
	typedef physx::PxHashSetBase<Key, HashFn, Allocator, false> HashSetBase;
	typedef typename HashSetBase::Iterator Iterator;

	// initialTableSize is rounded up internally to a power of two by the base class.
	PxHashSet(uint32_t initialTableSize = 64, float loadFactor = 0.75f) : HashSetBase(initialTableSize, loadFactor)
	{
	}
	PxHashSet(uint32_t initialTableSize, float loadFactor, const Allocator& alloc)
	: HashSetBase(initialTableSize, loadFactor, alloc)
	{
	}
	// Allocator-only overload uses the same defaults as the default constructor (64 / 0.75).
	PxHashSet(const Allocator& alloc) : HashSetBase(64, 0.75f, alloc)
	{
	}

	// Iterates over all elements; iteration order is unspecified.
	Iterator getIterator()
	{
		return Iterator(HashSetBase::mBase);
	}
};

/**
\brief Unordered set whose entries are kept in the initial segment of an array
(coalesced variant: last template argument = true).

Fast to iterate via getEntries(), but erase is roughly twice as expensive as in
PxHashSet. Does not provide getIterator().
*/
template <class Key, class HashFn = PxHash<Key>, class Allocator = PxAllocator>
class PxCoalescedHashSet : public physx::PxHashSetBase<Key, HashFn, Allocator, true>
{
  public:
	typedef typename physx::PxHashSetBase<Key, HashFn, Allocator, true> HashSetBase;

	// initialTableSize is rounded up internally to a power of two by the base class.
	PxCoalescedHashSet(uint32_t initialTableSize = 64, float loadFactor = 0.75f)
	: HashSetBase(initialTableSize, loadFactor)
	{
	}

	PxCoalescedHashSet(uint32_t initialTableSize, float loadFactor, const Allocator& alloc)
	: HashSetBase(initialTableSize, loadFactor, alloc)
	{
	}

	// Allocator-only overload uses the same defaults as the default constructor (64 / 0.75).
	PxCoalescedHashSet(const Allocator& alloc) : HashSetBase(64, 0.75f, alloc)
	{
	}

	// Pointer to the contiguous entry array; valid for the current element count.
	const Key* getEntries() const
	{
		return HashSetBase::mBase.getEntries();
	}
};

#if !PX_DOXYGEN
} // namespace physx
#endif

#endif
4,787
C
36.116279
112
0.734071
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxPool.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_POOL_H #define PX_POOL_H #include "foundation/PxArray.h" #include "foundation/PxSort.h" #include "foundation/PxBasicTemplates.h" #include "foundation/PxInlineArray.h" #include "foundation/PxMemory.h" namespace physx { /*! 
Simple allocation pool.

Fixed-size-element pool: objects are carved out of slabs allocated through Alloc,
and freed elements are threaded onto an intrusive free-list stored in-place in the
freed storage. Not thread-safe.
*/
template <class T, class Alloc = typename PxAllocatorTraits<T>::Type>
class PxPoolBase : public PxUserAllocated, public Alloc
{
	PX_NOCOPY(PxPoolBase)
  protected:
	// elementsPerSlab: number of T per slab; slabSize: byte size of each slab allocation.
	PxPoolBase(const Alloc& alloc, uint32_t elementsPerSlab, uint32_t slabSize)
	: Alloc(alloc), mSlabs(alloc), mElementsPerSlab(elementsPerSlab), mUsed(0), mSlabSize(slabSize), mFreeElement(0)
	{
		mSlabs.reserve(64);
#if PX_CLANG
#if PX_LINUX
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-local-typedef"
#endif // PX_LINUX
#endif // PX_CLANG
		// Free-list nodes are stored in-place inside freed elements, so T must be
		// at least pointer-sized.
		PX_COMPILE_TIME_ASSERT(sizeof(T) >= sizeof(size_t));
#if PX_CLANG
#if PX_LINUX
#pragma clang diagnostic pop
#endif
#endif
	}

  public:
	~PxPoolBase()
	{
		// Destruct any still-live objects before releasing slab memory.
		if(mUsed)
			disposeElements();

		for(void** slabIt = mSlabs.begin(), *slabEnd = mSlabs.end(); slabIt != slabEnd; ++slabIt)
			Alloc::deallocate(*slabIt);
	}

	// Allocate space for single object (no construction; see construct()).
	PX_INLINE T* allocate()
	{
		if(mFreeElement == 0)
			allocateSlab();
		T* p = reinterpret_cast<T*>(mFreeElement);
		mFreeElement = mFreeElement->mNext;
		mUsed++;
		PxMarkSerializedMemory(p, sizeof(T));
		return p;
	}

	// Put space for a single element back in the lists (no destruction; see destroy()).
	PX_INLINE void deallocate(T* p)
	{
		if(p)
		{
			PX_ASSERT(mUsed);
			mUsed--;
			push(reinterpret_cast<FreeList*>(p));
		}
	}

	// construct() overloads: allocate + placement-new with 0..8 constructor arguments.
	// Returns NULL only if allocate() returned NULL.
	PX_INLINE T* construct()
	{
		T* t = allocate();
		return t ? PX_PLACEMENT_NEW(t, T()) : NULL;
	}

	template <class A1>
	PX_INLINE T* construct(A1& a)
	{
		T* t = allocate();
		return t ? PX_PLACEMENT_NEW(t, T(a)) : NULL;
	}

	template <class A1, class A2>
	PX_INLINE T* construct(A1& a, A2& b)
	{
		T* t = allocate();
		return t ? PX_PLACEMENT_NEW(t, T(a, b)) : NULL;
	}

	template <class A1, class A2, class A3>
	PX_INLINE T* construct(A1& a, A2& b, A3& c)
	{
		T* t = allocate();
		return t ? PX_PLACEMENT_NEW(t, T(a, b, c)) : NULL;
	}

	// Variant taking the first argument as a pointer.
	template <class A1, class A2, class A3>
	PX_INLINE T* construct(A1* a, A2& b, A3& c)
	{
		T* t = allocate();
		return t ? PX_PLACEMENT_NEW(t, T(a, b, c)) : NULL;
	}

	template <class A1, class A2, class A3, class A4>
	PX_INLINE T* construct(A1& a, A2& b, A3& c, A4& d)
	{
		T* t = allocate();
		return t ? PX_PLACEMENT_NEW(t, T(a, b, c, d)) : NULL;
	}

	template <class A1, class A2, class A3, class A4, class A5>
	PX_INLINE T* construct(A1& a, A2& b, A3& c, A4& d, A5& e)
	{
		T* t = allocate();
		return t ? PX_PLACEMENT_NEW(t, T(a, b, c, d, e)) : NULL;
	}

	template <class A1, class A2, class A3, class A4, class A5, class A6>
	PX_INLINE T* construct(A1& a, A2& b, A3& c, A4& d, A5& e, A6& f)
	{
		T* t = allocate();
		return t ? PX_PLACEMENT_NEW(t, T(a, b, c, d, e, f)) : NULL;
	}

	template <class A1, class A2, class A3, class A4, class A5, class A6, class A7>
	PX_INLINE T* construct(A1& a, A2& b, A3& c, A4& d, A5& e, A6& f, A7& g)
	{
		T* t = allocate();
		return t ? PX_PLACEMENT_NEW(t, T(a, b, c, d, e, f, g)) : NULL;
	}

	template <class A1, class A2, class A3, class A4, class A5, class A6, class A7, class A8>
	PX_INLINE T* construct(A1& a, A2& b, A3& c, A4& d, A5& e, A6& f, A7& g, A8& h)
	{
		T* t = allocate();
		return t ? PX_PLACEMENT_NEW(t, T(a, b, c, d, e, f, g, h)) : NULL;
	}

	// Destruct and return the element to the pool; NULL-safe.
	PX_INLINE void destroy(T* const p)
	{
		if(p)
		{
			p->~T();
			deallocate(p);
		}
	}

  protected:
	// Intrusive free-list node, overlaid on freed element storage.
	struct FreeList
	{
		FreeList* mNext;
	};

	// All the allocated slabs, sorted by pointer
	PxArray<void*, Alloc> mSlabs;
	uint32_t mElementsPerSlab;
	uint32_t mUsed;      // number of currently-live (allocated, not freed) elements
	uint32_t mSlabSize;  // byte size of each slab
	FreeList* mFreeElement; // Head of free-list

	// Push a freed node onto the free-list head.
	void push(FreeList* p)
	{
		p->mNext = mFreeElement;
		mFreeElement = p;
	}

	// Allocate a slab and segregate it into the freelist
	void allocateSlab()
	{
		T* slab = reinterpret_cast<T*>(Alloc::allocate(mSlabSize, PX_FL));
		mSlabs.pushBack(slab);
		// Build a chain of nodes for the freelist (pushed back-to-front so the
		// list ends up in address order within the slab).
		T* it = slab + mElementsPerSlab;
		while(--it >= slab)
			push(reinterpret_cast<FreeList*>(it));
	}

	/*
	Cleanup method. Go through all active slabs and call destructor for live
	objects, then free their memory.

	Works by sorting both the free-list nodes and the slabs by address, then
	walking each slab's elements in address order in lock-step with the sorted
	free nodes: an element that matches the next free node is dead (skip it),
	anything else is live and gets its destructor called. Statement order here
	is load-bearing; do not reorder.
	*/
	void disposeElements()
	{
		PxArray<void*, Alloc> freeNodes(*this);
		while(mFreeElement)
		{
			freeNodes.pushBack(mFreeElement);
			mFreeElement = mFreeElement->mNext;
		}
		Alloc& alloc(*this);
		PxSort(freeNodes.begin(), freeNodes.size(), PxLess<void*>(), alloc);
		PxSort(mSlabs.begin(), mSlabs.size(), PxLess<void*>(), alloc);

		typename PxArray<void*, Alloc>::Iterator slabIt = mSlabs.begin(), slabEnd = mSlabs.end();
		for(typename PxArray<void*, Alloc>::Iterator freeIt = freeNodes.begin(); slabIt != slabEnd; ++slabIt)
		{
			for(T* tIt = reinterpret_cast<T*>(*slabIt), *tEnd = tIt + mElementsPerSlab; tIt != tEnd; ++tIt)
			{
				if(freeIt != freeNodes.end() && *freeIt == tIt)
					++freeIt; // element is on the free-list: already dead
				else
					tIt->~T(); // element is live: destruct it
			}
		}
	}
};

// original pool implementation: slab size derived from elements-per-slab
template <class T, class Alloc = typename PxAllocatorTraits<T>::Type>
class PxPool : public PxPoolBase<T, Alloc>
{
  public:
	PxPool(const Alloc& alloc = Alloc(), uint32_t elementsPerSlab = 32)
	: PxPoolBase<T, Alloc>(alloc, elementsPerSlab, elementsPerSlab * sizeof(T))
	{
	}
};

// allows specification of the slab size instead of the occupancy
// (elements-per-slab becomes slabSize / sizeof(T), truncated)
template <class T, uint32_t slabSize, class Alloc = typename PxAllocatorTraits<T>::Type>
class PxPool2 : public PxPoolBase<T, Alloc>
{
  public:
	PxPool2(const Alloc& alloc = Alloc()) : PxPoolBase<T, Alloc>(alloc, slabSize / sizeof(T), slabSize)
	{
	}
};

} // namespace physx

#endif
7,420
C
26.485185
113
0.676146
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxAlloca.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_ALLOCA_H
#define PX_ALLOCA_H

#include "foundation/PxTempAllocator.h"

#if !PX_DOXYGEN
namespace physx
{
#endif

/**
\brief Helper used by PX_ALLOCA: frees mPointer via Alloc on destruction, but only
when mOwned is true (i.e. the buffer came from the temp allocator rather than the
stack).

Members are public and left uninitialized on purpose: the PX_ALLOCA macro below is
the only intended creator and it assigns both fields immediately.
*/
template <typename T, typename Alloc = PxTempAllocator>
class PxScopedPointer : private Alloc
{
  public:
	~PxScopedPointer()
	{
		if(mOwned)
			Alloc::deallocate(mPointer);
	}

	operator T*() const
	{
		return mPointer;
	}

	T* mPointer;
	bool mOwned; // true => heap (temp allocator) buffer that we must free; false => stack buffer
};

#if !PX_DOXYGEN
} // namespace physx
#endif

// Don't use inline for alloca !!!
// (alloca memory is released when the *calling* function returns, so wrapping it
// in an inline function would free the buffer immediately)
#if PX_WINDOWS_FAMILY
#include <malloc.h>
#define PxAlloca(x) _alloca(x)
#elif PX_LINUX
#include <malloc.h>
#define PxAlloca(x) alloca(x)
#elif PX_APPLE_FAMILY
#include <alloca.h>
#define PxAlloca(x) alloca(x)
#elif PX_SWITCH
#include <malloc.h>
#define PxAlloca(x) alloca(x)
#endif

// Over-allocates by 'alignment' bytes, then rounds the pointer up to the next
// multiple of 'alignment' (alignment must be a power of two).
#define PxAllocaAligned(x, alignment) ((size_t(PxAlloca(x + alignment)) + (alignment - 1)) & ~size_t(alignment - 1))

/*!
Stack allocation for \c count instances of \c type. Falling back to temp allocator if using more than 1kB.
*/
#define PX_ALLOCA(var, type, count)                                                               \
	physx::PxScopedPointer<type> var;                                                             \
	{                                                                                             \
		const uint32_t size = sizeof(type) * (count);                                             \
		var.mOwned = size > 1024;                                                                 \
		if(var.mOwned)                                                                            \
			var.mPointer = reinterpret_cast<type*>(physx::PxTempAllocator().allocate(size, PX_FL)); \
		else                                                                                      \
			var.mPointer = reinterpret_cast<type*>(PxAlloca(size));                               \
	}

#endif
3,164
C
33.780219
116
0.701643
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxAllocator.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_ALLOCATOR_H
#define PX_ALLOCATOR_H

#include "foundation/PxAllocatorCallback.h"
#include "foundation/PxAssert.h"
#include "foundation/PxFoundation.h"
#include "foundation/Px.h"

#if PX_VC
#pragma warning(push)
#pragma warning(disable : 4577)
#endif
#if PX_WINDOWS_FAMILY
#include <exception>
#if(_MSC_VER >= 1923)
#include <typeinfo>
#else
#include <typeinfo.h>
#endif
#endif
#if(PX_APPLE_FAMILY)
#include <typeinfo>
#endif

#include <new>

#if PX_VC
#pragma warning(pop)
#endif

// PT: the rules are simple:
// - PX_ALLOC/PX_ALLOCATE/PX_FREE is similar to malloc/free. Use that for POD/anything that doesn't need ctor/dtor.
// - PX_NEW/PX_DELETE is similar to new/delete. Use that for anything that needs a ctor/dtor.
// - Everything goes through the user allocator.
// - Inherit from PxUserAllocated to PX_NEW something. Do it even on small classes, it's free.
// - You cannot PX_NEW a POD. Use PX_ALLOC.

#define PX_ALLOC(n, name) physx::PxAllocator().allocate(n, PX_FL)

// PT: use this one to reduce the amount of visible reinterpret_cast
#define PX_ALLOCATE(type, count, name) reinterpret_cast<type*>(PX_ALLOC(count*sizeof(type), name))

// Frees and NULLs the pointer; NULL-safe.
#define PX_FREE(x)                          \
	if(x)                                   \
	{                                       \
		physx::PxAllocator().deallocate(x); \
		x = NULL;                           \
	}

#define PX_FREE_THIS physx::PxAllocator().deallocate(this)

#define PX_NEW(T) new (physx::PxReflectionAllocator<T>(), PX_FL) T
#define PX_PLACEMENT_NEW(p, T) new (p) T

#define PX_DELETE_THIS delete this
// Delete/release-and-NULL macros; all NULL-safe.
#define PX_DELETE(x) if(x) { delete x; x = NULL; }
#define PX_DELETE_ARRAY(x) if(x) { delete []x; x = NULL; }
#define PX_RELEASE(x) if(x) { x->release(); x = NULL; }

#if !PX_DOXYGEN
namespace physx
{
#endif

/**
\brief Allocator used to access the global PxAllocatorCallback instance without providing additional information.
*/
class PxAllocator
{
  public:
	PX_FORCE_INLINE PxAllocator(const char* = NULL){}

	// Returns NULL for size==0; passes an empty type name to the broadcast allocator.
	PX_FORCE_INLINE void* allocate(size_t size, const char* file, int line)
	{
		return size ? PxGetBroadcastAllocator()->allocate(size, "", file, line) : NULL;
	}
	PX_FORCE_INLINE void deallocate(void* ptr)
	{
		if(ptr)
			PxGetBroadcastAllocator()->deallocate(ptr);
	}
};

/**
 * \brief Bootstrap allocator using malloc/free.
 * Don't use unless your objects get allocated before foundation is initialized.
 */
class PxRawAllocator
{
  public:
	PxRawAllocator(const char* = 0) {}

	PX_FORCE_INLINE void* allocate(size_t size, const char*, int)
	{
		// malloc returns valid pointer for size==0, no need to check
		return ::malloc(size);
	}
	PX_FORCE_INLINE void deallocate(void* ptr)
	{
		// free(0) is guaranteed to have no side effect, no need to check
		::free(ptr);
	}
};

/**
\brief Virtual allocator callback used to provide run-time defined allocators to foundation types like Array or Bitmap.
This is used by VirtualAllocator
*/
class PxVirtualAllocatorCallback
{
  public:
	PxVirtualAllocatorCallback() {}
	virtual ~PxVirtualAllocatorCallback() {}
	virtual void* allocate(const size_t size, const int group, const char* file, const int line) = 0;
	virtual void deallocate(void* ptr) = 0;
};

/**
\brief Virtual allocator to be used by foundation types to provide run-time defined allocators.
Due to the fact that Array extends its allocator, rather than contains a reference/pointer to it,
the VirtualAllocator must be a concrete type containing a pointer to a virtual callback. The callback
may not be available at instantiation time, therefore methods are provided to set the callback later.
*/
class PxVirtualAllocator
{
  public:
	PxVirtualAllocator(PxVirtualAllocatorCallback* callback = NULL, const int group = 0) : mCallback(callback), mGroup(group) {}

	// Asserts if no callback has been set yet; returns NULL for size==0.
	PX_FORCE_INLINE void* allocate(const size_t size, const char* file, const int line)
	{
		PX_ASSERT(mCallback);
		if (size)
			return mCallback->allocate(size, mGroup, file, line);
		return NULL;
	}
	PX_FORCE_INLINE void deallocate(void* ptr)
	{
		PX_ASSERT(mCallback);
		if (ptr)
			mCallback->deallocate(ptr);
	}

	void setCallback(PxVirtualAllocatorCallback* callback)
	{
		mCallback = callback;
	}
	PxVirtualAllocatorCallback* getCallback()
	{
		return mCallback;
	}

  private:
	PxVirtualAllocatorCallback* mCallback;
	const int mGroup;
	PxVirtualAllocator& operator=(const PxVirtualAllocator&); // non-assignable (mGroup is const)
};

/**
\brief Allocator used to access the global PxAllocatorCallback instance using a static name derived from T.
*/
template <typename T>
class PxReflectionAllocator
{
	// Allocation-tracking name for T; only meaningful when name reporting is enabled.
	static const char* getName(bool reportAllocationNames)
	{
		if(!reportAllocationNames)
			return "<allocation names disabled>";
#if PX_GCC_FAMILY
		return __PRETTY_FUNCTION__;
#else
		// name() calls malloc(), raw_name() wouldn't
		return typeid(T).name();
#endif
	}

  public:
	PxReflectionAllocator(const PxEMPTY) {}
	PxReflectionAllocator(const char* = 0) {}
	inline PxReflectionAllocator(const PxReflectionAllocator&) {}

	// Returns NULL for size==0.
	PX_FORCE_INLINE void* allocate(size_t size, const char* filename, int line)
	{
		if(!size)
			return NULL;
		bool reportAllocationNames;
		PxAllocatorCallback* cb = PxGetBroadcastAllocator(&reportAllocationNames);
		return cb->allocate(size, getName(reportAllocationNames), filename, line);
	}
	PX_FORCE_INLINE void deallocate(void* ptr)
	{
		if(ptr)
			PxGetBroadcastAllocator()->deallocate(ptr);
	}
};

// Maps a type T to its default allocator (the reflection allocator).
template <typename T>
struct PxAllocatorTraits
{
	typedef PxReflectionAllocator<T> Type;
};

#if !PX_DOXYGEN
} // namespace physx
#endif

#endif
7,239
C
28.672131
126
0.72289
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxTime.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_TIME_H
#define PX_TIME_H

#include "foundation/PxSimpleTypes.h"
#include "foundation/PxFoundationConfig.h"

#if PX_LINUX
#include <time.h>
#endif

#if !PX_DOXYGEN
namespace physx
{
#endif

/**
\brief Conversion factor from raw counter ticks to tens of nanoseconds,
expressed as the rational mNumerator / mDenominator.
*/
struct PxCounterFrequencyToTensOfNanos
{
	PxU64 mNumerator;
	PxU64 mDenominator;
	PxCounterFrequencyToTensOfNanos(PxU64 inNum, PxU64 inDenom) : mNumerator(inNum), mDenominator(inDenom)
	{
	}

	// quite slow.
	// NOTE(review): inCounter * mNumerator is a 64-bit multiply and can wrap for
	// large counter values / numerators — confirm expected input ranges before
	// relying on this for long uptimes.
	PxU64 toTensOfNanos(PxU64 inCounter) const
	{
		return (inCounter * mNumerator) / mDenominator;
	}
};

/**
\brief Simple elapsed-time measurement utility built on a platform
high-resolution counter. Method bodies live in the platform sources
(PX_FOUNDATION_API); only getCurrentTimeInTensOfNanoSeconds is inline.
*/
class PX_FOUNDATION_API PxTime
{
  public:
	typedef PxF64 Second;

	// 1 second = 1e8 units of ten nanoseconds.
	static const PxU64 sNumTensOfNanoSecondsInASecond = 100000000;

	// This is supposedly guaranteed to not change after system boot
	// regardless of processors, speedstep, etc.
	static const PxCounterFrequencyToTensOfNanos& getBootCounterFrequency();

	static PxCounterFrequencyToTensOfNanos getCounterFrequency();

	// Raw high-resolution counter value.
	static PxU64 getCurrentCounterValue();

	// SLOW!!
	// Thar be a 64 bit divide in thar!
	static PxU64 getCurrentTimeInTensOfNanoSeconds()
	{
		PxU64 ticks = getCurrentCounterValue();
		return getBootCounterFrequency().toTensOfNanos(ticks);
	}

	PxTime();

	// Seconds since construction or the previous getElapsedSeconds() call;
	// resets the internal timestamp.
	Second getElapsedSeconds();

	// Like getElapsedSeconds() but without resetting the internal timestamp.
	Second peekElapsedSeconds();

	Second getLastTime() const;

  private:
	// Platform-dependent state: POSIX-style platforms store the last time in
	// seconds, others store a raw tick count.
#if PX_LINUX || PX_APPLE_FAMILY
	Second mLastTime;
#else
	PxI64 mTickCount;
#endif
};

#if !PX_DOXYGEN
} // namespace physx
#endif

#endif
3,084
C
30.479592
103
0.766213
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxBitMap.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_BITMAP_H #define PX_BITMAP_H #include "foundation/PxAssert.h" #include "foundation/PxMath.h" #include "foundation/PxMemory.h" #include "foundation/PxAllocator.h" #include "foundation/PxUserAllocated.h" #include "foundation/PxIntrinsics.h" #include "foundation/PxBitUtils.h" #if !PX_DOXYGEN namespace physx { #endif /*! Hold a bitmap with operations to set,reset or test given bit. We inhibit copy to prevent unintentional copies. If a copy is desired copy() should be used or alternatively a copy constructor implemented. */ template<class PxAllocator> class PxBitMapBase : public PxUserAllocated { PX_NOCOPY(PxBitMapBase) public: // PX_SERIALIZATION /* todo: explicit */ PxBitMapBase(const PxEMPTY) { if(mMap) mWordCount |= PX_SIGN_BITMASK; } //~PX_SERIALIZATION PX_INLINE PxBitMapBase(const PxAllocator& allocator) : mMap(0), mWordCount(0), mAllocator(allocator) {} PX_INLINE PxBitMapBase() : mMap(0), mWordCount(0) {} PX_INLINE ~PxBitMapBase() { release(); } PX_INLINE void release() { if(mMap && !isInUserMemory()) mAllocator.deallocate(mMap); mMap = NULL; } PX_FORCE_INLINE PxAllocator& getAllocator() { return mAllocator; } PX_INLINE void growAndSet(PxU32 index) { extend(index + 1); mMap[index >> 5] |= 1 << (index & 31); } PX_INLINE void growAndReset(PxU32 index) { extend(index + 1); mMap[index >> 5] &= ~(1 << (index & 31)); } PX_INLINE PxIntBool boundedTest(PxU32 index) const { return PxIntBool(index >> 5 >= getWordCount() ? 
PxIntFalse : (mMap[index >> 5] & (1 << (index & 31)))); } PX_INLINE void boundedReset(PxU32 index) { if((index >> 5) < getWordCount()) mMap[index >> 5] &= ~(1 << (index & 31)); } // Special optimized versions, when you _know_ your index is in range PX_INLINE void set(PxU32 index) { PX_ASSERT(index<getWordCount() * 32); mMap[index >> 5] |= 1 << (index & 31); } PX_INLINE void reset(PxU32 index) { PX_ASSERT(index<getWordCount() * 32); mMap[index >> 5] &= ~(1 << (index & 31)); } PX_INLINE PxIntBool test(PxU32 index) const { PX_ASSERT(index<getWordCount() * 32); return PxIntBool(mMap[index >> 5] & (1 << (index & 31))); } // nibble == 4 bits PX_INLINE PxU32 getNibbleFast(PxU32 nibIndex) const { const PxU32 bitIndex = nibIndex << 2; PX_ASSERT(bitIndex < getWordCount() * 32); return (mMap[bitIndex >> 5] >> (bitIndex & 31)) & 0xf; } PX_INLINE void andNibbleFast(PxU32 nibIndex, PxU32 mask) { //TODO: there has to be a faster way... const PxU32 bitIndex = nibIndex << 2; const PxU32 shift = (bitIndex & 31); const PxU32 nibMask = (0xfu << shift); PX_ASSERT(bitIndex < getWordCount() * 32); mMap[bitIndex >> 5] &= ((mask << shift) | ~nibMask); } PX_INLINE void orNibbleFast(PxU32 nibIndex, PxU32 mask) { PX_ASSERT(!(mask & ~0xfu)); //check extra bits are not set const PxU32 bitIndex = nibIndex << 2; const PxU32 shift = bitIndex & 31; PX_ASSERT(bitIndex < getWordCount() * 32); mMap[bitIndex >> 5] |= (mask << shift); } void clear() { PxMemSet(mMap, 0, getWordCount() * sizeof(PxU32)); } void resizeAndClear(PxU32 newBitCount) { extendUninitialized(newBitCount); PxMemSet(mMap, 0, getWordCount() * sizeof(PxU32)); } void setEmpty() { mMap = NULL; mWordCount = 0; } void setWords(PxU32* map, PxU32 wordCount) { mMap = map; mWordCount = wordCount | PX_SIGN_BITMASK; } // !!! 
only sets /last/ bit to value void resize(PxU32 newBitCount, bool value = false) { PX_ASSERT(!value); // only new class supports this PX_UNUSED(value); extend(newBitCount); } PX_FORCE_INLINE PxU32 size() const { return getWordCount() * 32; } void copy(const PxBitMapBase& a) { extendUninitialized(a.getWordCount() << 5); PxMemCopy(mMap, a.mMap, a.getWordCount() * sizeof(PxU32)); if(getWordCount() > a.getWordCount()) PxMemSet(mMap + a.getWordCount(), 0, (getWordCount() - a.getWordCount()) * sizeof(PxU32)); } PX_INLINE PxU32 count() const { // NOTE: we can probably do this faster, since the last steps in PxBitCount can be defered to // the end of the seq. + 64/128bits at a time + native bit counting instructions(360 is fast non micro code). PxU32 count = 0; const PxU32 wordCount = getWordCount(); for(PxU32 i = 0; i<wordCount; i++) count += PxBitCount(mMap[i]); return count; } PX_INLINE PxU32 count(PxU32 start, PxU32 length) const { const PxU32 end = PxMin(getWordCount() << 5, start + length); PxU32 count = 0; for(PxU32 i = start; i<end; i++) count += (test(i) != 0); return count; } //! returns 0 if no bits set (!!!) 
PxU32 findLast() const { const PxU32 wordCount = getWordCount(); for(PxU32 i = wordCount; i-- > 0;) { if(mMap[i]) return (i << 5) + PxHighestSetBit(mMap[i]); } return PxU32(0); } // the obvious combiners and some used in the SDK struct OR { PX_INLINE PxU32 operator()(PxU32 a, PxU32 b) { return a | b; } }; struct AND { PX_INLINE PxU32 operator()(PxU32 a, PxU32 b) { return a&b; } }; struct XOR { PX_INLINE PxU32 operator()(PxU32 a, PxU32 b) { return a^b; } }; // we use auxiliary functions here so as not to generate combiners for every combination // of allocators template<class Combiner, class _> PX_INLINE void combineInPlace(const PxBitMapBase<_>& b) { combine1<Combiner>(b.mMap, b.getWordCount()); } template<class Combiner, class _1, class _2> PX_INLINE void combine(const PxBitMapBase<_1>& a, const PxBitMapBase<_2>& b) { combine2<Combiner>(a.mMap, a.getWordCount(), b.mMap, b.getWordCount()); } PX_FORCE_INLINE const PxU32* getWords() const { return mMap; } PX_FORCE_INLINE PxU32* getWords() { return mMap; } // PX_SERIALIZATION PX_FORCE_INLINE PxU32 getWordCount() const { return mWordCount & ~PX_SIGN_BITMASK; } // We need one bit to mark arrays that have been deserialized from a user-provided memory block. PX_FORCE_INLINE PxU32 isInUserMemory() const { return mWordCount & PX_SIGN_BITMASK; } //~PX_SERIALIZATION /*! Iterate over indices in a bitmap This iterator is good because it finds the set bit without looping over the cached bits upto 31 times. However it does require a variable shift. 
*/ class Iterator { public: static const PxU32 DONE = 0xffffffff; PX_INLINE Iterator(const PxBitMapBase &map) : mBitMap(map) { reset(); } PX_INLINE Iterator& operator=(const Iterator& other) { PX_ASSERT(&mBitMap == &other.mBitMap); mBlock = other.mBlock; mIndex = other.mIndex; return *this; } PX_INLINE PxU32 getNext() { if(mBlock) { PxU32 block = mBlock; PxU32 index = mIndex; const PxU32 bitIndex = index << 5 | PxLowestSetBit(block); block &= block - 1; PxU32 wordCount = mBitMap.getWordCount(); while(!block && ++index < wordCount) block = mBitMap.mMap[index]; mBlock = block; mIndex = index; return bitIndex; } return DONE; } PX_INLINE void reset() { PxU32 index = 0; PxU32 block = 0; PxU32 wordCount = mBitMap.getWordCount(); while(index < wordCount && ((block = mBitMap.mMap[index]) == 0)) ++index; mBlock = block; mIndex = index; } private: PxU32 mBlock, mIndex; const PxBitMapBase& mBitMap; }; // DS: faster but less general: hasBits() must be true or getNext() is illegal so it is the calling code's responsibility to ensure that getNext() is not called illegally. 
class PxLoopIterator
{
	PX_NOCOPY(PxLoopIterator)

  public:
	PX_FORCE_INLINE PxLoopIterator(const PxBitMapBase &map) : mMap(map.getWords()), mBlock(0), mIndex(-1), mWordCount(PxI32(map.getWordCount())) {}

	// Advances to the next non-empty word if the cached one is exhausted;
	// returns false once the map has been fully consumed. Must be called (and
	// return true) before each getNext().
	PX_FORCE_INLINE bool hasBits()
	{
		PX_ASSERT(mIndex<mWordCount);
		while (mBlock == 0)
		{
			if (++mIndex == mWordCount)
				return false;
			mBlock = mMap[mIndex];
		}
		return true;
	}

	// Pops and returns the lowest remaining set bit index of the cached word.
	// Only legal when the preceding hasBits() returned true.
	PX_FORCE_INLINE PxU32 getNext()
	{
		PX_ASSERT(mIndex<mWordCount && mBlock != 0);
		PxU32 result = PxU32(mIndex) << 5 | PxLowestSetBit(mBlock); // will assert if mask is zero
		mBlock &= (mBlock - 1); // clear the lowest set bit
		return result;
	}

  private:
	const PxU32*const mMap;
	PxU32 mBlock; // the word we're currently scanning
	PxI32 mIndex; // the index of the word we're currently looking at
	PxI32 mWordCount;
};

//Class to iterate over the bitmap from a particular start location rather than the beginning of the list
class PxCircularIterator
{
  public:
	// Sentinel returned by getNext() once the scan has wrapped back to the start word.
	static const PxU32 DONE = 0xffffffff;

	PX_INLINE PxCircularIterator(const PxBitMapBase &map, PxU32 index) : mBitMap(map)
	{
		PxU32 localIndex = 0;
		PxU32 startIndex = 0;
		const PxU32 wordCount = mBitMap.getWordCount();
		// NOTE(review): 'index << 5' is used as a word index into mMap below,
		// which is surprising - if 'index' is a bit index the conversion would
		// presumably be 'index >> 5'. Kept as-is; confirm the intended units of
		// 'index' against the callers. Out-of-range values fall back to word 0.
		if((index << 5) < wordCount)
		{
			localIndex = index << 5;
			startIndex = localIndex;
		}
		PxU32 block = 0;
		if(localIndex < wordCount)
		{
			block = mBitMap.mMap[localIndex];
			// start word empty: walk forward (with wrap-around) to the first
			// non-empty word, stopping if we come back to where we started
			if(block == 0)
			{
				localIndex = (localIndex + 1) % wordCount;
				while(localIndex != startIndex && (block = mBitMap.mMap[localIndex]) == 0)
					localIndex = (localIndex + 1) % wordCount;
			}
		}
		mIndex = localIndex;
		mBlock = block;
		mStartIndex = startIndex;
	}

	// Returns the next set bit index (wrapping around the end of the map), or
	// DONE after one full cycle.
	PX_INLINE PxU32 getNext()
	{
		if(mBlock)
		{
			PxU32 index = mIndex;
			PxU32 block = mBlock;
			const PxU32 startIndex = mStartIndex;
			PxU32 bitIndex = index << 5 | PxLowestSetBit(block);
			block &= block - 1; // clear the lowest set bit
			PxU32 wordCount = mBitMap.getWordCount();
			// advance circularly to the next non-empty word; stop once we
			// reach the start word again (full cycle)
			while (!block && (index = ((index + 1) % wordCount)) != startIndex)
				block = mBitMap.mMap[index];
			mIndex = index;
			mBlock = block;
			return bitIndex;
		}
		return DONE;
	}

  private:
	PxU32 mBlock, mIndex; // cached current word and its word index
	PxU32 mStartIndex;    // word index where iteration began (wrap-around sentinel)
	const PxBitMapBase& mBitMap;

	PX_NOCOPY(PxCircularIterator)
};

  protected:
	PxU32* mMap; //one bit per index
	// Low 31 bits: word count; sign bit: isInUserMemory flag (see accessors above).
	PxU32 mWordCount;
	PxAllocator mAllocator;
	PxU8 mPadding[3]; // PT: "mAllocator" is empty but consumes 1 byte

	// Grows the storage so it can hold at least 'size' bits; existing bits are
	// preserved and newly added words are zeroed. No-op if already big enough.
	void extend(PxU32 size)
	{
		const PxU32 newWordCount = (size + 31) >> 5; // round bit count up to whole 32-bit words
		if (newWordCount > getWordCount())
		{
			PxU32* newMap = reinterpret_cast<PxU32*>(mAllocator.allocate(newWordCount * sizeof(PxU32), PX_FL));
			if (mMap)
			{
				PxMemCopy(newMap, mMap, getWordCount() * sizeof(PxU32));
				// never free user-provided (deserialized) storage
				if (!isInUserMemory())
					mAllocator.deallocate(mMap);
			}
			PxMemSet(newMap + getWordCount(), 0, (newWordCount - getWordCount()) * sizeof(PxU32));
			mMap = newMap;

			// also resets the isInUserMemory bit
			mWordCount = newWordCount;
		}
	}

	// Grows the storage without preserving or zeroing contents - callers are
	// expected to overwrite every word afterwards.
	void extendUninitialized(PxU32 size)
	{
		PxU32 newWordCount = (size + 31) >> 5;
		if (newWordCount > getWordCount())
		{
			if (mMap && !isInUserMemory())
				mAllocator.deallocate(mMap);

			// also resets the isInUserMemory bit
			mWordCount = newWordCount;
			mMap = reinterpret_cast<PxU32*>(mAllocator.allocate(mWordCount * sizeof(PxU32), PX_FL));
		}
	}

	// In-place combine helper: this = Combiner(this, words), extended to cover 'length' words.
	template<class Combiner>
	void combine1(const PxU32* words, PxU32 length)
	{
		extend(length << 5);
		PxU32 combineLength = PxMin(getWordCount(), length);
		for (PxU32 i = 0; i<combineLength; i++)
			mMap[i] = Combiner()(mMap[i], words[i]);
	}

	// Combine helper writing into freshly (re)sized storage: the shorter input
	// is padded with zero words so every output word is written.
	template<class Combiner>
	void combine2(const PxU32* words1, PxU32 length1, const PxU32* words2, PxU32 length2)
	{
		extendUninitialized(PxMax(length1, length2) << 5);

		PxU32 commonSize = PxMin(length1, length2);
		for (PxU32 i = 0; i<commonSize; i++)
			mMap[i] = Combiner()(words1[i], words2[i]);

		for (PxU32 i = commonSize; i<length1; i++)
			mMap[i] = Combiner()(words1[i], 0);

		for (PxU32 i = commonSize; i<length2; i++)
			mMap[i] = Combiner()(0, words2[i]);
	}

	friend class Iterator;
};

typedef PxBitMapBase<PxAllocator> PxBitMap;
typedef PxBitMapBase<PxVirtualAllocator> PxBitMapPinned;

#if !PX_DOXYGEN
} // namespace physx
#endif

#endif
13,711
C
26.478958
173
0.658085
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxSIMDHelpers.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_SIMD_HELPERS_H #define PX_SIMD_HELPERS_H #include "foundation/PxMat33.h" #include "foundation/PxVecMath.h" #include "foundation/PxTransform.h" #if !PX_DOXYGEN namespace physx { #endif //! 
A padded version of PxMat33, to safely load its data using SIMD
class PxMat33Padded : public PxMat33
{
  public:
	// Builds the 3x3 rotation matrix for quaternion q using SIMD.
	explicit PX_FORCE_INLINE PxMat33Padded(const PxQuat& q)
	{
		using namespace aos;
		const QuatV qV = V4LoadU(&q.x);
		Vec3V column0V, column1V, column2V;
		QuatGetMat33V(qV, column0V, column1V, column2V);
#if defined(PX_SIMD_DISABLED) || (PX_LINUX && (PX_ARM || PX_A64))
		// conservative path: store exactly 3 floats per column
		V3StoreU(column0V, column0);
		V3StoreU(column1V, column1);
		V3StoreU(column2V, column2);
#else
		// fast path: unaligned 4-float stores. Each store writes one float past
		// its column; for the last column that lane lands in 'padding' below,
		// which is why this class exists.
		V4StoreU(column0V, &column0.x);
		V4StoreU(column1V, &column1.x);
		V4StoreU(column2V, &column2.x);
#endif
	}

	PX_FORCE_INLINE ~PxMat33Padded() {}

	PX_FORCE_INLINE void operator=(const PxMat33& other)
	{
		column0 = other.column0;
		column1 = other.column1;
		column2 = other.column2;
	}

	// Scratch lane absorbing the 4th float of the column2 store; value is undefined.
	PxU32 padding;
};

#if !PX_DOXYGEN
namespace aos
{
#endif

// Composes two (rotation, position) pairs given as split quaternions:
// (wo, vo) = (wa, va) * (wb, vb) as a quaternion product, and
// po = pa + rotate((wa, va), pb) via the expanded sandwich-product formula.
PX_FORCE_INLINE void transformKernelVec4(	const FloatVArg wa, const Vec4VArg va, const Vec4VArg pa,
											const FloatVArg wb, const Vec4VArg vb, const Vec4VArg pb,
											FloatV& wo, Vec4V& vo, Vec4V& po)
{
	// quaternion product: w = wa*wb - va.vb, v = wb*va + wa*vb + va x vb
	wo = FSub(FMul(wa, wb), V4Dot3(va, vb));
	vo = V4ScaleAdd(va, wb, V4ScaleAdd(vb, wa, V4Cross(va, vb)));

	// rotate pb by (wa, va): 2*(pb*(wa^2 - 0.5) + wa*(va x pb) + va*(va.pb)) + pa
	const Vec4V t1 = V4Scale(pb, FScaleAdd(wa, wa, FLoad(-0.5f)));
	const Vec4V t2 = V4ScaleAdd(V4Cross(va, pb), wa, t1);
	const Vec4V t3 = V4ScaleAdd(va, V4Dot3(va, pb), t2);
	po = V4ScaleAdd(t3, FLoad(2.0f), pa);
}

// PT: out = a * b
// NOTE(review): the 4-float loads at &p.x read one float beyond the PxVec3 -
// assumes it is safe to read past 'p' within the transform object (guaranteed
// for the padded/aligned PxTransform32 overload below); confirm for callers
// passing plain PxTransform.
template<const bool alignedInput, const bool alignedOutput>
PX_FORCE_INLINE void transformMultiply(PxTransform& out, const PxTransform& a, const PxTransform& b)
{
	PX_ASSERT(!alignedInput || (size_t(&a)&15) == 0);
	PX_ASSERT(!alignedInput || (size_t(&b)&15) == 0);

	const Vec4V aPos = alignedInput ? V4LoadA(&a.p.x) : V4LoadU(&a.p.x);
	const Vec4V aRot = alignedInput ? V4LoadA(&a.q.x) : V4LoadU(&a.q.x);
	const Vec4V bPos = alignedInput ? V4LoadA(&b.p.x) : V4LoadU(&b.p.x);
	const Vec4V bRot = alignedInput ? V4LoadA(&b.q.x) : V4LoadU(&b.q.x);

	Vec4V v, p;
	FloatV w;
	transformKernelVec4(V4GetW(aRot), aRot, aPos, V4GetW(bRot), bRot, bPos, w, v, p);

	if(alignedOutput)
	{
		PX_ASSERT((size_t(&out)&15) == 0);
		V4StoreA(p, &out.p.x);
		V4StoreA(V4SetW(v,w), &out.q.x); // write back the quaternion with its w lane restored
	}
	else
	{
		V4StoreU(p, &out.p.x);
		V4StoreU(V4SetW(v,w), &out.q.x);
	}
}

// PT: out = a * b
// PxTransform32 is 16-byte aligned, so the fully aligned fast path applies.
PX_FORCE_INLINE void transformMultiply(PxTransform32& out, const PxTransform32& a, const PxTransform32& b)
{
	transformMultiply<true, true>(out, a, b);
}

#if !PX_DOXYGEN
} // namespace aos
#endif

#if !PX_DOXYGEN
} // namespace physx
#endif

#endif
4,485
C
32.229629
107
0.702564
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxInlineAllocator.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_INLINE_ALLOCATOR_H #define PX_INLINE_ALLOCATOR_H #include "foundation/PxUserAllocated.h" #if !PX_DOXYGEN namespace physx { #endif // this is used by the array class to allocate some space for a small number // of objects along with the metadata template <PxU32 N, typename BaseAllocator> class PxInlineAllocator : private BaseAllocator { public: PxInlineAllocator(const PxEMPTY v) : BaseAllocator(v) { } PxInlineAllocator(const BaseAllocator& alloc = BaseAllocator()) : BaseAllocator(alloc), mBufferUsed(false) { } PxInlineAllocator(const PxInlineAllocator& aloc) : BaseAllocator(aloc), mBufferUsed(false) { } void* allocate(PxU32 size, const char* filename, PxI32 line) { if(!mBufferUsed && size <= N) { mBufferUsed = true; return mBuffer; } return BaseAllocator::allocate(size, filename, line); } void deallocate(void* ptr) { if(ptr == mBuffer) mBufferUsed = false; else BaseAllocator::deallocate(ptr); } PX_FORCE_INLINE PxU8* getInlineBuffer() { return mBuffer; } PX_FORCE_INLINE bool isBufferUsed() const { return mBufferUsed; } protected: PxU8 mBuffer[N]; bool mBufferUsed; }; #if !PX_DOXYGEN } // namespace physx #endif #endif
2,905
C
30.247312
107
0.747676
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxBroadcast.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_BROADCAST_H #define PX_BROADCAST_H #include "foundation/PxInlineArray.h" #include "foundation/PxSimpleTypes.h" #include "foundation/PxErrorCallback.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief Abstract listener class that listens to allocation and deallocation events from the foundation memory system. 
<b>Threading:</b> All methods of this class should be thread safe as it can be called from the user thread or the physics processing thread(s). */ class PxAllocationListener { public: /** \brief callback when memory is allocated. \param size Size of the allocation in bytes. \param typeName Type this data is being allocated for. \param filename File the allocation came from. \param line the allocation came from. \param allocatedMemory memory that will be returned from the allocation. */ virtual void onAllocation(size_t size, const char* typeName, const char* filename, int line, void* allocatedMemory) = 0; /** \brief callback when memory is deallocated. \param allocatedMemory memory just before allocation. */ virtual void onDeallocation(void* allocatedMemory) = 0; protected: virtual ~PxAllocationListener() { } }; /** \brief Broadcast class implementation, registering listeners. <b>Threading:</b> All methods of this class should be thread safe as it can be called from the user thread or the physics processing thread(s). There is not internal locking */ template <class Listener, class Base> class PxBroadcast : public Base { public: static const uint32_t MAX_NB_LISTENERS = 16; /** \brief The default constructor. */ PxBroadcast() { } /** \brief Register new listener. \note It is NOT SAFE to register and deregister listeners while allocations may be taking place. moreover, there is no thread safety to registration/deregistration. \param listener Listener to register. */ void registerListener(Listener& listener) { if(mListeners.size() < MAX_NB_LISTENERS) mListeners.pushBack(&listener); } /** \brief Deregister an existing listener. \note It is NOT SAFE to register and deregister listeners while allocations may be taking place. moreover, there is no thread safety to registration/deregistration. \param listener Listener to deregister. */ void deregisterListener(Listener& listener) { mListeners.findAndReplaceWithLast(&listener); } /** \brief Get number of registered listeners. 
\return Number of listeners. */ uint32_t getNbListeners() const { return mListeners.size(); } /** \brief Get an existing listener from given index. \param index Index of the listener. \return Listener on given index. */ Listener& getListener(uint32_t index) { PX_ASSERT(index <= mListeners.size()); return *mListeners[index]; } protected: virtual ~PxBroadcast() { } physx::PxInlineArray<Listener*, MAX_NB_LISTENERS, physx::PxAllocator> mListeners; }; /** \brief Abstract base class for an application defined memory allocator that allows an external listener to audit the memory allocations. */ class PxBroadcastingAllocator : public PxBroadcast<PxAllocationListener, PxAllocatorCallback> { PX_NOCOPY(PxBroadcastingAllocator) public: /** \brief The default constructor. */ PxBroadcastingAllocator(PxAllocatorCallback& allocator, PxErrorCallback& error) : mAllocator(allocator), mError(error) { mListeners.clear(); } /** \brief The default constructor. */ virtual ~PxBroadcastingAllocator() { mListeners.clear(); } /** \brief Allocates size bytes of memory, which must be 16-byte aligned. This method should never return NULL. If you run out of memory, then you should terminate the app or take some other appropriate action. <b>Threading:</b> This function should be thread safe as it can be called in the context of the user thread and physics processing thread(s). \param size Number of bytes to allocate. \param typeName Name of the datatype that is being allocated \param filename The source file which allocated the memory \param line The source line which allocated the memory \return The allocated block of memory. 
*/ void* allocate(size_t size, const char* typeName, const char* filename, int line) { void* mem = mAllocator.allocate(size, typeName, filename, line); if(!mem) { mError.reportError(PxErrorCode::eABORT, "User allocator returned NULL.", PX_FL); return NULL; } if((size_t(mem) & 15)) { mError.reportError(PxErrorCode::eABORT, "Allocations must be 16-byte aligned.", PX_FL); return NULL; } for(uint32_t i = 0; i < mListeners.size(); i++) mListeners[i]->onAllocation(size, typeName, filename, line, mem); return mem; } /** \brief Frees memory previously allocated by allocate(). <b>Threading:</b> This function should be thread safe as it can be called in the context of the user thread and physics processing thread(s). \param ptr Memory to free. */ void deallocate(void* ptr) { for(uint32_t i = 0; i < mListeners.size(); i++) { mListeners[i]->onDeallocation(ptr); } mAllocator.deallocate(ptr); } private: PxAllocatorCallback& mAllocator; PxErrorCallback& mError; }; /** \brief Abstract base class for an application defined error callback that allows an external listener to report errors. */ class PxBroadcastingErrorCallback : public PxBroadcast<PxErrorCallback, PxErrorCallback> { PX_NOCOPY(PxBroadcastingErrorCallback) public: /** \brief The default constructor. */ PxBroadcastingErrorCallback(PxErrorCallback& errorCallback) { registerListener(errorCallback); } /** \brief The default destructor. */ virtual ~PxBroadcastingErrorCallback() { mListeners.clear(); } /** \brief Reports an error code. \param code Error code, see #PxErrorCode \param message Message to display. \param file File error occured in. \param line Line number error occured on. */ void reportError(PxErrorCode::Enum code, const char* message, const char* file, int line) { for(uint32_t i = 0; i < mListeners.size(); i++) mListeners[i]->reportError(code, message, file, line); } }; #if !PX_DOXYGEN } // namespace physx #endif #endif
7,838
C
27.299639
119
0.74075
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxFlags.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_FLAGS_H #define PX_FLAGS_H /** \addtogroup foundation @{ */ #include "foundation/Px.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief Container for bitfield flag variables associated with a specific enum type. This allows for type safe manipulation for bitfields. 
<h3>Example</h3>

// enum that defines each bit...
struct MyEnum
{
	enum Enum
	{
		eMAN = 1,
		eBEAR = 2,
		ePIG = 4,
	};
};

// implements some convenient global operators.
PX_FLAGS_OPERATORS(MyEnum::Enum, uint8_t);

PxFlags<MyEnum::Enum, uint8_t> myFlags;
myFlags |= MyEnum::eMAN;
myFlags |= MyEnum::eBEAR | MyEnum::ePIG;
if(myFlags & MyEnum::eBEAR)
{
	doSomething();
}
*/
template <typename enumtype, typename storagetype = uint32_t>
class PxFlags
{
  public:
	typedef storagetype InternalType;

	// Deserialization constructor: leaves mBits untouched on purpose.
	PX_CUDA_CALLABLE PX_INLINE explicit PxFlags(const PxEMPTY)
	{
	}
	PX_CUDA_CALLABLE PX_INLINE PxFlags(void);
	PX_CUDA_CALLABLE PX_INLINE PxFlags(enumtype e);
	PX_CUDA_CALLABLE PX_INLINE PxFlags(const PxFlags<enumtype, storagetype>& f);
	PX_CUDA_CALLABLE PX_INLINE explicit PxFlags(storagetype b);

	// comparisons against a single enum value, another flag set, or the
	// "any bit set" boolean state
	PX_CUDA_CALLABLE PX_INLINE bool operator==(enumtype e) const;
	PX_CUDA_CALLABLE PX_INLINE bool operator==(const PxFlags<enumtype, storagetype>& f) const;
	PX_CUDA_CALLABLE PX_INLINE bool operator==(bool b) const;
	PX_CUDA_CALLABLE PX_INLINE bool operator!=(enumtype e) const;
	PX_CUDA_CALLABLE PX_INLINE bool operator!=(const PxFlags<enumtype, storagetype>& f) const;

	PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& operator=(const PxFlags<enumtype, storagetype>& f);
	PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& operator=(enumtype e);

	// bitwise OR / AND / XOR, in both compound-assignment and value form
	PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& operator|=(enumtype e);
	PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& operator|=(const PxFlags<enumtype, storagetype>& f);
	PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator|(enumtype e) const;
	PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator|(const PxFlags<enumtype, storagetype>& f) const;

	PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& operator&=(enumtype e);
	PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& operator&=(const PxFlags<enumtype, storagetype>& f);
	PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator&(enumtype e) const;
	PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator&(const PxFlags<enumtype, storagetype>& f) const;

	PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& operator^=(enumtype e);
	PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& operator^=(const PxFlags<enumtype, storagetype>& f);
	PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator^(enumtype e) const;
	PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator^(const PxFlags<enumtype, storagetype>& f) const;

	PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator~(void) const;

	// conversions: bool is "any bit set"; the integer conversions truncate to
	// the requested width
	PX_CUDA_CALLABLE PX_INLINE operator bool(void) const;
	PX_CUDA_CALLABLE PX_INLINE operator uint8_t(void) const;
	PX_CUDA_CALLABLE PX_INLINE operator uint16_t(void) const;
	PX_CUDA_CALLABLE PX_INLINE operator uint32_t(void) const;

	PX_CUDA_CALLABLE PX_INLINE void clear(enumtype e);
	PX_CUDA_CALLABLE PX_INLINE void raise(enumtype e);
	PX_CUDA_CALLABLE PX_INLINE bool isSet(enumtype e) const;
	PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& setAll(enumtype e);

  public:
	// (enum & flags) with the enum on the left-hand side
	friend PX_INLINE PxFlags<enumtype, storagetype> operator&(enumtype a, PxFlags<enumtype, storagetype>& b)
	{
		PxFlags<enumtype, storagetype> out;
		out.mBits = a & b.mBits;
		return out;
	}

  private:
	storagetype mBits;
};

#if !PX_DOXYGEN

// Defines |, & and ~ for bare enum values so flag expressions read naturally.
#define PX_FLAGS_OPERATORS(enumtype, storagetype)                                                                      \
	PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator|(enumtype a, enumtype b)                        \
	{                                                                                                                  \
		PxFlags<enumtype, storagetype> r(a);                                                                           \
		r |= b;                                                                                                        \
		return r;                                                                                                      \
	}                                                                                                                  \
	PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator&(enumtype a, enumtype b)                        \
	{                                                                                                                  \
		PxFlags<enumtype, storagetype> r(a);                                                                           \
		r &= b;                                                                                                        \
		return r;                                                                                                      \
	}                                                                                                                  \
	PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator~(enumtype a)                                    \
	{                                                                                                                  \
		return ~PxFlags<enumtype, storagetype>(a);                                                                     \
	}

// Declares the conventional 'Xs' typedef for flag type X and its operators.
#define PX_FLAGS_TYPEDEF(x, y)                                                                                         \
	typedef PxFlags<x::Enum, y> x##s;                                                                                  \
	PX_FLAGS_OPERATORS(x::Enum, y)

// --- constructors ---

template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>::PxFlags(void)
{
	mBits = 0;
}

template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>::PxFlags(enumtype e)
{
	mBits = static_cast<storagetype>(e);
}

template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>::PxFlags(const PxFlags<enumtype, storagetype>& f)
{
	mBits = f.mBits;
}

template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>::PxFlags(storagetype b)
{
	mBits = b;
}

// --- comparisons ---

template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE bool PxFlags<enumtype, storagetype>::operator==(enumtype e) const
{
	return mBits == static_cast<storagetype>(e);
}

template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE bool PxFlags<enumtype, storagetype>::operator==(const PxFlags<enumtype, storagetype>& f) const
{
	return mBits == f.mBits;
}

// compares the "any bit set" state, not a specific bit
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE bool PxFlags<enumtype, storagetype>::operator==(bool b) const
{
	return bool(*this) == b;
}

template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE bool PxFlags<enumtype, storagetype>::operator!=(enumtype e) const
{
	return mBits != static_cast<storagetype>(e);
}

template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE bool PxFlags<enumtype, storagetype>::operator!=(const PxFlags<enumtype, storagetype>& f) const
{
	return mBits != f.mBits;
}

// --- assignment ---

template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>::operator=(enumtype e)
{
	mBits = static_cast<storagetype>(e);
	return *this;
}

template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>::operator=(const PxFlags<enumtype, storagetype>& f)
{
	mBits = f.mBits;
	return *this;
}

// --- OR ---

template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>::operator|=(enumtype e)
{
	mBits |= static_cast<storagetype>(e);
	return *this;
}

template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>::
operator|=(const PxFlags<enumtype, storagetype>& f)
{
	mBits |= f.mBits;
	return *this;
}

template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> PxFlags<enumtype, storagetype>::operator|(enumtype e) const
{
	PxFlags<enumtype, storagetype> out(*this);
	out |= e;
	return out;
}

template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> PxFlags<enumtype, storagetype>::
operator|(const PxFlags<enumtype, storagetype>& f) const
{
	PxFlags<enumtype, storagetype> out(*this);
	out |= f;
	return out;
}

// --- AND ---

template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>::operator&=(enumtype e)
{
	mBits &= static_cast<storagetype>(e);
	return *this;
}

template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>::
operator&=(const PxFlags<enumtype, storagetype>& f)
{
	mBits &= f.mBits;
	return *this;
}

template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> PxFlags<enumtype, storagetype>::operator&(enumtype e) const
{
	PxFlags<enumtype, storagetype> out = *this;
	out.mBits &= static_cast<storagetype>(e);
	return out;
}

template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> PxFlags<enumtype, storagetype>::
operator&(const PxFlags<enumtype, storagetype>& f) const
{
	PxFlags<enumtype, storagetype> out = *this;
	out.mBits &= f.mBits;
	return out;
}

// --- XOR ---

template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>::operator^=(enumtype e)
{
	mBits ^= static_cast<storagetype>(e);
	return *this;
}

template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>::
operator^=(const PxFlags<enumtype, storagetype>& f)
{
	mBits ^= f.mBits;
	return *this;
}

template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> PxFlags<enumtype, storagetype>::operator^(enumtype e) const
{
	PxFlags<enumtype, storagetype> out = *this;
	out.mBits ^= static_cast<storagetype>(e);
	return out;
}

template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> PxFlags<enumtype, storagetype>::
operator^(const PxFlags<enumtype, storagetype>& f) const
{
	PxFlags<enumtype, storagetype> out = *this;
	out.mBits ^= f.mBits;
	return out;
}

template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> PxFlags<enumtype, storagetype>::operator~(void) const
{
	PxFlags<enumtype, storagetype> out;
	out.mBits = storagetype(~mBits);
	return out;
}

// --- conversions ---

template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>::operator bool(void) const
{
	return mBits ? true : false;
}

template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>::operator uint8_t(void) const
{
	return static_cast<uint8_t>(mBits);
}

template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>::operator uint16_t(void) const
{
	return static_cast<uint16_t>(mBits);
}

template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>::operator uint32_t(void) const
{
	return static_cast<uint32_t>(mBits);
}

// --- bit manipulation helpers ---

template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE void PxFlags<enumtype, storagetype>::clear(enumtype e)
{
	mBits &= ~static_cast<storagetype>(e);
}

template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE void PxFlags<enumtype, storagetype>::raise(enumtype e)
{
	mBits |= static_cast<storagetype>(e);
}

// true only if ALL bits of e are set (note: trivially true when e == 0)
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE bool PxFlags<enumtype, storagetype>::isSet(enumtype e) const
{
	return (mBits & static_cast<storagetype>(e)) == static_cast<storagetype>(e);
}

// replaces the whole bitfield with exactly e
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>::setAll(enumtype e)
{
	mBits = static_cast<storagetype>(e);
	return *this;
}

} // namespace physx
#endif //!PX_DOXYGEN

/** @} */
#endif
13,283
C
33.59375
141
0.737258
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxVec2.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_VEC2_H #define PX_VEC2_H /** \addtogroup foundation @{ */ #include "foundation/PxMath.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief 2 Element vector class. This is a 2-dimensional vector class with public data members. 
*/ template<class Type> class PxVec2T { public: /** \brief default constructor leaves data uninitialized. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T() { } /** \brief zero constructor. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T(PxZERO) : x(Type(0.0)), y(Type(0.0)) { } /** \brief Assigns scalar parameter to all elements. Useful to initialize to zero or one. \param[in] a Value to assign to elements. */ explicit PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T(Type a) : x(a), y(a) { } /** \brief Initializes from 2 scalar parameters. \param[in] nx Value to initialize X component. \param[in] ny Value to initialize Y component. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T(Type nx, Type ny) : x(nx), y(ny) { } /** \brief Copy ctor. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T(const PxVec2T& v) : x(v.x), y(v.y) { } // Operators /** \brief Assignment operator */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T& operator=(const PxVec2T& p) { x = p.x; y = p.y; return *this; } /** \brief element access */ PX_CUDA_CALLABLE PX_FORCE_INLINE Type& operator[](unsigned int index) { PX_ASSERT(index <= 1); return reinterpret_cast<Type*>(this)[index]; } /** \brief element access */ PX_CUDA_CALLABLE PX_FORCE_INLINE const Type& operator[](unsigned int index) const { PX_ASSERT(index <= 1); return reinterpret_cast<const Type*>(this)[index]; } /** \brief returns true if the two vectors are exactly equal. */ PX_CUDA_CALLABLE PX_FORCE_INLINE bool operator==(const PxVec2T& v) const { return x == v.x && y == v.y; } /** \brief returns true if the two vectors are not exactly equal. */ PX_CUDA_CALLABLE PX_FORCE_INLINE bool operator!=(const PxVec2T& v) const { return x != v.x || y != v.y; } /** \brief tests for exact zero vector */ PX_CUDA_CALLABLE PX_FORCE_INLINE bool isZero() const { return x == Type(0.0) && y == Type(0.0); } /** \brief returns true if all 2 elems of the vector are finite (not NAN or INF, etc.) 
*/ PX_CUDA_CALLABLE PX_INLINE bool isFinite() const { return PxIsFinite(x) && PxIsFinite(y); } /** \brief is normalized - used by API parameter validation */ PX_CUDA_CALLABLE PX_FORCE_INLINE bool isNormalized() const { const Type unitTolerance = Type(1e-4); return isFinite() && PxAbs(magnitude() - Type(1.0)) < unitTolerance; } /** \brief returns the squared magnitude Avoids calling PxSqrt()! */ PX_CUDA_CALLABLE PX_FORCE_INLINE Type magnitudeSquared() const { return x * x + y * y; } /** \brief returns the magnitude */ PX_CUDA_CALLABLE PX_FORCE_INLINE Type magnitude() const { return PxSqrt(magnitudeSquared()); } /** \brief negation */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T operator-() const { return PxVec2T(-x, -y); } /** \brief vector addition */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T operator+(const PxVec2T& v) const { return PxVec2T(x + v.x, y + v.y); } /** \brief vector difference */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T operator-(const PxVec2T& v) const { return PxVec2T(x - v.x, y - v.y); } /** \brief scalar post-multiplication */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T operator*(Type f) const { return PxVec2T(x * f, y * f); } /** \brief scalar division */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T operator/(Type f) const { f = Type(1.0) / f; return PxVec2T(x * f, y * f); } /** \brief vector addition */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T& operator+=(const PxVec2T& v) { x += v.x; y += v.y; return *this; } /** \brief vector difference */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T& operator-=(const PxVec2T& v) { x -= v.x; y -= v.y; return *this; } /** \brief scalar multiplication */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T& operator*=(Type f) { x *= f; y *= f; return *this; } /** \brief scalar division */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T& operator/=(Type f) { f = Type(1.0) / f; x *= f; y *= f; return *this; } /** \brief returns the scalar product of this and other. 
*/ PX_CUDA_CALLABLE PX_FORCE_INLINE Type dot(const PxVec2T& v) const { return x * v.x + y * v.y; } /** returns a unit vector */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T getNormalized() const { const Type m = magnitudeSquared(); return m > Type(0.0) ? *this * PxRecipSqrt(m) : PxVec2T(Type(0)); } /** \brief normalizes the vector in place */ PX_CUDA_CALLABLE PX_FORCE_INLINE Type normalize() { const Type m = magnitude(); if(m > Type(0.0)) *this /= m; return m; } /** \brief a[i] * b[i], for all i. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T multiply(const PxVec2T& a) const { return PxVec2T(x * a.x, y * a.y); } /** \brief element-wise minimum */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T minimum(const PxVec2T& v) const { return PxVec2T(PxMin(x, v.x), PxMin(y, v.y)); } /** \brief returns MIN(x, y); */ PX_CUDA_CALLABLE PX_FORCE_INLINE Type minElement() const { return PxMin(x, y); } /** \brief element-wise maximum */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T maximum(const PxVec2T& v) const { return PxVec2T(PxMax(x, v.x), PxMax(y, v.y)); } /** \brief returns MAX(x, y); */ PX_CUDA_CALLABLE PX_FORCE_INLINE Type maxElement() const { return PxMax(x, y); } Type x, y; }; template<class Type> PX_CUDA_CALLABLE static PX_FORCE_INLINE PxVec2T<Type> operator*(Type f, const PxVec2T<Type>& v) { return PxVec2T<Type>(f * v.x, f * v.y); } typedef PxVec2T<float> PxVec2; typedef PxVec2T<double> PxVec2d; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
7,543
C
20.554286
95
0.674135
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxBitUtils.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_BIT_UTILS_H #define PX_BIT_UTILS_H #include "foundation/PxMathIntrinsics.h" #include "foundation/PxAssert.h" #include "foundation/PxIntrinsics.h" #include "foundation/PxMathIntrinsics.h" #if !PX_DOXYGEN namespace physx { #endif PX_INLINE uint32_t PxBitCount(uint32_t v) { // from http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel uint32_t const w = v - ((v >> 1) & 0x55555555); uint32_t const x = (w & 0x33333333) + ((w >> 2) & 0x33333333); return (((x + (x >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24; } PX_INLINE bool PxIsPowerOfTwo(uint32_t x) { return x != 0 && (x & (x - 1)) == 0; } // "Next Largest Power of 2 // Given a binary integer value x, the next largest power of 2 can be computed by a SWAR algorithm // that recursively "folds" the upper bits into the lower bits. This process yields a bit vector with // the same most significant 1 as x, but all 1's below it. Adding 1 to that value yields the next // largest power of 2. For a 32-bit value:" PX_INLINE uint32_t PxNextPowerOfTwo(uint32_t x) { x |= (x >> 1); x |= (x >> 2); x |= (x >> 4); x |= (x >> 8); x |= (x >> 16); return x + 1; } /*! Return the index of the highest set bit. Not valid for zero arg. */ PX_INLINE uint32_t PxLowestSetBit(uint32_t x) { PX_ASSERT(x); return PxLowestSetBitUnsafe(x); } /*! Return the index of the highest set bit. Not valid for zero arg. */ PX_INLINE uint32_t PxHighestSetBit(uint32_t x) { PX_ASSERT(x); return PxHighestSetBitUnsafe(x); } // Helper function to approximate log2 of an integer value // assumes that the input is actually power of two. PX_INLINE uint32_t PxILog2(uint32_t num) { for(uint32_t i = 0; i < 32; i++) { num >>= 1; if(num == 0) return i; } PX_ASSERT(0); return uint32_t(-1); } #if !PX_DOXYGEN } // namespace physx #endif #endif
3,517
C
30.981818
101
0.71652
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxUserAllocated.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_USER_ALLOCATED_H #define PX_USER_ALLOCATED_H #include "PxAllocator.h" #if !PX_DOXYGEN namespace physx { #endif /** Provides new and delete using a UserAllocator. Guarantees that 'delete x;' uses the UserAllocator too. 
*/
class PxUserAllocated
{
  public:
	// PX_SERIALIZATION
	// Placement new: constructs the object at a caller-provided address without
	// allocating. Used when inflating serialized objects in-place.
	PX_INLINE void* operator new(size_t, void* address)
	{
		return address;
	}
	//~PX_SERIALIZATION
	// Matching placement delete for the placement new above. The compiler calls
	// this only if a constructor invoked via that placement new throws; there is
	// nothing to free, so the body is empty.
	PX_INLINE void operator delete(void*, void*)
	{
	}
	// Allocator-aware new: routes the allocation through the supplied allocator,
	// forwarding file/line so allocations can be tracked.
	template <typename Alloc>
	PX_INLINE void* operator new(size_t size, Alloc alloc, const char* fileName, int line)
	{
		return alloc.allocate(size, fileName, line);
	}
	// Aligned variant: the alignment request is ignored because the allocator is
	// assumed to return sufficiently aligned memory (original comment said
	// "16bit aligned allocator" -- presumably 16-byte; confirm against the
	// allocator implementation).
	template <typename Alloc>
	PX_INLINE void* operator new(size_t size, size_t /*align*/, Alloc alloc, const char* fileName, int line)
	{
		return alloc.allocate(size, fileName, line);
	}
	// Array counterpart of the allocator-aware new.
	template <typename Alloc>
	PX_INLINE void* operator new [](size_t size, Alloc alloc, const char* fileName, int line)
	{
		return alloc.allocate(size, fileName, line);
	}
	// Array counterpart of the aligned allocator-aware new; alignment is ignored
	// for the same reason as above.
	template <typename Alloc>
	PX_INLINE void* operator new [](size_t size, size_t /*align*/, Alloc alloc, const char* fileName, int line)
	{
		return alloc.allocate(size, fileName, line);
	}
	// placement delete
	// Called by the compiler if a constructor throws during the allocator-aware
	// new above; releases the memory through the same allocator.
	template <typename Alloc>
	PX_INLINE void operator delete(void* ptr, Alloc alloc, const char* fileName, int line)
	{
		PX_UNUSED(fileName);
		PX_UNUSED(line);
		alloc.deallocate(ptr);
	}
	template <typename Alloc>
	PX_INLINE void operator delete [](void* ptr, Alloc alloc, const char* fileName, int line)
	{
		PX_UNUSED(fileName);
		PX_UNUSED(line);
		alloc.deallocate(ptr);
	}
	// Regular delete: guarantees that 'delete x;' releases through PxAllocator,
	// matching the class contract stated in the header comment.
	PX_INLINE void operator delete(void* ptr)
	{
		PxAllocator().deallocate(ptr);
	}
	PX_INLINE void operator delete [](void* ptr)
	{
		PxAllocator().deallocate(ptr);
	}
};
#if !PX_DOXYGEN
} // namespace physx
#endif

#endif
3,782
C
31.333333
109
0.722898
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxVecQuat.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_VEC_QUAT_H
#define PX_VEC_QUAT_H

#if !PX_DOXYGEN
namespace physx
{
#endif

namespace aos
{
// Half pi; used below to derive cos(a) as sin(pi/2 - a) with one vectorized sine.
#ifndef PX_PIDIV2
#define PX_PIDIV2 1.570796327f
#endif

//////////////////////////////////
// QuatV
//////////////////////////////////

// Loads a quaternion from four scalars: (x, y, z) imaginary part, w real part.
PX_FORCE_INLINE QuatV QuatVLoadXYZW(const PxF32 x, const PxF32 y, const PxF32 z, const PxF32 w)
{
	return V4LoadXYZW(x, y, z, w);
}

// Loads a quaternion from a float array with no alignment requirement.
PX_FORCE_INLINE QuatV QuatVLoadU(const PxF32* v)
{
	return V4LoadU(v);
}

// Loads a quaternion from an aligned float array.
PX_FORCE_INLINE QuatV QuatVLoadA(const PxF32* v)
{
	return V4LoadA(v);
}

// Builds the quaternion for a rotation of angle 'a' (radians) about axis 'u'
// (assumes u is normalized -- TODO confirm; not checked here).
PX_FORCE_INLINE QuatV QuatV_From_RotationAxisAngle(const Vec3V u, const FloatV a)
{
	// q = cos(a/2) + u*sin(a/2)
	const FloatV half = FLoad(0.5f);
	const FloatV hangle = FMul(a, half);
	// Pack (a/2, pi/2 - a/2, a/2) so a single V4Sin yields both sin(a/2) and cos(a/2).
	const FloatV piByTwo(FLoad(PX_PIDIV2));
	const FloatV PiByTwoMinHangle(FSub(piByTwo, hangle));
	const Vec4V hangle2(Vec4V_From_Vec3V(V3Merge(hangle, PiByTwoMinHangle, hangle)));

	/*const FloatV sina = FSin(hangle);
	const FloatV cosa = FCos(hangle);*/
	const Vec4V _sina = V4Sin(hangle2);
	const FloatV sina = V4GetX(_sina); // sin(a/2)
	const FloatV cosa = V4GetY(_sina); // sin(pi/2 - a/2) == cos(a/2)

	const Vec3V v = V3Scale(u, sina);

	// return V4Sel(BTTTF(), Vec4V_From_Vec3V(v), V4Splat(cosa));
	return V4SetW(Vec4V_From_Vec3V(v), cosa);
}

// Normalize
PX_FORCE_INLINE QuatV QuatNormalize(const QuatV q)
{
	return V4Normalize(q);
}

// Euclidean length of the quaternion's four components.
PX_FORCE_INLINE FloatV QuatLength(const QuatV q)
{
	return V4Length(q);
}

// Squared length; avoids the square root.
PX_FORCE_INLINE FloatV QuatLengthSq(const QuatV q)
{
	return V4LengthSq(q);
}

// 4D dot product of two quaternions.
PX_FORCE_INLINE FloatV QuatDot(const QuatV a, const QuatV b)
{
	return V4Dot(a, b);
}

// Conjugate: negates the imaginary part, keeps w.
PX_FORCE_INLINE QuatV QuatConjugate(const QuatV q)
{
	return V4SetW(V4Neg(q), V4GetW(q));
}

// Returns the (x, y, z) imaginary part.
PX_FORCE_INLINE Vec3V QuatGetImaginaryPart(const QuatV q)
{
	return Vec3V_From_Vec4V(q);
}

/** \brief computes rotation of x-axis, i.e. the first basis vector of the rotation described by q */
PX_FORCE_INLINE Vec3V QuatGetBasisVector0(const QuatV q)
{
	/* scalar reference:
	const PxF32 x2 = x*2.0f;
	const PxF32 w2 = w*2.0f;
	return PxVec3((w * w2) - 1.0f + x*x2, (z * w2) + y*x2, (-y * w2) + z*x2);*/
	const FloatV two = FLoad(2.f);
	const FloatV w = V4GetW(q);
	const Vec3V u = Vec3V_From_Vec4V(q);

	const FloatV x2 = FMul(V3GetX(u), two);
	const FloatV w2 = FMul(w, two);

	const Vec3V a = V3Scale(u, x2);
	const Vec3V tmp = V3Merge(w, V3GetZ(u), FNeg(V3GetY(u)));
	// const Vec3V b = V3Scale(tmp, w2);
	// const Vec3V ab = V3Add(a, b);
	const Vec3V ab = V3ScaleAdd(tmp, w2, a); // a + tmp*w2, fused
	return V3SetX(ab, FSub(V3GetX(ab), FOne())); // subtract the trailing 1 from the x lane
}

/** \brief computes rotation of y-axis, i.e. the second basis vector of the rotation described by q */
PX_FORCE_INLINE Vec3V QuatGetBasisVector1(const QuatV q)
{
	/* scalar reference:
	const PxF32 y2 = y*2.0f;
	const PxF32 w2 = w*2.0f;
	return PxVec3( (-z * w2) + x*y2, (w * w2) - 1.0f + y*y2, (x * w2) + z*y2);*/
	const FloatV two = FLoad(2.f);
	const FloatV w = V4GetW(q);
	const Vec3V u = Vec3V_From_Vec4V(q);

	const FloatV y2 = FMul(V3GetY(u), two);
	const FloatV w2 = FMul(w, two);

	const Vec3V a = V3Scale(u, y2);
	const Vec3V tmp = V3Merge(FNeg(V3GetZ(u)), w, V3GetX(u));
	// const Vec3V b = V3Scale(tmp, w2);
	// const Vec3V ab = V3Add(a, b);
	const Vec3V ab = V3ScaleAdd(tmp, w2, a);
	return V3SetY(ab, FSub(V3GetY(ab), FOne())); // subtract the trailing 1 from the y lane
}

/** \brief computes rotation of z-axis, i.e. the third basis vector of the rotation described by q */
PX_FORCE_INLINE Vec3V QuatGetBasisVector2(const QuatV q)
{
	/* scalar reference:
	const PxF32 z2 = z*2.0f;
	const PxF32 w2 = w*2.0f;
	return PxVec3( (y * w2) + x*z2, (-x * w2) + y*z2, (w * w2) - 1.0f + z*z2);*/
	const FloatV two = FLoad(2.f);
	const FloatV w = V4GetW(q);
	const Vec3V u = Vec3V_From_Vec4V(q);

	const FloatV z2 = FMul(V3GetZ(u), two);
	const FloatV w2 = FMul(w, two);

	const Vec3V a = V3Scale(u, z2);
	const Vec3V tmp = V3Merge(V3GetY(u), FNeg(V3GetX(u)), w);
	/*const Vec3V b = V3Scale(tmp, w2);
	const Vec3V ab = V3Add(a, b);*/
	const Vec3V ab = V3ScaleAdd(tmp, w2, a);
	return V3SetZ(ab, FSub(V3GetZ(ab), FOne())); // subtract the trailing 1 from the z lane
}

// Rotates vector v by quaternion q (assumes q is a unit quaternion -- TODO confirm).
PX_FORCE_INLINE Vec3V QuatRotate(const QuatV q, const Vec3V v)
{
	/*
	const PxVec3 qv(x,y,z);
	return (v*(w*w-0.5f) + (qv.cross(v))*w + qv*(qv.dot(v)))*2;
	*/
	const FloatV two = FLoad(2.f);
	// const FloatV half = FloatV_From_F32(0.5f);
	const FloatV nhalf = FLoad(-0.5f);
	const Vec3V u = Vec3V_From_Vec4V(q);
	const FloatV w = V4GetW(q);
	// const FloatV w2 = FSub(FMul(w, w), half);
	const FloatV w2 = FScaleAdd(w, w, nhalf); // w*w - 0.5, fused
	const Vec3V a = V3Scale(v, w2);
	// const Vec3V b = V3Scale(V3Cross(u, v), w);
	// const Vec3V c = V3Scale(u, V3Dot(u, v));
	// return V3Scale(V3Add(V3Add(a, b), c), two);
	const Vec3V temp = V3ScaleAdd(V3Cross(u, v), w, a);
	return V3Scale(V3ScaleAdd(u, V3Dot(u, v), temp), two);
}

// Computes p + q.rotate(v): rotation followed by translation.
PX_FORCE_INLINE Vec3V QuatTransform(const QuatV q, const Vec3V p, const Vec3V v)
{
	// p + q.rotate(v)
	const FloatV two = FLoad(2.f);
	// const FloatV half = FloatV_From_F32(0.5f);
	const FloatV nhalf = FLoad(-0.5f);
	const Vec3V u = Vec3V_From_Vec4V(q);
	const FloatV w = V4GetW(q);
	// const FloatV w2 = FSub(FMul(w, w), half);
	const FloatV w2 = FScaleAdd(w, w, nhalf); // w*w - 0.5, fused
	const Vec3V a = V3Scale(v, w2);
	/*const Vec3V b = V3Scale(V3Cross(u, v), w);
	const Vec3V c = V3Scale(u, V3Dot(u, v));
	return V3ScaleAdd(V3Add(V3Add(a, b), c), two, p);*/
	const Vec3V temp = V3ScaleAdd(V3Cross(u, v), w, a);
	const Vec3V z = V3ScaleAdd(u, V3Dot(u, v), temp);
	return V3ScaleAdd(z, two, p);
}

// Rotates v by the inverse (conjugate) of q; same formula as QuatRotate with the
// cross-product term subtracted instead of added.
PX_FORCE_INLINE Vec3V QuatRotateInv(const QuatV q, const Vec3V v)
{
	// const PxVec3 qv(x,y,z);
	// return (v*(w*w-0.5f) - (qv.cross(v))*w + qv*(qv.dot(v)))*2;
	const FloatV two = FLoad(2.f);
	const FloatV nhalf = FLoad(-0.5f);
	const Vec3V u = Vec3V_From_Vec4V(q);
	const FloatV w = V4GetW(q);
	const FloatV w2 = FScaleAdd(w, w, nhalf); // w*w - 0.5, fused
	const Vec3V a = V3Scale(v, w2);
	/*const Vec3V b = V3Scale(V3Cross(u, v), w);
	const Vec3V c = V3Scale(u, V3Dot(u, v));
	return V3Scale(V3Add(V3Sub(a, b), c), two);*/
	const Vec3V temp = V3NegScaleSub(V3Cross(u, v), w, a); // a - cross(u,v)*w, fused
	return V3Scale(V3ScaleAdd(u, V3Dot(u, v), temp), two);
}

// Hamilton product a*b: real part rA*rB - dot(imA, imB),
// imaginary part imA*rB + imB*rA + cross(imA, imB).
PX_FORCE_INLINE QuatV QuatMul(const QuatV a, const QuatV b)
{
	const Vec4V imagA = a;
	const Vec4V imagB = b;
	const FloatV rA = V4GetW(a);
	const FloatV rB = V4GetW(b);

	const FloatV real = FSub(FMul(rA, rB), V4Dot3(imagA, imagB));

	const Vec4V v0 = V4Scale(imagA, rB);
	const Vec4V v1 = V4Scale(imagB, rA);
	const Vec4V v2 = V4Cross(imagA, imagB);
	const Vec4V imag = V4Add(V4Add(v0, v1), v2);

	return V4SetW(imag, real);
}

// Component-wise quaternion sum.
PX_FORCE_INLINE QuatV QuatAdd(const QuatV a, const QuatV b)
{
	return V4Add(a, b);
}

// Component-wise negation.
PX_FORCE_INLINE QuatV QuatNeg(const QuatV q)
{
	return V4Neg(q);
}

// Component-wise quaternion difference.
PX_FORCE_INLINE QuatV QuatSub(const QuatV a, const QuatV b)
{
	return V4Sub(a, b);
}

// Scales all four components by b.
PX_FORCE_INLINE QuatV QuatScale(const QuatV a, const FloatV b)
{
	return V4Scale(a, b);
}

// Merges four FloatVs (from an array) into one quaternion.
PX_FORCE_INLINE QuatV QuatMerge(const FloatV* const floatVArray)
{
	return V4Merge(floatVArray);
}

// Merges four individual FloatVs into one quaternion.
PX_FORCE_INLINE QuatV QuatMerge(const FloatVArg x, const FloatVArg y, const FloatVArg z, const FloatVArg w)
{
	return V4Merge(x, y, z, w);
}

// Identity quaternion (0, 0, 0, 1).
PX_FORCE_INLINE QuatV QuatIdentity()
{
	return V4SetW(V4Zero(), FOne());
}

// True when all four components are finite.
PX_FORCE_INLINE bool isFiniteQuatV(const QuatV q)
{
	return isFiniteVec4V(q);
}

#if PX_LINUX && PX_CLANG
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wbitwise-instead-of-logical" // bitwise intentionally chosen for performance
#endif
// Valid = finite and unit length within 1e-4 (uses bitwise & to avoid a branch;
// see the warning suppression above).
PX_FORCE_INLINE bool isValidQuatV(const QuatV q)
{
	const FloatV unitTolerance = FLoad(1e-4f);
	const FloatV tmp = FAbs(FSub(QuatLength(q), FOne()));
	const BoolV con = FIsGrtr(unitTolerance, tmp);
	return isFiniteVec4V(q) & (BAllEqTTTT(con) == 1);
}

// Sane = finite and unit length within a looser 1e-2 tolerance.
PX_FORCE_INLINE bool isSaneQuatV(const QuatV q)
{
	const FloatV unitTolerance = FLoad(1e-2f);
	const FloatV tmp = FAbs(FSub(QuatLength(q), FOne()));
	const BoolV con = FIsGrtr(unitTolerance, tmp);
	return isFiniteVec4V(q) & (BAllEqTTTT(con) == 1);
}
#if PX_LINUX && PX_CLANG
#pragma clang diagnostic pop
#endif

// Converts a (unit -- TODO confirm) quaternion to a 3x3 column-major rotation matrix
// using the standard quaternion-to-matrix formula, evaluated lane by lane.
// (An earlier commented-out variant computed the products with three V4 multiplies;
// it was removed here for readability.)
PX_FORCE_INLINE Mat33V QuatGetMat33V(const QuatVArg q)
{
	const FloatV one = FOne();
	const FloatV x = V4GetX(q);
	const FloatV y = V4GetY(q);
	const FloatV z = V4GetZ(q);
	const FloatV w = V4GetW(q);

	const FloatV x2 = FAdd(x, x);
	const FloatV y2 = FAdd(y, y);
	const FloatV z2 = FAdd(z, z);

	const FloatV xx = FMul(x2, x); // 2xx
	const FloatV yy = FMul(y2, y); // 2yy
	const FloatV zz = FMul(z2, z); // 2zz

	const FloatV xy = FMul(x2, y); // 2xy
	const FloatV xz = FMul(x2, z); // 2xz
	const FloatV xw = FMul(x2, w); // 2xw

	const FloatV yz = FMul(y2, z); // 2yz
	const FloatV yw = FMul(y2, w); // 2yw
	const FloatV zw = FMul(z2, w); // 2zw

	const FloatV v = FSub(one, xx); // 1 - 2xx, shared by column1/column2 diagonals

	const Vec3V column0 = V3Merge(FSub(FSub(one, yy), zz), FAdd(xy, zw), FSub(xz, yw));
	const Vec3V column1 = V3Merge(FSub(xy, zw), FSub(v, zz), FAdd(yz, xw));
	const Vec3V column2 = V3Merge(FAdd(xz, yw), FSub(yz, xw), FSub(v, yy));

	return Mat33V(column0, column1, column2);
}

// Converts a (pure rotation -- TODO confirm) 3x3 matrix to a quaternion. Uses the
// trace-based branch: when the trace is non-negative the w-dominant formula is used,
// otherwise the formula anchored on the largest diagonal element is selected.
PX_FORCE_INLINE QuatV Mat33GetQuatV(const Mat33V& a)
{
	const FloatV one = FOne();
	const FloatV zero = FZero();
	const FloatV half = FLoad(0.5f);
	const FloatV two = FLoad(2.f);
	const FloatV scale = FLoad(0.25f);

	const FloatV a00 = V3GetX(a.col0);
	const FloatV a11 = V3GetY(a.col1);
	const FloatV a22 = V3GetZ(a.col2);

	const FloatV a21 = V3GetZ(a.col1); // row=2, col=1;
	const FloatV a12 = V3GetY(a.col2); // row=1, col=2;
	const FloatV a02 = V3GetX(a.col2); // row=0, col=2;
	const FloatV a20 = V3GetZ(a.col0); // row=2, col=0;
	const FloatV a10 = V3GetY(a.col0); // row=1, col=0;
	const FloatV a01 = V3GetX(a.col1); // row=0, col=1;

	const Vec3V vec0 = V3Merge(a21, a02, a10);
	const Vec3V vec1 = V3Merge(a12, a20, a01);
	const Vec3V v = V3Sub(vec0, vec1); // antisymmetric differences
	const Vec3V g = V3Add(vec0, vec1); // symmetric sums

	const FloatV trace = FAdd(a00, FAdd(a11, a22));

	if(FAllGrtrOrEq(trace, zero))
	{
		// w-dominant case: w = sqrt(trace + 1) / 2.
		const FloatV h = FSqrt(FAdd(trace, one));
		const FloatV w = FMul(half, h);
		const FloatV s = FMul(half, FRecip(h));
		const Vec3V u = V3Scale(v, s);
		return V4SetW(Vec4V_From_Vec3V(u), w);
	}
	else
	{
		// Anchor on the largest diagonal element (con0: a00 is max, con1: a11 is max).
		const FloatV ntrace = FNeg(trace);
		const Vec3V d = V3Merge(a00, a11, a22);
		const BoolV con0 = BAllTrue3(V3IsGrtrOrEq(V3Splat(a00), d));
		const BoolV con1 = BAllTrue3(V3IsGrtrOrEq(V3Splat(a11), d));

		const FloatV t0 = FAdd(one, FScaleAdd(a00, two, ntrace));
		const FloatV t1 = FAdd(one, FScaleAdd(a11, two, ntrace));
		const FloatV t2 = FAdd(one, FScaleAdd(a22, two, ntrace));

		const FloatV t = FSel(con0, t0, FSel(con1, t1, t2));

		const FloatV h = FMul(two, FSqrt(t));
		const FloatV s = FRecip(h);
		const FloatV g0 = FMul(scale, h);

		const Vec3V vs = V3Scale(v, s);
		const Vec3V gs = V3Scale(g, s);
		const FloatV gsx = V3GetX(gs);
		const FloatV gsy = V3GetY(gs);
		const FloatV gsz = V3GetZ(gs);
		// vs.x= (a21 - a12)*s; vs.y=(a02 - a20)*s; vs.z=(a10 - a01)*s;
		// gs.x= (a21 + a12)*s; gs.y=(a02 + a20)*s; gs.z=(a10 + a01)*s;

		const Vec4V v0 = V4Merge(g0, gsz, gsy, V3GetX(vs));
		const Vec4V v1 = V4Merge(gsz, g0, gsx, V3GetY(vs));
		const Vec4V v2 = V4Merge(gsy, gsx, g0, V3GetZ(vs));
		return V4Sel(con0, v0, V4Sel(con1, v1, v2));
	}
}

} // namespace aos

#if !PX_DOXYGEN
} // namespace physx
#endif

#endif
14,038
C
28.680761
111
0.660707
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxPlane.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_PLANE_H #define PX_PLANE_H /** \addtogroup foundation @{ */ #include "foundation/PxTransform.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief Representation of a plane. 
Plane equation used: n.dot(v) + d = 0 */
class PxPlane
{
  public:
	/**
	\brief Default constructor. Leaves n and d uninitialized.
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxPlane()
	{
	}

	/**
	\brief Constructor from the three components of the normal and a distance
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxPlane(float nx, float ny, float nz, float distance) : n(nx, ny, nz), d(distance)
	{
	}

	/**
	\brief Constructor from a normal and a distance
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxPlane(const PxVec3& normal, float distance) : n(normal), d(distance)
	{
	}

	/**
	\brief Constructor from a point on the plane and a normal.
	d is chosen so that the given point satisfies the plane equation.
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxPlane(const PxVec3& point, const PxVec3& normal)
	: n(normal), d(-point.dot(n)) // p satisfies normal.dot(p) + d = 0
	{
	}

	/**
	\brief Constructor from three points.
	The normal is the normalized cross product (p1 - p0) x (p2 - p0), i.e. winding order determines which side the normal points to.
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxPlane(const PxVec3& p0, const PxVec3& p1, const PxVec3& p2)
	{
		n = (p1 - p0).cross(p2 - p0).getNormalized();
		d = -p0.dot(n);
	}

	/**
	\brief returns true if the two planes are exactly equal (exact float comparison, no epsilon)
	*/
	PX_CUDA_CALLABLE PX_INLINE bool operator==(const PxPlane& p) const
	{
		return n == p.n && d == p.d;
	}

	/**
	\brief signed distance from p to the plane; positive on the side the normal points to.
	Note: this is a true metric distance only when n is unit length.
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE float distance(const PxVec3& p) const
	{
		return p.dot(n) + d;
	}

	/**
	\brief returns true if p lies on the plane, within a fixed 1e-7 tolerance on the signed distance
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE bool contains(const PxVec3& p) const
	{
		return PxAbs(distance(p)) < (1.0e-7f);
	}

	/**
	\brief projects p into the plane (moves p along the normal by its signed distance)
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 project(const PxVec3& p) const
	{
		return p - n * distance(p);
	}

	/**
	\brief find an arbitrary point in the plane (the point closest to the origin when n is unit length)
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 pointInPlane() const
	{
		return -n * d;
	}

	/**
	\brief equivalent plane with unit normal.
	Scales d by the same factor so the represented plane is unchanged.
	NOTE(review): divides by n.magnitude() — assumes n is non-zero.
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE void normalize()
	{
		float denom = 1.0f / n.magnitude();
		n *= denom;
		d *= denom;
	}

	/**
	\brief transform plane by a rigid transform.
	The normal is rotated; d becomes d - p.dot(n') so that transformed points still satisfy the plane equation.
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxPlane transform(const PxTransform& pose) const
	{
		const PxVec3 transformedNormal = pose.rotate(n);
		return PxPlane(transformedNormal, d - pose.p.dot(transformedNormal));
	}

	/**
	\brief inverse-transform plane by a rigid transform.
	The normal is inverse-rotated; d becomes d + p.dot(n), the mirror of transform() above.
	*/
	PX_CUDA_CALLABLE
	PX_FORCE_INLINE PxPlane inverseTransform(const PxTransform& pose) const
	{
		const PxVec3 transformedNormal = pose.rotateInv(n);
		return PxPlane(transformedNormal, d + pose.p.dot(n));
	}

	PxVec3 n; //!< The normal to the plane
	float d;  //!< The distance from the origin

};

#if !PX_DOXYGEN
} // namespace physx
#endif

/** @} */
#endif
4,338
C
25.619632
116
0.709083
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxVecMathAoSScalarInline.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_VEC_MATH_AOS_SCALAR_INLINE_H #define PX_VEC_MATH_AOS_SCALAR_INLINE_H #if COMPILE_VECTOR_INTRINSICS #error Scalar version should not be included when using vector intrinsics. 
#endif #if PX_GCC_FAMILY #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wstrict-aliasing" #endif #if !PX_DOXYGEN namespace physx { #endif namespace aos { #define BOOL_TO_U32(b) PxU32(- PxI32(b)) #define TRUE_TO_U32 PxU32(-1) #define FALSE_TO_U32 PxU32(0) #define BOOL_TO_U16(b) PxU16(- PxI32(b)) #define PX_VECMATH_ASSERT_ENABLED 0 #if PX_VECMATH_ASSERT_ENABLED #define VECMATHAOS_ASSERT(x) { PX_ASSERT(x); } #else #define VECMATHAOS_ASSERT(x) #endif ///////////////////////////////////////////////////////////////////// ////INTERNAL USE ONLY AND TESTS ///////////////////////////////////////////////////////////////////// namespace internalScalarSimd { PX_FORCE_INLINE PxF32 FStore(const FloatV a) { return a.x; } PX_FORCE_INLINE bool hasZeroElementInFloatV(const FloatV a) { return (0 == a.x); } PX_FORCE_INLINE bool hasZeroElementInVec3V(const Vec3V a) { return (0 == a.x || 0 == a.y || 0 == a.z); } PX_FORCE_INLINE bool hasZeroElementInVec4V(const Vec4V a) { return (0 == a.x || 0 == a.y || 0 == a.z || 0 == a.w); } } namespace vecMathTests { // PT: this function returns an invalid Vec3V (W!=0.0f) just for unit-testing 'isValidVec3V' PX_FORCE_INLINE Vec3V getInvalidVec3V() { Vec3V tmp; tmp.x = tmp.y = tmp.z = 0.0f; tmp.pad = 1.0f; return tmp; } PX_FORCE_INLINE bool allElementsEqualFloatV(const FloatV a, const FloatV b) { return (a.x == b.x); } PX_FORCE_INLINE bool allElementsEqualVec3V(const Vec3V a, const Vec3V b) { return (a.x == b.x && a.y == b.y && a.z == b.z); } PX_FORCE_INLINE bool allElementsEqualVec4V(const Vec4V a, const Vec4V b) { return (a.x == b.x && a.y == b.y && a.z == b.z && a.w == b.w); } PX_FORCE_INLINE bool allElementsEqualBoolV(const BoolV a, const BoolV b) { return (a.ux == b.ux && a.uy == b.uy && a.uz == b.uz && a.uw == b.uw); } PX_FORCE_INLINE bool allElementsEqualVecU32V(const VecU32V a, const VecU32V b) { return (a.u32[0] == b.u32[0] && a.u32[1] == b.u32[1] && a.u32[2] == b.u32[2] && a.u32[3] == b.u32[3]); } PX_FORCE_INLINE bool 
allElementsEqualVecI32V(const VecI32V a, const VecI32V b) { return (a.i32[0] == b.i32[0] && a.i32[1] == b.i32[1] && a.i32[2] == b.i32[2] && a.i32[3] == b.i32[3]); } #define VECMATH_AOS_EPSILON (1e-3f) PX_FORCE_INLINE bool allElementsNearEqualFloatV(const FloatV a, const FloatV b) { const PxF32 cx = a.x - b.x; return (cx > -VECMATH_AOS_EPSILON && cx < VECMATH_AOS_EPSILON); } PX_FORCE_INLINE bool allElementsNearEqualVec3V(const Vec3V a, const Vec3V b) { const PxF32 cx = a.x - b.x; const PxF32 cy = a.y - b.y; const PxF32 cz = a.z - b.z; return (cx > -VECMATH_AOS_EPSILON && cx < VECMATH_AOS_EPSILON && cy > -VECMATH_AOS_EPSILON && cy < VECMATH_AOS_EPSILON && cz > -VECMATH_AOS_EPSILON && cz < VECMATH_AOS_EPSILON); } PX_FORCE_INLINE bool allElementsNearEqualVec4V(const Vec4V a, const Vec4V b) { const PxF32 cx = a.x - b.x; const PxF32 cy = a.y - b.y; const PxF32 cz = a.z - b.z; const PxF32 cw = a.w - b.w; return (cx > -VECMATH_AOS_EPSILON && cx < VECMATH_AOS_EPSILON && cy > -VECMATH_AOS_EPSILON && cy < VECMATH_AOS_EPSILON && cz > -VECMATH_AOS_EPSILON && cz < VECMATH_AOS_EPSILON && cw > -VECMATH_AOS_EPSILON && cw < VECMATH_AOS_EPSILON); } } /////////////////////////////////////////////////////// PX_FORCE_INLINE bool isValidVec3V(const Vec3V a) { return a.pad == 0.f; } PX_FORCE_INLINE bool isFiniteFloatV(const FloatV a) { return PxIsFinite(a.x); } PX_FORCE_INLINE bool isFiniteVec3V(const Vec3V a) { return PxIsFinite(a.x) && PxIsFinite(a.y) && PxIsFinite(a.z); } PX_FORCE_INLINE bool isFiniteVec4V(const Vec4V a) { return PxIsFinite(a.x) && PxIsFinite(a.y) && PxIsFinite(a.z) && PxIsFinite(a.w); } ///////////////////////////////////////////////////////////////////// ////VECTORISED FUNCTION IMPLEMENTATIONS ///////////////////////////////////////////////////////////////////// PX_FORCE_INLINE FloatV FLoad(const PxF32 f) { return FloatV(f); } PX_FORCE_INLINE Vec3V V3Load(const PxF32 f) { return Vec3V(f, f, f); } PX_FORCE_INLINE Vec4V V4Load(const PxF32 f) { return Vec4V(f, f, f, f); 
} PX_FORCE_INLINE BoolV BLoad(const bool f) { #if PX_ARM // SD: Android ARM builds fail if this is done with a cast. // Might also fail because of something else but the select // operator here seems to fix everything that failed in release builds. return f ? BTTTT() : BFFFF(); #else return BoolV(BOOL_TO_U32(f), BOOL_TO_U32(f), BOOL_TO_U32(f), BOOL_TO_U32(f)); #endif } PX_FORCE_INLINE Vec3V V3LoadA(const PxVec3& f) { return Vec3V(f.x, f.y, f.z); } PX_FORCE_INLINE Vec3V V3LoadU(const PxVec3& f) { return Vec3V(f.x, f.y, f.z); } PX_FORCE_INLINE Vec3V V3LoadUnsafeA(const PxVec3& f) { return Vec3V(f.x, f.y, f.z); } PX_FORCE_INLINE Vec3V V3LoadA(const PxF32* const f) { return Vec3V(f[0], f[1], f[2]); } PX_FORCE_INLINE Vec3V V3LoadU(const PxF32* const f) { return Vec3V(f[0], f[1], f[2]); } PX_FORCE_INLINE Vec3V Vec3V_From_Vec4V(Vec4V f) { return Vec3V(f.x, f.y, f.z); } PX_FORCE_INLINE Vec3V Vec3V_From_Vec4V_WUndefined(const Vec4V v) { return Vec3V(v.x, v.y, v.z); } PX_FORCE_INLINE Vec4V Vec4V_From_Vec3V(Vec3V f) { return Vec4V(f.x, f.y, f.z, 0.0f); } PX_FORCE_INLINE Vec4V Vec4V_From_FloatV(FloatV f) { return Vec4V(f.x, f.x, f.x, f.x); } PX_FORCE_INLINE Vec3V Vec3V_From_FloatV(FloatV f) { return Vec3V(f.x, f.x, f.x); } PX_FORCE_INLINE Vec3V Vec3V_From_FloatV_WUndefined(FloatV f) { return Vec3V(f.x, f.x, f.x); } PX_FORCE_INLINE Vec4V V4LoadA(const PxF32* const f) { return Vec4V(f[0], f[1], f[2], f[3]); } PX_FORCE_INLINE void V4StoreA(const Vec4V a, PxF32* f) { *reinterpret_cast<Vec4V*>(f) = a; } PX_FORCE_INLINE void V4StoreU(const Vec4V a, PxF32* f) { *reinterpret_cast<PxVec4*>(f) = *reinterpret_cast<const PxVec4*>(&a.x); } PX_FORCE_INLINE void BStoreA(const BoolV a, PxU32* f) { *reinterpret_cast<BoolV*>(f) = a; } PX_FORCE_INLINE void U4StoreA(const VecU32V uv, PxU32* u) { *reinterpret_cast<VecU32V*>(u) = uv; } PX_FORCE_INLINE void I4StoreA(const VecI32V iv, PxI32* i) { *reinterpret_cast<VecI32V*>(i) = iv; } PX_FORCE_INLINE Vec4V V4LoadU(const PxF32* const f) { return 
Vec4V(f[0], f[1], f[2], f[3]); } PX_FORCE_INLINE Vec4V Vec4V_From_PxVec3_WUndefined(const PxVec3& f) { return Vec4V(f[0], f[1], f[2], 0.0f); } PX_FORCE_INLINE BoolV BLoad(const bool* const f) { return BoolV(BOOL_TO_U32(f[0]), BOOL_TO_U32(f[1]), BOOL_TO_U32(f[2]), BOOL_TO_U32(f[3])); } PX_FORCE_INLINE void FStore(const FloatV a, PxF32* PX_RESTRICT f) { *f = a.x; } PX_FORCE_INLINE void V3StoreA(const Vec3V a, PxVec3& f) { f = PxVec3(a.x, a.y, a.z); } PX_FORCE_INLINE void V3StoreU(const Vec3V a, PxVec3& f) { f = PxVec3(a.x, a.y, a.z); } PX_FORCE_INLINE void Store_From_BoolV(const BoolV b, PxU32* b2) { *b2 = b.ux; } ////////////////////////// // FLOATV ////////////////////////// PX_FORCE_INLINE FloatV FZero() { return FLoad(0.0f); } PX_FORCE_INLINE FloatV FOne() { return FLoad(1.0f); } PX_FORCE_INLINE FloatV FHalf() { return FLoad(0.5f); } PX_FORCE_INLINE FloatV FEps() { return FLoad(PX_EPS_REAL); } PX_FORCE_INLINE FloatV FEps6() { return FLoad(1e-6f); } //! @cond PX_FORCE_INLINE FloatV FMax() { return FLoad(PX_MAX_REAL); } //! 
@endcond PX_FORCE_INLINE FloatV FNegMax() { return FLoad(-PX_MAX_REAL); } PX_FORCE_INLINE FloatV FNeg(const FloatV f) { return FloatV(-f.x); } PX_FORCE_INLINE FloatV FAdd(const FloatV a, const FloatV b) { return FloatV(a.x + b.x); } PX_FORCE_INLINE FloatV FSub(const FloatV a, const FloatV b) { return FloatV(a.x - b.x); } PX_FORCE_INLINE FloatV FMul(const FloatV a, const FloatV b) { return FloatV(a.x * b.x); } PX_FORCE_INLINE FloatV FDiv(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(b.x != 0.0f); return FloatV(a.x / b.x); } PX_FORCE_INLINE FloatV FDivFast(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(b.x != 0.0f); return FloatV(a.x / b.x); } PX_FORCE_INLINE FloatV FRecip(const FloatV a) { VECMATHAOS_ASSERT(a.x != 0.0f); return 1.0f / a.x; } PX_FORCE_INLINE FloatV FRecipFast(const FloatV a) { VECMATHAOS_ASSERT(a.x != 0.0f); return 1.0f / a.x; } PX_FORCE_INLINE FloatV FRsqrt(const FloatV a) { VECMATHAOS_ASSERT(a.x != 0.0f); return PxRecipSqrt(a.x); } PX_FORCE_INLINE FloatV FSqrt(const FloatV a) { return PxSqrt(a.x); } PX_FORCE_INLINE FloatV FRsqrtFast(const FloatV a) { VECMATHAOS_ASSERT(a.x != 0.0f); return PxRecipSqrt(a.x); } PX_FORCE_INLINE FloatV FScaleAdd(const FloatV a, const FloatV b, const FloatV c) { return FAdd(FMul(a, b), c); } PX_FORCE_INLINE FloatV FNegScaleSub(const FloatV a, const FloatV b, const FloatV c) { return FSub(c, FMul(a, b)); } PX_FORCE_INLINE FloatV FAbs(const FloatV a) { return FloatV(PxAbs(a.x)); } PX_FORCE_INLINE FloatV FSel(const BoolV c, const FloatV a, const FloatV b) { return FloatV(c.ux ? a.x : b.x); } PX_FORCE_INLINE BoolV FIsGrtr(const FloatV a, const FloatV b) { return BLoad(a.x > b.x); } PX_FORCE_INLINE BoolV FIsGrtrOrEq(const FloatV a, const FloatV b) { return BLoad(a.x >= b.x); } PX_FORCE_INLINE BoolV FIsEq(const FloatV a, const FloatV b) { return BLoad(a.x == b.x); } PX_FORCE_INLINE FloatV FMax(const FloatV a, const FloatV b) { return (a.x > b.x ? 
FloatV(a.x) : FloatV(b.x)); } PX_FORCE_INLINE FloatV FMin(const FloatV a, const FloatV b) { return (a.x > b.x ? FloatV(b.x) : FloatV(a.x)); } PX_FORCE_INLINE FloatV FClamp(const FloatV a, const FloatV minV, const FloatV maxV) { return FMax(FMin(a, maxV), minV); } PX_FORCE_INLINE PxU32 FAllGrtr(const FloatV a, const FloatV b) { return BOOL_TO_U32(a.x > b.x); } PX_FORCE_INLINE PxU32 FAllGrtrOrEq(const FloatV a, const FloatV b) { return BOOL_TO_U32(a.x >= b.x); } PX_FORCE_INLINE PxU32 FAllEq(const FloatV a, const FloatV b) { return BOOL_TO_U32(a.x == b.x); } PX_FORCE_INLINE FloatV FRound(const FloatV a) { return floorf(a.x + 0.5f); } PX_FORCE_INLINE FloatV FSin(const FloatV a) { return sinf(a.x); } PX_FORCE_INLINE FloatV FCos(const FloatV a) { return cosf(a.x); } PX_FORCE_INLINE PxU32 FOutOfBounds(const FloatV a, const FloatV min, const FloatV max) { return BOOL_TO_U32(a.x > max.x || a.x < min.x); } PX_FORCE_INLINE PxU32 FInBounds(const FloatV a, const FloatV min, const FloatV max) { return BOOL_TO_U32(a.x >= min.x && a.x <= max.x); } PX_FORCE_INLINE PxU32 FOutOfBounds(const FloatV a, const FloatV bounds) { return FOutOfBounds(a, FNeg(bounds), bounds); } PX_FORCE_INLINE PxU32 FInBounds(const FloatV a, const FloatV bounds) { return FInBounds(a, FNeg(bounds), bounds); } ///////////////////// // VEC3V ///////////////////// PX_FORCE_INLINE Vec3V V3Splat(const FloatV f) { return Vec3V(f.x, f.x, f.x); } PX_FORCE_INLINE Vec3V V3Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z) { return Vec3V(x.x, y.x, z.x); } PX_FORCE_INLINE Vec3V V3UnitX() { return Vec3V(1.0f, 0.0f, 0.0f); } PX_FORCE_INLINE Vec3V V3UnitY() { return Vec3V(0.0f, 1.0f, 0.0f); } PX_FORCE_INLINE Vec3V V3UnitZ() { return Vec3V(0.0f, 0.0f, 1.0f); } PX_FORCE_INLINE FloatV V3GetX(const Vec3V f) { return FloatV(f.x); } PX_FORCE_INLINE FloatV V3GetY(const Vec3V f) { return FloatV(f.y); } PX_FORCE_INLINE FloatV V3GetZ(const Vec3V f) { return FloatV(f.z); } PX_FORCE_INLINE Vec3V V3SetX(const Vec3V v, const 
FloatV f) { return Vec3V(f.x, v.y, v.z); } PX_FORCE_INLINE Vec3V V3SetY(const Vec3V v, const FloatV f) { return Vec3V(v.x, f.x, v.z); } PX_FORCE_INLINE Vec3V V3SetZ(const Vec3V v, const FloatV f) { return Vec3V(v.x, v.y, f.x); } PX_FORCE_INLINE Vec3V V3ColX(const Vec3V a, const Vec3V b, const Vec3V c) { return Vec3V(a.x, b.x, c.x); } PX_FORCE_INLINE Vec3V V3ColY(const Vec3V a, const Vec3V b, const Vec3V c) { return Vec3V(a.y, b.y, c.y); } PX_FORCE_INLINE Vec3V V3ColZ(const Vec3V a, const Vec3V b, const Vec3V c) { return Vec3V(a.z, b.z, c.z); } PX_FORCE_INLINE Vec3V V3Zero() { return V3Load(0.0f); } PX_FORCE_INLINE Vec3V V3One() { return V3Load(1.0f); } PX_FORCE_INLINE Vec3V V3Eps() { return V3Load(PX_EPS_REAL); } PX_FORCE_INLINE Vec3V V3Neg(const Vec3V c) { return Vec3V(-c.x, -c.y, -c.z); } PX_FORCE_INLINE Vec3V V3Add(const Vec3V a, const Vec3V b) { return Vec3V(a.x + b.x, a.y + b.y, a.z + b.z); } PX_FORCE_INLINE Vec3V V3Sub(const Vec3V a, const Vec3V b) { return Vec3V(a.x - b.x, a.y - b.y, a.z - b.z); } PX_FORCE_INLINE Vec3V V3Scale(const Vec3V a, const FloatV b) { return Vec3V(a.x * b.x, a.y * b.x, a.z * b.x); } PX_FORCE_INLINE Vec3V V3Mul(const Vec3V a, const Vec3V b) { return Vec3V(a.x * b.x, a.y * b.y, a.z * b.z); } PX_FORCE_INLINE Vec3V V3ScaleInv(const Vec3V a, const FloatV b) { const PxF32 bInv = 1.0f / b.x; return Vec3V(a.x * bInv, a.y * bInv, a.z * bInv); } PX_FORCE_INLINE Vec3V V3Div(const Vec3V a, const Vec3V b) { return Vec3V(a.x / b.x, a.y / b.y, a.z / b.z); } PX_FORCE_INLINE Vec3V V3ScaleInvFast(const Vec3V a, const FloatV b) { const PxF32 bInv = 1.0f / b.x; return Vec3V(a.x * bInv, a.y * bInv, a.z * bInv); } PX_FORCE_INLINE Vec3V V3DivFast(const Vec3V a, const Vec3V b) { return Vec3V(a.x / b.x, a.y / b.y, a.z / b.z); } PX_FORCE_INLINE Vec3V V3Recip(const Vec3V a) { return Vec3V(1.0f / a.x, 1.0f / a.y, 1.0f / a.z); } PX_FORCE_INLINE Vec3V V3RecipFast(const Vec3V a) { return Vec3V(1.0f / a.x, 1.0f / a.y, 1.0f / a.z); } PX_FORCE_INLINE Vec3V 
V3Rsqrt(const Vec3V a) { return Vec3V(PxRecipSqrt(a.x), PxRecipSqrt(a.y), PxRecipSqrt(a.z)); } PX_FORCE_INLINE Vec3V V3RsqrtFast(const Vec3V a) { return Vec3V(PxRecipSqrt(a.x), PxRecipSqrt(a.y), PxRecipSqrt(a.z)); } PX_FORCE_INLINE Vec3V V3ScaleAdd(const Vec3V a, const FloatV b, const Vec3V c) { return V3Add(V3Scale(a, b), c); } PX_FORCE_INLINE Vec3V V3NegScaleSub(const Vec3V a, const FloatV b, const Vec3V c) { return V3Sub(c, V3Scale(a, b)); } PX_FORCE_INLINE Vec3V V3MulAdd(const Vec3V a, const Vec3V b, const Vec3V c) { return V3Add(V3Mul(a, b), c); } PX_FORCE_INLINE Vec3V V3NegMulSub(const Vec3V a, const Vec3V b, const Vec3V c) { return V3Sub(c, V3Mul(a, b)); } PX_FORCE_INLINE FloatV V3Dot(const Vec3V a, const Vec3V b) { return FloatV(a.x * b.x + a.y * b.y + a.z * b.z); } PX_FORCE_INLINE VecCrossV V3PrepareCross(const Vec3VArg normal) { return normal; } PX_FORCE_INLINE Vec3V V3Cross(const Vec3V a, const Vec3V b) { return Vec3V(a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x); } PX_FORCE_INLINE FloatV V3Length(const Vec3V a) { return FloatV(PxSqrt(a.x * a.x + a.y * a.y + a.z * a.z)); } PX_FORCE_INLINE FloatV V3LengthSq(const Vec3V a) { return FloatV(a.x * a.x + a.y * a.y + a.z * a.z); } PX_FORCE_INLINE Vec3V V3Normalize(const Vec3V a) { VECMATHAOS_ASSERT(a.x != 0 || a.y != 0 || a.z != 0); const PxF32 lengthInv = 1.0f / PxSqrt(a.x * a.x + a.y * a.y + a.z * a.z); return Vec3V(a.x * lengthInv, a.y * lengthInv, a.z * lengthInv); } PX_FORCE_INLINE Vec3V V3NormalizeSafe(const Vec3V a, const Vec3V unsafeReturnValue) { const PxF32 length = PxSqrt(a.x * a.x + a.y * a.y + a.z * a.z); if(PX_EPS_REAL >= length) { return unsafeReturnValue; } else { const PxF32 lengthInv = 1.0f / length; return Vec3V(a.x * lengthInv, a.y * lengthInv, a.z * lengthInv); } } PX_FORCE_INLINE Vec3V V3NormalizeFast(const Vec3V a) { VECMATHAOS_ASSERT(a.x != 0 || a.y != 0 || a.z != 0); const PxF32 lengthInv = 1.0f / PxSqrt(a.x * a.x + a.y * a.y + a.z * a.z); return Vec3V(a.x * 
lengthInv, a.y * lengthInv, a.z * lengthInv); } PX_FORCE_INLINE Vec3V V3Sel(const BoolV c, const Vec3V a, const Vec3V b) { return Vec3V(c.ux ? a.x : b.x, c.uy ? a.y : b.y, c.uz ? a.z : b.z); } PX_FORCE_INLINE BoolV V3IsGrtr(const Vec3V a, const Vec3V b) { return BoolV(BOOL_TO_U32(a.x > b.x), BOOL_TO_U32(a.y > b.y), BOOL_TO_U32(a.z > b.z), FALSE_TO_U32); } PX_FORCE_INLINE BoolV V3IsGrtrOrEq(const Vec3V a, const Vec3V b) { return BoolV(BOOL_TO_U32(a.x >= b.x), BOOL_TO_U32(a.y >= b.y), BOOL_TO_U32(a.z >= b.z), TRUE_TO_U32); } PX_FORCE_INLINE BoolV V3IsEq(const Vec3V a, const Vec3V b) { return BoolV(BOOL_TO_U32(a.x == b.x), BOOL_TO_U32(a.y == b.y), BOOL_TO_U32(a.z == b.z), TRUE_TO_U32); } PX_FORCE_INLINE Vec3V V3Max(const Vec3V a, const Vec3V b) { return Vec3V(a.x > b.x ? a.x : b.x, a.y > b.y ? a.y : b.y, a.z > b.z ? a.z : b.z); } PX_FORCE_INLINE Vec3V V3Min(const Vec3V a, const Vec3V b) { return Vec3V(a.x < b.x ? a.x : b.x, a.y < b.y ? a.y : b.y, a.z < b.z ? a.z : b.z); } PX_FORCE_INLINE FloatV V3ExtractMax(const Vec3V a) { const PxF32 t0 = (a.x >= a.y) ? a.x : a.y; return t0 >= a.z ? t0 : a.z; } PX_FORCE_INLINE FloatV V3ExtractMin(const Vec3V a) { const PxF32 t0 = (a.x <= a.y) ? a.x : a.y; return t0 <= a.z ? t0 : a.z; } // return (a >= 0.0f) ? 1.0f : -1.0f; PX_FORCE_INLINE Vec3V V3Sign(const Vec3V a) { return Vec3V((a.x >= 0.f ? 1.f : -1.f), (a.y >= 0.f ? 1.f : -1.f), (a.z >= 0.f ? 
1.f : -1.f)); } PX_FORCE_INLINE Vec3V V3Clamp(const Vec3V a, const Vec3V minV, const Vec3V maxV) { return V3Max(V3Min(a, maxV), minV); } PX_FORCE_INLINE Vec3V V3Abs(const Vec3V a) { return V3Max(a, V3Neg(a)); } PX_FORCE_INLINE PxU32 V3AllGrtr(const Vec3V a, const Vec3V b) { return BOOL_TO_U32((a.x > b.x) & (a.y > b.y) & (a.z > b.z)); } PX_FORCE_INLINE PxU32 V3AllGrtrOrEq(const Vec3V a, const Vec3V b) { return BOOL_TO_U32((a.x >= b.x) & (a.y >= b.y) & (a.z >= b.z)); } PX_FORCE_INLINE PxU32 V3AllEq(const Vec3V a, const Vec3V b) { return BOOL_TO_U32((a.x == b.x) & (a.y == b.y) & (a.z == b.z)); } PX_FORCE_INLINE Vec3V V3Round(const Vec3V a) { return Vec3V(floorf(a.x + 0.5f), floorf(a.y + 0.5f), floorf(a.z + 0.5f)); } PX_FORCE_INLINE Vec3V V3Sin(const Vec3V a) { return Vec3V(sinf(a.x), sinf(a.y), sinf(a.z)); } PX_FORCE_INLINE Vec3V V3Cos(const Vec3V a) { return Vec3V(cosf(a.x), cosf(a.y), cosf(a.z)); } PX_FORCE_INLINE Vec3V V3PermYZZ(const Vec3V a) { return Vec3V(a.y, a.z, a.z); } PX_FORCE_INLINE Vec3V V3PermXYX(const Vec3V a) { return Vec3V(a.x, a.y, a.x); } PX_FORCE_INLINE Vec3V V3PermYZX(const Vec3V a) { return Vec3V(a.y, a.z, a.x); } PX_FORCE_INLINE Vec3V V3PermZXY(const Vec3V a) { return Vec3V(a.z, a.x, a.y); } PX_FORCE_INLINE Vec3V V3PermZZY(const Vec3V a) { return Vec3V(a.z, a.z, a.y); } PX_FORCE_INLINE Vec3V V3PermYXX(const Vec3V a) { return Vec3V(a.y, a.x, a.x); } PX_FORCE_INLINE Vec3V V3Perm_Zero_1Z_0Y(const Vec3V v0, const Vec3V v1) { return Vec3V(0.0f, v1.z, v0.y); } PX_FORCE_INLINE Vec3V V3Perm_0Z_Zero_1X(const Vec3V v0, const Vec3V v1) { return Vec3V(v0.z, 0.0f, v1.x); } PX_FORCE_INLINE Vec3V V3Perm_1Y_0X_Zero(const Vec3V v0, const Vec3V v1) { return Vec3V(v1.y, v0.x, 0.0f); } PX_FORCE_INLINE FloatV V3SumElems(const Vec3V a) { return FloatV(a.x + a.y + a.z); } PX_FORCE_INLINE PxU32 V3OutOfBounds(const Vec3V a, const Vec3V min, const Vec3V max) { return BOOL_TO_U32(a.x > max.x || a.y > max.y || a.z > max.z || a.x < min.x || a.y < min.y || a.z < min.z); } 
PX_FORCE_INLINE PxU32 V3InBounds(const Vec3V a, const Vec3V min, const Vec3V max) { return BOOL_TO_U32(a.x <= max.x && a.y <= max.y && a.z <= max.z && a.x >= min.x && a.y >= min.y && a.z >= min.z); } PX_FORCE_INLINE PxU32 V3OutOfBounds(const Vec3V a, const Vec3V bounds) { return V3OutOfBounds(a, V3Neg(bounds), bounds); } PX_FORCE_INLINE PxU32 V3InBounds(const Vec3V a, const Vec3V bounds) { return V3InBounds(a, V3Neg(bounds), bounds); } PX_FORCE_INLINE void V3Transpose(Vec3V& col0, Vec3V& col1, Vec3V& col2) { const PxF32 t01 = col0.y, t02 = col0.z, t12 = col1.z; col0.y = col1.x; col0.z = col2.x; col1.z = col2.y; col1.x = t01; col2.x = t02; col2.y = t12; } ///////////////////////// // VEC4V ///////////////////////// PX_FORCE_INLINE Vec4V V4Splat(const FloatV f) { return Vec4V(f.x, f.x, f.x, f.x); } PX_FORCE_INLINE Vec4V V4Merge(const FloatV* const floatVArray) { return Vec4V(floatVArray[0].x, floatVArray[1].x, floatVArray[2].x, floatVArray[3].x); } PX_FORCE_INLINE Vec4V V4Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z, const FloatVArg w) { return Vec4V(x.x, y.x, z.x, w.x); } PX_FORCE_INLINE Vec4V V4MergeW(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w) { return Vec4V(x.w, y.w, z.w, w.w); } PX_FORCE_INLINE Vec4V V4MergeZ(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w) { return Vec4V(x.z, y.z, z.z, w.z); } PX_FORCE_INLINE Vec4V V4MergeY(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w) { return Vec4V(x.y, y.y, z.y, w.y); } PX_FORCE_INLINE Vec4V V4MergeX(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w) { return Vec4V(x.x, y.x, z.x, w.x); } PX_FORCE_INLINE Vec4V V4UnpackXY(const Vec4VArg a, const Vec4VArg b) { return Vec4V(a.x, b.x, a.y, b.y); } PX_FORCE_INLINE Vec4V V4UnpackZW(const Vec4VArg a, const Vec4VArg b) { return Vec4V(a.z, b.z, a.w, b.w); } PX_FORCE_INLINE Vec4V V4UnitX() { return Vec4V(1.0f, 0.0f, 0.0f, 0.0f); } PX_FORCE_INLINE Vec4V V4UnitY() { return 
Vec4V(0.0f, 1.0f, 0.0f, 0.0f); } PX_FORCE_INLINE Vec4V V4UnitZ() { return Vec4V(0.0f, 0.0f, 1.0f, 0.0f); } PX_FORCE_INLINE Vec4V V4UnitW() { return Vec4V(0.0f, 0.0f, 0.0f, 1.0f); } PX_FORCE_INLINE FloatV V4GetX(const Vec4V f) { return FloatV(f.x); } PX_FORCE_INLINE FloatV V4GetY(const Vec4V f) { return FloatV(f.y); } PX_FORCE_INLINE FloatV V4GetZ(const Vec4V f) { return FloatV(f.z); } PX_FORCE_INLINE FloatV V4GetW(const Vec4V f) { return FloatV(f.w); } PX_FORCE_INLINE Vec4V V4SetX(const Vec4V v, const FloatV f) { return Vec4V(f.x, v.y, v.z, v.w); } PX_FORCE_INLINE Vec4V V4SetY(const Vec4V v, const FloatV f) { return Vec4V(v.x, f.x, v.z, v.w); } PX_FORCE_INLINE Vec4V V4SetZ(const Vec4V v, const FloatV f) { return Vec4V(v.x, v.y, f.x, v.w); } PX_FORCE_INLINE Vec4V V4SetW(const Vec4V v, const FloatV f) { return Vec4V(v.x, v.y, v.z, f.x); } PX_FORCE_INLINE Vec4V V4SetW(const Vec3V v, const FloatV f) { return Vec4V(v.x, v.y, v.z, f.x); } PX_FORCE_INLINE Vec4V V4ClearW(const Vec4V v) { return Vec4V(v.x, v.y, v.z, 0.0f); } PX_FORCE_INLINE Vec4V V4PermYXWZ(const Vec4V v) { return Vec4V(v.y, v.x, v.w, v.z); } PX_FORCE_INLINE Vec4V V4PermXZXZ(const Vec4V v) { return Vec4V(v.x, v.z, v.x, v.z); } PX_FORCE_INLINE Vec4V V4PermYWYW(const Vec4V v) { return Vec4V(v.y, v.w, v.y, v.w); } PX_FORCE_INLINE Vec4V V4PermYZXW(const Vec4V v) { return Vec4V(v.y, v.z, v.x, v.w); } PX_FORCE_INLINE Vec4V V4PermZWXY(const Vec4V v) { return Vec4V(v.z, v.w, v.x, v.y); } template <PxU8 _x, PxU8 _y, PxU8 _z, PxU8 _w> PX_FORCE_INLINE Vec4V V4Perm(const Vec4V v) { const PxF32 f[4] = { v.x, v.y, v.z, v.w }; return Vec4V(f[_x], f[_y], f[_z], f[_w]); } PX_FORCE_INLINE Vec4V V4Zero() { return V4Load(0.0f); } PX_FORCE_INLINE Vec4V V4One() { return V4Load(1.0f); } PX_FORCE_INLINE Vec4V V4Eps() { return V4Load(PX_EPS_REAL); } PX_FORCE_INLINE Vec4V V4Neg(const Vec4V c) { return Vec4V(-c.x, -c.y, -c.z, -c.w); } PX_FORCE_INLINE Vec4V V4Add(const Vec4V a, const Vec4V b) { return Vec4V(a.x + b.x, a.y + b.y, a.z + 
b.z, a.w + b.w); }

// ---------------------------------------------------------------------------
// Vec4V arithmetic — scalar emulation of the 4-lane SIMD vector intrinsics.
// FloatV arguments carry the splatted scalar in their .x lane.
// ---------------------------------------------------------------------------
PX_FORCE_INLINE Vec4V V4Sub(const Vec4V a, const Vec4V b) { return Vec4V(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w); }
PX_FORCE_INLINE Vec4V V4Scale(const Vec4V a, const FloatV b) { return Vec4V(a.x * b.x, a.y * b.x, a.z * b.x, a.w * b.x); }
PX_FORCE_INLINE Vec4V V4Mul(const Vec4V a, const Vec4V b) { return Vec4V(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w); }
// Scale by reciprocal: one divide instead of four.
PX_FORCE_INLINE Vec4V V4ScaleInv(const Vec4V a, const FloatV b)
{
	const PxF32 bInv = 1.0f / b.x;
	return Vec4V(a.x * bInv, a.y * bInv, a.z * bInv, a.w * bInv);
}
PX_FORCE_INLINE Vec4V V4Div(const Vec4V a, const Vec4V b)
{
	VECMATHAOS_ASSERT(b.x != 0 && b.y != 0 && b.z != 0 && b.w != 0);
	return Vec4V(a.x / b.x, a.y / b.y, a.z / b.z, a.w / b.w);
}
// "Fast" variants are identical in the scalar path; on real SIMD targets they
// map to lower-precision estimate instructions.
PX_FORCE_INLINE Vec4V V4ScaleInvFast(const Vec4V a, const FloatV b)
{
	const PxF32 bInv = 1.0f / b.x;
	return Vec4V(a.x * bInv, a.y * bInv, a.z * bInv, a.w * bInv);
}
PX_FORCE_INLINE Vec4V V4DivFast(const Vec4V a, const Vec4V b) { return Vec4V(a.x / b.x, a.y / b.y, a.z / b.z, a.w / b.w); }
PX_FORCE_INLINE Vec4V V4Recip(const Vec4V a) { return Vec4V(1.0f / a.x, 1.0f / a.y, 1.0f / a.z, 1.0f / a.w); }
PX_FORCE_INLINE Vec4V V4RecipFast(const Vec4V a) { return Vec4V(1.0f / a.x, 1.0f / a.y, 1.0f / a.z, 1.0f / a.w); }
PX_FORCE_INLINE Vec4V V4Rsqrt(const Vec4V a) { return Vec4V(PxRecipSqrt(a.x), PxRecipSqrt(a.y), PxRecipSqrt(a.z), PxRecipSqrt(a.w)); }
PX_FORCE_INLINE Vec4V V4RsqrtFast(const Vec4V a) { return Vec4V(PxRecipSqrt(a.x), PxRecipSqrt(a.y), PxRecipSqrt(a.z), PxRecipSqrt(a.w)); }
PX_FORCE_INLINE Vec4V V4Sqrt(const Vec4V a) { return Vec4V(PxSqrt(a.x), PxSqrt(a.y), PxSqrt(a.z), PxSqrt(a.w)); }
// Fused-style composites, expressed via the primitives above.
PX_FORCE_INLINE Vec4V V4ScaleAdd(const Vec4V a, const FloatV b, const Vec4V c) { return V4Add(V4Scale(a, b), c); }
PX_FORCE_INLINE Vec4V V4NegScaleSub(const Vec4V a, const FloatV b, const Vec4V c) { return V4Sub(c, V4Scale(a, b)); }
PX_FORCE_INLINE Vec4V V4MulAdd(const Vec4V a, const Vec4V b, const Vec4V c) { return V4Add(V4Mul(a, b), c); }
PX_FORCE_INLINE Vec4V V4NegMulSub(const Vec4V a, const Vec4V b, const Vec4V c) { return V4Sub(c, V4Mul(a, b)); }
// Horizontal reductions and dot products.
PX_FORCE_INLINE FloatV V4SumElements(const Vec4V a) { return FloatV(a.x + a.y + a.z + a.w); }
PX_FORCE_INLINE FloatV V4Dot(const Vec4V a, const Vec4V b) { return FloatV(a.x * b.x + a.y * b.y + a.z * b.z + a.w * b.w); }
PX_FORCE_INLINE FloatV V4Dot3(const Vec4V a, const Vec4V b) { return FloatV(a.x * b.x + a.y * b.y + a.z * b.z); }
// 3D cross product of the xyz lanes; the w lane of the result is zero.
PX_FORCE_INLINE Vec4V V4Cross(const Vec4V a, const Vec4V b)
{
	return Vec4V(a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x, 0.0f);
}
PX_FORCE_INLINE FloatV V4Length(const Vec4V a) { return FloatV(PxSqrt(a.x * a.x + a.y * a.y + a.z * a.z + a.w * a.w)); }
PX_FORCE_INLINE FloatV V4LengthSq(const Vec4V a) { return V4Dot(a, a); }
PX_FORCE_INLINE Vec4V V4Normalize(const Vec4V a)
{
	VECMATHAOS_ASSERT(0 != a.x || 0 != a.y || 0 != a.z || 0 != a.w);
	const FloatV length = FloatV(V4Length(a));
	return V4ScaleInv(a, length);
}
// Returns unsafeReturnValue instead of dividing when the length is ~zero.
PX_FORCE_INLINE Vec4V V4NormalizeSafe(const Vec4V a, const Vec4V unsafeReturnValue)
{
	const FloatV length = FloatV(V4Length(a));
	if(PX_EPS_REAL >= length.x)
	{
		return unsafeReturnValue;
	}
	else
	{
		return V4ScaleInv(a, length);
	}
}
PX_FORCE_INLINE Vec4V V4NormalizeFast(const Vec4V a)
{
	VECMATHAOS_ASSERT(0 != a.x || 0 != a.y || 0 != a.z || 0 != a.w);
	const FloatV length = FloatV(V4Length(a));
	return V4ScaleInv(a, length);
}
// Per-lane select: lane i of the result is a's lane where c's lane is true.
PX_FORCE_INLINE Vec4V V4Sel(const BoolV c, const Vec4V a, const Vec4V b)
{
	return Vec4V(c.ux ? a.x : b.x, c.uy ? a.y : b.y, c.uz ? a.z : b.z, c.uw ? a.w : b.w);
}
// Per-lane comparisons producing a BoolV mask.
PX_FORCE_INLINE BoolV V4IsGrtr(const Vec4V a, const Vec4V b)
{
	return BoolV(BOOL_TO_U32(a.x > b.x), BOOL_TO_U32(a.y > b.y), BOOL_TO_U32(a.z > b.z), BOOL_TO_U32(a.w > b.w));
}
PX_FORCE_INLINE BoolV V4IsGrtrOrEq(const Vec4V a, const Vec4V b)
{
	return BoolV(BOOL_TO_U32(a.x >= b.x), BOOL_TO_U32(a.y >= b.y), BOOL_TO_U32(a.z >= b.z), BOOL_TO_U32(a.w >= b.w));
}
PX_FORCE_INLINE BoolV V4IsEq(const Vec4V a, const Vec4V b)
{
	return BoolV(BOOL_TO_U32(a.x == b.x), BOOL_TO_U32(a.y == b.y), BOOL_TO_U32(a.z == b.z), BOOL_TO_U32(a.w == b.w));
}
PX_FORCE_INLINE Vec4V V4Max(const Vec4V a, const Vec4V b)
{
	return Vec4V(a.x > b.x ? a.x : b.x, a.y > b.y ? a.y : b.y, a.z > b.z ? a.z : b.z, a.w > b.w ? a.w : b.w);
}
PX_FORCE_INLINE Vec4V V4Min(const Vec4V a, const Vec4V b)
{
	return Vec4V(a.x < b.x ? a.x : b.x, a.y < b.y ? a.y : b.y, a.z < b.z ? a.z : b.z, a.w < b.w ? a.w : b.w);
}
// Horizontal max of the four lanes.
// FIX: t1 previously evaluated "(a.z >= a.w) ? a.x : a.w" — it selected the x
// lane instead of the z lane, so any vector whose maximum lives in z returned
// the wrong value. Corrected to compare/select a.z.
PX_FORCE_INLINE FloatV V4ExtractMax(const Vec4V a)
{
	const PxF32 t0 = (a.x >= a.y) ? a.x : a.y;
	const PxF32 t1 = (a.z >= a.w) ? a.z : a.w;
	return t0 >= t1 ? t0 : t1;
}
// Horizontal min of the four lanes.
// FIX: same copy/paste defect as V4ExtractMax — t1 selected a.x instead of
// a.z, losing a minimum that lives in the z lane. Corrected to select a.z.
PX_FORCE_INLINE FloatV V4ExtractMin(const Vec4V a)
{
	const PxF32 t0 = (a.x <= a.y) ? a.x : a.y;
	const PxF32 t1 = (a.z <= a.w) ? a.z : a.w;
	return t0 <= t1 ? t0 : t1;
}
PX_FORCE_INLINE Vec4V V4Clamp(const Vec4V a, const Vec4V minV, const Vec4V maxV) { return V4Max(V4Min(a, maxV), minV); }
// Round-half-up per lane (floor(x + 0.5)).
PX_FORCE_INLINE Vec4V V4Round(const Vec4V a) { return Vec4V(floorf(a.x + 0.5f), floorf(a.y + 0.5f), floorf(a.z + 0.5f), floorf(a.w + 0.5f)); }
PX_FORCE_INLINE Vec4V V4Sin(const Vec4V a) { return Vec4V(sinf(a.x), sinf(a.y), sinf(a.z), sinf(a.w)); }
PX_FORCE_INLINE Vec4V V4Cos(const Vec4V a) { return Vec4V(cosf(a.x), cosf(a.y), cosf(a.z), cosf(a.w)); }
// Scalar all/any reductions of per-lane comparisons.
PX_FORCE_INLINE PxU32 V4AllGrtr(const Vec4V a, const Vec4V b) { return BOOL_TO_U32((a.x > b.x) & (a.y > b.y) & (a.z > b.z) & (a.w > b.w)); }
PX_FORCE_INLINE PxU32 V4AllGrtrOrEq(const Vec4V a, const Vec4V b) { return BOOL_TO_U32((a.x >= b.x) & (a.y >= b.y) & (a.z >= b.z) & (a.w >= b.w)); }
PX_FORCE_INLINE PxU32 V4AllGrtrOrEq3(const Vec4V a, const Vec4V b) { return BOOL_TO_U32((a.x >= b.x) & (a.y >= b.y) & (a.z >= b.z)); }
PX_FORCE_INLINE PxU32 V4AllEq(const Vec4V a, const Vec4V b) { return BOOL_TO_U32((a.x == b.x) & (a.y == b.y) & (a.z == b.z) & (a.w == b.w)); }
PX_FORCE_INLINE PxU32 V4AnyGrtr3(const Vec4V a, const Vec4V b) { return BOOL_TO_U32((a.x > b.x) | (a.y > b.y) | (a.z > b.z)); }
// In-place 4x4 transpose: save the upper triangle, copy the lower triangle up,
// then write the saved values into the lower triangle.
PX_FORCE_INLINE void V4Transpose(Vec4V& col0, Vec4V& col1, Vec4V& col2, Vec4V& col3)
{
	const PxF32 t01 = col0.y, t02 = col0.z, t03 = col0.w;
	const PxF32 t12 = col1.z, t13 = col1.w;
	const PxF32 t23 = col2.w;
	col0.y = col1.x;
	col0.z = col2.x;
	col0.w = col3.x;
	col1.z = col2.y;
	col1.w = col3.y;
	col2.w = col3.z;
	col1.x = t01;
	col2.x = t02;
	col3.x = t03;
	col2.y = t12;
	col3.y = t13;
	col3.z = t23;
}
// BoolV constants: one function per lane pattern (F = all-bits-clear, T = all-bits-set).
PX_FORCE_INLINE BoolV BFFFF() { return BoolV(FALSE_TO_U32, FALSE_TO_U32, FALSE_TO_U32, FALSE_TO_U32); }
PX_FORCE_INLINE BoolV BFFFT() { return BoolV(FALSE_TO_U32, FALSE_TO_U32, FALSE_TO_U32, TRUE_TO_U32); }
PX_FORCE_INLINE BoolV BFFTF() { return BoolV(FALSE_TO_U32, FALSE_TO_U32, TRUE_TO_U32, FALSE_TO_U32); }
PX_FORCE_INLINE BoolV BFFTT() { return BoolV(FALSE_TO_U32, FALSE_TO_U32, TRUE_TO_U32, TRUE_TO_U32); }
PX_FORCE_INLINE BoolV BFTFF() { return BoolV(FALSE_TO_U32, TRUE_TO_U32, FALSE_TO_U32, FALSE_TO_U32); } PX_FORCE_INLINE BoolV BFTFT() { return BoolV(FALSE_TO_U32, TRUE_TO_U32, FALSE_TO_U32, TRUE_TO_U32); } PX_FORCE_INLINE BoolV BFTTF() { return BoolV(FALSE_TO_U32, TRUE_TO_U32, TRUE_TO_U32, FALSE_TO_U32); } PX_FORCE_INLINE BoolV BFTTT() { return BoolV(FALSE_TO_U32, TRUE_TO_U32, TRUE_TO_U32, TRUE_TO_U32); } PX_FORCE_INLINE BoolV BTFFF() { return BoolV(TRUE_TO_U32, FALSE_TO_U32, FALSE_TO_U32, FALSE_TO_U32); } PX_FORCE_INLINE BoolV BTFFT() { return BoolV(TRUE_TO_U32, FALSE_TO_U32, FALSE_TO_U32, TRUE_TO_U32); } PX_FORCE_INLINE BoolV BTFTF() { return BoolV(TRUE_TO_U32, FALSE_TO_U32, TRUE_TO_U32, FALSE_TO_U32); } PX_FORCE_INLINE BoolV BTFTT() { return BoolV(TRUE_TO_U32, FALSE_TO_U32, TRUE_TO_U32, TRUE_TO_U32); } PX_FORCE_INLINE BoolV BTTFF() { return BoolV(TRUE_TO_U32, TRUE_TO_U32, FALSE_TO_U32, FALSE_TO_U32); } PX_FORCE_INLINE BoolV BTTFT() { return BoolV(TRUE_TO_U32, TRUE_TO_U32, FALSE_TO_U32, TRUE_TO_U32); } PX_FORCE_INLINE BoolV BTTTF() { return BoolV(TRUE_TO_U32, TRUE_TO_U32, TRUE_TO_U32, FALSE_TO_U32); } PX_FORCE_INLINE BoolV BTTTT() { return BoolV(TRUE_TO_U32, TRUE_TO_U32, TRUE_TO_U32, TRUE_TO_U32); } PX_FORCE_INLINE BoolV BXMask() { return BTFFF(); } PX_FORCE_INLINE BoolV BYMask() { return BFTFF(); } PX_FORCE_INLINE BoolV BZMask() { return BFFTF(); } PX_FORCE_INLINE BoolV BWMask() { return BFFFT(); } PX_FORCE_INLINE BoolV BGetX(const BoolV a) { return BoolV(a.ux, a.ux, a.ux, a.ux); } PX_FORCE_INLINE BoolV BGetY(const BoolV a) { return BoolV(a.uy, a.uy, a.uy, a.uy); } PX_FORCE_INLINE BoolV BGetZ(const BoolV a) { return BoolV(a.uz, a.uz, a.uz, a.uz); } PX_FORCE_INLINE BoolV BGetW(const BoolV a) { return BoolV(a.uw, a.uw, a.uw, a.uw); } PX_FORCE_INLINE BoolV BSetX(const BoolV v, const BoolV f) { return BoolV(f.ux, v.uy, v.uz, v.uw); } PX_FORCE_INLINE BoolV BSetY(const BoolV v, const BoolV f) { return BoolV(v.ux, f.uy, v.uz, v.uw); } PX_FORCE_INLINE BoolV BSetZ(const 
BoolV v, const BoolV f) { return BoolV(v.ux, v.uy, f.uz, v.uw); } PX_FORCE_INLINE BoolV BSetW(const BoolV v, const BoolV f) { return BoolV(v.ux, v.uy, v.uz, f.uw); } template <int index> BoolV BSplatElement(BoolV a) { PxU32* b = reinterpret_cast<PxU32*>(&a); return BoolV(b[index], b[index], b[index], b[index]); } PX_FORCE_INLINE BoolV BAnd(const BoolV a, const BoolV b) { return BoolV(BOOL_TO_U32(a.ux && b.ux), BOOL_TO_U32(a.uy && b.uy), BOOL_TO_U32(a.uz && b.uz), BOOL_TO_U32(a.uw && b.uw)); } PX_FORCE_INLINE BoolV BAndNot(const BoolV a, const BoolV b) { return BoolV(a.ux & ~b.ux, a.uy & ~b.uy, a.uz & ~b.uz, a.uw & ~b.uw); } PX_FORCE_INLINE BoolV BNot(const BoolV a) { return BoolV(~a.ux, ~a.uy, ~a.uz, ~a.uw); } PX_FORCE_INLINE BoolV BOr(const BoolV a, const BoolV b) { return BoolV(BOOL_TO_U32(a.ux || b.ux), BOOL_TO_U32(a.uy || b.uy), BOOL_TO_U32(a.uz || b.uz), BOOL_TO_U32(a.uw || b.uw)); } PX_FORCE_INLINE PxU32 BAllEq(const BoolV a, const BoolV b) { return (a.ux == b.ux && a.uy == b.uy && a.uz == b.uz && a.uw == b.uw ? 1 : 0); } PX_FORCE_INLINE PxU32 BAllEqTTTT(const BoolV a) { return BAllEq(a, BTTTT()); } PX_FORCE_INLINE PxU32 BAllEqFFFF(const BoolV a) { return BAllEq(a, BFFFF()); } PX_FORCE_INLINE BoolV BAllTrue4(const BoolV a) { return (a.ux & a.uy & a.uz & a.uw) ? BTTTT() : BFFFF(); } PX_FORCE_INLINE BoolV BAnyTrue4(const BoolV a) { return (a.ux | a.uy | a.uz | a.uw) ? BTTTT() : BFFFF(); } PX_FORCE_INLINE BoolV BAllTrue3(const BoolV a) { return (a.ux & a.uy & a.uz) ? BTTTT() : BFFFF(); } PX_FORCE_INLINE BoolV BAnyTrue3(const BoolV a) { return (a.ux | a.uy | a.uz) ? 
BTTTT() : BFFFF(); } PX_FORCE_INLINE PxU32 BGetBitMask(const BoolV a) { return (a.ux & 1) | (a.uy & 2) | (a.uz & 4) | (a.uw & 8); } ////////////////////////////////// // MAT33V ////////////////////////////////// PX_FORCE_INLINE Vec3V M33MulV3(const Mat33V& a, const Vec3V b) { return Vec3V(a.col0.x * b.x + a.col1.x * b.y + a.col2.x * b.z, a.col0.y * b.x + a.col1.y * b.y + a.col2.y * b.z, a.col0.z * b.x + a.col1.z * b.y + a.col2.z * b.z); } PX_FORCE_INLINE Vec3V M33TrnspsMulV3(const Mat33V& a, const Vec3V b) { return Vec3V(a.col0.x * b.x + a.col0.y * b.y + a.col0.z * b.z, a.col1.x * b.x + a.col1.y * b.y + a.col1.z * b.z, a.col2.x * b.x + a.col2.y * b.y + a.col2.z * b.z); } PX_FORCE_INLINE Vec3V M33MulV3AddV3(const Mat33V& A, const Vec3V b, const Vec3V c) { const FloatV x = V3GetX(b); const FloatV y = V3GetY(b); const FloatV z = V3GetZ(b); Vec3V result = V3ScaleAdd(A.col0, x, c); result = V3ScaleAdd(A.col1, y, result); return V3ScaleAdd(A.col2, z, result); } PX_FORCE_INLINE Mat33V M33MulM33(const Mat33V& a, const Mat33V& b) { return Mat33V(M33MulV3(a, b.col0), M33MulV3(a, b.col1), M33MulV3(a, b.col2)); } PX_FORCE_INLINE Mat33V M33Add(const Mat33V& a, const Mat33V& b) { return Mat33V(V3Add(a.col0, b.col0), V3Add(a.col1, b.col1), V3Add(a.col2, b.col2)); } PX_FORCE_INLINE Mat33V M33Scale(const Mat33V& a, const FloatV& b) { return Mat33V(V3Scale(a.col0, b), V3Scale(a.col1, b), V3Scale(a.col2, b)); } PX_FORCE_INLINE Mat33V M33Sub(const Mat33V& a, const Mat33V& b) { return Mat33V(V3Sub(a.col0, b.col0), V3Sub(a.col1, b.col1), V3Sub(a.col2, b.col2)); } PX_FORCE_INLINE Mat33V M33Neg(const Mat33V& a) { return Mat33V(V3Neg(a.col0), V3Neg(a.col1), V3Neg(a.col2)); } PX_FORCE_INLINE Mat33V M33Abs(const Mat33V& a) { return Mat33V(V3Abs(a.col0), V3Abs(a.col1), V3Abs(a.col2)); } PX_FORCE_INLINE Mat33V M33Diagonal(const Vec3VArg d) { const Vec3V x = V3Mul(V3UnitX(), d); const Vec3V y = V3Mul(V3UnitY(), d); const Vec3V z = V3Mul(V3UnitZ(), d); return Mat33V(x, y, z); } PX_FORCE_INLINE 
Mat33V M33Inverse(const Mat33V& a) { const PxF32 det = a.col0.x * (a.col1.y * a.col2.z - a.col1.z * a.col2.y) - a.col1.x * (a.col0.y * a.col2.z - a.col2.y * a.col0.z) + a.col2.x * (a.col0.y * a.col1.z - a.col1.y * a.col0.z); const PxF32 invDet = 1.0f / det; Mat33V ret; ret.col0.x = invDet * (a.col1.y * a.col2.z - a.col2.y * a.col1.z); ret.col0.y = invDet * (a.col2.y * a.col0.z - a.col0.y * a.col2.z); ret.col0.z = invDet * (a.col0.y * a.col1.z - a.col1.y * a.col0.z); ret.col1.x = invDet * (a.col2.x * a.col1.z - a.col1.x * a.col2.z); ret.col1.y = invDet * (a.col0.x * a.col2.z - a.col2.x * a.col0.z); ret.col1.z = invDet * (a.col1.x * a.col0.z - a.col0.x * a.col1.z); ret.col2.x = invDet * (a.col1.x * a.col2.y - a.col2.x * a.col1.y); ret.col2.y = invDet * (a.col2.x * a.col0.y - a.col0.x * a.col2.y); ret.col2.z = invDet * (a.col0.x * a.col1.y - a.col1.x * a.col0.y); return ret; } PX_FORCE_INLINE Mat33V Mat33V_From_PxMat33(const PxMat33& m) { return Mat33V(V3LoadU(m.column0), V3LoadU(m.column1), V3LoadU(m.column2)); } PX_FORCE_INLINE void PxMat33_From_Mat33V(const Mat33V& m, PxMat33& out) { PX_ASSERT((size_t(&out) & 15) == 0); V3StoreU(m.col0, out.column0); V3StoreU(m.col1, out.column1); V3StoreU(m.col2, out.column2); } PX_FORCE_INLINE Mat33V M33Trnsps(const Mat33V& a) { return Mat33V(Vec3V(a.col0.x, a.col1.x, a.col2.x), Vec3V(a.col0.y, a.col1.y, a.col2.y), Vec3V(a.col0.z, a.col1.z, a.col2.z)); } PX_FORCE_INLINE Mat33V M33Identity() { return Mat33V(V3UnitX(), V3UnitY(), V3UnitZ()); } ////////////////////////////////// // MAT34V ////////////////////////////////// PX_FORCE_INLINE Vec3V M34MulV3(const Mat34V& a, const Vec3V b) { return Vec3V(a.col0.x * b.x + a.col1.x * b.y + a.col2.x * b.z + a.col3.x, a.col0.y * b.x + a.col1.y * b.y + a.col2.y * b.z + a.col3.y, a.col0.z * b.x + a.col1.z * b.y + a.col2.z * b.z + a.col3.z); } PX_FORCE_INLINE Vec3V M34Mul33V3(const Mat34V& a, const Vec3V b) { return Vec3V(a.col0.x * b.x + a.col1.x * b.y + a.col2.x * b.z, a.col0.y * b.x + 
a.col1.y * b.y + a.col2.y * b.z, a.col0.z * b.x + a.col1.z * b.y + a.col2.z * b.z); } PX_FORCE_INLINE Vec3V M34TrnspsMul33V3(const Mat34V& a, const Vec3V b) { return Vec3V(a.col0.x * b.x + a.col0.y * b.y + a.col0.z * b.z, a.col1.x * b.x + a.col1.y * b.y + a.col1.z * b.z, a.col2.x * b.x + a.col2.y * b.y + a.col2.z * b.z); } PX_FORCE_INLINE Mat34V M34MulM34(const Mat34V& a, const Mat34V& b) { return Mat34V(M34Mul33V3(a, b.col0), M34Mul33V3(a, b.col1), M34Mul33V3(a, b.col2), M34MulV3(a, b.col3)); } PX_FORCE_INLINE Mat33V M34MulM33(const Mat34V& a, const Mat33V& b) { return Mat33V(M34Mul33V3(a, b.col0), M34Mul33V3(a, b.col1), M34Mul33V3(a, b.col2)); } PX_FORCE_INLINE Mat33V M34Mul33V3(const Mat34V& a, const Mat33V& b) { return Mat33V(M34Mul33V3(a, b.col0), M34Mul33V3(a, b.col1), M34Mul33V3(a, b.col2)); } PX_FORCE_INLINE Mat33V M34Mul33MM34(const Mat34V& a, const Mat34V& b) { return Mat33V(M34Mul33V3(a, b.col0), M34Mul33V3(a, b.col1), M34Mul33V3(a, b.col2)); } PX_FORCE_INLINE Mat34V M34Add(const Mat34V& a, const Mat34V& b) { return Mat34V(V3Add(a.col0, b.col0), V3Add(a.col1, b.col1), V3Add(a.col2, b.col2), V3Add(a.col3, b.col3)); } PX_FORCE_INLINE Mat33V M34Trnsps33(const Mat34V& a) { return Mat33V(Vec3V(a.col0.x, a.col1.x, a.col2.x), Vec3V(a.col0.y, a.col1.y, a.col2.y), Vec3V(a.col0.z, a.col1.z, a.col2.z)); } ////////////////////////////////// // MAT44V ////////////////////////////////// PX_FORCE_INLINE Vec4V M44MulV4(const Mat44V& a, const Vec4V b) { return Vec4V(a.col0.x * b.x + a.col1.x * b.y + a.col2.x * b.z + a.col3.x * b.w, a.col0.y * b.x + a.col1.y * b.y + a.col2.y * b.z + a.col3.y * b.w, a.col0.z * b.x + a.col1.z * b.y + a.col2.z * b.z + a.col3.z * b.w, a.col0.w * b.x + a.col1.w * b.y + a.col2.w * b.z + a.col3.w * b.w); } PX_FORCE_INLINE Vec4V M44TrnspsMulV4(const Mat44V& a, const Vec4V b) { return Vec4V(a.col0.x * b.x + a.col0.y * b.y + a.col0.z * b.z + a.col0.w * b.w, a.col1.x * b.x + a.col1.y * b.y + a.col1.z * b.z + a.col1.w * b.w, a.col2.x * b.x + a.col2.y 
* b.y + a.col2.z * b.z + a.col2.w * b.w, a.col3.x * b.x + a.col3.y * b.y + a.col3.z * b.z + a.col3.w * b.w); } PX_FORCE_INLINE Mat44V M44MulM44(const Mat44V& a, const Mat44V& b) { return Mat44V(M44MulV4(a, b.col0), M44MulV4(a, b.col1), M44MulV4(a, b.col2), M44MulV4(a, b.col3)); } PX_FORCE_INLINE Mat44V M44Add(const Mat44V& a, const Mat44V& b) { return Mat44V(V4Add(a.col0, b.col0), V4Add(a.col1, b.col1), V4Add(a.col2, b.col2), V4Add(a.col3, b.col3)); } PX_FORCE_INLINE Mat44V M44Inverse(const Mat44V& a) { PxF32 tmp[12]; PxF32 dst[16]; PxF32 det; const PxF32 src[16] = { a.col0.x, a.col0.y, a.col0.z, a.col0.w, a.col1.x, a.col1.y, a.col1.z, a.col1.w, a.col2.x, a.col2.y, a.col2.z, a.col2.w, a.col3.x, a.col3.y, a.col3.z, a.col3.w }; tmp[0] = src[10] * src[15]; tmp[1] = src[11] * src[14]; tmp[2] = src[9] * src[15]; tmp[3] = src[11] * src[13]; tmp[4] = src[9] * src[14]; tmp[5] = src[10] * src[13]; tmp[6] = src[8] * src[15]; tmp[7] = src[11] * src[12]; tmp[8] = src[8] * src[14]; tmp[9] = src[10] * src[12]; tmp[10] = src[8] * src[13]; tmp[11] = src[9] * src[12]; dst[0] = tmp[0] * src[5] + tmp[3] * src[6] + tmp[4] * src[7]; dst[0] -= tmp[1] * src[5] + tmp[2] * src[6] + tmp[5] * src[7]; dst[1] = tmp[1] * src[4] + tmp[6] * src[6] + tmp[9] * src[7]; dst[1] -= tmp[0] * src[4] + tmp[7] * src[6] + tmp[8] * src[7]; dst[2] = tmp[2] * src[4] + tmp[7] * src[5] + tmp[10] * src[7]; dst[2] -= tmp[3] * src[4] + tmp[6] * src[5] + tmp[11] * src[7]; dst[3] = tmp[5] * src[4] + tmp[8] * src[5] + tmp[11] * src[6]; dst[3] -= tmp[4] * src[4] + tmp[9] * src[5] + tmp[10] * src[6]; dst[4] = tmp[1] * src[1] + tmp[2] * src[2] + tmp[5] * src[3]; dst[4] -= tmp[0] * src[1] + tmp[3] * src[2] + tmp[4] * src[3]; dst[5] = tmp[0] * src[0] + tmp[7] * src[2] + tmp[8] * src[3]; dst[5] -= tmp[1] * src[0] + tmp[6] * src[2] + tmp[9] * src[3]; dst[6] = tmp[3] * src[0] + tmp[6] * src[1] + tmp[11] * src[3]; dst[6] -= tmp[2] * src[0] + tmp[7] * src[1] + tmp[10] * src[3]; dst[7] = tmp[4] * src[0] + tmp[9] * src[1] + 
tmp[10] * src[2]; dst[7] -= tmp[5] * src[0] + tmp[8] * src[1] + tmp[11] * src[2]; tmp[0] = src[2] * src[7]; tmp[1] = src[3] * src[6]; tmp[2] = src[1] * src[7]; tmp[3] = src[3] * src[5]; tmp[4] = src[1] * src[6]; tmp[5] = src[2] * src[5]; tmp[6] = src[0] * src[7]; tmp[7] = src[3] * src[4]; tmp[8] = src[0] * src[6]; tmp[9] = src[2] * src[4]; tmp[10] = src[0] * src[5]; tmp[11] = src[1] * src[4]; dst[8] = tmp[0] * src[13] + tmp[3] * src[14] + tmp[4] * src[15]; dst[8] -= tmp[1] * src[13] + tmp[2] * src[14] + tmp[5] * src[15]; dst[9] = tmp[1] * src[12] + tmp[6] * src[14] + tmp[9] * src[15]; dst[9] -= tmp[0] * src[12] + tmp[7] * src[14] + tmp[8] * src[15]; dst[10] = tmp[2] * src[12] + tmp[7] * src[13] + tmp[10] * src[15]; dst[10] -= tmp[3] * src[12] + tmp[6] * src[13] + tmp[11] * src[15]; dst[11] = tmp[5] * src[12] + tmp[8] * src[13] + tmp[11] * src[14]; dst[11] -= tmp[4] * src[12] + tmp[9] * src[13] + tmp[10] * src[14]; dst[12] = tmp[2] * src[10] + tmp[5] * src[11] + tmp[1] * src[9]; dst[12] -= tmp[4] * src[11] + tmp[0] * src[9] + tmp[3] * src[10]; dst[13] = tmp[8] * src[11] + tmp[0] * src[8] + tmp[7] * src[10]; dst[13] -= tmp[6] * src[10] + tmp[9] * src[11] + tmp[1] * src[8]; dst[14] = tmp[6] * src[9] + tmp[11] * src[11] + tmp[3] * src[8]; dst[14] -= tmp[10] * src[11] + tmp[2] * src[8] + tmp[7] * src[9]; dst[15] = tmp[10] * src[10] + tmp[4] * src[8] + tmp[9] * src[9]; dst[15] -= tmp[8] * src[9] + tmp[11] * src[10] + tmp[5] * src[8]; det = src[0] * dst[0] + src[1] * dst[1] + src[2] * dst[2] + src[3] * dst[3]; det = 1.0f / det; for(PxU32 j = 0; j < 16; j++) { dst[j] *= det; } return Mat44V(Vec4V(dst[0], dst[4], dst[8], dst[12]), Vec4V(dst[1], dst[5], dst[9], dst[13]), Vec4V(dst[2], dst[6], dst[10], dst[14]), Vec4V(dst[3], dst[7], dst[11], dst[15])); } PX_FORCE_INLINE Mat44V M44Trnsps(const Mat44V& a) { return Mat44V(Vec4V(a.col0.x, a.col1.x, a.col2.x, a.col3.x), Vec4V(a.col0.y, a.col1.y, a.col2.y, a.col3.y), Vec4V(a.col0.z, a.col1.z, a.col2.z, a.col3.z), Vec4V(a.col0.w, 
a.col1.w, a.col2.w, a.col3.w)); } PX_FORCE_INLINE Vec4V V4LoadXYZW(const PxF32& x, const PxF32& y, const PxF32& z, const PxF32& w) { return Vec4V(x, y, z, w); } /* PX_FORCE_INLINE VecU16V V4U32PK(VecU32V a, VecU32V b) { return VecU16V( PxU16(PxClamp<PxU32>((a).u32[0], 0, 0xFFFF)), PxU16(PxClamp<PxU32>((a).u32[1], 0, 0xFFFF)), PxU16(PxClamp<PxU32>((a).u32[2], 0, 0xFFFF)), PxU16(PxClamp<PxU32>((a).u32[3], 0, 0xFFFF)), PxU16(PxClamp<PxU32>((b).u32[0], 0, 0xFFFF)), PxU16(PxClamp<PxU32>((b).u32[1], 0, 0xFFFF)), PxU16(PxClamp<PxU32>((b).u32[2], 0, 0xFFFF)), PxU16(PxClamp<PxU32>((b).u32[3], 0, 0xFFFF))); } */ PX_FORCE_INLINE VecU32V V4U32Sel(const BoolV c, const VecU32V a, const VecU32V b) { return VecU32V(c.ux ? a.u32[0] : b.u32[0], c.uy ? a.u32[1] : b.u32[1], c.uz ? a.u32[2] : b.u32[2], c.uw ? a.u32[3] : b.u32[3]); } PX_FORCE_INLINE VecU32V V4U32or(VecU32V a, VecU32V b) { return VecU32V((a).u32[0] | (b).u32[0], (a).u32[1] | (b).u32[1], (a).u32[2] | (b).u32[2], (a).u32[3] | (b).u32[3]); } PX_FORCE_INLINE VecU32V V4U32xor(VecU32V a, VecU32V b) { return VecU32V((a).u32[0] ^ (b).u32[0], (a).u32[1] ^ (b).u32[1], (a).u32[2] ^ (b).u32[2], (a).u32[3] ^ (b).u32[3]); } PX_FORCE_INLINE VecU32V V4U32and(VecU32V a, VecU32V b) { return VecU32V((a).u32[0] & (b).u32[0], (a).u32[1] & (b).u32[1], (a).u32[2] & (b).u32[2], (a).u32[3] & (b).u32[3]); } PX_FORCE_INLINE VecU32V V4U32Andc(VecU32V a, VecU32V b) { return VecU32V((a).u32[0] & ~(b).u32[0], (a).u32[1] & ~(b).u32[1], (a).u32[2] & ~(b).u32[2], (a).u32[3] & ~(b).u32[3]); } /* PX_FORCE_INLINE VecU16V V4U16Or(VecU16V a, VecU16V b) { return VecU16V( (a).u16[0]|(b).u16[0], (a).u16[1]|(b).u16[1], (a).u16[2]|(b).u16[2], (a).u16[3]|(b).u16[3], (a).u16[4]|(b).u16[4], (a).u16[5]|(b).u16[5], (a).u16[6]|(b).u16[6], (a).u16[7]|(b).u16[7]); } */ /* PX_FORCE_INLINE VecU16V V4U16And(VecU16V a, VecU16V b) { return VecU16V( (a).u16[0]&(b).u16[0], (a).u16[1]&(b).u16[1], (a).u16[2]&(b).u16[2], (a).u16[3]&(b).u16[3], (a).u16[4]&(b).u16[4], 
(a).u16[5]&(b).u16[5], (a).u16[6]&(b).u16[6], (a).u16[7]&(b).u16[7]); } */ /* PX_FORCE_INLINE VecU16V V4U16Andc(VecU16V a, VecU16V b) { return VecU16V( (a).u16[0]&~(b).u16[0], (a).u16[1]&~(b).u16[1], (a).u16[2]&~(b).u16[2], (a).u16[3]&~(b).u16[3], (a).u16[4]&~(b).u16[4], (a).u16[5]&~(b).u16[5], (a).u16[6]&~(b).u16[6], (a).u16[7]&~(b).u16[7]); } */ /* template<int a> PX_FORCE_INLINE VecI32V V4ISplat() { return VecI32V(a, a, a, a); } template<PxU32 a> PX_FORCE_INLINE VecU32V V4USplat() { return VecU32V(a, a, a, a); } */ /* PX_FORCE_INLINE void V4U16StoreAligned(VecU16V val, VecU16V* address) { *address = val; } */ PX_FORCE_INLINE void V4U32StoreAligned(VecU32V val, VecU32V* address) { *address = val; } PX_FORCE_INLINE Vec4V V4Andc(const Vec4V a, const VecU32V b) { VecU32V r = V4U32Andc(*reinterpret_cast<const VecU32V*>(&a), b); return (*reinterpret_cast<const Vec4V*>(&r)); } PX_FORCE_INLINE VecU32V V4IsGrtrV32u(const Vec4V a, const Vec4V b) { return VecU32V(a.x > b.x ? 0xFFFFffff : 0, a.y > b.y ? 0xFFFFffff : 0, a.z > b.z ? 0xFFFFffff : 0, a.w > b.w ? 
0xFFFFffff : 0); } PX_FORCE_INLINE VecU16V V4U16LoadAligned(VecU16V* addr) { return *addr; } PX_FORCE_INLINE VecU16V V4U16LoadUnaligned(VecU16V* addr) { return *addr; } PX_FORCE_INLINE VecU16V V4U16CompareGt(VecU16V a, VecU16V b) { return VecU16V ( BOOL_TO_U16(a.u16[0] > b.u16[0]), BOOL_TO_U16(a.u16[1] > b.u16[1]), BOOL_TO_U16(a.u16[2] > b.u16[2]), BOOL_TO_U16(a.u16[3] > b.u16[3]), BOOL_TO_U16(a.u16[4] > b.u16[4]), BOOL_TO_U16(a.u16[5] > b.u16[5]), BOOL_TO_U16(a.u16[6] > b.u16[6]), BOOL_TO_U16(a.u16[7] > b.u16[7]) ); } PX_FORCE_INLINE VecU16V V4I16CompareGt(VecU16V a, VecU16V b) { return VecU16V ( BOOL_TO_U16(a.i16[0] > b.i16[0]), BOOL_TO_U16(a.i16[1] > b.i16[1]), BOOL_TO_U16(a.i16[2] > b.i16[2]), BOOL_TO_U16(a.i16[3] > b.i16[3]), BOOL_TO_U16(a.i16[4] > b.i16[4]), BOOL_TO_U16(a.i16[5] > b.i16[5]), BOOL_TO_U16(a.i16[6] > b.i16[6]), BOOL_TO_U16(a.i16[7] > b.i16[7]) ); } PX_FORCE_INLINE Vec4V Vec4V_From_VecU32V(VecU32V a) { return Vec4V(PxF32((a).u32[0]), PxF32((a).u32[1]), PxF32((a).u32[2]), PxF32((a).u32[3])); } PX_FORCE_INLINE Vec4V Vec4V_From_VecI32V(VecI32V a) { return Vec4V(PxF32((a).i32[0]), PxF32((a).i32[1]), PxF32((a).i32[2]), PxF32((a).i32[3])); } PX_FORCE_INLINE VecI32V VecI32V_From_Vec4V(Vec4V a) { float* data = reinterpret_cast<float*>(&a); return VecI32V(PxI32(data[0]), PxI32(data[1]), PxI32(data[2]), PxI32(data[3])); } PX_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecU32V(VecU32V a) { Vec4V b = *reinterpret_cast<Vec4V*>(&a); return b; } PX_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecI32V(VecI32V a) { Vec4V b = *reinterpret_cast<Vec4V*>(&a); return b; } PX_FORCE_INLINE VecU32V VecU32V_ReinterpretFrom_Vec4V(Vec4V a) { VecU32V b = *reinterpret_cast<VecU32V*>(&a); return b; } PX_FORCE_INLINE VecI32V VecI32V_ReinterpretFrom_Vec4V(Vec4V a) { VecI32V b = *reinterpret_cast<VecI32V*>(&a); return b; } template <int index> PX_FORCE_INLINE VecU32V V4U32SplatElement(VecU32V a) { return VecU32V((a).u32[index], (a).u32[index], (a).u32[index], (a).u32[index]); } template 
<int index> PX_FORCE_INLINE VecU32V V4U32SplatElement(BoolV a) { const PxU32 u = (&a.ux)[index]; return VecU32V(u, u, u, u); } template <int index> PX_FORCE_INLINE Vec4V V4SplatElement(Vec4V a) { float* data = reinterpret_cast<float*>(&a); return Vec4V(data[index], data[index], data[index], data[index]); } PX_FORCE_INLINE VecU32V U4LoadXYZW(PxU32 x, PxU32 y, PxU32 z, PxU32 w) { return VecU32V(x, y, z, w); } PX_FORCE_INLINE Vec4V V4Abs(const Vec4V a) { return V4Max(a, V4Neg(a)); } PX_FORCE_INLINE BoolV V4IsEqU32(const VecU32V a, const VecU32V b) { return BoolV(BOOL_TO_U32(a.u32[0] == b.u32[0]), BOOL_TO_U32(a.u32[1] == b.u32[1]), BOOL_TO_U32(a.u32[2] == b.u32[2]), BOOL_TO_U32(a.u32[3] == b.u32[3])); } PX_FORCE_INLINE VecU32V U4Load(const PxU32 i) { return VecU32V(i, i, i, i); } PX_FORCE_INLINE VecU32V U4LoadU(const PxU32* i) { return VecU32V(i[0], i[1], i[2], i[3]); } PX_FORCE_INLINE VecU32V U4LoadA(const PxU32* i) { return VecU32V(i[0], i[1], i[2], i[3]); } PX_FORCE_INLINE VecI32V I4LoadXYZW(const PxI32& x, const PxI32& y, const PxI32& z, const PxI32& w) { return VecI32V(x, y, z, w); } PX_FORCE_INLINE VecI32V I4Load(const PxI32 i) { return VecI32V(i, i, i, i); } PX_FORCE_INLINE VecI32V I4LoadU(const PxI32* i) { return VecI32V(i[0], i[1], i[2], i[3]); } PX_FORCE_INLINE VecI32V I4LoadA(const PxI32* i) { return VecI32V(i[0], i[1], i[2], i[3]); } PX_FORCE_INLINE VecI32V VecI32V_Add(const VecI32VArg a, const VecI32VArg b) { return VecI32V(a.i32[0] + b.i32[0], a.i32[1] + b.i32[1], a.i32[2] + b.i32[2], a.i32[3] + b.i32[3]); } PX_FORCE_INLINE VecI32V VecI32V_Sub(const VecI32VArg a, const VecI32VArg b) { return VecI32V(a.i32[0] - b.i32[0], a.i32[1] - b.i32[1], a.i32[2] - b.i32[2], a.i32[3] - b.i32[3]); } PX_FORCE_INLINE BoolV VecI32V_IsGrtr(const VecI32VArg a, const VecI32VArg b) { return BoolV(BOOL_TO_U32(a.i32[0] > b.i32[0]), BOOL_TO_U32(a.i32[1] > b.i32[1]), BOOL_TO_U32(a.i32[2] > b.i32[2]), BOOL_TO_U32(a.i32[3] > b.i32[3])); } PX_FORCE_INLINE BoolV VecI32V_IsEq(const 
VecI32VArg a, const VecI32VArg b) { return BoolV(BOOL_TO_U32(a.i32[0] == b.i32[0]), BOOL_TO_U32(a.i32[1] == b.i32[1]), BOOL_TO_U32(a.i32[2] == b.i32[2]), BOOL_TO_U32(a.i32[3] == b.i32[3])); } PX_FORCE_INLINE VecI32V V4I32Sel(const BoolV c, const VecI32V a, const VecI32V b) { return VecI32V(c.ux ? a.i32[0] : b.i32[0], c.uy ? a.i32[1] : b.i32[1], c.uz ? a.i32[2] : b.i32[2], c.uw ? a.i32[3] : b.i32[3]); } PX_FORCE_INLINE VecI32V VecI32V_Zero() { return VecI32V(0, 0, 0, 0); } PX_FORCE_INLINE VecI32V VecI32V_One() { return VecI32V(1, 1, 1, 1); } PX_FORCE_INLINE VecI32V VecI32V_Two() { return VecI32V(2, 2, 2, 2); } PX_FORCE_INLINE VecI32V VecI32V_MinusOne() { return VecI32V(-1, -1, -1, -1); } PX_FORCE_INLINE VecU32V U4Zero() { return VecU32V(0, 0, 0, 0); } PX_FORCE_INLINE VecU32V U4One() { return VecU32V(1, 1, 1, 1); } PX_FORCE_INLINE VecU32V U4Two() { return VecU32V(2, 2, 2, 2); } PX_FORCE_INLINE VecShiftV VecI32V_PrepareShift(const VecI32VArg shift) { return shift; } PX_FORCE_INLINE VecI32V VecI32V_LeftShift(const VecI32VArg a, const VecShiftVArg count) { return VecI32V(a.i32[0] << count.i32[0], a.i32[1] << count.i32[1], a.i32[2] << count.i32[2], a.i32[3] << count.i32[3]); } PX_FORCE_INLINE VecI32V VecI32V_RightShift(const VecI32VArg a, const VecShiftVArg count) { return VecI32V(a.i32[0] >> count.i32[0], a.i32[1] >> count.i32[1], a.i32[2] >> count.i32[2], a.i32[3] >> count.i32[3]); } PX_FORCE_INLINE VecI32V VecI32V_LeftShift(const VecI32VArg a, const PxU32 count) { return VecI32V(a.i32[0] << count, a.i32[1] << count, a.i32[2] << count, a.i32[3] << count); } PX_FORCE_INLINE VecI32V VecI32V_RightShift(const VecI32VArg a, const PxU32 count) { return VecI32V(a.i32[0] >> count, a.i32[1] >> count, a.i32[2] >> count, a.i32[3] >> count); } PX_FORCE_INLINE VecI32V VecI32V_And(const VecI32VArg a, const VecI32VArg b) { return VecI32V(a.i32[0] & b.i32[0], a.i32[1] & b.i32[1], a.i32[2] & b.i32[2], a.i32[3] & b.i32[3]); } PX_FORCE_INLINE VecI32V VecI32V_Or(const VecI32VArg a, const 
VecI32VArg b) { return VecI32V(a.i32[0] | b.i32[0], a.i32[1] | b.i32[1], a.i32[2] | b.i32[2], a.i32[3] | b.i32[3]); } PX_FORCE_INLINE VecI32V VecI32V_GetX(const VecI32VArg a) { return VecI32V(a.i32[0], a.i32[0], a.i32[0], a.i32[0]); } PX_FORCE_INLINE VecI32V VecI32V_GetY(const VecI32VArg a) { return VecI32V(a.i32[1], a.i32[1], a.i32[1], a.i32[1]); } PX_FORCE_INLINE VecI32V VecI32V_GetZ(const VecI32VArg a) { return VecI32V(a.i32[2], a.i32[2], a.i32[2], a.i32[2]); } PX_FORCE_INLINE VecI32V VecI32V_GetW(const VecI32VArg a) { return VecI32V(a.i32[3], a.i32[3], a.i32[3], a.i32[3]); } PX_FORCE_INLINE VecI32V VecI32V_Sel(const BoolV c, const VecI32VArg a, const VecI32VArg b) { return VecI32V(c.ux ? a.i32[0] : b.i32[0], c.uy ? a.i32[1] : b.i32[1], c.uz ? a.i32[2] : b.i32[2], c.uw ? a.i32[3] : b.i32[3]); } PX_FORCE_INLINE VecI32V VecI32V_Merge(const VecI32VArg a, const VecI32VArg b, const VecI32VArg c, const VecI32VArg d) { return VecI32V(a.i32[0], b.i32[0], c.i32[0], d.i32[0]); } PX_FORCE_INLINE void PxI32_From_VecI32V(const VecI32VArg a, PxI32* i) { *i = a.i32[0]; } PX_FORCE_INLINE VecI32V VecI32V_From_BoolV(const BoolVArg b) { return VecI32V(PxI32(b.ux), PxI32(b.uy), PxI32(b.uz), PxI32(b.uw)); } PX_FORCE_INLINE VecU32V VecU32V_From_BoolV(const BoolVArg b) { return VecU32V(b.ux, b.uy, b.uz, b.uw); } PX_FORCE_INLINE void QuatGetMat33V(const QuatVArg q, Vec3V& column0, Vec3V& column1, Vec3V& column2) { const FloatV one = FOne(); const FloatV x = V4GetX(q); const FloatV y = V4GetY(q); const FloatV z = V4GetZ(q); const FloatV w = V4GetW(q); const FloatV x2 = FAdd(x, x); const FloatV y2 = FAdd(y, y); const FloatV z2 = FAdd(z, z); const FloatV xx = FMul(x2, x); const FloatV yy = FMul(y2, y); const FloatV zz = FMul(z2, z); const FloatV xy = FMul(x2, y); const FloatV xz = FMul(x2, z); const FloatV xw = FMul(x2, w); const FloatV yz = FMul(y2, z); const FloatV yw = FMul(y2, w); const FloatV zw = FMul(z2, w); const FloatV v = FSub(one, xx); column0 = V3Merge(FSub(FSub(one, yy), zz), 
FAdd(xy, zw), FSub(xz, yw)); column1 = V3Merge(FSub(xy, zw), FSub(v, zz), FAdd(yz, xw)); column2 = V3Merge(FAdd(xz, yw), FSub(yz, xw), FSub(v, yy)); } // not used /* PX_FORCE_INLINE Vec4V V4LoadAligned(Vec4V* addr) { return *addr; } */ /* PX_FORCE_INLINE Vec4V V4LoadUnaligned(Vec4V* addr) { return *addr; } */ /* PX_FORCE_INLINE Vec4V V4Ceil(const Vec4V a) { return Vec4V(PxCeil(a.x), PxCeil(a.y), PxCeil(a.z), PxCeil(a.w)); } PX_FORCE_INLINE Vec4V V4Floor(const Vec4V a) { return Vec4V(PxFloor(a.x), PxFloor(a.y), PxFloor(a.z), PxFloor(a.w)); } */ /* PX_FORCE_INLINE VecU32V V4ConvertToU32VSaturate(const Vec4V a, PxU32 power) { PX_ASSERT(power == 0 && "Non-zero power not supported in convertToU32VSaturate"); PX_UNUSED(power); // prevent warning in release builds PxF32 ffffFFFFasFloat = PxF32(0xFFFF0000); return VecU32V( PxU32(PxClamp<PxF32>((a).x, 0.0f, ffffFFFFasFloat)), PxU32(PxClamp<PxF32>((a).y, 0.0f, ffffFFFFasFloat)), PxU32(PxClamp<PxF32>((a).z, 0.0f, ffffFFFFasFloat)), PxU32(PxClamp<PxF32>((a).w, 0.0f, ffffFFFFasFloat))); } */ } // namespace aos #if !PX_DOXYGEN } // namespace physx #endif #if PX_GCC_FAMILY #pragma GCC diagnostic pop #endif #endif
58,429
C
24.382276
154
0.626008
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxMutex.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_MUTEX_H #define PX_MUTEX_H #include "foundation/PxAllocator.h" /* * This <new> inclusion is a best known fix for gcc 4.4.1 error: * Creating object file for apex/src/PsAllocator.cpp ... 
* In file included from apex/include/PsFoundation.h:30, * from apex/src/PsAllocator.cpp:26: * apex/include/PsMutex.h: In constructor 'physx::PxMutexT<Alloc>::MutexT(const Alloc&)': * apex/include/PsMutex.h:92: error: no matching function for call to 'operator new(unsigned int, * physx::PxMutexImpl*&)' * <built-in>:0: note: candidates are: void* operator new(unsigned int) */ #include <new> #if !PX_DOXYGEN namespace physx { #endif class PX_FOUNDATION_API PxMutexImpl { public: /** The constructor for Mutex creates a mutex. It is initially unlocked. */ PxMutexImpl(); /** The destructor for Mutex deletes the mutex. */ ~PxMutexImpl(); /** Acquire (lock) the mutex. If the mutex is already locked by another thread, this method blocks until the mutex is unlocked. */ void lock(); /** Acquire (lock) the mutex. If the mutex is already locked by another thread, this method returns false without blocking. */ bool trylock(); /** Release (unlock) the mutex. */ void unlock(); /** Size of this class. */ static uint32_t getSize(); }; template <typename Alloc = PxReflectionAllocator<PxMutexImpl> > class PxMutexT : protected Alloc { PX_NOCOPY(PxMutexT) public: class ScopedLock { PxMutexT<Alloc>& mMutex; PX_NOCOPY(ScopedLock) public: PX_INLINE ScopedLock(PxMutexT<Alloc>& mutex) : mMutex(mutex) { mMutex.lock(); } PX_INLINE ~ScopedLock() { mMutex.unlock(); } }; /** The constructor for Mutex creates a mutex. It is initially unlocked. */ PxMutexT(const Alloc& alloc = Alloc()) : Alloc(alloc) { mImpl = reinterpret_cast<PxMutexImpl*>(Alloc::allocate(PxMutexImpl::getSize(), PX_FL)); PX_PLACEMENT_NEW(mImpl, PxMutexImpl)(); } /** The destructor for Mutex deletes the mutex. */ ~PxMutexT() { mImpl->~PxMutexImpl(); Alloc::deallocate(mImpl); } /** Acquire (lock) the mutex. If the mutex is already locked by another thread, this method blocks until the mutex is unlocked. */ PX_FORCE_INLINE void lock() const { mImpl->lock(); } /** Acquire (lock) the mutex. 
If the mutex is already locked by another thread, this method returns false without blocking, returns true if lock is successfully acquired */ PX_FORCE_INLINE bool trylock() const { return mImpl->trylock(); } /** Release (unlock) the mutex, the calling thread must have previously called lock() or method will error */ PX_FORCE_INLINE void unlock() const { mImpl->unlock(); } private: PxMutexImpl* mImpl; }; class PX_FOUNDATION_API PxReadWriteLock { PX_NOCOPY(PxReadWriteLock) public: PxReadWriteLock(); ~PxReadWriteLock(); // "takeLock" can only be false if the thread already holds the mutex, e.g. if it already acquired the write lock void lockReader(bool takeLock); void lockWriter(); void unlockReader(); void unlockWriter(); private: class ReadWriteLockImpl* mImpl; }; typedef PxMutexT<> PxMutex; #if !PX_DOXYGEN } // namespace physx #endif #endif
4,872
C
25.483696
114
0.721059
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxAllocatorCallback.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_ALLOCATOR_CALLBACK_H #define PX_ALLOCATOR_CALLBACK_H /** \addtogroup foundation @{ */ #include "foundation/PxFoundationConfig.h" #include "foundation/Px.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief Abstract base class for an application defined memory allocator that can be used by the Nv library. 
\note The SDK state should not be modified from within any allocation/free function. <b>Threading:</b> All methods of this class should be thread safe as it can be called from the user thread or the physics processing thread(s). */ class PxAllocatorCallback { public: virtual ~PxAllocatorCallback() { } /** \brief Allocates size bytes of memory, which must be 16-byte aligned. This method should never return NULL. If you run out of memory, then you should terminate the app or take some other appropriate action. <b>Threading:</b> This function should be thread safe as it can be called in the context of the user thread and physics processing thread(s). \param size Number of bytes to allocate. \param typeName Name of the datatype that is being allocated \param filename The source file which allocated the memory \param line The source line which allocated the memory \return The allocated block of memory. */ virtual void* allocate(size_t size, const char* typeName, const char* filename, int line) = 0; /** \brief Frees memory previously allocated by allocate(). <b>Threading:</b> This function should be thread safe as it can be called in the context of the user thread and physics processing thread(s). \param ptr Memory to free. */ virtual void deallocate(void* ptr) = 0; }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
3,412
C
34.926315
108
0.754103
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxIO.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_IO_H #define PX_IO_H /** \addtogroup common @{ */ #include "foundation/PxSimpleTypes.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief Input stream class for I/O. The user needs to supply a PxInputStream implementation to a number of methods to allow the SDK to read data. 
*/ class PxInputStream { public: /** \brief read from the stream. The number of bytes read may be less than the number requested. \param[in] dest the destination address to which the data will be read \param[in] count the number of bytes requested \return the number of bytes read from the stream. */ virtual uint32_t read(void* dest, uint32_t count) = 0; virtual ~PxInputStream() { } }; /** \brief Input data class for I/O which provides random read access. The user needs to supply a PxInputData implementation to a number of methods to allow the SDK to read data. */ class PxInputData : public PxInputStream { public: /** \brief return the length of the input data \return size in bytes of the input data */ virtual uint32_t getLength() const = 0; /** \brief seek to the given offset from the start of the data. \param[in] offset the offset to seek to. If greater than the length of the data, this call is equivalent to seek(length); */ virtual void seek(uint32_t offset) = 0; /** \brief return the current offset from the start of the data \return the offset to seek to. */ virtual uint32_t tell() const = 0; virtual ~PxInputData() { } }; /** \brief Output stream class for I/O. The user needs to supply a PxOutputStream implementation to a number of methods to allow the SDK to write data. */ class PxOutputStream { public: /** \brief write to the stream. The number of bytes written may be less than the number sent. \param[in] src the destination address from which the data will be written \param[in] count the number of bytes to be written \return the number of bytes written to the stream by this call. */ virtual uint32_t write(const void* src, uint32_t count) = 0; virtual ~PxOutputStream() { } }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
3,836
C
26.604316
111
0.735401
NVIDIA-Omniverse/PhysX/physx/include/foundation/Px.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_H #define PX_H /** \addtogroup foundation @{ */ #include "foundation/PxSimpleTypes.h" /** files to always include */ #include <string.h> #include <stdlib.h> #if !PX_DOXYGEN namespace physx { #endif typedef uint32_t PxU32; class PxAllocatorCallback; class PxErrorCallback; struct PxErrorCode; class PxInputStream; class PxInputData; class PxOutputStream; template<class Type> class PxVec2T; typedef PxVec2T<float> PxVec2; template<class Type> class PxVec3T; typedef PxVec3T<float> PxVec3; template<class Type> class PxVec4T; typedef PxVec4T<float> PxVec4; template<class Type> class PxQuatT; typedef PxQuatT<float> PxQuat; template<class Type> class PxMat33T; typedef PxMat33T<float> PxMat33; template<class Type> class PxMat34T; typedef PxMat34T<float> PxMat34; template<class Type> class PxMat44T; typedef PxMat44T<float> PxMat44; template<class Type> class PxTransformT; typedef PxTransformT<float> PxTransform; class PxPlane; class PxBounds3; /** enum for empty constructor tag*/ enum PxEMPTY { PxEmpty }; /** enum for zero constructor tag for vectors and matrices */ enum PxZERO { PxZero }; /** enum for identity constructor flag for quaternions, transforms, and matrices */ enum PxIDENTITY { PxIdentity }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
3,004
C
26.568807
83
0.763316
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxPhysicsVersion.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_PHYSICS_VERSION_H #define PX_PHYSICS_VERSION_H /* VersionNumbers: The combination of these numbers uniquely identifies the API, and should be incremented when the SDK API changes. This may include changes to file formats. 
This header is included in the main SDK header files so that the entire SDK and everything that builds on it is completely rebuilt when this file changes. Thus, this file is not to include a frequently changing build number. See BuildNumber.h for that. Each of these three values should stay below 255 because sometimes they are stored in a byte. */ /** \addtogroup foundation @{ */ #define PX_PHYSICS_VERSION_MAJOR 5 #define PX_PHYSICS_VERSION_MINOR 3 #define PX_PHYSICS_VERSION_BUGFIX 1 /** The constant PX_PHYSICS_VERSION is used when creating certain PhysX module objects. This is to ensure that the application is using the same header version as the library was built with. */ #define PX_PHYSICS_VERSION ((PX_PHYSICS_VERSION_MAJOR<<24) + (PX_PHYSICS_VERSION_MINOR<<16) + (PX_PHYSICS_VERSION_BUGFIX<<8) + 0) #endif /** @} */
2,775
C
42.374999
129
0.765405
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxProfiler.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. 
#ifndef PX_PROFILER_H #define PX_PROFILER_H #include "foundation/Px.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief The pure virtual callback interface for general purpose instrumentation and profiling of GameWorks modules as well as applications */ class PxProfilerCallback { protected: virtual ~PxProfilerCallback() {} public: /************************************************************************************************************************** Instrumented profiling events ***************************************************************************************************************************/ /** \brief Mark the beginning of a nested profile block \param[in] eventName Event name. Must be a persistent const char * \param[in] detached True for cross thread events \param[in] contextId the context id of this zone. Zones with the same id belong to the same group. 0 is used for no specific group. \return Returns implementation-specific profiler data for this event */ virtual void* zoneStart(const char* eventName, bool detached, uint64_t contextId) = 0; /** \brief Mark the end of a nested profile block \param[in] profilerData The data returned by the corresponding zoneStart call (or NULL if not available) \param[in] eventName The name of the zone ending, must match the corresponding name passed with 'zoneStart'. Must be a persistent const char *. \param[in] detached True for cross thread events. Should match the value passed to zoneStart. \param[in] contextId The context of this zone. Should match the value passed to zoneStart. \note eventName plus contextId can be used to uniquely match up start and end of a zone. 
*/ virtual void zoneEnd(void* profilerData, const char* eventName, bool detached, uint64_t contextId) = 0; }; class PxProfileScoped { public: PX_FORCE_INLINE PxProfileScoped(PxProfilerCallback* callback, const char* eventName, bool detached, uint64_t contextId) : mCallback(callback), mProfilerData(NULL) { if(mCallback) { mEventName = eventName; mContextId = contextId; mDetached = detached; mProfilerData = mCallback->zoneStart(eventName, detached, contextId); } } PX_FORCE_INLINE ~PxProfileScoped() { if(mCallback) mCallback->zoneEnd(mProfilerData, mEventName, mDetached, mContextId); } PxProfilerCallback* mCallback; const char* mEventName; void* mProfilerData; uint64_t mContextId; bool mDetached; }; #if !PX_DOXYGEN } // namespace physx #endif #endif
4,065
C
38.096153
163
0.711439
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxVecMathAoSScalar.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_VEC_MATH_AOS_SCALAR_H #define PX_VEC_MATH_AOS_SCALAR_H #if COMPILE_VECTOR_INTRINSICS #error Scalar version should not be included when using vector intrinsics. 
#endif #if !PX_DOXYGEN namespace physx { #endif namespace aos { struct VecI16V; struct VecU16V; struct VecI32V; struct VecU32V; struct Vec4V; typedef Vec4V QuatV; PX_ALIGN_PREFIX(16) struct FloatV { PxF32 x; PxF32 pad[3]; FloatV() { } FloatV(const PxF32 _x) : x(_x) { } } PX_ALIGN_SUFFIX(16); PX_ALIGN_PREFIX(16) struct Vec4V { PxF32 x, y, z, w; Vec4V() { } Vec4V(const PxF32 _x, const PxF32 _y, const PxF32 _z, const PxF32 _w) : x(_x), y(_y), z(_z), w(_w) { } } PX_ALIGN_SUFFIX(16); PX_ALIGN_PREFIX(16) struct Vec3V { PxF32 x, y, z; PxF32 pad; Vec3V() { } Vec3V(const PxF32 _x, const PxF32 _y, const PxF32 _z) : x(_x), y(_y), z(_z), pad(0.0f) { } } PX_ALIGN_SUFFIX(16); PX_ALIGN_PREFIX(16) struct BoolV { PxU32 ux, uy, uz, uw; BoolV() { } BoolV(const PxU32 _x, const PxU32 _y, const PxU32 _z, const PxU32 _w) : ux(_x), uy(_y), uz(_z), uw(_w) { } } PX_ALIGN_SUFFIX(16); struct Mat33V { Mat33V() { } Mat33V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2) : col0(c0), col1(c1), col2(c2) { } Vec3V col0; Vec3V col1; Vec3V col2; }; struct Mat34V { Mat34V() { } Mat34V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2, const Vec3V& c3) : col0(c0), col1(c1), col2(c2), col3(c3) { } Vec3V col0; Vec3V col1; Vec3V col2; Vec3V col3; }; struct Mat43V { Mat43V() { } Mat43V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2) : col0(c0), col1(c1), col2(c2) { } Vec4V col0; Vec4V col1; Vec4V col2; }; struct Mat44V { Mat44V() { } Mat44V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2, const Vec4V& c3) : col0(c0), col1(c1), col2(c2), col3(c3) { } Vec4V col0; Vec4V col1; Vec4V col2; Vec4V col3; }; PX_ALIGN_PREFIX(16) struct VecU32V { PxU32 u32[4]; PX_FORCE_INLINE VecU32V() { } PX_FORCE_INLINE VecU32V(PxU32 a, PxU32 b, PxU32 c, PxU32 d) { u32[0] = a; u32[1] = b; u32[2] = c; u32[3] = d; } } PX_ALIGN_SUFFIX(16); PX_ALIGN_PREFIX(16) struct VecI32V { PxI32 i32[4]; PX_FORCE_INLINE VecI32V() { } PX_FORCE_INLINE VecI32V(PxI32 a, PxI32 b, PxI32 c, PxI32 d) { i32[0] = a; i32[1] = b; i32[2] = c; i32[3] = d; 
} } PX_ALIGN_SUFFIX(16); PX_ALIGN_PREFIX(16) struct VecI16V { PxI16 i16[8]; PX_FORCE_INLINE VecI16V() { } PX_FORCE_INLINE VecI16V(PxI16 a, PxI16 b, PxI16 c, PxI16 d, PxI16 e, PxI16 f, PxI16 g, PxI16 h) { i16[0] = a; i16[1] = b; i16[2] = c; i16[3] = d; i16[4] = e; i16[5] = f; i16[6] = g; i16[7] = h; } } PX_ALIGN_SUFFIX(16); PX_ALIGN_PREFIX(16) struct VecU16V { union { PxU16 u16[8]; PxI16 i16[8]; }; PX_FORCE_INLINE VecU16V() { } PX_FORCE_INLINE VecU16V(PxU16 a, PxU16 b, PxU16 c, PxU16 d, PxU16 e, PxU16 f, PxU16 g, PxU16 h) { u16[0] = a; u16[1] = b; u16[2] = c; u16[3] = d; u16[4] = e; u16[5] = f; u16[6] = g; u16[7] = h; } } PX_ALIGN_SUFFIX(16); #define FloatVArg FloatV & #define Vec3VArg Vec3V & #define Vec4VArg Vec4V & #define BoolVArg BoolV & #define VecU32VArg VecU32V & #define VecI32VArg VecI32V & #define VecU16VArg VecU16V & #define VecI16VArg VecI16V & #define QuatVArg QuatV & #define VecCrossV Vec3V typedef VecI32V VecShiftV; #define VecShiftVArg VecShiftV & } // namespace aos #if !PX_DOXYGEN } // namespace physx #endif #endif
5,084
C
19.178571
116
0.672109
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxSocket.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_SOCKET_H #define PX_SOCKET_H #include "foundation/PxUserAllocated.h" #if !PX_DOXYGEN namespace physx { #endif /** Socket abstraction API */ class PX_FOUNDATION_API PxSocket : public PxUserAllocated { public: static const uint32_t DEFAULT_BUFFER_SIZE; PxSocket(bool inEnableBuffering = true, bool blocking = true); virtual ~PxSocket(); /*! Opens a network socket for input and/or output \param host Name of the host to connect to. This can be an IP, URL, etc \param port The port to connect to on the remote host \param timeout Timeout in ms until the connection must be established. \return True if the connection was successful, false otherwise */ bool connect(const char* host, uint16_t port, uint32_t timeout = 1000); /*! Opens a network socket for input and/or output as a server. Put the connection in listening mode \param port The port on which the socket listens */ bool listen(uint16_t port); /*! Accept a connection on a socket that is in listening mode \note This method only supports a single connection client. Additional clients that connect to the listening port will overwrite the existing socket handle. \param block whether or not the call should block \return whether a connection was established */ bool accept(bool block); /*! Disconnects an open socket */ void disconnect(); /*! Returns whether the socket is currently open (connected) or not. \return True if the socket is connected, false otherwise */ bool isConnected() const; /*! Returns the name of the connected host. This is the same as the string that was supplied to the connect call. \return The name of the connected host */ const char* getHost() const; /*! Returns the port of the connected host. This is the same as the port that was supplied to the connect call. \return The port of the connected host */ uint16_t getPort() const; /*! Flushes the output stream. Until the stream is flushed, there is no guarantee that the written data has actually reached the destination storage. 
Flush forces all buffered data to be sent to the output. \note flush always blocks. If the socket is in non-blocking mode, this will result the thread spinning. \return True if the flush was successful, false otherwise */ bool flush(); /*! Writes data to the output stream. \param data Pointer to a block of data to write to the stream \param length Amount of data to write, in bytes \return Number of bytes actually written. This could be lower than length if the socket is non-blocking. */ uint32_t write(const uint8_t* data, uint32_t length); /*! Reads data from the output stream. \param data Pointer to a buffer where the read data will be stored. \param length Amount of data to read, in bytes. \return Number of bytes actually read. This could be lower than length if the stream end is encountered or the socket is non-blocking. */ uint32_t read(uint8_t* data, uint32_t length); /*! Sets blocking mode of the socket. Socket must be connected, otherwise calling this method won't take any effect. */ void setBlocking(bool blocking); /*! Returns whether read/write/flush calls to the socket are blocking. \return True if the socket is blocking. */ bool isBlocking() const; private: class SocketImpl* mImpl; }; #if !PX_DOXYGEN } // namespace physx #endif #endif
5,087
C
26.06383
98
0.744643
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxBasicTemplates.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_BASIC_TEMPLATES_H #define PX_BASIC_TEMPLATES_H #include "foundation/PxPreprocessor.h" #if !PX_DOXYGEN namespace physx { #endif template <typename A> struct PxEqual { bool operator()(const A& a, const A& b) const { return a == b; } }; template <typename A> struct PxLess { bool operator()(const A& a, const A& b) const { return a < b; } }; template <typename A> struct PxGreater { bool operator()(const A& a, const A& b) const { return a > b; } }; template <class F, class S> class PxPair { public: F first; S second; PX_CUDA_CALLABLE PX_INLINE PxPair() : first(F()), second(S()) { } PX_CUDA_CALLABLE PX_INLINE PxPair(const F& f, const S& s) : first(f), second(s) { } PX_CUDA_CALLABLE PX_INLINE PxPair(const PxPair& p) : first(p.first), second(p.second) { } PX_CUDA_CALLABLE PX_INLINE PxPair& operator=(const PxPair& p) { first = p.first; second = p.second; return *this; } PX_CUDA_CALLABLE PX_INLINE bool operator==(const PxPair& p) const { return first == p.first && second == p.second; } PX_CUDA_CALLABLE PX_INLINE bool operator<(const PxPair& p) const { if (first < p.first) return true; else return !(p.first < first) && (second < p.second); } }; template <unsigned int A> struct PxLogTwo { static const unsigned int value = PxLogTwo<(A >> 1)>::value + 1; }; template <> struct PxLogTwo<1> { static const unsigned int value = 0; }; template <typename T> struct PxUnConst { typedef T Type; }; template <typename T> struct PxUnConst<const T> { typedef T Type; }; template <typename T> T PxPointerOffset(void* p, ptrdiff_t offset) { return reinterpret_cast<T>(reinterpret_cast<char*>(p) + offset); } template <typename T> T PxPointerOffset(const void* p, ptrdiff_t offset) { return reinterpret_cast<T>(reinterpret_cast<const char*>(p) + offset); } template <class T> PX_CUDA_CALLABLE PX_INLINE void PxSwap(T& x, T& y) { const T tmp = x; x = y; y = tmp; } #if !PX_DOXYGEN } // namespace physx #endif #endif
3,782
C
24.910959
87
0.695664
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxPreprocessor.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_PREPROCESSOR_H #define PX_PREPROCESSOR_H #include <stddef.h> /** \addtogroup foundation @{ */ #ifndef PX_ENABLE_FEATURES_UNDER_CONSTRUCTION #define PX_ENABLE_FEATURES_UNDER_CONSTRUCTION 0 #endif #define PX_STRINGIZE_HELPER(X) #X #define PX_STRINGIZE(X) PX_STRINGIZE_HELPER(X) #define PX_CONCAT_HELPER(X, Y) X##Y #define PX_CONCAT(X, Y) PX_CONCAT_HELPER(X, Y) /* The following preprocessor identifiers specify compiler, OS, and architecture. All definitions have a value of 1 or 0, use '#if' instead of '#ifdef'. */ /** Compiler defines, see http://sourceforge.net/p/predef/wiki/Compilers/ */ #if defined(_MSC_VER) #if _MSC_VER >= 1920 #define PX_VC 16 #elif _MSC_VER >= 1910 #define PX_VC 15 #elif _MSC_VER >= 1900 #define PX_VC 14 #elif _MSC_VER >= 1800 #define PX_VC 12 #elif _MSC_VER >= 1700 #define PX_VC 11 #elif _MSC_VER >= 1600 #define PX_VC 10 #elif _MSC_VER >= 1500 #define PX_VC 9 #else #error "Unknown VC version" #endif #elif defined(__clang__) #define PX_CLANG 1 #if defined (__clang_major__) #define PX_CLANG_MAJOR __clang_major__ #elif defined (_clang_major) #define PX_CLANG_MAJOR _clang_major #else #define PX_CLANG_MAJOR 0 #endif #elif defined(__GNUC__) // note: __clang__ implies __GNUC__ #define PX_GCC 1 #else #error "Unknown compiler" #endif /** Operating system defines, see http://sourceforge.net/p/predef/wiki/OperatingSystems/ */ #if defined(_WIN64) #define PX_WIN64 1 #elif defined(_WIN32) // note: _M_PPC implies _WIN32 #define PX_WIN32 1 #elif defined(__linux__) || defined (__EMSCRIPTEN__) #define PX_LINUX 1 #elif defined(__APPLE__) #define PX_OSX 1 #elif defined(__NX__) #define PX_SWITCH 1 #else #error "Unknown operating system" #endif /** Architecture defines, see http://sourceforge.net/p/predef/wiki/Architectures/ */ #if defined(__x86_64__) || defined(_M_X64) #define PX_X64 1 #elif defined(__i386__) || defined(_M_IX86) || defined (__EMSCRIPTEN__) #define PX_X86 1 #elif defined(__arm64__) || defined(__aarch64__) || defined(_M_ARM64) #define PX_A64 
1 #elif defined(__arm__) || defined(_M_ARM) #define PX_ARM 1 #elif defined(__ppc__) || defined(_M_PPC) || defined(__CELLOS_LV2__) #define PX_PPC 1 #else #error "Unknown architecture" #endif /** SIMD defines */ #if !defined(PX_SIMD_DISABLED) #if defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) || (defined (__EMSCRIPTEN__) && defined(__SSE2__)) #define PX_SSE2 1 #endif #if defined(_M_ARM) || defined(__ARM_NEON__) || defined(__ARM_NEON) #define PX_NEON 1 #endif #if defined(_M_PPC) || defined(__CELLOS_LV2__) #define PX_VMX 1 #endif #endif /** define anything not defined on this platform to 0 */ #ifndef PX_VC #define PX_VC 0 #endif #ifndef PX_CLANG #define PX_CLANG 0 #endif #ifndef PX_GCC #define PX_GCC 0 #endif #ifndef PX_WIN64 #define PX_WIN64 0 #endif #ifndef PX_WIN32 #define PX_WIN32 0 #endif #ifndef PX_LINUX #define PX_LINUX 0 #endif #ifndef PX_OSX #define PX_OSX 0 #endif #ifndef PX_SWITCH #define PX_SWITCH 0 #endif #ifndef PX_X64 #define PX_X64 0 #endif #ifndef PX_X86 #define PX_X86 0 #endif #ifndef PX_A64 #define PX_A64 0 #endif #ifndef PX_ARM #define PX_ARM 0 #endif #ifndef PX_PPC #define PX_PPC 0 #endif #ifndef PX_SSE2 #define PX_SSE2 0 #endif #ifndef PX_NEON #define PX_NEON 0 #endif #ifndef PX_VMX #define PX_VMX 0 #endif /* define anything not defined through the command line to 0 */ #ifndef PX_DEBUG #define PX_DEBUG 0 #endif #ifndef PX_CHECKED #define PX_CHECKED 0 #endif #ifndef PX_PROFILE #define PX_PROFILE 0 #endif #ifndef PX_DEBUG_CRT #define PX_DEBUG_CRT 0 #endif #ifndef PX_NVTX #define PX_NVTX 0 #endif #ifndef PX_DOXYGEN #define PX_DOXYGEN 0 #endif /** family shortcuts */ // compiler #define PX_GCC_FAMILY (PX_CLANG || PX_GCC) // os #define PX_WINDOWS_FAMILY (PX_WIN32 || PX_WIN64) #define PX_LINUX_FAMILY PX_LINUX #define PX_APPLE_FAMILY PX_OSX // equivalent to #if __APPLE__ #define PX_UNIX_FAMILY (PX_LINUX_FAMILY || PX_APPLE_FAMILY) // shortcut for unix/posix platforms #if defined(__EMSCRIPTEN__) #define PX_EMSCRIPTEN 1 
#else #define PX_EMSCRIPTEN 0 #endif // architecture #define PX_INTEL_FAMILY (PX_X64 || PX_X86) #define PX_ARM_FAMILY (PX_ARM || PX_A64) #define PX_P64_FAMILY (PX_X64 || PX_A64) // shortcut for 64-bit architectures /** C++ standard library defines */ #if defined(_LIBCPP_VERSION) || PX_WIN64 || PX_WIN32 || PX_EMSCRIPTEN #define PX_LIBCPP 1 #else #define PX_LIBCPP 0 #endif // legacy define for PhysX #define PX_WINDOWS (PX_WINDOWS_FAMILY && !PX_ARM_FAMILY) /** Assert macro */ #ifndef PX_ENABLE_ASSERTS #if PX_DEBUG && !defined(__CUDACC__) #define PX_ENABLE_ASSERTS 1 #else #define PX_ENABLE_ASSERTS 0 #endif #endif /** DLL export macros */ #ifndef PX_C_EXPORT #if PX_WINDOWS_FAMILY || PX_LINUX #define PX_C_EXPORT extern "C" #else #define PX_C_EXPORT #endif #endif #if PX_UNIX_FAMILY&& __GNUC__ >= 4 #define PX_UNIX_EXPORT __attribute__((visibility("default"))) #else #define PX_UNIX_EXPORT #endif #if PX_WINDOWS_FAMILY #define PX_DLL_EXPORT __declspec(dllexport) #define PX_DLL_IMPORT __declspec(dllimport) #else #define PX_DLL_EXPORT PX_UNIX_EXPORT #define PX_DLL_IMPORT #endif /** Calling convention */ #ifndef PX_CALL_CONV #if PX_WINDOWS_FAMILY #define PX_CALL_CONV __cdecl #else #define PX_CALL_CONV #endif #endif /** Pack macros - disabled on SPU because they are not supported */ #if PX_VC #define PX_PUSH_PACK_DEFAULT __pragma(pack(push, 8)) #define PX_POP_PACK __pragma(pack(pop)) #elif PX_GCC_FAMILY #define PX_PUSH_PACK_DEFAULT _Pragma("pack(push, 8)") #define PX_POP_PACK _Pragma("pack(pop)") #else #define PX_PUSH_PACK_DEFAULT #define PX_POP_PACK #endif /** Inline macro */ #define PX_INLINE inline #if PX_WINDOWS_FAMILY #pragma inline_depth(255) #endif /** Force inline macro */ #if PX_VC #define PX_FORCE_INLINE __forceinline #elif PX_LINUX // Workaround; Fedora Core 3 do not agree with force inline and PxcPool #define PX_FORCE_INLINE inline #elif PX_GCC_FAMILY #define PX_FORCE_INLINE inline __attribute__((always_inline)) #else #define PX_FORCE_INLINE inline #endif /** Noinline 
macro */ #if PX_WINDOWS_FAMILY #define PX_NOINLINE __declspec(noinline) #elif PX_GCC_FAMILY #define PX_NOINLINE __attribute__((noinline)) #else #define PX_NOINLINE #endif /** Restrict macro */ #if defined(__CUDACC__) #define PX_RESTRICT __restrict__ #else #define PX_RESTRICT __restrict #endif /** Noalias macro */ #if PX_WINDOWS_FAMILY #define PX_NOALIAS __declspec(noalias) #else #define PX_NOALIAS #endif /** Override macro */ #if PX_WINDOWS_FAMILY #define PX_OVERRIDE override #else // PT: we don't really need to support it on all platforms, as long as // we compile the code on at least one platform that supports it. #define PX_OVERRIDE #endif /** Final macro */ #define PX_FINAL final /** Unused attribute macro. Only on GCC for now. */ #if PX_GCC_FAMILY #define PX_UNUSED_ATTRIBUTE __attribute__((unused)) #else #define PX_UNUSED_ATTRIBUTE #endif /** Alignment macros PX_ALIGN_PREFIX and PX_ALIGN_SUFFIX can be used for type alignment instead of aligning individual variables as follows: PX_ALIGN_PREFIX(16) struct A { ... } PX_ALIGN_SUFFIX(16); This declaration style is parsed correctly by Visual Assist. */ #ifndef PX_ALIGN #if PX_WINDOWS_FAMILY #define PX_ALIGN(alignment, decl) __declspec(align(alignment)) decl #define PX_ALIGN_PREFIX(alignment) __declspec(align(alignment)) #define PX_ALIGN_SUFFIX(alignment) #elif PX_GCC_FAMILY #define PX_ALIGN(alignment, decl) decl __attribute__((aligned(alignment))) #define PX_ALIGN_PREFIX(alignment) #define PX_ALIGN_SUFFIX(alignment) __attribute__((aligned(alignment))) #elif defined __CUDACC__ #define PX_ALIGN(alignment, decl) __align__(alignment) decl #define PX_ALIGN_PREFIX(alignment) #define PX_ALIGN_SUFFIX(alignment) __align__(alignment)) #else #define PX_ALIGN(alignment, decl) #define PX_ALIGN_PREFIX(alignment) #define PX_ALIGN_SUFFIX(alignment) #endif #endif /** Deprecated macro - To deprecate a function: Place PX_DEPRECATED at the start of the function header (leftmost word). 
- To deprecate a 'typedef', a 'struct' or a 'class': Place PX_DEPRECATED directly after the keywords ('typedef', 'struct', 'class'). Use these macro definitions to create warnings for deprecated functions \#define PX_DEPRECATED __declspec(deprecated) // Microsoft \#define PX_DEPRECATED __attribute__((deprecated())) // GCC */ #define PX_DEPRECATED /** General defines */ #if PX_LINUX && PX_CLANG && !(defined __CUDACC__) #define PX_COMPILE_TIME_ASSERT(exp) \ _Pragma(" clang diagnostic push") \ _Pragma(" clang diagnostic ignored \"-Wc++98-compat\"") \ static_assert(exp, "") \ _Pragma(" clang diagnostic pop") #else #define PX_COMPILE_TIME_ASSERT(exp) static_assert(exp, "") #endif #if PX_GCC_FAMILY #define PX_OFFSET_OF(X, Y) __builtin_offsetof(X, Y) #else #define PX_OFFSET_OF(X, Y) offsetof(X, Y) #endif #define PX_OFFSETOF_BASE 0x100 // casting the null ptr takes a special-case code path, which we don't want #define PX_OFFSET_OF_RT(Class, Member) (reinterpret_cast<size_t>(&reinterpret_cast<Class*>(PX_OFFSETOF_BASE)->Member) - size_t(PX_OFFSETOF_BASE)) #if PX_WINDOWS_FAMILY // check that exactly one of NDEBUG and _DEBUG is defined #if !defined(NDEBUG) ^ defined(_DEBUG) #error Exactly one of NDEBUG and _DEBUG needs to be defined! #endif #endif // make sure PX_CHECKED is defined in all _DEBUG configurations as well #if !PX_CHECKED && PX_DEBUG #error PX_CHECKED must be defined when PX_DEBUG is defined #endif #ifdef __CUDACC__ #define PX_CUDA_CALLABLE __host__ __device__ #else #define PX_CUDA_CALLABLE #endif // avoid unreferenced parameter warning // preferred solution: omit the parameter's name from the declaration template <class T> PX_CUDA_CALLABLE PX_INLINE void PX_UNUSED(T const&) { } // Ensure that the application hasn't tweaked the pack value to less than 8, which would break // matching between the API headers and the binaries // This assert works on win32/win64, but may need further specialization on other platforms. 
// Some GCC compilers need the compiler flag -malign-double to be set. // Apparently the apple-clang-llvm compiler doesn't support malign-double. #if PX_APPLE_FAMILY || (PX_CLANG && !PX_ARM) struct PxPackValidation { char _; long a; }; #elif PX_CLANG && PX_ARM struct PxPackValidation { char _; double a; }; #else struct PxPackValidation { char _; long long a; }; #endif // clang (as of version 3.9) cannot align doubles on 8 byte boundary when compiling for Intel 32 bit target #if !PX_APPLE_FAMILY && !PX_EMSCRIPTEN && !(PX_CLANG && PX_X86) PX_COMPILE_TIME_ASSERT(PX_OFFSET_OF(PxPackValidation, a) == 8); #endif // use in a cpp file to suppress LNK4221 #if PX_VC #define PX_DUMMY_SYMBOL \ namespace \ { \ char PxDummySymbol; \ } #else #define PX_DUMMY_SYMBOL #endif #if PX_GCC_FAMILY #define PX_WEAK_SYMBOL __attribute__((weak)) // this is to support SIMD constant merging in template specialization #else #define PX_WEAK_SYMBOL #endif // Macro for avoiding default assignment and copy, because doing this by inheritance can increase class size on some // platforms. #define PX_NOCOPY(Class) \ protected: \ Class(const Class&); \ Class& operator=(const Class&); //#define DISABLE_CUDA_PHYSX #ifndef DISABLE_CUDA_PHYSX //CUDA is currently supported on x86_64 windows and linux, and ARM_64 linux #define PX_SUPPORT_GPU_PHYSX ((PX_X64 && (PX_WINDOWS_FAMILY || PX_LINUX)) || (PX_A64 && PX_LINUX)) #else #define PX_SUPPORT_GPU_PHYSX 0 #endif #ifndef PX_SUPPORT_EXTERN_TEMPLATE #define PX_SUPPORT_EXTERN_TEMPLATE (PX_VC != 11) #else #define PX_SUPPORT_EXTERN_TEMPLATE 0 #endif #define PX_FL __FILE__, __LINE__ /** @} */ #endif
13,549
C
23.908088
145
0.708466
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxAlignedMalloc.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_ALIGNED_MALLOC_H #define PX_ALIGNED_MALLOC_H #include "PxUserAllocated.h" /*! Allocate aligned memory. Alignment must be a power of 2! 
-- should be templated by a base allocator */ #if !PX_DOXYGEN namespace physx { #endif /** Allocator, which is used to access the global PxAllocatorCallback instance (used for dynamic data types template instantiation), which can align memory */ // SCS: AlignedMalloc with 3 params not found, seems not used on PC either // disabled for now to avoid GCC error template <uint32_t N, typename BaseAllocator = PxAllocator> class PxAlignedAllocator : public BaseAllocator { public: PxAlignedAllocator(const BaseAllocator& base = BaseAllocator()) : BaseAllocator(base) { } void* allocate(size_t size, const char* file, int line) { size_t pad = N - 1 + sizeof(size_t); // store offset for delete. uint8_t* base = reinterpret_cast<uint8_t*>(BaseAllocator::allocate(size + pad, file, line)); if (!base) return NULL; uint8_t* ptr = reinterpret_cast<uint8_t*>(size_t(base + pad) & ~(size_t(N) - 1)); // aligned pointer, ensuring N // is a size_t // wide mask reinterpret_cast<size_t*>(ptr)[-1] = size_t(ptr - base); // store offset return ptr; } void deallocate(void* ptr) { if (ptr == NULL) return; uint8_t* base = reinterpret_cast<uint8_t*>(ptr) - reinterpret_cast<size_t*>(ptr)[-1]; BaseAllocator::deallocate(base); } }; #if !PX_DOXYGEN } // namespace physx #endif #endif
3,233
C
34.933333
115
0.7176
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxString.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_STRING_H #define PX_STRING_H #include "foundation/PxPreprocessor.h" #include "foundation/PxSimpleTypes.h" #include "foundation/PxFoundationConfig.h" #include <stdarg.h> #if !PX_DOXYGEN namespace physx { #endif // the following functions have C99 semantics. 
Note that C99 requires for snprintf and vsnprintf: // * the resulting string is always NULL-terminated regardless of truncation. // * in the case of truncation the return value is the number of characters that would have been created. PX_FOUNDATION_API int32_t Pxsscanf(const char* buffer, const char* format, ...); PX_FOUNDATION_API int32_t Pxstrcmp(const char* str1, const char* str2); PX_FOUNDATION_API int32_t Pxstrncmp(const char* str1, const char* str2, size_t count); PX_FOUNDATION_API int32_t Pxsnprintf(char* dst, size_t dstSize, const char* format, ...); PX_FOUNDATION_API int32_t Pxvsnprintf(char* dst, size_t dstSize, const char* src, va_list arg); // strlcat and strlcpy have BSD semantics: // * dstSize is always the size of the destination buffer // * the resulting string is always NULL-terminated regardless of truncation // * in the case of truncation the return value is the length of the string that would have been created PX_FOUNDATION_API size_t Pxstrlcat(char* dst, size_t dstSize, const char* src); PX_FOUNDATION_API size_t Pxstrlcpy(char* dst, size_t dstSize, const char* src); // case-insensitive string comparison PX_FOUNDATION_API int32_t Pxstricmp(const char* str1, const char* str2); PX_FOUNDATION_API int32_t Pxstrnicmp(const char* str1, const char* str2, size_t count); // in-place string case conversion PX_FOUNDATION_API void Pxstrlwr(char* str); PX_FOUNDATION_API void Pxstrupr(char* str); /** \brief Prints the string literally (does not consume % specifier), trying to make sure it's visible to the app programmer */ PX_FOUNDATION_API void PxPrintString(const char*); #if !PX_DOXYGEN } // namespace physx #endif #endif
3,633
C
44.424999
110
0.763832
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxHashInternals.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_HASH_INTERNALS_H #define PX_HASH_INTERNALS_H #include "foundation/PxAllocator.h" #include "foundation/PxBitUtils.h" #include "foundation/PxMathIntrinsics.h" #include "foundation/PxBasicTemplates.h" #include "foundation/PxHash.h" #if PX_VC #pragma warning(push) #pragma warning(disable : 4127) // conditional expression is constant #endif #if !PX_DOXYGEN namespace physx { #endif template <class Entry, class Key, class HashFn, class GetKey, class PxAllocator, bool compacting> class PxHashBase : private PxAllocator { void init(uint32_t initialTableSize, float loadFactor) { mBuffer = NULL; mEntries = NULL; mEntriesNext = NULL; mHash = NULL; mEntriesCapacity = 0; mHashSize = 0; mLoadFactor = loadFactor; mFreeList = uint32_t(EOL); mTimestamp = 0; mEntriesCount = 0; if(initialTableSize) reserveInternal(initialTableSize); } public: typedef Entry EntryType; PxHashBase(uint32_t initialTableSize = 64, float loadFactor = 0.75f) : PxAllocator("hashBase") { init(initialTableSize, loadFactor); } PxHashBase(uint32_t initialTableSize, float loadFactor, const PxAllocator& alloc) : PxAllocator(alloc) { init(initialTableSize, loadFactor); } PxHashBase(const PxAllocator& alloc) : PxAllocator(alloc) { init(64, 0.75f); } ~PxHashBase() { destroy(); // No need to clear() if(mBuffer) PxAllocator::deallocate(mBuffer); } static const uint32_t EOL = 0xffffffff; PX_INLINE Entry* create(const Key& k, bool& exists) { uint32_t h = 0; if(mHashSize) { h = hash(k); uint32_t index = mHash[h]; while(index != EOL && !HashFn().equal(GetKey()(mEntries[index]), k)) index = mEntriesNext[index]; exists = index != EOL; if(exists) return mEntries + index; } else exists = false; if(freeListEmpty()) { grow(); h = hash(k); } uint32_t entryIndex = freeListGetNext(); mEntriesNext[entryIndex] = mHash[h]; mHash[h] = entryIndex; mEntriesCount++; mTimestamp++; return mEntries + entryIndex; } PX_INLINE const Entry* find(const Key& k) const { if(!mEntriesCount) return NULL; const uint32_t h = hash(k); uint32_t 
index = mHash[h]; while(index != EOL && !HashFn().equal(GetKey()(mEntries[index]), k)) index = mEntriesNext[index]; return index != EOL ? mEntries + index : NULL; } PX_INLINE bool erase(const Key& k, Entry& e) { if(!mEntriesCount) return false; const uint32_t h = hash(k); uint32_t* ptr = mHash + h; while(*ptr != EOL && !HashFn().equal(GetKey()(mEntries[*ptr]), k)) ptr = mEntriesNext + *ptr; if(*ptr == EOL) return false; PX_PLACEMENT_NEW(&e, Entry)(mEntries[*ptr]); return eraseInternal(ptr); } PX_INLINE bool erase(const Key& k) { if(!mEntriesCount) return false; const uint32_t h = hash(k); uint32_t* ptr = mHash + h; while(*ptr != EOL && !HashFn().equal(GetKey()(mEntries[*ptr]), k)) ptr = mEntriesNext + *ptr; if(*ptr == EOL) return false; return eraseInternal(ptr); } PX_INLINE uint32_t size() const { return mEntriesCount; } PX_INLINE uint32_t capacity() const { return mHashSize; } void clear() { if(!mHashSize || mEntriesCount == 0) return; destroy(); intrinsics::memSet(mHash, EOL, mHashSize * sizeof(uint32_t)); const uint32_t sizeMinus1 = mEntriesCapacity - 1; for(uint32_t i = 0; i < sizeMinus1; i++) { PxPrefetchLine(mEntriesNext + i, 128); mEntriesNext[i] = i + 1; } mEntriesNext[mEntriesCapacity - 1] = uint32_t(EOL); mFreeList = 0; mEntriesCount = 0; } void reserve(uint32_t size) { if(size > mHashSize) reserveInternal(size); } PX_INLINE const Entry* getEntries() const { return mEntries; } PX_INLINE Entry* insertUnique(const Key& k) { PX_ASSERT(find(k) == NULL); uint32_t h = hash(k); uint32_t entryIndex = freeListGetNext(); mEntriesNext[entryIndex] = mHash[h]; mHash[h] = entryIndex; mEntriesCount++; mTimestamp++; return mEntries + entryIndex; } private: void destroy() { for(uint32_t i = 0; i < mHashSize; i++) { for(uint32_t j = mHash[i]; j != EOL; j = mEntriesNext[j]) mEntries[j].~Entry(); } } template <typename HK, typename GK, class A, bool comp> PX_NOINLINE void copy(const PxHashBase<Entry, Key, HK, GK, A, comp>& other); // free list management - if we're 
coalescing, then we use mFreeList to hold // the top of the free list and it should always be equal to size(). Otherwise, // we build a free list in the next() pointers. PX_INLINE void freeListAdd(uint32_t index) { if(compacting) { mFreeList--; PX_ASSERT(mFreeList == mEntriesCount); } else { mEntriesNext[index] = mFreeList; mFreeList = index; } } PX_INLINE void freeListAdd(uint32_t start, uint32_t end) { if(!compacting) { for(uint32_t i = start; i < end - 1; i++) // add the new entries to the free list mEntriesNext[i] = i + 1; // link in old free list mEntriesNext[end - 1] = mFreeList; PX_ASSERT(mFreeList != end - 1); mFreeList = start; } else if(mFreeList == EOL) // don't reset the free ptr for the compacting hash unless it's empty mFreeList = start; } PX_INLINE uint32_t freeListGetNext() { PX_ASSERT(!freeListEmpty()); if(compacting) { PX_ASSERT(mFreeList == mEntriesCount); return mFreeList++; } else { uint32_t entryIndex = mFreeList; mFreeList = mEntriesNext[mFreeList]; return entryIndex; } } PX_INLINE bool freeListEmpty() const { if(compacting) return mEntriesCount == mEntriesCapacity; else return mFreeList == EOL; } PX_INLINE void replaceWithLast(uint32_t index) { PX_PLACEMENT_NEW(mEntries + index, Entry)(mEntries[mEntriesCount]); mEntries[mEntriesCount].~Entry(); mEntriesNext[index] = mEntriesNext[mEntriesCount]; uint32_t h = hash(GetKey()(mEntries[index])); uint32_t* ptr; for(ptr = mHash + h; *ptr != mEntriesCount; ptr = mEntriesNext + *ptr) PX_ASSERT(*ptr != EOL); *ptr = index; } PX_INLINE uint32_t hash(const Key& k, uint32_t hashSize) const { return HashFn()(k) & (hashSize - 1); } PX_INLINE uint32_t hash(const Key& k) const { return hash(k, mHashSize); } PX_INLINE bool eraseInternal(uint32_t* ptr) { const uint32_t index = *ptr; *ptr = mEntriesNext[index]; mEntries[index].~Entry(); mEntriesCount--; mTimestamp++; if (compacting && index != mEntriesCount) replaceWithLast(index); freeListAdd(index); return true; } PX_NOINLINE void reserveInternal(uint32_t size) 
{ if(!PxIsPowerOfTwo(size)) size = PxNextPowerOfTwo(size); PX_ASSERT(!(size & (size - 1))); // decide whether iteration can be done on the entries directly bool resizeCompact = compacting || freeListEmpty(); // define new table sizes uint32_t oldEntriesCapacity = mEntriesCapacity; uint32_t newEntriesCapacity = uint32_t(float(size) * mLoadFactor); uint32_t newHashSize = size; // allocate new common buffer and setup pointers to new tables uint8_t* newBuffer; uint32_t* newHash; uint32_t* newEntriesNext; Entry* newEntries; { uint32_t newHashByteOffset = 0; uint32_t newEntriesNextBytesOffset = newHashByteOffset + newHashSize * sizeof(uint32_t); uint32_t newEntriesByteOffset = newEntriesNextBytesOffset + newEntriesCapacity * sizeof(uint32_t); newEntriesByteOffset += (16 - (newEntriesByteOffset & 15)) & 15; uint32_t newBufferByteSize = newEntriesByteOffset + newEntriesCapacity * sizeof(Entry); newBuffer = reinterpret_cast<uint8_t*>(PxAllocator::allocate(newBufferByteSize, PX_FL)); PX_ASSERT(newBuffer); newHash = reinterpret_cast<uint32_t*>(newBuffer + newHashByteOffset); newEntriesNext = reinterpret_cast<uint32_t*>(newBuffer + newEntriesNextBytesOffset); newEntries = reinterpret_cast<Entry*>(newBuffer + newEntriesByteOffset); } // initialize new hash table intrinsics::memSet(newHash, int32_t(EOL), newHashSize * sizeof(uint32_t)); // iterate over old entries, re-hash and create new entries if(resizeCompact) { // check that old free list is empty - we don't need to copy the next entries PX_ASSERT(compacting || mFreeList == EOL); for(uint32_t index = 0; index < mEntriesCount; ++index) { uint32_t h = hash(GetKey()(mEntries[index]), newHashSize); newEntriesNext[index] = newHash[h]; newHash[h] = index; PX_PLACEMENT_NEW(newEntries + index, Entry)(mEntries[index]); mEntries[index].~Entry(); } } else { // copy old free list, only required for non compact resizing intrinsics::memCopy(newEntriesNext, mEntriesNext, mEntriesCapacity * sizeof(uint32_t)); for(uint32_t bucket = 0; bucket 
< mHashSize; bucket++) { uint32_t index = mHash[bucket]; while(index != EOL) { uint32_t h = hash(GetKey()(mEntries[index]), newHashSize); newEntriesNext[index] = newHash[h]; PX_ASSERT(index != newHash[h]); newHash[h] = index; PX_PLACEMENT_NEW(newEntries + index, Entry)(mEntries[index]); mEntries[index].~Entry(); index = mEntriesNext[index]; } } } // swap buffer and pointers PxAllocator::deallocate(mBuffer); mBuffer = newBuffer; mHash = newHash; mHashSize = newHashSize; mEntriesNext = newEntriesNext; mEntries = newEntries; mEntriesCapacity = newEntriesCapacity; freeListAdd(oldEntriesCapacity, newEntriesCapacity); } void grow() { PX_ASSERT((mFreeList == EOL) || (compacting && (mEntriesCount == mEntriesCapacity))); uint32_t size = mHashSize == 0 ? 16 : mHashSize * 2; reserve(size); } uint8_t* mBuffer; Entry* mEntries; uint32_t* mEntriesNext; // same size as mEntries uint32_t* mHash; uint32_t mEntriesCapacity; uint32_t mHashSize; float mLoadFactor; uint32_t mFreeList; uint32_t mTimestamp; uint32_t mEntriesCount; // number of entries public: class Iter { public: PX_INLINE Iter(PxHashBase& b) : mBucket(0), mEntry(uint32_t(b.EOL)), mTimestamp(b.mTimestamp), mBase(b) { if(mBase.mEntriesCapacity > 0) { mEntry = mBase.mHash[0]; skip(); } } PX_INLINE void check() const { PX_ASSERT(mTimestamp == mBase.mTimestamp); } PX_INLINE const Entry& operator*() const { check(); return mBase.mEntries[mEntry]; } PX_INLINE Entry& operator*() { check(); return mBase.mEntries[mEntry]; } PX_INLINE const Entry* operator->() const { check(); return mBase.mEntries + mEntry; } PX_INLINE Entry* operator->() { check(); return mBase.mEntries + mEntry; } PX_INLINE Iter operator++() { check(); advance(); return *this; } PX_INLINE Iter operator++(int) { check(); Iter i = *this; advance(); return i; } PX_INLINE bool done() const { check(); return mEntry == mBase.EOL; } private: PX_INLINE void advance() { mEntry = mBase.mEntriesNext[mEntry]; skip(); } PX_INLINE void skip() { while(mEntry == mBase.EOL) { 
if(++mBucket == mBase.mHashSize) break; mEntry = mBase.mHash[mBucket]; } } Iter& operator=(const Iter&); uint32_t mBucket; uint32_t mEntry; uint32_t mTimestamp; PxHashBase& mBase; }; /*! Iterate over entries in a hash base and allow entry erase while iterating */ class PxEraseIterator { public: PX_INLINE PxEraseIterator(PxHashBase& b): mBase(b) { reset(); } PX_INLINE Entry* eraseCurrentGetNext(bool eraseCurrent) { if(eraseCurrent && mCurrentEntryIndexPtr) { mBase.eraseInternal(mCurrentEntryIndexPtr); // if next was valid return the same ptr, if next was EOL search new hash entry if(*mCurrentEntryIndexPtr != mBase.EOL) return mBase.mEntries + *mCurrentEntryIndexPtr; else return traverseHashEntries(); } // traverse mHash to find next entry if(mCurrentEntryIndexPtr == NULL) return traverseHashEntries(); const uint32_t index = *mCurrentEntryIndexPtr; if(mBase.mEntriesNext[index] == mBase.EOL) { return traverseHashEntries(); } else { mCurrentEntryIndexPtr = mBase.mEntriesNext + index; return mBase.mEntries + *mCurrentEntryIndexPtr; } } PX_INLINE void reset() { mCurrentHashIndex = 0; mCurrentEntryIndexPtr = NULL; } private: PX_INLINE Entry* traverseHashEntries() { mCurrentEntryIndexPtr = NULL; while (mCurrentEntryIndexPtr == NULL && mCurrentHashIndex < mBase.mHashSize) { if (mBase.mHash[mCurrentHashIndex] != mBase.EOL) { mCurrentEntryIndexPtr = mBase.mHash + mCurrentHashIndex; mCurrentHashIndex++; return mBase.mEntries + *mCurrentEntryIndexPtr; } else { mCurrentHashIndex++; } } return NULL; } PxEraseIterator& operator=(const PxEraseIterator&); private: uint32_t* mCurrentEntryIndexPtr; uint32_t mCurrentHashIndex; PxHashBase& mBase; }; }; template <class Entry, class Key, class HashFn, class GetKey, class PxAllocator, bool compacting> template <typename HK, typename GK, class A, bool comp> PX_NOINLINE void PxHashBase<Entry, Key, HashFn, GetKey, PxAllocator, compacting>::copy(const PxHashBase<Entry, Key, HK, GK, A, comp>& other) { reserve(other.mEntriesCount); for(uint32_t 
i = 0; i < other.mEntriesCount; i++) { for(uint32_t j = other.mHash[i]; j != EOL; j = other.mEntriesNext[j]) { const Entry& otherEntry = other.mEntries[j]; bool exists; Entry* newEntry = create(GK()(otherEntry), exists); PX_ASSERT(!exists); PX_PLACEMENT_NEW(newEntry, Entry)(otherEntry); } } } template <class Key, class HashFn, class PxAllocator = typename PxAllocatorTraits<Key>::Type, bool Coalesced = false> class PxHashSetBase { PX_NOCOPY(PxHashSetBase) public: struct GetKey { PX_INLINE const Key& operator()(const Key& e) { return e; } }; typedef PxHashBase<Key, Key, HashFn, GetKey, PxAllocator, Coalesced> BaseMap; typedef typename BaseMap::Iter Iterator; PxHashSetBase(uint32_t initialTableSize, float loadFactor, const PxAllocator& alloc) : mBase(initialTableSize, loadFactor, alloc) { } PxHashSetBase(const PxAllocator& alloc) : mBase(64, 0.75f, alloc) { } PxHashSetBase(uint32_t initialTableSize = 64, float loadFactor = 0.75f) : mBase(initialTableSize, loadFactor) { } bool insert(const Key& k) { bool exists; Key* e = mBase.create(k, exists); if(!exists) PX_PLACEMENT_NEW(e, Key)(k); return !exists; } PX_INLINE bool contains(const Key& k) const { return mBase.find(k) != 0; } PX_INLINE bool erase(const Key& k) { return mBase.erase(k); } PX_INLINE uint32_t size() const { return mBase.size(); } PX_INLINE uint32_t capacity() const { return mBase.capacity(); } PX_INLINE void reserve(uint32_t size) { mBase.reserve(size); } PX_INLINE void clear() { mBase.clear(); } protected: BaseMap mBase; }; template <class Key, class Value, class HashFn, class PxAllocator = typename PxAllocatorTraits<PxPair<const Key, Value> >::Type> class PxHashMapBase { PX_NOCOPY(PxHashMapBase) public: typedef PxPair<const Key, Value> Entry; struct GetKey { PX_INLINE const Key& operator()(const Entry& e) { return e.first; } }; typedef PxHashBase<Entry, Key, HashFn, GetKey, PxAllocator, true> BaseMap; typedef typename BaseMap::Iter Iterator; typedef typename BaseMap::PxEraseIterator EraseIterator; 
PxHashMapBase(uint32_t initialTableSize, float loadFactor, const PxAllocator& alloc) : mBase(initialTableSize, loadFactor, alloc) { } PxHashMapBase(const PxAllocator& alloc) : mBase(64, 0.75f, alloc) { } PxHashMapBase(uint32_t initialTableSize = 64, float loadFactor = 0.75f) : mBase(initialTableSize, loadFactor) { } bool insert(const Key /*&*/ k, const Value /*&*/ v) { bool exists; Entry* e = mBase.create(k, exists); if(!exists) PX_PLACEMENT_NEW(e, Entry)(k, v); return !exists; } Value& operator[](const Key& k) { bool exists; Entry* e = mBase.create(k, exists); if(!exists) PX_PLACEMENT_NEW(e, Entry)(k, Value()); return e->second; } PX_INLINE const Entry* find(const Key& k) const { return mBase.find(k); } PX_INLINE bool erase(const Key& k) { return mBase.erase(k); } PX_INLINE bool erase(const Key& k, Entry& e) { return mBase.erase(k, e); } PX_INLINE uint32_t size() const { return mBase.size(); } PX_INLINE uint32_t capacity() const { return mBase.capacity(); } PX_INLINE Iterator getIterator() { return Iterator(mBase); } PX_INLINE EraseIterator getEraseIterator() { return EraseIterator(mBase); } PX_INLINE void reserve(uint32_t size) { mBase.reserve(size); } PX_INLINE void clear() { mBase.clear(); } protected: BaseMap mBase; }; #if !PX_DOXYGEN } // namespace physx #endif #if PX_VC #pragma warning(pop) #endif #endif
18,386
C
22.186633
128
0.681279
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxSync.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_SYNC_H #define PX_SYNC_H #include "foundation/PxAllocator.h" #if !PX_DOXYGEN namespace physx { #endif /*! Implementation notes: * - Calling set() on an already signaled Sync does not change its state. * - Calling reset() on an already reset Sync does not change its state. 
* - Calling set() on a reset Sync wakes all waiting threads (potential for thread contention). * - Calling wait() on an already signaled Sync will return true immediately. * - NOTE: be careful when pulsing an event with set() followed by reset(), because a * thread that is not waiting on the event will miss the signal. */ class PX_FOUNDATION_API PxSyncImpl { public: static const uint32_t waitForever = 0xffffffff; PxSyncImpl(); ~PxSyncImpl(); /** Wait on the object for at most the given number of ms. Returns * true if the object is signaled. Sync::waitForever will block forever * or until the object is signaled. */ bool wait(uint32_t milliseconds = waitForever); /** Signal the synchronization object, waking all threads waiting on it */ void set(); /** Reset the synchronization object */ void reset(); /** Size of this class. */ static uint32_t getSize(); }; /*! Implementation notes: * - Calling set() on an already signaled Sync does not change its state. * - Calling reset() on an already reset Sync does not change its state. * - Calling set() on a reset Sync wakes all waiting threads (potential for thread contention). * - Calling wait() on an already signaled Sync will return true immediately. * - NOTE: be careful when pulsing an event with set() followed by reset(), because a * thread that is not waiting on the event will miss the signal. */ template <typename Alloc = PxReflectionAllocator<PxSyncImpl> > class PxSyncT : protected Alloc { public: static const uint32_t waitForever = PxSyncImpl::waitForever; PxSyncT(const Alloc& alloc = Alloc()) : Alloc(alloc) { mImpl = reinterpret_cast<PxSyncImpl*>(Alloc::allocate(PxSyncImpl::getSize(), PX_FL)); PX_PLACEMENT_NEW(mImpl, PxSyncImpl)(); } ~PxSyncT() { mImpl->~PxSyncImpl(); Alloc::deallocate(mImpl); } /** Wait on the object for at most the given number of ms. Returns * true if the object is signaled. Sync::waitForever will block forever * or until the object is signaled. 
*/ bool wait(uint32_t milliseconds = PxSyncImpl::waitForever) { return mImpl->wait(milliseconds); } /** Signal the synchronization object, waking all threads waiting on it */ void set() { mImpl->set(); } /** Reset the synchronization object */ void reset() { mImpl->reset(); } private: class PxSyncImpl* mImpl; }; typedef PxSyncT<> PxSync; #if !PX_DOXYGEN } // namespace physx #endif #endif
4,410
C
30.507143
94
0.73356
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxMemory.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_MEMORY_H #define PX_MEMORY_H /** \addtogroup foundation @{ */ #include "foundation/Px.h" #include "foundation/PxMathIntrinsics.h" #include "foundation/PxSimpleTypes.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief Sets the bytes of the provided buffer to zero. 
\param dest [out] Pointer to block of memory to set zero. \param count [in] Number of bytes to set to zero. \return Pointer to memory block (same as input) */ PX_FORCE_INLINE void* PxMemZero(void* dest, PxU32 count) { return physx::intrinsics::memZero(dest, count); } /** \brief Sets the bytes of the provided buffer to the specified value. \param dest [out] Pointer to block of memory to set to the specified value. \param c [in] Value to set the bytes of the block of memory to. \param count [in] Number of bytes to set to the specified value. \return Pointer to memory block (same as input) */ PX_FORCE_INLINE void* PxMemSet(void* dest, PxI32 c, PxU32 count) { return physx::intrinsics::memSet(dest, c, count); } /** \brief Copies the bytes of one memory block to another. The memory blocks must not overlap. \note Use #PxMemMove if memory blocks overlap. \param dest [out] Pointer to block of memory to copy to. \param src [in] Pointer to block of memory to copy from. \param count [in] Number of bytes to copy. \return Pointer to destination memory block */ PX_FORCE_INLINE void* PxMemCopy(void* dest, const void* src, PxU32 count) { return physx::intrinsics::memCopy(dest, src, count); } /** \brief Copies the bytes of one memory block to another. The memory blocks can overlap. \note Use #PxMemCopy if memory blocks do not overlap. \param dest [out] Pointer to block of memory to copy to. \param src [in] Pointer to block of memory to copy from. \param count [in] Number of bytes to copy. \return Pointer to destination memory block */ PX_FORCE_INLINE void* PxMemMove(void* dest, const void* src, PxU32 count) { return physx::intrinsics::memMove(dest, src, count); } /** Mark a specified amount of memory with 0xcd pattern. This is used to check that the meta data definition for serialized classes is complete in checked builds. \param ptr [out] Pointer to block of memory to initialize. \param byteSize [in] Number of bytes to initialize. 
*/ PX_INLINE void PxMarkSerializedMemory(void* ptr, PxU32 byteSize) { #if PX_CHECKED PxMemSet(ptr, 0xcd, byteSize); #else PX_UNUSED(ptr); PX_UNUSED(byteSize); #endif } #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
4,244
C
32.164062
95
0.736805
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxSortInternals.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_SORT_INTERNALS_H #define PX_SORT_INTERNALS_H /** \addtogroup foundation @{ */ #include "foundation/PxAssert.h" #include "foundation/PxMathIntrinsics.h" #include "foundation/PxBasicTemplates.h" #include "foundation/PxUserAllocated.h" #if !PX_DOXYGEN namespace physx { #endif template <class T, class Predicate> PX_INLINE void PxMedian3(T* elements, int32_t first, int32_t last, Predicate& compare) { /* This creates sentinels because we know there is an element at the start minimum(or equal) than the pivot and an element at the end greater(or equal) than the pivot. Plus the median of 3 reduces the chance of degenerate behavour. */ int32_t mid = (first + last) / 2; if(compare(elements[mid], elements[first])) PxSwap(elements[first], elements[mid]); if(compare(elements[last], elements[first])) PxSwap(elements[first], elements[last]); if(compare(elements[last], elements[mid])) PxSwap(elements[mid], elements[last]); // keep the pivot at last-1 PxSwap(elements[mid], elements[last - 1]); } template <class T, class Predicate> PX_INLINE int32_t PxPartition(T* elements, int32_t first, int32_t last, Predicate& compare) { PxMedian3(elements, first, last, compare); /* WARNING: using the line: T partValue = elements[last-1]; and changing the scan loops to: while(comparator.greater(partValue, elements[++i])); while(comparator.greater(elements[--j], partValue); triggers a compiler optimizer bug on xenon where it stores a double to the stack for partValue then loads it as a single...:-( */ int32_t i = first; // we know first is less than pivot(but i gets pre incremented) int32_t j = last - 1; // pivot is in last-1 (but j gets pre decremented) for(;;) { while(compare(elements[++i], elements[last - 1])) ; while(compare(elements[last - 1], elements[--j])) ; if(i >= j) break; PX_ASSERT(i <= last && j >= first); PxSwap(elements[i], elements[j]); } // put the pivot in place PX_ASSERT(i <= last && first <= (last - 1)); PxSwap(elements[i], elements[last - 1]); return i; } template 
<class T, class Predicate> PX_INLINE void PxSmallSort(T* elements, int32_t first, int32_t last, Predicate& compare) { // selection sort - could reduce to fsel on 360 with floats. for(int32_t i = first; i < last; i++) { int32_t m = i; for(int32_t j = i + 1; j <= last; j++) if(compare(elements[j], elements[m])) m = j; if(m != i) PxSwap(elements[m], elements[i]); } } template <class PxAllocator> class PxStack { PxAllocator mAllocator; uint32_t mSize, mCapacity; int32_t* mMemory; bool mRealloc; public: PxStack(int32_t* memory, uint32_t capacity, const PxAllocator& inAllocator) : mAllocator(inAllocator), mSize(0), mCapacity(capacity), mMemory(memory), mRealloc(false) { } ~PxStack() { if(mRealloc) mAllocator.deallocate(mMemory); } void grow() { mCapacity *= 2; int32_t* newMem = reinterpret_cast<int32_t*>(mAllocator.allocate(sizeof(int32_t) * mCapacity, PX_FL)); intrinsics::memCopy(newMem, mMemory, mSize * sizeof(int32_t)); if(mRealloc) mAllocator.deallocate(mMemory); mRealloc = true; mMemory = newMem; } PX_INLINE void push(int32_t start, int32_t end) { if(mSize >= mCapacity - 1) grow(); mMemory[mSize++] = start; mMemory[mSize++] = end; } PX_INLINE void pop(int32_t& start, int32_t& end) { PX_ASSERT(!empty()); end = mMemory[--mSize]; start = mMemory[--mSize]; } PX_INLINE bool empty() { return mSize == 0; } }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
5,256
C
27.112299
95
0.705289
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxQuat.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_QUAT_H #define PX_QUAT_H /** \addtogroup foundation @{ */ #include "foundation/PxVec3.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief This is a quaternion class. For more information on quaternion mathematics consult a mathematics source on complex numbers. 
*/ template<class Type> class PxQuatT { public: /** \brief Default constructor, does not do any initialization. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT() { } //! identity constructor PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT(PxIDENTITY) : x(Type(0.0)), y(Type(0.0)), z(Type(0.0)), w(Type(1.0)) { } /** \brief Constructor from a scalar: sets the real part w to the scalar value, and the imaginary parts (x,y,z) to zero */ explicit PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT(Type r) : x(Type(0.0)), y(Type(0.0)), z(Type(0.0)), w(r) { } /** \brief Constructor. Take note of the order of the elements! */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT(Type nx, Type ny, Type nz, Type nw) : x(nx), y(ny), z(nz), w(nw) { } /** \brief Creates from angle-axis representation. Axis must be normalized! Angle is in radians! <b>Unit:</b> Radians */ PX_CUDA_CALLABLE PX_INLINE PxQuatT(Type angleRadians, const PxVec3T<Type>& unitAxis) { PX_ASSERT(PxAbs(Type(1.0) - unitAxis.magnitude()) < Type(1e-3)); const Type a = angleRadians * Type(0.5); Type s; PxSinCos(a, s, w); x = unitAxis.x * s; y = unitAxis.y * s; z = unitAxis.z * s; } /** \brief Copy ctor. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT(const PxQuatT& v) : x(v.x), y(v.y), z(v.z), w(v.w) { } /** \brief Creates from orientation matrix. \param[in] m Rotation matrix to extract quaternion from. */ PX_CUDA_CALLABLE PX_INLINE explicit PxQuatT(const PxMat33T<Type>& m); /* defined in PxMat33.h */ /** \brief returns true if quat is identity */ PX_CUDA_CALLABLE PX_FORCE_INLINE bool isIdentity() const { return x==Type(0.0) && y==Type(0.0) && z==Type(0.0) && w==Type(1.0); } /** \brief returns true if all elements are finite (not NAN or INF, etc.) 
*/ PX_CUDA_CALLABLE bool isFinite() const { return PxIsFinite(x) && PxIsFinite(y) && PxIsFinite(z) && PxIsFinite(w); } /** \brief returns true if finite and magnitude is close to unit */ PX_CUDA_CALLABLE bool isUnit() const { const Type unitTolerance = Type(1e-3); return isFinite() && PxAbs(magnitude() - Type(1.0)) < unitTolerance; } /** \brief returns true if finite and magnitude is reasonably close to unit to allow for some accumulation of error vs isValid */ PX_CUDA_CALLABLE bool isSane() const { const Type unitTolerance = Type(1e-2); return isFinite() && PxAbs(magnitude() - Type(1.0)) < unitTolerance; } /** \brief returns true if the two quaternions are exactly equal */ PX_CUDA_CALLABLE PX_FORCE_INLINE bool operator==(const PxQuatT& q) const { return x == q.x && y == q.y && z == q.z && w == q.w; } /** \brief converts this quaternion to angle-axis representation */ PX_CUDA_CALLABLE PX_INLINE void toRadiansAndUnitAxis(Type& angle, PxVec3T<Type>& axis) const { const Type quatEpsilon = Type(1.0e-8); const Type s2 = x * x + y * y + z * z; if(s2 < quatEpsilon * quatEpsilon) // can't extract a sensible axis { angle = Type(0.0); axis = PxVec3T<Type>(Type(1.0), Type(0.0), Type(0.0)); } else { const Type s = PxRecipSqrt(s2); axis = PxVec3T<Type>(x, y, z) * s; angle = PxAbs(w) < quatEpsilon ? Type(PxPi) : PxAtan2(s2 * s, w) * Type(2.0); } } /** \brief Gets the angle between this quat and the identity quaternion. <b>Unit:</b> Radians */ PX_CUDA_CALLABLE PX_FORCE_INLINE Type getAngle() const { return PxAcos(w) * Type(2.0); } /** \brief Gets the angle between this quat and the argument <b>Unit:</b> Radians */ PX_CUDA_CALLABLE PX_FORCE_INLINE Type getAngle(const PxQuatT& q) const { return PxAcos(dot(q)) * Type(2.0); } /** \brief This is the squared 4D vector length, should be 1 for unit quaternions. */ PX_CUDA_CALLABLE PX_FORCE_INLINE Type magnitudeSquared() const { return x * x + y * y + z * z + w * w; } /** \brief returns the scalar product of this and other. 
*/ PX_CUDA_CALLABLE PX_FORCE_INLINE Type dot(const PxQuatT& v) const { return x * v.x + y * v.y + z * v.z + w * v.w; } PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT getNormalized() const { const Type s = Type(1.0) / magnitude(); return PxQuatT(x * s, y * s, z * s, w * s); } PX_CUDA_CALLABLE PX_FORCE_INLINE Type magnitude() const { return PxSqrt(magnitudeSquared()); } // modifiers: /** \brief maps to the closest unit quaternion. */ PX_CUDA_CALLABLE PX_FORCE_INLINE Type normalize() // convert this PxQuatT to a unit quaternion { const Type mag = magnitude(); if(mag != Type(0.0)) { const Type imag = Type(1.0) / mag; x *= imag; y *= imag; z *= imag; w *= imag; } return mag; } /* \brief returns the conjugate. \note for unit quaternions, this is the inverse. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT getConjugate() const { return PxQuatT(-x, -y, -z, w); } /* \brief returns imaginary part. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> getImaginaryPart() const { return PxVec3T<Type>(x, y, z); } /** brief computes rotation of x-axis */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> getBasisVector0() const { const Type x2 = x * Type(2.0); const Type w2 = w * Type(2.0); return PxVec3T<Type>((w * w2) - Type(1.0) + x * x2, (z * w2) + y * x2, (-y * w2) + z * x2); } /** brief computes rotation of y-axis */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> getBasisVector1() const { const Type y2 = y * Type(2.0); const Type w2 = w * Type(2.0); return PxVec3T<Type>((-z * w2) + x * y2, (w * w2) - Type(1.0) + y * y2, (x * w2) + z * y2); } /** brief computes rotation of z-axis */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> getBasisVector2() const { const Type z2 = z * Type(2.0); const Type w2 = w * Type(2.0); return PxVec3T<Type>((y * w2) + x * z2, (-x * w2) + y * z2, (w * w2) - Type(1.0) + z * z2); } /** rotates passed vec by this (assumed unitary) */ PX_CUDA_CALLABLE PX_FORCE_INLINE const PxVec3T<Type> rotate(const PxVec3T<Type>& v) const { const Type vx = Type(2.0) * v.x; const Type 
vy = Type(2.0) * v.y; const Type vz = Type(2.0) * v.z; const Type w2 = w * w - 0.5f; const Type dot2 = (x * vx + y * vy + z * vz); return PxVec3T<Type>((vx * w2 + (y * vz - z * vy) * w + x * dot2), (vy * w2 + (z * vx - x * vz) * w + y * dot2), (vz * w2 + (x * vy - y * vx) * w + z * dot2)); } /** inverse rotates passed vec by this (assumed unitary) */ PX_CUDA_CALLABLE PX_FORCE_INLINE const PxVec3T<Type> rotateInv(const PxVec3T<Type>& v) const { const Type vx = Type(2.0) * v.x; const Type vy = Type(2.0) * v.y; const Type vz = Type(2.0) * v.z; const Type w2 = w * w - 0.5f; const Type dot2 = (x * vx + y * vy + z * vz); return PxVec3T<Type>((vx * w2 - (y * vz - z * vy) * w + x * dot2), (vy * w2 - (z * vx - x * vz) * w + y * dot2), (vz * w2 - (x * vy - y * vx) * w + z * dot2)); } /** \brief Assignment operator */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT& operator=(const PxQuatT& p) { x = p.x; y = p.y; z = p.z; w = p.w; return *this; } PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT& operator*=(const PxQuatT& q) { const Type tx = w * q.x + q.w * x + y * q.z - q.y * z; const Type ty = w * q.y + q.w * y + z * q.x - q.z * x; const Type tz = w * q.z + q.w * z + x * q.y - q.x * y; w = w * q.w - q.x * x - y * q.y - q.z * z; x = tx; y = ty; z = tz; return *this; } PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT& operator+=(const PxQuatT& q) { x += q.x; y += q.y; z += q.z; w += q.w; return *this; } PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT& operator-=(const PxQuatT& q) { x -= q.x; y -= q.y; z -= q.z; w -= q.w; return *this; } PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT& operator*=(const Type s) { x *= s; y *= s; z *= s; w *= s; return *this; } /** quaternion multiplication */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT operator*(const PxQuatT& q) const { return PxQuatT(w * q.x + q.w * x + y * q.z - q.y * z, w * q.y + q.w * y + z * q.x - q.z * x, w * q.z + q.w * z + x * q.y - q.x * y, w * q.w - x * q.x - y * q.y - z * q.z); } /** quaternion addition */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT 
operator+(const PxQuatT& q) const { return PxQuatT(x + q.x, y + q.y, z + q.z, w + q.w); } /** quaternion subtraction */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT operator-() const { return PxQuatT(-x, -y, -z, -w); } PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT operator-(const PxQuatT& q) const { return PxQuatT(x - q.x, y - q.y, z - q.z, w - q.w); } PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT operator*(Type r) const { return PxQuatT(x * r, y * r, z * r, w * r); } /** the quaternion elements */ Type x, y, z, w; }; typedef PxQuatT<float> PxQuat; typedef PxQuatT<double> PxQuatd; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
10,768
C
25.459459
116
0.633265
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxSort.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_SORT_H #define PX_SORT_H /** \addtogroup foundation @{ */ #include "foundation/PxSortInternals.h" #include "foundation/PxAlloca.h" #define PX_SORT_PARANOIA PX_DEBUG /** \brief Sorts an array of objects in ascending order, assuming that the predicate implements the < operator: @see PxLess, PxGreater */ #if PX_VC #pragma warning(push) #pragma warning(disable : 4706) // disable the warning that we did an assignment within a conditional expression, as // this was intentional. #endif #if !PX_DOXYGEN namespace physx { #endif template <class T, class Predicate, class PxAllocator> void PxSort(T* elements, uint32_t count, const Predicate& compare, const PxAllocator& inAllocator, const uint32_t initialStackSize = 32) { static const uint32_t SMALL_SORT_CUTOFF = 5; // must be >= 3 since we need 3 for median PX_ALLOCA(stackMem, int32_t, initialStackSize); PxStack<PxAllocator> stack(stackMem, initialStackSize, inAllocator); int32_t first = 0, last = int32_t(count - 1); if(last > first) { for(;;) { while(last > first) { PX_ASSERT(first >= 0 && last < int32_t(count)); if(uint32_t(last - first) < SMALL_SORT_CUTOFF) { PxSmallSort(elements, first, last, compare); break; } else { const int32_t partIndex = PxPartition(elements, first, last, compare); // push smaller sublist to minimize stack usage if((partIndex - first) < (last - partIndex)) { stack.push(first, partIndex - 1); first = partIndex + 1; } else { stack.push(partIndex + 1, last); last = partIndex - 1; } } } if(stack.empty()) break; stack.pop(first, last); } } #if PX_SORT_PARANOIA for(uint32_t i = 1; i < count; i++) PX_ASSERT(!compare(elements[i], elements[i - 1])); #endif } template <class T, class Predicate> void PxSort(T* elements, uint32_t count, const Predicate& compare) { PxSort(elements, count, compare, typename PxAllocatorTraits<T>::Type()); } template <class T> void PxSort(T* elements, uint32_t count) { PxSort(elements, count, PxLess<T>(), typename PxAllocatorTraits<T>::Type()); } #if !PX_DOXYGEN } // 
namespace physx #endif #if PX_VC #pragma warning(pop) #endif /** @} */ #endif
3,935
C
28.818182
116
0.708513
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxMathUtils.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_MATH_UTILS_H #define PX_MATH_UTILS_H /** \addtogroup common @{ */ #include "foundation/PxFoundationConfig.h" #include "foundation/Px.h" #include "foundation/PxVec4.h" #include "foundation/PxAssert.h" #include "foundation/PxPlane.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief finds the shortest rotation between two vectors. \param[in] from the vector to start from \param[in] target the vector to rotate to \return a rotation about an axis normal to the two vectors which takes one to the other via the shortest path */ PX_FOUNDATION_API PxQuat PxShortestRotation(const PxVec3& from, const PxVec3& target); /* \brief diagonalizes a 3x3 symmetric matrix y The returned matrix satisfies M = R * D * R', where R is the rotation matrix for the output quaternion, R' its transpose, and D the diagonal matrix If the matrix is not symmetric, the result is undefined. \param[in] m the matrix to diagonalize \param[out] axes a quaternion rotation which diagonalizes the matrix \return the vector diagonal of the diagonalized matrix. */ PX_FOUNDATION_API PxVec3 PxDiagonalize(const PxMat33& m, PxQuat& axes); /** \brief creates a transform from the endpoints of a segment, suitable for an actor transform for a PxCapsuleGeometry \param[in] p0 one end of major axis of the capsule \param[in] p1 the other end of the axis of the capsule \param[out] halfHeight the halfHeight of the capsule. This parameter is optional. 
\return A PxTransform which will transform the vector (1,0,0) to the capsule axis shrunk by the halfHeight */ PX_FOUNDATION_API PxTransform PxTransformFromSegment(const PxVec3& p0, const PxVec3& p1, PxReal* halfHeight = NULL); /** \brief creates a transform from a plane equation, suitable for an actor transform for a PxPlaneGeometry \param[in] plane the desired plane equation \return a PxTransform which will transform the plane PxPlane(1,0,0,0) to the specified plane */ PX_FOUNDATION_API PxTransform PxTransformFromPlaneEquation(const PxPlane& plane); /** \brief creates a plane equation from a transform, such as the actor transform for a PxPlaneGeometry \param[in] pose the transform \return the plane */ PX_INLINE PxPlane PxPlaneEquationFromTransform(const PxTransform& pose) { return PxPlane(1.0f, 0.0f, 0.0f, 0.0f).transform(pose); } /** \brief Spherical linear interpolation of two quaternions. \param[in] t is the interpolation parameter in range (0, 1) \param[in] left is the start of the interpolation \param[in] right is the end of the interpolation \return Returns left when t=0, right when t=1 and a linear interpolation of left and right when 0 < t < 1. Returns angle between -PI and PI in radians */ PX_CUDA_CALLABLE PX_INLINE PxQuat PxSlerp(const PxReal t, const PxQuat& left, const PxQuat& right) { const PxReal quatEpsilon = (PxReal(1.0e-8f)); PxReal cosine = left.dot(right); PxReal sign = PxReal(1); if (cosine < 0) { cosine = -cosine; sign = PxReal(-1); } PxReal sine = PxReal(1) - cosine * cosine; if (sine >= quatEpsilon * quatEpsilon) { sine = PxSqrt(sine); const PxReal angle = PxAtan2(sine, cosine); const PxReal i_sin_angle = PxReal(1) / sine; const PxReal leftw = PxSin(angle * (PxReal(1) - t)) * i_sin_angle; const PxReal rightw = PxSin(angle * t) * i_sin_angle * sign; return left * leftw + right * rightw; } return left; } /** \brief integrate transform. 
\param[in] curTrans The current transform \param[in] linvel Linear velocity \param[in] angvel Angular velocity \param[in] timeStep The time-step for integration \param[out] result The integrated transform */ PX_FOUNDATION_API void PxIntegrateTransform(const PxTransform& curTrans, const PxVec3& linvel, const PxVec3& angvel, PxReal timeStep, PxTransform& result); //! \brief Compute the exponent of a PxVec3 PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuat PxExp(const PxVec3& v) { const PxReal m = v.magnitudeSquared(); return m < 1e-24f ? PxQuat(PxIdentity) : PxQuat(PxSqrt(m), v * PxRecipSqrt(m)); } /** \brief computes a oriented bounding box around the scaled basis. \param basis Input = skewed basis, Output = (normalized) orthogonal basis. \return Bounding box extent. */ PX_FOUNDATION_API PxVec3 PxOptimizeBoundingBox(PxMat33& basis); /** \brief return Returns the log of a PxQuat */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 PxLog(const PxQuat& q) { const PxReal s = q.getImaginaryPart().magnitude(); if (s < 1e-12f) return PxVec3(0.0f); // force the half-angle to have magnitude <= pi/2 PxReal halfAngle = q.w < 0 ? PxAtan2(-s, -q.w) : PxAtan2(s, q.w); PX_ASSERT(halfAngle >= -PxPi / 2 && halfAngle <= PxPi / 2); return q.getImaginaryPart().getNormalized() * 2.f * halfAngle; } /** \brief return Returns 0 if v.x is largest element of v, 1 if v.y is largest element, 2 if v.z is largest element. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxU32 PxLargestAxis(const PxVec3& v) { PxU32 m = PxU32(v.y > v.x ? 1 : 0); return v.z > v[m] ? 2 : m; } /** \brief Compute tan(theta/2) given sin(theta) and cos(theta) as inputs. \param[in] sin has value sin(theta) \param[in] cos has value cos(theta) \return Returns tan(theta/2) */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal PxTanHalf(PxReal sin, PxReal cos) { // PT: avoids divide by zero for singularity. 
We return sqrt(FLT_MAX) instead of FLT_MAX // to make sure the calling code doesn't generate INF values when manipulating the returned value // (some joints multiply it by 4, etc). if (cos == -1.0f) return sin < 0.0f ? -sqrtf(FLT_MAX) : sqrtf(FLT_MAX); // PT: half-angle formula: tan(a/2) = sin(a)/(1+cos(a)) return sin / (1.0f + cos); } /** \brief Compute the closest point on an 2d ellipse to a given 2d point. \param[in] point is a 2d point in the y-z plane represented by (point.y, point.z) \param[in] radii are the radii of the ellipse (radii.y and radii.z) in the y-z plane. \return Returns the 2d position on the surface of the ellipse that is closest to point. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 PxEllipseClamp(const PxVec3& point, const PxVec3& radii) { // lagrange multiplier method with Newton/Halley hybrid root-finder. // see http://www.geometrictools.com/Documentation/DistancePointToEllipse2.pdf // for proof of Newton step robustness and initial estimate. // Halley converges much faster but sometimes overshoots - when that happens we take // a newton step instead // converges in 1-2 iterations where D&C works well, and it's good with 4 iterations // with any ellipse that isn't completely crazy const PxU32 MAX_ITERATIONS = 20; const PxReal convergenceThreshold = 1e-4f; // iteration requires first quadrant but we recover generality later PxVec3 q(0, PxAbs(point.y), PxAbs(point.z)); const PxReal tinyEps = 1e-6f; // very close to minor axis is numerically problematic but trivial if (radii.y >= radii.z) { if (q.z < tinyEps) return PxVec3(0, point.y > 0 ? radii.y : -radii.y, 0); } else { if (q.y < tinyEps) return PxVec3(0, 0, point.z > 0 ? radii.z : -radii.z); } PxVec3 denom, e2 = radii.multiply(radii), eq = radii.multiply(q); // we can use any initial guess which is > maximum(-e.y^2,-e.z^2) and for which f(t) is > 0. // this guess works well near the axes, but is weak along the diagonals. 
PxReal t = PxMax(eq.y - e2.y, eq.z - e2.z); for (PxU32 i = 0; i < MAX_ITERATIONS; i++) { denom = PxVec3(0, 1 / (t + e2.y), 1 / (t + e2.z)); PxVec3 denom2 = eq.multiply(denom); PxVec3 fv = denom2.multiply(denom2); PxReal f = fv.y + fv.z - 1; // although in exact arithmetic we are guaranteed f>0, we can get here // on the first iteration via catastrophic cancellation if the point is // very close to the origin. In that case we just behave as if f=0 if (f < convergenceThreshold) return e2.multiply(point).multiply(denom); PxReal df = fv.dot(denom) * -2.0f; t = t - f / df; } // we didn't converge, so clamp what we have PxVec3 r = e2.multiply(point).multiply(denom); return r * PxRecipSqrt(PxSqr(r.y / radii.y) + PxSqr(r.z / radii.z)); } /** \brief Compute from an input quaternion q a pair of quaternions (swing, twist) such that q = swing * twist with the caveats that swing.x = twist.y = twist.z = 0. \param[in] q is the quaternion to be decomposed into swing and twist quaternions. \param[out] swing is the swing component of the quaternion decomposition. \param[out] twist is the twist component of the quaternion decomposition. */ PX_CUDA_CALLABLE PX_FORCE_INLINE void PxSeparateSwingTwist(const PxQuat& q, PxQuat& swing, PxQuat& twist) { twist = q.x != 0.0f ? PxQuat(q.x, 0, 0, q.w).getNormalized() : PxQuat(PxIdentity); swing = q * twist.getConjugate(); } /** \brief Compute the angle between two non-unit vectors \param[in] v0 is one of the non-unit vectors \param[in] v1 is the other of the two non-unit vectors \return Returns the angle (in radians) between the two vector v0 and v1. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxF32 PxComputeAngle(const PxVec3& v0, const PxVec3& v1) { const PxF32 cos = v0.dot(v1); // |v0|*|v1|*Cos(Angle) const PxF32 sin = (v0.cross(v1)).magnitude(); // |v0|*|v1|*Sin(Angle) return PxAtan2(sin, cos); } /** \brief Compute two normalized vectors (right and up) that are perpendicular to an input normalized vector (dir). 
\param[in] dir is a normalized vector that is used to compute the perpendicular vectors. \param[out] right is the first of the two vectors perpendicular to dir \param[out] up is the second of the two vectors perpendicular to dir */ PX_CUDA_CALLABLE PX_INLINE void PxComputeBasisVectors(const PxVec3& dir, PxVec3& right, PxVec3& up) { // Derive two remaining vectors if (PxAbs(dir.y) <= 0.9999f) { right = PxVec3(dir.z, 0.0f, -dir.x); right.normalize(); // PT: normalize not needed for 'up' because dir & right are unit vectors, // and by construction the angle between them is 90 degrees (i.e. sin(angle)=1) up = PxVec3(dir.y * right.z, dir.z * right.x - dir.x * right.z, -dir.y * right.x); } else { right = PxVec3(1.0f, 0.0f, 0.0f); up = PxVec3(0.0f, dir.z, -dir.y); up.normalize(); } } /** \brief Compute three normalized vectors (dir, right and up) that are parallel to (dir) and perpendicular to (right, up) the normalized direction vector (p1 - p0)/||p1 - p0||. \param[in] p0 is used to compute the normalized vector dir = (p1 - p0)/||p1 - p0||. \param[in] p1 is used to compute the normalized vector dir = (p1 - p0)/||p1 - p0||. \param[out] dir is the normalized vector (p1 - p0)/||p1 - p0||. 
\param[out] right is the first of the two normalized vectors perpendicular to dir \param[out] up is the second of the two normalized vectors perpendicular to dir */ PX_INLINE void PxComputeBasisVectors(const PxVec3& p0, const PxVec3& p1, PxVec3& dir, PxVec3& right, PxVec3& up) { // Compute the new direction vector dir = p1 - p0; dir.normalize(); // Derive two remaining vectors PxComputeBasisVectors(dir, right, up); } /** \brief Compute (i+1)%3 */ PX_INLINE PxU32 PxGetNextIndex3(PxU32 i) { return (i + 1 + (i >> 1)) & 3; } PX_INLINE PX_CUDA_CALLABLE void computeBarycentric(const PxVec3& a, const PxVec3& b, const PxVec3& c, const PxVec3& d, const PxVec3& p, PxVec4& bary) { const PxVec3 ba = b - a; const PxVec3 ca = c - a; const PxVec3 da = d - a; const PxVec3 pa = p - a; const PxReal detBcd = ba.dot(ca.cross(da)); const PxReal detPcd = pa.dot(ca.cross(da)); bary.y = detPcd / detBcd; const PxReal detBpd = ba.dot(pa.cross(da)); bary.z = detBpd / detBcd; const PxReal detBcp = ba.dot(ca.cross(pa)); bary.w = detBcp / detBcd; bary.x = 1 - bary.y - bary.z - bary.w; } PX_INLINE PX_CUDA_CALLABLE void computeBarycentric(const PxVec3& a, const PxVec3& b, const PxVec3& c, const PxVec3& p, PxVec4& bary) { const PxVec3 v0 = b - a; const PxVec3 v1 = c - a; const PxVec3 v2 = p - a; const float d00 = v0.dot(v0); const float d01 = v0.dot(v1); const float d11 = v1.dot(v1); const float d20 = v2.dot(v0); const float d21 = v2.dot(v1); const float denom = d00 * d11 - d01 * d01; const float v = (d11 * d20 - d01 * d21) / denom; const float w = (d00 * d21 - d01 * d20) / denom; const float u = 1.f - v - w; bary.x = u; bary.y = v; bary.z = w; bary.w = 0.f; } // lerp struct Interpolation { PX_INLINE PX_CUDA_CALLABLE static float PxLerp(float a, float b, float t) { return a + t * (b - a); } PX_INLINE PX_CUDA_CALLABLE static PxReal PxBiLerp( const PxReal f00, const PxReal f10, const PxReal f01, const PxReal f11, const PxReal tx, const PxReal ty) { return PxLerp( PxLerp(f00, f10, tx), PxLerp(f01, 
f11, tx), ty); } PX_INLINE PX_CUDA_CALLABLE static PxReal PxTriLerp( const PxReal f000, const PxReal f100, const PxReal f010, const PxReal f110, const PxReal f001, const PxReal f101, const PxReal f011, const PxReal f111, const PxReal tx, const PxReal ty, const PxReal tz) { return PxLerp( PxBiLerp(f000, f100, f010, f110, tx, ty), PxBiLerp(f001, f101, f011, f111, tx, ty), tz); } PX_INLINE PX_CUDA_CALLABLE static PxU32 PxSDFIdx(PxU32 i, PxU32 j, PxU32 k, PxU32 nbX, PxU32 nbY) { return i + j * nbX + k * nbX*nbY; } PX_INLINE PX_CUDA_CALLABLE static PxReal PxSDFSampleImpl(const PxReal* PX_RESTRICT sdf, const PxVec3& localPos, const PxVec3& sdfBoxLower, const PxVec3& sdfBoxHigher, const PxReal sdfDx, const PxReal invSdfDx, const PxU32 dimX, const PxU32 dimY, const PxU32 dimZ, PxReal tolerance) { PxVec3 clampedGridPt = localPos.maximum(sdfBoxLower).minimum(sdfBoxHigher); const PxVec3 diff = (localPos - clampedGridPt); if (diff.magnitudeSquared() > tolerance*tolerance) return PX_MAX_F32; PxVec3 f = (clampedGridPt - sdfBoxLower) * invSdfDx; PxU32 i = PxU32(f.x); PxU32 j = PxU32(f.y); PxU32 k = PxU32(f.z); f -= PxVec3(PxReal(i), PxReal(j), PxReal(k)); if (i >= (dimX - 1)) { i = dimX - 2; clampedGridPt.x -= f.x * sdfDx; f.x = 1.f; } if (j >= (dimY - 1)) { j = dimY - 2; clampedGridPt.y -= f.y * sdfDx; f.y = 1.f; } if (k >= (dimZ - 1)) { k = dimZ - 2; clampedGridPt.z -= f.z * sdfDx; f.z = 1.f; } const PxReal s000 = sdf[Interpolation::PxSDFIdx(i, j, k, dimX, dimY)]; const PxReal s100 = sdf[Interpolation::PxSDFIdx(i + 1, j, k, dimX, dimY)]; const PxReal s010 = sdf[Interpolation::PxSDFIdx(i, j + 1, k, dimX, dimY)]; const PxReal s110 = sdf[Interpolation::PxSDFIdx(i + 1, j + 1, k, dimX, dimY)]; const PxReal s001 = sdf[Interpolation::PxSDFIdx(i, j, k + 1, dimX, dimY)]; const PxReal s101 = sdf[Interpolation::PxSDFIdx(i + 1, j, k + 1, dimX, dimY)]; const PxReal s011 = sdf[Interpolation::PxSDFIdx(i, j + 1, k + 1, dimX, dimY)]; const PxReal s111 = sdf[Interpolation::PxSDFIdx(i + 1, j + 1, 
k + 1, dimX, dimY)]; PxReal dist = PxTriLerp( s000, s100, s010, s110, s001, s101, s011, s111, f.x, f.y, f.z); dist += diff.magnitude(); return dist; } }; PX_INLINE PX_CUDA_CALLABLE PxReal PxSdfSample(const PxReal* PX_RESTRICT sdf, const PxVec3& localPos, const PxVec3& sdfBoxLower, const PxVec3& sdfBoxHigher, const PxReal sdfDx, const PxReal invSdfDx, const PxU32 dimX, const PxU32 dimY, const PxU32 dimZ, PxVec3& gradient, PxReal tolerance = PX_MAX_F32) { PxReal dist = Interpolation::PxSDFSampleImpl(sdf, localPos, sdfBoxLower, sdfBoxHigher, sdfDx, invSdfDx, dimX, dimY, dimZ, tolerance); if (dist < tolerance) { PxVec3 grad; grad.x = Interpolation::PxSDFSampleImpl(sdf, localPos + PxVec3(sdfDx, 0.f, 0.f), sdfBoxLower, sdfBoxHigher, sdfDx, invSdfDx, dimX, dimY, dimZ, tolerance) - Interpolation::PxSDFSampleImpl(sdf, localPos - PxVec3(sdfDx, 0.f, 0.f), sdfBoxLower, sdfBoxHigher, sdfDx, invSdfDx, dimX, dimY, dimZ, tolerance); grad.y = Interpolation::PxSDFSampleImpl(sdf, localPos + PxVec3(0.f, sdfDx, 0.f), sdfBoxLower, sdfBoxHigher, sdfDx, invSdfDx, dimX, dimY, dimZ, tolerance) - Interpolation::PxSDFSampleImpl(sdf, localPos - PxVec3(0.f, sdfDx, 0.f), sdfBoxLower, sdfBoxHigher, sdfDx, invSdfDx, dimX, dimY, dimZ, tolerance); grad.z = Interpolation::PxSDFSampleImpl(sdf, localPos + PxVec3(0.f, 0.f, sdfDx), sdfBoxLower, sdfBoxHigher, sdfDx, invSdfDx, dimX, dimY, dimZ, tolerance) - Interpolation::PxSDFSampleImpl(sdf, localPos - PxVec3(0.f, 0.f, sdfDx), sdfBoxLower, sdfBoxHigher, sdfDx, invSdfDx, dimX, dimY, dimZ, tolerance); gradient = grad; } return dist; } #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
18,062
C
33.145558
174
0.704186
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxBounds3.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_BOUNDS3_H #define PX_BOUNDS3_H /** \addtogroup foundation @{ */ #include "foundation/PxTransform.h" #include "foundation/PxMat33.h" #if !PX_DOXYGEN namespace physx { #endif // maximum extents defined such that floating point exceptions are avoided for standard use cases #define PX_MAX_BOUNDS_EXTENTS (PX_MAX_REAL * 0.25f) /** \brief Class representing 3D range or axis aligned bounding box. Stored as minimum and maximum extent corners. Alternate representation would be center and dimensions. May be empty or nonempty. For nonempty bounds, minimum <= maximum has to hold for all axes. Empty bounds have to be represented as minimum = PX_MAX_BOUNDS_EXTENTS and maximum = -PX_MAX_BOUNDS_EXTENTS for all axes. All other representations are invalid and the behavior is undefined. */ class PxBounds3 { public: /** \brief Default constructor, not performing any initialization for performance reason. \remark Use empty() function below to construct empty bounds. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3() { } /** \brief Construct from two bounding points */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3(const PxVec3& minimum, const PxVec3& maximum); PX_CUDA_CALLABLE PX_FORCE_INLINE void operator=(const PxBounds3& other) { minimum = other.minimum; maximum = other.maximum; } PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3(const PxBounds3& other) { minimum = other.minimum; maximum = other.maximum; } /** \brief Return empty bounds. */ static PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3 empty(); /** \brief returns the AABB containing v0 and v1. \param v0 first point included in the AABB. \param v1 second point included in the AABB. */ static PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3 boundsOfPoints(const PxVec3& v0, const PxVec3& v1); /** \brief returns the AABB from center and extents vectors. 
\param center Center vector \param extent Extents vector */ static PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3 centerExtents(const PxVec3& center, const PxVec3& extent); /** \brief Construct from center, extent, and (not necessarily orthogonal) basis */ static PX_CUDA_CALLABLE PX_INLINE PxBounds3 basisExtent(const PxVec3& center, const PxMat33& basis, const PxVec3& extent); /** \brief Construct from pose and extent */ static PX_CUDA_CALLABLE PX_INLINE PxBounds3 poseExtent(const PxTransform& pose, const PxVec3& extent); /** \brief gets the transformed bounds of the passed AABB (resulting in a bigger AABB). This version is safe to call for empty bounds. \param[in] matrix Transform to apply, can contain scaling as well \param[in] bounds The bounds to transform. */ static PX_CUDA_CALLABLE PX_INLINE PxBounds3 transformSafe(const PxMat33& matrix, const PxBounds3& bounds); /** \brief gets the transformed bounds of the passed AABB (resulting in a bigger AABB). Calling this method for empty bounds leads to undefined behavior. Use #transformSafe() instead. \param[in] matrix Transform to apply, can contain scaling as well \param[in] bounds The bounds to transform. */ static PX_CUDA_CALLABLE PX_INLINE PxBounds3 transformFast(const PxMat33& matrix, const PxBounds3& bounds); /** \brief gets the transformed bounds of the passed AABB (resulting in a bigger AABB). This version is safe to call for empty bounds. \param[in] transform Transform to apply, can contain scaling as well \param[in] bounds The bounds to transform. */ static PX_CUDA_CALLABLE PX_INLINE PxBounds3 transformSafe(const PxTransform& transform, const PxBounds3& bounds); /** \brief gets the transformed bounds of the passed AABB (resulting in a bigger AABB). Calling this method for empty bounds leads to undefined behavior. Use #transformSafe() instead. \param[in] transform Transform to apply, can contain scaling as well \param[in] bounds The bounds to transform. 
*/ static PX_CUDA_CALLABLE PX_INLINE PxBounds3 transformFast(const PxTransform& transform, const PxBounds3& bounds); /** \brief Sets empty to true */ PX_CUDA_CALLABLE PX_FORCE_INLINE void setEmpty(); /** \brief Sets the bounds to maximum size [-PX_MAX_BOUNDS_EXTENTS, PX_MAX_BOUNDS_EXTENTS]. */ PX_CUDA_CALLABLE PX_FORCE_INLINE void setMaximal(); /** \brief expands the volume to include v \param v Point to expand to. */ PX_CUDA_CALLABLE PX_FORCE_INLINE void include(const PxVec3& v); /** \brief expands the volume to include b. \param b Bounds to perform union with. */ PX_CUDA_CALLABLE PX_FORCE_INLINE void include(const PxBounds3& b); PX_CUDA_CALLABLE PX_FORCE_INLINE bool isEmpty() const; /** \brief indicates whether the intersection of this and b is empty or not. \param b Bounds to test for intersection. */ PX_CUDA_CALLABLE PX_FORCE_INLINE bool intersects(const PxBounds3& b) const; /** \brief computes the 1D-intersection between two AABBs, on a given axis. \param a the other AABB \param axis the axis (0, 1, 2) */ PX_CUDA_CALLABLE PX_FORCE_INLINE bool intersects1D(const PxBounds3& a, uint32_t axis) const; /** \brief indicates if these bounds contain v. \param v Point to test against bounds. */ PX_CUDA_CALLABLE PX_FORCE_INLINE bool contains(const PxVec3& v) const; /** \brief checks a box is inside another box. \param box the other AABB */ PX_CUDA_CALLABLE PX_FORCE_INLINE bool isInside(const PxBounds3& box) const; /** \brief returns the center of this axis aligned box. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 getCenter() const; /** \brief get component of the box's center along a given axis */ PX_CUDA_CALLABLE PX_FORCE_INLINE float getCenter(uint32_t axis) const; /** \brief get component of the box's extents along a given axis */ PX_CUDA_CALLABLE PX_FORCE_INLINE float getExtents(uint32_t axis) const; /** \brief returns the dimensions (width/height/depth) of this axis aligned box. 
*/ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 getDimensions() const; /** \brief returns the extents, which are half of the width/height/depth. */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 getExtents() const; /** \brief scales the AABB. This version is safe to call for empty bounds. \param scale Factor to scale AABB by. */ PX_CUDA_CALLABLE PX_FORCE_INLINE void scaleSafe(float scale); /** \brief scales the AABB. Calling this method for empty bounds leads to undefined behavior. Use #scaleSafe() instead. \param scale Factor to scale AABB by. */ PX_CUDA_CALLABLE PX_FORCE_INLINE void scaleFast(float scale); /** fattens the AABB in all 3 dimensions by the given distance. This version is safe to call for empty bounds. */ PX_CUDA_CALLABLE PX_FORCE_INLINE void fattenSafe(float distance); /** fattens the AABB in all 3 dimensions by the given distance. Calling this method for empty bounds leads to undefined behavior. Use #fattenSafe() instead. */ PX_CUDA_CALLABLE PX_FORCE_INLINE void fattenFast(float distance); /** checks that the AABB values are not NaN */ PX_CUDA_CALLABLE PX_FORCE_INLINE bool isFinite() const; /** checks that the AABB values describe a valid configuration. */ PX_CUDA_CALLABLE PX_FORCE_INLINE bool isValid() const; /** Finds the closest point in the box to the point p. If p is contained, this will be p, otherwise it will be the closest point on the surface of the box. 
*/ PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 closestPoint(const PxVec3& p) const; PxVec3 minimum, maximum; }; PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3::PxBounds3(const PxVec3& minimum_, const PxVec3& maximum_) : minimum(minimum_), maximum(maximum_) { } PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3 PxBounds3::empty() { return PxBounds3(PxVec3(PX_MAX_BOUNDS_EXTENTS), PxVec3(-PX_MAX_BOUNDS_EXTENTS)); } PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxBounds3::isFinite() const { return minimum.isFinite() && maximum.isFinite(); } PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3 PxBounds3::boundsOfPoints(const PxVec3& v0, const PxVec3& v1) { return PxBounds3(v0.minimum(v1), v0.maximum(v1)); } PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3 PxBounds3::centerExtents(const PxVec3& center, const PxVec3& extent) { return PxBounds3(center - extent, center + extent); } PX_CUDA_CALLABLE PX_INLINE PxBounds3 PxBounds3::basisExtent(const PxVec3& center, const PxMat33& basis, const PxVec3& extent) { // extended basis vectors const PxVec3 c0 = basis.column0 * extent.x; const PxVec3 c1 = basis.column1 * extent.y; const PxVec3 c2 = basis.column2 * extent.z; // find combination of base vectors that produces max. 
distance for each component = sum of abs() const PxVec3 w( PxAbs(c0.x) + PxAbs(c1.x) + PxAbs(c2.x), PxAbs(c0.y) + PxAbs(c1.y) + PxAbs(c2.y), PxAbs(c0.z) + PxAbs(c1.z) + PxAbs(c2.z)); return PxBounds3(center - w, center + w); } PX_CUDA_CALLABLE PX_INLINE PxBounds3 PxBounds3::poseExtent(const PxTransform& pose, const PxVec3& extent) { return basisExtent(pose.p, PxMat33(pose.q), extent); } PX_CUDA_CALLABLE PX_FORCE_INLINE void PxBounds3::setEmpty() { minimum = PxVec3(PX_MAX_BOUNDS_EXTENTS); maximum = PxVec3(-PX_MAX_BOUNDS_EXTENTS); } PX_CUDA_CALLABLE PX_FORCE_INLINE void PxBounds3::setMaximal() { minimum = PxVec3(-PX_MAX_BOUNDS_EXTENTS); maximum = PxVec3(PX_MAX_BOUNDS_EXTENTS); } PX_CUDA_CALLABLE PX_FORCE_INLINE void PxBounds3::include(const PxVec3& v) { PX_ASSERT(isValid()); minimum = minimum.minimum(v); maximum = maximum.maximum(v); } PX_CUDA_CALLABLE PX_FORCE_INLINE void PxBounds3::include(const PxBounds3& b) { PX_ASSERT(isValid()); minimum = minimum.minimum(b.minimum); maximum = maximum.maximum(b.maximum); } PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxBounds3::isEmpty() const { PX_ASSERT(isValid()); return minimum.x > maximum.x; } PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxBounds3::intersects(const PxBounds3& b) const { PX_ASSERT(isValid() && b.isValid()); return !(b.minimum.x > maximum.x || minimum.x > b.maximum.x || b.minimum.y > maximum.y || minimum.y > b.maximum.y || b.minimum.z > maximum.z || minimum.z > b.maximum.z); } PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxBounds3::intersects1D(const PxBounds3& a, uint32_t axis) const { PX_ASSERT(isValid() && a.isValid()); return maximum[axis] >= a.minimum[axis] && a.maximum[axis] >= minimum[axis]; } PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxBounds3::contains(const PxVec3& v) const { PX_ASSERT(isValid()); return !(v.x < minimum.x || v.x > maximum.x || v.y < minimum.y || v.y > maximum.y || v.z < minimum.z || v.z > maximum.z); } PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxBounds3::isInside(const PxBounds3& box) const { 
PX_ASSERT(isValid() && box.isValid()); if(box.minimum.x > minimum.x) return false; if(box.minimum.y > minimum.y) return false; if(box.minimum.z > minimum.z) return false; if(box.maximum.x < maximum.x) return false; if(box.maximum.y < maximum.y) return false; if(box.maximum.z < maximum.z) return false; return true; } PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 PxBounds3::getCenter() const { PX_ASSERT(isValid()); return (minimum + maximum) * 0.5f; } PX_CUDA_CALLABLE PX_FORCE_INLINE float PxBounds3::getCenter(uint32_t axis) const { PX_ASSERT(isValid()); return (minimum[axis] + maximum[axis]) * 0.5f; } PX_CUDA_CALLABLE PX_FORCE_INLINE float PxBounds3::getExtents(uint32_t axis) const { PX_ASSERT(isValid()); return (maximum[axis] - minimum[axis]) * 0.5f; } PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 PxBounds3::getDimensions() const { PX_ASSERT(isValid()); return maximum - minimum; } PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 PxBounds3::getExtents() const { PX_ASSERT(isValid()); return getDimensions() * 0.5f; } PX_CUDA_CALLABLE PX_FORCE_INLINE void PxBounds3::scaleSafe(float scale) { PX_ASSERT(isValid()); if(!isEmpty()) scaleFast(scale); } PX_CUDA_CALLABLE PX_FORCE_INLINE void PxBounds3::scaleFast(float scale) { PX_ASSERT(isValid()); *this = centerExtents(getCenter(), getExtents() * scale); } PX_CUDA_CALLABLE PX_FORCE_INLINE void PxBounds3::fattenSafe(float distance) { PX_ASSERT(isValid()); if(!isEmpty()) fattenFast(distance); } PX_CUDA_CALLABLE PX_FORCE_INLINE void PxBounds3::fattenFast(float distance) { PX_ASSERT(isValid()); minimum.x -= distance; minimum.y -= distance; minimum.z -= distance; maximum.x += distance; maximum.y += distance; maximum.z += distance; } PX_CUDA_CALLABLE PX_INLINE PxBounds3 PxBounds3::transformSafe(const PxMat33& matrix, const PxBounds3& bounds) { PX_ASSERT(bounds.isValid()); return !bounds.isEmpty() ? 
transformFast(matrix, bounds) : bounds; } PX_CUDA_CALLABLE PX_INLINE PxBounds3 PxBounds3::transformFast(const PxMat33& matrix, const PxBounds3& bounds) { PX_ASSERT(bounds.isValid()); return PxBounds3::basisExtent(matrix * bounds.getCenter(), matrix, bounds.getExtents()); } PX_CUDA_CALLABLE PX_INLINE PxBounds3 PxBounds3::transformSafe(const PxTransform& transform, const PxBounds3& bounds) { PX_ASSERT(bounds.isValid()); return !bounds.isEmpty() ? transformFast(transform, bounds) : bounds; } PX_CUDA_CALLABLE PX_INLINE PxBounds3 PxBounds3::transformFast(const PxTransform& transform, const PxBounds3& bounds) { PX_ASSERT(bounds.isValid()); return PxBounds3::basisExtent(transform.transform(bounds.getCenter()), PxMat33(transform.q), bounds.getExtents()); } PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxBounds3::isValid() const { return (isFinite() && (((minimum.x <= maximum.x) && (minimum.y <= maximum.y) && (minimum.z <= maximum.z)) || ((minimum.x == PX_MAX_BOUNDS_EXTENTS) && (minimum.y == PX_MAX_BOUNDS_EXTENTS) && (minimum.z == PX_MAX_BOUNDS_EXTENTS) && (maximum.x == -PX_MAX_BOUNDS_EXTENTS) && (maximum.y == -PX_MAX_BOUNDS_EXTENTS) && (maximum.z == -PX_MAX_BOUNDS_EXTENTS)))); } PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 PxBounds3::closestPoint(const PxVec3& p) const { return minimum.maximum(maximum.minimum(p)); } #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
15,601
C
30.079681
123
0.730274
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxArray.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_ARRAY_H #define PX_ARRAY_H #include "foundation/PxAssert.h" #include "foundation/PxMathIntrinsics.h" #include "foundation/PxAllocator.h" #include "foundation/PxBasicTemplates.h" #include "foundation/PxMemory.h" namespace physx { /*! An array is a sequential container. 
Implementation note * entries between 0 and size are valid objects * we use inheritance to build this because the array is included inline in a lot of objects and we want the allocator to take no space if it's not stateful, which aggregation doesn't allow. Also, we want the metadata at the front for the inline case where the allocator contains some inline storage space */ template <class T, class Alloc = typename PxAllocatorTraits<T>::Type> class PxArray : protected Alloc { public: typedef T* Iterator; typedef const T* ConstIterator; explicit PxArray(const PxEMPTY v) : Alloc(v) { if(mData) mCapacity |= PX_SIGN_BITMASK; } /*! Default array constructor. Initialize an empty array */ PX_INLINE explicit PxArray(const Alloc& alloc = Alloc()) : Alloc(alloc), mData(0), mSize(0), mCapacity(0) { } /*! Initialize array with given capacity */ PX_INLINE explicit PxArray(uint32_t size, const T& a = T(), const Alloc& alloc = Alloc()) : Alloc(alloc), mData(0), mSize(0), mCapacity(0) { resize(size, a); } /*! Copy-constructor. Copy all entries from other array */ template <class A> PX_INLINE explicit PxArray(const PxArray<T, A>& other, const Alloc& alloc = Alloc()) : Alloc(alloc) { copy(other); } // This is necessary else the basic default copy constructor is used in the case of both arrays being of the same // template instance // The C++ standard clearly states that a template constructor is never a copy constructor [2]. In other words, // the presence of a template constructor does not suppress the implicit declaration of the copy constructor. // Also never make a copy constructor explicit, or copy-initialization* will no longer work. 
This is because // 'binding an rvalue to a const reference requires an accessible copy constructor' (http://gcc.gnu.org/bugs/) // *http://stackoverflow.com/questions/1051379/is-there-a-difference-in-c-between-copy-initialization-and-assignment-initializ PX_INLINE PxArray(const PxArray& other, const Alloc& alloc = Alloc()) : Alloc(alloc) { copy(other); } /*! Initialize array with given length */ PX_INLINE explicit PxArray(const T* first, const T* last, const Alloc& alloc = Alloc()) : Alloc(alloc), mSize(last < first ? 0 : uint32_t(last - first)), mCapacity(mSize) { mData = allocate(mSize); copy(mData, mData + mSize, first); } /*! Destructor */ PX_INLINE ~PxArray() { destroy(mData, mData + mSize); if(capacity() && !isInUserMemory()) deallocate(mData); } /*! Assignment operator. Copy content (deep-copy) */ template <class A> PX_INLINE PxArray& operator=(const PxArray<T, A>& rhs) { if(&rhs == this) return *this; clear(); reserve(rhs.mSize); copy(mData, mData + rhs.mSize, rhs.mData); mSize = rhs.mSize; return *this; } PX_INLINE PxArray& operator=(const PxArray& t) // Needs to be declared, see comment at copy-constructor { return operator=<Alloc>(t); } /*! Array indexing operator. \param i The index of the element that will be returned. \return The element i in the array. */ PX_FORCE_INLINE const T& operator[](uint32_t i) const { PX_ASSERT(i < mSize); return mData[i]; } /*! Array indexing operator. \param i The index of the element that will be returned. \return The element i in the array. */ PX_FORCE_INLINE T& operator[](uint32_t i) { PX_ASSERT(i < mSize); return mData[i]; } /*! Returns a pointer to the initial element of the array. \return a pointer to the initial element of the array. */ PX_FORCE_INLINE ConstIterator begin() const { return mData; } PX_FORCE_INLINE Iterator begin() { return mData; } /*! Returns an iterator beyond the last element of the array. Do not dereference. \return a pointer to the element beyond the last element of the array. 
*/ PX_FORCE_INLINE ConstIterator end() const { return mData + mSize; } PX_FORCE_INLINE Iterator end() { return mData + mSize; } /*! Returns a reference to the first element of the array. Undefined if the array is empty. \return a reference to the first element of the array */ PX_FORCE_INLINE const T& front() const { PX_ASSERT(mSize); return mData[0]; } PX_FORCE_INLINE T& front() { PX_ASSERT(mSize); return mData[0]; } /*! Returns a reference to the last element of the array. Undefined if the array is empty \return a reference to the last element of the array */ PX_FORCE_INLINE const T& back() const { PX_ASSERT(mSize); return mData[mSize - 1]; } PX_FORCE_INLINE T& back() { PX_ASSERT(mSize); return mData[mSize - 1]; } /*! Returns the number of entries in the array. This can, and probably will, differ from the array capacity. \return The number of of entries in the array. */ PX_FORCE_INLINE uint32_t size() const { return mSize; } /*! Clears the array. */ PX_INLINE void clear() { destroy(mData, mData + mSize); mSize = 0; } /*! Returns whether the array is empty (i.e. whether its size is 0). \return true if the array is empty */ PX_FORCE_INLINE bool empty() const { return mSize == 0; } /*! Finds the first occurrence of an element in the array. \param a The element to find. */ PX_INLINE Iterator find(const T& a) { uint32_t index; for(index = 0; index < mSize && mData[index] != a; index++) ; return mData + index; } PX_INLINE ConstIterator find(const T& a) const { uint32_t index; for(index = 0; index < mSize && mData[index] != a; index++) ; return mData + index; } ///////////////////////////////////////////////////////////////////////// /*! Adds one element to the end of the array. Operation is O(1). \param a The element that will be added to this array. 
*/ ///////////////////////////////////////////////////////////////////////// PX_FORCE_INLINE T& pushBack(const T& a) { if(capacity() <= mSize) return growAndPushBack(a); PX_PLACEMENT_NEW(reinterpret_cast<void*>(mData + mSize), T)(a); return mData[mSize++]; } ///////////////////////////////////////////////////////////////////////// /*! Returns the element at the end of the array. Only legal if the array is non-empty. */ ///////////////////////////////////////////////////////////////////////// PX_INLINE T popBack() { PX_ASSERT(mSize); T t = mData[mSize - 1]; mData[--mSize].~T(); return t; } ///////////////////////////////////////////////////////////////////////// /*! Construct one element at the end of the array. Operation is O(1). */ ///////////////////////////////////////////////////////////////////////// PX_INLINE T& insert() { if(capacity() <= mSize) grow(capacityIncrement()); T* ptr = mData + mSize++; PX_PLACEMENT_NEW(ptr, T); // not 'T()' because PODs should not get default-initialized. return *ptr; } ///////////////////////////////////////////////////////////////////////// /*! Subtracts the element on position i from the array and replace it with the last element. Operation is O(1) \param i The position of the element that will be subtracted from this array. */ ///////////////////////////////////////////////////////////////////////// PX_INLINE void replaceWithLast(uint32_t i) { PX_ASSERT(i < mSize); mData[i] = mData[--mSize]; mData[mSize].~T(); } PX_INLINE void replaceWithLast(Iterator i) { replaceWithLast(static_cast<uint32_t>(i - mData)); } ///////////////////////////////////////////////////////////////////////// /*! Replaces the first occurrence of the element a with the last element Operation is O(n) \param a The position of the element that will be subtracted from this array. \return true if the element has been removed. 
*/ ///////////////////////////////////////////////////////////////////////// PX_INLINE bool findAndReplaceWithLast(const T& a) { uint32_t index = 0; while(index < mSize && mData[index] != a) ++index; if(index == mSize) return false; replaceWithLast(index); return true; } ///////////////////////////////////////////////////////////////////////// /*! Subtracts the element on position i from the array. Shift the entire array one step. Operation is O(n) \param i The position of the element that will be subtracted from this array. */ ///////////////////////////////////////////////////////////////////////// PX_INLINE void remove(uint32_t i) { PX_ASSERT(i < mSize); T* it = mData + i; it->~T(); while (++i < mSize) { PX_PLACEMENT_NEW(it, T(mData[i])); ++it; it->~T(); } --mSize; } ///////////////////////////////////////////////////////////////////////// /*! Removes a range from the array. Shifts the array so order is maintained. Operation is O(n) \param begin The starting position of the element that will be subtracted from this array. \param count The number of elments that will be subtracted from this array. */ ///////////////////////////////////////////////////////////////////////// PX_INLINE void removeRange(uint32_t begin, uint32_t count) { PX_ASSERT(begin < mSize); PX_ASSERT((begin + count) <= mSize); for(uint32_t i = 0; i < count; i++) mData[begin + i].~T(); // call the destructor on the ones being removed first. T* dest = &mData[begin]; // location we are copying the tail end objects to T* src = &mData[begin + count]; // start of tail objects uint32_t move_count = mSize - (begin + count); // compute remainder that needs to be copied down for(uint32_t i = 0; i < move_count; i++) { PX_PLACEMENT_NEW(dest, T(*src)); // copy the old one to the new location src->~T(); // call the destructor on the old location dest++; src++; } mSize -= count; } ////////////////////////////////////////////////////////////////////////// /*! 
Resize array */ ////////////////////////////////////////////////////////////////////////// PX_NOINLINE void resize(const uint32_t size, const T& a = T()); PX_NOINLINE void resizeUninitialized(const uint32_t size); ////////////////////////////////////////////////////////////////////////// /*! Resize array such that only as much memory is allocated to hold the existing elements */ ////////////////////////////////////////////////////////////////////////// PX_INLINE void shrink() { recreate(mSize); } ////////////////////////////////////////////////////////////////////////// /*! Deletes all array elements and frees memory. */ ////////////////////////////////////////////////////////////////////////// PX_INLINE void reset() { resize(0); shrink(); } ////////////////////////////////////////////////////////////////////////// /*! Resets or clears the array depending on occupancy. */ ////////////////////////////////////////////////////////////////////////// PX_INLINE void resetOrClear() { const PxU32 c = capacity(); const PxU32 s = size(); if(s>=c/2) clear(); else reset(); } ////////////////////////////////////////////////////////////////////////// /*! Ensure that the array has at least size capacity. */ ////////////////////////////////////////////////////////////////////////// PX_INLINE void reserve(const uint32_t capacity) { if(capacity > this->capacity()) grow(capacity); } ////////////////////////////////////////////////////////////////////////// /*! Query the capacity(allocated mem) for the array. */ ////////////////////////////////////////////////////////////////////////// PX_FORCE_INLINE uint32_t capacity() const { return mCapacity & ~PX_SIGN_BITMASK; } ////////////////////////////////////////////////////////////////////////// /*! 
Unsafe function to force the size of the array */ ////////////////////////////////////////////////////////////////////////// PX_FORCE_INLINE void forceSize_Unsafe(uint32_t size) { PX_ASSERT(size <= mCapacity); mSize = size; } ////////////////////////////////////////////////////////////////////////// /*! Swap contents of an array without allocating temporary storage */ ////////////////////////////////////////////////////////////////////////// PX_INLINE void swap(PxArray<T, Alloc>& other) { PxSwap(mData, other.mData); PxSwap(mSize, other.mSize); PxSwap(mCapacity, other.mCapacity); } ////////////////////////////////////////////////////////////////////////// /*! Assign a range of values to this vector (resizes to length of range) */ ////////////////////////////////////////////////////////////////////////// PX_INLINE void assign(const T* first, const T* last) { resizeUninitialized(uint32_t(last - first)); copy(begin(), end(), first); } // We need one bit to mark arrays that have been deserialized from a user-provided memory block. // For alignment & memory saving purpose we store that bit in the rarely used capacity member. 
PX_FORCE_INLINE uint32_t isInUserMemory() const { return mCapacity & PX_SIGN_BITMASK; } /// return reference to allocator PX_INLINE Alloc& getAllocator() { return *this; } protected: // constructor for where we don't own the memory PxArray(T* memory, uint32_t size, uint32_t capacity, const Alloc& alloc = Alloc()) : Alloc(alloc), mData(memory), mSize(size), mCapacity(capacity | PX_SIGN_BITMASK) { } template <class A> PX_NOINLINE void copy(const PxArray<T, A>& other); PX_INLINE T* allocate(uint32_t size) { if(size > 0) { T* p = reinterpret_cast<T*>(Alloc::allocate(sizeof(T) * size, PX_FL)); PxMarkSerializedMemory(p, sizeof(T) * size); return p; } return 0; } PX_INLINE void deallocate(void* mem) { Alloc::deallocate(mem); } static PX_INLINE void create(T* first, T* last, const T& a) { for(; first < last; ++first) ::PX_PLACEMENT_NEW(first, T(a)); } static PX_INLINE void copy(T* first, T* last, const T* src) { if(last <= first) return; for(; first < last; ++first, ++src) ::PX_PLACEMENT_NEW(first, T(*src)); } static PX_INLINE void destroy(T* first, T* last) { for(; first < last; ++first) first->~T(); } /*! Called when pushBack() needs to grow the array. \param a The element that will be added to this array. */ PX_NOINLINE T& growAndPushBack(const T& a); /*! Resizes the available memory for the array. \param capacity The number of entries that the set should be able to hold. */ PX_INLINE void grow(uint32_t capacity) { PX_ASSERT(this->capacity() < capacity); recreate(capacity); } /*! Creates a new memory block, copies all entries to the new block and destroys old entries. \param capacity The number of entries that the set should be able to hold. */ PX_NOINLINE void recreate(uint32_t capacity); // The idea here is to prevent accidental bugs with pushBack or insert. Unfortunately // it interacts badly with InlineArrays with smaller inline allocations. // TODO(dsequeira): policy template arg, this is exactly what they're for. 
PX_INLINE uint32_t capacityIncrement() const { const uint32_t capacity = this->capacity(); return capacity == 0 ? 1 : capacity * 2; } T* mData; uint32_t mSize; uint32_t mCapacity; }; template <class T, class Alloc> PX_NOINLINE void PxArray<T, Alloc>::resize(const uint32_t size, const T& a) { reserve(size); create(mData + mSize, mData + size, a); destroy(mData + size, mData + mSize); mSize = size; } template <class T, class Alloc> template <class A> PX_NOINLINE void PxArray<T, Alloc>::copy(const PxArray<T, A>& other) { if(!other.empty()) { mData = allocate(mSize = mCapacity = other.size()); copy(mData, mData + mSize, other.begin()); } else { mData = NULL; mSize = 0; mCapacity = 0; } // mData = allocate(other.mSize); // mSize = other.mSize; // mCapacity = other.mSize; // copy(mData, mData + mSize, other.mData); } template <class T, class Alloc> PX_NOINLINE void PxArray<T, Alloc>::resizeUninitialized(const uint32_t size) { reserve(size); mSize = size; } template <class T, class Alloc> PX_NOINLINE T& PxArray<T, Alloc>::growAndPushBack(const T& a) { uint32_t capacity = capacityIncrement(); T* newData = allocate(capacity); PX_ASSERT((!capacity) || (newData && (newData != mData))); copy(newData, newData + mSize, mData); // inserting element before destroying old array // avoids referencing destroyed object when duplicating array element. 
PX_PLACEMENT_NEW(reinterpret_cast<void*>(newData + mSize), T)(a); destroy(mData, mData + mSize); if(!isInUserMemory()) deallocate(mData); mData = newData; mCapacity = capacity; return mData[mSize++]; } template <class T, class Alloc> PX_NOINLINE void PxArray<T, Alloc>::recreate(uint32_t capacity) { T* newData = allocate(capacity); PX_ASSERT((!capacity) || (newData && (newData != mData))); copy(newData, newData + mSize, mData); destroy(mData, mData + mSize); if(!isInUserMemory()) deallocate(mData); mData = newData; mCapacity = capacity; } template <class T, class Alloc> PX_INLINE void swap(PxArray<T, Alloc>& x, PxArray<T, Alloc>& y) { x.swap(y); } } // namespace physx #endif
18,941
C
25.235457
127
0.597645
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxStrideIterator.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_STRIDE_ITERATOR_H #define PX_STRIDE_ITERATOR_H #include "foundation/Px.h" #include "foundation/PxAssert.h" /** \addtogroup foundation @{ */ #if !PX_DOXYGEN namespace physx { #endif /** \brief Iterator class for iterating over arrays of data that may be interleaved with other data. 
This class is used for iterating over arrays of elements that may have a larger element to element offset, called the stride, than the size of the element itself (non-contiguous). The template parameter T denotes the type of the element accessed. The stride itself is stored as a member field so multiple instances of a PxStrideIterator class can have different strides. This is useful for cases were the stride depends on runtime configuration. The stride iterator can be used for index based access, e.g.: \code PxStrideIterator<PxVec3> strideArray(...); for (unsigned i = 0; i < 10; ++i) { PxVec3& vec = strideArray[i]; ... } \endcode or iteration by increment, e.g.: \code PxStrideIterator<PxVec3> strideBegin(...); PxStrideIterator<PxVec3> strideEnd(strideBegin + 10); for (PxStrideIterator<PxVec3> it = strideBegin; it < strideEnd; ++it) { PxVec3& vec = *it; ... } \endcode Two special cases: - A stride of sizeof(T) represents a regular c array of type T. - A stride of 0 can be used to describe re-occurrence of the same element multiple times. */ template <typename T> class PxStrideIterator { #if !PX_DOXYGEN template <typename X> struct StripConst { typedef X Type; }; template <typename X> struct StripConst<const X> { typedef X Type; }; #endif public: /** \brief Constructor. Optionally takes a pointer to an element and a stride. \param[in] ptr pointer to element, defaults to NULL. \param[in] stride stride for accessing consecutive elements, defaults to the size of one element. */ explicit PX_INLINE PxStrideIterator(T* ptr = NULL, PxU32 stride = sizeof(T)) : mPtr(ptr), mStride(stride) { PX_ASSERT(mStride == 0 || sizeof(T) <= mStride); } /** \brief Copy constructor. \param[in] strideIterator PxStrideIterator to be copied. */ PX_INLINE PxStrideIterator(const PxStrideIterator<typename StripConst<T>::Type>& strideIterator) : mPtr(strideIterator.ptr()), mStride(strideIterator.stride()) { PX_ASSERT(mStride == 0 || sizeof(T) <= mStride); } /** \brief Get pointer to element. 
*/ PX_INLINE T* ptr() const { return mPtr; } /** \brief Get stride. */ PX_INLINE PxU32 stride() const { return mStride; } /** \brief Indirection operator. */ PX_INLINE T& operator*() const { return *mPtr; } /** \brief Dereferencing operator. */ PX_INLINE T* operator->() const { return mPtr; } /** \brief Indexing operator. */ PX_INLINE T& operator[](unsigned int i) const { return *byteAdd(mPtr, i * stride()); } /** \brief Pre-increment operator. */ PX_INLINE PxStrideIterator& operator++() { mPtr = byteAdd(mPtr, stride()); return *this; } /** \brief Post-increment operator. */ PX_INLINE PxStrideIterator operator++(int) { PxStrideIterator tmp = *this; mPtr = byteAdd(mPtr, stride()); return tmp; } /** \brief Pre-decrement operator. */ PX_INLINE PxStrideIterator& operator--() { mPtr = byteSub(mPtr, stride()); return *this; } /** \brief Post-decrement operator. */ PX_INLINE PxStrideIterator operator--(int) { PxStrideIterator tmp = *this; mPtr = byteSub(mPtr, stride()); return tmp; } /** \brief Addition operator. */ PX_INLINE PxStrideIterator operator+(unsigned int i) const { return PxStrideIterator(byteAdd(mPtr, i * stride()), stride()); } /** \brief Subtraction operator. */ PX_INLINE PxStrideIterator operator-(unsigned int i) const { return PxStrideIterator(byteSub(mPtr, i * stride()), stride()); } /** \brief Addition compound assignment operator. */ PX_INLINE PxStrideIterator& operator+=(unsigned int i) { mPtr = byteAdd(mPtr, i * stride()); return *this; } /** \brief Subtraction compound assignment operator. */ PX_INLINE PxStrideIterator& operator-=(unsigned int i) { mPtr = byteSub(mPtr, i * stride()); return *this; } /** \brief Iterator difference. */ PX_INLINE int operator-(const PxStrideIterator& other) const { PX_ASSERT(isCompatible(other)); int byteDiff = static_cast<int>(reinterpret_cast<const PxU8*>(mPtr) - reinterpret_cast<const PxU8*>(other.mPtr)); return byteDiff / static_cast<int>(stride()); } /** \brief Equality operator. 
*/ PX_INLINE bool operator==(const PxStrideIterator& other) const { PX_ASSERT(isCompatible(other)); return mPtr == other.mPtr; } /** \brief Inequality operator. */ PX_INLINE bool operator!=(const PxStrideIterator& other) const { PX_ASSERT(isCompatible(other)); return mPtr != other.mPtr; } /** \brief Less than operator. */ PX_INLINE bool operator<(const PxStrideIterator& other) const { PX_ASSERT(isCompatible(other)); return mPtr < other.mPtr; } /** \brief Greater than operator. */ PX_INLINE bool operator>(const PxStrideIterator& other) const { PX_ASSERT(isCompatible(other)); return mPtr > other.mPtr; } /** \brief Less or equal than operator. */ PX_INLINE bool operator<=(const PxStrideIterator& other) const { PX_ASSERT(isCompatible(other)); return mPtr <= other.mPtr; } /** \brief Greater or equal than operator. */ PX_INLINE bool operator>=(const PxStrideIterator& other) const { PX_ASSERT(isCompatible(other)); return mPtr >= other.mPtr; } private: PX_INLINE static T* byteAdd(T* ptr, PxU32 bytes) { return const_cast<T*>(reinterpret_cast<const T*>(reinterpret_cast<const PxU8*>(ptr) + bytes)); } PX_INLINE static T* byteSub(T* ptr, PxU32 bytes) { return const_cast<T*>(reinterpret_cast<const T*>(reinterpret_cast<const PxU8*>(ptr) - bytes)); } PX_INLINE bool isCompatible(const PxStrideIterator& other) const { int byteDiff = static_cast<int>(reinterpret_cast<const PxU8*>(mPtr) - reinterpret_cast<const PxU8*>(other.mPtr)); return (stride() == other.stride()) && (abs(byteDiff) % stride() == 0); } T* mPtr; PxU32 mStride; }; /** \brief Addition operator. */ template <typename T> PX_INLINE PxStrideIterator<T> operator+(int i, PxStrideIterator<T> it) { it += i; return it; } /** \brief Stride iterator factory function which infers the iterator type. */ template <typename T> PX_INLINE PxStrideIterator<T> PxMakeIterator(T* ptr, PxU32 stride = sizeof(T)) { return PxStrideIterator<T>(ptr, stride); } /** \brief Stride iterator factory function which infers the iterator type. 
*/ template <typename T> PX_INLINE PxStrideIterator<const T> PxMakeIterator(const T* ptr, PxU32 stride = sizeof(T)) { return PxStrideIterator<const T>(ptr, stride); } #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
8,531
C
23.101695
115
0.703552
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxErrors.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_ERRORS_H #define PX_ERRORS_H /** \addtogroup foundation @{ */ #include "foundation/Px.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief Error codes These error codes are passed to #PxErrorCallback @see PxErrorCallback */ struct PxErrorCode { enum Enum { eNO_ERROR = 0, //! \brief An informational message. 
eDEBUG_INFO = 1, //! \brief a warning message for the user to help with debugging eDEBUG_WARNING = 2, //! \brief method called with invalid parameter(s) eINVALID_PARAMETER = 4, //! \brief method was called at a time when an operation is not possible eINVALID_OPERATION = 8, //! \brief method failed to allocate some memory eOUT_OF_MEMORY = 16, /** \brief The library failed for some reason. Possibly you have passed invalid values like NaNs, which are not checked for. */ eINTERNAL_ERROR = 32, //! \brief An unrecoverable error, execution should be halted and log output flushed eABORT = 64, //! \brief The SDK has determined that an operation may result in poor performance. ePERF_WARNING = 128, //! \brief A bit mask for including all errors eMASK_ALL = -1 }; }; #if PX_CHECKED #define PX_CHECK_MSG(exp, msg) (!!(exp) || (PxGetFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, PX_FL, msg), 0) ) #define PX_CHECK_AND_RETURN(exp, msg) { if(!(exp)) { PxGetFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, PX_FL, msg); return; } } #define PX_CHECK_AND_RETURN_NULL(exp, msg) { if(!(exp)) { PxGetFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, PX_FL, msg); return 0; } } #define PX_CHECK_AND_RETURN_VAL(exp, msg, r) { if(!(exp)) { PxGetFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, PX_FL, msg); return r; } } #else #define PX_CHECK_MSG(exp, msg) #define PX_CHECK_AND_RETURN(exp, msg) #define PX_CHECK_AND_RETURN_NULL(exp, msg) #define PX_CHECK_AND_RETURN_VAL(exp, msg, r) #endif // shortcut macros: // usage: PxGetFoundation().error(PX_WARN, "static friction %f is is lower than dynamic friction %d", sfr, dfr); #define PX_WARN ::physx::PxErrorCode::eDEBUG_WARNING, PX_FL #define PX_INFO ::physx::PxErrorCode::eDEBUG_INFO, PX_FL #if PX_DEBUG || PX_CHECKED #define PX_WARN_ONCE(string) \ { \ static PxU32 timestamp = 0; \ const PxU32 ts = PxGetWarnOnceTimeStamp(); \ if(timestamp != ts) \ { \ timestamp = ts; \ PxGetFoundation().error(PX_WARN, string); \ } \ } 
#define PX_WARN_ONCE_IF(condition, string) \ { \ if(condition) \ { \ PX_WARN_ONCE(string) \ } \ } #else #define PX_WARN_ONCE(string) ((void)0) #define PX_WARN_ONCE_IF(condition, string) ((void)0) #endif #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
4,739
C
34.639097
151
0.660477
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxUtilities.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_UTILITIES_H #define PX_UTILITIES_H #include "foundation/PxVec3.h" #include "foundation/PxAssert.h" #include "foundation/PxIntrinsics.h" #include "foundation/PxBasicTemplates.h" #if !PX_DOXYGEN namespace physx { #endif PX_INLINE char PxLittleEndian() { int i = 1; return *(reinterpret_cast<char*>(&i)); } // PT: checked casts PX_CUDA_CALLABLE PX_FORCE_INLINE PxU32 PxTo32(PxU64 value) { PX_ASSERT(value <= 0xffffffff); return PxU32(value); } PX_CUDA_CALLABLE PX_FORCE_INLINE PxU16 PxTo16(PxU32 value) { PX_ASSERT(value <= 0xffff); return PxU16(value); } PX_CUDA_CALLABLE PX_FORCE_INLINE PxU8 PxTo8(PxU16 value) { PX_ASSERT(value <= 0xff); return PxU8(value); } PX_CUDA_CALLABLE PX_FORCE_INLINE PxU8 PxTo8(PxU32 value) { PX_ASSERT(value <= 0xff); return PxU8(value); } PX_CUDA_CALLABLE PX_FORCE_INLINE PxU8 PxTo8(PxI32 value) { PX_ASSERT(value <= 0xff); PX_ASSERT(value >= 0); return PxU8(value); } PX_CUDA_CALLABLE PX_FORCE_INLINE PxI8 PxToI8(PxU32 value) { PX_ASSERT(value <= 0x7f); return PxI8(value); } //! @cond /*! Get number of elements in array */ template <typename T, size_t N> char (&PxArraySizeHelper(T (&array)[N]))[N]; #define PX_ARRAY_SIZE(_array) (sizeof(physx::PxArraySizeHelper(_array))) //! @endcond /*! Sort two elements using operator< On return x will be the smaller of the two */ template <class T> PX_CUDA_CALLABLE PX_FORCE_INLINE void PxOrder(T& x, T& y) { if(y < x) PxSwap(x, y); } // most architectures can do predication on real comparisons, and on VMX, it matters PX_CUDA_CALLABLE PX_FORCE_INLINE void PxOrder(PxReal& x, PxReal& y) { PxReal newX = PxMin(x, y); PxReal newY = PxMax(x, y); x = newX; y = newY; } /*! 
Sort two elements using operator< and also keep order of any extra data */ template <class T, class E1> PX_CUDA_CALLABLE PX_FORCE_INLINE void PxOrder(T& x, T& y, E1& xe1, E1& ye1) { if(y < x) { swap(x, y); swap(xe1, ye1); } } #if PX_GCC_FAMILY && !PX_EMSCRIPTEN __attribute__((noreturn)) #endif PX_INLINE void PxDebugBreak() { #if PX_WINDOWS __debugbreak(); #elif PX_LINUX __builtin_trap(); #elif PX_GCC_FAMILY __builtin_trap(); #else PX_ASSERT(false); #endif } #if !PX_DOXYGEN } // namespace physx #endif #endif
3,911
C
25.612245
84
0.724367
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxSimpleTypes.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_SIMPLE_TYPES_H #define PX_SIMPLE_TYPES_H /** \addtogroup foundation @{ */ // Platform specific types: // Design note: Its OK to use int for general loop variables and temps. 
#include "foundation/PxPreprocessor.h" #if PX_VC #pragma warning(push) #pragma warning(disable : 4668) // suppressing warning generated by Microsoft Visual Studio when including this standard // header #endif #if PX_LINUX #define __STDC_LIMIT_MACROS #endif #include <stdint.h> #if PX_VC #pragma warning(pop) #endif #if PX_VC // we could use inttypes.h starting with VC12 #define PX_PRIu64 "I64u" #else #if !PX_APPLE_FAMILY #define __STDC_FORMAT_MACROS #endif #include <inttypes.h> #define PX_PRIu64 PRIu64 #endif #if !PX_DOXYGEN namespace physx { #endif typedef int64_t PxI64; typedef uint64_t PxU64; typedef int32_t PxI32; typedef uint32_t PxU32; typedef int16_t PxI16; typedef uint16_t PxU16; typedef int8_t PxI8; typedef uint8_t PxU8; typedef float PxF32; typedef double PxF64; typedef float PxReal; // Int-as-bool type - has some uses for efficiency and with SIMD typedef PxI32 PxIntBool; static const PxIntBool PxIntFalse = 0; static const PxIntBool PxIntTrue = 1; #if !PX_DOXYGEN } // namespace physx #endif #define PX_SIGN_BITMASK 0x80000000 // Type ranges #define PX_MAX_F32 3.4028234663852885981170418348452e+38F // maximum possible float value #define PX_MAX_F64 DBL_MAX // maximum possible double value #define PX_EPS_F32 FLT_EPSILON // maximum relative error of float rounding #define PX_EPS_F64 DBL_EPSILON // maximum relative error of double rounding #define PX_MAX_REAL PX_MAX_F32 #define PX_EPS_REAL PX_EPS_F32 #define PX_NORMALIZATION_EPSILON float(1e-20f) // Legacy type ranges used by PhysX #define PX_MAX_I8 INT8_MAX #define PX_MIN_I8 INT8_MIN #define PX_MAX_U8 UINT8_MAX #define PX_MIN_U8 UINT8_MIN #define PX_MAX_I16 INT16_MAX #define PX_MIN_I16 INT16_MIN #define PX_MAX_U16 UINT16_MAX #define PX_MIN_U16 UINT16_MIN #define PX_MAX_I32 INT32_MAX #define PX_MIN_I32 INT32_MIN #define PX_MAX_U32 UINT32_MAX #define PX_MIN_U32 UINT32_MIN /** @} */ #endif
3,771
C
30.433333
120
0.760276
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxThread.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_THREAD_H #define PX_THREAD_H #include "foundation/PxUserAllocated.h" // todo: these need to go somewhere else // PT: looks like this is still used on some platforms #if PX_WINDOWS_FAMILY #define PxSpinLockPause() __asm pause #elif PX_LINUX || PX_APPLE_FAMILY || PX_SWITCH #define PxSpinLockPause() asm("nop") #else #error "Platform not supported!" #endif #if !PX_DOXYGEN namespace physx { #endif struct PxThreadPriority { enum Enum { eHIGH = 0, //!< High priority eABOVE_NORMAL = 1, //!< Above Normal priority eNORMAL = 2, //!< Normal/default priority eBELOW_NORMAL = 3, //!< Below Normal priority eLOW = 4, //!< Low priority. eFORCE_DWORD = 0xffFFffFF }; }; class PxRunnable { public: PxRunnable() {} virtual ~PxRunnable() {} virtual void execute() {} }; class PX_FOUNDATION_API PxThreadImpl { public: typedef size_t Id; // space for a pointer or an integer typedef void* (*ExecuteFn)(void*); static PxU32 getDefaultStackSize(); static Id getId(); /** Construct (but do not start) the thread object. The OS thread object will not be created until start() is called. Executes in the context of the spawning thread. */ PxThreadImpl(); /** Construct and start the the thread, passing the given arg to the given fn. (pthread style) */ PxThreadImpl(ExecuteFn fn, void* arg, const char* name); /** Deallocate all resources associated with the thread. Should be called in the context of the spawning thread. */ ~PxThreadImpl(); /** Create the OS thread and start it running. Called in the context of the spawning thread. If an affinity mask has previously been set then it will be applied after the thread has been created. */ void start(PxU32 stackSize, PxRunnable* r); /** Violently kill the current thread. Blunt instrument, not recommended since it can leave all kinds of things unreleased (stack, memory, mutexes...) Should be called in the context of the spawning thread. */ void kill(); /** Stop the thread. 
Signals the spawned thread that it should stop, so the thread should check regularly */ void signalQuit(); /** Wait for a thread to stop. Should be called in the context of the spawning thread. Returns false if the thread has not been started. */ bool waitForQuit(); /** check whether the thread is signalled to quit. Called in the context of the spawned thread. */ bool quitIsSignalled(); /** Cleanly shut down this thread. Called in the context of the spawned thread. */ void quit(); /** Change the affinity mask for this thread. The mask is a platform specific value. On Windows, Linux, and Switch platforms, each set mask bit represents the index of a logical processor that the OS may schedule thread execution on. Bits outside the range of valid logical processors may be ignored or cause the function to return an error. On Apple platforms, this function has no effect. If the thread has not yet been started then the mask is stored and applied when the thread is started. If the thread has already been started then this method returns the previous affinity mask on success, otherwise it returns zero. */ PxU32 setAffinityMask(PxU32 mask); static PxThreadPriority::Enum getPriority(Id threadId); /** Set thread priority. */ void setPriority(PxThreadPriority::Enum prio); /** set the thread's name */ void setName(const char* name); /** Put the current thread to sleep for the given number of milliseconds */ static void sleep(PxU32 ms); /** Yield the current thread's slot on the CPU */ static void yield(); /** Inform the processor that we're in a busy wait to give it a chance to do something clever. yield() yields the thread, while yieldProcessor() aims to yield the processor */ static void yieldProcessor(); /** Return the number of physical cores (does not include hyper-threaded cores), returns 0 on failure */ static PxU32 getNbPhysicalCores(); /** Size of this class. 
*/ static PxU32 getSize(); }; /** Thread abstraction API */ template <typename Alloc = PxReflectionAllocator<PxThreadImpl> > class PxThreadT : protected Alloc, public PxUserAllocated, public PxRunnable { public: typedef PxThreadImpl::Id Id; // space for a pointer or an integer /** Construct (but do not start) the thread object. Executes in the context of the spawning thread */ PxThreadT(const Alloc& alloc = Alloc()) : Alloc(alloc) { mImpl = reinterpret_cast<PxThreadImpl*>(Alloc::allocate(PxThreadImpl::getSize(), PX_FL)); PX_PLACEMENT_NEW(mImpl, PxThreadImpl)(); } /** Construct and start the the thread, passing the given arg to the given fn. (pthread style) */ PxThreadT(PxThreadImpl::ExecuteFn fn, void* arg, const char* name, const Alloc& alloc = Alloc()) : Alloc(alloc) { mImpl = reinterpret_cast<PxThreadImpl*>(Alloc::allocate(PxThreadImpl::getSize(), PX_FL)); PX_PLACEMENT_NEW(mImpl, PxThreadImpl)(fn, arg, name); } /** Deallocate all resources associated with the thread. Should be called in the context of the spawning thread. */ virtual ~PxThreadT() { mImpl->~PxThreadImpl(); Alloc::deallocate(mImpl); } /** start the thread running. Called in the context of the spawning thread. */ void start(PxU32 stackSize = PxThreadImpl::getDefaultStackSize()) { mImpl->start(stackSize, this); } /** Violently kill the current thread. Blunt instrument, not recommended since it can leave all kinds of things unreleased (stack, memory, mutexes...) Should be called in the context of the spawning thread. */ void kill() { mImpl->kill(); } /** The virtual execute() method is the user defined function that will run in the new thread. Called in the context of the spawned thread. */ virtual void execute(void) { } /** stop the thread. Signals the spawned thread that it should stop, so the thread should check regularly */ void signalQuit() { mImpl->signalQuit(); } /** Wait for a thread to stop. Should be called in the context of the spawning thread. 
Returns false if the thread has not been started. */ bool waitForQuit() { return mImpl->waitForQuit(); } /** check whether the thread is signalled to quit. Called in the context of the spawned thread. */ bool quitIsSignalled() { return mImpl->quitIsSignalled(); } /** Cleanly shut down this thread. Called in the context of the spawned thread. */ void quit() { mImpl->quit(); } PxU32 setAffinityMask(PxU32 mask) { return mImpl->setAffinityMask(mask); } static PxThreadPriority::Enum getPriority(PxThreadImpl::Id threadId) { return PxThreadImpl::getPriority(threadId); } /** Set thread priority. */ void setPriority(PxThreadPriority::Enum prio) { mImpl->setPriority(prio); } /** set the thread's name */ void setName(const char* name) { mImpl->setName(name); } /** Put the current thread to sleep for the given number of milliseconds */ static void sleep(PxU32 ms) { PxThreadImpl::sleep(ms); } /** Yield the current thread's slot on the CPU */ static void yield() { PxThreadImpl::yield(); } /** Inform the processor that we're in a busy wait to give it a chance to do something clever yield() yields the thread, while yieldProcessor() aims to yield the processor */ static void yieldProcesor() { PxThreadImpl::yieldProcessor(); } static PxU32 getDefaultStackSize() { return PxThreadImpl::getDefaultStackSize(); } static PxThreadImpl::Id getId() { return PxThreadImpl::getId(); } static PxU32 getNbPhysicalCores() { return PxThreadImpl::getNbPhysicalCores(); } private: class PxThreadImpl* mImpl; }; typedef PxThreadT<> PxThread; PX_FOUNDATION_API PxU32 PxTlsAlloc(); PX_FOUNDATION_API void PxTlsFree(PxU32 index); PX_FOUNDATION_API void* PxTlsGet(PxU32 index); PX_FOUNDATION_API size_t PxTlsGetValue(PxU32 index); PX_FOUNDATION_API PxU32 PxTlsSet(PxU32 index, void* value); PX_FOUNDATION_API PxU32 PxTlsSetValue(PxU32 index, size_t value); #if !PX_DOXYGEN } // namespace physx #endif #endif
9,703
C
25.227027
112
0.725961
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxSList.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_SLIST_H #define PX_SLIST_H #include "foundation/Px.h" #include "foundation/PxAssert.h" #include "foundation/PxAlignedMalloc.h" #if PX_P64_FAMILY #define PX_SLIST_ALIGNMENT 16 #else #define PX_SLIST_ALIGNMENT 8 #endif #if !PX_DOXYGEN namespace physx { #endif #if PX_VC #pragma warning(push) #pragma warning(disable : 4324) // Padding was added at the end of a structure because of a __declspec(align) value. #endif PX_ALIGN_PREFIX(PX_SLIST_ALIGNMENT) class PxSListEntry { friend struct PxSListImpl; public: PxSListEntry() : mNext(NULL) { PX_ASSERT((size_t(this) & (PX_SLIST_ALIGNMENT - 1)) == 0); } // Only use on elements returned by SList::flush() // because the operation is not atomic. PxSListEntry* next() { return mNext; } private: PxSListEntry* mNext; }PX_ALIGN_SUFFIX(PX_SLIST_ALIGNMENT); #if PX_VC #pragma warning(pop) #endif // template-less implementation struct PX_FOUNDATION_API PxSListImpl { PxSListImpl(); ~PxSListImpl(); void push(PxSListEntry* entry); PxSListEntry* pop(); PxSListEntry* flush(); static uint32_t getSize(); }; template <typename Alloc = PxReflectionAllocator<PxSListImpl> > class PxSListT : protected Alloc { public: PxSListT(const Alloc& alloc = Alloc()) : Alloc(alloc) { mImpl = reinterpret_cast<PxSListImpl*>(Alloc::allocate(PxSListImpl::getSize(), PX_FL)); PX_ASSERT((size_t(mImpl) & (PX_SLIST_ALIGNMENT - 1)) == 0); PX_PLACEMENT_NEW(mImpl, PxSListImpl)(); } ~PxSListT() { mImpl->~PxSListImpl(); Alloc::deallocate(mImpl); } // pushes a new element to the list void push(PxSListEntry& entry) { mImpl->push(&entry); } // pops an element from the list PxSListEntry* pop() { return mImpl->pop(); } // removes all items from list, returns pointer to first element PxSListEntry* flush() { return mImpl->flush(); } private: PxSListImpl* mImpl; }; typedef PxSListT<> PxSList; #if !PX_DOXYGEN } // namespace physx #endif #endif
3,648
C
26.231343
117
0.730811
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxTransform.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_TRANSFORM_H #define PX_TRANSFORM_H /** \addtogroup foundation @{ */ #include "foundation/PxQuat.h" #if !PX_DOXYGEN namespace physx { #endif /*! 
\brief class representing a rigid euclidean transform as a quaternion and a vector */ template<class Type> class PxTransformT { public: PxQuatT<Type> q; PxVec3T<Type> p; PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransformT() { } PX_CUDA_CALLABLE PX_FORCE_INLINE explicit PxTransformT(PxIDENTITY) : q(PxIdentity), p(PxZero) { } PX_CUDA_CALLABLE PX_FORCE_INLINE explicit PxTransformT(const PxVec3T<Type>& position) : q(PxIdentity), p(position) { } PX_CUDA_CALLABLE PX_FORCE_INLINE explicit PxTransformT(const PxQuatT<Type>& orientation) : q(orientation), p(Type(0)) { PX_ASSERT(orientation.isSane()); } PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransformT(Type x, Type y, Type z, PxQuatT<Type> aQ = PxQuatT<Type>(PxIdentity)) : q(aQ), p(x, y, z) { } PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransformT(const PxVec3T<Type>& p0, const PxQuatT<Type>& q0) : q(q0), p(p0) { PX_ASSERT(q0.isSane()); } PX_CUDA_CALLABLE PX_FORCE_INLINE explicit PxTransformT(const PxMat44T<Type>& m); // defined in PxMat44.h PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransformT(const PxTransformT& other) { p = other.p; q = other.q; } PX_CUDA_CALLABLE PX_FORCE_INLINE void operator=(const PxTransformT& other) { p = other.p; q = other.q; } /** \brief returns true if the two transforms are exactly equal */ PX_CUDA_CALLABLE PX_INLINE bool operator==(const PxTransformT& t) const { return p == t.p && q == t.q; } PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransformT operator*(const PxTransformT& x) const { PX_ASSERT(x.isSane()); return transform(x); } //! Equals matrix multiplication PX_CUDA_CALLABLE PX_INLINE PxTransformT& operator*=(const PxTransformT& other) { *this = *this * other; return *this; } PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransformT getInverse() const { PX_ASSERT(isFinite()); return PxTransformT(q.rotateInv(-p), q.getConjugate()); } /** \brief return a normalized transform (i.e. 
one in which the quaternion has unit magnitude) */ PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransformT getNormalized() const { return PxTransformT(p, q.getNormalized()); } PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> transform(const PxVec3T<Type>& input) const { PX_ASSERT(isFinite()); return q.rotate(input) + p; } PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> transformInv(const PxVec3T<Type>& input) const { PX_ASSERT(isFinite()); return q.rotateInv(input - p); } PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> rotate(const PxVec3T<Type>& input) const { PX_ASSERT(isFinite()); return q.rotate(input); } PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> rotateInv(const PxVec3T<Type>& input) const { PX_ASSERT(isFinite()); return q.rotateInv(input); } //! Transform transform to parent (returns compound transform: first src, then *this) PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransformT transform(const PxTransformT& src) const { PX_ASSERT(src.isSane()); PX_ASSERT(isSane()); // src = [srct, srcr] -> [r*srct + t, r*srcr] return PxTransformT(q.rotate(src.p) + p, q * src.q); } //! Transform transform from parent (returns compound transform: first src, then this->inverse) PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransformT transformInv(const PxTransformT& src) const { PX_ASSERT(src.isSane()); PX_ASSERT(isFinite()); // src = [srct, srcr] -> [r^-1*(srct-t), r^-1*srcr] const PxQuatT<Type> qinv = q.getConjugate(); return PxTransformT(qinv.rotate(src.p - p), qinv * src.q); } /** \brief returns true if finite and q is a unit quaternion */ PX_CUDA_CALLABLE bool isValid() const { return p.isFinite() && q.isFinite() && q.isUnit(); } /** \brief returns true if finite and quat magnitude is reasonably close to unit to allow for some accumulation of error vs isValid */ PX_CUDA_CALLABLE bool isSane() const { return isFinite() && q.isSane(); } /** \brief returns true if all elems are finite (not NAN or INF, etc.) 
*/ PX_CUDA_CALLABLE PX_FORCE_INLINE bool isFinite() const { return p.isFinite() && q.isFinite(); } }; typedef PxTransformT<float> PxTransform; typedef PxTransformT<double> PxTransformd; /*! \brief A generic padded & aligned transform class. This can be used for safe faster loads & stores, and faster address computations (the default PxTransformT often generating imuls for this otherwise). Padding bytes can be reused to store useful data if needed. */ struct PX_ALIGN_PREFIX(16) PxTransformPadded : PxTransform { PX_FORCE_INLINE PxTransformPadded() { } PX_FORCE_INLINE PxTransformPadded(const PxTransformPadded& other) : PxTransform(other) { } PX_FORCE_INLINE explicit PxTransformPadded(const PxTransform& other) : PxTransform(other) { } PX_FORCE_INLINE explicit PxTransformPadded(PxIDENTITY) : PxTransform(PxIdentity) { } PX_FORCE_INLINE explicit PxTransformPadded(const PxVec3& position) : PxTransform(position) { } PX_FORCE_INLINE explicit PxTransformPadded(const PxQuat& orientation) : PxTransform(orientation) { } PX_FORCE_INLINE PxTransformPadded(const PxVec3& p0, const PxQuat& q0) : PxTransform(p0, q0) { } PX_FORCE_INLINE void operator=(const PxTransformPadded& other) { p = other.p; q = other.q; } PX_FORCE_INLINE void operator=(const PxTransform& other) { p = other.p; q = other.q; } PxU32 padding; } PX_ALIGN_SUFFIX(16); PX_COMPILE_TIME_ASSERT(sizeof(PxTransformPadded)==32); typedef PxTransformPadded PxTransform32; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
7,325
C
26.855513
136
0.723003
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxMat33.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_MAT33_H #define PX_MAT33_H /** \addtogroup foundation @{ */ #include "foundation/PxVec3.h" #include "foundation/PxQuat.h" #if !PX_DOXYGEN namespace physx { #endif /*! \brief 3x3 matrix class Some clarifications, as there have been much confusion about matrix formats etc in the past. 
Short: - Matrix have base vectors in columns (vectors are column matrices, 3x1 matrices). - Matrix is physically stored in column major format - Matrices are concaternated from left Long: Given three base vectors a, b and c the matrix is stored as |a.x b.x c.x| |a.y b.y c.y| |a.z b.z c.z| Vectors are treated as columns, so the vector v is |x| |y| |z| And matrices are applied _before_ the vector (pre-multiplication) v' = M*v |x'| |a.x b.x c.x| |x| |a.x*x + b.x*y + c.x*z| |y'| = |a.y b.y c.y| * |y| = |a.y*x + b.y*y + c.y*z| |z'| |a.z b.z c.z| |z| |a.z*x + b.z*y + c.z*z| Physical storage and indexing: To be compatible with popular 3d rendering APIs (read D3d and OpenGL) the physical indexing is |0 3 6| |1 4 7| |2 5 8| index = column*3 + row which in C++ translates to M[column][row] The mathematical indexing is M_row,column and this is what is used for _-notation so _12 is 1st row, second column and operator(row, column)! */ template<class Type> class PxMat33T { public: //! Default constructor PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat33T() { } //! identity constructor PX_CUDA_CALLABLE PX_INLINE PxMat33T(PxIDENTITY) : column0(Type(1.0), Type(0.0), Type(0.0)), column1(Type(0.0), Type(1.0), Type(0.0)), column2(Type(0.0), Type(0.0), Type(1.0)) { } //! zero constructor PX_CUDA_CALLABLE PX_INLINE PxMat33T(PxZERO) : column0(Type(0.0)), column1(Type(0.0)), column2(Type(0.0)) { } //! Construct from three base vectors PX_CUDA_CALLABLE PxMat33T(const PxVec3T<Type>& col0, const PxVec3T<Type>& col1, const PxVec3T<Type>& col2) : column0(col0), column1(col1), column2(col2) { } //! constructor from a scalar, which generates a multiple of the identity matrix explicit PX_CUDA_CALLABLE PX_INLINE PxMat33T(Type r) : column0(r, Type(0.0), Type(0.0)), column1(Type(0.0), r, Type(0.0)), column2(Type(0.0), Type(0.0), r) { } //! 
Construct from Type[9] explicit PX_CUDA_CALLABLE PX_INLINE PxMat33T(Type values[]) : column0(values[0], values[1], values[2]), column1(values[3], values[4], values[5]), column2(values[6], values[7], values[8]) { } //! Construct from a quaternion explicit PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat33T(const PxQuatT<Type>& q) { // PT: TODO: PX-566 const Type x = q.x; const Type y = q.y; const Type z = q.z; const Type w = q.w; const Type x2 = x + x; const Type y2 = y + y; const Type z2 = z + z; const Type xx = x2 * x; const Type yy = y2 * y; const Type zz = z2 * z; const Type xy = x2 * y; const Type xz = x2 * z; const Type xw = x2 * w; const Type yz = y2 * z; const Type yw = y2 * w; const Type zw = z2 * w; column0 = PxVec3T<Type>(Type(1.0) - yy - zz, xy + zw, xz - yw); column1 = PxVec3T<Type>(xy - zw, Type(1.0) - xx - zz, yz + xw); column2 = PxVec3T<Type>(xz + yw, yz - xw, Type(1.0) - xx - yy); } //! Copy constructor PX_CUDA_CALLABLE PX_INLINE PxMat33T(const PxMat33T& other) : column0(other.column0), column1(other.column1), column2(other.column2) { } //! Assignment operator PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat33T& operator=(const PxMat33T& other) { column0 = other.column0; column1 = other.column1; column2 = other.column2; return *this; } //! Construct from diagonal, off-diagonals are zero. PX_CUDA_CALLABLE PX_INLINE static const PxMat33T createDiagonal(const PxVec3T<Type>& d) { return PxMat33T(PxVec3T<Type>(d.x, Type(0.0), Type(0.0)), PxVec3T<Type>(Type(0.0), d.y, Type(0.0)), PxVec3T<Type>(Type(0.0), Type(0.0), d.z)); } //! Computes the outer product of two vectors PX_CUDA_CALLABLE PX_INLINE static const PxMat33T outer(const PxVec3T<Type>& a, const PxVec3T<Type>& b) { return PxMat33T(a * b.x, a * b.y, a * b.z); } /** \brief returns true if the two matrices are exactly equal */ PX_CUDA_CALLABLE PX_INLINE bool operator==(const PxMat33T& m) const { return column0 == m.column0 && column1 == m.column1 && column2 == m.column2; } //! 
Get transposed matrix PX_CUDA_CALLABLE PX_FORCE_INLINE const PxMat33T getTranspose() const { const PxVec3T<Type> v0(column0.x, column1.x, column2.x); const PxVec3T<Type> v1(column0.y, column1.y, column2.y); const PxVec3T<Type> v2(column0.z, column1.z, column2.z); return PxMat33T(v0, v1, v2); } //! Get the real inverse PX_CUDA_CALLABLE PX_INLINE const PxMat33T getInverse() const { const Type det = getDeterminant(); PxMat33T inverse; if(det != Type(0.0)) { const Type invDet = Type(1.0) / det; inverse.column0.x = invDet * (column1.y * column2.z - column2.y * column1.z); inverse.column0.y = invDet * -(column0.y * column2.z - column2.y * column0.z); inverse.column0.z = invDet * (column0.y * column1.z - column0.z * column1.y); inverse.column1.x = invDet * -(column1.x * column2.z - column1.z * column2.x); inverse.column1.y = invDet * (column0.x * column2.z - column0.z * column2.x); inverse.column1.z = invDet * -(column0.x * column1.z - column0.z * column1.x); inverse.column2.x = invDet * (column1.x * column2.y - column1.y * column2.x); inverse.column2.y = invDet * -(column0.x * column2.y - column0.y * column2.x); inverse.column2.z = invDet * (column0.x * column1.y - column1.x * column0.y); return inverse; } else { return PxMat33T(PxIdentity); } } //! Get determinant PX_CUDA_CALLABLE PX_INLINE Type getDeterminant() const { return column0.dot(column1.cross(column2)); } //! Unary minus PX_CUDA_CALLABLE PX_INLINE const PxMat33T operator-() const { return PxMat33T(-column0, -column1, -column2); } //! Add PX_CUDA_CALLABLE PX_INLINE const PxMat33T operator+(const PxMat33T& other) const { return PxMat33T(column0 + other.column0, column1 + other.column1, column2 + other.column2); } //! Subtract PX_CUDA_CALLABLE PX_INLINE const PxMat33T operator-(const PxMat33T& other) const { return PxMat33T(column0 - other.column0, column1 - other.column1, column2 - other.column2); } //! 
Scalar multiplication PX_CUDA_CALLABLE PX_INLINE const PxMat33T operator*(Type scalar) const { return PxMat33T(column0 * scalar, column1 * scalar, column2 * scalar); } template<class Type2> PX_CUDA_CALLABLE PX_INLINE friend PxMat33T<Type2> operator*(Type2, const PxMat33T<Type2>&); //! Matrix vector multiplication (returns 'this->transform(vec)') PX_CUDA_CALLABLE PX_INLINE const PxVec3T<Type> operator*(const PxVec3T<Type>& vec) const { return transform(vec); } // a <op>= b operators //! Matrix multiplication PX_CUDA_CALLABLE PX_FORCE_INLINE const PxMat33T operator*(const PxMat33T& other) const { // Rows from this <dot> columns from other // column0 = transform(other.column0) etc return PxMat33T(transform(other.column0), transform(other.column1), transform(other.column2)); } //! Equals-add PX_CUDA_CALLABLE PX_INLINE PxMat33T& operator+=(const PxMat33T& other) { column0 += other.column0; column1 += other.column1; column2 += other.column2; return *this; } //! Equals-sub PX_CUDA_CALLABLE PX_INLINE PxMat33T& operator-=(const PxMat33T& other) { column0 -= other.column0; column1 -= other.column1; column2 -= other.column2; return *this; } //! Equals scalar multiplication PX_CUDA_CALLABLE PX_INLINE PxMat33T& operator*=(Type scalar) { column0 *= scalar; column1 *= scalar; column2 *= scalar; return *this; } //! Equals matrix multiplication PX_CUDA_CALLABLE PX_INLINE PxMat33T& operator*=(const PxMat33T& other) { *this = *this * other; return *this; } //! Element access, mathematical way! PX_CUDA_CALLABLE PX_FORCE_INLINE Type operator()(PxU32 row, PxU32 col) const { return (*this)[col][row]; } //! Element access, mathematical way! PX_CUDA_CALLABLE PX_FORCE_INLINE Type& operator()(PxU32 row, PxU32 col) { return (*this)[col][row]; } // Transform etc //! Transform vector by matrix, equal to v' = M*v PX_CUDA_CALLABLE PX_FORCE_INLINE const PxVec3T<Type> transform(const PxVec3T<Type>& other) const { return column0 * other.x + column1 * other.y + column2 * other.z; } //! 
Transform vector by matrix transpose, v' = M^t*v PX_CUDA_CALLABLE PX_INLINE const PxVec3T<Type> transformTranspose(const PxVec3T<Type>& other) const { return PxVec3T<Type>(column0.dot(other), column1.dot(other), column2.dot(other)); } PX_CUDA_CALLABLE PX_FORCE_INLINE const Type* front() const { return &column0.x; } PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type>& operator[](PxU32 num) { return (&column0)[num]; } PX_CUDA_CALLABLE PX_FORCE_INLINE const PxVec3T<Type>& operator[](PxU32 num) const { return (&column0)[num]; } // Data, see above for format! PxVec3T<Type> column0, column1, column2; // the three base vectors }; template<class Type> PX_CUDA_CALLABLE PX_INLINE PxMat33T<Type> operator*(Type scalar, const PxMat33T<Type>& m) { return PxMat33T<Type>(scalar * m.column0, scalar * m.column1, scalar * m.column2); } // implementation from PxQuat.h template<class Type> PX_CUDA_CALLABLE PX_INLINE PxQuatT<Type>::PxQuatT(const PxMat33T<Type>& m) { if(m.column2.z < Type(0)) { if(m.column0.x > m.column1.y) { const Type t = Type(1.0) + m.column0.x - m.column1.y - m.column2.z; *this = PxQuatT<Type>(t, m.column0.y + m.column1.x, m.column2.x + m.column0.z, m.column1.z - m.column2.y) * (Type(0.5) / PxSqrt(t)); } else { const Type t = Type(1.0) - m.column0.x + m.column1.y - m.column2.z; *this = PxQuatT<Type>(m.column0.y + m.column1.x, t, m.column1.z + m.column2.y, m.column2.x - m.column0.z) * (Type(0.5) / PxSqrt(t)); } } else { if(m.column0.x < -m.column1.y) { const Type t = Type(1.0) - m.column0.x - m.column1.y + m.column2.z; *this = PxQuatT<Type>(m.column2.x + m.column0.z, m.column1.z + m.column2.y, t, m.column0.y - m.column1.x) * (Type(0.5) / PxSqrt(t)); } else { const Type t = Type(1.0) + m.column0.x + m.column1.y + m.column2.z; *this = PxQuatT<Type>(m.column1.z - m.column2.y, m.column2.x - m.column0.z, m.column0.y - m.column1.x, t) * (Type(0.5) / PxSqrt(t)); } } } typedef PxMat33T<float> PxMat33; typedef PxMat33T<double> PxMat33d; /** \brief Sets a rotation matrix around the 
X axis. \param m [out] output rotation matrix \param angle [in] desired angle */ PX_INLINE void PxSetRotX(PxMat33& m, PxReal angle) { m = PxMat33(PxIdentity); PxReal sin, cos; PxSinCos(angle, sin, cos); m[1][1] = m[2][2] = cos; m[1][2] = sin; m[2][1] = -sin; } /** \brief Sets a rotation matrix around the Y axis. \param m [out] output rotation matrix \param angle [in] desired angle */ PX_INLINE void PxSetRotY(PxMat33& m, PxReal angle) { m = PxMat33(PxIdentity); PxReal sin, cos; PxSinCos(angle, sin, cos); m[0][0] = m[2][2] = cos; m[0][2] = -sin; m[2][0] = sin; } /** \brief Sets a rotation matrix around the Z axis. \param m [out] output rotation matrix \param angle [in] desired angle */ PX_INLINE void PxSetRotZ(PxMat33& m, PxReal angle) { m = PxMat33(PxIdentity); PxReal sin, cos; PxSinCos(angle, sin, cos); m[0][0] = m[1][1] = cos; m[0][1] = sin; m[1][0] = -sin; } /** \brief Returns a rotation quaternion around the X axis. \param angle [in] desired angle \return Quaternion that rotates around the desired axis */ PX_INLINE PxQuat PxGetRotXQuat(float angle) { PxMat33 m; PxSetRotX(m, angle); return PxQuat(m); } /** \brief Returns a rotation quaternion around the Y axis. \param angle [in] desired angle \return Quaternion that rotates around the desired axis */ PX_INLINE PxQuat PxGetRotYQuat(float angle) { PxMat33 m; PxSetRotY(m, angle); return PxQuat(m); } /** \brief Returns a rotation quaternion around the Z axis. \param angle [in] desired angle \return Quaternion that rotates around the desired axis */ PX_INLINE PxQuat PxGetRotZQuat(float angle) { PxMat33 m; PxSetRotZ(m, angle); return PxQuat(m); } #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
13,950
C
26.194932
135
0.677993
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxVecTransform.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_VEC_TRANSFORM_H #define PX_VEC_TRANSFORM_H #include "foundation/PxVecMath.h" #include "foundation/PxTransform.h" #if !PX_DOXYGEN namespace physx { #endif namespace aos { class PxTransformV { public: QuatV q; Vec3V p; PX_FORCE_INLINE PxTransformV(const PxTransform& orientation) { // const PxQuat oq = orientation.q; // const PxF32 f[4] = {oq.x, oq.y, oq.z, oq.w}; q = QuatVLoadXYZW(orientation.q.x, orientation.q.y, orientation.q.z, orientation.q.w); // q = QuatV_From_F32Array(&oq.x); p = V3LoadU(orientation.p); } PX_FORCE_INLINE PxTransformV(const Vec3VArg p0 = V3Zero(), const QuatVArg q0 = QuatIdentity()) : q(q0), p(p0) { PX_ASSERT(isSaneQuatV(q0)); } PX_FORCE_INLINE PxTransformV operator*(const PxTransformV& x) const { PX_ASSERT(x.isSane()); return transform(x); } PX_FORCE_INLINE PxTransformV getInverse() const { PX_ASSERT(isFinite()); // return PxTransform(q.rotateInv(-p),q.getConjugate()); return PxTransformV(QuatRotateInv(q, V3Neg(p)), QuatConjugate(q)); } PX_FORCE_INLINE void normalize() { p = V3Zero(); q = QuatIdentity(); } PX_FORCE_INLINE void invalidate() { p = V3Splat(FMax()); q = QuatIdentity(); } PX_FORCE_INLINE Vec3V transform(const Vec3VArg input) const { PX_ASSERT(isFinite()); // return q.rotate(input) + p; return QuatTransform(q, p, input); } PX_FORCE_INLINE Vec3V transformInv(const Vec3VArg input) const { PX_ASSERT(isFinite()); // return q.rotateInv(input-p); return QuatRotateInv(q, V3Sub(input, p)); } PX_FORCE_INLINE Vec3V rotate(const Vec3VArg input) const { PX_ASSERT(isFinite()); // return q.rotate(input); return QuatRotate(q, input); } PX_FORCE_INLINE Vec3V rotateInv(const Vec3VArg input) const { PX_ASSERT(isFinite()); // return q.rotateInv(input); return QuatRotateInv(q, input); } //! 
Transform transform to parent (returns compound transform: first src, then *this) PX_FORCE_INLINE PxTransformV transform(const PxTransformV& src) const { PX_ASSERT(src.isSane()); PX_ASSERT(isSane()); // src = [srct, srcr] -> [r*srct + t, r*srcr] // return PxTransform(q.rotate(src.p) + p, q*src.q); return PxTransformV(V3Add(QuatRotate(q, src.p), p), QuatMul(q, src.q)); } #if PX_LINUX && PX_CLANG #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wbitwise-instead-of-logical" // bitwise intentionally chosen for performance #endif /** \brief returns true if finite and q is a unit quaternion */ PX_FORCE_INLINE bool isValid() const { // return p.isFinite() && q.isFinite() && q.isValid(); return isFiniteVec3V(p) & isFiniteQuatV(q) & isValidQuatV(q); } /** \brief returns true if finite and quat magnitude is reasonably close to unit to allow for some accumulation of error vs isValid */ PX_FORCE_INLINE bool isSane() const { // return isFinite() && q.isSane(); return isFinite() & isSaneQuatV(q); } /** \brief returns true if all elems are finite (not NAN or INF, etc.) */ PX_FORCE_INLINE bool isFinite() const { // return p.isFinite() && q.isFinite(); return isFiniteVec3V(p) & isFiniteQuatV(q); } #if PX_LINUX && PX_CLANG #pragma clang diagnostic pop #endif //! 
Transform transform from parent (returns compound transform: first src, then this->inverse) PX_FORCE_INLINE PxTransformV transformInv(const PxTransformV& src) const { PX_ASSERT(src.isSane()); PX_ASSERT(isFinite()); // src = [srct, srcr] -> [r^-1*(srct-t), r^-1*srcr] /*PxQuat qinv = q.getConjugate(); return PxTransform(qinv.rotate(src.p - p), qinv*src.q);*/ const QuatV qinv = QuatConjugate(q); const Vec3V v = QuatRotate(qinv, V3Sub(src.p, p)); const QuatV rot = QuatMul(qinv, src.q); return PxTransformV(v, rot); } static PX_FORCE_INLINE PxTransformV createIdentity() { return PxTransformV(V3Zero()); } }; PX_FORCE_INLINE PxTransformV loadTransformA(const PxTransform& transform) { const QuatV q0 = QuatVLoadA(&transform.q.x); const Vec3V p0 = V3LoadA(&transform.p.x); return PxTransformV(p0, q0); } PX_FORCE_INLINE PxTransformV loadTransformU(const PxTransform& transform) { const QuatV q0 = QuatVLoadU(&transform.q.x); const Vec3V p0 = V3LoadU(&transform.p.x); return PxTransformV(p0, q0); } class PxMatTransformV { public: Mat33V rot; Vec3V p; PX_FORCE_INLINE PxMatTransformV() { p = V3Zero(); rot = M33Identity(); } PX_FORCE_INLINE PxMatTransformV(const Vec3VArg _p, const Mat33V& _rot) { p = _p; rot = _rot; } PX_FORCE_INLINE PxMatTransformV(const PxTransformV& other) { p = other.p; QuatGetMat33V(other.q, rot.col0, rot.col1, rot.col2); } PX_FORCE_INLINE PxMatTransformV(const Vec3VArg _p, const QuatV& quat) { p = _p; QuatGetMat33V(quat, rot.col0, rot.col1, rot.col2); } PX_FORCE_INLINE Vec3V getCol0() const { return rot.col0; } PX_FORCE_INLINE Vec3V getCol1() const { return rot.col1; } PX_FORCE_INLINE Vec3V getCol2() const { return rot.col2; } PX_FORCE_INLINE void setCol0(const Vec3VArg col0) { rot.col0 = col0; } PX_FORCE_INLINE void setCol1(const Vec3VArg col1) { rot.col1 = col1; } PX_FORCE_INLINE void setCol2(const Vec3VArg col2) { rot.col2 = col2; } PX_FORCE_INLINE Vec3V transform(const Vec3VArg input) const { return V3Add(p, M33MulV3(rot, input)); } PX_FORCE_INLINE Vec3V 
transformInv(const Vec3VArg input) const { return M33TrnspsMulV3(rot, V3Sub(input, p)); // QuatRotateInv(q, V3Sub(input, p)); } PX_FORCE_INLINE Vec3V rotate(const Vec3VArg input) const { return M33MulV3(rot, input); } PX_FORCE_INLINE Vec3V rotateInv(const Vec3VArg input) const { return M33TrnspsMulV3(rot, input); } PX_FORCE_INLINE PxMatTransformV transformInv(const PxMatTransformV& src) const { const Vec3V v = M33TrnspsMulV3(rot, V3Sub(src.p, p)); const Mat33V mat = M33MulM33(M33Trnsps(rot), src.rot); return PxMatTransformV(v, mat); } }; } #if !PX_DOXYGEN } // namespace physx #endif #endif
7,614
C
25.078767
117
0.708563
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxMath.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_MATH_H
#define PX_MATH_H

/** \addtogroup foundation
@{
*/

#include "foundation/PxPreprocessor.h"

#if PX_VC
#pragma warning(push)
#pragma warning(disable : 4985) // 'symbol name': attributes not present on previous declaration
#endif
#include <math.h>
#if PX_VC
#pragma warning(pop)
#endif

#if (PX_LINUX_FAMILY && !PX_ARM_FAMILY)
// Force linking against nothing newer than glibc v2.17 to remain compatible with platforms with older glibc versions.
// NOTE(review): the directives below bind to the GLIBC_2.2.5 symbol versions (the x86-64 baseline), which any
// glibc >= 2.2.5 — including 2.17 — provides.
__asm__(".symver expf,expf@GLIBC_2.2.5");
__asm__(".symver powf,powf@GLIBC_2.2.5");
#endif

#include <float.h>
#include "foundation/PxMathIntrinsics.h"
#include "foundation/PxAssert.h"

#if !PX_DOXYGEN
namespace physx
{
#endif

// constants
static const float PxPi = float(3.141592653589793);
static const float PxHalfPi = float(1.57079632679489661923);
static const float PxTwoPi = float(6.28318530717958647692);
static const float PxInvPi = float(0.31830988618379067154);
static const float PxInvTwoPi = float(0.15915494309189533577);
static const float PxPiDivTwo = float(1.57079632679489661923);
static const float PxPiDivFour = float(0.78539816339744830962);
static const float PxSqrt2 = float(1.4142135623730951);
static const float PxInvSqrt2 = float(0.7071067811865476);

/**
\brief The return value is the greater of the two specified values.
*/
template <class T>
PX_CUDA_CALLABLE PX_FORCE_INLINE T PxMax(T a, T b)
{
	return a < b ? b : a;
}

//! overload for float to use fsel on xbox
template <>
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxMax(float a, float b)
{
	return intrinsics::selectMax(a, b);
}

/**
\brief The return value is the lesser of the two specified values.
*/
template <class T>
PX_CUDA_CALLABLE PX_FORCE_INLINE T PxMin(T a, T b)
{
	return a < b ? a : b;
}

template <>
//! overload for float to use fsel on xbox
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxMin(float a, float b)
{
	return intrinsics::selectMin(a, b);
}

/*
Many of these are just implemented as PX_CUDA_CALLABLE PX_FORCE_INLINE calls to the C lib right now,
but later we could replace some of them with some approximations or more clever stuff.
*/

/**
\brief abs returns the absolute value of its argument.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxAbs(float a)
{
	return intrinsics::abs(a);
}

/**
\brief Approximate float comparison: true if a and b differ by less than eps (absolute tolerance).
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxEquals(float a, float b, float eps)
{
	return (PxAbs(a - b) < eps);
}

/**
\brief abs returns the absolute value of its argument.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE double PxAbs(double a)
{
	return ::fabs(a);
}

/**
\brief abs returns the absolute value of its argument.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE int32_t PxAbs(int32_t a)
{
	return ::abs(a);
}

/**
\brief Clamps v to the range [lo,hi]
*/
template <class T>
PX_CUDA_CALLABLE PX_FORCE_INLINE T PxClamp(T v, T lo, T hi)
{
	PX_ASSERT(lo <= hi);
	return PxMin(hi, PxMax(lo, v));
}

//! \brief Square root.
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxSqrt(float a)
{
	return intrinsics::sqrt(a);
}

//! \brief Square root.
PX_CUDA_CALLABLE PX_FORCE_INLINE double PxSqrt(double a)
{
	return ::sqrt(a);
}

//! \brief reciprocal square root.
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxRecipSqrt(float a)
{
	return intrinsics::recipSqrt(a);
}

//! \brief reciprocal square root.
PX_CUDA_CALLABLE PX_FORCE_INLINE double PxRecipSqrt(double a)
{
	return 1 / ::sqrt(a);
}

//! \brief square of the argument
PX_CUDA_CALLABLE PX_FORCE_INLINE PxF32 PxSqr(const PxF32 a)
{
	return a * a;
}

//! trigonometry -- all angles are in radians.

//! \brief Sine of an angle ( <b>Unit:</b> Radians )
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxSin(float a)
{
	return intrinsics::sin(a);
}

//! \brief Sine of an angle ( <b>Unit:</b> Radians )
PX_CUDA_CALLABLE PX_FORCE_INLINE double PxSin(double a)
{
	return ::sin(a);
}

//! \brief Cosine of an angle (<b>Unit:</b> Radians)
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxCos(float a)
{
	return intrinsics::cos(a);
}

//! \brief Cosine of an angle (<b>Unit:</b> Radians)
PX_CUDA_CALLABLE PX_FORCE_INLINE double PxCos(double a)
{
	return ::cos(a);
}

//! \brief compute sine and cosine at the same time
PX_CUDA_CALLABLE PX_FORCE_INLINE void PxSinCos(const PxF32 a, PxF32& sin, PxF32& cos)
{
#if defined(__CUDACC__) && __CUDA_ARCH__ >= 350
	// sm_35+ device code: use the fast combined hardware intrinsic.
	__sincosf(a, &sin, &cos);
#else
	sin = PxSin(a);
	cos = PxCos(a);
#endif
}

//! \brief compute sine and cosine at the same time
PX_CUDA_CALLABLE PX_FORCE_INLINE void PxSinCos(const double a, double& sin, double& cos)
{
	sin = PxSin(a);
	cos = PxCos(a);
}

/**
\brief Tangent of an angle.
<b>Unit:</b> Radians
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxTan(float a)
{
	return ::tanf(a);
}

/**
\brief Tangent of an angle.
<b>Unit:</b> Radians
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE double PxTan(double a)
{
	return ::tan(a);
}

/**
\brief Arcsine.
Returns angle between -PI/2 and PI/2 in radians.
The input is clamped to [-1,1] first, so slightly out-of-range values never produce NaN.
<b>Unit:</b> Radians
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxAsin(float f)
{
	return ::asinf(PxClamp(f, -1.0f, 1.0f));
}

/**
\brief Arcsine.
Returns angle between -PI/2 and PI/2 in radians.
The input is clamped to [-1,1] first, so slightly out-of-range values never produce NaN.
<b>Unit:</b> Radians
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE double PxAsin(double f)
{
	return ::asin(PxClamp(f, -1.0, 1.0));
}

/**
\brief Arccosine.
Returns angle between 0 and PI in radians.
The input is clamped to [-1,1] first, so slightly out-of-range values never produce NaN.
<b>Unit:</b> Radians
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxAcos(float f)
{
	return ::acosf(PxClamp(f, -1.0f, 1.0f));
}

/**
\brief Arccosine.
Returns angle between 0 and PI in radians.
The input is clamped to [-1,1] first, so slightly out-of-range values never produce NaN.
<b>Unit:</b> Radians
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE double PxAcos(double f)
{
	return ::acos(PxClamp(f, -1.0, 1.0));
}

/**
\brief ArcTangent.
Returns angle between -PI/2 and PI/2 in radians
<b>Unit:</b> Radians
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxAtan(float a)
{
	return ::atanf(a);
}

/**
\brief ArcTangent.
Returns angle between -PI/2 and PI/2 in radians
<b>Unit:</b> Radians
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE double PxAtan(double a)
{
	return ::atan(a);
}

/**
\brief Arctangent of (x/y) with correct sign.
Returns angle between -PI and PI in radians.
NOTE: the arguments are forwarded in the order given, i.e. this computes atan2(x, y) with the
numerator first -- the reverse of the usual std::atan2(y, x) calling convention.
<b>Unit:</b> Radians
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxAtan2(float x, float y)
{
	return ::atan2f(x, y);
}

/**
\brief Arctangent of (x/y) with correct sign.
Returns angle between -PI and PI in radians.
NOTE: the arguments are forwarded in the order given, i.e. this computes atan2(x, y) with the
numerator first -- the reverse of the usual std::atan2(y, x) calling convention.
<b>Unit:</b> Radians
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE double PxAtan2(double x, double y)
{
	return ::atan2(x, y);
}

/**
\brief Converts degrees to radians.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxF32 PxDegToRad(const PxF32 a)
{
	return 0.01745329251994329547f * a; // a * (PI / 180)
}

//! \brief returns true if the passed number is a finite floating point number as opposed to INF, NAN, etc.
PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxIsFinite(float f)
{
	return intrinsics::isFinite(f);
}

//! \brief returns true if the passed number is a finite floating point number as opposed to INF, NAN, etc.
PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxIsFinite(double f)
{
	return intrinsics::isFinite(f);
}

//! \brief Largest integral value not greater than a.
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxFloor(float a)
{
	return ::floorf(a);
}

//! \brief e raised to the power a.
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxExp(float a)
{
	return ::expf(a);
}

//! \brief Smallest integral value not less than a.
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxCeil(float a)
{
	return ::ceilf(a);
}

//! \brief Sign of a, as defined by intrinsics::sign.
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxSign(float a)
{
	return physx::intrinsics::sign(a);
}

//! \brief Three-way sign with tolerance: -1 if a < -eps, +1 if a > eps, 0 otherwise.
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxSign2(float a, float eps = FLT_EPSILON)
{
	return (a < -eps) ? -1.0f : (a > eps) ? 1.0f : 0.0f;
}

//! \brief x raised to the power y.
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxPow(float x, float y)
{
	return ::powf(x, y);
}

//! \brief Natural logarithm of x.
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxLog(float x)
{
	return ::logf(x);
}

#if !PX_DOXYGEN
} // namespace physx
#endif

/** @} */
#endif
9,233
C
22.984416
117
0.714611
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxVecMath.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_VEC_MATH_H #define PX_VEC_MATH_H #include "foundation/Px.h" #include "foundation/PxIntrinsics.h" #include "foundation/PxVec3.h" #include "foundation/PxVec4.h" #include "foundation/PxMat33.h" #include "foundation/PxUnionCast.h" // We can opt to use the scalar version of vectorised functions. 
// This can catch type safety issues and might even work out more optimal on pc. // It will also be useful for benchmarking and testing. // NEVER submit with vector intrinsics deactivated without good reason. // AM: deactivating SIMD for debug win64 just so autobuild will also exercise // non-SIMD path, until a dedicated non-SIMD platform sich as Arm comes online. // TODO: dima: reference all platforms with SIMD support here, // all unknown/experimental cases should better default to NO SIMD. // enable/disable SIMD #if !defined(PX_SIMD_DISABLED) #if PX_INTEL_FAMILY && (!defined(__EMSCRIPTEN__) || defined(__SSE2__)) #define COMPILE_VECTOR_INTRINSICS 1 #elif PX_SWITCH #define COMPILE_VECTOR_INTRINSICS 1 #else #define COMPILE_VECTOR_INTRINSICS 0 #endif #else #define COMPILE_VECTOR_INTRINSICS 0 #endif #if COMPILE_VECTOR_INTRINSICS && PX_INTEL_FAMILY && PX_UNIX_FAMILY // only SSE2 compatible platforms should reach this #if PX_EMSCRIPTEN #include <emmintrin.h> #endif #include <xmmintrin.h> #endif #if COMPILE_VECTOR_INTRINSICS #include "PxAoS.h" #else #include "PxVecMathAoSScalar.h" #endif #if !PX_DOXYGEN namespace physx { #endif namespace aos { // Basic AoS types are // FloatV - 16-byte aligned representation of float. // Vec3V - 16-byte aligned representation of PxVec3 stored as (x y z 0). // Vec4V - 16-byte aligned representation of vector of 4 floats stored as (x y z w). // BoolV - 16-byte aligned representation of vector of 4 bools stored as (x y z w). // VecU32V - 16-byte aligned representation of 4 unsigned ints stored as (x y z w). // VecI32V - 16-byte aligned representation of 4 signed ints stored as (x y z w). // Mat33V - 16-byte aligned representation of any 3x3 matrix. // Mat34V - 16-byte aligned representation of transformation matrix (rotation in col1,col2,col3 and translation in // col4). // Mat44V - 16-byte aligned representation of any 4x4 matrix. 
////////////////////////////////////////// // Construct a simd type from a scalar type ////////////////////////////////////////// // FloatV //(f,f,f,f) PX_FORCE_INLINE FloatV FLoad(const PxF32 f); // Vec3V //(f,f,f,0) PX_FORCE_INLINE Vec3V V3Load(const PxF32 f); //(f.x,f.y,f.z,0) PX_FORCE_INLINE Vec3V V3LoadU(const PxVec3& f); //(f.x,f.y,f.z,0), f must be 16-byte aligned PX_FORCE_INLINE Vec3V V3LoadA(const PxVec3& f); //(f.x,f.y,f.z,w_undefined), f must be 16-byte aligned PX_FORCE_INLINE Vec3V V3LoadUnsafeA(const PxVec3& f); //(f.x,f.y,f.z,0) PX_FORCE_INLINE Vec3V V3LoadU(const PxF32* f); //(f.x,f.y,f.z,0), f must be 16-byte aligned PX_FORCE_INLINE Vec3V V3LoadA(const PxF32* f); // Vec4V //(f,f,f,f) PX_FORCE_INLINE Vec4V V4Load(const PxF32 f); //(f[0],f[1],f[2],f[3]) PX_FORCE_INLINE Vec4V V4LoadU(const PxF32* const f); //(f[0],f[1],f[2],f[3]), f must be 16-byte aligned PX_FORCE_INLINE Vec4V V4LoadA(const PxF32* const f); //(x,y,z,w) PX_FORCE_INLINE Vec4V V4LoadXYZW(const PxF32& x, const PxF32& y, const PxF32& z, const PxF32& w); // BoolV //(f,f,f,f) PX_FORCE_INLINE BoolV BLoad(const bool f); //(f[0],f[1],f[2],f[3]) PX_FORCE_INLINE BoolV BLoad(const bool* const f); // VecU32V //(f,f,f,f) PX_FORCE_INLINE VecU32V U4Load(const PxU32 f); //(f[0],f[1],f[2],f[3]) PX_FORCE_INLINE VecU32V U4LoadU(const PxU32* f); //(f[0],f[1],f[2],f[3]), f must be 16-byte aligned PX_FORCE_INLINE VecU32V U4LoadA(const PxU32* f); //((U32)x, (U32)y, (U32)z, (U32)w) PX_FORCE_INLINE VecU32V U4LoadXYZW(PxU32 x, PxU32 y, PxU32 z, PxU32 w); // VecI32V //(i,i,i,i) PX_FORCE_INLINE VecI32V I4Load(const PxI32 i); //(i,i,i,i) PX_FORCE_INLINE VecI32V I4LoadU(const PxI32* i); //(i,i,i,i) PX_FORCE_INLINE VecI32V I4LoadA(const PxI32* i); // QuatV //(x = v[0], y=v[1], z=v[2], w=v3[3]) and array don't need to aligned PX_FORCE_INLINE QuatV QuatVLoadU(const PxF32* v); //(x = v[0], y=v[1], z=v[2], w=v3[3]) and array need to aligned, fast load PX_FORCE_INLINE QuatV QuatVLoadA(const PxF32* v); //(x, y, z, w) 
PX_FORCE_INLINE QuatV QuatVLoadXYZW(const PxF32 x, const PxF32 y, const PxF32 z, const PxF32 w); // not added to public api Vec4V Vec4V_From_PxVec3_WUndefined(const PxVec3& v); /////////////////////////////////////////////////// // Construct a simd type from a different simd type /////////////////////////////////////////////////// // Vec3V //(v.x,v.y,v.z,0) PX_FORCE_INLINE Vec3V Vec3V_From_Vec4V(Vec4V v); //(v.x,v.y,v.z,undefined) - be very careful with w!=0 because many functions require w==0 for correct operation eg V3Dot, V3Length, V3Cross etc etc. PX_FORCE_INLINE Vec3V Vec3V_From_Vec4V_WUndefined(const Vec4V v); // Vec4V //(f.x,f.y,f.z,f.w) PX_FORCE_INLINE Vec4V Vec4V_From_Vec3V(Vec3V f); //((PxF32)f.x, (PxF32)f.y, (PxF32)f.z, (PxF32)f.w) PX_FORCE_INLINE Vec4V Vec4V_From_VecU32V(VecU32V a); //((PxF32)f.x, (PxF32)f.y, (PxF32)f.z, (PxF32)f.w) PX_FORCE_INLINE Vec4V Vec4V_From_VecI32V(VecI32V a); //(*(reinterpret_cast<PxF32*>(&f.x), (reinterpret_cast<PxF32*>(&f.y), (reinterpret_cast<PxF32*>(&f.z), //(reinterpret_cast<PxF32*>(&f.w)) PX_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecU32V(VecU32V a); //(*(reinterpret_cast<PxF32*>(&f.x), (reinterpret_cast<PxF32*>(&f.y), (reinterpret_cast<PxF32*>(&f.z), //(reinterpret_cast<PxF32*>(&f.w)) PX_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecI32V(VecI32V a); // VecU32V //(*(reinterpret_cast<PxU32*>(&f.x), (reinterpret_cast<PxU32*>(&f.y), (reinterpret_cast<PxU32*>(&f.z), //(reinterpret_cast<PxU32*>(&f.w)) PX_FORCE_INLINE VecU32V VecU32V_ReinterpretFrom_Vec4V(Vec4V a); //(b[0], b[1], b[2], b[3]) PX_FORCE_INLINE VecU32V VecU32V_From_BoolV(const BoolVArg b); // VecI32V //(*(reinterpret_cast<PxI32*>(&f.x), (reinterpret_cast<PxI32*>(&f.y), (reinterpret_cast<PxI32*>(&f.z), //(reinterpret_cast<PxI32*>(&f.w)) PX_FORCE_INLINE VecI32V VecI32V_ReinterpretFrom_Vec4V(Vec4V a); //((I32)a.x, (I32)a.y, (I32)a.z, (I32)a.w) PX_FORCE_INLINE VecI32V VecI32V_From_Vec4V(Vec4V a); //((I32)b.x, (I32)b.y, (I32)b.z, (I32)b.w) PX_FORCE_INLINE VecI32V 
VecI32V_From_BoolV(const BoolVArg b); /////////////////////////////////////////////////// // Convert from a simd type back to a scalar type /////////////////////////////////////////////////// // FloatV // a.x PX_FORCE_INLINE void FStore(const FloatV a, PxF32* PX_RESTRICT f); // Vec3V //(a.x,a.y,a.z) PX_FORCE_INLINE void V3StoreA(const Vec3V a, PxVec3& f); //(a.x,a.y,a.z) PX_FORCE_INLINE void V3StoreU(const Vec3V a, PxVec3& f); // Vec4V PX_FORCE_INLINE void V4StoreA(const Vec4V a, PxF32* f); PX_FORCE_INLINE void V4StoreU(const Vec4V a, PxF32* f); // BoolV PX_FORCE_INLINE void BStoreA(const BoolV b, PxU32* f); // VecU32V PX_FORCE_INLINE void U4StoreA(const VecU32V uv, PxU32* u); // VecI32V PX_FORCE_INLINE void I4StoreA(const VecI32V iv, PxI32* i); ////////////////////////////////////////////////////////////////// // Test that simd types have elements in the floating point range ////////////////////////////////////////////////////////////////// // check for each component is valid ie in floating point range PX_FORCE_INLINE bool isFiniteFloatV(const FloatV a); // check for each component is valid ie in floating point range PX_FORCE_INLINE bool isFiniteVec3V(const Vec3V a); // check for each component is valid ie in floating point range PX_FORCE_INLINE bool isFiniteVec4V(const Vec4V a); // Check that w-component is zero. PX_FORCE_INLINE bool isValidVec3V(const Vec3V a); ////////////////////////////////////////////////////////////////// // Tests that all elements of two 16-byte types are completely equivalent. // Use these tests for unit testing and asserts only. 
////////////////////////////////////////////////////////////////// namespace vecMathTests { PX_FORCE_INLINE Vec3V getInvalidVec3V(); PX_FORCE_INLINE bool allElementsEqualFloatV(const FloatV a, const FloatV b); PX_FORCE_INLINE bool allElementsEqualVec3V(const Vec3V a, const Vec3V b); PX_FORCE_INLINE bool allElementsEqualVec4V(const Vec4V a, const Vec4V b); PX_FORCE_INLINE bool allElementsEqualBoolV(const BoolV a, const BoolV b); PX_FORCE_INLINE bool allElementsEqualVecU32V(const VecU32V a, const VecU32V b); PX_FORCE_INLINE bool allElementsEqualVecI32V(const VecI32V a, const VecI32V b); PX_FORCE_INLINE bool allElementsEqualMat33V(const Mat33V& a, const Mat33V& b) { return (allElementsEqualVec3V(a.col0, b.col0) && allElementsEqualVec3V(a.col1, b.col1) && allElementsEqualVec3V(a.col2, b.col2)); } PX_FORCE_INLINE bool allElementsEqualMat34V(const Mat34V& a, const Mat34V& b) { return (allElementsEqualVec3V(a.col0, b.col0) && allElementsEqualVec3V(a.col1, b.col1) && allElementsEqualVec3V(a.col2, b.col2) && allElementsEqualVec3V(a.col3, b.col3)); } PX_FORCE_INLINE bool allElementsEqualMat44V(const Mat44V& a, const Mat44V& b) { return (allElementsEqualVec4V(a.col0, b.col0) && allElementsEqualVec4V(a.col1, b.col1) && allElementsEqualVec4V(a.col2, b.col2) && allElementsEqualVec4V(a.col3, b.col3)); } PX_FORCE_INLINE bool allElementsNearEqualFloatV(const FloatV a, const FloatV b); PX_FORCE_INLINE bool allElementsNearEqualVec3V(const Vec3V a, const Vec3V b); PX_FORCE_INLINE bool allElementsNearEqualVec4V(const Vec4V a, const Vec4V b); PX_FORCE_INLINE bool allElementsNearEqualMat33V(const Mat33V& a, const Mat33V& b) { return (allElementsNearEqualVec3V(a.col0, b.col0) && allElementsNearEqualVec3V(a.col1, b.col1) && allElementsNearEqualVec3V(a.col2, b.col2)); } PX_FORCE_INLINE bool allElementsNearEqualMat34V(const Mat34V& a, const Mat34V& b) { return (allElementsNearEqualVec3V(a.col0, b.col0) && allElementsNearEqualVec3V(a.col1, b.col1) && allElementsNearEqualVec3V(a.col2, b.col2) 
&& allElementsNearEqualVec3V(a.col3, b.col3)); } PX_FORCE_INLINE bool allElementsNearEqualMat44V(const Mat44V& a, const Mat44V& b) { return (allElementsNearEqualVec4V(a.col0, b.col0) && allElementsNearEqualVec4V(a.col1, b.col1) && allElementsNearEqualVec4V(a.col2, b.col2) && allElementsNearEqualVec4V(a.col3, b.col3)); } } ////////////////////////////////////////////////////////////////// // Math operations on FloatV ////////////////////////////////////////////////////////////////// //(0,0,0,0) PX_FORCE_INLINE FloatV FZero(); //(1,1,1,1) PX_FORCE_INLINE FloatV FOne(); //(0.5,0.5,0.5,0.5) PX_FORCE_INLINE FloatV FHalf(); //(PX_EPS_REAL,PX_EPS_REAL,PX_EPS_REAL,PX_EPS_REAL) PX_FORCE_INLINE FloatV FEps(); //! @cond //(PX_MAX_REAL, PX_MAX_REAL, PX_MAX_REAL PX_MAX_REAL) PX_FORCE_INLINE FloatV FMax(); //! @endcond //(-PX_MAX_REAL, -PX_MAX_REAL, -PX_MAX_REAL -PX_MAX_REAL) PX_FORCE_INLINE FloatV FNegMax(); //(1e-6f, 1e-6f, 1e-6f, 1e-6f) PX_FORCE_INLINE FloatV FEps6(); //((PxF32*)&1, (PxF32*)&1, (PxF32*)&1, (PxF32*)&1) //-f (per component) PX_FORCE_INLINE FloatV FNeg(const FloatV f); // a+b (per component) PX_FORCE_INLINE FloatV FAdd(const FloatV a, const FloatV b); // a-b (per component) PX_FORCE_INLINE FloatV FSub(const FloatV a, const FloatV b); // a*b (per component) PX_FORCE_INLINE FloatV FMul(const FloatV a, const FloatV b); // a/b (per component) PX_FORCE_INLINE FloatV FDiv(const FloatV a, const FloatV b); // a/b (per component) PX_FORCE_INLINE FloatV FDivFast(const FloatV a, const FloatV b); // 1.0f/a PX_FORCE_INLINE FloatV FRecip(const FloatV a); // 1.0f/a PX_FORCE_INLINE FloatV FRecipFast(const FloatV a); // 1.0f/sqrt(a) PX_FORCE_INLINE FloatV FRsqrt(const FloatV a); // 1.0f/sqrt(a) PX_FORCE_INLINE FloatV FRsqrtFast(const FloatV a); // sqrt(a) PX_FORCE_INLINE FloatV FSqrt(const FloatV a); // a*b+c PX_FORCE_INLINE FloatV FScaleAdd(const FloatV a, const FloatV b, const FloatV c); // c-a*b PX_FORCE_INLINE FloatV FNegScaleSub(const FloatV a, const FloatV b, const FloatV 
c); // fabs(a) PX_FORCE_INLINE FloatV FAbs(const FloatV a); // c ? a : b (per component) PX_FORCE_INLINE FloatV FSel(const BoolV c, const FloatV a, const FloatV b); // a>b (per component) PX_FORCE_INLINE BoolV FIsGrtr(const FloatV a, const FloatV b); // a>=b (per component) PX_FORCE_INLINE BoolV FIsGrtrOrEq(const FloatV a, const FloatV b); // a==b (per component) PX_FORCE_INLINE BoolV FIsEq(const FloatV a, const FloatV b); // Max(a,b) (per component) PX_FORCE_INLINE FloatV FMax(const FloatV a, const FloatV b); // Min(a,b) (per component) PX_FORCE_INLINE FloatV FMin(const FloatV a, const FloatV b); // Clamp(a,b) (per component) PX_FORCE_INLINE FloatV FClamp(const FloatV a, const FloatV minV, const FloatV maxV); // a.x>b.x PX_FORCE_INLINE PxU32 FAllGrtr(const FloatV a, const FloatV b); // a.x>=b.x PX_FORCE_INLINE PxU32 FAllGrtrOrEq(const FloatV a, const FloatV b); // a.x==b.x PX_FORCE_INLINE PxU32 FAllEq(const FloatV a, const FloatV b); // a<min || a>max PX_FORCE_INLINE PxU32 FOutOfBounds(const FloatV a, const FloatV min, const FloatV max); // a>=min && a<=max PX_FORCE_INLINE PxU32 FInBounds(const FloatV a, const FloatV min, const FloatV max); // a<-bounds || a>bounds PX_FORCE_INLINE PxU32 FOutOfBounds(const FloatV a, const FloatV bounds); // a>=-bounds && a<=bounds PX_FORCE_INLINE PxU32 FInBounds(const FloatV a, const FloatV bounds); // round float a to the near int PX_FORCE_INLINE FloatV FRound(const FloatV a); // calculate the sin of float a PX_FORCE_INLINE FloatV FSin(const FloatV a); // calculate the cos of float b PX_FORCE_INLINE FloatV FCos(const FloatV a); ////////////////////////////////////////////////////////////////// // Math operations on Vec3V ////////////////////////////////////////////////////////////////// //(f,f,f,f) PX_FORCE_INLINE Vec3V V3Splat(const FloatV f); //(x,y,z) PX_FORCE_INLINE Vec3V V3Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z); //(1,0,0,0) PX_FORCE_INLINE Vec3V V3UnitX(); //(0,1,0,0) PX_FORCE_INLINE Vec3V V3UnitY(); 
//(0,0,1,0) PX_FORCE_INLINE Vec3V V3UnitZ(); //(f.x,f.x,f.x,f.x) PX_FORCE_INLINE FloatV V3GetX(const Vec3V f); //(f.y,f.y,f.y,f.y) PX_FORCE_INLINE FloatV V3GetY(const Vec3V f); //(f.z,f.z,f.z,f.z) PX_FORCE_INLINE FloatV V3GetZ(const Vec3V f); //(f,v.y,v.z,v.w) PX_FORCE_INLINE Vec3V V3SetX(const Vec3V v, const FloatV f); //(v.x,f,v.z,v.w) PX_FORCE_INLINE Vec3V V3SetY(const Vec3V v, const FloatV f); //(v.x,v.y,f,v.w) PX_FORCE_INLINE Vec3V V3SetZ(const Vec3V v, const FloatV f); // v.x=f PX_FORCE_INLINE void V3WriteX(Vec3V& v, const PxF32 f); // v.y=f PX_FORCE_INLINE void V3WriteY(Vec3V& v, const PxF32 f); // v.z=f PX_FORCE_INLINE void V3WriteZ(Vec3V& v, const PxF32 f); // v.x=f.x, v.y=f.y, v.z=f.z PX_FORCE_INLINE void V3WriteXYZ(Vec3V& v, const PxVec3& f); // return v.x PX_FORCE_INLINE PxF32 V3ReadX(const Vec3V& v); // return v.y PX_FORCE_INLINE PxF32 V3ReadY(const Vec3V& v); // return v.y PX_FORCE_INLINE PxF32 V3ReadZ(const Vec3V& v); // return (v.x,v.y,v.z) PX_FORCE_INLINE const PxVec3& V3ReadXYZ(const Vec3V& v); //(a.x, b.x, c.x) PX_FORCE_INLINE Vec3V V3ColX(const Vec3V a, const Vec3V b, const Vec3V c); //(a.y, b.y, c.y) PX_FORCE_INLINE Vec3V V3ColY(const Vec3V a, const Vec3V b, const Vec3V c); //(a.z, b.z, c.z) PX_FORCE_INLINE Vec3V V3ColZ(const Vec3V a, const Vec3V b, const Vec3V c); //(0,0,0,0) PX_FORCE_INLINE Vec3V V3Zero(); //(1,1,1,1) PX_FORCE_INLINE Vec3V V3One(); //(PX_EPS_REAL,PX_EPS_REAL,PX_EPS_REAL,PX_EPS_REAL) PX_FORCE_INLINE Vec3V V3Eps(); //-c (per component) PX_FORCE_INLINE Vec3V V3Neg(const Vec3V c); // a+b (per component) PX_FORCE_INLINE Vec3V V3Add(const Vec3V a, const Vec3V b); // a-b (per component) PX_FORCE_INLINE Vec3V V3Sub(const Vec3V a, const Vec3V b); // a*b (per component) PX_FORCE_INLINE Vec3V V3Scale(const Vec3V a, const FloatV b); // a*b (per component) PX_FORCE_INLINE Vec3V V3Mul(const Vec3V a, const Vec3V b); // a/b (per component) PX_FORCE_INLINE Vec3V V3ScaleInv(const Vec3V a, const FloatV b); // a/b (per component) PX_FORCE_INLINE 
Vec3V V3Div(const Vec3V a, const Vec3V b); // a/b (per component) PX_FORCE_INLINE Vec3V V3ScaleInvFast(const Vec3V a, const FloatV b); // a/b (per component) PX_FORCE_INLINE Vec3V V3DivFast(const Vec3V a, const Vec3V b); // 1.0f/a PX_FORCE_INLINE Vec3V V3Recip(const Vec3V a); // 1.0f/a PX_FORCE_INLINE Vec3V V3RecipFast(const Vec3V a); // 1.0f/sqrt(a) PX_FORCE_INLINE Vec3V V3Rsqrt(const Vec3V a); // 1.0f/sqrt(a) PX_FORCE_INLINE Vec3V V3RsqrtFast(const Vec3V a); // a*b+c PX_FORCE_INLINE Vec3V V3ScaleAdd(const Vec3V a, const FloatV b, const Vec3V c); // c-a*b PX_FORCE_INLINE Vec3V V3NegScaleSub(const Vec3V a, const FloatV b, const Vec3V c); // a*b+c PX_FORCE_INLINE Vec3V V3MulAdd(const Vec3V a, const Vec3V b, const Vec3V c); // c-a*b PX_FORCE_INLINE Vec3V V3NegMulSub(const Vec3V a, const Vec3V b, const Vec3V c); // fabs(a) PX_FORCE_INLINE Vec3V V3Abs(const Vec3V a); // a.b // Note: a.w and b.w must have value zero PX_FORCE_INLINE FloatV V3Dot(const Vec3V a, const Vec3V b); // aXb // Note: a.w and b.w must have value zero PX_FORCE_INLINE Vec3V V3Cross(const Vec3V a, const Vec3V b); // |a.a|^1/2 // Note: a.w must have value zero PX_FORCE_INLINE FloatV V3Length(const Vec3V a); // a.a // Note: a.w must have value zero PX_FORCE_INLINE FloatV V3LengthSq(const Vec3V a); // a*|a.a|^-1/2 // Note: a.w must have value zero PX_FORCE_INLINE Vec3V V3Normalize(const Vec3V a); // a.a>0 ? a*|a.a|^-1/2 : (0,0,0,0) // Note: a.w must have value zero PX_FORCE_INLINE FloatV V3Length(const Vec3V a); // a.a>0 ? a*|a.a|^-1/2 : unsafeReturnValue // Note: a.w must have value zero PX_FORCE_INLINE Vec3V V3NormalizeSafe(const Vec3V a, const Vec3V unsafeReturnValue); // a.x + a.y + a.z // Note: a.w must have value zero PX_FORCE_INLINE FloatV V3SumElems(const Vec3V a); // c ? 
a : b (per component) PX_FORCE_INLINE Vec3V V3Sel(const BoolV c, const Vec3V a, const Vec3V b); // a>b (per component) PX_FORCE_INLINE BoolV V3IsGrtr(const Vec3V a, const Vec3V b); // a>=b (per component) PX_FORCE_INLINE BoolV V3IsGrtrOrEq(const Vec3V a, const Vec3V b); // a==b (per component) PX_FORCE_INLINE BoolV V3IsEq(const Vec3V a, const Vec3V b); // Max(a,b) (per component) PX_FORCE_INLINE Vec3V V3Max(const Vec3V a, const Vec3V b); // Min(a,b) (per component) PX_FORCE_INLINE Vec3V V3Min(const Vec3V a, const Vec3V b); // Extract the maximum value from a // Note: a.w must have value zero PX_FORCE_INLINE FloatV V3ExtractMax(const Vec3V a); // Extract the minimum value from a // Note: a.w must have value zero PX_FORCE_INLINE FloatV V3ExtractMin(const Vec3V a); // Clamp(a,b) (per component) PX_FORCE_INLINE Vec3V V3Clamp(const Vec3V a, const Vec3V minV, const Vec3V maxV); // Extract the sign for each component PX_FORCE_INLINE Vec3V V3Sign(const Vec3V a); // Test all components. // (a.x>b.x && a.y>b.y && a.z>b.z) // Note: a.w and b.w must have value zero PX_FORCE_INLINE PxU32 V3AllGrtr(const Vec3V a, const Vec3V b); // (a.x>=b.x && a.y>=b.y && a.z>=b.z) // Note: a.w and b.w must have value zero PX_FORCE_INLINE PxU32 V3AllGrtrOrEq(const Vec3V a, const Vec3V b); // (a.x==b.x && a.y==b.y && a.z==b.z) // Note: a.w and b.w must have value zero PX_FORCE_INLINE PxU32 V3AllEq(const Vec3V a, const Vec3V b); // a.x<min.x || a.y<min.y || a.z<min.z || a.x>max.x || a.y>max.y || a.z>max.z // Note: a.w and min.w and max.w must have value zero PX_FORCE_INLINE PxU32 V3OutOfBounds(const Vec3V a, const Vec3V min, const Vec3V max); // a.x>=min.x && a.y>=min.y && a.z>=min.z && a.x<=max.x && a.y<=max.y && a.z<=max.z // Note: a.w and min.w and max.w must have value zero PX_FORCE_INLINE PxU32 V3InBounds(const Vec3V a, const Vec3V min, const Vec3V max); // a.x<-bounds.x || a.y<=-bounds.y || a.z<bounds.z || a.x>bounds.x || a.y>bounds.y || a.z>bounds.z // Note: a.w and bounds.w must have 
value zero PX_FORCE_INLINE PxU32 V3OutOfBounds(const Vec3V a, const Vec3V bounds); // a.x>=-bounds.x && a.y>=-bounds.y && a.z>=-bounds.z && a.x<=bounds.x && a.y<=bounds.y && a.z<=bounds.z // Note: a.w and bounds.w must have value zero PX_FORCE_INLINE PxU32 V3InBounds(const Vec3V a, const Vec3V bounds); //(floor(a.x + 0.5f), floor(a.y + 0.5f), floor(a.z + 0.5f)) PX_FORCE_INLINE Vec3V V3Round(const Vec3V a); //(sinf(a.x), sinf(a.y), sinf(a.z)) PX_FORCE_INLINE Vec3V V3Sin(const Vec3V a); //(cosf(a.x), cosf(a.y), cosf(a.z)) PX_FORCE_INLINE Vec3V V3Cos(const Vec3V a); //(a.y,a.z,a.z) PX_FORCE_INLINE Vec3V V3PermYZZ(const Vec3V a); //(a.x,a.y,a.x) PX_FORCE_INLINE Vec3V V3PermXYX(const Vec3V a); //(a.y,a.z,a.x) PX_FORCE_INLINE Vec3V V3PermYZX(const Vec3V a); //(a.z, a.x, a.y) PX_FORCE_INLINE Vec3V V3PermZXY(const Vec3V a); //(a.z,a.z,a.y) PX_FORCE_INLINE Vec3V V3PermZZY(const Vec3V a); //(a.y,a.x,a.x) PX_FORCE_INLINE Vec3V V3PermYXX(const Vec3V a); //(0, v1.z, v0.y) PX_FORCE_INLINE Vec3V V3Perm_Zero_1Z_0Y(const Vec3V v0, const Vec3V v1); //(v0.z, 0, v1.x) PX_FORCE_INLINE Vec3V V3Perm_0Z_Zero_1X(const Vec3V v0, const Vec3V v1); //(v1.y, v0.x, 0) PX_FORCE_INLINE Vec3V V3Perm_1Y_0X_Zero(const Vec3V v0, const Vec3V v1); // Transpose 3 Vec3Vs inplace. 
Sets the w component to zero // [ x0, y0, z0, w0] [ x1, y1, z1, w1] [ x2, y2, z2, w2] -> [x0 x1 x2 0] [y0 y1 y2 0] [z0 z1 z2 0] PX_FORCE_INLINE void V3Transpose(Vec3V& col0, Vec3V& col1, Vec3V& col2); ////////////////////////////////////////////////////////////////// // Math operations on Vec4V ////////////////////////////////////////////////////////////////// //(f,f,f,f) PX_FORCE_INLINE Vec4V V4Splat(const FloatV f); //(f[0],f[1],f[2],f[3]) PX_FORCE_INLINE Vec4V V4Merge(const FloatV* const f); //(x,y,z,w) PX_FORCE_INLINE Vec4V V4Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z, const FloatVArg w); //(x.w, y.w, z.w, w.w) PX_FORCE_INLINE Vec4V V4MergeW(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w); //(x.z, y.z, z.z, w.z) PX_FORCE_INLINE Vec4V V4MergeZ(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w); //(x.y, y.y, z.y, w.y) PX_FORCE_INLINE Vec4V V4MergeY(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w); //(x.x, y.x, z.x, w.x) PX_FORCE_INLINE Vec4V V4MergeX(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w); //(a.x, b.x, a.y, b.y) PX_FORCE_INLINE Vec4V V4UnpackXY(const Vec4VArg a, const Vec4VArg b); //(a.z, b.z, a.w, b.w) PX_FORCE_INLINE Vec4V V4UnpackZW(const Vec4VArg a, const Vec4VArg b); //(1,0,0,0) PX_FORCE_INLINE Vec4V V4UnitW(); //(0,1,0,0) PX_FORCE_INLINE Vec4V V4UnitY(); //(0,0,1,0) PX_FORCE_INLINE Vec4V V4UnitZ(); //(0,0,0,1) PX_FORCE_INLINE Vec4V V4UnitW(); //(f.x,f.x,f.x,f.x) PX_FORCE_INLINE FloatV V4GetX(const Vec4V f); //(f.y,f.y,f.y,f.y) PX_FORCE_INLINE FloatV V4GetY(const Vec4V f); //(f.z,f.z,f.z,f.z) PX_FORCE_INLINE FloatV V4GetZ(const Vec4V f); //(f.w,f.w,f.w,f.w) PX_FORCE_INLINE FloatV V4GetW(const Vec4V f); //(f,v.y,v.z,v.w) PX_FORCE_INLINE Vec4V V4SetX(const Vec4V v, const FloatV f); //(v.x,f,v.z,v.w) PX_FORCE_INLINE Vec4V V4SetY(const Vec4V v, const FloatV f); //(v.x,v.y,f,v.w) PX_FORCE_INLINE Vec4V V4SetZ(const Vec4V v, const FloatV f); 
//(v.x,v.y,v.z,f) PX_FORCE_INLINE Vec4V V4SetW(const Vec4V v, const FloatV f); //(v.x,v.y,v.z,0) PX_FORCE_INLINE Vec4V V4ClearW(const Vec4V v); //(a[elementIndex], a[elementIndex], a[elementIndex], a[elementIndex]) template <int elementIndex> PX_FORCE_INLINE Vec4V V4SplatElement(Vec4V a); // v.x=f PX_FORCE_INLINE void V4WriteX(Vec4V& v, const PxF32 f); // v.y=f PX_FORCE_INLINE void V4WriteY(Vec4V& v, const PxF32 f); // v.z=f PX_FORCE_INLINE void V4WriteZ(Vec4V& v, const PxF32 f); // v.w=f PX_FORCE_INLINE void V4WriteW(Vec4V& v, const PxF32 f); // v.x=f.x, v.y=f.y, v.z=f.z PX_FORCE_INLINE void V4WriteXYZ(Vec4V& v, const PxVec3& f); // return v.x PX_FORCE_INLINE PxF32 V4ReadX(const Vec4V& v); // return v.y PX_FORCE_INLINE PxF32 V4ReadY(const Vec4V& v); // return v.z PX_FORCE_INLINE PxF32 V4ReadZ(const Vec4V& v); // return v.w PX_FORCE_INLINE PxF32 V4ReadW(const Vec4V& v); // return (v.x,v.y,v.z) PX_FORCE_INLINE const PxVec3& V4ReadXYZ(const Vec4V& v); //(0,0,0,0) PX_FORCE_INLINE Vec4V V4Zero(); //(1,1,1,1) PX_FORCE_INLINE Vec4V V4One(); //(PX_EPS_REAL,PX_EPS_REAL,PX_EPS_REAL,PX_EPS_REAL) PX_FORCE_INLINE Vec4V V4Eps(); //-c (per component) PX_FORCE_INLINE Vec4V V4Neg(const Vec4V c); // a+b (per component) PX_FORCE_INLINE Vec4V V4Add(const Vec4V a, const Vec4V b); // a-b (per component) PX_FORCE_INLINE Vec4V V4Sub(const Vec4V a, const Vec4V b); // a*b (per component) PX_FORCE_INLINE Vec4V V4Scale(const Vec4V a, const FloatV b); // a*b (per component) PX_FORCE_INLINE Vec4V V4Mul(const Vec4V a, const Vec4V b); // a/b (per component) PX_FORCE_INLINE Vec4V V4ScaleInv(const Vec4V a, const FloatV b); // a/b (per component) PX_FORCE_INLINE Vec4V V4Div(const Vec4V a, const Vec4V b); // a/b (per component) PX_FORCE_INLINE Vec4V V4ScaleInvFast(const Vec4V a, const FloatV b); // a/b (per component) PX_FORCE_INLINE Vec4V V4DivFast(const Vec4V a, const Vec4V b); // 1.0f/a PX_FORCE_INLINE Vec4V V4Recip(const Vec4V a); // 1.0f/a PX_FORCE_INLINE Vec4V V4RecipFast(const Vec4V a); // 
1.0f/sqrt(a) PX_FORCE_INLINE Vec4V V4Rsqrt(const Vec4V a); // 1.0f/sqrt(a) PX_FORCE_INLINE Vec4V V4RsqrtFast(const Vec4V a); // a*b+c PX_FORCE_INLINE Vec4V V4ScaleAdd(const Vec4V a, const FloatV b, const Vec4V c); // c-a*b PX_FORCE_INLINE Vec4V V4NegScaleSub(const Vec4V a, const FloatV b, const Vec4V c); // a*b+c PX_FORCE_INLINE Vec4V V4MulAdd(const Vec4V a, const Vec4V b, const Vec4V c); // c-a*b PX_FORCE_INLINE Vec4V V4NegMulSub(const Vec4V a, const Vec4V b, const Vec4V c); // fabs(a) PX_FORCE_INLINE Vec4V V4Abs(const Vec4V a); // bitwise a & ~b PX_FORCE_INLINE Vec4V V4Andc(const Vec4V a, const VecU32V b); // a.b (W is taken into account) PX_FORCE_INLINE FloatV V4Dot(const Vec4V a, const Vec4V b); // a.b (same computation as V3Dot. W is ignored in input) PX_FORCE_INLINE FloatV V4Dot3(const Vec4V a, const Vec4V b); // aXb (same computation as V3Cross. W is ignored in input and undefined in output) PX_FORCE_INLINE Vec4V V4Cross(const Vec4V a, const Vec4V b); //|a.a|^1/2 PX_FORCE_INLINE FloatV V4Length(const Vec4V a); // a.a PX_FORCE_INLINE FloatV V4LengthSq(const Vec4V a); // a*|a.a|^-1/2 PX_FORCE_INLINE Vec4V V4Normalize(const Vec4V a); // a.a>0 ? a*|a.a|^-1/2 : unsafeReturnValue PX_FORCE_INLINE Vec4V V4NormalizeSafe(const Vec4V a, const Vec4V unsafeReturnValue); // a*|a.a|^-1/2 PX_FORCE_INLINE Vec4V V4NormalizeFast(const Vec4V a); // c ? 
a : b (per component) PX_FORCE_INLINE Vec4V V4Sel(const BoolV c, const Vec4V a, const Vec4V b); // a>b (per component) PX_FORCE_INLINE BoolV V4IsGrtr(const Vec4V a, const Vec4V b); // a>=b (per component) PX_FORCE_INLINE BoolV V4IsGrtrOrEq(const Vec4V a, const Vec4V b); // a==b (per component) PX_FORCE_INLINE BoolV V4IsEq(const Vec4V a, const Vec4V b); // Max(a,b) (per component) PX_FORCE_INLINE Vec4V V4Max(const Vec4V a, const Vec4V b); // Min(a,b) (per component) PX_FORCE_INLINE Vec4V V4Min(const Vec4V a, const Vec4V b); // Get the maximum component from a PX_FORCE_INLINE FloatV V4ExtractMax(const Vec4V a); // Get the minimum component from a PX_FORCE_INLINE FloatV V4ExtractMin(const Vec4V a); // Clamp(a,b) (per component) PX_FORCE_INLINE Vec4V V4Clamp(const Vec4V a, const Vec4V minV, const Vec4V maxV); // return 1 if all components of a are greater than all components of b. PX_FORCE_INLINE PxU32 V4AllGrtr(const Vec4V a, const Vec4V b); // return 1 if all components of a are greater than or equal to all components of b PX_FORCE_INLINE PxU32 V4AllGrtrOrEq(const Vec4V a, const Vec4V b); // return 1 if XYZ components of a are greater than or equal to XYZ components of b. W is ignored. PX_FORCE_INLINE PxU32 V4AllGrtrOrEq3(const Vec4V a, const Vec4V b); // return 1 if all components of a are equal to all components of b PX_FORCE_INLINE PxU32 V4AllEq(const Vec4V a, const Vec4V b); // return 1 if any XYZ component of a is greater than the corresponding component of b. W is ignored. 
PX_FORCE_INLINE PxU32 V4AnyGrtr3(const Vec4V a, const Vec4V b); // round(a)(per component) PX_FORCE_INLINE Vec4V V4Round(const Vec4V a); // sin(a) (per component) PX_FORCE_INLINE Vec4V V4Sin(const Vec4V a); // cos(a) (per component) PX_FORCE_INLINE Vec4V V4Cos(const Vec4V a); // Permute v into a new vec4v with YXWZ format PX_FORCE_INLINE Vec4V V4PermYXWZ(const Vec4V v); // Permute v into a new vec4v with XZXZ format PX_FORCE_INLINE Vec4V V4PermXZXZ(const Vec4V v); // Permute v into a new vec4v with YWYW format PX_FORCE_INLINE Vec4V V4PermYWYW(const Vec4V v); // Permute v into a new vec4v with YZXW format PX_FORCE_INLINE Vec4V V4PermYZXW(const Vec4V v); // Permute v into a new vec4v with ZWXY format - equivalent to a swap of the two 64bit parts of the vector PX_FORCE_INLINE Vec4V V4PermZWXY(const Vec4V a); // Permute v into a new vec4v with format {a[x], a[y], a[z], a[w]} // V4Perm<1,3,1,3> is equal to V4PermYWYW // V4Perm<0,2,0,2> is equal to V4PermXZXZ // V3Perm<1,0,3,2> is equal to V4PermYXWZ template <PxU8 x, PxU8 y, PxU8 z, PxU8 w> PX_FORCE_INLINE Vec4V V4Perm(const Vec4V a); // Transpose 4 Vec4Vs inplace. 
// [ x0, y0, z0, w0] [ x1, y1, z1, w1] [ x2, y2, z2, w2] [ x3, y3, z3, w3] -> // [ x0, x1, x2, x3] [ y0, y1, y2, y3] [ z0, z1, z2, z3] [ w0, w1, w2, w3] PX_FORCE_INLINE void V3Transpose(Vec3V& col0, Vec3V& col1, Vec3V& col2); // q = cos(a/2) + u*sin(a/2) PX_FORCE_INLINE QuatV QuatV_From_RotationAxisAngle(const Vec3V u, const FloatV a); // convert q to a unit quaternion PX_FORCE_INLINE QuatV QuatNormalize(const QuatV q); //|q.q|^1/2 PX_FORCE_INLINE FloatV QuatLength(const QuatV q); // q.q PX_FORCE_INLINE FloatV QuatLengthSq(const QuatV q); // a.b PX_FORCE_INLINE FloatV QuatDot(const QuatV a, const QuatV b); //(-q.x, -q.y, -q.z, q.w) PX_FORCE_INLINE QuatV QuatConjugate(const QuatV q); //(q.x, q.y, q.z) PX_FORCE_INLINE Vec3V QuatGetImaginaryPart(const QuatV q); // convert quaternion to matrix 33 PX_FORCE_INLINE Mat33V QuatGetMat33V(const QuatVArg q); // convert quaternion to matrix 33 PX_FORCE_INLINE void QuatGetMat33V(const QuatVArg q, Vec3V& column0, Vec3V& column1, Vec3V& column2); // convert matrix 33 to quaternion PX_FORCE_INLINE QuatV Mat33GetQuatV(const Mat33V& a); // brief computes rotation of x-axis PX_FORCE_INLINE Vec3V QuatGetBasisVector0(const QuatV q); // brief computes rotation of y-axis PX_FORCE_INLINE Vec3V QuatGetBasisVector1(const QuatV q); // brief computes rotation of z-axis PX_FORCE_INLINE Vec3V QuatGetBasisVector2(const QuatV q); // calculate the rotation vector from q and v PX_FORCE_INLINE Vec3V QuatRotate(const QuatV q, const Vec3V v); // calculate the rotation vector from the conjugate quaternion and v PX_FORCE_INLINE Vec3V QuatRotateInv(const QuatV q, const Vec3V v); // quaternion multiplication PX_FORCE_INLINE QuatV QuatMul(const QuatV a, const QuatV b); // quaternion add PX_FORCE_INLINE QuatV QuatAdd(const QuatV a, const QuatV b); // (-q.x, -q.y, -q.z, -q.w) PX_FORCE_INLINE QuatV QuatNeg(const QuatV q); // (a.x - b.x, a.y-b.y, a.z-b.z, a.w-b.w ) PX_FORCE_INLINE QuatV QuatSub(const QuatV a, const QuatV b); // (a.x*b, a.y*b, a.z*b, a.w*b) 
PX_FORCE_INLINE QuatV QuatScale(const QuatV a, const FloatV b); // (x = v[0], y = v[1], z = v[2], w =v[3]) PX_FORCE_INLINE QuatV QuatMerge(const FloatV* const v); // (x = v[0], y = v[1], z = v[2], w =v[3]) PX_FORCE_INLINE QuatV QuatMerge(const FloatVArg x, const FloatVArg y, const FloatVArg z, const FloatVArg w); // (x = 0.f, y = 0.f, z = 0.f, w = 1.f) PX_FORCE_INLINE QuatV QuatIdentity(); // check for each component is valid PX_FORCE_INLINE bool isFiniteQuatV(const QuatV q); // check for each component is valid PX_FORCE_INLINE bool isValidQuatV(const QuatV q); // check for each component is valid PX_FORCE_INLINE bool isSaneQuatV(const QuatV q); // Math operations on 16-byte aligned booleans. // x=false y=false z=false w=false PX_FORCE_INLINE BoolV BFFFF(); // x=false y=false z=false w=true PX_FORCE_INLINE BoolV BFFFT(); // x=false y=false z=true w=false PX_FORCE_INLINE BoolV BFFTF(); // x=false y=false z=true w=true PX_FORCE_INLINE BoolV BFFTT(); // x=false y=true z=false w=false PX_FORCE_INLINE BoolV BFTFF(); // x=false y=true z=false w=true PX_FORCE_INLINE BoolV BFTFT(); // x=false y=true z=true w=false PX_FORCE_INLINE BoolV BFTTF(); // x=false y=true z=true w=true PX_FORCE_INLINE BoolV BFTTT(); // x=true y=false z=false w=false PX_FORCE_INLINE BoolV BTFFF(); // x=true y=false z=false w=true PX_FORCE_INLINE BoolV BTFFT(); // x=true y=false z=true w=false PX_FORCE_INLINE BoolV BTFTF(); // x=true y=false z=true w=true PX_FORCE_INLINE BoolV BTFTT(); // x=true y=true z=false w=false PX_FORCE_INLINE BoolV BTTFF(); // x=true y=true z=false w=true PX_FORCE_INLINE BoolV BTTFT(); // x=true y=true z=true w=false PX_FORCE_INLINE BoolV BTTTF(); // x=true y=true z=true w=true PX_FORCE_INLINE BoolV BTTTT(); // x=false y=false z=false w=true PX_FORCE_INLINE BoolV BWMask(); // x=true y=false z=false w=false PX_FORCE_INLINE BoolV BXMask(); // x=false y=true z=false w=false PX_FORCE_INLINE BoolV BYMask(); // x=false y=false z=true w=false PX_FORCE_INLINE BoolV BZMask(); // get x 
component PX_FORCE_INLINE BoolV BGetX(const BoolV f); // get y component PX_FORCE_INLINE BoolV BGetY(const BoolV f); // get z component PX_FORCE_INLINE BoolV BGetZ(const BoolV f); // get w component PX_FORCE_INLINE BoolV BGetW(const BoolV f); // Use elementIndex to splat xxxx or yyyy or zzzz or wwww template <int elementIndex> PX_FORCE_INLINE BoolV BSplatElement(Vec4V a); // component-wise && (AND) PX_FORCE_INLINE BoolV BAnd(const BoolV a, const BoolV b); // component-wise || (OR) PX_FORCE_INLINE BoolV BOr(const BoolV a, const BoolV b); // component-wise not PX_FORCE_INLINE BoolV BNot(const BoolV a); // if all four components are true, return true, otherwise return false PX_FORCE_INLINE BoolV BAllTrue4(const BoolV a); // if any four components is true, return true, otherwise return false PX_FORCE_INLINE BoolV BAnyTrue4(const BoolV a); // if all three(0, 1, 2) components are true, return true, otherwise return false PX_FORCE_INLINE BoolV BAllTrue3(const BoolV a); // if any three (0, 1, 2) components is true, return true, otherwise return false PX_FORCE_INLINE BoolV BAnyTrue3(const BoolV a); // Return 1 if all components equal, zero otherwise. PX_FORCE_INLINE PxU32 BAllEq(const BoolV a, const BoolV b); // Specialized/faster BAllEq function for b==TTTT PX_FORCE_INLINE PxU32 BAllEqTTTT(const BoolV a); // Specialized/faster BAllEq function for b==FFFF PX_FORCE_INLINE PxU32 BAllEqFFFF(const BoolV a); /// Get BoolV as bits set in an PxU32. A bit in the output is set if the element is 'true' in the input. /// There is a bit for each element in a, with element 0s value held in bit0, element 1 in bit 1s and so forth. /// If nothing is true in the input it will return 0, and if all are true if will return 0xf. /// NOTE! That performance of the function varies considerably by platform, thus it is recommended to use /// where your algorithm really needs a BoolV in an integer variable. 
PX_FORCE_INLINE PxU32 BGetBitMask(const BoolV a); // VecI32V stuff PX_FORCE_INLINE VecI32V VecI32V_Zero(); PX_FORCE_INLINE VecI32V VecI32V_One(); PX_FORCE_INLINE VecI32V VecI32V_Two(); PX_FORCE_INLINE VecI32V VecI32V_MinusOne(); // Compute a shift parameter for VecI32V_LeftShift and VecI32V_RightShift // Each element of shift must be identical ie the vector must have form {count, count, count, count} with count>=0 PX_FORCE_INLINE VecShiftV VecI32V_PrepareShift(const VecI32VArg shift); // Shift each element of a leftwards by the same amount // Compute shift with VecI32V_PrepareShift //{a.x<<shift[0], a.y<<shift[0], a.z<<shift[0], a.w<<shift[0]} PX_FORCE_INLINE VecI32V VecI32V_LeftShift(const VecI32VArg a, const VecShiftVArg shift); // Shift each element of a rightwards by the same amount // Compute shift with VecI32V_PrepareShift //{a.x>>shift[0], a.y>>shift[0], a.z>>shift[0], a.w>>shift[0]} PX_FORCE_INLINE VecI32V VecI32V_RightShift(const VecI32VArg a, const VecShiftVArg shift); PX_FORCE_INLINE VecI32V VecI32V_Add(const VecI32VArg a, const VecI32VArg b); PX_FORCE_INLINE VecI32V VecI32V_Or(const VecI32VArg a, const VecI32VArg b); PX_FORCE_INLINE VecI32V VecI32V_GetX(const VecI32VArg a); PX_FORCE_INLINE VecI32V VecI32V_GetY(const VecI32VArg a); PX_FORCE_INLINE VecI32V VecI32V_GetZ(const VecI32VArg a); PX_FORCE_INLINE VecI32V VecI32V_GetW(const VecI32VArg a); PX_FORCE_INLINE VecI32V VecI32V_Sub(const VecI32VArg a, const VecI32VArg b); PX_FORCE_INLINE BoolV VecI32V_IsGrtr(const VecI32VArg a, const VecI32VArg b); PX_FORCE_INLINE BoolV VecI32V_IsEq(const VecI32VArg a, const VecI32VArg b); PX_FORCE_INLINE VecI32V V4I32Sel(const BoolV c, const VecI32V a, const VecI32V b); // VecU32V stuff PX_FORCE_INLINE VecU32V U4Zero(); PX_FORCE_INLINE VecU32V U4One(); PX_FORCE_INLINE VecU32V U4Two(); PX_FORCE_INLINE BoolV V4IsEqU32(const VecU32V a, const VecU32V b); PX_FORCE_INLINE VecU32V V4U32Sel(const BoolV c, const VecU32V a, const VecU32V b); PX_FORCE_INLINE VecU32V V4U32or(VecU32V 
a, VecU32V b); PX_FORCE_INLINE VecU32V V4U32xor(VecU32V a, VecU32V b); PX_FORCE_INLINE VecU32V V4U32and(VecU32V a, VecU32V b); PX_FORCE_INLINE VecU32V V4U32Andc(VecU32V a, VecU32V b); // VecU32 - why does this not return a bool? PX_FORCE_INLINE VecU32V V4IsGrtrV32u(const Vec4V a, const Vec4V b); // Math operations on 16-byte aligned Mat33s (represents any 3x3 matrix) PX_FORCE_INLINE Mat33V M33Load(const PxMat33& m) { return Mat33V(Vec3V_From_Vec4V(V4LoadU(&m.column0.x)), Vec3V_From_Vec4V(V4LoadU(&m.column1.x)), V3LoadU(m.column2)); } // a*b PX_FORCE_INLINE Vec3V M33MulV3(const Mat33V& a, const Vec3V b); // A*x + b PX_FORCE_INLINE Vec3V M33MulV3AddV3(const Mat33V& A, const Vec3V b, const Vec3V c); // transpose(a) * b PX_FORCE_INLINE Vec3V M33TrnspsMulV3(const Mat33V& a, const Vec3V b); // a*b PX_FORCE_INLINE Mat33V M33MulM33(const Mat33V& a, const Mat33V& b); // a+b PX_FORCE_INLINE Mat33V M33Add(const Mat33V& a, const Mat33V& b); // a+b PX_FORCE_INLINE Mat33V M33Sub(const Mat33V& a, const Mat33V& b); //-a PX_FORCE_INLINE Mat33V M33Neg(const Mat33V& a); // absolute value of the matrix PX_FORCE_INLINE Mat33V M33Abs(const Mat33V& a); // inverse mat PX_FORCE_INLINE Mat33V M33Inverse(const Mat33V& a); // transpose(a) PX_FORCE_INLINE Mat33V M33Trnsps(const Mat33V& a); // create an identity matrix PX_FORCE_INLINE Mat33V M33Identity(); // create a vec3 to store the diagonal element of the M33 PX_FORCE_INLINE Mat33V M33Diagonal(const Vec3VArg); // Not implemented // return 1 if all components of a are equal to all components of b // PX_FORCE_INLINE PxU32 V4U32AllEq(const VecU32V a, const VecU32V b); // v.w=f // PX_FORCE_INLINE void V3WriteW(Vec3V& v, const PxF32 f); // PX_FORCE_INLINE PxF32 V3ReadW(const Vec3V& v); // Not used // PX_FORCE_INLINE Vec4V V4LoadAligned(Vec4V* addr); // PX_FORCE_INLINE Vec4V V4LoadUnaligned(Vec4V* addr); // floor(a)(per component) // PX_FORCE_INLINE Vec4V V4Floor(Vec4V a); // ceil(a) (per component) // PX_FORCE_INLINE Vec4V V4Ceil(Vec4V a); // 
PX_FORCE_INLINE VecU32V V4ConvertToU32VSaturate(const Vec4V a, PxU32 power); // Math operations on 16-byte aligned Mat34s (represents transformation matrix - rotation and translation). // namespace _Mat34V //{ // //a*b // PX_FORCE_INLINE Vec3V multiplyV(const Mat34V& a, const Vec3V b); // //a_rotation * b // PX_FORCE_INLINE Vec3V multiply3X3V(const Mat34V& a, const Vec3V b); // //transpose(a_rotation)*b // PX_FORCE_INLINE Vec3V multiplyTranspose3X3V(const Mat34V& a, const Vec3V b); // //a*b // PX_FORCE_INLINE Mat34V multiplyV(const Mat34V& a, const Mat34V& b); // //a_rotation*b // PX_FORCE_INLINE Mat33V multiply3X3V(const Mat34V& a, const Mat33V& b); // //a_rotation*b_rotation // PX_FORCE_INLINE Mat33V multiply3X3V(const Mat34V& a, const Mat34V& b); // //a+b // PX_FORCE_INLINE Mat34V addV(const Mat34V& a, const Mat34V& b); // //a^-1 // PX_FORCE_INLINE Mat34V getInverseV(const Mat34V& a); // //transpose(a_rotation) // PX_FORCE_INLINE Mat33V getTranspose3X3(const Mat34V& a); //}; //namespace _Mat34V // a*b //#define M34MulV3(a,b) (M34MulV3(a,b)) ////a_rotation * b //#define M34Mul33V3(a,b) (M34Mul33V3(a,b)) ////transpose(a_rotation)*b //#define M34TrnspsMul33V3(a,b) (M34TrnspsMul33V3(a,b)) ////a*b //#define M34MulM34(a,b) (_Mat34V::multiplyV(a,b)) // a_rotation*b //#define M34MulM33(a,b) (M34MulM33(a,b)) // a_rotation*b_rotation //#define M34Mul33MM34(a,b) (M34MulM33(a,b)) // a+b //#define M34Add(a,b) (M34Add(a,b)) ////a^-1 //#define M34Inverse(a,b) (M34Inverse(a)) // transpose(a_rotation) //#define M34Trnsps33(a) (M33Trnsps3X3(a)) // Math operations on 16-byte aligned Mat44s (represents any 4x4 matrix) // namespace _Mat44V //{ // //a*b // PX_FORCE_INLINE Vec4V multiplyV(const Mat44V& a, const Vec4V b); // //transpose(a)*b // PX_FORCE_INLINE Vec4V multiplyTransposeV(const Mat44V& a, const Vec4V b); // //a*b // PX_FORCE_INLINE Mat44V multiplyV(const Mat44V& a, const Mat44V& b); // //a+b // PX_FORCE_INLINE Mat44V addV(const Mat44V& a, const Mat44V& b); // //a&-1 // 
PX_FORCE_INLINE Mat44V getInverseV(const Mat44V& a); // //transpose(a) // PX_FORCE_INLINE Mat44V getTransposeV(const Mat44V& a); //}; //namespace _Mat44V // namespace _VecU32V //{ // // pack 8 U32s to 8 U16s with saturation // PX_FORCE_INLINE VecU16V pack2U32VToU16VSaturate(VecU32V a, VecU32V b); // PX_FORCE_INLINE VecU32V orV(VecU32V a, VecU32V b); // PX_FORCE_INLINE VecU32V andV(VecU32V a, VecU32V b); // PX_FORCE_INLINE VecU32V andcV(VecU32V a, VecU32V b); // // conversion from integer to float // PX_FORCE_INLINE Vec4V convertToVec4V(VecU32V a); // // splat a[elementIndex] into all fields of a // template<int elementIndex> // PX_FORCE_INLINE VecU32V splatElement(VecU32V a); // PX_FORCE_INLINE void storeAligned(VecU32V a, VecU32V* address); //}; // namespace _VecI32V //{ // template<int a> PX_FORCE_INLINE VecI32V splatI32(); //}; // // namespace _VecU16V //{ // PX_FORCE_INLINE VecU16V orV(VecU16V a, VecU16V b); // PX_FORCE_INLINE VecU16V andV(VecU16V a, VecU16V b); // PX_FORCE_INLINE VecU16V andcV(VecU16V a, VecU16V b); // PX_FORCE_INLINE void storeAligned(VecU16V val, VecU16V *address); // PX_FORCE_INLINE VecU16V loadAligned(VecU16V* addr); // PX_FORCE_INLINE VecU16V loadUnaligned(VecU16V* addr); // PX_FORCE_INLINE VecU16V compareGt(VecU16V a, VecU16V b); // template<int elementIndex> // PX_FORCE_INLINE VecU16V splatElement(VecU16V a); // PX_FORCE_INLINE VecU16V subtractModulo(VecU16V a, VecU16V b); // PX_FORCE_INLINE VecU16V addModulo(VecU16V a, VecU16V b); // PX_FORCE_INLINE VecU32V getLo16(VecU16V a); // [0,2,4,6] 16-bit values to [0,1,2,3] 32-bit vector // PX_FORCE_INLINE VecU32V getHi16(VecU16V a); // [1,3,5,7] 16-bit values to [0,1,2,3] 32-bit vector //}; // // namespace _VecI16V //{ // template <int val> PX_FORCE_INLINE VecI16V splatImmediate(); //}; // // namespace _VecU8V //{ //}; // a*b //#define M44MulV4(a,b) (M44MulV4(a,b)) ////transpose(a)*b //#define M44TrnspsMulV4(a,b) (M44TrnspsMulV4(a,b)) ////a*b //#define M44MulM44(a,b) (M44MulM44(a,b)) ////a+b 
//#define M44Add(a,b) (M44Add(a,b)) ////a&-1 //#define M44Inverse(a) (M44Inverse(a)) ////transpose(a) //#define M44Trnsps(a) (M44Trnsps(a)) // dsequeira: these used to be assert'd out in SIMD builds, but they're necessary if // we want to be able to write some scalar functions which run using SIMD data structures PX_FORCE_INLINE void V3WriteX(Vec3V& v, const PxF32 f) { reinterpret_cast<PxVec3&>(v).x = f; } PX_FORCE_INLINE void V3WriteY(Vec3V& v, const PxF32 f) { reinterpret_cast<PxVec3&>(v).y = f; } PX_FORCE_INLINE void V3WriteZ(Vec3V& v, const PxF32 f) { reinterpret_cast<PxVec3&>(v).z = f; } PX_FORCE_INLINE void V3WriteXYZ(Vec3V& v, const PxVec3& f) { reinterpret_cast<PxVec3&>(v) = f; } PX_FORCE_INLINE PxF32 V3ReadX(const Vec3V& v) { return reinterpret_cast<const PxVec3&>(v).x; } PX_FORCE_INLINE PxF32 V3ReadY(const Vec3V& v) { return reinterpret_cast<const PxVec3&>(v).y; } PX_FORCE_INLINE PxF32 V3ReadZ(const Vec3V& v) { return reinterpret_cast<const PxVec3&>(v).z; } PX_FORCE_INLINE const PxVec3& V3ReadXYZ(const Vec3V& v) { return reinterpret_cast<const PxVec3&>(v); } PX_FORCE_INLINE void V4WriteX(Vec4V& v, const PxF32 f) { reinterpret_cast<PxVec4&>(v).x = f; } PX_FORCE_INLINE void V4WriteY(Vec4V& v, const PxF32 f) { reinterpret_cast<PxVec4&>(v).y = f; } PX_FORCE_INLINE void V4WriteZ(Vec4V& v, const PxF32 f) { reinterpret_cast<PxVec4&>(v).z = f; } PX_FORCE_INLINE void V4WriteW(Vec4V& v, const PxF32 f) { reinterpret_cast<PxVec4&>(v).w = f; } PX_FORCE_INLINE void V4WriteXYZ(Vec4V& v, const PxVec3& f) { reinterpret_cast<PxVec3&>(v) = f; } PX_FORCE_INLINE PxF32 V4ReadX(const Vec4V& v) { return reinterpret_cast<const PxVec4&>(v).x; } PX_FORCE_INLINE PxF32 V4ReadY(const Vec4V& v) { return reinterpret_cast<const PxVec4&>(v).y; } PX_FORCE_INLINE PxF32 V4ReadZ(const Vec4V& v) { return reinterpret_cast<const PxVec4&>(v).z; } PX_FORCE_INLINE PxF32 V4ReadW(const Vec4V& v) { return reinterpret_cast<const PxVec4&>(v).w; } PX_FORCE_INLINE const PxVec3& V4ReadXYZ(const Vec4V& v) { 
return reinterpret_cast<const PxVec3&>(v); } // this macro transposes 4 Vec4V into 3 Vec4V (assuming that the W component can be ignored #define PX_TRANSPOSE_44_34(inA, inB, inC, inD, outA, outB, outC) \ outA = V4UnpackXY(inA, inC); \ inA = V4UnpackZW(inA, inC); \ inC = V4UnpackXY(inB, inD); \ inB = V4UnpackZW(inB, inD); \ outB = V4UnpackZW(outA, inC); \ outA = V4UnpackXY(outA, inC); \ outC = V4UnpackXY(inA, inB); // this macro transposes 3 Vec4V into 4 Vec4V (with W components as garbage!) #define PX_TRANSPOSE_34_44(inA, inB, inC, outA, outB, outC, outD) \ outA = V4UnpackXY(inA, inC); \ inA = V4UnpackZW(inA, inC); \ outC = V4UnpackXY(inB, inB); \ inC = V4UnpackZW(inB, inB); \ outB = V4UnpackZW(outA, outC); \ outA = V4UnpackXY(outA, outC); \ outC = V4UnpackXY(inA, inC); \ outD = V4UnpackZW(inA, inC); #define PX_TRANSPOSE_44(inA, inB, inC, inD, outA, outB, outC, outD) \ outA = V4UnpackXY(inA, inC); \ inA = V4UnpackZW(inA, inC); \ inC = V4UnpackXY(inB, inD); \ inB = V4UnpackZW(inB, inD); \ outB = V4UnpackZW(outA, inC); \ outA = V4UnpackXY(outA, inC); \ outC = V4UnpackXY(inA, inB); \ outD = V4UnpackZW(inA, inB); // This function returns a Vec4V, where each element is the dot product of one pair of Vec3Vs. On PC, each element in // the result should be identical to the results if V3Dot was performed // for each pair of Vec3V. // However, on other platforms, the result might diverge by some small margin due to differences in FP rounding, e.g. if // _mm_dp_ps was used or some other approximate dot product or fused madd operations // were used. // Where there does not exist a hw-accelerated dot-product operation, this approach should be the fastest way to compute // the dot product of 4 vectors. 
PX_FORCE_INLINE Vec4V V3Dot4(const Vec3VArg a0, const Vec3VArg b0, const Vec3VArg a1, const Vec3VArg b1, const Vec3VArg a2, const Vec3VArg b2, const Vec3VArg a3, const Vec3VArg b3) { Vec4V a0b0 = Vec4V_From_Vec3V(V3Mul(a0, b0)); Vec4V a1b1 = Vec4V_From_Vec3V(V3Mul(a1, b1)); Vec4V a2b2 = Vec4V_From_Vec3V(V3Mul(a2, b2)); Vec4V a3b3 = Vec4V_From_Vec3V(V3Mul(a3, b3)); Vec4V aTrnsps, bTrnsps, cTrnsps; PX_TRANSPOSE_44_34(a0b0, a1b1, a2b2, a3b3, aTrnsps, bTrnsps, cTrnsps); return V4Add(V4Add(aTrnsps, bTrnsps), cTrnsps); } //(f.x,f.y,f.z,0) - Alternative/faster V3LoadU implementation when it is safe to read "W", i.e. the 32bits after the PxVec3. PX_FORCE_INLINE Vec3V V3LoadU_SafeReadW(const PxVec3& f) { return Vec3V_From_Vec4V(V4LoadU(&f.x)); } } // namespace aos #if !PX_DOXYGEN } // namespace physx #endif // Now for the cross-platform implementations of the 16-byte aligned maths functions (win32/360/ppu/spu etc). #if COMPILE_VECTOR_INTRINSICS #include "PxInlineAoS.h" #else // #if COMPILE_VECTOR_INTRINSICS #include "PxVecMathAoSScalarInline.h" #endif // #if !COMPILE_VECTOR_INTRINSICS #include "PxVecQuat.h" #endif
51,480
C
37.562547
148
0.675408
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxVecMathSSE.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_VEC_MATH_SSE_H #define PX_VEC_MATH_SSE_H #if !PX_DOXYGEN namespace physx { #endif namespace aos { namespace { const PX_ALIGN(16, PxF32) minus1w[4] = { 0.0f, 0.0f, 0.0f, -1.0f }; } PX_FORCE_INLINE void QuatGetMat33V(const QuatVArg q, Vec3V& column0, Vec3V& column1, Vec3V& column2) { const __m128 q2 = V4Add(q, q); const __m128 qw2 = V4MulAdd(q2, V4GetW(q), _mm_load_ps(minus1w)); // (2wx, 2wy, 2wz, 2ww-1) const __m128 nw2 = Vec3V_From_Vec4V(V4Neg(qw2)); // (-2wx, -2wy, -2wz, 0) const __m128 v = Vec3V_From_Vec4V(q); const __m128 a0 = _mm_shuffle_ps(qw2, nw2, _MM_SHUFFLE(3, 1, 2, 3)); // (2ww-1, 2wz, -2wy, 0) column0 = V4MulAdd(v, V4GetX(q2), a0); const __m128 a1 = _mm_shuffle_ps(qw2, nw2, _MM_SHUFFLE(3, 2, 0, 3)); // (2ww-1, 2wx, -2wz, 0) column1 = V4MulAdd(v, V4GetY(q2), _mm_shuffle_ps(a1, a1, _MM_SHUFFLE(3, 1, 0, 2))); const __m128 a2 = _mm_shuffle_ps(qw2, nw2, _MM_SHUFFLE(3, 0, 1, 3)); // (2ww-1, 2wy, -2wx, 0) column2 = V4MulAdd(v, V4GetZ(q2), _mm_shuffle_ps(a2, a2, _MM_SHUFFLE(3, 0, 2, 1))); } } // namespace aos #if !PX_DOXYGEN } // namespace physx #endif #endif
2,848
C
40.289854
100
0.697331
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxMat44.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_MAT44_H #define PX_MAT44_H /** \addtogroup foundation @{ */ #include "foundation/PxQuat.h" #include "foundation/PxVec4.h" #include "foundation/PxMat33.h" #include "foundation/PxTransform.h" #if !PX_DOXYGEN namespace physx { #endif /*! 
\brief 4x4 matrix class This class is layout-compatible with D3D and OpenGL matrices. More notes on layout are given in the PxMat33 @see PxMat33 PxTransform */ template<class Type> class PxMat44T { public: //! Default constructor PX_CUDA_CALLABLE PX_INLINE PxMat44T() { } //! identity constructor PX_CUDA_CALLABLE PX_INLINE PxMat44T(PxIDENTITY) : column0(Type(1.0), Type(0.0), Type(0.0), Type(0.0)), column1(Type(0.0), Type(1.0), Type(0.0), Type(0.0)), column2(Type(0.0), Type(0.0), Type(1.0), Type(0.0)), column3(Type(0.0), Type(0.0), Type(0.0), Type(1.0)) { } //! zero constructor PX_CUDA_CALLABLE PX_INLINE PxMat44T(PxZERO) : column0(PxZero), column1(PxZero), column2(PxZero), column3(PxZero) { } //! Construct from four 4-vectors PX_CUDA_CALLABLE PxMat44T(const PxVec4T<Type>& col0, const PxVec4T<Type>& col1, const PxVec4T<Type>& col2, const PxVec4T<Type>& col3) : column0(col0), column1(col1), column2(col2), column3(col3) { } //! constructor that generates a multiple of the identity matrix explicit PX_CUDA_CALLABLE PX_INLINE PxMat44T(Type r) : column0(r, Type(0.0), Type(0.0), Type(0.0)), column1(Type(0.0), r, Type(0.0), Type(0.0)), column2(Type(0.0), Type(0.0), r, Type(0.0)), column3(Type(0.0), Type(0.0), Type(0.0), r) { } //! Construct from three base vectors and a translation PX_CUDA_CALLABLE PxMat44T(const PxVec3T<Type>& col0, const PxVec3T<Type>& col1, const PxVec3T<Type>& col2, const PxVec3T<Type>& col3) : column0(col0, Type(0.0)), column1(col1, Type(0.0)), column2(col2, Type(0.0)), column3(col3, Type(1.0)) { } //! Construct from Type[16] explicit PX_CUDA_CALLABLE PX_INLINE PxMat44T(Type values[]) : column0(values[0], values[1], values[2], values[3]), column1(values[4], values[5], values[6], values[7]), column2(values[8], values[9], values[10], values[11]), column3(values[12], values[13], values[14], values[15]) { } //! 
Construct from a quaternion explicit PX_CUDA_CALLABLE PX_INLINE PxMat44T(const PxQuatT<Type>& q) { // PT: TODO: PX-566 const Type x = q.x; const Type y = q.y; const Type z = q.z; const Type w = q.w; const Type x2 = x + x; const Type y2 = y + y; const Type z2 = z + z; const Type xx = x2 * x; const Type yy = y2 * y; const Type zz = z2 * z; const Type xy = x2 * y; const Type xz = x2 * z; const Type xw = x2 * w; const Type yz = y2 * z; const Type yw = y2 * w; const Type zw = z2 * w; column0 = PxVec4T<Type>(Type(1.0) - yy - zz, xy + zw, xz - yw, Type(0.0)); column1 = PxVec4T<Type>(xy - zw, Type(1.0) - xx - zz, yz + xw, Type(0.0)); column2 = PxVec4T<Type>(xz + yw, yz - xw, Type(1.0) - xx - yy, Type(0.0)); column3 = PxVec4T<Type>(Type(0.0), Type(0.0), Type(0.0), Type(1.0)); } //! Construct from a diagonal vector explicit PX_CUDA_CALLABLE PX_INLINE PxMat44T(const PxVec4T<Type>& diagonal) : column0(diagonal.x, Type(0.0), Type(0.0), Type(0.0)), column1(Type(0.0), diagonal.y, Type(0.0), Type(0.0)), column2(Type(0.0), Type(0.0), diagonal.z, Type(0.0)), column3(Type(0.0), Type(0.0), Type(0.0), diagonal.w) { } //! Construct from Mat33 and a translation PX_CUDA_CALLABLE PxMat44T(const PxMat33T<Type>& axes, const PxVec3T<Type>& position) : column0(axes.column0, Type(0.0)), column1(axes.column1, Type(0.0)), column2(axes.column2, Type(0.0)), column3(position, Type(1.0)) { } PX_CUDA_CALLABLE PxMat44T(const PxTransform& t) { *this = PxMat44T(PxMat33T<Type>(t.q), t.p); } /** \brief returns true if the two matrices are exactly equal */ PX_CUDA_CALLABLE PX_INLINE bool operator==(const PxMat44T& m) const { return column0 == m.column0 && column1 == m.column1 && column2 == m.column2 && column3 == m.column3; } //! Copy constructor PX_CUDA_CALLABLE PX_INLINE PxMat44T(const PxMat44T& other) : column0(other.column0), column1(other.column1), column2(other.column2), column3(other.column3) { } //! 
Assignment operator PX_CUDA_CALLABLE PX_INLINE PxMat44T& operator=(const PxMat44T& other) { column0 = other.column0; column1 = other.column1; column2 = other.column2; column3 = other.column3; return *this; } //! Get transposed matrix PX_CUDA_CALLABLE PX_INLINE const PxMat44T getTranspose() const { return PxMat44T( PxVec4T<Type>(column0.x, column1.x, column2.x, column3.x), PxVec4T<Type>(column0.y, column1.y, column2.y, column3.y), PxVec4T<Type>(column0.z, column1.z, column2.z, column3.z), PxVec4T<Type>(column0.w, column1.w, column2.w, column3.w)); } //! Unary minus PX_CUDA_CALLABLE PX_INLINE const PxMat44T operator-() const { return PxMat44T(-column0, -column1, -column2, -column3); } //! Add PX_CUDA_CALLABLE PX_INLINE const PxMat44T operator+(const PxMat44T& other) const { return PxMat44T(column0 + other.column0, column1 + other.column1, column2 + other.column2, column3 + other.column3); } //! Subtract PX_CUDA_CALLABLE PX_INLINE const PxMat44T operator-(const PxMat44T& other) const { return PxMat44T(column0 - other.column0, column1 - other.column1, column2 - other.column2, column3 - other.column3); } //! Scalar multiplication PX_CUDA_CALLABLE PX_INLINE const PxMat44T operator*(Type scalar) const { return PxMat44T(column0 * scalar, column1 * scalar, column2 * scalar, column3 * scalar); } template<class Type2> friend PxMat44T<Type2> operator*(Type2, const PxMat44T<Type2>&); //! Matrix multiplication PX_CUDA_CALLABLE PX_INLINE const PxMat44T operator*(const PxMat44T& other) const { // Rows from this <dot> columns from other // column0 = transform(other.column0) etc return PxMat44T(transform(other.column0), transform(other.column1), transform(other.column2), transform(other.column3)); } // a <op>= b operators //! Equals-add PX_CUDA_CALLABLE PX_INLINE PxMat44T& operator+=(const PxMat44T& other) { column0 += other.column0; column1 += other.column1; column2 += other.column2; column3 += other.column3; return *this; } //! 
Equals-sub PX_CUDA_CALLABLE PX_INLINE PxMat44T& operator-=(const PxMat44T& other) { column0 -= other.column0; column1 -= other.column1; column2 -= other.column2; column3 -= other.column3; return *this; } //! Equals scalar multiplication PX_CUDA_CALLABLE PX_INLINE PxMat44T& operator*=(Type scalar) { column0 *= scalar; column1 *= scalar; column2 *= scalar; column3 *= scalar; return *this; } //! Equals matrix multiplication PX_CUDA_CALLABLE PX_INLINE PxMat44T& operator*=(const PxMat44T& other) { *this = *this * other; return *this; } //! Element access, mathematical way! PX_CUDA_CALLABLE PX_FORCE_INLINE Type operator()(PxU32 row, PxU32 col) const { return (*this)[col][row]; } //! Element access, mathematical way! PX_CUDA_CALLABLE PX_FORCE_INLINE Type& operator()(PxU32 row, PxU32 col) { return (*this)[col][row]; } //! Transform vector by matrix, equal to v' = M*v PX_CUDA_CALLABLE PX_INLINE const PxVec4T<Type> transform(const PxVec4T<Type>& other) const { return column0 * other.x + column1 * other.y + column2 * other.z + column3 * other.w; } //! Transform vector by matrix, equal to v' = M*v PX_CUDA_CALLABLE PX_INLINE const PxVec3T<Type> transform(const PxVec3T<Type>& other) const { return transform(PxVec4T<Type>(other, Type(1.0))).getXYZ(); } //! Rotate vector by matrix, equal to v' = M*v PX_CUDA_CALLABLE PX_INLINE const PxVec4T<Type> rotate(const PxVec4T<Type>& other) const { return column0 * other.x + column1 * other.y + column2 * other.z; // + column3*0; } //! 
Rotate vector by matrix, equal to v' = M*v PX_CUDA_CALLABLE PX_INLINE const PxVec3T<Type> rotate(const PxVec3T<Type>& other) const { return rotate(PxVec4T<Type>(other, Type(1.0))).getXYZ(); } PX_CUDA_CALLABLE PX_INLINE const PxVec3T<Type> getBasis(PxU32 num) const { PX_ASSERT(num < 3); return (&column0)[num].getXYZ(); } PX_CUDA_CALLABLE PX_INLINE const PxVec3T<Type> getPosition() const { return column3.getXYZ(); } PX_CUDA_CALLABLE PX_INLINE void setPosition(const PxVec3T<Type>& position) { column3.x = position.x; column3.y = position.y; column3.z = position.z; } PX_CUDA_CALLABLE PX_FORCE_INLINE const Type* front() const { return &column0.x; } PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec4T<Type>& operator[](PxU32 num) { return (&column0)[num]; } PX_CUDA_CALLABLE PX_FORCE_INLINE const PxVec4T<Type>& operator[](PxU32 num) const { return (&column0)[num]; } PX_CUDA_CALLABLE PX_INLINE void scale(const PxVec4T<Type>& p) { column0 *= p.x; column1 *= p.y; column2 *= p.z; column3 *= p.w; } PX_CUDA_CALLABLE PX_INLINE const PxMat44T inverseRT(void) const { const PxVec3T<Type> r0(column0.x, column1.x, column2.x); const PxVec3T<Type> r1(column0.y, column1.y, column2.y); const PxVec3T<Type> r2(column0.z, column1.z, column2.z); return PxMat44T(r0, r1, r2, -(r0 * column3.x + r1 * column3.y + r2 * column3.z)); } PX_CUDA_CALLABLE PX_INLINE bool isFinite() const { return column0.isFinite() && column1.isFinite() && column2.isFinite() && column3.isFinite(); } // Data, see above for format! 
PxVec4T<Type> column0, column1, column2, column3; // the four base vectors }; // implementation from PxTransform.h template<class Type> PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransformT<Type>::PxTransformT(const PxMat44T<Type>& m) { const PxVec3T<Type> column0(m.column0.x, m.column0.y, m.column0.z); const PxVec3T<Type> column1(m.column1.x, m.column1.y, m.column1.z); const PxVec3T<Type> column2(m.column2.x, m.column2.y, m.column2.z); q = PxQuatT<Type>(PxMat33T<Type>(column0, column1, column2)); p = PxVec3T<Type>(m.column3.x, m.column3.y, m.column3.z); } typedef PxMat44T<float> PxMat44; typedef PxMat44T<double> PxMat44d; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
11,739
C
28.94898
136
0.692563
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxFoundation.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_FOUNDATION_H #define PX_FOUNDATION_H /** \addtogroup foundation @{ */ #include "foundation/Px.h" #include "foundation/PxErrors.h" #include "foundation/PxFoundationConfig.h" #include "foundation/PxErrors.h" #include <stdarg.h> #if !PX_DOXYGEN namespace physx { #endif class PxAllocationListener; class PxErrorCallback; /** \brief Foundation SDK singleton class. You need to have an instance of this class to instance the higher level SDKs. */ class PX_FOUNDATION_API PxFoundation { public: /** \brief Destroys the instance it is called on. The operation will fail, if there are still modules referencing the foundation object. Release all dependent modules prior to calling this method. @see PxCreateFoundation() */ virtual void release() = 0; /** retrieves error callback */ virtual PxErrorCallback& getErrorCallback() = 0; /** Sets mask of errors to report. */ virtual void setErrorLevel(PxErrorCode::Enum mask = PxErrorCode::eMASK_ALL) = 0; /** Retrieves mask of errors to be reported. */ virtual PxErrorCode::Enum getErrorLevel() const = 0; /** Retrieves the allocator this object was created with. */ virtual PxAllocatorCallback& getAllocatorCallback() = 0; /** Retrieves if allocation names are being passed to allocator callback. */ virtual bool getReportAllocationNames() const = 0; /** Set if allocation names are being passed to allocator callback. \details Enabled by default in debug and checked build, disabled by default in profile and release build. */ virtual void setReportAllocationNames(bool value) = 0; virtual void registerAllocationListener(PxAllocationListener& listener) = 0; virtual void deregisterAllocationListener(PxAllocationListener& listener) = 0; virtual void registerErrorCallback(PxErrorCallback& callback) = 0; virtual void deregisterErrorCallback(PxErrorCallback& callback) = 0; virtual bool error(PxErrorCode::Enum c, const char* file, int line, const char* messageFmt, ...) 
= 0; virtual bool error(PxErrorCode::Enum, const char* file, int line, const char* messageFmt, va_list) = 0; protected: virtual ~PxFoundation() { } }; #if !PX_DOXYGEN } // namespace physx #endif // PT: use this to make generated code shorter (e.g. from 52 to 24 bytes of assembly (10 to 4 instructions)) // We must use a macro here to let __FILE__ expand to the proper filename (it doesn't work with an inlined function). #define PX_IMPLEMENT_OUTPUT_ERROR \ template<const int errorCode> \ static PX_NOINLINE bool outputError(int line, const char* message) \ { \ return PxGetFoundation().error(PxErrorCode::Enum(errorCode), __FILE__, line, message); \ } /** \brief Creates an instance of the foundation class The foundation class is needed to initialize higher level SDKs. There may be only one instance per process. Calling this method after an instance has been created already will result in an error message and NULL will be returned. \param version Version number we are expecting (should be #PX_PHYSICS_VERSION) \param allocator User supplied interface for allocating memory(see #PxAllocatorCallback) \param errorCallback User supplied interface for reporting errors and displaying messages(see #PxErrorCallback) \return Foundation instance on success, NULL if operation failed @see PxFoundation */ PX_C_EXPORT PX_FOUNDATION_API physx::PxFoundation* PX_CALL_CONV PxCreateFoundation(physx::PxU32 version, physx::PxAllocatorCallback& allocator, physx::PxErrorCallback& errorCallback); PX_C_EXPORT PX_FOUNDATION_API void PX_CALL_CONV PxSetFoundationInstance(physx::PxFoundation& foundation); /** \brief Retrieves the Foundation SDK after it has been created. \note The behavior of this method is undefined if the foundation instance has not been created already. 
@see PxCreateFoundation(), PxIsFoundationValid() */ #if PX_CLANG #if PX_LINUX #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wreturn-type-c-linkage" #endif // PX_LINUX #endif // PX_CLANG PX_C_EXPORT PX_FOUNDATION_API physx::PxFoundation& PX_CALL_CONV PxGetFoundation(); #if PX_CLANG #if PX_LINUX #pragma clang diagnostic pop #endif // PX_LINUX #endif // PX_CLANG /** \brief Similar to PxGetFoundation() except it handles the case if the foundation was not created already. \return Pointer to the foundation if an instance is currently available, otherwise null. @see PxCreateFoundation(), PxGetFoundation() */ PX_C_EXPORT PX_FOUNDATION_API physx::PxFoundation* PX_CALL_CONV PxIsFoundationValid(); #if !PX_DOXYGEN namespace physx { #endif class PxProfilerCallback; class PxAllocatorCallback; class PxErrorCallback; #if !PX_DOXYGEN } // namespace physx #endif /** \brief Get the callback that will be used for all profiling. */ PX_C_EXPORT PX_FOUNDATION_API physx::PxProfilerCallback* PX_CALL_CONV PxGetProfilerCallback(); /** \brief Set the callback that will be used for all profiling. 
*/ PX_C_EXPORT PX_FOUNDATION_API void PX_CALL_CONV PxSetProfilerCallback(physx::PxProfilerCallback* profiler); /** \brief Get the allocator callback */ PX_C_EXPORT PX_FOUNDATION_API physx::PxAllocatorCallback* PX_CALL_CONV PxGetAllocatorCallback(); /** \brief Get the broadcasting allocator callback */ PX_C_EXPORT PX_FOUNDATION_API physx::PxAllocatorCallback* PX_CALL_CONV PxGetBroadcastAllocator(bool* reportAllocationNames = NULL); /** \brief Get the error callback */ PX_C_EXPORT PX_FOUNDATION_API physx::PxErrorCallback* PX_CALL_CONV PxGetErrorCallback(); /** \brief Get the broadcasting error callback */ PX_C_EXPORT PX_FOUNDATION_API physx::PxErrorCallback* PX_CALL_CONV PxGetBroadcastError(); /** \brief Get the warn once timestamp */ PX_C_EXPORT PX_FOUNDATION_API physx::PxU32 PX_CALL_CONV PxGetWarnOnceTimeStamp(); /** \brief Decrement the ref count of PxFoundation */ PX_C_EXPORT PX_FOUNDATION_API void PX_CALL_CONV PxDecFoundationRefCount(); /** \brief Increment the ref count of PxFoundation */ PX_C_EXPORT PX_FOUNDATION_API void PX_CALL_CONV PxIncFoundationRefCount(); /** @} */ #endif
7,741
C
31.529412
183
0.760884
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxMat34.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_MAT34_H #define PX_MAT34_H /** \addtogroup foundation @{ */ #include "foundation/PxTransform.h" #include "foundation/PxMat33.h" #if !PX_DOXYGEN namespace physx { #endif /*! Basic mathematical 3x4 matrix, implemented as a 3x3 rotation matrix and a translation See PxMat33 for the format of the rotation matrix. 
*/ template<class Type> class PxMat34T { public: //! Default constructor PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T() { } //! Construct from four base vectors PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T(const PxVec3T<Type>& b0, const PxVec3T<Type>& b1, const PxVec3T<Type>& b2, const PxVec3T<Type>& b3) : m(b0, b1, b2), p(b3) { } //! Construct from Type[12] explicit PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T(Type values[]) : m(values), p(values[9], values[10], values[11]) { } //! Construct from a 3x3 matrix explicit PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T(const PxMat33T<Type>& other) : m(other), p(PxZero) { } //! Construct from a 3x3 matrix and a translation vector PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T(const PxMat33T<Type>& other, const PxVec3T<Type>& t) : m(other), p(t) { } //! Construct from a PxTransformT<Type> explicit PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T(const PxTransformT<Type>& other) : m(other.q), p(other.p) { } //! Copy constructor PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T(const PxMat34T& other) : m(other.m), p(other.p) { } //! Assignment operator PX_CUDA_CALLABLE PX_FORCE_INLINE const PxMat34T& operator=(const PxMat34T& other) { m = other.m; p = other.p; return *this; } //! Set to identity matrix PX_CUDA_CALLABLE PX_FORCE_INLINE void setIdentity() { m = PxMat33T<Type>(PxIdentity); p = PxVec3T<Type>(0); } // Simpler operators //! Equality operator PX_CUDA_CALLABLE PX_FORCE_INLINE bool operator==(const PxMat34T& other) const { return m == other.m && p == other.p; } //! Inequality operator PX_CUDA_CALLABLE PX_FORCE_INLINE bool operator!=(const PxMat34T& other) const { return !operator==(other); } //! Unary minus PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T operator-() const { return PxMat34T(-m, -p); } //! Add PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T operator+(const PxMat34T& other) const { return PxMat34T(m + other.m, p + other.p); } //! 
Subtract PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T operator-(const PxMat34T& other) const { return PxMat34T(m - other.m, p - other.p); } //! Scalar multiplication PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T operator*(Type scalar) const { return PxMat34T(m*scalar, p*scalar); } //! Matrix multiplication PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T operator*(const PxMat34T& other) const { //Rows from this <dot> columns from other //base0 = rotate(other.m.column0) etc return PxMat34T(m*other.m, m*other.p + p); } //! Matrix multiplication, extend the second matrix PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T operator*(const PxMat33T<Type>& other) const { //Rows from this <dot> columns from other //base0 = transform(other.m.column0) etc return PxMat34T(m*other, p); } template<class Type2> friend PxMat34T<Type2> operator*(const PxMat33T<Type2>& a, const PxMat34T<Type2>& b); // a <op>= b operators //! Equals-add PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T& operator+=(const PxMat34T& other) { m += other.m; p += other.p; return *this; } //! Equals-sub PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T& operator-=(const PxMat34T& other) { m -= other.m; p -= other.p; return *this; } //! Equals scalar multiplication PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T& operator*=(Type scalar) { m *= scalar; p *= scalar; return *this; } //! Element access, mathematical way! PX_CUDA_CALLABLE PX_FORCE_INLINE Type operator()(PxU32 row, PxU32 col) const { return (*this)[col][row]; } //! Element access, mathematical way! PX_CUDA_CALLABLE PX_FORCE_INLINE Type& operator()(PxU32 row, PxU32 col) { return (*this)[col][row]; } // Transform etc //! Transform vector by matrix, equal to v' = M*v PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> rotate(const PxVec3T<Type>& other) const { return m*other; } //! Transform vector by transpose of matrix, equal to v' = M^t*v PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> rotateTranspose(const PxVec3T<Type>& other) const { return m.transformTranspose(other); } //! 
Transform point by matrix PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> transform(const PxVec3T<Type>& other) const { return m*other + p; } //! Transform point by transposed matrix PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> transformTranspose(const PxVec3T<Type>& other) const { return m.transformTranspose(other - p); } //! Transform point by transposed matrix PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T transformTranspose(const PxMat34T& other) const { return PxMat34T(m.transformTranspose(other.m.column0), m.transformTranspose(other.m.column1), m.transformTranspose(other.m.column2), m.transformTranspose(other.p - p)); } //! Invert matrix treating it as a rotation+translation matrix only PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T getInverseRT() const { return PxMat34T(m.getTranspose(), m.transformTranspose(-p)); } PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type>& operator[](PxU32 num) { return (&m.column0)[num]; } PX_CUDA_CALLABLE PX_FORCE_INLINE const PxVec3T<Type>& operator[](PxU32 num) const { return (&m.column0)[num]; } //Data, see above for format! PxMat33T<Type> m; PxVec3T<Type> p; }; //! Multiply a*b, a is extended template<class Type> PX_INLINE PxMat34T<Type> operator*(const PxMat33T<Type>& a, const PxMat34T<Type>& b) { return PxMat34T<Type>(a * b.m, a * b.p); } typedef PxMat34T<float> PxMat34; typedef PxMat34T<double> PxMat34d; //! A padded version of PxMat34, to safely load its data using SIMD class PxMat34Padded : public PxMat34 { public: PX_FORCE_INLINE PxMat34Padded(const PxMat34& src) : PxMat34(src) {} PX_FORCE_INLINE PxMat34Padded() {} PX_FORCE_INLINE ~PxMat34Padded() {} PxU32 padding; }; PX_COMPILE_TIME_ASSERT(0==(sizeof(PxMat34Padded)==16)); #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
7,928
C
27.521583
142
0.712412
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxAssert.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_ASSERT_H #define PX_ASSERT_H #include "foundation/PxFoundationConfig.h" #include "foundation/Px.h" /** \addtogroup foundation @{ */ #if !PX_DOXYGEN namespace physx { #endif /** * @brief Built-in assert function */ PX_FOUNDATION_API void PxAssert(const char* exp, const char* file, int line, bool& ignore); #if !PX_ENABLE_ASSERTS #define PX_ASSERT(exp) ((void)0) #define PX_ALWAYS_ASSERT_MESSAGE(exp) ((void)0) #define PX_ASSERT_WITH_MESSAGE(condition, message) ((void)0) #else #if PX_VC #define PX_CODE_ANALYSIS_ASSUME(exp) \ __analysis_assume(!!(exp)) // This macro will be used to get rid of analysis warning messages if a PX_ASSERT is used // to "guard" illegal mem access, for example. #else #define PX_CODE_ANALYSIS_ASSUME(exp) #endif #define PX_ASSERT(exp) \ { \ static bool _ignore = false; \ ((void)((!!(exp)) || (!_ignore && (physx::PxAssert(#exp, PX_FL, _ignore), false)))); \ PX_CODE_ANALYSIS_ASSUME(exp); \ } #define PX_ALWAYS_ASSERT_MESSAGE(exp) \ { \ static bool _ignore = false; \ if(!_ignore) \ physx::PxAssert(exp, PX_FL, _ignore); \ } #define PX_ASSERT_WITH_MESSAGE(exp, message) \ { \ static bool _ignore = false; \ ((void)((!!(exp)) || (!_ignore && (physx::PxAssert(message, PX_FL, _ignore), false)))); \ PX_CODE_ANALYSIS_ASSUME(exp); \ } #endif // !PX_ENABLE_ASSERTS #define PX_ALWAYS_ASSERT() PX_ASSERT(0) #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
3,371
C
36.054945
118
0.667161
NVIDIA-Omniverse/PhysX/physx/include/foundation/unix/PxUnixIntrinsics.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PSFOUNDATION_PSUNIXINTRINSICS_H #define PSFOUNDATION_PSUNIXINTRINSICS_H #include "foundation/PxAssert.h" #include <math.h> // this file is for internal intrinsics - that is, intrinsics that are used in // cross platform code but do not appear in the API #if !(PX_LINUX || PX_APPLE_FAMILY) #error "This file should only be included by unix builds!!" #endif #if !PX_DOXYGEN namespace physx { #endif PX_FORCE_INLINE void PxMemoryBarrier() { __sync_synchronize(); } /*! Return the index of the highest set bit. Undefined for zero arg. */ PX_INLINE uint32_t PxHighestSetBitUnsafe(uint32_t v) { return uint32_t(31 - __builtin_clz(v)); } /*! Return the index of the highest set bit. Undefined for zero arg. */ PX_INLINE uint32_t PxLowestSetBitUnsafe(uint32_t v) { return uint32_t(__builtin_ctz(v)); } /*! Returns the index of the highest set bit. Returns 32 for v=0. */ PX_INLINE uint32_t PxCountLeadingZeros(uint32_t v) { if(v) return uint32_t(__builtin_clz(v)); else return 32u; } /*! Prefetch aligned 64B x86, 32b ARM around \c ptr+offset. */ PX_FORCE_INLINE void PxPrefetchLine(const void* ptr, uint32_t offset = 0) { #ifdef __CUDACC__ __builtin_prefetch(reinterpret_cast<const char*>(ptr) + offset, 0, 3); #else __builtin_prefetch(reinterpret_cast<const char* PX_RESTRICT>(ptr) + offset, 0, 3); #endif } /*! Prefetch \c count bytes starting at \c ptr. */ PX_FORCE_INLINE void PxPrefetch(const void* ptr, uint32_t count = 1) { const char* cp = reinterpret_cast<const char*>(ptr); uint64_t p = size_t(ptr); uint64_t startLine = p >> 6, endLine = (p + count - 1) >> 6; uint64_t lines = endLine - startLine + 1; do { PxPrefetchLine(cp); cp += 64; } while(--lines); } #if !PX_DOXYGEN } // namespace physx #endif #endif // #ifndef PSFOUNDATION_PSUNIXINTRINSICS_H
3,494
C
29.929203
83
0.732112
NVIDIA-Omniverse/PhysX/physx/include/foundation/unix/PxUnixMathIntrinsics.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PXFOUNDATION_PXUNIXINTRINSICS_H #define PXFOUNDATION_PXUNIXINTRINSICS_H #include "foundation/Px.h" #include "foundation/PxAssert.h" #if !(PX_LINUX || PX_APPLE_FAMILY) #error "This file should only be included by Unix builds!!" 
#endif #if PX_LINUX && !defined(__CUDACC__) && !PX_EMSCRIPTEN // Linux and CUDA compilation does not work with std::isfnite, as it is not marked as CUDA callable #include <cmath> #ifndef isfinite using std::isfinite; #endif #endif #include <math.h> #include <float.h> #if !PX_DOXYGEN namespace physx { #endif namespace intrinsics { //! \brief platform-specific absolute value PX_CUDA_CALLABLE PX_FORCE_INLINE float abs(float a) { return ::fabsf(a); } //! \brief platform-specific select float PX_CUDA_CALLABLE PX_FORCE_INLINE float fsel(float a, float b, float c) { return (a >= 0.0f) ? b : c; } //! \brief platform-specific sign PX_CUDA_CALLABLE PX_FORCE_INLINE float sign(float a) { return (a >= 0.0f) ? 1.0f : -1.0f; } //! \brief platform-specific reciprocal PX_CUDA_CALLABLE PX_FORCE_INLINE float recip(float a) { return 1.0f / a; } //! \brief platform-specific reciprocal estimate PX_CUDA_CALLABLE PX_FORCE_INLINE float recipFast(float a) { return 1.0f / a; } //! \brief platform-specific square root PX_CUDA_CALLABLE PX_FORCE_INLINE float sqrt(float a) { return ::sqrtf(a); } //! \brief platform-specific reciprocal square root PX_CUDA_CALLABLE PX_FORCE_INLINE float recipSqrt(float a) { return 1.0f / ::sqrtf(a); } PX_CUDA_CALLABLE PX_FORCE_INLINE float recipSqrtFast(float a) { return 1.0f / ::sqrtf(a); } //! \brief platform-specific sine PX_CUDA_CALLABLE PX_FORCE_INLINE float sin(float a) { return ::sinf(a); } //! \brief platform-specific cosine PX_CUDA_CALLABLE PX_FORCE_INLINE float cos(float a) { return ::cosf(a); } //! \brief platform-specific minimum PX_CUDA_CALLABLE PX_FORCE_INLINE float selectMin(float a, float b) { return a < b ? a : b; } //! \brief platform-specific maximum PX_CUDA_CALLABLE PX_FORCE_INLINE float selectMax(float a, float b) { return a > b ? a : b; } //! 
\brief platform-specific finiteness check (not INF or NAN) PX_CUDA_CALLABLE PX_FORCE_INLINE bool isFinite(float a) { //std::isfinite not recommended as of Feb 2017, since it doesn't work with g++/clang's floating point optimization. union localU { PxU32 i; float f; } floatUnion; floatUnion.f = a; return !((floatUnion.i & 0x7fffffff) >= 0x7f800000); } //! \brief platform-specific finiteness check (not INF or NAN) PX_CUDA_CALLABLE PX_FORCE_INLINE bool isFinite(double a) { return !!isfinite(a); } /*! Sets \c count bytes starting at \c dst to zero. */ PX_FORCE_INLINE void* memZero(void* dest, uint32_t count) { return memset(dest, 0, count); } /*! Sets \c count bytes starting at \c dst to \c c. */ PX_FORCE_INLINE void* memSet(void* dest, int32_t c, uint32_t count) { return memset(dest, c, count); } /*! Copies \c count bytes from \c src to \c dst. User memMove if regions overlap. */ PX_FORCE_INLINE void* memCopy(void* dest, const void* src, uint32_t count) { return memcpy(dest, src, count); } /*! Copies \c count bytes from \c src to \c dst. Supports overlapping regions. */ PX_FORCE_INLINE void* memMove(void* dest, const void* src, uint32_t count) { return memmove(dest, src, count); } } // namespace intrinsics #if !PX_DOXYGEN } // namespace physx #endif #endif // #ifndef PXFOUNDATION_PXUNIXINTRINSICS_H
5,123
C
27.309392
116
0.723795
NVIDIA-Omniverse/PhysX/physx/include/foundation/unix/PxUnixTrigConstants.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PXFOUNDATION_PXUNIXTRIGCONSTANTS_H #define PXFOUNDATION_PXUNIXTRIGCONSTANTS_H #include "foundation/PxPreprocessor.h" namespace physx { namespace aos { #if PX_CLANG #if PX_LINUX #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wmissing-variable-declarations" #endif #endif #define PX_GLOBALCONST extern const __attribute__((weak)) PX_ALIGN_PREFIX(16) struct PX_VECTORF32 { float f[4]; } PX_ALIGN_SUFFIX(16); PX_GLOBALCONST PX_VECTORF32 g_PXSinCoefficients0 = { { 1.0f, -0.166666667f, 8.333333333e-3f, -1.984126984e-4f } }; PX_GLOBALCONST PX_VECTORF32 g_PXSinCoefficients1 = { { 2.755731922e-6f, -2.505210839e-8f, 1.605904384e-10f, -7.647163732e-13f } }; PX_GLOBALCONST PX_VECTORF32 g_PXSinCoefficients2 = { { 2.811457254e-15f, -8.220635247e-18f, 1.957294106e-20f, -3.868170171e-23f } }; PX_GLOBALCONST PX_VECTORF32 g_PXCosCoefficients0 = { { 1.0f, -0.5f, 4.166666667e-2f, -1.388888889e-3f } }; PX_GLOBALCONST PX_VECTORF32 g_PXCosCoefficients1 = { { 2.480158730e-5f, -2.755731922e-7f, 2.087675699e-9f, -1.147074560e-11f } }; PX_GLOBALCONST PX_VECTORF32 g_PXCosCoefficients2 = { { 4.779477332e-14f, -1.561920697e-16f, 4.110317623e-19f, -8.896791392e-22f } }; PX_GLOBALCONST PX_VECTORF32 g_PXReciprocalTwoPi = { { PxInvTwoPi, PxInvTwoPi, PxInvTwoPi, PxInvTwoPi } }; PX_GLOBALCONST PX_VECTORF32 g_PXTwoPi = { { PxTwoPi, PxTwoPi, PxTwoPi, PxTwoPi } }; #if PX_CLANG #if PX_LINUX #pragma clang diagnostic pop #endif #endif } // namespace aos } // namespace physx #endif //PXFOUNDATION_PXUNIXTRIGCONSTANTS_H
3,228
C
41.486842
114
0.758055
NVIDIA-Omniverse/PhysX/physx/include/foundation/unix/sse2/PxUnixSse2InlineAoS.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PXFOUNDATION_PXUNIXSSE2INLINEAOS_H #define PXFOUNDATION_PXUNIXSSE2INLINEAOS_H #if !COMPILE_VECTOR_INTRINSICS #error Vector intrinsics should not be included when using scalar implementation. 
#endif #ifdef __SSE4_2__ #include "smmintrin.h" #endif #include "../../PxVecMathSSE.h" namespace physx { namespace aos { #define PX_FPCLASS_SNAN 0x0001 /* signaling NaN */ #define PX_FPCLASS_QNAN 0x0002 /* quiet NaN */ #define PX_FPCLASS_NINF 0x0004 /* negative infinity */ #define PX_FPCLASS_PINF 0x0200 /* positive infinity */ PX_FORCE_INLINE __m128 m128_I2F(__m128i n) { return _mm_castsi128_ps(n); } PX_FORCE_INLINE __m128i m128_F2I(__m128 n) { return _mm_castps_si128(n); } ////////////////////////////////////////////////////////////////////// //Test that Vec3V and FloatV are legal ////////////////////////////////////////////////////////////////////// #define FLOAT_COMPONENTS_EQUAL_THRESHOLD 0.01f PX_FORCE_INLINE static bool isValidFloatV(const FloatV a) { const PxF32 x = V4ReadX(a); const PxF32 y = V4ReadY(a); const PxF32 z = V4ReadZ(a); const PxF32 w = V4ReadW(a); return (x == y && x == z && x == w); /*if ( (PxAbs(x - y) < FLOAT_COMPONENTS_EQUAL_THRESHOLD) && (PxAbs(x - z) < FLOAT_COMPONENTS_EQUAL_THRESHOLD) && (PxAbs(x - w) < FLOAT_COMPONENTS_EQUAL_THRESHOLD) ) { return true; } if ( (PxAbs((x - y) / x) < FLOAT_COMPONENTS_EQUAL_THRESHOLD) && (PxAbs((x - z) / x) < FLOAT_COMPONENTS_EQUAL_THRESHOLD) && (PxAbs((x - w) / x) < FLOAT_COMPONENTS_EQUAL_THRESHOLD) ) { return true; } return false;*/ } PX_FORCE_INLINE bool isValidVec3V(const Vec3V a) { PX_ALIGN(16, PxF32 f[4]); V4StoreA(a, f); return (f[3] == 0.0f); } PX_FORCE_INLINE bool isFiniteLength(const Vec3V a) { return !FAllEq(V4LengthSq(a), FZero()); } PX_FORCE_INLINE bool isAligned16(void* a) { return(0 == (size_t(a) & 0x0f)); } //ASSERT_FINITELENGTH is deactivated because there is a lot of code that calls a simd normalisation function with zero length but then ignores the result. 
#if PX_DEBUG #define ASSERT_ISVALIDVEC3V(a) PX_ASSERT(isValidVec3V(a)) #define ASSERT_ISVALIDFLOATV(a) PX_ASSERT(isValidFloatV(a)) #define ASSERT_ISALIGNED16(a) PX_ASSERT(isAligned16(reinterpret_cast<void*>(a))) #define ASSERT_ISFINITELENGTH(a) //PX_ASSERT(isFiniteLength(a)) #else #define ASSERT_ISVALIDVEC3V(a) #define ASSERT_ISVALIDFLOATV(a) #define ASSERT_ISALIGNED16(a) #define ASSERT_ISFINITELENGTH(a) #endif namespace internalUnitSSE2Simd { PX_FORCE_INLINE PxU32 BAllTrue4_R(const BoolV a) { const PxI32 moveMask = _mm_movemask_ps(a); return PxU32(moveMask == 0xf); } PX_FORCE_INLINE PxU32 BAllTrue3_R(const BoolV a) { const PxI32 moveMask = _mm_movemask_ps(a); return PxU32((moveMask & 0x7) == 0x7); } PX_FORCE_INLINE PxU32 BAnyTrue4_R(const BoolV a) { const PxI32 moveMask = _mm_movemask_ps(a); return PxU32(moveMask != 0x0); } PX_FORCE_INLINE PxU32 BAnyTrue3_R(const BoolV a) { const PxI32 moveMask = _mm_movemask_ps(a); return PxU32((moveMask & 0x7) != 0x0); } PX_FORCE_INLINE PxU32 FiniteTestEq(const Vec4V a, const Vec4V b) { // This is a bit of a bodge. //_mm_comieq_ss returns 1 if either value is nan so we need to re-cast a and b with true encoded as a non-nan // number. // There must be a better way of doing this in sse. 
const BoolV one = FOne(); const BoolV zero = FZero(); const BoolV a1 = V4Sel(a, one, zero); const BoolV b1 = V4Sel(b, one, zero); return ( _mm_comieq_ss(a1, b1) && _mm_comieq_ss(_mm_shuffle_ps(a1, a1, _MM_SHUFFLE(1, 1, 1, 1)), _mm_shuffle_ps(b1, b1, _MM_SHUFFLE(1, 1, 1, 1))) && _mm_comieq_ss(_mm_shuffle_ps(a1, a1, _MM_SHUFFLE(2, 2, 2, 2)), _mm_shuffle_ps(b1, b1, _MM_SHUFFLE(2, 2, 2, 2))) && _mm_comieq_ss(_mm_shuffle_ps(a1, a1, _MM_SHUFFLE(3, 3, 3, 3)), _mm_shuffle_ps(b1, b1, _MM_SHUFFLE(3, 3, 3, 3)))); } #if !PX_EMSCRIPTEN #if PX_CLANG #if PX_LINUX #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wglobal-constructors" #endif #endif const PX_ALIGN(16, PxF32 gMaskXYZ[4]) = { physx::PxUnionCast<PxF32>(0xffffffff), physx::PxUnionCast<PxF32>(0xffffffff), physx::PxUnionCast<PxF32>(0xffffffff), 0 }; #if PX_CLANG #if PX_LINUX #pragma clang diagnostic pop #endif #endif #else // emscripten doesn't like the PxUnionCast data structure // the following is what windows and xbox does -- using these for emscripten const PX_ALIGN(16, PxU32 gMaskXYZ[4]) = { 0xffffffff, 0xffffffff, 0xffffffff, 0 }; #endif } namespace vecMathTests { // PT: this function returns an invalid Vec3V (W!=0.0f) just for unit-testing 'isValidVec3V' PX_FORCE_INLINE Vec3V getInvalidVec3V() { const float f = 1.0f; return _mm_load1_ps(&f); } PX_FORCE_INLINE bool allElementsEqualFloatV(const FloatV a, const FloatV b) { ASSERT_ISVALIDFLOATV(a); ASSERT_ISVALIDFLOATV(b); return _mm_comieq_ss(a, b) != 0; } PX_FORCE_INLINE bool allElementsEqualVec3V(const Vec3V a, const Vec3V b) { return V3AllEq(a, b) != 0; } PX_FORCE_INLINE bool allElementsEqualVec4V(const Vec4V a, const Vec4V b) { return V4AllEq(a, b) != 0; } PX_FORCE_INLINE bool allElementsEqualBoolV(const BoolV a, const BoolV b) { return internalUnitSSE2Simd::BAllTrue4_R(VecI32V_IsEq(m128_F2I(a), m128_F2I(b))) != 0; } PX_FORCE_INLINE bool allElementsEqualVecU32V(const VecU32V a, const VecU32V b) { return 
internalUnitSSE2Simd::BAllTrue4_R(V4IsEqU32(a, b)) != 0; } PX_FORCE_INLINE bool allElementsEqualVecI32V(const VecI32V a, const VecI32V b) { BoolV c = m128_I2F(_mm_cmpeq_epi32(a, b)); return internalUnitSSE2Simd::BAllTrue4_R(c) != 0; } #define VECMATH_AOS_EPSILON (1e-3f) PX_FORCE_INLINE bool allElementsNearEqualFloatV(const FloatV a, const FloatV b) { ASSERT_ISVALIDFLOATV(a); ASSERT_ISVALIDFLOATV(b); const FloatV c = FSub(a, b); const FloatV minError = FLoad(-VECMATH_AOS_EPSILON); const FloatV maxError = FLoad(VECMATH_AOS_EPSILON); return _mm_comigt_ss(c, minError) && _mm_comilt_ss(c, maxError); } PX_FORCE_INLINE bool allElementsNearEqualVec3V(const Vec3V a, const Vec3V b) { const Vec3V c = V3Sub(a, b); const Vec3V minError = V3Load(-VECMATH_AOS_EPSILON); const Vec3V maxError = V3Load(VECMATH_AOS_EPSILON); return (_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0, 0, 0, 0)), minError) && _mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0, 0, 0, 0)), maxError) && _mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1, 1, 1, 1)), minError) && _mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1, 1, 1, 1)), maxError) && _mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2, 2, 2, 2)), minError) && _mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2, 2, 2, 2)), maxError)); } PX_FORCE_INLINE bool allElementsNearEqualVec4V(const Vec4V a, const Vec4V b) { const Vec4V c = V4Sub(a, b); const Vec4V minError = V4Load(-VECMATH_AOS_EPSILON); const Vec4V maxError = V4Load(VECMATH_AOS_EPSILON); return (_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0, 0, 0, 0)), minError) && _mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0, 0, 0, 0)), maxError) && _mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1, 1, 1, 1)), minError) && _mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1, 1, 1, 1)), maxError) && _mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2, 2, 2, 2)), minError) && _mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2, 2, 2, 2)), maxError) && _mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(3, 3, 3, 3)), 
minError) && _mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(3, 3, 3, 3)), maxError)); } } ///////////////////////////////////////////////////////////////////// ////FUNCTIONS USED ONLY FOR ASSERTS IN VECTORISED IMPLEMENTATIONS ///////////////////////////////////////////////////////////////////// PX_FORCE_INLINE bool isFiniteFloatV(const FloatV a) { PxF32 badNumber = physx::PxUnionCast<PxF32, PxU32>(PX_FPCLASS_SNAN | PX_FPCLASS_QNAN | PX_FPCLASS_NINF | PX_FPCLASS_PINF); const FloatV vBadNum = FLoad(badNumber); const BoolV vMask = BAnd(vBadNum, a); return internalUnitSSE2Simd::FiniteTestEq(vMask, BFFFF()) == 1; } PX_FORCE_INLINE bool isFiniteVec3V(const Vec3V a) { PxF32 badNumber = physx::PxUnionCast<PxF32, PxU32>(PX_FPCLASS_SNAN | PX_FPCLASS_QNAN | PX_FPCLASS_NINF | PX_FPCLASS_PINF); const Vec3V vBadNum = V3Load(badNumber); const BoolV vMask = BAnd(BAnd(vBadNum, a), BTTTF()); return internalUnitSSE2Simd::FiniteTestEq(vMask, BFFFF()) == 1; } PX_FORCE_INLINE bool isFiniteVec4V(const Vec4V a) { /*Vec4V a; PX_ALIGN(16, PxF32 f[4]); F32Array_Aligned_From_Vec4V(a, f); return PxIsFinite(f[0]) && PxIsFinite(f[1]) && PxIsFinite(f[2]) && PxIsFinite(f[3]);*/ PxF32 badNumber = physx::PxUnionCast<PxF32, PxU32>(PX_FPCLASS_SNAN | PX_FPCLASS_QNAN | PX_FPCLASS_NINF | PX_FPCLASS_PINF); const Vec4V vBadNum = V4Load(badNumber); const BoolV vMask = BAnd(vBadNum, a); return internalUnitSSE2Simd::FiniteTestEq(vMask, BFFFF()) == 1; } PX_FORCE_INLINE bool hasZeroElementinFloatV(const FloatV a) { ASSERT_ISVALIDFLOATV(a); return _mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 0, 0, 0)), FZero()) ? 
true : false; } PX_FORCE_INLINE bool hasZeroElementInVec3V(const Vec3V a) { return (_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 0, 0, 0)), FZero()) || _mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(1, 1, 1, 1)), FZero()) || _mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 2, 2, 2)), FZero())); } PX_FORCE_INLINE bool hasZeroElementInVec4V(const Vec4V a) { return (_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 0, 0, 0)), FZero()) || _mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(1, 1, 1, 1)), FZero()) || _mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 2, 2, 2)), FZero()) || _mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 3, 3, 3)), FZero())); } ///////////////////////////////////////////////////////////////////// ////VECTORISED FUNCTION IMPLEMENTATIONS ///////////////////////////////////////////////////////////////////// PX_FORCE_INLINE FloatV FLoad(const PxF32 f) { return _mm_load1_ps(&f); } PX_FORCE_INLINE Vec3V V3Load(const PxF32 f) { return _mm_set_ps(0.0f, f, f, f); } PX_FORCE_INLINE Vec4V V4Load(const PxF32 f) { return _mm_load1_ps(&f); } PX_FORCE_INLINE BoolV BLoad(const bool f) { const PxU32 i = PxU32(-PxI32(f)); return _mm_load1_ps(reinterpret_cast<const float*>(&i)); } PX_FORCE_INLINE Vec3V V3LoadA(const PxVec3& f) { ASSERT_ISALIGNED16(const_cast<PxVec3*>(&f)); #if !PX_EMSCRIPTEN return _mm_and_ps(reinterpret_cast<const Vec3V&>(f), V4LoadA(internalUnitSSE2Simd::gMaskXYZ)); #else return _mm_and_ps((Vec3V&)f, (VecI32V&)internalUnitSSE2Simd::gMaskXYZ); #endif } PX_FORCE_INLINE Vec3V V3LoadU(const PxVec3& f) { return _mm_set_ps(0.0f, f.z, f.y, f.x); } PX_FORCE_INLINE Vec3V V3LoadUnsafeA(const PxVec3& f) { ASSERT_ISALIGNED16(const_cast<PxVec3*>(&f)); return _mm_set_ps(0.0f, f.z, f.y, f.x); } PX_FORCE_INLINE Vec3V V3LoadA(const PxF32* const f) { ASSERT_ISALIGNED16(const_cast<PxF32*>(f)); #if !PX_EMSCRIPTEN return _mm_and_ps(V4LoadA(f), V4LoadA(internalUnitSSE2Simd::gMaskXYZ)); #else return _mm_and_ps((Vec3V&)*f, 
(VecI32V&)internalUnitSSE2Simd::gMaskXYZ); #endif } PX_FORCE_INLINE Vec3V V3LoadU(const PxF32* const i) { return _mm_set_ps(0.0f, i[2], i[1], i[0]); } PX_FORCE_INLINE Vec3V Vec3V_From_Vec4V(Vec4V v) { return V4ClearW(v); } PX_FORCE_INLINE Vec3V Vec3V_From_Vec4V_WUndefined(const Vec4V v) { return v; } PX_FORCE_INLINE Vec4V Vec4V_From_Vec3V(Vec3V f) { ASSERT_ISVALIDVEC3V(f); return f; // ok if it is implemented as the same type. } PX_FORCE_INLINE Vec4V Vec4V_From_PxVec3_WUndefined(const PxVec3& f) { return _mm_set_ps(0.0f, f.z, f.y, f.x); } PX_FORCE_INLINE Vec4V Vec4V_From_FloatV(FloatV f) { return f; } PX_FORCE_INLINE Vec3V Vec3V_From_FloatV(FloatV f) { ASSERT_ISVALIDFLOATV(f); return Vec3V_From_Vec4V(Vec4V_From_FloatV(f)); } PX_FORCE_INLINE Vec3V Vec3V_From_FloatV_WUndefined(FloatV f) { ASSERT_ISVALIDVEC3V(f); return Vec3V_From_Vec4V_WUndefined(Vec4V_From_FloatV(f)); } PX_FORCE_INLINE Mat33V Mat33V_From_PxMat33(const PxMat33& m) { return Mat33V(V3LoadU(m.column0), V3LoadU(m.column1), V3LoadU(m.column2)); } PX_FORCE_INLINE void PxMat33_From_Mat33V(const Mat33V& m, PxMat33& out) { V3StoreU(m.col0, out.column0); V3StoreU(m.col1, out.column1); V3StoreU(m.col2, out.column2); } PX_FORCE_INLINE Vec4V V4LoadA(const PxF32* const f) { ASSERT_ISALIGNED16(const_cast<PxF32*>(f)); return _mm_load_ps(f); } PX_FORCE_INLINE void V4StoreA(Vec4V a, PxF32* f) { ASSERT_ISALIGNED16(f); _mm_store_ps(f, a); } PX_FORCE_INLINE void V4StoreU(const Vec4V a, PxF32* f) { _mm_storeu_ps(f, a); } PX_FORCE_INLINE void BStoreA(const BoolV a, PxU32* f) { ASSERT_ISALIGNED16(f); _mm_store_ps(reinterpret_cast<PxF32*>(f), a); } PX_FORCE_INLINE void U4StoreA(const VecU32V uv, PxU32* u) { ASSERT_ISALIGNED16(u); _mm_store_ps(reinterpret_cast<float*>(u), uv); } PX_FORCE_INLINE VecI32V I4LoadXYZW(const PxI32& x, const PxI32& y, const PxI32& z, const PxI32& w) { return _mm_set_epi32(w, z, y, x); } PX_FORCE_INLINE void I4StoreA(const VecI32V iv, PxI32* i) { ASSERT_ISALIGNED16(i); 
_mm_store_ps(reinterpret_cast<float*>(i), m128_I2F(iv)); } PX_FORCE_INLINE Vec4V V4LoadU(const PxF32* const f) { return _mm_loadu_ps(f); } PX_FORCE_INLINE BoolV BLoad(const bool* const f) { const PX_ALIGN(16, PxI32) b[4] = { -PxI32(f[0]), -PxI32(f[1]), -PxI32(f[2]), -PxI32(f[3]) }; return _mm_load_ps(reinterpret_cast<const float*>(&b)); } PX_FORCE_INLINE void FStore(const FloatV a, PxF32* PX_RESTRICT f) { ASSERT_ISVALIDFLOATV(a); _mm_store_ss(f, a); } PX_FORCE_INLINE void V3StoreA(const Vec3V a, PxVec3& f) { ASSERT_ISALIGNED16(&f); PX_ALIGN(16, PxF32) f2[4]; _mm_store_ps(f2, a); f = PxVec3(f2[0], f2[1], f2[2]); } PX_FORCE_INLINE void V3StoreU(const Vec3V a, PxVec3& f) { PX_ALIGN(16, PxF32) f2[4]; _mm_store_ps(f2, a); f = PxVec3(f2[0], f2[1], f2[2]); } PX_FORCE_INLINE void Store_From_BoolV(const BoolV b, PxU32* b2) { _mm_store_ss(reinterpret_cast<PxF32*>(b2), b); } PX_FORCE_INLINE VecU32V U4Load(const PxU32 i) { return _mm_load1_ps(reinterpret_cast<const PxF32*>(&i)); } PX_FORCE_INLINE VecU32V U4LoadU(const PxU32* i) { return _mm_loadu_ps(reinterpret_cast<const PxF32*>(i)); } PX_FORCE_INLINE VecU32V U4LoadA(const PxU32* i) { ASSERT_ISALIGNED16(const_cast<PxU32*>(i)); return _mm_load_ps(reinterpret_cast<const PxF32*>(i)); } ////////////////////////////////// // FLOATV ////////////////////////////////// PX_FORCE_INLINE FloatV FZero() { return FLoad(0.0f); } PX_FORCE_INLINE FloatV FOne() { return FLoad(1.0f); } PX_FORCE_INLINE FloatV FHalf() { return FLoad(0.5f); } PX_FORCE_INLINE FloatV FEps() { return FLoad(PX_EPS_REAL); } PX_FORCE_INLINE FloatV FEps6() { return FLoad(1e-6f); } PX_FORCE_INLINE FloatV FMax() { return FLoad(PX_MAX_REAL); } PX_FORCE_INLINE FloatV FNegMax() { return FLoad(-PX_MAX_REAL); } PX_FORCE_INLINE FloatV IZero() { const PxU32 zero = 0; return _mm_load1_ps(reinterpret_cast<const PxF32*>(&zero)); } PX_FORCE_INLINE FloatV IOne() { const PxU32 one = 1; return _mm_load1_ps(reinterpret_cast<const PxF32*>(&one)); } PX_FORCE_INLINE FloatV ITwo() { const 
PxU32 two = 2; return _mm_load1_ps(reinterpret_cast<const PxF32*>(&two)); } PX_FORCE_INLINE FloatV IThree() { const PxU32 three = 3; return _mm_load1_ps(reinterpret_cast<const PxF32*>(&three)); } PX_FORCE_INLINE FloatV IFour() { PxU32 four = 4; return _mm_load1_ps(reinterpret_cast<const PxF32*>(&four)); } PX_FORCE_INLINE FloatV FNeg(const FloatV f) { ASSERT_ISVALIDFLOATV(f); return _mm_sub_ps(_mm_setzero_ps(), f); } PX_FORCE_INLINE FloatV FAdd(const FloatV a, const FloatV b) { ASSERT_ISVALIDFLOATV(a); ASSERT_ISVALIDFLOATV(b); /* if(!isValidFloatV(a)) { assert(false); } if(!isValidFloatV(b)) { assert(false); } */ return _mm_add_ps(a, b); } PX_FORCE_INLINE FloatV FSub(const FloatV a, const FloatV b) { ASSERT_ISVALIDFLOATV(a); ASSERT_ISVALIDFLOATV(b); return _mm_sub_ps(a, b); } PX_FORCE_INLINE FloatV FMul(const FloatV a, const FloatV b) { ASSERT_ISVALIDFLOATV(a); ASSERT_ISVALIDFLOATV(b); return _mm_mul_ps(a, b); } PX_FORCE_INLINE FloatV FDiv(const FloatV a, const FloatV b) { ASSERT_ISVALIDFLOATV(a); ASSERT_ISVALIDFLOATV(b); return _mm_div_ps(a, b); } PX_FORCE_INLINE FloatV FDivFast(const FloatV a, const FloatV b) { ASSERT_ISVALIDFLOATV(a); ASSERT_ISVALIDFLOATV(b); return _mm_mul_ps(a, _mm_rcp_ps(b)); } PX_FORCE_INLINE FloatV FRecip(const FloatV a) { ASSERT_ISVALIDFLOATV(a); return _mm_div_ps(FOne(), a); } PX_FORCE_INLINE FloatV FRecipFast(const FloatV a) { ASSERT_ISVALIDFLOATV(a); return _mm_rcp_ps(a); } PX_FORCE_INLINE FloatV FRsqrt(const FloatV a) { ASSERT_ISVALIDFLOATV(a); return _mm_div_ps(FOne(), _mm_sqrt_ps(a)); } PX_FORCE_INLINE FloatV FSqrt(const FloatV a) { ASSERT_ISVALIDFLOATV(a); return _mm_sqrt_ps(a); } PX_FORCE_INLINE FloatV FRsqrtFast(const FloatV a) { ASSERT_ISVALIDFLOATV(a); return _mm_rsqrt_ps(a); } PX_FORCE_INLINE FloatV FScaleAdd(const FloatV a, const FloatV b, const FloatV c) { ASSERT_ISVALIDFLOATV(a); ASSERT_ISVALIDFLOATV(b); ASSERT_ISVALIDFLOATV(c); return FAdd(FMul(a, b), c); } PX_FORCE_INLINE FloatV FNegScaleSub(const FloatV a, const FloatV b, 
const FloatV c) { ASSERT_ISVALIDFLOATV(a); ASSERT_ISVALIDFLOATV(b); ASSERT_ISVALIDFLOATV(c); return FSub(c, FMul(a, b)); } PX_FORCE_INLINE FloatV FAbs(const FloatV a) { ASSERT_ISVALIDFLOATV(a); PX_ALIGN(16, const PxU32) absMask[4] = { 0x7fFFffFF, 0x7fFFffFF, 0x7fFFffFF, 0x7fFFffFF }; return _mm_and_ps(a, _mm_load_ps(reinterpret_cast<const PxF32*>(absMask))); } PX_FORCE_INLINE FloatV FSel(const BoolV c, const FloatV a, const FloatV b) { PX_ASSERT(vecMathTests::allElementsEqualBoolV(c,BTTTT()) || vecMathTests::allElementsEqualBoolV(c,BFFFF())); ASSERT_ISVALIDFLOATV(_mm_or_ps(_mm_andnot_ps(c, b), _mm_and_ps(c, a))); return _mm_or_ps(_mm_andnot_ps(c, b), _mm_and_ps(c, a)); } PX_FORCE_INLINE BoolV FIsGrtr(const FloatV a, const FloatV b) { ASSERT_ISVALIDFLOATV(a); ASSERT_ISVALIDFLOATV(b); return _mm_cmpgt_ps(a, b); } PX_FORCE_INLINE BoolV FIsGrtrOrEq(const FloatV a, const FloatV b) { ASSERT_ISVALIDFLOATV(a); ASSERT_ISVALIDFLOATV(b); return _mm_cmpge_ps(a, b); } PX_FORCE_INLINE BoolV FIsEq(const FloatV a, const FloatV b) { ASSERT_ISVALIDFLOATV(a); ASSERT_ISVALIDFLOATV(b); return _mm_cmpeq_ps(a, b); } PX_FORCE_INLINE FloatV FMax(const FloatV a, const FloatV b) { ASSERT_ISVALIDFLOATV(a); ASSERT_ISVALIDFLOATV(b); return _mm_max_ps(a, b); } PX_FORCE_INLINE FloatV FMin(const FloatV a, const FloatV b) { ASSERT_ISVALIDFLOATV(a); ASSERT_ISVALIDFLOATV(b); return _mm_min_ps(a, b); } PX_FORCE_INLINE FloatV FClamp(const FloatV a, const FloatV minV, const FloatV maxV) { ASSERT_ISVALIDFLOATV(minV); ASSERT_ISVALIDFLOATV(maxV); return _mm_max_ps(_mm_min_ps(a, maxV), minV); } PX_FORCE_INLINE PxU32 FAllGrtr(const FloatV a, const FloatV b) { ASSERT_ISVALIDFLOATV(a); ASSERT_ISVALIDFLOATV(b); return PxU32(_mm_comigt_ss(a, b)); } PX_FORCE_INLINE PxU32 FAllGrtrOrEq(const FloatV a, const FloatV b) { ASSERT_ISVALIDFLOATV(a); ASSERT_ISVALIDFLOATV(b); return PxU32(_mm_comige_ss(a, b)); } PX_FORCE_INLINE PxU32 FAllEq(const FloatV a, const FloatV b) { ASSERT_ISVALIDFLOATV(a); ASSERT_ISVALIDFLOATV(b); 
return PxU32(_mm_comieq_ss(a, b)); } PX_FORCE_INLINE FloatV FRound(const FloatV a) { ASSERT_ISVALIDFLOATV(a); #ifdef __SSE4_2__ return _mm_round_ps(a, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC); #else // return _mm_round_ps(a, 0x0); const FloatV half = FLoad(0.5f); const __m128 signBit = _mm_cvtepi32_ps(_mm_srli_epi32(_mm_cvtps_epi32(a), 31)); const FloatV aRound = FSub(FAdd(a, half), signBit); __m128i tmp = _mm_cvttps_epi32(aRound); return _mm_cvtepi32_ps(tmp); #endif } PX_FORCE_INLINE FloatV FSin(const FloatV a) { ASSERT_ISVALIDFLOATV(a); // Modulo the range of the given angles such that -XM_2PI <= Angles < XM_2PI const FloatV recipTwoPi = V4LoadA(g_PXReciprocalTwoPi.f); const FloatV twoPi = V4LoadA(g_PXTwoPi.f); const FloatV tmp = FMul(a, recipTwoPi); const FloatV b = FRound(tmp); const FloatV V1 = FNegScaleSub(twoPi, b, a); // sin(V) ~= V - V^3 / 3! + V^5 / 5! - V^7 / 7! + V^9 / 9! - V^11 / 11! + V^13 / 13! - // V^15 / 15! + V^17 / 17! - V^19 / 19! + V^21 / 21! - V^23 / 23! (for -PI <= V < PI) const FloatV V2 = FMul(V1, V1); const FloatV V3 = FMul(V2, V1); const FloatV V5 = FMul(V3, V2); const FloatV V7 = FMul(V5, V2); const FloatV V9 = FMul(V7, V2); const FloatV V11 = FMul(V9, V2); const FloatV V13 = FMul(V11, V2); const FloatV V15 = FMul(V13, V2); const FloatV V17 = FMul(V15, V2); const FloatV V19 = FMul(V17, V2); const FloatV V21 = FMul(V19, V2); const FloatV V23 = FMul(V21, V2); const Vec4V sinCoefficients0 = V4LoadA(g_PXSinCoefficients0.f); const Vec4V sinCoefficients1 = V4LoadA(g_PXSinCoefficients1.f); const Vec4V sinCoefficients2 = V4LoadA(g_PXSinCoefficients2.f); const FloatV S1 = V4GetY(sinCoefficients0); const FloatV S2 = V4GetZ(sinCoefficients0); const FloatV S3 = V4GetW(sinCoefficients0); const FloatV S4 = V4GetX(sinCoefficients1); const FloatV S5 = V4GetY(sinCoefficients1); const FloatV S6 = V4GetZ(sinCoefficients1); const FloatV S7 = V4GetW(sinCoefficients1); const FloatV S8 = V4GetX(sinCoefficients2); const FloatV S9 = 
V4GetY(sinCoefficients2); const FloatV S10 = V4GetZ(sinCoefficients2); const FloatV S11 = V4GetW(sinCoefficients2); FloatV Result; Result = FScaleAdd(S1, V3, V1); Result = FScaleAdd(S2, V5, Result); Result = FScaleAdd(S3, V7, Result); Result = FScaleAdd(S4, V9, Result); Result = FScaleAdd(S5, V11, Result); Result = FScaleAdd(S6, V13, Result); Result = FScaleAdd(S7, V15, Result); Result = FScaleAdd(S8, V17, Result); Result = FScaleAdd(S9, V19, Result); Result = FScaleAdd(S10, V21, Result); Result = FScaleAdd(S11, V23, Result); return Result; } PX_FORCE_INLINE FloatV FCos(const FloatV a) { ASSERT_ISVALIDFLOATV(a); // Modulo the range of the given angles such that -XM_2PI <= Angles < XM_2PI const FloatV recipTwoPi = V4LoadA(g_PXReciprocalTwoPi.f); const FloatV twoPi = V4LoadA(g_PXTwoPi.f); const FloatV tmp = FMul(a, recipTwoPi); const FloatV b = FRound(tmp); const FloatV V1 = FNegScaleSub(twoPi, b, a); // cos(V) ~= 1 - V^2 / 2! + V^4 / 4! - V^6 / 6! + V^8 / 8! - V^10 / 10! + V^12 / 12! - // V^14 / 14! + V^16 / 16! - V^18 / 18! + V^20 / 20! - V^22 / 22! 
(for -PI <= V < PI) const FloatV V2 = FMul(V1, V1); const FloatV V4 = FMul(V2, V2); const FloatV V6 = FMul(V4, V2); const FloatV V8 = FMul(V4, V4); const FloatV V10 = FMul(V6, V4); const FloatV V12 = FMul(V6, V6); const FloatV V14 = FMul(V8, V6); const FloatV V16 = FMul(V8, V8); const FloatV V18 = FMul(V10, V8); const FloatV V20 = FMul(V10, V10); const FloatV V22 = FMul(V12, V10); const Vec4V cosCoefficients0 = V4LoadA(g_PXCosCoefficients0.f); const Vec4V cosCoefficients1 = V4LoadA(g_PXCosCoefficients1.f); const Vec4V cosCoefficients2 = V4LoadA(g_PXCosCoefficients2.f); const FloatV C1 = V4GetY(cosCoefficients0); const FloatV C2 = V4GetZ(cosCoefficients0); const FloatV C3 = V4GetW(cosCoefficients0); const FloatV C4 = V4GetX(cosCoefficients1); const FloatV C5 = V4GetY(cosCoefficients1); const FloatV C6 = V4GetZ(cosCoefficients1); const FloatV C7 = V4GetW(cosCoefficients1); const FloatV C8 = V4GetX(cosCoefficients2); const FloatV C9 = V4GetY(cosCoefficients2); const FloatV C10 = V4GetZ(cosCoefficients2); const FloatV C11 = V4GetW(cosCoefficients2); FloatV Result; Result = FScaleAdd(C1, V2, V4One()); Result = FScaleAdd(C2, V4, Result); Result = FScaleAdd(C3, V6, Result); Result = FScaleAdd(C4, V8, Result); Result = FScaleAdd(C5, V10, Result); Result = FScaleAdd(C6, V12, Result); Result = FScaleAdd(C7, V14, Result); Result = FScaleAdd(C8, V16, Result); Result = FScaleAdd(C9, V18, Result); Result = FScaleAdd(C10, V20, Result); Result = FScaleAdd(C11, V22, Result); return Result; } PX_FORCE_INLINE PxU32 FOutOfBounds(const FloatV a, const FloatV min, const FloatV max) { ASSERT_ISVALIDFLOATV(a); ASSERT_ISVALIDFLOATV(min); ASSERT_ISVALIDFLOATV(max); const BoolV c = BOr(FIsGrtr(a, max), FIsGrtr(min, a)); return !BAllEqFFFF(c); } PX_FORCE_INLINE PxU32 FInBounds(const FloatV a, const FloatV min, const FloatV max) { ASSERT_ISVALIDFLOATV(a); ASSERT_ISVALIDFLOATV(min); ASSERT_ISVALIDFLOATV(max); const BoolV c = BAnd(FIsGrtrOrEq(a, min), FIsGrtrOrEq(max, a)); return BAllEqTTTT(c); 
}

// Scalar bounds test against a symmetric interval: non-zero when a lies outside [-bounds, bounds].
PX_FORCE_INLINE PxU32 FOutOfBounds(const FloatV a, const FloatV bounds)
{
	ASSERT_ISVALIDFLOATV(a);
	ASSERT_ISVALIDFLOATV(bounds);
	// Delegate to the (min, max) overload with min = -bounds, max = bounds.
	return FOutOfBounds(a, FNeg(bounds), bounds);
}

// Scalar bounds test: non-zero when -bounds <= a <= bounds.
PX_FORCE_INLINE PxU32 FInBounds(const FloatV a, const FloatV bounds)
{
	ASSERT_ISVALIDFLOATV(a);
	ASSERT_ISVALIDFLOATV(bounds);
	return FInBounds(a, FNeg(bounds), bounds);
}

//////////////////////////////////
// VEC3V
//////////////////////////////////

// Broadcast the scalar register f into x/y/z and force the w lane to zero (Vec3V invariant).
PX_FORCE_INLINE Vec3V V3Splat(const FloatV f)
{
	ASSERT_ISVALIDFLOATV(f);
	const __m128 zero = FZero();
	// fff0 holds (0, f, f, f) in lane order; the reversing shuffle produces (f, f, f, 0).
	const __m128 fff0 = _mm_move_ss(f, zero);
	return _mm_shuffle_ps(fff0, fff0, _MM_SHUFFLE(0, 1, 2, 3));
}

// Build a Vec3V (x, y, z, 0) from three scalar registers.
PX_FORCE_INLINE Vec3V V3Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z)
{
	ASSERT_ISVALIDFLOATV(x);
	ASSERT_ISVALIDFLOATV(y);
	ASSERT_ISVALIDFLOATV(z);
	// static on zero causes compiler crash on x64 debug_opt
	const __m128 zero = FZero();
	const __m128 xy = _mm_move_ss(x, y);    // lanes: (y, x, x, x)
	const __m128 z0 = _mm_move_ss(zero, z); // lanes: (z, 0, 0, 0)
	// Final shuffle assembles (x, y, z, 0).
	return _mm_shuffle_ps(xy, z0, _MM_SHUFFLE(1, 0, 0, 1));
}

// Unit axis vectors with a zero w lane.
PX_FORCE_INLINE Vec3V V3UnitX()
{
	const PX_ALIGN(16, PxF32) x[4] = { 1.0f, 0.0f, 0.0f, 0.0f };
	const __m128 x128 = _mm_load_ps(x);
	return x128;
}

PX_FORCE_INLINE Vec3V V3UnitY()
{
	const PX_ALIGN(16, PxF32) y[4] = { 0.0f, 1.0f, 0.0f, 0.0f };
	const __m128 y128 = _mm_load_ps(y);
	return y128;
}

PX_FORCE_INLINE Vec3V V3UnitZ()
{
	const PX_ALIGN(16, PxF32) z[4] = { 0.0f, 0.0f, 1.0f, 0.0f };
	const __m128 z128 = _mm_load_ps(z);
	return z128;
}

// Extract a component, broadcast across all four lanes (FloatV invariant).
PX_FORCE_INLINE FloatV V3GetX(const Vec3V f)
{
	ASSERT_ISVALIDVEC3V(f);
	return _mm_shuffle_ps(f, f, _MM_SHUFFLE(0, 0, 0, 0));
}

PX_FORCE_INLINE FloatV V3GetY(const Vec3V f)
{
	ASSERT_ISVALIDVEC3V(f);
	return _mm_shuffle_ps(f, f, _MM_SHUFFLE(1, 1, 1, 1));
}

PX_FORCE_INLINE FloatV V3GetZ(const Vec3V f)
{
	ASSERT_ISVALIDVEC3V(f);
	return _mm_shuffle_ps(f, f, _MM_SHUFFLE(2, 2, 2, 2));
}

// Replace the x component of v with f: the false lane of BFTTT() selects f in lane 0.
PX_FORCE_INLINE Vec3V V3SetX(const Vec3V v, const FloatV f)
{
	ASSERT_ISVALIDVEC3V(v);
	ASSERT_ISVALIDFLOATV(f);
	return V4Sel(BFTTT(), v, f);
}

PX_FORCE_INLINE
Vec3V V3SetY(const Vec3V v, const FloatV f) { ASSERT_ISVALIDVEC3V(v); ASSERT_ISVALIDFLOATV(f); return V4Sel(BTFTT(), v, f); } PX_FORCE_INLINE Vec3V V3SetZ(const Vec3V v, const FloatV f) { ASSERT_ISVALIDVEC3V(v); ASSERT_ISVALIDFLOATV(f); return V4Sel(BTTFT(), v, f); } PX_FORCE_INLINE Vec3V V3ColX(const Vec3V a, const Vec3V b, const Vec3V c) { ASSERT_ISVALIDVEC3V(a); ASSERT_ISVALIDVEC3V(b); ASSERT_ISVALIDVEC3V(c); Vec3V r = _mm_shuffle_ps(a, c, _MM_SHUFFLE(3, 0, 3, 0)); return V3SetY(r, V3GetX(b)); } PX_FORCE_INLINE Vec3V V3ColY(const Vec3V a, const Vec3V b, const Vec3V c) { ASSERT_ISVALIDVEC3V(a); ASSERT_ISVALIDVEC3V(b); ASSERT_ISVALIDVEC3V(c); Vec3V r = _mm_shuffle_ps(a, c, _MM_SHUFFLE(3, 1, 3, 1)); return V3SetY(r, V3GetY(b)); } PX_FORCE_INLINE Vec3V V3ColZ(const Vec3V a, const Vec3V b, const Vec3V c) { ASSERT_ISVALIDVEC3V(a); ASSERT_ISVALIDVEC3V(b); ASSERT_ISVALIDVEC3V(c); Vec3V r = _mm_shuffle_ps(a, c, _MM_SHUFFLE(3, 2, 3, 2)); return V3SetY(r, V3GetZ(b)); } PX_FORCE_INLINE Vec3V V3Zero() { return V3Load(0.0f); } PX_FORCE_INLINE Vec3V V3Eps() { return V3Load(PX_EPS_REAL); } PX_FORCE_INLINE Vec3V V3One() { return V3Load(1.0f); } PX_FORCE_INLINE Vec3V V3Neg(const Vec3V f) { ASSERT_ISVALIDVEC3V(f); return _mm_sub_ps(_mm_setzero_ps(), f); } PX_FORCE_INLINE Vec3V V3Add(const Vec3V a, const Vec3V b) { ASSERT_ISVALIDVEC3V(a); ASSERT_ISVALIDVEC3V(b); return _mm_add_ps(a, b); } PX_FORCE_INLINE Vec3V V3Sub(const Vec3V a, const Vec3V b) { ASSERT_ISVALIDVEC3V(a); ASSERT_ISVALIDVEC3V(b); return _mm_sub_ps(a, b); } PX_FORCE_INLINE Vec3V V3Scale(const Vec3V a, const FloatV b) { ASSERT_ISVALIDVEC3V(a); ASSERT_ISVALIDFLOATV(b); return _mm_mul_ps(a, b); } PX_FORCE_INLINE Vec3V V3Mul(const Vec3V a, const Vec3V b) { ASSERT_ISVALIDVEC3V(a); ASSERT_ISVALIDVEC3V(b); return _mm_mul_ps(a, b); } PX_FORCE_INLINE Vec3V V3ScaleInv(const Vec3V a, const FloatV b) { ASSERT_ISVALIDVEC3V(a); ASSERT_ISVALIDFLOATV(b); return _mm_div_ps(a, b); } PX_FORCE_INLINE Vec3V V3Div(const Vec3V a, const 
Vec3V b) { ASSERT_ISVALIDVEC3V(a); ASSERT_ISVALIDVEC3V(b); return V4ClearW(_mm_div_ps(a, b)); } PX_FORCE_INLINE Vec3V V3ScaleInvFast(const Vec3V a, const FloatV b) { ASSERT_ISVALIDVEC3V(a); ASSERT_ISVALIDFLOATV(b); return _mm_mul_ps(a, _mm_rcp_ps(b)); } PX_FORCE_INLINE Vec3V V3DivFast(const Vec3V a, const Vec3V b) { ASSERT_ISVALIDVEC3V(a); ASSERT_ISVALIDVEC3V(b); return V4ClearW(_mm_mul_ps(a, _mm_rcp_ps(b))); } PX_FORCE_INLINE Vec3V V3Recip(const Vec3V a) { ASSERT_ISVALIDVEC3V(a); const __m128 zero = V3Zero(); const __m128 tttf = BTTTF(); const __m128 recipA = _mm_div_ps(V3One(), a); return V4Sel(tttf, recipA, zero); } PX_FORCE_INLINE Vec3V V3RecipFast(const Vec3V a) { ASSERT_ISVALIDVEC3V(a); const __m128 zero = V3Zero(); const __m128 tttf = BTTTF(); const __m128 recipA = _mm_rcp_ps(a); return V4Sel(tttf, recipA, zero); } PX_FORCE_INLINE Vec3V V3Rsqrt(const Vec3V a) { ASSERT_ISVALIDVEC3V(a); const __m128 zero = V3Zero(); const __m128 tttf = BTTTF(); const __m128 recipA = _mm_div_ps(V3One(), _mm_sqrt_ps(a)); return V4Sel(tttf, recipA, zero); } PX_FORCE_INLINE Vec3V V3RsqrtFast(const Vec3V a) { ASSERT_ISVALIDVEC3V(a); const __m128 zero = V3Zero(); const __m128 tttf = BTTTF(); const __m128 recipA = _mm_rsqrt_ps(a); return V4Sel(tttf, recipA, zero); } PX_FORCE_INLINE Vec3V V3ScaleAdd(const Vec3V a, const FloatV b, const Vec3V c) { ASSERT_ISVALIDVEC3V(a); ASSERT_ISVALIDFLOATV(b); ASSERT_ISVALIDVEC3V(c); return V3Add(V3Scale(a, b), c); } PX_FORCE_INLINE Vec3V V3NegScaleSub(const Vec3V a, const FloatV b, const Vec3V c) { ASSERT_ISVALIDVEC3V(a); ASSERT_ISVALIDFLOATV(b); ASSERT_ISVALIDVEC3V(c); return V3Sub(c, V3Scale(a, b)); } PX_FORCE_INLINE Vec3V V3MulAdd(const Vec3V a, const Vec3V b, const Vec3V c) { ASSERT_ISVALIDVEC3V(a); ASSERT_ISVALIDVEC3V(b); ASSERT_ISVALIDVEC3V(c); return V3Add(V3Mul(a, b), c); } PX_FORCE_INLINE Vec3V V3NegMulSub(const Vec3V a, const Vec3V b, const Vec3V c) { ASSERT_ISVALIDVEC3V(a); ASSERT_ISVALIDVEC3V(b); ASSERT_ISVALIDVEC3V(c); return V3Sub(c, 
V3Mul(a, b)); } PX_FORCE_INLINE Vec3V V3Abs(const Vec3V a) { ASSERT_ISVALIDVEC3V(a); return V3Max(a, V3Neg(a)); } PX_FORCE_INLINE FloatV V3Dot(const Vec3V a, const Vec3V b) { ASSERT_ISVALIDVEC3V(a); ASSERT_ISVALIDVEC3V(b); #ifdef __SSE4_2__ return _mm_dp_ps(a, b, 0x7f); #else const __m128 t0 = _mm_mul_ps(a, b); // aw*bw | az*bz | ay*by | ax*bx const __m128 t1 = _mm_shuffle_ps(t0, t0, _MM_SHUFFLE(1,0,3,2)); // ay*by | ax*bx | aw*bw | az*bz const __m128 t2 = _mm_add_ps(t0, t1); // ay*by + aw*bw | ax*bx + az*bz | aw*bw + ay*by | az*bz + ax*bx const __m128 t3 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(2,3,0,1)); // ax*bx + az*bz | ay*by + aw*bw | az*bz + ax*bx | aw*bw + ay*by return _mm_add_ps(t3, t2); // ax*bx + az*bz + ay*by + aw*bw // ay*by + aw*bw + ax*bx + az*bz // az*bz + ax*bx + aw*bw + ay*by // aw*bw + ay*by + az*bz + ax*bx #endif } PX_FORCE_INLINE Vec3V V3Cross(const Vec3V a, const Vec3V b) { ASSERT_ISVALIDVEC3V(a); ASSERT_ISVALIDVEC3V(b); const __m128 r1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 0, 2)); // z,x,y,w const __m128 r2 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1)); // y,z,x,w const __m128 l1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1)); // y,z,x,w const __m128 l2 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 1, 0, 2)); // z,x,y,w return _mm_sub_ps(_mm_mul_ps(l1, l2), _mm_mul_ps(r1, r2)); } PX_FORCE_INLINE VecCrossV V3PrepareCross(const Vec3V a) { ASSERT_ISVALIDVEC3V(a); VecCrossV v; v.mR1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 0, 2)); // z,x,y,w v.mL1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1)); // y,z,x,w return v; } PX_FORCE_INLINE Vec3V V3Cross(const VecCrossV& a, const Vec3V b) { ASSERT_ISVALIDVEC3V(b); const __m128 r2 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1)); // y,z,x,w const __m128 l2 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 1, 0, 2)); // z,x,y,w return _mm_sub_ps(_mm_mul_ps(a.mL1, l2), _mm_mul_ps(a.mR1, r2)); } PX_FORCE_INLINE Vec3V V3Cross(const Vec3V a, const VecCrossV& b) { ASSERT_ISVALIDVEC3V(a); const __m128 r2 = _mm_shuffle_ps(a, a, 
_MM_SHUFFLE(3, 0, 2, 1)); // y,z,x,w
	const __m128 l2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 0, 2)); // z,x,y,w
	// Operand roles are swapped relative to the (VecCrossV&, Vec3V) overload so that
	// the pre-shuffled b still yields a x b (and not b x a).
	return _mm_sub_ps(_mm_mul_ps(b.mR1, r2), _mm_mul_ps(b.mL1, l2));
}

// Cross product of two pre-shuffled (V3PrepareCross) operands.
PX_FORCE_INLINE Vec3V V3Cross(const VecCrossV& a, const VecCrossV& b)
{
	return _mm_sub_ps(_mm_mul_ps(a.mL1, b.mR1), _mm_mul_ps(a.mR1, b.mL1));
}

// Euclidean length |a|, broadcast to all lanes.
PX_FORCE_INLINE FloatV V3Length(const Vec3V a)
{
	ASSERT_ISVALIDVEC3V(a);
	return _mm_sqrt_ps(V3Dot(a, a));
}

// Squared length; avoids the sqrt.
PX_FORCE_INLINE FloatV V3LengthSq(const Vec3V a)
{
	ASSERT_ISVALIDVEC3V(a);
	return V3Dot(a, a);
}

// Full-precision normalize; asserts the length is finite (degenerate input is a bug here).
PX_FORCE_INLINE Vec3V V3Normalize(const Vec3V a)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISFINITELENGTH(a);
	return V3ScaleInv(a, _mm_sqrt_ps(V3Dot(a, a)));
}

// Approximate normalize using the rsqrt estimate instruction (faster, lower precision).
PX_FORCE_INLINE Vec3V V3NormalizeFast(const Vec3V a)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISFINITELENGTH(a);
	return V3Scale(a, _mm_rsqrt_ps(V3Dot(a, a)));
}

// Normalize a, or return unsafeReturnValue when |a| <= epsilon (near-zero input).
PX_FORCE_INLINE Vec3V V3NormalizeSafe(const Vec3V a, const Vec3V unsafeReturnValue)
{
	ASSERT_ISVALIDVEC3V(a);
	const __m128 eps = V4Eps();
	const __m128 length = V3Length(a);
	const __m128 isGreaterThanZero = FIsGrtr(length, eps);
	return V3Sel(isGreaterThanZero, V3ScaleInv(a, length), unsafeReturnValue);
}

// Lane-wise select: c ? a : b (c is a per-lane all-ones/all-zeros mask).
PX_FORCE_INLINE Vec3V V3Sel(const BoolV c, const Vec3V a, const Vec3V b)
{
	ASSERT_ISVALIDVEC3V(_mm_or_ps(_mm_andnot_ps(c, b), _mm_and_ps(c, a)));
	return _mm_or_ps(_mm_andnot_ps(c, b), _mm_and_ps(c, a));
}

// Lane-wise comparisons; each lane of the result is an all-ones/all-zeros mask.
PX_FORCE_INLINE BoolV V3IsGrtr(const Vec3V a, const Vec3V b)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(b);
	return _mm_cmpgt_ps(a, b);
}

PX_FORCE_INLINE BoolV V3IsGrtrOrEq(const Vec3V a, const Vec3V b)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(b);
	return _mm_cmpge_ps(a, b);
}

PX_FORCE_INLINE BoolV V3IsEq(const Vec3V a, const Vec3V b)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(b);
	return _mm_cmpeq_ps(a, b);
}

// Lane-wise maximum / minimum.
PX_FORCE_INLINE Vec3V V3Max(const Vec3V a, const Vec3V b)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(b);
	return _mm_max_ps(a, b);
}

PX_FORCE_INLINE Vec3V V3Min(const Vec3V a, const Vec3V b)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(b);
	return _mm_min_ps(a, b);
}

// Horizontal max of the x, y, z components, broadcast to all lanes.
PX_FORCE_INLINE FloatV V3ExtractMax(const Vec3V a)
{
	ASSERT_ISVALIDVEC3V(a);
	const __m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 0, 0, 0));
	const __m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1, 1, 1, 1));
	const __m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 2, 2, 2));
	return _mm_max_ps(_mm_max_ps(shuf1, shuf2), shuf3);
}

// Horizontal min of the x, y, z components, broadcast to all lanes.
PX_FORCE_INLINE FloatV V3ExtractMin(const Vec3V a)
{
	ASSERT_ISVALIDVEC3V(a);
	const __m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 0, 0, 0));
	const __m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1, 1, 1, 1));
	const __m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 2, 2, 2));
	return _mm_min_ps(_mm_min_ps(shuf1, shuf2), shuf3);
}

// return (a >= 0.0f) ? 1.0f : -1.0f;
PX_FORCE_INLINE Vec3V V3Sign(const Vec3V a)
{
	ASSERT_ISVALIDVEC3V(a);
	const __m128 zero = V3Zero();
	const __m128 one = V3One();
	const __m128 none = V3Neg(one);
	return V3Sel(V3IsGrtrOrEq(a, zero), one, none);
}

// Lane-wise clamp of a to [minV, maxV].
PX_FORCE_INLINE Vec3V V3Clamp(const Vec3V a, const Vec3V minV, const Vec3V maxV)
{
	ASSERT_ISVALIDVEC3V(maxV);
	ASSERT_ISVALIDVEC3V(minV);
	return V3Max(V3Min(a, maxV), minV);
}

// Reduce the x/y/z comparison lanes to a single 0/1 result (w lane ignored by BAllTrue3_R).
PX_FORCE_INLINE PxU32 V3AllGrtr(const Vec3V a, const Vec3V b)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(b);
	return internalUnitSSE2Simd::BAllTrue3_R(V4IsGrtr(a, b));
}

PX_FORCE_INLINE PxU32 V3AllGrtrOrEq(const Vec3V a, const Vec3V b)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(b);
	return internalUnitSSE2Simd::BAllTrue3_R(V4IsGrtrOrEq(a, b));
}

PX_FORCE_INLINE PxU32 V3AllEq(const Vec3V a, const Vec3V b)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(b);
	return internalUnitSSE2Simd::BAllTrue3_R(V4IsEq(a, b));
}

// Round each lane to the nearest integer: single instruction on SSE4.1+,
// otherwise emulated via add-half / truncate with a sign correction.
PX_FORCE_INLINE Vec3V V3Round(const Vec3V a)
{
	ASSERT_ISVALIDVEC3V(a);
#ifdef __SSE4_2__
	return _mm_round_ps(a, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
#else
	// return _mm_round_ps(a, 0x0);
	const Vec3V half = V3Load(0.5f);
	// signBit is 1.0f in lanes whose rounded value is negative (sign bit of the
	// converted integer shifted down), 0.0f elsewhere.
	const __m128 signBit = _mm_cvtepi32_ps(_mm_srli_epi32(_mm_cvtps_epi32(a), 31));
	const Vec3V aRound =
V3Sub(V3Add(a, half), signBit); __m128i tmp = _mm_cvttps_epi32(aRound); return _mm_cvtepi32_ps(tmp); #endif } PX_FORCE_INLINE Vec3V V3Sin(const Vec3V a) { ASSERT_ISVALIDVEC3V(a); // Modulo the range of the given angles such that -XM_2PI <= Angles < XM_2PI const Vec4V recipTwoPi = V4LoadA(g_PXReciprocalTwoPi.f); const Vec4V twoPi = V4LoadA(g_PXTwoPi.f); const Vec3V tmp = V3Scale(a, recipTwoPi); const Vec3V b = V3Round(tmp); const Vec3V V1 = V3NegScaleSub(b, twoPi, a); // sin(V) ~= V - V^3 / 3! + V^5 / 5! - V^7 / 7! + V^9 / 9! - V^11 / 11! + V^13 / 13! - // V^15 / 15! + V^17 / 17! - V^19 / 19! + V^21 / 21! - V^23 / 23! (for -PI <= V < PI) const Vec3V V2 = V3Mul(V1, V1); const Vec3V V3 = V3Mul(V2, V1); const Vec3V V5 = V3Mul(V3, V2); const Vec3V V7 = V3Mul(V5, V2); const Vec3V V9 = V3Mul(V7, V2); const Vec3V V11 = V3Mul(V9, V2); const Vec3V V13 = V3Mul(V11, V2); const Vec3V V15 = V3Mul(V13, V2); const Vec3V V17 = V3Mul(V15, V2); const Vec3V V19 = V3Mul(V17, V2); const Vec3V V21 = V3Mul(V19, V2); const Vec3V V23 = V3Mul(V21, V2); const Vec4V sinCoefficients0 = V4LoadA(g_PXSinCoefficients0.f); const Vec4V sinCoefficients1 = V4LoadA(g_PXSinCoefficients1.f); const Vec4V sinCoefficients2 = V4LoadA(g_PXSinCoefficients2.f); const FloatV S1 = V4GetY(sinCoefficients0); const FloatV S2 = V4GetZ(sinCoefficients0); const FloatV S3 = V4GetW(sinCoefficients0); const FloatV S4 = V4GetX(sinCoefficients1); const FloatV S5 = V4GetY(sinCoefficients1); const FloatV S6 = V4GetZ(sinCoefficients1); const FloatV S7 = V4GetW(sinCoefficients1); const FloatV S8 = V4GetX(sinCoefficients2); const FloatV S9 = V4GetY(sinCoefficients2); const FloatV S10 = V4GetZ(sinCoefficients2); const FloatV S11 = V4GetW(sinCoefficients2); Vec3V Result; Result = V3ScaleAdd(V3, S1, V1); Result = V3ScaleAdd(V5, S2, Result); Result = V3ScaleAdd(V7, S3, Result); Result = V3ScaleAdd(V9, S4, Result); Result = V3ScaleAdd(V11, S5, Result); Result = V3ScaleAdd(V13, S6, Result); Result = V3ScaleAdd(V15, S7, Result); Result 
= V3ScaleAdd(V17, S8, Result); Result = V3ScaleAdd(V19, S9, Result); Result = V3ScaleAdd(V21, S10, Result); Result = V3ScaleAdd(V23, S11, Result); ASSERT_ISVALIDVEC3V(Result); return Result; } PX_FORCE_INLINE Vec3V V3Cos(const Vec3V a) { ASSERT_ISVALIDVEC3V(a); // Modulo the range of the given angles such that -XM_2PI <= Angles < XM_2PI const Vec4V recipTwoPi = V4LoadA(g_PXReciprocalTwoPi.f); const Vec4V twoPi = V4LoadA(g_PXTwoPi.f); const Vec3V tmp = V3Scale(a, recipTwoPi); const Vec3V b = V3Round(tmp); const Vec3V V1 = V3NegScaleSub(b, twoPi, a); // cos(V) ~= 1 - V^2 / 2! + V^4 / 4! - V^6 / 6! + V^8 / 8! - V^10 / 10! + V^12 / 12! - // V^14 / 14! + V^16 / 16! - V^18 / 18! + V^20 / 20! - V^22 / 22! (for -PI <= V < PI) const Vec3V V2 = V3Mul(V1, V1); const Vec3V V4 = V3Mul(V2, V2); const Vec3V V6 = V3Mul(V4, V2); const Vec3V V8 = V3Mul(V4, V4); const Vec3V V10 = V3Mul(V6, V4); const Vec3V V12 = V3Mul(V6, V6); const Vec3V V14 = V3Mul(V8, V6); const Vec3V V16 = V3Mul(V8, V8); const Vec3V V18 = V3Mul(V10, V8); const Vec3V V20 = V3Mul(V10, V10); const Vec3V V22 = V3Mul(V12, V10); const Vec4V cosCoefficients0 = V4LoadA(g_PXCosCoefficients0.f); const Vec4V cosCoefficients1 = V4LoadA(g_PXCosCoefficients1.f); const Vec4V cosCoefficients2 = V4LoadA(g_PXCosCoefficients2.f); const FloatV C1 = V4GetY(cosCoefficients0); const FloatV C2 = V4GetZ(cosCoefficients0); const FloatV C3 = V4GetW(cosCoefficients0); const FloatV C4 = V4GetX(cosCoefficients1); const FloatV C5 = V4GetY(cosCoefficients1); const FloatV C6 = V4GetZ(cosCoefficients1); const FloatV C7 = V4GetW(cosCoefficients1); const FloatV C8 = V4GetX(cosCoefficients2); const FloatV C9 = V4GetY(cosCoefficients2); const FloatV C10 = V4GetZ(cosCoefficients2); const FloatV C11 = V4GetW(cosCoefficients2); Vec3V Result; Result = V3ScaleAdd(V2, C1, V3One()); Result = V3ScaleAdd(V4, C2, Result); Result = V3ScaleAdd(V6, C3, Result); Result = V3ScaleAdd(V8, C4, Result); Result = V3ScaleAdd(V10, C5, Result); Result = V3ScaleAdd(V12, C6, 
Result); Result = V3ScaleAdd(V14, C7, Result); Result = V3ScaleAdd(V16, C8, Result); Result = V3ScaleAdd(V18, C9, Result); Result = V3ScaleAdd(V20, C10, Result); Result = V3ScaleAdd(V22, C11, Result); ASSERT_ISVALIDVEC3V(Result); return Result; } PX_FORCE_INLINE Vec3V V3PermYZZ(const Vec3V a) { ASSERT_ISVALIDVEC3V(a); return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 2, 2, 1)); } PX_FORCE_INLINE Vec3V V3PermXYX(const Vec3V a) { ASSERT_ISVALIDVEC3V(a); return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 1, 0)); } PX_FORCE_INLINE Vec3V V3PermYZX(const Vec3V a) { ASSERT_ISVALIDVEC3V(a); return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1)); } PX_FORCE_INLINE Vec3V V3PermZXY(const Vec3V a) { ASSERT_ISVALIDVEC3V(a); return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 0, 2)); } PX_FORCE_INLINE Vec3V V3PermZZY(const Vec3V a) { ASSERT_ISVALIDVEC3V(a); return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 2, 2)); } PX_FORCE_INLINE Vec3V V3PermYXX(const Vec3V a) { ASSERT_ISVALIDVEC3V(a); return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 0, 1)); } PX_FORCE_INLINE Vec3V V3Perm_Zero_1Z_0Y(const Vec3V v0, const Vec3V v1) { ASSERT_ISVALIDVEC3V(v0); ASSERT_ISVALIDVEC3V(v1); return _mm_shuffle_ps(v1, v0, _MM_SHUFFLE(3, 1, 2, 3)); } PX_FORCE_INLINE Vec3V V3Perm_0Z_Zero_1X(const Vec3V v0, const Vec3V v1) { ASSERT_ISVALIDVEC3V(v0); ASSERT_ISVALIDVEC3V(v1); return _mm_shuffle_ps(v0, v1, _MM_SHUFFLE(3, 0, 3, 2)); } PX_FORCE_INLINE Vec3V V3Perm_1Y_0X_Zero(const Vec3V v0, const Vec3V v1) { ASSERT_ISVALIDVEC3V(v0); ASSERT_ISVALIDVEC3V(v1); // There must be a better way to do this. 
	Vec3V v2 = V3Zero();
	FloatV y1 = V3GetY(v1);
	FloatV x0 = V3GetX(v0);
	// Assemble (v1.y, v0.x, 0, 0) one component at a time.
	v2 = V3SetX(v2, y1);
	return V3SetY(v2, x0);
}

// Sum of the x, y and z components, broadcast to all lanes (relies on w == 0).
PX_FORCE_INLINE FloatV V3SumElems(const Vec3V a)
{
	ASSERT_ISVALIDVEC3V(a);
#ifdef __SSE4_2__
	Vec3V r = _mm_hadd_ps(a, a);
	r = _mm_hadd_ps(r, r);
	return r;
#else
	__m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 0, 0, 0)); // z,y,x,w
	__m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1, 1, 1, 1)); // y,x,w,z
	__m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 2, 2, 2)); // x,w,z,y
	return _mm_add_ps(_mm_add_ps(shuf1, shuf2), shuf3);
#endif
}

// Component-wise bounds tests: out of bounds when ANY of x/y/z falls outside
// [min, max]; in bounds only when ALL of them are inside.
PX_FORCE_INLINE PxU32 V3OutOfBounds(const Vec3V a, const Vec3V min, const Vec3V max)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(min);
	ASSERT_ISVALIDVEC3V(max);
	const BoolV c = BOr(V3IsGrtr(a, max), V3IsGrtr(min, a));
	return !BAllEqFFFF(c);
}

PX_FORCE_INLINE PxU32 V3InBounds(const Vec3V a, const Vec3V min, const Vec3V max)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(min);
	ASSERT_ISVALIDVEC3V(max);
	const BoolV c = BAnd(V3IsGrtrOrEq(a, min), V3IsGrtrOrEq(max, a));
	return BAllEqTTTT(c);
}

// Symmetric-range variants: test against [-bounds, bounds].
PX_FORCE_INLINE PxU32 V3OutOfBounds(const Vec3V a, const Vec3V bounds)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(bounds);
	return V3OutOfBounds(a, V3Neg(bounds), bounds);
}

PX_FORCE_INLINE PxU32 V3InBounds(const Vec3V a, const Vec3V bounds)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(bounds);
	return V3InBounds(a, V3Neg(bounds), bounds);
}

// In-place 3x3 transpose of three column vectors; the implicit fourth column is
// zero, which keeps the w lane of each result zero (Vec3V invariant).
PX_FORCE_INLINE void V3Transpose(Vec3V& col0, Vec3V& col1, Vec3V& col2)
{
	ASSERT_ISVALIDVEC3V(col0);
	ASSERT_ISVALIDVEC3V(col1);
	ASSERT_ISVALIDVEC3V(col2);
	const Vec3V col3 = _mm_setzero_ps();
	Vec3V tmp0 = _mm_unpacklo_ps(col0, col1);
	Vec3V tmp2 = _mm_unpacklo_ps(col2, col3);
	Vec3V tmp1 = _mm_unpackhi_ps(col0, col1);
	Vec3V tmp3 = _mm_unpackhi_ps(col2, col3);
	col0 = _mm_movelh_ps(tmp0, tmp2);
	col1 = _mm_movehl_ps(tmp2, tmp0);
	col2 = _mm_movelh_ps(tmp1, tmp3);
}

//////////////////////////////////
// VEC4V
//////////////////////////////////

// Broadcast a scalar register to a Vec4V; a FloatV already holds the value in
// all four lanes, so no shuffle is needed.
PX_FORCE_INLINE Vec4V V4Splat(const FloatV f)
{
ASSERT_ISVALIDFLOATV(f); // return _mm_shuffle_ps(f, f, _MM_SHUFFLE(0,0,0,0)); return f; } PX_FORCE_INLINE Vec4V V4Merge(const FloatV* const floatVArray) { ASSERT_ISVALIDFLOATV(floatVArray[0]); ASSERT_ISVALIDFLOATV(floatVArray[1]); ASSERT_ISVALIDFLOATV(floatVArray[2]); ASSERT_ISVALIDFLOATV(floatVArray[3]); const __m128 xw = _mm_move_ss(floatVArray[1], floatVArray[0]); // y, y, y, x const __m128 yz = _mm_move_ss(floatVArray[2], floatVArray[3]); // z, z, z, w return _mm_shuffle_ps(xw, yz, _MM_SHUFFLE(0, 2, 1, 0)); } PX_FORCE_INLINE Vec4V V4Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z, const FloatVArg w) { ASSERT_ISVALIDFLOATV(x); ASSERT_ISVALIDFLOATV(y); ASSERT_ISVALIDFLOATV(z); ASSERT_ISVALIDFLOATV(w); const __m128 xw = _mm_move_ss(y, x); // y, y, y, x const __m128 yz = _mm_move_ss(z, w); // z, z, z, w return _mm_shuffle_ps(xw, yz, _MM_SHUFFLE(0, 2, 1, 0)); } PX_FORCE_INLINE Vec4V V4MergeW(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w) { const Vec4V xz = _mm_unpackhi_ps(x, z); const Vec4V yw = _mm_unpackhi_ps(y, w); return _mm_unpackhi_ps(xz, yw); } PX_FORCE_INLINE Vec4V V4MergeZ(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w) { const Vec4V xz = _mm_unpackhi_ps(x, z); const Vec4V yw = _mm_unpackhi_ps(y, w); return _mm_unpacklo_ps(xz, yw); } PX_FORCE_INLINE Vec4V V4MergeY(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w) { const Vec4V xz = _mm_unpacklo_ps(x, z); const Vec4V yw = _mm_unpacklo_ps(y, w); return _mm_unpackhi_ps(xz, yw); } PX_FORCE_INLINE Vec4V V4MergeX(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w) { const Vec4V xz = _mm_unpacklo_ps(x, z); const Vec4V yw = _mm_unpacklo_ps(y, w); return _mm_unpacklo_ps(xz, yw); } PX_FORCE_INLINE Vec4V V4UnpackXY(const Vec4VArg a, const Vec4VArg b) { return _mm_unpacklo_ps(a, b); } PX_FORCE_INLINE Vec4V V4UnpackZW(const Vec4VArg a, const Vec4VArg b) { return _mm_unpackhi_ps(a, b); } PX_FORCE_INLINE Vec4V 
V4UnitW()
{
	const PX_ALIGN(16, PxF32) w[4] = { 0.0f, 0.0f, 0.0f, 1.0f };
	const __m128 w128 = _mm_load_ps(w);
	return w128;
}

// Unit axis vectors.
PX_FORCE_INLINE Vec4V V4UnitX()
{
	const PX_ALIGN(16, PxF32) x[4] = { 1.0f, 0.0f, 0.0f, 0.0f };
	const __m128 x128 = _mm_load_ps(x);
	return x128;
}

PX_FORCE_INLINE Vec4V V4UnitY()
{
	const PX_ALIGN(16, PxF32) y[4] = { 0.0f, 1.0f, 0.0f, 0.0f };
	const __m128 y128 = _mm_load_ps(y);
	return y128;
}

PX_FORCE_INLINE Vec4V V4UnitZ()
{
	const PX_ALIGN(16, PxF32) z[4] = { 0.0f, 0.0f, 1.0f, 0.0f };
	const __m128 z128 = _mm_load_ps(z);
	return z128;
}

// Extract a component, broadcast across all four lanes (FloatV invariant).
PX_FORCE_INLINE FloatV V4GetW(const Vec4V f)
{
	return _mm_shuffle_ps(f, f, _MM_SHUFFLE(3, 3, 3, 3));
}

PX_FORCE_INLINE FloatV V4GetX(const Vec4V f)
{
	return _mm_shuffle_ps(f, f, _MM_SHUFFLE(0, 0, 0, 0));
}

PX_FORCE_INLINE FloatV V4GetY(const Vec4V f)
{
	return _mm_shuffle_ps(f, f, _MM_SHUFFLE(1, 1, 1, 1));
}

PX_FORCE_INLINE FloatV V4GetZ(const Vec4V f)
{
	return _mm_shuffle_ps(f, f, _MM_SHUFFLE(2, 2, 2, 2));
}

// Replace one component of v with f: the single false lane of the mask selects f there.
PX_FORCE_INLINE Vec4V V4SetW(const Vec4V v, const FloatV f)
{
	ASSERT_ISVALIDFLOATV(f);
	return V4Sel(BTTTF(), v, f);
}

PX_FORCE_INLINE Vec4V V4SetX(const Vec4V v, const FloatV f)
{
	ASSERT_ISVALIDFLOATV(f);
	return V4Sel(BFTTT(), v, f);
}

PX_FORCE_INLINE Vec4V V4SetY(const Vec4V v, const FloatV f)
{
	ASSERT_ISVALIDFLOATV(f);
	return V4Sel(BTFTT(), v, f);
}

PX_FORCE_INLINE Vec4V V4SetZ(const Vec4V v, const FloatV f)
{
	ASSERT_ISVALIDFLOATV(f);
	return V4Sel(BTTFT(), v, f);
}

// Zero the w lane, keeping x/y/z (bitwise AND with an xyz-only mask).
PX_FORCE_INLINE Vec4V V4ClearW(const Vec4V v)
{
#if !PX_EMSCRIPTEN
	return _mm_and_ps(v, V4LoadA(internalUnitSSE2Simd::gMaskXYZ))
;
#else
	return _mm_and_ps(v, (VecI32V&)internalUnitSSE2Simd::gMaskXYZ);
#endif
}

// Fixed permutations; the name gives the resulting lane order.
PX_FORCE_INLINE Vec4V V4PermYXWZ(const Vec4V a)
{
	return _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 3, 0, 1));
}

PX_FORCE_INLINE Vec4V V4PermXZXZ(const Vec4V a)
{
	return _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 0, 2, 0));
}

PX_FORCE_INLINE Vec4V V4PermYWYW(const Vec4V a)
{
	return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 3, 1));
}

PX_FORCE_INLINE Vec4V
V4PermYZXW(const Vec4V a) { return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1)); } PX_FORCE_INLINE Vec4V V4PermZWXY(const Vec4V a) { return _mm_shuffle_ps(a, a, _MM_SHUFFLE(1, 0, 3, 2)); } template <PxU8 x, PxU8 y, PxU8 z, PxU8 w> PX_FORCE_INLINE Vec4V V4Perm(const Vec4V a) { return _mm_shuffle_ps(a, a, _MM_SHUFFLE(w, z, y, x)); } PX_FORCE_INLINE Vec4V V4Zero() { return V4Load(0.0f); } PX_FORCE_INLINE Vec4V V4One() { return V4Load(1.0f); } PX_FORCE_INLINE Vec4V V4Eps() { return V4Load(PX_EPS_REAL); } PX_FORCE_INLINE Vec4V V4Neg(const Vec4V f) { return _mm_sub_ps(_mm_setzero_ps(), f); } PX_FORCE_INLINE Vec4V V4Add(const Vec4V a, const Vec4V b) { return _mm_add_ps(a, b); } PX_FORCE_INLINE Vec4V V4Sub(const Vec4V a, const Vec4V b) { return _mm_sub_ps(a, b); } PX_FORCE_INLINE Vec4V V4Scale(const Vec4V a, const FloatV b) { return _mm_mul_ps(a, b); } PX_FORCE_INLINE Vec4V V4Mul(const Vec4V a, const Vec4V b) { return _mm_mul_ps(a, b); } PX_FORCE_INLINE Vec4V V4ScaleInv(const Vec4V a, const FloatV b) { ASSERT_ISVALIDFLOATV(b); return _mm_div_ps(a, b); } PX_FORCE_INLINE Vec4V V4Div(const Vec4V a, const Vec4V b) { return _mm_div_ps(a, b); } PX_FORCE_INLINE Vec4V V4ScaleInvFast(const Vec4V a, const FloatV b) { ASSERT_ISVALIDFLOATV(b); return _mm_mul_ps(a, _mm_rcp_ps(b)); } PX_FORCE_INLINE Vec4V V4DivFast(const Vec4V a, const Vec4V b) { return _mm_mul_ps(a, _mm_rcp_ps(b)); } PX_FORCE_INLINE Vec4V V4Recip(const Vec4V a) { return _mm_div_ps(V4One(), a); } PX_FORCE_INLINE Vec4V V4RecipFast(const Vec4V a) { return _mm_rcp_ps(a); } PX_FORCE_INLINE Vec4V V4Rsqrt(const Vec4V a) { return _mm_div_ps(V4One(), _mm_sqrt_ps(a)); } PX_FORCE_INLINE Vec4V V4RsqrtFast(const Vec4V a) { return _mm_rsqrt_ps(a); } PX_FORCE_INLINE Vec4V V4Sqrt(const Vec4V a) { return _mm_sqrt_ps(a); } PX_FORCE_INLINE Vec4V V4ScaleAdd(const Vec4V a, const FloatV b, const Vec4V c) { ASSERT_ISVALIDFLOATV(b); return V4Add(V4Scale(a, b), c); } PX_FORCE_INLINE Vec4V V4NegScaleSub(const Vec4V a, const FloatV b, const 
Vec4V c) { ASSERT_ISVALIDFLOATV(b); return V4Sub(c, V4Scale(a, b)); } PX_FORCE_INLINE Vec4V V4MulAdd(const Vec4V a, const Vec4V b, const Vec4V c) { return V4Add(V4Mul(a, b), c); } PX_FORCE_INLINE Vec4V V4NegMulSub(const Vec4V a, const Vec4V b, const Vec4V c) { return V4Sub(c, V4Mul(a, b)); } PX_FORCE_INLINE Vec4V V4Abs(const Vec4V a) { return V4Max(a, V4Neg(a)); } PX_FORCE_INLINE FloatV V4SumElements(const Vec4V a) { #ifdef __SSE4_2__ Vec4V r = _mm_hadd_ps(a, a); r = _mm_hadd_ps(r, r); return r; #else const Vec4V xy = V4UnpackXY(a, a); // x,x,y,y const Vec4V zw = V4UnpackZW(a, a); // z,z,w,w const Vec4V xz_yw = V4Add(xy, zw); // x+z,x+z,y+w,y+w const FloatV xz = V4GetX(xz_yw); // x+z const FloatV yw = V4GetZ(xz_yw); // y+w return FAdd(xz, yw); // sum #endif } PX_FORCE_INLINE FloatV V4Dot(const Vec4V a, const Vec4V b) { #ifdef __SSE4_2__ return _mm_dp_ps(a, b, 0xff); #else //const __m128 dot1 = _mm_mul_ps(a, b); // x,y,z,w //const __m128 shuf1 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(2, 1, 0, 3)); // w,x,y,z //const __m128 shuf2 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(1, 0, 3, 2)); // z,w,x,y //const __m128 shuf3 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(0, 3, 2, 1)); // y,z,w,x //return _mm_add_ps(_mm_add_ps(shuf2, shuf3), _mm_add_ps(dot1, shuf1)); // aw*bw | az*bz | ay*by | ax*bx const __m128 t0 = _mm_mul_ps(a, b); // ay*by | ax*bx | aw*bw | az*bz const __m128 t1 = _mm_shuffle_ps(t0, t0, _MM_SHUFFLE(1, 0, 3, 2)); // ay*by + aw*bw | ax*bx + az*bz | aw*bw + ay*by | az*bz + ax*bx const __m128 t2 = _mm_add_ps(t0, t1); // ax*bx + az*bz | ay*by + aw*bw | az*bz + ax*bx | aw*bw + ay*by const __m128 t3 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(2, 3, 0, 1)); // ax*bx + az*bz + ay*by + aw*bw return _mm_add_ps(t3, t2); #endif } PX_FORCE_INLINE FloatV V4Dot3(const Vec4V a, const Vec4V b) { #ifdef __SSE4_2__ return _mm_dp_ps(a, b, 0x7f); #else const __m128 dot1 = _mm_mul_ps(a, b); // w,z,y,x const __m128 shuf1 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(0, 0, 0, 0)); // z,y,x,w const 
__m128 shuf2 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(1, 1, 1, 1)); // y,x,w,z const __m128 shuf3 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(2, 2, 2, 2)); // x,w,z,y return _mm_add_ps(_mm_add_ps(shuf1, shuf2), shuf3); #endif } PX_FORCE_INLINE Vec4V V4Cross(const Vec4V a, const Vec4V b) { const __m128 r1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 0, 2)); // z,x,y,w const __m128 r2 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1)); // y,z,x,w const __m128 l1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1)); // y,z,x,w const __m128 l2 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 1, 0, 2)); // z,x,y,w return _mm_sub_ps(_mm_mul_ps(l1, l2), _mm_mul_ps(r1, r2)); } PX_FORCE_INLINE FloatV V4Length(const Vec4V a) { return _mm_sqrt_ps(V4Dot(a, a)); } PX_FORCE_INLINE FloatV V4LengthSq(const Vec4V a) { return V4Dot(a, a); } PX_FORCE_INLINE Vec4V V4Normalize(const Vec4V a) { ASSERT_ISFINITELENGTH(a); return V4ScaleInv(a, _mm_sqrt_ps(V4Dot(a, a))); } PX_FORCE_INLINE Vec4V V4NormalizeFast(const Vec4V a) { ASSERT_ISFINITELENGTH(a); return V4ScaleInvFast(a, _mm_sqrt_ps(V4Dot(a, a))); } PX_FORCE_INLINE Vec4V V4NormalizeSafe(const Vec4V a, const Vec3V unsafeReturnValue) { const __m128 eps = V3Eps(); const __m128 length = V4Length(a); const __m128 isGreaterThanZero = V4IsGrtr(length, eps); return V4Sel(isGreaterThanZero, V4ScaleInv(a, length), unsafeReturnValue); } PX_FORCE_INLINE BoolV V4IsEqU32(const VecU32V a, const VecU32V b) { return m128_I2F(_mm_cmpeq_epi32(m128_F2I(a), m128_F2I(b))); } PX_FORCE_INLINE Vec4V V4Sel(const BoolV c, const Vec4V a, const Vec4V b) { return _mm_or_ps(_mm_andnot_ps(c, b), _mm_and_ps(c, a)); } PX_FORCE_INLINE BoolV V4IsGrtr(const Vec4V a, const Vec4V b) { return _mm_cmpgt_ps(a, b); } PX_FORCE_INLINE BoolV V4IsGrtrOrEq(const Vec4V a, const Vec4V b) { return _mm_cmpge_ps(a, b); } PX_FORCE_INLINE BoolV V4IsEq(const Vec4V a, const Vec4V b) { return _mm_cmpeq_ps(a, b); } PX_FORCE_INLINE Vec4V V4Max(const Vec4V a, const Vec4V b) { return _mm_max_ps(a, b); } PX_FORCE_INLINE 
Vec4V V4Min(const Vec4V a, const Vec4V b) { return _mm_min_ps(a, b); } PX_FORCE_INLINE FloatV V4ExtractMax(const Vec4V a) { const __m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 1, 0, 3)); const __m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1, 0, 3, 2)); const __m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 3, 2, 1)); return _mm_max_ps(_mm_max_ps(a, shuf1), _mm_max_ps(shuf2, shuf3)); } PX_FORCE_INLINE FloatV V4ExtractMin(const Vec4V a) { const __m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 1, 0, 3)); const __m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1, 0, 3, 2)); const __m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 3, 2, 1)); return _mm_min_ps(_mm_min_ps(a, shuf1), _mm_min_ps(shuf2, shuf3)); } PX_FORCE_INLINE Vec4V V4Clamp(const Vec4V a, const Vec4V minV, const Vec4V maxV) { return V4Max(V4Min(a, maxV), minV); } PX_FORCE_INLINE PxU32 V4AllGrtr(const Vec4V a, const Vec4V b) { return internalUnitSSE2Simd::BAllTrue4_R(V4IsGrtr(a, b)); } PX_FORCE_INLINE PxU32 V4AllGrtrOrEq(const Vec4V a, const Vec4V b) { return internalUnitSSE2Simd::BAllTrue4_R(V4IsGrtrOrEq(a, b)); } PX_FORCE_INLINE PxU32 V4AllGrtrOrEq3(const Vec4V a, const Vec4V b) { return internalUnitSSE2Simd::BAllTrue3_R(V4IsGrtrOrEq(a, b)); } PX_FORCE_INLINE PxU32 V4AllEq(const Vec4V a, const Vec4V b) { return internalUnitSSE2Simd::BAllTrue4_R(V4IsEq(a, b)); } PX_FORCE_INLINE PxU32 V4AnyGrtr3(const Vec4V a, const Vec4V b) { return internalUnitSSE2Simd::BAnyTrue3_R(V4IsGrtr(a, b)); } PX_FORCE_INLINE Vec4V V4Round(const Vec4V a) { #ifdef __SSE4_2__ return _mm_round_ps(a, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC); #else // return _mm_round_ps(a, 0x0); const Vec4V half = V4Load(0.5f); const __m128 signBit = _mm_cvtepi32_ps(_mm_srli_epi32(_mm_cvtps_epi32(a), 31)); const Vec4V aRound = V4Sub(V4Add(a, half), signBit); __m128i tmp = _mm_cvttps_epi32(aRound); return _mm_cvtepi32_ps(tmp); #endif } PX_FORCE_INLINE Vec4V V4Sin(const Vec4V a) { const Vec4V recipTwoPi = V4LoadA(g_PXReciprocalTwoPi.f); 
const Vec4V twoPi = V4LoadA(g_PXTwoPi.f); const Vec4V tmp = V4Mul(a, recipTwoPi); const Vec4V b = V4Round(tmp); const Vec4V V1 = V4NegMulSub(twoPi, b, a); // sin(V) ~= V - V^3 / 3! + V^5 / 5! - V^7 / 7! + V^9 / 9! - V^11 / 11! + V^13 / 13! - // V^15 / 15! + V^17 / 17! - V^19 / 19! + V^21 / 21! - V^23 / 23! (for -PI <= V < PI) const Vec4V V2 = V4Mul(V1, V1); const Vec4V V3 = V4Mul(V2, V1); const Vec4V V5 = V4Mul(V3, V2); const Vec4V V7 = V4Mul(V5, V2); const Vec4V V9 = V4Mul(V7, V2); const Vec4V V11 = V4Mul(V9, V2); const Vec4V V13 = V4Mul(V11, V2); const Vec4V V15 = V4Mul(V13, V2); const Vec4V V17 = V4Mul(V15, V2); const Vec4V V19 = V4Mul(V17, V2); const Vec4V V21 = V4Mul(V19, V2); const Vec4V V23 = V4Mul(V21, V2); const Vec4V sinCoefficients0 = V4LoadA(g_PXSinCoefficients0.f); const Vec4V sinCoefficients1 = V4LoadA(g_PXSinCoefficients1.f); const Vec4V sinCoefficients2 = V4LoadA(g_PXSinCoefficients2.f); const FloatV S1 = V4GetY(sinCoefficients0); const FloatV S2 = V4GetZ(sinCoefficients0); const FloatV S3 = V4GetW(sinCoefficients0); const FloatV S4 = V4GetX(sinCoefficients1); const FloatV S5 = V4GetY(sinCoefficients1); const FloatV S6 = V4GetZ(sinCoefficients1); const FloatV S7 = V4GetW(sinCoefficients1); const FloatV S8 = V4GetX(sinCoefficients2); const FloatV S9 = V4GetY(sinCoefficients2); const FloatV S10 = V4GetZ(sinCoefficients2); const FloatV S11 = V4GetW(sinCoefficients2); Vec4V Result; Result = V4MulAdd(S1, V3, V1); Result = V4MulAdd(S2, V5, Result); Result = V4MulAdd(S3, V7, Result); Result = V4MulAdd(S4, V9, Result); Result = V4MulAdd(S5, V11, Result); Result = V4MulAdd(S6, V13, Result); Result = V4MulAdd(S7, V15, Result); Result = V4MulAdd(S8, V17, Result); Result = V4MulAdd(S9, V19, Result); Result = V4MulAdd(S10, V21, Result); Result = V4MulAdd(S11, V23, Result); return Result; } PX_FORCE_INLINE Vec4V V4Cos(const Vec4V a) { const Vec4V recipTwoPi = V4LoadA(g_PXReciprocalTwoPi.f); const Vec4V twoPi = V4LoadA(g_PXTwoPi.f); const Vec4V tmp = V4Mul(a, 
recipTwoPi); const Vec4V b = V4Round(tmp); const Vec4V V1 = V4NegMulSub(twoPi, b, a); // cos(V) ~= 1 - V^2 / 2! + V^4 / 4! - V^6 / 6! + V^8 / 8! - V^10 / 10! + V^12 / 12! - // V^14 / 14! + V^16 / 16! - V^18 / 18! + V^20 / 20! - V^22 / 22! (for -PI <= V < PI) const Vec4V V2 = V4Mul(V1, V1); const Vec4V V4 = V4Mul(V2, V2); const Vec4V V6 = V4Mul(V4, V2); const Vec4V V8 = V4Mul(V4, V4); const Vec4V V10 = V4Mul(V6, V4); const Vec4V V12 = V4Mul(V6, V6); const Vec4V V14 = V4Mul(V8, V6); const Vec4V V16 = V4Mul(V8, V8); const Vec4V V18 = V4Mul(V10, V8); const Vec4V V20 = V4Mul(V10, V10); const Vec4V V22 = V4Mul(V12, V10); const Vec4V cosCoefficients0 = V4LoadA(g_PXCosCoefficients0.f); const Vec4V cosCoefficients1 = V4LoadA(g_PXCosCoefficients1.f); const Vec4V cosCoefficients2 = V4LoadA(g_PXCosCoefficients2.f); const FloatV C1 = V4GetY(cosCoefficients0); const FloatV C2 = V4GetZ(cosCoefficients0); const FloatV C3 = V4GetW(cosCoefficients0); const FloatV C4 = V4GetX(cosCoefficients1); const FloatV C5 = V4GetY(cosCoefficients1); const FloatV C6 = V4GetZ(cosCoefficients1); const FloatV C7 = V4GetW(cosCoefficients1); const FloatV C8 = V4GetX(cosCoefficients2); const FloatV C9 = V4GetY(cosCoefficients2); const FloatV C10 = V4GetZ(cosCoefficients2); const FloatV C11 = V4GetW(cosCoefficients2); Vec4V Result; Result = V4MulAdd(C1, V2, V4One()); Result = V4MulAdd(C2, V4, Result); Result = V4MulAdd(C3, V6, Result); Result = V4MulAdd(C4, V8, Result); Result = V4MulAdd(C5, V10, Result); Result = V4MulAdd(C6, V12, Result); Result = V4MulAdd(C7, V14, Result); Result = V4MulAdd(C8, V16, Result); Result = V4MulAdd(C9, V18, Result); Result = V4MulAdd(C10, V20, Result); Result = V4MulAdd(C11, V22, Result); return Result; } PX_FORCE_INLINE void V4Transpose(Vec4V& col0, Vec4V& col1, Vec4V& col2, Vec4V& col3) { Vec4V tmp0 = _mm_unpacklo_ps(col0, col1); Vec4V tmp2 = _mm_unpacklo_ps(col2, col3); Vec4V tmp1 = _mm_unpackhi_ps(col0, col1); Vec4V tmp3 = _mm_unpackhi_ps(col2, col3); col0 = 
_mm_movelh_ps(tmp0, tmp2); col1 = _mm_movehl_ps(tmp2, tmp0); col2 = _mm_movelh_ps(tmp1, tmp3); col3 = _mm_movehl_ps(tmp3, tmp1); } ////////////////////////////////// // BoolV ////////////////////////////////// PX_FORCE_INLINE BoolV BFFFF() { return _mm_setzero_ps(); } PX_FORCE_INLINE BoolV BFFFT() { /*const PX_ALIGN(16, PxU32 f[4])={0,0,0,0xFFFFFFFF}; const __m128 ffft=_mm_load_ps((float*)&f); return ffft;*/ return m128_I2F(_mm_set_epi32(-1, 0, 0, 0)); } PX_FORCE_INLINE BoolV BFFTF() { /*const PX_ALIGN(16, PxU32 f[4])={0,0,0xFFFFFFFF,0}; const __m128 fftf=_mm_load_ps((float*)&f); return fftf;*/ return m128_I2F(_mm_set_epi32(0, -1, 0, 0)); } PX_FORCE_INLINE BoolV BFFTT() { /*const PX_ALIGN(16, PxU32 f[4])={0,0,0xFFFFFFFF,0xFFFFFFFF}; const __m128 fftt=_mm_load_ps((float*)&f); return fftt;*/ return m128_I2F(_mm_set_epi32(-1, -1, 0, 0)); } PX_FORCE_INLINE BoolV BFTFF() { /*const PX_ALIGN(16, PxU32 f[4])={0,0xFFFFFFFF,0,0}; const __m128 ftff=_mm_load_ps((float*)&f); return ftff;*/ return m128_I2F(_mm_set_epi32(0, 0, -1, 0)); } PX_FORCE_INLINE BoolV BFTFT() { /*const PX_ALIGN(16, PxU32 f[4])={0,0xFFFFFFFF,0,0xFFFFFFFF}; const __m128 ftft=_mm_load_ps((float*)&f); return ftft;*/ return m128_I2F(_mm_set_epi32(-1, 0, -1, 0)); } PX_FORCE_INLINE BoolV BFTTF() { /*const PX_ALIGN(16, PxU32 f[4])={0,0xFFFFFFFF,0xFFFFFFFF,0}; const __m128 fttf=_mm_load_ps((float*)&f); return fttf;*/ return m128_I2F(_mm_set_epi32(0, -1, -1, 0)); } PX_FORCE_INLINE BoolV BFTTT() { /*const PX_ALIGN(16, PxU32 f[4])={0,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF}; const __m128 fttt=_mm_load_ps((float*)&f); return fttt;*/ return m128_I2F(_mm_set_epi32(-1, -1, -1, 0)); } PX_FORCE_INLINE BoolV BTFFF() { // const PX_ALIGN(16, PxU32 f[4])={0xFFFFFFFF,0,0,0}; // const __m128 tfff=_mm_load_ps((float*)&f); // return tfff; return m128_I2F(_mm_set_epi32(0, 0, 0, -1)); } PX_FORCE_INLINE BoolV BTFFT() { /*const PX_ALIGN(16, PxU32 f[4])={0xFFFFFFFF,0,0,0xFFFFFFFF}; const __m128 tfft=_mm_load_ps((float*)&f); return tfft;*/ 
return m128_I2F(_mm_set_epi32(-1, 0, 0, -1)); } PX_FORCE_INLINE BoolV BTFTF() { /*const PX_ALIGN(16, PxU32 f[4])={0xFFFFFFFF,0,0xFFFFFFFF,0}; const __m128 tftf=_mm_load_ps((float*)&f); return tftf;*/ return m128_I2F(_mm_set_epi32(0, -1, 0, -1)); } PX_FORCE_INLINE BoolV BTFTT() { /*const PX_ALIGN(16, PxU32 f[4])={0xFFFFFFFF,0,0xFFFFFFFF,0xFFFFFFFF}; const __m128 tftt=_mm_load_ps((float*)&f); return tftt;*/ return m128_I2F(_mm_set_epi32(-1, -1, 0, -1)); } PX_FORCE_INLINE BoolV BTTFF() { /*const PX_ALIGN(16, PxU32 f[4])={0xFFFFFFFF,0xFFFFFFFF,0,0}; const __m128 ttff=_mm_load_ps((float*)&f); return ttff;*/ return m128_I2F(_mm_set_epi32(0, 0, -1, -1)); } PX_FORCE_INLINE BoolV BTTFT() { /*const PX_ALIGN(16, PxU32 f[4])={0xFFFFFFFF,0xFFFFFFFF,0,0xFFFFFFFF}; const __m128 ttft=_mm_load_ps((float*)&f); return ttft;*/ return m128_I2F(_mm_set_epi32(-1, 0, -1, -1)); } PX_FORCE_INLINE BoolV BTTTF() { /*const PX_ALIGN(16, PxU32 f[4])={0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0}; const __m128 tttf=_mm_load_ps((float*)&f); return tttf;*/ return m128_I2F(_mm_set_epi32(0, -1, -1, -1)); } PX_FORCE_INLINE BoolV BTTTT() { /*const PX_ALIGN(16, PxU32 f[4])={0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF}; const __m128 tttt=_mm_load_ps((float*)&f); return tttt;*/ return m128_I2F(_mm_set_epi32(-1, -1, -1, -1)); } PX_FORCE_INLINE BoolV BXMask() { /*const PX_ALIGN(16, PxU32 f[4])={0xFFFFFFFF,0,0,0}; const __m128 tfff=_mm_load_ps((float*)&f); return tfff;*/ return m128_I2F(_mm_set_epi32(0, 0, 0, -1)); } PX_FORCE_INLINE BoolV BYMask() { /*const PX_ALIGN(16, PxU32 f[4])={0,0xFFFFFFFF,0,0}; const __m128 ftff=_mm_load_ps((float*)&f); return ftff;*/ return m128_I2F(_mm_set_epi32(0, 0, -1, 0)); } PX_FORCE_INLINE BoolV BZMask() { /*const PX_ALIGN(16, PxU32 f[4])={0,0,0xFFFFFFFF,0}; const __m128 fftf=_mm_load_ps((float*)&f); return fftf;*/ return m128_I2F(_mm_set_epi32(0, -1, 0, 0)); } PX_FORCE_INLINE BoolV BWMask() { /*const PX_ALIGN(16, PxU32 f[4])={0,0,0,0xFFFFFFFF}; const __m128 ffft=_mm_load_ps((float*)&f); 
return ffft;*/ return m128_I2F(_mm_set_epi32(-1, 0, 0, 0)); } PX_FORCE_INLINE BoolV BGetX(const BoolV f) { return _mm_shuffle_ps(f, f, _MM_SHUFFLE(0, 0, 0, 0)); } PX_FORCE_INLINE BoolV BGetY(const BoolV f) { return _mm_shuffle_ps(f, f, _MM_SHUFFLE(1, 1, 1, 1)); } PX_FORCE_INLINE BoolV BGetZ(const BoolV f) { return _mm_shuffle_ps(f, f, _MM_SHUFFLE(2, 2, 2, 2)); } PX_FORCE_INLINE BoolV BGetW(const BoolV f) { return _mm_shuffle_ps(f, f, _MM_SHUFFLE(3, 3, 3, 3)); } PX_FORCE_INLINE BoolV BSetX(const BoolV v, const BoolV f) { return V4Sel(BFTTT(), v, f); } PX_FORCE_INLINE BoolV BSetY(const BoolV v, const BoolV f) { return V4Sel(BTFTT(), v, f); } PX_FORCE_INLINE BoolV BSetZ(const BoolV v, const BoolV f) { return V4Sel(BTTFT(), v, f); } PX_FORCE_INLINE BoolV BSetW(const BoolV v, const BoolV f) { return V4Sel(BTTTF(), v, f); } PX_FORCE_INLINE BoolV BAnd(const BoolV a, const BoolV b) { return _mm_and_ps(a, b); } PX_FORCE_INLINE BoolV BNot(const BoolV a) { const BoolV bAllTrue(BTTTT()); return _mm_xor_ps(a, bAllTrue); } PX_FORCE_INLINE BoolV BAndNot(const BoolV a, const BoolV b) { return _mm_andnot_ps(b, a); } PX_FORCE_INLINE BoolV BOr(const BoolV a, const BoolV b) { return _mm_or_ps(a, b); } PX_FORCE_INLINE BoolV BAllTrue4(const BoolV a) { const BoolV bTmp = _mm_and_ps(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 1, 0, 1)), _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 3, 2, 3))); return _mm_and_ps(_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(0, 0, 0, 0)), _mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(1, 1, 1, 1))); } PX_FORCE_INLINE BoolV BAnyTrue4(const BoolV a) { const BoolV bTmp = _mm_or_ps(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 1, 0, 1)), _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 3, 2, 3))); return _mm_or_ps(_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(0, 0, 0, 0)), _mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(1, 1, 1, 1))); } PX_FORCE_INLINE BoolV BAllTrue3(const BoolV a) { const BoolV bTmp = _mm_and_ps(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 1, 0, 1)), _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 2, 2, 2))); return 
_mm_and_ps(_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(0, 0, 0, 0)), _mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(1, 1, 1, 1))); } PX_FORCE_INLINE BoolV BAnyTrue3(const BoolV a) { const BoolV bTmp = _mm_or_ps(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 1, 0, 1)), _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 2, 2, 2))); return _mm_or_ps(_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(0, 0, 0, 0)), _mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(1, 1, 1, 1))); } PX_FORCE_INLINE PxU32 BAllEq(const BoolV a, const BoolV b) { const BoolV bTest = m128_I2F(_mm_cmpeq_epi32(m128_F2I(a), m128_F2I(b))); return internalUnitSSE2Simd::BAllTrue4_R(bTest); } PX_FORCE_INLINE PxU32 BAllEqTTTT(const BoolV a) { return PxU32(_mm_movemask_ps(a)==15); } PX_FORCE_INLINE PxU32 BAllEqFFFF(const BoolV a) { return PxU32(_mm_movemask_ps(a)==0); } PX_FORCE_INLINE PxU32 BGetBitMask(const BoolV a) { return PxU32(_mm_movemask_ps(a)); } ////////////////////////////////// // MAT33V ////////////////////////////////// PX_FORCE_INLINE Vec3V M33MulV3(const Mat33V& a, const Vec3V b) { const FloatV x = V3GetX(b); const FloatV y = V3GetY(b); const FloatV z = V3GetZ(b); const Vec3V v0 = V3Scale(a.col0, x); const Vec3V v1 = V3Scale(a.col1, y); const Vec3V v2 = V3Scale(a.col2, z); const Vec3V v0PlusV1 = V3Add(v0, v1); return V3Add(v0PlusV1, v2); } PX_FORCE_INLINE Vec3V M33TrnspsMulV3(const Mat33V& a, const Vec3V b) { const FloatV x = V3Dot(a.col0, b); const FloatV y = V3Dot(a.col1, b); const FloatV z = V3Dot(a.col2, b); return V3Merge(x, y, z); } PX_FORCE_INLINE Vec3V M33MulV3AddV3(const Mat33V& A, const Vec3V b, const Vec3V c) { const FloatV x = V3GetX(b); const FloatV y = V3GetY(b); const FloatV z = V3GetZ(b); Vec3V result = V3ScaleAdd(A.col0, x, c); result = V3ScaleAdd(A.col1, y, result); return V3ScaleAdd(A.col2, z, result); } PX_FORCE_INLINE Mat33V M33MulM33(const Mat33V& a, const Mat33V& b) { return Mat33V(M33MulV3(a, b.col0), M33MulV3(a, b.col1), M33MulV3(a, b.col2)); } PX_FORCE_INLINE Mat33V M33Add(const Mat33V& a, const Mat33V& b) { return 
Mat33V(V3Add(a.col0, b.col0), V3Add(a.col1, b.col1), V3Add(a.col2, b.col2)); } PX_FORCE_INLINE Mat33V M33Scale(const Mat33V& a, const FloatV& b) { return Mat33V(V3Scale(a.col0, b), V3Scale(a.col1, b), V3Scale(a.col2, b)); } PX_FORCE_INLINE Mat33V M33Inverse(const Mat33V& a) { const BoolV tfft = BTFFT(); const BoolV tttf = BTTTF(); const FloatV zero = FZero(); const Vec3V cross01 = V3Cross(a.col0, a.col1); const Vec3V cross12 = V3Cross(a.col1, a.col2); const Vec3V cross20 = V3Cross(a.col2, a.col0); const FloatV dot = V3Dot(cross01, a.col2); const FloatV invDet = _mm_rcp_ps(dot); const Vec3V mergeh = _mm_unpacklo_ps(cross12, cross01); const Vec3V mergel = _mm_unpackhi_ps(cross12, cross01); Vec3V colInv0 = _mm_unpacklo_ps(mergeh, cross20); colInv0 = _mm_or_ps(_mm_andnot_ps(tttf, zero), _mm_and_ps(tttf, colInv0)); const Vec3V zppd = _mm_shuffle_ps(mergeh, cross20, _MM_SHUFFLE(3, 0, 0, 2)); const Vec3V pbwp = _mm_shuffle_ps(cross20, mergeh, _MM_SHUFFLE(3, 3, 1, 0)); const Vec3V colInv1 = _mm_or_ps(_mm_andnot_ps(BTFFT(), pbwp), _mm_and_ps(BTFFT(), zppd)); const Vec3V xppd = _mm_shuffle_ps(mergel, cross20, _MM_SHUFFLE(3, 0, 0, 0)); const Vec3V pcyp = _mm_shuffle_ps(cross20, mergel, _MM_SHUFFLE(3, 1, 2, 0)); const Vec3V colInv2 = _mm_or_ps(_mm_andnot_ps(tfft, pcyp), _mm_and_ps(tfft, xppd)); return Mat33V(_mm_mul_ps(colInv0, invDet), _mm_mul_ps(colInv1, invDet), _mm_mul_ps(colInv2, invDet)); } PX_FORCE_INLINE Mat33V M33Trnsps(const Mat33V& a) { return Mat33V(V3Merge(V3GetX(a.col0), V3GetX(a.col1), V3GetX(a.col2)), V3Merge(V3GetY(a.col0), V3GetY(a.col1), V3GetY(a.col2)), V3Merge(V3GetZ(a.col0), V3GetZ(a.col1), V3GetZ(a.col2))); } PX_FORCE_INLINE Mat33V M33Identity() { return Mat33V(V3UnitX(), V3UnitY(), V3UnitZ()); } PX_FORCE_INLINE Mat33V M33Sub(const Mat33V& a, const Mat33V& b) { return Mat33V(V3Sub(a.col0, b.col0), V3Sub(a.col1, b.col1), V3Sub(a.col2, b.col2)); } PX_FORCE_INLINE Mat33V M33Neg(const Mat33V& a) { return Mat33V(V3Neg(a.col0), V3Neg(a.col1), V3Neg(a.col2)); } 
// Per-column absolute value of a 3x3 matrix.
PX_FORCE_INLINE Mat33V M33Abs(const Mat33V& a)
{
	return Mat33V(V3Abs(a.col0), V3Abs(a.col1), V3Abs(a.col2));
}

// Promotes a 3-vector to the diagonal 3x3 matrix diag(v.x, v.y, v.z):
// column i keeps only lane i of v, all other lanes are zeroed.
PX_FORCE_INLINE Mat33V PromoteVec3V(const Vec3V v)
{
	const BoolV bTFFF = BTFFF();	// x-lane mask
	const BoolV bFTFF = BFTFF();	// y-lane mask
	// Fix: was BTFTF(), which selects the x AND z lanes and therefore leaked
	// v.x into col2.x, producing a non-diagonal matrix. BFFTF() selects the
	// z lane only, matching the x/y columns above.
	const BoolV bFFTF = BFFTF();
	const Vec3V zero = V3Zero();
	return Mat33V(V3Sel(bTFFF, v, zero), V3Sel(bFTFF, v, zero), V3Sel(bFFTF, v, zero));
}

// Builds diag(d) by multiplying d with the unit basis vectors (each product
// has exactly one non-zero lane).
PX_FORCE_INLINE Mat33V M33Diagonal(const Vec3VArg d)
{
	// These are full 3-vectors, so hold them as Vec3V rather than FloatV; the
	// previous FloatV declarations compiled only because both types alias
	// __m128 on this platform.
	const Vec3V x = V3Mul(V3UnitX(), d);
	const Vec3V y = V3Mul(V3UnitY(), d);
	const Vec3V z = V3Mul(V3UnitZ(), d);
	return Mat33V(x, y, z);
}

//////////////////////////////////
// MAT34V
//////////////////////////////////

// Full affine transform of a point: rotate/scale by the 3x3 part, then
// translate by col3.
PX_FORCE_INLINE Vec3V M34MulV3(const Mat34V& a, const Vec3V b)
{
	const FloatV x = V3GetX(b);
	const FloatV y = V3GetY(b);
	const FloatV z = V3GetZ(b);
	const Vec3V v0 = V3Scale(a.col0, x);
	const Vec3V v1 = V3Scale(a.col1, y);
	const Vec3V v2 = V3Scale(a.col2, z);
	const Vec3V v0PlusV1 = V3Add(v0, v1);
	const Vec3V v0PlusV1Plusv2 = V3Add(v0PlusV1, v2);
	return V3Add(v0PlusV1Plusv2, a.col3);
}

// Transforms a direction by the 3x3 part only (no translation applied).
PX_FORCE_INLINE Vec3V M34Mul33V3(const Mat34V& a, const Vec3V b)
{
	const FloatV x = V3GetX(b);
	const FloatV y = V3GetY(b);
	const FloatV z = V3GetZ(b);
	const Vec3V v0 = V3Scale(a.col0, x);
	const Vec3V v1 = V3Scale(a.col1, y);
	const Vec3V v2 = V3Scale(a.col2, z);
	const Vec3V v0PlusV1 = V3Add(v0, v1);
	return V3Add(v0PlusV1, v2);
}

// Multiplies by the transpose of the 3x3 part: one dot product per column.
PX_FORCE_INLINE Vec3V M34TrnspsMul33V3(const Mat34V& a, const Vec3V b)
{
	const FloatV x = V3Dot(a.col0, b);
	const FloatV y = V3Dot(a.col1, b);
	const FloatV z = V3Dot(a.col2, b);
	return V3Merge(x, y, z);
}

// Composes two affine transforms: rotates b's basis columns, and fully
// transforms b's translation column.
PX_FORCE_INLINE Mat34V M34MulM34(const Mat34V& a, const Mat34V& b)
{
	return Mat34V(M34Mul33V3(a, b.col0), M34Mul33V3(a, b.col1), M34Mul33V3(a, b.col2), M34MulV3(a, b.col3));
}

// 3x4 * 3x3: rotates each column of b by a's rotation part.
PX_FORCE_INLINE Mat33V M34MulM33(const Mat34V& a, const Mat33V& b)
{
	return Mat33V(M34Mul33V3(a, b.col0), M34Mul33V3(a, b.col1), M34Mul33V3(a, b.col2));
}

// Rotation-only product of two 3x4 transforms (translations ignored).
PX_FORCE_INLINE Mat33V M34Mul33MM34(const Mat34V& a, const Mat34V& b)
{
	return Mat33V(M34Mul33V3(a,
b.col0), M34Mul33V3(a, b.col1), M34Mul33V3(a, b.col2)); } PX_FORCE_INLINE Mat34V M34Add(const Mat34V& a, const Mat34V& b) { return Mat34V(V3Add(a.col0, b.col0), V3Add(a.col1, b.col1), V3Add(a.col2, b.col2), V3Add(a.col3, b.col3)); } PX_FORCE_INLINE Mat33V M34Trnsps33(const Mat34V& a) { return Mat33V(V3Merge(V3GetX(a.col0), V3GetX(a.col1), V3GetX(a.col2)), V3Merge(V3GetY(a.col0), V3GetY(a.col1), V3GetY(a.col2)), V3Merge(V3GetZ(a.col0), V3GetZ(a.col1), V3GetZ(a.col2))); } ////////////////////////////////// // MAT44V ////////////////////////////////// PX_FORCE_INLINE Vec4V M44MulV4(const Mat44V& a, const Vec4V b) { const FloatV x = V4GetX(b); const FloatV y = V4GetY(b); const FloatV z = V4GetZ(b); const FloatV w = V4GetW(b); const Vec4V v0 = V4Scale(a.col0, x); const Vec4V v1 = V4Scale(a.col1, y); const Vec4V v2 = V4Scale(a.col2, z); const Vec4V v3 = V4Scale(a.col3, w); const Vec4V v0PlusV1 = V4Add(v0, v1); const Vec4V v0PlusV1Plusv2 = V4Add(v0PlusV1, v2); return V4Add(v0PlusV1Plusv2, v3); } PX_FORCE_INLINE Vec4V M44TrnspsMulV4(const Mat44V& a, const Vec4V b) { PX_ALIGN(16, FloatV) dotProdArray[4] = { V4Dot(a.col0, b), V4Dot(a.col1, b), V4Dot(a.col2, b), V4Dot(a.col3, b) }; return V4Merge(dotProdArray); } PX_FORCE_INLINE Mat44V M44MulM44(const Mat44V& a, const Mat44V& b) { return Mat44V(M44MulV4(a, b.col0), M44MulV4(a, b.col1), M44MulV4(a, b.col2), M44MulV4(a, b.col3)); } PX_FORCE_INLINE Mat44V M44Add(const Mat44V& a, const Mat44V& b) { return Mat44V(V4Add(a.col0, b.col0), V4Add(a.col1, b.col1), V4Add(a.col2, b.col2), V4Add(a.col3, b.col3)); } PX_FORCE_INLINE Mat44V M44Trnsps(const Mat44V& a) { const Vec4V v0 = _mm_unpacklo_ps(a.col0, a.col2); const Vec4V v1 = _mm_unpackhi_ps(a.col0, a.col2); const Vec4V v2 = _mm_unpacklo_ps(a.col1, a.col3); const Vec4V v3 = _mm_unpackhi_ps(a.col1, a.col3); return Mat44V(_mm_unpacklo_ps(v0, v2), _mm_unpackhi_ps(v0, v2), _mm_unpacklo_ps(v1, v3), _mm_unpackhi_ps(v1, v3)); } PX_FORCE_INLINE Mat44V M44Inverse(const Mat44V& a) { __m128 
minor0, minor1, minor2, minor3; __m128 row0, row1, row2, row3; __m128 det, tmp1; tmp1 = V4Zero(); row1 = V4Zero(); row3 = V4Zero(); row0 = a.col0; row1 = _mm_shuffle_ps(a.col1, a.col1, _MM_SHUFFLE(1, 0, 3, 2)); row2 = a.col2; row3 = _mm_shuffle_ps(a.col3, a.col3, _MM_SHUFFLE(1, 0, 3, 2)); tmp1 = _mm_mul_ps(row2, row3); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1); minor0 = _mm_mul_ps(row1, tmp1); minor1 = _mm_mul_ps(row0, tmp1); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E); minor0 = _mm_sub_ps(_mm_mul_ps(row1, tmp1), minor0); minor1 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor1); minor1 = _mm_shuffle_ps(minor1, minor1, 0x4E); tmp1 = _mm_mul_ps(row1, row2); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1); minor0 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor0); minor3 = _mm_mul_ps(row0, tmp1); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E); minor0 = _mm_sub_ps(minor0, _mm_mul_ps(row3, tmp1)); minor3 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor3); minor3 = _mm_shuffle_ps(minor3, minor3, 0x4E); tmp1 = _mm_mul_ps(_mm_shuffle_ps(row1, row1, 0x4E), row3); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1); row2 = _mm_shuffle_ps(row2, row2, 0x4E); minor0 = _mm_add_ps(_mm_mul_ps(row2, tmp1), minor0); minor2 = _mm_mul_ps(row0, tmp1); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E); minor0 = _mm_sub_ps(minor0, _mm_mul_ps(row2, tmp1)); minor2 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor2); minor2 = _mm_shuffle_ps(minor2, minor2, 0x4E); tmp1 = _mm_mul_ps(row0, row1); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1); minor2 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor2); minor3 = _mm_sub_ps(_mm_mul_ps(row2, tmp1), minor3); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E); minor2 = _mm_sub_ps(_mm_mul_ps(row3, tmp1), minor2); minor3 = _mm_sub_ps(minor3, _mm_mul_ps(row2, tmp1)); tmp1 = _mm_mul_ps(row0, row3); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1); minor1 = _mm_sub_ps(minor1, _mm_mul_ps(row2, tmp1)); minor2 = _mm_add_ps(_mm_mul_ps(row1, tmp1), minor2); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E); minor1 = _mm_add_ps(_mm_mul_ps(row2, tmp1), minor1); minor2 
= _mm_sub_ps(minor2, _mm_mul_ps(row1, tmp1)); tmp1 = _mm_mul_ps(row0, row2); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1); minor1 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor1); minor3 = _mm_sub_ps(minor3, _mm_mul_ps(row1, tmp1)); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E); minor1 = _mm_sub_ps(minor1, _mm_mul_ps(row3, tmp1)); minor3 = _mm_add_ps(_mm_mul_ps(row1, tmp1), minor3); det = _mm_mul_ps(row0, minor0); det = _mm_add_ps(_mm_shuffle_ps(det, det, 0x4E), det); det = _mm_add_ss(_mm_shuffle_ps(det, det, 0xB1), det); tmp1 = _mm_rcp_ss(det); #if 0 det = _mm_sub_ss(_mm_add_ss(tmp1, tmp1), _mm_mul_ss(det, _mm_mul_ss(tmp1, tmp1))); det = _mm_shuffle_ps(det, det, 0x00); #else det = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(0, 0, 0, 0)); #endif minor0 = _mm_mul_ps(det, minor0); minor1 = _mm_mul_ps(det, minor1); minor2 = _mm_mul_ps(det, minor2); minor3 = _mm_mul_ps(det, minor3); Mat44V invTrans(minor0, minor1, minor2, minor3); return M44Trnsps(invTrans); } PX_FORCE_INLINE Vec4V V4LoadXYZW(const PxF32& x, const PxF32& y, const PxF32& z, const PxF32& w) { return _mm_set_ps(w, z, y, x); } /* // AP: work in progress - use proper SSE intrinsics where possible PX_FORCE_INLINE VecU16V V4U32PK(VecU32V a, VecU32V b) { VecU16V result; result.m128_u16[0] = PxU16(PxClamp<PxU32>((a).m128_u32[0], 0, 0xFFFF)); result.m128_u16[1] = PxU16(PxClamp<PxU32>((a).m128_u32[1], 0, 0xFFFF)); result.m128_u16[2] = PxU16(PxClamp<PxU32>((a).m128_u32[2], 0, 0xFFFF)); result.m128_u16[3] = PxU16(PxClamp<PxU32>((a).m128_u32[3], 0, 0xFFFF)); result.m128_u16[4] = PxU16(PxClamp<PxU32>((b).m128_u32[0], 0, 0xFFFF)); result.m128_u16[5] = PxU16(PxClamp<PxU32>((b).m128_u32[1], 0, 0xFFFF)); result.m128_u16[6] = PxU16(PxClamp<PxU32>((b).m128_u32[2], 0, 0xFFFF)); result.m128_u16[7] = PxU16(PxClamp<PxU32>((b).m128_u32[3], 0, 0xFFFF)); return result; } */ PX_FORCE_INLINE VecU32V V4U32Sel(const BoolV c, const VecU32V a, const VecU32V b) { return m128_I2F(_mm_or_si128(_mm_andnot_si128(m128_F2I(c), m128_F2I(b)), 
// (continuation of V4U32Sel: OR together the two masked halves)
_mm_and_si128(m128_F2I(c), m128_F2I(a))));
}
// Bitwise OR on unsigned lanes, routed through the integer domain.
PX_FORCE_INLINE VecU32V V4U32or(VecU32V a, VecU32V b)
{
	return m128_I2F(_mm_or_si128(m128_F2I(a), m128_F2I(b)));
}
// Bitwise XOR on unsigned lanes.
PX_FORCE_INLINE VecU32V V4U32xor(VecU32V a, VecU32V b)
{
	return m128_I2F(_mm_xor_si128(m128_F2I(a), m128_F2I(b)));
}
// Bitwise AND on unsigned lanes.
PX_FORCE_INLINE VecU32V V4U32and(VecU32V a, VecU32V b)
{
	return m128_I2F(_mm_and_si128(m128_F2I(a), m128_F2I(b)));
}
// a AND-NOT b, i.e. a & ~b. Note _mm_andnot_si128 negates its FIRST operand,
// hence the swapped argument order below.
PX_FORCE_INLINE VecU32V V4U32Andc(VecU32V a, VecU32V b)
{
	return m128_I2F(_mm_andnot_si128(m128_F2I(b), m128_F2I(a)));
}
/* PX_FORCE_INLINE VecU16V V4U16Or(VecU16V a, VecU16V b) { return m128_I2F(_mm_or_si128(m128_F2I(a), m128_F2I(b))); } */
/* PX_FORCE_INLINE VecU16V V4U16And(VecU16V a, VecU16V b) { return m128_I2F(_mm_and_si128(m128_F2I(a), m128_F2I(b))); } */
/* PX_FORCE_INLINE VecU16V V4U16Andc(VecU16V a, VecU16V b) { return m128_I2F(_mm_andnot_si128(m128_F2I(b), m128_F2I(a))); } */
// Broadcasts a single PxI32 to all four lanes (bit-preserving float load).
PX_FORCE_INLINE VecI32V I4Load(const PxI32 i)
{
	return m128_F2I(_mm_load1_ps(reinterpret_cast<const PxF32*>(&i)));
}
// Loads 4 ints from unaligned memory.
PX_FORCE_INLINE VecI32V I4LoadU(const PxI32* i)
{
	return m128_F2I(_mm_loadu_ps(reinterpret_cast<const PxF32*>(i)));
}
// Loads 4 ints from memory; i must be 16-byte aligned (_mm_load_ps contract).
PX_FORCE_INLINE VecI32V I4LoadA(const PxI32* i)
{
	return m128_F2I(_mm_load_ps(reinterpret_cast<const PxF32*>(i)));
}
// Lane-wise 32-bit integer add / subtract.
PX_FORCE_INLINE VecI32V VecI32V_Add(const VecI32VArg a, const VecI32VArg b)
{
	return _mm_add_epi32(a, b);
}
PX_FORCE_INLINE VecI32V VecI32V_Sub(const VecI32VArg a, const VecI32VArg b)
{
	return _mm_sub_epi32(a, b);
}
// Signed lane-wise comparisons; result is an all-ones/all-zeros mask per lane.
PX_FORCE_INLINE BoolV VecI32V_IsGrtr(const VecI32VArg a, const VecI32VArg b)
{
	return m128_I2F(_mm_cmpgt_epi32(a, b));
}
PX_FORCE_INLINE BoolV VecI32V_IsEq(const VecI32VArg a, const VecI32VArg b)
{
	return m128_I2F(_mm_cmpeq_epi32(a, b));
}
// Lane-wise select: a where mask c is set, b elsewhere.
PX_FORCE_INLINE VecI32V V4I32Sel(const BoolV c, const VecI32V a, const VecI32V b)
{
	return _mm_or_si128(_mm_andnot_si128(m128_F2I(c), b), _mm_and_si128(m128_F2I(c), a));
}
// Integer splat constants.
PX_FORCE_INLINE VecI32V VecI32V_Zero()
{
	return _mm_setzero_si128();
}
PX_FORCE_INLINE VecI32V VecI32V_One()
{
	return I4Load(1);
}
// Integer splat constants (continued).
PX_FORCE_INLINE VecI32V VecI32V_Two()
{
	return I4Load(2);
}
PX_FORCE_INLINE VecI32V VecI32V_MinusOne()
{
	return I4Load(-1);
}
// Unsigned splat constants.
PX_FORCE_INLINE VecU32V U4Zero()
{
	return U4Load(0);
}
PX_FORCE_INLINE VecU32V U4One()
{
	return U4Load(1);
}
PX_FORCE_INLINE VecU32V U4Two()
{
	return U4Load(2);
}
// Lane-wise select: a where mask c is set, b elsewhere.
PX_FORCE_INLINE VecI32V VecI32V_Sel(const BoolV c, const VecI32VArg a, const VecI32VArg b)
{
	return _mm_or_si128(_mm_andnot_si128(m128_F2I(c), b), _mm_and_si128(m128_F2I(c), a));
}
// Keeps only the x lane of 'shift': _mm_sll/srl_epi32 take their count from
// the low 64 bits of the operand, so the upper lanes must be zeroed first.
PX_FORCE_INLINE VecShiftV VecI32V_PrepareShift(const VecI32VArg shift)
{
	VecShiftV s;
	s.shift = VecI32V_Sel(BTFFF(), shift, VecI32V_Zero());
	return s;
}
// Left shift all lanes by the prepared variable count.
PX_FORCE_INLINE VecI32V VecI32V_LeftShift(const VecI32VArg a, const VecShiftVArg count)
{
	return _mm_sll_epi32(a, count.shift);
}
// NOTE(review): this variable-count right shift is LOGICAL (_mm_srl_epi32)
// while the immediate-count overload below is ARITHMETIC (_mm_srai_epi32);
// the two overloads disagree for negative inputs - confirm which semantics
// callers rely on before unifying.
PX_FORCE_INLINE VecI32V VecI32V_RightShift(const VecI32VArg a, const VecShiftVArg count)
{
	return _mm_srl_epi32(a, count.shift);
}
// Left shift all lanes by an immediate count.
PX_FORCE_INLINE VecI32V VecI32V_LeftShift(const VecI32VArg a, const PxU32 count)
{
	return _mm_slli_epi32(a, PxI32(count));
}
// Arithmetic (sign-extending) right shift by an immediate count.
PX_FORCE_INLINE VecI32V VecI32V_RightShift(const VecI32VArg a, const PxU32 count)
{
	return _mm_srai_epi32(a, PxI32(count));
}
// Bitwise AND / OR on the raw 128-bit value.
PX_FORCE_INLINE VecI32V VecI32V_And(const VecI32VArg a, const VecI32VArg b)
{
	return _mm_and_si128(a, b);
}
PX_FORCE_INLINE VecI32V VecI32V_Or(const VecI32VArg a, const VecI32VArg b)
{
	return _mm_or_si128(a, b);
}
// Broadcasts one lane to all four lanes. The shuffle runs in the float domain
// but only moves bits, so no int<->float value conversion occurs.
PX_FORCE_INLINE VecI32V VecI32V_GetX(const VecI32VArg a)
{
	return m128_F2I(_mm_shuffle_ps(m128_I2F(a), m128_I2F(a), _MM_SHUFFLE(0, 0, 0, 0)));
}
PX_FORCE_INLINE VecI32V VecI32V_GetY(const VecI32VArg a)
{
	return m128_F2I(_mm_shuffle_ps(m128_I2F(a), m128_I2F(a), _MM_SHUFFLE(1, 1, 1, 1)));
}
PX_FORCE_INLINE VecI32V VecI32V_GetZ(const VecI32VArg a)
{
	return m128_F2I(_mm_shuffle_ps(m128_I2F(a), m128_I2F(a), _MM_SHUFFLE(2, 2, 2, 2)));
}
PX_FORCE_INLINE VecI32V VecI32V_GetW(const VecI32VArg a)
{
	return m128_F2I(_mm_shuffle_ps(m128_I2F(a), m128_I2F(a), _MM_SHUFFLE(3, 3, 3, 3)));
}
// Stores the x lane of a to *i (body continues on the following source line).
PX_FORCE_INLINE void PxI32_From_VecI32V(const VecI32VArg a, PxI32* i)
{ _mm_store_ss(reinterpret_cast<PxF32*>(i), m128_I2F(a)); } PX_FORCE_INLINE VecI32V VecI32V_Merge(const VecI32VArg x, const VecI32VArg y, const VecI32VArg z, const VecI32VArg w) { const __m128 xw = _mm_move_ss(m128_I2F(y), m128_I2F(x)); // y, y, y, x const __m128 yz = _mm_move_ss(m128_I2F(z), m128_I2F(w)); // z, z, z, w return m128_F2I(_mm_shuffle_ps(xw, yz, _MM_SHUFFLE(0, 2, 1, 0))); } PX_FORCE_INLINE VecI32V VecI32V_From_BoolV(const BoolVArg a) { return m128_F2I(a); } PX_FORCE_INLINE VecU32V VecU32V_From_BoolV(const BoolVArg a) { return a; } /* template<int a> PX_FORCE_INLINE VecI32V V4ISplat() { VecI32V result; result.m128_i32[0] = a; result.m128_i32[1] = a; result.m128_i32[2] = a; result.m128_i32[3] = a; return result; } template<PxU32 a> PX_FORCE_INLINE VecU32V V4USplat() { VecU32V result; result.m128_u32[0] = a; result.m128_u32[1] = a; result.m128_u32[2] = a; result.m128_u32[3] = a; return result; } */ /* PX_FORCE_INLINE void V4U16StoreAligned(VecU16V val, VecU16V* address) { *address = val; } */ PX_FORCE_INLINE void V4U32StoreAligned(VecU32V val, VecU32V* address) { *address = val; } PX_FORCE_INLINE Vec4V V4LoadAligned(Vec4V* addr) { return *addr; } PX_FORCE_INLINE Vec4V V4LoadUnaligned(Vec4V* addr) { return V4LoadU(reinterpret_cast<float*>(addr)); } PX_FORCE_INLINE Vec4V V4Andc(const Vec4V a, const VecU32V b) { VecU32V result32(a); result32 = V4U32Andc(result32, b); return Vec4V(result32); } PX_FORCE_INLINE VecU32V V4IsGrtrV32u(const Vec4V a, const Vec4V b) { return V4IsGrtr(a, b); } PX_FORCE_INLINE VecU16V V4U16LoadAligned(VecU16V* addr) { return *addr; } PX_FORCE_INLINE VecU16V V4U16LoadUnaligned(VecU16V* addr) { return *addr; } PX_FORCE_INLINE VecU16V V4U16CompareGt(VecU16V a, VecU16V b) { // _mm_cmpgt_epi16 doesn't work for unsigned values unfortunately // return m128_I2F(_mm_cmpgt_epi16(m128_F2I(a), m128_F2I(b))); VecU16V result; result.m128_u16[0] = (a).m128_u16[0] > (b).m128_u16[0]; result.m128_u16[1] = (a).m128_u16[1] > (b).m128_u16[1]; 
result.m128_u16[2] = (a).m128_u16[2] > (b).m128_u16[2]; result.m128_u16[3] = (a).m128_u16[3] > (b).m128_u16[3]; result.m128_u16[4] = (a).m128_u16[4] > (b).m128_u16[4]; result.m128_u16[5] = (a).m128_u16[5] > (b).m128_u16[5]; result.m128_u16[6] = (a).m128_u16[6] > (b).m128_u16[6]; result.m128_u16[7] = (a).m128_u16[7] > (b).m128_u16[7]; return result; } PX_FORCE_INLINE VecU16V V4I16CompareGt(VecU16V a, VecU16V b) { return m128_I2F(_mm_cmpgt_epi16(m128_F2I(a), m128_F2I(b))); } PX_FORCE_INLINE Vec4V Vec4V_From_VecU32V(VecU32V a) { Vec4V result = V4LoadXYZW(PxF32(a.m128_u32[0]), PxF32(a.m128_u32[1]), PxF32(a.m128_u32[2]), PxF32(a.m128_u32[3])); return result; } PX_FORCE_INLINE Vec4V Vec4V_From_VecI32V(VecI32V in) { return _mm_cvtepi32_ps(in); } PX_FORCE_INLINE VecI32V VecI32V_From_Vec4V(Vec4V a) { return _mm_cvttps_epi32(a); } PX_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecU32V(VecU32V a) { return Vec4V(a); } PX_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecI32V(VecI32V a) { return m128_I2F(a); } PX_FORCE_INLINE VecU32V VecU32V_ReinterpretFrom_Vec4V(Vec4V a) { return VecU32V(a); } PX_FORCE_INLINE VecI32V VecI32V_ReinterpretFrom_Vec4V(Vec4V a) { return m128_F2I(a); } /* template<int index> PX_FORCE_INLINE BoolV BSplatElement(BoolV a) { BoolV result; result[0] = result[1] = result[2] = result[3] = a[index]; return result; } */ template <int index> BoolV BSplatElement(BoolV a) { float* data = reinterpret_cast<float*>(&a); return V4Load(data[index]); } template <int index> PX_FORCE_INLINE VecU32V V4U32SplatElement(VecU32V a) { VecU32V result; result.m128_u32[0] = result.m128_u32[1] = result.m128_u32[2] = result.m128_u32[3] = a.m128_u32[index]; return result; } template <int index> PX_FORCE_INLINE Vec4V V4SplatElement(Vec4V a) { float* data = reinterpret_cast<float*>(&a); return V4Load(data[index]); } PX_FORCE_INLINE VecU32V U4LoadXYZW(PxU32 x, PxU32 y, PxU32 z, PxU32 w) { VecU32V result; result.m128_u32[0] = x; result.m128_u32[1] = y; result.m128_u32[2] = z; result.m128_u32[3] 
= w; return result; } PX_FORCE_INLINE Vec4V V4Ceil(const Vec4V in) { UnionM128 a(in); return V4LoadXYZW(PxCeil(a.m128_f32[0]), PxCeil(a.m128_f32[1]), PxCeil(a.m128_f32[2]), PxCeil(a.m128_f32[3])); } PX_FORCE_INLINE Vec4V V4Floor(const Vec4V in) { UnionM128 a(in); return V4LoadXYZW(PxFloor(a.m128_f32[0]), PxFloor(a.m128_f32[1]), PxFloor(a.m128_f32[2]), PxFloor(a.m128_f32[3])); } PX_FORCE_INLINE VecU32V V4ConvertToU32VSaturate(const Vec4V in, PxU32 power) { PX_ASSERT(power == 0 && "Non-zero power not supported in convertToU32VSaturate"); PX_UNUSED(power); // prevent warning in release builds PxF32 ffffFFFFasFloat = PxF32(0xFFFF0000); UnionM128 a(in); VecU32V result; result.m128_u32[0] = PxU32(PxClamp<PxF32>((a).m128_f32[0], 0.0f, ffffFFFFasFloat)); result.m128_u32[1] = PxU32(PxClamp<PxF32>((a).m128_f32[1], 0.0f, ffffFFFFasFloat)); result.m128_u32[2] = PxU32(PxClamp<PxF32>((a).m128_f32[2], 0.0f, ffffFFFFasFloat)); result.m128_u32[3] = PxU32(PxClamp<PxF32>((a).m128_f32[3], 0.0f, ffffFFFFasFloat)); return result; } } // namespace aos } // namespace physx #endif // PXFOUNDATION_PXUNIXSSE2INLINEAOS_H
90,719
C
26.700763
154
0.66803
NVIDIA-Omniverse/PhysX/physx/include/foundation/unix/sse2/PxUnixSse2AoS.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PXFOUNDATION_PXUNIXSSE2AOS_H #define PXFOUNDATION_PXUNIXSSE2AOS_H // no includes here! this file should be included from PxcVecMath.h only!!! #if !COMPILE_VECTOR_INTRINSICS #error Vector intrinsics should not be included when using scalar implementation. 
#endif namespace physx { namespace aos { #if PX_EMSCRIPTEN typedef int8_t __int8_t; typedef int16_t __int16_t; typedef int32_t __int32_t; typedef int64_t __int64_t; typedef uint16_t __uint16_t; typedef uint32_t __uint32_t; typedef uint64_t __uint64_t; #endif typedef union UnionM128 { UnionM128() { } UnionM128(__m128 in) { m128 = in; } UnionM128(__m128i in) { m128i = in; } operator __m128() { return m128; } operator __m128() const { return m128; } float m128_f32[4]; __int8_t m128_i8[16]; __int16_t m128_i16[8]; __int32_t m128_i32[4]; __int64_t m128_i64[2]; __uint16_t m128_u16[8]; __uint32_t m128_u32[4]; __uint64_t m128_u64[2]; __m128 m128; __m128i m128i; } UnionM128; typedef __m128 FloatV; typedef __m128 Vec3V; typedef __m128 Vec4V; typedef __m128 BoolV; typedef __m128 QuatV; typedef __m128i VecI32V; typedef UnionM128 VecU32V; typedef UnionM128 VecU16V; typedef UnionM128 VecI16V; typedef UnionM128 VecU8V; #define FloatVArg FloatV & #define Vec3VArg Vec3V & #define Vec4VArg Vec4V & #define BoolVArg BoolV & #define VecU32VArg VecU32V & #define VecI32VArg VecI32V & #define VecU16VArg VecU16V & #define VecI16VArg VecI16V & #define VecU8VArg VecU8V & #define QuatVArg QuatV & // Optimization for situations in which you cross product multiple vectors with the same vector. 
// Avoids 2X shuffles per product struct VecCrossV { Vec3V mL1; Vec3V mR1; }; struct VecShiftV { VecI32V shift; }; #define VecShiftVArg VecShiftV & PX_ALIGN_PREFIX(16) struct Mat33V { Mat33V() { } Mat33V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2) : col0(c0), col1(c1), col2(c2) { } Vec3V PX_ALIGN(16, col0); Vec3V PX_ALIGN(16, col1); Vec3V PX_ALIGN(16, col2); } PX_ALIGN_SUFFIX(16); PX_ALIGN_PREFIX(16) struct Mat34V { Mat34V() { } Mat34V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2, const Vec3V& c3) : col0(c0), col1(c1), col2(c2), col3(c3) { } Vec3V PX_ALIGN(16, col0); Vec3V PX_ALIGN(16, col1); Vec3V PX_ALIGN(16, col2); Vec3V PX_ALIGN(16, col3); } PX_ALIGN_SUFFIX(16); PX_ALIGN_PREFIX(16) struct Mat43V { Mat43V() { } Mat43V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2) : col0(c0), col1(c1), col2(c2) { } Vec4V PX_ALIGN(16, col0); Vec4V PX_ALIGN(16, col1); Vec4V PX_ALIGN(16, col2); } PX_ALIGN_SUFFIX(16); PX_ALIGN_PREFIX(16) struct Mat44V { Mat44V() { } Mat44V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2, const Vec4V& c3) : col0(c0), col1(c1), col2(c2), col3(c3) { } Vec4V PX_ALIGN(16, col0); Vec4V PX_ALIGN(16, col1); Vec4V PX_ALIGN(16, col2); Vec4V PX_ALIGN(16, col3); } PX_ALIGN_SUFFIX(16); } // namespace aos } // namespace physx #endif // PXFOUNDATION_PXUNIXSSE2AOS_H
4,646
C
23.718085
116
0.717607
NVIDIA-Omniverse/PhysX/physx/include/foundation/unix/neon/PxUnixNeonInlineAoS.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PXFOUNDATION_PXUNIXNEONINLINEAOS_H #define PXFOUNDATION_PXUNIXNEONINLINEAOS_H #if !COMPILE_VECTOR_INTRINSICS #error Vector intrinsics should not be included when using scalar implementation. 
#endif namespace physx { namespace aos { // improved estimates #define VRECIPEQ recipq_newton<1> #define VRECIPE recip_newton<1> #define VRECIPSQRTEQ rsqrtq_newton<1> #define VRECIPSQRTE rsqrt_newton<1> // "exact" #define VRECIPQ recipq_newton<4> #if PX_SWITCH // StabilizationTests.AveragePoint needs more precision to succeed. #define VRECIP recip_newton<5> #else #define VRECIP recip_newton<4> #endif #define VRECIPSQRTQ rsqrtq_newton<4> #define VRECIPSQRT rsqrt_newton<4> #define VECMATH_AOS_EPSILON (1e-3f) ////////////////////////////////////////////////////////////////////// //Test that Vec3V and FloatV are legal ////////////////////////////////// #define FLOAT_COMPONENTS_EQUAL_THRESHOLD 0.01f PX_FORCE_INLINE bool isValidFloatV(const FloatV a) { /* PX_ALIGN(16, PxF32) data[4]; vst1_f32(reinterpret_cast<float32_t*>(data), a); return PxU32* intData = reinterpret_cast<PxU32*>(data); return intData[0] == intData[1]; */ PX_ALIGN(16, PxF32) data[4]; vst1_f32(reinterpret_cast<float32_t*>(data), a); const float32_t x = data[0]; const float32_t y = data[1]; return (x == y); /*if (PxAbs(x - y) < FLOAT_COMPONENTS_EQUAL_THRESHOLD) { return true; } if (PxAbs((x - y) / x) < FLOAT_COMPONENTS_EQUAL_THRESHOLD) { return true; } return false;*/ } PX_FORCE_INLINE bool isValidVec3V(const Vec3V a) { const float32_t w = vgetq_lane_f32(a, 3); return (0.0f == w); //const PxU32* intData = reinterpret_cast<const PxU32*>(&w); //return *intData == 0; } PX_FORCE_INLINE bool isAligned16(const void* a) { return(0 == (size_t(a) & 0x0f)); } #if PX_DEBUG #define ASSERT_ISVALIDVEC3V(a) PX_ASSERT(isValidVec3V(a)) #define ASSERT_ISVALIDFLOATV(a) PX_ASSERT(isValidFloatV(a)) #define ASSERT_ISALIGNED16(a) PX_ASSERT(isAligned16(static_cast<const void*>(a))) #else #define ASSERT_ISVALIDVEC3V(a) #define ASSERT_ISVALIDFLOATV(a) #define ASSERT_ISALIGNED16(a) #endif namespace internalUnitNeonSimd { PX_FORCE_INLINE PxU32 BAllTrue4_R(const BoolV a) { const uint16x4_t dHigh = 
vget_high_u16(vreinterpretq_u16_u32(a)); const uint16x4_t dLow = vmovn_u32(a); const uint16x8_t combined = vcombine_u16(dLow, dHigh); const uint32x2_t finalReduce = vreinterpret_u32_u8(vmovn_u16(combined)); return PxU32(vget_lane_u32(finalReduce, 0) == 0xffffFFFF); } PX_FORCE_INLINE PxU32 BAllTrue3_R(const BoolV a) { const uint16x4_t dHigh = vget_high_u16(vreinterpretq_u16_u32(a)); const uint16x4_t dLow = vmovn_u32(a); const uint16x8_t combined = vcombine_u16(dLow, dHigh); const uint32x2_t finalReduce = vreinterpret_u32_u8(vmovn_u16(combined)); return PxU32((vget_lane_u32(finalReduce, 0) & 0xffFFff) == 0xffFFff); } PX_FORCE_INLINE PxU32 BAnyTrue4_R(const BoolV a) { const uint16x4_t dHigh = vget_high_u16(vreinterpretq_u16_u32(a)); const uint16x4_t dLow = vmovn_u32(a); const uint16x8_t combined = vcombine_u16(dLow, dHigh); const uint32x2_t finalReduce = vreinterpret_u32_u8(vmovn_u16(combined)); return PxU32(vget_lane_u32(finalReduce, 0) != 0x0); } PX_FORCE_INLINE PxU32 BAnyTrue3_R(const BoolV a) { const uint16x4_t dHigh = vget_high_u16(vreinterpretq_u16_u32(a)); const uint16x4_t dLow = vmovn_u32(a); const uint16x8_t combined = vcombine_u16(dLow, dHigh); const uint32x2_t finalReduce = vreinterpret_u32_u8(vmovn_u16(combined)); return PxU32((vget_lane_u32(finalReduce, 0) & 0xffFFff) != 0); } } namespace vecMathTests { // PT: this function returns an invalid Vec3V (W!=0.0f) just for unit-testing 'isValidVec3V' PX_FORCE_INLINE Vec3V getInvalidVec3V() { PX_ALIGN(16, PxF32) data[4] = { 1.0f, 1.0f, 1.0f, 1.0f }; return V4LoadA(data); } PX_FORCE_INLINE bool allElementsEqualFloatV(const FloatV a, const FloatV b) { ASSERT_ISVALIDFLOATV(a); ASSERT_ISVALIDFLOATV(b); return vget_lane_u32(vceq_f32(a, b), 0) != 0; } PX_FORCE_INLINE bool allElementsEqualVec3V(const Vec3V a, const Vec3V b) { ASSERT_ISVALIDVEC3V(a); ASSERT_ISVALIDVEC3V(b); return V3AllEq(a, b) != 0; } PX_FORCE_INLINE bool allElementsEqualVec4V(const Vec4V a, const Vec4V b) { return V4AllEq(a, b) != 0; } PX_FORCE_INLINE 
bool allElementsEqualBoolV(const BoolV a, const BoolV b) { return internalUnitNeonSimd::BAllTrue4_R(vceqq_u32(a, b)) != 0; } PX_FORCE_INLINE PxU32 V4U32AllEq(const VecU32V a, const VecU32V b) { return internalUnitNeonSimd::BAllTrue4_R(V4IsEqU32(a, b)); } PX_FORCE_INLINE bool allElementsEqualVecU32V(const VecU32V a, const VecU32V b) { return V4U32AllEq(a, b) != 0; } PX_FORCE_INLINE BoolV V4IsEqI32(const VecI32V a, const VecI32V b) { return vceqq_s32(a, b); } PX_FORCE_INLINE PxU32 V4I32AllEq(const VecI32V a, const VecI32V b) { return internalUnitNeonSimd::BAllTrue4_R(V4IsEqI32(a, b)); } PX_FORCE_INLINE bool allElementsEqualVecI32V(const VecI32V a, const VecI32V b) { return V4I32AllEq(a, b) != 0; } PX_FORCE_INLINE bool allElementsNearEqualFloatV(const FloatV a, const FloatV b) { ASSERT_ISVALIDFLOATV(a); ASSERT_ISVALIDFLOATV(b); const float32x2_t c = vsub_f32(a, b); const float32x2_t error = vdup_n_f32(VECMATH_AOS_EPSILON); // absolute compare abs(error) > abs(c) const uint32x2_t greater = vcagt_f32(error, c); const uint32x2_t min = vpmin_u32(greater, greater); return vget_lane_u32(min, 0) != 0x0; } PX_FORCE_INLINE bool allElementsNearEqualVec3V(const Vec3V a, const Vec3V b) { ASSERT_ISVALIDVEC3V(a); ASSERT_ISVALIDVEC3V(b); const float32x4_t c = vsubq_f32(a, b); const float32x4_t error = vdupq_n_f32(VECMATH_AOS_EPSILON); // absolute compare abs(error) > abs(c) const uint32x4_t greater = vcagtq_f32(error, c); return internalUnitNeonSimd::BAllTrue3_R(greater) != 0; } PX_FORCE_INLINE bool allElementsNearEqualVec4V(const Vec4V a, const Vec4V b) { const float32x4_t c = vsubq_f32(a, b); const float32x4_t error = vdupq_n_f32(VECMATH_AOS_EPSILON); // absolute compare abs(error) > abs(c) const uint32x4_t greater = vcagtq_f32(error, c); return internalUnitNeonSimd::BAllTrue4_R(greater) != 0x0; } } #if 0 // debugging printfs #include <stdio.h> PX_FORCE_INLINE void printVec(const float32x4_t& v, const char* name) { PX_ALIGN(16, float32_t) data[4]; vst1q_f32(data, v); printf("%s: 
(%f, %f, %f, %f)\n", name, data[0], data[1], data[2], data[3]); } PX_FORCE_INLINE void printVec(const float32x2_t& v, const char* name) { PX_ALIGN(16, float32_t) data[2]; vst1_f32(data, v); printf("%s: (%f, %f)\n", name, data[0], data[1]); } PX_FORCE_INLINE void printVec(const uint32x4_t& v, const char* name) { PX_ALIGN(16, uint32_t) data[4]; vst1q_u32(data, v); printf("%s: (0x%x, 0x%x, 0x%x, 0x%x)\n", name, data[0], data[1], data[2], data[3]); } PX_FORCE_INLINE void printVec(const uint16x8_t& v, const char* name) { PX_ALIGN(16, uint16_t) data[8]; vst1q_u16(data, v); printf("%s: (0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n", name, data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7]); } PX_FORCE_INLINE void printVec(const int32x4_t& v, const char* name) { PX_ALIGN(16, int32_t) data[4]; vst1q_s32(data, v); printf("%s: (0x%x, 0x%x, 0x%x, 0x%x)\n", name, data[0], data[1], data[2], data[3]); } PX_FORCE_INLINE void printVec(const int16x8_t& v, const char* name) { PX_ALIGN(16, int16_t) data[8]; vst1q_s16(data, v); printf("%s: (0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n", name, data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7]); } PX_FORCE_INLINE void printVec(const uint16x4_t& v, const char* name) { PX_ALIGN(16, uint16_t) data[4]; vst1_u16(data, v); printf("%s: (0x%x, 0x%x, 0x%x, 0x%x)\n", name, data[0], data[1], data[2], data[3]); } PX_FORCE_INLINE void printVec(const uint32x2_t& v, const char* name) { PX_ALIGN(16, uint32_t) data[2]; vst1_u32(data, v); printf("%s: (0x%x, 0x%x)\n", name, data[0], data[1]); } PX_FORCE_INLINE void printVar(const PxU32 v, const char* name) { printf("%s: 0x%x\n", name, v); } PX_FORCE_INLINE void printVar(const PxF32 v, const char* name) { printf("%s: %f\n", name, v); } #define PRINT_VAR(X) printVar((X), #X) #define PRINT_VEC(X) printVec((X), #X) #define PRINT_VEC_TITLE(TITLE, X) printVec((X), TITLE #X) #endif // debugging printf 
///////////////////////////////////////////////////////////////////// ////FUNCTIONS USED ONLY FOR ASSERTS IN VECTORISED IMPLEMENTATIONS ///////////////////////////////////////////////////////////////////// PX_FORCE_INLINE bool isFiniteFloatV(const FloatV a) { PX_ALIGN(16, PxF32) data[4]; vst1_f32(reinterpret_cast<float32_t*>(data), a); return PxIsFinite(data[0]) && PxIsFinite(data[1]); } PX_FORCE_INLINE bool isFiniteVec3V(const Vec3V a) { PX_ALIGN(16, PxF32) data[4]; vst1q_f32(reinterpret_cast<float32_t*>(data), a); return PxIsFinite(data[0]) && PxIsFinite(data[1]) && PxIsFinite(data[2]); } PX_FORCE_INLINE bool isFiniteVec4V(const Vec4V a) { PX_ALIGN(16, PxF32) data[4]; vst1q_f32(reinterpret_cast<float32_t*>(data), a); return PxIsFinite(data[0]) && PxIsFinite(data[1]) && PxIsFinite(data[2]) && PxIsFinite(data[3]); } PX_FORCE_INLINE bool hasZeroElementinFloatV(const FloatV a) { ASSERT_ISVALIDFLOATV(a); return vget_lane_u32(vreinterpret_u32_f32(a), 0) == 0; } PX_FORCE_INLINE bool hasZeroElementInVec3V(const Vec3V a) { const uint32x2_t dLow = vget_low_u32(vreinterpretq_u32_f32(a)); const uint32x2_t dMin = vpmin_u32(dLow, dLow); return vget_lane_u32(dMin, 0) == 0 || vgetq_lane_u32(vreinterpretq_u32_f32(a), 2) == 0; } PX_FORCE_INLINE bool hasZeroElementInVec4V(const Vec4V a) { const uint32x2_t dHigh = vget_high_u32(vreinterpretq_u32_f32(a)); const uint32x2_t dLow = vget_low_u32(vreinterpretq_u32_f32(a)); const uint32x2_t dMin = vmin_u32(dHigh, dLow); const uint32x2_t pairMin = vpmin_u32(dMin, dMin); return vget_lane_u32(pairMin, 0) == 0; } ///////////////////////////////////////////////////////////////////// ////VECTORISED FUNCTION IMPLEMENTATIONS ///////////////////////////////////////////////////////////////////// PX_FORCE_INLINE FloatV FLoad(const PxF32 f) { return vdup_n_f32(reinterpret_cast<const float32_t&>(f)); } PX_FORCE_INLINE FloatV FLoadA(const PxF32* const f) { ASSERT_ISALIGNED16(f); return vld1_f32(reinterpret_cast<const float32_t*>(f)); } PX_FORCE_INLINE 
Vec3V V3Load(const PxF32 f) { PX_ALIGN(16, PxF32) data[4] = { f, f, f, 0.0f }; return V4LoadA(data); } PX_FORCE_INLINE Vec4V V4Load(const PxF32 f) { return vdupq_n_f32(reinterpret_cast<const float32_t&>(f)); } PX_FORCE_INLINE BoolV BLoad(const bool f) { const PxU32 i = static_cast<PxU32>(-(static_cast<PxI32>(f))); return vdupq_n_u32(i); } PX_FORCE_INLINE Vec3V V3LoadA(const PxVec3& f) { ASSERT_ISALIGNED16(&f); PX_ALIGN(16, PxF32) data[4] = { f.x, f.y, f.z, 0.0f }; return V4LoadA(data); } PX_FORCE_INLINE Vec3V V3LoadU(const PxVec3& f) { PX_ALIGN(16, PxF32) data[4] = { f.x, f.y, f.z, 0.0f }; return V4LoadA(data); } PX_FORCE_INLINE Vec3V V3LoadUnsafeA(const PxVec3& f) { ASSERT_ISALIGNED16(&f); PX_ALIGN(16, PxF32) data[4] = { f.x, f.y, f.z, 0.0f }; return V4LoadA(data); } PX_FORCE_INLINE Vec3V V3LoadA(const PxF32* f) { ASSERT_ISALIGNED16(f); PX_ALIGN(16, PxF32) data[4] = { f[0], f[1], f[2], 0.0f }; return V4LoadA(data); } PX_FORCE_INLINE Vec3V V3LoadU(const PxF32* f) { PX_ALIGN(16, PxF32) data[4] = { f[0], f[1], f[2], 0.0f }; return V4LoadA(data); } PX_FORCE_INLINE Vec3V Vec3V_From_Vec4V(Vec4V v) { return vsetq_lane_f32(0.0f, v, 3); } PX_FORCE_INLINE Vec3V Vec3V_From_Vec4V_WUndefined(Vec4V v) { return v; } PX_FORCE_INLINE Vec4V Vec4V_From_Vec3V(Vec3V f) { return f; // ok if it is implemented as the same type. 
} PX_FORCE_INLINE Vec4V Vec4V_From_FloatV(FloatV f) { return vcombine_f32(f, f); } PX_FORCE_INLINE Vec3V Vec3V_From_FloatV(FloatV f) { return Vec3V_From_Vec4V(Vec4V_From_FloatV(f)); } PX_FORCE_INLINE Vec3V Vec3V_From_FloatV_WUndefined(FloatV f) { return Vec3V_From_Vec4V_WUndefined(Vec4V_From_FloatV(f)); } PX_FORCE_INLINE Vec4V Vec4V_From_PxVec3_WUndefined(const PxVec3& f) { PX_ALIGN(16, PxF32) data[4] = { f.x, f.y, f.z, 0.0f }; return V4LoadA(data); } PX_FORCE_INLINE Mat33V Mat33V_From_PxMat33(const PxMat33& m) { return Mat33V(V3LoadU(m.column0), V3LoadU(m.column1), V3LoadU(m.column2)); } PX_FORCE_INLINE void PxMat33_From_Mat33V(const Mat33V& m, PxMat33& out) { V3StoreU(m.col0, out.column0); V3StoreU(m.col1, out.column1); V3StoreU(m.col2, out.column2); } PX_FORCE_INLINE Vec4V V4LoadA(const PxF32* const f) { ASSERT_ISALIGNED16(f); return vld1q_f32(reinterpret_cast<const float32_t*>(f)); } PX_FORCE_INLINE void V4StoreA(Vec4V a, PxF32* f) { ASSERT_ISALIGNED16(f); vst1q_f32(reinterpret_cast<float32_t*>(f), a); } PX_FORCE_INLINE void V4StoreU(const Vec4V a, PxF32* f) { PX_ALIGN(16, PxF32) f2[4]; vst1q_f32(reinterpret_cast<float32_t*>(f2), a); f[0] = f2[0]; f[1] = f2[1]; f[2] = f2[2]; f[3] = f2[3]; } PX_FORCE_INLINE void BStoreA(const BoolV a, PxU32* u) { ASSERT_ISALIGNED16(u); vst1q_u32(reinterpret_cast<uint32_t*>(u), a); } PX_FORCE_INLINE void U4StoreA(const VecU32V uv, PxU32* u) { ASSERT_ISALIGNED16(u); vst1q_u32(reinterpret_cast<uint32_t*>(u), uv); } PX_FORCE_INLINE void I4StoreA(const VecI32V iv, PxI32* i) { ASSERT_ISALIGNED16(i); vst1q_s32(reinterpret_cast<int32_t*>(i), iv); } PX_FORCE_INLINE Vec4V V4LoadU(const PxF32* const f) { return vld1q_f32(reinterpret_cast<const float32_t*>(f)); } PX_FORCE_INLINE BoolV BLoad(const bool* const f) { const PX_ALIGN(16, PxU32) b[4] = { static_cast<PxU32>(-static_cast<PxI32>(f[0])), static_cast<PxU32>(-static_cast<PxI32>(f[1])), static_cast<PxU32>(-static_cast<PxI32>(f[2])), static_cast<PxU32>(-static_cast<PxI32>(f[3])) }; return 
vld1q_u32(b); } PX_FORCE_INLINE void FStore(const FloatV a, PxF32* PX_RESTRICT f) { ASSERT_ISVALIDFLOATV(a); // vst1q_lane_f32(f, a, 0); // causes vst1 alignment bug *f = vget_lane_f32(a, 0); } PX_FORCE_INLINE void Store_From_BoolV(const BoolV a, PxU32* PX_RESTRICT f) { *f = vget_lane_u32(vget_low_u32(a), 0); } PX_FORCE_INLINE void V3StoreA(const Vec3V a, PxVec3& f) { ASSERT_ISALIGNED16(&f); PX_ALIGN(16, PxF32) f2[4]; vst1q_f32(reinterpret_cast<float32_t*>(f2), a); f = PxVec3(f2[0], f2[1], f2[2]); } PX_FORCE_INLINE void V3StoreU(const Vec3V a, PxVec3& f) { PX_ALIGN(16, PxF32) f2[4]; vst1q_f32(reinterpret_cast<float32_t*>(f2), a); f = PxVec3(f2[0], f2[1], f2[2]); } ////////////////////////////////// // FLOATV ////////////////////////////////// PX_FORCE_INLINE FloatV FZero() { return FLoad(0.0f); } PX_FORCE_INLINE FloatV FOne() { return FLoad(1.0f); } PX_FORCE_INLINE FloatV FHalf() { return FLoad(0.5f); } PX_FORCE_INLINE FloatV FEps() { return FLoad(PX_EPS_REAL); } PX_FORCE_INLINE FloatV FEps6() { return FLoad(1e-6f); } PX_FORCE_INLINE FloatV FMax() { return FLoad(PX_MAX_REAL); } PX_FORCE_INLINE FloatV FNegMax() { return FLoad(-PX_MAX_REAL); } PX_FORCE_INLINE FloatV IZero() { return vreinterpret_f32_u32(vdup_n_u32(0)); } PX_FORCE_INLINE FloatV IOne() { return vreinterpret_f32_u32(vdup_n_u32(1)); } PX_FORCE_INLINE FloatV ITwo() { return vreinterpret_f32_u32(vdup_n_u32(2)); } PX_FORCE_INLINE FloatV IThree() { return vreinterpret_f32_u32(vdup_n_u32(3)); } PX_FORCE_INLINE FloatV IFour() { return vreinterpret_f32_u32(vdup_n_u32(4)); } PX_FORCE_INLINE FloatV FNeg(const FloatV f) { ASSERT_ISVALIDFLOATV(f); return vneg_f32(f); } PX_FORCE_INLINE FloatV FAdd(const FloatV a, const FloatV b) { ASSERT_ISVALIDFLOATV(a); ASSERT_ISVALIDFLOATV(b); return vadd_f32(a, b); } PX_FORCE_INLINE FloatV FSub(const FloatV a, const FloatV b) { ASSERT_ISVALIDFLOATV(a); ASSERT_ISVALIDFLOATV(b); return vsub_f32(a, b); } PX_FORCE_INLINE FloatV FMul(const FloatV a, const FloatV b) { 
ASSERT_ISVALIDFLOATV(a); ASSERT_ISVALIDFLOATV(b); return vmul_f32(a, b); } template <int n> PX_FORCE_INLINE float32x2_t recip_newton(const float32x2_t& in) { float32x2_t recip = vrecpe_f32(in); for(int i = 0; i < n; ++i) recip = vmul_f32(recip, vrecps_f32(in, recip)); return recip; } template <int n> PX_FORCE_INLINE float32x4_t recipq_newton(const float32x4_t& in) { float32x4_t recip = vrecpeq_f32(in); for(int i = 0; i < n; ++i) recip = vmulq_f32(recip, vrecpsq_f32(recip, in)); return recip; } template <int n> PX_FORCE_INLINE float32x2_t rsqrt_newton(const float32x2_t& in) { float32x2_t rsqrt = vrsqrte_f32(in); for(int i = 0; i < n; ++i) rsqrt = vmul_f32(rsqrt, vrsqrts_f32(vmul_f32(rsqrt, rsqrt), in)); return rsqrt; } template <int n> PX_FORCE_INLINE float32x4_t rsqrtq_newton(const float32x4_t& in) { float32x4_t rsqrt = vrsqrteq_f32(in); for(int i = 0; i < n; ++i) rsqrt = vmulq_f32(rsqrt, vrsqrtsq_f32(vmulq_f32(rsqrt, rsqrt), in)); return rsqrt; } PX_FORCE_INLINE FloatV FDiv(const FloatV a, const FloatV b) { ASSERT_ISVALIDFLOATV(a); ASSERT_ISVALIDFLOATV(b); return vmul_f32(a, VRECIP(b)); } PX_FORCE_INLINE FloatV FDivFast(const FloatV a, const FloatV b) { ASSERT_ISVALIDFLOATV(a); ASSERT_ISVALIDFLOATV(b); return vmul_f32(a, VRECIPE(b)); } PX_FORCE_INLINE FloatV FRecip(const FloatV a) { ASSERT_ISVALIDFLOATV(a); return VRECIP(a); } PX_FORCE_INLINE FloatV FRecipFast(const FloatV a) { ASSERT_ISVALIDFLOATV(a); return VRECIPE(a); } PX_FORCE_INLINE FloatV FRsqrt(const FloatV a) { ASSERT_ISVALIDFLOATV(a); return VRECIPSQRT(a); } PX_FORCE_INLINE FloatV FSqrt(const FloatV a) { ASSERT_ISVALIDFLOATV(a); return FSel(FIsEq(a, FZero()), a, vmul_f32(a, VRECIPSQRT(a))); } PX_FORCE_INLINE FloatV FRsqrtFast(const FloatV a) { ASSERT_ISVALIDFLOATV(a); return VRECIPSQRTE(a); } PX_FORCE_INLINE FloatV FScaleAdd(const FloatV a, const FloatV b, const FloatV c) { ASSERT_ISVALIDFLOATV(a); ASSERT_ISVALIDFLOATV(b); ASSERT_ISVALIDFLOATV(c); return vmla_f32(c, a, b); } PX_FORCE_INLINE FloatV 
FNegScaleSub(const FloatV a, const FloatV b, const FloatV c) { ASSERT_ISVALIDFLOATV(a); ASSERT_ISVALIDFLOATV(b); ASSERT_ISVALIDFLOATV(c); return vmls_f32(c, a, b); } PX_FORCE_INLINE FloatV FAbs(const FloatV a) { ASSERT_ISVALIDFLOATV(a); return vabs_f32(a); } PX_FORCE_INLINE FloatV FSel(const BoolV c, const FloatV a, const FloatV b) { PX_ASSERT( vecMathTests::allElementsEqualBoolV(c, BTTTT()) || vecMathTests::allElementsEqualBoolV(c, BFFFF())); ASSERT_ISVALIDFLOATV(vbsl_f32(vget_low_u32(c), a, b)); return vbsl_f32(vget_low_u32(c), a, b); } PX_FORCE_INLINE BoolV FIsGrtr(const FloatV a, const FloatV b) { ASSERT_ISVALIDFLOATV(a); ASSERT_ISVALIDFLOATV(b); return vdupq_lane_u32(vcgt_f32(a, b), 0); } PX_FORCE_INLINE BoolV FIsGrtrOrEq(const FloatV a, const FloatV b) { ASSERT_ISVALIDFLOATV(a); ASSERT_ISVALIDFLOATV(b); return vdupq_lane_u32(vcge_f32(a, b), 0); } PX_FORCE_INLINE BoolV FIsEq(const FloatV a, const FloatV b) { ASSERT_ISVALIDFLOATV(a); ASSERT_ISVALIDFLOATV(b); return vdupq_lane_u32(vceq_f32(a, b), 0); } PX_FORCE_INLINE FloatV FMax(const FloatV a, const FloatV b) { //ASSERT_ISVALIDFLOATV(a); //ASSERT_ISVALIDFLOATV(b); return vmax_f32(a, b); } PX_FORCE_INLINE FloatV FMin(const FloatV a, const FloatV b) { //ASSERT_ISVALIDFLOATV(a); //ASSERT_ISVALIDFLOATV(b); return vmin_f32(a, b); } PX_FORCE_INLINE FloatV FClamp(const FloatV a, const FloatV minV, const FloatV maxV) { ASSERT_ISVALIDFLOATV(minV); ASSERT_ISVALIDFLOATV(maxV); return vmax_f32(vmin_f32(a, maxV), minV); } PX_FORCE_INLINE PxU32 FAllGrtr(const FloatV a, const FloatV b) { ASSERT_ISVALIDFLOATV(a); ASSERT_ISVALIDFLOATV(b); return vget_lane_u32(vcgt_f32(a, b), 0); } PX_FORCE_INLINE PxU32 FAllGrtrOrEq(const FloatV a, const FloatV b) { ASSERT_ISVALIDFLOATV(a); ASSERT_ISVALIDFLOATV(b); return vget_lane_u32(vcge_f32(a, b), 0); } PX_FORCE_INLINE PxU32 FAllEq(const FloatV a, const FloatV b) { ASSERT_ISVALIDFLOATV(a); ASSERT_ISVALIDFLOATV(b); return vget_lane_u32(vceq_f32(a, b), 0); } PX_FORCE_INLINE FloatV 
FRound(const FloatV a)
{
	ASSERT_ISVALIDFLOATV(a);
	// truncate(a + (0.5f - sign(a)))
	const float32x2_t half = vdup_n_f32(0.5f);
	// sign bit (bit 31) converted to float: 0.0f for a >= 0, 1.0f for a < 0
	const float32x2_t sign = vcvt_f32_u32((vshr_n_u32(vreinterpret_u32_f32(a), 31)));
	const float32x2_t aPlusHalf = vadd_f32(a, half);
	const float32x2_t aRound = vsub_f32(aPlusHalf, sign);
	// round-trip through a signed int truncates the fractional part
	int32x2_t tmp = vcvt_s32_f32(aRound);
	return vcvt_f32_s32(tmp);
}

// Scalar sine approximation: range-reduce the angle to [-PI, PI), then evaluate
// an odd polynomial up to V^23 using coefficients from the g_PXSinCoefficients* tables.
PX_FORCE_INLINE FloatV FSin(const FloatV a)
{
	ASSERT_ISVALIDFLOATV(a);

	// Modulo the range of the given angles such that -XM_2PI <= Angles < XM_2PI
	const FloatV recipTwoPi = FLoadA(g_PXReciprocalTwoPi.f);
	const FloatV twoPi = FLoadA(g_PXTwoPi.f);
	const FloatV tmp = FMul(a, recipTwoPi);
	const FloatV b = FRound(tmp);
	// V1 = a - b * 2*PI (reduced angle)
	const FloatV V1 = FNegScaleSub(twoPi, b, a);

	// sin(V) ~= V - V^3 / 3! + V^5 / 5! - V^7 / 7! + V^9 / 9! - V^11 / 11! + V^13 / 13! -
	// V^15 / 15! + V^17 / 17! - V^19 / 19! + V^21 / 21! - V^23 / 23! (for -PI <= V < PI)
	const FloatV V2 = FMul(V1, V1);
	const FloatV V3 = FMul(V2, V1);
	const FloatV V5 = FMul(V3, V2);
	const FloatV V7 = FMul(V5, V2);
	const FloatV V9 = FMul(V7, V2);
	const FloatV V11 = FMul(V9, V2);
	const FloatV V13 = FMul(V11, V2);
	const FloatV V15 = FMul(V13, V2);
	const FloatV V17 = FMul(V15, V2);
	const FloatV V19 = FMul(V17, V2);
	const FloatV V21 = FMul(V19, V2);
	const FloatV V23 = FMul(V21, V2);

	const Vec4V sinCoefficients0 = V4LoadA(g_PXSinCoefficients0.f);
	const Vec4V sinCoefficients1 = V4LoadA(g_PXSinCoefficients1.f);
	const Vec4V sinCoefficients2 = V4LoadA(g_PXSinCoefficients2.f);

	const FloatV S1 = V4GetY(sinCoefficients0);
	const FloatV S2 = V4GetZ(sinCoefficients0);
	const FloatV S3 = V4GetW(sinCoefficients0);
	const FloatV S4 = V4GetX(sinCoefficients1);
	const FloatV S5 = V4GetY(sinCoefficients1);
	const FloatV S6 = V4GetZ(sinCoefficients1);
	const FloatV S7 = V4GetW(sinCoefficients1);
	const FloatV S8 = V4GetX(sinCoefficients2);
	const FloatV S9 = V4GetY(sinCoefficients2);
	const FloatV S10 = V4GetZ(sinCoefficients2);
	const FloatV S11 = V4GetW(sinCoefficients2);

	// accumulate the polynomial term by term via fused scale-add
	FloatV Result;
	Result = FScaleAdd(S1, V3, V1);
	Result = FScaleAdd(S2, V5, Result);
	Result = FScaleAdd(S3, V7, Result);
	Result = FScaleAdd(S4, V9, Result);
	Result = FScaleAdd(S5, V11, Result);
	Result = FScaleAdd(S6, V13, Result);
	Result = FScaleAdd(S7, V15, Result);
	Result = FScaleAdd(S8, V17, Result);
	Result = FScaleAdd(S9, V19, Result);
	Result = FScaleAdd(S10, V21, Result);
	Result = FScaleAdd(S11, V23, Result);
	return Result;
}

// Scalar cosine approximation: range-reduce to [-PI, PI), then evaluate an even
// polynomial up to V^22 using coefficients from the g_PXCosCoefficients* tables.
PX_FORCE_INLINE FloatV FCos(const FloatV a)
{
	ASSERT_ISVALIDFLOATV(a);

	// Modulo the range of the given angles such that -XM_2PI <= Angles < XM_2PI
	const FloatV recipTwoPi = FLoadA(g_PXReciprocalTwoPi.f);
	const FloatV twoPi = FLoadA(g_PXTwoPi.f);
	const FloatV tmp = FMul(a, recipTwoPi);
	const FloatV b = FRound(tmp);
	// V1 = a - b * 2*PI (reduced angle)
	const FloatV V1 = FNegScaleSub(twoPi, b, a);

	// cos(V) ~= 1 - V^2 / 2! + V^4 / 4! - V^6 / 6! + V^8 / 8! - V^10 / 10! + V^12 / 12! -
	// V^14 / 14! + V^16 / 16! - V^18 / 18! + V^20 / 20! - V^22 / 22! (for -PI <= V < PI)
	const FloatV V2 = FMul(V1, V1);
	const FloatV V4 = FMul(V2, V2);
	const FloatV V6 = FMul(V4, V2);
	const FloatV V8 = FMul(V4, V4);
	const FloatV V10 = FMul(V6, V4);
	const FloatV V12 = FMul(V6, V6);
	const FloatV V14 = FMul(V8, V6);
	const FloatV V16 = FMul(V8, V8);
	const FloatV V18 = FMul(V10, V8);
	const FloatV V20 = FMul(V10, V10);
	const FloatV V22 = FMul(V12, V10);

	const Vec4V cosCoefficients0 = V4LoadA(g_PXCosCoefficients0.f);
	const Vec4V cosCoefficients1 = V4LoadA(g_PXCosCoefficients1.f);
	const Vec4V cosCoefficients2 = V4LoadA(g_PXCosCoefficients2.f);

	const FloatV C1 = V4GetY(cosCoefficients0);
	const FloatV C2 = V4GetZ(cosCoefficients0);
	const FloatV C3 = V4GetW(cosCoefficients0);
	const FloatV C4 = V4GetX(cosCoefficients1);
	const FloatV C5 = V4GetY(cosCoefficients1);
	const FloatV C6 = V4GetZ(cosCoefficients1);
	const FloatV C7 = V4GetW(cosCoefficients1);
	const FloatV C8 = V4GetX(cosCoefficients2);
	const FloatV C9 = V4GetY(cosCoefficients2);
	const FloatV C10 = V4GetZ(cosCoefficients2);
	const FloatV C11 = V4GetW(cosCoefficients2);

	// accumulate the polynomial term by term via fused scale-add, starting from 1
	FloatV Result;
	Result = FScaleAdd(C1, V2, FOne());
	Result = FScaleAdd(C2, V4, Result);
	Result = FScaleAdd(C3, V6, Result);
	Result = FScaleAdd(C4, V8, Result);
	Result = FScaleAdd(C5, V10, Result);
	Result = FScaleAdd(C6, V12, Result);
	Result = FScaleAdd(C7, V14, Result);
	Result = FScaleAdd(C8, V16, Result);
	Result = FScaleAdd(C9, V18, Result);
	Result = FScaleAdd(C10, V20, Result);
	Result = FScaleAdd(C11, V22, Result);
	return Result;
}

// Returns nonzero if a < min or a > max.
PX_FORCE_INLINE PxU32 FOutOfBounds(const FloatV a, const FloatV min, const FloatV max)
{
	ASSERT_ISVALIDFLOATV(a);
	ASSERT_ISVALIDFLOATV(min);
	ASSERT_ISVALIDFLOATV(max);
	const BoolV c = BOr(FIsGrtr(a, max), FIsGrtr(min, a));
	return PxU32(!BAllEqFFFF(c));
}

// Returns nonzero if min <= a <= max.
PX_FORCE_INLINE PxU32 FInBounds(const FloatV a, const FloatV min, const FloatV max)
{
	ASSERT_ISVALIDFLOATV(a);
	ASSERT_ISVALIDFLOATV(min);
	ASSERT_ISVALIDFLOATV(max);
	const BoolV c = BAnd(FIsGrtrOrEq(a, min), FIsGrtrOrEq(max, a));
	return PxU32(BAllEqTTTT(c));
}

// Symmetric bounds test: nonzero if |a| > bounds (vcagt_f32 compares absolute values).
PX_FORCE_INLINE PxU32 FOutOfBounds(const FloatV a, const FloatV bounds)
{
	ASSERT_ISVALIDFLOATV(a);
	ASSERT_ISVALIDFLOATV(bounds);
	const uint32x2_t greater = vcagt_f32(a, bounds);
	return vget_lane_u32(greater, 0);
}

// Symmetric bounds test: nonzero if |a| <= bounds (vcage_f32 compares absolute values).
PX_FORCE_INLINE PxU32 FInBounds(const FloatV a, const FloatV bounds)
{
	ASSERT_ISVALIDFLOATV(a);
	ASSERT_ISVALIDFLOATV(bounds);
	const uint32x2_t geq = vcage_f32(bounds, a);
	return vget_lane_u32(geq, 0);
}

//////////////////////////////////
// VEC3V
//////////////////////////////////

// Broadcast a scalar into x,y,z; the w lane is masked to zero to satisfy the Vec3V invariant.
PX_FORCE_INLINE Vec3V V3Splat(const FloatV f)
{
	ASSERT_ISVALIDFLOATV(f);
	const uint32x2_t mask = { 0xffffFFFF, 0x0 };
	const uint32x2_t uHigh = vreinterpret_u32_f32(f);
	const float32x2_t dHigh = vreinterpret_f32_u32(vand_u32(uHigh, mask));
	return vcombine_f32(f, dHigh);
}

// Build a Vec3V from three scalars; the high pair is (z, 0) to keep w zeroed.
PX_FORCE_INLINE Vec3V V3Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z)
{
	ASSERT_ISVALIDFLOATV(x);
	ASSERT_ISVALIDFLOATV(y);
	ASSERT_ISVALIDFLOATV(z);
	const uint32x2_t mask = { 0xffffFFFF, 0x0 };
	const uint32x2_t dHigh =
	vand_u32(vreinterpret_u32_f32(z), mask);
	// each FloatV holds its value in both lanes, so vext picks (x, y) from the two low pairs
	const uint32x2_t dLow = vext_u32(vreinterpret_u32_f32(x), vreinterpret_u32_f32(y), 1);
	return vreinterpretq_f32_u32(vcombine_u32(dLow, dHigh));
}

// Unit basis vectors (w = 0).
PX_FORCE_INLINE Vec3V V3UnitX()
{
	const float32x4_t x = { 1.0f, 0.0f, 0.0f, 0.0f };
	return x;
}

PX_FORCE_INLINE Vec3V V3UnitY()
{
	const float32x4_t y = { 0, 1.0f, 0, 0 };
	return y;
}

PX_FORCE_INLINE Vec3V V3UnitZ()
{
	const float32x4_t z = { 0, 0, 1.0f, 0 };
	return z;
}

// Extract a component, replicated into both lanes of the returned FloatV.
PX_FORCE_INLINE FloatV V3GetX(const Vec3V f)
{
	ASSERT_ISVALIDVEC3V(f);
	const float32x2_t fLow = vget_low_f32(f);
	return vdup_lane_f32(fLow, 0);
}

PX_FORCE_INLINE FloatV V3GetY(const Vec3V f)
{
	ASSERT_ISVALIDVEC3V(f);
	const float32x2_t fLow = vget_low_f32(f);
	return vdup_lane_f32(fLow, 1);
}

PX_FORCE_INLINE FloatV V3GetZ(const Vec3V f)
{
	ASSERT_ISVALIDVEC3V(f);
	const float32x2_t fhigh = vget_high_f32(f);
	return vdup_lane_f32(fhigh, 0);
}

// Replace one component; the B*** masks select which lane comes from the splatted f.
PX_FORCE_INLINE Vec3V V3SetX(const Vec3V v, const FloatV f)
{
	ASSERT_ISVALIDVEC3V(v);
	ASSERT_ISVALIDFLOATV(f);
	return V4Sel(BFTTT(), v, vcombine_f32(f, f));
}

PX_FORCE_INLINE Vec3V V3SetY(const Vec3V v, const FloatV f)
{
	ASSERT_ISVALIDVEC3V(v);
	ASSERT_ISVALIDFLOATV(f);
	return V4Sel(BTFTT(), v, vcombine_f32(f, f));
}

PX_FORCE_INLINE Vec3V V3SetZ(const Vec3V v, const FloatV f)
{
	ASSERT_ISVALIDVEC3V(v);
	ASSERT_ISVALIDFLOATV(f);
	return V4Sel(BTTFT(), v, vcombine_f32(f, f));
}

// Gather the X components of a,b,c into (a.x, b.x, c.x, 0) — i.e. a matrix column.
PX_FORCE_INLINE Vec3V V3ColX(const Vec3V a, const Vec3V b, const Vec3V c)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(b);
	ASSERT_ISVALIDVEC3V(c);
	const float32x2_t aLow = vget_low_f32(a);
	const float32x2_t bLow = vget_low_f32(b);
	const float32x2_t cLow = vget_low_f32(c);
	const float32x2_t zero = vdup_n_f32(0.0f);
	const float32x2x2_t zipL = vzip_f32(aLow, bLow);
	const float32x2x2_t zipH = vzip_f32(cLow, zero);
	return vcombine_f32(zipL.val[0], zipH.val[0]);
}

// Gather the Y components of a,b,c into (a.y, b.y, c.y, 0).
PX_FORCE_INLINE Vec3V V3ColY(const Vec3V a, const Vec3V b, const Vec3V c)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(b);
	ASSERT_ISVALIDVEC3V(c);
	const float32x2_t aLow = vget_low_f32(a);
	const float32x2_t bLow = vget_low_f32(b);
	const float32x2_t cLow = vget_low_f32(c);
	const float32x2_t zero = vdup_n_f32(0.0f);
	const float32x2x2_t zipL = vzip_f32(aLow, bLow);
	const float32x2x2_t zipH = vzip_f32(cLow, zero);
	return vcombine_f32(zipL.val[1], zipH.val[1]);
}

// Gather the Z components of a,b,c into (a.z, b.z, c.z, c.w); c.w is 0 by the Vec3V invariant.
PX_FORCE_INLINE Vec3V V3ColZ(const Vec3V a, const Vec3V b, const Vec3V c)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(b);
	ASSERT_ISVALIDVEC3V(c);
	const float32x2_t aHi = vget_high_f32(a);
	const float32x2_t bHi = vget_high_f32(b);
	const float32x2_t cHi = vget_high_f32(c);
	const float32x2x2_t zipL = vzip_f32(aHi, bHi);
	return vcombine_f32(zipL.val[0], cHi);
}

PX_FORCE_INLINE Vec3V V3Zero()
{
	return vdupq_n_f32(0.0f);
}

PX_FORCE_INLINE Vec3V V3Eps()
{
	return V3Load(PX_EPS_REAL);
}

PX_FORCE_INLINE Vec3V V3One()
{
	return V3Load(1.0f);
}

// Component-wise negate; w is re-zeroed since -0.0f would break the invariant's bit pattern.
PX_FORCE_INLINE Vec3V V3Neg(const Vec3V f)
{
	ASSERT_ISVALIDVEC3V(f);
	const float32x4_t tmp = vnegq_f32(f);
	return vsetq_lane_f32(0.0f, tmp, 3);
}

PX_FORCE_INLINE Vec3V V3Add(const Vec3V a, const Vec3V b)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(b);
	return vaddq_f32(a, b);
}

PX_FORCE_INLINE Vec3V V3Add(const Vec3V a, const FloatV b)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDFLOATV(b);
	return vaddq_f32(a, Vec3V_From_FloatV(b));
}

PX_FORCE_INLINE Vec3V V3Sub(const Vec3V a, const Vec3V b)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(b);
	return vsubq_f32(a, b);
}

PX_FORCE_INLINE Vec3V V3Sub(const Vec3V a, const FloatV b)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDFLOATV(b);
	return vsubq_f32(a, Vec3V_From_FloatV(b));
}

// a * scalar b; w re-zeroed after the lane multiply.
PX_FORCE_INLINE Vec3V V3Scale(const Vec3V a, const FloatV b)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDFLOATV(b);
	const float32x4_t tmp = vmulq_lane_f32(a, b, 0);
	return vsetq_lane_f32(0.0f, tmp, 3);
}

PX_FORCE_INLINE Vec3V V3Mul(const Vec3V a, const Vec3V b)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(b);
	return vmulq_f32(a, b);
}

// a / scalar b via Newton-Raphson-refined reciprocal (VRECIP); w re-zeroed.
PX_FORCE_INLINE Vec3V V3ScaleInv(const Vec3V a, const FloatV b)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDFLOATV(b);
	const float32x2_t invB = VRECIP(b);
	const float32x4_t tmp = vmulq_lane_f32(a, invB, 0);
	return vsetq_lane_f32(0.0f, tmp, 3);
}

// Component-wise divide; the reciprocal's w lane is zeroed before the multiply
// so 0 * inf from the zero w of b cannot produce NaN in the result.
PX_FORCE_INLINE Vec3V V3Div(const Vec3V a, const Vec3V b)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(b);
	float32x4_t invB = VRECIPQ(b);
	invB = vsetq_lane_f32(0.0f, invB, 3);
	return vmulq_f32(a, invB);
}

// Fast variant: uses the raw reciprocal estimate (VRECIPE), lower precision.
PX_FORCE_INLINE Vec3V V3ScaleInvFast(const Vec3V a, const FloatV b)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDFLOATV(b);
	const float32x2_t invB = VRECIPE(b);
	const float32x4_t tmp = vmulq_lane_f32(a, invB, 0);
	return vsetq_lane_f32(0.0f, tmp, 3);
}

PX_FORCE_INLINE Vec3V V3DivFast(const Vec3V a, const Vec3V b)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(b);
	float32x4_t invB = VRECIPEQ(b);
	invB = vsetq_lane_f32(0.0f, invB, 3);
	return vmulq_f32(a, invB);
}

PX_FORCE_INLINE Vec3V V3Recip(const Vec3V a)
{
	ASSERT_ISVALIDVEC3V(a);
	const float32x4_t recipA = VRECIPQ(a);
	return vsetq_lane_f32(0.0f, recipA, 3);
}

PX_FORCE_INLINE Vec3V V3RecipFast(const Vec3V a)
{
	ASSERT_ISVALIDVEC3V(a);
	const float32x4_t recipA = VRECIPEQ(a);
	return vsetq_lane_f32(0.0f, recipA, 3);
}

PX_FORCE_INLINE Vec3V V3Rsqrt(const Vec3V a)
{
	ASSERT_ISVALIDVEC3V(a);
	const float32x4_t rSqrA = VRECIPSQRTQ(a);
	return vsetq_lane_f32(0.0f, rSqrA, 3);
}

PX_FORCE_INLINE Vec3V V3RsqrtFast(const Vec3V a)
{
	ASSERT_ISVALIDVEC3V(a);
	const float32x4_t rSqrA = VRECIPSQRTEQ(a);
	return vsetq_lane_f32(0.0f, rSqrA, 3);
}

// a * b + c (b scalar); w cleared through a two-halves rebuild, see workaround note below.
PX_FORCE_INLINE Vec3V V3ScaleAdd(const Vec3V a, const FloatV b, const Vec3V c)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDFLOATV(b);
	ASSERT_ISVALIDVEC3V(c);
	float32x4_t tmp = vmlaq_lane_f32(c, a, b, 0);
	// using vsetq_lane_f32 resulted in failures,
	// probably related to a compiler bug on
	// ndk r9d-win32, gcc 4.8, cardhu/shield
	// code with issue
	// return vsetq_lane_f32(0.0f, tmp, 3);
	// workaround
	float32x2_t w_z = vget_high_f32(tmp);
	float32x2_t y_x = vget_low_f32(tmp);
	w_z = vset_lane_f32(0.0f, w_z, 1);
	return vcombine_f32(y_x, w_z);
}

PX_FORCE_INLINE
Vec3V V3NegScaleSub(const Vec3V a, const FloatV b, const Vec3V c)
{
	// c - a * b (b scalar); same w-clearing workaround as V3ScaleAdd.
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDFLOATV(b);
	ASSERT_ISVALIDVEC3V(c);
	float32x4_t tmp = vmlsq_lane_f32(c, a, b, 0);
	// using vsetq_lane_f32 resulted in failures,
	// probably related to a compiler bug on
	// ndk r9d-win32, gcc 4.8, cardhu/shield
	// code with issue
	// return vsetq_lane_f32(0.0f, tmp, 3);
	// workaround
	float32x2_t w_z = vget_high_f32(tmp);
	float32x2_t y_x = vget_low_f32(tmp);
	w_z = vset_lane_f32(0.0f, w_z, 1);
	return vcombine_f32(y_x, w_z);
}

// a * b + c, component-wise.
PX_FORCE_INLINE Vec3V V3MulAdd(const Vec3V a, const Vec3V b, const Vec3V c)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(b);
	ASSERT_ISVALIDVEC3V(c);
	return vmlaq_f32(c, a, b);
}

// c - a * b, component-wise.
PX_FORCE_INLINE Vec3V V3NegMulSub(const Vec3V a, const Vec3V b, const Vec3V c)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(b);
	ASSERT_ISVALIDVEC3V(c);
	return vmlsq_f32(c, a, b);
}

PX_FORCE_INLINE Vec3V V3Abs(const Vec3V a)
{
	ASSERT_ISVALIDVEC3V(a);
	return vabsq_f32(a);
}

// 3-component dot product; relies on both inputs having w == 0 so the fourth
// product contributes nothing to the pairwise-add reduction.
PX_FORCE_INLINE FloatV V3Dot(const Vec3V a, const Vec3V b)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(b);
	// const uint32x2_t mask = {0xffffFFFF, 0x0};
	const float32x4_t tmp = vmulq_f32(a, b);
	const float32x2_t low = vget_low_f32(tmp);
	const float32x2_t high = vget_high_f32(tmp);
	// const float32x2_t high = vreinterpret_f32_u32(vand_u32(vreinterpret_u32_f32(high_), mask));
	const float32x2_t sumTmp = vpadd_f32(low, high);     // = {0+z, x+y}
	const float32x2_t sum0ZYX = vpadd_f32(sumTmp, sumTmp); // = {x+y+z, x+y+z}
	return sum0ZYX;
}

// Cross product built from 64-bit half-register shuffles; the result's w lane is
// masked to zero at the end to restore the Vec3V invariant.
PX_FORCE_INLINE Vec3V V3Cross(const Vec3V a, const Vec3V b)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(b);
	const uint32x2_t TF = { 0xffffFFFF, 0x0 };
	const float32x2_t ay_ax = vget_low_f32(a);  // d2
	const float32x2_t aw_az = vget_high_f32(a); // d3
	const float32x2_t by_bx = vget_low_f32(b);  // d4
	const float32x2_t bw_bz = vget_high_f32(b); // d5
	// Hi, Lo
	const float32x2_t bz_by = vext_f32(by_bx, bw_bz, 1); // bz, by
	const float32x2_t az_ay = vext_f32(ay_ax, aw_az, 1); // az, ay

	const float32x2_t azbx = vmul_f32(aw_az, by_bx);       // 0, az*bx
	const float32x2_t aybz_axby = vmul_f32(ay_ax, bz_by);  // ay*bz, ax*by

	const float32x2_t azbxSUBaxbz = vmls_f32(azbx, bw_bz, ay_ax);                  // 0, az*bx-ax*bz
	const float32x2_t aybzSUBazby_axbySUBaybx = vmls_f32(aybz_axby, by_bx, az_ay); // ay*bz-az*by, ax*by-ay*bx

	const float32x2_t retLow = vext_f32(aybzSUBazby_axbySUBaybx, azbxSUBaxbz, 1); // az*bx-ax*bz, ay*bz-az*by
	const uint32x2_t retHigh = vand_u32(TF, vreinterpret_u32_f32(aybzSUBazby_axbySUBaybx)); // 0, ax*by-ay*bx

	return vcombine_f32(retLow, vreinterpret_f32_u32(retHigh));
}

// No precomputation needed on NEON; the "prepared" value is the vector itself.
PX_FORCE_INLINE VecCrossV V3PrepareCross(const Vec3V a)
{
	ASSERT_ISVALIDVEC3V(a);
	return a;
}

PX_FORCE_INLINE FloatV V3Length(const Vec3V a)
{
	ASSERT_ISVALIDVEC3V(a);
	// const uint32x2_t mask = {0xffffFFFF, 0x0};
	const float32x4_t tmp = vmulq_f32(a, a);
	const float32x2_t low = vget_low_f32(tmp);
	const float32x2_t high = vget_high_f32(tmp);
	// const float32x2_t high = vreinterpret_f32_u32(vand_u32(vreinterpret_u32_f32(high_), mask));
	const float32x2_t sumTmp = vpadd_f32(low, high);     // = {0+z, x+y}
	const float32x2_t sum0ZYX = vpadd_f32(sumTmp, sumTmp); // = {x+y+z, x+y+z}
	return FSqrt(sum0ZYX);
}

PX_FORCE_INLINE FloatV V3LengthSq(const Vec3V a)
{
	ASSERT_ISVALIDVEC3V(a);
	return V3Dot(a, a);
}

// Exact divide by length; undefined for zero-length input (see V3NormalizeSafe).
PX_FORCE_INLINE Vec3V V3Normalize(const Vec3V a)
{
	ASSERT_ISVALIDVEC3V(a);
	//PX_ASSERT(!FAllEq(V4LengthSq(a), FZero()));
	return V3ScaleInv(a, V3Length(a));
}

// Fast variant: scales by the raw reciprocal-sqrt estimate of |a|^2.
PX_FORCE_INLINE Vec3V V3NormalizeFast(const Vec3V a)
{
	ASSERT_ISVALIDVEC3V(a);
	//PX_ASSERT(!FAllEq(V4LengthSq(a), FZero()));
	return V3Scale(a, VRECIPSQRTE(V3Dot(a, a)));
}

// Returns unsafeReturnValue when |a| == 0 instead of dividing by zero.
PX_FORCE_INLINE Vec3V V3NormalizeSafe(const Vec3V a, const Vec3V unsafeReturnValue)
{
	ASSERT_ISVALIDVEC3V(a);
	const FloatV zero = vdup_n_f32(0.0f);
	const FloatV length = V3Length(a);
	const uint32x4_t isGreaterThanZero = FIsGrtr(length, zero);
	return V3Sel(isGreaterThanZero, V3ScaleInv(a, length), unsafeReturnValue);
}

// Per-lane select: a where c is true, b otherwise.
// NOTE(review): the assert validates the select result (so w stays 0 whichever side
// is chosen) and the select is written twice; the duplicate disappears in release
// builds where ASSERT_ISVALIDVEC3V compiles out.
PX_FORCE_INLINE Vec3V V3Sel(const BoolV c, const Vec3V a, const Vec3V b)
{
	ASSERT_ISVALIDVEC3V( vbslq_f32(c, a, b));
	return vbslq_f32(c, a, b);
}

PX_FORCE_INLINE BoolV V3IsGrtr(const Vec3V a, const Vec3V b)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(b);
	return vcgtq_f32(a, b);
}

PX_FORCE_INLINE BoolV V3IsGrtrOrEq(const Vec3V a, const Vec3V b)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(b);
	return vcgeq_f32(a, b);
}

PX_FORCE_INLINE BoolV V3IsEq(const Vec3V a, const Vec3V b)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(b);
	return vceqq_f32(a, b);
}

PX_FORCE_INLINE Vec3V V3Max(const Vec3V a, const Vec3V b)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(b);
	return vmaxq_f32(a, b);
}

PX_FORCE_INLINE Vec3V V3Min(const Vec3V a, const Vec3V b)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(b);
	return vminq_f32(a, b);
}

// Horizontal max of x,y,z; z is duplicated so the zero w lane cannot win.
PX_FORCE_INLINE FloatV V3ExtractMax(const Vec3V a)
{
	ASSERT_ISVALIDVEC3V(a);
	const float32x2_t low = vget_low_f32(a);
	const float32x2_t high = vget_high_f32(a);
	const float32x2_t zz = vdup_lane_f32(high, 0);
	const float32x2_t max0 = vpmax_f32(zz, low);
	const float32x2_t max1 = vpmax_f32(max0, max0);
	return max1;
}

// Horizontal min of x,y,z; z is duplicated so the zero w lane cannot win.
PX_FORCE_INLINE FloatV V3ExtractMin(const Vec3V a)
{
	ASSERT_ISVALIDVEC3V(a);
	const float32x2_t low = vget_low_f32(a);
	const float32x2_t high = vget_high_f32(a);
	const float32x2_t zz = vdup_lane_f32(high, 0);
	const float32x2_t min0 = vpmin_f32(zz, low);
	const float32x2_t min1 = vpmin_f32(min0, min0);
	return min1;
}

// return (a >= 0.0f) ?
1.0f : -1.0f;
PX_FORCE_INLINE Vec3V V3Sign(const Vec3V a)
{
	ASSERT_ISVALIDVEC3V(a);
	const Vec3V zero = V3Zero();
	const Vec3V one = V3One();
	const Vec3V none = V3Neg(one);
	return V3Sel(V3IsGrtrOrEq(a, zero), one, none);
}

// Component-wise clamp of a into [minV, maxV].
// NOTE(review): unlike its siblings this does not assert validity of `a` itself — confirm intentional.
PX_FORCE_INLINE Vec3V V3Clamp(const Vec3V a, const Vec3V minV, const Vec3V maxV)
{
	ASSERT_ISVALIDVEC3V(minV);
	ASSERT_ISVALIDVEC3V(maxV);
	return V3Max(V3Min(a, maxV), minV);
}

// Scalar 0/1 results: the comparison runs on all four lanes but only x,y,z are reduced.
PX_FORCE_INLINE PxU32 V3AllGrtr(const Vec3V a, const Vec3V b)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(b);
	return internalUnitNeonSimd::BAllTrue3_R(V4IsGrtr(a, b));
}

PX_FORCE_INLINE PxU32 V3AllGrtrOrEq(const Vec3V a, const Vec3V b)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(b);
	return internalUnitNeonSimd::BAllTrue3_R(V4IsGrtrOrEq(a, b));
}

PX_FORCE_INLINE PxU32 V3AllEq(const Vec3V a, const Vec3V b)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(b);
	return internalUnitNeonSimd::BAllTrue3_R(V4IsEq(a, b));
}

// Vector round-to-nearest, same scheme as the scalar FRound.
PX_FORCE_INLINE Vec3V V3Round(const Vec3V a)
{
	ASSERT_ISVALIDVEC3V(a);
	// truncate(a + (0.5f - sign(a)))
	const Vec3V half = V3Load(0.5f);
	const float32x4_t sign = vcvtq_f32_u32((vshrq_n_u32(vreinterpretq_u32_f32(a), 31)));
	const Vec3V aPlusHalf = V3Add(a, half);
	const Vec3V aRound = V3Sub(aPlusHalf, sign);
	return vcvtq_f32_s32(vcvtq_s32_f32(aRound));
}

// Vector sine, same range reduction + odd polynomial as FSin, evaluated per component.
PX_FORCE_INLINE Vec3V V3Sin(const Vec3V a)
{
	ASSERT_ISVALIDVEC3V(a);

	// Modulo the range of the given angles such that -XM_2PI <= Angles < XM_2PI
	const Vec4V recipTwoPi = V4LoadA(g_PXReciprocalTwoPi.f);
	const Vec4V twoPi = V4LoadA(g_PXTwoPi.f);
	const Vec3V tmp = V4Mul(a, recipTwoPi);
	const Vec3V b = V3Round(tmp);
	const Vec3V V1 = V4NegMulSub(twoPi, b, a);

	// sin(V) ~= V - V^3 / 3! + V^5 / 5! - V^7 / 7! + V^9 / 9! - V^11 / 11! + V^13 / 13! -
	// V^15 / 15! + V^17 / 17! - V^19 / 19! + V^21 / 21! - V^23 / 23! (for -PI <= V < PI)
	const Vec3V V2 = V3Mul(V1, V1);
	const Vec3V V3 = V3Mul(V2, V1);
	const Vec3V V5 = V3Mul(V3, V2);
	const Vec3V V7 = V3Mul(V5, V2);
	const Vec3V V9 = V3Mul(V7, V2);
	const Vec3V V11 = V3Mul(V9, V2);
	const Vec3V V13 = V3Mul(V11, V2);
	const Vec3V V15 = V3Mul(V13, V2);
	const Vec3V V17 = V3Mul(V15, V2);
	const Vec3V V19 = V3Mul(V17, V2);
	const Vec3V V21 = V3Mul(V19, V2);
	const Vec3V V23 = V3Mul(V21, V2);

	const Vec4V sinCoefficients0 = V4LoadA(g_PXSinCoefficients0.f);
	const Vec4V sinCoefficients1 = V4LoadA(g_PXSinCoefficients1.f);
	const Vec4V sinCoefficients2 = V4LoadA(g_PXSinCoefficients2.f);

	const FloatV S1 = V4GetY(sinCoefficients0);
	const FloatV S2 = V4GetZ(sinCoefficients0);
	const FloatV S3 = V4GetW(sinCoefficients0);
	const FloatV S4 = V4GetX(sinCoefficients1);
	const FloatV S5 = V4GetY(sinCoefficients1);
	const FloatV S6 = V4GetZ(sinCoefficients1);
	const FloatV S7 = V4GetW(sinCoefficients1);
	const FloatV S8 = V4GetX(sinCoefficients2);
	const FloatV S9 = V4GetY(sinCoefficients2);
	const FloatV S10 = V4GetZ(sinCoefficients2);
	const FloatV S11 = V4GetW(sinCoefficients2);

	// accumulate the polynomial term by term via fused scale-add
	Vec3V Result;
	Result = V4ScaleAdd(V3, S1, V1);
	Result = V4ScaleAdd(V5, S2, Result);
	Result = V4ScaleAdd(V7, S3, Result);
	Result = V4ScaleAdd(V9, S4, Result);
	Result = V4ScaleAdd(V11, S5, Result);
	Result = V4ScaleAdd(V13, S6, Result);
	Result = V4ScaleAdd(V15, S7, Result);
	Result = V4ScaleAdd(V17, S8, Result);
	Result = V4ScaleAdd(V19, S9, Result);
	Result = V4ScaleAdd(V21, S10, Result);
	Result = V4ScaleAdd(V23, S11, Result);

	return Result;
}

// Vector cosine, same range reduction + even polynomial as FCos; w is cleared at
// the end because the polynomial's constant 1 leaks into the w lane.
PX_FORCE_INLINE Vec3V V3Cos(const Vec3V a)
{
	ASSERT_ISVALIDVEC3V(a);

	// Modulo the range of the given angles such that -XM_2PI <= Angles < XM_2PI
	const Vec4V recipTwoPi = V4LoadA(g_PXReciprocalTwoPi.f);
	const Vec4V twoPi = V4LoadA(g_PXTwoPi.f);
	const Vec3V tmp = V4Mul(a, recipTwoPi);
	const Vec3V b = V3Round(tmp);
	const Vec3V V1 = V4NegMulSub(twoPi, b, a);

	// cos(V) ~= 1 - V^2 / 2! + V^4 / 4! - V^6 / 6! + V^8 / 8! - V^10 / 10! + V^12 / 12! -
	// V^14 / 14! + V^16 / 16! - V^18 / 18! + V^20 / 20! - V^22 / 22! (for -PI <= V < PI)
	const Vec3V V2 = V3Mul(V1, V1);
	const Vec3V V4 = V3Mul(V2, V2);
	const Vec3V V6 = V3Mul(V4, V2);
	const Vec3V V8 = V3Mul(V4, V4);
	const Vec3V V10 = V3Mul(V6, V4);
	const Vec3V V12 = V3Mul(V6, V6);
	const Vec3V V14 = V3Mul(V8, V6);
	const Vec3V V16 = V3Mul(V8, V8);
	const Vec3V V18 = V3Mul(V10, V8);
	const Vec3V V20 = V3Mul(V10, V10);
	const Vec3V V22 = V3Mul(V12, V10);

	const Vec4V cosCoefficients0 = V4LoadA(g_PXCosCoefficients0.f);
	const Vec4V cosCoefficients1 = V4LoadA(g_PXCosCoefficients1.f);
	const Vec4V cosCoefficients2 = V4LoadA(g_PXCosCoefficients2.f);

	const FloatV C1 = V4GetY(cosCoefficients0);
	const FloatV C2 = V4GetZ(cosCoefficients0);
	const FloatV C3 = V4GetW(cosCoefficients0);
	const FloatV C4 = V4GetX(cosCoefficients1);
	const FloatV C5 = V4GetY(cosCoefficients1);
	const FloatV C6 = V4GetZ(cosCoefficients1);
	const FloatV C7 = V4GetW(cosCoefficients1);
	const FloatV C8 = V4GetX(cosCoefficients2);
	const FloatV C9 = V4GetY(cosCoefficients2);
	const FloatV C10 = V4GetZ(cosCoefficients2);
	const FloatV C11 = V4GetW(cosCoefficients2);

	// accumulate the polynomial term by term via fused scale-add, starting from 1
	Vec3V Result;
	Result = V4ScaleAdd(V2, C1, V4One());
	Result = V4ScaleAdd(V4, C2, Result);
	Result = V4ScaleAdd(V6, C3, Result);
	Result = V4ScaleAdd(V8, C4, Result);
	Result = V4ScaleAdd(V10, C5, Result);
	Result = V4ScaleAdd(V12, C6, Result);
	Result = V4ScaleAdd(V14, C7, Result);
	Result = V4ScaleAdd(V16, C8, Result);
	Result = V4ScaleAdd(V18, C9, Result);
	Result = V4ScaleAdd(V20, C10, Result);
	Result = V4ScaleAdd(V22, C11, Result);

	return V4ClearW(Result);
}

// Swizzle (x,y,z) -> (y,z,z); w stays 0 since it comes from the original zw half.
PX_FORCE_INLINE Vec3V V3PermYZZ(const Vec3V a)
{
	ASSERT_ISVALIDVEC3V(a);
	const float32x2_t xy = vget_low_f32(a);
	const float32x2_t zw = vget_high_f32(a);
	const float32x2_t yz = vext_f32(xy, zw, 1);
	return vcombine_f32(yz, zw);
}

// Swizzle (x,y,z) -> (x,y,x); the mask keeps w zeroed.
PX_FORCE_INLINE Vec3V V3PermXYX(const Vec3V a)
{
	ASSERT_ISVALIDVEC3V(a);
	const uint32x2_t mask = { 0xffffFFFF, 0x0 };
	const uint32x2_t xy = vget_low_u32(vreinterpretq_u32_f32(a));
	const uint32x2_t
	xw = vand_u32(xy, mask);
	return vreinterpretq_f32_u32(vcombine_u32(xy, xw));
}

// Swizzle (x,y,z) -> (y,z,x); w zeroed by the mask.
PX_FORCE_INLINE Vec3V V3PermYZX(const Vec3V a)
{
	ASSERT_ISVALIDVEC3V(a);
	const uint32x2_t mask = { 0xffffFFFF, 0x0 };
	const uint32x2_t xy = vget_low_u32(vreinterpretq_u32_f32(a));
	const uint32x2_t zw = vget_high_u32(vreinterpretq_u32_f32(a));
	const uint32x2_t yz = vext_u32(xy, zw, 1);
	const uint32x2_t xw = vand_u32(xy, mask);
	return vreinterpretq_f32_u32(vcombine_u32(yz, xw));
}

// Swizzle (x,y,z) -> (z,x,y); w stays 0 because it rides along from the original zw half.
PX_FORCE_INLINE Vec3V V3PermZXY(const Vec3V a)
{
	ASSERT_ISVALIDVEC3V(a);
	const uint32x2_t xy = vget_low_u32(vreinterpretq_u32_f32(a));
	const uint32x2_t zw = vget_high_u32(vreinterpretq_u32_f32(a));
	const uint32x2_t wz = vrev64_u32(zw);

	const uint32x2_t zx = vext_u32(wz, xy, 1);
	const uint32x2_t yw = vext_u32(xy, wz, 1);

	return vreinterpretq_f32_u32(vcombine_u32(zx, yw));
}

// Swizzle (x,y,z) -> (z,z,y).
PX_FORCE_INLINE Vec3V V3PermZZY(const Vec3V a)
{
	ASSERT_ISVALIDVEC3V(a);
	const uint32x2_t xy = vget_low_u32(vreinterpretq_u32_f32(a));
	const uint32x2_t zw = vget_high_u32(vreinterpretq_u32_f32(a));
	const uint32x2_t wz = vrev64_u32(zw);

	const uint32x2_t yw = vext_u32(xy, wz, 1);
	const uint32x2_t zz = vdup_lane_u32(wz, 1);

	return vreinterpretq_f32_u32(vcombine_u32(zz, yw));
}

// Swizzle (x,y,z) -> (y,x,x); w zeroed by the mask.
PX_FORCE_INLINE Vec3V V3PermYXX(const Vec3V a)
{
	ASSERT_ISVALIDVEC3V(a);
	const uint32x2_t mask = { 0xffffFFFF, 0x0 };
	const uint32x2_t xy = vget_low_u32(vreinterpretq_u32_f32(a));
	const uint32x2_t yx = vrev64_u32(xy);
	const uint32x2_t xw = vand_u32(xy, mask);
	return vreinterpretq_f32_u32(vcombine_u32(yx, xw));
}

// Mixed permute: result = (0, v1.z, v0.y, 0); the leading 0 is v1.w (zero by invariant).
PX_FORCE_INLINE Vec3V V3Perm_Zero_1Z_0Y(const Vec3V v0, const Vec3V v1)
{
	ASSERT_ISVALIDVEC3V(v0);
	ASSERT_ISVALIDVEC3V(v1);
	const uint32x2_t xy = vget_low_u32(vreinterpretq_u32_f32(v0));
	const uint32x2_t zw = vget_high_u32(vreinterpretq_u32_f32(v1));
	const uint32x2_t wz = vrev64_u32(zw);
	const uint32x2_t yw = vext_u32(xy, wz, 1);
	return vreinterpretq_f32_u32(vcombine_u32(wz, yw));
}

// Mixed permute: result = (v0.z, 0, v1.x, 0); the 0 lanes come from v0.w and the mask.
PX_FORCE_INLINE Vec3V V3Perm_0Z_Zero_1X(const Vec3V v0, const Vec3V v1)
{
	ASSERT_ISVALIDVEC3V(v0);
	ASSERT_ISVALIDVEC3V(v1);
	const uint32x2_t mask = { 0xffffFFFF, 0x0 };
	const uint32x2_t zw = vget_high_u32(vreinterpretq_u32_f32(v0));
	const uint32x2_t xy = vget_low_u32(vreinterpretq_u32_f32(v1));
	const uint32x2_t xw = vand_u32(xy, mask);
	return vreinterpretq_f32_u32(vcombine_u32(zw, xw));
}

// Mixed permute: result = (v1.y, v0.x, 0, 0).
PX_FORCE_INLINE Vec3V V3Perm_1Y_0X_Zero(const Vec3V v0, const Vec3V v1)
{
	ASSERT_ISVALIDVEC3V(v0);
	ASSERT_ISVALIDVEC3V(v1);
	const uint32x2_t axy = vget_low_u32(vreinterpretq_u32_f32(v0));
	const uint32x2_t bxy = vget_low_u32(vreinterpretq_u32_f32(v1));
	const uint32x2_t byax = vext_u32(bxy, axy, 1);
	const uint32x2_t ww = vdup_n_u32(0);
	return vreinterpretq_f32_u32(vcombine_u32(byax, ww));
}

// Horizontal sum x+y+z; the zero w lane contributes nothing.
PX_FORCE_INLINE FloatV V3SumElems(const Vec3V a)
{
	ASSERT_ISVALIDVEC3V(a);
	// const uint32x2_t mask = {0xffffFFFF, 0x0};
	const float32x2_t low = vget_low_f32(a);
	const float32x2_t high = vget_high_f32(a);
	// const float32x2_t high = vreinterpret_f32_u32(vand_u32(vreinterpret_u32_f32(high_), mask));
	const float32x2_t sumTmp = vpadd_f32(low, high);     // = {0+z, x+y}
	const float32x2_t sum0ZYX = vpadd_f32(sumTmp, sumTmp); // = {x+y+z, x+y+z}
	return sum0ZYX;
}

// Nonzero if any of x,y,z falls outside [min, max] (only three lanes are reduced).
PX_FORCE_INLINE PxU32 V3OutOfBounds(const Vec3V a, const Vec3V min, const Vec3V max)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(min);
	ASSERT_ISVALIDVEC3V(max);
	const BoolV c = BOr(V3IsGrtr(a, max), V3IsGrtr(min, a));
	return internalUnitNeonSimd::BAnyTrue3_R(c);
}

// Nonzero if all of x,y,z lie inside [min, max]; the w lanes compare 0 >= 0 / 0 >= 0,
// so the 4-lane reduction is safe for valid Vec3Vs.
PX_FORCE_INLINE PxU32 V3InBounds(const Vec3V a, const Vec3V min, const Vec3V max)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(min);
	ASSERT_ISVALIDVEC3V(max);
	const BoolV c = BAnd(V3IsGrtrOrEq(a, min), V3IsGrtrOrEq(max, a));
	return internalUnitNeonSimd::BAllTrue4_R(c);
}

// Symmetric form: nonzero if any |component| exceeds bounds.
PX_FORCE_INLINE PxU32 V3OutOfBounds(const Vec3V a, const Vec3V bounds)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(bounds);
	const BoolV greater = V3IsGrtr(V3Abs(a), bounds);
	return internalUnitNeonSimd::BAnyTrue3_R(greater);
}

// Symmetric form: nonzero if all |components| are within bounds.
PX_FORCE_INLINE PxU32 V3InBounds(const Vec3V a, const Vec3V bounds)
{
	ASSERT_ISVALIDVEC3V(a);
	ASSERT_ISVALIDVEC3V(bounds);
	const BoolV greaterOrEq = V3IsGrtrOrEq(bounds, V3Abs(a));
	return internalUnitNeonSimd::BAllTrue4_R(greaterOrEq);
}

// In-place 3x3 transpose of the three column vectors via a 4x4 zip network
// (the fourth, all-zero column is discarded).
PX_FORCE_INLINE void V3Transpose(Vec3V& col0, Vec3V& col1, Vec3V& col2)
{
	ASSERT_ISVALIDVEC3V(col0);
	ASSERT_ISVALIDVEC3V(col1);
	ASSERT_ISVALIDVEC3V(col2);
	Vec3V col3 = V3Zero();
	const float32x4x2_t v0v1 = vzipq_f32(col0, col2);
	const float32x4x2_t v2v3 = vzipq_f32(col1, col3);
	const float32x4x2_t zip0 = vzipq_f32(v0v1.val[0], v2v3.val[0]);
	const float32x4x2_t zip1 = vzipq_f32(v0v1.val[1], v2v3.val[1]);
	col0 = zip0.val[0];
	col1 = zip0.val[1];
	col2 = zip1.val[0];
	// col3 = zip1.val[1];
}

//////////////////////////////////
// VEC4V
//////////////////////////////////

// Broadcast a scalar into all four lanes.
PX_FORCE_INLINE Vec4V V4Splat(const FloatV f)
{
	ASSERT_ISVALIDFLOATV(f);
	return vcombine_f32(f, f);
}

// Build a Vec4V from an array of four scalars.
PX_FORCE_INLINE Vec4V V4Merge(const FloatV* const floatVArray)
{
	ASSERT_ISVALIDFLOATV(floatVArray[0]);
	ASSERT_ISVALIDFLOATV(floatVArray[1]);
	ASSERT_ISVALIDFLOATV(floatVArray[2]);
	ASSERT_ISVALIDFLOATV(floatVArray[3]);
	const uint32x2_t xLow = vreinterpret_u32_f32(floatVArray[0]);
	const uint32x2_t yLow = vreinterpret_u32_f32(floatVArray[1]);
	const uint32x2_t zLow = vreinterpret_u32_f32(floatVArray[2]);
	const uint32x2_t wLow = vreinterpret_u32_f32(floatVArray[3]);
	const uint32x2_t dLow = vext_u32(xLow, yLow, 1);
	const uint32x2_t dHigh = vext_u32(zLow, wLow, 1);
	return vreinterpretq_f32_u32(vcombine_u32(dLow, dHigh));
}

// Build a Vec4V from four scalars.
PX_FORCE_INLINE Vec4V V4Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z, const FloatVArg w)
{
	ASSERT_ISVALIDFLOATV(x);
	ASSERT_ISVALIDFLOATV(y);
	ASSERT_ISVALIDFLOATV(z);
	ASSERT_ISVALIDFLOATV(w);
	const uint32x2_t xLow = vreinterpret_u32_f32(x);
	const uint32x2_t yLow = vreinterpret_u32_f32(y);
	const uint32x2_t zLow = vreinterpret_u32_f32(z);
	const uint32x2_t wLow = vreinterpret_u32_f32(w);
	const uint32x2_t dLow = vext_u32(xLow, yLow, 1);
	const uint32x2_t dHigh = vext_u32(zLow, wLow, 1);
	return
	vreinterpretq_f32_u32(vcombine_u32(dLow, dHigh));
}

// Gather the W lanes of four vectors into (x.w, y.w, z.w, w.w).
PX_FORCE_INLINE Vec4V V4MergeW(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w)
{
	const float32x2_t xx = vget_high_f32(x);
	const float32x2_t yy = vget_high_f32(y);
	const float32x2_t zz = vget_high_f32(z);
	const float32x2_t ww = vget_high_f32(w);
	const float32x2x2_t zipL = vzip_f32(xx, yy);
	const float32x2x2_t zipH = vzip_f32(zz, ww);
	return vcombine_f32(zipL.val[1], zipH.val[1]);
}

// Gather the Z lanes of four vectors into (x.z, y.z, z.z, w.z).
PX_FORCE_INLINE Vec4V V4MergeZ(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w)
{
	const float32x2_t xx = vget_high_f32(x);
	const float32x2_t yy = vget_high_f32(y);
	const float32x2_t zz = vget_high_f32(z);
	const float32x2_t ww = vget_high_f32(w);
	const float32x2x2_t zipL = vzip_f32(xx, yy);
	const float32x2x2_t zipH = vzip_f32(zz, ww);
	return vcombine_f32(zipL.val[0], zipH.val[0]);
}

// Gather the Y lanes of four vectors into (x.y, y.y, z.y, w.y).
PX_FORCE_INLINE Vec4V V4MergeY(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w)
{
	const float32x2_t xx = vget_low_f32(x);
	const float32x2_t yy = vget_low_f32(y);
	const float32x2_t zz = vget_low_f32(z);
	const float32x2_t ww = vget_low_f32(w);
	const float32x2x2_t zipL = vzip_f32(xx, yy);
	const float32x2x2_t zipH = vzip_f32(zz, ww);
	return vcombine_f32(zipL.val[1], zipH.val[1]);
}

// Gather the X lanes of four vectors into (x.x, y.x, z.x, w.x).
PX_FORCE_INLINE Vec4V V4MergeX(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w)
{
	const float32x2_t xx = vget_low_f32(x);
	const float32x2_t yy = vget_low_f32(y);
	const float32x2_t zz = vget_low_f32(z);
	const float32x2_t ww = vget_low_f32(w);
	const float32x2x2_t zipL = vzip_f32(xx, yy);
	const float32x2x2_t zipH = vzip_f32(zz, ww);
	return vcombine_f32(zipL.val[0], zipH.val[0]);
}

// Interleave low halves: (a.x, b.x, a.y, b.y).
PX_FORCE_INLINE Vec4V V4UnpackXY(const Vec4VArg a, const Vec4VArg b)
{
	return vzipq_f32(a, b).val[0];
}

// Interleave high halves: (a.z, b.z, a.w, b.w).
PX_FORCE_INLINE Vec4V V4UnpackZW(const Vec4VArg a, const Vec4VArg b)
{
	return vzipq_f32(a, b).val[1];
}

// Unit basis vectors, assembled from (0,0) and (1,1) half-registers via vext.
PX_FORCE_INLINE Vec4V V4UnitW()
{
	const float32x2_t zeros = vreinterpret_f32_u32(vmov_n_u32(0));
	const float32x2_t ones = vmov_n_f32(1.0f);
	const float32x2_t zo = vext_f32(zeros, ones, 1);
	return vcombine_f32(zeros, zo);
}

PX_FORCE_INLINE Vec4V V4UnitX()
{
	const float32x2_t zeros = vreinterpret_f32_u32(vmov_n_u32(0));
	const float32x2_t ones = vmov_n_f32(1.0f);
	const float32x2_t oz = vext_f32(ones, zeros, 1);
	return vcombine_f32(oz, zeros);
}

PX_FORCE_INLINE Vec4V V4UnitY()
{
	const float32x2_t zeros = vreinterpret_f32_u32(vmov_n_u32(0));
	const float32x2_t ones = vmov_n_f32(1.0f);
	const float32x2_t zo = vext_f32(zeros, ones, 1);
	return vcombine_f32(zo, zeros);
}

PX_FORCE_INLINE Vec4V V4UnitZ()
{
	const float32x2_t zeros = vreinterpret_f32_u32(vmov_n_u32(0));
	const float32x2_t ones = vmov_n_f32(1.0f);
	const float32x2_t oz = vext_f32(ones, zeros, 1);
	return vcombine_f32(zeros, oz);
}

// Extract a lane, replicated into both lanes of the returned FloatV.
PX_FORCE_INLINE FloatV V4GetW(const Vec4V f)
{
	const float32x2_t fhigh = vget_high_f32(f);
	return vdup_lane_f32(fhigh, 1);
}

PX_FORCE_INLINE FloatV V4GetX(const Vec4V f)
{
	const float32x2_t fLow = vget_low_f32(f);
	return vdup_lane_f32(fLow, 0);
}

PX_FORCE_INLINE FloatV V4GetY(const Vec4V f)
{
	const float32x2_t fLow = vget_low_f32(f);
	return vdup_lane_f32(fLow, 1);
}

PX_FORCE_INLINE FloatV V4GetZ(const Vec4V f)
{
	const float32x2_t fhigh = vget_high_f32(f);
	return vdup_lane_f32(fhigh, 0);
}

// Replace one lane with the splatted scalar; the B*** masks pick the lane.
PX_FORCE_INLINE Vec4V V4SetW(const Vec4V v, const FloatV f)
{
	ASSERT_ISVALIDFLOATV(f);
	return V4Sel(BTTTF(), v, vcombine_f32(f, f));
}

PX_FORCE_INLINE Vec4V V4SetX(const Vec4V v, const FloatV f)
{
	ASSERT_ISVALIDFLOATV(f);
	return V4Sel(BFTTT(), v, vcombine_f32(f, f));
}

PX_FORCE_INLINE Vec4V V4SetY(const Vec4V v, const FloatV f)
{
	ASSERT_ISVALIDFLOATV(f);
	return V4Sel(BTFTT(), v, vcombine_f32(f, f));
}

PX_FORCE_INLINE Vec4V V4SetZ(const Vec4V v, const FloatV f)
{
	ASSERT_ISVALIDFLOATV(f);
	return V4Sel(BTTFT(), v, vcombine_f32(f, f));
}

// Zero the w lane (used to re-establish the Vec3V invariant).
PX_FORCE_INLINE Vec4V V4ClearW(const Vec4V v)
{
	return V4Sel(BTTTF(), v, V4Zero());
}

// Swizzle (x,y,z,w) -> (y,x,w,z): rotate each 64-bit half.
PX_FORCE_INLINE Vec4V V4PermYXWZ(const Vec4V a)
{
	const float32x2_t xy = vget_low_f32(a);
	const
slower than the hardcoded version /*PX_FORCE_INLINE Vec4V V4PermYZXW(const Vec4V a) { return V4Perm<1, 2, 0, 3>(a); }*/ PX_FORCE_INLINE Vec4V V4Zero() { return vreinterpretq_f32_u32(vmovq_n_u32(0)); // return vmovq_n_f32(0.0f); } PX_FORCE_INLINE Vec4V V4One() { return vmovq_n_f32(1.0f); } PX_FORCE_INLINE Vec4V V4Eps() { // return vmovq_n_f32(PX_EPS_REAL); return V4Load(PX_EPS_REAL); } PX_FORCE_INLINE Vec4V V4Neg(const Vec4V f) { return vnegq_f32(f); } PX_FORCE_INLINE Vec4V V4Add(const Vec4V a, const Vec4V b) { return vaddq_f32(a, b); } PX_FORCE_INLINE Vec4V V4Sub(const Vec4V a, const Vec4V b) { return vsubq_f32(a, b); } PX_FORCE_INLINE Vec4V V4Scale(const Vec4V a, const FloatV b) { return vmulq_lane_f32(a, b, 0); } PX_FORCE_INLINE Vec4V V4Mul(const Vec4V a, const Vec4V b) { return vmulq_f32(a, b); } PX_FORCE_INLINE Vec4V V4ScaleInv(const Vec4V a, const FloatV b) { ASSERT_ISVALIDFLOATV(b); const float32x2_t invB = VRECIP(b); return vmulq_lane_f32(a, invB, 0); } PX_FORCE_INLINE Vec4V V4Div(const Vec4V a, const Vec4V b) { const float32x4_t invB = VRECIPQ(b); return vmulq_f32(a, invB); } PX_FORCE_INLINE Vec4V V4ScaleInvFast(const Vec4V a, const FloatV b) { ASSERT_ISVALIDFLOATV(b); const float32x2_t invB = VRECIPE(b); return vmulq_lane_f32(a, invB, 0); } PX_FORCE_INLINE Vec4V V4DivFast(const Vec4V a, const Vec4V b) { const float32x4_t invB = VRECIPEQ(b); return vmulq_f32(a, invB); } PX_FORCE_INLINE Vec4V V4Recip(const Vec4V a) { return VRECIPQ(a); } PX_FORCE_INLINE Vec4V V4RecipFast(const Vec4V a) { return VRECIPEQ(a); } PX_FORCE_INLINE Vec4V V4Rsqrt(const Vec4V a) { return VRECIPSQRTQ(a); } PX_FORCE_INLINE Vec4V V4RsqrtFast(const Vec4V a) { return VRECIPSQRTEQ(a); } PX_FORCE_INLINE Vec4V V4Sqrt(const Vec4V a) { return V4Sel(V4IsEq(a, V4Zero()), a, V4Mul(a, VRECIPSQRTQ(a))); } PX_FORCE_INLINE Vec4V V4ScaleAdd(const Vec4V a, const FloatV b, const Vec4V c) { ASSERT_ISVALIDFLOATV(b); return vmlaq_lane_f32(c, a, b, 0); } PX_FORCE_INLINE Vec4V V4NegScaleSub(const Vec4V a, 
// --- Vec4V fused ops, reductions and geometric helpers ---
// V4NegScaleSub tail (c - a*b via vmls), V4MulAdd/V4NegMulSub, V4Abs,
// V4SumElements (unpack+add reduction), V4Dot / V4Dot3 (horizontal add via two vpadd_f32;
// Dot3 clears W first because the inputs are full 4-lane vectors), V4Cross (vext/vmls lane
// shuffling, W lane masked to 0), V4Length/LengthSq, three Normalize variants
// (exact / estimate rsqrt / zero-length-safe), lane select + comparisons, per-lane
// min/max/clamp, and the PxU32-returning All/Any predicates built on internalUnitNeonSimd.
// NOTE(review): physical lines are mangled (wrapped mid-signature) — kept byte-identical.
const FloatV b, const Vec4V c) { ASSERT_ISVALIDFLOATV(b); return vmlsq_lane_f32(c, a, b, 0); } PX_FORCE_INLINE Vec4V V4MulAdd(const Vec4V a, const Vec4V b, const Vec4V c) { return vmlaq_f32(c, a, b); } PX_FORCE_INLINE Vec4V V4NegMulSub(const Vec4V a, const Vec4V b, const Vec4V c) { return vmlsq_f32(c, a, b); } PX_FORCE_INLINE Vec4V V4Abs(const Vec4V a) { return vabsq_f32(a); } PX_FORCE_INLINE FloatV V4SumElements(const Vec4V a) { const Vec4V xy = V4UnpackXY(a, a); // x,x,y,y const Vec4V zw = V4UnpackZW(a, a); // z,z,w,w const Vec4V xz_yw = V4Add(xy, zw); // x+z,x+z,y+w,y+w const FloatV xz = V4GetX(xz_yw); // x+z const FloatV yw = V4GetZ(xz_yw); // y+w return FAdd(xz, yw); // sum } PX_FORCE_INLINE FloatV V4Dot(const Vec4V a, const Vec4V b) { const float32x4_t tmp = vmulq_f32(a, b); const float32x2_t low = vget_low_f32(tmp); const float32x2_t high = vget_high_f32(tmp); const float32x2_t sumTmp = vpadd_f32(low, high); // = {z+w, x+y} const float32x2_t sumWZYX = vpadd_f32(sumTmp, sumTmp); // = {x+y+z+w, x+y+z+w} return sumWZYX; } PX_FORCE_INLINE FloatV V4Dot3(const Vec4V aa, const Vec4V bb) { // PT: the V3Dot code relies on the fact that W=0 so we can't reuse it as-is, we need to clear W first. // TODO: find a better implementation that does not need to clear W. 
const Vec4V a = V4ClearW(aa); const Vec4V b = V4ClearW(bb); const float32x4_t tmp = vmulq_f32(a, b); const float32x2_t low = vget_low_f32(tmp); const float32x2_t high = vget_high_f32(tmp); const float32x2_t sumTmp = vpadd_f32(low, high); // = {0+z, x+y} const float32x2_t sum0ZYX = vpadd_f32(sumTmp, sumTmp); // = {x+y+z, x+y+z} return sum0ZYX; } PX_FORCE_INLINE Vec4V V4Cross(const Vec4V a, const Vec4V b) { const uint32x2_t TF = { 0xffffFFFF, 0x0 }; const float32x2_t ay_ax = vget_low_f32(a); // d2 const float32x2_t aw_az = vget_high_f32(a); // d3 const float32x2_t by_bx = vget_low_f32(b); // d4 const float32x2_t bw_bz = vget_high_f32(b); // d5 // Hi, Lo const float32x2_t bz_by = vext_f32(by_bx, bw_bz, 1); // bz, by const float32x2_t az_ay = vext_f32(ay_ax, aw_az, 1); // az, ay const float32x2_t azbx = vmul_f32(aw_az, by_bx); // 0, az*bx const float32x2_t aybz_axby = vmul_f32(ay_ax, bz_by); // ay*bz, ax*by const float32x2_t azbxSUBaxbz = vmls_f32(azbx, bw_bz, ay_ax); // 0, az*bx-ax*bz const float32x2_t aybzSUBazby_axbySUBaybx = vmls_f32(aybz_axby, by_bx, az_ay); // ay*bz-az*by, ax*by-ay*bx const float32x2_t retLow = vext_f32(aybzSUBazby_axbySUBaybx, azbxSUBaxbz, 1); // az*bx-ax*bz, ay*bz-az*by const uint32x2_t retHigh = vand_u32(TF, vreinterpret_u32_f32(aybzSUBazby_axbySUBaybx)); // 0, ax*by-ay*bx return vcombine_f32(retLow, vreinterpret_f32_u32(retHigh)); } PX_FORCE_INLINE FloatV V4Length(const Vec4V a) { const float32x4_t tmp = vmulq_f32(a, a); const float32x2_t low = vget_low_f32(tmp); const float32x2_t high = vget_high_f32(tmp); const float32x2_t sumTmp = vpadd_f32(low, high); // = {0+z, x+y} const float32x2_t sumWZYX = vpadd_f32(sumTmp, sumTmp); // = {x+y+z, x+y+z} return FSqrt(sumWZYX); } PX_FORCE_INLINE FloatV V4LengthSq(const Vec4V a) { return V4Dot(a, a); } PX_FORCE_INLINE Vec4V V4Normalize(const Vec4V a) { //PX_ASSERT(!FAllEq(V4LengthSq(a), FZero())); return V4ScaleInv(a, V4Length(a)); } PX_FORCE_INLINE Vec4V V4NormalizeFast(const Vec4V a) { 
//PX_ASSERT(!FAllEq(V4LengthSq(a), FZero())); return V4Scale(a, FRsqrtFast(V4Dot(a, a))); } PX_FORCE_INLINE Vec4V V4NormalizeSafe(const Vec4V a, const Vec4V unsafeReturnValue) { const FloatV zero = FZero(); const FloatV length = V4Length(a); const uint32x4_t isGreaterThanZero = FIsGrtr(length, zero); return V4Sel(isGreaterThanZero, V4ScaleInv(a, length), unsafeReturnValue); } PX_FORCE_INLINE BoolV V4IsEqU32(const VecU32V a, const VecU32V b) { return vceqq_u32(a, b); } PX_FORCE_INLINE Vec4V V4Sel(const BoolV c, const Vec4V a, const Vec4V b) { return vbslq_f32(c, a, b); } PX_FORCE_INLINE BoolV V4IsGrtr(const Vec4V a, const Vec4V b) { return vcgtq_f32(a, b); } PX_FORCE_INLINE BoolV V4IsGrtrOrEq(const Vec4V a, const Vec4V b) { return vcgeq_f32(a, b); } PX_FORCE_INLINE BoolV V4IsEq(const Vec4V a, const Vec4V b) { return vceqq_f32(a, b); } PX_FORCE_INLINE Vec4V V4Max(const Vec4V a, const Vec4V b) { return vmaxq_f32(a, b); } PX_FORCE_INLINE Vec4V V4Min(const Vec4V a, const Vec4V b) { return vminq_f32(a, b); } PX_FORCE_INLINE FloatV V4ExtractMax(const Vec4V a) { const float32x2_t low = vget_low_f32(a); const float32x2_t high = vget_high_f32(a); const float32x2_t max0 = vpmax_f32(high, low); const float32x2_t max1 = vpmax_f32(max0, max0); return max1; } PX_FORCE_INLINE FloatV V4ExtractMin(const Vec4V a) { const float32x2_t low = vget_low_f32(a); const float32x2_t high = vget_high_f32(a); const float32x2_t min0 = vpmin_f32(high, low); const float32x2_t min1 = vpmin_f32(min0, min0); return min1; } PX_FORCE_INLINE Vec4V V4Clamp(const Vec4V a, const Vec4V minV, const Vec4V maxV) { return V4Max(V4Min(a, maxV), minV); } PX_FORCE_INLINE PxU32 V4AllGrtr(const Vec4V a, const Vec4V b) { return internalUnitNeonSimd::BAllTrue4_R(V4IsGrtr(a, b)); } PX_FORCE_INLINE PxU32 V4AllGrtrOrEq(const Vec4V a, const Vec4V b) { return internalUnitNeonSimd::BAllTrue4_R(V4IsGrtrOrEq(a, b)); } PX_FORCE_INLINE PxU32 V4AllGrtrOrEq3(const Vec4V a, const Vec4V b) { return 
// --- Vec4V predicates (tail), rounding, trig approximations, 4x4 transpose ---
// V4Round: truncate(a + (0.5 - sign(a))) where sign(a) is 1.0 for negative lanes
// (sign bit shifted down and converted), so halfway cases round away from the origin
// before the float->int->float truncation round-trip.
// V4Sin/V4Cos: range-reduce to [-PI, PI) by subtracting round(a/2PI)*2PI, then evaluate
// an odd (sin, powers V^3..V^23) / even (cos, powers V^2..V^22) Taylor-style polynomial
// with coefficients streamed from the aligned g_PXSin/CosCoefficients tables via Horner-like
// V4ScaleAdd chains. V4Transpose: classic double-vzipq 4x4 in-place transpose.
// NOTE(review): the sin/cos series comments are split mid-sentence across the physical
// line wraps below — extraction damage, kept byte-identical.
internalUnitNeonSimd::BAllTrue3_R(V4IsGrtrOrEq(a, b)); } PX_FORCE_INLINE PxU32 V4AllEq(const Vec4V a, const Vec4V b) { return internalUnitNeonSimd::BAllTrue4_R(V4IsEq(a, b)); } PX_FORCE_INLINE PxU32 V4AnyGrtr3(const Vec4V a, const Vec4V b) { return internalUnitNeonSimd::BAnyTrue3_R(V4IsGrtr(a, b)); } PX_FORCE_INLINE Vec4V V4Round(const Vec4V a) { // truncate(a + (0.5f - sign(a))) const Vec4V half = V4Load(0.5f); const float32x4_t sign = vcvtq_f32_u32((vshrq_n_u32(vreinterpretq_u32_f32(a), 31))); const Vec4V aPlusHalf = V4Add(a, half); const Vec4V aRound = V4Sub(aPlusHalf, sign); return vcvtq_f32_s32(vcvtq_s32_f32(aRound)); } PX_FORCE_INLINE Vec4V V4Sin(const Vec4V a) { const Vec4V recipTwoPi = V4LoadA(g_PXReciprocalTwoPi.f); const Vec4V twoPi = V4LoadA(g_PXTwoPi.f); const Vec4V tmp = V4Mul(a, recipTwoPi); const Vec4V b = V4Round(tmp); const Vec4V V1 = V4NegMulSub(twoPi, b, a); // sin(V) ~= V - V^3 / 3! + V^5 / 5! - V^7 / 7! + V^9 / 9! - V^11 / 11! + V^13 / 13! - // V^15 / 15! + V^17 / 17! - V^19 / 19! + V^21 / 21! - V^23 / 23! 
(for -PI <= V < PI) const Vec4V V2 = V4Mul(V1, V1); const Vec4V V3 = V4Mul(V2, V1); const Vec4V V5 = V4Mul(V3, V2); const Vec4V V7 = V4Mul(V5, V2); const Vec4V V9 = V4Mul(V7, V2); const Vec4V V11 = V4Mul(V9, V2); const Vec4V V13 = V4Mul(V11, V2); const Vec4V V15 = V4Mul(V13, V2); const Vec4V V17 = V4Mul(V15, V2); const Vec4V V19 = V4Mul(V17, V2); const Vec4V V21 = V4Mul(V19, V2); const Vec4V V23 = V4Mul(V21, V2); const Vec4V sinCoefficients0 = V4LoadA(g_PXSinCoefficients0.f); const Vec4V sinCoefficients1 = V4LoadA(g_PXSinCoefficients1.f); const Vec4V sinCoefficients2 = V4LoadA(g_PXSinCoefficients2.f); const FloatV S1 = V4GetY(sinCoefficients0); const FloatV S2 = V4GetZ(sinCoefficients0); const FloatV S3 = V4GetW(sinCoefficients0); const FloatV S4 = V4GetX(sinCoefficients1); const FloatV S5 = V4GetY(sinCoefficients1); const FloatV S6 = V4GetZ(sinCoefficients1); const FloatV S7 = V4GetW(sinCoefficients1); const FloatV S8 = V4GetX(sinCoefficients2); const FloatV S9 = V4GetY(sinCoefficients2); const FloatV S10 = V4GetZ(sinCoefficients2); const FloatV S11 = V4GetW(sinCoefficients2); Vec4V Result; Result = V4ScaleAdd(V3, S1, V1); Result = V4ScaleAdd(V5, S2, Result); Result = V4ScaleAdd(V7, S3, Result); Result = V4ScaleAdd(V9, S4, Result); Result = V4ScaleAdd(V11, S5, Result); Result = V4ScaleAdd(V13, S6, Result); Result = V4ScaleAdd(V15, S7, Result); Result = V4ScaleAdd(V17, S8, Result); Result = V4ScaleAdd(V19, S9, Result); Result = V4ScaleAdd(V21, S10, Result); Result = V4ScaleAdd(V23, S11, Result); return Result; } PX_FORCE_INLINE Vec4V V4Cos(const Vec4V a) { const Vec4V recipTwoPi = V4LoadA(g_PXReciprocalTwoPi.f); const Vec4V twoPi = V4LoadA(g_PXTwoPi.f); const Vec4V tmp = V4Mul(a, recipTwoPi); const Vec4V b = V4Round(tmp); const Vec4V V1 = V4NegMulSub(twoPi, b, a); // cos(V) ~= 1 - V^2 / 2! + V^4 / 4! - V^6 / 6! + V^8 / 8! - V^10 / 10! + V^12 / 12! - // V^14 / 14! + V^16 / 16! - V^18 / 18! + V^20 / 20! - V^22 / 22! 
(for -PI <= V < PI) const Vec4V V2 = V4Mul(V1, V1); const Vec4V V4 = V4Mul(V2, V2); const Vec4V V6 = V4Mul(V4, V2); const Vec4V V8 = V4Mul(V4, V4); const Vec4V V10 = V4Mul(V6, V4); const Vec4V V12 = V4Mul(V6, V6); const Vec4V V14 = V4Mul(V8, V6); const Vec4V V16 = V4Mul(V8, V8); const Vec4V V18 = V4Mul(V10, V8); const Vec4V V20 = V4Mul(V10, V10); const Vec4V V22 = V4Mul(V12, V10); const Vec4V cosCoefficients0 = V4LoadA(g_PXCosCoefficients0.f); const Vec4V cosCoefficients1 = V4LoadA(g_PXCosCoefficients1.f); const Vec4V cosCoefficients2 = V4LoadA(g_PXCosCoefficients2.f); const FloatV C1 = V4GetY(cosCoefficients0); const FloatV C2 = V4GetZ(cosCoefficients0); const FloatV C3 = V4GetW(cosCoefficients0); const FloatV C4 = V4GetX(cosCoefficients1); const FloatV C5 = V4GetY(cosCoefficients1); const FloatV C6 = V4GetZ(cosCoefficients1); const FloatV C7 = V4GetW(cosCoefficients1); const FloatV C8 = V4GetX(cosCoefficients2); const FloatV C9 = V4GetY(cosCoefficients2); const FloatV C10 = V4GetZ(cosCoefficients2); const FloatV C11 = V4GetW(cosCoefficients2); Vec4V Result; Result = V4ScaleAdd(V2, C1, V4One()); Result = V4ScaleAdd(V4, C2, Result); Result = V4ScaleAdd(V6, C3, Result); Result = V4ScaleAdd(V8, C4, Result); Result = V4ScaleAdd(V10, C5, Result); Result = V4ScaleAdd(V12, C6, Result); Result = V4ScaleAdd(V14, C7, Result); Result = V4ScaleAdd(V16, C8, Result); Result = V4ScaleAdd(V18, C9, Result); Result = V4ScaleAdd(V20, C10, Result); Result = V4ScaleAdd(V22, C11, Result); return Result; } PX_FORCE_INLINE void V4Transpose(Vec4V& col0, Vec4V& col1, Vec4V& col2, Vec4V& col3) { const float32x4x2_t v0v1 = vzipq_f32(col0, col2); const float32x4x2_t v2v3 = vzipq_f32(col1, col3); const float32x4x2_t zip0 = vzipq_f32(v0v1.val[0], v2v3.val[0]); const float32x4x2_t zip1 = vzipq_f32(v0v1.val[1], v2v3.val[1]); col0 = zip0.val[0]; col1 = zip0.val[1]; col2 = zip1.val[0]; col3 = zip1.val[1]; } ////////////////////////////////// // VEC4V ////////////////////////////////// 
// --- BoolV: 4-lane boolean masks (all-ones / all-zeros per 32-bit lane) ---
// The 16 BXXXX() constant generators build every lane combination from two 64-bit halves
// (vmov_n_u32 for 00/11 pairs, vext_u32 to rotate a 0/1 pair into 01 or 10), followed by
// lane masks, BGet*/BSet* lane access, bitwise logic, and the BAllTrue/BAnyTrue reductions
// (narrow 4x32 -> 4x16 -> 8x8, then vceq/vtst against the expected pattern; the *3 variants
// mask out the W lane with 0x00ffFFFF before comparing). BGetBitMask packs the four lane
// flags into bits 0..3 of a scalar via a weighted AND and two pairwise adds.
// NOTE(review): mangled physical lines kept byte-identical.
PX_FORCE_INLINE BoolV BFFFF() { return vmovq_n_u32(0); } PX_FORCE_INLINE BoolV BFFFT() { const uint32x2_t zeros = vmov_n_u32(0); const uint32x2_t ones = vmov_n_u32(0xffffFFFF); const uint32x2_t zo = vext_u32(zeros, ones, 1); return vcombine_u32(zeros, zo); } PX_FORCE_INLINE BoolV BFFTF() { const uint32x2_t zeros = vmov_n_u32(0); const uint32x2_t ones = vmov_n_u32(0xffffFFFF); const uint32x2_t oz = vext_u32(ones, zeros, 1); return vcombine_u32(zeros, oz); } PX_FORCE_INLINE BoolV BFFTT() { const uint32x2_t zeros = vmov_n_u32(0); const uint32x2_t ones = vmov_n_u32(0xffffFFFF); return vcombine_u32(zeros, ones); } PX_FORCE_INLINE BoolV BFTFF() { const uint32x2_t zeros = vmov_n_u32(0); const uint32x2_t ones = vmov_n_u32(0xffffFFFF); const uint32x2_t zo = vext_u32(zeros, ones, 1); return vcombine_u32(zo, zeros); } PX_FORCE_INLINE BoolV BFTFT() { const uint32x2_t zeros = vmov_n_u32(0); const uint32x2_t ones = vmov_n_u32(0xffffFFFF); const uint32x2_t zo = vext_u32(zeros, ones, 1); return vcombine_u32(zo, zo); } PX_FORCE_INLINE BoolV BFTTF() { const uint32x2_t zeros = vmov_n_u32(0); const uint32x2_t ones = vmov_n_u32(0xffffFFFF); const uint32x2_t zo = vext_u32(zeros, ones, 1); const uint32x2_t oz = vext_u32(ones, zeros, 1); return vcombine_u32(zo, oz); } PX_FORCE_INLINE BoolV BFTTT() { const uint32x2_t zeros = vmov_n_u32(0); const uint32x2_t ones = vmov_n_u32(0xffffFFFF); const uint32x2_t zo = vext_u32(zeros, ones, 1); return vcombine_u32(zo, ones); } PX_FORCE_INLINE BoolV BTFFF() { const uint32x2_t zeros = vmov_n_u32(0); const uint32x2_t ones = vmov_n_u32(0xffffFFFF); // const uint32x2_t zo = vext_u32(zeros, ones, 1); const uint32x2_t oz = vext_u32(ones, zeros, 1); return vcombine_u32(oz, zeros); } PX_FORCE_INLINE BoolV BTFFT() { const uint32x2_t zeros = vmov_n_u32(0); const uint32x2_t ones = vmov_n_u32(0xffffFFFF); const uint32x2_t zo = vext_u32(zeros, ones, 1); const uint32x2_t oz = vext_u32(ones, zeros, 1); return vcombine_u32(oz, zo); } PX_FORCE_INLINE BoolV BTFTF() { 
const uint32x2_t zeros = vmov_n_u32(0); const uint32x2_t ones = vmov_n_u32(0xffffFFFF); const uint32x2_t oz = vext_u32(ones, zeros, 1); return vcombine_u32(oz, oz); } PX_FORCE_INLINE BoolV BTFTT() { const uint32x2_t zeros = vmov_n_u32(0); const uint32x2_t ones = vmov_n_u32(0xffffFFFF); const uint32x2_t oz = vext_u32(ones, zeros, 1); return vcombine_u32(oz, ones); } PX_FORCE_INLINE BoolV BTTFF() { const uint32x2_t zeros = vmov_n_u32(0); const uint32x2_t ones = vmov_n_u32(0xffffFFFF); return vcombine_u32(ones, zeros); } PX_FORCE_INLINE BoolV BTTFT() { const uint32x2_t zeros = vmov_n_u32(0); const uint32x2_t ones = vmov_n_u32(0xffffFFFF); const uint32x2_t zo = vext_u32(zeros, ones, 1); return vcombine_u32(ones, zo); } PX_FORCE_INLINE BoolV BTTTF() { const uint32x2_t zeros = vmov_n_u32(0); const uint32x2_t ones = vmov_n_u32(0xffffFFFF); const uint32x2_t oz = vext_u32(ones, zeros, 1); return vcombine_u32(ones, oz); } PX_FORCE_INLINE BoolV BTTTT() { return vmovq_n_u32(0xffffFFFF); } PX_FORCE_INLINE BoolV BXMask() { return BTFFF(); } PX_FORCE_INLINE BoolV BYMask() { return BFTFF(); } PX_FORCE_INLINE BoolV BZMask() { return BFFTF(); } PX_FORCE_INLINE BoolV BWMask() { return BFFFT(); } PX_FORCE_INLINE BoolV BGetX(const BoolV f) { const uint32x2_t fLow = vget_low_u32(f); return vdupq_lane_u32(fLow, 0); } PX_FORCE_INLINE BoolV BGetY(const BoolV f) { const uint32x2_t fLow = vget_low_u32(f); return vdupq_lane_u32(fLow, 1); } PX_FORCE_INLINE BoolV BGetZ(const BoolV f) { const uint32x2_t fHigh = vget_high_u32(f); return vdupq_lane_u32(fHigh, 0); } PX_FORCE_INLINE BoolV BGetW(const BoolV f) { const uint32x2_t fHigh = vget_high_u32(f); return vdupq_lane_u32(fHigh, 1); } PX_FORCE_INLINE BoolV BSetX(const BoolV v, const BoolV f) { return vbslq_u32(BFTTT(), v, f); } PX_FORCE_INLINE BoolV BSetY(const BoolV v, const BoolV f) { return vbslq_u32(BTFTT(), v, f); } PX_FORCE_INLINE BoolV BSetZ(const BoolV v, const BoolV f) { return vbslq_u32(BTTFT(), v, f); } PX_FORCE_INLINE BoolV 
BSetW(const BoolV v, const BoolV f) { return vbslq_u32(BTTTF(), v, f); } PX_FORCE_INLINE BoolV BAnd(const BoolV a, const BoolV b) { return vandq_u32(a, b); } PX_FORCE_INLINE BoolV BNot(const BoolV a) { return vmvnq_u32(a); } PX_FORCE_INLINE BoolV BAndNot(const BoolV a, const BoolV b) { // return vbicq_u32(a, b); return vandq_u32(a, vmvnq_u32(b)); } PX_FORCE_INLINE BoolV BOr(const BoolV a, const BoolV b) { return vorrq_u32(a, b); } PX_FORCE_INLINE BoolV BAllTrue4(const BoolV a) { const uint32x2_t allTrue = vmov_n_u32(0xffffFFFF); const uint16x4_t dHigh = vget_high_u16(vreinterpretq_u16_u32(a)); const uint16x4_t dLow = vmovn_u32(a); uint16x8_t combined = vcombine_u16(dLow, dHigh); const uint32x2_t finalReduce = vreinterpret_u32_u8(vmovn_u16(combined)); const uint32x2_t result = vceq_u32(finalReduce, allTrue); return vdupq_lane_u32(result, 0); } PX_FORCE_INLINE BoolV BAnyTrue4(const BoolV a) { const uint32x2_t allTrue = vmov_n_u32(0xffffFFFF); const uint16x4_t dHigh = vget_high_u16(vreinterpretq_u16_u32(a)); const uint16x4_t dLow = vmovn_u32(a); uint16x8_t combined = vcombine_u16(dLow, dHigh); const uint32x2_t finalReduce = vreinterpret_u32_u8(vmovn_u16(combined)); const uint32x2_t result = vtst_u32(finalReduce, allTrue); return vdupq_lane_u32(result, 0); } PX_FORCE_INLINE BoolV BAllTrue3(const BoolV a) { const uint32x2_t allTrue3 = vmov_n_u32(0x00ffFFFF); const uint16x4_t dHigh = vget_high_u16(vreinterpretq_u16_u32(a)); const uint16x4_t dLow = vmovn_u32(a); uint16x8_t combined = vcombine_u16(dLow, dHigh); const uint32x2_t finalReduce = vreinterpret_u32_u8(vmovn_u16(combined)); const uint32x2_t result = vceq_u32(vand_u32(finalReduce, allTrue3), allTrue3); return vdupq_lane_u32(result, 0); } PX_FORCE_INLINE BoolV BAnyTrue3(const BoolV a) { const uint32x2_t allTrue3 = vmov_n_u32(0x00ffFFFF); const uint16x4_t dHigh = vget_high_u16(vreinterpretq_u16_u32(a)); const uint16x4_t dLow = vmovn_u32(a); uint16x8_t combined = vcombine_u16(dLow, dHigh); const uint32x2_t finalReduce 
= vreinterpret_u32_u8(vmovn_u16(combined)); const uint32x2_t result = vtst_u32(vand_u32(finalReduce, allTrue3), allTrue3); return vdupq_lane_u32(result, 0); } PX_FORCE_INLINE PxU32 BAllEq(const BoolV a, const BoolV b) { const BoolV bTest = vceqq_u32(a, b); return internalUnitNeonSimd::BAllTrue4_R(bTest); } PX_FORCE_INLINE PxU32 BAllEqTTTT(const BoolV a) { return BAllEq(a, BTTTT()); } PX_FORCE_INLINE PxU32 BAllEqFFFF(const BoolV a) { return BAllEq(a, BFFFF()); } PX_FORCE_INLINE PxU32 BGetBitMask(const BoolV a) { static PX_ALIGN(16, const PxU32) bitMaskData[4] = { 1, 2, 4, 8 }; const uint32x4_t bitMask = *(reinterpret_cast<const uint32x4_t*>(bitMaskData)); const uint32x4_t t0 = vandq_u32(a, bitMask); const uint32x2_t t1 = vpadd_u32(vget_low_u32(t0), vget_high_u32(t0)); // Pairwise add (0 + 1), (2 + 3) return PxU32(vget_lane_u32(vpadd_u32(t1, t1), 0)); } ////////////////////////////////// // MAT33V ////////////////////////////////// PX_FORCE_INLINE Vec3V M33MulV3(const Mat33V& a, const Vec3V b) { const FloatV x = V3GetX(b); const FloatV y = V3GetY(b); const FloatV z = V3GetZ(b); const Vec3V v0 = V3Scale(a.col0, x); const Vec3V v1 = V3Scale(a.col1, y); const Vec3V v2 = V3Scale(a.col2, z); const Vec3V v0PlusV1 = V3Add(v0, v1); return V3Add(v0PlusV1, v2); } PX_FORCE_INLINE Vec3V M33TrnspsMulV3(const Mat33V& a, const Vec3V b) { const FloatV x = V3Dot(a.col0, b); const FloatV y = V3Dot(a.col1, b); const FloatV z = V3Dot(a.col2, b); return V3Merge(x, y, z); } PX_FORCE_INLINE Vec3V M33MulV3AddV3(const Mat33V& A, const Vec3V b, const Vec3V c) { const FloatV x = V3GetX(b); const FloatV y = V3GetY(b); const FloatV z = V3GetZ(b); Vec3V result = V3ScaleAdd(A.col0, x, c); result = V3ScaleAdd(A.col1, y, result); return V3ScaleAdd(A.col2, z, result); } PX_FORCE_INLINE Mat33V M33MulM33(const Mat33V& a, const Mat33V& b) { return Mat33V(M33MulV3(a, b.col0), M33MulV3(a, b.col1), M33MulV3(a, b.col2)); } PX_FORCE_INLINE Mat33V M33Add(const Mat33V& a, const Mat33V& b) { return 
Mat33V(V3Add(a.col0, b.col0), V3Add(a.col1, b.col1), V3Add(a.col2, b.col2)); } PX_FORCE_INLINE Mat33V M33Scale(const Mat33V& a, const FloatV& b) { return Mat33V(V3Scale(a.col0, b), V3Scale(a.col1, b), V3Scale(a.col2, b)); } PX_FORCE_INLINE Mat33V M33Inverse(const Mat33V& a) { const float32x2_t zeros = vreinterpret_f32_u32(vmov_n_u32(0)); const BoolV btttf = BTTTF(); const Vec3V cross01 = V3Cross(a.col0, a.col1); const Vec3V cross12 = V3Cross(a.col1, a.col2); const Vec3V cross20 = V3Cross(a.col2, a.col0); const FloatV dot = V3Dot(cross01, a.col2); const FloatV invDet = FRecipFast(dot); const float32x4x2_t merge = vzipq_f32(cross12, cross01); const float32x4_t mergeh = merge.val[0]; const float32x4_t mergel = merge.val[1]; // const Vec3V colInv0 = XMVectorPermute(mergeh,cross20,PxPermuteControl(0,4,1,7)); const float32x4_t colInv0_xxyy = vzipq_f32(mergeh, cross20).val[0]; const float32x4_t colInv0 = vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(colInv0_xxyy), btttf)); // const Vec3V colInv1 = XMVectorPermute(mergeh,cross20,PxPermuteControl(2,5,3,7)); const float32x2_t zw0 = vget_high_f32(mergeh); const float32x2_t xy1 = vget_low_f32(cross20); const float32x2_t yzero1 = vext_f32(xy1, zeros, 1); const float32x2x2_t merge1 = vzip_f32(zw0, yzero1); const float32x4_t colInv1 = vcombine_f32(merge1.val[0], merge1.val[1]); // const Vec3V colInv2 = XMVectorPermute(mergel,cross20,PxPermuteControl(0,6,1,7)); const float32x2_t x0y0 = vget_low_f32(mergel); const float32x2_t z1w1 = vget_high_f32(cross20); const float32x2x2_t merge2 = vzip_f32(x0y0, z1w1); const float32x4_t colInv2 = vcombine_f32(merge2.val[0], merge2.val[1]); return Mat33V(vmulq_lane_f32(colInv0, invDet, 0), vmulq_lane_f32(colInv1, invDet, 0), vmulq_lane_f32(colInv2, invDet, 0)); } PX_FORCE_INLINE Mat33V M33Trnsps(const Mat33V& a) { return Mat33V(V3Merge(V3GetX(a.col0), V3GetX(a.col1), V3GetX(a.col2)), V3Merge(V3GetY(a.col0), V3GetY(a.col1), V3GetY(a.col2)), V3Merge(V3GetZ(a.col0), V3GetZ(a.col1), 
V3GetZ(a.col2))); } PX_FORCE_INLINE Mat33V M33Identity() { return Mat33V(V3UnitX(), V3UnitY(), V3UnitZ()); } PX_FORCE_INLINE Mat33V M33Sub(const Mat33V& a, const Mat33V& b) { return Mat33V(V3Sub(a.col0, b.col0), V3Sub(a.col1, b.col1), V3Sub(a.col2, b.col2)); } PX_FORCE_INLINE Mat33V M33Neg(const Mat33V& a) { return Mat33V(V3Neg(a.col0), V3Neg(a.col1), V3Neg(a.col2)); } PX_FORCE_INLINE Mat33V M33Abs(const Mat33V& a) { return Mat33V(V3Abs(a.col0), V3Abs(a.col1), V3Abs(a.col2)); } PX_FORCE_INLINE Mat33V PromoteVec3V(const Vec3V v) { const BoolV bTFFF = BTFFF(); const BoolV bFTFF = BFTFF(); const BoolV bFFTF = BTFTF(); const Vec3V zero = V3Zero(); return Mat33V(V3Sel(bTFFF, v, zero), V3Sel(bFTFF, v, zero), V3Sel(bFFTF, v, zero)); } PX_FORCE_INLINE Mat33V M33Diagonal(const Vec3VArg d) { const Vec3V x = V3Mul(V3UnitX(), d); const Vec3V y = V3Mul(V3UnitY(), d); const Vec3V z = V3Mul(V3UnitZ(), d); return Mat33V(x, y, z); } ////////////////////////////////// // MAT34V ////////////////////////////////// PX_FORCE_INLINE Vec3V M34MulV3(const Mat34V& a, const Vec3V b) { const FloatV x = V3GetX(b); const FloatV y = V3GetY(b); const FloatV z = V3GetZ(b); const Vec3V v0 = V3Scale(a.col0, x); const Vec3V v1 = V3Scale(a.col1, y); const Vec3V v2 = V3Scale(a.col2, z); const Vec3V v0PlusV1 = V3Add(v0, v1); const Vec3V v0PlusV1Plusv2 = V3Add(v0PlusV1, v2); return V3Add(v0PlusV1Plusv2, a.col3); } PX_FORCE_INLINE Vec3V M34Mul33V3(const Mat34V& a, const Vec3V b) { const FloatV x = V3GetX(b); const FloatV y = V3GetY(b); const FloatV z = V3GetZ(b); const Vec3V v0 = V3Scale(a.col0, x); const Vec3V v1 = V3Scale(a.col1, y); const Vec3V v2 = V3Scale(a.col2, z); const Vec3V v0PlusV1 = V3Add(v0, v1); return V3Add(v0PlusV1, v2); } PX_FORCE_INLINE Vec3V M34TrnspsMul33V3(const Mat34V& a, const Vec3V b) { const FloatV x = V3Dot(a.col0, b); const FloatV y = V3Dot(a.col1, b); const FloatV z = V3Dot(a.col2, b); return V3Merge(x, y, z); } PX_FORCE_INLINE Mat34V M34MulM34(const Mat34V& a, const Mat34V& b) 
// --- Mat34V composition/add/transpose, Mat44V multiply/add/transpose/inverse, VecU32V logic ---
// M44Trnsps: double-vzipq transpose (the commented-out asm is the hand-scheduled original).
// M44Inverse: Intel AN-style cofactor expansion adapted to NEON — vextq_f32(...,2) and
// vrev64q_f32 stand in for the SSE shuffles; determinant reciprocal uses VRECIPE (estimate
// only — NOTE(review): lower precision than a refined divide; confirm acceptable for callers).
// The trailing V4U32Andc avoids vbicq_u32 because of a documented gcc bug (see comment below).
// Mangled physical lines kept byte-identical (the 743/744 wrap falls inside a /* */ block).
{ return Mat34V(M34Mul33V3(a, b.col0), M34Mul33V3(a, b.col1), M34Mul33V3(a, b.col2), M34MulV3(a, b.col3)); } PX_FORCE_INLINE Mat33V M34MulM33(const Mat34V& a, const Mat33V& b) { return Mat33V(M34Mul33V3(a, b.col0), M34Mul33V3(a, b.col1), M34Mul33V3(a, b.col2)); } PX_FORCE_INLINE Mat33V M34Mul33MM34(const Mat34V& a, const Mat34V& b) { return Mat33V(M34Mul33V3(a, b.col0), M34Mul33V3(a, b.col1), M34Mul33V3(a, b.col2)); } PX_FORCE_INLINE Mat34V M34Add(const Mat34V& a, const Mat34V& b) { return Mat34V(V3Add(a.col0, b.col0), V3Add(a.col1, b.col1), V3Add(a.col2, b.col2), V3Add(a.col3, b.col3)); } PX_FORCE_INLINE Mat33V M34Trnsps33(const Mat34V& a) { return Mat33V(V3Merge(V3GetX(a.col0), V3GetX(a.col1), V3GetX(a.col2)), V3Merge(V3GetY(a.col0), V3GetY(a.col1), V3GetY(a.col2)), V3Merge(V3GetZ(a.col0), V3GetZ(a.col1), V3GetZ(a.col2))); } ////////////////////////////////// // MAT44V ////////////////////////////////// PX_FORCE_INLINE Vec4V M44MulV4(const Mat44V& a, const Vec4V b) { const FloatV x = V4GetX(b); const FloatV y = V4GetY(b); const FloatV z = V4GetZ(b); const FloatV w = V4GetW(b); const Vec4V v0 = V4Scale(a.col0, x); const Vec4V v1 = V4Scale(a.col1, y); const Vec4V v2 = V4Scale(a.col2, z); const Vec4V v3 = V4Scale(a.col3, w); const Vec4V v0PlusV1 = V4Add(v0, v1); const Vec4V v0PlusV1Plusv2 = V4Add(v0PlusV1, v2); return V4Add(v0PlusV1Plusv2, v3); } PX_FORCE_INLINE Vec4V M44TrnspsMulV4(const Mat44V& a, const Vec4V b) { return V4Merge(V4Dot(a.col0, b), V4Dot(a.col1, b), V4Dot(a.col2, b), V4Dot(a.col3, b)); } PX_FORCE_INLINE Mat44V M44MulM44(const Mat44V& a, const Mat44V& b) { return Mat44V(M44MulV4(a, b.col0), M44MulV4(a, b.col1), M44MulV4(a, b.col2), M44MulV4(a, b.col3)); } PX_FORCE_INLINE Mat44V M44Add(const Mat44V& a, const Mat44V& b) { return Mat44V(V4Add(a.col0, b.col0), V4Add(a.col1, b.col1), V4Add(a.col2, b.col2), V4Add(a.col3, b.col3)); } PX_FORCE_INLINE Mat44V M44Trnsps(const Mat44V& a) { // asm volatile( // "vzip.f32 %q0, %q2 \n\t" // "vzip.f32 %q1, %q3 \n\t" 
// "vzip.f32 %q0, %q1 \n\t" // "vzip.f32 %q2, %q3 \n\t" // : "+w" (a.col0), "+w" (a.col1), "+w" (a.col2), "+w" a.col3)); const float32x4x2_t v0v1 = vzipq_f32(a.col0, a.col2); const float32x4x2_t v2v3 = vzipq_f32(a.col1, a.col3); const float32x4x2_t zip0 = vzipq_f32(v0v1.val[0], v2v3.val[0]); const float32x4x2_t zip1 = vzipq_f32(v0v1.val[1], v2v3.val[1]); return Mat44V(zip0.val[0], zip0.val[1], zip1.val[0], zip1.val[1]); } PX_FORCE_INLINE Mat44V M44Inverse(const Mat44V& a) { float32x4_t minor0, minor1, minor2, minor3; float32x4_t row0, row1, row2, row3; float32x4_t det, tmp1; tmp1 = vmovq_n_f32(0.0f); row1 = vmovq_n_f32(0.0f); row3 = vmovq_n_f32(0.0f); row0 = a.col0; row1 = vextq_f32(a.col1, a.col1, 2); row2 = a.col2; row3 = vextq_f32(a.col3, a.col3, 2); tmp1 = vmulq_f32(row2, row3); tmp1 = vrev64q_f32(tmp1); minor0 = vmulq_f32(row1, tmp1); minor1 = vmulq_f32(row0, tmp1); tmp1 = vextq_f32(tmp1, tmp1, 2); minor0 = vsubq_f32(vmulq_f32(row1, tmp1), minor0); minor1 = vsubq_f32(vmulq_f32(row0, tmp1), minor1); minor1 = vextq_f32(minor1, minor1, 2); tmp1 = vmulq_f32(row1, row2); tmp1 = vrev64q_f32(tmp1); minor0 = vaddq_f32(vmulq_f32(row3, tmp1), minor0); minor3 = vmulq_f32(row0, tmp1); tmp1 = vextq_f32(tmp1, tmp1, 2); minor0 = vsubq_f32(minor0, vmulq_f32(row3, tmp1)); minor3 = vsubq_f32(vmulq_f32(row0, tmp1), minor3); minor3 = vextq_f32(minor3, minor3, 2); tmp1 = vmulq_f32(vextq_f32(row1, row1, 2), row3); tmp1 = vrev64q_f32(tmp1); row2 = vextq_f32(row2, row2, 2); minor0 = vaddq_f32(vmulq_f32(row2, tmp1), minor0); minor2 = vmulq_f32(row0, tmp1); tmp1 = vextq_f32(tmp1, tmp1, 2); minor0 = vsubq_f32(minor0, vmulq_f32(row2, tmp1)); minor2 = vsubq_f32(vmulq_f32(row0, tmp1), minor2); minor2 = vextq_f32(minor2, minor2, 2); tmp1 = vmulq_f32(row0, row1); tmp1 = vrev64q_f32(tmp1); minor2 = vaddq_f32(vmulq_f32(row3, tmp1), minor2); minor3 = vsubq_f32(vmulq_f32(row2, tmp1), minor3); tmp1 = vextq_f32(tmp1, tmp1, 2); minor2 = vsubq_f32(vmulq_f32(row3, tmp1), minor2); minor3 = 
vsubq_f32(minor3, vmulq_f32(row2, tmp1)); tmp1 = vmulq_f32(row0, row3); tmp1 = vrev64q_f32(tmp1); minor1 = vsubq_f32(minor1, vmulq_f32(row2, tmp1)); minor2 = vaddq_f32(vmulq_f32(row1, tmp1), minor2); tmp1 = vextq_f32(tmp1, tmp1, 2); minor1 = vaddq_f32(vmulq_f32(row2, tmp1), minor1); minor2 = vsubq_f32(minor2, vmulq_f32(row1, tmp1)); tmp1 = vmulq_f32(row0, row2); tmp1 = vrev64q_f32(tmp1); minor1 = vaddq_f32(vmulq_f32(row3, tmp1), minor1); minor3 = vsubq_f32(minor3, vmulq_f32(row1, tmp1)); tmp1 = vextq_f32(tmp1, tmp1, 2); minor1 = vsubq_f32(minor1, vmulq_f32(row3, tmp1)); minor3 = vaddq_f32(vmulq_f32(row1, tmp1), minor3); det = vmulq_f32(row0, minor0); det = vaddq_f32(vextq_f32(det, det, 2), det); det = vaddq_f32(vrev64q_f32(det), det); det = vdupq_lane_f32(VRECIPE(vget_low_f32(det)), 0); minor0 = vmulq_f32(det, minor0); minor1 = vmulq_f32(det, minor1); minor2 = vmulq_f32(det, minor2); minor3 = vmulq_f32(det, minor3); Mat44V invTrans(minor0, minor1, minor2, minor3); return M44Trnsps(invTrans); } PX_FORCE_INLINE Vec4V V4LoadXYZW(const PxF32& x, const PxF32& y, const PxF32& z, const PxF32& w) { const float32x4_t ret = { x, y, z, w }; return ret; } /* PX_FORCE_INLINE VecU16V V4U32PK(VecU32V a, VecU32V b) { return vcombine_u16(vqmovn_u32(a), vqmovn_u32(b)); } */ PX_FORCE_INLINE VecU32V V4U32Sel(const BoolV c, const VecU32V a, const VecU32V b) { return vbslq_u32(c, a, b); } PX_FORCE_INLINE VecU32V V4U32or(VecU32V a, VecU32V b) { return vorrq_u32(a, b); } PX_FORCE_INLINE VecU32V V4U32xor(VecU32V a, VecU32V b) { return veorq_u32(a, b); } PX_FORCE_INLINE VecU32V V4U32and(VecU32V a, VecU32V b) { return vandq_u32(a, b); } PX_FORCE_INLINE VecU32V V4U32Andc(VecU32V a, VecU32V b) { // return vbicq_u32(a, b); // creates gcc compiler bug in RTreeQueries.cpp return vandq_u32(a, vmvnq_u32(b)); } /* PX_FORCE_INLINE VecU16V V4U16Or(VecU16V a, VecU16V b) { return vorrq_u16(a, b); } */ /* PX_FORCE_INLINE VecU16V V4U16And(VecU16V a, VecU16V b) { return vandq_u16(a, b); } */ /* 
// --- VecI32V: signed 32-bit integer lane operations ---
// Loads (immediate / unaligned / aligned — both map to vld1q_s32 here), add/sub,
// signed comparisons, lane select, small constants, shifts (right shift implemented as a
// left shift by the negated count, per NEON's vshlq semantics), bitwise and/or, lane
// broadcasts, VecI32V_Merge (x lanes of four vectors gathered via vext), and
// reinterpret bridges to/from BoolV. Mangled physical lines kept byte-identical
// (this block's leading fragment closes a commented-out V4U16Andc).
PX_FORCE_INLINE VecU16V V4U16Andc(VecU16V a, VecU16V b) { return vbicq_u16(a, b); } */ PX_FORCE_INLINE VecI32V I4LoadXYZW(const PxI32& x, const PxI32& y, const PxI32& z, const PxI32& w) { const int32x4_t ret = { x, y, z, w }; return ret; } PX_FORCE_INLINE VecI32V I4Load(const PxI32 i) { return vdupq_n_s32(i); } PX_FORCE_INLINE VecI32V I4LoadU(const PxI32* i) { return vld1q_s32(i); } PX_FORCE_INLINE VecI32V I4LoadA(const PxI32* i) { return vld1q_s32(i); } PX_FORCE_INLINE VecI32V VecI32V_Add(const VecI32VArg a, const VecI32VArg b) { return vaddq_s32(a, b); } PX_FORCE_INLINE VecI32V VecI32V_Sub(const VecI32VArg a, const VecI32VArg b) { return vsubq_s32(a, b); } PX_FORCE_INLINE BoolV VecI32V_IsGrtr(const VecI32VArg a, const VecI32VArg b) { return vcgtq_s32(a, b); } PX_FORCE_INLINE BoolV VecI32V_IsEq(const VecI32VArg a, const VecI32VArg b) { return vceqq_s32(a, b); } PX_FORCE_INLINE VecI32V V4I32Sel(const BoolV c, const VecI32V a, const VecI32V b) { return vbslq_s32(c, a, b); } PX_FORCE_INLINE VecI32V VecI32V_Zero() { return vdupq_n_s32(0); } PX_FORCE_INLINE VecI32V VecI32V_One() { return vdupq_n_s32(1); } PX_FORCE_INLINE VecI32V VecI32V_Two() { return vdupq_n_s32(2); } PX_FORCE_INLINE VecI32V VecI32V_MinusOne() { return vdupq_n_s32(-1); } PX_FORCE_INLINE VecU32V U4Zero() { return U4Load(0); } PX_FORCE_INLINE VecU32V U4One() { return U4Load(1); } PX_FORCE_INLINE VecU32V U4Two() { return U4Load(2); } PX_FORCE_INLINE VecShiftV VecI32V_PrepareShift(const VecI32VArg shift) { return shift; } PX_FORCE_INLINE VecI32V VecI32V_LeftShift(const VecI32VArg a, const VecShiftVArg count) { return vshlq_s32(a, count); } PX_FORCE_INLINE VecI32V VecI32V_RightShift(const VecI32VArg a, const VecShiftVArg count) { return vshlq_s32(a, VecI32V_Sub(I4Load(0), count)); } PX_FORCE_INLINE VecI32V VecI32V_LeftShift(const VecI32VArg a, const PxU32 count) { const int32x4_t shiftCount = { (PxI32)count, (PxI32)count, (PxI32)count, (PxI32)count }; return vshlq_s32(a, shiftCount); } PX_FORCE_INLINE 
VecI32V VecI32V_RightShift(const VecI32VArg a, const PxU32 count) { const int32x4_t shiftCount = { -(PxI32)count, -(PxI32)count, -(PxI32)count, -(PxI32)count }; return vshlq_s32(a, shiftCount); } PX_FORCE_INLINE VecI32V VecI32V_And(const VecI32VArg a, const VecI32VArg b) { return vandq_s32(a, b); } PX_FORCE_INLINE VecI32V VecI32V_Or(const VecI32VArg a, const VecI32VArg b) { return vorrq_s32(a, b); } PX_FORCE_INLINE VecI32V VecI32V_GetX(const VecI32VArg f) { const int32x2_t fLow = vget_low_s32(f); return vdupq_lane_s32(fLow, 0); } PX_FORCE_INLINE VecI32V VecI32V_GetY(const VecI32VArg f) { const int32x2_t fLow = vget_low_s32(f); return vdupq_lane_s32(fLow, 1); } PX_FORCE_INLINE VecI32V VecI32V_GetZ(const VecI32VArg f) { const int32x2_t fHigh = vget_high_s32(f); return vdupq_lane_s32(fHigh, 0); } PX_FORCE_INLINE VecI32V VecI32V_GetW(const VecI32VArg f) { const int32x2_t fHigh = vget_high_s32(f); return vdupq_lane_s32(fHigh, 1); } PX_FORCE_INLINE VecI32V VecI32V_Sel(const BoolV c, const VecI32VArg a, const VecI32VArg b) { return vbslq_s32(c, a, b); } PX_FORCE_INLINE void PxI32_From_VecI32V(const VecI32VArg a, PxI32* i) { *i = vgetq_lane_s32(a, 0); } PX_FORCE_INLINE VecI32V VecI32V_Merge(const VecI32VArg a, const VecI32VArg b, const VecI32VArg c, const VecI32VArg d) { const int32x2_t aLow = vget_low_s32(a); const int32x2_t bLow = vget_low_s32(b); const int32x2_t cLow = vget_low_s32(c); const int32x2_t dLow = vget_low_s32(d); const int32x2_t low = vext_s32(aLow, bLow, 1); const int32x2_t high = vext_s32(cLow, dLow, 1); return vcombine_s32(low, high); } PX_FORCE_INLINE VecI32V VecI32V_From_BoolV(const BoolVArg a) { return vreinterpretq_s32_u32(a); } PX_FORCE_INLINE VecU32V VecU32V_From_BoolV(const BoolVArg a) { return a; } /* template<int a> PX_FORCE_INLINE VecI32V V4ISplat() { return vdupq_n_s32(a); } template<PxU32 a> PX_FORCE_INLINE VecU32V V4USplat() { return vdupq_n_u32(a); } */ /* PX_FORCE_INLINE void V4U16StoreAligned(VecU16V val, VecU16V* address) { 
// --- Loads/stores, reinterpret/convert bridges, and lane-splat templates ---
// Aligned/unaligned Vec4V loads both map to vld1q_f32 (NEON tolerates unaligned vld1),
// V4Andc via the safe and+mvn form, u16 compares, value conversions (vcvtq) vs pure bit
// reinterprets (vreinterpretq), then the BSplatElement / V4U32SplatElement / V4SplatElement
// templates. The PX_SWITCH branch replaces the if-chain with explicit specializations to
// work around a compiler issue noted inline; the non-switch if-chain relies on the
// compiler folding the constant `index` (NOTE(review): no return on a fall-through index
// outside 0..3 — callers presumably only instantiate 0..3).
// V4SplatElement<1> is cut off at the end of this chunk by the extraction.
// Mangled physical lines kept byte-identical.
vst1q_u16((uint16_t*)address, val); } */ PX_FORCE_INLINE void V4U32StoreAligned(VecU32V val, VecU32V* address) { vst1q_u32(reinterpret_cast<uint32_t*>(address), val); } PX_FORCE_INLINE Vec4V V4LoadAligned(Vec4V* addr) { return vld1q_f32(reinterpret_cast<float32_t*>(addr)); } PX_FORCE_INLINE Vec4V V4LoadUnaligned(Vec4V* addr) { return vld1q_f32(reinterpret_cast<float32_t*>(addr)); } PX_FORCE_INLINE Vec4V V4Andc(const Vec4V a, const VecU32V b) { return vreinterpretq_f32_u32(V4U32Andc(vreinterpretq_u32_f32(a), b)); } PX_FORCE_INLINE VecU32V V4IsGrtrV32u(const Vec4V a, const Vec4V b) { return V4IsGrtr(a, b); } PX_FORCE_INLINE VecU16V V4U16LoadAligned(VecU16V* addr) { return vld1q_u16(reinterpret_cast<uint16_t*>(addr)); } PX_FORCE_INLINE VecU16V V4U16LoadUnaligned(VecU16V* addr) { return vld1q_u16(reinterpret_cast<uint16_t*>(addr)); } PX_FORCE_INLINE VecU16V V4U16CompareGt(VecU16V a, VecU16V b) { return vcgtq_u16(a, b); } PX_FORCE_INLINE VecU16V V4I16CompareGt(VecI16V a, VecI16V b) { return vcgtq_s16(a, b); } PX_FORCE_INLINE Vec4V Vec4V_From_VecU32V(VecU32V a) { return vcvtq_f32_u32(a); } PX_FORCE_INLINE Vec4V Vec4V_From_VecI32V(VecI32V a) { return vcvtq_f32_s32(a); } PX_FORCE_INLINE VecI32V VecI32V_From_Vec4V(Vec4V a) { return vcvtq_s32_f32(a); } PX_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecU32V(VecU32V a) { return vreinterpretq_f32_u32(a); } PX_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecI32V(VecI32V a) { return vreinterpretq_f32_s32(a); } PX_FORCE_INLINE VecU32V VecU32V_ReinterpretFrom_Vec4V(Vec4V a) { return vreinterpretq_u32_f32(a); } PX_FORCE_INLINE VecI32V VecI32V_ReinterpretFrom_Vec4V(Vec4V a) { return vreinterpretq_s32_f32(a); } #if !PX_SWITCH template <int index> PX_FORCE_INLINE BoolV BSplatElement(BoolV a) { if(index < 2) { return vdupq_lane_u32(vget_low_u32(a), index); } else if(index == 2) { return vdupq_lane_u32(vget_high_u32(a), 0); } else if(index == 3) { return vdupq_lane_u32(vget_high_u32(a), 1); } } #else //workaround for template compile issue 
template <int index> PX_FORCE_INLINE BoolV BSplatElement(BoolV a); template<> PX_FORCE_INLINE BoolV BSplatElement<0>(BoolV a) { return vdupq_lane_u32(vget_low_u32(a), 0); } template<> PX_FORCE_INLINE BoolV BSplatElement<1>(BoolV a) { return vdupq_lane_u32(vget_low_u32(a), 1); } template<> PX_FORCE_INLINE BoolV BSplatElement<2>(BoolV a) { return vdupq_lane_u32(vget_high_u32(a), 0); } template<> PX_FORCE_INLINE BoolV BSplatElement<3>(BoolV a) { return vdupq_lane_u32(vget_high_u32(a), 1); } #endif #if !PX_SWITCH template <int index> PX_FORCE_INLINE VecU32V V4U32SplatElement(VecU32V a) { if(index < 2) { return vdupq_lane_u32(vget_low_u32(a), index); } else if(index == 2) { return vdupq_lane_u32(vget_high_u32(a), 0); } else if(index == 3) { return vdupq_lane_u32(vget_high_u32(a), 1); } } #else //workaround for template compile issue template <int index> PX_FORCE_INLINE VecU32V V4U32SplatElement(VecU32V a); template <> PX_FORCE_INLINE VecU32V V4U32SplatElement<0>(VecU32V a) { return vdupq_lane_u32(vget_low_u32(a), 0); } template <> PX_FORCE_INLINE VecU32V V4U32SplatElement<1>(VecU32V a) { return vdupq_lane_u32(vget_low_u32(a), 1); } template <> PX_FORCE_INLINE VecU32V V4U32SplatElement<2>(VecU32V a) { return vdupq_lane_u32(vget_high_u32(a), 0); } template <> PX_FORCE_INLINE VecU32V V4U32SplatElement<3>(VecU32V a) { return vdupq_lane_u32(vget_high_u32(a), 1); } #endif #if !PX_SWITCH template <int index> PX_FORCE_INLINE Vec4V V4SplatElement(Vec4V a) { if(index < 2) { return vdupq_lane_f32(vget_low_f32(a), index); } else if(index == 2) { return vdupq_lane_f32(vget_high_f32(a), 0); } else if(index == 3) { return vdupq_lane_f32(vget_high_f32(a), 1); } } #else //workaround for template compile issue template <int index> PX_FORCE_INLINE Vec4V V4SplatElement(Vec4V a); template <> PX_FORCE_INLINE Vec4V V4SplatElement<0>(Vec4V a) { return vdupq_lane_f32(vget_low_f32(a), 0); } template <> PX_FORCE_INLINE Vec4V V4SplatElement<1>(Vec4V a) { return vdupq_lane_f32(vget_low_f32(a), 1); 
} template <> PX_FORCE_INLINE Vec4V V4SplatElement<2>(Vec4V a) { return vdupq_lane_f32(vget_high_f32(a), 0); } template <> PX_FORCE_INLINE Vec4V V4SplatElement<3>(Vec4V a) { return vdupq_lane_f32(vget_high_f32(a), 1); } #endif PX_FORCE_INLINE VecU32V U4LoadXYZW(PxU32 x, PxU32 y, PxU32 z, PxU32 w) { const uint32x4_t ret = { x, y, z, w }; return ret; } PX_FORCE_INLINE VecU32V U4Load(const PxU32 i) { return vdupq_n_u32(i); } PX_FORCE_INLINE VecU32V U4LoadU(const PxU32* i) { return vld1q_u32(i); } PX_FORCE_INLINE VecU32V U4LoadA(const PxU32* i) { return vld1q_u32(i); } PX_FORCE_INLINE Vec4V V4Ceil(const Vec4V in) { const float32x4_t ones = vdupq_n_f32(1.0f); const float32x4_t rdToZero = vcvtq_f32_s32(vcvtq_s32_f32(in)); const float32x4_t rdToZeroPlusOne = vaddq_f32(rdToZero, ones); const uint32x4_t gt = vcgtq_f32(in, rdToZero); return vbslq_f32(gt, rdToZeroPlusOne, rdToZero); } PX_FORCE_INLINE Vec4V V4Floor(const Vec4V in) { const float32x4_t ones = vdupq_n_f32(1.0f); const float32x4_t rdToZero = vcvtq_f32_s32(vcvtq_s32_f32(in)); const float32x4_t rdToZeroMinusOne = vsubq_f32(rdToZero, ones); const uint32x4_t lt = vcltq_f32(in, rdToZero); return vbslq_f32(lt, rdToZeroMinusOne, rdToZero); } PX_FORCE_INLINE VecU32V V4ConvertToU32VSaturate(const Vec4V in, PxU32 power) { PX_ASSERT(power == 0 && "Non-zero power not supported in convertToU32VSaturate"); PX_UNUSED(power); // prevent warning in release builds return vcvtq_u32_f32(in); } PX_FORCE_INLINE void QuatGetMat33V(const QuatVArg q, Vec3V& column0, Vec3V& column1, Vec3V& column2) { const FloatV one = FOne(); const FloatV x = V4GetX(q); const FloatV y = V4GetY(q); const FloatV z = V4GetZ(q); const FloatV w = V4GetW(q); const FloatV x2 = FAdd(x, x); const FloatV y2 = FAdd(y, y); const FloatV z2 = FAdd(z, z); const FloatV xx = FMul(x2, x); const FloatV yy = FMul(y2, y); const FloatV zz = FMul(z2, z); const FloatV xy = FMul(x2, y); const FloatV xz = FMul(x2, z); const FloatV xw = FMul(x2, w); const FloatV yz = FMul(y2, z); 
const FloatV yw = FMul(y2, w); const FloatV zw = FMul(z2, w); const FloatV v = FSub(one, xx); column0 = V3Merge(FSub(FSub(one, yy), zz), FAdd(xy, zw), FSub(xz, yw)); column1 = V3Merge(FSub(xy, zw), FSub(v, zz), FAdd(yz, xw)); column2 = V3Merge(FAdd(xz, yw), FSub(yz, xw), FSub(v, yy)); } } // namespace aos } // namespace physx #endif // PXFOUNDATION_PXUNIXNEONINLINEAOS_H
99,340
C
26.313995
117
0.693849
NVIDIA-Omniverse/PhysX/physx/include/foundation/unix/neon/PxUnixNeonAoS.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PXFOUNDATION_PXUNIXNEONAOS_H #define PXFOUNDATION_PXUNIXNEONAOS_H // no includes here! this file should be included from PxcVecMath.h only!!! #if !COMPILE_VECTOR_INTRINSICS #error Vector intrinsics should not be included when using scalar implementation. 
#endif // only ARM NEON compatible platforms should reach this #include <arm_neon.h> namespace physx { namespace aos { typedef float32x2_t FloatV; typedef float32x4_t Vec3V; typedef float32x4_t Vec4V; typedef uint32x4_t BoolV; typedef float32x4_t QuatV; typedef uint32x4_t VecU32V; typedef int32x4_t VecI32V; typedef uint16x8_t VecU16V; typedef int16x8_t VecI16V; typedef uint8x16_t VecU8V; #define FloatVArg FloatV & #define Vec3VArg Vec3V & #define Vec4VArg Vec4V & #define BoolVArg BoolV & #define VecU32VArg VecU32V & #define VecI32VArg VecI32V & #define VecU16VArg VecU16V & #define VecI16VArg VecI16V & #define VecU8VArg VecU8V & #define QuatVArg QuatV & // KS - TODO - make an actual VecCrossV type for NEON #define VecCrossV Vec3V typedef VecI32V VecShiftV; #define VecShiftVArg VecShiftV & PX_ALIGN_PREFIX(16) struct Mat33V { Mat33V() { } Mat33V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2) : col0(c0), col1(c1), col2(c2) { } Vec3V PX_ALIGN(16, col0); Vec3V PX_ALIGN(16, col1); Vec3V PX_ALIGN(16, col2); } PX_ALIGN_SUFFIX(16); PX_ALIGN_PREFIX(16) struct Mat34V { Mat34V() { } Mat34V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2, const Vec3V& c3) : col0(c0), col1(c1), col2(c2), col3(c3) { } Vec3V PX_ALIGN(16, col0); Vec3V PX_ALIGN(16, col1); Vec3V PX_ALIGN(16, col2); Vec3V PX_ALIGN(16, col3); } PX_ALIGN_SUFFIX(16); PX_ALIGN_PREFIX(16) struct Mat43V { Mat43V() { } Mat43V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2) : col0(c0), col1(c1), col2(c2) { } Vec4V PX_ALIGN(16, col0); Vec4V PX_ALIGN(16, col1); Vec4V PX_ALIGN(16, col2); } PX_ALIGN_SUFFIX(16); PX_ALIGN_PREFIX(16) struct Mat44V { Mat44V() { } Mat44V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2, const Vec4V& c3) : col0(c0), col1(c1), col2(c2), col3(c3) { } Vec4V PX_ALIGN(16, col0); Vec4V PX_ALIGN(16, col1); Vec4V PX_ALIGN(16, col2); Vec4V PX_ALIGN(16, col3); } PX_ALIGN_SUFFIX(16); } // namespace aos } // namespace physx #endif // PXFOUNDATION_PXUNIXNEONAOS_H
3,968
C
27.970803
116
0.736895
NVIDIA-Omniverse/PhysX/physx/include/foundation/windows/PxWindowsIntrinsics.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_WINDOWS_INTRINSICS_H #define PX_WINDOWS_INTRINSICS_H #include "foundation/PxAssert.h" // this file is for internal intrinsics - that is, intrinsics that are used in // cross platform code but do not appear in the API #if !PX_WINDOWS_FAMILY #error "This file should only be included by Windows builds!!" 
#endif #pragma warning(push) //'symbol' is not defined as a preprocessor macro, replacing with '0' for 'directives' #pragma warning(disable : 4668) #if PX_VC == 10 #pragma warning(disable : 4987) // nonstandard extension used: 'throw (...)' #endif #include <intrin.h> #pragma warning(pop) #pragma warning(push) #pragma warning(disable : 4985) // 'symbol name': attributes not present on previous declaration #include <math.h> #pragma warning(pop) #include <float.h> // do not include for ARM target #if !PX_ARM && !PX_A64 #include <mmintrin.h> #endif #pragma intrinsic(_BitScanForward) #pragma intrinsic(_BitScanReverse) #if !PX_DOXYGEN namespace physx { #endif /* * Implements a memory barrier */ PX_FORCE_INLINE void PxMemoryBarrier() { _ReadWriteBarrier(); /* long Barrier; __asm { xchg Barrier, eax }*/ } /*! Returns the index of the highest set bit. Not valid for zero arg. */ PX_FORCE_INLINE uint32_t PxHighestSetBitUnsafe(uint32_t v) { unsigned long retval; _BitScanReverse(&retval, v); return retval; } /*! Returns the index of the highest set bit. Undefined for zero arg. */ PX_FORCE_INLINE uint32_t PxLowestSetBitUnsafe(uint32_t v) { unsigned long retval; _BitScanForward(&retval, v); return retval; } /*! Returns the number of leading zeros in v. Returns 32 for v=0. */ PX_FORCE_INLINE uint32_t PxCountLeadingZeros(uint32_t v) { if(v) { unsigned long bsr = (unsigned long)-1; _BitScanReverse(&bsr, v); return 31 - bsr; } else return 32; } /*! Prefetch aligned cache size around \c ptr+offset. */ #if !PX_ARM && !PX_A64 PX_FORCE_INLINE void PxPrefetchLine(const void* ptr, uint32_t offset = 0) { // cache line on X86/X64 is 64-bytes so a 128-byte prefetch would require 2 prefetches. 
// However, we can only dispatch a limited number of prefetch instructions so we opt to prefetch just 1 cache line /*_mm_prefetch(((const char*)ptr + offset), _MM_HINT_T0);*/ // We get slightly better performance prefetching to non-temporal addresses instead of all cache levels _mm_prefetch(((const char*)ptr + offset), _MM_HINT_NTA); } #else PX_FORCE_INLINE void PxPrefetchLine(const void* ptr, uint32_t offset = 0) { // arm does have 32b cache line size __prefetch(((const char*)ptr + offset)); } #endif /*! Prefetch \c count bytes starting at \c ptr. */ #if !PX_ARM PX_FORCE_INLINE void PxPrefetch(const void* ptr, uint32_t count = 1) { const char* cp = (char*)ptr; uint64_t p = size_t(ptr); uint64_t startLine = p >> 6, endLine = (p + count - 1) >> 6; uint64_t lines = endLine - startLine + 1; do { PxPrefetchLine(cp); cp += 64; } while(--lines); } #else PX_FORCE_INLINE void PxPrefetch(const void* ptr, uint32_t count = 1) { const char* cp = (char*)ptr; uint32_t p = size_t(ptr); uint32_t startLine = p >> 5, endLine = (p + count - 1) >> 5; uint32_t lines = endLine - startLine + 1; do { PxPrefetchLine(cp); cp += 32; } while(--lines); } #endif #if !PX_DOXYGEN } // namespace physx #endif #endif
4,979
C
27.786127
115
0.720024
NVIDIA-Omniverse/PhysX/physx/include/foundation/windows/PxWindowsInclude.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_WINDOWS_INCLUDE_H #define PX_WINDOWS_INCLUDE_H #ifndef _WIN32 #error "This file should only be included by Windows builds!!" #endif #ifdef _WINDOWS_ // windows already included #error "Only include windows.h through this file!!" 
#endif // We only support >= Windows 7, and we need this for critical section and // Setting this hides some important APIs (e.g. LoadPackagedLibrary), so don't do it #define _WIN32_WINNT 0x0601 // turn off as much as we can for windows. All we really need is the thread functions(critical sections/Interlocked* // etc) #define NOGDICAPMASKS #define NOVIRTUALKEYCODES #define NOWINMESSAGES #define NOWINSTYLES #define NOSYSMETRICS #define NOMENUS #define NOICONS #define NOKEYSTATES #define NOSYSCOMMANDS #define NORASTEROPS #define NOSHOWWINDOW #define NOATOM #define NOCLIPBOARD #define NOCOLOR #define NOCTLMGR #define NODRAWTEXT #define NOGDI #define NOMB #define NOMEMMGR #define NOMETAFILE #define NOMINMAX #define NOOPENFILE #define NOSCROLL #define NOSERVICE #define NOSOUND #define NOTEXTMETRIC #define NOWH #define NOWINOFFSETS #define NOCOMM #define NOKANJI #define NOHELP #define NOPROFILER #define NODEFERWINDOWPOS #define NOMCX #define WIN32_LEAN_AND_MEAN // We need a slightly wider API surface for e.g. MultiByteToWideChar #define NOUSER #define NONLS #define NOMSG #pragma warning(push) #pragma warning(disable : 4668) //'symbol' is not defined as a preprocessor macro, replacing with '0' for 'directives' #include <windows.h> #pragma warning(pop) #if PX_SSE2 #include <xmmintrin.h> #endif #endif
3,258
C
32.597938
118
0.775629
NVIDIA-Omniverse/PhysX/physx/include/foundation/windows/PxWindowsAoS.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_WINDOWS_AOS_H #define PX_WINDOWS_AOS_H // no includes here! this file should be included from PxAOS.h only!!! #if !COMPILE_VECTOR_INTRINSICS #error Vector intrinsics should not be included when using scalar implementation. 
#endif #if !PX_DOXYGEN namespace physx { #endif namespace aos { typedef __m128 FloatV; typedef __m128 Vec3V; typedef __m128 Vec4V; typedef __m128 BoolV; typedef __m128 VecU32V; typedef __m128 VecI32V; typedef __m128 VecU16V; typedef __m128 VecI16V; typedef __m128 QuatV; #define FloatVArg FloatV & #define Vec3VArg Vec3V & #define Vec4VArg Vec4V & #define BoolVArg BoolV & #define VecU32VArg VecU32V & #define VecI32VArg VecI32V & #define VecU16VArg VecU16V & #define VecI16VArg VecI16V & #define QuatVArg QuatV & // Optimization for situations in which you cross product multiple vectors with the same vector. // Avoids 2X shuffles per product struct VecCrossV { Vec3V mL1; Vec3V mR1; }; struct VecShiftV { VecI32V shift; }; #define VecShiftVArg VecShiftV & PX_ALIGN_PREFIX(16) struct Mat33V { Mat33V() { } Mat33V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2) : col0(c0), col1(c1), col2(c2) { } Vec3V PX_ALIGN(16, col0); Vec3V PX_ALIGN(16, col1); Vec3V PX_ALIGN(16, col2); } PX_ALIGN_SUFFIX(16); PX_ALIGN_PREFIX(16) struct Mat34V { Mat34V() { } Mat34V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2, const Vec3V& c3) : col0(c0), col1(c1), col2(c2), col3(c3) { } Vec3V PX_ALIGN(16, col0); Vec3V PX_ALIGN(16, col1); Vec3V PX_ALIGN(16, col2); Vec3V PX_ALIGN(16, col3); } PX_ALIGN_SUFFIX(16); PX_ALIGN_PREFIX(16) struct Mat43V { Mat43V() { } Mat43V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2) : col0(c0), col1(c1), col2(c2) { } Vec4V PX_ALIGN(16, col0); Vec4V PX_ALIGN(16, col1); Vec4V PX_ALIGN(16, col2); } PX_ALIGN_SUFFIX(16); PX_ALIGN_PREFIX(16) struct Mat44V { Mat44V() { } Mat44V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2, const Vec4V& c3) : col0(c0), col1(c1), col2(c2), col3(c3) { } Vec4V PX_ALIGN(16, col0); Vec4V PX_ALIGN(16, col1); Vec4V PX_ALIGN(16, col2); Vec4V PX_ALIGN(16, col3); } PX_ALIGN_SUFFIX(16); } // namespace aos #if !PX_DOXYGEN } // namespace physx #endif #endif
3,892
C
26.034722
116
0.72739
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxDefaultSimulationFilterShader.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_DEFAULT_SIMULATION_FILTER_SHADER_H #define PX_DEFAULT_SIMULATION_FILTER_SHADER_H /** \addtogroup extensions @{ */ #include "PxPhysXConfig.h" #include "PxFiltering.h" #if !PX_DOXYGEN namespace physx { #endif class PxActor; /** \brief 64-bit mask used for collision filtering. 
The collision filtering equation for 2 objects o0 and o1 is: <pre> (G0 op0 K0) op2 (G1 op1 K1) == b </pre> with <ul> <li> G0 = PxGroupsMask for object o0. See PxSetGroupsMask </li> <li> G1 = PxGroupsMask for object o1. See PxSetGroupsMask </li> <li> K0 = filtering constant 0. See PxSetFilterConstants </li> <li> K1 = filtering constant 1. See PxSetFilterConstants </li> <li> b = filtering boolean. See PxSetFilterBool </li> <li> op0, op1, op2 = filtering operations. See PxSetFilterOps </li> </ul> If the filtering equation is true, collision detection is enabled. @see PxSetFilterOps() */ class PxGroupsMask { public: PX_INLINE PxGroupsMask():bits0(0),bits1(0),bits2(0),bits3(0) {} PX_INLINE ~PxGroupsMask() {} PxU16 bits0, bits1, bits2, bits3; }; /** \brief Collision filtering operations. @see PxGroupsMask */ struct PxFilterOp { enum Enum { PX_FILTEROP_AND, PX_FILTEROP_OR, PX_FILTEROP_XOR, PX_FILTEROP_NAND, PX_FILTEROP_NOR, PX_FILTEROP_NXOR, PX_FILTEROP_SWAP_AND }; }; /** \brief Implementation of a simple filter shader that emulates PhysX 2.8.x filtering This shader provides the following logic: \li If one of the two filter objects is a trigger, the pair is acccepted and #PxPairFlag::eTRIGGER_DEFAULT will be used for trigger reports \li Else, if the filter mask logic (see further below) discards the pair it will be suppressed (#PxFilterFlag::eSUPPRESS) \li Else, the pair gets accepted and collision response gets enabled (#PxPairFlag::eCONTACT_DEFAULT) Filter mask logic: Given the two #PxFilterData structures fd0 and fd1 of two collision objects, the pair passes the filter if the following conditions are met: 1) Collision groups of the pair are enabled 2) Collision filtering equation is satisfied @see PxSimulationFilterShader */ PxFilterFlags PxDefaultSimulationFilterShader( PxFilterObjectAttributes attributes0, PxFilterData filterData0, PxFilterObjectAttributes attributes1, PxFilterData filterData1, PxPairFlags& pairFlags, const void* constantBlock, PxU32 
constantBlockSize); /** \brief Determines if collision detection is performed between a pair of groups \note Collision group is an integer between 0 and 31. \param[in] group1 First Group \param[in] group2 Second Group \return True if the groups could collide @see PxSetGroupCollisionFlag */ bool PxGetGroupCollisionFlag(const PxU16 group1, const PxU16 group2); /** \brief Specifies if collision should be performed by a pair of groups \note Collision group is an integer between 0 and 31. \param[in] group1 First Group \param[in] group2 Second Group \param[in] enable True to enable collision between the groups @see PxGetGroupCollisionFlag */ void PxSetGroupCollisionFlag(const PxU16 group1, const PxU16 group2, const bool enable); /** \brief Retrieves the value set with PxSetGroup() \note Collision group is an integer between 0 and 31. \param[in] actor The actor \return The collision group this actor belongs to @see PxSetGroup */ PxU16 PxGetGroup(const PxActor& actor); /** \brief Sets which collision group this actor is part of \note Collision group is an integer between 0 and 31. \param[in] actor The actor \param[in] collisionGroup Collision group this actor belongs to @see PxGetGroup */ void PxSetGroup(PxActor& actor, const PxU16 collisionGroup); /** \brief Retrieves filtering operation. See comments for PxGroupsMask \param[out] op0 First filter operator. \param[out] op1 Second filter operator. \param[out] op2 Third filter operator. @see PxSetFilterOps PxSetFilterBool PxSetFilterConstants */ void PxGetFilterOps(PxFilterOp::Enum& op0, PxFilterOp::Enum& op1, PxFilterOp::Enum& op2); /** \brief Setups filtering operations. See comments for PxGroupsMask \param[in] op0 Filter op 0. \param[in] op1 Filter op 1. \param[in] op2 Filter op 2. @see PxSetFilterBool PxSetFilterConstants */ void PxSetFilterOps(const PxFilterOp::Enum& op0, const PxFilterOp::Enum& op1, const PxFilterOp::Enum& op2); /** \brief Retrieves filtering's boolean value. 
See comments for PxGroupsMask \return flag Boolean value for filter. @see PxSetFilterBool PxSetFilterConstants */ bool PxGetFilterBool(); /** \brief Setups filtering's boolean value. See comments for PxGroupsMask \param[in] enable Boolean value for filter. @see PxSetFilterOps PxSsetFilterConstants */ void PxSetFilterBool(const bool enable); /** \brief Gets filtering constant K0 and K1. See comments for PxGroupsMask \param[out] c0 the filtering constants, as a mask. See #PxGroupsMask. \param[out] c1 the filtering constants, as a mask. See #PxGroupsMask. @see PxSetFilterOps PxSetFilterBool PxSetFilterConstants */ void PxGetFilterConstants(PxGroupsMask& c0, PxGroupsMask& c1); /** \brief Setups filtering's K0 and K1 value. See comments for PxGroupsMask \param[in] c0 The new group mask. See #PxGroupsMask. \param[in] c1 The new group mask. See #PxGroupsMask. @see PxSetFilterOps PxSetFilterBool PxGetFilterConstants */ void PxSetFilterConstants(const PxGroupsMask& c0, const PxGroupsMask& c1); /** \brief Gets 64-bit mask used for collision filtering. See comments for PxGroupsMask \param[in] actor The actor \return The group mask for the actor. @see PxSetGroupsMask() */ PxGroupsMask PxGetGroupsMask(const PxActor& actor); /** \brief Sets 64-bit mask used for collision filtering. See comments for PxGroupsMask \param[in] actor The actor \param[in] mask The group mask to set for the actor. @see PxGetGroupsMask() */ void PxSetGroupsMask(PxActor& actor, const PxGroupsMask& mask); #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
7,594
C
27.98855
139
0.764288
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxRackAndPinionJoint.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//  * Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//  * Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//  * Neither the name of NVIDIA CORPORATION nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.

#ifndef PX_RACK_AND_PINION_JOINT_H
#define PX_RACK_AND_PINION_JOINT_H
/** \addtogroup extensions
  @{
*/

#include "extensions/PxJoint.h"

#if !PX_DOXYGEN
namespace physx
{
#endif

class PxRackAndPinionJoint;

/**
\brief Create a rack & pinion Joint.

\param[in] physics		The physics SDK
\param[in] actor0		An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localFrame0	The position and orientation of the joint relative to actor0
\param[in] actor1		An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localFrame1	The position and orientation of the joint relative to actor1

@see PxRackAndPinionJoint
*/
PxRackAndPinionJoint* PxRackAndPinionJointCreate(PxPhysics& physics, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1);

/**
\brief A joint that connects an existing revolute joint to an existing prismatic joint,
and constrains their relative angular/linear velocity and position with respect to each other.

@see PxRackAndPinionJointCreate PxJoint
*/
class PxRackAndPinionJoint : public PxJoint
{
public:
	/**
	\brief Set the hinge & prismatic joints connected by the rack & pinion joint.

	The passed hinge joint can be either PxRevoluteJoint, PxD6Joint or PxArticulationJointReducedCoordinate.
	The passed prismatic joint can be either PxPrismaticJoint or PxD6Joint.

	Note that these joints are only used to compute the positional error correction term,
	used to adjust potential drift between jointed actors. The rack & pinion joint can run without
	calling this function, but in that case some visible overlap may develop over time between
	the teeth of the rack & pinion meshes.

	\note Calling this function resets the internal positional error correction term.

	\param[in]	hinge		The hinge joint (pinion)
	\param[in]	prismatic	The prismatic joint (rack)
	\return	true if success
	*/
	virtual	bool		setJoints(const PxBase* hinge, const PxBase* prismatic)	= 0;

	/**
	\brief Get the hinge & prismatic joints connected by the rack & pinion joint.

	\param[out]	hinge		The hinge joint (pinion)
	\param[out]	prismatic	The prismatic joint (rack)
	*/
	virtual	void		getJoints(const PxBase*& hinge, const PxBase*& prismatic)	const	= 0;

	/**
	\brief Set the desired ratio directly.

	\note You may need to use a negative gear ratio if the joint frames of involved actors are
	not oriented in the same direction.

	\note Calling this function resets the internal positional error correction term.

	\param[in]	ratio	Desired ratio between the hinge and the prismatic.
	*/
	virtual	void		setRatio(float ratio)	= 0;

	/**
	\brief Get the ratio.

	\return	Current ratio
	*/
	virtual	float		getRatio()	const	= 0;

	/**
	\brief Set the desired ratio indirectly.

	This is a simple helper function that computes the ratio from passed data:

	ratio = (PI*2*nbRackTeeth)/(rackLength*nbPinionTeeth)

	\note Calling this function resets the internal positional error correction term.

	\param[in]	nbRackTeeth		Number of teeth on the rack (cannot be zero)
	\param[in]	nbPinionTeeth	Number of teeth on the pinion (cannot be zero)
	\param[in]	rackLength		Length of the rack
	\return	true if success
	*/
	virtual	bool		setData(PxU32 nbRackTeeth, PxU32 nbPinionTeeth, float rackLength)	= 0;

	// Returns the string name of the concrete type; used for serialization.
	virtual	const char*	getConcreteTypeName() const	{ return "PxRackAndPinionJoint"; }

protected:

	// Serialization constructor.
	PX_INLINE PxRackAndPinionJoint(PxType concreteType, PxBaseFlags baseFlags) : PxJoint(concreteType, baseFlags) {}

	// Deserialization constructor.
	PX_INLINE PxRackAndPinionJoint(PxBaseFlags baseFlags) : PxJoint(baseFlags) {}

	// Returns whether a given type name matches with the type of this instance.
	virtual	bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxRackAndPinionJoint", PxJoint); }
};

#if !PX_DOXYGEN
} // namespace physx
#endif

/** @} */
#endif
5,631
C
37.841379
178
0.755994
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxSphericalJoint.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//  * Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//  * Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//  * Neither the name of NVIDIA CORPORATION nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.

#ifndef PX_SPHERICAL_JOINT_H
#define PX_SPHERICAL_JOINT_H
/** \addtogroup extensions
  @{
*/

#include "extensions/PxJoint.h"
#include "extensions/PxJointLimit.h"

#if !PX_DOXYGEN
namespace physx
{
#endif

class PxSphericalJoint;

/**
\brief Create a spherical joint.

\param[in] physics		The physics SDK
\param[in] actor0		An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localFrame0	The position and orientation of the joint relative to actor0
\param[in] actor1		An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localFrame1	The position and orientation of the joint relative to actor1

@see PxSphericalJoint
*/
PxSphericalJoint* PxSphericalJointCreate(PxPhysics& physics, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1);

/**
\brief Flags specific to the spherical joint.

@see PxSphericalJoint
*/
struct PxSphericalJointFlag
{
	enum Enum
	{
		eLIMIT_ENABLED = 1<<1	//!< the cone limit for the spherical joint is enabled
	};
};

typedef PxFlags<PxSphericalJointFlag::Enum, PxU16> PxSphericalJointFlags;
PX_FLAGS_OPERATORS(PxSphericalJointFlag::Enum, PxU16)

/**
\brief A joint which behaves in a similar way to a ball and socket.

A spherical joint removes all linear degrees of freedom from two objects.

The position of the joint on each actor is specified by the origin of the body's joint frame.

A spherical joint may have a cone limit, to restrict the motion to within a certain range. In
addition, the bodies may be projected together if the distance between them exceeds a given threshold.

Projection, drive and limits are activated by setting the appropriate flags on the joint.

@see PxRevoluteJointCreate() PxJoint
*/
class PxSphericalJoint : public PxJoint
{
public:

	/**
	\brief Get the limit cone.

	If enabled, the limit cone will constrain the angular movement of the joint to lie
	within an elliptical cone.

	\return the limit cone

	@see PxJointLimitCone setLimitCone()
	*/
	virtual PxJointLimitCone	getLimitCone()	const	= 0;

	/**
	\brief Set the limit cone.

	\param[in] limit the limit cone

	@see PxJointLimitCone getLimitCone()
	*/
	virtual void				setLimitCone(const PxJointLimitCone& limit)	= 0;

	/**
	\brief get the swing angle of the joint from the Y axis
	*/
	virtual PxReal				getSwingYAngle()	const	= 0;

	/**
	\brief get the swing angle of the joint from the Z axis
	*/
	virtual PxReal				getSwingZAngle()	const	= 0;

	/**
	\brief Set the flags specific to the Spherical Joint.

	<b>Default</b> PxSphericalJointFlags(0)

	\param[in] flags The joint flags.

	@see PxSphericalJointFlag setFlag() getFlags()
	*/
	virtual void				setSphericalJointFlags(PxSphericalJointFlags flags) = 0;

	/**
	\brief Set a single flag specific to a Spherical Joint to true or false.

	\param[in] flag The flag to set or clear.
	\param[in] value the value to which to set the flag

	@see PxSphericalJointFlag, getFlags() setFlags()
	*/
	virtual void				setSphericalJointFlag(PxSphericalJointFlag::Enum flag, bool value) = 0;

	/**
	\brief Get the flags specific to the Spherical Joint.

	\return the joint flags

	@see PxSphericalJoint::flags, PxSphericalJointFlag setFlag() setFlags()
	*/
	virtual PxSphericalJointFlags	getSphericalJointFlags()	const	= 0;

	/**
	\brief Returns string name of PxSphericalJoint, used for serialization
	*/
	virtual	const char*			getConcreteTypeName() const { return "PxSphericalJoint"; }

protected:
	//serialization

	/**
	\brief Constructor
	*/
	PX_INLINE PxSphericalJoint(PxType concreteType, PxBaseFlags baseFlags) : PxJoint(concreteType, baseFlags) {}

	/**
	\brief Deserialization constructor
	*/
	PX_INLINE PxSphericalJoint(PxBaseFlags baseFlags) : PxJoint(baseFlags)	{}

	/**
	\brief Returns whether a given type name matches with the type of this instance
	*/
	virtual	bool isKindOf(const char* name)	const { PX_IS_KIND_OF(name, "PxSphericalJoint", PxJoint); }

	//~serialization
};

#if !PX_DOXYGEN
} // namespace physx
#endif

/** @} */
#endif
5,841
C
30.408602
169
0.751241
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxShapeExt.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_SHAPE_EXT_H #define PX_SHAPE_EXT_H /** \addtogroup extensions @{ */ #include "PxPhysXConfig.h" #include "PxShape.h" #include "PxRigidActor.h" #include "geometry/PxGeometryQuery.h" #include "PxQueryReport.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief utility functions for use with PxShape @see PxShape */ class PxShapeExt { public: /** \brief Retrieves the world space pose of the shape. \param[in] shape The shape for which to get the global pose. \param[in] actor The actor to which the shape is attached \return Global pose of shape. */ static PX_INLINE PxTransform getGlobalPose(const PxShape& shape, const PxRigidActor& actor) { // PT:: tag: scalar transform*transform return actor.getGlobalPose() * shape.getLocalPose(); } /** \brief Raycast test against the shape. \param[in] shape the shape \param[in] actor the actor to which the shape is attached \param[in] rayOrigin The origin of the ray to test the geometry object against \param[in] rayDir The direction of the ray to test the geometry object against \param[in] maxDist Maximum ray length \param[in] hitFlags Specify which properties per hit should be computed and written to result hit array. 
Combination of #PxHitFlag flags \param[in] maxHits max number of returned hits = size of 'rayHits' buffer \param[out] rayHits Raycast hits information \return Number of hits between the ray and the shape @see PxRaycastHit PxTransform */ static PX_INLINE PxU32 raycast(const PxShape& shape, const PxRigidActor& actor, const PxVec3& rayOrigin, const PxVec3& rayDir, PxReal maxDist, PxHitFlags hitFlags, PxU32 maxHits, PxRaycastHit* rayHits) { return PxGeometryQuery::raycast( rayOrigin, rayDir, shape.getGeometry(), getGlobalPose(shape, actor), maxDist, hitFlags, maxHits, rayHits); } /** \brief Test overlap between the shape and a geometry object \param[in] shape the shape \param[in] actor the actor to which the shape is attached \param[in] otherGeom The other geometry object to test overlap with \param[in] otherGeomPose Pose of the other geometry object \return True if the shape overlaps the geometry object @see PxGeometry PxTransform */ static PX_INLINE bool overlap(const PxShape& shape, const PxRigidActor& actor, const PxGeometry& otherGeom, const PxTransform& otherGeomPose) { return PxGeometryQuery::overlap(shape.getGeometry(), getGlobalPose(shape, actor), otherGeom, otherGeomPose); } /** \brief Sweep a geometry object against the shape. Currently only box, sphere, capsule and convex mesh shapes are supported, i.e. the swept geometry object must be one of those types. \param[in] shape the shape \param[in] actor the actor to which the shape is attached \param[in] unitDir Normalized direction along which the geometry object should be swept. \param[in] distance Sweep distance. Needs to be larger than 0. \param[in] otherGeom The geometry object to sweep against the shape \param[in] otherGeomPose Pose of the geometry object \param[out] sweepHit The sweep hit information. Only valid if this method returns true. \param[in] hitFlags Specify which properties per hit should be computed and written to result hit array. 
Combination of #PxHitFlag flags \return True if the swept geometry object hits the shape @see PxGeometry PxTransform PxSweepHit */ static PX_INLINE bool sweep(const PxShape& shape, const PxRigidActor& actor, const PxVec3& unitDir, const PxReal distance, const PxGeometry& otherGeom, const PxTransform& otherGeomPose, PxSweepHit& sweepHit, PxHitFlags hitFlags) { return PxGeometryQuery::sweep(unitDir, distance, otherGeom, otherGeomPose, shape.getGeometry(), getGlobalPose(shape, actor), sweepHit, hitFlags); } /** \brief Retrieves the axis aligned bounding box enclosing the shape. \return The shape's bounding box. \param[in] shape the shape \param[in] actor the actor to which the shape is attached \param[in] inflation Scale factor for computed world bounds. Box extents are multiplied by this value. @see PxBounds3 */ static PX_INLINE PxBounds3 getWorldBounds(const PxShape& shape, const PxRigidActor& actor, float inflation=1.01f) { PxBounds3 bounds; PxGeometryQuery::computeGeomBounds(bounds, shape.getGeometry(), getGlobalPose(shape, actor), 0.0f, inflation); return bounds; } }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
6,147
C
37.425
147
0.75484
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxParticleExt.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_PARTICLE_EXT_H #define PX_PARTICLE_EXT_H /** \addtogroup extensions @{ */ #include "PxParticleSystem.h" #include "PxParticleBuffer.h" #include "foundation/PxArray.h" #include "foundation/PxHashMap.h" #include "foundation/PxUserAllocated.h" #include "PxAttachment.h" #if !PX_DOXYGEN namespace physx { #endif namespace ExtGpu { /** \brief Structure to define user-defined particle state when constructing a new particle system. */ struct PxParticleBufferDesc { PxVec4* positions; PxVec4* velocities; PxU32* phases; PxParticleVolume* volumes; PxU32 numActiveParticles; PxU32 maxParticles; PxU32 numVolumes; PxU32 maxVolumes; PxParticleBufferDesc() : positions(NULL), velocities(NULL), phases(NULL), volumes(NULL), numActiveParticles(0), maxParticles(0), numVolumes(0), maxVolumes(0) { } }; /** \brief Structure to define user-defined particle state when constructing a new particle system that includes diffuse particles. */ struct PxParticleAndDiffuseBufferDesc : public PxParticleBufferDesc { PxDiffuseParticleParams diffuseParams; PxU32 maxDiffuseParticles; PxU32 maxActiveDiffuseParticles; PxParticleAndDiffuseBufferDesc() : PxParticleBufferDesc() { } }; /** \brief Structure to define user-defined particle state when constructing a new particle system that includes shape-matched rigid bodies. 
*/ struct PxParticleRigidDesc { PxParticleRigidDesc() : rigidOffsets(NULL), rigidCoefficients(NULL), rigidTranslations(NULL), rigidRotations(NULL), rigidLocalPositions(NULL), rigidLocalNormals(NULL), maxRigids(0), numActiveRigids(0) { } PxU32* rigidOffsets; PxReal* rigidCoefficients; PxVec4* rigidTranslations; PxQuat* rigidRotations; PxVec4* rigidLocalPositions; PxVec4* rigidLocalNormals; PxU32 maxRigids; PxU32 numActiveRigids; }; ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /** \brief Helper class to manage PxParticleClothDesc buffers used for communicating particle based cloths to PxParticleClothBuffer. */ class PxParticleClothBufferHelper { public: virtual void release() = 0; virtual PxU32 getMaxCloths() const = 0; //!< \return The maximum number of cloths this PxParticleClothBufferHelper can hold. virtual PxU32 getNumCloths() const = 0; //!< \return The current number of cloths in this PxParticleClothBufferHelper. virtual PxU32 getMaxSprings() const = 0; //!< \return The maximum number of springs this PxParticleClothBufferHelper can hold. virtual PxU32 getNumSprings() const = 0; //!< \return The current number of springs in this PxParticleClothBufferHelper. virtual PxU32 getMaxTriangles() const = 0; //!< \return The maximum number of triangles this PxParticleClothBufferHelper can hold. virtual PxU32 getNumTriangles() const = 0; //!< \return The current number of triangles in this PxParticleClothBufferHelper. virtual PxU32 getMaxParticles() const = 0; //!< \return The maximum number of particles this PxParticleClothBufferHelper can hold. virtual PxU32 getNumParticles() const = 0; //!< \return The current number of particles in this PxParticleClothBufferHelper. /** \brief Adds a PxParticleCloth to this PxParticleClothBufferHelper instance. \param[in] particleCloth The PxParticleCloth to be added. 
\param[in] triangles A pointer to the triangles \param[in] numTriangles The number of triangles \param[in] springs A pointer to the springs \param[in] numSprings The number of springs \param[in] restPositions A pointer to the particle rest positions \param[in] numParticles The number of particles in this cloth @see PxParticleCloth PxParticleSpring */ virtual void addCloth(const PxParticleCloth& particleCloth, const PxU32* triangles, const PxU32 numTriangles, const PxParticleSpring* springs, const PxU32 numSprings, const PxVec4* restPositions, const PxU32 numParticles) = 0; /** \brief Adds a cloth to this PxParticleClothBufferHelper instance. Adds a cloth to this PxParticleClothBufferHelper instance. With this method the relevant parameters for inflatable simulation (restVolume, pressure) can be set directly. \param[in] blendScale This should be 1.f / (numPartitions + 1) if the springs are partitioned by the user. Otherwise this will be set during spring partitioning. \param[in] restVolume The rest volume of the inflatable \param[in] pressure The pressure of the inflatable. The target inflatable volume is defined as restVolume * pressure. Setting this to > 0.0 will enable inflatable simulation. \param[in] triangles A pointer to the triangles \param[in] numTriangles The number of triangles \param[in] springs A pointer to the springs \param[in] numSprings The number of springs \param[in] restPositions A pointer to the particle rest positions \param[in] numParticles The number of particles in this cloth @see PxParticleSpring */ virtual void addCloth(const PxReal blendScale, const PxReal restVolume, const PxReal pressure, const PxU32* triangles, const PxU32 numTriangles, const PxParticleSpring* springs, const PxU32 numSprings, const PxVec4* restPositions, const PxU32 numParticles) = 0; /** \brief Returns a PxParticleClothDesc for this PxParticleClothBufferHelper instance to be used for spring partitioning. \return the PxParticleClothDesc. 
@see PxCreateAndPopulateParticleClothBuffer, PxParticleClothPreProcessor::partitionSprings */ virtual PxParticleClothDesc& getParticleClothDesc() = 0; protected: virtual ~PxParticleClothBufferHelper() {} }; /** \brief Helper struct that holds information about a specific mesh in a PxParticleVolumeBufferHelper. */ struct PxParticleVolumeMesh { PxU32 startIndex; //!< The index of the first triangle of this mesh in the triangle array of the PxParticleVolumeBufferHelper instance. PxU32 count; //!< The number of triangles of this mesh. }; /** \brief Helper class to manage communicating PxParticleVolumes data to PxParticleBuffer. */ class PxParticleVolumeBufferHelper { public: virtual void release() = 0; virtual PxU32 getMaxVolumes() const = 0; //!< \return The maximum number of PxParticleVolume this PxParticleVolumeBufferHelper instance can hold. virtual PxU32 getNumVolumes() const = 0; //!< \return The current number of PxParticleVolume in this PxParticleVolumeBufferHelper instance. virtual PxU32 getMaxTriangles() const = 0; //!< \return The maximum number of triangles this PxParticleVolumeBufferHelper instance can hold. virtual PxU32 getNumTriangles() const = 0; //!< \return The current number of triangles in this PxParticleVolumeBufferHelper instance. virtual PxParticleVolume* getParticleVolumes() = 0; //!< \return A pointer to the PxParticleVolume s of this PxParticleVolumeBufferHelper instance. virtual PxParticleVolumeMesh* getParticleVolumeMeshes() = 0; //!< \return A pointer to the PxParticleVolumeMesh structs describing the PxParticleVolumes of this PxParticleVolumeBufferHelper instance. virtual PxU32* getTriangles() = 0; //!< \return A pointer to the triangle indices in this PxParticleVolumeBufferHelper instance. /** \brief Adds a PxParticleVolume with a PxParticleVolumeMesh \param[in] volume The PxParticleVolume to be added. \param[in] volumeMesh A PxParticleVolumeMesh that describes the volumes to be added. 
startIndex is the index into the triangle list of the PxParticleVolumeBufferHelper instance. \param[in] triangles A pointer to the triangle indices of the PxParticleVolume to be added. \param[in] numTriangles The number of triangles of the PxParticleVolume to be added. */ virtual void addVolume(const PxParticleVolume& volume, const PxParticleVolumeMesh& volumeMesh, const PxU32* triangles, const PxU32 numTriangles) = 0; /** \brief Adds a volume \param[in] particleOffset The index of the first particle of the cloth that maps to this volume in the PxParticleClothBufferHelper instance. \param[in] numParticles The number of particles of the cloth that maps to this volume in the PxParticleClothBufferHelper instance. \param[in] triangles A pointer to the triangle indices of this volume. \param[in] numTriangles The number of triangles in this volume. */ virtual void addVolume(const PxU32 particleOffset, const PxU32 numParticles, const PxU32* triangles, const PxU32 numTriangles) = 0; protected: virtual ~PxParticleVolumeBufferHelper() {} }; /** \brief Helper class to manage PxParticleRigidDesc buffers used for communicating particle based rigids to PxPaticleSystem. */ class PxParticleRigidBufferHelper { public: virtual void release() = 0; virtual PxU32 getMaxRigids() const = 0; //!< \return The maximum number of rigids this PxParticleRigidBufferHelper instance can hold. virtual PxU32 getNumRigids() const = 0; //!< \return The current number of rigids in this PxParticleRigidBufferHelper instance. virtual PxU32 getMaxParticles() const = 0; //!< \return The maximum number of particles this PxParticleRigidBufferHelper instance can hold. virtual PxU32 getNumParticles() const = 0; //!< \return The current number of particles in this PxParticleRigidBufferHelper instance. /** \brief Adds a rigid. \param[in] translation The world-space location of the rigid. \param[in] rotation The world-space rotation of the rigid. \param[in] coefficient The stiffness of the rigid. 
\param[in] localPositions The particle positions in local space. \param[in] localNormals The surface normal for all the particles in local space. Each PxVec4 has the normal in the first 3 components and the SDF in the last component. \param[in] numParticles The number of particles in this rigid. */ virtual void addRigid(const PxVec3& translation, const PxQuat& rotation, const PxReal coefficient, const PxVec4* localPositions, const PxVec4* localNormals, PxU32 numParticles) = 0; /** \brief Get the PxParticleRigidDesc for this buffer. \returns A PxParticleRigidDesc. */ virtual PxParticleRigidDesc& getParticleRigidDesc() = 0; protected: virtual ~PxParticleRigidBufferHelper() {} }; /////////////////////////////////////////////////////////////////////////////// /** \brief Holds user-defined attachment data to attach particles to other bodies */ class PxParticleAttachmentBuffer : public PxUserAllocated { PxArray<PxParticleRigidAttachment> mAttachments; PxArray<PxParticleRigidFilterPair> mFilters; PxHashMap<PxRigidActor*, PxU32> mReferencedBodies; PxArray<PxRigidActor*> mNewReferencedBodies; PxArray<PxRigidActor*> mDestroyedRefrencedBodies; PxParticleBuffer& mParticleBuffer; PxParticleRigidAttachment* mDeviceAttachments; PxParticleRigidFilterPair* mDeviceFilters; PxU32 mNumDeviceAttachments; PxU32 mNumDeviceFilters; PxCudaContextManager* mCudaContextManager; PxParticleSystem& mParticleSystem; bool mDirty; PX_NOCOPY(PxParticleAttachmentBuffer) public: PxParticleAttachmentBuffer(PxParticleBuffer& particleBuffer, PxParticleSystem& particleSystem); ~PxParticleAttachmentBuffer(); // adds attachment to attachment buffer - localPose is in actor space for attachments to all types of rigids. 
void addRigidAttachment(PxRigidActor* rigidBody, const PxU32 particleID, const PxVec3& localPose, PxConeLimitedConstraint* coneLimit = NULL); bool removeRigidAttachment(PxRigidActor* rigidBody, const PxU32 particleID); void addRigidFilter(PxRigidActor* rigidBody, const PxU32 particleID); bool removeRigidFilter(PxRigidActor* rigidBody, const PxU32 particleID); void copyToDevice(CUstream stream = 0); }; /** \brief Creates a PxParticleRigidBufferHelper. \param[in] maxRigids The maximum number of rigids this PxParticleRigidsBuffers instance should hold. \param[in] maxParticles The maximum number of particles this PxParticleRigidBufferHelper instance should hold. \param[in] cudaContextManager A pointer to a PxCudaContextManager. \return A pointer to the new PxParticleRigidBufferHelper. */ PxParticleRigidBufferHelper* PxCreateParticleRigidBufferHelper(PxU32 maxRigids, PxU32 maxParticles, PxCudaContextManager* cudaContextManager); /** \brief Creates a PxParticleClothBufferHelper helper. \param[in] maxCloths The maximum number of cloths this PxParticleClothBufferHelper should hold. \param[in] maxTriangles The maximum number of triangles this PxParticleClothBufferHelper should hold. \param[in] maxSprings The maximum number of springs this PxParticleClothBufferHelper should hold. \param[in] maxParticles The maximum number of particles this PxParticleClothBufferHelper should hold. \param[in] cudaContextManager A pointer to a PxCudaContextManager. \return A pointer to the PxParticleClothBufferHelper that was created. */ PxParticleClothBufferHelper* PxCreateParticleClothBufferHelper(const PxU32 maxCloths, const PxU32 maxTriangles, const PxU32 maxSprings, const PxU32 maxParticles, PxCudaContextManager* cudaContextManager); /** \brief Creates a PxParticleVolumeBufferHelper. \param[in] maxVolumes The maximum number of PxParticleVolume s this PxParticleVolumeBufferHelper instance should hold. 
\param[in] maxTriangles The maximum number of triangles this PxParticleVolumeBufferHelper instance should hold. \param[in] cudaContextManager A pointer to a PxCudaContextManager. \return A pointer to the new PxParticleVolumeBufferHelper. */ PxParticleVolumeBufferHelper* PxCreateParticleVolumeBufferHelper(PxU32 maxVolumes, PxU32 maxTriangles, PxCudaContextManager* cudaContextManager); /** \brief Creates a particle attachment buffer \param[in] particleBuffer The particle buffer that contains particles that should get attached to something \param[in] particleSystem The particle system that is used to simulate the userBuffer \return An attachment buffer ready to use */ PxParticleAttachmentBuffer* PxCreateParticleAttachmentBuffer(PxParticleBuffer& particleBuffer, PxParticleSystem& particleSystem); /** \brief Creates and populates a particle buffer \param[in] desc The particle buffer descriptor \param[in] cudaContextManager A cuda context manager \return A fully populated particle buffer ready to use */ PxParticleBuffer* PxCreateAndPopulateParticleBuffer(const ExtGpu::PxParticleBufferDesc& desc, PxCudaContextManager* cudaContextManager); /** \brief Creates and populates a particle buffer that includes support for diffuse particles \param[in] desc The particle buffer descriptor \param[in] cudaContextManager A cuda context manager \return A fully populated particle buffer ready to use */ PxParticleAndDiffuseBuffer* PxCreateAndPopulateParticleAndDiffuseBuffer(const ExtGpu::PxParticleAndDiffuseBufferDesc& desc, PxCudaContextManager* cudaContextManager); /** \brief Creates and populates a particle cloth buffer \param[in] desc The particle buffer descriptor \param[in] clothDesc The cloth descriptor \param[out] output A cloth output object to further configure the behavior of the cloth \param[in] cudaContextManager A cuda context manager \return A fully populated particle cloth buffer ready to use */ PxParticleClothBuffer* PxCreateAndPopulateParticleClothBuffer(const 
ExtGpu::PxParticleBufferDesc& desc, const PxParticleClothDesc& clothDesc, PxPartitionedParticleCloth& output, PxCudaContextManager* cudaContextManager); /** \brief Creates and populates a particle rigid buffer. Particle rigids are particles that try to keep their relative positions. They are a bit commpressible similar to softbodies. \param[in] desc The particle buffer descriptor \param[in] rigidDesc The rigid descriptor \param[in] cudaContextManager A cuda context manager \return A fully populated particle rigid buffer ready to use */ PxParticleRigidBuffer* PxCreateAndPopulateParticleRigidBuffer(const ExtGpu::PxParticleBufferDesc& desc, const ExtGpu::PxParticleRigidDesc& rigidDesc, PxCudaContextManager* cudaContextManager); } // namespace ExtGpu #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
17,570
C
43.483544
207
0.787991
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxTetrahedronMeshExt.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_TETRAHEDRON_MESH_EXT_H #define PX_TETRAHEDRON_MESH_EXT_H /** \addtogroup extensions @{ */ #include "foundation/PxVec3.h" #include "foundation/PxArray.h" #if !PX_DOXYGEN namespace physx { #endif class PxTetrahedronMesh; /** \brief utility functions for use with PxTetrahedronMesh and subclasses */ class PxTetrahedronMeshExt { public: /** Returns the index of the tetrahedron that contains a point \param[in] mesh The tetmesh \param[in] point The point to find the enclosing tetrahedron for \param[in] bary The barycentric coordinates of the point inside the enclosing tetrahedron \param[in] tolerance Tolerance value used classify points as inside if they lie exactly a tetrahedron's surface \return The index of the tetrahedon containing the point, -1 if not tetrahedron contains the opoint */ static PxI32 findTetrahedronContainingPoint(const PxTetrahedronMesh* mesh, const PxVec3& point, PxVec4& bary, PxReal tolerance = 1e-6f); /** Returns the index of the tetrahedron closest to a point \param[in] mesh The tetmesh \param[in] point The point to find the closest tetrahedron for \param[out] bary The barycentric coordinates of the point in the tetrahedron \return The index of the tetrahedon closest to the point */ static PxI32 findTetrahedronClosestToPoint(const PxTetrahedronMesh* mesh, const PxVec3& point, PxVec4& bary); /** Associates points with closest tetrahedra from input tetrahedral mesh \param[in] tetMeshVertices The tetrahedral mesh vertices \param[in] tetMeshIndices The tetraheral mesh indices \param[in] pointsToEmbed The points for which the embedding should be created \param[in] barycentricCoordinates The output barycentric coordinates for each input point relative to its closest tetrahedron \param[in] tetLinks The output indices of the closest tetrahedron for each input point */ static void createPointsToTetrahedronMap(const PxArray<PxVec3>& tetMeshVertices, const PxArray<PxU32>& tetMeshIndices, const PxArray<PxVec3>& pointsToEmbed, PxArray<PxVec4>& 
barycentricCoordinates, PxArray<PxU32>& tetLinks); /** Extracts the surface triangles of a tetmesh The extracted triangle's vertex indices point to the vertex buffer of the tetmesh. \param[in] tetrahedra The tetrahedra indices \param[in] numTetrahedra The number of tetrahedra \param[in] sixteenBitIndices If set to true, the tetrahedra indices are read as 16bit integers, otherwise 32bit integers are used \param[in] surfaceTriangles The resulting surface triangles \param[in] surfaceTriangleToTet Optional array to get the index of a tetrahedron that is adjacent to the surface triangle with the corresponding index \param[in] flipTriangleOrientation Reverses the orientation of the ouput triangles */ static void extractTetMeshSurface(const void* tetrahedra, PxU32 numTetrahedra, bool sixteenBitIndices, PxArray<PxU32>& surfaceTriangles, PxArray<PxU32>* surfaceTriangleToTet = NULL, bool flipTriangleOrientation = false); /** Extracts the surface triangles of a tetmesh The extracted triangle's vertex indices point to the vertex buffer of the tetmesh. \param[in] mesh The mesh from which the surface shall be computed \param[in] surfaceTriangles The resulting surface triangles \param[in] surfaceTriangleToTet Optional array to get the index of a tetrahedron that is adjacent to the surface triangle with the corresponding index \param[in] flipTriangleOrientation Reverses the orientation of the ouput triangles */ static void extractTetMeshSurface(const PxTetrahedronMesh* mesh, PxArray<PxU32>& surfaceTriangles, PxArray<PxU32>* surfaceTriangleToTet = NULL, bool flipTriangleOrientation = false); }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
5,481
C
48.387387
226
0.77869
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxSmoothNormals.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_SMOOTH_NORMALS_H #define PX_SMOOTH_NORMALS_H /** \addtogroup extensions @{ */ #include "common/PxPhysXCommonConfig.h" /** \brief Builds smooth vertex normals over a mesh. 
- "smooth" because smoothing groups are not supported here - takes angles into account for correct cube normals computation To use 32bit indices pass a pointer in dFaces and set wFaces to zero. Alternatively pass a pointer to wFaces and set dFaces to zero. \param[in] nbTris Number of triangles \param[in] nbVerts Number of vertices \param[in] verts Array of vertices \param[in] dFaces Array of dword triangle indices, or null \param[in] wFaces Array of word triangle indices, or null \param[out] normals Array of computed normals (assumes nbVerts vectors) \param[in] flip Flips the normals or not \return True on success. */ PX_C_EXPORT bool PX_CALL_CONV PxBuildSmoothNormals(physx::PxU32 nbTris, physx::PxU32 nbVerts, const physx::PxVec3* verts, const physx::PxU32* dFaces, const physx::PxU16* wFaces, physx::PxVec3* normals, bool flip); /** @} */ #endif
2,761
C
45.033333
121
0.760594
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxBroadPhaseExt.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_BROAD_PHASE_EXT_H #define PX_BROAD_PHASE_EXT_H /** \addtogroup extensions @{ */ #include "PxPhysXConfig.h" #include "common/PxPhysXCommonConfig.h" #if !PX_DOXYGEN namespace physx { #endif class PxBroadPhaseExt { public: /** \brief Creates regions for PxSceneDesc, from a global box. 
This helper simply subdivides the given global box into a 2D grid of smaller boxes. Each one of those smaller boxes is a region of interest for the broadphase. There are nbSubdiv*nbSubdiv regions in the 2D grid. The function does not subdivide along the given up axis. This is the simplest setup one can use with PxBroadPhaseType::eMBP. A more sophisticated setup would try to cover the game world with a non-uniform set of regions (i.e. not just a grid). \param[out] regions Regions computed from the input global box \param[in] globalBounds World-space box covering the game world \param[in] nbSubdiv Grid subdivision level. The function will create nbSubdiv*nbSubdiv regions. \param[in] upAxis Up axis (0 for X, 1 for Y, 2 for Z). \return number of regions written out to the 'regions' array @see PxSceneDesc PxBroadPhaseType */ static PxU32 createRegionsFromWorldBounds(PxBounds3* regions, const PxBounds3& globalBounds, PxU32 nbSubdiv, PxU32 upAxis=1); }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
3,047
C
40.189189
126
0.761405
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxRevoluteJoint.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_REVOLUTE_JOINT_H #define PX_REVOLUTE_JOINT_H /** \addtogroup extensions @{ */ #include "extensions/PxJoint.h" #include "extensions/PxJointLimit.h" #if !PX_DOXYGEN namespace physx { #endif class PxRevoluteJoint; /** \brief Create a revolute joint. 
\param[in] physics The physics SDK \param[in] actor0 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame \param[in] localFrame0 The position and orientation of the joint relative to actor0 \param[in] actor1 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame \param[in] localFrame1 The position and orientation of the joint relative to actor1 @see PxRevoluteJoint */ PxRevoluteJoint* PxRevoluteJointCreate(PxPhysics& physics, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1); /** \brief Flags specific to the Revolute Joint. @see PxRevoluteJoint */ struct PxRevoluteJointFlag { enum Enum { eLIMIT_ENABLED = 1<<0, //!< enable the limit eDRIVE_ENABLED = 1<<1, //!< enable the drive eDRIVE_FREESPIN = 1<<2 //!< if the existing velocity is beyond the drive velocity, do not add force }; }; typedef PxFlags<PxRevoluteJointFlag::Enum, PxU16> PxRevoluteJointFlags; PX_FLAGS_OPERATORS(PxRevoluteJointFlag::Enum, PxU16) /** \brief A joint which behaves in a similar way to a hinge or axle. A hinge joint removes all but a single rotational degree of freedom from two objects. The axis along which the two bodies may rotate is specified with a point and a direction vector. The position of the hinge on each body is specified by the origin of the body's joint frame. The axis of the hinge is specified as the direction of the x-axis in the body's joint frame. \image html revoluteJoint.png A revolute joint can be given a motor, so that it can apply a force to rotate the attached actors. It may also be given a limit, to restrict the revolute motion to within a certain range. In addition, the bodies may be projected together if the distance or angle between them exceeds a given threshold. Projection, drive and limits are activated by setting the appropriate flags on the joint. 
@see PxRevoluteJointCreate() PxJoint */ class PxRevoluteJoint : public PxJoint { public: /** \brief return the angle of the joint, in the range (-2*Pi, 2*Pi] */ virtual PxReal getAngle() const = 0; /** \brief return the velocity of the joint */ virtual PxReal getVelocity() const = 0; /** \brief set the joint limit parameters. The limit is activated using the flag PxRevoluteJointFlag::eLIMIT_ENABLED The limit angle range is (-2*Pi, 2*Pi). \param[in] limits The joint limit parameters. @see PxJointAngularLimitPair getLimit() */ virtual void setLimit(const PxJointAngularLimitPair& limits) = 0; /** \brief get the joint limit parameters. \return the joint limit parameters @see PxJointAngularLimitPair setLimit() */ virtual PxJointAngularLimitPair getLimit() const = 0; /** \brief set the target velocity for the drive model. The motor will only be able to reach this velocity if the maxForce is sufficiently large. If the joint is spinning faster than this velocity, the motor will actually try to brake (see PxRevoluteJointFlag::eDRIVE_FREESPIN.) The sign of this variable determines the rotation direction, with positive values going the same way as positive joint angles. Setting a very large target velocity may cause undesirable results. \param[in] velocity the drive target velocity \param[in] autowake Whether to wake up the joint rigids if they are asleep. <b>Range:</b> (-PX_MAX_F32, PX_MAX_F32)<br> <b>Default:</b> 0.0 @see PxRevoluteFlags::eDRIVE_FREESPIN */ virtual void setDriveVelocity(PxReal velocity, bool autowake = true) = 0; /** \brief gets the target velocity for the drive model. \return the drive target velocity @see setDriveVelocity() */ virtual PxReal getDriveVelocity() const = 0; /** \brief sets the maximum torque the drive can exert. 
The value set here may be used either as an impulse limit or a force limit, depending on the flag PxConstraintFlag::eDRIVE_LIMITS_ARE_FORCES <b>Range:</b> [0, PX_MAX_F32)<br> <b>Default:</b> PX_MAX_F32 @see setDriveVelocity() */ virtual void setDriveForceLimit(PxReal limit) = 0; /** \brief gets the maximum torque the drive can exert. \return the torque limit @see setDriveVelocity() */ virtual PxReal getDriveForceLimit() const = 0; /** \brief sets the gear ratio for the drive. When setting up the drive constraint, the velocity of the first actor is scaled by this value, and its response to drive torque is scaled down. So if the drive target velocity is zero, the second actor will be driven to the velocity of the first scaled by the gear ratio <b>Range:</b> [0, PX_MAX_F32)<br> <b>Default:</b> 1.0 \param[in] ratio the drive gear ratio @see getDriveGearRatio() */ virtual void setDriveGearRatio(PxReal ratio) = 0; /** \brief gets the gear ratio. \return the drive gear ratio @see setDriveGearRatio() */ virtual PxReal getDriveGearRatio() const = 0; /** \brief sets the flags specific to the Revolute Joint. <b>Default</b> PxRevoluteJointFlags(0) \param[in] flags The joint flags. @see PxRevoluteJointFlag setFlag() getFlags() */ virtual void setRevoluteJointFlags(PxRevoluteJointFlags flags) = 0; /** \brief sets a single flag specific to a Revolute Joint. \param[in] flag The flag to set or clear. \param[in] value the value to which to set the flag @see PxRevoluteJointFlag, getFlags() setFlags() */ virtual void setRevoluteJointFlag(PxRevoluteJointFlag::Enum flag, bool value) = 0; /** \brief gets the flags specific to the Revolute Joint. 
\return the joint flags @see PxRevoluteJoint::flags, PxRevoluteJointFlag setFlag() setFlags() */ virtual PxRevoluteJointFlags getRevoluteJointFlags() const = 0; /** \brief Returns string name of PxRevoluteJoint, used for serialization */ virtual const char* getConcreteTypeName() const { return "PxRevoluteJoint"; } protected: //serialization /** \brief Constructor */ PX_INLINE PxRevoluteJoint(PxType concreteType, PxBaseFlags baseFlags) : PxJoint(concreteType, baseFlags) {} /** \brief Deserialization constructor */ PX_INLINE PxRevoluteJoint(PxBaseFlags baseFlags) : PxJoint(baseFlags) {} /** \brief Returns whether a given type name matches with the type of this instance */ virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxRevoluteJoint", PxJoint); } //~serialization }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
8,448
C
30.0625
167
0.745147
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxContactJoint.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_CONTACT_JOINT_H #define PX_CONTACT_JOINT_H #include "extensions/PxJoint.h" #if !PX_DOXYGEN namespace physx { #endif class PxContactJoint; /** \brief Create a contact Joint. \param[in] physics The physics SDK \param[in] actor0 An actor to which the joint is attached. 
NULL may be used to attach the joint to a specific point in the world frame \param[in] localFrame0 The position and orientation of the joint relative to actor0 \param[in] actor1 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame \param[in] localFrame1 The position and orientation of the joint relative to actor1 @see PxContactJoint */ PX_DEPRECATED PxContactJoint* PxContactJointCreate(PxPhysics& physics, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1); struct PX_DEPRECATED PxJacobianRow { PxVec3 linear0; PxVec3 linear1; PxVec3 angular0; PxVec3 angular1; PxJacobianRow(){} PxJacobianRow(const PxVec3& lin0, const PxVec3& lin1, const PxVec3& ang0, const PxVec3& ang1) : linear0(lin0), linear1(lin1), angular0(ang0), angular1(ang1) { } void operator *= (const PxReal scale) { linear0 *= scale; linear1 *= scale; angular0 *= scale; angular1 *= scale; } PxJacobianRow operator * (const PxReal scale) const { return PxJacobianRow(linear0*scale, linear1*scale, angular0*scale, angular1*scale); } }; /** \brief PxContactJoint is best viewed as a helper function for the inverse dynamics of articulations. The expected use case is to use PxContactJoint::getConstraint() in conjunction with PxArticulationReducedCoordinate::addLoopJoint(). 
@see PxContactJointCreate PxJoint */ PX_DEPRECATED class PxContactJoint : public PxJoint { public: /** \brief Set the current contact of the joint */ virtual void setContact(const PxVec3& contact) = 0; /** \brief Set the current contact normal of the joint */ virtual void setContactNormal(const PxVec3& contactNormal) = 0; /** \brief Set the current penetration of the joint */ virtual void setPenetration(const PxReal penetration) = 0; /** \brief Return the current contact of the joint */ virtual PxVec3 getContact() const = 0; /** \brief Return the current contact normal of the joint */ virtual PxVec3 getContactNormal() const = 0; /** \brief Return the current penetration value of the joint */ virtual PxReal getPenetration() const = 0; virtual PxReal getRestitution() const = 0; virtual void setRestitution(const PxReal restitution) = 0; virtual PxReal getBounceThreshold() const = 0; virtual void setBounceThreshold(const PxReal bounceThreshold) = 0; /** \brief Returns string name of PxContactJoint, used for serialization */ virtual const char* getConcreteTypeName() const { return "PxContactJoint"; } virtual void computeJacobians(PxJacobianRow* jacobian) const = 0; virtual PxU32 getNbJacobianRows() const = 0; protected: //serialization /** \brief Constructor */ PX_INLINE PxContactJoint(PxType concreteType, PxBaseFlags baseFlags) : PxJoint(concreteType, baseFlags) {} /** \brief Deserialization constructor */ PX_INLINE PxContactJoint(PxBaseFlags baseFlags) : PxJoint(baseFlags) {} /** \brief Returns whether a given type name matches with the type of this instance */ virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxContactJoint", PxJoint); } //~serialization }; #if !PX_DOXYGEN } #endif #endif
5,340
C
31.766871
180
0.733146
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxD6Joint.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_D6_JOINT_H #define PX_D6_JOINT_H /** \addtogroup extensions @{ */ #include "extensions/PxJoint.h" #include "extensions/PxJointLimit.h" #include "foundation/PxFlags.h" #if !PX_DOXYGEN namespace physx { #endif class PxD6Joint; /** \brief Create a D6 joint. 
\param[in] physics The physics SDK \param[in] actor0 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame \param[in] localFrame0 The position and orientation of the joint relative to actor0 \param[in] actor1 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame \param[in] localFrame1 The position and orientation of the joint relative to actor1 @see PxD6Joint */ PxD6Joint* PxD6JointCreate(PxPhysics& physics, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1); /** \brief Used to specify one of the degrees of freedom of a D6 joint. @see PxD6Joint */ struct PxD6Axis { enum Enum { eX = 0, //!< motion along the X axis eY = 1, //!< motion along the Y axis eZ = 2, //!< motion along the Z axis eTWIST = 3, //!< motion around the X axis eSWING1 = 4, //!< motion around the Y axis eSWING2 = 5, //!< motion around the Z axis eCOUNT = 6 }; }; /** \brief Used to specify the range of motions allowed for a degree of freedom in a D6 joint. @see PxD6Joint */ struct PxD6Motion { enum Enum { eLOCKED, //!< The DOF is locked, it does not allow relative motion. eLIMITED, //!< The DOF is limited, it only allows motion within a specific range. eFREE //!< The DOF is free and has its full range of motion. }; }; /** \brief Used to specify which axes of a D6 joint are driven. Each drive is an implicit force-limited damped spring: force = spring * (target position - position) + damping * (targetVelocity - velocity) Alternatively, the spring may be configured to generate a specified acceleration instead of a force. A linear axis is affected by drive only if the corresponding drive flag is set. There are two possible models for angular drive: swing/twist, which may be used to drive one or more angular degrees of freedom, or slerp, which may only be used to drive all three angular degrees simultaneously. 
@see PxD6Joint */ struct PxD6Drive { enum Enum { eX = 0, //!< drive along the X-axis eY = 1, //!< drive along the Y-axis eZ = 2, //!< drive along the Z-axis eSWING = 3, //!< drive of displacement from the X-axis eTWIST = 4, //!< drive of the displacement around the X-axis eSLERP = 5, //!< drive of all three angular degrees along a SLERP-path eCOUNT = 6 }; }; /** \brief flags for configuring the drive model of a PxD6Joint @see PxD6JointDrive PxD6Joint */ struct PxD6JointDriveFlag { enum Enum { eACCELERATION = 1 //!< drive spring is for the acceleration at the joint (rather than the force) }; }; typedef PxFlags<PxD6JointDriveFlag::Enum, PxU32> PxD6JointDriveFlags; PX_FLAGS_OPERATORS(PxD6JointDriveFlag::Enum, PxU32) /** \brief parameters for configuring the drive model of a PxD6Joint @see PxD6Joint */ class PxD6JointDrive : public PxSpring { public: PxReal forceLimit; //!< the force limit of the drive - may be an impulse or a force depending on PxConstraintFlag::eDRIVE_LIMITS_ARE_FORCES PxD6JointDriveFlags flags; //!< the joint drive flags /** \brief default constructor for PxD6JointDrive. */ PxD6JointDrive(): PxSpring(0,0), forceLimit(PX_MAX_F32), flags(0) {} /** \brief constructor a PxD6JointDrive. \param[in] driveStiffness The stiffness of the drive spring. 
\param[in] driveDamping The damping of the drive spring \param[in] driveForceLimit The maximum impulse or force that can be exerted by the drive \param[in] isAcceleration Whether the drive is an acceleration drive or a force drive */ PxD6JointDrive(PxReal driveStiffness, PxReal driveDamping, PxReal driveForceLimit, bool isAcceleration = false) : PxSpring(driveStiffness, driveDamping) , forceLimit(driveForceLimit) , flags(isAcceleration?PxU32(PxD6JointDriveFlag::eACCELERATION) : 0) {} /** \brief returns true if the drive is valid */ bool isValid() const { return PxIsFinite(stiffness) && stiffness>=0 && PxIsFinite(damping) && damping >=0 && PxIsFinite(forceLimit) && forceLimit >=0; } }; /** \brief A D6 joint is a general constraint between two actors. It allows the application to individually define the linear and rotational degrees of freedom, and also to configure a variety of limits and driven degrees of freedom. By default all degrees of freedom are locked. So to create a prismatic joint with free motion along the x-axis: \code ... joint->setMotion(PxD6Axis::eX, PxD6JointMotion::eFREE); ... \endcode Or a Revolute joint with motion free allowed around the x-axis: \code ... joint->setMotion(PxD6Axis::eTWIST, PxD6JointMotion::eFREE); ... \endcode Degrees of freedom may also be set to limited instead of locked. There are two different kinds of linear limits available. The first kind is a single limit value for all linear degrees of freedom, which may act as a linear, circular, or spherical limit depending on which degrees of freedom are limited. This is similar to a distance limit. Then, the second kind supports a pair of limit values for each linear axis, which can be used to implement a traditional prismatic joint for example. If the twist degree of freedom is limited, is supports upper and lower limits. The two swing degrees of freedom are limited with a cone limit. 
@see PxD6JointCreate() PxJoint */ class PxD6Joint : public PxJoint { public: /** \brief Set the motion type around the specified axis. Each axis may independently specify that the degree of freedom is locked (blocking relative movement along or around this axis), limited by the corresponding limit, or free. \param[in] axis the axis around which motion is specified \param[in] type the motion type around the specified axis <b>Default:</b> all degrees of freedom are locked @see getMotion() PxD6Axis PxD6Motion */ virtual void setMotion(PxD6Axis::Enum axis, PxD6Motion::Enum type) = 0; /** \brief Get the motion type around the specified axis. @see setMotion() PxD6Axis PxD6Motion \param[in] axis the degree of freedom around which the motion type is specified \return the motion type around the specified axis */ virtual PxD6Motion::Enum getMotion(PxD6Axis::Enum axis) const = 0; /** \brief get the twist angle of the joint, in the range (-2*Pi, 2*Pi] */ virtual PxReal getTwistAngle() const = 0; /** \brief get the twist angle of the joint \deprecated Use getTwistAngle instead. Deprecated since PhysX version 4.0 */ PX_DEPRECATED PX_FORCE_INLINE PxReal getTwist() const { return getTwistAngle(); } /** \brief get the swing angle of the joint from the Y axis */ virtual PxReal getSwingYAngle() const = 0; /** \brief get the swing angle of the joint from the Z axis */ virtual PxReal getSwingZAngle() const = 0; /** \brief Set the distance limit for the joint. A single limit constraints all linear limited degrees of freedom, forming a linear, circular or spherical constraint on motion depending on the number of limited degrees. This is similar to a distance limit. \param[in] limit the distance limit structure @see getDistanceLimit() PxJointLinearLimit */ virtual void setDistanceLimit(const PxJointLinearLimit& limit) = 0; /** \brief Get the distance limit for the joint. 
\return the distance limit structure @see setDistanceLimit() PxJointLinearLimit */ virtual PxJointLinearLimit getDistanceLimit() const = 0; /** \deprecated Use setDistanceLimit instead. Deprecated since PhysX version 4.0 */ PX_DEPRECATED PX_FORCE_INLINE void setLinearLimit(const PxJointLinearLimit& limit) { setDistanceLimit(limit); } /** \deprecated Use getDistanceLimit instead. Deprecated since PhysX version 4.0 */ PX_DEPRECATED PX_FORCE_INLINE PxJointLinearLimit getLinearLimit() const { return getDistanceLimit(); } /** \brief Set the linear limit for a given linear axis. This function extends the previous setDistanceLimit call with the following features: - there can be a different limit for each linear axis - each limit is defined by two values, i.e. it can now be asymmetric This can be used to create prismatic joints similar to PxPrismaticJoint, or point-in-quad joints, or point-in-box joints. \param[in] axis The limited linear axis (must be PxD6Axis::eX, PxD6Axis::eY or PxD6Axis::eZ) \param[in] limit The linear limit pair structure @see getLinearLimit() */ virtual void setLinearLimit(PxD6Axis::Enum axis, const PxJointLinearLimitPair& limit) = 0; /** \brief Get the linear limit for a given linear axis. \param[in] axis The limited linear axis (must be PxD6Axis::eX, PxD6Axis::eY or PxD6Axis::eZ) \return the linear limit pair structure from desired axis @see setLinearLimit() PxJointLinearLimit */ virtual PxJointLinearLimitPair getLinearLimit(PxD6Axis::Enum axis) const = 0; /** \brief Set the twist limit for the joint. The twist limit controls the range of motion around the twist axis. The limit angle range is (-2*Pi, 2*Pi). \param[in] limit the twist limit structure @see getTwistLimit() PxJointAngularLimitPair */ virtual void setTwistLimit(const PxJointAngularLimitPair& limit) = 0; /** \brief Get the twist limit for the joint. 
\return the twist limit structure @see setTwistLimit() PxJointAngularLimitPair */ virtual PxJointAngularLimitPair getTwistLimit() const = 0; /** \brief Set the swing cone limit for the joint. The cone limit is used if either or both swing axes are limited. The extents are symmetrical and measured in the frame of the parent. If only one swing degree of freedom is limited, the corresponding value from the cone limit defines the limit range. \param[in] limit the cone limit structure @see getLimitCone() PxJointLimitCone */ virtual void setSwingLimit(const PxJointLimitCone& limit) = 0; /** \brief Get the cone limit for the joint. \return the swing limit structure @see setLimitCone() PxJointLimitCone */ virtual PxJointLimitCone getSwingLimit() const = 0; /** \brief Set a pyramidal swing limit for the joint. The pyramid limits will only be used in the following cases: - both swing Y and Z are limited. The limit shape is then a pyramid. - Y is limited and Z is locked, or vice versa. The limit shape is an asymmetric angular section, similar to what is supported for the twist axis. The remaining cases (Y limited and Z is free, or vice versa) are not supported. \param[in] limit the cone limit structure @see getLimitCone() PxJointLimitPyramid */ virtual void setPyramidSwingLimit(const PxJointLimitPyramid& limit) = 0; /** \brief Get the pyramidal swing limit for the joint. \return the swing limit structure @see setLimitCone() PxJointLimitPyramid */ virtual PxJointLimitPyramid getPyramidSwingLimit() const = 0; /** \brief Set the drive parameters for the specified drive type. \param[in] index the type of drive being specified \param[in] drive the drive parameters @see getDrive() PxD6JointDrive <b>Default</b> The default drive spring and damping values are zero, the force limit is zero, and no flags are set. */ virtual void setDrive(PxD6Drive::Enum index, const PxD6JointDrive& drive) = 0; /** \brief Get the drive parameters for the specified drive type. 
\param[in] index the specified drive type @see setDrive() PxD6JointDrive */ virtual PxD6JointDrive getDrive(PxD6Drive::Enum index) const = 0; /** \brief Set the drive goal pose The goal is relative to the constraint frame of actor[0] <b>Default</b> the identity transform \param[in] pose The goal drive pose if positional drive is in use. \param[in] autowake If true and the attached actors are in a scene, this call wakes them up and increases their wake counters to #PxSceneDesc::wakeCounterResetValue if the counter value is below the reset value. @see setDrivePosition() */ virtual void setDrivePosition(const PxTransform& pose, bool autowake = true) = 0; /** \brief Get the drive goal pose. @see getDrivePosition() */ virtual PxTransform getDrivePosition() const = 0; /** \brief Set the target goal velocity for drive. The velocity is measured in the constraint frame of actor[0] \param[in] linear The goal velocity for linear drive \param[in] angular The goal velocity for angular drive \param[in] autowake If true and the attached actors are in a scene, this call wakes them up and increases their wake counters to #PxSceneDesc::wakeCounterResetValue if the counter value is below the reset value. @see getDriveVelocity() */ virtual void setDriveVelocity(const PxVec3& linear, const PxVec3& angular, bool autowake = true) = 0; /** \brief Get the target goal velocity for joint drive. 
\param[in] linear The goal velocity for linear drive \param[in] angular The goal velocity for angular drive @see setDriveVelocity() */ virtual void getDriveVelocity(PxVec3& linear, PxVec3& angular) const = 0; /** \brief Returns string name of PxD6Joint, used for serialization */ virtual const char* getConcreteTypeName() const { return "PxD6Joint"; } protected: //serialization /** \brief Constructor */ PX_INLINE PxD6Joint(PxType concreteType, PxBaseFlags baseFlags) : PxJoint(concreteType, baseFlags) {} /** \brief Deserialization constructor */ PX_INLINE PxD6Joint(PxBaseFlags baseFlags) : PxJoint(baseFlags) {} /** \brief Returns whether a given type name matches with the type of this instance */ virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxD6Joint", PxJoint); } //~serialization }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
15,691
C
30.70101
155
0.739468
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxSamplingExt.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_SAMPLING_EXT_H #define PX_SAMPLING_EXT_H /** \addtogroup extensions @{ */ #include "foundation/PxArray.h" #include "geometry/PxGeometry.h" #include "foundation/PxUserAllocated.h" #include "geometry/PxSimpleTriangleMesh.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief utility functions to sample vertices on or inside a triangle mesh or other geometries */ class PxSamplingExt { public: /** Computes samples on a triangle mesh's surface that are not closer to each other than a given distance. Optionally the mesh's interior can be filled with samples as well. \param[in] mesh The triangle mesh \param[in] r The closest distance two surface samples are allowed to have \param[out] result Equally distributed samples on and if specified inside the triangle mesh \param[in] rVolume The average distance of samples inside the mesh. If set to zero, samples will only be placed on the mesh's surface \param[out] triangleIds Optional output containing the index of the triangle for all samples on the mesh's surface. The array will contain less entries than output vertices if volume samples are active since volume samples are not on the surface. \param[out] barycentricCoordinates Optional output containing the barycentric coordinates for all samples on the mesh's surface. The array will contain less entries than output vertices if volume samples are active since volume samples are not on the surface. \param[in] axisAlignedBox A box that limits the space where samples can get created \param[in] boxOrientation The orientation of the box that limits the space where samples can get created \param[in] maxNumSamples If larger than zero, the sampler will stop when the sample count reaches maxNumSamples \param[in] numSampleAttemptsAroundPoint Number of repetitions the underlying algorithm performs to find a new valid sample that matches all criteria like minimal distance to existing samples etc. 
\return Returns true if the sampling was successful and false if there was a problem. Usually an internal overflow is the problem for very big meshes or very small sampling radii. */ static bool poissonSample(const PxSimpleTriangleMesh& mesh, PxReal r, PxArray<PxVec3>& result, PxReal rVolume = 0.0f, PxArray<PxI32>* triangleIds = NULL, PxArray<PxVec3>* barycentricCoordinates = NULL, const PxBounds3* axisAlignedBox = NULL, const PxQuat* boxOrientation = NULL, PxU32 maxNumSamples = 0, PxU32 numSampleAttemptsAroundPoint = 30); /** Computes samples on a geometry's surface that are not closer to each other than a given distance. \param[in] geometry The geometry that defines the surface on which the samples get created \param[in] transform The geometry's global pose \param[in] worldBounds The geometry's bounding box \param[in] r The closest distance two surface samples are allowed to have \param[out] result Equally distributed samples on and if specified inside the triangle mesh \param[in] rVolume The average distance of samples inside the mesh. If set to zero, samples will only be placed on the mesh's surface \param[in] axisAlignedBox A box that limits the space where samples can get created \param[in] boxOrientation The orientation of the box that limits the space where samples can get created \param[in] maxNumSamples If larger than zero, the sampler will stop when the sample count reaches maxNumSamples \param[in] numSampleAttemptsAroundPoint Number of repetitions the underlying algorithm performs to find a new valid sample that matches all criteria like minimal distance to existing samples etc. \return Returns true if the sampling was successful and false if there was a problem. Usually an internal overflow is the problem for very big meshes or very small sampling radii. 
*/ static bool poissonSample(const PxGeometry& geometry, const PxTransform& transform, const PxBounds3& worldBounds, PxReal r, PxArray<PxVec3>& result, PxReal rVolume = 0.0f, const PxBounds3* axisAlignedBox = NULL, const PxQuat* boxOrientation = NULL, PxU32 maxNumSamples = 0, PxU32 numSampleAttemptsAroundPoint = 30); }; /** \brief Sampler to generate Poisson Samples locally on a triangle mesh or a shape. For every local addition of new samples, an individual sampling density can be used. */ class PxPoissonSampler : public PxUserAllocated { public: /** Sets the sampling radius \param[in] samplingRadius The closest distance two surface samples are allowed to have. Changing the sampling radius is a bit an expensive operation. \return Returns true if the sampling was successful and false if there was a problem. Usually an internal overflow is the problem for very big meshes or very small sampling radii. */ virtual bool setSamplingRadius(PxReal samplingRadius) = 0; /** Adds samples \param[in] samples The samples to add. Adding samples is a bit an expensive operation. */ virtual void addSamples(const PxArray<PxVec3>& samples) = 0; /** Adds samples \param[in] samples The samples to remove. Removing samples is a bit an expensive operation. \return Returns the number of removed samples. If some samples were not found, then the number of actually removed samples will be smaller than the number of samples requested to remove */ virtual PxU32 removeSamples(const PxArray<PxVec3>& samples) = 0; /** Adds new Poisson Samples inside the sphere specified \param[in] sphereCenter The sphere's center. Used to define the region where new samples get added. \param[in] sphereRadius The sphere's radius. Used to define the region where new samples get added. \param[in] createVolumeSamples If set to true, samples will also get generated inside of the mesh, not just on its surface. 
*/ virtual void addSamplesInSphere(const PxVec3& sphereCenter, PxReal sphereRadius, bool createVolumeSamples = false) = 0; /** Adds new Poisson Samples inside the box specified \param[in] axisAlignedBox The axis aligned bounding box. Used to define the region where new samples get added. \param[in] boxOrientation The orientation making an oriented bounding box out of the axis aligned one. Used to define the region where new samples get added. \param[in] createVolumeSamples If set to true, samples will also get generated inside of the mesh, not just on its surface. */ virtual void addSamplesInBox(const PxBounds3& axisAlignedBox, const PxQuat& boxOrientation, bool createVolumeSamples = false) = 0; /** Gets the Poisson Samples \return Returns the generated Poisson Samples */ virtual const PxArray<PxVec3>& getSamples() const = 0; virtual ~PxPoissonSampler() { } }; /** Creates a shape sampler \param[in] geometry The shape that defines the surface on which the samples get created \param[in] transform The shape's global pose \param[in] worldBounds The shapes bounding box \param[in] initialSamplingRadius The closest distance two surface samples are allowed to have \param[in] numSampleAttemptsAroundPoint Number of repetitions the underlying algorithm performs to find a new valid sample that matches all criteria like minimal distance to existing samples etc. \return Returns the sampler */ PxPoissonSampler* PxCreateShapeSampler(const PxGeometry& geometry, const PxTransform& transform, const PxBounds3& worldBounds, PxReal initialSamplingRadius, PxI32 numSampleAttemptsAroundPoint = 30); /** \brief Sampler to generate Poisson Samples on a triangle mesh. 
*/ class PxTriangleMeshPoissonSampler : public virtual PxPoissonSampler { public: /** Gets the Poisson Samples' triangle indices \return Returns the generated Poisson Samples' triangle indices */ virtual const PxArray<PxI32>& getSampleTriangleIds() const = 0; /** Gets the Poisson Samples' barycentric coordinates \return Returns the generated Poisson Samples' barycentric coordinates */ virtual const PxArray<PxVec3>& getSampleBarycentrics() const = 0; /** Checks whether a point is inside the triangle mesh \return Returns true if the point is inside the triangle mesh */ virtual bool isPointInTriangleMesh(const PxVec3& p) = 0; virtual ~PxTriangleMeshPoissonSampler() { } }; /** Creates a triangle mesh sampler \param[in] triangles The triangle indices of the mesh \param[in] numTriangles The total number of triangles \param[in] vertices The vertices of the mesh \param[in] numVertices The total number of vertices \param[in] initialSamplingRadius The closest distance two surface samples are allowed to have \param[in] numSampleAttemptsAroundPoint Number of repetitions the underlying algorithm performs to find a new valid sample that matches all criteria like minimal distance to existing samples etc. \return Returns the sampler */ PxTriangleMeshPoissonSampler* PxCreateTriangleMeshSampler(const PxU32* triangles, PxU32 numTriangles, const PxVec3* vertices, PxU32 numVertices, PxReal initialSamplingRadius, PxI32 numSampleAttemptsAroundPoint = 30); #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
10,597
C
55.978494
260
0.787676
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxExtensionsAPI.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_EXTENSIONS_API_H #define PX_EXTENSIONS_API_H /** \addtogroup extensions @{ */ #include "foundation/PxErrorCallback.h" #include "extensions/PxDefaultAllocator.h" #include "extensions/PxConstraintExt.h" #include "extensions/PxDistanceJoint.h" #include "extensions/PxContactJoint.h" #include "extensions/PxFixedJoint.h" #include "extensions/PxPrismaticJoint.h" #include "extensions/PxRevoluteJoint.h" #include "extensions/PxSphericalJoint.h" #include "extensions/PxD6Joint.h" #include "extensions/PxGearJoint.h" #include "extensions/PxRackAndPinionJoint.h" #include "extensions/PxDefaultSimulationFilterShader.h" #include "extensions/PxDefaultErrorCallback.h" #include "extensions/PxDefaultStreams.h" #include "extensions/PxRigidActorExt.h" #include "extensions/PxRigidBodyExt.h" #include "extensions/PxShapeExt.h" #include "extensions/PxTriangleMeshExt.h" #include "extensions/PxSerialization.h" #include "extensions/PxDefaultCpuDispatcher.h" #include "extensions/PxSmoothNormals.h" #include "extensions/PxSimpleFactory.h" #include "extensions/PxStringTableExt.h" #include "extensions/PxBroadPhaseExt.h" #include "extensions/PxMassProperties.h" #include "extensions/PxSceneQueryExt.h" #include "extensions/PxSceneQuerySystemExt.h" #include "extensions/PxCustomSceneQuerySystem.h" #include "extensions/PxConvexMeshExt.h" #include "extensions/PxSamplingExt.h" #include "extensions/PxTetrahedronMeshExt.h" #include "extensions/PxCustomGeometryExt.h" #if PX_ENABLE_FEATURES_UNDER_CONSTRUCTION #include "extensions/PxFEMClothExt.h" #endif /** \brief Initialize the PhysXExtensions library. This should be called before calling any functions or methods in extensions which may require allocation. \note This function does not need to be called before creating a PxDefaultAllocator object. 
\param physics a PxPhysics object \param pvd an PxPvd (PhysX Visual Debugger) object @see PxCloseExtensions PxFoundation PxPhysics */ PX_C_EXPORT bool PX_CALL_CONV PxInitExtensions(physx::PxPhysics& physics, physx::PxPvd* pvd); /** \brief Shut down the PhysXExtensions library. This function should be called to cleanly shut down the PhysXExtensions library before application exit. \note This function is required to be called to release foundation usage. @see PxInitExtensions */ PX_C_EXPORT void PX_CALL_CONV PxCloseExtensions(); /** @} */ #endif
4,057
C
40.835051
106
0.791225
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxCustomSceneQuerySystem.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_NEW_SCENE_QUERY_SYSTEM_H #define PX_NEW_SCENE_QUERY_SYSTEM_H /** \addtogroup extensions @{ */ #include "PxSceneQuerySystem.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief A custom scene query system. This is an example of a custom scene query system. 
It augments the PxSceneQuerySystem API to support an arbitrary number of "pruners", instead of the usual hardcoded two. It might not be possible to support the whole PxSceneQuerySystem API in this context. See the source code for details. @see PxSceneQuerySystem */ class PxCustomSceneQuerySystem : public PxSceneQuerySystem { public: PxCustomSceneQuerySystem() {} virtual ~PxCustomSceneQuerySystem() {} /** \brief Adds a pruner to the system. The internal PhysX scene-query system uses two regular pruners (one for static shapes, one for dynamic shapes) and an optional compound pruner. Our custom scene query system supports an arbitrary number of regular pruners. This can be useful to reduce the load on each pruner, in particular during updates, when internal trees are rebuilt in the background. On the other hand this implementation simply iterates over all created pruners to perform queries, so their cost might increase if a large number of pruners is used. In any case this serves as an example of how the PxSceneQuerySystem API can be used to customize scene queries. \param[in] primaryType Desired primary (main) type for the new pruner \param[in] secondaryType Secondary type when primary type is PxPruningStructureType::eDYNAMIC_AABB_TREE. \param[in] preallocated Optional number of preallocated shapes in the new pruner \return A pruner index @see PxCustomSceneQuerySystem PxSceneQueryUpdateMode PxCustomSceneQuerySystemAdapter PxSceneDesc::sceneQuerySystem */ virtual PxU32 addPruner(PxPruningStructureType::Enum primaryType, PxDynamicTreeSecondaryPruner::Enum secondaryType, PxU32 preallocated=0) = 0; /** \brief Start custom build-steps for all pruners This function is used in combination with customBuildstep() and finishCustomBuildstep() to let users take control of the pruners' build-step & commit calls - basically the pruners' update functions. 
These functions should be used with the PxSceneQueryUpdateMode::eBUILD_DISABLED_COMMIT_DISABLED update mode, otherwise the build-steps will happen automatically in fetchResults. For N pruners it can be more efficient to use these custom build-step functions to perform the updates in parallel: - call startCustomBuildstep() first (one synchronous call) - for each pruner, call customBuildstep() (asynchronous calls from multiple threads) - once it is done, call finishCustomBuildstep() to finish the update (synchronous call) The multi-threaded update is more efficient here than what it is in PxScene, because the "flushShapes()" call is also multi-threaded (while it is not in PxScene). Note that users are responsible for locks here, and these calls should not overlap with other SQ calls. In particular one should not add new objects to the SQ system or perform queries while these calls are happening. \return The number of pruners in the system. @see customBuildstep finishCustomBuildstep PxSceneQueryUpdateMode */ virtual PxU32 startCustomBuildstep() = 0; /** \brief Perform a custom build-step for a given pruner. \param[in] index Pruner index (should be between 0 and the number returned by startCustomBuildstep) @see startCustomBuildstep finishCustomBuildstep */ virtual void customBuildstep(PxU32 index) = 0; /** \brief Finish custom build-steps Call this function once after all the customBuildstep() calls are done. @see startCustomBuildstep customBuildstep */ virtual void finishCustomBuildstep() = 0; }; /** \brief An adapter class to customize the object-to-pruner mapping. In the regular PhysX code static shapes went to the static pruner, and dynamic shapes went to the dynamic pruner. This class is a replacement for this mapping when N user-defined pruners are involved. */ class PxCustomSceneQuerySystemAdapter { public: PxCustomSceneQuerySystemAdapter() {} virtual ~PxCustomSceneQuerySystemAdapter() {} /** \brief Gets a pruner index for an actor/shape. 
This user-defined function tells the system in which pruner a given actor/shape should go. \note The returned index must be valid, i.e. it must have been previously returned to users by PxCustomSceneQuerySystem::addPruner. \param[in] actor The actor \param[in] shape The shape \return A pruner index for this actor/shape. @see PxRigidActor PxShape PxCustomSceneQuerySystem::addPruner */ virtual PxU32 getPrunerIndex(const PxRigidActor& actor, const PxShape& shape) const = 0; /** \brief Pruner filtering callback. This will be called for each query to validate whether it should process a given pruner. \param[in] prunerIndex The index of currently processed pruner \param[in] context The query context \param[in] filterData The query's filter data \param[in] filterCall The query's filter callback \return True to process the pruner, false to skip it entirely */ virtual bool processPruner(PxU32 prunerIndex, const PxQueryThreadContext* context, const PxQueryFilterData& filterData, PxQueryFilterCallback* filterCall) const = 0; }; /** \brief Creates a custom scene query system. This is similar to PxCreateExternalSceneQuerySystem, except this function creates a PxCustomSceneQuerySystem object. It can be plugged to PxScene the same way, via PxSceneDesc::sceneQuerySystem. 
\param[in] sceneQueryUpdateMode Desired update mode \param[in] contextID Context ID parameter, sent to the profiler \param[in] adapter Adapter class implementing our extended API \param[in] usesTreeOfPruners True to keep pruners themselves in a BVH, which might increase query performance if a lot of pruners are involved \return A custom SQ system instance @see PxCustomSceneQuerySystem PxSceneQueryUpdateMode PxCustomSceneQuerySystemAdapter PxSceneDesc::sceneQuerySystem */ PxCustomSceneQuerySystem* PxCreateCustomSceneQuerySystem(PxSceneQueryUpdateMode::Enum sceneQueryUpdateMode, PxU64 contextID, const PxCustomSceneQuerySystemAdapter& adapter, bool usesTreeOfPruners=false); #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
8,072
C
41.046875
204
0.776759
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxSceneQueryExt.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_SCENE_QUERY_EXT_H #define PX_SCENE_QUERY_EXT_H /** \addtogroup extensions @{ */ #include "PxPhysXConfig.h" #include "PxScene.h" #if !PX_DOXYGEN namespace physx { #endif // These types have been deprecated (removed) in PhysX 3.4. We typedef them to the new types here for easy migration from 3.3 to 3.4. 
typedef PxQueryHit PxSceneQueryHit; typedef PxQueryFilterData PxSceneQueryFilterData; typedef PxQueryFilterCallback PxSceneQueryFilterCallback; typedef PxQueryCache PxSceneQueryCache; typedef PxHitFlag PxSceneQueryFlag; typedef PxHitFlags PxSceneQueryFlags; /** \brief utility functions for use with PxScene, related to scene queries. Some of these functions have been deprecated (removed) in PhysX 3.4. We re-implement them here for easy migration from 3.3 to 3.4. @see PxShape */ class PxSceneQueryExt { public: /** \brief Raycast returning any blocking hit, not necessarily the closest. Returns whether any rigid actor is hit along the ray. \note Shooting a ray from within an object leads to different results depending on the shape type. Please check the details in article SceneQuery. User can ignore such objects by using one of the provided filter mechanisms. \param[in] scene The scene \param[in] origin Origin of the ray. \param[in] unitDir Normalized direction of the ray. \param[in] distance Length of the ray. Needs to be larger than 0. \param[out] hit Raycast hit information. \param[in] filterData Filtering data and simple logic. \param[in] filterCall Custom filtering logic (optional). Only used if the corresponding #PxHitFlag flags are set. If NULL, all hits are assumed to be blocking. \param[in] cache Cached hit shape (optional). Ray is tested against cached shape first. If no hit is found the ray gets queried against the scene. Note: Filtering is not executed for a cached shape if supplied; instead, if a hit is found, it is assumed to be a blocking hit. Note: Using past touching hits as cache will produce incorrect behavior since the cached hit will always be treated as blocking. \return True if a blocking hit was found. 
@see PxSceneQueryFilterData PxSceneQueryFilterCallback PxSceneQueryCache PxSceneQueryHit */ static bool raycastAny( const PxScene& scene, const PxVec3& origin, const PxVec3& unitDir, const PxReal distance, PxSceneQueryHit& hit, const PxSceneQueryFilterData& filterData = PxSceneQueryFilterData(), PxSceneQueryFilterCallback* filterCall = NULL, const PxSceneQueryCache* cache = NULL); /** \brief Raycast returning a single result. Returns the first rigid actor that is hit along the ray. Data for a blocking hit will be returned as specified by the outputFlags field. Touching hits will be ignored. \note Shooting a ray from within an object leads to different results depending on the shape type. Please check the details in article SceneQuery. User can ignore such objects by using one of the provided filter mechanisms. \param[in] scene The scene \param[in] origin Origin of the ray. \param[in] unitDir Normalized direction of the ray. \param[in] distance Length of the ray. Needs to be larger than 0. \param[in] outputFlags Specifies which properties should be written to the hit information \param[out] hit Raycast hit information. \param[in] filterData Filtering data and simple logic. \param[in] filterCall Custom filtering logic (optional). Only used if the corresponding #PxHitFlag flags are set. If NULL, all hits are assumed to be blocking. \param[in] cache Cached hit shape (optional). Ray is tested against cached shape first then against the scene. Note: Filtering is not executed for a cached shape if supplied; instead, if a hit is found, it is assumed to be a blocking hit. Note: Using past touching hits as cache will produce incorrect behavior since the cached hit will always be treated as blocking. \return True if a blocking hit was found. 
@see PxSceneQueryFlags PxRaycastHit PxSceneQueryFilterData PxSceneQueryFilterCallback PxSceneQueryCache */ static bool raycastSingle( const PxScene& scene, const PxVec3& origin, const PxVec3& unitDir, const PxReal distance, PxSceneQueryFlags outputFlags, PxRaycastHit& hit, const PxSceneQueryFilterData& filterData = PxSceneQueryFilterData(), PxSceneQueryFilterCallback* filterCall = NULL, const PxSceneQueryCache* cache = NULL); /** \brief Raycast returning multiple results. Find all rigid actors that get hit along the ray. Each result contains data as specified by the outputFlags field. \note Touching hits are not ordered. \note Shooting a ray from within an object leads to different results depending on the shape type. Please check the details in article SceneQuery. User can ignore such objects by using one of the provided filter mechanisms. \param[in] scene The scene \param[in] origin Origin of the ray. \param[in] unitDir Normalized direction of the ray. \param[in] distance Length of the ray. Needs to be larger than 0. \param[in] outputFlags Specifies which properties should be written to the hit information \param[out] hitBuffer Raycast hit information buffer. If the buffer overflows, the blocking hit is returned as the last entry together with an arbitrary subset of the nearer touching hits (typically the query should be restarted with a larger buffer). \param[in] hitBufferSize Size of the hit buffer. \param[out] blockingHit True if a blocking hit was found. If found, it is the last in the buffer, preceded by any touching hits which are closer. Otherwise the touching hits are listed. \param[in] filterData Filtering data and simple logic. \param[in] filterCall Custom filtering logic (optional). Only used if the corresponding #PxHitFlag flags are set. If NULL, all hits are assumed to be touching. \param[in] cache Cached hit shape (optional). Ray is tested against cached shape first then against the scene. 
Note: Filtering is not executed for a cached shape if supplied; instead, if a hit is found, it is assumed to be a blocking hit. Note: Using past touching hits as cache will produce incorrect behavior since the cached hit will always be treated as blocking. \return Number of hits in the buffer, or -1 if the buffer overflowed. @see PxSceneQueryFlags PxRaycastHit PxSceneQueryFilterData PxSceneQueryFilterCallback PxSceneQueryCache */ static PxI32 raycastMultiple( const PxScene& scene, const PxVec3& origin, const PxVec3& unitDir, const PxReal distance, PxSceneQueryFlags outputFlags, PxRaycastHit* hitBuffer, PxU32 hitBufferSize, bool& blockingHit, const PxSceneQueryFilterData& filterData = PxSceneQueryFilterData(), PxSceneQueryFilterCallback* filterCall = NULL, const PxSceneQueryCache* cache = NULL); /** \brief Sweep returning any blocking hit, not necessarily the closest. Returns whether any rigid actor is hit along the sweep path. \note If a shape from the scene is already overlapping with the query shape in its starting position, behavior is controlled by the PxSceneQueryFlag::eINITIAL_OVERLAP flag. \param[in] scene The scene \param[in] geometry Geometry of object to sweep (supported types are: box, sphere, capsule, convex). \param[in] pose Pose of the sweep object. \param[in] unitDir Normalized direction of the sweep. \param[in] distance Sweep distance. Needs to be larger than 0. Will be clamped to PX_MAX_SWEEP_DISTANCE. \param[in] queryFlags Combination of PxSceneQueryFlag defining the query behavior \param[out] hit Sweep hit information. \param[in] filterData Filtering data and simple logic. \param[in] filterCall Custom filtering logic (optional). Only used if the corresponding #PxHitFlag flags are set. If NULL, all hits are assumed to be blocking. \param[in] cache Cached hit shape (optional). Sweep is performed against cached shape first. If no hit is found the sweep gets queried against the scene. 
Note: Filtering is not executed for a cached shape if supplied; instead, if a hit is found, it is assumed to be a blocking hit. Note: Using past touching hits as cache will produce incorrect behavior since the cached hit will always be treated as blocking. \param[in] inflation This parameter creates a skin around the swept geometry which increases its extents for sweeping. The sweep will register a hit as soon as the skin touches a shape, and will return the corresponding distance and normal. \return True if a blocking hit was found. @see PxSceneQueryFilterData PxSceneQueryFilterCallback PxSceneQueryHit PxSceneQueryCache */ static bool sweepAny( const PxScene& scene, const PxGeometry& geometry, const PxTransform& pose, const PxVec3& unitDir, const PxReal distance, PxSceneQueryFlags queryFlags, PxSceneQueryHit& hit, const PxSceneQueryFilterData& filterData = PxSceneQueryFilterData(), PxSceneQueryFilterCallback* filterCall = NULL, const PxSceneQueryCache* cache = NULL, PxReal inflation = 0.0f); /** \brief Sweep returning a single result. Returns the first rigid actor that is hit along the ray. Data for a blocking hit will be returned as specified by the outputFlags field. Touching hits will be ignored. \note If a shape from the scene is already overlapping with the query shape in its starting position, behavior is controlled by the PxSceneQueryFlag::eINITIAL_OVERLAP flag. \param[in] scene The scene \param[in] geometry Geometry of object to sweep (supported types are: box, sphere, capsule, convex). \param[in] pose Pose of the sweep object. \param[in] unitDir Normalized direction of the sweep. \param[in] distance Sweep distance. Needs to be larger than 0. Will be clamped to PX_MAX_SWEEP_DISTANCE. \param[in] outputFlags Specifies which properties should be written to the hit information. \param[out] hit Sweep hit information. \param[in] filterData Filtering data and simple logic. \param[in] filterCall Custom filtering logic (optional). 
Only used if the corresponding #PxHitFlag flags are set. If NULL, all hits are assumed to be blocking. \param[in] cache Cached hit shape (optional). Sweep is performed against cached shape first then against the scene. Note: Filtering is not executed for a cached shape if supplied; instead, if a hit is found, it is assumed to be a blocking hit. Note: Using past touching hits as cache will produce incorrect behavior since the cached hit will always be treated as blocking. \param[in] inflation This parameter creates a skin around the swept geometry which increases its extents for sweeping. The sweep will register a hit as soon as the skin touches a shape, and will return the corresponding distance and normal. \return True if a blocking hit was found. @see PxSceneQueryFlags PxSweepHit PxSceneQueryFilterData PxSceneQueryFilterCallback PxSceneQueryCache */ static bool sweepSingle(const PxScene& scene, const PxGeometry& geometry, const PxTransform& pose, const PxVec3& unitDir, const PxReal distance, PxSceneQueryFlags outputFlags, PxSweepHit& hit, const PxSceneQueryFilterData& filterData = PxSceneQueryFilterData(), PxSceneQueryFilterCallback* filterCall = NULL, const PxSceneQueryCache* cache = NULL, PxReal inflation=0.0f); /** \brief Sweep returning multiple results. Find all rigid actors that get hit along the sweep. Each result contains data as specified by the outputFlags field. \note Touching hits are not ordered. \note If a shape from the scene is already overlapping with the query shape in its starting position, behavior is controlled by the PxSceneQueryFlag::eINITIAL_OVERLAP flag. \param[in] scene The scene \param[in] geometry Geometry of object to sweep (supported types are: box, sphere, capsule, convex). \param[in] pose Pose of the sweep object. \param[in] unitDir Normalized direction of the sweep. \param[in] distance Sweep distance. Needs to be larger than 0. Will be clamped to PX_MAX_SWEEP_DISTANCE. 
\param[in] outputFlags Specifies which properties should be written to the hit information. \param[out] hitBuffer Sweep hit information buffer. If the buffer overflows, the blocking hit is returned as the last entry together with an arbitrary subset of the nearer touching hits (typically the query should be restarted with a larger buffer). \param[in] hitBufferSize Size of the hit buffer. \param[out] blockingHit True if a blocking hit was found. If found, it is the last in the buffer, preceded by any touching hits which are closer. Otherwise the touching hits are listed. \param[in] filterData Filtering data and simple logic. \param[in] filterCall Custom filtering logic (optional). Only used if the corresponding #PxHitFlag flags are set. If NULL, all hits are assumed to be touching. \param[in] cache Cached hit shape (optional). Sweep is performed against cached shape first then against the scene. Note: Filtering is not executed for a cached shape if supplied; instead, if a hit is found, it is assumed to be a blocking hit. Note: Using past touching hits as cache will produce incorrect behavior since the cached hit will always be treated as blocking. \param[in] inflation This parameter creates a skin around the swept geometry which increases its extents for sweeping. The sweep will register a hit as soon as the skin touches a shape, and will return the corresponding distance and normal. \return Number of hits in the buffer, or -1 if the buffer overflowed. 
@see PxSceneQueryFlags PxSweepHit PxSceneQueryFilterData PxSceneQueryFilterCallback PxSceneQueryCache */ static PxI32 sweepMultiple( const PxScene& scene, const PxGeometry& geometry, const PxTransform& pose, const PxVec3& unitDir, const PxReal distance, PxSceneQueryFlags outputFlags, PxSweepHit* hitBuffer, PxU32 hitBufferSize, bool& blockingHit, const PxSceneQueryFilterData& filterData = PxSceneQueryFilterData(), PxSceneQueryFilterCallback* filterCall = NULL, const PxSceneQueryCache* cache = NULL, PxReal inflation = 0.0f); /** \brief Test overlap between a geometry and objects in the scene. \note Filtering: Overlap tests do not distinguish between touching and blocking hit types. Both get written to the hit buffer. \note PxHitFlag::eMESH_MULTIPLE and PxHitFlag::eMESH_BOTH_SIDES have no effect in this case \param[in] scene The scene \param[in] geometry Geometry of object to check for overlap (supported types are: box, sphere, capsule, convex). \param[in] pose Pose of the object. \param[out] hitBuffer Buffer to store the overlapping objects to. If the buffer overflows, an arbitrary subset of overlapping objects is stored (typically the query should be restarted with a larger buffer). \param[in] hitBufferSize Size of the hit buffer. \param[in] filterData Filtering data and simple logic. \param[in] filterCall Custom filtering logic (optional). Only used if the corresponding #PxHitFlag flags are set. If NULL, all hits are assumed to overlap. \return Number of hits in the buffer, or -1 if the buffer overflowed. @see PxSceneQueryFlags PxSceneQueryFilterData PxSceneQueryFilterCallback */ static PxI32 overlapMultiple( const PxScene& scene, const PxGeometry& geometry, const PxTransform& pose, PxOverlapHit* hitBuffer, PxU32 hitBufferSize, const PxSceneQueryFilterData& filterData = PxSceneQueryFilterData(), PxSceneQueryFilterCallback* filterCall = NULL); /** \brief Test returning, for a given geometry, any overlapping object in the scene. 
\note Filtering: Overlap tests do not distinguish between touching and blocking hit types. Both trigger a hit. \note PxHitFlag::eMESH_MULTIPLE and PxHitFlag::eMESH_BOTH_SIDES have no effect in this case \param[in] scene The scene \param[in] geometry Geometry of object to check for overlap (supported types are: box, sphere, capsule, convex). \param[in] pose Pose of the object. \param[out] hit Pointer to store the overlapping object to. \param[in] filterData Filtering data and simple logic. \param[in] filterCall Custom filtering logic (optional). Only used if the corresponding #PxHitFlag flags are set. If NULL, all hits are assumed to overlap. \return True if an overlap was found. @see PxSceneQueryFlags PxSceneQueryFilterData PxSceneQueryFilterCallback */ static bool overlapAny( const PxScene& scene, const PxGeometry& geometry, const PxTransform& pose, PxOverlapHit& hit, const PxSceneQueryFilterData& filterData = PxSceneQueryFilterData(), PxSceneQueryFilterCallback* filterCall = NULL); }; struct PxBatchQueryStatus { enum Enum { /** \brief This is the initial state before a query starts. */ ePENDING = 0, /** \brief The query is finished; results have been written into the result and hit buffers. */ eSUCCESS, /** \brief The query results were incomplete due to touch hit buffer overflow. Blocking hit is still correct. */ eOVERFLOW }; static PX_FORCE_INLINE Enum getStatus(const PxRaycastBuffer& r) { return (0xffffffff == r.nbTouches) ? ePENDING : (0xffffffff == r.maxNbTouches ? eOVERFLOW : eSUCCESS); } static PX_FORCE_INLINE Enum getStatus(const PxSweepBuffer& r) { return (0xffffffff == r.nbTouches) ? ePENDING : (0xffffffff == r.maxNbTouches ? eOVERFLOW : eSUCCESS); } static PX_FORCE_INLINE Enum getStatus(const PxOverlapBuffer& r) { return (0xffffffff == r.nbTouches) ? ePENDING : (0xffffffff == r.maxNbTouches ? eOVERFLOW : eSUCCESS); } }; class PxBatchQueryExt { public: virtual void release() = 0; /** \brief Performs a raycast against objects in the scene. 
\note Touching hits are not ordered. \note Shooting a ray from within an object leads to different results depending on the shape type. Please check the details in article SceneQuery. User can ignore such objects by using one of the provided filter mechanisms. \param[in] origin Origin of the ray. \param[in] unitDir Normalized direction of the ray. \param[in] distance Length of the ray. Needs to be larger than 0. \param[in] maxNbTouches Maximum number of hits to record in the touch buffer for this query. Default=0 reports a single blocking hit. If maxTouchHits is set to 0 all hits are treated as blocking by default. \param[in] hitFlags Specifies which properties per hit should be computed and returned in hit array and blocking hit. \param[in] filterData Filtering data passed to the filter shader. See #PxQueryFilterData #PxQueryFilterCallback \param[in] cache Cached hit shape (optional). Query is tested against cached shape first. If no hit is found the ray gets queried against the scene. Note: Filtering is not executed for a cached shape if supplied; instead, if a hit is found, it is assumed to be a blocking hit. Note: Using past touching hits as cache will produce incorrect behavior since the cached hit will always be treated as blocking. \note This query call writes to a list associated with the query object and is NOT thread safe (for performance reasons there is no lock and overlapping writes from different threads may result in undefined behavior). \return Returns a PxRaycastBuffer pointer that will store the result of the query after execute() is completed. This will point either to an element of the buffer allocated on construction or to a user buffer passed to the constructor. 
@see PxCreateBatchQueryExt @see PxQueryFilterData PxQueryFilterCallback PxRaycastHit PxScene::raycast */ virtual PxRaycastBuffer* raycast( const PxVec3& origin, const PxVec3& unitDir, const PxReal distance, const PxU16 maxNbTouches = 0, PxHitFlags hitFlags = PxHitFlags(PxHitFlag::eDEFAULT), const PxQueryFilterData& filterData = PxQueryFilterData(), const PxQueryCache* cache = NULL) = 0; /** \brief Performs a sweep test against objects in the scene. \note Touching hits are not ordered. \note If a shape from the scene is already overlapping with the query shape in its starting position, the hit is returned unless eASSUME_NO_INITIAL_OVERLAP was specified. \param[in] geometry Geometry of object to sweep (supported types are: box, sphere, capsule, convex). \param[in] pose Pose of the sweep object. \param[in] unitDir Normalized direction of the sweep. \param[in] distance Sweep distance. Needs to be larger than 0. Will be clamped to PX_MAX_SWEEP_DISTANCE. \param[in] maxNbTouches Maximum number of hits to record in the touch buffer for this query. Default=0 reports a single blocking hit. If maxTouchHits is set to 0 all hits are treated as blocking by default. \param[in] hitFlags Specifies which properties per hit should be computed and returned in hit array and blocking hit. \param[in] filterData Filtering data and simple logic. See #PxQueryFilterData #PxQueryFilterCallback \param[in] cache Cached hit shape (optional). Query is tested against cached shape first. If no hit is found the ray gets queried against the scene. Note: Filtering is not executed for a cached shape if supplied; instead, if a hit is found, it is assumed to be a blocking hit. Note: Using past touching hits as cache will produce incorrect behavior since the cached hit will always be treated as blocking. \param[in] inflation This parameter creates a skin around the swept geometry which increases its extents for sweeping. 
The sweep will register a hit as soon as the skin touches a shape, and will return the corresponding distance and normal. Note: ePRECISE_SWEEP doesn't support inflation. Therefore the sweep will be performed with zero inflation. \note This query call writes to a list associated with the query object and is NOT thread safe (for performance reasons there is no lock and overlapping writes from different threads may result in undefined behavior). \return Returns a PxSweepBuffer pointer that will store the result of the query after execute() is completed. This will point either to an element of the buffer allocated on construction or to a user buffer passed to the constructor. @see PxCreateBatchQueryExt @see PxHitFlags PxQueryFilterData PxBatchQueryPreFilterShader PxBatchQueryPostFilterShader PxSweepHit */ virtual PxSweepBuffer* sweep( const PxGeometry& geometry, const PxTransform& pose, const PxVec3& unitDir, const PxReal distance, const PxU16 maxNbTouches = 0, PxHitFlags hitFlags = PxHitFlags(PxHitFlag::eDEFAULT), const PxQueryFilterData& filterData = PxQueryFilterData(), const PxQueryCache* cache = NULL, const PxReal inflation = 0.0f) = 0; /** \brief Performs an overlap test of a given geometry against objects in the scene. \note Filtering: returning eBLOCK from user filter for overlap queries will cause a warning (see #PxQueryHitType). \param[in] geometry Geometry of object to check for overlap (supported types are: box, sphere, capsule, convex). \param[in] pose Pose of the object. \param[in] maxNbTouches Maximum number of hits to record in the touch buffer for this query. Default=0 reports a single blocking hit. If maxTouchHits is set to 0 all hits are treated as blocking by default. \param[in] filterData Filtering data and simple logic. See #PxQueryFilterData #PxQueryFilterCallback \param[in] cache Cached hit shape (optional). Query is tested against cached shape first. If no hit is found the ray gets queried against the scene. 
Note: Filtering is not executed for a cached shape if supplied; instead, if a hit is found, it is assumed to be a blocking hit. Note: Using past touching hits as cache will produce incorrect behavior since the cached hit will always be treated as blocking. \note eBLOCK should not be returned from user filters for overlap(). Doing so will result in undefined behavior, and a warning will be issued. \note If the PxQueryFlag::eNO_BLOCK flag is set, the eBLOCK will instead be automatically converted to an eTOUCH and the warning suppressed. \note This query call writes to a list associated with the query object and is NOT thread safe (for performance reasons there is no lock and overlapping writes from different threads may result in undefined behavior). \return Returns a PxOverlapBuffer pointer that will store the result of the query after execute() is completed. This will point either to an element of the buffer allocated on construction or to a user buffer passed to the constructor. @see PxCreateBatchQueryExt @see PxQueryFilterData PxQueryFilterCallback */ virtual PxOverlapBuffer* overlap( const PxGeometry& geometry, const PxTransform& pose, PxU16 maxNbTouches = 0, const PxQueryFilterData& filterData = PxQueryFilterData(), const PxQueryCache* cache = NULL) = 0; virtual void execute() = 0; protected: virtual ~PxBatchQueryExt() {} }; /** \brief Create a PxBatchQueryExt without the need for pre-allocated result or touch buffers. \param[in] scene Queries will be performed against objects in the specified PxScene \param[in] queryFilterCallback Filtering for all queries is performed using queryFilterCallback. A null pointer results in all shapes being considered. \param[in] maxNbRaycasts A result buffer will be allocated that is large enough to accommodate maxNbRaycasts calls to PxBatchQueryExt::raycast() \param[in] maxNbRaycastTouches A touch buffer will be allocated that is large enough to accommodate maxNbRaycastTouches touches for all raycasts in the batch. 
\param[in] maxNbSweeps A result buffer will be allocated that is large enough to accommodate maxNbSweeps calls to PxBatchQueryExt::sweep() \param[in] maxNbSweepTouches A touch buffer will be allocated that is large enough to accommodate maxNbSweepTouches touches for all sweeps in the batch. \param[in] maxNbOverlaps A result buffer will be allocated that is large enough to accommodate maxNbOverlaps calls to PxBatchQueryExt::overlap() \param[in] maxNbOverlapTouches A touch buffer will be allocated that is large enough to accommodate maxNbOverlapTouches touches for all overlaps in the batch. \return Returns a PxBatchQueryExt instance. A NULL pointer will be returned if the subsequent allocations fail or if any of the arguments are illegal. In the event that a NULL pointer is returned a corresponding error will be issued to the error stream. */ PxBatchQueryExt* PxCreateBatchQueryExt( const PxScene& scene, PxQueryFilterCallback* queryFilterCallback, const PxU32 maxNbRaycasts, const PxU32 maxNbRaycastTouches, const PxU32 maxNbSweeps, const PxU32 maxNbSweepTouches, const PxU32 maxNbOverlaps, const PxU32 maxNbOverlapTouches); /** \brief Create a PxBatchQueryExt with user-supplied result and touch buffers. \param[in] scene Queries will be performed against objects in the specified PxScene \param[in] queryFilterCallback Filtering for all queries is performed using queryFilterCallback. A null pointer results in all shapes being considered. \param[in] raycastBuffers This is the array that will be used to store the results of each raycast in a batch. \param[in] maxNbRaycasts This is the length of the raycastBuffers array. \param[in] raycastTouches This is the array that will be used to store the touches generated by all raycasts in a batch. \param[in] maxNbRaycastTouches This is the length of the raycastTouches array. \param[in] sweepBuffers This is the array that will be used to store the results of each sweep in a batch. 
\param[in] maxNbSweeps This is the length of the sweepBuffers array. \param[in] sweepTouches This is the array that will be used to store the touches generated by all sweeps in a batch. \param[in] maxNbSweepTouches This is the length of the sweepTouches array. \param[in] overlapBuffers This is the array that will be used to store the results of each overlap in a batch. \param[in] maxNbOverlaps This is the length of the overlapBuffers array. \param[in] overlapTouches This is the array that will be used to store the touches generated by all overlaps in a batch. \param[in] maxNbOverlapTouches This is the length of the overlapTouches array. \return Returns a PxBatchQueryExt instance. A NULL pointer will be returned if the subsequent allocations fail or if any of the arguments are illegal. In the event that a NULL pointer is returned a corresponding error will be issued to the error stream. */ PxBatchQueryExt* PxCreateBatchQueryExt( const PxScene& scene, PxQueryFilterCallback* queryFilterCallback, PxRaycastBuffer* raycastBuffers, const PxU32 maxNbRaycasts, PxRaycastHit* raycastTouches, const PxU32 maxNbRaycastTouches, PxSweepBuffer* sweepBuffers, const PxU32 maxNbSweeps, PxSweepHit* sweepTouches, const PxU32 maxNbSweepTouches, PxOverlapBuffer* overlapBuffers, const PxU32 maxNbOverlaps, PxOverlapHit* overlapTouches, const PxU32 maxNbOverlapTouches); #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
30,622
C
59.045098
242
0.775619
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxDistanceJoint.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_DISTANCE_JOINT_H #define PX_DISTANCE_JOINT_H /** \addtogroup extensions @{ */ #include "extensions/PxJoint.h" #if !PX_DOXYGEN namespace physx { #endif class PxDistanceJoint; /** \brief Create a distance Joint. \param[in] physics The physics SDK \param[in] actor0 An actor to which the joint is attached. 
NULL may be used to attach the joint to a specific point in the world frame \param[in] localFrame0 The position and orientation of the joint relative to actor0 \param[in] actor1 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame \param[in] localFrame1 The position and orientation of the joint relative to actor1 @see PxDistanceJoint */ PxDistanceJoint* PxDistanceJointCreate(PxPhysics& physics, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1); /** \brief flags for configuring the drive of a PxDistanceJoint @see PxDistanceJoint */ struct PxDistanceJointFlag { enum Enum { eMAX_DISTANCE_ENABLED = 1<<1, eMIN_DISTANCE_ENABLED = 1<<2, eSPRING_ENABLED = 1<<3 }; }; typedef PxFlags<PxDistanceJointFlag::Enum, PxU16> PxDistanceJointFlags; PX_FLAGS_OPERATORS(PxDistanceJointFlag::Enum, PxU16) /** \brief a joint that maintains an upper or lower bound (or both) on the distance between two points on different objects @see PxDistanceJointCreate PxJoint */ class PxDistanceJoint : public PxJoint { public: /** \brief Return the current distance of the joint */ virtual PxReal getDistance() const = 0; /** \brief Set the allowed minimum distance for the joint. The minimum distance must be no more than the maximum distance <b>Default</b> 0.0f <b>Range</b> [0, PX_MAX_F32) \param[in] distance the minimum distance @see PxDistanceJoint::minDistance, PxDistanceJointFlag::eMIN_DISTANCE_ENABLED getMinDistance() */ virtual void setMinDistance(PxReal distance) = 0; /** \brief Get the allowed minimum distance for the joint. \return the allowed minimum distance @see PxDistanceJoint::minDistance, PxDistanceJointFlag::eMIN_DISTANCE_ENABLED setMinDistance() */ virtual PxReal getMinDistance() const = 0; /** \brief Set the allowed maximum distance for the joint. The maximum distance must be no less than the minimum distance. 
<b>Default</b> 0.0f <b>Range</b> [0, PX_MAX_F32) \param[in] distance the maximum distance @see PxDistanceJoint::maxDistance, PxDistanceJointFlag::eMAX_DISTANCE_ENABLED getMinDistance() */ virtual void setMaxDistance(PxReal distance) = 0; /** \brief Get the allowed maximum distance for the joint. \return the allowed maximum distance @see PxDistanceJoint::maxDistance, PxDistanceJointFlag::eMAX_DISTANCE_ENABLED setMaxDistance() */ virtual PxReal getMaxDistance() const = 0; /** \brief Set the error tolerance of the joint. \param[in] tolerance the distance beyond the allowed range at which the joint becomes active @see PxDistanceJoint::tolerance, getTolerance() */ virtual void setTolerance(PxReal tolerance) = 0; /** \brief Get the error tolerance of the joint. the distance beyond the joint's [min, max] range before the joint becomes active. <b>Default</b> 0.25f * PxTolerancesScale::length <b>Range</b> (0, PX_MAX_F32) This value should be used to ensure that if the minimum distance is zero and the spring function is in use, the rest length of the spring is non-zero. @see PxDistanceJoint::tolerance, setTolerance() */ virtual PxReal getTolerance() const = 0; /** \brief Set the strength of the joint spring. The spring is used if enabled, and the distance exceeds the range [min-error, max+error]. <b>Default</b> 0.0f <b>Range</b> [0, PX_MAX_F32) \param[in] stiffness the spring strength of the joint @see PxDistanceJointFlag::eSPRING_ENABLED getStiffness() */ virtual void setStiffness(PxReal stiffness) = 0; /** \brief Get the strength of the joint spring. \return stiffness the spring strength of the joint @see PxDistanceJointFlag::eSPRING_ENABLED setStiffness() */ virtual PxReal getStiffness() const = 0; /** \brief Set the damping of the joint spring. The spring is used if enabled, and the distance exceeds the range [min-error, max+error]. 
<b>Default</b> 0.0f <b>Range</b> [0, PX_MAX_F32) \param[in] damping the degree of damping of the joint spring of the joint @see PxDistanceJointFlag::eSPRING_ENABLED setDamping() */ virtual void setDamping(PxReal damping) = 0; /** \brief Get the damping of the joint spring. \return the degree of damping of the joint spring of the joint @see PxDistanceJointFlag::eSPRING_ENABLED setDamping() */ virtual PxReal getDamping() const = 0; /** \brief Set the flags specific to the Distance Joint. <b>Default</b> PxDistanceJointFlag::eMAX_DISTANCE_ENABLED \param[in] flags The joint flags. @see PxDistanceJointFlag setFlag() getFlags() */ virtual void setDistanceJointFlags(PxDistanceJointFlags flags) = 0; /** \brief Set a single flag specific to a Distance Joint to true or false. \param[in] flag The flag to set or clear. \param[in] value the value to which to set the flag @see PxDistanceJointFlag, getFlags() setFlags() */ virtual void setDistanceJointFlag(PxDistanceJointFlag::Enum flag, bool value) = 0; /** \brief Get the flags specific to the Distance Joint. \return the joint flags @see PxDistanceJoint::flags, PxDistanceJointFlag setFlag() setFlags() */ virtual PxDistanceJointFlags getDistanceJointFlags() const = 0; /** \brief Returns string name of PxDistanceJoint, used for serialization */ virtual const char* getConcreteTypeName() const { return "PxDistanceJoint"; } protected: //serialization /** \brief Constructor */ PX_INLINE PxDistanceJoint(PxType concreteType, PxBaseFlags baseFlags) : PxJoint(concreteType, baseFlags) {} /** \brief Deserialization constructor */ PX_INLINE PxDistanceJoint(PxBaseFlags baseFlags) : PxJoint(baseFlags) {} /** \brief Returns whether a given type name matches with the type of this instance */ virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxDistanceJoint", PxJoint); } //~serialization }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
8,026
C
28.840149
167
0.740718
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxPrismaticJoint.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_PRISMATIC_JOINT_H #define PX_PRISMATIC_JOINT_H /** \addtogroup extensions @{ */ #include "extensions/PxJoint.h" #include "extensions/PxJointLimit.h" #if !PX_DOXYGEN namespace physx { #endif class PxPrismaticJoint; /** \brief Create a prismatic joint. 
\param[in] physics The physics SDK \param[in] actor0 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame \param[in] localFrame0 The position and orientation of the joint relative to actor0 \param[in] actor1 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame \param[in] localFrame1 The position and orientation of the joint relative to actor1 @see PxPrismaticJoint */ PxPrismaticJoint* PxPrismaticJointCreate(PxPhysics& physics, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1); /** \brief Flags specific to the prismatic joint. @see PxPrismaticJoint */ struct PxPrismaticJointFlag { enum Enum { eLIMIT_ENABLED = 1<<1 }; }; typedef PxFlags<PxPrismaticJointFlag::Enum, PxU16> PxPrismaticJointFlags; PX_FLAGS_OPERATORS(PxPrismaticJointFlag::Enum, PxU16) /** \brief A prismatic joint permits relative translational movement between two bodies along an axis, but no relative rotational movement. the axis on each body is defined as the line containing the origin of the joint frame and extending along the x-axis of that frame \image html prismJoint.png @see PxPrismaticJointCreate() PxJoint */ class PxPrismaticJoint : public PxJoint { public: /** \brief returns the displacement of the joint along its axis. */ virtual PxReal getPosition() const = 0; /** \brief returns the velocity of the joint along its axis */ virtual PxReal getVelocity() const = 0; /** \brief sets the joint limit parameters. The limit range is [-PX_MAX_F32, PX_MAX_F32], but note that the width of the limit (upper-lower) must also be a valid float. @see PxJointLinearLimitPair getLimit() */ virtual void setLimit(const PxJointLinearLimitPair&) = 0; /** \brief gets the joint limit parameters. 
@see PxJointLinearLimit getLimit() */ virtual PxJointLinearLimitPair getLimit() const = 0; /** \brief Set the flags specific to the Prismatic Joint. <b>Default</b> PxPrismaticJointFlags(0) \param[in] flags The joint flags. @see PxPrismaticJointFlag setFlag() getFlags() */ virtual void setPrismaticJointFlags(PxPrismaticJointFlags flags) = 0; /** \brief Set a single flag specific to a Prismatic Joint to true or false. \param[in] flag The flag to set or clear. \param[in] value The value to which to set the flag @see PxPrismaticJointFlag, getFlags() setFlags() */ virtual void setPrismaticJointFlag(PxPrismaticJointFlag::Enum flag, bool value) = 0; /** \brief Get the flags specific to the Prismatic Joint. \return the joint flags @see PxPrismaticJoint::flags, PxPrismaticJointFlag setFlag() setFlags() */ virtual PxPrismaticJointFlags getPrismaticJointFlags() const = 0; /** \brief Returns string name of PxPrismaticJoint, used for serialization */ virtual const char* getConcreteTypeName() const { return "PxPrismaticJoint"; } protected: //serialization /** \brief Constructor */ PX_INLINE PxPrismaticJoint(PxType concreteType, PxBaseFlags baseFlags) : PxJoint(concreteType, baseFlags) {} /** \brief Deserialization constructor */ PX_INLINE PxPrismaticJoint(PxBaseFlags baseFlags) : PxJoint(baseFlags) {} /** \brief Returns whether a given type name matches with the type of this instance */ virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxPrismaticJoint", PxJoint); } //~serialization }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
5,545
C
29.98324
169
0.750586
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxRepXSimpleType.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_REPX_SIMPLE_TYPE_H #define PX_REPX_SIMPLE_TYPE_H /** \addtogroup extensions @{ */ #include "foundation/PxSimpleTypes.h" #include "cooking/PxCooking.h" #include "common/PxStringTable.h" #include "common/PxSerialFramework.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief Helper class containing the mapping of id to object, and type name. \deprecated Xml serialization is deprecated. An alternative serialization system is provided through USD Physics. */ struct PX_DEPRECATED PxRepXObject { /** \brief Identifies the extension meant to handle this object. @see PxTypeInfo, PX_DEFINE_TYPEINFO, PxRepXSerializer */ const char* typeName; /** \brief Pointer to the serializable this was created from */ const void* serializable; /** \brief Id given to this object at some point */ PxSerialObjectId id; PxRepXObject( const char* inTypeName = "", const void* inSerializable = NULL, const PxSerialObjectId inId = 0 ) : typeName( inTypeName ) , serializable( inSerializable ) , id( inId ) { } bool isValid() const { return serializable != NULL; } }; /** \brief Arguments required to instantiate a serializable object from RepX. \deprecated Xml serialization is deprecated. An alternative serialization system is provided through USD Physics. Extra arguments can be added to the object map under special ids. @see PxRepXSerializer::objectToFile, PxRepXSerializer::fileToObject */ struct PX_DEPRECATED PxRepXInstantiationArgs { PxPhysics& physics; const PxCookingParams* cooker; PxStringTable* stringTable; PxRepXInstantiationArgs( PxPhysics& inPhysics, const PxCookingParams* inCooking = NULL , PxStringTable* inStringTable = NULL ) : physics( inPhysics ) , cooker( inCooking ) , stringTable( inStringTable ) { } PxRepXInstantiationArgs& operator=(const PxRepXInstantiationArgs&); }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
3,661
C
32.907407
129
0.746244
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxDefaultStreams.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_DEFAULT_STREAMS_H #define PX_DEFAULT_STREAMS_H /** \addtogroup extensions @{ */ #include <stdio.h> #include "common/PxPhysXCommonConfig.h" #include "foundation/PxIO.h" #include "foundation/PxFoundation.h" typedef FILE* PxFileHandle; #if !PX_DOXYGEN namespace physx { #endif /** \brief default implementation of a memory write stream @see PxOutputStream */ class PxDefaultMemoryOutputStream: public PxOutputStream { public: PxDefaultMemoryOutputStream(PxAllocatorCallback &allocator = *PxGetAllocatorCallback()); virtual ~PxDefaultMemoryOutputStream(); virtual PxU32 write(const void* src, PxU32 count); virtual PxU32 getSize() const { return mSize; } virtual PxU8* getData() const { return mData; } private: PxDefaultMemoryOutputStream(const PxDefaultMemoryOutputStream&); PxDefaultMemoryOutputStream& operator=(const PxDefaultMemoryOutputStream&); PxAllocatorCallback& mAllocator; PxU8* mData; PxU32 mSize; PxU32 mCapacity; }; /** \brief default implementation of a memory read stream @see PxInputData */ class PxDefaultMemoryInputData: public PxInputData { public: PxDefaultMemoryInputData(PxU8* data, PxU32 length); virtual PxU32 read(void* dest, PxU32 count); virtual PxU32 getLength() const; virtual void seek(PxU32 pos); virtual PxU32 tell() const; private: PxU32 mSize; const PxU8* mData; PxU32 mPos; }; /** \brief default implementation of a file write stream @see PxOutputStream */ class PxDefaultFileOutputStream: public PxOutputStream { public: PxDefaultFileOutputStream(const char* name); virtual ~PxDefaultFileOutputStream(); virtual PxU32 write(const void* src, PxU32 count); virtual bool isValid(); private: PxFileHandle mFile; }; /** \brief default implementation of a file read stream @see PxInputData */ class PxDefaultFileInputData: public PxInputData { public: PxDefaultFileInputData(const char* name); virtual ~PxDefaultFileInputData(); virtual PxU32 read(void* dest, PxU32 count); virtual void seek(PxU32 pos); virtual PxU32 tell() const; virtual PxU32 getLength() 
const; bool isValid() const; private: PxFileHandle mFile; PxU32 mLength; }; #if !PX_DOXYGEN } #endif /** @} */ #endif
3,962
C
25.777027
94
0.748864
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxMassProperties.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_MASS_PROPERTIES_H
#define PX_MASS_PROPERTIES_H
/** \addtogroup extensions
  @{
*/

#include "PxPhysXConfig.h"
#include "foundation/PxMath.h"
#include "foundation/PxMathUtils.h"
#include "foundation/PxVec3.h"
#include "foundation/PxMat33.h"
#include "foundation/PxQuat.h"
#include "foundation/PxTransform.h"
#include "geometry/PxGeometry.h"
#include "geometry/PxBoxGeometry.h"
#include "geometry/PxSphereGeometry.h"
#include "geometry/PxCapsuleGeometry.h"
#include "geometry/PxConvexMeshGeometry.h"
#include "geometry/PxConvexMesh.h"
#include "geometry/PxCustomGeometry.h"
#include "geometry/PxTriangleMeshGeometry.h"
#include "geometry/PxTriangleMesh.h"

#if !PX_DOXYGEN
namespace physx
{
#endif

/**
\brief Utility class to compute and manipulate mass and inertia tensor properties.

In most cases #PxRigidBodyExt::updateMassAndInertia(), #PxRigidBodyExt::setMassAndUpdateInertia() should be enough to
setup the mass properties of a rigid body. This utility class targets users that need to customize the mass properties computation.
*/
class PxMassProperties
{
public:
	/**
	\brief Default constructor. Unit mass, identity inertia tensor, center of mass at the origin.
	*/
	PX_FORCE_INLINE PxMassProperties() : inertiaTensor(PxIdentity), centerOfMass(0.0f), mass(1.0f) {}

	/**
	\brief Construct from individual elements (mass, inertia tensor, center of mass).
	*/
	PX_FORCE_INLINE PxMassProperties(const PxReal m, const PxMat33& inertiaT, const PxVec3& com) : inertiaTensor(inertiaT), centerOfMass(com), mass(m) {}

	/**
	\brief Compute mass properties based on a provided geometry structure.

	This constructor assumes the geometry has a density of 1. Mass and inertia tensor scale linearly with density.

	\param[in] geometry The geometry to compute the mass properties for. Supported geometry types are: sphere, box, capsule and convex mesh.
	*/
	PxMassProperties(const PxGeometry& geometry)
	{
		switch (geometry.getType())
		{
			case PxGeometryType::eSPHERE:
			{
				const PxSphereGeometry& s = static_cast<const PxSphereGeometry&>(geometry);
				// Solid sphere: mass = (4/3)*pi*r^3 (density 1), inertia = (2/5)*m*r^2 on each axis.
				mass = (4.0f / 3.0f) * PxPi * s.radius * s.radius * s.radius;
				inertiaTensor = PxMat33::createDiagonal(PxVec3(2.0f / 5.0f * mass * s.radius * s.radius));
				centerOfMass = PxVec3(0.0f);
			}
			break;

			case PxGeometryType::eBOX:
			{
				const PxBoxGeometry& b = static_cast<const PxBoxGeometry&>(geometry);
				// Solid box: mass = full-extent volume (2hx*2hy*2hz), inertia = (m/3)*(e_j^2 + e_k^2) per axis
				// where e are the half extents.
				mass = b.halfExtents.x * b.halfExtents.y * b.halfExtents.z * 8.0f;
				PxVec3 d2 = b.halfExtents.multiply(b.halfExtents);
				inertiaTensor = PxMat33::createDiagonal(PxVec3(d2.y + d2.z, d2.x + d2.z, d2.x + d2.y)) * (mass * 1.0f / 3.0f);
				centerOfMass = PxVec3(0.0f);
			}
			break;

			case PxGeometryType::eCAPSULE:
			{
				const PxCapsuleGeometry& c = static_cast<const PxCapsuleGeometry&>(geometry);
				// Capsule = cylinder of half height h plus two hemispherical caps of radius r.
				// mass = pi*r^2*(2h) + (4/3)*pi*r^3 (density 1).
				PxReal r = c.radius, h = c.halfHeight;
				mass = ((4.0f / 3.0f) * r + 2 * c.halfHeight) * PxPi * r * r;
				// 'a' and 'b' are the diagonal inertia integrands (per pi*r^2) for the transverse
				// and longitudinal axes respectively; the capsule's long axis is x in PhysX.
				PxReal a = r*r*r * (8.0f / 15.0f) + h*r*r * (3.0f / 2.0f) + h*h*r * (4.0f / 3.0f) + h*h*h * (2.0f / 3.0f);
				PxReal b = r*r*r * (8.0f / 15.0f) + h*r*r;
				inertiaTensor = PxMat33::createDiagonal(PxVec3(b, a, a) * PxPi * r * r);
				centerOfMass = PxVec3(0.0f);
			}
			break;

			case PxGeometryType::eCONVEXMESH:
			{
				const PxConvexMeshGeometry& c = static_cast<const PxConvexMeshGeometry&>(geometry);
				PxVec3 unscaledCoM;
				PxMat33 unscaledInertiaTensorNonCOM; // inertia tensor of convex mesh in mesh local space (about the mesh origin)
				PxMat33 unscaledInertiaTensorCOM;
				PxReal unscaledMass;
				c.convexMesh->getMassInformation(unscaledMass, unscaledInertiaTensorNonCOM, unscaledCoM);

				// Shift the inertia tensor from the mesh origin to the center of mass
				// (inverse parallel axis theorem applied component-wise).
				unscaledInertiaTensorCOM[0][0] = unscaledInertiaTensorNonCOM[0][0] - unscaledMass*PxReal((unscaledCoM.y*unscaledCoM.y+unscaledCoM.z*unscaledCoM.z));
				unscaledInertiaTensorCOM[1][1] = unscaledInertiaTensorNonCOM[1][1] - unscaledMass*PxReal((unscaledCoM.z*unscaledCoM.z+unscaledCoM.x*unscaledCoM.x));
				unscaledInertiaTensorCOM[2][2] = unscaledInertiaTensorNonCOM[2][2] - unscaledMass*PxReal((unscaledCoM.x*unscaledCoM.x+unscaledCoM.y*unscaledCoM.y));
				unscaledInertiaTensorCOM[0][1] = unscaledInertiaTensorCOM[1][0] = (unscaledInertiaTensorNonCOM[0][1] + unscaledMass*PxReal(unscaledCoM.x*unscaledCoM.y));
				unscaledInertiaTensorCOM[1][2] = unscaledInertiaTensorCOM[2][1] = (unscaledInertiaTensorNonCOM[1][2] + unscaledMass*PxReal(unscaledCoM.y*unscaledCoM.z));
				unscaledInertiaTensorCOM[0][2] = unscaledInertiaTensorCOM[2][0] = (unscaledInertiaTensorNonCOM[0][2] + unscaledMass*PxReal(unscaledCoM.z*unscaledCoM.x));

				// Apply the mesh scale: mass scales with the volume factor, the CoM is
				// transformed directly, and the inertia is scaled in the scaling frame.
				const PxMeshScale& s = c.scale;
				mass = unscaledMass * s.scale.x * s.scale.y * s.scale.z;
				centerOfMass = s.transform(unscaledCoM);
				inertiaTensor = scaleInertia(unscaledInertiaTensorCOM, s.rotation, s.scale);
			}
			break;

			case PxGeometryType::eCUSTOM:
			{
				// Delegate to the user-provided callbacks; reset to defaults first so the
				// callback starts from a well-defined state.
				*this = PxMassProperties();
				static_cast<const PxCustomGeometry&>(geometry).callbacks->computeMassProperties(geometry, *this);
			}
			break;

			case PxGeometryType::eTRIANGLEMESH:
			{
				const PxTriangleMeshGeometry& g = static_cast<const PxTriangleMeshGeometry&>(geometry);
				PxVec3 unscaledCoM;
				PxMat33 unscaledInertiaTensorNonCOM; // inertia tensor of the triangle mesh in mesh local space (about the mesh origin)
				PxMat33 unscaledInertiaTensorCOM;
				PxReal unscaledMass;
				g.triangleMesh->getMassInformation(unscaledMass, unscaledInertiaTensorNonCOM, unscaledCoM);

				// Shift the inertia tensor from the mesh origin to the center of mass
				// (same construction as the convex mesh case above).
				unscaledInertiaTensorCOM[0][0] = unscaledInertiaTensorNonCOM[0][0] - unscaledMass * PxReal((unscaledCoM.y*unscaledCoM.y + unscaledCoM.z*unscaledCoM.z));
				unscaledInertiaTensorCOM[1][1] = unscaledInertiaTensorNonCOM[1][1] - unscaledMass * PxReal((unscaledCoM.z*unscaledCoM.z + unscaledCoM.x*unscaledCoM.x));
				unscaledInertiaTensorCOM[2][2] = unscaledInertiaTensorNonCOM[2][2] - unscaledMass * PxReal((unscaledCoM.x*unscaledCoM.x + unscaledCoM.y*unscaledCoM.y));
				unscaledInertiaTensorCOM[0][1] = unscaledInertiaTensorCOM[1][0] = (unscaledInertiaTensorNonCOM[0][1] + unscaledMass * PxReal(unscaledCoM.x*unscaledCoM.y));
				unscaledInertiaTensorCOM[1][2] = unscaledInertiaTensorCOM[2][1] = (unscaledInertiaTensorNonCOM[1][2] + unscaledMass * PxReal(unscaledCoM.y*unscaledCoM.z));
				unscaledInertiaTensorCOM[0][2] = unscaledInertiaTensorCOM[2][0] = (unscaledInertiaTensorNonCOM[0][2] + unscaledMass * PxReal(unscaledCoM.z*unscaledCoM.x));

				const PxMeshScale& s = g.scale;
				mass = unscaledMass * s.scale.x * s.scale.y * s.scale.z;
				centerOfMass = s.transform(unscaledCoM);
				inertiaTensor = scaleInertia(unscaledInertiaTensorCOM, s.rotation, s.scale);
			}
			break;

			default:
			{
				// Unsupported geometry type: fall back to default (unit) mass properties.
				*this = PxMassProperties();
			}
		}

		PX_ASSERT(inertiaTensor.column0.isFinite() && inertiaTensor.column1.isFinite() && inertiaTensor.column2.isFinite());
		PX_ASSERT(centerOfMass.isFinite());
		PX_ASSERT(PxIsFinite(mass));
	}

	/**
	\brief Scale mass properties.

	Scales mass and inertia tensor by the given factor; the center of mass is unaffected.

	\param[in] scale The linear scaling factor to apply to the mass properties.
	\return The scaled mass properties.
	*/
	PX_FORCE_INLINE PxMassProperties operator*(const PxReal scale) const
	{
		PX_ASSERT(PxIsFinite(scale));

		return PxMassProperties(mass * scale, inertiaTensor * scale, centerOfMass);
	}

	/**
	\brief Translate the center of mass by a given vector and adjust the inertia tensor accordingly.

	\param[in] t The translation vector for the center of mass.
	*/
	PX_FORCE_INLINE void translate(const PxVec3& t)
	{
		PX_ASSERT(t.isFinite());

		inertiaTensor = translateInertia(inertiaTensor, mass, t);
		centerOfMass += t;

		PX_ASSERT(inertiaTensor.column0.isFinite() && inertiaTensor.column1.isFinite() && inertiaTensor.column2.isFinite());
		PX_ASSERT(centerOfMass.isFinite());
	}

	/**
	\brief Get the entries of the diagonalized inertia tensor and the corresponding reference rotation.

	\param[in] inertia The inertia tensor to diagonalize.
	\param[out] massFrame The frame the diagonalized tensor refers to.
	\return The entries of the diagonalized inertia tensor.
	*/
	PX_FORCE_INLINE static PxVec3 getMassSpaceInertia(const PxMat33& inertia, PxQuat& massFrame)
	{
		PX_ASSERT(inertia.column0.isFinite() && inertia.column1.isFinite() && inertia.column2.isFinite());

		PxVec3 diagT = PxDiagonalize(inertia, massFrame);

		PX_ASSERT(diagT.isFinite());
		PX_ASSERT(massFrame.isFinite());
		return diagT;
	}

	/**
	\brief Translate an inertia tensor using the parallel axis theorem

	\param[in] inertia The inertia tensor to translate.
	\param[in] mass The mass of the object.
	\param[in] t The relative frame to translate the inertia tensor to.
	\return The translated inertia tensor.
	*/
	PX_FORCE_INLINE static PxMat33 translateInertia(const PxMat33& inertia, const PxReal mass, const PxVec3& t)
	{
		PX_ASSERT(inertia.column0.isFinite() && inertia.column1.isFinite() && inertia.column2.isFinite());
		PX_ASSERT(PxIsFinite(mass));
		PX_ASSERT(t.isFinite());

		// s is the skew-symmetric cross-product matrix of t; s^T * s * m is the
		// parallel-axis correction term added to the inertia tensor.
		PxMat33 s(	PxVec3(0,t.z,-t.y),
					PxVec3(-t.z,0,t.x),
					PxVec3(t.y,-t.x,0) );

		PxMat33 translatedIT = s.getTranspose() * s * mass + inertia;

		PX_ASSERT(translatedIT.column0.isFinite() && translatedIT.column1.isFinite() && translatedIT.column2.isFinite());
		return translatedIT;
	}

	/**
	\brief Rotate an inertia tensor around the center of mass

	\param[in] inertia The inertia tensor to rotate.
	\param[in] q The rotation from the new to the old coordinate frame, i.e. q.rotate(v) transforms the coordinates of vector v from the old to the new coordinate frame.
	\return The rotated inertia tensor.
	*/
	PX_FORCE_INLINE static PxMat33 rotateInertia(const PxMat33& inertia, const PxQuat& q)
	{
		PX_ASSERT(inertia.column0.isFinite() && inertia.column1.isFinite() && inertia.column2.isFinite());
		PX_ASSERT(q.isUnit());

		// Similarity transform: I' = R * I * R^T with R built from q.
		PxMat33 m(q);
		PxMat33 rotatedIT = m * inertia * m.getTranspose();

		PX_ASSERT(rotatedIT.column0.isFinite() && rotatedIT.column1.isFinite() && rotatedIT.column2.isFinite());
		return rotatedIT;
	}

	/**
	\brief Non-uniform scaling of the inertia tensor

	\param[in] inertia The inertia tensor to scale.
	\param[in] scaleRotation The rotation from the scaling frame to the frame that inertia is expressed in. I.e. scaleRotation.rotate(v) transforms the coordinates of vertex v from inertia's frame to the scaling-axes frame.
	\param[in] scale The scaling factor for each axis (relative to the frame specified with scaleRotation).
	\return The scaled inertia tensor.
	*/
	static PxMat33 scaleInertia(const PxMat33& inertia, const PxQuat& scaleRotation, const PxVec3& scale)
	{
		PX_ASSERT(inertia.column0.isFinite() && inertia.column1.isFinite() && inertia.column2.isFinite());
		PX_ASSERT(scaleRotation.isUnit());
		PX_ASSERT(scale.isFinite());

		PxMat33 localInertiaT = rotateInertia(inertia, scaleRotation); // rotate inertia into scaling frame
		PxVec3 diagonal(localInertiaT[0][0], localInertiaT[1][1], localInertiaT[2][2]);

		// Recover the second moments from the diagonal entries:
		// Ixx = y^2 + z^2 etc., so x^2 = (Ixx+Iyy+Izz)/2 - Ixx (per unit mass).
		PxVec3 xyz2 = PxVec3(diagonal.dot(PxVec3(0.5f))) - diagonal; // original x^2, y^2, z^2
		PxVec3 scaledxyz2 = xyz2.multiply(scale).multiply(scale);

		PxReal xx = scaledxyz2.y + scaledxyz2.z,
			   yy = scaledxyz2.z + scaledxyz2.x,
			   zz = scaledxyz2.x + scaledxyz2.y;

		// Off-diagonal (product-of-inertia) terms scale with the product of the two axis factors.
		PxReal xy = localInertiaT[0][1] * scale.x * scale.y,
			   xz = localInertiaT[0][2] * scale.x * scale.z,
			   yz = localInertiaT[1][2] * scale.y * scale.z;

		PxMat33 scaledInertia(	PxVec3(xx, xy, xz),
								PxVec3(xy, yy, yz),
								PxVec3(xz, yz, zz));

		// Multiply by the volume scale factor, then rotate back into the original frame.
		PxMat33 scaledIT = rotateInertia(scaledInertia * (scale.x * scale.y * scale.z), scaleRotation.getConjugate());

		PX_ASSERT(scaledIT.column0.isFinite() && scaledIT.column1.isFinite() && scaledIT.column2.isFinite());
		return scaledIT;
	}

	/**
	\brief Sum up individual mass properties.

	\param[in] props Array of mass properties to sum up.
	\param[in] transforms Reference transforms for each mass properties entry.
	\param[in] count The number of mass properties to sum up.
	\return The summed up mass properties.
	*/
	static PxMassProperties sum(const PxMassProperties* props, const PxTransform* transforms, const PxU32 count)
	{
		PxReal combinedMass = 0.0f;
		PxVec3 combinedCoM(0.0f);
		PxMat33 combinedInertiaT = PxMat33(PxZero);

		// First pass: accumulate total mass and the mass-weighted center of mass.
		for(PxU32 i = 0; i < count; i++)
		{
			PX_ASSERT(props[i].inertiaTensor.column0.isFinite() && props[i].inertiaTensor.column1.isFinite() && props[i].inertiaTensor.column2.isFinite());
			PX_ASSERT(props[i].centerOfMass.isFinite());
			PX_ASSERT(PxIsFinite(props[i].mass));

			combinedMass += props[i].mass;
			const PxVec3 comTm = transforms[i].transform(props[i].centerOfMass);
			combinedCoM += comTm * props[i].mass;
		}

		// Guard against a zero total mass (e.g. empty input).
		if(combinedMass > 0.f)
			combinedCoM /= combinedMass;

		// Second pass: rotate each inertia tensor into the common frame and
		// translate it to the combined center of mass before accumulating.
		for(PxU32 i = 0; i < count; i++)
		{
			const PxVec3 comTm = transforms[i].transform(props[i].centerOfMass);
			combinedInertiaT += translateInertia(rotateInertia(props[i].inertiaTensor, transforms[i].q), props[i].mass, combinedCoM - comTm);
		}

		PX_ASSERT(combinedInertiaT.column0.isFinite() && combinedInertiaT.column1.isFinite() && combinedInertiaT.column2.isFinite());
		PX_ASSERT(combinedCoM.isFinite());
		PX_ASSERT(PxIsFinite(combinedMass));

		return PxMassProperties(combinedMass, combinedInertiaT, combinedCoM);
	}

	PxMat33	inertiaTensor;	//!< The inertia tensor of the object.
	PxVec3	centerOfMass;	//!< The center of mass of the object.
	PxReal	mass;			//!< The mass of the object.
};

#if !PX_DOXYGEN
} // namespace physx
#endif

/** @} */
#endif
15,019
C
40.038251
159
0.731873
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxTetMakerExt.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_TETMAKER_EXT_H
#define PX_TETMAKER_EXT_H
/** \addtogroup extensions
  @{
*/

#include "foundation/PxSimpleTypes.h"
#include "foundation/PxVec3.h"
#include "common/PxCoreUtilityTypes.h"
#include "foundation/PxArray.h"
#include "PxTriangleMeshAnalysisResult.h"
#include "PxTetrahedronMeshAnalysisResult.h"

#if !PX_DOXYGEN
namespace physx
{
#endif

class PxTriangleMesh;
class PxTetrahedronMeshDesc;
class PxSoftBodySimulationDataDesc;
struct PxTetMakerData;
class PxSimpleTriangleMesh;

/**
\brief Provides functionality to create a tetrahedral mesh from a triangle mesh.
*/
class PxTetMaker
{
public:

	/**
	\brief Create conforming tetrahedron mesh using TetMaker

	\param[in] triangleMesh The description of the triangle mesh including vertices and indices
	\param[out] outVertices The vertices to store the conforming tetrahedral mesh
	\param[out] outTetIndices The indices to store the conforming tetrahedral mesh
	\param[in] validate If set to true the input triangle mesh will get analyzed to find possible deficiencies
	\param[in] volumeThreshold Tetrahedra with a volume smaller than the specified threshold will be removed from the mesh
	\return True if success
	*/
	static bool createConformingTetrahedronMesh(const PxSimpleTriangleMesh& triangleMesh,
		physx::PxArray<physx::PxVec3>& outVertices, physx::PxArray<physx::PxU32>& outTetIndices, const bool validate = true, PxReal volumeThreshold = 0.0f);

	/**
	\brief Create voxel-based tetrahedron mesh using TetMaker

	\param[in] tetMesh The description of the tetrahedral mesh including vertices and indices
	\param[in] numVoxelsAlongLongestBoundingBoxAxis The number of voxels along the longest bounding box axis
	\param[out] outVertices The vertices to store the voxel-based tetrahedral mesh
	\param[out] outTetIndices The indices to store the voxel-based tetrahedral mesh
	\param[out] inputPointToOutputTetIndex Buffer with the size of nbTetVerts that contains the tetrahedron index containing the input point with the same index
	\param[in] anchorNodeIndices Some input vertices may not be referenced by any tetrahedron. They can be mapped to another input vertex that is used by a tetrahedron to support embedding of additional points.
	\param[in] numTetsPerVoxel The number of tetrahedra used to fill a voxel. Only a value of 5 or 6 is supported. 5 is recommended because it mostly avoids mesh anisotropy.
	\return True if success
	*/
	static bool createVoxelTetrahedronMesh(const PxTetrahedronMeshDesc& tetMesh, const PxU32 numVoxelsAlongLongestBoundingBoxAxis,
		physx::PxArray<physx::PxVec3>& outVertices, physx::PxArray<physx::PxU32>& outTetIndices, PxI32* inputPointToOutputTetIndex = NULL, const PxU32* anchorNodeIndices = NULL, PxU32 numTetsPerVoxel = 5);

	/**
	\brief Create voxel-based tetrahedron mesh using TetMaker

	\param[in] tetMesh The description of the tetrahedral mesh including vertices and indices
	\param[in] voxelEdgeLength The edge length of a voxel. Can be adjusted slightly such that a multiple of it matches the input points' bounding box size
	\param[out] outVertices The vertices to store the voxel-based tetrahedral mesh
	\param[out] outTetIndices The indices to store the voxel-based tetrahedral mesh
	\param[out] inputPointToOutputTetIndex Buffer with the size of nbTetVerts that contains the tetrahedron index containing the input point with the same index
	\param[in] anchorNodeIndices Some input vertices may not be referenced by any tetrahedron. They can be mapped to another input vertex that is used by a tetrahedron to support embedding of additional points.
	\param[in] numTetsPerVoxel The number of tetrahedra used to fill a voxel. Only a value of 5 or 6 is supported. 5 is recommended because it mostly avoids mesh anisotropy.
	\return True if success
	*/
	static bool createVoxelTetrahedronMeshFromEdgeLength(const PxTetrahedronMeshDesc& tetMesh, const PxReal voxelEdgeLength,
		physx::PxArray<physx::PxVec3>& outVertices, physx::PxArray<physx::PxU32>& outTetIndices, PxI32* inputPointToOutputTetIndex = NULL, const PxU32* anchorNodeIndices = NULL, PxU32 numTetsPerVoxel = 5);

	/**
	\brief Analyzes the triangle mesh to get a report about deficiencies. Some deficiencies can be handled by the tetmesher, others cannot.

	\param[in] triangleMesh The description of the triangle mesh including vertices and indices
	\param[in] minVolumeThreshold Minimum volume the mesh must have such that no volume warning is generated
	\param[in] minTriangleAngleRadians Minimum angle allowed for triangles such that no angle warning is generated
	\return Flags that describe the triangle mesh's deficiencies
	*/
	static PxTriangleMeshAnalysisResults validateTriangleMesh(const PxSimpleTriangleMesh& triangleMesh, const PxReal minVolumeThreshold = 1e-6f, const PxReal minTriangleAngleRadians = 10.0f*3.1415926535898f / 180.0f);

	/**
	\brief Analyzes the tetrahedron mesh to get a report about deficiencies. Some deficiencies can be handled by the softbody cooker, others cannot.

	\param[in] points The mesh's points
	\param[in] tetrahedra The mesh's tetrahedra (index buffer)
	\param[in] minTetVolumeThreshold Minimum volume every tetrahedron in the mesh must have such that no volume warning is generated
	\return Flags that describe the tetrahedron mesh's deficiencies
	*/
	static PxTetrahedronMeshAnalysisResults validateTetrahedronMesh(const PxBoundedData& points, const PxBoundedData& tetrahedra, const PxReal minTetVolumeThreshold = 1e-8f);

	/**
	\brief Simplifies (decimates) a triangle mesh using quadric simplification.

	\param[in] inputVertices The vertices of the input triangle mesh
	\param[in] inputIndices The indices of the input triangle mesh of the form (id0, id1, id2), (id0, id1, id2), ..
	\param[in] targetTriangleCount Desired number of triangles in the output mesh
	\param[in] maximalEdgeLength Edges below this length will not be collapsed. A value of zero means there is no limit.
	\param[out] outputVertices The vertices of the output (decimated) triangle mesh
	\param[out] outputIndices The indices of the output (decimated) triangle mesh of the form (id0, id1, id2), (id0, id1, id2), ..
	\param[out] vertexMap Optional parameter which returns the mapping from input to output vertices. Note that multiple input vertices are typically collapsed into the same output vertex.
	\param[in] edgeLengthCostWeight Factor to scale influence of edge length when prioritizing edge collapses. Has no effect if set to zero.
	\param[in] flatnessDetectionThreshold Threshold used to detect edges in flat regions and to improve the placement of the collapsed point. If set to a large value it will have no effect.
	\param[in] projectSimplifiedPointsOnInputMeshSurface If set to true, the simplified points will lie exactly on the original surface.
	\param[out] outputVertexToInputTriangle Optional indices providing the triangle index per resulting vertex. Only available when projectSimplifiedPointsOnInputMeshSurface is set to true
	\param[in] removeDisconnectedPatches Enables the optional removal of disconnected triangles in the mesh. Only the largest connected set/patch will be kept
	*/
	static void simplifyTriangleMesh(const PxArray<PxVec3>& inputVertices, const PxArray<PxU32>&inputIndices, int targetTriangleCount, PxF32 maximalEdgeLength,
		PxArray<PxVec3>& outputVertices, PxArray<PxU32>& outputIndices, PxArray<PxU32> *vertexMap = NULL, PxReal edgeLengthCostWeight = 0.1f,
		PxReal flatnessDetectionThreshold = 0.01f, bool projectSimplifiedPointsOnInputMeshSurface = false, PxArray<PxU32>* outputVertexToInputTriangle = NULL,
		bool removeDisconnectedPatches = false);

	/**
	\brief Creates a new mesh from a given mesh. The input mesh is first voxelized. The new surface is created from the voxel surface and subsequent projection to the original mesh.

	\param[in] inputVertices The vertices of the input triangle mesh
	\param[in] inputIndices The indices of the input triangle mesh of the form (id0, id1, id2), (id0, id1, id2), ..
	\param[in] gridResolution Size of the voxel grid (number of voxels along the longest dimension)
	\param[out] outputVertices The vertices of the output (decimated) triangle mesh
	\param[out] outputIndices The indices of the output (decimated) triangle mesh of the form (id0, id1, id2), (id0, id1, id2), ..
	\param[out] vertexMap Optional parameter which returns a mapping from input to output vertices. Since the meshes are independent, the mapping returns an output vertex that is topologically close to the input vertex.
	*/
	static void remeshTriangleMesh(const PxArray<PxVec3>& inputVertices, const PxArray<PxU32>&inputIndices, PxU32 gridResolution,
		PxArray<PxVec3>& outputVertices, PxArray<PxU32>& outputIndices, PxArray<PxU32> *vertexMap = NULL);

	/**
	\brief Creates a new mesh from a given mesh. The input mesh is first voxelized. The new surface is created from the voxel surface and subsequent projection to the original mesh.

	\param[in] inputVertices The vertices of the input triangle mesh
	\param[in] nbVertices The number of vertices of the input triangle mesh
	\param[in] inputIndices The indices of the input triangle mesh of the form (id0, id1, id2), (id0, id1, id2), ..
	\param[in] nbIndices The number of indices of the input triangle mesh (equal to three times the number of triangles)
	\param[in] gridResolution Size of the voxel grid (number of voxels along the longest dimension)
	\param[out] outputVertices The vertices of the output (decimated) triangle mesh
	\param[out] outputIndices The indices of the output (decimated) triangle mesh of the form (id0, id1, id2), (id0, id1, id2), ..
	\param[out] vertexMap Optional parameter which returns a mapping from input to output vertices. Since the meshes are independent, the mapping returns an output vertex that is topologically close to the input vertex.
	*/
	static void remeshTriangleMesh(const PxVec3* inputVertices, PxU32 nbVertices, const PxU32* inputIndices, PxU32 nbIndices, PxU32 gridResolution,
		PxArray<PxVec3>& outputVertices, PxArray<PxU32>& outputIndices, PxArray<PxU32> *vertexMap = NULL);

	/**
	\brief Creates a tetrahedral mesh using an octree.

	\param[in] inputVertices The vertices of the input triangle mesh
	\param[in] inputIndices The indices of the input triangle mesh of the form (id0, id1, id2), (id0, id1, id2), ..
	\param[in] useTreeNodes Using the nodes of the octree as tetrahedral vertices
	\param[out] outputVertices The vertices of the output tetrahedral mesh
	\param[out] outputIndices The indices of the output tetrahedral mesh of the form (id0, id1, id2, id3), (id0, id1, id2, id3), ..
	\param[in] volumeThreshold Tetrahedra with a volume smaller than the specified threshold will be removed from the mesh
	*/
	static void createTreeBasedTetrahedralMesh(const PxArray<PxVec3>& inputVertices, const PxArray<PxU32>&inputIndices,
		bool useTreeNodes, PxArray<PxVec3>& outputVertices, PxArray<PxU32>& outputIndices, PxReal volumeThreshold = 0.0f);

	/**
	\brief Creates a tetrahedral mesh by relaxing a voxel mesh around the input mesh

	\param[in] inputVertices The vertices of the input triangle mesh
	\param[in] inputIndices The indices of the input triangle mesh of the form (id0, id1, id2), (id0, id1, id2), ..
	\param[out] outputVertices The vertices of the output tetrahedral mesh
	\param[out] outputIndices The indices of the output tetrahedral mesh of the form (id0, id1, id2, id3), (id0, id1, id2, id3), ..
	\param[in] resolution The grid spacing is computed as the diagonal of the bounding box of the input mesh divided by the resolution.
	\param[in] numRelaxationIterations Number of iterations to pull the tetrahedral mesh towards the input mesh
	\param[in] relMinTetVolume Constrains the volumes of the tetrahedra to stay above relMinTetVolume times the tetrahedron's rest volume.
	*/
	static void createRelaxedVoxelTetrahedralMesh(const PxArray<PxVec3>& inputVertices, const PxArray<PxU32>&inputIndices,
		PxArray<PxVec3>& outputVertices, PxArray<PxU32>& outputIndices,
		PxI32 resolution, PxI32 numRelaxationIterations = 5, PxF32 relMinTetVolume = 0.05f);

	/**
	\brief Detects islands (connected patches) of triangles: every triangle is assigned a marker identifying the island it belongs to.

	\param[in] triangles The indices of the input triangle mesh of the form (id0, id1, id2), (id0, id1, id2), ..
	\param[in] numTriangles The number of triangles
	\param[out] islandIndexPerTriangle Every triangle gets an island index assigned. Triangles with the same island index belong to the same patch of connected triangles.
	*/
	static void detectTriangleIslands(const PxI32* triangles, PxU32 numTriangles, PxArray<PxU32>& islandIndexPerTriangle);

	/**
	\brief Finds the island containing the most triangles given per-triangle island markers.

	\param[in] islandIndexPerTriangle An island marker per triangle. All triangles with the same marker belong to an island. Can be computed using the method detectTriangleIslands.
	\param[in] numTriangles The number of triangles
	\return The marker value of the island that contains the most triangles
	*/
	static PxU32 findLargestIslandId(const PxU32* islandIndexPerTriangle, PxU32 numTriangles);
};

#if !PX_DOXYGEN
}
#endif

/** @} */
#endif
14,854
C
64.730088
216
0.790359
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxTriangleMeshAnalysisResult.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. 
#ifndef PX_TRIANGLE_MESH_ANALYSIS_RESULT_H
#define PX_TRIANGLE_MESH_ANALYSIS_RESULT_H

#include "PxPhysXConfig.h"
#include "foundation/PxFlags.h"

#if !PX_DOXYGEN
namespace physx
{
#endif

/**
\brief These flags indicate what kind of deficiencies a triangle mesh has and describe if the mesh is considered ok, problematic or invalid for tetmeshing
*/
class PxTriangleMeshAnalysisResult
{
public:
	enum Enum
	{
		eVALID = 0,
		eZERO_VOLUME = (1 << 0),							//!< invalid: Flat mesh without meaningful amount of volume - cannot be meshed since a tetmesh is volumetric
		eOPEN_BOUNDARIES = (1 << 1),						//!< problematic: Open boundary means that the mesh is not watertight and that there are holes. The mesher can fill holes but the surface might have an unexpected shape where the hole was.
		eSELF_INTERSECTIONS = (1 << 2),						//!< problematic: The surface of the resulting mesh won't match exactly at locations of self-intersections. The tetmesh might be connected at self-intersections even if the input triangle mesh is not
		eINCONSISTENT_TRIANGLE_ORIENTATION = (1 << 3),		//!< invalid: It is not possible to distinguish what is inside and outside of the mesh. If there are no self-intersections and no edges shared by more than two triangles, a call to makeTriOrientationConsistent can fix this. Without fixing it, the output from the tetmesher will be incorrect
		eCONTAINS_ACUTE_ANGLED_TRIANGLES = (1 << 4),		//!< problematic: An ideal mesh for a softbody has triangles with similar angles and evenly distributed vertices. Acute angles can be handled but might lead to a poor quality tetmesh.
		eEDGE_SHARED_BY_MORE_THAN_TWO_TRIANGLES = (1 << 5),	//!< problematic: Border case of a self-intersecting mesh. The tetmesh might not match the surface exactly near such edges.
		eCONTAINS_DUPLICATE_POINTS = (1 << 6),				//!< ok: Duplicate points can be handled by the mesher without problems. The resulting tetmesh will only make use of first unique point that is found, duplicate points will get mapped to that unique point in the tetmesh. Therefore the tetmesh can contain points that are not accessed by a tet.
		eCONTAINS_INVALID_POINTS = (1 << 7),				//!< invalid: Points contain NAN, infinity or similar values that will lead to an invalid mesh
		eREQUIRES_32BIT_INDEX_BUFFER = (1 << 8),			//!< invalid: Mesh contains more indices than a 16bit index buffer can address
		eTRIANGLE_INDEX_OUT_OF_RANGE = (1 << 9),			//!< invalid: A mesh triangle index is negative or larger than the size of the vertex buffer
		eMESH_IS_PROBLEMATIC = (1 << 10),					//!< flag is set if the mesh is categorized as problematic
		eMESH_IS_INVALID = (1 << 11)						//!< flag is set if the mesh is categorized as invalid
	};
};

// Bitmask combination of the analysis flags above.
typedef PxFlags<PxTriangleMeshAnalysisResult::Enum, PxU32> PxTriangleMeshAnalysisResults;
PX_FLAGS_OPERATORS(PxTriangleMeshAnalysisResult::Enum, PxU32)

#if !PX_DOXYGEN
}
#endif

#endif
4,498
C
62.366196
343
0.749
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxCustomGeometryExt.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_CUSTOM_GEOMETRY_EXT_H #define PX_CUSTOM_GEOMETRY_EXT_H /** \addtogroup extensions @{ */ #include <geometry/PxCustomGeometry.h> #include <geometry/PxGjkQuery.h> #if !PX_DOXYGEN namespace physx { #endif class PxGeometry; class PxMassProperties; class PxGeometryHolder; struct PxContactPoint; /** \brief Pre-made custom geometry callbacks implementations. */ class PxCustomGeometryExt { public: /// \cond PRIVATE struct BaseConvexCallbacks : PxCustomGeometry::Callbacks, PxGjkQuery::Support { BaseConvexCallbacks(float _margin) : margin(_margin) {} // override PxCustomGeometry::Callbacks virtual PxBounds3 getLocalBounds(const PxGeometry& geometry) const; virtual bool generateContacts(const PxGeometry& geom0, const PxGeometry& geom1, const PxTransform& pose0, const PxTransform& pose1, const PxReal contactDistance, const PxReal meshContactMargin, const PxReal toleranceLength, PxContactBuffer& contactBuffer) const; virtual PxU32 raycast(const PxVec3& origin, const PxVec3& unitDir, const PxGeometry& geom, const PxTransform& pose, PxReal maxDist, PxHitFlags hitFlags, PxU32 maxHits, PxGeomRaycastHit* rayHits, PxU32 stride, PxRaycastThreadContext*) const; virtual bool overlap(const PxGeometry& geom0, const PxTransform& pose0, const PxGeometry& geom1, const PxTransform& pose1, PxOverlapThreadContext*) const; virtual bool sweep(const PxVec3& unitDir, const PxReal maxDist, const PxGeometry& geom0, const PxTransform& pose0, const PxGeometry& geom1, const PxTransform& pose1, PxGeomSweepHit& sweepHit, PxHitFlags hitFlags, const PxReal inflation, PxSweepThreadContext*) const; virtual bool usePersistentContactManifold(const PxGeometry& geometry, PxReal& breakingThreshold) const; // override PxGjkQuery::Support virtual PxReal getMargin() const { return margin; } // set margin void setMargin(float m); protected: // Shape margin float margin; // Substitute geometry virtual bool useSubstituteGeometry(PxGeometryHolder& geom, PxTransform& preTransform, const PxContactPoint& p, const 
PxTransform& pose0) const = 0; }; /// \endcond /** \brief Cylinder geometry callbacks */ struct CylinderCallbacks : BaseConvexCallbacks { /** \brief Construct cylinder geometry callbacks object \param[in] height The cylinder height. \param[in] radius The cylinder radius. \param[in] axis The cylinder axis (0 - X, 1 - Y, 2 - Z). \param[in] margin The cylinder margin. */ CylinderCallbacks(float height, float radius, int axis = 0, float margin = 0); /// \brief Set cylinder height /// \param[in] h The cylinder height void setHeight(float h); /// \brief Get cylinder height /// \return The cylinder height float getHeight() const { return height; } /// \brief Set cylinder radius /// \param[in] r The cylinder radius. void setRadius(float r); /// \brief Get cylinder radius /// \return The cylinder radius float getRadius() const { return radius; } /// \brief Set cylinder axis /// \param[in] a The cylinder axis (0 - X, 1 - Y, 2 - Z). void setAxis(int a); /// \brief Get cylinder axis /// \return The cylinder axis int getAxis() const { return axis; } /// \cond PRIVATE // override PxCustomGeometry::Callbacks DECLARE_CUSTOM_GEOMETRY_TYPE virtual void visualize(const PxGeometry&, PxRenderOutput&, const PxTransform&, const PxBounds3&) const; virtual void computeMassProperties(const PxGeometry& geometry, PxMassProperties& massProperties) const; // override PxGjkQuery::Support virtual PxVec3 supportLocal(const PxVec3& dir) const; protected: // Cylinder height float height; // Cylinder radius float radius; // Cylinder axis int axis; // Substitute geometry virtual bool useSubstituteGeometry(PxGeometryHolder& geom, PxTransform& preTransform, const PxContactPoint& p, const PxTransform& pose0) const; // Radius at height float getRadiusAtHeight(float height) const; /// \endcond }; /** \brief Cone geometry callbacks */ struct ConeCallbacks : BaseConvexCallbacks { /** \brief Construct cone geometry callbacks object \param[in] height The cylinder height. \param[in] radius The cylinder radius. 
\param[in] axis The cylinder axis (0 - X, 1 - Y, 2 - Z). \param[in] margin The cylinder margin. */ ConeCallbacks(float height, float radius, int axis = 0, float margin = 0); /// \brief Set cone height /// \param[in] h The cone height void setHeight(float h); /// \brief Get cone height /// \return The cone height float getHeight() const { return height; } /// \brief Set cone radius /// \param[in] r The cone radius void setRadius(float r); /// \brief Get cone radius /// \return The cone radius float getRadius() const { return radius; } /// \brief Set cone axis /// \param[in] a The cone axis void setAxis(int a); /// \brief Get cone axis /// \return The cone axis int getAxis() const { return axis; } /// \cond PRIVATE // override PxCustomGeometry::Callbacks DECLARE_CUSTOM_GEOMETRY_TYPE virtual void visualize(const PxGeometry&, PxRenderOutput&, const PxTransform&, const PxBounds3&) const; virtual void computeMassProperties(const PxGeometry& geometry, PxMassProperties& massProperties) const; // override PxGjkQuery::Support virtual PxVec3 supportLocal(const PxVec3& dir) const; protected: // Cone height float height; // Cone radius float radius; // Cone axis int axis; // Substitute geometry virtual bool useSubstituteGeometry(PxGeometryHolder& geom, PxTransform& preTransform, const PxContactPoint& p, const PxTransform& pose0) const; // Radius at height float getRadiusAtHeight(float height) const; /// \endcond }; }; /// \cond PRIVATE // OmniPVD friendly aliases typedef PxCustomGeometryExt::BaseConvexCallbacks PxCustomGeometryExtBaseConvexCallbacks; typedef PxCustomGeometryExt::CylinderCallbacks PxCustomGeometryExtCylinderCallbacks; typedef PxCustomGeometryExt::ConeCallbacks PxCustomGeometryExtConeCallbacks; /// \endcond #if !PX_DOXYGEN } #endif /** @} */ #endif
7,757
C
33.327433
156
0.743329
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxJoint.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_JOINT_H #define PX_JOINT_H /** \addtogroup extensions @{ */ #include "foundation/PxTransform.h" #include "PxRigidActor.h" #include "PxConstraint.h" #include "common/PxBase.h" #if !PX_DOXYGEN namespace physx { #endif class PxRigidActor; class PxScene; class PxPhysics; class PxConstraint; /** \brief an enumeration of PhysX' built-in joint types @see PxJoint */ struct PxJointConcreteType { enum Enum { eSPHERICAL = PxConcreteType::eFIRST_PHYSX_EXTENSION, eREVOLUTE, ePRISMATIC, eFIXED, eDISTANCE, eD6, eCONTACT, eGEAR, eRACK_AND_PINION, eLast }; }; PX_DEFINE_TYPEINFO(PxJoint, PxConcreteType::eUNDEFINED) PX_DEFINE_TYPEINFO(PxRackAndPinionJoint, PxJointConcreteType::eRACK_AND_PINION) PX_DEFINE_TYPEINFO(PxGearJoint, PxJointConcreteType::eGEAR) PX_DEFINE_TYPEINFO(PxD6Joint, PxJointConcreteType::eD6) PX_DEFINE_TYPEINFO(PxDistanceJoint, PxJointConcreteType::eDISTANCE) PX_DEFINE_TYPEINFO(PxContactJoint, PxJointConcreteType::eCONTACT) PX_DEFINE_TYPEINFO(PxFixedJoint, PxJointConcreteType::eFIXED) PX_DEFINE_TYPEINFO(PxPrismaticJoint, PxJointConcreteType::ePRISMATIC) PX_DEFINE_TYPEINFO(PxRevoluteJoint, PxJointConcreteType::eREVOLUTE) PX_DEFINE_TYPEINFO(PxSphericalJoint, PxJointConcreteType::eSPHERICAL) /** \brief an enumeration for specifying one or other of the actors referenced by a joint @see PxJoint */ struct PxJointActorIndex { enum Enum { eACTOR0, eACTOR1, COUNT }; }; /** \brief a base interface providing common functionality for PhysX joints */ class PxJoint : public PxBase { public: /** \brief Set the actors for this joint. An actor may be NULL to indicate the world frame. At most one of the actors may be NULL. \param[in] actor0 the first actor. \param[in] actor1 the second actor @see getActors() */ virtual void setActors(PxRigidActor* actor0, PxRigidActor* actor1) = 0; /** \brief Get the actors for this joint. \param[out] actor0 the first actor. 
\param[out] actor1 the second actor @see setActors() */ virtual void getActors(PxRigidActor*& actor0, PxRigidActor*& actor1) const = 0; /** \brief Set the joint local pose for an actor. This is the relative pose which locates the joint frame relative to the actor. \param[in] actor 0 for the first actor, 1 for the second actor. \param[in] localPose the local pose for the actor this joint @see getLocalPose() */ virtual void setLocalPose(PxJointActorIndex::Enum actor, const PxTransform& localPose) = 0; /** \brief get the joint local pose for an actor. \param[in] actor 0 for the first actor, 1 for the second actor. return the local pose for this joint @see setLocalPose() */ virtual PxTransform getLocalPose(PxJointActorIndex::Enum actor) const = 0; /** \brief get the relative pose for this joint This function returns the pose of the joint frame of actor1 relative to actor0 */ virtual PxTransform getRelativeTransform() const = 0; /** \brief get the relative linear velocity of the joint This function returns the linear velocity of the origin of the constraint frame of actor1, relative to the origin of the constraint frame of actor0. The value is returned in the constraint frame of actor0 */ virtual PxVec3 getRelativeLinearVelocity() const = 0; /** \brief get the relative angular velocity of the joint This function returns the angular velocity of actor1 relative to actor0. The value is returned in the constraint frame of actor0 */ virtual PxVec3 getRelativeAngularVelocity() const = 0; /** \brief set the break force for this joint. if the constraint force or torque on the joint exceeds the specified values, the joint will break, at which point it will not constrain the two actors and the flag PxConstraintFlag::eBROKEN will be set. 
The force and torque are measured in the joint frame of the first actor \param[in] force the maximum force the joint can apply before breaking \param[in] torque the maximum torque the joint can apply before breaking */ virtual void setBreakForce(PxReal force, PxReal torque) = 0; /** \brief get the break force for this joint. \param[out] force the maximum force the joint can apply before breaking \param[out] torque the maximum torque the joint can apply before breaking @see setBreakForce() */ virtual void getBreakForce(PxReal& force, PxReal& torque) const = 0; /** \brief set the constraint flags for this joint. \param[in] flags the constraint flags @see PxConstraintFlag */ virtual void setConstraintFlags(PxConstraintFlags flags) = 0; /** \brief set a constraint flags for this joint to a specified value. \param[in] flag the constraint flag \param[in] value the value to which to set the flag @see PxConstraintFlag */ virtual void setConstraintFlag(PxConstraintFlag::Enum flag, bool value) = 0; /** \brief get the constraint flags for this joint. \return the constraint flags @see PxConstraintFlag */ virtual PxConstraintFlags getConstraintFlags() const = 0; /** \brief set the inverse mass scale for actor0. \param[in] invMassScale the scale to apply to the inverse mass of actor 0 for resolving this constraint @see getInvMassScale0 */ virtual void setInvMassScale0(PxReal invMassScale) = 0; /** \brief get the inverse mass scale for actor0. \return inverse mass scale for actor0 @see setInvMassScale0 */ virtual PxReal getInvMassScale0() const = 0; /** \brief set the inverse inertia scale for actor0. \param[in] invInertiaScale the scale to apply to the inverse inertia of actor0 for resolving this constraint @see getInvMassScale0 */ virtual void setInvInertiaScale0(PxReal invInertiaScale) = 0; /** \brief get the inverse inertia scale for actor0. 
\return inverse inertia scale for actor0 @see setInvInertiaScale0 */ virtual PxReal getInvInertiaScale0() const = 0; /** \brief set the inverse mass scale for actor1. \param[in] invMassScale the scale to apply to the inverse mass of actor 1 for resolving this constraint @see getInvMassScale1 */ virtual void setInvMassScale1(PxReal invMassScale) = 0; /** \brief get the inverse mass scale for actor1. \return inverse mass scale for actor1 @see setInvMassScale1 */ virtual PxReal getInvMassScale1() const = 0; /** \brief set the inverse inertia scale for actor1. \param[in] invInertiaScale the scale to apply to the inverse inertia of actor1 for resolving this constraint @see getInvInertiaScale1 */ virtual void setInvInertiaScale1(PxReal invInertiaScale) = 0; /** \brief get the inverse inertia scale for actor1. \return inverse inertia scale for actor1 @see setInvInertiaScale1 */ virtual PxReal getInvInertiaScale1() const = 0; /** \brief Retrieves the PxConstraint corresponding to this joint. This can be used to determine, among other things, the force applied at the joint. \return the constraint */ virtual PxConstraint* getConstraint() const = 0; /** \brief Sets a name string for the object that can be retrieved with getName(). This is for debugging and is not used by the SDK. The string is not copied by the SDK, only the pointer is stored. \param[in] name String to set the objects name to. @see getName() */ virtual void setName(const char* name) = 0; /** \brief Retrieves the name string set with setName(). \return Name string associated with object. @see setName() */ virtual const char* getName() const = 0; /** \brief Deletes the joint. \note This call does not wake up the connected rigid bodies. */ virtual void release() = 0; /** \brief Retrieves the scene which this joint belongs to. \return Owner Scene. NULL if not part of a scene. 
@see PxScene */ virtual PxScene* getScene() const = 0; void* userData; //!< user can assign this to whatever, usually to create a 1:1 relationship with a user object. //serialization /** \brief Put class meta data in stream, used for serialization */ static void getBinaryMetaData(PxOutputStream& stream); //~serialization protected: virtual ~PxJoint() {} //serialization /** \brief Constructor */ PX_INLINE PxJoint(PxType concreteType, PxBaseFlags baseFlags) : PxBase(concreteType, baseFlags), userData(NULL) {} /** \brief Deserialization constructor */ PX_INLINE PxJoint(PxBaseFlags baseFlags) : PxBase(baseFlags) {} /** \brief Returns whether a given type name matches with the type of this instance */ virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxJoint", PxBase); } //~serialization }; class PxSpring { public: PxReal stiffness; //!< the spring strength of the drive: that is, the force proportional to the position error PxReal damping; //!< the damping strength of the drive: that is, the force proportional to the velocity error PxSpring(PxReal stiffness_, PxReal damping_): stiffness(stiffness_), damping(damping_) {} }; #if !PX_DOXYGEN } // namespace physx #endif /** \brief Helper function to setup a joint's global frame This replaces the following functions from previous SDK versions: void NxJointDesc::setGlobalAnchor(const NxVec3& wsAnchor); void NxJointDesc::setGlobalAxis(const NxVec3& wsAxis); The function sets the joint's localPose using world-space input parameters. \param[in] wsAnchor Global frame anchor point. <b>Range:</b> position vector \param[in] wsAxis Global frame axis. <b>Range:</b> direction vector \param[in,out] joint Joint having its global frame set. */ PX_C_EXPORT void PX_CALL_CONV PxSetJointGlobalFrame(physx::PxJoint& joint, const physx::PxVec3* wsAnchor, const physx::PxVec3* wsAxis); /** @} */ #endif
11,402
C
26.744525
135
0.741361
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxCollectionExt.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_COLLECTION_EXT_H #define PX_COLLECTION_EXT_H /** \addtogroup extensions @{ */ #include "PxPhysXConfig.h" #include "common/PxCollection.h" #if !PX_DOXYGEN namespace physx { #endif class PxCollection; class PxScene; class PxCollectionExt { public: /** \brief Removes and releases all object from a collection. 
The Collection itself is not released. If the releaseExclusiveShapes flag is not set to true, release() will not be called on exclusive shapes. It is assumed that the application holds a reference to each of the objects in the collection, with the exception of objects that are not releasable (PxBase::isReleasable()). In general, objects that violate this assumption need to be removed from the collection prior to calling releaseObjects. \note when a shape is created with PxRigidActor::createShape() or PxRigidActorExt::createExclusiveShape(), the only counted reference is held by the actor. If such a shape and its actor are present in the collection, the reference count will be decremented once when the actor is released, and once when the shape is released, resulting in undefined behavior. Shape reference counts can be incremented with PxShape::acquireReference(). \param[in] collection to remove and release all object from. \param[in] releaseExclusiveShapes if this parameter is set to false, release() will not be called on exclusive shapes. */ static void releaseObjects(PxCollection& collection, bool releaseExclusiveShapes = true); /** \brief Removes objects of a given type from a collection, potentially adding them to another collection. \param[in,out] collection Collection from which objects are removed \param[in] concreteType PxConcreteType of sdk objects that should be removed \param[in,out] to Optional collection to which the removed objects are added @see PxCollection, PxConcreteType */ static void remove(PxCollection& collection, PxType concreteType, PxCollection* to = NULL); /** \brief Collects all objects in PxPhysics that are shareable across multiple scenes. This function creates a new collection from all objects that are shareable across multiple scenes. Instances of the following types are included: PxConvexMesh, PxTriangleMesh, PxHeightField, PxShape and PxMaterial. This is a helper function to ease the creation of collections for serialization. 
\param[in] physics The physics SDK instance from which objects are collected. See #PxPhysics \return Collection to which objects are added. See #PxCollection @see PxCollection, PxPhysics */ static PxCollection* createCollection(PxPhysics& physics); /** \brief Collects all objects from a PxScene. This function creates a new collection from all objects that were added to the specified PxScene. Instances of the following types are included: PxActor, PxAggregate, PxArticulationReducedCoordinate and PxJoint (other PxConstraint types are not included). This is a helper function to ease the creation of collections for serialization. The function PxSerialization.complete() can be used to complete the collection with required objects prior to serialization. \param[in] scene The PxScene instance from which objects are collected. See #PxScene \return Collection to which objects are added. See #PxCollection @see PxCollection, PxScene, PxSerialization.complete() */ static PxCollection* createCollection(PxScene& scene); }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
5,234
C
42.625
158
0.77073
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxGjkQueryExt.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_GJK_QUERY_EXT_H #define PX_GJK_QUERY_EXT_H #include "geometry/PxGjkQuery.h" #include "geometry/PxGeometry.h" #if !PX_DOXYGEN namespace physx { #endif class PxSphereGeometry; class PxCapsuleGeometry; class PxBoxGeometry; class PxConvexMeshGeometry; class PxContactBuffer; class PxConvexMesh; /** \brief Pre-made support mapping for built-in convex geometry types. */ class PxGjkQueryExt { public: /** \brief Pre-made support mapping for a sphere */ struct SphereSupport : PxGjkQuery::Support { PxReal radius; /** \brief Default constructor */ SphereSupport(); /** \brief Constructs a SphereSupport for a sphere radius */ SphereSupport(PxReal radius); /** \brief Constructs a SphereSupport for a PxSphereGeometry */ SphereSupport(const PxSphereGeometry& geom); virtual PxReal getMargin() const; virtual PxVec3 supportLocal(const PxVec3& dir) const; }; /** \brief Pre-made support mapping for a capsule */ struct CapsuleSupport : PxGjkQuery::Support { PxReal radius, halfHeight; /** \brief Default constructor */ CapsuleSupport(); /** \brief Constructs a CapsuleSupport for capsule radius and halfHeight */ CapsuleSupport(PxReal radius, PxReal halfHeight); /** \brief Constructs a CapsuleSupport for a PxCapsuleGeometry */ CapsuleSupport(const PxCapsuleGeometry& geom); virtual PxReal getMargin() const; virtual PxVec3 supportLocal(const PxVec3& dir) const; }; /** \brief Pre-made support mapping for a box */ struct BoxSupport : PxGjkQuery::Support { PxVec3 halfExtents; PxReal margin; /** \brief Default constructor */ BoxSupport(); /** \brief Constructs a BoxSupport for a box halfExtents with optional margin */ BoxSupport(const PxVec3& halfExtents, PxReal margin = 0); /** \brief Constructs a BoxSupport for a PxBoxGeometry */ BoxSupport(const PxBoxGeometry& box, PxReal margin = 0); virtual PxReal getMargin() const; virtual PxVec3 supportLocal(const PxVec3& dir) const; }; /** \brief Pre-made support mapping for a convex mesh */ struct ConvexMeshSupport : PxGjkQuery::Support { const 
PxConvexMesh* convexMesh; PxVec3 scale; PxQuat scaleRotation; PxReal margin; /** \brief Default constructor */ ConvexMeshSupport(); /** \brief Constructs a BoxSupport for a PxConvexMesh */ ConvexMeshSupport(const PxConvexMesh& convexMesh, const PxVec3& scale = PxVec3(1), const PxQuat& scaleRotation = PxQuat(PxIdentity), PxReal margin = 0); /** \brief Constructs a BoxSupport for a PxConvexMeshGeometry */ ConvexMeshSupport(const PxConvexMeshGeometry& convexMesh, PxReal margin = 0); virtual PxReal getMargin() const; virtual PxVec3 supportLocal(const PxVec3& dir) const; }; /** \brief Pre-made support mapping for any PhysX's convex geometry (sphere, capsule, box, convex mesh) */ struct ConvexGeomSupport : PxGjkQuery::Support { /** \brief Default constructor */ ConvexGeomSupport(); /** \brief Constructs a BoxSupport for a PxGeometry */ ConvexGeomSupport(const PxGeometry& geom, PxReal margin = 0); /** \brief Destructor */ ~ConvexGeomSupport(); /** \brief Returns false if ConvexGeomSupport was constructed from non-convex geometry */ bool isValid() const; virtual PxReal getMargin() const; virtual PxVec3 supportLocal(const PxVec3& dir) const; private: PxGeometryType::Enum mType; union { void* alignment; PxU8 sphere[sizeof(SphereSupport)]; PxU8 capsule[sizeof(CapsuleSupport)]; PxU8 box[sizeof(BoxSupport)]; PxU8 convexMesh[sizeof(ConvexMeshSupport)]; } mSupport; }; /** \brief Generates a contact point between two shapes using GJK-EPA algorithm \param[in] a Shape A support mapping \param[in] b Shape B support mapping \param[in] poseA Shape A transformation \param[in] poseB Shape B transformation \param[in] contactDistance The distance at which contacts begin to be generated between the shapes \param[in] toleranceLength The toleranceLength. Used for scaling distance-based thresholds internally to produce appropriate results given simulations in different units \param[out] contactBuffer A buffer to store the contact \return True if there is a contact. 
*/ static bool generateContacts(const PxGjkQuery::Support& a, const PxGjkQuery::Support& b, const PxTransform& poseA, const PxTransform& poseB, PxReal contactDistance, PxReal toleranceLength, PxContactBuffer& contactBuffer); }; #if !PX_DOXYGEN } #endif #endif
6,199
C
28.107981
170
0.742055
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxD6JointCreate.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_D6_JOINT_CREATE_H #define PX_D6_JOINT_CREATE_H #include "common/PxPhysXCommonConfig.h" /** \addtogroup extensions @{ */ #if !PX_DOXYGEN namespace physx { #endif class PxPhysics; class PxRigidActor; class PxJoint; /** \brief Helper function to create a fixed joint, using either a PxD6Joint or PxFixedJoint. 
For fixed joints it is important that the joint frames have the same orientation. This helper function uses an identity rotation for both. It is also important that the joint frames have an equivalent position in world space. The function does not check this, so it is up to users to ensure that this is the case. \param[in] physics The physics SDK \param[in] actor0 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame \param[in] localPos0 The position of the joint relative to actor0 \param[in] actor1 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame \param[in] localPos1 The position of the joint relative to actor1 \param[in] useD6 True to use a PxD6Joint, false to use a PxFixedJoint; \return The created joint. @see PxD6Joint PxFixedJoint */ PxJoint* PxD6JointCreate_Fixed(PxPhysics& physics, PxRigidActor* actor0, const PxVec3& localPos0, PxRigidActor* actor1, const PxVec3& localPos1, bool useD6); /** \brief Helper function to create a distance joint, using either a PxD6Joint or PxDistanceJoint. This helper function only supports a maximum distance constraint, because PxD6Joint does not support a minimum distance constraint (contrary to PxDistanceJoint). The distance is computed between the joint frames' world-space positions. The joint frames' orientations are irrelevant here so the function sets them to identity. \param[in] physics The physics SDK \param[in] actor0 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame \param[in] localPos0 The position of the joint relative to actor0 \param[in] actor1 An actor to which the joint is attached. 
NULL may be used to attach the joint to a specific point in the world frame \param[in] localPos1 The position of the joint relative to actor1 \param[in] maxDist The maximum allowed distance \param[in] useD6 True to use a PxD6Joint, false to use a PxDistanceJoint; \return The created joint. @see PxD6Joint PxDistanceJoint */ PxJoint* PxD6JointCreate_Distance(PxPhysics& physics, PxRigidActor* actor0, const PxVec3& localPos0, PxRigidActor* actor1, const PxVec3& localPos1, float maxDist, bool useD6); /** \brief Helper function to create a prismatic joint, using either a PxD6Joint or PxPrismaticJoint. This function enforces that the joint frames have the same orientation, which is a local frame whose X is the desired translation axis. This orientation is computed by the function, so users only have to define the desired translation axis (typically 1;0;0 or 0;1;0 or 0;0;1). The translation can be limited. Limits are enforced if minLimit<maxLimit. If minLimit=maxLimit the axis is locked. If minLimit>maxLimit the limits are not enforced and the axis is free. The limit values are computed relative to the position of actor0's joint frame. The function creates hard limits, and uses PhysX's default contact distance parameter. \param[in] physics The physics SDK \param[in] actor0 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame \param[in] localPos0 The position of the joint relative to actor0 \param[in] actor1 An actor to which the joint is attached. 
NULL may be used to attach the joint to a specific point in the world frame \param[in] localPos1 The position of the joint relative to actor1 \param[in] axis The axis along which objects are allowed to move, expressed in the actors' local space \param[in] minLimit The minimum allowed position along the axis \param[in] maxLimit The maximum allowed position along the axis \param[in] useD6 True to use a PxD6Joint, false to use a PxPrismaticJoint; \return The created joint. @see PxD6Joint PxPrismaticJoint */ PxJoint* PxD6JointCreate_Prismatic(PxPhysics& physics, PxRigidActor* actor0, const PxVec3& localPos0, PxRigidActor* actor1, const PxVec3& localPos1, const PxVec3& axis, float minLimit, float maxLimit, bool useD6); /** \brief Helper function to create a revolute joint, using either a PxD6Joint or PxRevoluteJoint. This function enforces that the joint frames have the same orientation, which is a local frame whose X is the desired rotation axis. This orientation is computed by the function, so users only have to define the desired rotation axis (typically 1;0;0 or 0;1;0 or 0;0;1). The rotation can be limited. Limits are enforced if minLimit<maxLimit. If minLimit=maxLimit the axis is locked. If minLimit>maxLimit the limits are not enforced and the axis is free. The limit values are computed relative to the rotation of actor0's joint frame. The function creates hard limits, and uses PhysX's default contact distance parameter. Limits are expressed in radians. Allowed range is ]-2*PI;+2*PI[ \param[in] physics The physics SDK \param[in] actor0 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame \param[in] localPos0 The position of the joint relative to actor0 \param[in] actor1 An actor to which the joint is attached. 
NULL may be used to attach the joint to a specific point in the world frame \param[in] localPos1 The position of the joint relative to actor1 \param[in] axis The axis around which objects are allowed to move, expressed in the actors' local space \param[in] minLimit The minimum allowed rotation along the axis \param[in] maxLimit The maximum allowed rotation along the axis \param[in] useD6 True to use a PxD6Joint, false to use a PxRevoluteJoint; \return The created joint. @see PxD6Joint PxRevoluteJoint */ PxJoint* PxD6JointCreate_Revolute(PxPhysics& physics, PxRigidActor* actor0, const PxVec3& localPos0, PxRigidActor* actor1, const PxVec3& localPos1, const PxVec3& axis, float minLimit, float maxLimit, bool useD6); /** \brief Helper function to create a spherical joint, using either a PxD6Joint or PxSphericalJoint. This function supports a cone limit shape, defined by a cone axis and two angular limit values. This function enforces that the joint frames have the same orientation, which is a local frame whose X is the desired cone axis. This orientation is computed by the function, so users only have to define the desired cone axis (typically 1;0;0 or 0;1;0 or 0;0;1). The rotations can be limited. Limits are enforced if limit1>0 and limit2>0. Otherwise the motion is free. The limit values define an ellipse, which is the cross-section of the cone limit shape. The function creates hard limits, and uses PhysX's default contact distance parameter. Limits are expressed in radians. Allowed range is ]0;PI[. Limits are symmetric around the cone axis. The cone axis is equivalent to the twist axis for the D6 joint. The twist motion is not limited. \param[in] physics The physics SDK \param[in] actor0 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame \param[in] localPos0 The position of the joint relative to actor0 \param[in] actor1 An actor to which the joint is attached. 
NULL may be used to attach the joint to a specific point in the world frame \param[in] localPos1 The position of the joint relative to actor1 \param[in] axis The cone axis, expressed in the actors' local space \param[in] limit1 Max angular limit for the ellipse along the joint frame's second axis (first axis = cone axis) \param[in] limit2 Max angular limit for the ellipse along the joint frame's third axis (first axis = cone axis) \param[in] useD6 True to use a PxD6Joint, false to use a PxSphericalJoint; \return The created joint. @see PxD6Joint PxSphericalJoint */ PxJoint* PxD6JointCreate_Spherical(PxPhysics& physics, PxRigidActor* actor0, const PxVec3& localPos0, PxRigidActor* actor1, const PxVec3& localPos1, const PxVec3& axis, float limit1, float limit2, bool useD6); /** \brief Helper function to create a spherical joint, using either a PxD6Joint or PxSphericalJoint. This function supports a cone limit shape, defined by two pairs of angular limit values. This can be used to create an asymmetric cone. If the angular limit values are symmetric (i.e. minLimit1=-maxLimit1 and minLimit2=-maxLimit2) then the cone axis is the X axis in actor0's space. If the limits are not symmetric, the function rotates the cone axis accordingly so that limits remain symmetric for PhysX. If this happens, the initial joint frames will be different for both actors. By default minLimit1/maxLimit1 are limits around the joint's Y axis, and minLimit2/maxLimit2 are limits around the joint's Z axis. The function creates hard limits, and uses PhysX's default contact distance parameter. Limits are expressed in radians. Allowed range is ]-PI;PI[. The cone axis is equivalent to the twist axis for the D6 joint. The twist motion is not limited. The returned apiroty and apirotz values can later be added to retrieved Y and Z swing angle values (from the joint), to remap angle values to the given input range. 
\param[out] apiroty Amount of rotation around Y used to setup actor0's joint frame \param[out] apirotz Amount of rotation around Z used to setup actor0's joint frame \param[in] physics The physics SDK \param[in] actor0 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame \param[in] localPos0 The position of the joint relative to actor0 \param[in] actor1 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame \param[in] localPos1 The position of the joint relative to actor1 \param[in] minLimit1 Min angular limit along the joint frame's second axis (first axis = cone axis) \param[in] maxLimit1 Max angular limit along the joint frame's second axis (first axis = cone axis) \param[in] minLimit2 Min angular limit along the joint frame's third axis (first axis = cone axis) \param[in] maxLimit2 Max angular limit along the joint frame's third axis (first axis = cone axis) \param[in] useD6 True to use a PxD6Joint, false to use a PxSphericalJoint; \return The created joint. @see PxD6Joint PxSphericalJoint */ PxJoint* PxD6JointCreate_GenericCone(float& apiroty, float& apirotz, PxPhysics& physics, PxRigidActor* actor0, const PxVec3& localPos0, PxRigidActor* actor1, const PxVec3& localPos1, float minLimit1, float maxLimit1, float minLimit2, float maxLimit2, bool useD6); /** \brief Helper function to create a D6 joint with pyramidal swing limits. This function supports a pyramid limit shape, defined by two pairs of angular limit values. This can be used to create an asymmetric pyramid. If the angular limit values are symmetric (i.e. minLimit1=-maxLimit1 and minLimit2=-maxLimit2) then the pyramid axis is the X axis in actor0's space. By default minLimit1/maxLimit1 are limits around the joint's Y axis, and minLimit2/maxLimit2 are limits around the joint's Z axis. The function creates hard limits, and uses PhysX's default contact distance parameter. 
Limits are expressed in radians. Allowed range is ]-PI;PI[. The pyramid axis is equivalent to the twist axis for the D6 joint. The twist motion is not limited. \param[in] physics The physics SDK \param[in] actor0 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame \param[in] localPos0 The position of the joint relative to actor0 \param[in] actor1 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame \param[in] localPos1 The position of the joint relative to actor1 \param[in] axis The pyramid axis, expressed in the actors' local space \param[in] minLimit1 Min angular limit along the joint frame's second axis (first axis = pyramid axis) \param[in] maxLimit1 Max angular limit along the joint frame's second axis (first axis = pyramid axis) \param[in] minLimit2 Min angular limit along the joint frame's third axis (first axis = pyramid axis) \param[in] maxLimit2 Max angular limit along the joint frame's third axis (first axis = pyramid axis) \return The created joint. @see PxD6Joint */ PxJoint* PxD6JointCreate_Pyramid(PxPhysics& physics, PxRigidActor* actor0, const PxVec3& localPos0, PxRigidActor* actor1, const PxVec3& localPos1, const PxVec3& axis, float minLimit1, float maxLimit1, float minLimit2, float maxLimit2); #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
14,632
C
56.384314
263
0.774399
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxFixedJoint.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_FIXED_JOINT_H #define PX_FIXED_JOINT_H /** \addtogroup extensions @{ */ #include "extensions/PxJoint.h" #if !PX_DOXYGEN namespace physx { #endif class PxFixedJoint; /** \brief Create a fixed joint. \param[in] physics The physics SDK \param[in] actor0 An actor to which the joint is attached. 
NULL may be used to attach the joint to a specific point in the world frame

\param[in] localFrame0 The position and orientation of the joint relative to actor0

\param[in] actor1 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame

\param[in] localFrame1 The position and orientation of the joint relative to actor1

\return The newly created fixed joint, or NULL if creation failed

@see PxFixedJoint
*/
PxFixedJoint* PxFixedJointCreate(PxPhysics& physics, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1);

/**
\brief A fixed joint permits no relative movement between two bodies. ie the bodies are glued together.

\image html fixedJoint.png

@see PxFixedJointCreate() PxJoint
*/
class PxFixedJoint : public PxJoint
{
public:

	/**
	\brief Returns string name of PxFixedJoint, used for serialization
	*/
	virtual	const char*	getConcreteTypeName() const { return "PxFixedJoint"; }

protected:

	//serialization

	/**
	\brief Constructor
	*/
	// Forwards the concrete type id and base flags to PxJoint; used when the
	// joint is created through the regular factory path.
	PX_INLINE PxFixedJoint(PxType concreteType, PxBaseFlags baseFlags) : PxJoint(concreteType, baseFlags) {}

	/**
	\brief Deserialization constructor
	*/
	// Invoked when the object is rebuilt from serialized data; the concrete
	// type is restored by the serialization framework, not passed here.
	PX_INLINE PxFixedJoint(PxBaseFlags baseFlags) : PxJoint(baseFlags) {}

	/**
	\brief Returns whether a given type name matches with the type of this instance
	*/
	// PX_IS_KIND_OF compares against "PxFixedJoint" and falls back to the
	// PxJoint base-class check, so base-type queries also return true.
	virtual	bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxFixedJoint", PxJoint); }

	//~serialization
};

#if !PX_DOXYGEN
} // namespace physx
#endif

/** @} */
#endif
3,528
C
33.940594
161
0.753118
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxDefaultAllocator.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_DEFAULT_ALLOCATOR_H
#define PX_DEFAULT_ALLOCATOR_H
/**
\addtogroup extensions
@{
*/

#include "foundation/PxAllocatorCallback.h"
#include "foundation/PxAssert.h"
#include "foundation/PxMemory.h"
#include "common/PxPhysXCommonConfig.h"

#include <stdlib.h>

#if PX_WINDOWS_FAMILY || PX_LINUX_FAMILY || PX_SWITCH
#include <malloc.h>
#endif

#if !PX_DOXYGEN
namespace physx
{
#endif

#if PX_WINDOWS_FAMILY
// on win32 we only have 8-byte alignment guaranteed, but the CRT provides special aligned allocation fns

// Windows: route through the CRT's aligned allocator; memory obtained from
// _aligned_malloc must be released with _aligned_free, hence the paired helper.
PX_FORCE_INLINE void* platformAlignedAlloc(size_t size)
{
	return _aligned_malloc(size, 16);
}

PX_FORCE_INLINE void platformAlignedFree(void* ptr)
{
	_aligned_free(ptr);
}
#elif PX_LINUX_FAMILY || PX_SWITCH
// Linux/Switch: memalign returns 16-byte-aligned memory that is compatible
// with plain ::free.
PX_FORCE_INLINE void* platformAlignedAlloc(size_t size)
{
	return ::memalign(16, size);
}

PX_FORCE_INLINE void platformAlignedFree(void* ptr)
{
	::free(ptr);
}
#else
// on all other platforms we get 16-byte alignment by default
PX_FORCE_INLINE void* platformAlignedAlloc(size_t size)
{
	return ::malloc(size);
}

PX_FORCE_INLINE void platformAlignedFree(void* ptr)
{
	::free(ptr);
}
#endif

/**
\brief default implementation of the allocator interface required by the SDK
*/
class PxDefaultAllocator : public PxAllocatorCallback
{
public:
	// Allocates `size` bytes with 16-byte alignment via the platform helper
	// above. The typeName/filename/line parameters of the PxAllocatorCallback
	// interface are accepted but ignored by this default implementation.
	// Returns NULL on allocation failure (no exception is thrown).
	virtual void* allocate(size_t size, const char*, const char*, int)
	{
		void* ptr = platformAlignedAlloc(size);
		// The SDK requires 16-byte-aligned allocations; assert rather than
		// handle, since a misaligned platform allocator is a porting bug.
		PX_ASSERT((size_t(ptr) & 15)==0);
#if PX_STOMP_ALLOCATED_MEMORY
		// Debug aid: fill fresh memory with 0xcd so reads of uninitialized
		// memory are recognizable.
		// NOTE(review): PxU32(size) truncates for allocations >= 4GB, so only
		// the low 32 bits' worth of bytes would be stomped — confirm whether
		// such allocation sizes are possible here.
		if(ptr != NULL)
		{
			PxMemSet(ptr, PxI32(0xcd), PxU32(size));
		}
#endif
		return ptr;
	}

	// Releases memory previously returned by allocate(); must use the matching
	// platform-specific free (see platformAlignedFree). Safe on NULL for the
	// non-Windows paths; _aligned_free also accepts NULL.
	virtual void deallocate(void* ptr)
	{
		platformAlignedFree(ptr);
	}
};

#if !PX_DOXYGEN
} // namespace physx
#endif

/** @} */
#endif
3,376
C
28.112069
105
0.744372
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxTriangleMeshExt.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_TRIANGLE_MESH_EXT_H #define PX_TRIANGLE_MESH_EXT_H /** \addtogroup extensions @{ */ #include "PxPhysXConfig.h" #include "common/PxPhysXCommonConfig.h" #include "foundation/PxArray.h" #if !PX_DOXYGEN namespace physx { #endif class PxGeometry; class PxTriangleMesh; class PxTriangleMeshGeometry; class PxHeightFieldGeometry; /** \brief Utility class to find mesh triangles touched by a specified geometry object. This class is a helper calling PxMeshQuery::findOverlapTriangleMesh or PxMeshQuery::findOverlapHeightField under the hood, while taking care of necessary memory management issues. PxMeshQuery::findOverlapTriangleMesh and PxMeshQuery::findOverlapHeightField are the "raw" functions operating on user-provided fixed-size buffers. These functions abort with an error code in case of buffer overflow. PxMeshOverlapUtil is a convenient helper function checking this error code, and resizing buffers appropriately, until the desired call succeeds. Returned triangle indices are stored within the class, and can be used with PxMeshQuery::getTriangle() to retrieve the triangle properties. */ class PxMeshOverlapUtil { public: PxMeshOverlapUtil(); ~PxMeshOverlapUtil(); /** \brief Find the mesh triangles which touch the specified geometry object. \param[in] geom The geometry object to test for mesh triangle overlaps. Supported geometries are #PxSphereGeometry, #PxCapsuleGeometry and #PxBoxGeometry \param[in] geomPose Pose of the geometry object \param[in] meshGeom The triangle mesh geometry to check overlap against \param[in] meshPose Pose of the triangle mesh \return Number of overlaps found. Triangle indices can then be accessed through the #getResults() function. 
@see PxGeometry PxTransform PxTriangleMeshGeometry PxMeshQuery::findOverlapTriangleMesh */ PxU32 findOverlap(const PxGeometry& geom, const PxTransform& geomPose, const PxTriangleMeshGeometry& meshGeom, const PxTransform& meshPose); /** \brief Find the height field triangles which touch the specified geometry object. \param[in] geom The geometry object to test for height field overlaps. Supported geometries are #PxSphereGeometry, #PxCapsuleGeometry and #PxBoxGeometry. The sphere and capsule queries are currently conservative estimates. \param[in] geomPose Pose of the geometry object \param[in] hfGeom The height field geometry to check overlap against \param[in] hfPose Pose of the height field \return Number of overlaps found. Triangle indices can then be accessed through the #getResults() function. @see PxGeometry PxTransform PxHeightFieldGeometry PxMeshQuery::findOverlapHeightField */ PxU32 findOverlap(const PxGeometry& geom, const PxTransform& geomPose, const PxHeightFieldGeometry& hfGeom, const PxTransform& hfPose); /** \brief Retrieves array of triangle indices after a findOverlap call. \return Indices of touched triangles */ PX_FORCE_INLINE const PxU32* getResults() const { return mResultsMemory; } /** \brief Retrieves number of triangle indices after a findOverlap call. \return Number of touched triangles */ PX_FORCE_INLINE PxU32 getNbResults() const { return mNbResults; } private: PxU32* mResultsMemory; PxU32 mResults[256]; PxU32 mNbResults; PxU32 mMaxNbResults; }; /** \brief Computes an approximate minimum translational distance (MTD) between a geometry object and a mesh. This iterative function computes an approximate vector that can be used to depenetrate a geom object from a triangle mesh. Returned depenetration vector should be applied to 'geom', to get out of the mesh. The function works best when the amount of overlap between the geom object and the mesh is small. 
If the geom object's center goes inside the mesh, backface culling usually kicks in, no overlap is detected, and the function does not compute an MTD vector. The function early exits if no overlap is detected after a depenetration attempt. This means that if maxIter = N, the code will attempt at most N iterations but it might exit earlier if depenetration has been successful. Usually N = 4 gives good results. \param[out] direction Computed MTD unit direction \param[out] depth Penetration depth. Always positive or zero. \param[in] geom The geometry object \param[in] geomPose Pose for the geometry object \param[in] meshGeom The mesh geometry \param[in] meshPose Pose for the mesh \param[in] maxIter Max number of iterations before returning. \param[out] usedIter Number of depenetrations attempts performed during the call. Will not be returned if the pointer is NULL. \return True if the MTD has successfully been computed, i.e. if objects do overlap. @see PxGeometry PxTransform PxTriangleMeshGeometry */ bool PxComputeTriangleMeshPenetration(PxVec3& direction, PxReal& depth, const PxGeometry& geom, const PxTransform& geomPose, const PxTriangleMeshGeometry& meshGeom, const PxTransform& meshPose, PxU32 maxIter, PxU32* usedIter = NULL); /** \brief Computes an approximate minimum translational distance (MTD) between a geometry object and a heightfield. This iterative function computes an approximate vector that can be used to depenetrate a geom object from a heightfield. Returned depenetration vector should be applied to 'geom', to get out of the heightfield. The function works best when the amount of overlap between the geom object and the mesh is small. If the geom object's center goes inside the heightfield, backface culling usually kicks in, no overlap is detected, and the function does not compute an MTD vector. The function early exits if no overlap is detected after a depenetration attempt. 
This means that if maxIter = N, the code will attempt at most N iterations but it might exit earlier if depenetration has been successful. Usually N = 4 gives good results. \param[out] direction Computed MTD unit direction \param[out] depth Penetration depth. Always positive or zero. \param[in] geom The geometry object \param[in] geomPose Pose for the geometry object \param[in] heightFieldGeom The heightfield geometry \param[in] heightFieldPose Pose for the heightfield \param[in] maxIter Max number of iterations before returning. \param[out] usedIter Number of depenetrations attempts performed during the call. Will not be returned if the pointer is NULL. \return True if the MTD has successfully been computed, i.e. if objects do overlap. @see PxGeometry PxTransform PxHeightFieldGeometry */ bool PxComputeHeightFieldPenetration(PxVec3& direction, PxReal& depth, const PxGeometry& geom, const PxTransform& geomPose, const PxHeightFieldGeometry& heightFieldGeom, const PxTransform& heightFieldPose, PxU32 maxIter, PxU32* usedIter = NULL); /** \brief Extracts an isosurface from the SDF of a mesh if it the SDF is available. \param[in] triangleMesh The triangle mesh \param[out] isosurfaceVertices The vertices of the extracted isosurface \param[out] isosurfaceTriangleIndices The triangles of the extracted isosurface */ bool PxExtractIsosurfaceFromSDF(const PxTriangleMesh& triangleMesh, PxArray<PxVec3>& isosurfaceVertices, PxArray<PxU32>& isosurfaceTriangleIndices); #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
9,134
C
45.136363
223
0.768448
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxJointLimit.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_JOINT_LIMIT_H #define PX_JOINT_LIMIT_H /** \addtogroup extensions @{ */ #include "foundation/PxMath.h" #include "common/PxTolerancesScale.h" #include "extensions/PxJoint.h" #include "PxPhysXConfig.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief Describes the parameters for a joint limit. 
Limits are enabled or disabled by setting flags or other configuration parameters joints, see the documentation for specific joint types for details. */ class PxJointLimitParameters { public: /** \brief Controls the amount of bounce when the joint hits a limit. A restitution value of 1.0 causes the joint to bounce back with the velocity which it hit the limit. A value of zero causes the joint to stop dead. In situations where the joint has many locked DOFs (e.g. 5) the restitution may not be applied correctly. This is due to a limitation in the solver which causes the restitution velocity to become zero as the solver enforces constraints on the other DOFs. This limitation applies to both angular and linear limits, however it is generally most apparent with limited angular DOFs. Disabling joint projection and increasing the solver iteration count may improve this behavior to some extent. Also, combining soft joint limits with joint drives driving against those limits may affect stability. <b>Range:</b> [0,1]<br> <b>Default:</b> 0.0 */ PxReal restitution; /** determines the minimum impact velocity which will cause the joint to bounce */ PxReal bounceThreshold; /** \brief if greater than zero, the limit is soft, i.e. a spring pulls the joint back to the limit <b>Range:</b> [0, PX_MAX_F32)<br> <b>Default:</b> 0.0 */ PxReal stiffness; /** \brief if spring is greater than zero, this is the damping of the limit spring <b>Range:</b> [0, PX_MAX_F32)<br> <b>Default:</b> 0.0 */ PxReal damping; PxJointLimitParameters() : restitution (0.0f), bounceThreshold (0.0f), stiffness (0.0f), damping (0.0f) { } PxJointLimitParameters(const PxJointLimitParameters& p) : restitution (p.restitution), bounceThreshold (p.bounceThreshold), stiffness (p.stiffness), damping (p.damping) { } /** \brief Returns true if the current settings are valid. 
\return true if the current settings are valid */ PX_INLINE bool isValid() const { return PxIsFinite(restitution) && restitution >= 0 && restitution <= 1 && PxIsFinite(stiffness) && stiffness >= 0 && PxIsFinite(damping) && damping >= 0 && PxIsFinite(bounceThreshold) && bounceThreshold >= 0; } PX_INLINE bool isSoft() const { return damping>0 || stiffness>0; } protected: ~PxJointLimitParameters() {} }; /** \brief Describes a one-sided linear limit. */ class PxJointLinearLimit : public PxJointLimitParameters { public: /** \brief the extent of the limit. <b>Range:</b> (0, PX_MAX_F32) <br> <b>Default:</b> PX_MAX_F32 */ PxReal value; /** \brief construct a linear hard limit \param[in] extent The extent of the limit @see PxJointLimitParameters */ PxJointLinearLimit(PxReal extent) : value(extent) { } /** \brief construct a linear soft limit \param[in] extent the extent of the limit \param[in] spring the stiffness and damping parameters for the limit spring @see PxJointLimitParameters */ PxJointLinearLimit(PxReal extent, const PxSpring& spring) : value(extent) { stiffness = spring.stiffness; damping = spring.damping; } /** \brief Returns true if the limit is valid \return true if the current settings are valid */ PX_INLINE bool isValid() const { return PxJointLimitParameters::isValid() && PxIsFinite(value) && value > 0.0f; } }; /** \brief Describes a two-sided limit. */ class PxJointLinearLimitPair : public PxJointLimitParameters { public: /** \brief the range of the limit. The upper limit must be no lower than the lower limit, and if they are equal the limited degree of freedom will be treated as locked. <b>Range:</b> See the joint on which the limit is used for details<br> <b>Default:</b> lower = -PX_MAX_F32/3, upper = PX_MAX_F32/3 */ PxReal upper, lower; /** \brief Construct a linear hard limit pair. The lower distance value must be less than the upper distance value. \param[in] scale A PxTolerancesScale struct. 
Should be the same as used when creating the PxPhysics object. \param[in] lowerLimit The lower distance of the limit \param[in] upperLimit The upper distance of the limit @see PxJointLimitParameters PxTolerancesScale */ PxJointLinearLimitPair(const PxTolerancesScale& scale, PxReal lowerLimit = -PX_MAX_F32/3.0f, PxReal upperLimit = PX_MAX_F32/3.0f) : upper(upperLimit), lower(lowerLimit) { bounceThreshold = 2.0f*scale.length; } /** \brief construct a linear soft limit pair \param[in] lowerLimit The lower distance of the limit \param[in] upperLimit The upper distance of the limit \param[in] spring The stiffness and damping parameters of the limit spring @see PxJointLimitParameters */ PxJointLinearLimitPair(PxReal lowerLimit, PxReal upperLimit, const PxSpring& spring) : upper(upperLimit), lower(lowerLimit) { stiffness = spring.stiffness; damping = spring.damping; } /** \brief Returns true if the limit is valid. \return true if the current settings are valid */ PX_INLINE bool isValid() const { return PxJointLimitParameters::isValid() && PxIsFinite(upper) && PxIsFinite(lower) && upper >= lower && PxIsFinite(upper - lower); } }; class PxJointAngularLimitPair : public PxJointLimitParameters { public: /** \brief the range of the limit. The upper limit must be no lower than the lower limit. <b>Unit:</b> Angular: Radians <b>Range:</b> See the joint on which the limit is used for details<br> <b>Default:</b> lower = -PI/2, upper = PI/2 */ PxReal upper, lower; /** \brief construct an angular hard limit pair. The lower value must be less than the upper value. \param[in] lowerLimit The lower angle of the limit \param[in] upperLimit The upper angle of the limit @see PxJointLimitParameters */ PxJointAngularLimitPair(PxReal lowerLimit, PxReal upperLimit) : upper(upperLimit), lower(lowerLimit) { bounceThreshold = 0.5f; } /** \brief construct an angular soft limit pair. The lower value must be less than the upper value. 
\param[in] lowerLimit The lower angle of the limit \param[in] upperLimit The upper angle of the limit \param[in] spring The stiffness and damping of the limit spring @see PxJointLimitParameters */ PxJointAngularLimitPair(PxReal lowerLimit, PxReal upperLimit, const PxSpring& spring) : upper(upperLimit), lower(lowerLimit) { stiffness = spring.stiffness; damping = spring.damping; } /** \brief Returns true if the limit is valid. \return true if the current settings are valid */ PX_INLINE bool isValid() const { return PxJointLimitParameters::isValid() && PxIsFinite(upper) && PxIsFinite(lower) && upper >= lower; } }; /** \brief Describes an elliptical conical joint limit. Note that very small or highly elliptical limit cones may result in jitter. @see PxD6Joint PxSphericalJoint */ class PxJointLimitCone : public PxJointLimitParameters { public: /** \brief the maximum angle from the Y axis of the constraint frame. <b>Unit:</b> Angular: Radians <b>Range:</b> Angular: (0,PI)<br> <b>Default:</b> PI/2 */ PxReal yAngle; /** \brief the maximum angle from the Z-axis of the constraint frame. <b>Unit:</b> Angular: Radians <b>Range:</b> Angular: (0,PI)<br> <b>Default:</b> PI/2 */ PxReal zAngle; /** \brief Construct a cone hard limit. \param[in] yLimitAngle The limit angle from the Y-axis of the constraint frame \param[in] zLimitAngle The limit angle from the Z-axis of the constraint frame @see PxJointLimitParameters */ PxJointLimitCone(PxReal yLimitAngle, PxReal zLimitAngle) : yAngle(yLimitAngle), zAngle(zLimitAngle) { bounceThreshold = 0.5f; } /** \brief Construct a cone soft limit. 
\param[in] yLimitAngle The limit angle from the Y-axis of the constraint frame \param[in] zLimitAngle The limit angle from the Z-axis of the constraint frame \param[in] spring The stiffness and damping of the limit spring @see PxJointLimitParameters */ PxJointLimitCone(PxReal yLimitAngle, PxReal zLimitAngle, const PxSpring& spring) : yAngle(yLimitAngle), zAngle(zLimitAngle) { stiffness = spring.stiffness; damping = spring.damping; } /** \brief Returns true if the limit is valid. \return true if the current settings are valid */ PX_INLINE bool isValid() const { return PxJointLimitParameters::isValid() && PxIsFinite(yAngle) && yAngle>0 && yAngle<PxPi && PxIsFinite(zAngle) && zAngle>0 && zAngle<PxPi; } }; /** \brief Describes a pyramidal joint limit. @see PxD6Joint */ class PxJointLimitPyramid : public PxJointLimitParameters { public: /** \brief the minimum angle from the Y axis of the constraint frame. <b>Unit:</b> Angular: Radians <b>Range:</b> Angular: (-PI,PI)<br> <b>Default:</b> -PI/2 */ PxReal yAngleMin; /** \brief the maximum angle from the Y axis of the constraint frame. <b>Unit:</b> Angular: Radians <b>Range:</b> Angular: (-PI,PI)<br> <b>Default:</b> PI/2 */ PxReal yAngleMax; /** \brief the minimum angle from the Z-axis of the constraint frame. <b>Unit:</b> Angular: Radians <b>Range:</b> Angular: (-PI,PI)<br> <b>Default:</b> -PI/2 */ PxReal zAngleMin; /** \brief the maximum angle from the Z-axis of the constraint frame. <b>Unit:</b> Angular: Radians <b>Range:</b> Angular: (-PI,PI)<br> <b>Default:</b> PI/2 */ PxReal zAngleMax; /** \brief Construct a pyramid hard limit. 
\param[in] yLimitAngleMin The minimum limit angle from the Y-axis of the constraint frame \param[in] yLimitAngleMax The maximum limit angle from the Y-axis of the constraint frame \param[in] zLimitAngleMin The minimum limit angle from the Z-axis of the constraint frame \param[in] zLimitAngleMax The maximum limit angle from the Z-axis of the constraint frame @see PxJointLimitParameters */ PxJointLimitPyramid(PxReal yLimitAngleMin, PxReal yLimitAngleMax, PxReal zLimitAngleMin, PxReal zLimitAngleMax) : yAngleMin(yLimitAngleMin), yAngleMax(yLimitAngleMax), zAngleMin(zLimitAngleMin), zAngleMax(zLimitAngleMax) { bounceThreshold = 0.5f; } /** \brief Construct a pyramid soft limit. \param[in] yLimitAngleMin The minimum limit angle from the Y-axis of the constraint frame \param[in] yLimitAngleMax The maximum limit angle from the Y-axis of the constraint frame \param[in] zLimitAngleMin The minimum limit angle from the Z-axis of the constraint frame \param[in] zLimitAngleMax The maximum limit angle from the Z-axis of the constraint frame \param[in] spring The stiffness and damping of the limit spring @see PxJointLimitParameters */ PxJointLimitPyramid(PxReal yLimitAngleMin, PxReal yLimitAngleMax, PxReal zLimitAngleMin, PxReal zLimitAngleMax, const PxSpring& spring) : yAngleMin(yLimitAngleMin), yAngleMax(yLimitAngleMax), zAngleMin(zLimitAngleMin), zAngleMax(zLimitAngleMax) { stiffness = spring.stiffness; damping = spring.damping; } /** \brief Returns true if the limit is valid. \return true if the current settings are valid */ PX_INLINE bool isValid() const { return PxJointLimitParameters::isValid() && PxIsFinite(yAngleMin) && yAngleMin>-PxPi && yAngleMin<PxPi && PxIsFinite(yAngleMax) && yAngleMax>-PxPi && yAngleMax<PxPi && PxIsFinite(zAngleMin) && zAngleMin>-PxPi && zAngleMin<PxPi && PxIsFinite(zAngleMax) && zAngleMax>-PxPi && zAngleMax<PxPi && yAngleMax>=yAngleMin && zAngleMax>=zAngleMin; } }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
13,452
C
26.567623
138
0.726881
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxSoftBodyExt.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_SOFT_BODY_EXT_H #define PX_SOFT_BODY_EXT_H /** \addtogroup extensions @{ */ #include "foundation/PxTransform.h" #include "foundation/PxUserAllocated.h" #include "PxSoftBody.h" #include "PxSoftBodyFlag.h" #include "cudamanager/PxCudaContextManager.h" #include "cudamanager/PxCudaTypes.h" #if !PX_DOXYGEN namespace physx { #endif struct PxCookingParams; class PxSimpleTriangleMesh; class PxInsertionCallback; class PxSoftBodyMesh; /** \brief Utility functions for use with PxSoftBody and subclasses */ class PxSoftBodyExt { public: /** \brief Computes the SoftBody's vertex masses from the provided density and the volume of the tetrahedra The buffers affected by this operation can be obtained from the SoftBody using the methods getSimPositionInvMassBufferD() and getSimVelocityBufferD() The inverse mass is stored in the 4th component (the first three components are x, y, z coordinates) of the simulation mesh's position buffer. \param[in] softBody The soft body which will get its mass updated \param[in] density The density to used to calculate the mass from the body's volume \param[in] maxInvMassRatio Maximum allowed ratio defined as max(vertexMasses) / min(vertexMasses) where vertexMasses is a list of float values with a mass for every vertex in the simulation mesh \param[in] simPositionsPinned A pointer to a pinned host memory buffer containing positions and inverse masses for each vertex of the simulation mesh. 
@see PxSoftBody PxSoftBody::getSimPositionInvMassBufferD() */ static void updateMass(PxSoftBody& softBody, const PxReal density, const PxReal maxInvMassRatio, PxVec4* simPositionsPinned); /** \brief Computes the SoftBody's vertex masses such that the sum of all masses is equal to the provided mass The buffers affected by this operation can be obtained from the SoftBody using the methods getSimPositionInvMassBufferD()) and getSimVelocityBufferD() The inverse mass is stored in the 4th component (the first three components are x, y, z coordinates) of the simulation mesh's position buffer. \param[in] softBody The soft body which will get its mass updated \param[in] mass The SoftBody's mass \param[in] maxInvMassRatio Maximum allowed ratio defined as max(vertexMasses) / min(vertexMasses) where vertexMasses is a list of float values with a mass for every vertex in the simulation mesh \param[in] simPositionsPinned A pointer to a pinned host memory buffer containing positions and inverse masses for each vertex of the simulation mesh. @see PxSoftBody PxSoftBody::getSimPositionInvMassBufferD() */ static void setMass(PxSoftBody& softBody, const PxReal mass, const PxReal maxInvMassRatio, PxVec4* simPositionsPinned); /** \brief Transforms a SoftBody The buffers affected by this operation can be obtained from the SoftBody using the methods getSimPositionInvMassBufferD() and getSimVelocityBufferD() Applies a transformation to the simulation mesh's positions an velocities. Velocities only get rotated and scaled (translation is not applicable to direction vectors). It does not modify the body's mass. If the method is called multiple times, the transformation will compound with the ones previously applied. 
\param[in] softBody The soft body which is transformed \param[in] transform The transform to apply \param[in] scale A scaling factor \param[in] simPositionsPinned A pointer to a pinned host memory buffer containing positions and inverse masses for each vertex of the simulation mesh. \param[in] simVelocitiesPinned A pointer to a pinned host memory buffer containing velocities for each vertex of the simulation mesh. \param[in] collPositionsPinned A pointer to a pinned host memory buffer containing positions and inverse masses for each vertex of the collision mesh. \param[in] restPositionsPinned A pointer to a pinned host memory buffer containing rest positions of the collision mesh. @see PxSoftBody */ static void transform(PxSoftBody& softBody, const PxTransform& transform, const PxReal scale, PxVec4* simPositionsPinned, PxVec4* simVelocitiesPinned, PxVec4* collPositionsPinned, PxVec4* restPositionsPinned); /** \brief Updates the collision mesh's vertex positions to match the simulation mesh's transformation and scale. The buffer affected by this operation can be obtained from the SoftBody using the method getPositionInvMassBufferD() \param[in] softBody The soft body which will get its collision mesh vertices updated \param[in] simPositionsPinned A pointer to a pinned host memory buffer containing positions and inverse masses for each vertex of the simulation mesh. \param[in] collPositionsPinned A pointer to a pinned host memory buffer containing positions and inverse masses for each vertex of the collision mesh. @see PxSoftBody */ static void updateEmbeddedCollisionMesh(PxSoftBody& softBody, PxVec4* simPositionsPinned, PxVec4* collPositionsPinned); /** \brief Uploads prepared SoftBody data to the GPU. It ensures that the embedded collision mesh matches the simulation mesh's transformation and scale. 
\param[in] softBody The soft body which will perform the data upload \param[in] flags Specifies which buffers the data transfer should include \param[in] simPositionsPinned A pointer to a pinned host memory buffer containing positions and inverse masses for each vertex of the simulation mesh. \param[in] simVelocitiesPinned A pointer to a pinned host memory buffer containing velocities for each vertex of the simulation mesh. \param[in] collPositionsPinned A pointer to a pinned host memory buffer containing positions and inverse masses for each vertex of the collision mesh. \param[in] restPositionsPinned A pointer to a pinned host memory buffer containing rest positions of the collision mesh. \param[in] stream A cuda stream to perform the copies. @see PxSoftBody @deprecated Use copyToDevice() instead. */ PX_DEPRECATED static void commit(PxSoftBody& softBody, PxSoftBodyDataFlags flags, PxVec4* simPositionsPinned, PxVec4* simVelocitiesPinned, PxVec4* collPositionsPinned, PxVec4* restPositionsPinned, CUstream stream = CUstream(0)); /** \brief Uploads prepared SoftBody data to the GPU. It ensures that the embedded collision mesh matches the simulation mesh's transformation and scale. \param[in] softBody The soft body which will perform the data upload \param[in] flags Specifies which buffers the data transfer should include \param[in] simPositionsPinned A pointer to a pinned host memory buffer containing positions and inverse masses for each vertex of the simulation mesh. \param[in] simVelocitiesPinned A pointer to a pinned host memory buffer containing velocities for each vertex of the simulation mesh. \param[in] collPositionsPinned A pointer to a pinned host memory buffer containing positions and inverse masses for each vertex of the collision mesh. \param[in] restPositionsPinned A pointer to a pinned host memory buffer containing rest positions of the collision mesh. \param[in] stream A cuda stream to perform the copies. 
@see PxSoftBody */ static void copyToDevice(PxSoftBody& softBody, PxSoftBodyDataFlags flags, PxVec4* simPositionsPinned, PxVec4* simVelocitiesPinned, PxVec4* collPositionsPinned, PxVec4* restPositionsPinned, CUstream stream = CUstream(0)); /** \brief Creates a full SoftBody mesh matching the shape given as input. Uses a voxel mesh for FEM simulation and a surface-matching mesh for collision detection. \param[in] params Cooking params instance required for mesh processing \param[in] surfaceMesh Input triangle mesh that represents the surface of the SoftBody \param[in] numVoxelsAlongLongestAABBAxis The number of voxels along the longest bounding box axis \param[in] insertionCallback The insertion interface from PxPhysics \param[in] validate If set to true the input triangle mesh will get analyzed to find possible deficiencies \return SoftBody mesh if cooking was successful, NULL otherwise @see PxSoftBodyMesh */ static PxSoftBodyMesh* createSoftBodyMesh(const PxCookingParams& params, const PxSimpleTriangleMesh& surfaceMesh, PxU32 numVoxelsAlongLongestAABBAxis, PxInsertionCallback& insertionCallback, const bool validate = true); /** \brief Creates a full SoftBody mesh matching the shape given as input. Uses the same surface-matching mesh for collision detection and FEM simulation. \param[in] params Cooking params instance required for mesh processing \param[in] surfaceMesh Input triangle mesh that represents the surface of the SoftBody \param[in] insertionCallback The insertion interface from PxPhysics \param[in] maxWeightRatioInTet Upper limit for the ratio of node weights that are adjacent to the same tetrahedron. The closer to one (while remaining larger than one), the more stable the simulation. 
\param[in] validate If set to true the input triangle mesh will get analyzed to find possible deficiencies \return SoftBody mesh if cooking was successful, NULL otherwise @see PxSoftBodyMesh */ static PxSoftBodyMesh* createSoftBodyMeshNoVoxels(const PxCookingParams& params, const PxSimpleTriangleMesh& surfaceMesh, PxInsertionCallback& insertionCallback, PxReal maxWeightRatioInTet = 1.5f, const bool validate = true); /** \brief Creates a SoftBody instance from a SoftBody mesh \param[in] softBodyMesh The SoftBody mesh \param[in] transform The transform that defines initial position and orientation of the SoftBody \param[in] material The material \param[in] cudaContextManager A cuda context manager \param[in] density The density used to compute the mass properties \param[in] solverIterationCount The number of iterations the solver should apply during simulation \param[in] femParams Additional parameters to specify e. g. damping \param[in] scale The scaling of the SoftBody \return SoftBody instance @see PxSoftBodyMesh, PxSoftBody */ static PxSoftBody* createSoftBodyFromMesh(PxSoftBodyMesh* softBodyMesh, const PxTransform& transform, const PxFEMSoftBodyMaterial& material, PxCudaContextManager& cudaContextManager, PxReal density = 100.0f, PxU32 solverIterationCount = 30, const PxFEMParameters& femParams = PxFEMParameters(), PxReal scale = 1.0f); /** \brief Creates a SoftBody instance with a box shape \param[in] transform The transform that defines initial position and orientation of the SoftBody \param[in] boxDimensions The dimensions (side lengths) of the box shape \param[in] material The material \param[in] cudaContextManager A cuda context manager \param[in] maxEdgeLength The maximal length of a triangle edge. Subdivision will get applied until the edge length criteria is matched. -1 means no subdivision is applied. 
\param[in] density The density used to compute the mass properties \param[in] solverIterationCount The number of iterations the solver should apply during simulation \param[in] femParams Additional parameters to specify e. g. damping \param[in] numVoxelsAlongLongestAABBAxis The number of voxels to use for the simulation mesh along the longest bounding box dimension \param[in] scale The scaling of the SoftBody \return SoftBody instance @see PxSoftBodyMesh, PxSoftBody */ static PxSoftBody* createSoftBodyBox(const PxTransform& transform, const PxVec3& boxDimensions, const PxFEMSoftBodyMaterial& material, PxCudaContextManager& cudaContextManager, PxReal maxEdgeLength = -1.0f, PxReal density = 100.0f, PxU32 solverIterationCount = 30, const PxFEMParameters& femParams = PxFEMParameters(), PxU32 numVoxelsAlongLongestAABBAxis = 10, PxReal scale = 1.0f); /** \brief allocates and initializes pinned host memory buffers from an actor with shape. \param[in] softBody A PxSoftBody that has a valid shape attached to it. \param[in] cudaContextManager The PxCudaContextManager of the scene this soft body will be simulated in \param[in] simPositionInvMassPinned A reference to a pointer for the return value of the simPositionInvMassPinned buffer, will be set by this function. \param[in] simVelocityPinned A reference to a pointer for the return value of the simVelocityPinned buffer, will be set by this function. \param[in] collPositionInvMassPinned A reference to a pointer for the return value of the collPositionInvMassPinned buffer, will be set by this function. \param[in] restPositionPinned A reference to a pointer for the return value of the restPositionPinned buffer, will be set by this function. 
@see PxSoftBody */ static void allocateAndInitializeHostMirror(PxSoftBody& softBody, PxCudaContextManager* cudaContextManager, PxVec4*& simPositionInvMassPinned, PxVec4*& simVelocityPinned, PxVec4*& collPositionInvMassPinned, PxVec4*& restPositionPinned); /** \brief Given a set of points and a set of tetrahedra, it finds the equilibrium state of the softbody. Every input point is either fixed or can move freely. \param[in] verticesOriginal Mesh vertex positions in undeformed original state. \param[in] verticesDeformed Mesh vertex positions in new deformed state. Only fixed vertices must have their final location, all other locations will get updated by the method. \param[in] nbVertices The number of vertices. \param[in] tetrahedra The tetrahedra. \param[in] nbTetraheda The number of tetrahedra. \param[in] vertexIsFixed Optional input that specifies which vertex is fixed and which one can move to relax the tension. If not provided, vertices from verticesOriginal which have a .w value of 0 will be considered fixed. \param[in] numIterations The number of stress relaxation iterations to run. */ static void relaxSoftBodyMesh(const PxVec4* verticesOriginal, PxVec4* verticesDeformed, PxU32 nbVertices, const PxU32* tetrahedra, PxU32 nbTetraheda, const bool* vertexIsFixed = NULL, PxU32 numIterations = 200); }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
15,522
C
59.87451
237
0.799575
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxDefaultCpuDispatcher.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_DEFAULT_CPU_DISPATCHER_H #define PX_DEFAULT_CPU_DISPATCHER_H /** \addtogroup extensions @{ */ #include "common/PxPhysXCommonConfig.h" #include "task/PxCpuDispatcher.h" #if !PX_DOXYGEN namespace physx { #endif /** \brief A default implementation for a CPU task dispatcher. 
@see PxDefaultCpuDispatcherCreate() PxCpuDispatcher */ class PxDefaultCpuDispatcher : public PxCpuDispatcher { public: /** \brief Deletes the dispatcher. Do not keep a reference to the deleted instance. @see PxDefaultCpuDispatcherCreate() */ virtual void release() = 0; /** \brief Enables profiling at task level. \note By default enabled only in profiling builds. \param[in] runProfiled True if tasks should be profiled. */ virtual void setRunProfiled(bool runProfiled) = 0; /** \brief Checks if profiling is enabled at task level. \return True if tasks should be profiled. */ virtual bool getRunProfiled() const = 0; }; /** \brief If a thread ends up waiting for work it will find itself in a spin-wait loop until work becomes available. Three strategies are available to limit wasted cycles. The strategies are as follows: a) wait until a work task signals the end of the spin-wait period. b) yield the thread by providing a hint to reschedule thread execution, thereby allowing other threads to run. c) yield the processor by informing it that it is waiting for work and requesting it to more efficiently use compute resources. */ struct PxDefaultCpuDispatcherWaitForWorkMode { enum Enum { eWAIT_FOR_WORK, eYIELD_THREAD, eYIELD_PROCESSOR }; }; /** \brief Create default dispatcher, extensions SDK needs to be initialized first. \param[in] numThreads Number of worker threads the dispatcher should use. \param[in] affinityMasks Array with affinity mask for each thread. If not defined, default masks will be used. \param[in] mode is the strategy employed when a busy-wait is encountered. 
\param[in] yieldProcessorCount specifies the number of times a OS-specific yield processor command will be executed during each cycle of a busy-wait in the event that the specified mode is eYIELD_PROCESSOR \note numThreads may be zero in which case no worker thread are initialized and simulation tasks will be executed on the thread that calls PxScene::simulate() \note yieldProcessorCount must be greater than zero if eYIELD_PROCESSOR is the chosen mode and equal to zero for all other modes. \note eYIELD_THREAD and eYIELD_PROCESSOR modes will use compute resources even if the simulation is not running. It is left to users to keep threads inactive, if so desired, when no simulation is running. @see PxDefaultCpuDispatcher */ PxDefaultCpuDispatcher* PxDefaultCpuDispatcherCreate(PxU32 numThreads, PxU32* affinityMasks = NULL, PxDefaultCpuDispatcherWaitForWorkMode::Enum mode = PxDefaultCpuDispatcherWaitForWorkMode::eWAIT_FOR_WORK, PxU32 yieldProcessorCount = 0); #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
4,656
C
36.556451
237
0.773196
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxRepXSerializer.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_REPX_SERIALIZER_H #define PX_REPX_SERIALIZER_H /** \addtogroup Serializers @{ */ #include "common/PxBase.h" #include "extensions/PxRepXSimpleType.h" #if !PX_DOXYGEN namespace physx { #endif class XmlMemoryAllocator; class XmlWriter; class XmlReader; class MemoryBuffer; /** \brief Serializer interface for RepX (Xml) serialization. \deprecated Xml serialization is deprecated. An alternative serialization system is provided through USD Physics. In order to serialize a class to RepX both a PxSerializer and a PxRepXSerializer implementation are needed. A repx Serializer provides the ability to capture a live object to a descriptor or static state and the ability to write that state out to a file. Objects allocated by the Serializer using the allocator are freed when the collection itself is freed. SnRepXCoreSerializers.cpp implements a set of Serializers for the core PhysX types. \note Implementing a PxRepXSerializer is currently not practical without including the internal PhysXExtension header "SnRepXSerializerImpl.h". @see PxSerializer, PX_NEW_REPX_SERIALIZER, PxSerializationRegistry::registerRepXSerializer */ class PX_DEPRECATED PxRepXSerializer { protected: virtual ~PxRepXSerializer(){} public: /** \brief The type this Serializer is meant to operate on. @see PxRepXObject::typeName */ virtual const char* getTypeName() = 0; /** \brief Convert from a RepX object to a key-value pair hierarchy \param[in] inLiveObject The object to convert to the passed in descriptor. \param[in] inCollection The collection to use to find ids of references of this object. \param[in] inWriter Interface to write data to. \param[in] inTempBuffer used to for temporary allocations. \param[in] inArgs The arguments used in create resources and objects. */ virtual void objectToFile( const PxRepXObject& inLiveObject, PxCollection* inCollection, XmlWriter& inWriter, MemoryBuffer& inTempBuffer, PxRepXInstantiationArgs& inArgs ) = 0; /** \brief Convert from a descriptor to a live object. 
Must be an object of this Serializer type. \param[in] inReader The inverse of the writer, a key-value pair database. \param[in] inAllocator An allocator to use for temporary allocations. These will be freed after instantiation completes. \param[in] inArgs The arguments used in create resources and objects. \param[in] inCollection The collection used to find references. \return The new live object. It can be an invalid object if the instantiation cannot take place. */ virtual PxRepXObject fileToObject( XmlReader& inReader, XmlMemoryAllocator& inAllocator, PxRepXInstantiationArgs& inArgs, PxCollection* inCollection ) = 0; }; #if !PX_DOXYGEN } // namespace physx #endif /** \brief Inline helper template function to create PxRepXObject from TDataType type supporting PxTypeInfo<TDataType>::name. \deprecated Xml serialization is deprecated. An alternative serialization system is provided through USD Physics. */ template<typename TDataType> PX_DEPRECATED PX_INLINE physx::PxRepXObject PxCreateRepXObject(const TDataType* inType, const physx::PxSerialObjectId inId) { return physx::PxRepXObject(physx::PxTypeInfo<TDataType>::name(), inType, inId); } /** \brief Inline helper function to create PxRepXObject from a PxBase instance. \deprecated Xml serialization is deprecated. An alternative serialization system is provided through USD Physics. */ PX_DEPRECATED PX_INLINE physx::PxRepXObject PxCreateRepXObject(const physx::PxBase* inType, const physx::PxSerialObjectId inId) { PX_ASSERT(inType); return physx::PxRepXObject(inType->getConcreteTypeName(), inType, inId); } /** \brief Inline helper template function to create PxRepXObject form TDataType type using inType pointer as a PxSerialObjectId id. \deprecated Xml serialization is deprecated. An alternative serialization system is provided through USD Physics. 
*/ template<typename TDataType> PX_DEPRECATED PX_INLINE physx::PxRepXObject PxCreateRepXObject(const TDataType* inType) { return PxCreateRepXObject(inType, static_cast<physx::PxSerialObjectId>(size_t(inType))); } /** \brief Preprocessor macro for RepX serializer creation. \deprecated Xml serialization is deprecated. An alternative serialization system is provided through USD Physics. */ #define PX_NEW_REPX_SERIALIZER(T) \ *PX_PLACEMENT_NEW(PxGetAllocatorCallback()->allocate(sizeof(T), "PxRepXSerializer", PX_FL ), T)(*PxGetAllocatorCallback()) /** \brief Preprocessor Macro to simplify RepX serializer delete. \deprecated Xml serialization is deprecated. An alternative serialization system is provided through USD Physics. */ #define PX_DELETE_REPX_SERIALIZER(x) \ { PxRepXSerializer* s = x; if (s) { PxGetAllocatorCallback()->deallocate(s); } } /** @} */ #endif
6,513
C
40.490446
178
0.775986
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxRigidBodyExt.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. 
#ifndef PX_RIGID_BODY_EXT_H #define PX_RIGID_BODY_EXT_H /** \addtogroup extensions @{ */ #include "PxPhysXConfig.h" #include "PxRigidBody.h" #include "PxQueryReport.h" #include "PxQueryFiltering.h" #include "extensions/PxMassProperties.h" #if !PX_DOXYGEN namespace physx { #endif class PxScene; struct PxQueryCache; class PxShape; /** \brief utility functions for use with PxRigidBody and subclasses @see PxRigidBody PxRigidDynamic PxArticulationLink */ class PxRigidBodyExt { public: /** \brief Computation of mass properties for a rigid body actor To simulate a dynamic rigid actor, the SDK needs a mass and an inertia tensor. This method offers functionality to compute the necessary mass and inertia properties based on the shapes declared in the PxRigidBody descriptor and some additionally specified parameters. For each shape, the shape geometry, the shape positioning within the actor and the specified shape density are used to compute the body's mass and inertia properties. <ul> <li>Shapes without PxShapeFlag::eSIMULATION_SHAPE set are ignored unless includeNonSimShapes is true.</li> <li>Shapes with plane, triangle mesh or heightfield geometry and PxShapeFlag::eSIMULATION_SHAPE set are not allowed for PxRigidBody collision.</li> </ul> This method will set the mass, center of mass, and inertia tensor if no collision shapes are found, the inertia tensor is set to (1,1,1) and the mass to 1 if massLocalPose is non-NULL, the rigid body's center of mass parameter will be set to the user provided value (massLocalPose) and the inertia tensor will be resolved at that point. \note If all shapes of the actor have the same density then the overloaded method updateMassAndInertia() with a single density parameter can be used instead. \param[in,out] body The rigid body. \param[in] shapeDensities The per shape densities. There must be one entry for each shape which has the PxShapeFlag::eSIMULATION_SHAPE set (or for all shapes if includeNonSimShapes is set to true). 
Other shapes are ignored. The density values must be greater than 0. \param[in] shapeDensityCount The number of provided density values. \param[in] massLocalPose The center of mass relative to the actor frame. If set to null then (0,0,0) is assumed. \param[in] includeNonSimShapes True if all kind of shapes (PxShapeFlag::eSCENE_QUERY_SHAPE, PxShapeFlag::eTRIGGER_SHAPE) should be taken into account. \return Boolean. True on success else false. @see PxRigidBody::setMassLocalPose PxRigidBody::setMassSpaceInertiaTensor PxRigidBody::setMass */ static bool updateMassAndInertia(PxRigidBody& body, const PxReal* shapeDensities, PxU32 shapeDensityCount, const PxVec3* massLocalPose = NULL, bool includeNonSimShapes = false); /** \brief Computation of mass properties for a rigid body actor See previous method for details. \param[in,out] body The rigid body. \param[in] density The density of the body. Used to compute the mass of the body. The density must be greater than 0. \param[in] massLocalPose The center of mass relative to the actor frame. If set to null then (0,0,0) is assumed. \param[in] includeNonSimShapes True if all kind of shapes (PxShapeFlag::eSCENE_QUERY_SHAPE, PxShapeFlag::eTRIGGER_SHAPE) should be taken into account. \return Boolean. True on success else false. @see PxRigidBody::setMassLocalPose PxRigidBody::setMassSpaceInertiaTensor PxRigidBody::setMass */ static bool updateMassAndInertia(PxRigidBody& body, PxReal density, const PxVec3* massLocalPose = NULL, bool includeNonSimShapes = false); /** \brief Computation of mass properties for a rigid body actor This method sets the mass, inertia and center of mass of a rigid body. The mass is set to the sum of all user-supplied shape mass values, and the inertia and center of mass are computed according to the rigid body's shapes and the per shape mass input values. 
If no collision shapes are found, the inertia tensor is set to (1,1,1) \note If a single mass value should be used for the actor as a whole then the overloaded method setMassAndUpdateInertia() with a single mass parameter can be used instead. @see updateMassAndInertia for more details. \param[in,out] body The rigid body for which to set the mass and centre of mass local pose properties. \param[in] shapeMasses The per shape mass values. There must be one entry for each shape which has the PxShapeFlag::eSIMULATION_SHAPE set. Other shapes are ignored. The mass values must be greater than 0. \param[in] shapeMassCount The number of provided mass values. \param[in] massLocalPose The center of mass relative to the actor frame. If set to null then (0,0,0) is assumed. \param[in] includeNonSimShapes True if all kind of shapes (PxShapeFlag::eSCENE_QUERY_SHAPE, PxShapeFlag::eTRIGGER_SHAPE) should be taken into account. \return Boolean. True on success else false. @see PxRigidBody::setCMassLocalPose PxRigidBody::setMassSpaceInertiaTensor PxRigidBody::setMass */ static bool setMassAndUpdateInertia(PxRigidBody& body, const PxReal* shapeMasses, PxU32 shapeMassCount, const PxVec3* massLocalPose = NULL, bool includeNonSimShapes = false); /** \brief Computation of mass properties for a rigid body actor This method sets the mass, inertia and center of mass of a rigid body. The mass is set to the user-supplied value, and the inertia and center of mass are computed according to the rigid body's shapes and the input mass. If no collision shapes are found, the inertia tensor is set to (1,1,1) @see updateMassAndInertia for more details. \param[in,out] body The rigid body for which to set the mass and centre of mass local pose properties. \param[in] mass The mass of the body. Must be greater than 0. \param[in] massLocalPose The center of mass relative to the actor frame. If set to null then (0,0,0) is assumed. 
\param[in] includeNonSimShapes True if all kind of shapes (PxShapeFlag::eSCENE_QUERY_SHAPE, PxShapeFlag::eTRIGGER_SHAPE) should be taken into account. \return Boolean. True on success else false. @see PxRigidBody::setCMassLocalPose PxRigidBody::setMassSpaceInertiaTensor PxRigidBody::setMass */ static bool setMassAndUpdateInertia(PxRigidBody& body, PxReal mass, const PxVec3* massLocalPose = NULL, bool includeNonSimShapes = false); /** \brief Compute the mass, inertia tensor and center of mass from a list of shapes. \param[in] shapes The shapes to compute the mass properties from. \param[in] shapeCount The number of provided shapes. \return The mass properties from the combined shapes. @see PxRigidBody::setCMassLocalPose PxRigidBody::setMassSpaceInertiaTensor PxRigidBody::setMass */ static PxMassProperties computeMassPropertiesFromShapes(const PxShape* const* shapes, PxU32 shapeCount); /** \brief Applies a force (or impulse) defined in the global coordinate frame, acting at a particular point in global coordinates, to the actor. Note that if the force does not act along the center of mass of the actor, this will also add the corresponding torque. Because forces are reset at the end of every timestep, you can maintain a total external force on an object by calling this once every frame. \note if this call is used to apply a force or impulse to an articulation link, only the link is updated, not the entire articulation ::PxForceMode determines if the force is to be conventional or impulsive. Only eFORCE and eIMPULSE are supported, as the force required to produce a given velocity change or acceleration is underdetermined given only the desired change at a given point. <b>Sleeping:</b> This call wakes the actor if it is sleeping and the wakeup parameter is true (default). \param[in] body The rigid body to apply the force to. \param[in] force Force/impulse to add, defined in the global frame. 
<b>Range:</b> force vector \param[in] pos Position in the global frame to add the force at. <b>Range:</b> position vector \param[in] mode The mode to use when applying the force/impulse(see #PxForceMode). \param[in] wakeup Specify if the call should wake up the actor. @see PxForceMode @see addForceAtLocalPos() addLocalForceAtPos() addLocalForceAtLocalPos() */ static void addForceAtPos(PxRigidBody& body, const PxVec3& force, const PxVec3& pos, PxForceMode::Enum mode = PxForceMode::eFORCE, bool wakeup = true); /** \brief Applies a force (or impulse) defined in the global coordinate frame, acting at a particular point in local coordinates, to the actor. Note that if the force does not act along the center of mass of the actor, this will also add the corresponding torque. Because forces are reset at the end of every timestep, you can maintain a total external force on an object by calling this once every frame. \note if this call is used to apply a force or impulse to an articulation link, only the link is updated, not the entire articulation ::PxForceMode determines if the force is to be conventional or impulsive. Only eFORCE and eIMPULSE are supported, as the force required to produce a given velocity change or acceleration is underdetermined given only the desired change at a given point. <b>Sleeping:</b> This call wakes the actor if it is sleeping and the wakeup parameter is true (default). \param[in] body The rigid body to apply the force to. \param[in] force Force/impulse to add, defined in the global frame. <b>Range:</b> force vector \param[in] pos Position in the local frame to add the force at. <b>Range:</b> position vector \param[in] mode The mode to use when applying the force/impulse(see #PxForceMode). \param[in] wakeup Specify if the call should wake up the actor. 
@see PxForceMode @see addForceAtPos() addLocalForceAtPos() addLocalForceAtLocalPos() */ static void addForceAtLocalPos(PxRigidBody& body, const PxVec3& force, const PxVec3& pos, PxForceMode::Enum mode = PxForceMode::eFORCE, bool wakeup = true); /** \brief Applies a force (or impulse) defined in the actor local coordinate frame, acting at a particular point in global coordinates, to the actor. Note that if the force does not act along the center of mass of the actor, this will also add the corresponding torque. Because forces are reset at the end of every timestep, you can maintain a total external force on an object by calling this once every frame. \note if this call is used to apply a force or impulse to an articulation link, only the link is updated, not the entire articulation ::PxForceMode determines if the force is to be conventional or impulsive. Only eFORCE and eIMPULSE are supported, as the force required to produce a given velocity change or acceleration is underdetermined given only the desired change at a given point. <b>Sleeping:</b> This call wakes the actor if it is sleeping and the wakeup parameter is true (default). \param[in] body The rigid body to apply the force to. \param[in] force Force/impulse to add, defined in the local frame. <b>Range:</b> force vector \param[in] pos Position in the global frame to add the force at. <b>Range:</b> position vector \param[in] mode The mode to use when applying the force/impulse(see #PxForceMode). \param[in] wakeup Specify if the call should wake up the actor. @see PxForceMode @see addForceAtPos() addForceAtLocalPos() addLocalForceAtLocalPos() */ static void addLocalForceAtPos(PxRigidBody& body, const PxVec3& force, const PxVec3& pos, PxForceMode::Enum mode = PxForceMode::eFORCE, bool wakeup = true); /** \brief Applies a force (or impulse) defined in the actor local coordinate frame, acting at a particular point in local coordinates, to the actor. 
Note that if the force does not act along the center of mass of the actor, this will also add the corresponding torque. Because forces are reset at the end of every timestep, you can maintain a total external force on an object by calling this once every frame. \note if this call is used to apply a force or impulse to an articulation link, only the link is updated, not the entire articulation ::PxForceMode determines if the force is to be conventional or impulsive. Only eFORCE and eIMPULSE are supported, as the force required to produce a given velocity change or acceleration is underdetermined given only the desired change at a given point. <b>Sleeping:</b> This call wakes the actor if it is sleeping and the wakeup parameter is true (default). \param[in] body The rigid body to apply the force to. \param[in] force Force/impulse to add, defined in the local frame. <b>Range:</b> force vector \param[in] pos Position in the local frame to add the force at. <b>Range:</b> position vector \param[in] mode The mode to use when applying the force/impulse(see #PxForceMode). \param[in] wakeup Specify if the call should wake up the actor. @see PxForceMode @see addForceAtPos() addForceAtLocalPos() addLocalForceAtPos() */ static void addLocalForceAtLocalPos(PxRigidBody& body, const PxVec3& force, const PxVec3& pos, PxForceMode::Enum mode = PxForceMode::eFORCE, bool wakeup = true); /** \brief Computes the velocity of a point given in world coordinates if it were attached to the specified body and moving with it. \param[in] body The rigid body the point is attached to. \param[in] pos Position we wish to determine the velocity for, defined in the global frame. <b>Range:</b> position vector \return The velocity of point in the global frame. @see getLocalPointVelocity() */ static PxVec3 getVelocityAtPos(const PxRigidBody& body, const PxVec3& pos); /** \brief Computes the velocity of a point given in local coordinates if it were attached to the specified body and moving with it. 
\param[in] body The rigid body the point is attached to. \param[in] pos Position we wish to determine the velocity for, defined in the local frame. <b>Range:</b> position vector \return The velocity of point in the local frame. @see getLocalPointVelocity() */ static PxVec3 getLocalVelocityAtLocalPos(const PxRigidBody& body, const PxVec3& pos); /** \brief Computes the velocity of a point (offset from the origin of the body) given in world coordinates if it were attached to the specified body and moving with it. \param[in] body The rigid body the point is attached to. \param[in] pos Position (offset from the origin of the body) we wish to determine the velocity for, defined in the global frame. <b>Range:</b> position vector \return The velocity of point (offset from the origin of the body) in the global frame. @see getLocalPointVelocity() */ static PxVec3 getVelocityAtOffset(const PxRigidBody& body, const PxVec3& pos); /** \brief Compute the change to linear and angular velocity that would occur if an impulsive force and torque were to be applied to a specified rigid body. The rigid body is left unaffected unless a subsequent independent call is executed that actually applies the computed changes to velocity and angular velocity. \note if this call is used to determine the velocity delta for an articulation link, only the mass properties of the link are taken into account. @see PxRigidBody::getLinearVelocity, PxRigidBody::setLinearVelocity, PxRigidBody::getAngularVelocity, PxRigidBody::setAngularVelocity \param[in] body The body under consideration. \param[in] impulsiveForce The impulsive force that would be applied to the specified rigid body. \param[in] impulsiveTorque The impulsive torque that would be applied to the specified rigid body. \param[out] deltaLinearVelocity The change in linear velocity that would arise if impulsiveForce was to be applied to the specified rigid body. 
\param[out] deltaAngularVelocity The change in angular velocity that would arise if impulsiveTorque was to be applied to the specified rigid body. */ static void computeVelocityDeltaFromImpulse(const PxRigidBody& body, const PxVec3& impulsiveForce, const PxVec3& impulsiveTorque, PxVec3& deltaLinearVelocity, PxVec3& deltaAngularVelocity); /** \brief Computes the linear and angular velocity change vectors for a given impulse at a world space position taking a mass and inertia scale into account This function is useful for extracting the respective linear and angular velocity changes from a contact or joint when the mass/inertia ratios have been adjusted. \note if this call is used to determine the velocity delta for an articulation link, only the mass properties of the link are taken into account. \param[in] body The rigid body \param[in] globalPose The body's world space transform \param[in] point The point in world space where the impulse is applied \param[in] impulse The impulse vector in world space \param[in] invMassScale The inverse mass scale \param[in] invInertiaScale The inverse inertia scale \param[out] deltaLinearVelocity The linear velocity change \param[out] deltaAngularVelocity The angular velocity change */ static void computeVelocityDeltaFromImpulse(const PxRigidBody& body, const PxTransform& globalPose, const PxVec3& point, const PxVec3& impulse, const PxReal invMassScale, const PxReal invInertiaScale, PxVec3& deltaLinearVelocity, PxVec3& deltaAngularVelocity); /** \brief Computes the linear and angular impulse vectors for a given impulse at a world space position taking a mass and inertia scale into account This function is useful for extracting the respective linear and angular impulses from a contact or joint when the mass/inertia ratios have been adjusted. 
\param[in] body The rigid body \param[in] globalPose The body's world space transform \param[in] point The point in world space where the impulse is applied \param[in] impulse The impulse vector in world space \param[in] invMassScale The inverse mass scale \param[in] invInertiaScale The inverse inertia scale \param[out] linearImpulse The linear impulse \param[out] angularImpulse The angular impulse */ static void computeLinearAngularImpulse(const PxRigidBody& body, const PxTransform& globalPose, const PxVec3& point, const PxVec3& impulse, const PxReal invMassScale, const PxReal invInertiaScale, PxVec3& linearImpulse, PxVec3& angularImpulse); /** \brief Performs a linear sweep through space with the body's geometry objects. \note Supported geometries are: box, sphere, capsule, convex. Other geometry types will be ignored. \note If eTOUCH is returned from the filter callback, it will trigger an error and the hit will be discarded. The function sweeps all shapes attached to a given rigid body through space and reports the nearest object in the scene which intersects any of of the shapes swept paths. Information about the closest intersection is written to a #PxSweepHit structure. \param[in] body The rigid body to sweep. \param[in] scene The scene object to process the query. \param[in] unitDir Normalized direction of the sweep. \param[in] distance Sweep distance. Needs to be larger than 0. \param[in] outputFlags Specifies which properties should be written to the hit information. \param[out] closestHit Closest hit result. \param[out] shapeIndex Index of the body shape that caused the closest hit. \param[in] filterData If any word in filterData.data is non-zero then filterData.data will be used for filtering, otherwise shape->getQueryFilterData() will be used instead. \param[in] filterCall Custom filtering logic (optional). Only used if the corresponding #PxQueryFlag flags are set. If NULL, all hits are assumed to be blocking. 
\param[in] cache Cached hit shape (optional). Ray is tested against cached shape first then against the scene. Note: Filtering is not executed for a cached shape if supplied; instead, if a hit is found, it is assumed to be a blocking hit. \param[in] inflation This parameter creates a skin around the swept geometry which increases its extents for sweeping. The sweep will register a hit as soon as the skin touches a shape, and will return the corresponding distance and normal. \return True if a blocking hit was found. @see PxScene PxQueryFlags PxFilterData PxSweepHit */ static bool linearSweepSingle( PxRigidBody& body, PxScene& scene, const PxVec3& unitDir, const PxReal distance, PxHitFlags outputFlags, PxSweepHit& closestHit, PxU32& shapeIndex, const PxQueryFilterData& filterData = PxQueryFilterData(), PxQueryFilterCallback* filterCall = NULL, const PxQueryCache* cache = NULL, const PxReal inflation=0.0f); /** \brief Performs a linear sweep through space with the body's geometry objects, returning all overlaps. \note Supported geometries are: box, sphere, capsule, convex. Other geometry types will be ignored. This function sweeps all shapes attached to a given rigid body through space and reports all objects in the scene that intersect any of the shapes' swept paths until there are no more objects to report or a blocking hit is encountered. \param[in] body The rigid body to sweep. \param[in] scene The scene object to process the query. \param[in] unitDir Normalized direction of the sweep. \param[in] distance Sweep distance. Needs to be larger than 0. \param[in] outputFlags Specifies which properties should be written to the hit information. \param[out] touchHitBuffer Raycast hit information buffer. If the buffer overflows, an arbitrary subset of touch hits is returned (typically the query should be restarted with a larger buffer). 
\param[out] touchHitShapeIndices After the query is completed, touchHitShapeIndices[i] will contain the body index that caused the hit stored in hitBuffer[i] \param[in] touchHitBufferSize Size of both touch hit buffers in elements. \param[out] block Closest blocking hit is returned via this reference. \param[out] blockingShapeIndex Set to -1 if if a blocking hit was not found, otherwise set to closest blocking hit shape index. The touching hits are reported separately in hitBuffer. \param[out] overflow Set to true if touchHitBuffer didn't have enough space for all results. Touch hits will be incomplete if overflow occurred. Possible solution is to restart the query with a larger buffer. \param[in] filterData If any word in filterData.data is non-zero then filterData.data will be used for filtering, otherwise shape->getQueryFilterData() will be used instead. \param[in] filterCall Custom filtering logic (optional). Only used if the corresponding #PxQueryFlag flags are set. If NULL, all hits are assumed to be blocking. \param[in] cache Cached hit shape (optional). Ray is tested against cached shape first then against the scene. Note: Filtering is not executed for a cached shape if supplied; instead, if a hit is found, it is assumed to be a blocking hit. \param[in] inflation This parameter creates a skin around the swept geometry which increases its extents for sweeping. The sweep will register a hit as soon as the skin touches a shape, and will return the corresponding distance and normal. \return the number of touching hits. If overflow is set to true, the results are incomplete. In case of overflow there are also no guarantees that all touching hits returned are closer than the blocking hit. 
@see PxScene PxQueryFlags PxFilterData PxSweepHit */ static PxU32 linearSweepMultiple( PxRigidBody& body, PxScene& scene, const PxVec3& unitDir, const PxReal distance, PxHitFlags outputFlags, PxSweepHit* touchHitBuffer, PxU32* touchHitShapeIndices, PxU32 touchHitBufferSize, PxSweepHit& block, PxI32& blockingShapeIndex, bool& overflow, const PxQueryFilterData& filterData = PxQueryFilterData(), PxQueryFilterCallback* filterCall = NULL, const PxQueryCache* cache = NULL, const PxReal inflation = 0.0f); }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
25,577
C
55.966592
267
0.77382
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxGearJoint.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_GEAR_JOINT_H #define PX_GEAR_JOINT_H /** \addtogroup extensions @{ */ #include "extensions/PxJoint.h" #if !PX_DOXYGEN namespace physx { #endif class PxGearJoint; /** \brief Create a gear Joint. \param[in] physics The physics SDK \param[in] actor0 An actor to which the joint is attached. 
NULL may be used to attach the joint to a specific point in the world frame \param[in] localFrame0 The position and orientation of the joint relative to actor0 \param[in] actor1 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame \param[in] localFrame1 The position and orientation of the joint relative to actor1 @see PxGearJoint */ PxGearJoint* PxGearJointCreate(PxPhysics& physics, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1); /** \brief A joint that connects two existing revolute joints and constrains their relative angular velocity and position with respect to each other. @see PxGearJointCreate PxJoint */ class PxGearJoint : public PxJoint { public: /** \brief Set the hinge/revolute joints connected by the gear joint. The passed joints can be either PxRevoluteJoint, PxD6Joint or PxArticulationJointReducedCoordinate. The joints must define degrees of freedom around the twist axis. Note that these joints are only used to compute the positional error correction term, used to adjust potential drift between jointed actors. The gear joint can run without calling this function, but in that case some visible overlap may develop over time between the teeth of the gear meshes. \note Calling this function resets the internal positional error correction term. \param[in] hinge0 The first hinge joint \param[in] hinge1 The second hinge joint \return true if success */ virtual bool setHinges(const PxBase* hinge0, const PxBase* hinge1) = 0; /** \brief Get the hinge/revolute joints connected by the gear joint. \param[out] hinge0 The first hinge joint \param[out] hinge1 The second hinge joint */ virtual void getHinges(const PxBase*& hinge0, const PxBase*& hinge1) const = 0; /** \brief Set the desired gear ratio. For two gears with n0 and n1 teeth respectively, the gear ratio is n0/n1. 
\note You may need to use a negative gear ratio if the joint frames of involved actors are not oriented in the same direction. \note Calling this function resets the internal positional error correction term. \param[in] ratio Desired ratio between the two hinges. */ virtual void setGearRatio(float ratio) = 0; /** \brief Get the gear ratio. \return Current ratio */ virtual float getGearRatio() const = 0; virtual const char* getConcreteTypeName() const { return "PxGearJoint"; } protected: PX_INLINE PxGearJoint(PxType concreteType, PxBaseFlags baseFlags) : PxJoint(concreteType, baseFlags) {} PX_INLINE PxGearJoint(PxBaseFlags baseFlags) : PxJoint(baseFlags) {} virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxGearJoint", PxJoint); } }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
4,893
C
36.646154
160
0.752708
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxBinaryConverter.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef PX_BINARY_CONVERTER_H #define PX_BINARY_CONVERTER_H /** \addtogroup extensions @{ */ #include "common/PxPhysXCommonConfig.h" #if !PX_DOXYGEN namespace physx { #endif struct PX_DEPRECATED PxConverterReportMode { enum Enum { eNONE, //!< Silent mode. If enabled, no information is sent to the error stream. 
eNORMAL, //!< Normal mode. If enabled, only important information is sent to the error stream. eVERBOSE //!< Verbose mode. If enabled, detailed information is sent to the error stream. }; }; /** \brief Binary converter for serialized streams. \deprecated Binary conversion and binary meta data are deprecated. The binary converter class is targeted at converting binary streams from authoring platforms, such as windows, osx or linux to any game runtime platform supported by PhysX. Particularly it is currently not supported to run the converter on a platforms that has an endian mismatch with the platform corresponding to the source binary file and source meta data. If you want to use multiple threads for batch conversions, please create one instance of this class for each thread. @see PxSerialization.createBinaryConverter */ class PX_DEPRECATED PxBinaryConverter { public: /** \brief Releases binary converter */ virtual void release() = 0; /** \brief Sets desired report mode. \param[in] mode Report mode */ virtual void setReportMode(PxConverterReportMode::Enum mode) = 0; /** \brief Setups source and target meta-data streams The source meta data provided needs to have the same endianness as the platform the converter is run on. The meta data needs to be set before calling the conversion method. \param[in] srcMetaData Source platform's meta-data stream \param[in] dstMetaData Target platform's meta-data stream \return True if success @see PxSerialization::dumpBinaryMetaData */ virtual bool setMetaData(PxInputStream& srcMetaData, PxInputStream& dstMetaData) = 0; /** \brief Test utility function to compare two sets of meta data. The meta data needs to be set before calling the compareMetaData method. This method will issue PxErrorCode::eDEBUG_INFO messages if mismatches are encountered. 
\return True if meta data is equivalend */ virtual bool compareMetaData() const = 0; /** \brief Converts binary stream from source platform to target platform The converter needs to be configured with source and destination meta data before calling the conversion method. The source meta data needs to correspond to the same platform as the source binary data. \param[in] srcStream Source stream \param[in] srcSize Number of bytes to convert \param[in] targetStream Target stream \return True if success */ virtual bool convert(PxInputStream& srcStream, PxU32 srcSize, PxOutputStream& targetStream) = 0; protected: PxBinaryConverter() {} virtual ~PxBinaryConverter() {} }; #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
4,680
C
33.674074
114
0.75812