NVIDIA-Omniverse/PhysX/physx/include/foundation/PxTransform.h

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_TRANSFORM_H
#define PX_TRANSFORM_H
/** \addtogroup foundation
@{
*/
#include "foundation/PxQuat.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/*!
\brief class representing a rigid Euclidean transform as a quaternion and a vector
*/
template<class Type>
class PxTransformT
{
public:
PxQuatT<Type> q;
PxVec3T<Type> p;
PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransformT()
{
}
PX_CUDA_CALLABLE PX_FORCE_INLINE explicit PxTransformT(PxIDENTITY) : q(PxIdentity), p(PxZero)
{
}
PX_CUDA_CALLABLE PX_FORCE_INLINE explicit PxTransformT(const PxVec3T<Type>& position) : q(PxIdentity), p(position)
{
}
PX_CUDA_CALLABLE PX_FORCE_INLINE explicit PxTransformT(const PxQuatT<Type>& orientation) : q(orientation), p(Type(0))
{
PX_ASSERT(orientation.isSane());
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransformT(Type x, Type y, Type z, PxQuatT<Type> aQ = PxQuatT<Type>(PxIdentity)) : q(aQ), p(x, y, z)
{
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransformT(const PxVec3T<Type>& p0, const PxQuatT<Type>& q0) : q(q0), p(p0)
{
PX_ASSERT(q0.isSane());
}
PX_CUDA_CALLABLE PX_FORCE_INLINE explicit PxTransformT(const PxMat44T<Type>& m); // defined in PxMat44.h
PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransformT(const PxTransformT& other)
{
p = other.p;
q = other.q;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE void operator=(const PxTransformT& other)
{
p = other.p;
q = other.q;
}
/**
\brief returns true if the two transforms are exactly equal
*/
PX_CUDA_CALLABLE PX_INLINE bool operator==(const PxTransformT& t) const
{
return p == t.p && q == t.q;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransformT operator*(const PxTransformT& x) const
{
PX_ASSERT(x.isSane());
return transform(x);
}
//! Equals matrix multiplication
PX_CUDA_CALLABLE PX_INLINE PxTransformT& operator*=(const PxTransformT& other)
{
*this = *this * other;
return *this;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransformT getInverse() const
{
PX_ASSERT(isFinite());
return PxTransformT(q.rotateInv(-p), q.getConjugate());
}
/**
\brief return a normalized transform (i.e. one in which the quaternion has unit magnitude)
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransformT getNormalized() const
{
return PxTransformT(p, q.getNormalized());
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> transform(const PxVec3T<Type>& input) const
{
PX_ASSERT(isFinite());
return q.rotate(input) + p;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> transformInv(const PxVec3T<Type>& input) const
{
PX_ASSERT(isFinite());
return q.rotateInv(input - p);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> rotate(const PxVec3T<Type>& input) const
{
PX_ASSERT(isFinite());
return q.rotate(input);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> rotateInv(const PxVec3T<Type>& input) const
{
PX_ASSERT(isFinite());
return q.rotateInv(input);
}
//! Transform transform to parent (returns compound transform: first src, then *this)
PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransformT transform(const PxTransformT& src) const
{
PX_ASSERT(src.isSane());
PX_ASSERT(isSane());
// src = [srct, srcr] -> [r*srct + t, r*srcr]
return PxTransformT(q.rotate(src.p) + p, q * src.q);
}
//! Transform transform from parent (returns compound transform: first src, then this->inverse)
PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransformT transformInv(const PxTransformT& src) const
{
PX_ASSERT(src.isSane());
PX_ASSERT(isFinite());
// src = [srct, srcr] -> [r^-1*(srct-t), r^-1*srcr]
const PxQuatT<Type> qinv = q.getConjugate();
return PxTransformT(qinv.rotate(src.p - p), qinv * src.q);
}
/**
\brief returns true if finite and q is a unit quaternion
*/
PX_CUDA_CALLABLE bool isValid() const
{
return p.isFinite() && q.isFinite() && q.isUnit();
}
/**
\brief returns true if finite and the quat magnitude is reasonably close to unit, allowing for some accumulation of error
(in contrast to isValid)
*/
PX_CUDA_CALLABLE bool isSane() const
{
return isFinite() && q.isSane();
}
/**
\brief returns true if all elems are finite (not NAN or INF, etc.)
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE bool isFinite() const
{
return p.isFinite() && q.isFinite();
}
};
typedef PxTransformT<float> PxTransform;
typedef PxTransformT<double> PxTransformd;
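// Editorial illustrative sketch, not part of the PhysX API: a compact demonstration of how the
// transform class above is meant to be used (composition, point mapping, inverse). The function
// name and all local values are hypothetical.
PX_FORCE_INLINE void PxTransformUsageSketch()
{
	// Parent pose: translated along x and rotated 90 degrees about z.
	const PxTransform parent(PxVec3(1.0f, 0.0f, 0.0f), PxQuat(PxHalfPi, PxVec3(0.0f, 0.0f, 1.0f)));
	// Child pose expressed in the parent frame.
	const PxTransform childLocal(PxVec3(0.0f, 2.0f, 0.0f));

	// World-space pose of the child: first childLocal, then parent (same as parent * childLocal).
	const PxTransform childWorld = parent.transform(childLocal);

	// Map a point from child space to world space and back again.
	const PxVec3 pWorld = childWorld.transform(PxVec3(0.0f, 0.0f, 1.0f));
	const PxVec3 pChild = childWorld.transformInv(pWorld);	// recovers (0, 0, 1) up to rounding
	PX_UNUSED(pChild);

	// Composing with the inverse yields (approximately) the identity transform.
	PX_ASSERT((childWorld * childWorld.getInverse()).isSane());
}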
/*!
\brief A generic padded & aligned transform class.
This can be used for safe, faster loads & stores, and faster address computations
(the default PxTransformT otherwise often generates imuls for address computations). The padding
bytes can be reused to store useful data if needed.
*/
struct PX_ALIGN_PREFIX(16) PxTransformPadded : PxTransform
{
PX_FORCE_INLINE PxTransformPadded()
{
}
PX_FORCE_INLINE PxTransformPadded(const PxTransformPadded& other) : PxTransform(other)
{
}
PX_FORCE_INLINE explicit PxTransformPadded(const PxTransform& other) : PxTransform(other)
{
}
PX_FORCE_INLINE explicit PxTransformPadded(PxIDENTITY) : PxTransform(PxIdentity)
{
}
PX_FORCE_INLINE explicit PxTransformPadded(const PxVec3& position) : PxTransform(position)
{
}
PX_FORCE_INLINE explicit PxTransformPadded(const PxQuat& orientation) : PxTransform(orientation)
{
}
PX_FORCE_INLINE PxTransformPadded(const PxVec3& p0, const PxQuat& q0) : PxTransform(p0, q0)
{
}
PX_FORCE_INLINE void operator=(const PxTransformPadded& other)
{
p = other.p;
q = other.q;
}
PX_FORCE_INLINE void operator=(const PxTransform& other)
{
p = other.p;
q = other.q;
}
PxU32 padding;
}
PX_ALIGN_SUFFIX(16);
PX_COMPILE_TIME_ASSERT(sizeof(PxTransformPadded)==32);
typedef PxTransformPadded PxTransform32;
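// Editorial illustrative sketch, not part of the PhysX API: PxTransform32 is exactly 32 bytes and
// 16-byte aligned (see the compile-time assert above), so array indexing needs no integer multiply,
// and the trailing padding word can carry per-entry user data. The function name and the tag value
// are hypothetical.
PX_FORCE_INLINE void PxTransform32PaddingSketch()
{
	PxTransform32 pose(PxVec3(0.0f, 1.0f, 0.0f));
	pose.padding = 42;	// reuse the padding bytes for a user-defined tag
	const PxTransform& base = pose;	// usable wherever a plain PxTransform is expected
	PX_UNUSED(base);
}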
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxMat33.h

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_MAT33_H
#define PX_MAT33_H
/** \addtogroup foundation
@{
*/
#include "foundation/PxVec3.h"
#include "foundation/PxQuat.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/*!
\brief 3x3 matrix class
Some clarifications, as there has been much confusion about matrix formats etc. in the past.
Short:
- Matrices have their base vectors in columns (vectors are column matrices, i.e. 3x1 matrices).
- Matrices are physically stored in column-major format.
- Matrices are concatenated from the left.
Long:
Given three base vectors a, b and c the matrix is stored as
|a.x b.x c.x|
|a.y b.y c.y|
|a.z b.z c.z|
Vectors are treated as columns, so the vector v is
|x|
|y|
|z|
And matrices are applied _before_ the vector (pre-multiplication)
v' = M*v
|x'| |a.x b.x c.x| |x| |a.x*x + b.x*y + c.x*z|
|y'| = |a.y b.y c.y| * |y| = |a.y*x + b.y*y + c.y*z|
|z'| |a.z b.z c.z| |z| |a.z*x + b.z*y + c.z*z|
Physical storage and indexing:
To be compatible with popular 3d rendering APIs (read D3d and OpenGL)
the physical indexing is
|0 3 6|
|1 4 7|
|2 5 8|
index = column*3 + row
which in C++ translates to M[column][row]
The mathematical indexing is M_row,column, which is what the _-notation uses:
_12 is 1st row, 2nd column. operator()(row, column) follows the same convention.
*/
template<class Type>
class PxMat33T
{
public:
//! Default constructor
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat33T()
{
}
//! identity constructor
PX_CUDA_CALLABLE PX_INLINE PxMat33T(PxIDENTITY) :
column0(Type(1.0), Type(0.0), Type(0.0)),
column1(Type(0.0), Type(1.0), Type(0.0)),
column2(Type(0.0), Type(0.0), Type(1.0))
{
}
//! zero constructor
PX_CUDA_CALLABLE PX_INLINE PxMat33T(PxZERO) :
column0(Type(0.0)),
column1(Type(0.0)),
column2(Type(0.0))
{
}
//! Construct from three base vectors
PX_CUDA_CALLABLE PxMat33T(const PxVec3T<Type>& col0, const PxVec3T<Type>& col1, const PxVec3T<Type>& col2) :
column0(col0),
column1(col1),
column2(col2)
{
}
//! constructor from a scalar, which generates a multiple of the identity matrix
explicit PX_CUDA_CALLABLE PX_INLINE PxMat33T(Type r) :
column0(r, Type(0.0), Type(0.0)),
column1(Type(0.0), r, Type(0.0)),
column2(Type(0.0), Type(0.0), r)
{
}
//! Construct from Type[9]
explicit PX_CUDA_CALLABLE PX_INLINE PxMat33T(Type values[]) :
column0(values[0], values[1], values[2]),
column1(values[3], values[4], values[5]),
column2(values[6], values[7], values[8])
{
}
//! Construct from a quaternion
explicit PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat33T(const PxQuatT<Type>& q)
{
// PT: TODO: PX-566
const Type x = q.x;
const Type y = q.y;
const Type z = q.z;
const Type w = q.w;
const Type x2 = x + x;
const Type y2 = y + y;
const Type z2 = z + z;
const Type xx = x2 * x;
const Type yy = y2 * y;
const Type zz = z2 * z;
const Type xy = x2 * y;
const Type xz = x2 * z;
const Type xw = x2 * w;
const Type yz = y2 * z;
const Type yw = y2 * w;
const Type zw = z2 * w;
column0 = PxVec3T<Type>(Type(1.0) - yy - zz, xy + zw, xz - yw);
column1 = PxVec3T<Type>(xy - zw, Type(1.0) - xx - zz, yz + xw);
column2 = PxVec3T<Type>(xz + yw, yz - xw, Type(1.0) - xx - yy);
}
//! Copy constructor
PX_CUDA_CALLABLE PX_INLINE PxMat33T(const PxMat33T& other) :
column0(other.column0),
column1(other.column1),
column2(other.column2)
{
}
//! Assignment operator
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat33T& operator=(const PxMat33T& other)
{
column0 = other.column0;
column1 = other.column1;
column2 = other.column2;
return *this;
}
//! Construct from diagonal, off-diagonals are zero.
PX_CUDA_CALLABLE PX_INLINE static const PxMat33T createDiagonal(const PxVec3T<Type>& d)
{
return PxMat33T(PxVec3T<Type>(d.x, Type(0.0), Type(0.0)),
PxVec3T<Type>(Type(0.0), d.y, Type(0.0)),
PxVec3T<Type>(Type(0.0), Type(0.0), d.z));
}
//! Computes the outer product of two vectors
PX_CUDA_CALLABLE PX_INLINE static const PxMat33T outer(const PxVec3T<Type>& a, const PxVec3T<Type>& b)
{
return PxMat33T(a * b.x, a * b.y, a * b.z);
}
/**
\brief returns true if the two matrices are exactly equal
*/
PX_CUDA_CALLABLE PX_INLINE bool operator==(const PxMat33T& m) const
{
return column0 == m.column0 && column1 == m.column1 && column2 == m.column2;
}
//! Get transposed matrix
PX_CUDA_CALLABLE PX_FORCE_INLINE const PxMat33T getTranspose() const
{
const PxVec3T<Type> v0(column0.x, column1.x, column2.x);
const PxVec3T<Type> v1(column0.y, column1.y, column2.y);
const PxVec3T<Type> v2(column0.z, column1.z, column2.z);
return PxMat33T(v0, v1, v2);
}
//! Get the real inverse
PX_CUDA_CALLABLE PX_INLINE const PxMat33T getInverse() const
{
const Type det = getDeterminant();
PxMat33T inverse;
if(det != Type(0.0))
{
const Type invDet = Type(1.0) / det;
inverse.column0.x = invDet * (column1.y * column2.z - column2.y * column1.z);
inverse.column0.y = invDet * -(column0.y * column2.z - column2.y * column0.z);
inverse.column0.z = invDet * (column0.y * column1.z - column0.z * column1.y);
inverse.column1.x = invDet * -(column1.x * column2.z - column1.z * column2.x);
inverse.column1.y = invDet * (column0.x * column2.z - column0.z * column2.x);
inverse.column1.z = invDet * -(column0.x * column1.z - column0.z * column1.x);
inverse.column2.x = invDet * (column1.x * column2.y - column1.y * column2.x);
inverse.column2.y = invDet * -(column0.x * column2.y - column0.y * column2.x);
inverse.column2.z = invDet * (column0.x * column1.y - column1.x * column0.y);
return inverse;
}
else
{
return PxMat33T(PxIdentity);
}
}
//! Get determinant
PX_CUDA_CALLABLE PX_INLINE Type getDeterminant() const
{
return column0.dot(column1.cross(column2));
}
//! Unary minus
PX_CUDA_CALLABLE PX_INLINE const PxMat33T operator-() const
{
return PxMat33T(-column0, -column1, -column2);
}
//! Add
PX_CUDA_CALLABLE PX_INLINE const PxMat33T operator+(const PxMat33T& other) const
{
return PxMat33T(column0 + other.column0, column1 + other.column1, column2 + other.column2);
}
//! Subtract
PX_CUDA_CALLABLE PX_INLINE const PxMat33T operator-(const PxMat33T& other) const
{
return PxMat33T(column0 - other.column0, column1 - other.column1, column2 - other.column2);
}
//! Scalar multiplication
PX_CUDA_CALLABLE PX_INLINE const PxMat33T operator*(Type scalar) const
{
return PxMat33T(column0 * scalar, column1 * scalar, column2 * scalar);
}
template<class Type2>
PX_CUDA_CALLABLE PX_INLINE friend PxMat33T<Type2> operator*(Type2, const PxMat33T<Type2>&);
//! Matrix vector multiplication (returns 'this->transform(vec)')
PX_CUDA_CALLABLE PX_INLINE const PxVec3T<Type> operator*(const PxVec3T<Type>& vec) const
{
return transform(vec);
}
// a <op>= b operators
//! Matrix multiplication
PX_CUDA_CALLABLE PX_FORCE_INLINE const PxMat33T operator*(const PxMat33T& other) const
{
// Rows from this <dot> columns from other
// column0 = transform(other.column0) etc
return PxMat33T(transform(other.column0),
transform(other.column1),
transform(other.column2));
}
//! Equals-add
PX_CUDA_CALLABLE PX_INLINE PxMat33T& operator+=(const PxMat33T& other)
{
column0 += other.column0;
column1 += other.column1;
column2 += other.column2;
return *this;
}
//! Equals-sub
PX_CUDA_CALLABLE PX_INLINE PxMat33T& operator-=(const PxMat33T& other)
{
column0 -= other.column0;
column1 -= other.column1;
column2 -= other.column2;
return *this;
}
//! Equals scalar multiplication
PX_CUDA_CALLABLE PX_INLINE PxMat33T& operator*=(Type scalar)
{
column0 *= scalar;
column1 *= scalar;
column2 *= scalar;
return *this;
}
//! Equals matrix multiplication
PX_CUDA_CALLABLE PX_INLINE PxMat33T& operator*=(const PxMat33T& other)
{
*this = *this * other;
return *this;
}
//! Element access, mathematical way!
PX_CUDA_CALLABLE PX_FORCE_INLINE Type operator()(PxU32 row, PxU32 col) const
{
return (*this)[col][row];
}
//! Element access, mathematical way!
PX_CUDA_CALLABLE PX_FORCE_INLINE Type& operator()(PxU32 row, PxU32 col)
{
return (*this)[col][row];
}
// Transform etc
//! Transform vector by matrix, equal to v' = M*v
PX_CUDA_CALLABLE PX_FORCE_INLINE const PxVec3T<Type> transform(const PxVec3T<Type>& other) const
{
return column0 * other.x + column1 * other.y + column2 * other.z;
}
//! Transform vector by matrix transpose, v' = M^t*v
PX_CUDA_CALLABLE PX_INLINE const PxVec3T<Type> transformTranspose(const PxVec3T<Type>& other) const
{
return PxVec3T<Type>(column0.dot(other), column1.dot(other), column2.dot(other));
}
PX_CUDA_CALLABLE PX_FORCE_INLINE const Type* front() const
{
return &column0.x;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type>& operator[](PxU32 num)
{
return (&column0)[num];
}
PX_CUDA_CALLABLE PX_FORCE_INLINE const PxVec3T<Type>& operator[](PxU32 num) const
{
return (&column0)[num];
}
// Data, see above for format!
PxVec3T<Type> column0, column1, column2; // the three base vectors
};
template<class Type>
PX_CUDA_CALLABLE PX_INLINE PxMat33T<Type> operator*(Type scalar, const PxMat33T<Type>& m)
{
return PxMat33T<Type>(scalar * m.column0, scalar * m.column1, scalar * m.column2);
}
// implementation from PxQuat.h
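// Editorial note: assuming m is a pure rotation matrix, the branches below guarantee t > 1
// (t equals 4*c^2, where c is the quaternion component computed directly in that branch), so the
// division by PxSqrt(t) is always well conditioned.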
template<class Type>
PX_CUDA_CALLABLE PX_INLINE PxQuatT<Type>::PxQuatT(const PxMat33T<Type>& m)
{
if(m.column2.z < Type(0))
{
if(m.column0.x > m.column1.y)
{
const Type t = Type(1.0) + m.column0.x - m.column1.y - m.column2.z;
*this = PxQuatT<Type>(t, m.column0.y + m.column1.x, m.column2.x + m.column0.z, m.column1.z - m.column2.y) * (Type(0.5) / PxSqrt(t));
}
else
{
const Type t = Type(1.0) - m.column0.x + m.column1.y - m.column2.z;
*this = PxQuatT<Type>(m.column0.y + m.column1.x, t, m.column1.z + m.column2.y, m.column2.x - m.column0.z) * (Type(0.5) / PxSqrt(t));
}
}
else
{
if(m.column0.x < -m.column1.y)
{
const Type t = Type(1.0) - m.column0.x - m.column1.y + m.column2.z;
*this = PxQuatT<Type>(m.column2.x + m.column0.z, m.column1.z + m.column2.y, t, m.column0.y - m.column1.x) * (Type(0.5) / PxSqrt(t));
}
else
{
const Type t = Type(1.0) + m.column0.x + m.column1.y + m.column2.z;
*this = PxQuatT<Type>(m.column1.z - m.column2.y, m.column2.x - m.column0.z, m.column0.y - m.column1.x, t) * (Type(0.5) / PxSqrt(t));
}
}
}
typedef PxMat33T<float> PxMat33;
typedef PxMat33T<double> PxMat33d;
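// Editorial illustrative sketch, not part of the PhysX API: a worked example of the storage and
// indexing convention documented in the class comment above. The function name is hypothetical.
PX_FORCE_INLINE void PxMat33IndexingSketch()
{
	const PxMat33 m(PxVec3(1.0f, 2.0f, 3.0f),	// column0 = a
	                PxVec3(4.0f, 5.0f, 6.0f),	// column1 = b
	                PxVec3(7.0f, 8.0f, 9.0f));	// column2 = c

	// Mathematical element _12 (1st row, 2nd column) is b.x, i.e. 4 ...
	const PxReal m12 = m(0, 1);
	// ... and the same element through the column-major operator[] is m[column][row].
	PX_ASSERT(m12 == m[1][0]);
	PX_UNUSED(m12);

	// v' = M*v pre-multiplies the column vector v; with v = (1,0,0) this simply selects column0.
	const PxVec3 vPrime = m * PxVec3(1.0f, 0.0f, 0.0f);	// (1, 2, 3)
	PX_ASSERT(vPrime == m.transform(PxVec3(1.0f, 0.0f, 0.0f)));
	PX_UNUSED(vPrime);
}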
/**
\brief Sets a rotation matrix around the X axis.
\param m [out] output rotation matrix
\param angle [in] desired angle
*/
PX_INLINE void PxSetRotX(PxMat33& m, PxReal angle)
{
m = PxMat33(PxIdentity);
PxReal sin, cos;
PxSinCos(angle, sin, cos);
m[1][1] = m[2][2] = cos;
m[1][2] = sin;
m[2][1] = -sin;
}
/**
\brief Sets a rotation matrix around the Y axis.
\param m [out] output rotation matrix
\param angle [in] desired angle
*/
PX_INLINE void PxSetRotY(PxMat33& m, PxReal angle)
{
m = PxMat33(PxIdentity);
PxReal sin, cos;
PxSinCos(angle, sin, cos);
m[0][0] = m[2][2] = cos;
m[0][2] = -sin;
m[2][0] = sin;
}
/**
\brief Sets a rotation matrix around the Z axis.
\param m [out] output rotation matrix
\param angle [in] desired angle
*/
PX_INLINE void PxSetRotZ(PxMat33& m, PxReal angle)
{
m = PxMat33(PxIdentity);
PxReal sin, cos;
PxSinCos(angle, sin, cos);
m[0][0] = m[1][1] = cos;
m[0][1] = sin;
m[1][0] = -sin;
}
/**
\brief Returns a rotation quaternion around the X axis.
\param angle [in] desired angle
\return Quaternion that rotates around the desired axis
*/
PX_INLINE PxQuat PxGetRotXQuat(float angle)
{
PxMat33 m;
PxSetRotX(m, angle);
return PxQuat(m);
}
/**
\brief Returns a rotation quaternion around the Y axis.
\param angle [in] desired angle
\return Quaternion that rotates around the desired axis
*/
PX_INLINE PxQuat PxGetRotYQuat(float angle)
{
PxMat33 m;
PxSetRotY(m, angle);
return PxQuat(m);
}
/**
\brief Returns a rotation quaternion around the Z axis.
\param angle [in] desired angle
\return Quaternion that rotates around the desired axis
*/
PX_INLINE PxQuat PxGetRotZQuat(float angle)
{
PxMat33 m;
PxSetRotZ(m, angle);
return PxQuat(m);
}
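// Editorial illustrative sketch, not part of the PhysX API: the PxSetRot* helpers fill an existing
// matrix in place, while the PxGetRot*Quat helpers return the equivalent quaternion. Rotating a
// point on the y axis by +90 degrees about x moves it onto the z axis (up to rounding). The
// function name is hypothetical.
PX_INLINE void PxRotationHelperSketch()
{
	PxMat33 rx;
	PxSetRotX(rx, PxHalfPi);
	const PxVec3 rotated = rx.transform(PxVec3(0.0f, 1.0f, 0.0f));	// ~(0, 0, 1)
	PX_UNUSED(rotated);

	const PxQuat qx = PxGetRotXQuat(PxHalfPi);
	PX_ASSERT(qx.rotate(PxVec3(0.0f, 1.0f, 0.0f)).z > 0.99f);
	PX_UNUSED(qx);
}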
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxVecTransform.h

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_VEC_TRANSFORM_H
#define PX_VEC_TRANSFORM_H
#include "foundation/PxVecMath.h"
#include "foundation/PxTransform.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
namespace aos
{
class PxTransformV
{
public:
QuatV q;
Vec3V p;
PX_FORCE_INLINE PxTransformV(const PxTransform& orientation)
{
// const PxQuat oq = orientation.q;
// const PxF32 f[4] = {oq.x, oq.y, oq.z, oq.w};
q = QuatVLoadXYZW(orientation.q.x, orientation.q.y, orientation.q.z, orientation.q.w);
// q = QuatV_From_F32Array(&oq.x);
p = V3LoadU(orientation.p);
}
PX_FORCE_INLINE PxTransformV(const Vec3VArg p0 = V3Zero(), const QuatVArg q0 = QuatIdentity()) : q(q0), p(p0)
{
PX_ASSERT(isSaneQuatV(q0));
}
PX_FORCE_INLINE PxTransformV operator*(const PxTransformV& x) const
{
PX_ASSERT(x.isSane());
return transform(x);
}
PX_FORCE_INLINE PxTransformV getInverse() const
{
PX_ASSERT(isFinite());
// return PxTransform(q.rotateInv(-p),q.getConjugate());
return PxTransformV(QuatRotateInv(q, V3Neg(p)), QuatConjugate(q));
}
PX_FORCE_INLINE void normalize()
{
p = V3Zero();
q = QuatIdentity();
}
PX_FORCE_INLINE void invalidate()
{
p = V3Splat(FMax());
q = QuatIdentity();
}
PX_FORCE_INLINE Vec3V transform(const Vec3VArg input) const
{
PX_ASSERT(isFinite());
// return q.rotate(input) + p;
return QuatTransform(q, p, input);
}
PX_FORCE_INLINE Vec3V transformInv(const Vec3VArg input) const
{
PX_ASSERT(isFinite());
// return q.rotateInv(input-p);
return QuatRotateInv(q, V3Sub(input, p));
}
PX_FORCE_INLINE Vec3V rotate(const Vec3VArg input) const
{
PX_ASSERT(isFinite());
// return q.rotate(input);
return QuatRotate(q, input);
}
PX_FORCE_INLINE Vec3V rotateInv(const Vec3VArg input) const
{
PX_ASSERT(isFinite());
// return q.rotateInv(input);
return QuatRotateInv(q, input);
}
//! Transform transform to parent (returns compound transform: first src, then *this)
PX_FORCE_INLINE PxTransformV transform(const PxTransformV& src) const
{
PX_ASSERT(src.isSane());
PX_ASSERT(isSane());
// src = [srct, srcr] -> [r*srct + t, r*srcr]
// return PxTransform(q.rotate(src.p) + p, q*src.q);
return PxTransformV(V3Add(QuatRotate(q, src.p), p), QuatMul(q, src.q));
}
#if PX_LINUX && PX_CLANG
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wbitwise-instead-of-logical" // bitwise intentionally chosen for performance
#endif
/**
\brief returns true if finite and q is a unit quaternion
*/
PX_FORCE_INLINE bool isValid() const
{
// return p.isFinite() && q.isFinite() && q.isValid();
return isFiniteVec3V(p) & isFiniteQuatV(q) & isValidQuatV(q);
}
/**
\brief returns true if finite and the quat magnitude is reasonably close to unit, allowing for some accumulation of error
(in contrast to isValid)
*/
PX_FORCE_INLINE bool isSane() const
{
// return isFinite() && q.isSane();
return isFinite() & isSaneQuatV(q);
}
/**
\brief returns true if all elems are finite (not NAN or INF, etc.)
*/
PX_FORCE_INLINE bool isFinite() const
{
// return p.isFinite() && q.isFinite();
return isFiniteVec3V(p) & isFiniteQuatV(q);
}
#if PX_LINUX && PX_CLANG
#pragma clang diagnostic pop
#endif
//! Transform transform from parent (returns compound transform: first src, then this->inverse)
PX_FORCE_INLINE PxTransformV transformInv(const PxTransformV& src) const
{
PX_ASSERT(src.isSane());
PX_ASSERT(isFinite());
// src = [srct, srcr] -> [r^-1*(srct-t), r^-1*srcr]
/*PxQuat qinv = q.getConjugate();
return PxTransform(qinv.rotate(src.p - p), qinv*src.q);*/
const QuatV qinv = QuatConjugate(q);
const Vec3V v = QuatRotate(qinv, V3Sub(src.p, p));
const QuatV rot = QuatMul(qinv, src.q);
return PxTransformV(v, rot);
}
static PX_FORCE_INLINE PxTransformV createIdentity()
{
return PxTransformV(V3Zero());
}
};
PX_FORCE_INLINE PxTransformV loadTransformA(const PxTransform& transform)
{
const QuatV q0 = QuatVLoadA(&transform.q.x);
const Vec3V p0 = V3LoadA(&transform.p.x);
return PxTransformV(p0, q0);
}
PX_FORCE_INLINE PxTransformV loadTransformU(const PxTransform& transform)
{
const QuatV q0 = QuatVLoadU(&transform.q.x);
const Vec3V p0 = V3LoadU(&transform.p.x);
return PxTransformV(p0, q0);
}
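// Editorial illustrative sketch, not part of the PhysX API: moving between the scalar and SIMD
// representations - load a PxTransform into a PxTransformV, transform a point, then store the
// result back with the Vec3V store helpers from PxVecMath.h. The unaligned variants are used, so
// the scalar data needs no particular alignment. The function name is hypothetical.
PX_FORCE_INLINE void pxTransformVUsageSketch(const PxTransform& scalarPose, PxVec3& result)
{
	const PxTransformV simdPose = loadTransformU(scalarPose);
	const Vec3V localPoint = V3LoadU(PxVec3(1.0f, 0.0f, 0.0f));
	const Vec3V worldPoint = simdPose.transform(localPoint);
	V3StoreU(worldPoint, result);
}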
class PxMatTransformV
{
public:
Mat33V rot;
Vec3V p;
PX_FORCE_INLINE PxMatTransformV()
{
p = V3Zero();
rot = M33Identity();
}
PX_FORCE_INLINE PxMatTransformV(const Vec3VArg _p, const Mat33V& _rot)
{
p = _p;
rot = _rot;
}
PX_FORCE_INLINE PxMatTransformV(const PxTransformV& other)
{
p = other.p;
QuatGetMat33V(other.q, rot.col0, rot.col1, rot.col2);
}
PX_FORCE_INLINE PxMatTransformV(const Vec3VArg _p, const QuatV& quat)
{
p = _p;
QuatGetMat33V(quat, rot.col0, rot.col1, rot.col2);
}
PX_FORCE_INLINE Vec3V getCol0() const
{
return rot.col0;
}
PX_FORCE_INLINE Vec3V getCol1() const
{
return rot.col1;
}
PX_FORCE_INLINE Vec3V getCol2() const
{
return rot.col2;
}
PX_FORCE_INLINE void setCol0(const Vec3VArg col0)
{
rot.col0 = col0;
}
PX_FORCE_INLINE void setCol1(const Vec3VArg col1)
{
rot.col1 = col1;
}
PX_FORCE_INLINE void setCol2(const Vec3VArg col2)
{
rot.col2 = col2;
}
PX_FORCE_INLINE Vec3V transform(const Vec3VArg input) const
{
return V3Add(p, M33MulV3(rot, input));
}
PX_FORCE_INLINE Vec3V transformInv(const Vec3VArg input) const
{
return M33TrnspsMulV3(rot, V3Sub(input, p)); // QuatRotateInv(q, V3Sub(input, p));
}
PX_FORCE_INLINE Vec3V rotate(const Vec3VArg input) const
{
return M33MulV3(rot, input);
}
PX_FORCE_INLINE Vec3V rotateInv(const Vec3VArg input) const
{
return M33TrnspsMulV3(rot, input);
}
PX_FORCE_INLINE PxMatTransformV transformInv(const PxMatTransformV& src) const
{
const Vec3V v = M33TrnspsMulV3(rot, V3Sub(src.p, p));
const Mat33V mat = M33MulM33(M33Trnsps(rot), src.rot);
return PxMatTransformV(v, mat);
}
};
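// Editorial note (illustration only): PxMatTransformV stores the rotation as an explicit Mat33V,
// which is convenient when the same pose is applied to many points and the quaternion-to-matrix
// conversion should only be paid once, e.g.
//
//     const PxTransformV poseQ = loadTransformU(pose);  // quaternion form ('pose' is any PxTransform)
//     const PxMatTransformV poseM(poseQ);               // matrix form, conversion done here
//     const Vec3V p0 = poseM.transform(V3LoadU(point)); // 'point' is any PxVec3
//     const Vec3V p1 = poseM.transformInv(p0);          // maps back to ~point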
}
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxMath.h

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_MATH_H
#define PX_MATH_H
/** \addtogroup foundation
@{
*/
#include "foundation/PxPreprocessor.h"
#if PX_VC
#pragma warning(push)
#pragma warning(disable : 4985) // 'symbol name': attributes not present on previous declaration
#endif
#include <math.h>
#if PX_VC
#pragma warning(pop)
#endif
#if (PX_LINUX_FAMILY && !PX_ARM_FAMILY)
// Force linking against nothing newer than glibc v2.17 to remain compatible with platforms with older glibc versions
__asm__(".symver expf,expf@GLIBC_2.2.5");
__asm__(".symver powf,powf@GLIBC_2.2.5");
#endif
#include <float.h>
#include "foundation/PxMathIntrinsics.h"
#include "foundation/PxAssert.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
// constants
static const float PxPi = float(3.141592653589793);
static const float PxHalfPi = float(1.57079632679489661923);
static const float PxTwoPi = float(6.28318530717958647692);
static const float PxInvPi = float(0.31830988618379067154);
static const float PxInvTwoPi = float(0.15915494309189533577);
static const float PxPiDivTwo = float(1.57079632679489661923);
static const float PxPiDivFour = float(0.78539816339744830962);
static const float PxSqrt2 = float(1.4142135623730951);
static const float PxInvSqrt2 = float(0.7071067811865476);
/**
\brief The return value is the greater of the two specified values.
*/
template <class T>
PX_CUDA_CALLABLE PX_FORCE_INLINE T PxMax(T a, T b)
{
return a < b ? b : a;
}
//! overload for float to use fsel on xbox
template <>
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxMax(float a, float b)
{
return intrinsics::selectMax(a, b);
}
/**
\brief The return value is the lesser of the two specified values.
*/
template <class T>
PX_CUDA_CALLABLE PX_FORCE_INLINE T PxMin(T a, T b)
{
return a < b ? a : b;
}
template <>
//! overload for float to use fsel on xbox
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxMin(float a, float b)
{
return intrinsics::selectMin(a, b);
}
/*
Many of these are just implemented as PX_CUDA_CALLABLE PX_FORCE_INLINE calls to the C lib right now,
but later we could replace some of them with some approximations or more
clever stuff.
*/
/**
\brief abs returns the absolute value of its argument.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxAbs(float a)
{
return intrinsics::abs(a);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxEquals(float a, float b, float eps)
{
return (PxAbs(a - b) < eps);
}
/**
\brief abs returns the absolute value of its argument.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE double PxAbs(double a)
{
return ::fabs(a);
}
/**
\brief abs returns the absolute value of its argument.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE int32_t PxAbs(int32_t a)
{
return ::abs(a);
}
/**
\brief Clamps v to the range [hi,lo]
*/
template <class T>
PX_CUDA_CALLABLE PX_FORCE_INLINE T PxClamp(T v, T lo, T hi)
{
PX_ASSERT(lo <= hi);
return PxMin(hi, PxMax(lo, v));
}
//! \brief Square root.
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxSqrt(float a)
{
return intrinsics::sqrt(a);
}
//! \brief Square root.
PX_CUDA_CALLABLE PX_FORCE_INLINE double PxSqrt(double a)
{
return ::sqrt(a);
}
//! \brief reciprocal square root.
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxRecipSqrt(float a)
{
return intrinsics::recipSqrt(a);
}
//! \brief reciprocal square root.
PX_CUDA_CALLABLE PX_FORCE_INLINE double PxRecipSqrt(double a)
{
return 1 / ::sqrt(a);
}
//! \brief square of the argument
PX_CUDA_CALLABLE PX_FORCE_INLINE PxF32 PxSqr(const PxF32 a)
{
return a * a;
}
//! trigonometry -- all angles are in radians.
//! \brief Sine of an angle ( <b>Unit:</b> Radians )
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxSin(float a)
{
return intrinsics::sin(a);
}
//! \brief Sine of an angle ( <b>Unit:</b> Radians )
PX_CUDA_CALLABLE PX_FORCE_INLINE double PxSin(double a)
{
return ::sin(a);
}
//! \brief Cosine of an angle (<b>Unit:</b> Radians)
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxCos(float a)
{
return intrinsics::cos(a);
}
//! \brief Cosine of an angle (<b>Unit:</b> Radians)
PX_CUDA_CALLABLE PX_FORCE_INLINE double PxCos(double a)
{
return ::cos(a);
}
//! \brief compute sine and cosine at the same time
PX_CUDA_CALLABLE PX_FORCE_INLINE void PxSinCos(const PxF32 a, PxF32& sin, PxF32& cos)
{
#if defined(__CUDACC__) && __CUDA_ARCH__ >= 350
__sincosf(a, &sin, &cos);
#else
sin = PxSin(a);
cos = PxCos(a);
#endif
}
//! \brief compute sine and cosine at the same time
PX_CUDA_CALLABLE PX_FORCE_INLINE void PxSinCos(const double a, double& sin, double& cos)
{
sin = PxSin(a);
cos = PxCos(a);
}
/**
\brief Tangent of an angle.
<b>Unit:</b> Radians
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxTan(float a)
{
return ::tanf(a);
}
/**
\brief Tangent of an angle.
<b>Unit:</b> Radians
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE double PxTan(double a)
{
return ::tan(a);
}
/**
\brief Arcsine.
Returns angle between -PI/2 and PI/2 in radians
<b>Unit:</b> Radians
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxAsin(float f)
{
return ::asinf(PxClamp(f, -1.0f, 1.0f));
}
/**
\brief Arcsine.
Returns angle between -PI/2 and PI/2 in radians
<b>Unit:</b> Radians
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE double PxAsin(double f)
{
return ::asin(PxClamp(f, -1.0, 1.0));
}
/**
\brief Arccosine.
Returns angle between 0 and PI in radians
<b>Unit:</b> Radians
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxAcos(float f)
{
return ::acosf(PxClamp(f, -1.0f, 1.0f));
}
/**
\brief Arccosine.
Returns angle between 0 and PI in radians
<b>Unit:</b> Radians
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE double PxAcos(double f)
{
return ::acos(PxClamp(f, -1.0, 1.0));
}
/**
\brief ArcTangent.
Returns angle between -PI/2 and PI/2 in radians
<b>Unit:</b> Radians
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxAtan(float a)
{
return ::atanf(a);
}
/**
\brief ArcTangent.
Returns angle between -PI/2 and PI/2 in radians
<b>Unit:</b> Radians
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE double PxAtan(double a)
{
return ::atan(a);
}
/**
\brief Arctangent of (x/y) with correct sign.
Returns angle between -PI and PI in radians
<b>Unit:</b> Radians
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxAtan2(float x, float y)
{
return ::atan2f(x, y);
}
/**
\brief Arctangent of (x/y) with correct sign.
Returns angle between -PI and PI in radians
<b>Unit:</b> Radians
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE double PxAtan2(double x, double y)
{
return ::atan2(x, y);
}
/**
\brief Converts degrees to radians.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxF32 PxDegToRad(const PxF32 a)
{
return 0.01745329251994329547f * a;
}
//! \brief returns true if the passed number is a finite floating point number as opposed to INF, NAN, etc.
PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxIsFinite(float f)
{
return intrinsics::isFinite(f);
}
//! \brief returns true if the passed number is a finite floating point number as opposed to INF, NAN, etc.
PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxIsFinite(double f)
{
return intrinsics::isFinite(f);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxFloor(float a)
{
return ::floorf(a);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxExp(float a)
{
return ::expf(a);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxCeil(float a)
{
return ::ceilf(a);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxSign(float a)
{
return physx::intrinsics::sign(a);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxSign2(float a, float eps = FLT_EPSILON)
{
return (a < -eps) ? -1.0f : (a > eps) ? 1.0f : 0.0f;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxPow(float x, float y)
{
return ::powf(x, y);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxLog(float x)
{
return ::logf(x);
}
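// Editorial illustrative sketch, not part of the PhysX API: a few of the helpers above in action.
// The numeric expectations are approximate because of floating-point rounding; the function name
// is hypothetical.
PX_FORCE_INLINE void PxMathHelpersSketch()
{
	const float angle = PxDegToRad(90.0f);	// ~PxHalfPi
	float s, c;
	PxSinCos(angle, s, c);	// s ~ 1, c ~ 0
	PX_ASSERT(PxEquals(s, 1.0f, 1.0e-5f));
	PX_UNUSED(s);
	PX_UNUSED(c);

	const float clamped = PxClamp(1.5f, -1.0f, 1.0f);	// 1.0f
	PX_ASSERT(PxAcos(clamped) < 1.0e-6f);	// PxAcos also clamps its input internally
	PX_UNUSED(clamped);
}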
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxAoS.h

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_AOS_H
#define PX_AOS_H
#include "foundation/Px.h"
#if PX_WINDOWS && !PX_NEON
#include "windows/PxWindowsAoS.h"
#elif(PX_UNIX_FAMILY || PX_SWITCH)
#include "unix/PxUnixAoS.h"
#else
#error "Platform not supported!"
#endif
#endif
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxVecMath.h

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_VEC_MATH_H
#define PX_VEC_MATH_H
#include "foundation/Px.h"
#include "foundation/PxIntrinsics.h"
#include "foundation/PxVec3.h"
#include "foundation/PxVec4.h"
#include "foundation/PxMat33.h"
#include "foundation/PxUnionCast.h"
// We can opt to use the scalar version of vectorised functions.
// This can catch type safety issues and might even turn out to be more efficient on PC.
// It will also be useful for benchmarking and testing.
// NEVER submit with vector intrinsics deactivated without good reason.
// AM: deactivating SIMD for debug win64 just so autobuild will also exercise
// non-SIMD path, until a dedicated non-SIMD platform such as Arm comes online.
// TODO: dima: reference all platforms with SIMD support here,
// all unknown/experimental cases should better default to NO SIMD.
// enable/disable SIMD
#if !defined(PX_SIMD_DISABLED)
#if PX_INTEL_FAMILY && (!defined(__EMSCRIPTEN__) || defined(__SSE2__))
#define COMPILE_VECTOR_INTRINSICS 1
#elif PX_SWITCH
#define COMPILE_VECTOR_INTRINSICS 1
#else
#define COMPILE_VECTOR_INTRINSICS 0
#endif
#else
#define COMPILE_VECTOR_INTRINSICS 0
#endif
#if COMPILE_VECTOR_INTRINSICS && PX_INTEL_FAMILY && PX_UNIX_FAMILY
// only SSE2 compatible platforms should reach this
#if PX_EMSCRIPTEN
#include <emmintrin.h>
#endif
#include <xmmintrin.h>
#endif
#if COMPILE_VECTOR_INTRINSICS
#include "PxAoS.h"
#else
#include "PxVecMathAoSScalar.h"
#endif
#if !PX_DOXYGEN
namespace physx
{
#endif
namespace aos
{
// Basic AoS types are
// FloatV - 16-byte aligned representation of float.
// Vec3V - 16-byte aligned representation of PxVec3 stored as (x y z 0).
// Vec4V - 16-byte aligned representation of vector of 4 floats stored as (x y z w).
// BoolV - 16-byte aligned representation of vector of 4 bools stored as (x y z w).
// VecU32V - 16-byte aligned representation of 4 unsigned ints stored as (x y z w).
// VecI32V - 16-byte aligned representation of 4 signed ints stored as (x y z w).
// Mat33V - 16-byte aligned representation of any 3x3 matrix.
// Mat34V - 16-byte aligned representation of transformation matrix (rotation in col1,col2,col3 and translation in
// col4).
// Mat44V - 16-byte aligned representation of any 4x4 matrix.
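// Editorial sketch of the intended usage pattern (illustration only; each call maps to an intrinsic
// on SIMD targets and to the scalar fallback otherwise). It assumes only the load/store and
// arithmetic helpers declared below:
//
//     const Vec3V a = V3LoadU(PxVec3(1.0f, 2.0f, 3.0f));   // (1, 2, 3, 0)
//     const Vec3V b = V3LoadU(PxVec3(4.0f, 5.0f, 6.0f));   // (4, 5, 6, 0)
//     const Vec3V sum = V3Add(a, b);                       // (5, 7, 9, 0)
//     const FloatV dot = V3Dot(a, b);                      // the dot product, 32
//     PxF32 dotScalar;
//     FStore(dot, &dotScalar);                             // dotScalar == 32
//     PxVec3 sumScalar;
//     V3StoreU(sum, sumScalar);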
//////////////////////////////////////////
// Construct a simd type from a scalar type
//////////////////////////////////////////
// FloatV
//(f,f,f,f)
PX_FORCE_INLINE FloatV FLoad(const PxF32 f);
// Vec3V
//(f,f,f,0)
PX_FORCE_INLINE Vec3V V3Load(const PxF32 f);
//(f.x,f.y,f.z,0)
PX_FORCE_INLINE Vec3V V3LoadU(const PxVec3& f);
//(f.x,f.y,f.z,0), f must be 16-byte aligned
PX_FORCE_INLINE Vec3V V3LoadA(const PxVec3& f);
//(f.x,f.y,f.z,w_undefined), f must be 16-byte aligned
PX_FORCE_INLINE Vec3V V3LoadUnsafeA(const PxVec3& f);
//(f.x,f.y,f.z,0)
PX_FORCE_INLINE Vec3V V3LoadU(const PxF32* f);
//(f.x,f.y,f.z,0), f must be 16-byte aligned
PX_FORCE_INLINE Vec3V V3LoadA(const PxF32* f);
// Vec4V
//(f,f,f,f)
PX_FORCE_INLINE Vec4V V4Load(const PxF32 f);
//(f[0],f[1],f[2],f[3])
PX_FORCE_INLINE Vec4V V4LoadU(const PxF32* const f);
//(f[0],f[1],f[2],f[3]), f must be 16-byte aligned
PX_FORCE_INLINE Vec4V V4LoadA(const PxF32* const f);
//(x,y,z,w)
PX_FORCE_INLINE Vec4V V4LoadXYZW(const PxF32& x, const PxF32& y, const PxF32& z, const PxF32& w);
// BoolV
//(f,f,f,f)
PX_FORCE_INLINE BoolV BLoad(const bool f);
//(f[0],f[1],f[2],f[3])
PX_FORCE_INLINE BoolV BLoad(const bool* const f);
// VecU32V
//(f,f,f,f)
PX_FORCE_INLINE VecU32V U4Load(const PxU32 f);
//(f[0],f[1],f[2],f[3])
PX_FORCE_INLINE VecU32V U4LoadU(const PxU32* f);
//(f[0],f[1],f[2],f[3]), f must be 16-byte aligned
PX_FORCE_INLINE VecU32V U4LoadA(const PxU32* f);
//((U32)x, (U32)y, (U32)z, (U32)w)
PX_FORCE_INLINE VecU32V U4LoadXYZW(PxU32 x, PxU32 y, PxU32 z, PxU32 w);
// VecI32V
//(i,i,i,i)
PX_FORCE_INLINE VecI32V I4Load(const PxI32 i);
//(i,i,i,i)
PX_FORCE_INLINE VecI32V I4LoadU(const PxI32* i);
//(i,i,i,i)
PX_FORCE_INLINE VecI32V I4LoadA(const PxI32* i);
// QuatV
//(x = v[0], y = v[1], z = v[2], w = v[3]); the array does not need to be aligned
PX_FORCE_INLINE QuatV QuatVLoadU(const PxF32* v);
//(x = v[0], y = v[1], z = v[2], w = v[3]); the array must be 16-byte aligned (fast load)
PX_FORCE_INLINE QuatV QuatVLoadA(const PxF32* v);
//(x, y, z, w)
PX_FORCE_INLINE QuatV QuatVLoadXYZW(const PxF32 x, const PxF32 y, const PxF32 z, const PxF32 w);
// not added to public api
Vec4V Vec4V_From_PxVec3_WUndefined(const PxVec3& v);
///////////////////////////////////////////////////
// Construct a simd type from a different simd type
///////////////////////////////////////////////////
// Vec3V
//(v.x,v.y,v.z,0)
PX_FORCE_INLINE Vec3V Vec3V_From_Vec4V(Vec4V v);
//(v.x,v.y,v.z,undefined) - be very careful with w!=0, because many functions require w==0 for correct operation, e.g. V3Dot, V3Length, V3Cross, etc.
PX_FORCE_INLINE Vec3V Vec3V_From_Vec4V_WUndefined(const Vec4V v);
// Vec4V
//(f.x,f.y,f.z,f.w)
PX_FORCE_INLINE Vec4V Vec4V_From_Vec3V(Vec3V f);
//((PxF32)f.x, (PxF32)f.y, (PxF32)f.z, (PxF32)f.w)
PX_FORCE_INLINE Vec4V Vec4V_From_VecU32V(VecU32V a);
//((PxF32)f.x, (PxF32)f.y, (PxF32)f.z, (PxF32)f.w)
PX_FORCE_INLINE Vec4V Vec4V_From_VecI32V(VecI32V a);
//(*(reinterpret_cast<PxF32*>(&f.x), (reinterpret_cast<PxF32*>(&f.y), (reinterpret_cast<PxF32*>(&f.z),
//(reinterpret_cast<PxF32*>(&f.w))
PX_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecU32V(VecU32V a);
//(*(reinterpret_cast<PxF32*>(&f.x), (reinterpret_cast<PxF32*>(&f.y), (reinterpret_cast<PxF32*>(&f.z),
//(reinterpret_cast<PxF32*>(&f.w))
PX_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecI32V(VecI32V a);
// VecU32V
//(*(reinterpret_cast<PxU32*>(&f.x), (reinterpret_cast<PxU32*>(&f.y), (reinterpret_cast<PxU32*>(&f.z),
//(reinterpret_cast<PxU32*>(&f.w))
PX_FORCE_INLINE VecU32V VecU32V_ReinterpretFrom_Vec4V(Vec4V a);
//(b[0], b[1], b[2], b[3])
PX_FORCE_INLINE VecU32V VecU32V_From_BoolV(const BoolVArg b);
// VecI32V
//(*(reinterpret_cast<PxI32*>(&f.x), (reinterpret_cast<PxI32*>(&f.y), (reinterpret_cast<PxI32*>(&f.z),
//(reinterpret_cast<PxI32*>(&f.w))
PX_FORCE_INLINE VecI32V VecI32V_ReinterpretFrom_Vec4V(Vec4V a);
//((I32)a.x, (I32)a.y, (I32)a.z, (I32)a.w)
PX_FORCE_INLINE VecI32V VecI32V_From_Vec4V(Vec4V a);
//((I32)b.x, (I32)b.y, (I32)b.z, (I32)b.w)
PX_FORCE_INLINE VecI32V VecI32V_From_BoolV(const BoolVArg b);
///////////////////////////////////////////////////
// Convert from a simd type back to a scalar type
///////////////////////////////////////////////////
// FloatV
// a.x
PX_FORCE_INLINE void FStore(const FloatV a, PxF32* PX_RESTRICT f);
// Vec3V
//(a.x,a.y,a.z)
PX_FORCE_INLINE void V3StoreA(const Vec3V a, PxVec3& f);
//(a.x,a.y,a.z)
PX_FORCE_INLINE void V3StoreU(const Vec3V a, PxVec3& f);
// Vec4V
PX_FORCE_INLINE void V4StoreA(const Vec4V a, PxF32* f);
PX_FORCE_INLINE void V4StoreU(const Vec4V a, PxF32* f);
// BoolV
PX_FORCE_INLINE void BStoreA(const BoolV b, PxU32* f);
// VecU32V
PX_FORCE_INLINE void U4StoreA(const VecU32V uv, PxU32* u);
// VecI32V
PX_FORCE_INLINE void I4StoreA(const VecI32V iv, PxI32* i);
//////////////////////////////////////////////////////////////////
// Test that simd types have elements in the floating point range
//////////////////////////////////////////////////////////////////
// check for each component is valid ie in floating point range
PX_FORCE_INLINE bool isFiniteFloatV(const FloatV a);
// check for each component is valid ie in floating point range
PX_FORCE_INLINE bool isFiniteVec3V(const Vec3V a);
// check for each component is valid ie in floating point range
PX_FORCE_INLINE bool isFiniteVec4V(const Vec4V a);
// Check that w-component is zero.
PX_FORCE_INLINE bool isValidVec3V(const Vec3V a);
//////////////////////////////////////////////////////////////////
// Tests that all elements of two 16-byte types are completely equivalent.
// Use these tests for unit testing and asserts only.
//////////////////////////////////////////////////////////////////
namespace vecMathTests
{
PX_FORCE_INLINE Vec3V getInvalidVec3V();
PX_FORCE_INLINE bool allElementsEqualFloatV(const FloatV a, const FloatV b);
PX_FORCE_INLINE bool allElementsEqualVec3V(const Vec3V a, const Vec3V b);
PX_FORCE_INLINE bool allElementsEqualVec4V(const Vec4V a, const Vec4V b);
PX_FORCE_INLINE bool allElementsEqualBoolV(const BoolV a, const BoolV b);
PX_FORCE_INLINE bool allElementsEqualVecU32V(const VecU32V a, const VecU32V b);
PX_FORCE_INLINE bool allElementsEqualVecI32V(const VecI32V a, const VecI32V b);
PX_FORCE_INLINE bool allElementsEqualMat33V(const Mat33V& a, const Mat33V& b)
{
return (allElementsEqualVec3V(a.col0, b.col0) && allElementsEqualVec3V(a.col1, b.col1) &&
allElementsEqualVec3V(a.col2, b.col2));
}
PX_FORCE_INLINE bool allElementsEqualMat34V(const Mat34V& a, const Mat34V& b)
{
return (allElementsEqualVec3V(a.col0, b.col0) && allElementsEqualVec3V(a.col1, b.col1) &&
allElementsEqualVec3V(a.col2, b.col2) && allElementsEqualVec3V(a.col3, b.col3));
}
PX_FORCE_INLINE bool allElementsEqualMat44V(const Mat44V& a, const Mat44V& b)
{
return (allElementsEqualVec4V(a.col0, b.col0) && allElementsEqualVec4V(a.col1, b.col1) &&
allElementsEqualVec4V(a.col2, b.col2) && allElementsEqualVec4V(a.col3, b.col3));
}
PX_FORCE_INLINE bool allElementsNearEqualFloatV(const FloatV a, const FloatV b);
PX_FORCE_INLINE bool allElementsNearEqualVec3V(const Vec3V a, const Vec3V b);
PX_FORCE_INLINE bool allElementsNearEqualVec4V(const Vec4V a, const Vec4V b);
PX_FORCE_INLINE bool allElementsNearEqualMat33V(const Mat33V& a, const Mat33V& b)
{
return (allElementsNearEqualVec3V(a.col0, b.col0) && allElementsNearEqualVec3V(a.col1, b.col1) &&
allElementsNearEqualVec3V(a.col2, b.col2));
}
PX_FORCE_INLINE bool allElementsNearEqualMat34V(const Mat34V& a, const Mat34V& b)
{
return (allElementsNearEqualVec3V(a.col0, b.col0) && allElementsNearEqualVec3V(a.col1, b.col1) &&
allElementsNearEqualVec3V(a.col2, b.col2) && allElementsNearEqualVec3V(a.col3, b.col3));
}
PX_FORCE_INLINE bool allElementsNearEqualMat44V(const Mat44V& a, const Mat44V& b)
{
return (allElementsNearEqualVec4V(a.col0, b.col0) && allElementsNearEqualVec4V(a.col1, b.col1) &&
allElementsNearEqualVec4V(a.col2, b.col2) && allElementsNearEqualVec4V(a.col3, b.col3));
}
}
//////////////////////////////////////////////////////////////////
// Math operations on FloatV
//////////////////////////////////////////////////////////////////
//(0,0,0,0)
PX_FORCE_INLINE FloatV FZero();
//(1,1,1,1)
PX_FORCE_INLINE FloatV FOne();
//(0.5,0.5,0.5,0.5)
PX_FORCE_INLINE FloatV FHalf();
//(PX_EPS_REAL,PX_EPS_REAL,PX_EPS_REAL,PX_EPS_REAL)
PX_FORCE_INLINE FloatV FEps();
//! @cond
//(PX_MAX_REAL, PX_MAX_REAL, PX_MAX_REAL PX_MAX_REAL)
PX_FORCE_INLINE FloatV FMax();
//! @endcond
//(-PX_MAX_REAL, -PX_MAX_REAL, -PX_MAX_REAL -PX_MAX_REAL)
PX_FORCE_INLINE FloatV FNegMax();
//(1e-6f, 1e-6f, 1e-6f, 1e-6f)
PX_FORCE_INLINE FloatV FEps6();
//((PxF32*)&1, (PxF32*)&1, (PxF32*)&1, (PxF32*)&1)
//-f (per component)
PX_FORCE_INLINE FloatV FNeg(const FloatV f);
// a+b (per component)
PX_FORCE_INLINE FloatV FAdd(const FloatV a, const FloatV b);
// a-b (per component)
PX_FORCE_INLINE FloatV FSub(const FloatV a, const FloatV b);
// a*b (per component)
PX_FORCE_INLINE FloatV FMul(const FloatV a, const FloatV b);
// a/b (per component)
PX_FORCE_INLINE FloatV FDiv(const FloatV a, const FloatV b);
// a/b (per component)
PX_FORCE_INLINE FloatV FDivFast(const FloatV a, const FloatV b);
// 1.0f/a
PX_FORCE_INLINE FloatV FRecip(const FloatV a);
// 1.0f/a
PX_FORCE_INLINE FloatV FRecipFast(const FloatV a);
// 1.0f/sqrt(a)
PX_FORCE_INLINE FloatV FRsqrt(const FloatV a);
// 1.0f/sqrt(a)
PX_FORCE_INLINE FloatV FRsqrtFast(const FloatV a);
// sqrt(a)
PX_FORCE_INLINE FloatV FSqrt(const FloatV a);
// a*b+c
PX_FORCE_INLINE FloatV FScaleAdd(const FloatV a, const FloatV b, const FloatV c);
// c-a*b
PX_FORCE_INLINE FloatV FNegScaleSub(const FloatV a, const FloatV b, const FloatV c);
// fabs(a)
PX_FORCE_INLINE FloatV FAbs(const FloatV a);
// c ? a : b (per component)
PX_FORCE_INLINE FloatV FSel(const BoolV c, const FloatV a, const FloatV b);
// a>b (per component)
PX_FORCE_INLINE BoolV FIsGrtr(const FloatV a, const FloatV b);
// a>=b (per component)
PX_FORCE_INLINE BoolV FIsGrtrOrEq(const FloatV a, const FloatV b);
// a==b (per component)
PX_FORCE_INLINE BoolV FIsEq(const FloatV a, const FloatV b);
// Max(a,b) (per component)
PX_FORCE_INLINE FloatV FMax(const FloatV a, const FloatV b);
// Min(a,b) (per component)
PX_FORCE_INLINE FloatV FMin(const FloatV a, const FloatV b);
// Clamp(a,b) (per component)
PX_FORCE_INLINE FloatV FClamp(const FloatV a, const FloatV minV, const FloatV maxV);
// a.x>b.x
PX_FORCE_INLINE PxU32 FAllGrtr(const FloatV a, const FloatV b);
// a.x>=b.x
PX_FORCE_INLINE PxU32 FAllGrtrOrEq(const FloatV a, const FloatV b);
// a.x==b.x
PX_FORCE_INLINE PxU32 FAllEq(const FloatV a, const FloatV b);
// a<min || a>max
PX_FORCE_INLINE PxU32 FOutOfBounds(const FloatV a, const FloatV min, const FloatV max);
// a>=min && a<=max
PX_FORCE_INLINE PxU32 FInBounds(const FloatV a, const FloatV min, const FloatV max);
// a<-bounds || a>bounds
PX_FORCE_INLINE PxU32 FOutOfBounds(const FloatV a, const FloatV bounds);
// a>=-bounds && a<=bounds
PX_FORCE_INLINE PxU32 FInBounds(const FloatV a, const FloatV bounds);
// round float a to the nearest int
PX_FORCE_INLINE FloatV FRound(const FloatV a);
// calculate the sin of float a
PX_FORCE_INLINE FloatV FSin(const FloatV a);
// calculate the cos of float a
PX_FORCE_INLINE FloatV FCos(const FloatV a);
//////////////////////////////////////////////////////////////////
// Math operations on Vec3V
//////////////////////////////////////////////////////////////////
//(f,f,f,f)
PX_FORCE_INLINE Vec3V V3Splat(const FloatV f);
//(x,y,z)
PX_FORCE_INLINE Vec3V V3Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z);
//(1,0,0,0)
PX_FORCE_INLINE Vec3V V3UnitX();
//(0,1,0,0)
PX_FORCE_INLINE Vec3V V3UnitY();
//(0,0,1,0)
PX_FORCE_INLINE Vec3V V3UnitZ();
//(f.x,f.x,f.x,f.x)
PX_FORCE_INLINE FloatV V3GetX(const Vec3V f);
//(f.y,f.y,f.y,f.y)
PX_FORCE_INLINE FloatV V3GetY(const Vec3V f);
//(f.z,f.z,f.z,f.z)
PX_FORCE_INLINE FloatV V3GetZ(const Vec3V f);
//(f,v.y,v.z,v.w)
PX_FORCE_INLINE Vec3V V3SetX(const Vec3V v, const FloatV f);
//(v.x,f,v.z,v.w)
PX_FORCE_INLINE Vec3V V3SetY(const Vec3V v, const FloatV f);
//(v.x,v.y,f,v.w)
PX_FORCE_INLINE Vec3V V3SetZ(const Vec3V v, const FloatV f);
// v.x=f
PX_FORCE_INLINE void V3WriteX(Vec3V& v, const PxF32 f);
// v.y=f
PX_FORCE_INLINE void V3WriteY(Vec3V& v, const PxF32 f);
// v.z=f
PX_FORCE_INLINE void V3WriteZ(Vec3V& v, const PxF32 f);
// v.x=f.x, v.y=f.y, v.z=f.z
PX_FORCE_INLINE void V3WriteXYZ(Vec3V& v, const PxVec3& f);
// return v.x
PX_FORCE_INLINE PxF32 V3ReadX(const Vec3V& v);
// return v.y
PX_FORCE_INLINE PxF32 V3ReadY(const Vec3V& v);
// return v.y
PX_FORCE_INLINE PxF32 V3ReadZ(const Vec3V& v);
// return (v.x,v.y,v.z)
PX_FORCE_INLINE const PxVec3& V3ReadXYZ(const Vec3V& v);
//(a.x, b.x, c.x)
PX_FORCE_INLINE Vec3V V3ColX(const Vec3V a, const Vec3V b, const Vec3V c);
//(a.y, b.y, c.y)
PX_FORCE_INLINE Vec3V V3ColY(const Vec3V a, const Vec3V b, const Vec3V c);
//(a.z, b.z, c.z)
PX_FORCE_INLINE Vec3V V3ColZ(const Vec3V a, const Vec3V b, const Vec3V c);
//(0,0,0,0)
PX_FORCE_INLINE Vec3V V3Zero();
//(1,1,1,1)
PX_FORCE_INLINE Vec3V V3One();
//(PX_EPS_REAL,PX_EPS_REAL,PX_EPS_REAL,PX_EPS_REAL)
PX_FORCE_INLINE Vec3V V3Eps();
//-c (per component)
PX_FORCE_INLINE Vec3V V3Neg(const Vec3V c);
// a+b (per component)
PX_FORCE_INLINE Vec3V V3Add(const Vec3V a, const Vec3V b);
// a-b (per component)
PX_FORCE_INLINE Vec3V V3Sub(const Vec3V a, const Vec3V b);
// a*b (per component)
PX_FORCE_INLINE Vec3V V3Scale(const Vec3V a, const FloatV b);
// a*b (per component)
PX_FORCE_INLINE Vec3V V3Mul(const Vec3V a, const Vec3V b);
// a/b (per component)
PX_FORCE_INLINE Vec3V V3ScaleInv(const Vec3V a, const FloatV b);
// a/b (per component)
PX_FORCE_INLINE Vec3V V3Div(const Vec3V a, const Vec3V b);
// a/b (per component)
PX_FORCE_INLINE Vec3V V3ScaleInvFast(const Vec3V a, const FloatV b);
// a/b (per component)
PX_FORCE_INLINE Vec3V V3DivFast(const Vec3V a, const Vec3V b);
// 1.0f/a
PX_FORCE_INLINE Vec3V V3Recip(const Vec3V a);
// 1.0f/a
PX_FORCE_INLINE Vec3V V3RecipFast(const Vec3V a);
// 1.0f/sqrt(a)
PX_FORCE_INLINE Vec3V V3Rsqrt(const Vec3V a);
// 1.0f/sqrt(a)
PX_FORCE_INLINE Vec3V V3RsqrtFast(const Vec3V a);
// a*b+c
PX_FORCE_INLINE Vec3V V3ScaleAdd(const Vec3V a, const FloatV b, const Vec3V c);
// c-a*b
PX_FORCE_INLINE Vec3V V3NegScaleSub(const Vec3V a, const FloatV b, const Vec3V c);
// a*b+c
PX_FORCE_INLINE Vec3V V3MulAdd(const Vec3V a, const Vec3V b, const Vec3V c);
// c-a*b
PX_FORCE_INLINE Vec3V V3NegMulSub(const Vec3V a, const Vec3V b, const Vec3V c);
// fabs(a)
PX_FORCE_INLINE Vec3V V3Abs(const Vec3V a);
// a.b
// Note: a.w and b.w must have value zero
PX_FORCE_INLINE FloatV V3Dot(const Vec3V a, const Vec3V b);
// aXb
// Note: a.w and b.w must have value zero
PX_FORCE_INLINE Vec3V V3Cross(const Vec3V a, const Vec3V b);
// |a.a|^1/2
// Note: a.w must have value zero
PX_FORCE_INLINE FloatV V3Length(const Vec3V a);
// a.a
// Note: a.w must have value zero
PX_FORCE_INLINE FloatV V3LengthSq(const Vec3V a);
// a*|a.a|^-1/2
// Note: a.w must have value zero
PX_FORCE_INLINE Vec3V V3Normalize(const Vec3V a);
// a.a>0 ? a*|a.a|^-1/2 : (0,0,0,0)
// Note: a.w must have value zero
PX_FORCE_INLINE Vec3V V3NormalizeFast(const Vec3V a);
// a.a>0 ? a*|a.a|^-1/2 : unsafeReturnValue
// Note: a.w must have value zero
PX_FORCE_INLINE Vec3V V3NormalizeSafe(const Vec3V a, const Vec3V unsafeReturnValue);
// a.x + a.y + a.z
// Note: a.w must have value zero
PX_FORCE_INLINE FloatV V3SumElems(const Vec3V a);
// c ? a : b (per component)
PX_FORCE_INLINE Vec3V V3Sel(const BoolV c, const Vec3V a, const Vec3V b);
// a>b (per component)
PX_FORCE_INLINE BoolV V3IsGrtr(const Vec3V a, const Vec3V b);
// a>=b (per component)
PX_FORCE_INLINE BoolV V3IsGrtrOrEq(const Vec3V a, const Vec3V b);
// a==b (per component)
PX_FORCE_INLINE BoolV V3IsEq(const Vec3V a, const Vec3V b);
// Max(a,b) (per component)
PX_FORCE_INLINE Vec3V V3Max(const Vec3V a, const Vec3V b);
// Min(a,b) (per component)
PX_FORCE_INLINE Vec3V V3Min(const Vec3V a, const Vec3V b);
// Extract the maximum value from a
// Note: a.w must have value zero
PX_FORCE_INLINE FloatV V3ExtractMax(const Vec3V a);
// Extract the minimum value from a
// Note: a.w must have value zero
PX_FORCE_INLINE FloatV V3ExtractMin(const Vec3V a);
// Clamp(a, minV, maxV) (per component)
PX_FORCE_INLINE Vec3V V3Clamp(const Vec3V a, const Vec3V minV, const Vec3V maxV);
// Extract the sign for each component
PX_FORCE_INLINE Vec3V V3Sign(const Vec3V a);
// Test all components.
// (a.x>b.x && a.y>b.y && a.z>b.z)
// Note: a.w and b.w must have value zero
PX_FORCE_INLINE PxU32 V3AllGrtr(const Vec3V a, const Vec3V b);
// (a.x>=b.x && a.y>=b.y && a.z>=b.z)
// Note: a.w and b.w must have value zero
PX_FORCE_INLINE PxU32 V3AllGrtrOrEq(const Vec3V a, const Vec3V b);
// (a.x==b.x && a.y==b.y && a.z==b.z)
// Note: a.w and b.w must have value zero
PX_FORCE_INLINE PxU32 V3AllEq(const Vec3V a, const Vec3V b);
// a.x<min.x || a.y<min.y || a.z<min.z || a.x>max.x || a.y>max.y || a.z>max.z
// Note: a.w and min.w and max.w must have value zero
PX_FORCE_INLINE PxU32 V3OutOfBounds(const Vec3V a, const Vec3V min, const Vec3V max);
// a.x>=min.x && a.y>=min.y && a.z>=min.z && a.x<=max.x && a.y<=max.y && a.z<=max.z
// Note: a.w and min.w and max.w must have value zero
PX_FORCE_INLINE PxU32 V3InBounds(const Vec3V a, const Vec3V min, const Vec3V max);
// a.x<-bounds.x || a.y<-bounds.y || a.z<-bounds.z || a.x>bounds.x || a.y>bounds.y || a.z>bounds.z
// Note: a.w and bounds.w must have value zero
PX_FORCE_INLINE PxU32 V3OutOfBounds(const Vec3V a, const Vec3V bounds);
// a.x>=-bounds.x && a.y>=-bounds.y && a.z>=-bounds.z && a.x<=bounds.x && a.y<=bounds.y && a.z<=bounds.z
// Note: a.w and bounds.w must have value zero
PX_FORCE_INLINE PxU32 V3InBounds(const Vec3V a, const Vec3V bounds);
//(floor(a.x + 0.5f), floor(a.y + 0.5f), floor(a.z + 0.5f))
PX_FORCE_INLINE Vec3V V3Round(const Vec3V a);
//(sinf(a.x), sinf(a.y), sinf(a.z))
PX_FORCE_INLINE Vec3V V3Sin(const Vec3V a);
//(cosf(a.x), cosf(a.y), cosf(a.z))
PX_FORCE_INLINE Vec3V V3Cos(const Vec3V a);
//(a.y,a.z,a.z)
PX_FORCE_INLINE Vec3V V3PermYZZ(const Vec3V a);
//(a.x,a.y,a.x)
PX_FORCE_INLINE Vec3V V3PermXYX(const Vec3V a);
//(a.y,a.z,a.x)
PX_FORCE_INLINE Vec3V V3PermYZX(const Vec3V a);
//(a.z, a.x, a.y)
PX_FORCE_INLINE Vec3V V3PermZXY(const Vec3V a);
//(a.z,a.z,a.y)
PX_FORCE_INLINE Vec3V V3PermZZY(const Vec3V a);
//(a.y,a.x,a.x)
PX_FORCE_INLINE Vec3V V3PermYXX(const Vec3V a);
//(0, v1.z, v0.y)
PX_FORCE_INLINE Vec3V V3Perm_Zero_1Z_0Y(const Vec3V v0, const Vec3V v1);
//(v0.z, 0, v1.x)
PX_FORCE_INLINE Vec3V V3Perm_0Z_Zero_1X(const Vec3V v0, const Vec3V v1);
//(v1.y, v0.x, 0)
PX_FORCE_INLINE Vec3V V3Perm_1Y_0X_Zero(const Vec3V v0, const Vec3V v1);
// Transpose 3 Vec3Vs inplace. Sets the w component to zero
// [ x0, y0, z0, w0] [ x1, y1, z1, w1] [ x2, y2, z2, w2] -> [x0 x1 x2 0] [y0 y1 y2 0] [z0 z1 z2 0]
PX_FORCE_INLINE void V3Transpose(Vec3V& col0, Vec3V& col1, Vec3V& col2);
//////////////////////////////////////////////////////////////////
// Math operations on Vec4V
//////////////////////////////////////////////////////////////////
//(f,f,f,f)
PX_FORCE_INLINE Vec4V V4Splat(const FloatV f);
//(f[0],f[1],f[2],f[3])
PX_FORCE_INLINE Vec4V V4Merge(const FloatV* const f);
//(x,y,z,w)
PX_FORCE_INLINE Vec4V V4Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z, const FloatVArg w);
//(x.w, y.w, z.w, w.w)
PX_FORCE_INLINE Vec4V V4MergeW(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w);
//(x.z, y.z, z.z, w.z)
PX_FORCE_INLINE Vec4V V4MergeZ(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w);
//(x.y, y.y, z.y, w.y)
PX_FORCE_INLINE Vec4V V4MergeY(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w);
//(x.x, y.x, z.x, w.x)
PX_FORCE_INLINE Vec4V V4MergeX(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w);
//(a.x, b.x, a.y, b.y)
PX_FORCE_INLINE Vec4V V4UnpackXY(const Vec4VArg a, const Vec4VArg b);
//(a.z, b.z, a.w, b.w)
PX_FORCE_INLINE Vec4V V4UnpackZW(const Vec4VArg a, const Vec4VArg b);
//(1,0,0,0)
PX_FORCE_INLINE Vec4V V4UnitX();
//(0,1,0,0)
PX_FORCE_INLINE Vec4V V4UnitY();
//(0,0,1,0)
PX_FORCE_INLINE Vec4V V4UnitZ();
//(0,0,0,1)
PX_FORCE_INLINE Vec4V V4UnitW();
//(f.x,f.x,f.x,f.x)
PX_FORCE_INLINE FloatV V4GetX(const Vec4V f);
//(f.y,f.y,f.y,f.y)
PX_FORCE_INLINE FloatV V4GetY(const Vec4V f);
//(f.z,f.z,f.z,f.z)
PX_FORCE_INLINE FloatV V4GetZ(const Vec4V f);
//(f.w,f.w,f.w,f.w)
PX_FORCE_INLINE FloatV V4GetW(const Vec4V f);
//(f,v.y,v.z,v.w)
PX_FORCE_INLINE Vec4V V4SetX(const Vec4V v, const FloatV f);
//(v.x,f,v.z,v.w)
PX_FORCE_INLINE Vec4V V4SetY(const Vec4V v, const FloatV f);
//(v.x,v.y,f,v.w)
PX_FORCE_INLINE Vec4V V4SetZ(const Vec4V v, const FloatV f);
//(v.x,v.y,v.z,f)
PX_FORCE_INLINE Vec4V V4SetW(const Vec4V v, const FloatV f);
//(v.x,v.y,v.z,0)
PX_FORCE_INLINE Vec4V V4ClearW(const Vec4V v);
//(a[elementIndex], a[elementIndex], a[elementIndex], a[elementIndex])
template <int elementIndex>
PX_FORCE_INLINE Vec4V V4SplatElement(Vec4V a);
// v.x=f
PX_FORCE_INLINE void V4WriteX(Vec4V& v, const PxF32 f);
// v.y=f
PX_FORCE_INLINE void V4WriteY(Vec4V& v, const PxF32 f);
// v.z=f
PX_FORCE_INLINE void V4WriteZ(Vec4V& v, const PxF32 f);
// v.w=f
PX_FORCE_INLINE void V4WriteW(Vec4V& v, const PxF32 f);
// v.x=f.x, v.y=f.y, v.z=f.z
PX_FORCE_INLINE void V4WriteXYZ(Vec4V& v, const PxVec3& f);
// return v.x
PX_FORCE_INLINE PxF32 V4ReadX(const Vec4V& v);
// return v.y
PX_FORCE_INLINE PxF32 V4ReadY(const Vec4V& v);
// return v.z
PX_FORCE_INLINE PxF32 V4ReadZ(const Vec4V& v);
// return v.w
PX_FORCE_INLINE PxF32 V4ReadW(const Vec4V& v);
// return (v.x,v.y,v.z)
PX_FORCE_INLINE const PxVec3& V4ReadXYZ(const Vec4V& v);
//(0,0,0,0)
PX_FORCE_INLINE Vec4V V4Zero();
//(1,1,1,1)
PX_FORCE_INLINE Vec4V V4One();
//(PX_EPS_REAL,PX_EPS_REAL,PX_EPS_REAL,PX_EPS_REAL)
PX_FORCE_INLINE Vec4V V4Eps();
//-c (per component)
PX_FORCE_INLINE Vec4V V4Neg(const Vec4V c);
// a+b (per component)
PX_FORCE_INLINE Vec4V V4Add(const Vec4V a, const Vec4V b);
// a-b (per component)
PX_FORCE_INLINE Vec4V V4Sub(const Vec4V a, const Vec4V b);
// a*b (per component)
PX_FORCE_INLINE Vec4V V4Scale(const Vec4V a, const FloatV b);
// a*b (per component)
PX_FORCE_INLINE Vec4V V4Mul(const Vec4V a, const Vec4V b);
// a/b (per component)
PX_FORCE_INLINE Vec4V V4ScaleInv(const Vec4V a, const FloatV b);
// a/b (per component)
PX_FORCE_INLINE Vec4V V4Div(const Vec4V a, const Vec4V b);
// a/b (per component)
PX_FORCE_INLINE Vec4V V4ScaleInvFast(const Vec4V a, const FloatV b);
// a/b (per component)
PX_FORCE_INLINE Vec4V V4DivFast(const Vec4V a, const Vec4V b);
// 1.0f/a
PX_FORCE_INLINE Vec4V V4Recip(const Vec4V a);
// 1.0f/a
PX_FORCE_INLINE Vec4V V4RecipFast(const Vec4V a);
// 1.0f/sqrt(a)
PX_FORCE_INLINE Vec4V V4Rsqrt(const Vec4V a);
// 1.0f/sqrt(a)
PX_FORCE_INLINE Vec4V V4RsqrtFast(const Vec4V a);
// a*b+c
PX_FORCE_INLINE Vec4V V4ScaleAdd(const Vec4V a, const FloatV b, const Vec4V c);
// c-a*b
PX_FORCE_INLINE Vec4V V4NegScaleSub(const Vec4V a, const FloatV b, const Vec4V c);
// a*b+c
PX_FORCE_INLINE Vec4V V4MulAdd(const Vec4V a, const Vec4V b, const Vec4V c);
// c-a*b
PX_FORCE_INLINE Vec4V V4NegMulSub(const Vec4V a, const Vec4V b, const Vec4V c);
// fabs(a)
PX_FORCE_INLINE Vec4V V4Abs(const Vec4V a);
// bitwise a & ~b
PX_FORCE_INLINE Vec4V V4Andc(const Vec4V a, const VecU32V b);
// a.b (W is taken into account)
PX_FORCE_INLINE FloatV V4Dot(const Vec4V a, const Vec4V b);
// a.b (same computation as V3Dot. W is ignored in input)
PX_FORCE_INLINE FloatV V4Dot3(const Vec4V a, const Vec4V b);
// aXb (same computation as V3Cross. W is ignored in input and undefined in output)
PX_FORCE_INLINE Vec4V V4Cross(const Vec4V a, const Vec4V b);
//|a.a|^1/2
PX_FORCE_INLINE FloatV V4Length(const Vec4V a);
// a.a
PX_FORCE_INLINE FloatV V4LengthSq(const Vec4V a);
// a*|a.a|^-1/2
PX_FORCE_INLINE Vec4V V4Normalize(const Vec4V a);
// a.a>0 ? a*|a.a|^-1/2 : unsafeReturnValue
PX_FORCE_INLINE Vec4V V4NormalizeSafe(const Vec4V a, const Vec4V unsafeReturnValue);
// a*|a.a|^-1/2
PX_FORCE_INLINE Vec4V V4NormalizeFast(const Vec4V a);
// c ? a : b (per component)
PX_FORCE_INLINE Vec4V V4Sel(const BoolV c, const Vec4V a, const Vec4V b);
// a>b (per component)
PX_FORCE_INLINE BoolV V4IsGrtr(const Vec4V a, const Vec4V b);
// a>=b (per component)
PX_FORCE_INLINE BoolV V4IsGrtrOrEq(const Vec4V a, const Vec4V b);
// a==b (per component)
PX_FORCE_INLINE BoolV V4IsEq(const Vec4V a, const Vec4V b);
// Max(a,b) (per component)
PX_FORCE_INLINE Vec4V V4Max(const Vec4V a, const Vec4V b);
// Min(a,b) (per component)
PX_FORCE_INLINE Vec4V V4Min(const Vec4V a, const Vec4V b);
// Get the maximum component from a
PX_FORCE_INLINE FloatV V4ExtractMax(const Vec4V a);
// Get the minimum component from a
PX_FORCE_INLINE FloatV V4ExtractMin(const Vec4V a);
// Clamp(a, minV, maxV) (per component)
PX_FORCE_INLINE Vec4V V4Clamp(const Vec4V a, const Vec4V minV, const Vec4V maxV);
// return 1 if all components of a are greater than all components of b.
PX_FORCE_INLINE PxU32 V4AllGrtr(const Vec4V a, const Vec4V b);
// return 1 if all components of a are greater than or equal to all components of b
PX_FORCE_INLINE PxU32 V4AllGrtrOrEq(const Vec4V a, const Vec4V b);
// return 1 if XYZ components of a are greater than or equal to XYZ components of b. W is ignored.
PX_FORCE_INLINE PxU32 V4AllGrtrOrEq3(const Vec4V a, const Vec4V b);
// return 1 if all components of a are equal to all components of b
PX_FORCE_INLINE PxU32 V4AllEq(const Vec4V a, const Vec4V b);
// return 1 if any XYZ component of a is greater than the corresponding component of b. W is ignored.
PX_FORCE_INLINE PxU32 V4AnyGrtr3(const Vec4V a, const Vec4V b);
// round(a)(per component)
PX_FORCE_INLINE Vec4V V4Round(const Vec4V a);
// sin(a) (per component)
PX_FORCE_INLINE Vec4V V4Sin(const Vec4V a);
// cos(a) (per component)
PX_FORCE_INLINE Vec4V V4Cos(const Vec4V a);
// Permute v into a new vec4v with YXWZ format
PX_FORCE_INLINE Vec4V V4PermYXWZ(const Vec4V v);
// Permute v into a new vec4v with XZXZ format
PX_FORCE_INLINE Vec4V V4PermXZXZ(const Vec4V v);
// Permute v into a new vec4v with YWYW format
PX_FORCE_INLINE Vec4V V4PermYWYW(const Vec4V v);
// Permute v into a new vec4v with YZXW format
PX_FORCE_INLINE Vec4V V4PermYZXW(const Vec4V v);
// Permute v into a new vec4v with ZWXY format - equivalent to a swap of the two 64bit parts of the vector
PX_FORCE_INLINE Vec4V V4PermZWXY(const Vec4V a);
// Permute v into a new vec4v with format {a[x], a[y], a[z], a[w]}
// V4Perm<1,3,1,3> is equal to V4PermYWYW
// V4Perm<0,2,0,2> is equal to V4PermXZXZ
// V4Perm<1,0,3,2> is equal to V4PermYXWZ
template <PxU8 x, PxU8 y, PxU8 z, PxU8 w>
PX_FORCE_INLINE Vec4V V4Perm(const Vec4V a);
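// Example (illustrative sketch, not part of the original documentation): broadcasting element 2 into all lanes
//     const Vec4V zzzz = V4Perm<2, 2, 2, 2>(v);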
// Transpose 4 Vec4Vs inplace.
// [ x0, y0, z0, w0] [ x1, y1, z1, w1] [ x2, y2, z2, w2] [ x3, y3, z3, w3] ->
// [ x0, x1, x2, x3] [ y0, y1, y2, y3] [ z0, z1, z2, z3] [ w0, w1, w2, w3]
PX_FORCE_INLINE void V4Transpose(Vec4V& col0, Vec4V& col1, Vec4V& col2, Vec4V& col3);
// q = cos(a/2) + u*sin(a/2)
PX_FORCE_INLINE QuatV QuatV_From_RotationAxisAngle(const Vec3V u, const FloatV a);
// convert q to a unit quaternion
PX_FORCE_INLINE QuatV QuatNormalize(const QuatV q);
//|q.q|^1/2
PX_FORCE_INLINE FloatV QuatLength(const QuatV q);
// q.q
PX_FORCE_INLINE FloatV QuatLengthSq(const QuatV q);
// a.b
PX_FORCE_INLINE FloatV QuatDot(const QuatV a, const QuatV b);
//(-q.x, -q.y, -q.z, q.w)
PX_FORCE_INLINE QuatV QuatConjugate(const QuatV q);
//(q.x, q.y, q.z)
PX_FORCE_INLINE Vec3V QuatGetImaginaryPart(const QuatV q);
// convert quaternion to matrix 33
PX_FORCE_INLINE Mat33V QuatGetMat33V(const QuatVArg q);
// convert quaternion to matrix 33
PX_FORCE_INLINE void QuatGetMat33V(const QuatVArg q, Vec3V& column0, Vec3V& column1, Vec3V& column2);
// convert matrix 33 to quaternion
PX_FORCE_INLINE QuatV Mat33GetQuatV(const Mat33V& a);
// computes the rotation of the x-axis
PX_FORCE_INLINE Vec3V QuatGetBasisVector0(const QuatV q);
// computes the rotation of the y-axis
PX_FORCE_INLINE Vec3V QuatGetBasisVector1(const QuatV q);
// computes the rotation of the z-axis
PX_FORCE_INLINE Vec3V QuatGetBasisVector2(const QuatV q);
// rotate v by q
PX_FORCE_INLINE Vec3V QuatRotate(const QuatV q, const Vec3V v);
// rotate v by the conjugate of q (the inverse rotation for a unit quaternion)
PX_FORCE_INLINE Vec3V QuatRotateInv(const QuatV q, const Vec3V v);
// quaternion multiplication
PX_FORCE_INLINE QuatV QuatMul(const QuatV a, const QuatV b);
// quaternion add
PX_FORCE_INLINE QuatV QuatAdd(const QuatV a, const QuatV b);
// (-q.x, -q.y, -q.z, -q.w)
PX_FORCE_INLINE QuatV QuatNeg(const QuatV q);
// (a.x - b.x, a.y-b.y, a.z-b.z, a.w-b.w )
PX_FORCE_INLINE QuatV QuatSub(const QuatV a, const QuatV b);
// (a.x*b, a.y*b, a.z*b, a.w*b)
PX_FORCE_INLINE QuatV QuatScale(const QuatV a, const FloatV b);
// (x = v[0], y = v[1], z = v[2], w =v[3])
PX_FORCE_INLINE QuatV QuatMerge(const FloatV* const v);
// (x = v[0], y = v[1], z = v[2], w =v[3])
PX_FORCE_INLINE QuatV QuatMerge(const FloatVArg x, const FloatVArg y, const FloatVArg z, const FloatVArg w);
// (x = 0.f, y = 0.f, z = 0.f, w = 1.f)
PX_FORCE_INLINE QuatV QuatIdentity();
// check that each component is finite
PX_FORCE_INLINE bool isFiniteQuatV(const QuatV q);
// check that each component is valid
PX_FORCE_INLINE bool isValidQuatV(const QuatV q);
// check that each component is valid
PX_FORCE_INLINE bool isSaneQuatV(const QuatV q);
// Math operations on 16-byte aligned booleans.
// x=false y=false z=false w=false
PX_FORCE_INLINE BoolV BFFFF();
// x=false y=false z=false w=true
PX_FORCE_INLINE BoolV BFFFT();
// x=false y=false z=true w=false
PX_FORCE_INLINE BoolV BFFTF();
// x=false y=false z=true w=true
PX_FORCE_INLINE BoolV BFFTT();
// x=false y=true z=false w=false
PX_FORCE_INLINE BoolV BFTFF();
// x=false y=true z=false w=true
PX_FORCE_INLINE BoolV BFTFT();
// x=false y=true z=true w=false
PX_FORCE_INLINE BoolV BFTTF();
// x=false y=true z=true w=true
PX_FORCE_INLINE BoolV BFTTT();
// x=true y=false z=false w=false
PX_FORCE_INLINE BoolV BTFFF();
// x=true y=false z=false w=true
PX_FORCE_INLINE BoolV BTFFT();
// x=true y=false z=true w=false
PX_FORCE_INLINE BoolV BTFTF();
// x=true y=false z=true w=true
PX_FORCE_INLINE BoolV BTFTT();
// x=true y=true z=false w=false
PX_FORCE_INLINE BoolV BTTFF();
// x=true y=true z=false w=true
PX_FORCE_INLINE BoolV BTTFT();
// x=true y=true z=true w=false
PX_FORCE_INLINE BoolV BTTTF();
// x=true y=true z=true w=true
PX_FORCE_INLINE BoolV BTTTT();
// x=false y=false z=false w=true
PX_FORCE_INLINE BoolV BWMask();
// x=true y=false z=false w=false
PX_FORCE_INLINE BoolV BXMask();
// x=false y=true z=false w=false
PX_FORCE_INLINE BoolV BYMask();
// x=false y=false z=true w=false
PX_FORCE_INLINE BoolV BZMask();
// get x component
PX_FORCE_INLINE BoolV BGetX(const BoolV f);
// get y component
PX_FORCE_INLINE BoolV BGetY(const BoolV f);
// get z component
PX_FORCE_INLINE BoolV BGetZ(const BoolV f);
// get w component
PX_FORCE_INLINE BoolV BGetW(const BoolV f);
// Use elementIndex to splat xxxx or yyyy or zzzz or wwww
template <int elementIndex>
PX_FORCE_INLINE BoolV BSplatElement(Vec4V a);
// component-wise && (AND)
PX_FORCE_INLINE BoolV BAnd(const BoolV a, const BoolV b);
// component-wise || (OR)
PX_FORCE_INLINE BoolV BOr(const BoolV a, const BoolV b);
// component-wise not
PX_FORCE_INLINE BoolV BNot(const BoolV a);
// if all four components are true, return true, otherwise return false
PX_FORCE_INLINE BoolV BAllTrue4(const BoolV a);
// if any of the four components is true, return true, otherwise return false
PX_FORCE_INLINE BoolV BAnyTrue4(const BoolV a);
// if all three (0, 1, 2) components are true, return true, otherwise return false
PX_FORCE_INLINE BoolV BAllTrue3(const BoolV a);
// if any of the three (0, 1, 2) components is true, return true, otherwise return false
PX_FORCE_INLINE BoolV BAnyTrue3(const BoolV a);
// Return 1 if all components equal, zero otherwise.
PX_FORCE_INLINE PxU32 BAllEq(const BoolV a, const BoolV b);
// Specialized/faster BAllEq function for b==TTTT
PX_FORCE_INLINE PxU32 BAllEqTTTT(const BoolV a);
// Specialized/faster BAllEq function for b==FFFF
PX_FORCE_INLINE PxU32 BAllEqFFFF(const BoolV a);
/// Get BoolV as bits set in a PxU32. A bit in the output is set if the corresponding element is 'true' in the input.
/// There is a bit for each element in a, with element 0's value held in bit 0, element 1's in bit 1, and so forth.
/// If nothing is true in the input it will return 0, and if all are true it will return 0xf.
/// NOTE: the performance of this function varies considerably by platform, so it is recommended to use it only
/// where your algorithm really needs a BoolV in an integer variable.
PX_FORCE_INLINE PxU32 BGetBitMask(const BoolV a);
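// Example (illustrative sketch): turning a vector comparison into scalar branching
//     const BoolV gt = V3IsGrtr(a, b);
//     const PxU32 bits = BGetBitMask(gt);   // bit 0 = x, bit 1 = y, bit 2 = z, bit 3 = w
//     if(bits & 1) { /* a.x > b.x */ }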
// VecI32V stuff
PX_FORCE_INLINE VecI32V VecI32V_Zero();
PX_FORCE_INLINE VecI32V VecI32V_One();
PX_FORCE_INLINE VecI32V VecI32V_Two();
PX_FORCE_INLINE VecI32V VecI32V_MinusOne();
// Compute a shift parameter for VecI32V_LeftShift and VecI32V_RightShift
// Each element of shift must be identical, i.e. the vector must have the form {count, count, count, count} with count >= 0
PX_FORCE_INLINE VecShiftV VecI32V_PrepareShift(const VecI32VArg shift);
// Shift each element of a leftwards by the same amount
// Compute shift with VecI32V_PrepareShift
//{a.x<<shift[0], a.y<<shift[0], a.z<<shift[0], a.w<<shift[0]}
PX_FORCE_INLINE VecI32V VecI32V_LeftShift(const VecI32VArg a, const VecShiftVArg shift);
// Shift each element of a rightwards by the same amount
// Compute shift with VecI32V_PrepareShift
//{a.x>>shift[0], a.y>>shift[0], a.z>>shift[0], a.w>>shift[0]}
PX_FORCE_INLINE VecI32V VecI32V_RightShift(const VecI32VArg a, const VecShiftVArg shift);
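// Example (illustrative sketch): shifting all four lanes of a VecI32V left by the same amount
//     const VecShiftV shift = VecI32V_PrepareShift(VecI32V_Two());
//     const VecI32V scaled = VecI32V_LeftShift(v, shift);   // each lane multiplied by 4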
PX_FORCE_INLINE VecI32V VecI32V_Add(const VecI32VArg a, const VecI32VArg b);
PX_FORCE_INLINE VecI32V VecI32V_Or(const VecI32VArg a, const VecI32VArg b);
PX_FORCE_INLINE VecI32V VecI32V_GetX(const VecI32VArg a);
PX_FORCE_INLINE VecI32V VecI32V_GetY(const VecI32VArg a);
PX_FORCE_INLINE VecI32V VecI32V_GetZ(const VecI32VArg a);
PX_FORCE_INLINE VecI32V VecI32V_GetW(const VecI32VArg a);
PX_FORCE_INLINE VecI32V VecI32V_Sub(const VecI32VArg a, const VecI32VArg b);
PX_FORCE_INLINE BoolV VecI32V_IsGrtr(const VecI32VArg a, const VecI32VArg b);
PX_FORCE_INLINE BoolV VecI32V_IsEq(const VecI32VArg a, const VecI32VArg b);
PX_FORCE_INLINE VecI32V V4I32Sel(const BoolV c, const VecI32V a, const VecI32V b);
// VecU32V stuff
PX_FORCE_INLINE VecU32V U4Zero();
PX_FORCE_INLINE VecU32V U4One();
PX_FORCE_INLINE VecU32V U4Two();
PX_FORCE_INLINE BoolV V4IsEqU32(const VecU32V a, const VecU32V b);
PX_FORCE_INLINE VecU32V V4U32Sel(const BoolV c, const VecU32V a, const VecU32V b);
PX_FORCE_INLINE VecU32V V4U32or(VecU32V a, VecU32V b);
PX_FORCE_INLINE VecU32V V4U32xor(VecU32V a, VecU32V b);
PX_FORCE_INLINE VecU32V V4U32and(VecU32V a, VecU32V b);
PX_FORCE_INLINE VecU32V V4U32Andc(VecU32V a, VecU32V b);
// VecU32 - why does this not return a bool?
PX_FORCE_INLINE VecU32V V4IsGrtrV32u(const Vec4V a, const Vec4V b);
// Math operations on 16-byte aligned Mat33s (represents any 3x3 matrix)
PX_FORCE_INLINE Mat33V M33Load(const PxMat33& m)
{
return Mat33V(Vec3V_From_Vec4V(V4LoadU(&m.column0.x)),
Vec3V_From_Vec4V(V4LoadU(&m.column1.x)), V3LoadU(m.column2));
}
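// Note: the unaligned 4-float loads above are safe for column0 and column1 because the following
// PxMat33 columns provide the extra float that V4LoadU reads; the last column uses V3LoadU so that
// no memory past the end of the matrix is touched.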
// a*b
PX_FORCE_INLINE Vec3V M33MulV3(const Mat33V& a, const Vec3V b);
// A*x + b
PX_FORCE_INLINE Vec3V M33MulV3AddV3(const Mat33V& A, const Vec3V b, const Vec3V c);
// transpose(a) * b
PX_FORCE_INLINE Vec3V M33TrnspsMulV3(const Mat33V& a, const Vec3V b);
// a*b
PX_FORCE_INLINE Mat33V M33MulM33(const Mat33V& a, const Mat33V& b);
// a+b
PX_FORCE_INLINE Mat33V M33Add(const Mat33V& a, const Mat33V& b);
// a-b
PX_FORCE_INLINE Mat33V M33Sub(const Mat33V& a, const Mat33V& b);
//-a
PX_FORCE_INLINE Mat33V M33Neg(const Mat33V& a);
// absolute value of the matrix
PX_FORCE_INLINE Mat33V M33Abs(const Mat33V& a);
// inverse mat
PX_FORCE_INLINE Mat33V M33Inverse(const Mat33V& a);
// transpose(a)
PX_FORCE_INLINE Mat33V M33Trnsps(const Mat33V& a);
// create an identity matrix
PX_FORCE_INLINE Mat33V M33Identity();
// create a Mat33V whose diagonal holds the elements of the given Vec3V
PX_FORCE_INLINE Mat33V M33Diagonal(const Vec3VArg);
// Not implemented
// return 1 if all components of a are equal to all components of b
// PX_FORCE_INLINE PxU32 V4U32AllEq(const VecU32V a, const VecU32V b);
// v.w=f
// PX_FORCE_INLINE void V3WriteW(Vec3V& v, const PxF32 f);
// PX_FORCE_INLINE PxF32 V3ReadW(const Vec3V& v);
// Not used
// PX_FORCE_INLINE Vec4V V4LoadAligned(Vec4V* addr);
// PX_FORCE_INLINE Vec4V V4LoadUnaligned(Vec4V* addr);
// floor(a)(per component)
// PX_FORCE_INLINE Vec4V V4Floor(Vec4V a);
// ceil(a) (per component)
// PX_FORCE_INLINE Vec4V V4Ceil(Vec4V a);
// PX_FORCE_INLINE VecU32V V4ConvertToU32VSaturate(const Vec4V a, PxU32 power);
// Math operations on 16-byte aligned Mat34s (represents transformation matrix - rotation and translation).
// namespace _Mat34V
//{
// //a*b
// PX_FORCE_INLINE Vec3V multiplyV(const Mat34V& a, const Vec3V b);
// //a_rotation * b
// PX_FORCE_INLINE Vec3V multiply3X3V(const Mat34V& a, const Vec3V b);
// //transpose(a_rotation)*b
// PX_FORCE_INLINE Vec3V multiplyTranspose3X3V(const Mat34V& a, const Vec3V b);
// //a*b
// PX_FORCE_INLINE Mat34V multiplyV(const Mat34V& a, const Mat34V& b);
// //a_rotation*b
// PX_FORCE_INLINE Mat33V multiply3X3V(const Mat34V& a, const Mat33V& b);
// //a_rotation*b_rotation
// PX_FORCE_INLINE Mat33V multiply3X3V(const Mat34V& a, const Mat34V& b);
// //a+b
// PX_FORCE_INLINE Mat34V addV(const Mat34V& a, const Mat34V& b);
// //a^-1
// PX_FORCE_INLINE Mat34V getInverseV(const Mat34V& a);
// //transpose(a_rotation)
// PX_FORCE_INLINE Mat33V getTranspose3X3(const Mat34V& a);
//}; //namespace _Mat34V
// a*b
//#define M34MulV3(a,b) (M34MulV3(a,b))
////a_rotation * b
//#define M34Mul33V3(a,b) (M34Mul33V3(a,b))
////transpose(a_rotation)*b
//#define M34TrnspsMul33V3(a,b) (M34TrnspsMul33V3(a,b))
////a*b
//#define M34MulM34(a,b) (_Mat34V::multiplyV(a,b))
// a_rotation*b
//#define M34MulM33(a,b) (M34MulM33(a,b))
// a_rotation*b_rotation
//#define M34Mul33MM34(a,b) (M34MulM33(a,b))
// a+b
//#define M34Add(a,b) (M34Add(a,b))
////a^-1
//#define M34Inverse(a,b) (M34Inverse(a))
// transpose(a_rotation)
//#define M34Trnsps33(a) (M33Trnsps3X3(a))
// Math operations on 16-byte aligned Mat44s (represents any 4x4 matrix)
// namespace _Mat44V
//{
// //a*b
// PX_FORCE_INLINE Vec4V multiplyV(const Mat44V& a, const Vec4V b);
// //transpose(a)*b
// PX_FORCE_INLINE Vec4V multiplyTransposeV(const Mat44V& a, const Vec4V b);
// //a*b
// PX_FORCE_INLINE Mat44V multiplyV(const Mat44V& a, const Mat44V& b);
// //a+b
// PX_FORCE_INLINE Mat44V addV(const Mat44V& a, const Mat44V& b);
// //a&-1
// PX_FORCE_INLINE Mat44V getInverseV(const Mat44V& a);
// //transpose(a)
// PX_FORCE_INLINE Mat44V getTransposeV(const Mat44V& a);
//}; //namespace _Mat44V
// namespace _VecU32V
//{
// // pack 8 U32s to 8 U16s with saturation
// PX_FORCE_INLINE VecU16V pack2U32VToU16VSaturate(VecU32V a, VecU32V b);
// PX_FORCE_INLINE VecU32V orV(VecU32V a, VecU32V b);
// PX_FORCE_INLINE VecU32V andV(VecU32V a, VecU32V b);
// PX_FORCE_INLINE VecU32V andcV(VecU32V a, VecU32V b);
// // conversion from integer to float
// PX_FORCE_INLINE Vec4V convertToVec4V(VecU32V a);
// // splat a[elementIndex] into all fields of a
// template<int elementIndex>
// PX_FORCE_INLINE VecU32V splatElement(VecU32V a);
// PX_FORCE_INLINE void storeAligned(VecU32V a, VecU32V* address);
//};
// namespace _VecI32V
//{
// template<int a> PX_FORCE_INLINE VecI32V splatI32();
//};
//
// namespace _VecU16V
//{
// PX_FORCE_INLINE VecU16V orV(VecU16V a, VecU16V b);
// PX_FORCE_INLINE VecU16V andV(VecU16V a, VecU16V b);
// PX_FORCE_INLINE VecU16V andcV(VecU16V a, VecU16V b);
// PX_FORCE_INLINE void storeAligned(VecU16V val, VecU16V *address);
// PX_FORCE_INLINE VecU16V loadAligned(VecU16V* addr);
// PX_FORCE_INLINE VecU16V loadUnaligned(VecU16V* addr);
// PX_FORCE_INLINE VecU16V compareGt(VecU16V a, VecU16V b);
// template<int elementIndex>
// PX_FORCE_INLINE VecU16V splatElement(VecU16V a);
// PX_FORCE_INLINE VecU16V subtractModulo(VecU16V a, VecU16V b);
// PX_FORCE_INLINE VecU16V addModulo(VecU16V a, VecU16V b);
// PX_FORCE_INLINE VecU32V getLo16(VecU16V a); // [0,2,4,6] 16-bit values to [0,1,2,3] 32-bit vector
// PX_FORCE_INLINE VecU32V getHi16(VecU16V a); // [1,3,5,7] 16-bit values to [0,1,2,3] 32-bit vector
//};
//
// namespace _VecI16V
//{
// template <int val> PX_FORCE_INLINE VecI16V splatImmediate();
//};
//
// namespace _VecU8V
//{
//};
// a*b
//#define M44MulV4(a,b) (M44MulV4(a,b))
////transpose(a)*b
//#define M44TrnspsMulV4(a,b) (M44TrnspsMulV4(a,b))
////a*b
//#define M44MulM44(a,b) (M44MulM44(a,b))
////a+b
//#define M44Add(a,b) (M44Add(a,b))
////a&-1
//#define M44Inverse(a) (M44Inverse(a))
////transpose(a)
//#define M44Trnsps(a) (M44Trnsps(a))
// dsequeira: these used to be assert'd out in SIMD builds, but they're necessary if
// we want to be able to write some scalar functions which run using SIMD data structures
PX_FORCE_INLINE void V3WriteX(Vec3V& v, const PxF32 f)
{
reinterpret_cast<PxVec3&>(v).x = f;
}
PX_FORCE_INLINE void V3WriteY(Vec3V& v, const PxF32 f)
{
reinterpret_cast<PxVec3&>(v).y = f;
}
PX_FORCE_INLINE void V3WriteZ(Vec3V& v, const PxF32 f)
{
reinterpret_cast<PxVec3&>(v).z = f;
}
PX_FORCE_INLINE void V3WriteXYZ(Vec3V& v, const PxVec3& f)
{
reinterpret_cast<PxVec3&>(v) = f;
}
PX_FORCE_INLINE PxF32 V3ReadX(const Vec3V& v)
{
return reinterpret_cast<const PxVec3&>(v).x;
}
PX_FORCE_INLINE PxF32 V3ReadY(const Vec3V& v)
{
return reinterpret_cast<const PxVec3&>(v).y;
}
PX_FORCE_INLINE PxF32 V3ReadZ(const Vec3V& v)
{
return reinterpret_cast<const PxVec3&>(v).z;
}
PX_FORCE_INLINE const PxVec3& V3ReadXYZ(const Vec3V& v)
{
return reinterpret_cast<const PxVec3&>(v);
}
PX_FORCE_INLINE void V4WriteX(Vec4V& v, const PxF32 f)
{
reinterpret_cast<PxVec4&>(v).x = f;
}
PX_FORCE_INLINE void V4WriteY(Vec4V& v, const PxF32 f)
{
reinterpret_cast<PxVec4&>(v).y = f;
}
PX_FORCE_INLINE void V4WriteZ(Vec4V& v, const PxF32 f)
{
reinterpret_cast<PxVec4&>(v).z = f;
}
PX_FORCE_INLINE void V4WriteW(Vec4V& v, const PxF32 f)
{
reinterpret_cast<PxVec4&>(v).w = f;
}
PX_FORCE_INLINE void V4WriteXYZ(Vec4V& v, const PxVec3& f)
{
reinterpret_cast<PxVec3&>(v) = f;
}
PX_FORCE_INLINE PxF32 V4ReadX(const Vec4V& v)
{
return reinterpret_cast<const PxVec4&>(v).x;
}
PX_FORCE_INLINE PxF32 V4ReadY(const Vec4V& v)
{
return reinterpret_cast<const PxVec4&>(v).y;
}
PX_FORCE_INLINE PxF32 V4ReadZ(const Vec4V& v)
{
return reinterpret_cast<const PxVec4&>(v).z;
}
PX_FORCE_INLINE PxF32 V4ReadW(const Vec4V& v)
{
return reinterpret_cast<const PxVec4&>(v).w;
}
PX_FORCE_INLINE const PxVec3& V4ReadXYZ(const Vec4V& v)
{
return reinterpret_cast<const PxVec3&>(v);
}
// this macro transposes 4 Vec4V into 3 Vec4V (assuming that the W component can be ignored)
#define PX_TRANSPOSE_44_34(inA, inB, inC, inD, outA, outB, outC) \
outA = V4UnpackXY(inA, inC); \
inA = V4UnpackZW(inA, inC); \
inC = V4UnpackXY(inB, inD); \
inB = V4UnpackZW(inB, inD); \
outB = V4UnpackZW(outA, inC); \
outA = V4UnpackXY(outA, inC); \
outC = V4UnpackXY(inA, inB);
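// Example (illustrative sketch): gathering the x/y/z lanes of four points into xxxx/yyyy/zzzz vectors
// (note that the macro overwrites some of its inputs):
//     Vec4V xxxx, yyyy, zzzz;
//     PX_TRANSPOSE_44_34(p0, p1, p2, p3, xxxx, yyyy, zzzz);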
// this macro transposes 3 Vec4V into 4 Vec4V (with W components as garbage!)
#define PX_TRANSPOSE_34_44(inA, inB, inC, outA, outB, outC, outD) \
outA = V4UnpackXY(inA, inC); \
inA = V4UnpackZW(inA, inC); \
outC = V4UnpackXY(inB, inB); \
inC = V4UnpackZW(inB, inB); \
outB = V4UnpackZW(outA, outC); \
outA = V4UnpackXY(outA, outC); \
outC = V4UnpackXY(inA, inC); \
outD = V4UnpackZW(inA, inC);
#define PX_TRANSPOSE_44(inA, inB, inC, inD, outA, outB, outC, outD) \
outA = V4UnpackXY(inA, inC); \
inA = V4UnpackZW(inA, inC); \
inC = V4UnpackXY(inB, inD); \
inB = V4UnpackZW(inB, inD); \
outB = V4UnpackZW(outA, inC); \
outA = V4UnpackXY(outA, inC); \
outC = V4UnpackXY(inA, inB); \
outD = V4UnpackZW(inA, inB);
// This function returns a Vec4V, where each element is the dot product of one pair of Vec3Vs. On PC, each element in
// the result should be identical to the result of calling V3Dot on the corresponding pair of Vec3Vs.
// However, on other platforms the result might diverge by some small margin due to differences in FP rounding, e.g. if
// _mm_dp_ps or some other approximate dot product or fused madd operations were used.
// Where no hw-accelerated dot-product operation exists, this approach should be the fastest way to compute
// the dot products of 4 pairs of vectors.
PX_FORCE_INLINE Vec4V V3Dot4(const Vec3VArg a0, const Vec3VArg b0, const Vec3VArg a1, const Vec3VArg b1,
const Vec3VArg a2, const Vec3VArg b2, const Vec3VArg a3, const Vec3VArg b3)
{
Vec4V a0b0 = Vec4V_From_Vec3V(V3Mul(a0, b0));
Vec4V a1b1 = Vec4V_From_Vec3V(V3Mul(a1, b1));
Vec4V a2b2 = Vec4V_From_Vec3V(V3Mul(a2, b2));
Vec4V a3b3 = Vec4V_From_Vec3V(V3Mul(a3, b3));
Vec4V aTrnsps, bTrnsps, cTrnsps;
PX_TRANSPOSE_44_34(a0b0, a1b1, a2b2, a3b3, aTrnsps, bTrnsps, cTrnsps);
return V4Add(V4Add(aTrnsps, bTrnsps), cTrnsps);
}
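// Example (illustrative sketch): evaluating four point/normal dot products in one call, e.g. testing
// one point p against four planes whose normals are n0..n3:
//     const Vec4V dots = V3Dot4(p, n0, p, n1, p, n2, p, n3);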
//(f.x,f.y,f.z,0) - Alternative/faster V3LoadU implementation when it is safe to read "W", i.e. the 32bits after the PxVec3.
PX_FORCE_INLINE Vec3V V3LoadU_SafeReadW(const PxVec3& f)
{
return Vec3V_From_Vec4V(V4LoadU(&f.x));
}
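// Example (illustrative sketch): loading vertices from a PxVec3 array, where reading the x component of
// the next element in place of "W" is harmless for all but the last entry:
//     const Vec3V v = V3LoadU_SafeReadW(verts[i]);   // requires verts[i + 1] (or padding) to be readable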
} // namespace aos
#if !PX_DOXYGEN
} // namespace physx
#endif
// Now for the cross-platform implementations of the 16-byte aligned maths functions (win32/360/ppu/spu etc).
#if COMPILE_VECTOR_INTRINSICS
#include "PxInlineAoS.h"
#else // #if COMPILE_VECTOR_INTRINSICS
#include "PxVecMathAoSScalarInline.h"
#endif // #if !COMPILE_VECTOR_INTRINSICS
#include "PxVecQuat.h"
#endif
| 51,480 | C | 37.562547 | 148 | 0.675408 |
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxFoundationConfig.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_FOUNDATION_CONFIG_H
#define PX_FOUNDATION_CONFIG_H
#include "foundation/PxPreprocessor.h"
/** \addtogroup foundation
@{
*/
#if defined PX_PHYSX_STATIC_LIB
#define PX_FOUNDATION_API
#else
#if PX_WINDOWS_FAMILY && !defined(__CUDACC__)
#if defined PX_PHYSX_FOUNDATION_EXPORTS
#define PX_FOUNDATION_API __declspec(dllexport)
#else
#define PX_FOUNDATION_API __declspec(dllimport)
#endif
#elif PX_UNIX_FAMILY
#define PX_FOUNDATION_API PX_UNIX_EXPORT
#else
#define PX_FOUNDATION_API
#endif
#endif
/** @} */
#endif
| 2,244 | C | 38.385964 | 74 | 0.753119 |
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxVecMathSSE.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_VEC_MATH_SSE_H
#define PX_VEC_MATH_SSE_H
#if !PX_DOXYGEN
namespace physx
{
#endif
namespace aos
{
namespace
{
const PX_ALIGN(16, PxF32) minus1w[4] = { 0.0f, 0.0f, 0.0f, -1.0f };
}
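// For reference, a sketch of the standard unit-quaternion-to-matrix formula implemented below,
// with q = (x, y, z, w) and using 2ww - 1 = 1 - 2xx - 2yy - 2zz for a unit quaternion:
//     column0 = (1 - 2yy - 2zz, 2xy + 2wz, 2xz - 2wy)
//     column1 = (2xy - 2wz, 1 - 2xx - 2zz, 2yz + 2wx)
//     column2 = (2xz + 2wy, 2yz - 2wx, 1 - 2xx - 2yy)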
PX_FORCE_INLINE void QuatGetMat33V(const QuatVArg q, Vec3V& column0, Vec3V& column1, Vec3V& column2)
{
const __m128 q2 = V4Add(q, q);
const __m128 qw2 = V4MulAdd(q2, V4GetW(q), _mm_load_ps(minus1w)); // (2wx, 2wy, 2wz, 2ww-1)
const __m128 nw2 = Vec3V_From_Vec4V(V4Neg(qw2)); // (-2wx, -2wy, -2wz, 0)
const __m128 v = Vec3V_From_Vec4V(q);
const __m128 a0 = _mm_shuffle_ps(qw2, nw2, _MM_SHUFFLE(3, 1, 2, 3)); // (2ww-1, 2wz, -2wy, 0)
column0 = V4MulAdd(v, V4GetX(q2), a0);
const __m128 a1 = _mm_shuffle_ps(qw2, nw2, _MM_SHUFFLE(3, 2, 0, 3)); // (2ww-1, 2wx, -2wz, 0)
column1 = V4MulAdd(v, V4GetY(q2), _mm_shuffle_ps(a1, a1, _MM_SHUFFLE(3, 1, 0, 2)));
const __m128 a2 = _mm_shuffle_ps(qw2, nw2, _MM_SHUFFLE(3, 0, 1, 3)); // (2ww-1, 2wy, -2wx, 0)
column2 = V4MulAdd(v, V4GetZ(q2), _mm_shuffle_ps(a2, a2, _MM_SHUFFLE(3, 0, 2, 1)));
}
} // namespace aos
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif
| 2,848 | C | 40.289854 | 100 | 0.697331 |
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxMat44.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_MAT44_H
#define PX_MAT44_H
/** \addtogroup foundation
@{
*/
#include "foundation/PxQuat.h"
#include "foundation/PxVec4.h"
#include "foundation/PxMat33.h"
#include "foundation/PxTransform.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/*!
\brief 4x4 matrix class
This class is layout-compatible with D3D and OpenGL matrices. More notes on layout are given in PxMat33.
@see PxMat33 PxTransform
*/
template<class Type>
class PxMat44T
{
public:
//! Default constructor
PX_CUDA_CALLABLE PX_INLINE PxMat44T()
{
}
//! identity constructor
PX_CUDA_CALLABLE PX_INLINE PxMat44T(PxIDENTITY) :
column0(Type(1.0), Type(0.0), Type(0.0), Type(0.0)),
column1(Type(0.0), Type(1.0), Type(0.0), Type(0.0)),
column2(Type(0.0), Type(0.0), Type(1.0), Type(0.0)),
column3(Type(0.0), Type(0.0), Type(0.0), Type(1.0))
{
}
//! zero constructor
PX_CUDA_CALLABLE PX_INLINE PxMat44T(PxZERO) : column0(PxZero), column1(PxZero), column2(PxZero), column3(PxZero)
{
}
//! Construct from four 4-vectors
PX_CUDA_CALLABLE PxMat44T(const PxVec4T<Type>& col0, const PxVec4T<Type>& col1, const PxVec4T<Type>& col2, const PxVec4T<Type>& col3) :
column0(col0),
column1(col1),
column2(col2),
column3(col3)
{
}
//! constructor that generates a multiple of the identity matrix
explicit PX_CUDA_CALLABLE PX_INLINE PxMat44T(Type r) :
column0(r, Type(0.0), Type(0.0), Type(0.0)),
column1(Type(0.0), r, Type(0.0), Type(0.0)),
column2(Type(0.0), Type(0.0), r, Type(0.0)),
column3(Type(0.0), Type(0.0), Type(0.0), r)
{
}
//! Construct from three base vectors and a translation
PX_CUDA_CALLABLE PxMat44T(const PxVec3T<Type>& col0, const PxVec3T<Type>& col1, const PxVec3T<Type>& col2, const PxVec3T<Type>& col3) :
column0(col0, Type(0.0)),
column1(col1, Type(0.0)),
column2(col2, Type(0.0)),
column3(col3, Type(1.0))
{
}
//! Construct from Type[16]
explicit PX_CUDA_CALLABLE PX_INLINE PxMat44T(Type values[]) :
column0(values[0], values[1], values[2], values[3]),
column1(values[4], values[5], values[6], values[7]),
column2(values[8], values[9], values[10], values[11]),
column3(values[12], values[13], values[14], values[15])
{
}
//! Construct from a quaternion
explicit PX_CUDA_CALLABLE PX_INLINE PxMat44T(const PxQuatT<Type>& q)
{
// PT: TODO: PX-566
const Type x = q.x;
const Type y = q.y;
const Type z = q.z;
const Type w = q.w;
const Type x2 = x + x;
const Type y2 = y + y;
const Type z2 = z + z;
const Type xx = x2 * x;
const Type yy = y2 * y;
const Type zz = z2 * z;
const Type xy = x2 * y;
const Type xz = x2 * z;
const Type xw = x2 * w;
const Type yz = y2 * z;
const Type yw = y2 * w;
const Type zw = z2 * w;
column0 = PxVec4T<Type>(Type(1.0) - yy - zz, xy + zw, xz - yw, Type(0.0));
column1 = PxVec4T<Type>(xy - zw, Type(1.0) - xx - zz, yz + xw, Type(0.0));
column2 = PxVec4T<Type>(xz + yw, yz - xw, Type(1.0) - xx - yy, Type(0.0));
column3 = PxVec4T<Type>(Type(0.0), Type(0.0), Type(0.0), Type(1.0));
}
//! Construct from a diagonal vector
explicit PX_CUDA_CALLABLE PX_INLINE PxMat44T(const PxVec4T<Type>& diagonal) :
column0(diagonal.x, Type(0.0), Type(0.0), Type(0.0)),
column1(Type(0.0), diagonal.y, Type(0.0), Type(0.0)),
column2(Type(0.0), Type(0.0), diagonal.z, Type(0.0)),
column3(Type(0.0), Type(0.0), Type(0.0), diagonal.w)
{
}
//! Construct from Mat33 and a translation
PX_CUDA_CALLABLE PxMat44T(const PxMat33T<Type>& axes, const PxVec3T<Type>& position) :
column0(axes.column0, Type(0.0)),
column1(axes.column1, Type(0.0)),
column2(axes.column2, Type(0.0)),
column3(position, Type(1.0))
{
}
PX_CUDA_CALLABLE PxMat44T(const PxTransformT<Type>& t)
{
*this = PxMat44T(PxMat33T<Type>(t.q), t.p);
}
/**
\brief returns true if the two matrices are exactly equal
*/
PX_CUDA_CALLABLE PX_INLINE bool operator==(const PxMat44T& m) const
{
return column0 == m.column0 && column1 == m.column1 && column2 == m.column2 && column3 == m.column3;
}
//! Copy constructor
PX_CUDA_CALLABLE PX_INLINE PxMat44T(const PxMat44T& other) :
column0(other.column0),
column1(other.column1),
column2(other.column2),
column3(other.column3)
{
}
//! Assignment operator
PX_CUDA_CALLABLE PX_INLINE PxMat44T& operator=(const PxMat44T& other)
{
column0 = other.column0;
column1 = other.column1;
column2 = other.column2;
column3 = other.column3;
return *this;
}
//! Get transposed matrix
PX_CUDA_CALLABLE PX_INLINE const PxMat44T getTranspose() const
{
return PxMat44T(
PxVec4T<Type>(column0.x, column1.x, column2.x, column3.x), PxVec4T<Type>(column0.y, column1.y, column2.y, column3.y),
PxVec4T<Type>(column0.z, column1.z, column2.z, column3.z), PxVec4T<Type>(column0.w, column1.w, column2.w, column3.w));
}
//! Unary minus
PX_CUDA_CALLABLE PX_INLINE const PxMat44T operator-() const
{
return PxMat44T(-column0, -column1, -column2, -column3);
}
//! Add
PX_CUDA_CALLABLE PX_INLINE const PxMat44T operator+(const PxMat44T& other) const
{
return PxMat44T(column0 + other.column0, column1 + other.column1, column2 + other.column2, column3 + other.column3);
}
//! Subtract
PX_CUDA_CALLABLE PX_INLINE const PxMat44T operator-(const PxMat44T& other) const
{
return PxMat44T(column0 - other.column0, column1 - other.column1, column2 - other.column2, column3 - other.column3);
}
//! Scalar multiplication
PX_CUDA_CALLABLE PX_INLINE const PxMat44T operator*(Type scalar) const
{
return PxMat44T(column0 * scalar, column1 * scalar, column2 * scalar, column3 * scalar);
}
template<class Type2>
friend PxMat44T<Type2> operator*(Type2, const PxMat44T<Type2>&);
//! Matrix multiplication
PX_CUDA_CALLABLE PX_INLINE const PxMat44T operator*(const PxMat44T& other) const
{
// Rows from this <dot> columns from other
// column0 = transform(other.column0) etc
return PxMat44T(transform(other.column0), transform(other.column1), transform(other.column2), transform(other.column3));
}
// a <op>= b operators
//! Equals-add
PX_CUDA_CALLABLE PX_INLINE PxMat44T& operator+=(const PxMat44T& other)
{
column0 += other.column0;
column1 += other.column1;
column2 += other.column2;
column3 += other.column3;
return *this;
}
//! Equals-sub
PX_CUDA_CALLABLE PX_INLINE PxMat44T& operator-=(const PxMat44T& other)
{
column0 -= other.column0;
column1 -= other.column1;
column2 -= other.column2;
column3 -= other.column3;
return *this;
}
//! Equals scalar multiplication
PX_CUDA_CALLABLE PX_INLINE PxMat44T& operator*=(Type scalar)
{
column0 *= scalar;
column1 *= scalar;
column2 *= scalar;
column3 *= scalar;
return *this;
}
//! Equals matrix multiplication
PX_CUDA_CALLABLE PX_INLINE PxMat44T& operator*=(const PxMat44T& other)
{
*this = *this * other;
return *this;
}
//! Element access, mathematical way!
PX_CUDA_CALLABLE PX_FORCE_INLINE Type operator()(PxU32 row, PxU32 col) const
{
return (*this)[col][row];
}
//! Element access, mathematical way!
PX_CUDA_CALLABLE PX_FORCE_INLINE Type& operator()(PxU32 row, PxU32 col)
{
return (*this)[col][row];
}
//! Transform vector by matrix, equal to v' = M*v
PX_CUDA_CALLABLE PX_INLINE const PxVec4T<Type> transform(const PxVec4T<Type>& other) const
{
return column0 * other.x + column1 * other.y + column2 * other.z + column3 * other.w;
}
//! Transform vector by matrix, equal to v' = M*v
PX_CUDA_CALLABLE PX_INLINE const PxVec3T<Type> transform(const PxVec3T<Type>& other) const
{
return transform(PxVec4T<Type>(other, Type(1.0))).getXYZ();
}
//! Rotate vector by matrix, equal to v' = M*v
PX_CUDA_CALLABLE PX_INLINE const PxVec4T<Type> rotate(const PxVec4T<Type>& other) const
{
return column0 * other.x + column1 * other.y + column2 * other.z; // + column3*0;
}
//! Rotate vector by matrix, equal to v' = M*v
PX_CUDA_CALLABLE PX_INLINE const PxVec3T<Type> rotate(const PxVec3T<Type>& other) const
{
return rotate(PxVec4T<Type>(other, Type(1.0))).getXYZ();
}
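// Example (illustrative sketch): transform() applies rotation and translation, rotate() applies rotation only.
//     PxMat44 m(PxTransform(PxVec3(0.0f, 1.0f, 0.0f)));
//     PxVec3 a = m.transform(PxVec3(1.0f, 0.0f, 0.0f));   // (1, 1, 0)
//     PxVec3 b = m.rotate(PxVec3(1.0f, 0.0f, 0.0f));      // (1, 0, 0)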
PX_CUDA_CALLABLE PX_INLINE const PxVec3T<Type> getBasis(PxU32 num) const
{
PX_ASSERT(num < 3);
return (&column0)[num].getXYZ();
}
PX_CUDA_CALLABLE PX_INLINE const PxVec3T<Type> getPosition() const
{
return column3.getXYZ();
}
PX_CUDA_CALLABLE PX_INLINE void setPosition(const PxVec3T<Type>& position)
{
column3.x = position.x;
column3.y = position.y;
column3.z = position.z;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE const Type* front() const
{
return &column0.x;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec4T<Type>& operator[](PxU32 num)
{
return (&column0)[num];
}
PX_CUDA_CALLABLE PX_FORCE_INLINE const PxVec4T<Type>& operator[](PxU32 num) const
{
return (&column0)[num];
}
PX_CUDA_CALLABLE PX_INLINE void scale(const PxVec4T<Type>& p)
{
column0 *= p.x;
column1 *= p.y;
column2 *= p.z;
column3 *= p.w;
}
PX_CUDA_CALLABLE PX_INLINE const PxMat44T inverseRT(void) const
{
const PxVec3T<Type> r0(column0.x, column1.x, column2.x);
const PxVec3T<Type> r1(column0.y, column1.y, column2.y);
const PxVec3T<Type> r2(column0.z, column1.z, column2.z);
return PxMat44T(r0, r1, r2, -(r0 * column3.x + r1 * column3.y + r2 * column3.z));
}
PX_CUDA_CALLABLE PX_INLINE bool isFinite() const
{
return column0.isFinite() && column1.isFinite() && column2.isFinite() && column3.isFinite();
}
// Data, see above for format!
PxVec4T<Type> column0, column1, column2, column3; // the four base vectors
};
// implementation from PxTransform.h
template<class Type>
PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransformT<Type>::PxTransformT(const PxMat44T<Type>& m)
{
const PxVec3T<Type> column0(m.column0.x, m.column0.y, m.column0.z);
const PxVec3T<Type> column1(m.column1.x, m.column1.y, m.column1.z);
const PxVec3T<Type> column2(m.column2.x, m.column2.y, m.column2.z);
q = PxQuatT<Type>(PxMat33T<Type>(column0, column1, column2));
p = PxVec3T<Type>(m.column3.x, m.column3.y, m.column3.z);
}
typedef PxMat44T<float> PxMat44;
typedef PxMat44T<double> PxMat44d;
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 11,739 | C | 28.94898 | 136 | 0.692563 |
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxFoundation.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_FOUNDATION_H
#define PX_FOUNDATION_H
/** \addtogroup foundation
@{
*/
#include "foundation/Px.h"
#include "foundation/PxErrors.h"
#include "foundation/PxFoundationConfig.h"
#include "foundation/PxErrors.h"
#include <stdarg.h>
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxAllocationListener;
class PxErrorCallback;
/**
\brief Foundation SDK singleton class.
You need to have an instance of this class to instantiate the higher level SDKs.
*/
class PX_FOUNDATION_API PxFoundation
{
public:
/**
\brief Destroys the instance it is called on.
The operation will fail if there are still modules referencing the foundation object. Release all dependent modules
prior to calling this method.
@see PxCreateFoundation()
*/
virtual void release() = 0;
/**
retrieves error callback
*/
virtual PxErrorCallback& getErrorCallback() = 0;
/**
Sets mask of errors to report.
*/
virtual void setErrorLevel(PxErrorCode::Enum mask = PxErrorCode::eMASK_ALL) = 0;
/**
Retrieves mask of errors to be reported.
*/
virtual PxErrorCode::Enum getErrorLevel() const = 0;
/**
Retrieves the allocator this object was created with.
*/
virtual PxAllocatorCallback& getAllocatorCallback() = 0;
/**
Retrieves if allocation names are being passed to allocator callback.
*/
virtual bool getReportAllocationNames() const = 0;
/**
Set if allocation names are being passed to allocator callback.
\details Enabled by default in debug and checked build, disabled by default in profile and release build.
*/
virtual void setReportAllocationNames(bool value) = 0;
virtual void registerAllocationListener(PxAllocationListener& listener) = 0;
virtual void deregisterAllocationListener(PxAllocationListener& listener) = 0;
virtual void registerErrorCallback(PxErrorCallback& callback) = 0;
virtual void deregisterErrorCallback(PxErrorCallback& callback) = 0;
virtual bool error(PxErrorCode::Enum c, const char* file, int line, const char* messageFmt, ...) = 0;
virtual bool error(PxErrorCode::Enum, const char* file, int line, const char* messageFmt, va_list) = 0;
protected:
virtual ~PxFoundation()
{
}
};
#if !PX_DOXYGEN
} // namespace physx
#endif
// PT: use this to make generated code shorter (e.g. from 52 to 24 bytes of assembly (10 to 4 instructions))
// We must use a macro here to let __FILE__ expand to the proper filename (it doesn't work with an inlined function).
#define PX_IMPLEMENT_OUTPUT_ERROR \
template<const int errorCode> \
static PX_NOINLINE bool outputError(int line, const char* message) \
{ \
return PxGetFoundation().error(PxErrorCode::Enum(errorCode), __FILE__, line, message); \
}
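// Example (illustrative sketch): a translation unit can expand the macro once and then report errors as
//     PX_IMPLEMENT_OUTPUT_ERROR
//     ...
//     return outputError<PxErrorCode::eINVALID_PARAMETER>(__LINE__, "invalid parameter");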
/**
\brief Creates an instance of the foundation class
The foundation class is needed to initialize higher level SDKs. There may be only one instance per process.
Calling this method after an instance has been created already will result in an error message and NULL will be
returned.
\param version Version number we are expecting (should be #PX_PHYSICS_VERSION)
\param allocator User supplied interface for allocating memory (see #PxAllocatorCallback)
\param errorCallback User supplied interface for reporting errors and displaying messages (see #PxErrorCallback)
\return Foundation instance on success, NULL if operation failed
@see PxFoundation
*/
PX_C_EXPORT PX_FOUNDATION_API physx::PxFoundation* PX_CALL_CONV PxCreateFoundation(physx::PxU32 version, physx::PxAllocatorCallback& allocator, physx::PxErrorCallback& errorCallback);
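// Example (illustrative sketch; PxDefaultAllocator and PxDefaultErrorCallback are the stock implementations
// shipped with the extensions library; any PxAllocatorCallback/PxErrorCallback implementation works):
//     static PxDefaultAllocator gAllocator;
//     static PxDefaultErrorCallback gErrorCallback;
//     PxFoundation* foundation = PxCreateFoundation(PX_PHYSICS_VERSION, gAllocator, gErrorCallback);
//     ...
//     foundation->release();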
PX_C_EXPORT PX_FOUNDATION_API void PX_CALL_CONV PxSetFoundationInstance(physx::PxFoundation& foundation);
/**
\brief Retrieves the Foundation SDK after it has been created.
\note The behavior of this method is undefined if the foundation instance has not been created already.
@see PxCreateFoundation(), PxIsFoundationValid()
*/
#if PX_CLANG
#if PX_LINUX
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wreturn-type-c-linkage"
#endif // PX_LINUX
#endif // PX_CLANG
PX_C_EXPORT PX_FOUNDATION_API physx::PxFoundation& PX_CALL_CONV PxGetFoundation();
#if PX_CLANG
#if PX_LINUX
#pragma clang diagnostic pop
#endif // PX_LINUX
#endif // PX_CLANG
/**
\brief Similar to PxGetFoundation() except it handles the case where the foundation has not been created yet.
\return Pointer to the foundation if an instance is currently available, otherwise null.
@see PxCreateFoundation(), PxGetFoundation()
*/
PX_C_EXPORT PX_FOUNDATION_API physx::PxFoundation* PX_CALL_CONV PxIsFoundationValid();
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxProfilerCallback;
class PxAllocatorCallback;
class PxErrorCallback;
#if !PX_DOXYGEN
} // namespace physx
#endif
/**
\brief Get the callback that will be used for all profiling.
*/
PX_C_EXPORT PX_FOUNDATION_API physx::PxProfilerCallback* PX_CALL_CONV PxGetProfilerCallback();
/**
\brief Set the callback that will be used for all profiling.
*/
PX_C_EXPORT PX_FOUNDATION_API void PX_CALL_CONV PxSetProfilerCallback(physx::PxProfilerCallback* profiler);
/**
\brief Get the allocator callback
*/
PX_C_EXPORT PX_FOUNDATION_API physx::PxAllocatorCallback* PX_CALL_CONV PxGetAllocatorCallback();
/**
\brief Get the broadcasting allocator callback
*/
PX_C_EXPORT PX_FOUNDATION_API physx::PxAllocatorCallback* PX_CALL_CONV PxGetBroadcastAllocator(bool* reportAllocationNames = NULL);
/**
\brief Get the error callback
*/
PX_C_EXPORT PX_FOUNDATION_API physx::PxErrorCallback* PX_CALL_CONV PxGetErrorCallback();
/**
\brief Get the broadcasting error callback
*/
PX_C_EXPORT PX_FOUNDATION_API physx::PxErrorCallback* PX_CALL_CONV PxGetBroadcastError();
/**
\brief Get the warn once timestamp
*/
PX_C_EXPORT PX_FOUNDATION_API physx::PxU32 PX_CALL_CONV PxGetWarnOnceTimeStamp();
/**
\brief Decrement the ref count of PxFoundation
*/
PX_C_EXPORT PX_FOUNDATION_API void PX_CALL_CONV PxDecFoundationRefCount();
/**
\brief Increment the ref count of PxFoundation
*/
PX_C_EXPORT PX_FOUNDATION_API void PX_CALL_CONV PxIncFoundationRefCount();
/** @} */
#endif
| 7,741 | C | 31.529412 | 183 | 0.760884 |
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxMat34.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_MAT34_H
#define PX_MAT34_H
/** \addtogroup foundation
@{
*/
#include "foundation/PxTransform.h"
#include "foundation/PxMat33.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/*!
Basic mathematical 3x4 matrix, implemented as a 3x3 rotation matrix and a translation vector.
See PxMat33 for the format of the rotation matrix.
*/
template<class Type>
class PxMat34T
{
public:
//! Default constructor
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T()
{
}
//! Construct from four base vectors
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T(const PxVec3T<Type>& b0, const PxVec3T<Type>& b1, const PxVec3T<Type>& b2, const PxVec3T<Type>& b3)
: m(b0, b1, b2), p(b3)
{
}
//! Construct from Type[12]
explicit PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T(Type values[]) :
m(values), p(values[9], values[10], values[11])
{
}
//! Construct from a 3x3 matrix
explicit PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T(const PxMat33T<Type>& other)
: m(other), p(PxZero)
{
}
//! Construct from a 3x3 matrix and a translation vector
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T(const PxMat33T<Type>& other, const PxVec3T<Type>& t)
: m(other), p(t)
{
}
//! Construct from a PxTransformT<Type>
explicit PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T(const PxTransformT<Type>& other)
: m(other.q), p(other.p)
{
}
//! Copy constructor
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T(const PxMat34T& other) : m(other.m), p(other.p)
{
}
//! Assignment operator
PX_CUDA_CALLABLE PX_FORCE_INLINE const PxMat34T& operator=(const PxMat34T& other)
{
m = other.m;
p = other.p;
return *this;
}
//! Set to identity matrix
PX_CUDA_CALLABLE PX_FORCE_INLINE void setIdentity()
{
m = PxMat33T<Type>(PxIdentity);
p = PxVec3T<Type>(0);
}
// Simpler operators
//! Equality operator
PX_CUDA_CALLABLE PX_FORCE_INLINE bool operator==(const PxMat34T& other) const
{
return m == other.m && p == other.p;
}
//! Inequality operator
PX_CUDA_CALLABLE PX_FORCE_INLINE bool operator!=(const PxMat34T& other) const
{
return !operator==(other);
}
//! Unary minus
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T operator-() const
{
return PxMat34T(-m, -p);
}
//! Add
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T operator+(const PxMat34T& other) const
{
return PxMat34T(m + other.m, p + other.p);
}
//! Subtract
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T operator-(const PxMat34T& other) const
{
return PxMat34T(m - other.m, p - other.p);
}
//! Scalar multiplication
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T operator*(Type scalar) const
{
return PxMat34T(m*scalar, p*scalar);
}
//! Matrix multiplication
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T operator*(const PxMat34T& other) const
{
//Rows from this <dot> columns from other
//base0 = rotate(other.m.column0) etc
return PxMat34T(m*other.m, m*other.p + p);
}
//! Matrix multiplication, extend the second matrix
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T operator*(const PxMat33T<Type>& other) const
{
//Rows from this <dot> columns from other
//base0 = transform(other.m.column0) etc
return PxMat34T(m*other, p);
}
template<class Type2>
friend PxMat34T<Type2> operator*(const PxMat33T<Type2>& a, const PxMat34T<Type2>& b);
// a <op>= b operators
//! Equals-add
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T& operator+=(const PxMat34T& other)
{
m += other.m;
p += other.p;
return *this;
}
//! Equals-sub
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T& operator-=(const PxMat34T& other)
{
m -= other.m;
p -= other.p;
return *this;
}
//! Equals scalar multiplication
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T& operator*=(Type scalar)
{
m *= scalar;
p *= scalar;
return *this;
}
//! Element access, mathematical way!
PX_CUDA_CALLABLE PX_FORCE_INLINE Type operator()(PxU32 row, PxU32 col) const
{
return (*this)[col][row];
}
//! Element access, mathematical way!
PX_CUDA_CALLABLE PX_FORCE_INLINE Type& operator()(PxU32 row, PxU32 col)
{
return (*this)[col][row];
}
// Transform etc
//! Transform vector by matrix, equal to v' = M*v
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> rotate(const PxVec3T<Type>& other) const
{
return m*other;
}
//! Transform vector by transpose of matrix, equal to v' = M^t*v
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> rotateTranspose(const PxVec3T<Type>& other) const
{
return m.transformTranspose(other);
}
//! Transform point by matrix
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> transform(const PxVec3T<Type>& other) const
{
return m*other + p;
}
//! Transform point by transposed matrix
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> transformTranspose(const PxVec3T<Type>& other) const
{
return m.transformTranspose(other - p);
}
//! Transform point by transposed matrix
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T transformTranspose(const PxMat34T& other) const
{
return PxMat34T(m.transformTranspose(other.m.column0),
m.transformTranspose(other.m.column1),
m.transformTranspose(other.m.column2),
m.transformTranspose(other.p - p));
}
//! Invert matrix treating it as a rotation+translation matrix only
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T getInverseRT() const
{
return PxMat34T(m.getTranspose(), m.transformTranspose(-p));
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type>& operator[](PxU32 num) { return (&m.column0)[num]; }
PX_CUDA_CALLABLE PX_FORCE_INLINE const PxVec3T<Type>& operator[](PxU32 num) const { return (&m.column0)[num]; }
//Data, see above for format!
PxMat33T<Type> m;
PxVec3T<Type> p;
};
//! Multiply a*b, a is extended
template<class Type>
PX_INLINE PxMat34T<Type> operator*(const PxMat33T<Type>& a, const PxMat34T<Type>& b)
{
return PxMat34T<Type>(a * b.m, a * b.p);
}
typedef PxMat34T<float> PxMat34;
typedef PxMat34T<double> PxMat34d;
//! A padded version of PxMat34, to safely load its data using SIMD
class PxMat34Padded : public PxMat34
{
public:
PX_FORCE_INLINE PxMat34Padded(const PxMat34& src) : PxMat34(src) {}
PX_FORCE_INLINE PxMat34Padded() {}
PX_FORCE_INLINE ~PxMat34Padded() {}
PxU32 padding;
};
PX_COMPILE_TIME_ASSERT(0==(sizeof(PxMat34Padded)&0x0f)); // the padded size must be a multiple of 16 bytes for SIMD loads
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
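/*
Example (illustrative sketch, not part of this header): building a PxMat34 from
a rigid transform, moving a point into and out of that frame, and the relation
between transform() and getInverseRT(). The numeric values are made up for the
example.

    #include "foundation/PxMat34.h"
    #include "foundation/PxAssert.h"

    void mat34Example()
    {
        using namespace physx;

        // A rigid frame: 90 degrees about Z, translated to (1, 2, 3).
        const PxQuat rotation(PxPi * 0.5f, PxVec3(0.0f, 0.0f, 1.0f));
        const PxTransform pose(PxVec3(1.0f, 2.0f, 3.0f), rotation);
        const PxMat34 m(pose);           // rotation part in m.m, translation in m.p

        const PxVec3 localPoint(1.0f, 0.0f, 0.0f);
        const PxVec3 worldPoint = m.transform(localPoint);   // m.m * localPoint + m.p, roughly (1, 3, 3)

        // getInverseRT() is only valid because m is a pure rotation + translation.
        const PxVec3 backAgain = m.getInverseRT().transform(worldPoint);
        PX_ASSERT((backAgain - localPoint).magnitude() < 1.0e-5f);

        // Element access is (row, col); storage is three columns plus the translation p.
        const float r01 = m(0, 1);       // row 0, column 1 == m.m.column1.x
        PX_UNUSED(r01);
    }
*/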
| 7,928 | C | 27.521583 | 142 | 0.712412 |
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxTempAllocator.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_TEMP_ALLOCATOR_H
#define PX_TEMP_ALLOCATOR_H
#include "foundation/PxAllocator.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
union PxTempAllocatorChunk
{
PxTempAllocatorChunk() : mNext(0)
{
}
PxTempAllocatorChunk* mNext; // while chunk is free
PxU32 mIndex; // while chunk is allocated
PxU8 mPad[16]; // 16 byte aligned allocations
};
class PxTempAllocator
{
public:
PX_FORCE_INLINE PxTempAllocator(const char* = 0)
{
}
PX_FOUNDATION_API void* allocate(size_t size, const char* file, PxI32 line);
PX_FOUNDATION_API void deallocate(void* ptr);
};
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif
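/*
Example (illustrative sketch, not part of this header): PxTempAllocator hands
out internally recycled, 16-byte padded scratch blocks (see the chunk union
above), which makes it suitable for short-lived working buffers. The file/line
pair is only used for tracking.

    #include "foundation/PxTempAllocator.h"
    #include <string.h>

    void tempAllocExample(const float* src, unsigned count)
    {
        physx::PxTempAllocator alloc;

        // Grab a temporary buffer, work on it, hand it back.
        float* scratch = static_cast<float*>(alloc.allocate(sizeof(float) * count, __FILE__, __LINE__));
        if(scratch)
        {
            memcpy(scratch, src, sizeof(float) * count);
            // ... transform the copy in place ...
            alloc.deallocate(scratch);
        }
    }
*/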
| 2,339 | C | 35.562499 | 77 | 0.750321 |
NVIDIA-Omniverse/PhysX/physx/include/foundation/PxAssert.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_ASSERT_H
#define PX_ASSERT_H
#include "foundation/PxFoundationConfig.h"
#include "foundation/Px.h"
/** \addtogroup foundation
@{
*/
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
* @brief Built-in assert function
*/
PX_FOUNDATION_API void PxAssert(const char* exp, const char* file, int line, bool& ignore);
#if !PX_ENABLE_ASSERTS
#define PX_ASSERT(exp) ((void)0)
#define PX_ALWAYS_ASSERT_MESSAGE(exp) ((void)0)
#define PX_ASSERT_WITH_MESSAGE(condition, message) ((void)0)
#else
#if PX_VC
#define PX_CODE_ANALYSIS_ASSUME(exp) \
__analysis_assume(!!(exp)) // This macro will be used to get rid of analysis warning messages if a PX_ASSERT is used
// to "guard" illegal mem access, for example.
#else
#define PX_CODE_ANALYSIS_ASSUME(exp)
#endif
#define PX_ASSERT(exp) \
{ \
static bool _ignore = false; \
((void)((!!(exp)) || (!_ignore && (physx::PxAssert(#exp, PX_FL, _ignore), false)))); \
PX_CODE_ANALYSIS_ASSUME(exp); \
}
#define PX_ALWAYS_ASSERT_MESSAGE(exp) \
{ \
static bool _ignore = false; \
if(!_ignore) \
physx::PxAssert(exp, PX_FL, _ignore); \
}
#define PX_ASSERT_WITH_MESSAGE(exp, message) \
{ \
static bool _ignore = false; \
((void)((!!(exp)) || (!_ignore && (physx::PxAssert(message, PX_FL, _ignore), false)))); \
PX_CODE_ANALYSIS_ASSUME(exp); \
}
#endif // !PX_ENABLE_ASSERTS
#define PX_ALWAYS_ASSERT() PX_ASSERT(0)
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
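/*
Example (illustrative sketch, not part of this header): how the assert macros
above behave. Each expansion carries its own static _ignore flag, which is
passed by reference to PxAssert() so the handler can silence that particular
assert site after it first fires. With PX_ENABLE_ASSERTS == 0 all three macros
compile to nothing.

    #include "foundation/PxAssert.h"

    float safeInvert(float x)
    {
        PX_ASSERT(x != 0.0f);                                    // plain condition
        PX_ASSERT_WITH_MESSAGE(x == x, "x must not be NaN");     // condition plus custom text
        if(x == 0.0f)
        {
            PX_ALWAYS_ASSERT_MESSAGE("unreachable: x was zero"); // unconditional report
            return 0.0f;
        }
        return 1.0f / x;
    }
*/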
| 3,371 | C | 36.054945 | 118 | 0.667161 |
NVIDIA-Omniverse/PhysX/physx/include/foundation/unix/PxUnixInlineAoS.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXFOUNDATION_PXUNIXINLINEAOS_H
#define PXFOUNDATION_PXUNIXINLINEAOS_H
#if !COMPILE_VECTOR_INTRINSICS
#error Vector intrinsics should not be included when using scalar implementation.
#endif
#if PX_INTEL_FAMILY
#include "foundation/unix/sse2/PxUnixSse2InlineAoS.h"
#elif PX_NEON
#include "foundation/unix/neon/PxUnixNeonInlineAoS.h"
#else
#error No SIMD implementation for this unix platform.
#endif
#endif // PXFOUNDATION_PXUNIXINLINEAOS_H
| 2,147 | C | 46.733332 | 81 | 0.777364 |
NVIDIA-Omniverse/PhysX/physx/include/foundation/unix/PxUnixAoS.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXFOUNDATION_PXUNIXAOS_H
#define PXFOUNDATION_PXUNIXAOS_H
// no includes here! this file should be included from PxcVecMath.h only!!!
#if !COMPILE_VECTOR_INTRINSICS
#error Vector intrinsics should not be included when using scalar implementation.
#endif
#if PX_INTEL_FAMILY
#include "foundation/unix/sse2/PxUnixSse2AoS.h"
#elif PX_NEON
#include "foundation/unix/neon/PxUnixNeonAoS.h"
#else
#error No SIMD implementation for this unix platform.
#endif
#endif // PXFOUNDATION_PXUNIXAOS_H
| 2,194 | C | 45.702127 | 81 | 0.773017 |
NVIDIA-Omniverse/PhysX/physx/include/foundation/unix/PxUnixIntrinsics.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSUNIXINTRINSICS_H
#define PSFOUNDATION_PSUNIXINTRINSICS_H
#include "foundation/PxAssert.h"
#include <math.h>
// this file is for internal intrinsics - that is, intrinsics that are used in
// cross platform code but do not appear in the API
#if !(PX_LINUX || PX_APPLE_FAMILY)
#error "This file should only be included by unix builds!!"
#endif
#if !PX_DOXYGEN
namespace physx
{
#endif
PX_FORCE_INLINE void PxMemoryBarrier()
{
__sync_synchronize();
}
/*!
Return the index of the highest set bit. Undefined for zero arg.
*/
PX_INLINE uint32_t PxHighestSetBitUnsafe(uint32_t v)
{
return uint32_t(31 - __builtin_clz(v));
}
/*!
Return the index of the lowest set bit. Undefined for zero arg.
*/
PX_INLINE uint32_t PxLowestSetBitUnsafe(uint32_t v)
{
return uint32_t(__builtin_ctz(v));
}
/*!
Returns the number of leading zero bits in v. Returns 32 for v=0.
*/
PX_INLINE uint32_t PxCountLeadingZeros(uint32_t v)
{
if(v)
return uint32_t(__builtin_clz(v));
else
return 32u;
}
/*!
Prefetch the aligned cache line (64 bytes on x86, 32 bytes on ARM) containing \c ptr+offset.
*/
PX_FORCE_INLINE void PxPrefetchLine(const void* ptr, uint32_t offset = 0)
{
#ifdef __CUDACC__
__builtin_prefetch(reinterpret_cast<const char*>(ptr) + offset, 0, 3);
#else
__builtin_prefetch(reinterpret_cast<const char* PX_RESTRICT>(ptr) + offset, 0, 3);
#endif
}
/*!
Prefetch \c count bytes starting at \c ptr.
*/
PX_FORCE_INLINE void PxPrefetch(const void* ptr, uint32_t count = 1)
{
const char* cp = reinterpret_cast<const char*>(ptr);
uint64_t p = size_t(ptr);
uint64_t startLine = p >> 6, endLine = (p + count - 1) >> 6;
uint64_t lines = endLine - startLine + 1;
do
{
PxPrefetchLine(cp);
cp += 64;
} while(--lines);
}
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif // #ifndef PSFOUNDATION_PSUNIXINTRINSICS_H
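/*
Example (illustrative sketch, not part of this header; normally reached through
the platform dispatch headers rather than included directly): expected results
of the bit-scan helpers above, plus a typical prefetch pattern. The values
follow directly from the definitions (31 - clz, ctz, clz).

    #include "foundation/unix/PxUnixIntrinsics.h"
    #include "foundation/PxAssert.h"

    void bitScanExample(const char* buffer, physx::PxU32 byteCount)
    {
        using namespace physx;

        PX_ASSERT(PxHighestSetBitUnsafe(0x00000001u) == 0);
        PX_ASSERT(PxHighestSetBitUnsafe(0x80000001u) == 31);
        PX_ASSERT(PxLowestSetBitUnsafe(0x00000018u) == 3);   // 0b11000 -> lowest set bit is bit 3
        PX_ASSERT(PxCountLeadingZeros(0x00000001u) == 31);
        PX_ASSERT(PxCountLeadingZeros(0u) == 32);            // only this helper is defined for zero

        // Warm the cache ahead of a sequential pass over the buffer.
        PxPrefetch(buffer, byteCount);
        // ... read buffer ...
    }
*/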
| 3,494 | C | 29.929203 | 83 | 0.732112 |
NVIDIA-Omniverse/PhysX/physx/include/foundation/unix/PxUnixMathIntrinsics.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXFOUNDATION_PXUNIXINTRINSICS_H
#define PXFOUNDATION_PXUNIXINTRINSICS_H
#include "foundation/Px.h"
#include "foundation/PxAssert.h"
#if !(PX_LINUX || PX_APPLE_FAMILY)
#error "This file should only be included by Unix builds!!"
#endif
#if PX_LINUX && !defined(__CUDACC__) && !PX_EMSCRIPTEN
// Linux and CUDA compilation does not work with std::isfinite, as it is not marked as CUDA callable
#include <cmath>
#ifndef isfinite
using std::isfinite;
#endif
#endif
#include <math.h>
#include <float.h>
#if !PX_DOXYGEN
namespace physx
{
#endif
namespace intrinsics
{
//! \brief platform-specific absolute value
PX_CUDA_CALLABLE PX_FORCE_INLINE float abs(float a)
{
return ::fabsf(a);
}
//! \brief platform-specific select float
PX_CUDA_CALLABLE PX_FORCE_INLINE float fsel(float a, float b, float c)
{
return (a >= 0.0f) ? b : c;
}
//! \brief platform-specific sign
PX_CUDA_CALLABLE PX_FORCE_INLINE float sign(float a)
{
return (a >= 0.0f) ? 1.0f : -1.0f;
}
//! \brief platform-specific reciprocal
PX_CUDA_CALLABLE PX_FORCE_INLINE float recip(float a)
{
return 1.0f / a;
}
//! \brief platform-specific reciprocal estimate
PX_CUDA_CALLABLE PX_FORCE_INLINE float recipFast(float a)
{
return 1.0f / a;
}
//! \brief platform-specific square root
PX_CUDA_CALLABLE PX_FORCE_INLINE float sqrt(float a)
{
return ::sqrtf(a);
}
//! \brief platform-specific reciprocal square root
PX_CUDA_CALLABLE PX_FORCE_INLINE float recipSqrt(float a)
{
return 1.0f / ::sqrtf(a);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE float recipSqrtFast(float a)
{
return 1.0f / ::sqrtf(a);
}
//! \brief platform-specific sine
PX_CUDA_CALLABLE PX_FORCE_INLINE float sin(float a)
{
return ::sinf(a);
}
//! \brief platform-specific cosine
PX_CUDA_CALLABLE PX_FORCE_INLINE float cos(float a)
{
return ::cosf(a);
}
//! \brief platform-specific minimum
PX_CUDA_CALLABLE PX_FORCE_INLINE float selectMin(float a, float b)
{
return a < b ? a : b;
}
//! \brief platform-specific maximum
PX_CUDA_CALLABLE PX_FORCE_INLINE float selectMax(float a, float b)
{
return a > b ? a : b;
}
//! \brief platform-specific finiteness check (not INF or NAN)
PX_CUDA_CALLABLE PX_FORCE_INLINE bool isFinite(float a)
{
//std::isfinite not recommended as of Feb 2017, since it doesn't work with g++/clang's floating point optimization.
union localU { PxU32 i; float f; } floatUnion;
floatUnion.f = a;
return !((floatUnion.i & 0x7fffffff) >= 0x7f800000);
}
//! \brief platform-specific finiteness check (not INF or NAN)
PX_CUDA_CALLABLE PX_FORCE_INLINE bool isFinite(double a)
{
return !!isfinite(a);
}
/*!
Sets \c count bytes starting at \c dest to zero.
*/
PX_FORCE_INLINE void* memZero(void* dest, uint32_t count)
{
return memset(dest, 0, count);
}
/*!
Sets \c count bytes starting at \c dest to \c c.
*/
PX_FORCE_INLINE void* memSet(void* dest, int32_t c, uint32_t count)
{
return memset(dest, c, count);
}
/*!
Copies \c count bytes from \c src to \c dest. Use memMove if the regions overlap.
*/
PX_FORCE_INLINE void* memCopy(void* dest, const void* src, uint32_t count)
{
return memcpy(dest, src, count);
}
/*!
Copies \c count bytes from \c src to \c dest. Supports overlapping regions.
*/
PX_FORCE_INLINE void* memMove(void* dest, const void* src, uint32_t count)
{
return memmove(dest, src, count);
}
} // namespace intrinsics
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif // #ifndef PXFOUNDATION_PXUNIXINTRINSICS_H
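/*
Example (illustrative sketch, not part of this header; normally reached through
the platform dispatch headers): what the float isFinite() above actually tests.
Masking off the sign bit with 0x7fffffff and comparing against 0x7f800000 flags
every value whose exponent field is all ones, which under IEEE-754 single
precision is exactly +/-infinity and every NaN payload.

    #include "foundation/unix/PxUnixMathIntrinsics.h"
    #include "foundation/PxAssert.h"
    #include <limits>

    void isFiniteExample()
    {
        using namespace physx;

        PX_ASSERT( intrinsics::isFinite(1.0f));
        PX_ASSERT( intrinsics::isFinite(-0.0f));                                   // sign bit is masked off
        PX_ASSERT(!intrinsics::isFinite(std::numeric_limits<float>::infinity()));  // exponent all ones, mantissa zero
        PX_ASSERT(!intrinsics::isFinite(std::numeric_limits<float>::quiet_NaN())); // exponent all ones, mantissa non-zero

        // fsel() is a branch-shaped select: fsel(a, b, c) == (a >= 0 ? b : c).
        PX_ASSERT(intrinsics::fsel(-2.0f, 1.0f, 0.0f) == 0.0f);
    }
*/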
| 5,123 | C | 27.309392 | 116 | 0.723795 |
NVIDIA-Omniverse/PhysX/physx/include/foundation/unix/PxUnixTrigConstants.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXFOUNDATION_PXUNIXTRIGCONSTANTS_H
#define PXFOUNDATION_PXUNIXTRIGCONSTANTS_H
#include "foundation/PxPreprocessor.h"
namespace physx
{
namespace aos
{
#if PX_CLANG
#if PX_LINUX
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wmissing-variable-declarations"
#endif
#endif
#define PX_GLOBALCONST extern const __attribute__((weak))
PX_ALIGN_PREFIX(16)
struct PX_VECTORF32
{
float f[4];
} PX_ALIGN_SUFFIX(16);
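// The tables below hold the truncated Taylor series used by the vectorised
// sin/cos: each one starts at the linear/constant term (1.0) and continues
// with alternating reciprocal factorials (-1/3!, 1/5!, -1/7!, ... for sine;
// -1/2!, 1/4!, -1/6!, ... for cosine), split into 4-wide vectors so a whole
// series evaluation can be fed from aligned vector loads.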
PX_GLOBALCONST PX_VECTORF32 g_PXSinCoefficients0 = { { 1.0f, -0.166666667f, 8.333333333e-3f, -1.984126984e-4f } };
PX_GLOBALCONST PX_VECTORF32
g_PXSinCoefficients1 = { { 2.755731922e-6f, -2.505210839e-8f, 1.605904384e-10f, -7.647163732e-13f } };
PX_GLOBALCONST PX_VECTORF32
g_PXSinCoefficients2 = { { 2.811457254e-15f, -8.220635247e-18f, 1.957294106e-20f, -3.868170171e-23f } };
PX_GLOBALCONST PX_VECTORF32 g_PXCosCoefficients0 = { { 1.0f, -0.5f, 4.166666667e-2f, -1.388888889e-3f } };
PX_GLOBALCONST PX_VECTORF32
g_PXCosCoefficients1 = { { 2.480158730e-5f, -2.755731922e-7f, 2.087675699e-9f, -1.147074560e-11f } };
PX_GLOBALCONST PX_VECTORF32
g_PXCosCoefficients2 = { { 4.779477332e-14f, -1.561920697e-16f, 4.110317623e-19f, -8.896791392e-22f } };
PX_GLOBALCONST PX_VECTORF32 g_PXReciprocalTwoPi = { { PxInvTwoPi, PxInvTwoPi, PxInvTwoPi, PxInvTwoPi } };
PX_GLOBALCONST PX_VECTORF32 g_PXTwoPi = { { PxTwoPi, PxTwoPi, PxTwoPi, PxTwoPi } };
#if PX_CLANG
#if PX_LINUX
#pragma clang diagnostic pop
#endif
#endif
} // namespace aos
} // namespace physx
#endif //PXFOUNDATION_PXUNIXTRIGCONSTANTS_H
| 3,228 | C | 41.486842 | 114 | 0.758055 |
NVIDIA-Omniverse/PhysX/physx/include/foundation/unix/PxUnixFPU.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXFOUNDATION_PXUNIXFPU_H
#define PXFOUNDATION_PXUNIXFPU_H
#include "foundation/PxPreprocessor.h"
#if PX_LINUX || PX_OSX
#if PX_X86 || PX_X64
#if PX_EMSCRIPTEN
#include <emmintrin.h>
#endif
#include <xmmintrin.h>
#elif PX_NEON
#include <arm_neon.h>
#endif
PX_INLINE physx::PxSIMDGuard::PxSIMDGuard(bool enable)
#if !PX_EMSCRIPTEN && (PX_X86 || PX_X64)
: mEnabled(enable)
#endif
{
#if !PX_EMSCRIPTEN && (PX_X86 || PX_X64)
if(enable)
{
mControlWord = _mm_getcsr();
// set default (disable exceptions: _MM_MASK_MASK) and FTZ (_MM_FLUSH_ZERO_ON), DAZ (_MM_DENORMALS_ZERO_ON: (1<<6))
_mm_setcsr(_MM_MASK_MASK | _MM_FLUSH_ZERO_ON | (1 << 6));
}
else
{
PX_UNUSED(enable);
PX_ASSERT(_mm_getcsr() & _MM_FLUSH_ZERO_ON);
PX_ASSERT(_mm_getcsr() & (1 << 6));
PX_ASSERT(_mm_getcsr() & _MM_MASK_MASK);
}
#endif
}
PX_INLINE physx::PxSIMDGuard::~PxSIMDGuard()
{
#if !PX_EMSCRIPTEN && (PX_X86 || PX_X64)
if(mEnabled)
{
// restore control word and clear exception flags
// (setting exception state flags cause exceptions on the first following fp operation)
_mm_setcsr(mControlWord & PxU32(~_MM_EXCEPT_MASK));
}
#endif
}
#else
#error No SIMD implementation for this unix platform.
#endif // PX_LINUX || PX_OSX
#endif // #ifndef PXFOUNDATION_PXUNIXFPU_H
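/*
Example (illustrative sketch, not part of this header): PxSIMDGuard is an RAII
helper around the MXCSR manipulation above. On x86/x64, constructing it with
enable=true saves the control word, masks all SSE exceptions and turns on
flush-to-zero / denormals-are-zero; the destructor restores the saved word
(with the sticky exception flags cleared). With enable=false it merely asserts
that the caller already runs with those settings. The include below is an
assumption; use whichever foundation header declares PxSIMDGuard in your SDK
layout.

    #include "foundation/Px.h"

    void simdHotLoop(float* data, unsigned count)
    {
        physx::PxSIMDGuard guard(true);   // FTZ/DAZ + masked exceptions for this scope
        for(unsigned i = 0; i < count; ++i)
            data[i] *= 2.0f;              // SIMD/FP work happens under the guarded FP state
    }                                     // destructor restores the previous MXCSR here
*/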
| 2,976 | C | 34.440476 | 117 | 0.727487 |
NVIDIA-Omniverse/PhysX/physx/include/foundation/unix/sse2/PxUnixSse2InlineAoS.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXFOUNDATION_PXUNIXSSE2INLINEAOS_H
#define PXFOUNDATION_PXUNIXSSE2INLINEAOS_H
#if !COMPILE_VECTOR_INTRINSICS
#error Vector intrinsics should not be included when using scalar implementation.
#endif
#ifdef __SSE4_2__
#include "smmintrin.h"
#endif
#include "../../PxVecMathSSE.h"
namespace physx
{
namespace aos
{
#define PX_FPCLASS_SNAN 0x0001 /* signaling NaN */
#define PX_FPCLASS_QNAN 0x0002 /* quiet NaN */
#define PX_FPCLASS_NINF 0x0004 /* negative infinity */
#define PX_FPCLASS_PINF 0x0200 /* positive infinity */
PX_FORCE_INLINE __m128 m128_I2F(__m128i n)
{
return _mm_castsi128_ps(n);
}
PX_FORCE_INLINE __m128i m128_F2I(__m128 n)
{
return _mm_castps_si128(n);
}
//////////////////////////////////////////////////////////////////////
//Test that Vec3V and FloatV are legal
//////////////////////////////////////////////////////////////////////
#define FLOAT_COMPONENTS_EQUAL_THRESHOLD 0.01f
PX_FORCE_INLINE static bool isValidFloatV(const FloatV a)
{
const PxF32 x = V4ReadX(a);
const PxF32 y = V4ReadY(a);
const PxF32 z = V4ReadZ(a);
const PxF32 w = V4ReadW(a);
return (x == y && x == z && x == w);
/*if (
(PxAbs(x - y) < FLOAT_COMPONENTS_EQUAL_THRESHOLD) &&
(PxAbs(x - z) < FLOAT_COMPONENTS_EQUAL_THRESHOLD) &&
(PxAbs(x - w) < FLOAT_COMPONENTS_EQUAL_THRESHOLD)
)
{
return true;
}
if (
(PxAbs((x - y) / x) < FLOAT_COMPONENTS_EQUAL_THRESHOLD) &&
(PxAbs((x - z) / x) < FLOAT_COMPONENTS_EQUAL_THRESHOLD) &&
(PxAbs((x - w) / x) < FLOAT_COMPONENTS_EQUAL_THRESHOLD)
)
{
return true;
}
return false;*/
}
PX_FORCE_INLINE bool isValidVec3V(const Vec3V a)
{
PX_ALIGN(16, PxF32 f[4]);
V4StoreA(a, f);
return (f[3] == 0.0f);
}
PX_FORCE_INLINE bool isFiniteLength(const Vec3V a)
{
return !FAllEq(V4LengthSq(a), FZero());
}
PX_FORCE_INLINE bool isAligned16(void* a)
{
return(0 == (size_t(a) & 0x0f));
}
//ASSERT_ISFINITELENGTH is deactivated because a lot of code calls a SIMD normalisation function with zero length and then ignores the result.
#if PX_DEBUG
#define ASSERT_ISVALIDVEC3V(a) PX_ASSERT(isValidVec3V(a))
#define ASSERT_ISVALIDFLOATV(a) PX_ASSERT(isValidFloatV(a))
#define ASSERT_ISALIGNED16(a) PX_ASSERT(isAligned16(reinterpret_cast<void*>(a)))
#define ASSERT_ISFINITELENGTH(a) //PX_ASSERT(isFiniteLength(a))
#else
#define ASSERT_ISVALIDVEC3V(a)
#define ASSERT_ISVALIDFLOATV(a)
#define ASSERT_ISALIGNED16(a)
#define ASSERT_ISFINITELENGTH(a)
#endif
namespace internalUnitSSE2Simd
{
PX_FORCE_INLINE PxU32 BAllTrue4_R(const BoolV a)
{
const PxI32 moveMask = _mm_movemask_ps(a);
return PxU32(moveMask == 0xf);
}
PX_FORCE_INLINE PxU32 BAllTrue3_R(const BoolV a)
{
const PxI32 moveMask = _mm_movemask_ps(a);
return PxU32((moveMask & 0x7) == 0x7);
}
PX_FORCE_INLINE PxU32 BAnyTrue4_R(const BoolV a)
{
const PxI32 moveMask = _mm_movemask_ps(a);
return PxU32(moveMask != 0x0);
}
PX_FORCE_INLINE PxU32 BAnyTrue3_R(const BoolV a)
{
const PxI32 moveMask = _mm_movemask_ps(a);
return PxU32((moveMask & 0x7) != 0x0);
}
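// Note on the helpers above: _mm_movemask_ps packs the sign bit of each of the
// four lanes into bits 0..3 of an integer (bit 0 = x, bit 3 = w). A BoolV lane
// is either all-ones or all-zeros, so its sign bit doubles as its truth value:
// a mask of 0xf means "all four lanes true", and masking with 0x7 ignores the
// w lane for the 3-component variants.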
PX_FORCE_INLINE PxU32 FiniteTestEq(const Vec4V a, const Vec4V b)
{
// This is a bit of a bodge.
//_mm_comieq_ss returns 1 if either value is nan so we need to re-cast a and b with true encoded as a non-nan
// number.
// There must be a better way of doing this in sse.
const BoolV one = FOne();
const BoolV zero = FZero();
const BoolV a1 = V4Sel(a, one, zero);
const BoolV b1 = V4Sel(b, one, zero);
return (
_mm_comieq_ss(a1, b1) &&
_mm_comieq_ss(_mm_shuffle_ps(a1, a1, _MM_SHUFFLE(1, 1, 1, 1)), _mm_shuffle_ps(b1, b1, _MM_SHUFFLE(1, 1, 1, 1))) &&
_mm_comieq_ss(_mm_shuffle_ps(a1, a1, _MM_SHUFFLE(2, 2, 2, 2)), _mm_shuffle_ps(b1, b1, _MM_SHUFFLE(2, 2, 2, 2))) &&
_mm_comieq_ss(_mm_shuffle_ps(a1, a1, _MM_SHUFFLE(3, 3, 3, 3)), _mm_shuffle_ps(b1, b1, _MM_SHUFFLE(3, 3, 3, 3))));
}
#if !PX_EMSCRIPTEN
#if PX_CLANG
#if PX_LINUX
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wglobal-constructors"
#endif
#endif
const PX_ALIGN(16, PxF32 gMaskXYZ[4]) = { physx::PxUnionCast<PxF32>(0xffffffff), physx::PxUnionCast<PxF32>(0xffffffff),
physx::PxUnionCast<PxF32>(0xffffffff), 0 };
#if PX_CLANG
#if PX_LINUX
#pragma clang diagnostic pop
#endif
#endif
#else
// emscripten doesn't like the PxUnionCast data structure
// the following is what the Windows and Xbox builds do -- using these for emscripten
const PX_ALIGN(16, PxU32 gMaskXYZ[4]) = { 0xffffffff, 0xffffffff, 0xffffffff, 0 };
#endif
}
namespace vecMathTests
{
// PT: this function returns an invalid Vec3V (W!=0.0f) just for unit-testing 'isValidVec3V'
PX_FORCE_INLINE Vec3V getInvalidVec3V()
{
const float f = 1.0f;
return _mm_load1_ps(&f);
}
PX_FORCE_INLINE bool allElementsEqualFloatV(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return _mm_comieq_ss(a, b) != 0;
}
PX_FORCE_INLINE bool allElementsEqualVec3V(const Vec3V a, const Vec3V b)
{
return V3AllEq(a, b) != 0;
}
PX_FORCE_INLINE bool allElementsEqualVec4V(const Vec4V a, const Vec4V b)
{
return V4AllEq(a, b) != 0;
}
PX_FORCE_INLINE bool allElementsEqualBoolV(const BoolV a, const BoolV b)
{
return internalUnitSSE2Simd::BAllTrue4_R(VecI32V_IsEq(m128_F2I(a), m128_F2I(b))) != 0;
}
PX_FORCE_INLINE bool allElementsEqualVecU32V(const VecU32V a, const VecU32V b)
{
return internalUnitSSE2Simd::BAllTrue4_R(V4IsEqU32(a, b)) != 0;
}
PX_FORCE_INLINE bool allElementsEqualVecI32V(const VecI32V a, const VecI32V b)
{
BoolV c = m128_I2F(_mm_cmpeq_epi32(a, b));
return internalUnitSSE2Simd::BAllTrue4_R(c) != 0;
}
#define VECMATH_AOS_EPSILON (1e-3f)
PX_FORCE_INLINE bool allElementsNearEqualFloatV(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
const FloatV c = FSub(a, b);
const FloatV minError = FLoad(-VECMATH_AOS_EPSILON);
const FloatV maxError = FLoad(VECMATH_AOS_EPSILON);
return _mm_comigt_ss(c, minError) && _mm_comilt_ss(c, maxError);
}
PX_FORCE_INLINE bool allElementsNearEqualVec3V(const Vec3V a, const Vec3V b)
{
const Vec3V c = V3Sub(a, b);
const Vec3V minError = V3Load(-VECMATH_AOS_EPSILON);
const Vec3V maxError = V3Load(VECMATH_AOS_EPSILON);
return (_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0, 0, 0, 0)), minError) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0, 0, 0, 0)), maxError) &&
_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1, 1, 1, 1)), minError) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1, 1, 1, 1)), maxError) &&
_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2, 2, 2, 2)), minError) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2, 2, 2, 2)), maxError));
}
PX_FORCE_INLINE bool allElementsNearEqualVec4V(const Vec4V a, const Vec4V b)
{
const Vec4V c = V4Sub(a, b);
const Vec4V minError = V4Load(-VECMATH_AOS_EPSILON);
const Vec4V maxError = V4Load(VECMATH_AOS_EPSILON);
return (_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0, 0, 0, 0)), minError) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0, 0, 0, 0)), maxError) &&
_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1, 1, 1, 1)), minError) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1, 1, 1, 1)), maxError) &&
_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2, 2, 2, 2)), minError) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2, 2, 2, 2)), maxError) &&
_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(3, 3, 3, 3)), minError) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(3, 3, 3, 3)), maxError));
}
}
/////////////////////////////////////////////////////////////////////
////FUNCTIONS USED ONLY FOR ASSERTS IN VECTORISED IMPLEMENTATIONS
/////////////////////////////////////////////////////////////////////
PX_FORCE_INLINE bool isFiniteFloatV(const FloatV a)
{
PxF32 badNumber =
physx::PxUnionCast<PxF32, PxU32>(PX_FPCLASS_SNAN | PX_FPCLASS_QNAN | PX_FPCLASS_NINF | PX_FPCLASS_PINF);
const FloatV vBadNum = FLoad(badNumber);
const BoolV vMask = BAnd(vBadNum, a);
return internalUnitSSE2Simd::FiniteTestEq(vMask, BFFFF()) == 1;
}
PX_FORCE_INLINE bool isFiniteVec3V(const Vec3V a)
{
PxF32 badNumber =
physx::PxUnionCast<PxF32, PxU32>(PX_FPCLASS_SNAN | PX_FPCLASS_QNAN | PX_FPCLASS_NINF | PX_FPCLASS_PINF);
const Vec3V vBadNum = V3Load(badNumber);
const BoolV vMask = BAnd(BAnd(vBadNum, a), BTTTF());
return internalUnitSSE2Simd::FiniteTestEq(vMask, BFFFF()) == 1;
}
PX_FORCE_INLINE bool isFiniteVec4V(const Vec4V a)
{
/*Vec4V a;
PX_ALIGN(16, PxF32 f[4]);
F32Array_Aligned_From_Vec4V(a, f);
return PxIsFinite(f[0])
&& PxIsFinite(f[1])
&& PxIsFinite(f[2])
&& PxIsFinite(f[3]);*/
PxF32 badNumber =
physx::PxUnionCast<PxF32, PxU32>(PX_FPCLASS_SNAN | PX_FPCLASS_QNAN | PX_FPCLASS_NINF | PX_FPCLASS_PINF);
const Vec4V vBadNum = V4Load(badNumber);
const BoolV vMask = BAnd(vBadNum, a);
return internalUnitSSE2Simd::FiniteTestEq(vMask, BFFFF()) == 1;
}
PX_FORCE_INLINE bool hasZeroElementinFloatV(const FloatV a)
{
ASSERT_ISVALIDFLOATV(a);
return _mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 0, 0, 0)), FZero()) ? true : false;
}
PX_FORCE_INLINE bool hasZeroElementInVec3V(const Vec3V a)
{
return (_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 0, 0, 0)), FZero()) ||
_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(1, 1, 1, 1)), FZero()) ||
_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 2, 2, 2)), FZero()));
}
PX_FORCE_INLINE bool hasZeroElementInVec4V(const Vec4V a)
{
return (_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 0, 0, 0)), FZero()) ||
_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(1, 1, 1, 1)), FZero()) ||
_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 2, 2, 2)), FZero()) ||
_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 3, 3, 3)), FZero()));
}
/////////////////////////////////////////////////////////////////////
////VECTORISED FUNCTION IMPLEMENTATIONS
/////////////////////////////////////////////////////////////////////
PX_FORCE_INLINE FloatV FLoad(const PxF32 f)
{
return _mm_load1_ps(&f);
}
PX_FORCE_INLINE Vec3V V3Load(const PxF32 f)
{
return _mm_set_ps(0.0f, f, f, f);
}
PX_FORCE_INLINE Vec4V V4Load(const PxF32 f)
{
return _mm_load1_ps(&f);
}
PX_FORCE_INLINE BoolV BLoad(const bool f)
{
const PxU32 i = PxU32(-PxI32(f));
return _mm_load1_ps(reinterpret_cast<const float*>(&i));
}
PX_FORCE_INLINE Vec3V V3LoadA(const PxVec3& f)
{
ASSERT_ISALIGNED16(const_cast<PxVec3*>(&f));
#if !PX_EMSCRIPTEN
return _mm_and_ps(reinterpret_cast<const Vec3V&>(f), V4LoadA(internalUnitSSE2Simd::gMaskXYZ));
#else
return _mm_and_ps((Vec3V&)f, (VecI32V&)internalUnitSSE2Simd::gMaskXYZ);
#endif
}
PX_FORCE_INLINE Vec3V V3LoadU(const PxVec3& f)
{
return _mm_set_ps(0.0f, f.z, f.y, f.x);
}
PX_FORCE_INLINE Vec3V V3LoadUnsafeA(const PxVec3& f)
{
ASSERT_ISALIGNED16(const_cast<PxVec3*>(&f));
return _mm_set_ps(0.0f, f.z, f.y, f.x);
}
PX_FORCE_INLINE Vec3V V3LoadA(const PxF32* const f)
{
ASSERT_ISALIGNED16(const_cast<PxF32*>(f));
#if !PX_EMSCRIPTEN
return _mm_and_ps(V4LoadA(f), V4LoadA(internalUnitSSE2Simd::gMaskXYZ));
#else
return _mm_and_ps((Vec3V&)*f, (VecI32V&)internalUnitSSE2Simd::gMaskXYZ);
#endif
}
PX_FORCE_INLINE Vec3V V3LoadU(const PxF32* const i)
{
return _mm_set_ps(0.0f, i[2], i[1], i[0]);
}
PX_FORCE_INLINE Vec3V Vec3V_From_Vec4V(Vec4V v)
{
return V4ClearW(v);
}
PX_FORCE_INLINE Vec3V Vec3V_From_Vec4V_WUndefined(const Vec4V v)
{
return v;
}
PX_FORCE_INLINE Vec4V Vec4V_From_Vec3V(Vec3V f)
{
ASSERT_ISVALIDVEC3V(f);
return f; // ok if it is implemented as the same type.
}
PX_FORCE_INLINE Vec4V Vec4V_From_PxVec3_WUndefined(const PxVec3& f)
{
return _mm_set_ps(0.0f, f.z, f.y, f.x);
}
PX_FORCE_INLINE Vec4V Vec4V_From_FloatV(FloatV f)
{
return f;
}
PX_FORCE_INLINE Vec3V Vec3V_From_FloatV(FloatV f)
{
ASSERT_ISVALIDFLOATV(f);
return Vec3V_From_Vec4V(Vec4V_From_FloatV(f));
}
PX_FORCE_INLINE Vec3V Vec3V_From_FloatV_WUndefined(FloatV f)
{
ASSERT_ISVALIDVEC3V(f);
return Vec3V_From_Vec4V_WUndefined(Vec4V_From_FloatV(f));
}
PX_FORCE_INLINE Mat33V Mat33V_From_PxMat33(const PxMat33& m)
{
return Mat33V(V3LoadU(m.column0), V3LoadU(m.column1), V3LoadU(m.column2));
}
PX_FORCE_INLINE void PxMat33_From_Mat33V(const Mat33V& m, PxMat33& out)
{
V3StoreU(m.col0, out.column0);
V3StoreU(m.col1, out.column1);
V3StoreU(m.col2, out.column2);
}
PX_FORCE_INLINE Vec4V V4LoadA(const PxF32* const f)
{
ASSERT_ISALIGNED16(const_cast<PxF32*>(f));
return _mm_load_ps(f);
}
PX_FORCE_INLINE void V4StoreA(Vec4V a, PxF32* f)
{
ASSERT_ISALIGNED16(f);
_mm_store_ps(f, a);
}
PX_FORCE_INLINE void V4StoreU(const Vec4V a, PxF32* f)
{
_mm_storeu_ps(f, a);
}
PX_FORCE_INLINE void BStoreA(const BoolV a, PxU32* f)
{
ASSERT_ISALIGNED16(f);
_mm_store_ps(reinterpret_cast<PxF32*>(f), a);
}
PX_FORCE_INLINE void U4StoreA(const VecU32V uv, PxU32* u)
{
ASSERT_ISALIGNED16(u);
_mm_store_ps(reinterpret_cast<float*>(u), uv);
}
PX_FORCE_INLINE VecI32V I4LoadXYZW(const PxI32& x, const PxI32& y, const PxI32& z, const PxI32& w)
{
return _mm_set_epi32(w, z, y, x);
}
PX_FORCE_INLINE void I4StoreA(const VecI32V iv, PxI32* i)
{
ASSERT_ISALIGNED16(i);
_mm_store_ps(reinterpret_cast<float*>(i), m128_I2F(iv));
}
PX_FORCE_INLINE Vec4V V4LoadU(const PxF32* const f)
{
return _mm_loadu_ps(f);
}
PX_FORCE_INLINE BoolV BLoad(const bool* const f)
{
const PX_ALIGN(16, PxI32) b[4] = { -PxI32(f[0]), -PxI32(f[1]), -PxI32(f[2]), -PxI32(f[3]) };
return _mm_load_ps(reinterpret_cast<const float*>(&b));
}
PX_FORCE_INLINE void FStore(const FloatV a, PxF32* PX_RESTRICT f)
{
ASSERT_ISVALIDFLOATV(a);
_mm_store_ss(f, a);
}
PX_FORCE_INLINE void V3StoreA(const Vec3V a, PxVec3& f)
{
ASSERT_ISALIGNED16(&f);
PX_ALIGN(16, PxF32) f2[4];
_mm_store_ps(f2, a);
f = PxVec3(f2[0], f2[1], f2[2]);
}
PX_FORCE_INLINE void V3StoreU(const Vec3V a, PxVec3& f)
{
PX_ALIGN(16, PxF32) f2[4];
_mm_store_ps(f2, a);
f = PxVec3(f2[0], f2[1], f2[2]);
}
PX_FORCE_INLINE void Store_From_BoolV(const BoolV b, PxU32* b2)
{
_mm_store_ss(reinterpret_cast<PxF32*>(b2), b);
}
PX_FORCE_INLINE VecU32V U4Load(const PxU32 i)
{
return _mm_load1_ps(reinterpret_cast<const PxF32*>(&i));
}
PX_FORCE_INLINE VecU32V U4LoadU(const PxU32* i)
{
return _mm_loadu_ps(reinterpret_cast<const PxF32*>(i));
}
PX_FORCE_INLINE VecU32V U4LoadA(const PxU32* i)
{
ASSERT_ISALIGNED16(const_cast<PxU32*>(i));
return _mm_load_ps(reinterpret_cast<const PxF32*>(i));
}
//////////////////////////////////
// FLOATV
//////////////////////////////////
PX_FORCE_INLINE FloatV FZero()
{
return FLoad(0.0f);
}
PX_FORCE_INLINE FloatV FOne()
{
return FLoad(1.0f);
}
PX_FORCE_INLINE FloatV FHalf()
{
return FLoad(0.5f);
}
PX_FORCE_INLINE FloatV FEps()
{
return FLoad(PX_EPS_REAL);
}
PX_FORCE_INLINE FloatV FEps6()
{
return FLoad(1e-6f);
}
PX_FORCE_INLINE FloatV FMax()
{
return FLoad(PX_MAX_REAL);
}
PX_FORCE_INLINE FloatV FNegMax()
{
return FLoad(-PX_MAX_REAL);
}
PX_FORCE_INLINE FloatV IZero()
{
const PxU32 zero = 0;
return _mm_load1_ps(reinterpret_cast<const PxF32*>(&zero));
}
PX_FORCE_INLINE FloatV IOne()
{
const PxU32 one = 1;
return _mm_load1_ps(reinterpret_cast<const PxF32*>(&one));
}
PX_FORCE_INLINE FloatV ITwo()
{
const PxU32 two = 2;
return _mm_load1_ps(reinterpret_cast<const PxF32*>(&two));
}
PX_FORCE_INLINE FloatV IThree()
{
const PxU32 three = 3;
return _mm_load1_ps(reinterpret_cast<const PxF32*>(&three));
}
PX_FORCE_INLINE FloatV IFour()
{
PxU32 four = 4;
return _mm_load1_ps(reinterpret_cast<const PxF32*>(&four));
}
PX_FORCE_INLINE FloatV FNeg(const FloatV f)
{
ASSERT_ISVALIDFLOATV(f);
return _mm_sub_ps(_mm_setzero_ps(), f);
}
PX_FORCE_INLINE FloatV FAdd(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
/*
if(!isValidFloatV(a))
{
assert(false);
}
if(!isValidFloatV(b))
{
assert(false);
}
*/
return _mm_add_ps(a, b);
}
PX_FORCE_INLINE FloatV FSub(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return _mm_sub_ps(a, b);
}
PX_FORCE_INLINE FloatV FMul(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return _mm_mul_ps(a, b);
}
PX_FORCE_INLINE FloatV FDiv(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return _mm_div_ps(a, b);
}
PX_FORCE_INLINE FloatV FDivFast(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return _mm_mul_ps(a, _mm_rcp_ps(b));
}
PX_FORCE_INLINE FloatV FRecip(const FloatV a)
{
ASSERT_ISVALIDFLOATV(a);
return _mm_div_ps(FOne(), a);
}
PX_FORCE_INLINE FloatV FRecipFast(const FloatV a)
{
ASSERT_ISVALIDFLOATV(a);
return _mm_rcp_ps(a);
}
PX_FORCE_INLINE FloatV FRsqrt(const FloatV a)
{
ASSERT_ISVALIDFLOATV(a);
return _mm_div_ps(FOne(), _mm_sqrt_ps(a));
}
PX_FORCE_INLINE FloatV FSqrt(const FloatV a)
{
ASSERT_ISVALIDFLOATV(a);
return _mm_sqrt_ps(a);
}
PX_FORCE_INLINE FloatV FRsqrtFast(const FloatV a)
{
ASSERT_ISVALIDFLOATV(a);
return _mm_rsqrt_ps(a);
}
PX_FORCE_INLINE FloatV FScaleAdd(const FloatV a, const FloatV b, const FloatV c)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
ASSERT_ISVALIDFLOATV(c);
return FAdd(FMul(a, b), c);
}
PX_FORCE_INLINE FloatV FNegScaleSub(const FloatV a, const FloatV b, const FloatV c)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
ASSERT_ISVALIDFLOATV(c);
return FSub(c, FMul(a, b));
}
PX_FORCE_INLINE FloatV FAbs(const FloatV a)
{
ASSERT_ISVALIDFLOATV(a);
PX_ALIGN(16, const PxU32) absMask[4] = { 0x7fFFffFF, 0x7fFFffFF, 0x7fFFffFF, 0x7fFFffFF };
return _mm_and_ps(a, _mm_load_ps(reinterpret_cast<const PxF32*>(absMask)));
}
PX_FORCE_INLINE FloatV FSel(const BoolV c, const FloatV a, const FloatV b)
{
PX_ASSERT(vecMathTests::allElementsEqualBoolV(c,BTTTT()) ||
vecMathTests::allElementsEqualBoolV(c,BFFFF()));
ASSERT_ISVALIDFLOATV(_mm_or_ps(_mm_andnot_ps(c, b), _mm_and_ps(c, a)));
return _mm_or_ps(_mm_andnot_ps(c, b), _mm_and_ps(c, a));
}
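// Note: FSel() is the classic branch-free select, (c & a) | (~c & b). Because a
// FloatV replicates one scalar across all lanes, the mask c must be uniformly
// all-true or all-false, which is exactly what the assertion above checks.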
PX_FORCE_INLINE BoolV FIsGrtr(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return _mm_cmpgt_ps(a, b);
}
PX_FORCE_INLINE BoolV FIsGrtrOrEq(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return _mm_cmpge_ps(a, b);
}
PX_FORCE_INLINE BoolV FIsEq(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return _mm_cmpeq_ps(a, b);
}
PX_FORCE_INLINE FloatV FMax(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return _mm_max_ps(a, b);
}
PX_FORCE_INLINE FloatV FMin(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return _mm_min_ps(a, b);
}
PX_FORCE_INLINE FloatV FClamp(const FloatV a, const FloatV minV, const FloatV maxV)
{
ASSERT_ISVALIDFLOATV(minV);
ASSERT_ISVALIDFLOATV(maxV);
return _mm_max_ps(_mm_min_ps(a, maxV), minV);
}
PX_FORCE_INLINE PxU32 FAllGrtr(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return PxU32(_mm_comigt_ss(a, b));
}
PX_FORCE_INLINE PxU32 FAllGrtrOrEq(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return PxU32(_mm_comige_ss(a, b));
}
PX_FORCE_INLINE PxU32 FAllEq(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return PxU32(_mm_comieq_ss(a, b));
}
PX_FORCE_INLINE FloatV FRound(const FloatV a)
{
ASSERT_ISVALIDFLOATV(a);
#ifdef __SSE4_2__
return _mm_round_ps(a, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
#else
// return _mm_round_ps(a, 0x0);
const FloatV half = FLoad(0.5f);
const __m128 signBit = _mm_cvtepi32_ps(_mm_srli_epi32(_mm_cvtps_epi32(a), 31));
const FloatV aRound = FSub(FAdd(a, half), signBit);
__m128i tmp = _mm_cvttps_epi32(aRound);
return _mm_cvtepi32_ps(tmp);
#endif
}
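// Note on the SSE2 fallback in FRound() above: _mm_cvtps_epi32(a) >> 31 yields
// 1.0f for lanes whose nearest integer is negative and 0.0f otherwise, so
// aRound is a - 0.5 for those lanes and a + 0.5 for the rest; the final
// truncation (cvttps) then lands halfway cases such as +/-2.5 on +/-3. A
// scalar sketch of the same idea (reference only, needs <math.h>):
//
//     float roundLane(float a)
//     {
//         const float signBit = (lrintf(a) < 0) ? 1.0f : 0.0f; // cvtps_epi32 + srli 31
//         return float(int(a + 0.5f - signBit));               // cvttps truncates toward zero
//     }
//
// The SSE4 path above uses _MM_FROUND_TO_NEAREST_INT, which rounds halfway
// cases to even instead (2.5 -> 2.0), so the two paths can differ on exact .5
// inputs.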
PX_FORCE_INLINE FloatV FSin(const FloatV a)
{
ASSERT_ISVALIDFLOATV(a);
// Modulo the range of the given angles such that -XM_2PI <= Angles < XM_2PI
const FloatV recipTwoPi = V4LoadA(g_PXReciprocalTwoPi.f);
const FloatV twoPi = V4LoadA(g_PXTwoPi.f);
const FloatV tmp = FMul(a, recipTwoPi);
const FloatV b = FRound(tmp);
const FloatV V1 = FNegScaleSub(twoPi, b, a);
// sin(V) ~= V - V^3 / 3! + V^5 / 5! - V^7 / 7! + V^9 / 9! - V^11 / 11! + V^13 / 13! -
// V^15 / 15! + V^17 / 17! - V^19 / 19! + V^21 / 21! - V^23 / 23! (for -PI <= V < PI)
const FloatV V2 = FMul(V1, V1);
const FloatV V3 = FMul(V2, V1);
const FloatV V5 = FMul(V3, V2);
const FloatV V7 = FMul(V5, V2);
const FloatV V9 = FMul(V7, V2);
const FloatV V11 = FMul(V9, V2);
const FloatV V13 = FMul(V11, V2);
const FloatV V15 = FMul(V13, V2);
const FloatV V17 = FMul(V15, V2);
const FloatV V19 = FMul(V17, V2);
const FloatV V21 = FMul(V19, V2);
const FloatV V23 = FMul(V21, V2);
const Vec4V sinCoefficients0 = V4LoadA(g_PXSinCoefficients0.f);
const Vec4V sinCoefficients1 = V4LoadA(g_PXSinCoefficients1.f);
const Vec4V sinCoefficients2 = V4LoadA(g_PXSinCoefficients2.f);
const FloatV S1 = V4GetY(sinCoefficients0);
const FloatV S2 = V4GetZ(sinCoefficients0);
const FloatV S3 = V4GetW(sinCoefficients0);
const FloatV S4 = V4GetX(sinCoefficients1);
const FloatV S5 = V4GetY(sinCoefficients1);
const FloatV S6 = V4GetZ(sinCoefficients1);
const FloatV S7 = V4GetW(sinCoefficients1);
const FloatV S8 = V4GetX(sinCoefficients2);
const FloatV S9 = V4GetY(sinCoefficients2);
const FloatV S10 = V4GetZ(sinCoefficients2);
const FloatV S11 = V4GetW(sinCoefficients2);
FloatV Result;
Result = FScaleAdd(S1, V3, V1);
Result = FScaleAdd(S2, V5, Result);
Result = FScaleAdd(S3, V7, Result);
Result = FScaleAdd(S4, V9, Result);
Result = FScaleAdd(S5, V11, Result);
Result = FScaleAdd(S6, V13, Result);
Result = FScaleAdd(S7, V15, Result);
Result = FScaleAdd(S8, V17, Result);
Result = FScaleAdd(S9, V19, Result);
Result = FScaleAdd(S10, V21, Result);
Result = FScaleAdd(S11, V23, Result);
return Result;
}
PX_FORCE_INLINE FloatV FCos(const FloatV a)
{
ASSERT_ISVALIDFLOATV(a);
// Modulo the range of the given angles such that -XM_2PI <= Angles < XM_2PI
const FloatV recipTwoPi = V4LoadA(g_PXReciprocalTwoPi.f);
const FloatV twoPi = V4LoadA(g_PXTwoPi.f);
const FloatV tmp = FMul(a, recipTwoPi);
const FloatV b = FRound(tmp);
const FloatV V1 = FNegScaleSub(twoPi, b, a);
// cos(V) ~= 1 - V^2 / 2! + V^4 / 4! - V^6 / 6! + V^8 / 8! - V^10 / 10! + V^12 / 12! -
// V^14 / 14! + V^16 / 16! - V^18 / 18! + V^20 / 20! - V^22 / 22! (for -PI <= V < PI)
const FloatV V2 = FMul(V1, V1);
const FloatV V4 = FMul(V2, V2);
const FloatV V6 = FMul(V4, V2);
const FloatV V8 = FMul(V4, V4);
const FloatV V10 = FMul(V6, V4);
const FloatV V12 = FMul(V6, V6);
const FloatV V14 = FMul(V8, V6);
const FloatV V16 = FMul(V8, V8);
const FloatV V18 = FMul(V10, V8);
const FloatV V20 = FMul(V10, V10);
const FloatV V22 = FMul(V12, V10);
const Vec4V cosCoefficients0 = V4LoadA(g_PXCosCoefficients0.f);
const Vec4V cosCoefficients1 = V4LoadA(g_PXCosCoefficients1.f);
const Vec4V cosCoefficients2 = V4LoadA(g_PXCosCoefficients2.f);
const FloatV C1 = V4GetY(cosCoefficients0);
const FloatV C2 = V4GetZ(cosCoefficients0);
const FloatV C3 = V4GetW(cosCoefficients0);
const FloatV C4 = V4GetX(cosCoefficients1);
const FloatV C5 = V4GetY(cosCoefficients1);
const FloatV C6 = V4GetZ(cosCoefficients1);
const FloatV C7 = V4GetW(cosCoefficients1);
const FloatV C8 = V4GetX(cosCoefficients2);
const FloatV C9 = V4GetY(cosCoefficients2);
const FloatV C10 = V4GetZ(cosCoefficients2);
const FloatV C11 = V4GetW(cosCoefficients2);
FloatV Result;
Result = FScaleAdd(C1, V2, V4One());
Result = FScaleAdd(C2, V4, Result);
Result = FScaleAdd(C3, V6, Result);
Result = FScaleAdd(C4, V8, Result);
Result = FScaleAdd(C5, V10, Result);
Result = FScaleAdd(C6, V12, Result);
Result = FScaleAdd(C7, V14, Result);
Result = FScaleAdd(C8, V16, Result);
Result = FScaleAdd(C9, V18, Result);
Result = FScaleAdd(C10, V20, Result);
Result = FScaleAdd(C11, V22, Result);
return Result;
}
PX_FORCE_INLINE PxU32 FOutOfBounds(const FloatV a, const FloatV min, const FloatV max)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(min);
ASSERT_ISVALIDFLOATV(max);
const BoolV c = BOr(FIsGrtr(a, max), FIsGrtr(min, a));
return !BAllEqFFFF(c);
}
PX_FORCE_INLINE PxU32 FInBounds(const FloatV a, const FloatV min, const FloatV max)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(min);
ASSERT_ISVALIDFLOATV(max);
const BoolV c = BAnd(FIsGrtrOrEq(a, min), FIsGrtrOrEq(max, a));
return BAllEqTTTT(c);
}
PX_FORCE_INLINE PxU32 FOutOfBounds(const FloatV a, const FloatV bounds)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(bounds);
return FOutOfBounds(a, FNeg(bounds), bounds);
}
PX_FORCE_INLINE PxU32 FInBounds(const FloatV a, const FloatV bounds)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(bounds);
return FInBounds(a, FNeg(bounds), bounds);
}
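// The single-argument FOutOfBounds/FInBounds overloads test against the symmetric range
// [-bounds, bounds]; all four return a PxU32 that is non-zero when the condition holds.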
//////////////////////////////////
// VEC3V
//////////////////////////////////
PX_FORCE_INLINE Vec3V V3Splat(const FloatV f)
{
ASSERT_ISVALIDFLOATV(f);
const __m128 zero = FZero();
const __m128 fff0 = _mm_move_ss(f, zero);
return _mm_shuffle_ps(fff0, fff0, _MM_SHUFFLE(0, 1, 2, 3));
}
PX_FORCE_INLINE Vec3V V3Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z)
{
ASSERT_ISVALIDFLOATV(x);
ASSERT_ISVALIDFLOATV(y);
ASSERT_ISVALIDFLOATV(z);
// static on zero causes compiler crash on x64 debug_opt
const __m128 zero = FZero();
const __m128 xy = _mm_move_ss(x, y);
const __m128 z0 = _mm_move_ss(zero, z);
return _mm_shuffle_ps(xy, z0, _MM_SHUFFLE(1, 0, 0, 1));
}
PX_FORCE_INLINE Vec3V V3UnitX()
{
const PX_ALIGN(16, PxF32) x[4] = { 1.0f, 0.0f, 0.0f, 0.0f };
const __m128 x128 = _mm_load_ps(x);
return x128;
}
PX_FORCE_INLINE Vec3V V3UnitY()
{
const PX_ALIGN(16, PxF32) y[4] = { 0.0f, 1.0f, 0.0f, 0.0f };
const __m128 y128 = _mm_load_ps(y);
return y128;
}
PX_FORCE_INLINE Vec3V V3UnitZ()
{
const PX_ALIGN(16, PxF32) z[4] = { 0.0f, 0.0f, 1.0f, 0.0f };
const __m128 z128 = _mm_load_ps(z);
return z128;
}
PX_FORCE_INLINE FloatV V3GetX(const Vec3V f)
{
ASSERT_ISVALIDVEC3V(f);
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(0, 0, 0, 0));
}
PX_FORCE_INLINE FloatV V3GetY(const Vec3V f)
{
ASSERT_ISVALIDVEC3V(f);
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(1, 1, 1, 1));
}
PX_FORCE_INLINE FloatV V3GetZ(const Vec3V f)
{
ASSERT_ISVALIDVEC3V(f);
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(2, 2, 2, 2));
}
PX_FORCE_INLINE Vec3V V3SetX(const Vec3V v, const FloatV f)
{
ASSERT_ISVALIDVEC3V(v);
ASSERT_ISVALIDFLOATV(f);
return V4Sel(BFTTT(), v, f);
}
PX_FORCE_INLINE Vec3V V3SetY(const Vec3V v, const FloatV f)
{
ASSERT_ISVALIDVEC3V(v);
ASSERT_ISVALIDFLOATV(f);
return V4Sel(BTFTT(), v, f);
}
PX_FORCE_INLINE Vec3V V3SetZ(const Vec3V v, const FloatV f)
{
ASSERT_ISVALIDVEC3V(v);
ASSERT_ISVALIDFLOATV(f);
return V4Sel(BTTFT(), v, f);
}
PX_FORCE_INLINE Vec3V V3ColX(const Vec3V a, const Vec3V b, const Vec3V c)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
ASSERT_ISVALIDVEC3V(c);
Vec3V r = _mm_shuffle_ps(a, c, _MM_SHUFFLE(3, 0, 3, 0));
return V3SetY(r, V3GetX(b));
}
PX_FORCE_INLINE Vec3V V3ColY(const Vec3V a, const Vec3V b, const Vec3V c)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
ASSERT_ISVALIDVEC3V(c);
Vec3V r = _mm_shuffle_ps(a, c, _MM_SHUFFLE(3, 1, 3, 1));
return V3SetY(r, V3GetY(b));
}
PX_FORCE_INLINE Vec3V V3ColZ(const Vec3V a, const Vec3V b, const Vec3V c)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
ASSERT_ISVALIDVEC3V(c);
Vec3V r = _mm_shuffle_ps(a, c, _MM_SHUFFLE(3, 2, 3, 2));
return V3SetY(r, V3GetZ(b));
}
PX_FORCE_INLINE Vec3V V3Zero()
{
return V3Load(0.0f);
}
PX_FORCE_INLINE Vec3V V3Eps()
{
return V3Load(PX_EPS_REAL);
}
PX_FORCE_INLINE Vec3V V3One()
{
return V3Load(1.0f);
}
PX_FORCE_INLINE Vec3V V3Neg(const Vec3V f)
{
ASSERT_ISVALIDVEC3V(f);
return _mm_sub_ps(_mm_setzero_ps(), f);
}
PX_FORCE_INLINE Vec3V V3Add(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return _mm_add_ps(a, b);
}
PX_FORCE_INLINE Vec3V V3Sub(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return _mm_sub_ps(a, b);
}
PX_FORCE_INLINE Vec3V V3Scale(const Vec3V a, const FloatV b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDFLOATV(b);
return _mm_mul_ps(a, b);
}
PX_FORCE_INLINE Vec3V V3Mul(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return _mm_mul_ps(a, b);
}
PX_FORCE_INLINE Vec3V V3ScaleInv(const Vec3V a, const FloatV b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDFLOATV(b);
return _mm_div_ps(a, b);
}
PX_FORCE_INLINE Vec3V V3Div(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return V4ClearW(_mm_div_ps(a, b));
}
PX_FORCE_INLINE Vec3V V3ScaleInvFast(const Vec3V a, const FloatV b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDFLOATV(b);
return _mm_mul_ps(a, _mm_rcp_ps(b));
}
PX_FORCE_INLINE Vec3V V3DivFast(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return V4ClearW(_mm_mul_ps(a, _mm_rcp_ps(b)));
}
PX_FORCE_INLINE Vec3V V3Recip(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
const __m128 zero = V3Zero();
const __m128 tttf = BTTTF();
const __m128 recipA = _mm_div_ps(V3One(), a);
return V4Sel(tttf, recipA, zero);
}
PX_FORCE_INLINE Vec3V V3RecipFast(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
const __m128 zero = V3Zero();
const __m128 tttf = BTTTF();
const __m128 recipA = _mm_rcp_ps(a);
return V4Sel(tttf, recipA, zero);
}
PX_FORCE_INLINE Vec3V V3Rsqrt(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
const __m128 zero = V3Zero();
const __m128 tttf = BTTTF();
const __m128 recipA = _mm_div_ps(V3One(), _mm_sqrt_ps(a));
return V4Sel(tttf, recipA, zero);
}
PX_FORCE_INLINE Vec3V V3RsqrtFast(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
const __m128 zero = V3Zero();
const __m128 tttf = BTTTF();
const __m128 recipA = _mm_rsqrt_ps(a);
return V4Sel(tttf, recipA, zero);
}
PX_FORCE_INLINE Vec3V V3ScaleAdd(const Vec3V a, const FloatV b, const Vec3V c)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDFLOATV(b);
ASSERT_ISVALIDVEC3V(c);
return V3Add(V3Scale(a, b), c);
}
PX_FORCE_INLINE Vec3V V3NegScaleSub(const Vec3V a, const FloatV b, const Vec3V c)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDFLOATV(b);
ASSERT_ISVALIDVEC3V(c);
return V3Sub(c, V3Scale(a, b));
}
PX_FORCE_INLINE Vec3V V3MulAdd(const Vec3V a, const Vec3V b, const Vec3V c)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
ASSERT_ISVALIDVEC3V(c);
return V3Add(V3Mul(a, b), c);
}
PX_FORCE_INLINE Vec3V V3NegMulSub(const Vec3V a, const Vec3V b, const Vec3V c)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
ASSERT_ISVALIDVEC3V(c);
return V3Sub(c, V3Mul(a, b));
}
PX_FORCE_INLINE Vec3V V3Abs(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
return V3Max(a, V3Neg(a));
}
PX_FORCE_INLINE FloatV V3Dot(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
#ifdef __SSE4_2__
return _mm_dp_ps(a, b, 0x7f);
#else
const __m128 t0 = _mm_mul_ps(a, b); // aw*bw | az*bz | ay*by | ax*bx
const __m128 t1 = _mm_shuffle_ps(t0, t0, _MM_SHUFFLE(1,0,3,2)); // ay*by | ax*bx | aw*bw | az*bz
const __m128 t2 = _mm_add_ps(t0, t1); // ay*by + aw*bw | ax*bx + az*bz | aw*bw + ay*by | az*bz + ax*bx
const __m128 t3 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(2,3,0,1)); // ax*bx + az*bz | ay*by + aw*bw | az*bz + ax*bx | aw*bw + ay*by
return _mm_add_ps(t3, t2); // ax*bx + az*bz + ay*by + aw*bw
// ay*by + aw*bw + ax*bx + az*bz
// az*bz + ax*bx + aw*bw + ay*by
// aw*bw + ay*by + az*bz + ax*bx
#endif
}
PX_FORCE_INLINE Vec3V V3Cross(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
const __m128 r1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 0, 2)); // z,x,y,w
const __m128 r2 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1)); // y,z,x,w
const __m128 l1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1)); // y,z,x,w
const __m128 l2 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 1, 0, 2)); // z,x,y,w
return _mm_sub_ps(_mm_mul_ps(l1, l2), _mm_mul_ps(r1, r2));
}
PX_FORCE_INLINE VecCrossV V3PrepareCross(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
VecCrossV v;
v.mR1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 0, 2)); // z,x,y,w
v.mL1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1)); // y,z,x,w
return v;
}
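// VecCrossV caches the two shuffles of its argument, so crossing one fixed vector against
// many others saves two shuffles per product. Illustrative use (names hypothetical):
//   const VecCrossV ac = V3PrepareCross(axis);
//   const Vec3V r0 = V3Cross(ac, v0);
//   const Vec3V r1 = V3Cross(ac, v1);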
PX_FORCE_INLINE Vec3V V3Cross(const VecCrossV& a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(b);
const __m128 r2 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1)); // y,z,x,w
const __m128 l2 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 1, 0, 2)); // z,x,y,w
return _mm_sub_ps(_mm_mul_ps(a.mL1, l2), _mm_mul_ps(a.mR1, r2));
}
PX_FORCE_INLINE Vec3V V3Cross(const Vec3V a, const VecCrossV& b)
{
ASSERT_ISVALIDVEC3V(a);
const __m128 r2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1)); // y,z,x,w
const __m128 l2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 0, 2)); // z,x,y,w
return _mm_sub_ps(_mm_mul_ps(b.mR1, r2), _mm_mul_ps(b.mL1, l2));
}
PX_FORCE_INLINE Vec3V V3Cross(const VecCrossV& a, const VecCrossV& b)
{
return _mm_sub_ps(_mm_mul_ps(a.mL1, b.mR1), _mm_mul_ps(a.mR1, b.mL1));
}
PX_FORCE_INLINE FloatV V3Length(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
return _mm_sqrt_ps(V3Dot(a, a));
}
PX_FORCE_INLINE FloatV V3LengthSq(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
return V3Dot(a, a);
}
PX_FORCE_INLINE Vec3V V3Normalize(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISFINITELENGTH(a);
return V3ScaleInv(a, _mm_sqrt_ps(V3Dot(a, a)));
}
PX_FORCE_INLINE Vec3V V3NormalizeFast(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISFINITELENGTH(a);
return V3Scale(a, _mm_rsqrt_ps(V3Dot(a, a)));
}
PX_FORCE_INLINE Vec3V V3NormalizeSafe(const Vec3V a, const Vec3V unsafeReturnValue)
{
ASSERT_ISVALIDVEC3V(a);
const __m128 eps = V4Eps();
const __m128 length = V3Length(a);
const __m128 isGreaterThanZero = FIsGrtr(length, eps);
return V3Sel(isGreaterThanZero, V3ScaleInv(a, length), unsafeReturnValue);
}
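// V3NormalizeSafe falls back to unsafeReturnValue when the length is not greater than
// PX_EPS_REAL, e.g. V3NormalizeSafe(v, V3UnitX()) yields the unit X axis for a (near-)zero v.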
PX_FORCE_INLINE Vec3V V3Sel(const BoolV c, const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(_mm_or_ps(_mm_andnot_ps(c, b), _mm_and_ps(c, a)));
return _mm_or_ps(_mm_andnot_ps(c, b), _mm_and_ps(c, a));
}
PX_FORCE_INLINE BoolV V3IsGrtr(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return _mm_cmpgt_ps(a, b);
}
PX_FORCE_INLINE BoolV V3IsGrtrOrEq(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return _mm_cmpge_ps(a, b);
}
PX_FORCE_INLINE BoolV V3IsEq(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return _mm_cmpeq_ps(a, b);
}
PX_FORCE_INLINE Vec3V V3Max(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return _mm_max_ps(a, b);
}
PX_FORCE_INLINE Vec3V V3Min(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return _mm_min_ps(a, b);
}
PX_FORCE_INLINE FloatV V3ExtractMax(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
const __m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 2, 2, 2));
return _mm_max_ps(_mm_max_ps(shuf1, shuf2), shuf3);
}
PX_FORCE_INLINE FloatV V3ExtractMin(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
const __m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 2, 2, 2));
return _mm_min_ps(_mm_min_ps(shuf1, shuf2), shuf3);
}
// return (a >= 0.0f) ? 1.0f : -1.0f;
PX_FORCE_INLINE Vec3V V3Sign(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
const __m128 zero = V3Zero();
const __m128 one = V3One();
const __m128 none = V3Neg(one);
return V3Sel(V3IsGrtrOrEq(a, zero), one, none);
}
PX_FORCE_INLINE Vec3V V3Clamp(const Vec3V a, const Vec3V minV, const Vec3V maxV)
{
ASSERT_ISVALIDVEC3V(maxV);
ASSERT_ISVALIDVEC3V(minV);
return V3Max(V3Min(a, maxV), minV);
}
PX_FORCE_INLINE PxU32 V3AllGrtr(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return internalUnitSSE2Simd::BAllTrue3_R(V4IsGrtr(a, b));
}
PX_FORCE_INLINE PxU32 V3AllGrtrOrEq(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return internalUnitSSE2Simd::BAllTrue3_R(V4IsGrtrOrEq(a, b));
}
PX_FORCE_INLINE PxU32 V3AllEq(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return internalUnitSSE2Simd::BAllTrue3_R(V4IsEq(a, b));
}
PX_FORCE_INLINE Vec3V V3Round(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
#ifdef __SSE4_2__
return _mm_round_ps(a, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
#else
// return _mm_round_ps(a, 0x0);
const Vec3V half = V3Load(0.5f);
const __m128 signBit = _mm_cvtepi32_ps(_mm_srli_epi32(_mm_cvtps_epi32(a), 31));
const Vec3V aRound = V3Sub(V3Add(a, half), signBit);
__m128i tmp = _mm_cvttps_epi32(aRound);
return _mm_cvtepi32_ps(tmp);
#endif
}
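// Note: the non-SSE4 fallback in V3Round rounds ties away from zero, while the _mm_round_ps
// path rounds ties to even, so results can differ on exact .5 inputs; V4Round further below
// uses the same pattern.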
PX_FORCE_INLINE Vec3V V3Sin(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
// Range-reduce the angles to [-PI, PI] by subtracting the nearest multiple of 2*PI
const Vec4V recipTwoPi = V4LoadA(g_PXReciprocalTwoPi.f);
const Vec4V twoPi = V4LoadA(g_PXTwoPi.f);
const Vec3V tmp = V3Scale(a, recipTwoPi);
const Vec3V b = V3Round(tmp);
const Vec3V V1 = V3NegScaleSub(b, twoPi, a);
// sin(V) ~= V - V^3 / 3! + V^5 / 5! - V^7 / 7! + V^9 / 9! - V^11 / 11! + V^13 / 13! -
// V^15 / 15! + V^17 / 17! - V^19 / 19! + V^21 / 21! - V^23 / 23! (for -PI <= V < PI)
const Vec3V V2 = V3Mul(V1, V1);
const Vec3V V3 = V3Mul(V2, V1);
const Vec3V V5 = V3Mul(V3, V2);
const Vec3V V7 = V3Mul(V5, V2);
const Vec3V V9 = V3Mul(V7, V2);
const Vec3V V11 = V3Mul(V9, V2);
const Vec3V V13 = V3Mul(V11, V2);
const Vec3V V15 = V3Mul(V13, V2);
const Vec3V V17 = V3Mul(V15, V2);
const Vec3V V19 = V3Mul(V17, V2);
const Vec3V V21 = V3Mul(V19, V2);
const Vec3V V23 = V3Mul(V21, V2);
const Vec4V sinCoefficients0 = V4LoadA(g_PXSinCoefficients0.f);
const Vec4V sinCoefficients1 = V4LoadA(g_PXSinCoefficients1.f);
const Vec4V sinCoefficients2 = V4LoadA(g_PXSinCoefficients2.f);
const FloatV S1 = V4GetY(sinCoefficients0);
const FloatV S2 = V4GetZ(sinCoefficients0);
const FloatV S3 = V4GetW(sinCoefficients0);
const FloatV S4 = V4GetX(sinCoefficients1);
const FloatV S5 = V4GetY(sinCoefficients1);
const FloatV S6 = V4GetZ(sinCoefficients1);
const FloatV S7 = V4GetW(sinCoefficients1);
const FloatV S8 = V4GetX(sinCoefficients2);
const FloatV S9 = V4GetY(sinCoefficients2);
const FloatV S10 = V4GetZ(sinCoefficients2);
const FloatV S11 = V4GetW(sinCoefficients2);
Vec3V Result;
Result = V3ScaleAdd(V3, S1, V1);
Result = V3ScaleAdd(V5, S2, Result);
Result = V3ScaleAdd(V7, S3, Result);
Result = V3ScaleAdd(V9, S4, Result);
Result = V3ScaleAdd(V11, S5, Result);
Result = V3ScaleAdd(V13, S6, Result);
Result = V3ScaleAdd(V15, S7, Result);
Result = V3ScaleAdd(V17, S8, Result);
Result = V3ScaleAdd(V19, S9, Result);
Result = V3ScaleAdd(V21, S10, Result);
Result = V3ScaleAdd(V23, S11, Result);
ASSERT_ISVALIDVEC3V(Result);
return Result;
}
PX_FORCE_INLINE Vec3V V3Cos(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
// Range-reduce the angles to [-PI, PI] by subtracting the nearest multiple of 2*PI
const Vec4V recipTwoPi = V4LoadA(g_PXReciprocalTwoPi.f);
const Vec4V twoPi = V4LoadA(g_PXTwoPi.f);
const Vec3V tmp = V3Scale(a, recipTwoPi);
const Vec3V b = V3Round(tmp);
const Vec3V V1 = V3NegScaleSub(b, twoPi, a);
// cos(V) ~= 1 - V^2 / 2! + V^4 / 4! - V^6 / 6! + V^8 / 8! - V^10 / 10! + V^12 / 12! -
// V^14 / 14! + V^16 / 16! - V^18 / 18! + V^20 / 20! - V^22 / 22! (for -PI <= V < PI)
const Vec3V V2 = V3Mul(V1, V1);
const Vec3V V4 = V3Mul(V2, V2);
const Vec3V V6 = V3Mul(V4, V2);
const Vec3V V8 = V3Mul(V4, V4);
const Vec3V V10 = V3Mul(V6, V4);
const Vec3V V12 = V3Mul(V6, V6);
const Vec3V V14 = V3Mul(V8, V6);
const Vec3V V16 = V3Mul(V8, V8);
const Vec3V V18 = V3Mul(V10, V8);
const Vec3V V20 = V3Mul(V10, V10);
const Vec3V V22 = V3Mul(V12, V10);
const Vec4V cosCoefficients0 = V4LoadA(g_PXCosCoefficients0.f);
const Vec4V cosCoefficients1 = V4LoadA(g_PXCosCoefficients1.f);
const Vec4V cosCoefficients2 = V4LoadA(g_PXCosCoefficients2.f);
const FloatV C1 = V4GetY(cosCoefficients0);
const FloatV C2 = V4GetZ(cosCoefficients0);
const FloatV C3 = V4GetW(cosCoefficients0);
const FloatV C4 = V4GetX(cosCoefficients1);
const FloatV C5 = V4GetY(cosCoefficients1);
const FloatV C6 = V4GetZ(cosCoefficients1);
const FloatV C7 = V4GetW(cosCoefficients1);
const FloatV C8 = V4GetX(cosCoefficients2);
const FloatV C9 = V4GetY(cosCoefficients2);
const FloatV C10 = V4GetZ(cosCoefficients2);
const FloatV C11 = V4GetW(cosCoefficients2);
Vec3V Result;
Result = V3ScaleAdd(V2, C1, V3One());
Result = V3ScaleAdd(V4, C2, Result);
Result = V3ScaleAdd(V6, C3, Result);
Result = V3ScaleAdd(V8, C4, Result);
Result = V3ScaleAdd(V10, C5, Result);
Result = V3ScaleAdd(V12, C6, Result);
Result = V3ScaleAdd(V14, C7, Result);
Result = V3ScaleAdd(V16, C8, Result);
Result = V3ScaleAdd(V18, C9, Result);
Result = V3ScaleAdd(V20, C10, Result);
Result = V3ScaleAdd(V22, C11, Result);
ASSERT_ISVALIDVEC3V(Result);
return Result;
}
PX_FORCE_INLINE Vec3V V3PermYZZ(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 2, 2, 1));
}
PX_FORCE_INLINE Vec3V V3PermXYX(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 1, 0));
}
PX_FORCE_INLINE Vec3V V3PermYZX(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1));
}
PX_FORCE_INLINE Vec3V V3PermZXY(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 0, 2));
}
PX_FORCE_INLINE Vec3V V3PermZZY(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 2, 2));
}
PX_FORCE_INLINE Vec3V V3PermYXX(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 0, 1));
}
PX_FORCE_INLINE Vec3V V3Perm_Zero_1Z_0Y(const Vec3V v0, const Vec3V v1)
{
ASSERT_ISVALIDVEC3V(v0);
ASSERT_ISVALIDVEC3V(v1);
return _mm_shuffle_ps(v1, v0, _MM_SHUFFLE(3, 1, 2, 3));
}
PX_FORCE_INLINE Vec3V V3Perm_0Z_Zero_1X(const Vec3V v0, const Vec3V v1)
{
ASSERT_ISVALIDVEC3V(v0);
ASSERT_ISVALIDVEC3V(v1);
return _mm_shuffle_ps(v0, v1, _MM_SHUFFLE(3, 0, 3, 2));
}
PX_FORCE_INLINE Vec3V V3Perm_1Y_0X_Zero(const Vec3V v0, const Vec3V v1)
{
ASSERT_ISVALIDVEC3V(v0);
ASSERT_ISVALIDVEC3V(v1);
// There must be a better way to do this.
Vec3V v2 = V3Zero();
FloatV y1 = V3GetY(v1);
FloatV x0 = V3GetX(v0);
v2 = V3SetX(v2, y1);
return V3SetY(v2, x0);
}
PX_FORCE_INLINE FloatV V3SumElems(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
#ifdef __SSE4_2__
Vec3V r = _mm_hadd_ps(a, a);
r = _mm_hadd_ps(r, r);
return r;
#else
__m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 0, 0, 0)); // x,x,x,x
__m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1, 1, 1, 1)); // y,y,y,y
__m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 2, 2, 2)); // z,z,z,z
return _mm_add_ps(_mm_add_ps(shuf1, shuf2), shuf3);
#endif
}
PX_FORCE_INLINE PxU32 V3OutOfBounds(const Vec3V a, const Vec3V min, const Vec3V max)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(min);
ASSERT_ISVALIDVEC3V(max);
const BoolV c = BOr(V3IsGrtr(a, max), V3IsGrtr(min, a));
return !BAllEqFFFF(c);
}
PX_FORCE_INLINE PxU32 V3InBounds(const Vec3V a, const Vec3V min, const Vec3V max)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(min);
ASSERT_ISVALIDVEC3V(max);
const BoolV c = BAnd(V3IsGrtrOrEq(a, min), V3IsGrtrOrEq(max, a));
return BAllEqTTTT(c);
}
PX_FORCE_INLINE PxU32 V3OutOfBounds(const Vec3V a, const Vec3V bounds)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(bounds);
return V3OutOfBounds(a, V3Neg(bounds), bounds);
}
PX_FORCE_INLINE PxU32 V3InBounds(const Vec3V a, const Vec3V bounds)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(bounds);
return V3InBounds(a, V3Neg(bounds), bounds);
}
PX_FORCE_INLINE void V3Transpose(Vec3V& col0, Vec3V& col1, Vec3V& col2)
{
ASSERT_ISVALIDVEC3V(col0);
ASSERT_ISVALIDVEC3V(col1);
ASSERT_ISVALIDVEC3V(col2);
const Vec3V col3 = _mm_setzero_ps();
Vec3V tmp0 = _mm_unpacklo_ps(col0, col1);
Vec3V tmp2 = _mm_unpacklo_ps(col2, col3);
Vec3V tmp1 = _mm_unpackhi_ps(col0, col1);
Vec3V tmp3 = _mm_unpackhi_ps(col2, col3);
col0 = _mm_movelh_ps(tmp0, tmp2);
col1 = _mm_movehl_ps(tmp2, tmp0);
col2 = _mm_movelh_ps(tmp1, tmp3);
}
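// V3Transpose converts three column vectors into lane-major form in place: afterwards col0
// holds the x components, col1 the y components and col2 the z components of the original
// columns (the implicit fourth column is treated as zero).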
//////////////////////////////////
// VEC4V
//////////////////////////////////
PX_FORCE_INLINE Vec4V V4Splat(const FloatV f)
{
ASSERT_ISVALIDFLOATV(f);
// return _mm_shuffle_ps(f, f, _MM_SHUFFLE(0,0,0,0));
return f;
}
PX_FORCE_INLINE Vec4V V4Merge(const FloatV* const floatVArray)
{
ASSERT_ISVALIDFLOATV(floatVArray[0]);
ASSERT_ISVALIDFLOATV(floatVArray[1]);
ASSERT_ISVALIDFLOATV(floatVArray[2]);
ASSERT_ISVALIDFLOATV(floatVArray[3]);
const __m128 xw = _mm_move_ss(floatVArray[1], floatVArray[0]); // y, y, y, x
const __m128 yz = _mm_move_ss(floatVArray[2], floatVArray[3]); // z, z, z, w
return _mm_shuffle_ps(xw, yz, _MM_SHUFFLE(0, 2, 1, 0));
}
PX_FORCE_INLINE Vec4V V4Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z, const FloatVArg w)
{
ASSERT_ISVALIDFLOATV(x);
ASSERT_ISVALIDFLOATV(y);
ASSERT_ISVALIDFLOATV(z);
ASSERT_ISVALIDFLOATV(w);
const __m128 xw = _mm_move_ss(y, x); // y, y, y, x
const __m128 yz = _mm_move_ss(z, w); // z, z, z, w
return _mm_shuffle_ps(xw, yz, _MM_SHUFFLE(0, 2, 1, 0));
}
PX_FORCE_INLINE Vec4V V4MergeW(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w)
{
const Vec4V xz = _mm_unpackhi_ps(x, z);
const Vec4V yw = _mm_unpackhi_ps(y, w);
return _mm_unpackhi_ps(xz, yw);
}
PX_FORCE_INLINE Vec4V V4MergeZ(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w)
{
const Vec4V xz = _mm_unpackhi_ps(x, z);
const Vec4V yw = _mm_unpackhi_ps(y, w);
return _mm_unpacklo_ps(xz, yw);
}
PX_FORCE_INLINE Vec4V V4MergeY(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w)
{
const Vec4V xz = _mm_unpacklo_ps(x, z);
const Vec4V yw = _mm_unpacklo_ps(y, w);
return _mm_unpackhi_ps(xz, yw);
}
PX_FORCE_INLINE Vec4V V4MergeX(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w)
{
const Vec4V xz = _mm_unpacklo_ps(x, z);
const Vec4V yw = _mm_unpacklo_ps(y, w);
return _mm_unpacklo_ps(xz, yw);
}
PX_FORCE_INLINE Vec4V V4UnpackXY(const Vec4VArg a, const Vec4VArg b)
{
return _mm_unpacklo_ps(a, b);
}
PX_FORCE_INLINE Vec4V V4UnpackZW(const Vec4VArg a, const Vec4VArg b)
{
return _mm_unpackhi_ps(a, b);
}
PX_FORCE_INLINE Vec4V V4UnitW()
{
const PX_ALIGN(16, PxF32) w[4] = { 0.0f, 0.0f, 0.0f, 1.0f };
const __m128 w128 = _mm_load_ps(w);
return w128;
}
PX_FORCE_INLINE Vec4V V4UnitX()
{
const PX_ALIGN(16, PxF32) x[4] = { 1.0f, 0.0f, 0.0f, 0.0f };
const __m128 x128 = _mm_load_ps(x);
return x128;
}
PX_FORCE_INLINE Vec4V V4UnitY()
{
const PX_ALIGN(16, PxF32) y[4] = { 0.0f, 1.0f, 0.0f, 0.0f };
const __m128 y128 = _mm_load_ps(y);
return y128;
}
PX_FORCE_INLINE Vec4V V4UnitZ()
{
const PX_ALIGN(16, PxF32) z[4] = { 0.0f, 0.0f, 1.0f, 0.0f };
const __m128 z128 = _mm_load_ps(z);
return z128;
}
PX_FORCE_INLINE FloatV V4GetW(const Vec4V f)
{
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(3, 3, 3, 3));
}
PX_FORCE_INLINE FloatV V4GetX(const Vec4V f)
{
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(0, 0, 0, 0));
}
PX_FORCE_INLINE FloatV V4GetY(const Vec4V f)
{
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(1, 1, 1, 1));
}
PX_FORCE_INLINE FloatV V4GetZ(const Vec4V f)
{
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(2, 2, 2, 2));
}
PX_FORCE_INLINE Vec4V V4SetW(const Vec4V v, const FloatV f)
{
ASSERT_ISVALIDFLOATV(f);
return V4Sel(BTTTF(), v, f);
}
PX_FORCE_INLINE Vec4V V4SetX(const Vec4V v, const FloatV f)
{
ASSERT_ISVALIDFLOATV(f);
return V4Sel(BFTTT(), v, f);
}
PX_FORCE_INLINE Vec4V V4SetY(const Vec4V v, const FloatV f)
{
ASSERT_ISVALIDFLOATV(f);
return V4Sel(BTFTT(), v, f);
}
PX_FORCE_INLINE Vec4V V4SetZ(const Vec4V v, const FloatV f)
{
ASSERT_ISVALIDFLOATV(f);
return V4Sel(BTTFT(), v, f);
}
PX_FORCE_INLINE Vec4V V4ClearW(const Vec4V v)
{
#if !PX_EMSCRIPTEN
return _mm_and_ps(v, V4LoadA(internalUnitSSE2Simd::gMaskXYZ));
#else
return _mm_and_ps(v, (VecI32V&)internalUnitSSE2Simd::gMaskXYZ);
#endif
}
PX_FORCE_INLINE Vec4V V4PermYXWZ(const Vec4V a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 3, 0, 1));
}
PX_FORCE_INLINE Vec4V V4PermXZXZ(const Vec4V a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 0, 2, 0));
}
PX_FORCE_INLINE Vec4V V4PermYWYW(const Vec4V a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 3, 1));
}
PX_FORCE_INLINE Vec4V V4PermYZXW(const Vec4V a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1));
}
PX_FORCE_INLINE Vec4V V4PermZWXY(const Vec4V a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(1, 0, 3, 2));
}
template <PxU8 x, PxU8 y, PxU8 z, PxU8 w>
PX_FORCE_INLINE Vec4V V4Perm(const Vec4V a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(w, z, y, x));
}
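// The template arguments select the source lane for each destination lane in x,y,z,w order,
// e.g. V4Perm<1, 0, 3, 2>(a) is equivalent to V4PermYXWZ(a).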
PX_FORCE_INLINE Vec4V V4Zero()
{
return V4Load(0.0f);
}
PX_FORCE_INLINE Vec4V V4One()
{
return V4Load(1.0f);
}
PX_FORCE_INLINE Vec4V V4Eps()
{
return V4Load(PX_EPS_REAL);
}
PX_FORCE_INLINE Vec4V V4Neg(const Vec4V f)
{
return _mm_sub_ps(_mm_setzero_ps(), f);
}
PX_FORCE_INLINE Vec4V V4Add(const Vec4V a, const Vec4V b)
{
return _mm_add_ps(a, b);
}
PX_FORCE_INLINE Vec4V V4Sub(const Vec4V a, const Vec4V b)
{
return _mm_sub_ps(a, b);
}
PX_FORCE_INLINE Vec4V V4Scale(const Vec4V a, const FloatV b)
{
return _mm_mul_ps(a, b);
}
PX_FORCE_INLINE Vec4V V4Mul(const Vec4V a, const Vec4V b)
{
return _mm_mul_ps(a, b);
}
PX_FORCE_INLINE Vec4V V4ScaleInv(const Vec4V a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(b);
return _mm_div_ps(a, b);
}
PX_FORCE_INLINE Vec4V V4Div(const Vec4V a, const Vec4V b)
{
return _mm_div_ps(a, b);
}
PX_FORCE_INLINE Vec4V V4ScaleInvFast(const Vec4V a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(b);
return _mm_mul_ps(a, _mm_rcp_ps(b));
}
PX_FORCE_INLINE Vec4V V4DivFast(const Vec4V a, const Vec4V b)
{
return _mm_mul_ps(a, _mm_rcp_ps(b));
}
PX_FORCE_INLINE Vec4V V4Recip(const Vec4V a)
{
return _mm_div_ps(V4One(), a);
}
PX_FORCE_INLINE Vec4V V4RecipFast(const Vec4V a)
{
return _mm_rcp_ps(a);
}
PX_FORCE_INLINE Vec4V V4Rsqrt(const Vec4V a)
{
return _mm_div_ps(V4One(), _mm_sqrt_ps(a));
}
PX_FORCE_INLINE Vec4V V4RsqrtFast(const Vec4V a)
{
return _mm_rsqrt_ps(a);
}
PX_FORCE_INLINE Vec4V V4Sqrt(const Vec4V a)
{
return _mm_sqrt_ps(a);
}
PX_FORCE_INLINE Vec4V V4ScaleAdd(const Vec4V a, const FloatV b, const Vec4V c)
{
ASSERT_ISVALIDFLOATV(b);
return V4Add(V4Scale(a, b), c);
}
PX_FORCE_INLINE Vec4V V4NegScaleSub(const Vec4V a, const FloatV b, const Vec4V c)
{
ASSERT_ISVALIDFLOATV(b);
return V4Sub(c, V4Scale(a, b));
}
PX_FORCE_INLINE Vec4V V4MulAdd(const Vec4V a, const Vec4V b, const Vec4V c)
{
return V4Add(V4Mul(a, b), c);
}
PX_FORCE_INLINE Vec4V V4NegMulSub(const Vec4V a, const Vec4V b, const Vec4V c)
{
return V4Sub(c, V4Mul(a, b));
}
PX_FORCE_INLINE Vec4V V4Abs(const Vec4V a)
{
return V4Max(a, V4Neg(a));
}
PX_FORCE_INLINE FloatV V4SumElements(const Vec4V a)
{
#ifdef __SSE4_2__
Vec4V r = _mm_hadd_ps(a, a);
r = _mm_hadd_ps(r, r);
return r;
#else
const Vec4V xy = V4UnpackXY(a, a); // x,x,y,y
const Vec4V zw = V4UnpackZW(a, a); // z,z,w,w
const Vec4V xz_yw = V4Add(xy, zw); // x+z,x+z,y+w,y+w
const FloatV xz = V4GetX(xz_yw); // x+z
const FloatV yw = V4GetZ(xz_yw); // y+w
return FAdd(xz, yw); // sum
#endif
}
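// V4SumElements returns the horizontal sum x+y+z+w replicated across all four lanes.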
PX_FORCE_INLINE FloatV V4Dot(const Vec4V a, const Vec4V b)
{
#ifdef __SSE4_2__
return _mm_dp_ps(a, b, 0xff);
#else
//const __m128 dot1 = _mm_mul_ps(a, b); // x,y,z,w
//const __m128 shuf1 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(2, 1, 0, 3)); // w,x,y,z
//const __m128 shuf2 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(1, 0, 3, 2)); // z,w,x,y
//const __m128 shuf3 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(0, 3, 2, 1)); // y,z,w,x
//return _mm_add_ps(_mm_add_ps(shuf2, shuf3), _mm_add_ps(dot1, shuf1));
// aw*bw | az*bz | ay*by | ax*bx
const __m128 t0 = _mm_mul_ps(a, b);
// ay*by | ax*bx | aw*bw | az*bz
const __m128 t1 = _mm_shuffle_ps(t0, t0, _MM_SHUFFLE(1, 0, 3, 2));
// ay*by + aw*bw | ax*bx + az*bz | aw*bw + ay*by | az*bz + ax*bx
const __m128 t2 = _mm_add_ps(t0, t1);
// ax*bx + az*bz | ay*by + aw*bw | az*bz + ax*bx | aw*bw + ay*by
const __m128 t3 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(2, 3, 0, 1));
// ax*bx + az*bz + ay*by + aw*bw
return _mm_add_ps(t3, t2);
#endif
}
PX_FORCE_INLINE FloatV V4Dot3(const Vec4V a, const Vec4V b)
{
#ifdef __SSE4_2__
return _mm_dp_ps(a, b, 0x7f);
#else
	const __m128 dot1 = _mm_mul_ps(a, b); // aw*bw | az*bz | ay*by | ax*bx
	const __m128 shuf1 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(0, 0, 0, 0)); // ax*bx splatted
	const __m128 shuf2 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(1, 1, 1, 1)); // ay*by splatted
	const __m128 shuf3 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(2, 2, 2, 2)); // az*bz splatted
	return _mm_add_ps(_mm_add_ps(shuf1, shuf2), shuf3); // ax*bx + ay*by + az*bz in every lane
#endif
}
PX_FORCE_INLINE Vec4V V4Cross(const Vec4V a, const Vec4V b)
{
const __m128 r1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 0, 2)); // z,x,y,w
const __m128 r2 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1)); // y,z,x,w
const __m128 l1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1)); // y,z,x,w
const __m128 l2 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 1, 0, 2)); // z,x,y,w
return _mm_sub_ps(_mm_mul_ps(l1, l2), _mm_mul_ps(r1, r2));
}
PX_FORCE_INLINE FloatV V4Length(const Vec4V a)
{
return _mm_sqrt_ps(V4Dot(a, a));
}
PX_FORCE_INLINE FloatV V4LengthSq(const Vec4V a)
{
return V4Dot(a, a);
}
PX_FORCE_INLINE Vec4V V4Normalize(const Vec4V a)
{
ASSERT_ISFINITELENGTH(a);
return V4ScaleInv(a, _mm_sqrt_ps(V4Dot(a, a)));
}
PX_FORCE_INLINE Vec4V V4NormalizeFast(const Vec4V a)
{
ASSERT_ISFINITELENGTH(a);
return V4ScaleInvFast(a, _mm_sqrt_ps(V4Dot(a, a)));
}
PX_FORCE_INLINE Vec4V V4NormalizeSafe(const Vec4V a, const Vec3V unsafeReturnValue)
{
const __m128 eps = V3Eps();
const __m128 length = V4Length(a);
const __m128 isGreaterThanZero = V4IsGrtr(length, eps);
return V4Sel(isGreaterThanZero, V4ScaleInv(a, length), unsafeReturnValue);
}
PX_FORCE_INLINE BoolV V4IsEqU32(const VecU32V a, const VecU32V b)
{
return m128_I2F(_mm_cmpeq_epi32(m128_F2I(a), m128_F2I(b)));
}
PX_FORCE_INLINE Vec4V V4Sel(const BoolV c, const Vec4V a, const Vec4V b)
{
return _mm_or_ps(_mm_andnot_ps(c, b), _mm_and_ps(c, a));
}
PX_FORCE_INLINE BoolV V4IsGrtr(const Vec4V a, const Vec4V b)
{
return _mm_cmpgt_ps(a, b);
}
PX_FORCE_INLINE BoolV V4IsGrtrOrEq(const Vec4V a, const Vec4V b)
{
return _mm_cmpge_ps(a, b);
}
PX_FORCE_INLINE BoolV V4IsEq(const Vec4V a, const Vec4V b)
{
return _mm_cmpeq_ps(a, b);
}
PX_FORCE_INLINE Vec4V V4Max(const Vec4V a, const Vec4V b)
{
return _mm_max_ps(a, b);
}
PX_FORCE_INLINE Vec4V V4Min(const Vec4V a, const Vec4V b)
{
return _mm_min_ps(a, b);
}
PX_FORCE_INLINE FloatV V4ExtractMax(const Vec4V a)
{
const __m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1, 0, 3, 2));
const __m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 3, 2, 1));
return _mm_max_ps(_mm_max_ps(a, shuf1), _mm_max_ps(shuf2, shuf3));
}
PX_FORCE_INLINE FloatV V4ExtractMin(const Vec4V a)
{
const __m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1, 0, 3, 2));
const __m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 3, 2, 1));
return _mm_min_ps(_mm_min_ps(a, shuf1), _mm_min_ps(shuf2, shuf3));
}
PX_FORCE_INLINE Vec4V V4Clamp(const Vec4V a, const Vec4V minV, const Vec4V maxV)
{
return V4Max(V4Min(a, maxV), minV);
}
PX_FORCE_INLINE PxU32 V4AllGrtr(const Vec4V a, const Vec4V b)
{
return internalUnitSSE2Simd::BAllTrue4_R(V4IsGrtr(a, b));
}
PX_FORCE_INLINE PxU32 V4AllGrtrOrEq(const Vec4V a, const Vec4V b)
{
return internalUnitSSE2Simd::BAllTrue4_R(V4IsGrtrOrEq(a, b));
}
PX_FORCE_INLINE PxU32 V4AllGrtrOrEq3(const Vec4V a, const Vec4V b)
{
return internalUnitSSE2Simd::BAllTrue3_R(V4IsGrtrOrEq(a, b));
}
PX_FORCE_INLINE PxU32 V4AllEq(const Vec4V a, const Vec4V b)
{
return internalUnitSSE2Simd::BAllTrue4_R(V4IsEq(a, b));
}
PX_FORCE_INLINE PxU32 V4AnyGrtr3(const Vec4V a, const Vec4V b)
{
return internalUnitSSE2Simd::BAnyTrue3_R(V4IsGrtr(a, b));
}
PX_FORCE_INLINE Vec4V V4Round(const Vec4V a)
{
#ifdef __SSE4_2__
return _mm_round_ps(a, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
#else
// return _mm_round_ps(a, 0x0);
const Vec4V half = V4Load(0.5f);
const __m128 signBit = _mm_cvtepi32_ps(_mm_srli_epi32(_mm_cvtps_epi32(a), 31));
const Vec4V aRound = V4Sub(V4Add(a, half), signBit);
__m128i tmp = _mm_cvttps_epi32(aRound);
return _mm_cvtepi32_ps(tmp);
#endif
}
PX_FORCE_INLINE Vec4V V4Sin(const Vec4V a)
{
const Vec4V recipTwoPi = V4LoadA(g_PXReciprocalTwoPi.f);
const Vec4V twoPi = V4LoadA(g_PXTwoPi.f);
const Vec4V tmp = V4Mul(a, recipTwoPi);
const Vec4V b = V4Round(tmp);
const Vec4V V1 = V4NegMulSub(twoPi, b, a);
// sin(V) ~= V - V^3 / 3! + V^5 / 5! - V^7 / 7! + V^9 / 9! - V^11 / 11! + V^13 / 13! -
// V^15 / 15! + V^17 / 17! - V^19 / 19! + V^21 / 21! - V^23 / 23! (for -PI <= V < PI)
const Vec4V V2 = V4Mul(V1, V1);
const Vec4V V3 = V4Mul(V2, V1);
const Vec4V V5 = V4Mul(V3, V2);
const Vec4V V7 = V4Mul(V5, V2);
const Vec4V V9 = V4Mul(V7, V2);
const Vec4V V11 = V4Mul(V9, V2);
const Vec4V V13 = V4Mul(V11, V2);
const Vec4V V15 = V4Mul(V13, V2);
const Vec4V V17 = V4Mul(V15, V2);
const Vec4V V19 = V4Mul(V17, V2);
const Vec4V V21 = V4Mul(V19, V2);
const Vec4V V23 = V4Mul(V21, V2);
const Vec4V sinCoefficients0 = V4LoadA(g_PXSinCoefficients0.f);
const Vec4V sinCoefficients1 = V4LoadA(g_PXSinCoefficients1.f);
const Vec4V sinCoefficients2 = V4LoadA(g_PXSinCoefficients2.f);
const FloatV S1 = V4GetY(sinCoefficients0);
const FloatV S2 = V4GetZ(sinCoefficients0);
const FloatV S3 = V4GetW(sinCoefficients0);
const FloatV S4 = V4GetX(sinCoefficients1);
const FloatV S5 = V4GetY(sinCoefficients1);
const FloatV S6 = V4GetZ(sinCoefficients1);
const FloatV S7 = V4GetW(sinCoefficients1);
const FloatV S8 = V4GetX(sinCoefficients2);
const FloatV S9 = V4GetY(sinCoefficients2);
const FloatV S10 = V4GetZ(sinCoefficients2);
const FloatV S11 = V4GetW(sinCoefficients2);
Vec4V Result;
Result = V4MulAdd(S1, V3, V1);
Result = V4MulAdd(S2, V5, Result);
Result = V4MulAdd(S3, V7, Result);
Result = V4MulAdd(S4, V9, Result);
Result = V4MulAdd(S5, V11, Result);
Result = V4MulAdd(S6, V13, Result);
Result = V4MulAdd(S7, V15, Result);
Result = V4MulAdd(S8, V17, Result);
Result = V4MulAdd(S9, V19, Result);
Result = V4MulAdd(S10, V21, Result);
Result = V4MulAdd(S11, V23, Result);
return Result;
}
PX_FORCE_INLINE Vec4V V4Cos(const Vec4V a)
{
const Vec4V recipTwoPi = V4LoadA(g_PXReciprocalTwoPi.f);
const Vec4V twoPi = V4LoadA(g_PXTwoPi.f);
const Vec4V tmp = V4Mul(a, recipTwoPi);
const Vec4V b = V4Round(tmp);
const Vec4V V1 = V4NegMulSub(twoPi, b, a);
// cos(V) ~= 1 - V^2 / 2! + V^4 / 4! - V^6 / 6! + V^8 / 8! - V^10 / 10! + V^12 / 12! -
// V^14 / 14! + V^16 / 16! - V^18 / 18! + V^20 / 20! - V^22 / 22! (for -PI <= V < PI)
const Vec4V V2 = V4Mul(V1, V1);
const Vec4V V4 = V4Mul(V2, V2);
const Vec4V V6 = V4Mul(V4, V2);
const Vec4V V8 = V4Mul(V4, V4);
const Vec4V V10 = V4Mul(V6, V4);
const Vec4V V12 = V4Mul(V6, V6);
const Vec4V V14 = V4Mul(V8, V6);
const Vec4V V16 = V4Mul(V8, V8);
const Vec4V V18 = V4Mul(V10, V8);
const Vec4V V20 = V4Mul(V10, V10);
const Vec4V V22 = V4Mul(V12, V10);
const Vec4V cosCoefficients0 = V4LoadA(g_PXCosCoefficients0.f);
const Vec4V cosCoefficients1 = V4LoadA(g_PXCosCoefficients1.f);
const Vec4V cosCoefficients2 = V4LoadA(g_PXCosCoefficients2.f);
const FloatV C1 = V4GetY(cosCoefficients0);
const FloatV C2 = V4GetZ(cosCoefficients0);
const FloatV C3 = V4GetW(cosCoefficients0);
const FloatV C4 = V4GetX(cosCoefficients1);
const FloatV C5 = V4GetY(cosCoefficients1);
const FloatV C6 = V4GetZ(cosCoefficients1);
const FloatV C7 = V4GetW(cosCoefficients1);
const FloatV C8 = V4GetX(cosCoefficients2);
const FloatV C9 = V4GetY(cosCoefficients2);
const FloatV C10 = V4GetZ(cosCoefficients2);
const FloatV C11 = V4GetW(cosCoefficients2);
Vec4V Result;
Result = V4MulAdd(C1, V2, V4One());
Result = V4MulAdd(C2, V4, Result);
Result = V4MulAdd(C3, V6, Result);
Result = V4MulAdd(C4, V8, Result);
Result = V4MulAdd(C5, V10, Result);
Result = V4MulAdd(C6, V12, Result);
Result = V4MulAdd(C7, V14, Result);
Result = V4MulAdd(C8, V16, Result);
Result = V4MulAdd(C9, V18, Result);
Result = V4MulAdd(C10, V20, Result);
Result = V4MulAdd(C11, V22, Result);
return Result;
}
PX_FORCE_INLINE void V4Transpose(Vec4V& col0, Vec4V& col1, Vec4V& col2, Vec4V& col3)
{
Vec4V tmp0 = _mm_unpacklo_ps(col0, col1);
Vec4V tmp2 = _mm_unpacklo_ps(col2, col3);
Vec4V tmp1 = _mm_unpackhi_ps(col0, col1);
Vec4V tmp3 = _mm_unpackhi_ps(col2, col3);
col0 = _mm_movelh_ps(tmp0, tmp2);
col1 = _mm_movehl_ps(tmp2, tmp0);
col2 = _mm_movelh_ps(tmp1, tmp3);
col3 = _mm_movehl_ps(tmp3, tmp1);
}
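// V4Transpose is the standard SSE 4x4 transpose (the same unpack/movelh/movehl pattern as
// _MM_TRANSPOSE4_PS), transposing the four column vectors in place.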
//////////////////////////////////
// BoolV
//////////////////////////////////
PX_FORCE_INLINE BoolV BFFFF()
{
return _mm_setzero_ps();
}
PX_FORCE_INLINE BoolV BFFFT()
{
/*const PX_ALIGN(16, PxU32 f[4])={0,0,0,0xFFFFFFFF};
const __m128 ffft=_mm_load_ps((float*)&f);
return ffft;*/
return m128_I2F(_mm_set_epi32(-1, 0, 0, 0));
}
PX_FORCE_INLINE BoolV BFFTF()
{
/*const PX_ALIGN(16, PxU32 f[4])={0,0,0xFFFFFFFF,0};
const __m128 fftf=_mm_load_ps((float*)&f);
return fftf;*/
return m128_I2F(_mm_set_epi32(0, -1, 0, 0));
}
PX_FORCE_INLINE BoolV BFFTT()
{
/*const PX_ALIGN(16, PxU32 f[4])={0,0,0xFFFFFFFF,0xFFFFFFFF};
const __m128 fftt=_mm_load_ps((float*)&f);
return fftt;*/
return m128_I2F(_mm_set_epi32(-1, -1, 0, 0));
}
PX_FORCE_INLINE BoolV BFTFF()
{
/*const PX_ALIGN(16, PxU32 f[4])={0,0xFFFFFFFF,0,0};
const __m128 ftff=_mm_load_ps((float*)&f);
return ftff;*/
return m128_I2F(_mm_set_epi32(0, 0, -1, 0));
}
PX_FORCE_INLINE BoolV BFTFT()
{
/*const PX_ALIGN(16, PxU32 f[4])={0,0xFFFFFFFF,0,0xFFFFFFFF};
const __m128 ftft=_mm_load_ps((float*)&f);
return ftft;*/
return m128_I2F(_mm_set_epi32(-1, 0, -1, 0));
}
PX_FORCE_INLINE BoolV BFTTF()
{
/*const PX_ALIGN(16, PxU32 f[4])={0,0xFFFFFFFF,0xFFFFFFFF,0};
const __m128 fttf=_mm_load_ps((float*)&f);
return fttf;*/
return m128_I2F(_mm_set_epi32(0, -1, -1, 0));
}
PX_FORCE_INLINE BoolV BFTTT()
{
/*const PX_ALIGN(16, PxU32 f[4])={0,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF};
const __m128 fttt=_mm_load_ps((float*)&f);
return fttt;*/
return m128_I2F(_mm_set_epi32(-1, -1, -1, 0));
}
PX_FORCE_INLINE BoolV BTFFF()
{
// const PX_ALIGN(16, PxU32 f[4])={0xFFFFFFFF,0,0,0};
// const __m128 tfff=_mm_load_ps((float*)&f);
// return tfff;
return m128_I2F(_mm_set_epi32(0, 0, 0, -1));
}
PX_FORCE_INLINE BoolV BTFFT()
{
/*const PX_ALIGN(16, PxU32 f[4])={0xFFFFFFFF,0,0,0xFFFFFFFF};
const __m128 tfft=_mm_load_ps((float*)&f);
return tfft;*/
return m128_I2F(_mm_set_epi32(-1, 0, 0, -1));
}
PX_FORCE_INLINE BoolV BTFTF()
{
/*const PX_ALIGN(16, PxU32 f[4])={0xFFFFFFFF,0,0xFFFFFFFF,0};
const __m128 tftf=_mm_load_ps((float*)&f);
return tftf;*/
return m128_I2F(_mm_set_epi32(0, -1, 0, -1));
}
PX_FORCE_INLINE BoolV BTFTT()
{
/*const PX_ALIGN(16, PxU32 f[4])={0xFFFFFFFF,0,0xFFFFFFFF,0xFFFFFFFF};
const __m128 tftt=_mm_load_ps((float*)&f);
return tftt;*/
return m128_I2F(_mm_set_epi32(-1, -1, 0, -1));
}
PX_FORCE_INLINE BoolV BTTFF()
{
/*const PX_ALIGN(16, PxU32 f[4])={0xFFFFFFFF,0xFFFFFFFF,0,0};
const __m128 ttff=_mm_load_ps((float*)&f);
return ttff;*/
return m128_I2F(_mm_set_epi32(0, 0, -1, -1));
}
PX_FORCE_INLINE BoolV BTTFT()
{
/*const PX_ALIGN(16, PxU32 f[4])={0xFFFFFFFF,0xFFFFFFFF,0,0xFFFFFFFF};
const __m128 ttft=_mm_load_ps((float*)&f);
return ttft;*/
return m128_I2F(_mm_set_epi32(-1, 0, -1, -1));
}
PX_FORCE_INLINE BoolV BTTTF()
{
/*const PX_ALIGN(16, PxU32 f[4])={0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0};
const __m128 tttf=_mm_load_ps((float*)&f);
return tttf;*/
return m128_I2F(_mm_set_epi32(0, -1, -1, -1));
}
PX_FORCE_INLINE BoolV BTTTT()
{
/*const PX_ALIGN(16, PxU32 f[4])={0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF};
const __m128 tttt=_mm_load_ps((float*)&f);
return tttt;*/
return m128_I2F(_mm_set_epi32(-1, -1, -1, -1));
}
PX_FORCE_INLINE BoolV BXMask()
{
/*const PX_ALIGN(16, PxU32 f[4])={0xFFFFFFFF,0,0,0};
const __m128 tfff=_mm_load_ps((float*)&f);
return tfff;*/
return m128_I2F(_mm_set_epi32(0, 0, 0, -1));
}
PX_FORCE_INLINE BoolV BYMask()
{
/*const PX_ALIGN(16, PxU32 f[4])={0,0xFFFFFFFF,0,0};
const __m128 ftff=_mm_load_ps((float*)&f);
return ftff;*/
return m128_I2F(_mm_set_epi32(0, 0, -1, 0));
}
PX_FORCE_INLINE BoolV BZMask()
{
/*const PX_ALIGN(16, PxU32 f[4])={0,0,0xFFFFFFFF,0};
const __m128 fftf=_mm_load_ps((float*)&f);
return fftf;*/
return m128_I2F(_mm_set_epi32(0, -1, 0, 0));
}
PX_FORCE_INLINE BoolV BWMask()
{
/*const PX_ALIGN(16, PxU32 f[4])={0,0,0,0xFFFFFFFF};
const __m128 ffft=_mm_load_ps((float*)&f);
return ffft;*/
return m128_I2F(_mm_set_epi32(-1, 0, 0, 0));
}
PX_FORCE_INLINE BoolV BGetX(const BoolV f)
{
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(0, 0, 0, 0));
}
PX_FORCE_INLINE BoolV BGetY(const BoolV f)
{
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(1, 1, 1, 1));
}
PX_FORCE_INLINE BoolV BGetZ(const BoolV f)
{
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(2, 2, 2, 2));
}
PX_FORCE_INLINE BoolV BGetW(const BoolV f)
{
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(3, 3, 3, 3));
}
PX_FORCE_INLINE BoolV BSetX(const BoolV v, const BoolV f)
{
return V4Sel(BFTTT(), v, f);
}
PX_FORCE_INLINE BoolV BSetY(const BoolV v, const BoolV f)
{
return V4Sel(BTFTT(), v, f);
}
PX_FORCE_INLINE BoolV BSetZ(const BoolV v, const BoolV f)
{
return V4Sel(BTTFT(), v, f);
}
PX_FORCE_INLINE BoolV BSetW(const BoolV v, const BoolV f)
{
return V4Sel(BTTTF(), v, f);
}
PX_FORCE_INLINE BoolV BAnd(const BoolV a, const BoolV b)
{
return _mm_and_ps(a, b);
}
PX_FORCE_INLINE BoolV BNot(const BoolV a)
{
const BoolV bAllTrue(BTTTT());
return _mm_xor_ps(a, bAllTrue);
}
PX_FORCE_INLINE BoolV BAndNot(const BoolV a, const BoolV b)
{
return _mm_andnot_ps(b, a);
}
PX_FORCE_INLINE BoolV BOr(const BoolV a, const BoolV b)
{
return _mm_or_ps(a, b);
}
PX_FORCE_INLINE BoolV BAllTrue4(const BoolV a)
{
const BoolV bTmp =
_mm_and_ps(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 1, 0, 1)), _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 3, 2, 3)));
return _mm_and_ps(_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(0, 0, 0, 0)),
_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(1, 1, 1, 1)));
}
PX_FORCE_INLINE BoolV BAnyTrue4(const BoolV a)
{
const BoolV bTmp =
_mm_or_ps(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 1, 0, 1)), _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 3, 2, 3)));
return _mm_or_ps(_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(0, 0, 0, 0)),
_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(1, 1, 1, 1)));
}
PX_FORCE_INLINE BoolV BAllTrue3(const BoolV a)
{
const BoolV bTmp =
_mm_and_ps(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 1, 0, 1)), _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 2, 2, 2)));
return _mm_and_ps(_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(0, 0, 0, 0)),
_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(1, 1, 1, 1)));
}
PX_FORCE_INLINE BoolV BAnyTrue3(const BoolV a)
{
const BoolV bTmp =
_mm_or_ps(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 1, 0, 1)), _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 2, 2, 2)));
return _mm_or_ps(_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(0, 0, 0, 0)),
_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(1, 1, 1, 1)));
}
PX_FORCE_INLINE PxU32 BAllEq(const BoolV a, const BoolV b)
{
const BoolV bTest = m128_I2F(_mm_cmpeq_epi32(m128_F2I(a), m128_F2I(b)));
return internalUnitSSE2Simd::BAllTrue4_R(bTest);
}
PX_FORCE_INLINE PxU32 BAllEqTTTT(const BoolV a)
{
return PxU32(_mm_movemask_ps(a)==15);
}
PX_FORCE_INLINE PxU32 BAllEqFFFF(const BoolV a)
{
return PxU32(_mm_movemask_ps(a)==0);
}
PX_FORCE_INLINE PxU32 BGetBitMask(const BoolV a)
{
return PxU32(_mm_movemask_ps(a));
}
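// BGetBitMask packs the sign bit of each lane into bits 0..3 (x -> bit 0, ..., w -> bit 3);
// BAllEqTTTT/BAllEqFFFF above compare this mask against 15/0.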
//////////////////////////////////
// MAT33V
//////////////////////////////////
PX_FORCE_INLINE Vec3V M33MulV3(const Mat33V& a, const Vec3V b)
{
const FloatV x = V3GetX(b);
const FloatV y = V3GetY(b);
const FloatV z = V3GetZ(b);
const Vec3V v0 = V3Scale(a.col0, x);
const Vec3V v1 = V3Scale(a.col1, y);
const Vec3V v2 = V3Scale(a.col2, z);
const Vec3V v0PlusV1 = V3Add(v0, v1);
return V3Add(v0PlusV1, v2);
}
PX_FORCE_INLINE Vec3V M33TrnspsMulV3(const Mat33V& a, const Vec3V b)
{
const FloatV x = V3Dot(a.col0, b);
const FloatV y = V3Dot(a.col1, b);
const FloatV z = V3Dot(a.col2, b);
return V3Merge(x, y, z);
}
PX_FORCE_INLINE Vec3V M33MulV3AddV3(const Mat33V& A, const Vec3V b, const Vec3V c)
{
const FloatV x = V3GetX(b);
const FloatV y = V3GetY(b);
const FloatV z = V3GetZ(b);
Vec3V result = V3ScaleAdd(A.col0, x, c);
result = V3ScaleAdd(A.col1, y, result);
return V3ScaleAdd(A.col2, z, result);
}
PX_FORCE_INLINE Mat33V M33MulM33(const Mat33V& a, const Mat33V& b)
{
return Mat33V(M33MulV3(a, b.col0), M33MulV3(a, b.col1), M33MulV3(a, b.col2));
}
PX_FORCE_INLINE Mat33V M33Add(const Mat33V& a, const Mat33V& b)
{
return Mat33V(V3Add(a.col0, b.col0), V3Add(a.col1, b.col1), V3Add(a.col2, b.col2));
}
PX_FORCE_INLINE Mat33V M33Scale(const Mat33V& a, const FloatV& b)
{
return Mat33V(V3Scale(a.col0, b), V3Scale(a.col1, b), V3Scale(a.col2, b));
}
PX_FORCE_INLINE Mat33V M33Inverse(const Mat33V& a)
{
const BoolV tfft = BTFFT();
const BoolV tttf = BTTTF();
const FloatV zero = FZero();
const Vec3V cross01 = V3Cross(a.col0, a.col1);
const Vec3V cross12 = V3Cross(a.col1, a.col2);
const Vec3V cross20 = V3Cross(a.col2, a.col0);
const FloatV dot = V3Dot(cross01, a.col2);
const FloatV invDet = _mm_rcp_ps(dot);
const Vec3V mergeh = _mm_unpacklo_ps(cross12, cross01);
const Vec3V mergel = _mm_unpackhi_ps(cross12, cross01);
Vec3V colInv0 = _mm_unpacklo_ps(mergeh, cross20);
colInv0 = _mm_or_ps(_mm_andnot_ps(tttf, zero), _mm_and_ps(tttf, colInv0));
const Vec3V zppd = _mm_shuffle_ps(mergeh, cross20, _MM_SHUFFLE(3, 0, 0, 2));
const Vec3V pbwp = _mm_shuffle_ps(cross20, mergeh, _MM_SHUFFLE(3, 3, 1, 0));
const Vec3V colInv1 = _mm_or_ps(_mm_andnot_ps(BTFFT(), pbwp), _mm_and_ps(BTFFT(), zppd));
const Vec3V xppd = _mm_shuffle_ps(mergel, cross20, _MM_SHUFFLE(3, 0, 0, 0));
const Vec3V pcyp = _mm_shuffle_ps(cross20, mergel, _MM_SHUFFLE(3, 1, 2, 0));
const Vec3V colInv2 = _mm_or_ps(_mm_andnot_ps(tfft, pcyp), _mm_and_ps(tfft, xppd));
return Mat33V(_mm_mul_ps(colInv0, invDet), _mm_mul_ps(colInv1, invDet), _mm_mul_ps(colInv2, invDet));
}
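// M33Inverse forms the adjugate from the cross products of the columns (col1 x col2,
// col2 x col0, col0 x col1) and scales it by the reciprocal of the determinant
// dot(col0 x col1, col2). The reciprocal comes from _mm_rcp_ps, so the result only carries
// the roughly 12-bit precision of that approximation.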
PX_FORCE_INLINE Mat33V M33Trnsps(const Mat33V& a)
{
return Mat33V(V3Merge(V3GetX(a.col0), V3GetX(a.col1), V3GetX(a.col2)),
V3Merge(V3GetY(a.col0), V3GetY(a.col1), V3GetY(a.col2)),
V3Merge(V3GetZ(a.col0), V3GetZ(a.col1), V3GetZ(a.col2)));
}
PX_FORCE_INLINE Mat33V M33Identity()
{
return Mat33V(V3UnitX(), V3UnitY(), V3UnitZ());
}
PX_FORCE_INLINE Mat33V M33Sub(const Mat33V& a, const Mat33V& b)
{
return Mat33V(V3Sub(a.col0, b.col0), V3Sub(a.col1, b.col1), V3Sub(a.col2, b.col2));
}
PX_FORCE_INLINE Mat33V M33Neg(const Mat33V& a)
{
return Mat33V(V3Neg(a.col0), V3Neg(a.col1), V3Neg(a.col2));
}
PX_FORCE_INLINE Mat33V M33Abs(const Mat33V& a)
{
return Mat33V(V3Abs(a.col0), V3Abs(a.col1), V3Abs(a.col2));
}
PX_FORCE_INLINE Mat33V PromoteVec3V(const Vec3V v)
{
const BoolV bTFFF = BTFFF();
const BoolV bFTFF = BFTFF();
const BoolV bFFTF = BFFTF();
const Vec3V zero = V3Zero();
return Mat33V(V3Sel(bTFFF, v, zero), V3Sel(bFTFF, v, zero), V3Sel(bFFTF, v, zero));
}
PX_FORCE_INLINE Mat33V M33Diagonal(const Vec3VArg d)
{
const Vec3V x = V3Mul(V3UnitX(), d);
const Vec3V y = V3Mul(V3UnitY(), d);
const Vec3V z = V3Mul(V3UnitZ(), d);
return Mat33V(x, y, z);
}
//////////////////////////////////
// MAT34V
//////////////////////////////////
PX_FORCE_INLINE Vec3V M34MulV3(const Mat34V& a, const Vec3V b)
{
const FloatV x = V3GetX(b);
const FloatV y = V3GetY(b);
const FloatV z = V3GetZ(b);
const Vec3V v0 = V3Scale(a.col0, x);
const Vec3V v1 = V3Scale(a.col1, y);
const Vec3V v2 = V3Scale(a.col2, z);
const Vec3V v0PlusV1 = V3Add(v0, v1);
const Vec3V v0PlusV1Plusv2 = V3Add(v0PlusV1, v2);
return V3Add(v0PlusV1Plusv2, a.col3);
}
PX_FORCE_INLINE Vec3V M34Mul33V3(const Mat34V& a, const Vec3V b)
{
const FloatV x = V3GetX(b);
const FloatV y = V3GetY(b);
const FloatV z = V3GetZ(b);
const Vec3V v0 = V3Scale(a.col0, x);
const Vec3V v1 = V3Scale(a.col1, y);
const Vec3V v2 = V3Scale(a.col2, z);
const Vec3V v0PlusV1 = V3Add(v0, v1);
return V3Add(v0PlusV1, v2);
}
PX_FORCE_INLINE Vec3V M34TrnspsMul33V3(const Mat34V& a, const Vec3V b)
{
const FloatV x = V3Dot(a.col0, b);
const FloatV y = V3Dot(a.col1, b);
const FloatV z = V3Dot(a.col2, b);
return V3Merge(x, y, z);
}
PX_FORCE_INLINE Mat34V M34MulM34(const Mat34V& a, const Mat34V& b)
{
return Mat34V(M34Mul33V3(a, b.col0), M34Mul33V3(a, b.col1), M34Mul33V3(a, b.col2), M34MulV3(a, b.col3));
}
PX_FORCE_INLINE Mat33V M34MulM33(const Mat34V& a, const Mat33V& b)
{
return Mat33V(M34Mul33V3(a, b.col0), M34Mul33V3(a, b.col1), M34Mul33V3(a, b.col2));
}
PX_FORCE_INLINE Mat33V M34Mul33MM34(const Mat34V& a, const Mat34V& b)
{
return Mat33V(M34Mul33V3(a, b.col0), M34Mul33V3(a, b.col1), M34Mul33V3(a, b.col2));
}
PX_FORCE_INLINE Mat34V M34Add(const Mat34V& a, const Mat34V& b)
{
return Mat34V(V3Add(a.col0, b.col0), V3Add(a.col1, b.col1), V3Add(a.col2, b.col2), V3Add(a.col3, b.col3));
}
PX_FORCE_INLINE Mat33V M34Trnsps33(const Mat34V& a)
{
return Mat33V(V3Merge(V3GetX(a.col0), V3GetX(a.col1), V3GetX(a.col2)),
V3Merge(V3GetY(a.col0), V3GetY(a.col1), V3GetY(a.col2)),
V3Merge(V3GetZ(a.col0), V3GetZ(a.col1), V3GetZ(a.col2)));
}
//////////////////////////////////
// MAT44V
//////////////////////////////////
PX_FORCE_INLINE Vec4V M44MulV4(const Mat44V& a, const Vec4V b)
{
const FloatV x = V4GetX(b);
const FloatV y = V4GetY(b);
const FloatV z = V4GetZ(b);
const FloatV w = V4GetW(b);
const Vec4V v0 = V4Scale(a.col0, x);
const Vec4V v1 = V4Scale(a.col1, y);
const Vec4V v2 = V4Scale(a.col2, z);
const Vec4V v3 = V4Scale(a.col3, w);
const Vec4V v0PlusV1 = V4Add(v0, v1);
const Vec4V v0PlusV1Plusv2 = V4Add(v0PlusV1, v2);
return V4Add(v0PlusV1Plusv2, v3);
}
PX_FORCE_INLINE Vec4V M44TrnspsMulV4(const Mat44V& a, const Vec4V b)
{
PX_ALIGN(16, FloatV) dotProdArray[4] = { V4Dot(a.col0, b), V4Dot(a.col1, b), V4Dot(a.col2, b), V4Dot(a.col3, b) };
return V4Merge(dotProdArray);
}
PX_FORCE_INLINE Mat44V M44MulM44(const Mat44V& a, const Mat44V& b)
{
return Mat44V(M44MulV4(a, b.col0), M44MulV4(a, b.col1), M44MulV4(a, b.col2), M44MulV4(a, b.col3));
}
PX_FORCE_INLINE Mat44V M44Add(const Mat44V& a, const Mat44V& b)
{
return Mat44V(V4Add(a.col0, b.col0), V4Add(a.col1, b.col1), V4Add(a.col2, b.col2), V4Add(a.col3, b.col3));
}
PX_FORCE_INLINE Mat44V M44Trnsps(const Mat44V& a)
{
const Vec4V v0 = _mm_unpacklo_ps(a.col0, a.col2);
const Vec4V v1 = _mm_unpackhi_ps(a.col0, a.col2);
const Vec4V v2 = _mm_unpacklo_ps(a.col1, a.col3);
const Vec4V v3 = _mm_unpackhi_ps(a.col1, a.col3);
return Mat44V(_mm_unpacklo_ps(v0, v2), _mm_unpackhi_ps(v0, v2), _mm_unpacklo_ps(v1, v3), _mm_unpackhi_ps(v1, v3));
}
PX_FORCE_INLINE Mat44V M44Inverse(const Mat44V& a)
{
__m128 minor0, minor1, minor2, minor3;
__m128 row0, row1, row2, row3;
__m128 det, tmp1;
tmp1 = V4Zero();
row1 = V4Zero();
row3 = V4Zero();
row0 = a.col0;
row1 = _mm_shuffle_ps(a.col1, a.col1, _MM_SHUFFLE(1, 0, 3, 2));
row2 = a.col2;
row3 = _mm_shuffle_ps(a.col3, a.col3, _MM_SHUFFLE(1, 0, 3, 2));
tmp1 = _mm_mul_ps(row2, row3);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1);
minor0 = _mm_mul_ps(row1, tmp1);
minor1 = _mm_mul_ps(row0, tmp1);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E);
minor0 = _mm_sub_ps(_mm_mul_ps(row1, tmp1), minor0);
minor1 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor1);
minor1 = _mm_shuffle_ps(minor1, minor1, 0x4E);
tmp1 = _mm_mul_ps(row1, row2);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1);
minor0 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor0);
minor3 = _mm_mul_ps(row0, tmp1);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E);
minor0 = _mm_sub_ps(minor0, _mm_mul_ps(row3, tmp1));
minor3 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor3);
minor3 = _mm_shuffle_ps(minor3, minor3, 0x4E);
tmp1 = _mm_mul_ps(_mm_shuffle_ps(row1, row1, 0x4E), row3);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1);
row2 = _mm_shuffle_ps(row2, row2, 0x4E);
minor0 = _mm_add_ps(_mm_mul_ps(row2, tmp1), minor0);
minor2 = _mm_mul_ps(row0, tmp1);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E);
minor0 = _mm_sub_ps(minor0, _mm_mul_ps(row2, tmp1));
minor2 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor2);
minor2 = _mm_shuffle_ps(minor2, minor2, 0x4E);
tmp1 = _mm_mul_ps(row0, row1);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1);
minor2 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor2);
minor3 = _mm_sub_ps(_mm_mul_ps(row2, tmp1), minor3);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E);
minor2 = _mm_sub_ps(_mm_mul_ps(row3, tmp1), minor2);
minor3 = _mm_sub_ps(minor3, _mm_mul_ps(row2, tmp1));
tmp1 = _mm_mul_ps(row0, row3);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1);
minor1 = _mm_sub_ps(minor1, _mm_mul_ps(row2, tmp1));
minor2 = _mm_add_ps(_mm_mul_ps(row1, tmp1), minor2);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E);
minor1 = _mm_add_ps(_mm_mul_ps(row2, tmp1), minor1);
minor2 = _mm_sub_ps(minor2, _mm_mul_ps(row1, tmp1));
tmp1 = _mm_mul_ps(row0, row2);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1);
minor1 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor1);
minor3 = _mm_sub_ps(minor3, _mm_mul_ps(row1, tmp1));
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E);
minor1 = _mm_sub_ps(minor1, _mm_mul_ps(row3, tmp1));
minor3 = _mm_add_ps(_mm_mul_ps(row1, tmp1), minor3);
det = _mm_mul_ps(row0, minor0);
det = _mm_add_ps(_mm_shuffle_ps(det, det, 0x4E), det);
det = _mm_add_ss(_mm_shuffle_ps(det, det, 0xB1), det);
tmp1 = _mm_rcp_ss(det);
#if 0
det = _mm_sub_ss(_mm_add_ss(tmp1, tmp1), _mm_mul_ss(det, _mm_mul_ss(tmp1, tmp1)));
det = _mm_shuffle_ps(det, det, 0x00);
#else
det = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(0, 0, 0, 0));
#endif
minor0 = _mm_mul_ps(det, minor0);
minor1 = _mm_mul_ps(det, minor1);
minor2 = _mm_mul_ps(det, minor2);
minor3 = _mm_mul_ps(det, minor3);
Mat44V invTrans(minor0, minor1, minor2, minor3);
return M44Trnsps(invTrans);
}
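// M44Inverse follows the well-known Intel SSE Cramer's-rule sample: cofactors are built from
// 2x2 sub-determinants and scaled by 1/det obtained from _mm_rcp_ss (the Newton-Raphson
// refinement is compiled out in the #if 0 block above, so expect roughly 12 bits of precision
// in 1/det); the final M44Trnsps converts the row-oriented result back to this library's
// column vectors.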
PX_FORCE_INLINE Vec4V V4LoadXYZW(const PxF32& x, const PxF32& y, const PxF32& z, const PxF32& w)
{
return _mm_set_ps(w, z, y, x);
}
/*
// AP: work in progress - use proper SSE intrinsics where possible
PX_FORCE_INLINE VecU16V V4U32PK(VecU32V a, VecU32V b)
{
VecU16V result;
result.m128_u16[0] = PxU16(PxClamp<PxU32>((a).m128_u32[0], 0, 0xFFFF));
result.m128_u16[1] = PxU16(PxClamp<PxU32>((a).m128_u32[1], 0, 0xFFFF));
result.m128_u16[2] = PxU16(PxClamp<PxU32>((a).m128_u32[2], 0, 0xFFFF));
result.m128_u16[3] = PxU16(PxClamp<PxU32>((a).m128_u32[3], 0, 0xFFFF));
result.m128_u16[4] = PxU16(PxClamp<PxU32>((b).m128_u32[0], 0, 0xFFFF));
result.m128_u16[5] = PxU16(PxClamp<PxU32>((b).m128_u32[1], 0, 0xFFFF));
result.m128_u16[6] = PxU16(PxClamp<PxU32>((b).m128_u32[2], 0, 0xFFFF));
result.m128_u16[7] = PxU16(PxClamp<PxU32>((b).m128_u32[3], 0, 0xFFFF));
return result;
}
*/
PX_FORCE_INLINE VecU32V V4U32Sel(const BoolV c, const VecU32V a, const VecU32V b)
{
return m128_I2F(_mm_or_si128(_mm_andnot_si128(m128_F2I(c), m128_F2I(b)), _mm_and_si128(m128_F2I(c), m128_F2I(a))));
}
PX_FORCE_INLINE VecU32V V4U32or(VecU32V a, VecU32V b)
{
return m128_I2F(_mm_or_si128(m128_F2I(a), m128_F2I(b)));
}
PX_FORCE_INLINE VecU32V V4U32xor(VecU32V a, VecU32V b)
{
return m128_I2F(_mm_xor_si128(m128_F2I(a), m128_F2I(b)));
}
PX_FORCE_INLINE VecU32V V4U32and(VecU32V a, VecU32V b)
{
return m128_I2F(_mm_and_si128(m128_F2I(a), m128_F2I(b)));
}
PX_FORCE_INLINE VecU32V V4U32Andc(VecU32V a, VecU32V b)
{
return m128_I2F(_mm_andnot_si128(m128_F2I(b), m128_F2I(a)));
}
/*
PX_FORCE_INLINE VecU16V V4U16Or(VecU16V a, VecU16V b)
{
return m128_I2F(_mm_or_si128(m128_F2I(a), m128_F2I(b)));
}
*/
/*
PX_FORCE_INLINE VecU16V V4U16And(VecU16V a, VecU16V b)
{
return m128_I2F(_mm_and_si128(m128_F2I(a), m128_F2I(b)));
}
*/
/*
PX_FORCE_INLINE VecU16V V4U16Andc(VecU16V a, VecU16V b)
{
return m128_I2F(_mm_andnot_si128(m128_F2I(b), m128_F2I(a)));
}
*/
PX_FORCE_INLINE VecI32V I4Load(const PxI32 i)
{
return m128_F2I(_mm_load1_ps(reinterpret_cast<const PxF32*>(&i)));
}
PX_FORCE_INLINE VecI32V I4LoadU(const PxI32* i)
{
return m128_F2I(_mm_loadu_ps(reinterpret_cast<const PxF32*>(i)));
}
PX_FORCE_INLINE VecI32V I4LoadA(const PxI32* i)
{
return m128_F2I(_mm_load_ps(reinterpret_cast<const PxF32*>(i)));
}
PX_FORCE_INLINE VecI32V VecI32V_Add(const VecI32VArg a, const VecI32VArg b)
{
return _mm_add_epi32(a, b);
}
PX_FORCE_INLINE VecI32V VecI32V_Sub(const VecI32VArg a, const VecI32VArg b)
{
return _mm_sub_epi32(a, b);
}
PX_FORCE_INLINE BoolV VecI32V_IsGrtr(const VecI32VArg a, const VecI32VArg b)
{
return m128_I2F(_mm_cmpgt_epi32(a, b));
}
PX_FORCE_INLINE BoolV VecI32V_IsEq(const VecI32VArg a, const VecI32VArg b)
{
return m128_I2F(_mm_cmpeq_epi32(a, b));
}
PX_FORCE_INLINE VecI32V V4I32Sel(const BoolV c, const VecI32V a, const VecI32V b)
{
return _mm_or_si128(_mm_andnot_si128(m128_F2I(c), b), _mm_and_si128(m128_F2I(c), a));
}
PX_FORCE_INLINE VecI32V VecI32V_Zero()
{
return _mm_setzero_si128();
}
PX_FORCE_INLINE VecI32V VecI32V_One()
{
return I4Load(1);
}
PX_FORCE_INLINE VecI32V VecI32V_Two()
{
return I4Load(2);
}
PX_FORCE_INLINE VecI32V VecI32V_MinusOne()
{
return I4Load(-1);
}
PX_FORCE_INLINE VecU32V U4Zero()
{
return U4Load(0);
}
PX_FORCE_INLINE VecU32V U4One()
{
return U4Load(1);
}
PX_FORCE_INLINE VecU32V U4Two()
{
return U4Load(2);
}
PX_FORCE_INLINE VecI32V VecI32V_Sel(const BoolV c, const VecI32VArg a, const VecI32VArg b)
{
return _mm_or_si128(_mm_andnot_si128(m128_F2I(c), b), _mm_and_si128(m128_F2I(c), a));
}
PX_FORCE_INLINE VecShiftV VecI32V_PrepareShift(const VecI32VArg shift)
{
VecShiftV s;
s.shift = VecI32V_Sel(BTFFF(), shift, VecI32V_Zero());
return s;
}
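// _mm_sll_epi32/_mm_srl_epi32 shift every lane by the count held in the low 64 bits of the
// count register, so VecI32V_PrepareShift keeps only the X lane of the requested shift and
// zeroes the rest. Illustrative use:
//   const VecShiftV s = VecI32V_PrepareShift(I4Load(4));
//   const VecI32V r = VecI32V_LeftShift(a, s); // every lane of a shifted left by 4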
PX_FORCE_INLINE VecI32V VecI32V_LeftShift(const VecI32VArg a, const VecShiftVArg count)
{
return _mm_sll_epi32(a, count.shift);
}
PX_FORCE_INLINE VecI32V VecI32V_RightShift(const VecI32VArg a, const VecShiftVArg count)
{
return _mm_srl_epi32(a, count.shift);
}
PX_FORCE_INLINE VecI32V VecI32V_LeftShift(const VecI32VArg a, const PxU32 count)
{
return _mm_slli_epi32(a, PxI32(count));
}
PX_FORCE_INLINE VecI32V VecI32V_RightShift(const VecI32VArg a, const PxU32 count)
{
return _mm_srai_epi32(a, PxI32(count));
}
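// A minimal usage sketch for the prepare-shift pattern above (illustration only, kept disabled): the helper
// name and values below are made up and are not part of the library API.
#if 0
PX_FORCE_INLINE VecI32V shiftAllLanesLeftBySketch(const VecI32V values, PxI32 bits)
{
// Build a vector shift count, keep only its X lane via VecI32V_PrepareShift, then shift every lane of 'values'.
const VecI32V countV = I4Load(bits);
const VecShiftV prepared = VecI32V_PrepareShift(countV);
return VecI32V_LeftShift(values, prepared);
}
#endif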
PX_FORCE_INLINE VecI32V VecI32V_And(const VecI32VArg a, const VecI32VArg b)
{
return _mm_and_si128(a, b);
}
PX_FORCE_INLINE VecI32V VecI32V_Or(const VecI32VArg a, const VecI32VArg b)
{
return _mm_or_si128(a, b);
}
PX_FORCE_INLINE VecI32V VecI32V_GetX(const VecI32VArg a)
{
return m128_F2I(_mm_shuffle_ps(m128_I2F(a), m128_I2F(a), _MM_SHUFFLE(0, 0, 0, 0)));
}
PX_FORCE_INLINE VecI32V VecI32V_GetY(const VecI32VArg a)
{
return m128_F2I(_mm_shuffle_ps(m128_I2F(a), m128_I2F(a), _MM_SHUFFLE(1, 1, 1, 1)));
}
PX_FORCE_INLINE VecI32V VecI32V_GetZ(const VecI32VArg a)
{
return m128_F2I(_mm_shuffle_ps(m128_I2F(a), m128_I2F(a), _MM_SHUFFLE(2, 2, 2, 2)));
}
PX_FORCE_INLINE VecI32V VecI32V_GetW(const VecI32VArg a)
{
return m128_F2I(_mm_shuffle_ps(m128_I2F(a), m128_I2F(a), _MM_SHUFFLE(3, 3, 3, 3)));
}
PX_FORCE_INLINE void PxI32_From_VecI32V(const VecI32VArg a, PxI32* i)
{
_mm_store_ss(reinterpret_cast<PxF32*>(i), m128_I2F(a));
}
PX_FORCE_INLINE VecI32V VecI32V_Merge(const VecI32VArg x, const VecI32VArg y, const VecI32VArg z, const VecI32VArg w)
{
const __m128 xw = _mm_move_ss(m128_I2F(y), m128_I2F(x)); // y, y, y, x
const __m128 yz = _mm_move_ss(m128_I2F(z), m128_I2F(w)); // z, z, z, w
return m128_F2I(_mm_shuffle_ps(xw, yz, _MM_SHUFFLE(0, 2, 1, 0)));
}
PX_FORCE_INLINE VecI32V VecI32V_From_BoolV(const BoolVArg a)
{
return m128_F2I(a);
}
PX_FORCE_INLINE VecU32V VecU32V_From_BoolV(const BoolVArg a)
{
return a;
}
/*
template<int a> PX_FORCE_INLINE VecI32V V4ISplat()
{
VecI32V result;
result.m128_i32[0] = a;
result.m128_i32[1] = a;
result.m128_i32[2] = a;
result.m128_i32[3] = a;
return result;
}
template<PxU32 a> PX_FORCE_INLINE VecU32V V4USplat()
{
VecU32V result;
result.m128_u32[0] = a;
result.m128_u32[1] = a;
result.m128_u32[2] = a;
result.m128_u32[3] = a;
return result;
}
*/
/*
PX_FORCE_INLINE void V4U16StoreAligned(VecU16V val, VecU16V* address)
{
*address = val;
}
*/
PX_FORCE_INLINE void V4U32StoreAligned(VecU32V val, VecU32V* address)
{
*address = val;
}
PX_FORCE_INLINE Vec4V V4LoadAligned(Vec4V* addr)
{
return *addr;
}
PX_FORCE_INLINE Vec4V V4LoadUnaligned(Vec4V* addr)
{
return V4LoadU(reinterpret_cast<float*>(addr));
}
PX_FORCE_INLINE Vec4V V4Andc(const Vec4V a, const VecU32V b)
{
VecU32V result32(a);
result32 = V4U32Andc(result32, b);
return Vec4V(result32);
}
PX_FORCE_INLINE VecU32V V4IsGrtrV32u(const Vec4V a, const Vec4V b)
{
return V4IsGrtr(a, b);
}
PX_FORCE_INLINE VecU16V V4U16LoadAligned(VecU16V* addr)
{
return *addr;
}
PX_FORCE_INLINE VecU16V V4U16LoadUnaligned(VecU16V* addr)
{
return *addr;
}
PX_FORCE_INLINE VecU16V V4U16CompareGt(VecU16V a, VecU16V b)
{
// _mm_cmpgt_epi16 doesn't work for unsigned values unfortunately
// return m128_I2F(_mm_cmpgt_epi16(m128_F2I(a), m128_F2I(b)));
VecU16V result;
result.m128_u16[0] = (a).m128_u16[0] > (b).m128_u16[0];
result.m128_u16[1] = (a).m128_u16[1] > (b).m128_u16[1];
result.m128_u16[2] = (a).m128_u16[2] > (b).m128_u16[2];
result.m128_u16[3] = (a).m128_u16[3] > (b).m128_u16[3];
result.m128_u16[4] = (a).m128_u16[4] > (b).m128_u16[4];
result.m128_u16[5] = (a).m128_u16[5] > (b).m128_u16[5];
result.m128_u16[6] = (a).m128_u16[6] > (b).m128_u16[6];
result.m128_u16[7] = (a).m128_u16[7] > (b).m128_u16[7];
return result;
}
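// Note that the lanewise fallback above writes 1 per true lane rather than an all-ones mask. A mask-style
// SSE2 alternative is sketched below for reference only (untested, not used by this file): biasing both
// operands by 0x8000 turns the unsigned 16-bit compare into a signed one.
#if 0
PX_FORCE_INLINE VecU16V V4U16CompareGtMaskSketch(VecU16V a, VecU16V b)
{
const __m128i bias = _mm_set1_epi16(short(0x8000));
const __m128i sa = _mm_xor_si128(m128_F2I(a), bias);
const __m128i sb = _mm_xor_si128(m128_F2I(b), bias);
return m128_I2F(_mm_cmpgt_epi16(sa, sb)); // 0xFFFF where a > b (unsigned), 0 elsewhere
}
#endif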
PX_FORCE_INLINE VecU16V V4I16CompareGt(VecU16V a, VecU16V b)
{
return m128_I2F(_mm_cmpgt_epi16(m128_F2I(a), m128_F2I(b)));
}
PX_FORCE_INLINE Vec4V Vec4V_From_VecU32V(VecU32V a)
{
Vec4V result = V4LoadXYZW(PxF32(a.m128_u32[0]), PxF32(a.m128_u32[1]), PxF32(a.m128_u32[2]), PxF32(a.m128_u32[3]));
return result;
}
PX_FORCE_INLINE Vec4V Vec4V_From_VecI32V(VecI32V in)
{
return _mm_cvtepi32_ps(in);
}
PX_FORCE_INLINE VecI32V VecI32V_From_Vec4V(Vec4V a)
{
return _mm_cvttps_epi32(a);
}
PX_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecU32V(VecU32V a)
{
return Vec4V(a);
}
PX_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecI32V(VecI32V a)
{
return m128_I2F(a);
}
PX_FORCE_INLINE VecU32V VecU32V_ReinterpretFrom_Vec4V(Vec4V a)
{
return VecU32V(a);
}
PX_FORCE_INLINE VecI32V VecI32V_ReinterpretFrom_Vec4V(Vec4V a)
{
return m128_F2I(a);
}
/*
template<int index> PX_FORCE_INLINE BoolV BSplatElement(BoolV a)
{
BoolV result;
result[0] = result[1] = result[2] = result[3] = a[index];
return result;
}
*/
template <int index>
BoolV BSplatElement(BoolV a)
{
float* data = reinterpret_cast<float*>(&a);
return V4Load(data[index]);
}
template <int index>
PX_FORCE_INLINE VecU32V V4U32SplatElement(VecU32V a)
{
VecU32V result;
result.m128_u32[0] = result.m128_u32[1] = result.m128_u32[2] = result.m128_u32[3] = a.m128_u32[index];
return result;
}
template <int index>
PX_FORCE_INLINE Vec4V V4SplatElement(Vec4V a)
{
float* data = reinterpret_cast<float*>(&a);
return V4Load(data[index]);
}
PX_FORCE_INLINE VecU32V U4LoadXYZW(PxU32 x, PxU32 y, PxU32 z, PxU32 w)
{
VecU32V result;
result.m128_u32[0] = x;
result.m128_u32[1] = y;
result.m128_u32[2] = z;
result.m128_u32[3] = w;
return result;
}
PX_FORCE_INLINE Vec4V V4Ceil(const Vec4V in)
{
UnionM128 a(in);
return V4LoadXYZW(PxCeil(a.m128_f32[0]), PxCeil(a.m128_f32[1]), PxCeil(a.m128_f32[2]), PxCeil(a.m128_f32[3]));
}
PX_FORCE_INLINE Vec4V V4Floor(const Vec4V in)
{
UnionM128 a(in);
return V4LoadXYZW(PxFloor(a.m128_f32[0]), PxFloor(a.m128_f32[1]), PxFloor(a.m128_f32[2]), PxFloor(a.m128_f32[3]));
}
PX_FORCE_INLINE VecU32V V4ConvertToU32VSaturate(const Vec4V in, PxU32 power)
{
PX_ASSERT(power == 0 && "Non-zero power not supported in V4ConvertToU32VSaturate");
PX_UNUSED(power); // prevent warning in release builds
PxF32 ffffFFFFasFloat = PxF32(0xFFFF0000);
UnionM128 a(in);
VecU32V result;
result.m128_u32[0] = PxU32(PxClamp<PxF32>((a).m128_f32[0], 0.0f, ffffFFFFasFloat));
result.m128_u32[1] = PxU32(PxClamp<PxF32>((a).m128_f32[1], 0.0f, ffffFFFFasFloat));
result.m128_u32[2] = PxU32(PxClamp<PxF32>((a).m128_f32[2], 0.0f, ffffFFFFasFloat));
result.m128_u32[3] = PxU32(PxClamp<PxF32>((a).m128_f32[3], 0.0f, ffffFFFFasFloat));
return result;
}
} // namespace aos
} // namespace physx
#endif // PXFOUNDATION_PXUNIXSSE2INLINEAOS_H
| 90,719 | C | 26.700763 | 154 | 0.66803 |
NVIDIA-Omniverse/PhysX/physx/include/foundation/unix/sse2/PxUnixSse2AoS.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXFOUNDATION_PXUNIXSSE2AOS_H
#define PXFOUNDATION_PXUNIXSSE2AOS_H
// no includes here! this file should be included from PxcVecMath.h only!!!
#if !COMPILE_VECTOR_INTRINSICS
#error Vector intrinsics should not be included when using scalar implementation.
#endif
namespace physx
{
namespace aos
{
#if PX_EMSCRIPTEN
typedef int8_t __int8_t;
typedef int16_t __int16_t;
typedef int32_t __int32_t;
typedef int64_t __int64_t;
typedef uint16_t __uint16_t;
typedef uint32_t __uint32_t;
typedef uint64_t __uint64_t;
#endif
typedef union UnionM128
{
UnionM128()
{
}
UnionM128(__m128 in)
{
m128 = in;
}
UnionM128(__m128i in)
{
m128i = in;
}
operator __m128()
{
return m128;
}
operator __m128() const
{
return m128;
}
float m128_f32[4];
__int8_t m128_i8[16];
__int16_t m128_i16[8];
__int32_t m128_i32[4];
__int64_t m128_i64[2];
__uint16_t m128_u16[8];
__uint32_t m128_u32[4];
__uint64_t m128_u64[2];
__m128 m128;
__m128i m128i;
} UnionM128;
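// A small usage sketch for UnionM128 (illustration only, kept disabled): wrap a raw __m128 to read
// individual lanes through the named array members; the function name below is made up.
#if 0
PX_FORCE_INLINE float firstLaneOfSketch(__m128 v)
{
UnionM128 u(v); // reinterpret the register through the union
return u.m128_f32[0]; // lane 0 as a float
}
#endif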
typedef __m128 FloatV;
typedef __m128 Vec3V;
typedef __m128 Vec4V;
typedef __m128 BoolV;
typedef __m128 QuatV;
typedef __m128i VecI32V;
typedef UnionM128 VecU32V;
typedef UnionM128 VecU16V;
typedef UnionM128 VecI16V;
typedef UnionM128 VecU8V;
#define FloatVArg FloatV &
#define Vec3VArg Vec3V &
#define Vec4VArg Vec4V &
#define BoolVArg BoolV &
#define VecU32VArg VecU32V &
#define VecI32VArg VecI32V &
#define VecU16VArg VecU16V &
#define VecI16VArg VecI16V &
#define VecU8VArg VecU8V &
#define QuatVArg QuatV &
// Optimization for situations in which you cross product multiple vectors with the same vector.
// Avoids 2X shuffles per product
struct VecCrossV
{
Vec3V mL1;
Vec3V mR1;
};
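// A usage sketch for VecCrossV (illustration only, kept disabled): prepare the shared operand once, then
// reuse it for several cross products. This assumes the V3PrepareCross / V3Cross(VecCrossV, ...) overloads
// provided by the matching inline header; the names below are made up.
#if 0
void crossManySketch(const Vec3V n, const Vec3V* points, Vec3V* results, PxU32 count)
{
const VecCrossV preparedN = V3PrepareCross(n); // pay the extra shuffles once
for(PxU32 i = 0; i < count; ++i)
results[i] = V3Cross(preparedN, points[i]); // reuse the pre-shuffled operand
}
#endif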
struct VecShiftV
{
VecI32V shift;
};
#define VecShiftVArg VecShiftV &
PX_ALIGN_PREFIX(16)
struct Mat33V
{
Mat33V()
{
}
Mat33V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2) : col0(c0), col1(c1), col2(c2)
{
}
Vec3V PX_ALIGN(16, col0);
Vec3V PX_ALIGN(16, col1);
Vec3V PX_ALIGN(16, col2);
} PX_ALIGN_SUFFIX(16);
PX_ALIGN_PREFIX(16)
struct Mat34V
{
Mat34V()
{
}
Mat34V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2, const Vec3V& c3) : col0(c0), col1(c1), col2(c2), col3(c3)
{
}
Vec3V PX_ALIGN(16, col0);
Vec3V PX_ALIGN(16, col1);
Vec3V PX_ALIGN(16, col2);
Vec3V PX_ALIGN(16, col3);
} PX_ALIGN_SUFFIX(16);
PX_ALIGN_PREFIX(16)
struct Mat43V
{
Mat43V()
{
}
Mat43V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2) : col0(c0), col1(c1), col2(c2)
{
}
Vec4V PX_ALIGN(16, col0);
Vec4V PX_ALIGN(16, col1);
Vec4V PX_ALIGN(16, col2);
} PX_ALIGN_SUFFIX(16);
PX_ALIGN_PREFIX(16)
struct Mat44V
{
Mat44V()
{
}
Mat44V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2, const Vec4V& c3) : col0(c0), col1(c1), col2(c2), col3(c3)
{
}
Vec4V PX_ALIGN(16, col0);
Vec4V PX_ALIGN(16, col1);
Vec4V PX_ALIGN(16, col2);
Vec4V PX_ALIGN(16, col3);
} PX_ALIGN_SUFFIX(16);
} // namespace aos
} // namespace physx
#endif // PXFOUNDATION_PXUNIXSSE2AOS_H
| 4,646 | C | 23.718085 | 116 | 0.717607 |
NVIDIA-Omniverse/PhysX/physx/include/foundation/unix/neon/PxUnixNeonInlineAoS.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXFOUNDATION_PXUNIXNEONINLINEAOS_H
#define PXFOUNDATION_PXUNIXNEONINLINEAOS_H
#if !COMPILE_VECTOR_INTRINSICS
#error Vector intrinsics should not be included when using scalar implementation.
#endif
namespace physx
{
namespace aos
{
// improved estimates
#define VRECIPEQ recipq_newton<1>
#define VRECIPE recip_newton<1>
#define VRECIPSQRTEQ rsqrtq_newton<1>
#define VRECIPSQRTE rsqrt_newton<1>
// "exact"
#define VRECIPQ recipq_newton<4>
#if PX_SWITCH
// StabilizationTests.AveragePoint needs more precision to succeed.
#define VRECIP recip_newton<5>
#else
#define VRECIP recip_newton<4>
#endif
#define VRECIPSQRTQ rsqrtq_newton<4>
#define VRECIPSQRT rsqrt_newton<4>
#define VECMATH_AOS_EPSILON (1e-3f)
//////////////////////////////////////////////////////////////////////
//Test that Vec3V and FloatV are legal
//////////////////////////////////
#define FLOAT_COMPONENTS_EQUAL_THRESHOLD 0.01f
PX_FORCE_INLINE bool isValidFloatV(const FloatV a)
{
/*
PX_ALIGN(16, PxF32) data[4];
vst1_f32(reinterpret_cast<float32_t*>(data), a);
return
PxU32* intData = reinterpret_cast<PxU32*>(data);
return intData[0] == intData[1];
*/
PX_ALIGN(16, PxF32) data[4];
vst1_f32(reinterpret_cast<float32_t*>(data), a);
const float32_t x = data[0];
const float32_t y = data[1];
return (x == y);
/*if (PxAbs(x - y) < FLOAT_COMPONENTS_EQUAL_THRESHOLD)
{
return true;
}
if (PxAbs((x - y) / x) < FLOAT_COMPONENTS_EQUAL_THRESHOLD)
{
return true;
}
return false;*/
}
PX_FORCE_INLINE bool isValidVec3V(const Vec3V a)
{
const float32_t w = vgetq_lane_f32(a, 3);
return (0.0f == w);
//const PxU32* intData = reinterpret_cast<const PxU32*>(&w);
//return *intData == 0;
}
PX_FORCE_INLINE bool isAligned16(const void* a)
{
return(0 == (size_t(a) & 0x0f));
}
#if PX_DEBUG
#define ASSERT_ISVALIDVEC3V(a) PX_ASSERT(isValidVec3V(a))
#define ASSERT_ISVALIDFLOATV(a) PX_ASSERT(isValidFloatV(a))
#define ASSERT_ISALIGNED16(a) PX_ASSERT(isAligned16(static_cast<const void*>(a)))
#else
#define ASSERT_ISVALIDVEC3V(a)
#define ASSERT_ISVALIDFLOATV(a)
#define ASSERT_ISALIGNED16(a)
#endif
namespace internalUnitNeonSimd
{
PX_FORCE_INLINE PxU32 BAllTrue4_R(const BoolV a)
{
const uint16x4_t dHigh = vget_high_u16(vreinterpretq_u16_u32(a));
const uint16x4_t dLow = vmovn_u32(a);
const uint16x8_t combined = vcombine_u16(dLow, dHigh);
const uint32x2_t finalReduce = vreinterpret_u32_u8(vmovn_u16(combined));
return PxU32(vget_lane_u32(finalReduce, 0) == 0xffffFFFF);
}
PX_FORCE_INLINE PxU32 BAllTrue3_R(const BoolV a)
{
const uint16x4_t dHigh = vget_high_u16(vreinterpretq_u16_u32(a));
const uint16x4_t dLow = vmovn_u32(a);
const uint16x8_t combined = vcombine_u16(dLow, dHigh);
const uint32x2_t finalReduce = vreinterpret_u32_u8(vmovn_u16(combined));
return PxU32((vget_lane_u32(finalReduce, 0) & 0xffFFff) == 0xffFFff);
}
PX_FORCE_INLINE PxU32 BAnyTrue4_R(const BoolV a)
{
const uint16x4_t dHigh = vget_high_u16(vreinterpretq_u16_u32(a));
const uint16x4_t dLow = vmovn_u32(a);
const uint16x8_t combined = vcombine_u16(dLow, dHigh);
const uint32x2_t finalReduce = vreinterpret_u32_u8(vmovn_u16(combined));
return PxU32(vget_lane_u32(finalReduce, 0) != 0x0);
}
PX_FORCE_INLINE PxU32 BAnyTrue3_R(const BoolV a)
{
const uint16x4_t dHigh = vget_high_u16(vreinterpretq_u16_u32(a));
const uint16x4_t dLow = vmovn_u32(a);
const uint16x8_t combined = vcombine_u16(dLow, dHigh);
const uint32x2_t finalReduce = vreinterpret_u32_u8(vmovn_u16(combined));
return PxU32((vget_lane_u32(finalReduce, 0) & 0xffFFff) != 0);
}
}
namespace vecMathTests
{
// PT: this function returns an invalid Vec3V (W!=0.0f) just for unit-testing 'isValidVec3V'
PX_FORCE_INLINE Vec3V getInvalidVec3V()
{
PX_ALIGN(16, PxF32) data[4] = { 1.0f, 1.0f, 1.0f, 1.0f };
return V4LoadA(data);
}
PX_FORCE_INLINE bool allElementsEqualFloatV(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return vget_lane_u32(vceq_f32(a, b), 0) != 0;
}
PX_FORCE_INLINE bool allElementsEqualVec3V(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return V3AllEq(a, b) != 0;
}
PX_FORCE_INLINE bool allElementsEqualVec4V(const Vec4V a, const Vec4V b)
{
return V4AllEq(a, b) != 0;
}
PX_FORCE_INLINE bool allElementsEqualBoolV(const BoolV a, const BoolV b)
{
return internalUnitNeonSimd::BAllTrue4_R(vceqq_u32(a, b)) != 0;
}
PX_FORCE_INLINE PxU32 V4U32AllEq(const VecU32V a, const VecU32V b)
{
return internalUnitNeonSimd::BAllTrue4_R(V4IsEqU32(a, b));
}
PX_FORCE_INLINE bool allElementsEqualVecU32V(const VecU32V a, const VecU32V b)
{
return V4U32AllEq(a, b) != 0;
}
PX_FORCE_INLINE BoolV V4IsEqI32(const VecI32V a, const VecI32V b)
{
return vceqq_s32(a, b);
}
PX_FORCE_INLINE PxU32 V4I32AllEq(const VecI32V a, const VecI32V b)
{
return internalUnitNeonSimd::BAllTrue4_R(V4IsEqI32(a, b));
}
PX_FORCE_INLINE bool allElementsEqualVecI32V(const VecI32V a, const VecI32V b)
{
return V4I32AllEq(a, b) != 0;
}
PX_FORCE_INLINE bool allElementsNearEqualFloatV(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
const float32x2_t c = vsub_f32(a, b);
const float32x2_t error = vdup_n_f32(VECMATH_AOS_EPSILON);
// absolute compare abs(error) > abs(c)
const uint32x2_t greater = vcagt_f32(error, c);
const uint32x2_t min = vpmin_u32(greater, greater);
return vget_lane_u32(min, 0) != 0x0;
}
PX_FORCE_INLINE bool allElementsNearEqualVec3V(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
const float32x4_t c = vsubq_f32(a, b);
const float32x4_t error = vdupq_n_f32(VECMATH_AOS_EPSILON);
// absolute compare abs(error) > abs(c)
const uint32x4_t greater = vcagtq_f32(error, c);
return internalUnitNeonSimd::BAllTrue3_R(greater) != 0;
}
PX_FORCE_INLINE bool allElementsNearEqualVec4V(const Vec4V a, const Vec4V b)
{
const float32x4_t c = vsubq_f32(a, b);
const float32x4_t error = vdupq_n_f32(VECMATH_AOS_EPSILON);
// absolute compare abs(error) > abs(c)
const uint32x4_t greater = vcagtq_f32(error, c);
return internalUnitNeonSimd::BAllTrue4_R(greater) != 0x0;
}
}
#if 0 // debugging printfs
#include <stdio.h>
PX_FORCE_INLINE void printVec(const float32x4_t& v, const char* name)
{
PX_ALIGN(16, float32_t) data[4];
vst1q_f32(data, v);
printf("%s: (%f, %f, %f, %f)\n", name, data[0], data[1], data[2], data[3]);
}
PX_FORCE_INLINE void printVec(const float32x2_t& v, const char* name)
{
PX_ALIGN(16, float32_t) data[2];
vst1_f32(data, v);
printf("%s: (%f, %f)\n", name, data[0], data[1]);
}
PX_FORCE_INLINE void printVec(const uint32x4_t& v, const char* name)
{
PX_ALIGN(16, uint32_t) data[4];
vst1q_u32(data, v);
printf("%s: (0x%x, 0x%x, 0x%x, 0x%x)\n", name, data[0], data[1], data[2], data[3]);
}
PX_FORCE_INLINE void printVec(const uint16x8_t& v, const char* name)
{
PX_ALIGN(16, uint16_t) data[8];
vst1q_u16(data, v);
printf("%s: (0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n", name, data[0], data[1], data[2], data[3],
data[4], data[5], data[6], data[7]);
}
PX_FORCE_INLINE void printVec(const int32x4_t& v, const char* name)
{
PX_ALIGN(16, int32_t) data[4];
vst1q_s32(data, v);
printf("%s: (0x%x, 0x%x, 0x%x, 0x%x)\n", name, data[0], data[1], data[2], data[3]);
}
PX_FORCE_INLINE void printVec(const int16x8_t& v, const char* name)
{
PX_ALIGN(16, int16_t) data[8];
vst1q_s16(data, v);
printf("%s: (0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n", name, data[0], data[1], data[2], data[3],
data[4], data[5], data[6], data[7]);
}
PX_FORCE_INLINE void printVec(const uint16x4_t& v, const char* name)
{
PX_ALIGN(16, uint16_t) data[4];
vst1_u16(data, v);
printf("%s: (0x%x, 0x%x, 0x%x, 0x%x)\n", name, data[0], data[1], data[2], data[3]);
}
PX_FORCE_INLINE void printVec(const uint32x2_t& v, const char* name)
{
PX_ALIGN(16, uint32_t) data[2];
vst1_u32(data, v);
printf("%s: (0x%x, 0x%x)\n", name, data[0], data[1]);
}
PX_FORCE_INLINE void printVar(const PxU32 v, const char* name)
{
printf("%s: 0x%x\n", name, v);
}
PX_FORCE_INLINE void printVar(const PxF32 v, const char* name)
{
printf("%s: %f\n", name, v);
}
#define PRINT_VAR(X) printVar((X), #X)
#define PRINT_VEC(X) printVec((X), #X)
#define PRINT_VEC_TITLE(TITLE, X) printVec((X), TITLE #X)
#endif // debugging printf
/////////////////////////////////////////////////////////////////////
////FUNCTIONS USED ONLY FOR ASSERTS IN VECTORISED IMPLEMENTATIONS
/////////////////////////////////////////////////////////////////////
PX_FORCE_INLINE bool isFiniteFloatV(const FloatV a)
{
PX_ALIGN(16, PxF32) data[4];
vst1_f32(reinterpret_cast<float32_t*>(data), a);
return PxIsFinite(data[0]) && PxIsFinite(data[1]);
}
PX_FORCE_INLINE bool isFiniteVec3V(const Vec3V a)
{
PX_ALIGN(16, PxF32) data[4];
vst1q_f32(reinterpret_cast<float32_t*>(data), a);
return PxIsFinite(data[0]) && PxIsFinite(data[1]) && PxIsFinite(data[2]);
}
PX_FORCE_INLINE bool isFiniteVec4V(const Vec4V a)
{
PX_ALIGN(16, PxF32) data[4];
vst1q_f32(reinterpret_cast<float32_t*>(data), a);
return PxIsFinite(data[0]) && PxIsFinite(data[1]) && PxIsFinite(data[2]) && PxIsFinite(data[3]);
}
PX_FORCE_INLINE bool hasZeroElementinFloatV(const FloatV a)
{
ASSERT_ISVALIDFLOATV(a);
return vget_lane_u32(vreinterpret_u32_f32(a), 0) == 0;
}
PX_FORCE_INLINE bool hasZeroElementInVec3V(const Vec3V a)
{
const uint32x2_t dLow = vget_low_u32(vreinterpretq_u32_f32(a));
const uint32x2_t dMin = vpmin_u32(dLow, dLow);
return vget_lane_u32(dMin, 0) == 0 || vgetq_lane_u32(vreinterpretq_u32_f32(a), 2) == 0;
}
PX_FORCE_INLINE bool hasZeroElementInVec4V(const Vec4V a)
{
const uint32x2_t dHigh = vget_high_u32(vreinterpretq_u32_f32(a));
const uint32x2_t dLow = vget_low_u32(vreinterpretq_u32_f32(a));
const uint32x2_t dMin = vmin_u32(dHigh, dLow);
const uint32x2_t pairMin = vpmin_u32(dMin, dMin);
return vget_lane_u32(pairMin, 0) == 0;
}
/////////////////////////////////////////////////////////////////////
////VECTORISED FUNCTION IMPLEMENTATIONS
/////////////////////////////////////////////////////////////////////
PX_FORCE_INLINE FloatV FLoad(const PxF32 f)
{
return vdup_n_f32(reinterpret_cast<const float32_t&>(f));
}
PX_FORCE_INLINE FloatV FLoadA(const PxF32* const f)
{
ASSERT_ISALIGNED16(f);
return vld1_f32(reinterpret_cast<const float32_t*>(f));
}
PX_FORCE_INLINE Vec3V V3Load(const PxF32 f)
{
PX_ALIGN(16, PxF32) data[4] = { f, f, f, 0.0f };
return V4LoadA(data);
}
PX_FORCE_INLINE Vec4V V4Load(const PxF32 f)
{
return vdupq_n_f32(reinterpret_cast<const float32_t&>(f));
}
PX_FORCE_INLINE BoolV BLoad(const bool f)
{
const PxU32 i = static_cast<PxU32>(-(static_cast<PxI32>(f)));
return vdupq_n_u32(i);
}
PX_FORCE_INLINE Vec3V V3LoadA(const PxVec3& f)
{
ASSERT_ISALIGNED16(&f);
PX_ALIGN(16, PxF32) data[4] = { f.x, f.y, f.z, 0.0f };
return V4LoadA(data);
}
PX_FORCE_INLINE Vec3V V3LoadU(const PxVec3& f)
{
PX_ALIGN(16, PxF32) data[4] = { f.x, f.y, f.z, 0.0f };
return V4LoadA(data);
}
PX_FORCE_INLINE Vec3V V3LoadUnsafeA(const PxVec3& f)
{
ASSERT_ISALIGNED16(&f);
PX_ALIGN(16, PxF32) data[4] = { f.x, f.y, f.z, 0.0f };
return V4LoadA(data);
}
PX_FORCE_INLINE Vec3V V3LoadA(const PxF32* f)
{
ASSERT_ISALIGNED16(f);
PX_ALIGN(16, PxF32) data[4] = { f[0], f[1], f[2], 0.0f };
return V4LoadA(data);
}
PX_FORCE_INLINE Vec3V V3LoadU(const PxF32* f)
{
PX_ALIGN(16, PxF32) data[4] = { f[0], f[1], f[2], 0.0f };
return V4LoadA(data);
}
PX_FORCE_INLINE Vec3V Vec3V_From_Vec4V(Vec4V v)
{
return vsetq_lane_f32(0.0f, v, 3);
}
PX_FORCE_INLINE Vec3V Vec3V_From_Vec4V_WUndefined(Vec4V v)
{
return v;
}
PX_FORCE_INLINE Vec4V Vec4V_From_Vec3V(Vec3V f)
{
return f; // ok if it is implemented as the same type.
}
PX_FORCE_INLINE Vec4V Vec4V_From_FloatV(FloatV f)
{
return vcombine_f32(f, f);
}
PX_FORCE_INLINE Vec3V Vec3V_From_FloatV(FloatV f)
{
return Vec3V_From_Vec4V(Vec4V_From_FloatV(f));
}
PX_FORCE_INLINE Vec3V Vec3V_From_FloatV_WUndefined(FloatV f)
{
return Vec3V_From_Vec4V_WUndefined(Vec4V_From_FloatV(f));
}
PX_FORCE_INLINE Vec4V Vec4V_From_PxVec3_WUndefined(const PxVec3& f)
{
PX_ALIGN(16, PxF32) data[4] = { f.x, f.y, f.z, 0.0f };
return V4LoadA(data);
}
PX_FORCE_INLINE Mat33V Mat33V_From_PxMat33(const PxMat33& m)
{
return Mat33V(V3LoadU(m.column0), V3LoadU(m.column1), V3LoadU(m.column2));
}
PX_FORCE_INLINE void PxMat33_From_Mat33V(const Mat33V& m, PxMat33& out)
{
V3StoreU(m.col0, out.column0);
V3StoreU(m.col1, out.column1);
V3StoreU(m.col2, out.column2);
}
PX_FORCE_INLINE Vec4V V4LoadA(const PxF32* const f)
{
ASSERT_ISALIGNED16(f);
return vld1q_f32(reinterpret_cast<const float32_t*>(f));
}
PX_FORCE_INLINE void V4StoreA(Vec4V a, PxF32* f)
{
ASSERT_ISALIGNED16(f);
vst1q_f32(reinterpret_cast<float32_t*>(f), a);
}
PX_FORCE_INLINE void V4StoreU(const Vec4V a, PxF32* f)
{
PX_ALIGN(16, PxF32) f2[4];
vst1q_f32(reinterpret_cast<float32_t*>(f2), a);
f[0] = f2[0];
f[1] = f2[1];
f[2] = f2[2];
f[3] = f2[3];
}
PX_FORCE_INLINE void BStoreA(const BoolV a, PxU32* u)
{
ASSERT_ISALIGNED16(u);
vst1q_u32(reinterpret_cast<uint32_t*>(u), a);
}
PX_FORCE_INLINE void U4StoreA(const VecU32V uv, PxU32* u)
{
ASSERT_ISALIGNED16(u);
vst1q_u32(reinterpret_cast<uint32_t*>(u), uv);
}
PX_FORCE_INLINE void I4StoreA(const VecI32V iv, PxI32* i)
{
ASSERT_ISALIGNED16(i);
vst1q_s32(reinterpret_cast<int32_t*>(i), iv);
}
PX_FORCE_INLINE Vec4V V4LoadU(const PxF32* const f)
{
return vld1q_f32(reinterpret_cast<const float32_t*>(f));
}
PX_FORCE_INLINE BoolV BLoad(const bool* const f)
{
const PX_ALIGN(16, PxU32) b[4] = { static_cast<PxU32>(-static_cast<PxI32>(f[0])),
static_cast<PxU32>(-static_cast<PxI32>(f[1])),
static_cast<PxU32>(-static_cast<PxI32>(f[2])),
static_cast<PxU32>(-static_cast<PxI32>(f[3])) };
return vld1q_u32(b);
}
PX_FORCE_INLINE void FStore(const FloatV a, PxF32* PX_RESTRICT f)
{
ASSERT_ISVALIDFLOATV(a);
// vst1q_lane_f32(f, a, 0); // causes vst1 alignment bug
*f = vget_lane_f32(a, 0);
}
PX_FORCE_INLINE void Store_From_BoolV(const BoolV a, PxU32* PX_RESTRICT f)
{
*f = vget_lane_u32(vget_low_u32(a), 0);
}
PX_FORCE_INLINE void V3StoreA(const Vec3V a, PxVec3& f)
{
ASSERT_ISALIGNED16(&f);
PX_ALIGN(16, PxF32) f2[4];
vst1q_f32(reinterpret_cast<float32_t*>(f2), a);
f = PxVec3(f2[0], f2[1], f2[2]);
}
PX_FORCE_INLINE void V3StoreU(const Vec3V a, PxVec3& f)
{
PX_ALIGN(16, PxF32) f2[4];
vst1q_f32(reinterpret_cast<float32_t*>(f2), a);
f = PxVec3(f2[0], f2[1], f2[2]);
}
//////////////////////////////////
// FLOATV
//////////////////////////////////
PX_FORCE_INLINE FloatV FZero()
{
return FLoad(0.0f);
}
PX_FORCE_INLINE FloatV FOne()
{
return FLoad(1.0f);
}
PX_FORCE_INLINE FloatV FHalf()
{
return FLoad(0.5f);
}
PX_FORCE_INLINE FloatV FEps()
{
return FLoad(PX_EPS_REAL);
}
PX_FORCE_INLINE FloatV FEps6()
{
return FLoad(1e-6f);
}
PX_FORCE_INLINE FloatV FMax()
{
return FLoad(PX_MAX_REAL);
}
PX_FORCE_INLINE FloatV FNegMax()
{
return FLoad(-PX_MAX_REAL);
}
PX_FORCE_INLINE FloatV IZero()
{
return vreinterpret_f32_u32(vdup_n_u32(0));
}
PX_FORCE_INLINE FloatV IOne()
{
return vreinterpret_f32_u32(vdup_n_u32(1));
}
PX_FORCE_INLINE FloatV ITwo()
{
return vreinterpret_f32_u32(vdup_n_u32(2));
}
PX_FORCE_INLINE FloatV IThree()
{
return vreinterpret_f32_u32(vdup_n_u32(3));
}
PX_FORCE_INLINE FloatV IFour()
{
return vreinterpret_f32_u32(vdup_n_u32(4));
}
PX_FORCE_INLINE FloatV FNeg(const FloatV f)
{
ASSERT_ISVALIDFLOATV(f);
return vneg_f32(f);
}
PX_FORCE_INLINE FloatV FAdd(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return vadd_f32(a, b);
}
PX_FORCE_INLINE FloatV FSub(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return vsub_f32(a, b);
}
PX_FORCE_INLINE FloatV FMul(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return vmul_f32(a, b);
}
template <int n>
PX_FORCE_INLINE float32x2_t recip_newton(const float32x2_t& in)
{
float32x2_t recip = vrecpe_f32(in);
for(int i = 0; i < n; ++i)
recip = vmul_f32(recip, vrecps_f32(in, recip));
return recip;
}
template <int n>
PX_FORCE_INLINE float32x4_t recipq_newton(const float32x4_t& in)
{
float32x4_t recip = vrecpeq_f32(in);
for(int i = 0; i < n; ++i)
recip = vmulq_f32(recip, vrecpsq_f32(recip, in));
return recip;
}
template <int n>
PX_FORCE_INLINE float32x2_t rsqrt_newton(const float32x2_t& in)
{
float32x2_t rsqrt = vrsqrte_f32(in);
for(int i = 0; i < n; ++i)
rsqrt = vmul_f32(rsqrt, vrsqrts_f32(vmul_f32(rsqrt, rsqrt), in));
return rsqrt;
}
template <int n>
PX_FORCE_INLINE float32x4_t rsqrtq_newton(const float32x4_t& in)
{
float32x4_t rsqrt = vrsqrteq_f32(in);
for(int i = 0; i < n; ++i)
rsqrt = vmulq_f32(rsqrt, vrsqrtsq_f32(vmulq_f32(rsqrt, rsqrt), in));
return rsqrt;
}
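// Scalar reference for the Newton-Raphson refinement used above (documentation sketch only, kept disabled):
// vrecps(d, x) evaluates (2 - d*x), so one reciprocal step is x' = x*(2 - d*x), while vrsqrts(x*x, d)
// evaluates (3 - d*x*x)/2, so one reciprocal-square-root step is x' = x*(3 - d*x*x)/2.
#if 0
static inline float recipNewtonScalarSketch(float d, float initialGuess, int steps)
{
float x = initialGuess; // the vector code starts from the hardware estimate vrecpe_f32 / vrecpeq_f32
for(int i = 0; i < steps; ++i)
x = x * (2.0f - d * x); // each step roughly doubles the number of correct bits
return x;
}
#endif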
PX_FORCE_INLINE FloatV FDiv(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return vmul_f32(a, VRECIP(b));
}
PX_FORCE_INLINE FloatV FDivFast(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return vmul_f32(a, VRECIPE(b));
}
PX_FORCE_INLINE FloatV FRecip(const FloatV a)
{
ASSERT_ISVALIDFLOATV(a);
return VRECIP(a);
}
PX_FORCE_INLINE FloatV FRecipFast(const FloatV a)
{
ASSERT_ISVALIDFLOATV(a);
return VRECIPE(a);
}
PX_FORCE_INLINE FloatV FRsqrt(const FloatV a)
{
ASSERT_ISVALIDFLOATV(a);
return VRECIPSQRT(a);
}
PX_FORCE_INLINE FloatV FSqrt(const FloatV a)
{
ASSERT_ISVALIDFLOATV(a);
return FSel(FIsEq(a, FZero()), a, vmul_f32(a, VRECIPSQRT(a)));
}
PX_FORCE_INLINE FloatV FRsqrtFast(const FloatV a)
{
ASSERT_ISVALIDFLOATV(a);
return VRECIPSQRTE(a);
}
PX_FORCE_INLINE FloatV FScaleAdd(const FloatV a, const FloatV b, const FloatV c)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
ASSERT_ISVALIDFLOATV(c);
return vmla_f32(c, a, b);
}
PX_FORCE_INLINE FloatV FNegScaleSub(const FloatV a, const FloatV b, const FloatV c)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
ASSERT_ISVALIDFLOATV(c);
return vmls_f32(c, a, b);
}
PX_FORCE_INLINE FloatV FAbs(const FloatV a)
{
ASSERT_ISVALIDFLOATV(a);
return vabs_f32(a);
}
PX_FORCE_INLINE FloatV FSel(const BoolV c, const FloatV a, const FloatV b)
{
PX_ASSERT( vecMathTests::allElementsEqualBoolV(c, BTTTT()) ||
vecMathTests::allElementsEqualBoolV(c, BFFFF()));
ASSERT_ISVALIDFLOATV(vbsl_f32(vget_low_u32(c), a, b));
return vbsl_f32(vget_low_u32(c), a, b);
}
PX_FORCE_INLINE BoolV FIsGrtr(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return vdupq_lane_u32(vcgt_f32(a, b), 0);
}
PX_FORCE_INLINE BoolV FIsGrtrOrEq(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return vdupq_lane_u32(vcge_f32(a, b), 0);
}
PX_FORCE_INLINE BoolV FIsEq(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return vdupq_lane_u32(vceq_f32(a, b), 0);
}
PX_FORCE_INLINE FloatV FMax(const FloatV a, const FloatV b)
{
//ASSERT_ISVALIDFLOATV(a);
//ASSERT_ISVALIDFLOATV(b);
return vmax_f32(a, b);
}
PX_FORCE_INLINE FloatV FMin(const FloatV a, const FloatV b)
{
//ASSERT_ISVALIDFLOATV(a);
//ASSERT_ISVALIDFLOATV(b);
return vmin_f32(a, b);
}
PX_FORCE_INLINE FloatV FClamp(const FloatV a, const FloatV minV, const FloatV maxV)
{
ASSERT_ISVALIDFLOATV(minV);
ASSERT_ISVALIDFLOATV(maxV);
return vmax_f32(vmin_f32(a, maxV), minV);
}
PX_FORCE_INLINE PxU32 FAllGrtr(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return vget_lane_u32(vcgt_f32(a, b), 0);
}
PX_FORCE_INLINE PxU32 FAllGrtrOrEq(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return vget_lane_u32(vcge_f32(a, b), 0);
}
PX_FORCE_INLINE PxU32 FAllEq(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return vget_lane_u32(vceq_f32(a, b), 0);
}
PX_FORCE_INLINE FloatV FRound(const FloatV a)
{
ASSERT_ISVALIDFLOATV(a);
// truncate(a + 0.5f - sign(a)), where sign(a) is 1.0f for negative inputs and 0.0f otherwise,
// e.g. a = -2.3f: truncate(-2.3f + 0.5f - 1.0f) = truncate(-2.8f) = -2.0f
const float32x2_t half = vdup_n_f32(0.5f);
const float32x2_t sign = vcvt_f32_u32((vshr_n_u32(vreinterpret_u32_f32(a), 31)));
const float32x2_t aPlusHalf = vadd_f32(a, half);
const float32x2_t aRound = vsub_f32(aPlusHalf, sign);
int32x2_t tmp = vcvt_s32_f32(aRound);
return vcvt_f32_s32(tmp);
}
PX_FORCE_INLINE FloatV FSin(const FloatV a)
{
ASSERT_ISVALIDFLOATV(a);
// Wrap the given angle into [-PI, PI] by subtracting the nearest multiple of 2*PI
const FloatV recipTwoPi = FLoadA(g_PXReciprocalTwoPi.f);
const FloatV twoPi = FLoadA(g_PXTwoPi.f);
const FloatV tmp = FMul(a, recipTwoPi);
const FloatV b = FRound(tmp);
const FloatV V1 = FNegScaleSub(twoPi, b, a);
// sin(V) ~= V - V^3 / 3! + V^5 / 5! - V^7 / 7! + V^9 / 9! - V^11 / 11! + V^13 / 13! -
// V^15 / 15! + V^17 / 17! - V^19 / 19! + V^21 / 21! - V^23 / 23! (for -PI <= V < PI)
const FloatV V2 = FMul(V1, V1);
const FloatV V3 = FMul(V2, V1);
const FloatV V5 = FMul(V3, V2);
const FloatV V7 = FMul(V5, V2);
const FloatV V9 = FMul(V7, V2);
const FloatV V11 = FMul(V9, V2);
const FloatV V13 = FMul(V11, V2);
const FloatV V15 = FMul(V13, V2);
const FloatV V17 = FMul(V15, V2);
const FloatV V19 = FMul(V17, V2);
const FloatV V21 = FMul(V19, V2);
const FloatV V23 = FMul(V21, V2);
const Vec4V sinCoefficients0 = V4LoadA(g_PXSinCoefficients0.f);
const Vec4V sinCoefficients1 = V4LoadA(g_PXSinCoefficients1.f);
const Vec4V sinCoefficients2 = V4LoadA(g_PXSinCoefficients2.f);
const FloatV S1 = V4GetY(sinCoefficients0);
const FloatV S2 = V4GetZ(sinCoefficients0);
const FloatV S3 = V4GetW(sinCoefficients0);
const FloatV S4 = V4GetX(sinCoefficients1);
const FloatV S5 = V4GetY(sinCoefficients1);
const FloatV S6 = V4GetZ(sinCoefficients1);
const FloatV S7 = V4GetW(sinCoefficients1);
const FloatV S8 = V4GetX(sinCoefficients2);
const FloatV S9 = V4GetY(sinCoefficients2);
const FloatV S10 = V4GetZ(sinCoefficients2);
const FloatV S11 = V4GetW(sinCoefficients2);
FloatV Result;
Result = FScaleAdd(S1, V3, V1);
Result = FScaleAdd(S2, V5, Result);
Result = FScaleAdd(S3, V7, Result);
Result = FScaleAdd(S4, V9, Result);
Result = FScaleAdd(S5, V11, Result);
Result = FScaleAdd(S6, V13, Result);
Result = FScaleAdd(S7, V15, Result);
Result = FScaleAdd(S8, V17, Result);
Result = FScaleAdd(S9, V19, Result);
Result = FScaleAdd(S10, V21, Result);
Result = FScaleAdd(S11, V23, Result);
return Result;
}
PX_FORCE_INLINE FloatV FCos(const FloatV a)
{
ASSERT_ISVALIDFLOATV(a);
// Wrap the given angle into [-PI, PI] by subtracting the nearest multiple of 2*PI
const FloatV recipTwoPi = FLoadA(g_PXReciprocalTwoPi.f);
const FloatV twoPi = FLoadA(g_PXTwoPi.f);
const FloatV tmp = FMul(a, recipTwoPi);
const FloatV b = FRound(tmp);
const FloatV V1 = FNegScaleSub(twoPi, b, a);
// cos(V) ~= 1 - V^2 / 2! + V^4 / 4! - V^6 / 6! + V^8 / 8! - V^10 / 10! + V^12 / 12! -
// V^14 / 14! + V^16 / 16! - V^18 / 18! + V^20 / 20! - V^22 / 22! (for -PI <= V < PI)
const FloatV V2 = FMul(V1, V1);
const FloatV V4 = FMul(V2, V2);
const FloatV V6 = FMul(V4, V2);
const FloatV V8 = FMul(V4, V4);
const FloatV V10 = FMul(V6, V4);
const FloatV V12 = FMul(V6, V6);
const FloatV V14 = FMul(V8, V6);
const FloatV V16 = FMul(V8, V8);
const FloatV V18 = FMul(V10, V8);
const FloatV V20 = FMul(V10, V10);
const FloatV V22 = FMul(V12, V10);
const Vec4V cosCoefficients0 = V4LoadA(g_PXCosCoefficients0.f);
const Vec4V cosCoefficients1 = V4LoadA(g_PXCosCoefficients1.f);
const Vec4V cosCoefficients2 = V4LoadA(g_PXCosCoefficients2.f);
const FloatV C1 = V4GetY(cosCoefficients0);
const FloatV C2 = V4GetZ(cosCoefficients0);
const FloatV C3 = V4GetW(cosCoefficients0);
const FloatV C4 = V4GetX(cosCoefficients1);
const FloatV C5 = V4GetY(cosCoefficients1);
const FloatV C6 = V4GetZ(cosCoefficients1);
const FloatV C7 = V4GetW(cosCoefficients1);
const FloatV C8 = V4GetX(cosCoefficients2);
const FloatV C9 = V4GetY(cosCoefficients2);
const FloatV C10 = V4GetZ(cosCoefficients2);
const FloatV C11 = V4GetW(cosCoefficients2);
FloatV Result;
Result = FScaleAdd(C1, V2, FOne());
Result = FScaleAdd(C2, V4, Result);
Result = FScaleAdd(C3, V6, Result);
Result = FScaleAdd(C4, V8, Result);
Result = FScaleAdd(C5, V10, Result);
Result = FScaleAdd(C6, V12, Result);
Result = FScaleAdd(C7, V14, Result);
Result = FScaleAdd(C8, V16, Result);
Result = FScaleAdd(C9, V18, Result);
Result = FScaleAdd(C10, V20, Result);
Result = FScaleAdd(C11, V22, Result);
return Result;
}
PX_FORCE_INLINE PxU32 FOutOfBounds(const FloatV a, const FloatV min, const FloatV max)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(min);
ASSERT_ISVALIDFLOATV(max);
const BoolV c = BOr(FIsGrtr(a, max), FIsGrtr(min, a));
return PxU32(!BAllEqFFFF(c));
}
PX_FORCE_INLINE PxU32 FInBounds(const FloatV a, const FloatV min, const FloatV max)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(min);
ASSERT_ISVALIDFLOATV(max);
const BoolV c = BAnd(FIsGrtrOrEq(a, min), FIsGrtrOrEq(max, a));
return PxU32(BAllEqTTTT(c));
}
PX_FORCE_INLINE PxU32 FOutOfBounds(const FloatV a, const FloatV bounds)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(bounds);
const uint32x2_t greater = vcagt_f32(a, bounds);
return vget_lane_u32(greater, 0);
}
PX_FORCE_INLINE PxU32 FInBounds(const FloatV a, const FloatV bounds)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(bounds);
const uint32x2_t geq = vcage_f32(bounds, a);
return vget_lane_u32(geq, 0);
}
//////////////////////////////////
// VEC3V
//////////////////////////////////
PX_FORCE_INLINE Vec3V V3Splat(const FloatV f)
{
ASSERT_ISVALIDFLOATV(f);
const uint32x2_t mask = { 0xffffFFFF, 0x0 };
const uint32x2_t uHigh = vreinterpret_u32_f32(f);
const float32x2_t dHigh = vreinterpret_f32_u32(vand_u32(uHigh, mask));
return vcombine_f32(f, dHigh);
}
PX_FORCE_INLINE Vec3V V3Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z)
{
ASSERT_ISVALIDFLOATV(x);
ASSERT_ISVALIDFLOATV(y);
ASSERT_ISVALIDFLOATV(z);
const uint32x2_t mask = { 0xffffFFFF, 0x0 };
const uint32x2_t dHigh = vand_u32(vreinterpret_u32_f32(z), mask);
const uint32x2_t dLow = vext_u32(vreinterpret_u32_f32(x), vreinterpret_u32_f32(y), 1);
return vreinterpretq_f32_u32(vcombine_u32(dLow, dHigh));
}
PX_FORCE_INLINE Vec3V V3UnitX()
{
const float32x4_t x = { 1.0f, 0.0f, 0.0f, 0.0f };
return x;
}
PX_FORCE_INLINE Vec3V V3UnitY()
{
const float32x4_t y = { 0, 1.0f, 0, 0 };
return y;
}
PX_FORCE_INLINE Vec3V V3UnitZ()
{
const float32x4_t z = { 0, 0, 1.0f, 0 };
return z;
}
PX_FORCE_INLINE FloatV V3GetX(const Vec3V f)
{
ASSERT_ISVALIDVEC3V(f);
const float32x2_t fLow = vget_low_f32(f);
return vdup_lane_f32(fLow, 0);
}
PX_FORCE_INLINE FloatV V3GetY(const Vec3V f)
{
ASSERT_ISVALIDVEC3V(f);
const float32x2_t fLow = vget_low_f32(f);
return vdup_lane_f32(fLow, 1);
}
PX_FORCE_INLINE FloatV V3GetZ(const Vec3V f)
{
ASSERT_ISVALIDVEC3V(f);
const float32x2_t fhigh = vget_high_f32(f);
return vdup_lane_f32(fhigh, 0);
}
PX_FORCE_INLINE Vec3V V3SetX(const Vec3V v, const FloatV f)
{
ASSERT_ISVALIDVEC3V(v);
ASSERT_ISVALIDFLOATV(f);
return V4Sel(BFTTT(), v, vcombine_f32(f, f));
}
PX_FORCE_INLINE Vec3V V3SetY(const Vec3V v, const FloatV f)
{
ASSERT_ISVALIDVEC3V(v);
ASSERT_ISVALIDFLOATV(f);
return V4Sel(BTFTT(), v, vcombine_f32(f, f));
}
PX_FORCE_INLINE Vec3V V3SetZ(const Vec3V v, const FloatV f)
{
ASSERT_ISVALIDVEC3V(v);
ASSERT_ISVALIDFLOATV(f);
return V4Sel(BTTFT(), v, vcombine_f32(f, f));
}
PX_FORCE_INLINE Vec3V V3ColX(const Vec3V a, const Vec3V b, const Vec3V c)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
ASSERT_ISVALIDVEC3V(c);
const float32x2_t aLow = vget_low_f32(a);
const float32x2_t bLow = vget_low_f32(b);
const float32x2_t cLow = vget_low_f32(c);
const float32x2_t zero = vdup_n_f32(0.0f);
const float32x2x2_t zipL = vzip_f32(aLow, bLow);
const float32x2x2_t zipH = vzip_f32(cLow, zero);
return vcombine_f32(zipL.val[0], zipH.val[0]);
}
PX_FORCE_INLINE Vec3V V3ColY(const Vec3V a, const Vec3V b, const Vec3V c)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
ASSERT_ISVALIDVEC3V(c);
const float32x2_t aLow = vget_low_f32(a);
const float32x2_t bLow = vget_low_f32(b);
const float32x2_t cLow = vget_low_f32(c);
const float32x2_t zero = vdup_n_f32(0.0f);
const float32x2x2_t zipL = vzip_f32(aLow, bLow);
const float32x2x2_t zipH = vzip_f32(cLow, zero);
return vcombine_f32(zipL.val[1], zipH.val[1]);
}
PX_FORCE_INLINE Vec3V V3ColZ(const Vec3V a, const Vec3V b, const Vec3V c)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
ASSERT_ISVALIDVEC3V(c);
const float32x2_t aHi = vget_high_f32(a);
const float32x2_t bHi = vget_high_f32(b);
const float32x2_t cHi = vget_high_f32(c);
const float32x2x2_t zipL = vzip_f32(aHi, bHi);
return vcombine_f32(zipL.val[0], cHi);
}
PX_FORCE_INLINE Vec3V V3Zero()
{
return vdupq_n_f32(0.0f);
}
PX_FORCE_INLINE Vec3V V3Eps()
{
return V3Load(PX_EPS_REAL);
}
PX_FORCE_INLINE Vec3V V3One()
{
return V3Load(1.0f);
}
PX_FORCE_INLINE Vec3V V3Neg(const Vec3V f)
{
ASSERT_ISVALIDVEC3V(f);
const float32x4_t tmp = vnegq_f32(f);
return vsetq_lane_f32(0.0f, tmp, 3);
}
PX_FORCE_INLINE Vec3V V3Add(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return vaddq_f32(a, b);
}
PX_FORCE_INLINE Vec3V V3Add(const Vec3V a, const FloatV b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDFLOATV(b);
return vaddq_f32(a, Vec3V_From_FloatV(b));
}
PX_FORCE_INLINE Vec3V V3Sub(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return vsubq_f32(a, b);
}
PX_FORCE_INLINE Vec3V V3Sub(const Vec3V a, const FloatV b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDFLOATV(b);
return vsubq_f32(a, Vec3V_From_FloatV(b));
}
PX_FORCE_INLINE Vec3V V3Scale(const Vec3V a, const FloatV b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDFLOATV(b);
const float32x4_t tmp = vmulq_lane_f32(a, b, 0);
return vsetq_lane_f32(0.0f, tmp, 3);
}
PX_FORCE_INLINE Vec3V V3Mul(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return vmulq_f32(a, b);
}
PX_FORCE_INLINE Vec3V V3ScaleInv(const Vec3V a, const FloatV b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDFLOATV(b);
const float32x2_t invB = VRECIP(b);
const float32x4_t tmp = vmulq_lane_f32(a, invB, 0);
return vsetq_lane_f32(0.0f, tmp, 3);
}
PX_FORCE_INLINE Vec3V V3Div(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
float32x4_t invB = VRECIPQ(b);
invB = vsetq_lane_f32(0.0f, invB, 3);
return vmulq_f32(a, invB);
}
PX_FORCE_INLINE Vec3V V3ScaleInvFast(const Vec3V a, const FloatV b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDFLOATV(b);
const float32x2_t invB = VRECIPE(b);
const float32x4_t tmp = vmulq_lane_f32(a, invB, 0);
return vsetq_lane_f32(0.0f, tmp, 3);
}
PX_FORCE_INLINE Vec3V V3DivFast(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
float32x4_t invB = VRECIPEQ(b);
invB = vsetq_lane_f32(0.0f, invB, 3);
return vmulq_f32(a, invB);
}
PX_FORCE_INLINE Vec3V V3Recip(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
const float32x4_t recipA = VRECIPQ(a);
return vsetq_lane_f32(0.0f, recipA, 3);
}
PX_FORCE_INLINE Vec3V V3RecipFast(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
const float32x4_t recipA = VRECIPEQ(a);
return vsetq_lane_f32(0.0f, recipA, 3);
}
PX_FORCE_INLINE Vec3V V3Rsqrt(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
const float32x4_t rSqrA = VRECIPSQRTQ(a);
return vsetq_lane_f32(0.0f, rSqrA, 3);
}
PX_FORCE_INLINE Vec3V V3RsqrtFast(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
const float32x4_t rSqrA = VRECIPSQRTEQ(a);
return vsetq_lane_f32(0.0f, rSqrA, 3);
}
PX_FORCE_INLINE Vec3V V3ScaleAdd(const Vec3V a, const FloatV b, const Vec3V c)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDFLOATV(b);
ASSERT_ISVALIDVEC3V(c);
float32x4_t tmp = vmlaq_lane_f32(c, a, b, 0);
// Using vsetq_lane_f32 resulted in failures, probably related to a compiler bug
// on ndk r9d-win32 (gcc 4.8, cardhu/shield). Code with the issue:
// return vsetq_lane_f32(0.0f, tmp, 3);
// Workaround:
float32x2_t w_z = vget_high_f32(tmp);
float32x2_t y_x = vget_low_f32(tmp);
w_z = vset_lane_f32(0.0f, w_z, 1);
return vcombine_f32(y_x, w_z);
}
PX_FORCE_INLINE Vec3V V3NegScaleSub(const Vec3V a, const FloatV b, const Vec3V c)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDFLOATV(b);
ASSERT_ISVALIDVEC3V(c);
float32x4_t tmp = vmlsq_lane_f32(c, a, b, 0);
// Using vsetq_lane_f32 resulted in failures, probably related to a compiler bug
// on ndk r9d-win32 (gcc 4.8, cardhu/shield). Code with the issue:
// return vsetq_lane_f32(0.0f, tmp, 3);
// Workaround:
float32x2_t w_z = vget_high_f32(tmp);
float32x2_t y_x = vget_low_f32(tmp);
w_z = vset_lane_f32(0.0f, w_z, 1);
return vcombine_f32(y_x, w_z);
}
PX_FORCE_INLINE Vec3V V3MulAdd(const Vec3V a, const Vec3V b, const Vec3V c)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
ASSERT_ISVALIDVEC3V(c);
return vmlaq_f32(c, a, b);
}
PX_FORCE_INLINE Vec3V V3NegMulSub(const Vec3V a, const Vec3V b, const Vec3V c)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
ASSERT_ISVALIDVEC3V(c);
return vmlsq_f32(c, a, b);
}
PX_FORCE_INLINE Vec3V V3Abs(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
return vabsq_f32(a);
}
PX_FORCE_INLINE FloatV V3Dot(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
// const uint32x2_t mask = {0xffffFFFF, 0x0};
const float32x4_t tmp = vmulq_f32(a, b);
const float32x2_t low = vget_low_f32(tmp);
const float32x2_t high = vget_high_f32(tmp);
// const float32x2_t high = vreinterpret_f32_u32(vand_u32(vreinterpret_u32_f32(high_), mask));
const float32x2_t sumTmp = vpadd_f32(low, high); // = {0+z, x+y}
const float32x2_t sum0ZYX = vpadd_f32(sumTmp, sumTmp); // = {x+y+z, x+y+z}
return sum0ZYX;
}
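// Scalar reference for the pairwise-add reduction above (documentation sketch only, kept disabled): with
// both W lanes zero, the two vpadd steps reduce (ax*bx, ay*by, az*bz, 0) to ax*bx + ay*by + az*bz,
// replicated into both lanes of the returned FloatV.
#if 0
static inline float dot3ScalarSketch(const float a[3], const float b[3])
{
return a[0]*b[0] + a[1]*b[1] + a[2]*b[2];
}
#endif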
PX_FORCE_INLINE Vec3V V3Cross(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
const uint32x2_t TF = { 0xffffFFFF, 0x0 };
const float32x2_t ay_ax = vget_low_f32(a); // d2
const float32x2_t aw_az = vget_high_f32(a); // d3
const float32x2_t by_bx = vget_low_f32(b); // d4
const float32x2_t bw_bz = vget_high_f32(b); // d5
// Hi, Lo
const float32x2_t bz_by = vext_f32(by_bx, bw_bz, 1); // bz, by
const float32x2_t az_ay = vext_f32(ay_ax, aw_az, 1); // az, ay
const float32x2_t azbx = vmul_f32(aw_az, by_bx); // 0, az*bx
const float32x2_t aybz_axby = vmul_f32(ay_ax, bz_by); // ay*bz, ax*by
const float32x2_t azbxSUBaxbz = vmls_f32(azbx, bw_bz, ay_ax); // 0, az*bx-ax*bz
const float32x2_t aybzSUBazby_axbySUBaybx = vmls_f32(aybz_axby, by_bx, az_ay); // ay*bz-az*by, ax*by-ay*bx
const float32x2_t retLow = vext_f32(aybzSUBazby_axbySUBaybx, azbxSUBaxbz, 1); // az*bx-ax*bz, ay*bz-az*by
const uint32x2_t retHigh = vand_u32(TF, vreinterpret_u32_f32(aybzSUBazby_axbySUBaybx)); // 0, ax*by-ay*bx
return vcombine_f32(retLow, vreinterpret_f32_u32(retHigh));
}
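// Scalar reference for the lane shuffling above (documentation sketch only, kept disabled): the result is
// the standard cross product with the W lane forced to zero.
#if 0
static inline void cross3ScalarSketch(const float a[3], const float b[3], float out[3])
{
out[0] = a[1]*b[2] - a[2]*b[1]; // ay*bz - az*by
out[1] = a[2]*b[0] - a[0]*b[2]; // az*bx - ax*bz
out[2] = a[0]*b[1] - a[1]*b[0]; // ax*by - ay*bx
}
#endif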
PX_FORCE_INLINE VecCrossV V3PrepareCross(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
return a;
}
PX_FORCE_INLINE FloatV V3Length(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
// const uint32x2_t mask = {0xffffFFFF, 0x0};
const float32x4_t tmp = vmulq_f32(a, a);
const float32x2_t low = vget_low_f32(tmp);
const float32x2_t high = vget_high_f32(tmp);
// const float32x2_t high = vreinterpret_f32_u32(vand_u32(vreinterpret_u32_f32(high_), mask));
const float32x2_t sumTmp = vpadd_f32(low, high); // = {0+z, x+y}
const float32x2_t sum0ZYX = vpadd_f32(sumTmp, sumTmp); // = {x+y+z, x+y+z}
return FSqrt(sum0ZYX);
}
PX_FORCE_INLINE FloatV V3LengthSq(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
return V3Dot(a, a);
}
PX_FORCE_INLINE Vec3V V3Normalize(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
//PX_ASSERT(!FAllEq(V4LengthSq(a), FZero()));
return V3ScaleInv(a, V3Length(a));
}
PX_FORCE_INLINE Vec3V V3NormalizeFast(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
//PX_ASSERT(!FAllEq(V4LengthSq(a), FZero()));
return V3Scale(a, VRECIPSQRTE(V3Dot(a, a)));
}
PX_FORCE_INLINE Vec3V V3NormalizeSafe(const Vec3V a, const Vec3V unsafeReturnValue)
{
ASSERT_ISVALIDVEC3V(a);
const FloatV zero = vdup_n_f32(0.0f);
const FloatV length = V3Length(a);
const uint32x4_t isGreaterThanZero = FIsGrtr(length, zero);
return V3Sel(isGreaterThanZero, V3ScaleInv(a, length), unsafeReturnValue);
}
PX_FORCE_INLINE Vec3V V3Sel(const BoolV c, const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V( vbslq_f32(c, a, b));
return vbslq_f32(c, a, b);
}
PX_FORCE_INLINE BoolV V3IsGrtr(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return vcgtq_f32(a, b);
}
PX_FORCE_INLINE BoolV V3IsGrtrOrEq(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return vcgeq_f32(a, b);
}
PX_FORCE_INLINE BoolV V3IsEq(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return vceqq_f32(a, b);
}
PX_FORCE_INLINE Vec3V V3Max(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return vmaxq_f32(a, b);
}
PX_FORCE_INLINE Vec3V V3Min(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return vminq_f32(a, b);
}
PX_FORCE_INLINE FloatV V3ExtractMax(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
const float32x2_t low = vget_low_f32(a);
const float32x2_t high = vget_high_f32(a);
const float32x2_t zz = vdup_lane_f32(high, 0);
const float32x2_t max0 = vpmax_f32(zz, low);
const float32x2_t max1 = vpmax_f32(max0, max0);
return max1;
}
PX_FORCE_INLINE FloatV V3ExtractMin(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
const float32x2_t low = vget_low_f32(a);
const float32x2_t high = vget_high_f32(a);
const float32x2_t zz = vdup_lane_f32(high, 0);
const float32x2_t min0 = vpmin_f32(zz, low);
const float32x2_t min1 = vpmin_f32(min0, min0);
return min1;
}
// return (a >= 0.0f) ? 1.0f : -1.0f;
PX_FORCE_INLINE Vec3V V3Sign(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
const Vec3V zero = V3Zero();
const Vec3V one = V3One();
const Vec3V none = V3Neg(one);
return V3Sel(V3IsGrtrOrEq(a, zero), one, none);
}
PX_FORCE_INLINE Vec3V V3Clamp(const Vec3V a, const Vec3V minV, const Vec3V maxV)
{
ASSERT_ISVALIDVEC3V(minV);
ASSERT_ISVALIDVEC3V(maxV);
return V3Max(V3Min(a, maxV), minV);
}
PX_FORCE_INLINE PxU32 V3AllGrtr(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return internalUnitNeonSimd::BAllTrue3_R(V4IsGrtr(a, b));
}
PX_FORCE_INLINE PxU32 V3AllGrtrOrEq(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return internalUnitNeonSimd::BAllTrue3_R(V4IsGrtrOrEq(a, b));
}
PX_FORCE_INLINE PxU32 V3AllEq(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return internalUnitNeonSimd::BAllTrue3_R(V4IsEq(a, b));
}
PX_FORCE_INLINE Vec3V V3Round(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
// truncate(a + (0.5f - sign(a)))
const Vec3V half = V3Load(0.5f);
const float32x4_t sign = vcvtq_f32_u32((vshrq_n_u32(vreinterpretq_u32_f32(a), 31)));
const Vec3V aPlusHalf = V3Add(a, half);
const Vec3V aRound = V3Sub(aPlusHalf, sign);
return vcvtq_f32_s32(vcvtq_s32_f32(aRound));
}
PX_FORCE_INLINE Vec3V V3Sin(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
// Wrap the given angles into [-PI, PI] by subtracting the nearest multiple of 2*PI
const Vec4V recipTwoPi = V4LoadA(g_PXReciprocalTwoPi.f);
const Vec4V twoPi = V4LoadA(g_PXTwoPi.f);
const Vec3V tmp = V4Mul(a, recipTwoPi);
const Vec3V b = V3Round(tmp);
const Vec3V V1 = V4NegMulSub(twoPi, b, a);
// sin(V) ~= V - V^3 / 3! + V^5 / 5! - V^7 / 7! + V^9 / 9! - V^11 / 11! + V^13 / 13! -
// V^15 / 15! + V^17 / 17! - V^19 / 19! + V^21 / 21! - V^23 / 23! (for -PI <= V < PI)
const Vec3V V2 = V3Mul(V1, V1);
const Vec3V V3 = V3Mul(V2, V1);
const Vec3V V5 = V3Mul(V3, V2);
const Vec3V V7 = V3Mul(V5, V2);
const Vec3V V9 = V3Mul(V7, V2);
const Vec3V V11 = V3Mul(V9, V2);
const Vec3V V13 = V3Mul(V11, V2);
const Vec3V V15 = V3Mul(V13, V2);
const Vec3V V17 = V3Mul(V15, V2);
const Vec3V V19 = V3Mul(V17, V2);
const Vec3V V21 = V3Mul(V19, V2);
const Vec3V V23 = V3Mul(V21, V2);
const Vec4V sinCoefficients0 = V4LoadA(g_PXSinCoefficients0.f);
const Vec4V sinCoefficients1 = V4LoadA(g_PXSinCoefficients1.f);
const Vec4V sinCoefficients2 = V4LoadA(g_PXSinCoefficients2.f);
const FloatV S1 = V4GetY(sinCoefficients0);
const FloatV S2 = V4GetZ(sinCoefficients0);
const FloatV S3 = V4GetW(sinCoefficients0);
const FloatV S4 = V4GetX(sinCoefficients1);
const FloatV S5 = V4GetY(sinCoefficients1);
const FloatV S6 = V4GetZ(sinCoefficients1);
const FloatV S7 = V4GetW(sinCoefficients1);
const FloatV S8 = V4GetX(sinCoefficients2);
const FloatV S9 = V4GetY(sinCoefficients2);
const FloatV S10 = V4GetZ(sinCoefficients2);
const FloatV S11 = V4GetW(sinCoefficients2);
Vec3V Result;
Result = V4ScaleAdd(V3, S1, V1);
Result = V4ScaleAdd(V5, S2, Result);
Result = V4ScaleAdd(V7, S3, Result);
Result = V4ScaleAdd(V9, S4, Result);
Result = V4ScaleAdd(V11, S5, Result);
Result = V4ScaleAdd(V13, S6, Result);
Result = V4ScaleAdd(V15, S7, Result);
Result = V4ScaleAdd(V17, S8, Result);
Result = V4ScaleAdd(V19, S9, Result);
Result = V4ScaleAdd(V21, S10, Result);
Result = V4ScaleAdd(V23, S11, Result);
return Result;
}
PX_FORCE_INLINE Vec3V V3Cos(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
// Wrap the given angles into [-PI, PI] by subtracting the nearest multiple of 2*PI
const Vec4V recipTwoPi = V4LoadA(g_PXReciprocalTwoPi.f);
const Vec4V twoPi = V4LoadA(g_PXTwoPi.f);
const Vec3V tmp = V4Mul(a, recipTwoPi);
const Vec3V b = V3Round(tmp);
const Vec3V V1 = V4NegMulSub(twoPi, b, a);
// cos(V) ~= 1 - V^2 / 2! + V^4 / 4! - V^6 / 6! + V^8 / 8! - V^10 / 10! + V^12 / 12! -
// V^14 / 14! + V^16 / 16! - V^18 / 18! + V^20 / 20! - V^22 / 22! (for -PI <= V < PI)
const Vec3V V2 = V3Mul(V1, V1);
const Vec3V V4 = V3Mul(V2, V2);
const Vec3V V6 = V3Mul(V4, V2);
const Vec3V V8 = V3Mul(V4, V4);
const Vec3V V10 = V3Mul(V6, V4);
const Vec3V V12 = V3Mul(V6, V6);
const Vec3V V14 = V3Mul(V8, V6);
const Vec3V V16 = V3Mul(V8, V8);
const Vec3V V18 = V3Mul(V10, V8);
const Vec3V V20 = V3Mul(V10, V10);
const Vec3V V22 = V3Mul(V12, V10);
const Vec4V cosCoefficients0 = V4LoadA(g_PXCosCoefficients0.f);
const Vec4V cosCoefficients1 = V4LoadA(g_PXCosCoefficients1.f);
const Vec4V cosCoefficients2 = V4LoadA(g_PXCosCoefficients2.f);
const FloatV C1 = V4GetY(cosCoefficients0);
const FloatV C2 = V4GetZ(cosCoefficients0);
const FloatV C3 = V4GetW(cosCoefficients0);
const FloatV C4 = V4GetX(cosCoefficients1);
const FloatV C5 = V4GetY(cosCoefficients1);
const FloatV C6 = V4GetZ(cosCoefficients1);
const FloatV C7 = V4GetW(cosCoefficients1);
const FloatV C8 = V4GetX(cosCoefficients2);
const FloatV C9 = V4GetY(cosCoefficients2);
const FloatV C10 = V4GetZ(cosCoefficients2);
const FloatV C11 = V4GetW(cosCoefficients2);
Vec3V Result;
Result = V4ScaleAdd(V2, C1, V4One());
Result = V4ScaleAdd(V4, C2, Result);
Result = V4ScaleAdd(V6, C3, Result);
Result = V4ScaleAdd(V8, C4, Result);
Result = V4ScaleAdd(V10, C5, Result);
Result = V4ScaleAdd(V12, C6, Result);
Result = V4ScaleAdd(V14, C7, Result);
Result = V4ScaleAdd(V16, C8, Result);
Result = V4ScaleAdd(V18, C9, Result);
Result = V4ScaleAdd(V20, C10, Result);
Result = V4ScaleAdd(V22, C11, Result);
return V4ClearW(Result);
}
PX_FORCE_INLINE Vec3V V3PermYZZ(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
const float32x2_t xy = vget_low_f32(a);
const float32x2_t zw = vget_high_f32(a);
const float32x2_t yz = vext_f32(xy, zw, 1);
return vcombine_f32(yz, zw);
}
PX_FORCE_INLINE Vec3V V3PermXYX(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
const uint32x2_t mask = { 0xffffFFFF, 0x0 };
const uint32x2_t xy = vget_low_u32(vreinterpretq_u32_f32(a));
const uint32x2_t xw = vand_u32(xy, mask);
return vreinterpretq_f32_u32(vcombine_u32(xy, xw));
}
PX_FORCE_INLINE Vec3V V3PermYZX(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
const uint32x2_t mask = { 0xffffFFFF, 0x0 };
const uint32x2_t xy = vget_low_u32(vreinterpretq_u32_f32(a));
const uint32x2_t zw = vget_high_u32(vreinterpretq_u32_f32(a));
const uint32x2_t yz = vext_u32(xy, zw, 1);
const uint32x2_t xw = vand_u32(xy, mask);
return vreinterpretq_f32_u32(vcombine_u32(yz, xw));
}
PX_FORCE_INLINE Vec3V V3PermZXY(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
const uint32x2_t xy = vget_low_u32(vreinterpretq_u32_f32(a));
const uint32x2_t zw = vget_high_u32(vreinterpretq_u32_f32(a));
const uint32x2_t wz = vrev64_u32(zw);
const uint32x2_t zx = vext_u32(wz, xy, 1);
const uint32x2_t yw = vext_u32(xy, wz, 1);
return vreinterpretq_f32_u32(vcombine_u32(zx, yw));
}
PX_FORCE_INLINE Vec3V V3PermZZY(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
const uint32x2_t xy = vget_low_u32(vreinterpretq_u32_f32(a));
const uint32x2_t zw = vget_high_u32(vreinterpretq_u32_f32(a));
const uint32x2_t wz = vrev64_u32(zw);
const uint32x2_t yw = vext_u32(xy, wz, 1);
const uint32x2_t zz = vdup_lane_u32(wz, 1);
return vreinterpretq_f32_u32(vcombine_u32(zz, yw));
}
PX_FORCE_INLINE Vec3V V3PermYXX(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
const uint32x2_t mask = { 0xffffFFFF, 0x0 };
const uint32x2_t xy = vget_low_u32(vreinterpretq_u32_f32(a));
const uint32x2_t yx = vrev64_u32(xy);
const uint32x2_t xw = vand_u32(xy, mask);
return vreinterpretq_f32_u32(vcombine_u32(yx, xw));
}
PX_FORCE_INLINE Vec3V V3Perm_Zero_1Z_0Y(const Vec3V v0, const Vec3V v1)
{
ASSERT_ISVALIDVEC3V(v0);
ASSERT_ISVALIDVEC3V(v1);
const uint32x2_t xy = vget_low_u32(vreinterpretq_u32_f32(v0));
const uint32x2_t zw = vget_high_u32(vreinterpretq_u32_f32(v1));
const uint32x2_t wz = vrev64_u32(zw);
const uint32x2_t yw = vext_u32(xy, wz, 1);
return vreinterpretq_f32_u32(vcombine_u32(wz, yw));
}
PX_FORCE_INLINE Vec3V V3Perm_0Z_Zero_1X(const Vec3V v0, const Vec3V v1)
{
ASSERT_ISVALIDVEC3V(v0);
ASSERT_ISVALIDVEC3V(v1);
const uint32x2_t mask = { 0xffffFFFF, 0x0 };
const uint32x2_t zw = vget_high_u32(vreinterpretq_u32_f32(v0));
const uint32x2_t xy = vget_low_u32(vreinterpretq_u32_f32(v1));
const uint32x2_t xw = vand_u32(xy, mask);
return vreinterpretq_f32_u32(vcombine_u32(zw, xw));
}
PX_FORCE_INLINE Vec3V V3Perm_1Y_0X_Zero(const Vec3V v0, const Vec3V v1)
{
ASSERT_ISVALIDVEC3V(v0);
ASSERT_ISVALIDVEC3V(v1);
const uint32x2_t axy = vget_low_u32(vreinterpretq_u32_f32(v0));
const uint32x2_t bxy = vget_low_u32(vreinterpretq_u32_f32(v1));
const uint32x2_t byax = vext_u32(bxy, axy, 1);
const uint32x2_t ww = vdup_n_u32(0);
return vreinterpretq_f32_u32(vcombine_u32(byax, ww));
}
PX_FORCE_INLINE FloatV V3SumElems(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
// const uint32x2_t mask = {0xffffFFFF, 0x0};
const float32x2_t low = vget_low_f32(a);
const float32x2_t high = vget_high_f32(a);
// const float32x2_t high = vreinterpret_f32_u32(vand_u32(vreinterpret_u32_f32(high_), mask));
const float32x2_t sumTmp = vpadd_f32(low, high); // = {0+z, x+y}
const float32x2_t sum0ZYX = vpadd_f32(sumTmp, sumTmp); // = {x+y+z, x+y+z}
return sum0ZYX;
}
PX_FORCE_INLINE PxU32 V3OutOfBounds(const Vec3V a, const Vec3V min, const Vec3V max)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(min);
ASSERT_ISVALIDVEC3V(max);
const BoolV c = BOr(V3IsGrtr(a, max), V3IsGrtr(min, a));
return internalUnitNeonSimd::BAnyTrue3_R(c);
}
PX_FORCE_INLINE PxU32 V3InBounds(const Vec3V a, const Vec3V min, const Vec3V max)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(min);
ASSERT_ISVALIDVEC3V(max);
const BoolV c = BAnd(V3IsGrtrOrEq(a, min), V3IsGrtrOrEq(max, a));
return internalUnitNeonSimd::BAllTrue4_R(c);
}
PX_FORCE_INLINE PxU32 V3OutOfBounds(const Vec3V a, const Vec3V bounds)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(bounds);
const BoolV greater = V3IsGrtr(V3Abs(a), bounds);
return internalUnitNeonSimd::BAnyTrue3_R(greater);
}
PX_FORCE_INLINE PxU32 V3InBounds(const Vec3V a, const Vec3V bounds)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(bounds);
const BoolV greaterOrEq = V3IsGrtrOrEq(bounds, V3Abs(a));
return internalUnitNeonSimd::BAllTrue4_R(greaterOrEq);
}
PX_FORCE_INLINE void V3Transpose(Vec3V& col0, Vec3V& col1, Vec3V& col2)
{
ASSERT_ISVALIDVEC3V(col0);
ASSERT_ISVALIDVEC3V(col1);
ASSERT_ISVALIDVEC3V(col2);
Vec3V col3 = V3Zero();
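// Pad with a zero fourth column so the 4x4 zip-based transpose can be reused;
// the w lanes of the transposed columns therefore stay zero.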
const float32x4x2_t v0v1 = vzipq_f32(col0, col2);
const float32x4x2_t v2v3 = vzipq_f32(col1, col3);
const float32x4x2_t zip0 = vzipq_f32(v0v1.val[0], v2v3.val[0]);
const float32x4x2_t zip1 = vzipq_f32(v0v1.val[1], v2v3.val[1]);
col0 = zip0.val[0];
col1 = zip0.val[1];
col2 = zip1.val[0];
// col3 = zip1.val[1];
}
//////////////////////////////////
// VEC4V
//////////////////////////////////
PX_FORCE_INLINE Vec4V V4Splat(const FloatV f)
{
ASSERT_ISVALIDFLOATV(f);
return vcombine_f32(f, f);
}
PX_FORCE_INLINE Vec4V V4Merge(const FloatV* const floatVArray)
{
ASSERT_ISVALIDFLOATV(floatVArray[0]);
ASSERT_ISVALIDFLOATV(floatVArray[1]);
ASSERT_ISVALIDFLOATV(floatVArray[2]);
ASSERT_ISVALIDFLOATV(floatVArray[3]);
const uint32x2_t xLow = vreinterpret_u32_f32(floatVArray[0]);
const uint32x2_t yLow = vreinterpret_u32_f32(floatVArray[1]);
const uint32x2_t zLow = vreinterpret_u32_f32(floatVArray[2]);
const uint32x2_t wLow = vreinterpret_u32_f32(floatVArray[3]);
const uint32x2_t dLow = vext_u32(xLow, yLow, 1);
const uint32x2_t dHigh = vext_u32(zLow, wLow, 1);
return vreinterpretq_f32_u32(vcombine_u32(dLow, dHigh));
}
PX_FORCE_INLINE Vec4V V4Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z, const FloatVArg w)
{
ASSERT_ISVALIDFLOATV(x);
ASSERT_ISVALIDFLOATV(y);
ASSERT_ISVALIDFLOATV(z);
ASSERT_ISVALIDFLOATV(w);
const uint32x2_t xLow = vreinterpret_u32_f32(x);
const uint32x2_t yLow = vreinterpret_u32_f32(y);
const uint32x2_t zLow = vreinterpret_u32_f32(z);
const uint32x2_t wLow = vreinterpret_u32_f32(w);
const uint32x2_t dLow = vext_u32(xLow, yLow, 1);
const uint32x2_t dHigh = vext_u32(zLow, wLow, 1);
return vreinterpretq_f32_u32(vcombine_u32(dLow, dHigh));
}
PX_FORCE_INLINE Vec4V V4MergeW(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w)
{
const float32x2_t xx = vget_high_f32(x);
const float32x2_t yy = vget_high_f32(y);
const float32x2_t zz = vget_high_f32(z);
const float32x2_t ww = vget_high_f32(w);
const float32x2x2_t zipL = vzip_f32(xx, yy);
const float32x2x2_t zipH = vzip_f32(zz, ww);
return vcombine_f32(zipL.val[1], zipH.val[1]);
}
PX_FORCE_INLINE Vec4V V4MergeZ(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w)
{
const float32x2_t xx = vget_high_f32(x);
const float32x2_t yy = vget_high_f32(y);
const float32x2_t zz = vget_high_f32(z);
const float32x2_t ww = vget_high_f32(w);
const float32x2x2_t zipL = vzip_f32(xx, yy);
const float32x2x2_t zipH = vzip_f32(zz, ww);
return vcombine_f32(zipL.val[0], zipH.val[0]);
}
PX_FORCE_INLINE Vec4V V4MergeY(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w)
{
const float32x2_t xx = vget_low_f32(x);
const float32x2_t yy = vget_low_f32(y);
const float32x2_t zz = vget_low_f32(z);
const float32x2_t ww = vget_low_f32(w);
const float32x2x2_t zipL = vzip_f32(xx, yy);
const float32x2x2_t zipH = vzip_f32(zz, ww);
return vcombine_f32(zipL.val[1], zipH.val[1]);
}
PX_FORCE_INLINE Vec4V V4MergeX(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w)
{
const float32x2_t xx = vget_low_f32(x);
const float32x2_t yy = vget_low_f32(y);
const float32x2_t zz = vget_low_f32(z);
const float32x2_t ww = vget_low_f32(w);
const float32x2x2_t zipL = vzip_f32(xx, yy);
const float32x2x2_t zipH = vzip_f32(zz, ww);
return vcombine_f32(zipL.val[0], zipH.val[0]);
}
PX_FORCE_INLINE Vec4V V4UnpackXY(const Vec4VArg a, const Vec4VArg b)
{
return vzipq_f32(a, b).val[0];
}
PX_FORCE_INLINE Vec4V V4UnpackZW(const Vec4VArg a, const Vec4VArg b)
{
return vzipq_f32(a, b).val[1];
}
PX_FORCE_INLINE Vec4V V4UnitW()
{
const float32x2_t zeros = vreinterpret_f32_u32(vmov_n_u32(0));
const float32x2_t ones = vmov_n_f32(1.0f);
const float32x2_t zo = vext_f32(zeros, ones, 1);
return vcombine_f32(zeros, zo);
}
PX_FORCE_INLINE Vec4V V4UnitX()
{
const float32x2_t zeros = vreinterpret_f32_u32(vmov_n_u32(0));
const float32x2_t ones = vmov_n_f32(1.0f);
const float32x2_t oz = vext_f32(ones, zeros, 1);
return vcombine_f32(oz, zeros);
}
PX_FORCE_INLINE Vec4V V4UnitY()
{
const float32x2_t zeros = vreinterpret_f32_u32(vmov_n_u32(0));
const float32x2_t ones = vmov_n_f32(1.0f);
const float32x2_t zo = vext_f32(zeros, ones, 1);
return vcombine_f32(zo, zeros);
}
PX_FORCE_INLINE Vec4V V4UnitZ()
{
const float32x2_t zeros = vreinterpret_f32_u32(vmov_n_u32(0));
const float32x2_t ones = vmov_n_f32(1.0f);
const float32x2_t oz = vext_f32(ones, zeros, 1);
return vcombine_f32(zeros, oz);
}
PX_FORCE_INLINE FloatV V4GetW(const Vec4V f)
{
const float32x2_t fhigh = vget_high_f32(f);
return vdup_lane_f32(fhigh, 1);
}
PX_FORCE_INLINE FloatV V4GetX(const Vec4V f)
{
const float32x2_t fLow = vget_low_f32(f);
return vdup_lane_f32(fLow, 0);
}
PX_FORCE_INLINE FloatV V4GetY(const Vec4V f)
{
const float32x2_t fLow = vget_low_f32(f);
return vdup_lane_f32(fLow, 1);
}
PX_FORCE_INLINE FloatV V4GetZ(const Vec4V f)
{
const float32x2_t fhigh = vget_high_f32(f);
return vdup_lane_f32(fhigh, 0);
}
PX_FORCE_INLINE Vec4V V4SetW(const Vec4V v, const FloatV f)
{
ASSERT_ISVALIDFLOATV(f);
return V4Sel(BTTTF(), v, vcombine_f32(f, f));
}
PX_FORCE_INLINE Vec4V V4SetX(const Vec4V v, const FloatV f)
{
ASSERT_ISVALIDFLOATV(f);
return V4Sel(BFTTT(), v, vcombine_f32(f, f));
}
PX_FORCE_INLINE Vec4V V4SetY(const Vec4V v, const FloatV f)
{
ASSERT_ISVALIDFLOATV(f);
return V4Sel(BTFTT(), v, vcombine_f32(f, f));
}
PX_FORCE_INLINE Vec4V V4SetZ(const Vec4V v, const FloatV f)
{
ASSERT_ISVALIDFLOATV(f);
return V4Sel(BTTFT(), v, vcombine_f32(f, f));
}
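// V4ClearW zeroes the w lane; it is used in this file to restore the Vec3V
// invariant (w == 0) after full four-lane arithmetic.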
PX_FORCE_INLINE Vec4V V4ClearW(const Vec4V v)
{
return V4Sel(BTTTF(), v, V4Zero());
}
PX_FORCE_INLINE Vec4V V4PermYXWZ(const Vec4V a)
{
const float32x2_t xy = vget_low_f32(a);
const float32x2_t zw = vget_high_f32(a);
const float32x2_t yx = vext_f32(xy, xy, 1);
const float32x2_t wz = vext_f32(zw, zw, 1);
return vcombine_f32(yx, wz);
}
PX_FORCE_INLINE Vec4V V4PermXZXZ(const Vec4V a)
{
const float32x2_t xy = vget_low_f32(a);
const float32x2_t zw = vget_high_f32(a);
const float32x2x2_t xzyw = vzip_f32(xy, zw);
return vcombine_f32(xzyw.val[0], xzyw.val[0]);
}
PX_FORCE_INLINE Vec4V V4PermYWYW(const Vec4V a)
{
const float32x2_t xy = vget_low_f32(a);
const float32x2_t zw = vget_high_f32(a);
const float32x2x2_t xzyw = vzip_f32(xy, zw);
return vcombine_f32(xzyw.val[1], xzyw.val[1]);
}
PX_FORCE_INLINE Vec4V V4PermYZXW(const Vec4V a)
{
const uint32x2_t xy = vget_low_u32(vreinterpretq_u32_f32(a));
const uint32x2_t zw = vget_high_u32(vreinterpretq_u32_f32(a));
const uint32x2_t yz = vext_u32(xy, zw, 1);
const uint32x2_t xw = vrev64_u32(vext_u32(zw, xy, 1));
return vreinterpretq_f32_u32(vcombine_u32(yz, xw));
}
PX_FORCE_INLINE Vec4V V4PermZWXY(const Vec4V a)
{
const float32x2_t low = vget_low_f32(a);
const float32x2_t high = vget_high_f32(a);
return vcombine_f32(high, low);
}
template <PxU8 E0, PxU8 E1, PxU8 E2, PxU8 E3>
PX_FORCE_INLINE Vec4V V4Perm(const Vec4V V)
{
static const uint32_t ControlElement[4] =
{
#if 1
0x03020100, // XM_SWIZZLE_X
0x07060504, // XM_SWIZZLE_Y
0x0B0A0908, // XM_SWIZZLE_Z
0x0F0E0D0C, // XM_SWIZZLE_W
#else
0x00010203, // XM_SWIZZLE_X
0x04050607, // XM_SWIZZLE_Y
0x08090A0B, // XM_SWIZZLE_Z
0x0C0D0E0F, // XM_SWIZZLE_W
#endif
};
uint8x8x2_t tbl;
tbl.val[0] = vreinterpret_u8_f32(vget_low_f32(V));
tbl.val[1] = vreinterpret_u8_f32(vget_high_f32(V));
uint8x8_t idx =
vcreate_u8(static_cast<uint64_t>(ControlElement[E0]) | (static_cast<uint64_t>(ControlElement[E1]) << 32));
const uint8x8_t rL = vtbl2_u8(tbl, idx);
idx = vcreate_u8(static_cast<uint64_t>(ControlElement[E2]) | (static_cast<uint64_t>(ControlElement[E3]) << 32));
const uint8x8_t rH = vtbl2_u8(tbl, idx);
return vreinterpretq_f32_u8(vcombine_u8(rL, rH));
}
// PT: this seems measurably slower than the hardcoded version
/*PX_FORCE_INLINE Vec4V V4PermYZXW(const Vec4V a)
{
return V4Perm<1, 2, 0, 3>(a);
}*/
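// Illustrative note (not part of the original header): V4Perm<1, 2, 0, 3>(v) yields
// (v.y, v.z, v.x, v.w), i.e. the same lane order as the hardcoded V4PermYZXW above.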
PX_FORCE_INLINE Vec4V V4Zero()
{
return vreinterpretq_f32_u32(vmovq_n_u32(0));
// return vmovq_n_f32(0.0f);
}
PX_FORCE_INLINE Vec4V V4One()
{
return vmovq_n_f32(1.0f);
}
PX_FORCE_INLINE Vec4V V4Eps()
{
// return vmovq_n_f32(PX_EPS_REAL);
return V4Load(PX_EPS_REAL);
}
PX_FORCE_INLINE Vec4V V4Neg(const Vec4V f)
{
return vnegq_f32(f);
}
PX_FORCE_INLINE Vec4V V4Add(const Vec4V a, const Vec4V b)
{
return vaddq_f32(a, b);
}
PX_FORCE_INLINE Vec4V V4Sub(const Vec4V a, const Vec4V b)
{
return vsubq_f32(a, b);
}
PX_FORCE_INLINE Vec4V V4Scale(const Vec4V a, const FloatV b)
{
return vmulq_lane_f32(a, b, 0);
}
PX_FORCE_INLINE Vec4V V4Mul(const Vec4V a, const Vec4V b)
{
return vmulq_f32(a, b);
}
PX_FORCE_INLINE Vec4V V4ScaleInv(const Vec4V a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(b);
const float32x2_t invB = VRECIP(b);
return vmulq_lane_f32(a, invB, 0);
}
PX_FORCE_INLINE Vec4V V4Div(const Vec4V a, const Vec4V b)
{
const float32x4_t invB = VRECIPQ(b);
return vmulq_f32(a, invB);
}
PX_FORCE_INLINE Vec4V V4ScaleInvFast(const Vec4V a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(b);
const float32x2_t invB = VRECIPE(b);
return vmulq_lane_f32(a, invB, 0);
}
PX_FORCE_INLINE Vec4V V4DivFast(const Vec4V a, const Vec4V b)
{
const float32x4_t invB = VRECIPEQ(b);
return vmulq_f32(a, invB);
}
PX_FORCE_INLINE Vec4V V4Recip(const Vec4V a)
{
return VRECIPQ(a);
}
PX_FORCE_INLINE Vec4V V4RecipFast(const Vec4V a)
{
return VRECIPEQ(a);
}
PX_FORCE_INLINE Vec4V V4Rsqrt(const Vec4V a)
{
return VRECIPSQRTQ(a);
}
PX_FORCE_INLINE Vec4V V4RsqrtFast(const Vec4V a)
{
return VRECIPSQRTEQ(a);
}
PX_FORCE_INLINE Vec4V V4Sqrt(const Vec4V a)
{
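// sqrt(a) = a * rsqrt(a); select a where a == 0 so zero lanes do not become 0 * inf = NaN.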
return V4Sel(V4IsEq(a, V4Zero()), a, V4Mul(a, VRECIPSQRTQ(a)));
}
PX_FORCE_INLINE Vec4V V4ScaleAdd(const Vec4V a, const FloatV b, const Vec4V c)
{
ASSERT_ISVALIDFLOATV(b);
return vmlaq_lane_f32(c, a, b, 0);
}
PX_FORCE_INLINE Vec4V V4NegScaleSub(const Vec4V a, const FloatV b, const Vec4V c)
{
ASSERT_ISVALIDFLOATV(b);
return vmlsq_lane_f32(c, a, b, 0);
}
PX_FORCE_INLINE Vec4V V4MulAdd(const Vec4V a, const Vec4V b, const Vec4V c)
{
return vmlaq_f32(c, a, b);
}
PX_FORCE_INLINE Vec4V V4NegMulSub(const Vec4V a, const Vec4V b, const Vec4V c)
{
return vmlsq_f32(c, a, b);
}
PX_FORCE_INLINE Vec4V V4Abs(const Vec4V a)
{
return vabsq_f32(a);
}
PX_FORCE_INLINE FloatV V4SumElements(const Vec4V a)
{
const Vec4V xy = V4UnpackXY(a, a); // x,x,y,y
const Vec4V zw = V4UnpackZW(a, a); // z,z,w,w
const Vec4V xz_yw = V4Add(xy, zw); // x+z,x+z,y+w,y+w
const FloatV xz = V4GetX(xz_yw); // x+z
const FloatV yw = V4GetZ(xz_yw); // y+w
return FAdd(xz, yw); // sum
}
PX_FORCE_INLINE FloatV V4Dot(const Vec4V a, const Vec4V b)
{
const float32x4_t tmp = vmulq_f32(a, b);
const float32x2_t low = vget_low_f32(tmp);
const float32x2_t high = vget_high_f32(tmp);
const float32x2_t sumTmp = vpadd_f32(low, high); // = {z+w, x+y}
const float32x2_t sumWZYX = vpadd_f32(sumTmp, sumTmp); // = {x+y+z+w, x+y+z+w}
return sumWZYX;
}
PX_FORCE_INLINE FloatV V4Dot3(const Vec4V aa, const Vec4V bb)
{
// PT: the V3Dot code relies on the fact that W=0 so we can't reuse it as-is, we need to clear W first.
// TODO: find a better implementation that does not need to clear W.
const Vec4V a = V4ClearW(aa);
const Vec4V b = V4ClearW(bb);
const float32x4_t tmp = vmulq_f32(a, b);
const float32x2_t low = vget_low_f32(tmp);
const float32x2_t high = vget_high_f32(tmp);
const float32x2_t sumTmp = vpadd_f32(low, high); // = {0+z, x+y}
const float32x2_t sum0ZYX = vpadd_f32(sumTmp, sumTmp); // = {x+y+z, x+y+z}
return sum0ZYX;
}
PX_FORCE_INLINE Vec4V V4Cross(const Vec4V a, const Vec4V b)
{
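// Cross product of the xyz lanes; the w lane of the result is masked to zero.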
const uint32x2_t TF = { 0xffffFFFF, 0x0 };
const float32x2_t ay_ax = vget_low_f32(a); // d2
const float32x2_t aw_az = vget_high_f32(a); // d3
const float32x2_t by_bx = vget_low_f32(b); // d4
const float32x2_t bw_bz = vget_high_f32(b); // d5
// Hi, Lo
const float32x2_t bz_by = vext_f32(by_bx, bw_bz, 1); // bz, by
const float32x2_t az_ay = vext_f32(ay_ax, aw_az, 1); // az, ay
const float32x2_t azbx = vmul_f32(aw_az, by_bx); // 0, az*bx
const float32x2_t aybz_axby = vmul_f32(ay_ax, bz_by); // ay*bz, ax*by
const float32x2_t azbxSUBaxbz = vmls_f32(azbx, bw_bz, ay_ax); // 0, az*bx-ax*bz
const float32x2_t aybzSUBazby_axbySUBaybx = vmls_f32(aybz_axby, by_bx, az_ay); // ay*bz-az*by, ax*by-ay*bx
const float32x2_t retLow = vext_f32(aybzSUBazby_axbySUBaybx, azbxSUBaxbz, 1); // az*bx-ax*bz, ay*bz-az*by
const uint32x2_t retHigh = vand_u32(TF, vreinterpret_u32_f32(aybzSUBazby_axbySUBaybx)); // 0, ax*by-ay*bx
return vcombine_f32(retLow, vreinterpret_f32_u32(retHigh));
}
PX_FORCE_INLINE FloatV V4Length(const Vec4V a)
{
const float32x4_t tmp = vmulq_f32(a, a);
const float32x2_t low = vget_low_f32(tmp);
const float32x2_t high = vget_high_f32(tmp);
const float32x2_t sumTmp = vpadd_f32(low, high); // = {z+w, x+y}
const float32x2_t sumWZYX = vpadd_f32(sumTmp, sumTmp); // = {x+y+z+w, x+y+z+w}
return FSqrt(sumWZYX);
}
PX_FORCE_INLINE FloatV V4LengthSq(const Vec4V a)
{
return V4Dot(a, a);
}
PX_FORCE_INLINE Vec4V V4Normalize(const Vec4V a)
{
//PX_ASSERT(!FAllEq(V4LengthSq(a), FZero()));
return V4ScaleInv(a, V4Length(a));
}
PX_FORCE_INLINE Vec4V V4NormalizeFast(const Vec4V a)
{
//PX_ASSERT(!FAllEq(V4LengthSq(a), FZero()));
return V4Scale(a, FRsqrtFast(V4Dot(a, a)));
}
PX_FORCE_INLINE Vec4V V4NormalizeSafe(const Vec4V a, const Vec4V unsafeReturnValue)
{
const FloatV zero = FZero();
const FloatV length = V4Length(a);
const uint32x4_t isGreaterThanZero = FIsGrtr(length, zero);
return V4Sel(isGreaterThanZero, V4ScaleInv(a, length), unsafeReturnValue);
}
PX_FORCE_INLINE BoolV V4IsEqU32(const VecU32V a, const VecU32V b)
{
return vceqq_u32(a, b);
}
PX_FORCE_INLINE Vec4V V4Sel(const BoolV c, const Vec4V a, const Vec4V b)
{
return vbslq_f32(c, a, b);
}
PX_FORCE_INLINE BoolV V4IsGrtr(const Vec4V a, const Vec4V b)
{
return vcgtq_f32(a, b);
}
PX_FORCE_INLINE BoolV V4IsGrtrOrEq(const Vec4V a, const Vec4V b)
{
return vcgeq_f32(a, b);
}
PX_FORCE_INLINE BoolV V4IsEq(const Vec4V a, const Vec4V b)
{
return vceqq_f32(a, b);
}
PX_FORCE_INLINE Vec4V V4Max(const Vec4V a, const Vec4V b)
{
return vmaxq_f32(a, b);
}
PX_FORCE_INLINE Vec4V V4Min(const Vec4V a, const Vec4V b)
{
return vminq_f32(a, b);
}
PX_FORCE_INLINE FloatV V4ExtractMax(const Vec4V a)
{
const float32x2_t low = vget_low_f32(a);
const float32x2_t high = vget_high_f32(a);
const float32x2_t max0 = vpmax_f32(high, low);
const float32x2_t max1 = vpmax_f32(max0, max0);
return max1;
}
PX_FORCE_INLINE FloatV V4ExtractMin(const Vec4V a)
{
const float32x2_t low = vget_low_f32(a);
const float32x2_t high = vget_high_f32(a);
const float32x2_t min0 = vpmin_f32(high, low);
const float32x2_t min1 = vpmin_f32(min0, min0);
return min1;
}
PX_FORCE_INLINE Vec4V V4Clamp(const Vec4V a, const Vec4V minV, const Vec4V maxV)
{
return V4Max(V4Min(a, maxV), minV);
}
PX_FORCE_INLINE PxU32 V4AllGrtr(const Vec4V a, const Vec4V b)
{
return internalUnitNeonSimd::BAllTrue4_R(V4IsGrtr(a, b));
}
PX_FORCE_INLINE PxU32 V4AllGrtrOrEq(const Vec4V a, const Vec4V b)
{
return internalUnitNeonSimd::BAllTrue4_R(V4IsGrtrOrEq(a, b));
}
PX_FORCE_INLINE PxU32 V4AllGrtrOrEq3(const Vec4V a, const Vec4V b)
{
return internalUnitNeonSimd::BAllTrue3_R(V4IsGrtrOrEq(a, b));
}
PX_FORCE_INLINE PxU32 V4AllEq(const Vec4V a, const Vec4V b)
{
return internalUnitNeonSimd::BAllTrue4_R(V4IsEq(a, b));
}
PX_FORCE_INLINE PxU32 V4AnyGrtr3(const Vec4V a, const Vec4V b)
{
return internalUnitNeonSimd::BAnyTrue3_R(V4IsGrtr(a, b));
}
PX_FORCE_INLINE Vec4V V4Round(const Vec4V a)
{
// truncate(a + (0.5f - sign(a)))
const Vec4V half = V4Load(0.5f);
const float32x4_t sign = vcvtq_f32_u32((vshrq_n_u32(vreinterpretq_u32_f32(a), 31)));
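// sign is 1.0f for negative inputs (sign bit set) and 0.0f otherwise, so adding
// (0.5f - sign) turns the truncation below into round-to-nearest for both signs.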
const Vec4V aPlusHalf = V4Add(a, half);
const Vec4V aRound = V4Sub(aPlusHalf, sign);
return vcvtq_f32_s32(vcvtq_s32_f32(aRound));
}
PX_FORCE_INLINE Vec4V V4Sin(const Vec4V a)
{
const Vec4V recipTwoPi = V4LoadA(g_PXReciprocalTwoPi.f);
const Vec4V twoPi = V4LoadA(g_PXTwoPi.f);
const Vec4V tmp = V4Mul(a, recipTwoPi);
const Vec4V b = V4Round(tmp);
const Vec4V V1 = V4NegMulSub(twoPi, b, a);
// sin(V) ~= V - V^3 / 3! + V^5 / 5! - V^7 / 7! + V^9 / 9! - V^11 / 11! + V^13 / 13! -
// V^15 / 15! + V^17 / 17! - V^19 / 19! + V^21 / 21! - V^23 / 23! (for -PI <= V < PI)
const Vec4V V2 = V4Mul(V1, V1);
const Vec4V V3 = V4Mul(V2, V1);
const Vec4V V5 = V4Mul(V3, V2);
const Vec4V V7 = V4Mul(V5, V2);
const Vec4V V9 = V4Mul(V7, V2);
const Vec4V V11 = V4Mul(V9, V2);
const Vec4V V13 = V4Mul(V11, V2);
const Vec4V V15 = V4Mul(V13, V2);
const Vec4V V17 = V4Mul(V15, V2);
const Vec4V V19 = V4Mul(V17, V2);
const Vec4V V21 = V4Mul(V19, V2);
const Vec4V V23 = V4Mul(V21, V2);
const Vec4V sinCoefficients0 = V4LoadA(g_PXSinCoefficients0.f);
const Vec4V sinCoefficients1 = V4LoadA(g_PXSinCoefficients1.f);
const Vec4V sinCoefficients2 = V4LoadA(g_PXSinCoefficients2.f);
const FloatV S1 = V4GetY(sinCoefficients0);
const FloatV S2 = V4GetZ(sinCoefficients0);
const FloatV S3 = V4GetW(sinCoefficients0);
const FloatV S4 = V4GetX(sinCoefficients1);
const FloatV S5 = V4GetY(sinCoefficients1);
const FloatV S6 = V4GetZ(sinCoefficients1);
const FloatV S7 = V4GetW(sinCoefficients1);
const FloatV S8 = V4GetX(sinCoefficients2);
const FloatV S9 = V4GetY(sinCoefficients2);
const FloatV S10 = V4GetZ(sinCoefficients2);
const FloatV S11 = V4GetW(sinCoefficients2);
Vec4V Result;
Result = V4ScaleAdd(V3, S1, V1);
Result = V4ScaleAdd(V5, S2, Result);
Result = V4ScaleAdd(V7, S3, Result);
Result = V4ScaleAdd(V9, S4, Result);
Result = V4ScaleAdd(V11, S5, Result);
Result = V4ScaleAdd(V13, S6, Result);
Result = V4ScaleAdd(V15, S7, Result);
Result = V4ScaleAdd(V17, S8, Result);
Result = V4ScaleAdd(V19, S9, Result);
Result = V4ScaleAdd(V21, S10, Result);
Result = V4ScaleAdd(V23, S11, Result);
return Result;
}
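// Illustrative usage sketch (not from the original header; names are placeholders):
// evaluating the sine of four angles at once after range reduction to [-PI, PI).
//   const Vec4V angles = V4LoadXYZW(0.0f, 0.5f * PxPi, PxPi, PxTwoPi);
//   const Vec4V sines = V4Sin(angles); // approximately (0, 1, 0, 0)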
PX_FORCE_INLINE Vec4V V4Cos(const Vec4V a)
{
const Vec4V recipTwoPi = V4LoadA(g_PXReciprocalTwoPi.f);
const Vec4V twoPi = V4LoadA(g_PXTwoPi.f);
const Vec4V tmp = V4Mul(a, recipTwoPi);
const Vec4V b = V4Round(tmp);
const Vec4V V1 = V4NegMulSub(twoPi, b, a);
// cos(V) ~= 1 - V^2 / 2! + V^4 / 4! - V^6 / 6! + V^8 / 8! - V^10 / 10! + V^12 / 12! -
// V^14 / 14! + V^16 / 16! - V^18 / 18! + V^20 / 20! - V^22 / 22! (for -PI <= V < PI)
const Vec4V V2 = V4Mul(V1, V1);
const Vec4V V4 = V4Mul(V2, V2);
const Vec4V V6 = V4Mul(V4, V2);
const Vec4V V8 = V4Mul(V4, V4);
const Vec4V V10 = V4Mul(V6, V4);
const Vec4V V12 = V4Mul(V6, V6);
const Vec4V V14 = V4Mul(V8, V6);
const Vec4V V16 = V4Mul(V8, V8);
const Vec4V V18 = V4Mul(V10, V8);
const Vec4V V20 = V4Mul(V10, V10);
const Vec4V V22 = V4Mul(V12, V10);
const Vec4V cosCoefficients0 = V4LoadA(g_PXCosCoefficients0.f);
const Vec4V cosCoefficients1 = V4LoadA(g_PXCosCoefficients1.f);
const Vec4V cosCoefficients2 = V4LoadA(g_PXCosCoefficients2.f);
const FloatV C1 = V4GetY(cosCoefficients0);
const FloatV C2 = V4GetZ(cosCoefficients0);
const FloatV C3 = V4GetW(cosCoefficients0);
const FloatV C4 = V4GetX(cosCoefficients1);
const FloatV C5 = V4GetY(cosCoefficients1);
const FloatV C6 = V4GetZ(cosCoefficients1);
const FloatV C7 = V4GetW(cosCoefficients1);
const FloatV C8 = V4GetX(cosCoefficients2);
const FloatV C9 = V4GetY(cosCoefficients2);
const FloatV C10 = V4GetZ(cosCoefficients2);
const FloatV C11 = V4GetW(cosCoefficients2);
Vec4V Result;
Result = V4ScaleAdd(V2, C1, V4One());
Result = V4ScaleAdd(V4, C2, Result);
Result = V4ScaleAdd(V6, C3, Result);
Result = V4ScaleAdd(V8, C4, Result);
Result = V4ScaleAdd(V10, C5, Result);
Result = V4ScaleAdd(V12, C6, Result);
Result = V4ScaleAdd(V14, C7, Result);
Result = V4ScaleAdd(V16, C8, Result);
Result = V4ScaleAdd(V18, C9, Result);
Result = V4ScaleAdd(V20, C10, Result);
Result = V4ScaleAdd(V22, C11, Result);
return Result;
}
PX_FORCE_INLINE void V4Transpose(Vec4V& col0, Vec4V& col1, Vec4V& col2, Vec4V& col3)
{
const float32x4x2_t v0v1 = vzipq_f32(col0, col2);
const float32x4x2_t v2v3 = vzipq_f32(col1, col3);
const float32x4x2_t zip0 = vzipq_f32(v0v1.val[0], v2v3.val[0]);
const float32x4x2_t zip1 = vzipq_f32(v0v1.val[1], v2v3.val[1]);
col0 = zip0.val[0];
col1 = zip0.val[1];
col2 = zip1.val[0];
col3 = zip1.val[1];
}
//////////////////////////////////
// BOOLV
//////////////////////////////////
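// The B**** factories below build constant lane masks, named in xyzw order:
// T = lane of all set bits (0xffffFFFF), F = lane of zeros.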
PX_FORCE_INLINE BoolV BFFFF()
{
return vmovq_n_u32(0);
}
PX_FORCE_INLINE BoolV BFFFT()
{
const uint32x2_t zeros = vmov_n_u32(0);
const uint32x2_t ones = vmov_n_u32(0xffffFFFF);
const uint32x2_t zo = vext_u32(zeros, ones, 1);
return vcombine_u32(zeros, zo);
}
PX_FORCE_INLINE BoolV BFFTF()
{
const uint32x2_t zeros = vmov_n_u32(0);
const uint32x2_t ones = vmov_n_u32(0xffffFFFF);
const uint32x2_t oz = vext_u32(ones, zeros, 1);
return vcombine_u32(zeros, oz);
}
PX_FORCE_INLINE BoolV BFFTT()
{
const uint32x2_t zeros = vmov_n_u32(0);
const uint32x2_t ones = vmov_n_u32(0xffffFFFF);
return vcombine_u32(zeros, ones);
}
PX_FORCE_INLINE BoolV BFTFF()
{
const uint32x2_t zeros = vmov_n_u32(0);
const uint32x2_t ones = vmov_n_u32(0xffffFFFF);
const uint32x2_t zo = vext_u32(zeros, ones, 1);
return vcombine_u32(zo, zeros);
}
PX_FORCE_INLINE BoolV BFTFT()
{
const uint32x2_t zeros = vmov_n_u32(0);
const uint32x2_t ones = vmov_n_u32(0xffffFFFF);
const uint32x2_t zo = vext_u32(zeros, ones, 1);
return vcombine_u32(zo, zo);
}
PX_FORCE_INLINE BoolV BFTTF()
{
const uint32x2_t zeros = vmov_n_u32(0);
const uint32x2_t ones = vmov_n_u32(0xffffFFFF);
const uint32x2_t zo = vext_u32(zeros, ones, 1);
const uint32x2_t oz = vext_u32(ones, zeros, 1);
return vcombine_u32(zo, oz);
}
PX_FORCE_INLINE BoolV BFTTT()
{
const uint32x2_t zeros = vmov_n_u32(0);
const uint32x2_t ones = vmov_n_u32(0xffffFFFF);
const uint32x2_t zo = vext_u32(zeros, ones, 1);
return vcombine_u32(zo, ones);
}
PX_FORCE_INLINE BoolV BTFFF()
{
const uint32x2_t zeros = vmov_n_u32(0);
const uint32x2_t ones = vmov_n_u32(0xffffFFFF);
// const uint32x2_t zo = vext_u32(zeros, ones, 1);
const uint32x2_t oz = vext_u32(ones, zeros, 1);
return vcombine_u32(oz, zeros);
}
PX_FORCE_INLINE BoolV BTFFT()
{
const uint32x2_t zeros = vmov_n_u32(0);
const uint32x2_t ones = vmov_n_u32(0xffffFFFF);
const uint32x2_t zo = vext_u32(zeros, ones, 1);
const uint32x2_t oz = vext_u32(ones, zeros, 1);
return vcombine_u32(oz, zo);
}
PX_FORCE_INLINE BoolV BTFTF()
{
const uint32x2_t zeros = vmov_n_u32(0);
const uint32x2_t ones = vmov_n_u32(0xffffFFFF);
const uint32x2_t oz = vext_u32(ones, zeros, 1);
return vcombine_u32(oz, oz);
}
PX_FORCE_INLINE BoolV BTFTT()
{
const uint32x2_t zeros = vmov_n_u32(0);
const uint32x2_t ones = vmov_n_u32(0xffffFFFF);
const uint32x2_t oz = vext_u32(ones, zeros, 1);
return vcombine_u32(oz, ones);
}
PX_FORCE_INLINE BoolV BTTFF()
{
const uint32x2_t zeros = vmov_n_u32(0);
const uint32x2_t ones = vmov_n_u32(0xffffFFFF);
return vcombine_u32(ones, zeros);
}
PX_FORCE_INLINE BoolV BTTFT()
{
const uint32x2_t zeros = vmov_n_u32(0);
const uint32x2_t ones = vmov_n_u32(0xffffFFFF);
const uint32x2_t zo = vext_u32(zeros, ones, 1);
return vcombine_u32(ones, zo);
}
PX_FORCE_INLINE BoolV BTTTF()
{
const uint32x2_t zeros = vmov_n_u32(0);
const uint32x2_t ones = vmov_n_u32(0xffffFFFF);
const uint32x2_t oz = vext_u32(ones, zeros, 1);
return vcombine_u32(ones, oz);
}
PX_FORCE_INLINE BoolV BTTTT()
{
return vmovq_n_u32(0xffffFFFF);
}
PX_FORCE_INLINE BoolV BXMask()
{
return BTFFF();
}
PX_FORCE_INLINE BoolV BYMask()
{
return BFTFF();
}
PX_FORCE_INLINE BoolV BZMask()
{
return BFFTF();
}
PX_FORCE_INLINE BoolV BWMask()
{
return BFFFT();
}
PX_FORCE_INLINE BoolV BGetX(const BoolV f)
{
const uint32x2_t fLow = vget_low_u32(f);
return vdupq_lane_u32(fLow, 0);
}
PX_FORCE_INLINE BoolV BGetY(const BoolV f)
{
const uint32x2_t fLow = vget_low_u32(f);
return vdupq_lane_u32(fLow, 1);
}
PX_FORCE_INLINE BoolV BGetZ(const BoolV f)
{
const uint32x2_t fHigh = vget_high_u32(f);
return vdupq_lane_u32(fHigh, 0);
}
PX_FORCE_INLINE BoolV BGetW(const BoolV f)
{
const uint32x2_t fHigh = vget_high_u32(f);
return vdupq_lane_u32(fHigh, 1);
}
PX_FORCE_INLINE BoolV BSetX(const BoolV v, const BoolV f)
{
return vbslq_u32(BFTTT(), v, f);
}
PX_FORCE_INLINE BoolV BSetY(const BoolV v, const BoolV f)
{
return vbslq_u32(BTFTT(), v, f);
}
PX_FORCE_INLINE BoolV BSetZ(const BoolV v, const BoolV f)
{
return vbslq_u32(BTTFT(), v, f);
}
PX_FORCE_INLINE BoolV BSetW(const BoolV v, const BoolV f)
{
return vbslq_u32(BTTTF(), v, f);
}
PX_FORCE_INLINE BoolV BAnd(const BoolV a, const BoolV b)
{
return vandq_u32(a, b);
}
PX_FORCE_INLINE BoolV BNot(const BoolV a)
{
return vmvnq_u32(a);
}
PX_FORCE_INLINE BoolV BAndNot(const BoolV a, const BoolV b)
{
// return vbicq_u32(a, b);
return vandq_u32(a, vmvnq_u32(b));
}
PX_FORCE_INLINE BoolV BOr(const BoolV a, const BoolV b)
{
return vorrq_u32(a, b);
}
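// The BAllTrue*/BAnyTrue* reductions below narrow the 128-bit mask to one byte per
// lane and then compare (all) or bit-test (any) against the packed all-true pattern.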
PX_FORCE_INLINE BoolV BAllTrue4(const BoolV a)
{
const uint32x2_t allTrue = vmov_n_u32(0xffffFFFF);
const uint16x4_t dHigh = vget_high_u16(vreinterpretq_u16_u32(a));
const uint16x4_t dLow = vmovn_u32(a);
uint16x8_t combined = vcombine_u16(dLow, dHigh);
const uint32x2_t finalReduce = vreinterpret_u32_u8(vmovn_u16(combined));
const uint32x2_t result = vceq_u32(finalReduce, allTrue);
return vdupq_lane_u32(result, 0);
}
PX_FORCE_INLINE BoolV BAnyTrue4(const BoolV a)
{
const uint32x2_t allTrue = vmov_n_u32(0xffffFFFF);
const uint16x4_t dHigh = vget_high_u16(vreinterpretq_u16_u32(a));
const uint16x4_t dLow = vmovn_u32(a);
uint16x8_t combined = vcombine_u16(dLow, dHigh);
const uint32x2_t finalReduce = vreinterpret_u32_u8(vmovn_u16(combined));
const uint32x2_t result = vtst_u32(finalReduce, allTrue);
return vdupq_lane_u32(result, 0);
}
PX_FORCE_INLINE BoolV BAllTrue3(const BoolV a)
{
const uint32x2_t allTrue3 = vmov_n_u32(0x00ffFFFF);
const uint16x4_t dHigh = vget_high_u16(vreinterpretq_u16_u32(a));
const uint16x4_t dLow = vmovn_u32(a);
uint16x8_t combined = vcombine_u16(dLow, dHigh);
const uint32x2_t finalReduce = vreinterpret_u32_u8(vmovn_u16(combined));
const uint32x2_t result = vceq_u32(vand_u32(finalReduce, allTrue3), allTrue3);
return vdupq_lane_u32(result, 0);
}
PX_FORCE_INLINE BoolV BAnyTrue3(const BoolV a)
{
const uint32x2_t allTrue3 = vmov_n_u32(0x00ffFFFF);
const uint16x4_t dHigh = vget_high_u16(vreinterpretq_u16_u32(a));
const uint16x4_t dLow = vmovn_u32(a);
uint16x8_t combined = vcombine_u16(dLow, dHigh);
const uint32x2_t finalReduce = vreinterpret_u32_u8(vmovn_u16(combined));
const uint32x2_t result = vtst_u32(vand_u32(finalReduce, allTrue3), allTrue3);
return vdupq_lane_u32(result, 0);
}
PX_FORCE_INLINE PxU32 BAllEq(const BoolV a, const BoolV b)
{
const BoolV bTest = vceqq_u32(a, b);
return internalUnitNeonSimd::BAllTrue4_R(bTest);
}
PX_FORCE_INLINE PxU32 BAllEqTTTT(const BoolV a)
{
return BAllEq(a, BTTTT());
}
PX_FORCE_INLINE PxU32 BAllEqFFFF(const BoolV a)
{
return BAllEq(a, BFFFF());
}
PX_FORCE_INLINE PxU32 BGetBitMask(const BoolV a)
{
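// Packs the four lanes into a 4-bit scalar mask (bit0 = x .. bit3 = w), assuming each
// lane is either all-ones or all-zeros as produced by the comparison functions.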
static PX_ALIGN(16, const PxU32) bitMaskData[4] = { 1, 2, 4, 8 };
const uint32x4_t bitMask = *(reinterpret_cast<const uint32x4_t*>(bitMaskData));
const uint32x4_t t0 = vandq_u32(a, bitMask);
const uint32x2_t t1 = vpadd_u32(vget_low_u32(t0), vget_high_u32(t0)); // Pairwise add (0 + 1), (2 + 3)
return PxU32(vget_lane_u32(vpadd_u32(t1, t1), 0));
}
//////////////////////////////////
// MAT33V
//////////////////////////////////
PX_FORCE_INLINE Vec3V M33MulV3(const Mat33V& a, const Vec3V b)
{
const FloatV x = V3GetX(b);
const FloatV y = V3GetY(b);
const FloatV z = V3GetZ(b);
const Vec3V v0 = V3Scale(a.col0, x);
const Vec3V v1 = V3Scale(a.col1, y);
const Vec3V v2 = V3Scale(a.col2, z);
const Vec3V v0PlusV1 = V3Add(v0, v1);
return V3Add(v0PlusV1, v2);
}
PX_FORCE_INLINE Vec3V M33TrnspsMulV3(const Mat33V& a, const Vec3V b)
{
const FloatV x = V3Dot(a.col0, b);
const FloatV y = V3Dot(a.col1, b);
const FloatV z = V3Dot(a.col2, b);
return V3Merge(x, y, z);
}
PX_FORCE_INLINE Vec3V M33MulV3AddV3(const Mat33V& A, const Vec3V b, const Vec3V c)
{
const FloatV x = V3GetX(b);
const FloatV y = V3GetY(b);
const FloatV z = V3GetZ(b);
Vec3V result = V3ScaleAdd(A.col0, x, c);
result = V3ScaleAdd(A.col1, y, result);
return V3ScaleAdd(A.col2, z, result);
}
PX_FORCE_INLINE Mat33V M33MulM33(const Mat33V& a, const Mat33V& b)
{
return Mat33V(M33MulV3(a, b.col0), M33MulV3(a, b.col1), M33MulV3(a, b.col2));
}
PX_FORCE_INLINE Mat33V M33Add(const Mat33V& a, const Mat33V& b)
{
return Mat33V(V3Add(a.col0, b.col0), V3Add(a.col1, b.col1), V3Add(a.col2, b.col2));
}
PX_FORCE_INLINE Mat33V M33Scale(const Mat33V& a, const FloatV& b)
{
return Mat33V(V3Scale(a.col0, b), V3Scale(a.col1, b), V3Scale(a.col2, b));
}
PX_FORCE_INLINE Mat33V M33Inverse(const Mat33V& a)
{
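// Inverse via the adjugate: the rows of the inverse (before scaling by 1/det) are the
// cross products (col1 x col2), (col2 x col0), (col0 x col1), with
// det = (col0 x col1) . col2; FRecipFast trades accuracy for speed here.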
const float32x2_t zeros = vreinterpret_f32_u32(vmov_n_u32(0));
const BoolV btttf = BTTTF();
const Vec3V cross01 = V3Cross(a.col0, a.col1);
const Vec3V cross12 = V3Cross(a.col1, a.col2);
const Vec3V cross20 = V3Cross(a.col2, a.col0);
const FloatV dot = V3Dot(cross01, a.col2);
const FloatV invDet = FRecipFast(dot);
const float32x4x2_t merge = vzipq_f32(cross12, cross01);
const float32x4_t mergeh = merge.val[0];
const float32x4_t mergel = merge.val[1];
// const Vec3V colInv0 = XMVectorPermute(mergeh,cross20,PxPermuteControl(0,4,1,7));
const float32x4_t colInv0_xxyy = vzipq_f32(mergeh, cross20).val[0];
const float32x4_t colInv0 = vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(colInv0_xxyy), btttf));
// const Vec3V colInv1 = XMVectorPermute(mergeh,cross20,PxPermuteControl(2,5,3,7));
const float32x2_t zw0 = vget_high_f32(mergeh);
const float32x2_t xy1 = vget_low_f32(cross20);
const float32x2_t yzero1 = vext_f32(xy1, zeros, 1);
const float32x2x2_t merge1 = vzip_f32(zw0, yzero1);
const float32x4_t colInv1 = vcombine_f32(merge1.val[0], merge1.val[1]);
// const Vec3V colInv2 = XMVectorPermute(mergel,cross20,PxPermuteControl(0,6,1,7));
const float32x2_t x0y0 = vget_low_f32(mergel);
const float32x2_t z1w1 = vget_high_f32(cross20);
const float32x2x2_t merge2 = vzip_f32(x0y0, z1w1);
const float32x4_t colInv2 = vcombine_f32(merge2.val[0], merge2.val[1]);
return Mat33V(vmulq_lane_f32(colInv0, invDet, 0), vmulq_lane_f32(colInv1, invDet, 0),
vmulq_lane_f32(colInv2, invDet, 0));
}
PX_FORCE_INLINE Mat33V M33Trnsps(const Mat33V& a)
{
return Mat33V(V3Merge(V3GetX(a.col0), V3GetX(a.col1), V3GetX(a.col2)),
V3Merge(V3GetY(a.col0), V3GetY(a.col1), V3GetY(a.col2)),
V3Merge(V3GetZ(a.col0), V3GetZ(a.col1), V3GetZ(a.col2)));
}
PX_FORCE_INLINE Mat33V M33Identity()
{
return Mat33V(V3UnitX(), V3UnitY(), V3UnitZ());
}
PX_FORCE_INLINE Mat33V M33Sub(const Mat33V& a, const Mat33V& b)
{
return Mat33V(V3Sub(a.col0, b.col0), V3Sub(a.col1, b.col1), V3Sub(a.col2, b.col2));
}
PX_FORCE_INLINE Mat33V M33Neg(const Mat33V& a)
{
return Mat33V(V3Neg(a.col0), V3Neg(a.col1), V3Neg(a.col2));
}
PX_FORCE_INLINE Mat33V M33Abs(const Mat33V& a)
{
return Mat33V(V3Abs(a.col0), V3Abs(a.col1), V3Abs(a.col2));
}
PX_FORCE_INLINE Mat33V PromoteVec3V(const Vec3V v)
{
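// Builds a diagonal matrix from v: each column keeps exactly one component of v.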
const BoolV bTFFF = BTFFF();
const BoolV bFTFF = BFTFF();
const BoolV bFFTF = BFFTF();
const Vec3V zero = V3Zero();
return Mat33V(V3Sel(bTFFF, v, zero), V3Sel(bFTFF, v, zero), V3Sel(bFFTF, v, zero));
}
PX_FORCE_INLINE Mat33V M33Diagonal(const Vec3VArg d)
{
const Vec3V x = V3Mul(V3UnitX(), d);
const Vec3V y = V3Mul(V3UnitY(), d);
const Vec3V z = V3Mul(V3UnitZ(), d);
return Mat33V(x, y, z);
}
//////////////////////////////////
// MAT34V
//////////////////////////////////
PX_FORCE_INLINE Vec3V M34MulV3(const Mat34V& a, const Vec3V b)
{
const FloatV x = V3GetX(b);
const FloatV y = V3GetY(b);
const FloatV z = V3GetZ(b);
const Vec3V v0 = V3Scale(a.col0, x);
const Vec3V v1 = V3Scale(a.col1, y);
const Vec3V v2 = V3Scale(a.col2, z);
const Vec3V v0PlusV1 = V3Add(v0, v1);
const Vec3V v0PlusV1Plusv2 = V3Add(v0PlusV1, v2);
return V3Add(v0PlusV1Plusv2, a.col3);
}
PX_FORCE_INLINE Vec3V M34Mul33V3(const Mat34V& a, const Vec3V b)
{
const FloatV x = V3GetX(b);
const FloatV y = V3GetY(b);
const FloatV z = V3GetZ(b);
const Vec3V v0 = V3Scale(a.col0, x);
const Vec3V v1 = V3Scale(a.col1, y);
const Vec3V v2 = V3Scale(a.col2, z);
const Vec3V v0PlusV1 = V3Add(v0, v1);
return V3Add(v0PlusV1, v2);
}
PX_FORCE_INLINE Vec3V M34TrnspsMul33V3(const Mat34V& a, const Vec3V b)
{
const FloatV x = V3Dot(a.col0, b);
const FloatV y = V3Dot(a.col1, b);
const FloatV z = V3Dot(a.col2, b);
return V3Merge(x, y, z);
}
PX_FORCE_INLINE Mat34V M34MulM34(const Mat34V& a, const Mat34V& b)
{
return Mat34V(M34Mul33V3(a, b.col0), M34Mul33V3(a, b.col1), M34Mul33V3(a, b.col2), M34MulV3(a, b.col3));
}
PX_FORCE_INLINE Mat33V M34MulM33(const Mat34V& a, const Mat33V& b)
{
return Mat33V(M34Mul33V3(a, b.col0), M34Mul33V3(a, b.col1), M34Mul33V3(a, b.col2));
}
PX_FORCE_INLINE Mat33V M34Mul33MM34(const Mat34V& a, const Mat34V& b)
{
return Mat33V(M34Mul33V3(a, b.col0), M34Mul33V3(a, b.col1), M34Mul33V3(a, b.col2));
}
PX_FORCE_INLINE Mat34V M34Add(const Mat34V& a, const Mat34V& b)
{
return Mat34V(V3Add(a.col0, b.col0), V3Add(a.col1, b.col1), V3Add(a.col2, b.col2), V3Add(a.col3, b.col3));
}
PX_FORCE_INLINE Mat33V M34Trnsps33(const Mat34V& a)
{
return Mat33V(V3Merge(V3GetX(a.col0), V3GetX(a.col1), V3GetX(a.col2)),
V3Merge(V3GetY(a.col0), V3GetY(a.col1), V3GetY(a.col2)),
V3Merge(V3GetZ(a.col0), V3GetZ(a.col1), V3GetZ(a.col2)));
}
//////////////////////////////////
// MAT44V
//////////////////////////////////
PX_FORCE_INLINE Vec4V M44MulV4(const Mat44V& a, const Vec4V b)
{
const FloatV x = V4GetX(b);
const FloatV y = V4GetY(b);
const FloatV z = V4GetZ(b);
const FloatV w = V4GetW(b);
const Vec4V v0 = V4Scale(a.col0, x);
const Vec4V v1 = V4Scale(a.col1, y);
const Vec4V v2 = V4Scale(a.col2, z);
const Vec4V v3 = V4Scale(a.col3, w);
const Vec4V v0PlusV1 = V4Add(v0, v1);
const Vec4V v0PlusV1Plusv2 = V4Add(v0PlusV1, v2);
return V4Add(v0PlusV1Plusv2, v3);
}
PX_FORCE_INLINE Vec4V M44TrnspsMulV4(const Mat44V& a, const Vec4V b)
{
return V4Merge(V4Dot(a.col0, b), V4Dot(a.col1, b), V4Dot(a.col2, b), V4Dot(a.col3, b));
}
PX_FORCE_INLINE Mat44V M44MulM44(const Mat44V& a, const Mat44V& b)
{
return Mat44V(M44MulV4(a, b.col0), M44MulV4(a, b.col1), M44MulV4(a, b.col2), M44MulV4(a, b.col3));
}
PX_FORCE_INLINE Mat44V M44Add(const Mat44V& a, const Mat44V& b)
{
return Mat44V(V4Add(a.col0, b.col0), V4Add(a.col1, b.col1), V4Add(a.col2, b.col2), V4Add(a.col3, b.col3));
}
PX_FORCE_INLINE Mat44V M44Trnsps(const Mat44V& a)
{
// asm volatile(
// "vzip.f32 %q0, %q2 \n\t"
// "vzip.f32 %q1, %q3 \n\t"
// "vzip.f32 %q0, %q1 \n\t"
// "vzip.f32 %q2, %q3 \n\t"
// : "+w" (a.col0), "+w" (a.col1), "+w" (a.col2), "+w" a.col3));
const float32x4x2_t v0v1 = vzipq_f32(a.col0, a.col2);
const float32x4x2_t v2v3 = vzipq_f32(a.col1, a.col3);
const float32x4x2_t zip0 = vzipq_f32(v0v1.val[0], v2v3.val[0]);
const float32x4x2_t zip1 = vzipq_f32(v0v1.val[1], v2v3.val[1]);
return Mat44V(zip0.val[0], zip0.val[1], zip1.val[0], zip1.val[1]);
}
PX_FORCE_INLINE Mat44V M44Inverse(const Mat44V& a)
{
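// 4x4 inverse via Cramer's rule (cofactor expansion); the determinant reciprocal uses
// the VRECIPE estimate, so the result has limited precision.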
float32x4_t minor0, minor1, minor2, minor3;
float32x4_t row0, row1, row2, row3;
float32x4_t det, tmp1;
tmp1 = vmovq_n_f32(0.0f);
row1 = vmovq_n_f32(0.0f);
row3 = vmovq_n_f32(0.0f);
row0 = a.col0;
row1 = vextq_f32(a.col1, a.col1, 2);
row2 = a.col2;
row3 = vextq_f32(a.col3, a.col3, 2);
tmp1 = vmulq_f32(row2, row3);
tmp1 = vrev64q_f32(tmp1);
minor0 = vmulq_f32(row1, tmp1);
minor1 = vmulq_f32(row0, tmp1);
tmp1 = vextq_f32(tmp1, tmp1, 2);
minor0 = vsubq_f32(vmulq_f32(row1, tmp1), minor0);
minor1 = vsubq_f32(vmulq_f32(row0, tmp1), minor1);
minor1 = vextq_f32(minor1, minor1, 2);
tmp1 = vmulq_f32(row1, row2);
tmp1 = vrev64q_f32(tmp1);
minor0 = vaddq_f32(vmulq_f32(row3, tmp1), minor0);
minor3 = vmulq_f32(row0, tmp1);
tmp1 = vextq_f32(tmp1, tmp1, 2);
minor0 = vsubq_f32(minor0, vmulq_f32(row3, tmp1));
minor3 = vsubq_f32(vmulq_f32(row0, tmp1), minor3);
minor3 = vextq_f32(minor3, minor3, 2);
tmp1 = vmulq_f32(vextq_f32(row1, row1, 2), row3);
tmp1 = vrev64q_f32(tmp1);
row2 = vextq_f32(row2, row2, 2);
minor0 = vaddq_f32(vmulq_f32(row2, tmp1), minor0);
minor2 = vmulq_f32(row0, tmp1);
tmp1 = vextq_f32(tmp1, tmp1, 2);
minor0 = vsubq_f32(minor0, vmulq_f32(row2, tmp1));
minor2 = vsubq_f32(vmulq_f32(row0, tmp1), minor2);
minor2 = vextq_f32(minor2, minor2, 2);
tmp1 = vmulq_f32(row0, row1);
tmp1 = vrev64q_f32(tmp1);
minor2 = vaddq_f32(vmulq_f32(row3, tmp1), minor2);
minor3 = vsubq_f32(vmulq_f32(row2, tmp1), minor3);
tmp1 = vextq_f32(tmp1, tmp1, 2);
minor2 = vsubq_f32(vmulq_f32(row3, tmp1), minor2);
minor3 = vsubq_f32(minor3, vmulq_f32(row2, tmp1));
tmp1 = vmulq_f32(row0, row3);
tmp1 = vrev64q_f32(tmp1);
minor1 = vsubq_f32(minor1, vmulq_f32(row2, tmp1));
minor2 = vaddq_f32(vmulq_f32(row1, tmp1), minor2);
tmp1 = vextq_f32(tmp1, tmp1, 2);
minor1 = vaddq_f32(vmulq_f32(row2, tmp1), minor1);
minor2 = vsubq_f32(minor2, vmulq_f32(row1, tmp1));
tmp1 = vmulq_f32(row0, row2);
tmp1 = vrev64q_f32(tmp1);
minor1 = vaddq_f32(vmulq_f32(row3, tmp1), minor1);
minor3 = vsubq_f32(minor3, vmulq_f32(row1, tmp1));
tmp1 = vextq_f32(tmp1, tmp1, 2);
minor1 = vsubq_f32(minor1, vmulq_f32(row3, tmp1));
minor3 = vaddq_f32(vmulq_f32(row1, tmp1), minor3);
det = vmulq_f32(row0, minor0);
det = vaddq_f32(vextq_f32(det, det, 2), det);
det = vaddq_f32(vrev64q_f32(det), det);
det = vdupq_lane_f32(VRECIPE(vget_low_f32(det)), 0);
minor0 = vmulq_f32(det, minor0);
minor1 = vmulq_f32(det, minor1);
minor2 = vmulq_f32(det, minor2);
minor3 = vmulq_f32(det, minor3);
Mat44V invTrans(minor0, minor1, minor2, minor3);
return M44Trnsps(invTrans);
}
PX_FORCE_INLINE Vec4V V4LoadXYZW(const PxF32& x, const PxF32& y, const PxF32& z, const PxF32& w)
{
const float32x4_t ret = { x, y, z, w };
return ret;
}
/*
PX_FORCE_INLINE VecU16V V4U32PK(VecU32V a, VecU32V b)
{
return vcombine_u16(vqmovn_u32(a), vqmovn_u32(b));
}
*/
PX_FORCE_INLINE VecU32V V4U32Sel(const BoolV c, const VecU32V a, const VecU32V b)
{
return vbslq_u32(c, a, b);
}
PX_FORCE_INLINE VecU32V V4U32or(VecU32V a, VecU32V b)
{
return vorrq_u32(a, b);
}
PX_FORCE_INLINE VecU32V V4U32xor(VecU32V a, VecU32V b)
{
return veorq_u32(a, b);
}
PX_FORCE_INLINE VecU32V V4U32and(VecU32V a, VecU32V b)
{
return vandq_u32(a, b);
}
PX_FORCE_INLINE VecU32V V4U32Andc(VecU32V a, VecU32V b)
{
// return vbicq_u32(a, b); // creates gcc compiler bug in RTreeQueries.cpp
return vandq_u32(a, vmvnq_u32(b));
}
/*
PX_FORCE_INLINE VecU16V V4U16Or(VecU16V a, VecU16V b)
{
return vorrq_u16(a, b);
}
*/
/*
PX_FORCE_INLINE VecU16V V4U16And(VecU16V a, VecU16V b)
{
return vandq_u16(a, b);
}
*/
/*
PX_FORCE_INLINE VecU16V V4U16Andc(VecU16V a, VecU16V b)
{
return vbicq_u16(a, b);
}
*/
PX_FORCE_INLINE VecI32V I4LoadXYZW(const PxI32& x, const PxI32& y, const PxI32& z, const PxI32& w)
{
const int32x4_t ret = { x, y, z, w };
return ret;
}
PX_FORCE_INLINE VecI32V I4Load(const PxI32 i)
{
return vdupq_n_s32(i);
}
PX_FORCE_INLINE VecI32V I4LoadU(const PxI32* i)
{
return vld1q_s32(i);
}
PX_FORCE_INLINE VecI32V I4LoadA(const PxI32* i)
{
return vld1q_s32(i);
}
PX_FORCE_INLINE VecI32V VecI32V_Add(const VecI32VArg a, const VecI32VArg b)
{
return vaddq_s32(a, b);
}
PX_FORCE_INLINE VecI32V VecI32V_Sub(const VecI32VArg a, const VecI32VArg b)
{
return vsubq_s32(a, b);
}
PX_FORCE_INLINE BoolV VecI32V_IsGrtr(const VecI32VArg a, const VecI32VArg b)
{
return vcgtq_s32(a, b);
}
PX_FORCE_INLINE BoolV VecI32V_IsEq(const VecI32VArg a, const VecI32VArg b)
{
return vceqq_s32(a, b);
}
PX_FORCE_INLINE VecI32V V4I32Sel(const BoolV c, const VecI32V a, const VecI32V b)
{
return vbslq_s32(c, a, b);
}
PX_FORCE_INLINE VecI32V VecI32V_Zero()
{
return vdupq_n_s32(0);
}
PX_FORCE_INLINE VecI32V VecI32V_One()
{
return vdupq_n_s32(1);
}
PX_FORCE_INLINE VecI32V VecI32V_Two()
{
return vdupq_n_s32(2);
}
PX_FORCE_INLINE VecI32V VecI32V_MinusOne()
{
return vdupq_n_s32(-1);
}
PX_FORCE_INLINE VecU32V U4Zero()
{
return U4Load(0);
}
PX_FORCE_INLINE VecU32V U4One()
{
return U4Load(1);
}
PX_FORCE_INLINE VecU32V U4Two()
{
return U4Load(2);
}
PX_FORCE_INLINE VecShiftV VecI32V_PrepareShift(const VecI32VArg shift)
{
return shift;
}
PX_FORCE_INLINE VecI32V VecI32V_LeftShift(const VecI32VArg a, const VecShiftVArg count)
{
return vshlq_s32(a, count);
}
PX_FORCE_INLINE VecI32V VecI32V_RightShift(const VecI32VArg a, const VecShiftVArg count)
{
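// vshlq shifts left by a signed per-lane count, so a right shift is expressed as a
// shift by the negated count.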
return vshlq_s32(a, VecI32V_Sub(I4Load(0), count));
}
PX_FORCE_INLINE VecI32V VecI32V_LeftShift(const VecI32VArg a, const PxU32 count)
{
const int32x4_t shiftCount = { (PxI32)count, (PxI32)count, (PxI32)count, (PxI32)count };
return vshlq_s32(a, shiftCount);
}
PX_FORCE_INLINE VecI32V VecI32V_RightShift(const VecI32VArg a, const PxU32 count)
{
const int32x4_t shiftCount = { -(PxI32)count, -(PxI32)count, -(PxI32)count, -(PxI32)count };
return vshlq_s32(a, shiftCount);
}
PX_FORCE_INLINE VecI32V VecI32V_And(const VecI32VArg a, const VecI32VArg b)
{
return vandq_s32(a, b);
}
PX_FORCE_INLINE VecI32V VecI32V_Or(const VecI32VArg a, const VecI32VArg b)
{
return vorrq_s32(a, b);
}
PX_FORCE_INLINE VecI32V VecI32V_GetX(const VecI32VArg f)
{
const int32x2_t fLow = vget_low_s32(f);
return vdupq_lane_s32(fLow, 0);
}
PX_FORCE_INLINE VecI32V VecI32V_GetY(const VecI32VArg f)
{
const int32x2_t fLow = vget_low_s32(f);
return vdupq_lane_s32(fLow, 1);
}
PX_FORCE_INLINE VecI32V VecI32V_GetZ(const VecI32VArg f)
{
const int32x2_t fHigh = vget_high_s32(f);
return vdupq_lane_s32(fHigh, 0);
}
PX_FORCE_INLINE VecI32V VecI32V_GetW(const VecI32VArg f)
{
const int32x2_t fHigh = vget_high_s32(f);
return vdupq_lane_s32(fHigh, 1);
}
PX_FORCE_INLINE VecI32V VecI32V_Sel(const BoolV c, const VecI32VArg a, const VecI32VArg b)
{
return vbslq_s32(c, a, b);
}
PX_FORCE_INLINE void PxI32_From_VecI32V(const VecI32VArg a, PxI32* i)
{
*i = vgetq_lane_s32(a, 0);
}
PX_FORCE_INLINE VecI32V VecI32V_Merge(const VecI32VArg a, const VecI32VArg b, const VecI32VArg c, const VecI32VArg d)
{
const int32x2_t aLow = vget_low_s32(a);
const int32x2_t bLow = vget_low_s32(b);
const int32x2_t cLow = vget_low_s32(c);
const int32x2_t dLow = vget_low_s32(d);
const int32x2_t low = vext_s32(aLow, bLow, 1);
const int32x2_t high = vext_s32(cLow, dLow, 1);
return vcombine_s32(low, high);
}
PX_FORCE_INLINE VecI32V VecI32V_From_BoolV(const BoolVArg a)
{
return vreinterpretq_s32_u32(a);
}
PX_FORCE_INLINE VecU32V VecU32V_From_BoolV(const BoolVArg a)
{
return a;
}
/*
template<int a> PX_FORCE_INLINE VecI32V V4ISplat()
{
return vdupq_n_s32(a);
}
template<PxU32 a> PX_FORCE_INLINE VecU32V V4USplat()
{
return vdupq_n_u32(a);
}
*/
/*
PX_FORCE_INLINE void V4U16StoreAligned(VecU16V val, VecU16V* address)
{
vst1q_u16((uint16_t*)address, val);
}
*/
PX_FORCE_INLINE void V4U32StoreAligned(VecU32V val, VecU32V* address)
{
vst1q_u32(reinterpret_cast<uint32_t*>(address), val);
}
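// Note: vld1q does not require 16-byte alignment, so the aligned and unaligned load
// variants below share the same implementation.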
PX_FORCE_INLINE Vec4V V4LoadAligned(Vec4V* addr)
{
return vld1q_f32(reinterpret_cast<float32_t*>(addr));
}
PX_FORCE_INLINE Vec4V V4LoadUnaligned(Vec4V* addr)
{
return vld1q_f32(reinterpret_cast<float32_t*>(addr));
}
PX_FORCE_INLINE Vec4V V4Andc(const Vec4V a, const VecU32V b)
{
return vreinterpretq_f32_u32(V4U32Andc(vreinterpretq_u32_f32(a), b));
}
PX_FORCE_INLINE VecU32V V4IsGrtrV32u(const Vec4V a, const Vec4V b)
{
return V4IsGrtr(a, b);
}
PX_FORCE_INLINE VecU16V V4U16LoadAligned(VecU16V* addr)
{
return vld1q_u16(reinterpret_cast<uint16_t*>(addr));
}
PX_FORCE_INLINE VecU16V V4U16LoadUnaligned(VecU16V* addr)
{
return vld1q_u16(reinterpret_cast<uint16_t*>(addr));
}
PX_FORCE_INLINE VecU16V V4U16CompareGt(VecU16V a, VecU16V b)
{
return vcgtq_u16(a, b);
}
PX_FORCE_INLINE VecU16V V4I16CompareGt(VecI16V a, VecI16V b)
{
return vcgtq_s16(a, b);
}
PX_FORCE_INLINE Vec4V Vec4V_From_VecU32V(VecU32V a)
{
return vcvtq_f32_u32(a);
}
PX_FORCE_INLINE Vec4V Vec4V_From_VecI32V(VecI32V a)
{
return vcvtq_f32_s32(a);
}
PX_FORCE_INLINE VecI32V VecI32V_From_Vec4V(Vec4V a)
{
return vcvtq_s32_f32(a);
}
PX_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecU32V(VecU32V a)
{
return vreinterpretq_f32_u32(a);
}
PX_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecI32V(VecI32V a)
{
return vreinterpretq_f32_s32(a);
}
PX_FORCE_INLINE VecU32V VecU32V_ReinterpretFrom_Vec4V(Vec4V a)
{
return vreinterpretq_u32_f32(a);
}
PX_FORCE_INLINE VecI32V VecI32V_ReinterpretFrom_Vec4V(Vec4V a)
{
return vreinterpretq_s32_f32(a);
}
#if !PX_SWITCH
template <int index>
PX_FORCE_INLINE BoolV BSplatElement(BoolV a)
{
if(index < 2)
{
return vdupq_lane_u32(vget_low_u32(a), index);
}
else if(index == 2)
{
return vdupq_lane_u32(vget_high_u32(a), 0);
}
else if(index == 3)
{
return vdupq_lane_u32(vget_high_u32(a), 1);
}
}
#else
//workaround for template compile issue
template <int index> PX_FORCE_INLINE BoolV BSplatElement(BoolV a);
template<> PX_FORCE_INLINE BoolV BSplatElement<0>(BoolV a) { return vdupq_lane_u32(vget_low_u32(a), 0); }
template<> PX_FORCE_INLINE BoolV BSplatElement<1>(BoolV a) { return vdupq_lane_u32(vget_low_u32(a), 1); }
template<> PX_FORCE_INLINE BoolV BSplatElement<2>(BoolV a) { return vdupq_lane_u32(vget_high_u32(a), 0); }
template<> PX_FORCE_INLINE BoolV BSplatElement<3>(BoolV a) { return vdupq_lane_u32(vget_high_u32(a), 1); }
#endif
#if !PX_SWITCH
template <int index>
PX_FORCE_INLINE VecU32V V4U32SplatElement(VecU32V a)
{
if(index < 2)
{
return vdupq_lane_u32(vget_low_u32(a), index);
}
else if(index == 2)
{
return vdupq_lane_u32(vget_high_u32(a), 0);
}
else if(index == 3)
{
return vdupq_lane_u32(vget_high_u32(a), 1);
}
}
#else
//workaround for template compile issue
template <int index> PX_FORCE_INLINE VecU32V V4U32SplatElement(VecU32V a);
template <> PX_FORCE_INLINE VecU32V V4U32SplatElement<0>(VecU32V a) { return vdupq_lane_u32(vget_low_u32(a), 0); }
template <> PX_FORCE_INLINE VecU32V V4U32SplatElement<1>(VecU32V a) { return vdupq_lane_u32(vget_low_u32(a), 1); }
template <> PX_FORCE_INLINE VecU32V V4U32SplatElement<2>(VecU32V a) { return vdupq_lane_u32(vget_high_u32(a), 0); }
template <> PX_FORCE_INLINE VecU32V V4U32SplatElement<3>(VecU32V a) { return vdupq_lane_u32(vget_high_u32(a), 1); }
#endif
#if !PX_SWITCH
template <int index>
PX_FORCE_INLINE Vec4V V4SplatElement(Vec4V a)
{
if(index < 2)
{
return vdupq_lane_f32(vget_low_f32(a), index);
}
else if(index == 2)
{
return vdupq_lane_f32(vget_high_f32(a), 0);
}
else if(index == 3)
{
return vdupq_lane_f32(vget_high_f32(a), 1);
}
}
#else
//workaround for template compile issue
template <int index> PX_FORCE_INLINE Vec4V V4SplatElement(Vec4V a);
template <> PX_FORCE_INLINE Vec4V V4SplatElement<0>(Vec4V a) { return vdupq_lane_f32(vget_low_f32(a), 0); }
template <> PX_FORCE_INLINE Vec4V V4SplatElement<1>(Vec4V a) { return vdupq_lane_f32(vget_low_f32(a), 1); }
template <> PX_FORCE_INLINE Vec4V V4SplatElement<2>(Vec4V a) { return vdupq_lane_f32(vget_high_f32(a), 0); }
template <> PX_FORCE_INLINE Vec4V V4SplatElement<3>(Vec4V a) { return vdupq_lane_f32(vget_high_f32(a), 1); }
#endif
PX_FORCE_INLINE VecU32V U4LoadXYZW(PxU32 x, PxU32 y, PxU32 z, PxU32 w)
{
const uint32x4_t ret = { x, y, z, w };
return ret;
}
PX_FORCE_INLINE VecU32V U4Load(const PxU32 i)
{
return vdupq_n_u32(i);
}
PX_FORCE_INLINE VecU32V U4LoadU(const PxU32* i)
{
return vld1q_u32(i);
}
PX_FORCE_INLINE VecU32V U4LoadA(const PxU32* i)
{
return vld1q_u32(i);
}
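// V4Ceil and V4Floor round via an int32 round-trip, so inputs are assumed to lie
// within the range representable by int32.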
PX_FORCE_INLINE Vec4V V4Ceil(const Vec4V in)
{
const float32x4_t ones = vdupq_n_f32(1.0f);
const float32x4_t rdToZero = vcvtq_f32_s32(vcvtq_s32_f32(in));
const float32x4_t rdToZeroPlusOne = vaddq_f32(rdToZero, ones);
const uint32x4_t gt = vcgtq_f32(in, rdToZero);
return vbslq_f32(gt, rdToZeroPlusOne, rdToZero);
}
PX_FORCE_INLINE Vec4V V4Floor(const Vec4V in)
{
const float32x4_t ones = vdupq_n_f32(1.0f);
const float32x4_t rdToZero = vcvtq_f32_s32(vcvtq_s32_f32(in));
const float32x4_t rdToZeroMinusOne = vsubq_f32(rdToZero, ones);
const uint32x4_t lt = vcltq_f32(in, rdToZero);
return vbslq_f32(lt, rdToZeroMinusOne, rdToZero);
}
PX_FORCE_INLINE VecU32V V4ConvertToU32VSaturate(const Vec4V in, PxU32 power)
{
PX_ASSERT(power == 0 && "Non-zero power not supported in convertToU32VSaturate");
PX_UNUSED(power); // prevent warning in release builds
return vcvtq_u32_f32(in);
}
PX_FORCE_INLINE void QuatGetMat33V(const QuatVArg q, Vec3V& column0, Vec3V& column1, Vec3V& column2)
{
const FloatV one = FOne();
const FloatV x = V4GetX(q);
const FloatV y = V4GetY(q);
const FloatV z = V4GetZ(q);
const FloatV w = V4GetW(q);
const FloatV x2 = FAdd(x, x);
const FloatV y2 = FAdd(y, y);
const FloatV z2 = FAdd(z, z);
const FloatV xx = FMul(x2, x);
const FloatV yy = FMul(y2, y);
const FloatV zz = FMul(z2, z);
const FloatV xy = FMul(x2, y);
const FloatV xz = FMul(x2, z);
const FloatV xw = FMul(x2, w);
const FloatV yz = FMul(y2, z);
const FloatV yw = FMul(y2, w);
const FloatV zw = FMul(z2, w);
const FloatV v = FSub(one, xx);
column0 = V3Merge(FSub(FSub(one, yy), zz), FAdd(xy, zw), FSub(xz, yw));
column1 = V3Merge(FSub(xy, zw), FSub(v, zz), FAdd(yz, xw));
column2 = V3Merge(FAdd(xz, yw), FSub(yz, xw), FSub(v, yy));
}
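// Illustrative usage sketch (not from the original header; names are placeholders):
// expanding a normalized quaternion into a rotation matrix.
//   Vec3V c0, c1, c2;
//   QuatGetMat33V(q, c0, c1, c2); // q is assumed to be a unit-length QuatV
//   const Mat33V rotation(c0, c1, c2);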
} // namespace aos
} // namespace physx
#endif // PXFOUNDATION_PXUNIXNEONINLINEAOS_H
| 99,340 | C | 26.313995 | 117 | 0.693849 |
NVIDIA-Omniverse/PhysX/physx/include/foundation/unix/neon/PxUnixNeonAoS.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXFOUNDATION_PXUNIXNEONAOS_H
#define PXFOUNDATION_PXUNIXNEONAOS_H
// no includes here! this file should be included from PxcVecMath.h only!!!
#if !COMPILE_VECTOR_INTRINSICS
#error Vector intrinsics should not be included when using scalar implementation.
#endif
// only ARM NEON compatible platforms should reach this
#include <arm_neon.h>
namespace physx
{
namespace aos
{
typedef float32x2_t FloatV;
typedef float32x4_t Vec3V;
typedef float32x4_t Vec4V;
typedef uint32x4_t BoolV;
typedef float32x4_t QuatV;
typedef uint32x4_t VecU32V;
typedef int32x4_t VecI32V;
typedef uint16x8_t VecU16V;
typedef int16x8_t VecI16V;
typedef uint8x16_t VecU8V;
#define FloatVArg FloatV &
#define Vec3VArg Vec3V &
#define Vec4VArg Vec4V &
#define BoolVArg BoolV &
#define VecU32VArg VecU32V &
#define VecI32VArg VecI32V &
#define VecU16VArg VecU16V &
#define VecI16VArg VecI16V &
#define VecU8VArg VecU8V &
#define QuatVArg QuatV &
// KS - TODO - make an actual VecCrossV type for NEON
#define VecCrossV Vec3V
typedef VecI32V VecShiftV;
#define VecShiftVArg VecShiftV &
PX_ALIGN_PREFIX(16)
struct Mat33V
{
Mat33V()
{
}
Mat33V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2) : col0(c0), col1(c1), col2(c2)
{
}
Vec3V PX_ALIGN(16, col0);
Vec3V PX_ALIGN(16, col1);
Vec3V PX_ALIGN(16, col2);
} PX_ALIGN_SUFFIX(16);
PX_ALIGN_PREFIX(16)
struct Mat34V
{
Mat34V()
{
}
Mat34V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2, const Vec3V& c3) : col0(c0), col1(c1), col2(c2), col3(c3)
{
}
Vec3V PX_ALIGN(16, col0);
Vec3V PX_ALIGN(16, col1);
Vec3V PX_ALIGN(16, col2);
Vec3V PX_ALIGN(16, col3);
} PX_ALIGN_SUFFIX(16);
PX_ALIGN_PREFIX(16)
struct Mat43V
{
Mat43V()
{
}
Mat43V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2) : col0(c0), col1(c1), col2(c2)
{
}
Vec4V PX_ALIGN(16, col0);
Vec4V PX_ALIGN(16, col1);
Vec4V PX_ALIGN(16, col2);
} PX_ALIGN_SUFFIX(16);
PX_ALIGN_PREFIX(16)
struct Mat44V
{
Mat44V()
{
}
Mat44V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2, const Vec4V& c3) : col0(c0), col1(c1), col2(c2), col3(c3)
{
}
Vec4V PX_ALIGN(16, col0);
Vec4V PX_ALIGN(16, col1);
Vec4V PX_ALIGN(16, col2);
Vec4V PX_ALIGN(16, col3);
} PX_ALIGN_SUFFIX(16);
} // namespace aos
} // namespace physx
#endif // PXFOUNDATION_PXUNIXNEONAOS_H
| 3,968 | C | 27.970803 | 116 | 0.736895 |
NVIDIA-Omniverse/PhysX/physx/include/foundation/windows/PxWindowsTrigConstants.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_WINDOWS_TRIG_CONSTANTS_H
#define PX_WINDOWS_TRIG_CONSTANTS_H
namespace physx
{
namespace aos
{
#define PX_GLOBALCONST extern const __declspec(selectany)
__declspec(align(16)) struct PX_VECTORF32
{
float f[4];
};
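// Taylor-series coefficients for sin/cos: alternating-sign 1/(2k+1)! and 1/(2k)! terms,
// consumed by the vectorized V4Sin/V4Cos polynomial evaluation.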
PX_GLOBALCONST PX_VECTORF32 g_PXSinCoefficients0 = { { 1.0f, -0.166666667f, 8.333333333e-3f, -1.984126984e-4f } };
PX_GLOBALCONST PX_VECTORF32
g_PXSinCoefficients1 = { { 2.755731922e-6f, -2.505210839e-8f, 1.605904384e-10f, -7.647163732e-13f } };
PX_GLOBALCONST PX_VECTORF32
g_PXSinCoefficients2 = { { 2.811457254e-15f, -8.220635247e-18f, 1.957294106e-20f, -3.868170171e-23f } };
PX_GLOBALCONST PX_VECTORF32 g_PXCosCoefficients0 = { { 1.0f, -0.5f, 4.166666667e-2f, -1.388888889e-3f } };
PX_GLOBALCONST PX_VECTORF32
g_PXCosCoefficients1 = { { 2.480158730e-5f, -2.755731922e-7f, 2.087675699e-9f, -1.147074560e-11f } };
PX_GLOBALCONST PX_VECTORF32
g_PXCosCoefficients2 = { { 4.779477332e-14f, -1.561920697e-16f, 4.110317623e-19f, -8.896791392e-22f } };
PX_GLOBALCONST PX_VECTORF32 g_PXReciprocalTwoPi = { { PxInvTwoPi, PxInvTwoPi, PxInvTwoPi, PxInvTwoPi } };
PX_GLOBALCONST PX_VECTORF32 g_PXTwoPi = { { PxTwoPi, PxTwoPi, PxTwoPi, PxTwoPi } };
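// These tables are the Maclaurin series coefficients consumed by FSin/FCos in PxWindowsInlineAoS.h:
//   sin(x) ~= x - x^3/3! + x^5/5! - ... - x^23/23!
//   cos(x) ~= 1 - x^2/2! + x^4/4! - ... - x^22/22!
// e.g. -0.166666667f == -1/3! and 8.333333333e-3f == 1/5! in g_PXSinCoefficients0.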
} // namespace aos
} // namespace physx
#endif
| 2,912 | C | 46.754098 | 114 | 0.753091 |
NVIDIA-Omniverse/PhysX/physx/include/foundation/windows/PxWindowsIntrinsics.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_WINDOWS_INTRINSICS_H
#define PX_WINDOWS_INTRINSICS_H
#include "foundation/PxAssert.h"
// this file is for internal intrinsics - that is, intrinsics that are used in
// cross platform code but do not appear in the API
#if !PX_WINDOWS_FAMILY
#error "This file should only be included by Windows builds!!"
#endif
#pragma warning(push)
//'symbol' is not defined as a preprocessor macro, replacing with '0' for 'directives'
#pragma warning(disable : 4668)
#if PX_VC == 10
#pragma warning(disable : 4987) // nonstandard extension used: 'throw (...)'
#endif
#include <intrin.h>
#pragma warning(pop)
#pragma warning(push)
#pragma warning(disable : 4985) // 'symbol name': attributes not present on previous declaration
#include <math.h>
#pragma warning(pop)
#include <float.h>
// do not include for ARM target
#if !PX_ARM && !PX_A64
#include <mmintrin.h>
#endif
#pragma intrinsic(_BitScanForward)
#pragma intrinsic(_BitScanReverse)
#if !PX_DOXYGEN
namespace physx
{
#endif
/*
* Implements a memory barrier
*/
PX_FORCE_INLINE void PxMemoryBarrier()
{
_ReadWriteBarrier();
/* long Barrier;
__asm {
xchg Barrier, eax
}*/
}
/*!
Returns the index of the highest set bit. Not valid for zero arg.
*/
PX_FORCE_INLINE uint32_t PxHighestSetBitUnsafe(uint32_t v)
{
unsigned long retval;
_BitScanReverse(&retval, v);
return retval;
}
/*!
Returns the index of the lowest set bit. Undefined for zero arg.
*/
PX_FORCE_INLINE uint32_t PxLowestSetBitUnsafe(uint32_t v)
{
unsigned long retval;
_BitScanForward(&retval, v);
return retval;
}
/*!
Returns the number of leading zeros in v. Returns 32 for v=0.
*/
PX_FORCE_INLINE uint32_t PxCountLeadingZeros(uint32_t v)
{
if(v)
{
unsigned long bsr = (unsigned long)-1;
_BitScanReverse(&bsr, v);
return 31 - bsr;
}
else
return 32;
}
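// Illustrative values for the bit-scan helpers above (zero input is undefined for the *Unsafe variants):
//   PxHighestSetBitUnsafe(0x12u) == 4   // 0x12 == 0b10010, highest set bit is bit 4
//   PxLowestSetBitUnsafe(0x12u)  == 1   // lowest set bit is bit 1
//   PxCountLeadingZeros(0x1u)    == 31  // and PxCountLeadingZeros(0) == 32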
/*!
Prefetch the aligned cache line containing \c ptr+offset.
*/
#if !PX_ARM && !PX_A64
PX_FORCE_INLINE void PxPrefetchLine(const void* ptr, uint32_t offset = 0)
{
// cache line on X86/X64 is 64-bytes so a 128-byte prefetch would require 2 prefetches.
// However, we can only dispatch a limited number of prefetch instructions so we opt to prefetch just 1 cache line
/*_mm_prefetch(((const char*)ptr + offset), _MM_HINT_T0);*/
// We get slightly better performance prefetching to non-temporal addresses instead of all cache levels
_mm_prefetch(((const char*)ptr + offset), _MM_HINT_NTA);
}
#else
PX_FORCE_INLINE void PxPrefetchLine(const void* ptr, uint32_t offset = 0)
{
	// ARM cache lines are assumed to be 32 bytes here
__prefetch(((const char*)ptr + offset));
}
#endif
/*!
Prefetch \c count bytes starting at \c ptr.
*/
#if !PX_ARM
PX_FORCE_INLINE void PxPrefetch(const void* ptr, uint32_t count = 1)
{
const char* cp = (char*)ptr;
uint64_t p = size_t(ptr);
uint64_t startLine = p >> 6, endLine = (p + count - 1) >> 6;
uint64_t lines = endLine - startLine + 1;
do
{
PxPrefetchLine(cp);
cp += 64;
} while(--lines);
}
#else
PX_FORCE_INLINE void PxPrefetch(const void* ptr, uint32_t count = 1)
{
const char* cp = (char*)ptr;
uint32_t p = size_t(ptr);
uint32_t startLine = p >> 5, endLine = (p + count - 1) >> 5;
uint32_t lines = endLine - startLine + 1;
do
{
PxPrefetchLine(cp);
cp += 32;
} while(--lines);
}
#endif
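// Hedged usage sketch for the prefetch helpers ('items', 'count' and 'process' are hypothetical
// caller-side names, not part of this header):
//   for(PxU32 i = 0; i < count; i++)
//   {
//       if(i + 1 < count)
//           PxPrefetchLine(items + i + 1);   // warm the next element's cache line while working on this one
//       process(items[i]);
//   }
// PxPrefetch(ptr, bytes) simply issues one PxPrefetchLine per cache line spanned by [ptr, ptr + bytes).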
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif
| 4,979 | C | 27.786127 | 115 | 0.720024 |
NVIDIA-Omniverse/PhysX/physx/include/foundation/windows/PxWindowsFPU.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_WINDOWS_FPU_H
#define PX_WINDOWS_FPU_H
PX_INLINE physx::PxSIMDGuard::PxSIMDGuard(bool enable) : mEnabled(enable)
{
#if !PX_ARM && !PX_A64
if (enable)
{
mControlWord = _mm_getcsr();
// set default (disable exceptions: _MM_MASK_MASK) and FTZ (_MM_FLUSH_ZERO_ON), DAZ (_MM_DENORMALS_ZERO_ON: (1<<6))
_mm_setcsr(_MM_MASK_MASK | _MM_FLUSH_ZERO_ON | (1 << 6));
}
else
{
PX_ASSERT(_mm_getcsr() & _MM_FLUSH_ZERO_ON);
PX_ASSERT(_mm_getcsr() & (1 << 6));
PX_ASSERT(_mm_getcsr() & _MM_MASK_MASK);
}
#endif
}
PX_INLINE physx::PxSIMDGuard::~PxSIMDGuard()
{
#if !PX_ARM && !PX_A64
if (mEnabled)
{
// restore control word and clear any exception flags
		// (leaving exception state flags set causes an exception on the first following FP operation)
_mm_setcsr(mControlWord & ~_MM_EXCEPT_MASK);
}
#endif
}
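// Hedged usage sketch: PxSIMDGuard is a scoped RAII helper, so client code typically just instantiates
// it around a block of vectorized math and lets the destructor restore the previous MXCSR state:
//   {
//       physx::PxSIMDGuard simdGuard(true);   // masks FP exceptions, enables FTZ/DAZ (no-op on ARM)
//       // ... SIMD math ...
//   }                                         // control word restored here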
#endif
| 2,526 | C | 38.484374 | 117 | 0.730008 |
NVIDIA-Omniverse/PhysX/physx/include/foundation/windows/PxWindowsInlineAoS.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_WINDOWS_INLINE_AOS_H
#define PX_WINDOWS_INLINE_AOS_H
#if !COMPILE_VECTOR_INTRINSICS
#error Vector intrinsics should not be included when using scalar implementation.
#endif
#include "../PxVecMathSSE.h"
namespace physx
{
namespace aos
{
//////////////////////////////////////////////////////////////////////
//Test that Vec3V and FloatV are legal
//////////////////////////////////////////////////////////////////////
#define FLOAT_COMPONENTS_EQUAL_THRESHOLD 0.01f
PX_FORCE_INLINE bool isValidFloatV(const FloatV a)
{
const PxF32 x = V4ReadX(a);
const PxF32 y = V4ReadY(a);
const PxF32 z = V4ReadZ(a);
const PxF32 w = V4ReadW(a);
return (!(x != y || x != z || x != w));
/*if (
(PxAbs(x - y) < FLOAT_COMPONENTS_EQUAL_THRESHOLD) &&
(PxAbs(x - z) < FLOAT_COMPONENTS_EQUAL_THRESHOLD) &&
(PxAbs(x - w) < FLOAT_COMPONENTS_EQUAL_THRESHOLD)
)
{
return true;
}
if (
(PxAbs((x - y) / x) < FLOAT_COMPONENTS_EQUAL_THRESHOLD) &&
(PxAbs((x - z) / x) < FLOAT_COMPONENTS_EQUAL_THRESHOLD) &&
(PxAbs((x - w) / x) < FLOAT_COMPONENTS_EQUAL_THRESHOLD)
)
{
return true;
}
return false;*/
}
PX_FORCE_INLINE bool isValidVec3V(const Vec3V a)
{
//using _mm_comieq_ss to do the comparison doesn't work for NaN.
PX_ALIGN(16, PxF32 f[4]);
V4StoreA((const Vec4V&)a, f);
return f[3] == 0.0f;
}
PX_FORCE_INLINE bool isFiniteLength(const Vec3V a)
{
return !FAllEq(V4LengthSq(a), FZero());
}
PX_FORCE_INLINE bool isAligned16(void* a)
{
return(0 == ((size_t)a & 0x0f));
}
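// Summary of the invariants checked above (illustrative examples):
//   - a valid FloatV has all four lanes equal, e.g. FLoad(2.0f) -> (2,2,2,2)
//   - a valid Vec3V has its w lane cleared,    e.g. V3LoadU(PxVec3(1,2,3)) -> (1,2,3,0)
//   - isAligned16 checks the 16-byte alignment required by the aligned load/store variants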
//ASSERT_ISFINITELENGTH is deactivated because a lot of code calls a SIMD normalisation function with zero length and then ignores the result.
#if PX_DEBUG
#define ASSERT_ISVALIDVEC3V(a) PX_ASSERT(isValidVec3V(a))
#define ASSERT_ISVALIDFLOATV(a) PX_ASSERT(isValidFloatV(a))
#define ASSERT_ISALIGNED16(a) PX_ASSERT(isAligned16((void*)a))
#define ASSERT_ISFINITELENGTH(a) //PX_ASSERT(isFiniteLength(a))
#else
#define ASSERT_ISVALIDVEC3V(a)
#define ASSERT_ISVALIDFLOATV(a)
#define ASSERT_ISALIGNED16(a)
#define ASSERT_ISFINITELENGTH(a)
#endif
/////////////////////////////////////////////////////////////////////
////FUNCTIONS USED ONLY FOR ASSERTS IN VECTORISED IMPLEMENTATIONS
/////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////
// USED ONLY INTERNALLY
//////////////////////////////////////////////////////////////////////
namespace internalWindowsSimd
{
PX_FORCE_INLINE __m128 m128_I2F(__m128i n)
{
return _mm_castsi128_ps(n);
}
PX_FORCE_INLINE __m128i m128_F2I(__m128 n)
{
return _mm_castps_si128(n);
}
PX_FORCE_INLINE PxU32 BAllTrue4_R(const BoolV a)
{
const PxI32 moveMask = _mm_movemask_ps(a);
return PxU32(moveMask == 0xf);
}
PX_FORCE_INLINE PxU32 BAllTrue3_R(const BoolV a)
{
const PxI32 moveMask = _mm_movemask_ps(a);
return PxU32((moveMask & 0x7) == 0x7);
}
PX_FORCE_INLINE PxU32 BAnyTrue4_R(const BoolV a)
{
const PxI32 moveMask = _mm_movemask_ps(a);
return PxU32(moveMask != 0x0);
}
PX_FORCE_INLINE PxU32 BAnyTrue3_R(const BoolV a)
{
const PxI32 moveMask = _mm_movemask_ps(a);
return PxU32(((moveMask & 0x7) != 0x0));
}
PX_FORCE_INLINE PxU32 FiniteTestEq(const Vec4V a, const Vec4V b)
{
	// Workaround: _mm_comieq_ss returns 1 if either value is NaN, so we re-encode a and b with
	// 'true' represented as a non-NaN value before comparing.
	// There may be a cleaner way to do this in SSE.
const BoolV one = FOne();
const BoolV zero = FZero();
const BoolV a1 = V4Sel(a, one, zero);
const BoolV b1 = V4Sel(b, one, zero);
return (PxU32(
_mm_comieq_ss(a1, b1) &&
_mm_comieq_ss(_mm_shuffle_ps(a1, a1, _MM_SHUFFLE(1, 1, 1, 1)), _mm_shuffle_ps(b1, b1, _MM_SHUFFLE(1, 1, 1, 1))) &&
_mm_comieq_ss(_mm_shuffle_ps(a1, a1, _MM_SHUFFLE(2, 2, 2, 2)), _mm_shuffle_ps(b1, b1, _MM_SHUFFLE(2, 2, 2, 2))) &&
_mm_comieq_ss(_mm_shuffle_ps(a1, a1, _MM_SHUFFLE(3, 3, 3, 3)), _mm_shuffle_ps(b1, b1, _MM_SHUFFLE(3, 3, 3, 3)))));
}
PX_FORCE_INLINE bool hasZeroElementinFloatV(const FloatV a)
{
ASSERT_ISVALIDFLOATV(a);
return _mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 0, 0, 0)), FZero()) ? true : false;
}
PX_FORCE_INLINE bool hasZeroElementInVec3V(const Vec3V a)
{
return (_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 0, 0, 0)), FZero()) ||
_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(1, 1, 1, 1)), FZero()) ||
_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 2, 2, 2)), FZero()));
}
PX_FORCE_INLINE bool hasZeroElementInVec4V(const Vec4V a)
{
return (_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 0, 0, 0)), FZero()) ||
_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(1, 1, 1, 1)), FZero()) ||
_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 2, 2, 2)), FZero()) ||
_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 3, 3, 3)), FZero()));
}
const PX_ALIGN(16, PxU32 gMaskXYZ[4]) = { 0xffffffff, 0xffffffff, 0xffffffff, 0 };
} //internalWindowsSimd
namespace vecMathTests
{
// PT: this function returns an invalid Vec3V (W!=0.0f) just for unit-testing 'isValidVec3V'
PX_FORCE_INLINE Vec3V getInvalidVec3V()
{
const float f = 1.0f;
return _mm_load1_ps(&f);
}
PX_FORCE_INLINE bool allElementsEqualFloatV(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return _mm_comieq_ss(a, b) != 0;
}
PX_FORCE_INLINE bool allElementsEqualVec3V(const Vec3V a, const Vec3V b)
{
return V3AllEq(a, b) != 0;
}
PX_FORCE_INLINE bool allElementsEqualVec4V(const Vec4V a, const Vec4V b)
{
return V4AllEq(a, b) != 0;
}
PX_FORCE_INLINE bool allElementsEqualBoolV(const BoolV a, const BoolV b)
{
return internalWindowsSimd::BAllTrue4_R(VecI32V_IsEq(a, b)) != 0;
}
PX_FORCE_INLINE bool allElementsEqualVecU32V(const VecU32V a, const VecU32V b)
{
return internalWindowsSimd::BAllTrue4_R(V4IsEqU32(a, b)) != 0;
}
PX_FORCE_INLINE bool allElementsEqualVecI32V(const VecI32V a, const VecI32V b)
{
BoolV c = internalWindowsSimd::m128_I2F(
_mm_cmpeq_epi32(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b)));
return internalWindowsSimd::BAllTrue4_R(c) != 0;
}
#define VECMATH_AOS_EPSILON (1e-3f)
static const FloatV minFError = FLoad(-VECMATH_AOS_EPSILON);
static const FloatV maxFError = FLoad(VECMATH_AOS_EPSILON);
static const Vec3V minV3Error = V3Load(-VECMATH_AOS_EPSILON);
static const Vec3V maxV3Error = V3Load(VECMATH_AOS_EPSILON);
static const Vec4V minV4Error = V4Load(-VECMATH_AOS_EPSILON);
static const Vec4V maxV4Error = V4Load(VECMATH_AOS_EPSILON);
PX_FORCE_INLINE bool allElementsNearEqualFloatV(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
const FloatV c = FSub(a, b);
return _mm_comigt_ss(c, minFError) && _mm_comilt_ss(c, maxFError);
}
PX_FORCE_INLINE bool allElementsNearEqualVec3V(const Vec3V a, const Vec3V b)
{
const Vec3V c = V3Sub(a, b);
return (_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0, 0, 0, 0)), minV3Error) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0, 0, 0, 0)), maxV3Error) &&
_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1, 1, 1, 1)), minV3Error) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1, 1, 1, 1)), maxV3Error) &&
_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2, 2, 2, 2)), minV3Error) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2, 2, 2, 2)), maxV3Error));
}
PX_FORCE_INLINE bool allElementsNearEqualVec4V(const Vec4V a, const Vec4V b)
{
const Vec4V c = V4Sub(a, b);
return (_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0, 0, 0, 0)), minV4Error) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0, 0, 0, 0)), maxV4Error) &&
_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1, 1, 1, 1)), minV4Error) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1, 1, 1, 1)), maxV4Error) &&
_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2, 2, 2, 2)), minV4Error) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2, 2, 2, 2)), maxV4Error) &&
_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(3, 3, 3, 3)), minV4Error) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(3, 3, 3, 3)), maxV4Error));
}
} //vecMathTests
PX_FORCE_INLINE bool isFiniteFloatV(const FloatV a)
{
PxF32 f;
FStore(a, &f);
return PxIsFinite(f);
/*
const PxU32 badNumber = (_FPCLASS_SNAN | _FPCLASS_QNAN | _FPCLASS_NINF | _FPCLASS_PINF);
const FloatV vBadNum = FloatV_From_F32((PxF32&)badNumber);
const BoolV vMask = BAnd(vBadNum, a);
return FiniteTestEq(vMask, BFFFF()) == 1;
*/
}
PX_FORCE_INLINE bool isFiniteVec3V(const Vec3V a)
{
PX_ALIGN(16, PxF32 f[4]);
V4StoreA((Vec4V&)a, f);
return PxIsFinite(f[0]) && PxIsFinite(f[1]) && PxIsFinite(f[2]);
/*
const PxU32 badNumber = (_FPCLASS_SNAN | _FPCLASS_QNAN | _FPCLASS_NINF | _FPCLASS_PINF);
const Vec3V vBadNum = Vec3V_From_F32((PxF32&)badNumber);
const BoolV vMask = BAnd(BAnd(vBadNum, a), BTTTF());
return FiniteTestEq(vMask, BFFFF()) == 1;
*/
}
PX_FORCE_INLINE bool isFiniteVec4V(const Vec4V a)
{
PX_ALIGN(16, PxF32 f[4]);
V4StoreA(a, f);
return PxIsFinite(f[0]) && PxIsFinite(f[1]) && PxIsFinite(f[2]) && PxIsFinite(f[3]);
/*
const PxU32 badNumber = (_FPCLASS_SNAN | _FPCLASS_QNAN | _FPCLASS_NINF | _FPCLASS_PINF);
const Vec4V vBadNum = Vec4V_From_U32((PxF32&)badNumber);
const BoolV vMask = BAnd(vBadNum, a);
return FiniteTestEq(vMask, BFFFF()) == 1;
*/
}
/////////////////////////////////////////////////////////////////////
////VECTORISED FUNCTION IMPLEMENTATIONS
/////////////////////////////////////////////////////////////////////
PX_FORCE_INLINE FloatV FLoad(const PxF32 f)
{
return _mm_load1_ps(&f);
}
PX_FORCE_INLINE Vec3V V3Load(const PxF32 f)
{
return _mm_set_ps(0.0f, f, f, f);
}
PX_FORCE_INLINE Vec4V V4Load(const PxF32 f)
{
return _mm_load1_ps(&f);
}
PX_FORCE_INLINE BoolV BLoad(const bool f)
{
const PxU32 i = PxU32(-(PxI32)f);
return _mm_load1_ps((float*)&i);
}
PX_FORCE_INLINE Vec3V V3LoadA(const PxVec3& f)
{
ASSERT_ISALIGNED16(&f);
return _mm_and_ps(_mm_load_ps(&f.x), reinterpret_cast<const Vec4V&>(internalWindowsSimd::gMaskXYZ));
}
PX_FORCE_INLINE Vec3V V3LoadU(const PxVec3& f)
{
return _mm_set_ps(0.0f, f.z, f.y, f.x);
}
// w component of result is undefined
PX_FORCE_INLINE Vec3V V3LoadUnsafeA(const PxVec3& f)
{
ASSERT_ISALIGNED16(&f);
return _mm_load_ps(&f.x);
}
PX_FORCE_INLINE Vec3V V3LoadA(const PxF32* const f)
{
ASSERT_ISALIGNED16(f);
return V4ClearW(_mm_load_ps(f));
}
PX_FORCE_INLINE Vec3V V3LoadU(const PxF32* const i)
{
return _mm_set_ps(0.0f, i[2], i[1], i[0]);
}
PX_FORCE_INLINE Vec3V Vec3V_From_Vec4V(Vec4V v)
{
return V4ClearW(v);
}
PX_FORCE_INLINE Vec3V Vec3V_From_Vec4V_WUndefined(const Vec4V v)
{
return v;
}
PX_FORCE_INLINE Vec4V Vec4V_From_Vec3V(Vec3V f)
{
return f; // ok if it is implemented as the same type.
}
PX_FORCE_INLINE Vec4V Vec4V_From_FloatV(FloatV f)
{
return f;
}
PX_FORCE_INLINE Vec3V Vec3V_From_FloatV(FloatV f)
{
return Vec3V_From_Vec4V(Vec4V_From_FloatV(f));
}
PX_FORCE_INLINE Vec3V Vec3V_From_FloatV_WUndefined(FloatV f)
{
return Vec3V_From_Vec4V_WUndefined(Vec4V_From_FloatV(f));
}
PX_FORCE_INLINE Vec4V Vec4V_From_PxVec3_WUndefined(const PxVec3& f)
{
return _mm_set_ps(0.0f, f.z, f.y, f.x);
}
PX_FORCE_INLINE Vec4V V4LoadA(const PxF32* const f)
{
ASSERT_ISALIGNED16(f);
return _mm_load_ps(f);
}
PX_FORCE_INLINE void V4StoreA(const Vec4V a, PxF32* f)
{
ASSERT_ISALIGNED16(f);
_mm_store_ps(f, a);
}
PX_FORCE_INLINE void V4StoreU(const Vec4V a, PxF32* f)
{
_mm_storeu_ps(f, a);
}
PX_FORCE_INLINE void BStoreA(const BoolV a, PxU32* f)
{
ASSERT_ISALIGNED16(f);
_mm_store_ps((PxF32*)f, a);
}
PX_FORCE_INLINE void U4StoreA(const VecU32V uv, PxU32* u)
{
ASSERT_ISALIGNED16(u);
_mm_store_ps((PxF32*)u, uv);
}
PX_FORCE_INLINE void I4StoreA(const VecI32V iv, PxI32* i)
{
ASSERT_ISALIGNED16(i);
_mm_store_ps((PxF32*)i, iv);
}
PX_FORCE_INLINE Vec4V V4LoadU(const PxF32* const f)
{
return _mm_loadu_ps(f);
}
PX_FORCE_INLINE BoolV BLoad(const bool* const f)
{
const PX_ALIGN(16, PxU32 b[4]) = { PxU32(-(PxI32)f[0]), PxU32(-(PxI32)f[1]),
PxU32(-(PxI32)f[2]), PxU32(-(PxI32)f[3]) };
return _mm_load_ps((float*)&b);
}
PX_FORCE_INLINE void FStore(const FloatV a, PxF32* PX_RESTRICT f)
{
ASSERT_ISVALIDFLOATV(a);
_mm_store_ss(f, a);
}
PX_FORCE_INLINE void V3StoreA(const Vec3V a, PxVec3& f)
{
ASSERT_ISALIGNED16(&f);
PX_ALIGN(16, PxF32 f2[4]);
_mm_store_ps(f2, a);
f = PxVec3(f2[0], f2[1], f2[2]);
}
PX_FORCE_INLINE void Store_From_BoolV(const BoolV b, PxU32* b2)
{
_mm_store_ss((PxF32*)b2, b);
}
PX_FORCE_INLINE void V3StoreU(const Vec3V a, PxVec3& f)
{
PX_ALIGN(16, PxF32 f2[4]);
_mm_store_ps(f2, a);
f = PxVec3(f2[0], f2[1], f2[2]);
}
PX_FORCE_INLINE Mat33V Mat33V_From_PxMat33(const PxMat33& m)
{
return Mat33V(V3LoadU(m.column0), V3LoadU(m.column1), V3LoadU(m.column2));
}
PX_FORCE_INLINE void PxMat33_From_Mat33V(const Mat33V& m, PxMat33& out)
{
ASSERT_ISALIGNED16(&out);
V3StoreU(m.col0, out.column0);
V3StoreU(m.col1, out.column1);
V3StoreU(m.col2, out.column2);
}
//////////////////////////////////
// FLOATV
//////////////////////////////////
PX_FORCE_INLINE FloatV FZero()
{
return _mm_setzero_ps();
}
PX_FORCE_INLINE FloatV FOne()
{
return FLoad(1.0f);
}
PX_FORCE_INLINE FloatV FHalf()
{
return FLoad(0.5f);
}
PX_FORCE_INLINE FloatV FEps()
{
return FLoad(PX_EPS_REAL);
}
PX_FORCE_INLINE FloatV FEps6()
{
return FLoad(1e-6f);
}
PX_FORCE_INLINE FloatV FMax()
{
return FLoad(PX_MAX_REAL);
}
PX_FORCE_INLINE FloatV FNegMax()
{
return FLoad(-PX_MAX_REAL);
}
PX_FORCE_INLINE FloatV IZero()
{
const PxU32 zero = 0;
return _mm_load1_ps((PxF32*)&zero);
}
PX_FORCE_INLINE FloatV IOne()
{
const PxU32 one = 1;
return _mm_load1_ps((PxF32*)&one);
}
PX_FORCE_INLINE FloatV ITwo()
{
const PxU32 two = 2;
return _mm_load1_ps((PxF32*)&two);
}
PX_FORCE_INLINE FloatV IThree()
{
const PxU32 three = 3;
return _mm_load1_ps((PxF32*)&three);
}
PX_FORCE_INLINE FloatV IFour()
{
const PxU32 four = 4;
return _mm_load1_ps((PxF32*)&four);
}
PX_FORCE_INLINE FloatV FNeg(const FloatV f)
{
ASSERT_ISVALIDFLOATV(f);
return _mm_sub_ps(_mm_setzero_ps(), f);
}
PX_FORCE_INLINE FloatV FAdd(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return _mm_add_ps(a, b);
}
PX_FORCE_INLINE FloatV FSub(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return _mm_sub_ps(a, b);
}
PX_FORCE_INLINE FloatV FMul(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return _mm_mul_ps(a, b);
}
PX_FORCE_INLINE FloatV FDiv(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return _mm_div_ps(a, b);
}
PX_FORCE_INLINE FloatV FDivFast(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return _mm_mul_ps(a, _mm_rcp_ps(b));
}
PX_FORCE_INLINE FloatV FRecip(const FloatV a)
{
ASSERT_ISVALIDFLOATV(a);
return _mm_div_ps(FOne(), a);
}
PX_FORCE_INLINE FloatV FRecipFast(const FloatV a)
{
return _mm_rcp_ps(a);
}
PX_FORCE_INLINE FloatV FRsqrt(const FloatV a)
{
ASSERT_ISVALIDFLOATV(a);
return _mm_div_ps(FOne(), _mm_sqrt_ps(a));
}
PX_FORCE_INLINE FloatV FSqrt(const FloatV a)
{
ASSERT_ISVALIDFLOATV(a);
return _mm_sqrt_ps(a);
}
PX_FORCE_INLINE FloatV FRsqrtFast(const FloatV a)
{
ASSERT_ISVALIDFLOATV(a);
return _mm_rsqrt_ps(a);
}
PX_FORCE_INLINE FloatV FScaleAdd(const FloatV a, const FloatV b, const FloatV c)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
ASSERT_ISVALIDFLOATV(c);
return FAdd(FMul(a, b), c);
}
PX_FORCE_INLINE FloatV FNegScaleSub(const FloatV a, const FloatV b, const FloatV c)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
ASSERT_ISVALIDFLOATV(c);
return FSub(c, FMul(a, b));
}
PX_FORCE_INLINE FloatV FAbs(const FloatV a)
{
ASSERT_ISVALIDFLOATV(a);
PX_ALIGN(16, const static PxU32 absMask[4]) = { 0x7fFFffFF, 0x7fFFffFF, 0x7fFFffFF, 0x7fFFffFF };
return _mm_and_ps(a, _mm_load_ps((PxF32*)absMask));
}
PX_FORCE_INLINE FloatV FSel(const BoolV c, const FloatV a, const FloatV b)
{
PX_ASSERT(vecMathTests::allElementsEqualBoolV(c, BTTTT()) ||
vecMathTests::allElementsEqualBoolV(c, BFFFF()));
ASSERT_ISVALIDFLOATV(_mm_or_ps(_mm_andnot_ps(c, b), _mm_and_ps(c, a)));
return _mm_or_ps(_mm_andnot_ps(c, b), _mm_and_ps(c, a));
}
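// FSel is a per-lane select: it returns a where c is true and b where c is false. Illustrative sketch:
//   const FloatV biggest = FSel(FIsGrtr(x, y), x, y);   // equivalent to FMax(x, y) for non-NaN inputs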
PX_FORCE_INLINE BoolV FIsGrtr(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return _mm_cmpgt_ps(a, b);
}
PX_FORCE_INLINE BoolV FIsGrtrOrEq(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return _mm_cmpge_ps(a, b);
}
PX_FORCE_INLINE BoolV FIsEq(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return _mm_cmpeq_ps(a, b);
}
PX_FORCE_INLINE FloatV FMax(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return _mm_max_ps(a, b);
}
PX_FORCE_INLINE FloatV FMin(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return _mm_min_ps(a, b);
}
PX_FORCE_INLINE FloatV FClamp(const FloatV a, const FloatV minV, const FloatV maxV)
{
ASSERT_ISVALIDFLOATV(minV);
ASSERT_ISVALIDFLOATV(maxV);
return _mm_max_ps(_mm_min_ps(a, maxV), minV);
}
PX_FORCE_INLINE PxU32 FAllGrtr(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return PxU32(_mm_comigt_ss(a, b));
}
PX_FORCE_INLINE PxU32 FAllGrtrOrEq(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return PxU32(_mm_comige_ss(a, b));
}
PX_FORCE_INLINE PxU32 FAllEq(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
return PxU32(_mm_comieq_ss(a, b));
}
PX_FORCE_INLINE FloatV FRound(const FloatV a)
{
ASSERT_ISVALIDFLOATV(a);
// return _mm_round_ps(a, 0x0);
const FloatV half = FLoad(0.5f);
const __m128 signBit = _mm_cvtepi32_ps(_mm_srli_epi32(_mm_cvtps_epi32(a), 31));
const FloatV aRound = FSub(FAdd(a, half), signBit);
__m128i tmp = _mm_cvttps_epi32(aRound);
return _mm_cvtepi32_ps(tmp);
}
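// FRound rounds to the nearest whole number (ties rounded away from zero), e.g. illustratively:
//   FRound(FLoad(1.3f))  ~= FLoad(1.0f)
//   FRound(FLoad(-1.7f)) ~= FLoad(-2.0f)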
PX_FORCE_INLINE FloatV FSin(const FloatV a)
{
ASSERT_ISVALIDFLOATV(a);
	// Wrap the input angle into [-PxPi, PxPi] so the series below is evaluated in its accurate range
const FloatV recipTwoPi = V4LoadA(g_PXReciprocalTwoPi.f);
const FloatV twoPi = V4LoadA(g_PXTwoPi.f);
const FloatV tmp = FMul(a, recipTwoPi);
const FloatV b = FRound(tmp);
const FloatV V1 = FNegScaleSub(twoPi, b, a);
// sin(V) ~= V - V^3 / 3! + V^5 / 5! - V^7 / 7! + V^9 / 9! - V^11 / 11! + V^13 / 13! -
// V^15 / 15! + V^17 / 17! - V^19 / 19! + V^21 / 21! - V^23 / 23! (for -PI <= V < PI)
const FloatV V2 = FMul(V1, V1);
const FloatV V3 = FMul(V2, V1);
const FloatV V5 = FMul(V3, V2);
const FloatV V7 = FMul(V5, V2);
const FloatV V9 = FMul(V7, V2);
const FloatV V11 = FMul(V9, V2);
const FloatV V13 = FMul(V11, V2);
const FloatV V15 = FMul(V13, V2);
const FloatV V17 = FMul(V15, V2);
const FloatV V19 = FMul(V17, V2);
const FloatV V21 = FMul(V19, V2);
const FloatV V23 = FMul(V21, V2);
const Vec4V sinCoefficients0 = V4LoadA(g_PXSinCoefficients0.f);
const Vec4V sinCoefficients1 = V4LoadA(g_PXSinCoefficients1.f);
const Vec4V sinCoefficients2 = V4LoadA(g_PXSinCoefficients2.f);
const FloatV S1 = V4GetY(sinCoefficients0);
const FloatV S2 = V4GetZ(sinCoefficients0);
const FloatV S3 = V4GetW(sinCoefficients0);
const FloatV S4 = V4GetX(sinCoefficients1);
const FloatV S5 = V4GetY(sinCoefficients1);
const FloatV S6 = V4GetZ(sinCoefficients1);
const FloatV S7 = V4GetW(sinCoefficients1);
const FloatV S8 = V4GetX(sinCoefficients2);
const FloatV S9 = V4GetY(sinCoefficients2);
const FloatV S10 = V4GetZ(sinCoefficients2);
const FloatV S11 = V4GetW(sinCoefficients2);
FloatV Result;
Result = FScaleAdd(S1, V3, V1);
Result = FScaleAdd(S2, V5, Result);
Result = FScaleAdd(S3, V7, Result);
Result = FScaleAdd(S4, V9, Result);
Result = FScaleAdd(S5, V11, Result);
Result = FScaleAdd(S6, V13, Result);
Result = FScaleAdd(S7, V15, Result);
Result = FScaleAdd(S8, V17, Result);
Result = FScaleAdd(S9, V19, Result);
Result = FScaleAdd(S10, V21, Result);
Result = FScaleAdd(S11, V23, Result);
return Result;
}
PX_FORCE_INLINE FloatV FCos(const FloatV a)
{
ASSERT_ISVALIDFLOATV(a);
	// Wrap the input angle into [-PxPi, PxPi] so the series below is evaluated in its accurate range
const FloatV recipTwoPi = V4LoadA(g_PXReciprocalTwoPi.f);
const FloatV twoPi = V4LoadA(g_PXTwoPi.f);
const FloatV tmp = FMul(a, recipTwoPi);
const FloatV b = FRound(tmp);
const FloatV V1 = FNegScaleSub(twoPi, b, a);
// cos(V) ~= 1 - V^2 / 2! + V^4 / 4! - V^6 / 6! + V^8 / 8! - V^10 / 10! + V^12 / 12! -
// V^14 / 14! + V^16 / 16! - V^18 / 18! + V^20 / 20! - V^22 / 22! (for -PI <= V < PI)
const FloatV V2 = FMul(V1, V1);
const FloatV V4 = FMul(V2, V2);
const FloatV V6 = FMul(V4, V2);
const FloatV V8 = FMul(V4, V4);
const FloatV V10 = FMul(V6, V4);
const FloatV V12 = FMul(V6, V6);
const FloatV V14 = FMul(V8, V6);
const FloatV V16 = FMul(V8, V8);
const FloatV V18 = FMul(V10, V8);
const FloatV V20 = FMul(V10, V10);
const FloatV V22 = FMul(V12, V10);
const Vec4V cosCoefficients0 = V4LoadA(g_PXCosCoefficients0.f);
const Vec4V cosCoefficients1 = V4LoadA(g_PXCosCoefficients1.f);
const Vec4V cosCoefficients2 = V4LoadA(g_PXCosCoefficients2.f);
const FloatV C1 = V4GetY(cosCoefficients0);
const FloatV C2 = V4GetZ(cosCoefficients0);
const FloatV C3 = V4GetW(cosCoefficients0);
const FloatV C4 = V4GetX(cosCoefficients1);
const FloatV C5 = V4GetY(cosCoefficients1);
const FloatV C6 = V4GetZ(cosCoefficients1);
const FloatV C7 = V4GetW(cosCoefficients1);
const FloatV C8 = V4GetX(cosCoefficients2);
const FloatV C9 = V4GetY(cosCoefficients2);
const FloatV C10 = V4GetZ(cosCoefficients2);
const FloatV C11 = V4GetW(cosCoefficients2);
FloatV Result;
Result = FScaleAdd(C1, V2, V4One());
Result = FScaleAdd(C2, V4, Result);
Result = FScaleAdd(C3, V6, Result);
Result = FScaleAdd(C4, V8, Result);
Result = FScaleAdd(C5, V10, Result);
Result = FScaleAdd(C6, V12, Result);
Result = FScaleAdd(C7, V14, Result);
Result = FScaleAdd(C8, V16, Result);
Result = FScaleAdd(C9, V18, Result);
Result = FScaleAdd(C10, V20, Result);
Result = FScaleAdd(C11, V22, Result);
return Result;
}
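// Hedged usage sketch for the polynomial approximations above (PxPi is the constant from PxMath.h):
//   const FloatV angle = FLoad(0.5f * PxPi);
//   const FloatV s = FSin(angle);   // ~1.0f
//   const FloatV c = FCos(angle);   // ~0.0f
// Both functions wrap their argument first (see FRound above), so inputs outside [-PxPi, PxPi] are legal.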
PX_FORCE_INLINE PxU32 FOutOfBounds(const FloatV a, const FloatV min, const FloatV max)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(min);
ASSERT_ISVALIDFLOATV(max);
const BoolV c = BOr(FIsGrtr(a, max), FIsGrtr(min, a));
return PxU32(!BAllEqFFFF(c));
}
PX_FORCE_INLINE PxU32 FInBounds(const FloatV a, const FloatV min, const FloatV max)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(min);
ASSERT_ISVALIDFLOATV(max);
const BoolV c = BAnd(FIsGrtrOrEq(a, min), FIsGrtrOrEq(max, a));
return BAllEqTTTT(c);
}
PX_FORCE_INLINE PxU32 FOutOfBounds(const FloatV a, const FloatV bounds)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(bounds);
return FOutOfBounds(a, FNeg(bounds), bounds);
}
PX_FORCE_INLINE PxU32 FInBounds(const FloatV a, const FloatV bounds)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(bounds);
return FInBounds(a, FNeg(bounds), bounds);
}
//////////////////////////////////
// VEC3V
//////////////////////////////////
PX_FORCE_INLINE Vec3V V3Splat(const FloatV f)
{
ASSERT_ISVALIDFLOATV(f);
const __m128 zero = V3Zero();
const __m128 fff0 = _mm_move_ss(f, zero);
return _mm_shuffle_ps(fff0, fff0, _MM_SHUFFLE(0, 1, 2, 3));
}
PX_FORCE_INLINE Vec3V V3Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z)
{
ASSERT_ISVALIDFLOATV(x);
ASSERT_ISVALIDFLOATV(y);
ASSERT_ISVALIDFLOATV(z);
// static on zero causes compiler crash on x64 debug_opt
const __m128 zero = V3Zero();
const __m128 xy = _mm_move_ss(x, y);
const __m128 z0 = _mm_move_ss(zero, z);
return _mm_shuffle_ps(xy, z0, _MM_SHUFFLE(1, 0, 0, 1));
}
PX_FORCE_INLINE Vec3V V3UnitX()
{
const PX_ALIGN(16, PxF32 x[4]) = { 1.0f, 0.0f, 0.0f, 0.0f };
const __m128 x128 = _mm_load_ps(x);
return x128;
}
PX_FORCE_INLINE Vec3V V3UnitY()
{
const PX_ALIGN(16, PxF32 y[4]) = { 0.0f, 1.0f, 0.0f, 0.0f };
const __m128 y128 = _mm_load_ps(y);
return y128;
}
PX_FORCE_INLINE Vec3V V3UnitZ()
{
const PX_ALIGN(16, PxF32 z[4]) = { 0.0f, 0.0f, 1.0f, 0.0f };
const __m128 z128 = _mm_load_ps(z);
return z128;
}
PX_FORCE_INLINE FloatV V3GetX(const Vec3V f)
{
ASSERT_ISVALIDVEC3V(f);
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(0, 0, 0, 0));
}
PX_FORCE_INLINE FloatV V3GetY(const Vec3V f)
{
ASSERT_ISVALIDVEC3V(f);
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(1, 1, 1, 1));
}
PX_FORCE_INLINE FloatV V3GetZ(const Vec3V f)
{
ASSERT_ISVALIDVEC3V(f);
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(2, 2, 2, 2));
}
PX_FORCE_INLINE Vec3V V3SetX(const Vec3V v, const FloatV f)
{
ASSERT_ISVALIDVEC3V(v);
ASSERT_ISVALIDFLOATV(f);
return V4Sel(BFTTT(), v, f);
}
PX_FORCE_INLINE Vec3V V3SetY(const Vec3V v, const FloatV f)
{
ASSERT_ISVALIDVEC3V(v);
ASSERT_ISVALIDFLOATV(f);
return V4Sel(BTFTT(), v, f);
}
PX_FORCE_INLINE Vec3V V3SetZ(const Vec3V v, const FloatV f)
{
ASSERT_ISVALIDVEC3V(v);
ASSERT_ISVALIDFLOATV(f);
return V4Sel(BTTFT(), v, f);
}
PX_FORCE_INLINE Vec3V V3ColX(const Vec3V a, const Vec3V b, const Vec3V c)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
ASSERT_ISVALIDVEC3V(c);
Vec3V r = _mm_shuffle_ps(a, c, _MM_SHUFFLE(3, 0, 3, 0));
return V3SetY(r, V3GetX(b));
}
PX_FORCE_INLINE Vec3V V3ColY(const Vec3V a, const Vec3V b, const Vec3V c)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
ASSERT_ISVALIDVEC3V(c);
Vec3V r = _mm_shuffle_ps(a, c, _MM_SHUFFLE(3, 1, 3, 1));
return V3SetY(r, V3GetY(b));
}
PX_FORCE_INLINE Vec3V V3ColZ(const Vec3V a, const Vec3V b, const Vec3V c)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
ASSERT_ISVALIDVEC3V(c);
Vec3V r = _mm_shuffle_ps(a, c, _MM_SHUFFLE(3, 2, 3, 2));
return V3SetY(r, V3GetZ(b));
}
PX_FORCE_INLINE Vec3V V3Zero()
{
return _mm_setzero_ps();
}
PX_FORCE_INLINE Vec3V V3One()
{
return V3Load(1.0f);
}
PX_FORCE_INLINE Vec3V V3Eps()
{
return V3Load(PX_EPS_REAL);
}
PX_FORCE_INLINE Vec3V V3Neg(const Vec3V f)
{
ASSERT_ISVALIDVEC3V(f);
return _mm_sub_ps(_mm_setzero_ps(), f);
}
PX_FORCE_INLINE Vec3V V3Add(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return _mm_add_ps(a, b);
}
PX_FORCE_INLINE Vec3V V3Sub(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return _mm_sub_ps(a, b);
}
PX_FORCE_INLINE Vec3V V3Scale(const Vec3V a, const FloatV b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDFLOATV(b);
return _mm_mul_ps(a, b);
}
PX_FORCE_INLINE Vec3V V3Mul(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return _mm_mul_ps(a, b);
}
PX_FORCE_INLINE Vec3V V3ScaleInv(const Vec3V a, const FloatV b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDFLOATV(b);
return _mm_div_ps(a, b);
}
PX_FORCE_INLINE Vec3V V3Div(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return V4ClearW(_mm_div_ps(a, b));
}
PX_FORCE_INLINE Vec3V V3ScaleInvFast(const Vec3V a, const FloatV b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDFLOATV(b);
return _mm_mul_ps(a, _mm_rcp_ps(b));
}
PX_FORCE_INLINE Vec3V V3DivFast(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return V4ClearW(_mm_mul_ps(a, _mm_rcp_ps(b)));
}
PX_FORCE_INLINE Vec3V V3Recip(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
const __m128 zero = V3Zero();
const __m128 tttf = BTTTF();
const __m128 recipA = _mm_div_ps(V3One(), a);
return V4Sel(tttf, recipA, zero);
}
PX_FORCE_INLINE Vec3V V3RecipFast(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
const __m128 zero = V3Zero();
const __m128 tttf = BTTTF();
const __m128 recipA = _mm_rcp_ps(a);
return V4Sel(tttf, recipA, zero);
}
PX_FORCE_INLINE Vec3V V3Rsqrt(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
const __m128 zero = V3Zero();
const __m128 tttf = BTTTF();
const __m128 recipA = _mm_div_ps(V3One(), _mm_sqrt_ps(a));
return V4Sel(tttf, recipA, zero);
}
PX_FORCE_INLINE Vec3V V3RsqrtFast(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
const __m128 zero = V3Zero();
const __m128 tttf = BTTTF();
const __m128 recipA = _mm_rsqrt_ps(a);
return V4Sel(tttf, recipA, zero);
}
PX_FORCE_INLINE Vec3V V3ScaleAdd(const Vec3V a, const FloatV b, const Vec3V c)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDFLOATV(b);
ASSERT_ISVALIDVEC3V(c);
return V3Add(V3Scale(a, b), c);
}
PX_FORCE_INLINE Vec3V V3NegScaleSub(const Vec3V a, const FloatV b, const Vec3V c)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDFLOATV(b);
ASSERT_ISVALIDVEC3V(c);
return V3Sub(c, V3Scale(a, b));
}
PX_FORCE_INLINE Vec3V V3MulAdd(const Vec3V a, const Vec3V b, const Vec3V c)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
ASSERT_ISVALIDVEC3V(c);
return V3Add(V3Mul(a, b), c);
}
PX_FORCE_INLINE Vec3V V3NegMulSub(const Vec3V a, const Vec3V b, const Vec3V c)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
ASSERT_ISVALIDVEC3V(c);
return V3Sub(c, V3Mul(a, b));
}
PX_FORCE_INLINE Vec3V V3Abs(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
return V3Max(a, V3Neg(a));
}
PX_FORCE_INLINE FloatV V3Dot(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
const __m128 t0 = _mm_mul_ps(a, b); // aw*bw | az*bz | ay*by | ax*bx
const __m128 t1 = _mm_shuffle_ps(t0, t0, _MM_SHUFFLE(1,0,3,2)); // ay*by | ax*bx | aw*bw | az*bz
const __m128 t2 = _mm_add_ps(t0, t1); // ay*by + aw*bw | ax*bx + az*bz | aw*bw + ay*by | az*bz + ax*bx
const __m128 t3 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(2,3,0,1)); // ax*bx + az*bz | ay*by + aw*bw | az*bz + ax*bx | aw*bw + ay*by
return _mm_add_ps(t3, t2); // ax*bx + az*bz + ay*by + aw*bw
// ay*by + aw*bw + ax*bx + az*bz
// az*bz + ax*bx + aw*bw + ay*by
// aw*bw + ay*by + az*bz + ax*bx
}
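// Note: a valid Vec3V has w == 0, so the w lane contributes nothing above and every lane of the result
// holds the 3D dot product. Illustrative values:
//   V3Dot(V3LoadU(PxVec3(1,2,3)), V3LoadU(PxVec3(4,5,6))) -> splat of 32.0f   (1*4 + 2*5 + 3*6)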
PX_FORCE_INLINE Vec3V V3Cross(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
/* if(0)
{
const __m128 r1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 0, 2)); // z,x,y,w
const __m128 r2 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1)); // y,z,x,w
const __m128 l1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1)); // y,z,x,w
const __m128 l2 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 1, 0, 2)); // z,x,y,w
return _mm_sub_ps(_mm_mul_ps(l1, l2), _mm_mul_ps(r1, r2));
}
else*/
{
const __m128 b0 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3,0,2,1));
const __m128 a1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3,0,2,1));
__m128 v = _mm_mul_ps(a1, b);
v = _mm_sub_ps(_mm_mul_ps(a, b0), v);
__m128 res = _mm_shuffle_ps(v, v, _MM_SHUFFLE(3,0,2,1));
ASSERT_ISVALIDVEC3V(res);
return res;
}
}
PX_FORCE_INLINE VecCrossV V3PrepareCross(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
VecCrossV v;
v.mR1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 0, 2)); // z,x,y,w
v.mL1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1)); // y,z,x,w
return v;
}
PX_FORCE_INLINE Vec3V V3Cross(const VecCrossV& a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(b);
const __m128 r2 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1)); // y,z,x,w
const __m128 l2 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 1, 0, 2)); // z,x,y,w
return _mm_sub_ps(_mm_mul_ps(a.mL1, l2), _mm_mul_ps(a.mR1, r2));
}
PX_FORCE_INLINE Vec3V V3Cross(const Vec3V a, const VecCrossV& b)
{
ASSERT_ISVALIDVEC3V(a);
const __m128 r2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1)); // y,z,x,w
const __m128 l2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 0, 2)); // z,x,y,w
return _mm_sub_ps(_mm_mul_ps(b.mR1, r2), _mm_mul_ps(b.mL1, l2));
}
PX_FORCE_INLINE Vec3V V3Cross(const VecCrossV& a, const VecCrossV& b)
{
return _mm_sub_ps(_mm_mul_ps(a.mL1, b.mR1), _mm_mul_ps(a.mR1, b.mL1));
}
PX_FORCE_INLINE FloatV V3Length(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
return _mm_sqrt_ps(V3Dot(a, a));
}
PX_FORCE_INLINE FloatV V3LengthSq(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
return V3Dot(a, a);
}
PX_FORCE_INLINE Vec3V V3Normalize(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISFINITELENGTH(a);
return V3ScaleInv(a, _mm_sqrt_ps(V3Dot(a, a)));
}
PX_FORCE_INLINE Vec3V V3NormalizeFast(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISFINITELENGTH(a);
return V3Scale(a, _mm_rsqrt_ps(V3Dot(a, a)));
}
PX_FORCE_INLINE Vec3V V3NormalizeSafe(const Vec3V a, const Vec3V unsafeReturnValue)
{
ASSERT_ISVALIDVEC3V(a);
const __m128 eps = FEps();
const __m128 length = V3Length(a);
const __m128 isGreaterThanZero = FIsGrtr(length, eps);
return V3Sel(isGreaterThanZero, V3ScaleInv(a, length), unsafeReturnValue);
}
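// Hedged usage sketch: V3NormalizeSafe is the variant to use when the input may be (near) zero length:
//   const Vec3V dir = V3NormalizeSafe(delta, V3UnitZ());   // falls back to +Z when |delta| <= PX_EPS_REAL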
PX_FORCE_INLINE Vec3V V3Sel(const BoolV c, const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(_mm_or_ps(_mm_andnot_ps(c, b), _mm_and_ps(c, a)));
return _mm_or_ps(_mm_andnot_ps(c, b), _mm_and_ps(c, a));
}
PX_FORCE_INLINE BoolV V3IsGrtr(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return _mm_cmpgt_ps(a, b);
}
PX_FORCE_INLINE BoolV V3IsGrtrOrEq(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return _mm_cmpge_ps(a, b);
}
PX_FORCE_INLINE BoolV V3IsEq(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return _mm_cmpeq_ps(a, b);
}
PX_FORCE_INLINE Vec3V V3Max(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return _mm_max_ps(a, b);
}
PX_FORCE_INLINE Vec3V V3Min(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return _mm_min_ps(a, b);
}
PX_FORCE_INLINE FloatV V3ExtractMax(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
const __m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 2, 2, 2));
return _mm_max_ps(_mm_max_ps(shuf1, shuf2), shuf3);
}
PX_FORCE_INLINE FloatV V3ExtractMin(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
const __m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 2, 2, 2));
return _mm_min_ps(_mm_min_ps(shuf1, shuf2), shuf3);
}
//// if(a > 0.0f) return 1.0f; else if a == 0.f return 0.f, else return -1.f;
// PX_FORCE_INLINE Vec3V V3MathSign(const Vec3V a)
//{
// VECMATHAOS_ASSERT(isValidVec3V(a));
//
// const __m128i ai = _mm_cvtps_epi32(a);
// const __m128i bi = _mm_cvtps_epi32(V3Neg(a));
// const __m128 aa = _mm_cvtepi32_ps(_mm_srai_epi32(ai, 31));
// const __m128 bb = _mm_cvtepi32_ps(_mm_srai_epi32(bi, 31));
// return _mm_or_ps(aa, bb);
//}
// return (a >= 0.0f) ? 1.0f : -1.0f;
PX_FORCE_INLINE Vec3V V3Sign(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
const __m128 zero = V3Zero();
const __m128 one = V3One();
const __m128 none = V3Neg(one);
return V3Sel(V3IsGrtrOrEq(a, zero), one, none);
}
PX_FORCE_INLINE Vec3V V3Clamp(const Vec3V a, const Vec3V minV, const Vec3V maxV)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(minV);
ASSERT_ISVALIDVEC3V(maxV);
return V3Max(V3Min(a, maxV), minV);
}
PX_FORCE_INLINE PxU32 V3AllGrtr(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return internalWindowsSimd::BAllTrue3_R(V4IsGrtr(a, b));
}
PX_FORCE_INLINE PxU32 V3AllGrtrOrEq(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return internalWindowsSimd::BAllTrue3_R(V4IsGrtrOrEq(a, b));
}
PX_FORCE_INLINE PxU32 V3AllEq(const Vec3V a, const Vec3V b)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(b);
return internalWindowsSimd::BAllTrue3_R(V4IsEq(a, b));
}
PX_FORCE_INLINE Vec3V V3Round(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
// return _mm_round_ps(a, 0x0);
const Vec3V half = V3Load(0.5f);
const __m128 signBit = _mm_cvtepi32_ps(_mm_srli_epi32(_mm_cvtps_epi32(a), 31));
const Vec3V aRound = V3Sub(V3Add(a, half), signBit);
__m128i tmp = _mm_cvttps_epi32(aRound);
return _mm_cvtepi32_ps(tmp);
}
PX_FORCE_INLINE Vec3V V3Sin(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
	// Wrap each input angle into [-PxPi, PxPi] so the series below is evaluated in its accurate range
const Vec4V recipTwoPi = V4LoadA(g_PXReciprocalTwoPi.f);
const Vec4V twoPi = V4LoadA(g_PXTwoPi.f);
const Vec3V tmp = V3Scale(a, recipTwoPi);
const Vec3V b = V3Round(tmp);
const Vec3V V1 = V3NegScaleSub(b, twoPi, a);
// sin(V) ~= V - V^3 / 3! + V^5 / 5! - V^7 / 7! + V^9 / 9! - V^11 / 11! + V^13 / 13! -
// V^15 / 15! + V^17 / 17! - V^19 / 19! + V^21 / 21! - V^23 / 23! (for -PI <= V < PI)
const Vec3V V2 = V3Mul(V1, V1);
const Vec3V V3 = V3Mul(V2, V1);
const Vec3V V5 = V3Mul(V3, V2);
const Vec3V V7 = V3Mul(V5, V2);
const Vec3V V9 = V3Mul(V7, V2);
const Vec3V V11 = V3Mul(V9, V2);
const Vec3V V13 = V3Mul(V11, V2);
const Vec3V V15 = V3Mul(V13, V2);
const Vec3V V17 = V3Mul(V15, V2);
const Vec3V V19 = V3Mul(V17, V2);
const Vec3V V21 = V3Mul(V19, V2);
const Vec3V V23 = V3Mul(V21, V2);
const Vec4V sinCoefficients0 = V4LoadA(g_PXSinCoefficients0.f);
const Vec4V sinCoefficients1 = V4LoadA(g_PXSinCoefficients1.f);
const Vec4V sinCoefficients2 = V4LoadA(g_PXSinCoefficients2.f);
const FloatV S1 = V4GetY(sinCoefficients0);
const FloatV S2 = V4GetZ(sinCoefficients0);
const FloatV S3 = V4GetW(sinCoefficients0);
const FloatV S4 = V4GetX(sinCoefficients1);
const FloatV S5 = V4GetY(sinCoefficients1);
const FloatV S6 = V4GetZ(sinCoefficients1);
const FloatV S7 = V4GetW(sinCoefficients1);
const FloatV S8 = V4GetX(sinCoefficients2);
const FloatV S9 = V4GetY(sinCoefficients2);
const FloatV S10 = V4GetZ(sinCoefficients2);
const FloatV S11 = V4GetW(sinCoefficients2);
Vec3V Result;
Result = V3ScaleAdd(V3, S1, V1);
Result = V3ScaleAdd(V5, S2, Result);
Result = V3ScaleAdd(V7, S3, Result);
Result = V3ScaleAdd(V9, S4, Result);
Result = V3ScaleAdd(V11, S5, Result);
Result = V3ScaleAdd(V13, S6, Result);
Result = V3ScaleAdd(V15, S7, Result);
Result = V3ScaleAdd(V17, S8, Result);
Result = V3ScaleAdd(V19, S9, Result);
Result = V3ScaleAdd(V21, S10, Result);
Result = V3ScaleAdd(V23, S11, Result);
ASSERT_ISVALIDVEC3V(Result);
return Result;
}
PX_FORCE_INLINE Vec3V V3Cos(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
	// Wrap each input angle into [-PxPi, PxPi] so the series below is evaluated in its accurate range
const Vec4V recipTwoPi = V4LoadA(g_PXReciprocalTwoPi.f);
const Vec4V twoPi = V4LoadA(g_PXTwoPi.f);
const Vec3V tmp = V3Scale(a, recipTwoPi);
const Vec3V b = V3Round(tmp);
const Vec3V V1 = V3NegScaleSub(b, twoPi, a);
// cos(V) ~= 1 - V^2 / 2! + V^4 / 4! - V^6 / 6! + V^8 / 8! - V^10 / 10! + V^12 / 12! -
// V^14 / 14! + V^16 / 16! - V^18 / 18! + V^20 / 20! - V^22 / 22! (for -PI <= V < PI)
const Vec3V V2 = V3Mul(V1, V1);
const Vec3V V4 = V3Mul(V2, V2);
const Vec3V V6 = V3Mul(V4, V2);
const Vec3V V8 = V3Mul(V4, V4);
const Vec3V V10 = V3Mul(V6, V4);
const Vec3V V12 = V3Mul(V6, V6);
const Vec3V V14 = V3Mul(V8, V6);
const Vec3V V16 = V3Mul(V8, V8);
const Vec3V V18 = V3Mul(V10, V8);
const Vec3V V20 = V3Mul(V10, V10);
const Vec3V V22 = V3Mul(V12, V10);
const Vec4V cosCoefficients0 = V4LoadA(g_PXCosCoefficients0.f);
const Vec4V cosCoefficients1 = V4LoadA(g_PXCosCoefficients1.f);
const Vec4V cosCoefficients2 = V4LoadA(g_PXCosCoefficients2.f);
const FloatV C1 = V4GetY(cosCoefficients0);
const FloatV C2 = V4GetZ(cosCoefficients0);
const FloatV C3 = V4GetW(cosCoefficients0);
const FloatV C4 = V4GetX(cosCoefficients1);
const FloatV C5 = V4GetY(cosCoefficients1);
const FloatV C6 = V4GetZ(cosCoefficients1);
const FloatV C7 = V4GetW(cosCoefficients1);
const FloatV C8 = V4GetX(cosCoefficients2);
const FloatV C9 = V4GetY(cosCoefficients2);
const FloatV C10 = V4GetZ(cosCoefficients2);
const FloatV C11 = V4GetW(cosCoefficients2);
Vec3V Result;
Result = V3ScaleAdd(V2, C1, V3One());
Result = V3ScaleAdd(V4, C2, Result);
Result = V3ScaleAdd(V6, C3, Result);
Result = V3ScaleAdd(V8, C4, Result);
Result = V3ScaleAdd(V10, C5, Result);
Result = V3ScaleAdd(V12, C6, Result);
Result = V3ScaleAdd(V14, C7, Result);
Result = V3ScaleAdd(V16, C8, Result);
Result = V3ScaleAdd(V18, C9, Result);
Result = V3ScaleAdd(V20, C10, Result);
Result = V3ScaleAdd(V22, C11, Result);
ASSERT_ISVALIDVEC3V(Result);
return Result;
}
PX_FORCE_INLINE Vec3V V3PermYZZ(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 2, 2, 1));
}
PX_FORCE_INLINE Vec3V V3PermXYX(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 1, 0));
}
PX_FORCE_INLINE Vec3V V3PermYZX(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1));
}
PX_FORCE_INLINE Vec3V V3PermZXY(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 0, 2));
}
PX_FORCE_INLINE Vec3V V3PermZZY(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 2, 2));
}
PX_FORCE_INLINE Vec3V V3PermYXX(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 0, 1));
}
PX_FORCE_INLINE Vec3V V3Perm_Zero_1Z_0Y(const Vec3V v0, const Vec3V v1)
{
ASSERT_ISVALIDVEC3V(v0);
ASSERT_ISVALIDVEC3V(v1);
return _mm_shuffle_ps(v1, v0, _MM_SHUFFLE(3, 1, 2, 3));
}
PX_FORCE_INLINE Vec3V V3Perm_0Z_Zero_1X(const Vec3V v0, const Vec3V v1)
{
ASSERT_ISVALIDVEC3V(v0);
ASSERT_ISVALIDVEC3V(v1);
return _mm_shuffle_ps(v0, v1, _MM_SHUFFLE(3, 0, 3, 2));
}
PX_FORCE_INLINE Vec3V V3Perm_1Y_0X_Zero(const Vec3V v0, const Vec3V v1)
{
ASSERT_ISVALIDVEC3V(v0);
ASSERT_ISVALIDVEC3V(v1);
// There must be a better way to do this.
Vec3V v2 = V3Zero();
FloatV y1 = V3GetY(v1);
FloatV x0 = V3GetX(v0);
v2 = V3SetX(v2, y1);
return V3SetY(v2, x0);
}
PX_FORCE_INLINE FloatV V3SumElems(const Vec3V a)
{
ASSERT_ISVALIDVEC3V(a);
const __m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 0, 0, 0)); // z,y,x,w
const __m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1, 1, 1, 1)); // y,x,w,z
const __m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 2, 2, 2)); // x,w,z,y
return _mm_add_ps(_mm_add_ps(shuf1, shuf2), shuf3);
}
PX_FORCE_INLINE PxU32 V3OutOfBounds(const Vec3V a, const Vec3V min, const Vec3V max)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(min);
ASSERT_ISVALIDVEC3V(max);
const BoolV c = BOr(V3IsGrtr(a, max), V3IsGrtr(min, a));
return PxU32(!BAllEqFFFF(c));
}
PX_FORCE_INLINE PxU32 V3InBounds(const Vec3V a, const Vec3V min, const Vec3V max)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(min);
ASSERT_ISVALIDVEC3V(max);
const BoolV c = BAnd(V3IsGrtrOrEq(a, min), V3IsGrtrOrEq(max, a));
return BAllEqTTTT(c);
}
PX_FORCE_INLINE PxU32 V3OutOfBounds(const Vec3V a, const Vec3V bounds)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(bounds);
return V3OutOfBounds(a, V3Neg(bounds), bounds);
}
PX_FORCE_INLINE PxU32 V3InBounds(const Vec3V a, const Vec3V bounds)
{
ASSERT_ISVALIDVEC3V(a);
ASSERT_ISVALIDVEC3V(bounds);
return V3InBounds(a, V3Neg(bounds), bounds);
}
PX_FORCE_INLINE void V3Transpose(Vec3V& col0, Vec3V& col1, Vec3V& col2)
{
ASSERT_ISVALIDVEC3V(col0);
ASSERT_ISVALIDVEC3V(col1);
ASSERT_ISVALIDVEC3V(col2);
const Vec3V col3 = _mm_setzero_ps();
Vec3V tmp0 = _mm_unpacklo_ps(col0, col1);
Vec3V tmp2 = _mm_unpacklo_ps(col2, col3);
Vec3V tmp1 = _mm_unpackhi_ps(col0, col1);
Vec3V tmp3 = _mm_unpackhi_ps(col2, col3);
col0 = _mm_movelh_ps(tmp0, tmp2);
col1 = _mm_movehl_ps(tmp2, tmp0);
col2 = _mm_movelh_ps(tmp1, tmp3);
}
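// Worked example for V3Transpose (columns in, rows out, w lanes remain zero):
//   col0 = (1,2,3), col1 = (4,5,6), col2 = (7,8,9)
//   after V3Transpose(col0, col1, col2):
//   col0 = (1,4,7), col1 = (2,5,8), col2 = (3,6,9)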
//////////////////////////////////
// VEC4V
//////////////////////////////////
PX_FORCE_INLINE Vec4V V4Splat(const FloatV f)
{
ASSERT_ISVALIDFLOATV(f);
// return _mm_shuffle_ps(f, f, _MM_SHUFFLE(0,0,0,0));
return f;
}
PX_FORCE_INLINE Vec4V V4Merge(const FloatV* const floatVArray)
{
ASSERT_ISVALIDFLOATV(floatVArray[0]);
ASSERT_ISVALIDFLOATV(floatVArray[1]);
ASSERT_ISVALIDFLOATV(floatVArray[2]);
ASSERT_ISVALIDFLOATV(floatVArray[3]);
const __m128 xw = _mm_move_ss(floatVArray[1], floatVArray[0]); // y, y, y, x
const __m128 yz = _mm_move_ss(floatVArray[2], floatVArray[3]); // z, z, z, w
return _mm_shuffle_ps(xw, yz, _MM_SHUFFLE(0, 2, 1, 0));
}
PX_FORCE_INLINE Vec4V V4Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z, const FloatVArg w)
{
ASSERT_ISVALIDFLOATV(x);
ASSERT_ISVALIDFLOATV(y);
ASSERT_ISVALIDFLOATV(z);
ASSERT_ISVALIDFLOATV(w);
const __m128 xw = _mm_move_ss(y, x); // y, y, y, x
const __m128 yz = _mm_move_ss(z, w); // z, z, z, w
return _mm_shuffle_ps(xw, yz, _MM_SHUFFLE(0, 2, 1, 0));
}
PX_FORCE_INLINE Vec4V V4MergeW(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w)
{
const Vec4V xz = _mm_unpackhi_ps(x, z);
const Vec4V yw = _mm_unpackhi_ps(y, w);
return _mm_unpackhi_ps(xz, yw);
}
PX_FORCE_INLINE Vec4V V4MergeZ(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w)
{
const Vec4V xz = _mm_unpackhi_ps(x, z);
const Vec4V yw = _mm_unpackhi_ps(y, w);
return _mm_unpacklo_ps(xz, yw);
}
PX_FORCE_INLINE Vec4V V4MergeY(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w)
{
const Vec4V xz = _mm_unpacklo_ps(x, z);
const Vec4V yw = _mm_unpacklo_ps(y, w);
return _mm_unpackhi_ps(xz, yw);
}
PX_FORCE_INLINE Vec4V V4MergeX(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w)
{
const Vec4V xz = _mm_unpacklo_ps(x, z);
const Vec4V yw = _mm_unpacklo_ps(y, w);
return _mm_unpacklo_ps(xz, yw);
}
PX_FORCE_INLINE Vec4V V4UnpackXY(const Vec4VArg a, const Vec4VArg b)
{
return _mm_unpacklo_ps(a, b);
}
PX_FORCE_INLINE Vec4V V4UnpackZW(const Vec4VArg a, const Vec4VArg b)
{
return _mm_unpackhi_ps(a, b);
}
PX_FORCE_INLINE Vec4V V4PermYXWZ(const Vec4V a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 3, 0, 1));
}
PX_FORCE_INLINE Vec4V V4PermXZXZ(const Vec4V a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 0, 2, 0));
}
PX_FORCE_INLINE Vec4V V4PermYWYW(const Vec4V a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 3, 1));
}
PX_FORCE_INLINE Vec4V V4PermYZXW(const Vec4V a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1));
}
PX_FORCE_INLINE Vec4V V4PermZWXY(const Vec4V a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(1, 0, 3, 2));
}
template <PxU8 x, PxU8 y, PxU8 z, PxU8 w>
PX_FORCE_INLINE Vec4V V4Perm(const Vec4V a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(w, z, y, x));
}
PX_FORCE_INLINE Vec4V V4UnitW()
{
const PX_ALIGN(16, PxF32 w[4]) = { 0.0f, 0.0f, 0.0f, 1.0f };
const __m128 w128 = _mm_load_ps(w);
return w128;
}
PX_FORCE_INLINE Vec4V V4UnitX()
{
const PX_ALIGN(16, PxF32 x[4]) = { 1.0f, 0.0f, 0.0f, 0.0f };
const __m128 x128 = _mm_load_ps(x);
return x128;
}
PX_FORCE_INLINE Vec4V V4UnitY()
{
const PX_ALIGN(16, PxF32 y[4]) = { 0.0f, 1.0f, 0.0f, 0.0f };
const __m128 y128 = _mm_load_ps(y);
return y128;
}
PX_FORCE_INLINE Vec4V V4UnitZ()
{
const PX_ALIGN(16, PxF32 z[4]) = { 0.0f, 0.0f, 1.0f, 0.0f };
const __m128 z128 = _mm_load_ps(z);
return z128;
}
PX_FORCE_INLINE FloatV V4GetW(const Vec4V f)
{
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(3, 3, 3, 3));
}
PX_FORCE_INLINE FloatV V4GetX(const Vec4V f)
{
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(0, 0, 0, 0));
}
PX_FORCE_INLINE FloatV V4GetY(const Vec4V f)
{
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(1, 1, 1, 1));
}
PX_FORCE_INLINE FloatV V4GetZ(const Vec4V f)
{
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(2, 2, 2, 2));
}
PX_FORCE_INLINE Vec4V V4SetW(const Vec4V v, const FloatV f)
{
ASSERT_ISVALIDFLOATV(f);
return V4Sel(BTTTF(), v, f);
}
PX_FORCE_INLINE Vec4V V4ClearW(const Vec4V v)
{
return _mm_and_ps(v, (VecI32V&)internalWindowsSimd::gMaskXYZ);
}
PX_FORCE_INLINE Vec4V V4SetX(const Vec4V v, const FloatV f)
{
ASSERT_ISVALIDFLOATV(f);
return V4Sel(BFTTT(), v, f);
}
PX_FORCE_INLINE Vec4V V4SetY(const Vec4V v, const FloatV f)
{
ASSERT_ISVALIDFLOATV(f);
return V4Sel(BTFTT(), v, f);
}
PX_FORCE_INLINE Vec4V V4SetZ(const Vec4V v, const FloatV f)
{
ASSERT_ISVALIDFLOATV(f);
return V4Sel(BTTFT(), v, f);
}
PX_FORCE_INLINE Vec4V V4Zero()
{
return _mm_setzero_ps();
}
PX_FORCE_INLINE Vec4V V4One()
{
return V4Load(1.0f);
}
PX_FORCE_INLINE Vec4V V4Eps()
{
return V4Load(PX_EPS_REAL);
}
PX_FORCE_INLINE Vec4V V4Neg(const Vec4V f)
{
return _mm_sub_ps(_mm_setzero_ps(), f);
}
PX_FORCE_INLINE Vec4V V4Add(const Vec4V a, const Vec4V b)
{
return _mm_add_ps(a, b);
}
PX_FORCE_INLINE Vec4V V4Sub(const Vec4V a, const Vec4V b)
{
return _mm_sub_ps(a, b);
}
PX_FORCE_INLINE Vec4V V4Scale(const Vec4V a, const FloatV b)
{
return _mm_mul_ps(a, b);
}
PX_FORCE_INLINE Vec4V V4Mul(const Vec4V a, const Vec4V b)
{
return _mm_mul_ps(a, b);
}
PX_FORCE_INLINE Vec4V V4ScaleInv(const Vec4V a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(b);
return _mm_div_ps(a, b);
}
PX_FORCE_INLINE Vec4V V4Div(const Vec4V a, const Vec4V b)
{
return _mm_div_ps(a, b);
}
PX_FORCE_INLINE Vec4V V4ScaleInvFast(const Vec4V a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(b);
return _mm_mul_ps(a, _mm_rcp_ps(b));
}
PX_FORCE_INLINE Vec4V V4DivFast(const Vec4V a, const Vec4V b)
{
return _mm_mul_ps(a, _mm_rcp_ps(b));
}
PX_FORCE_INLINE Vec4V V4Recip(const Vec4V a)
{
return _mm_div_ps(V4One(), a);
}
PX_FORCE_INLINE Vec4V V4RecipFast(const Vec4V a)
{
return _mm_rcp_ps(a);
}
PX_FORCE_INLINE Vec4V V4Rsqrt(const Vec4V a)
{
return _mm_div_ps(V4One(), _mm_sqrt_ps(a));
}
PX_FORCE_INLINE Vec4V V4RsqrtFast(const Vec4V a)
{
return _mm_rsqrt_ps(a);
}
PX_FORCE_INLINE Vec4V V4Sqrt(const Vec4V a)
{
return _mm_sqrt_ps(a);
}
PX_FORCE_INLINE Vec4V V4ScaleAdd(const Vec4V a, const FloatV b, const Vec4V c)
{
ASSERT_ISVALIDFLOATV(b);
return V4Add(V4Scale(a, b), c);
}
PX_FORCE_INLINE Vec4V V4NegScaleSub(const Vec4V a, const FloatV b, const Vec4V c)
{
ASSERT_ISVALIDFLOATV(b);
return V4Sub(c, V4Scale(a, b));
}
PX_FORCE_INLINE Vec4V V4MulAdd(const Vec4V a, const Vec4V b, const Vec4V c)
{
return V4Add(V4Mul(a, b), c);
}
PX_FORCE_INLINE Vec4V V4NegMulSub(const Vec4V a, const Vec4V b, const Vec4V c)
{
return V4Sub(c, V4Mul(a, b));
}
PX_FORCE_INLINE Vec4V V4Abs(const Vec4V a)
{
return V4Max(a, V4Neg(a));
}
PX_FORCE_INLINE FloatV V4SumElements(const Vec4V a)
{
const Vec4V xy = V4UnpackXY(a, a); // x,x,y,y
const Vec4V zw = V4UnpackZW(a, a); // z,z,w,w
const Vec4V xz_yw = V4Add(xy, zw); // x+z,x+z,y+w,y+w
const FloatV xz = V4GetX(xz_yw); // x+z
const FloatV yw = V4GetZ(xz_yw); // y+w
return FAdd(xz, yw); // sum
}
PX_FORCE_INLINE FloatV V4Dot(const Vec4V a, const Vec4V b)
{
//const __m128 dot1 = _mm_mul_ps(a, b); // x,y,z,w
//const __m128 shuf1 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(2, 1, 0, 3)); // w,x,y,z
//const __m128 shuf2 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(1, 0, 3, 2)); // z,w,x,y
//const __m128 shuf3 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(0, 3, 2, 1)); // y,z,w,x
//return _mm_add_ps(_mm_add_ps(shuf2, shuf3), _mm_add_ps(dot1, shuf1));
	// PT: this version has two fewer instructions but we should check its accuracy
// aw*bw | az*bz | ay*by | ax*bx
const __m128 t0 = _mm_mul_ps(a, b);
// ay*by | ax*bx | aw*bw | az*bz
const __m128 t1 = _mm_shuffle_ps(t0, t0, _MM_SHUFFLE(1,0,3,2));
// ay*by + aw*bw | ax*bx + az*bz | aw*bw + ay*by | az*bz + ax*bx
const __m128 t2 = _mm_add_ps(t0, t1);
// ax*bx + az*bz | ay*by + aw*bw | az*bz + ax*bx | aw*bw + ay*by
const __m128 t3 = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(2,3,0,1));
// ax*bx + az*bz + ay*by + aw*bw
return _mm_add_ps(t3, t2);
// ay*by + aw*bw + ax*bx + az*bz
// az*bz + ax*bx + aw*bw + ay*by
// aw*bw + ay*by + az*bz + ax*bx
}
PX_FORCE_INLINE FloatV V4Dot3(const Vec4V a, const Vec4V b)
{
const __m128 dot1 = _mm_mul_ps(a, b); // aw*bw | az*bz | ay*by | ax*bx
const __m128 shuf1 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(0, 0, 0, 0)); // ax*bx | ax*bx | ax*bx | ax*bx
const __m128 shuf2 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(1, 1, 1, 1)); // ay*by | ay*by | ay*by | ay*by
const __m128 shuf3 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(2, 2, 2, 2)); // az*bz | az*bz | az*bz | az*bz
return _mm_add_ps(_mm_add_ps(shuf1, shuf2), shuf3); // ax*bx + ay*by + az*bz in each component
}
PX_FORCE_INLINE Vec4V V4Cross(const Vec4V a, const Vec4V b)
{
/* if(0)
{
const __m128 r1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 0, 2)); // z,x,y,w
const __m128 r2 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1)); // y,z,x,w
const __m128 l1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1)); // y,z,x,w
const __m128 l2 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 1, 0, 2)); // z,x,y,w
return _mm_sub_ps(_mm_mul_ps(l1, l2), _mm_mul_ps(r1, r2));
}
else*/
{
const __m128 b0 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3,0,2,1));
const __m128 a1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3,0,2,1));
__m128 v = _mm_mul_ps(a1, b);
v = _mm_sub_ps(_mm_mul_ps(a, b0), v);
return _mm_shuffle_ps(v, v, _MM_SHUFFLE(3,0,2,1));
}
}
PX_FORCE_INLINE FloatV V4Length(const Vec4V a)
{
return _mm_sqrt_ps(V4Dot(a, a));
}
PX_FORCE_INLINE FloatV V4LengthSq(const Vec4V a)
{
return V4Dot(a, a);
}
PX_FORCE_INLINE Vec4V V4Normalize(const Vec4V a)
{
ASSERT_ISFINITELENGTH(a);
return V4ScaleInv(a, _mm_sqrt_ps(V4Dot(a, a)));
}
PX_FORCE_INLINE Vec4V V4NormalizeFast(const Vec4V a)
{
ASSERT_ISFINITELENGTH(a);
return V4ScaleInvFast(a, _mm_sqrt_ps(V4Dot(a, a)));
}
PX_FORCE_INLINE Vec4V V4NormalizeSafe(const Vec4V a, const Vec4V unsafeReturnValue)
{
const __m128 eps = V3Eps();
const __m128 length = V4Length(a);
const __m128 isGreaterThanZero = V4IsGrtr(length, eps);
return V4Sel(isGreaterThanZero, V4ScaleInv(a, length), unsafeReturnValue);
}
PX_FORCE_INLINE Vec4V V4Sel(const BoolV c, const Vec4V a, const Vec4V b)
{
return _mm_or_ps(_mm_andnot_ps(c, b), _mm_and_ps(c, a));
}
PX_FORCE_INLINE BoolV V4IsGrtr(const Vec4V a, const Vec4V b)
{
return _mm_cmpgt_ps(a, b);
}
PX_FORCE_INLINE BoolV V4IsGrtrOrEq(const Vec4V a, const Vec4V b)
{
return _mm_cmpge_ps(a, b);
}
PX_FORCE_INLINE BoolV V4IsEq(const Vec4V a, const Vec4V b)
{
return _mm_cmpeq_ps(a, b);
}
PX_FORCE_INLINE BoolV V4IsEqU32(const VecU32V a, const VecU32V b)
{
return internalWindowsSimd::m128_I2F(
_mm_cmpeq_epi32(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b)));
}
PX_FORCE_INLINE Vec4V V4Max(const Vec4V a, const Vec4V b)
{
return _mm_max_ps(a, b);
}
PX_FORCE_INLINE Vec4V V4Min(const Vec4V a, const Vec4V b)
{
return _mm_min_ps(a, b);
}
PX_FORCE_INLINE FloatV V4ExtractMax(const Vec4V a)
{
const __m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1, 0, 3, 2));
const __m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 3, 2, 1));
return _mm_max_ps(_mm_max_ps(a, shuf1), _mm_max_ps(shuf2, shuf3));
}
PX_FORCE_INLINE FloatV V4ExtractMin(const Vec4V a)
{
const __m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1, 0, 3, 2));
const __m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 3, 2, 1));
return _mm_min_ps(_mm_min_ps(a, shuf1), _mm_min_ps(shuf2, shuf3));
}
PX_FORCE_INLINE Vec4V V4Clamp(const Vec4V a, const Vec4V minV, const Vec4V maxV)
{
return V4Max(V4Min(a, maxV), minV);
}
PX_FORCE_INLINE PxU32 V4AllGrtr(const Vec4V a, const Vec4V b)
{
return internalWindowsSimd::BAllTrue4_R(V4IsGrtr(a, b));
}
PX_FORCE_INLINE PxU32 V4AllGrtrOrEq(const Vec4V a, const Vec4V b)
{
return internalWindowsSimd::BAllTrue4_R(V4IsGrtrOrEq(a, b));
}
PX_FORCE_INLINE PxU32 V4AllGrtrOrEq3(const Vec4V a, const Vec4V b)
{
return internalWindowsSimd::BAllTrue3_R(V4IsGrtrOrEq(a, b));
}
PX_FORCE_INLINE PxU32 V4AllEq(const Vec4V a, const Vec4V b)
{
return internalWindowsSimd::BAllTrue4_R(V4IsEq(a, b));
}
PX_FORCE_INLINE PxU32 V4AnyGrtr3(const Vec4V a, const Vec4V b)
{
return internalWindowsSimd::BAnyTrue3_R(V4IsGrtr(a, b));
}
PX_FORCE_INLINE Vec4V V4Round(const Vec4V a)
{
// return _mm_round_ps(a, 0x0);
const Vec4V half = V4Load(0.5f);
const __m128 signBit = _mm_cvtepi32_ps(_mm_srli_epi32(_mm_cvtps_epi32(a), 31));
const Vec4V aRound = V4Sub(V4Add(a, half), signBit);
const __m128i tmp = _mm_cvttps_epi32(aRound);
return _mm_cvtepi32_ps(tmp);
}
PX_FORCE_INLINE Vec4V V4Sin(const Vec4V a)
{
const Vec4V recipTwoPi = V4LoadA(g_PXReciprocalTwoPi.f);
const Vec4V twoPi = V4LoadA(g_PXTwoPi.f);
const Vec4V tmp = V4Mul(a, recipTwoPi);
const Vec4V b = V4Round(tmp);
const Vec4V V1 = V4NegMulSub(twoPi, b, a);
// sin(V) ~= V - V^3 / 3! + V^5 / 5! - V^7 / 7! + V^9 / 9! - V^11 / 11! + V^13 / 13! -
// V^15 / 15! + V^17 / 17! - V^19 / 19! + V^21 / 21! - V^23 / 23! (for -PI <= V < PI)
const Vec4V V2 = V4Mul(V1, V1);
const Vec4V V3 = V4Mul(V2, V1);
const Vec4V V5 = V4Mul(V3, V2);
const Vec4V V7 = V4Mul(V5, V2);
const Vec4V V9 = V4Mul(V7, V2);
const Vec4V V11 = V4Mul(V9, V2);
const Vec4V V13 = V4Mul(V11, V2);
const Vec4V V15 = V4Mul(V13, V2);
const Vec4V V17 = V4Mul(V15, V2);
const Vec4V V19 = V4Mul(V17, V2);
const Vec4V V21 = V4Mul(V19, V2);
const Vec4V V23 = V4Mul(V21, V2);
const Vec4V sinCoefficients0 = V4LoadA(g_PXSinCoefficients0.f);
const Vec4V sinCoefficients1 = V4LoadA(g_PXSinCoefficients1.f);
const Vec4V sinCoefficients2 = V4LoadA(g_PXSinCoefficients2.f);
const FloatV S1 = V4GetY(sinCoefficients0);
const FloatV S2 = V4GetZ(sinCoefficients0);
const FloatV S3 = V4GetW(sinCoefficients0);
const FloatV S4 = V4GetX(sinCoefficients1);
const FloatV S5 = V4GetY(sinCoefficients1);
const FloatV S6 = V4GetZ(sinCoefficients1);
const FloatV S7 = V4GetW(sinCoefficients1);
const FloatV S8 = V4GetX(sinCoefficients2);
const FloatV S9 = V4GetY(sinCoefficients2);
const FloatV S10 = V4GetZ(sinCoefficients2);
const FloatV S11 = V4GetW(sinCoefficients2);
Vec4V Result;
Result = V4MulAdd(S1, V3, V1);
Result = V4MulAdd(S2, V5, Result);
Result = V4MulAdd(S3, V7, Result);
Result = V4MulAdd(S4, V9, Result);
Result = V4MulAdd(S5, V11, Result);
Result = V4MulAdd(S6, V13, Result);
Result = V4MulAdd(S7, V15, Result);
Result = V4MulAdd(S8, V17, Result);
Result = V4MulAdd(S9, V19, Result);
Result = V4MulAdd(S10, V21, Result);
Result = V4MulAdd(S11, V23, Result);
return Result;
}
PX_FORCE_INLINE Vec4V V4Cos(const Vec4V a)
{
const Vec4V recipTwoPi = V4LoadA(g_PXReciprocalTwoPi.f);
const FloatV twoPi = V4LoadA(g_PXTwoPi.f);
const Vec4V tmp = V4Mul(a, recipTwoPi);
const Vec4V b = V4Round(tmp);
const Vec4V V1 = V4NegMulSub(twoPi, b, a);
// cos(V) ~= 1 - V^2 / 2! + V^4 / 4! - V^6 / 6! + V^8 / 8! - V^10 / 10! + V^12 / 12! -
// V^14 / 14! + V^16 / 16! - V^18 / 18! + V^20 / 20! - V^22 / 22! (for -PI <= V < PI)
const Vec4V V2 = V4Mul(V1, V1);
const Vec4V V4 = V4Mul(V2, V2);
const Vec4V V6 = V4Mul(V4, V2);
const Vec4V V8 = V4Mul(V4, V4);
const Vec4V V10 = V4Mul(V6, V4);
const Vec4V V12 = V4Mul(V6, V6);
const Vec4V V14 = V4Mul(V8, V6);
const Vec4V V16 = V4Mul(V8, V8);
const Vec4V V18 = V4Mul(V10, V8);
const Vec4V V20 = V4Mul(V10, V10);
const Vec4V V22 = V4Mul(V12, V10);
const Vec4V cosCoefficients0 = V4LoadA(g_PXCosCoefficients0.f);
const Vec4V cosCoefficients1 = V4LoadA(g_PXCosCoefficients1.f);
const Vec4V cosCoefficients2 = V4LoadA(g_PXCosCoefficients2.f);
const FloatV C1 = V4GetY(cosCoefficients0);
const FloatV C2 = V4GetZ(cosCoefficients0);
const FloatV C3 = V4GetW(cosCoefficients0);
const FloatV C4 = V4GetX(cosCoefficients1);
const FloatV C5 = V4GetY(cosCoefficients1);
const FloatV C6 = V4GetZ(cosCoefficients1);
const FloatV C7 = V4GetW(cosCoefficients1);
const FloatV C8 = V4GetX(cosCoefficients2);
const FloatV C9 = V4GetY(cosCoefficients2);
const FloatV C10 = V4GetZ(cosCoefficients2);
const FloatV C11 = V4GetW(cosCoefficients2);
Vec4V Result;
Result = V4MulAdd(C1, V2, V4One());
Result = V4MulAdd(C2, V4, Result);
Result = V4MulAdd(C3, V6, Result);
Result = V4MulAdd(C4, V8, Result);
Result = V4MulAdd(C5, V10, Result);
Result = V4MulAdd(C6, V12, Result);
Result = V4MulAdd(C7, V14, Result);
Result = V4MulAdd(C8, V16, Result);
Result = V4MulAdd(C9, V18, Result);
Result = V4MulAdd(C10, V20, Result);
Result = V4MulAdd(C11, V22, Result);
return Result;
}
PX_FORCE_INLINE void V4Transpose(Vec4V& col0, Vec4V& col1, Vec4V& col2, Vec4V& col3)
{
Vec4V tmp0 = _mm_unpacklo_ps(col0, col1);
Vec4V tmp2 = _mm_unpacklo_ps(col2, col3);
Vec4V tmp1 = _mm_unpackhi_ps(col0, col1);
Vec4V tmp3 = _mm_unpackhi_ps(col2, col3);
col0 = _mm_movelh_ps(tmp0, tmp2);
col1 = _mm_movehl_ps(tmp2, tmp0);
col2 = _mm_movelh_ps(tmp1, tmp3);
col3 = _mm_movehl_ps(tmp3, tmp1);
}
//////////////////////////////////
// BoolV
//////////////////////////////////
PX_FORCE_INLINE BoolV BFFFF()
{
return _mm_setzero_ps();
}
PX_FORCE_INLINE BoolV BFFFT()
{
/*const PX_ALIGN(16, PxU32 f[4])={0,0,0,0xFFFFFFFF};
const __m128 ffft=_mm_load_ps((float*)&f);
return ffft;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(-1, 0, 0, 0));
}
PX_FORCE_INLINE BoolV BFFTF()
{
/*const PX_ALIGN(16, PxU32 f[4])={0,0,0xFFFFFFFF,0};
const __m128 fftf=_mm_load_ps((float*)&f);
return fftf;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(0, -1, 0, 0));
}
PX_FORCE_INLINE BoolV BFFTT()
{
/*const PX_ALIGN(16, PxU32 f[4])={0,0,0xFFFFFFFF,0xFFFFFFFF};
const __m128 fftt=_mm_load_ps((float*)&f);
return fftt;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(-1, -1, 0, 0));
}
PX_FORCE_INLINE BoolV BFTFF()
{
/*const PX_ALIGN(16, PxU32 f[4])={0,0xFFFFFFFF,0,0};
const __m128 ftff=_mm_load_ps((float*)&f);
return ftff;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(0, 0, -1, 0));
}
PX_FORCE_INLINE BoolV BFTFT()
{
/*const PX_ALIGN(16, PxU32 f[4])={0,0xFFFFFFFF,0,0xFFFFFFFF};
const __m128 ftft=_mm_load_ps((float*)&f);
return ftft;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(-1, 0, -1, 0));
}
PX_FORCE_INLINE BoolV BFTTF()
{
/*const PX_ALIGN(16, PxU32 f[4])={0,0xFFFFFFFF,0xFFFFFFFF,0};
const __m128 fttf=_mm_load_ps((float*)&f);
return fttf;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(0, -1, -1, 0));
}
PX_FORCE_INLINE BoolV BFTTT()
{
/*const PX_ALIGN(16, PxU32 f[4])={0,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF};
const __m128 fttt=_mm_load_ps((float*)&f);
return fttt;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(-1, -1, -1, 0));
}
PX_FORCE_INLINE BoolV BTFFF()
{
// const PX_ALIGN(16, PxU32 f[4])={0xFFFFFFFF,0,0,0};
// const __m128 tfff=_mm_load_ps((float*)&f);
// return tfff;
return internalWindowsSimd::m128_I2F(_mm_set_epi32(0, 0, 0, -1));
}
PX_FORCE_INLINE BoolV BTFFT()
{
/*const PX_ALIGN(16, PxU32 f[4])={0xFFFFFFFF,0,0,0xFFFFFFFF};
const __m128 tfft=_mm_load_ps((float*)&f);
return tfft;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(-1, 0, 0, -1));
}
PX_FORCE_INLINE BoolV BTFTF()
{
/*const PX_ALIGN(16, PxU32 f[4])={0xFFFFFFFF,0,0xFFFFFFFF,0};
const __m128 tftf=_mm_load_ps((float*)&f);
return tftf;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(0, -1, 0, -1));
}
PX_FORCE_INLINE BoolV BTFTT()
{
/*const PX_ALIGN(16, PxU32 f[4])={0xFFFFFFFF,0,0xFFFFFFFF,0xFFFFFFFF};
const __m128 tftt=_mm_load_ps((float*)&f);
return tftt;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(-1, -1, 0, -1));
}
PX_FORCE_INLINE BoolV BTTFF()
{
/*const PX_ALIGN(16, PxU32 f[4])={0xFFFFFFFF,0xFFFFFFFF,0,0};
const __m128 ttff=_mm_load_ps((float*)&f);
return ttff;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(0, 0, -1, -1));
}
PX_FORCE_INLINE BoolV BTTFT()
{
/*const PX_ALIGN(16, PxU32 f[4])={0xFFFFFFFF,0xFFFFFFFF,0,0xFFFFFFFF};
const __m128 ttft=_mm_load_ps((float*)&f);
return ttft;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(-1, 0, -1, -1));
}
PX_FORCE_INLINE BoolV BTTTF()
{
/*const PX_ALIGN(16, PxU32 f[4])={0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0};
const __m128 tttf=_mm_load_ps((float*)&f);
return tttf;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(0, -1, -1, -1));
}
PX_FORCE_INLINE BoolV BTTTT()
{
/*const PX_ALIGN(16, PxU32 f[4])={0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF};
const __m128 tttt=_mm_load_ps((float*)&f);
return tttt;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(-1, -1, -1, -1));
}
PX_FORCE_INLINE BoolV BXMask()
{
/*const PX_ALIGN(16, PxU32 f[4])={0xFFFFFFFF,0,0,0};
const __m128 tfff=_mm_load_ps((float*)&f);
return tfff;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(0, 0, 0, -1));
}
PX_FORCE_INLINE BoolV BYMask()
{
/*const PX_ALIGN(16, PxU32 f[4])={0,0xFFFFFFFF,0,0};
const __m128 ftff=_mm_load_ps((float*)&f);
return ftff;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(0, 0, -1, 0));
}
PX_FORCE_INLINE BoolV BZMask()
{
/*const PX_ALIGN(16, PxU32 f[4])={0,0,0xFFFFFFFF,0};
const __m128 fftf=_mm_load_ps((float*)&f);
return fftf;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(0, -1, 0, 0));
}
PX_FORCE_INLINE BoolV BWMask()
{
/*const PX_ALIGN(16, PxU32 f[4])={0,0,0,0xFFFFFFFF};
const __m128 ffft=_mm_load_ps((float*)&f);
return ffft;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(-1, 0, 0, 0));
}
PX_FORCE_INLINE BoolV BGetX(const BoolV f)
{
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(0, 0, 0, 0));
}
PX_FORCE_INLINE BoolV BGetY(const BoolV f)
{
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(1, 1, 1, 1));
}
PX_FORCE_INLINE BoolV BGetZ(const BoolV f)
{
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(2, 2, 2, 2));
}
PX_FORCE_INLINE BoolV BGetW(const BoolV f)
{
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(3, 3, 3, 3));
}
PX_FORCE_INLINE BoolV BSetX(const BoolV v, const BoolV f)
{
return V4Sel(BFTTT(), v, f);
}
PX_FORCE_INLINE BoolV BSetY(const BoolV v, const BoolV f)
{
return V4Sel(BTFTT(), v, f);
}
PX_FORCE_INLINE BoolV BSetZ(const BoolV v, const BoolV f)
{
return V4Sel(BTTFT(), v, f);
}
PX_FORCE_INLINE BoolV BSetW(const BoolV v, const BoolV f)
{
return V4Sel(BTTTF(), v, f);
}
template <int index>
BoolV BSplatElement(BoolV a)
{
return internalWindowsSimd::m128_I2F(
_mm_shuffle_epi32(internalWindowsSimd::m128_F2I(a), _MM_SHUFFLE(index, index, index, index)));
}
PX_FORCE_INLINE BoolV BAnd(const BoolV a, const BoolV b)
{
return _mm_and_ps(a, b);
}
PX_FORCE_INLINE BoolV BNot(const BoolV a)
{
const BoolV bAllTrue(BTTTT());
return _mm_xor_ps(a, bAllTrue);
}
PX_FORCE_INLINE BoolV BAndNot(const BoolV a, const BoolV b)
{
return _mm_andnot_ps(b, a);
}
PX_FORCE_INLINE BoolV BOr(const BoolV a, const BoolV b)
{
return _mm_or_ps(a, b);
}
PX_FORCE_INLINE BoolV BAllTrue4(const BoolV a)
{
const BoolV bTmp =
_mm_and_ps(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 1, 0, 1)), _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 3, 2, 3)));
return _mm_and_ps(_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(0, 0, 0, 0)),
_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(1, 1, 1, 1)));
}
PX_FORCE_INLINE BoolV BAnyTrue4(const BoolV a)
{
const BoolV bTmp =
_mm_or_ps(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 1, 0, 1)), _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 3, 2, 3)));
return _mm_or_ps(_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(0, 0, 0, 0)),
_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(1, 1, 1, 1)));
}
PX_FORCE_INLINE BoolV BAllTrue3(const BoolV a)
{
const BoolV bTmp =
_mm_and_ps(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 1, 0, 1)), _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 2, 2, 2)));
return _mm_and_ps(_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(0, 0, 0, 0)),
_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(1, 1, 1, 1)));
}
PX_FORCE_INLINE BoolV BAnyTrue3(const BoolV a)
{
const BoolV bTmp =
_mm_or_ps(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 1, 0, 1)), _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 2, 2, 2)));
return _mm_or_ps(_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(0, 0, 0, 0)),
_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(1, 1, 1, 1)));
}
PX_FORCE_INLINE PxU32 BAllEq(const BoolV a, const BoolV b)
{
const BoolV bTest = internalWindowsSimd::m128_I2F(
_mm_cmpeq_epi32(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b)));
return internalWindowsSimd::BAllTrue4_R(bTest);
}
PX_FORCE_INLINE PxU32 BAllEqTTTT(const BoolV a)
{
return PxU32(_mm_movemask_ps(a)==15);
}
PX_FORCE_INLINE PxU32 BAllEqFFFF(const BoolV a)
{
return PxU32(_mm_movemask_ps(a)==0);
}
PX_FORCE_INLINE PxU32 BGetBitMask(const BoolV a)
{
return PxU32(_mm_movemask_ps(a));
}
//////////////////////////////////
// MAT33V
//////////////////////////////////
PX_FORCE_INLINE Vec3V M33MulV3(const Mat33V& a, const Vec3V b)
{
const FloatV x = V3GetX(b);
const FloatV y = V3GetY(b);
const FloatV z = V3GetZ(b);
const Vec3V v0 = V3Scale(a.col0, x);
const Vec3V v1 = V3Scale(a.col1, y);
const Vec3V v2 = V3Scale(a.col2, z);
const Vec3V v0PlusV1 = V3Add(v0, v1);
return V3Add(v0PlusV1, v2);
}
PX_FORCE_INLINE Vec3V M33TrnspsMulV3(const Mat33V& a, const Vec3V b)
{
Vec3V v0 = V3Mul(a.col0, b);
Vec3V v1 = V3Mul(a.col1, b);
Vec3V v2 = V3Mul(a.col2, b);
V3Transpose(v0, v1, v2);
return V3Add(V3Add(v0, v1), v2);
}
PX_FORCE_INLINE Vec3V M33MulV3AddV3(const Mat33V& A, const Vec3V b, const Vec3V c)
{
const FloatV x = V3GetX(b);
const FloatV y = V3GetY(b);
const FloatV z = V3GetZ(b);
Vec3V result = V3ScaleAdd(A.col0, x, c);
result = V3ScaleAdd(A.col1, y, result);
return V3ScaleAdd(A.col2, z, result);
}
PX_FORCE_INLINE Mat33V M33MulM33(const Mat33V& a, const Mat33V& b)
{
return Mat33V(M33MulV3(a, b.col0), M33MulV3(a, b.col1), M33MulV3(a, b.col2));
}
PX_FORCE_INLINE Mat33V M33Add(const Mat33V& a, const Mat33V& b)
{
return Mat33V(V3Add(a.col0, b.col0), V3Add(a.col1, b.col1), V3Add(a.col2, b.col2));
}
PX_FORCE_INLINE Mat33V M33Scale(const Mat33V& a, const FloatV& b)
{
return Mat33V(V3Scale(a.col0, b), V3Scale(a.col1, b), V3Scale(a.col2, b));
}
PX_FORCE_INLINE Mat33V M33Sub(const Mat33V& a, const Mat33V& b)
{
return Mat33V(V3Sub(a.col0, b.col0), V3Sub(a.col1, b.col1), V3Sub(a.col2, b.col2));
}
PX_FORCE_INLINE Mat33V M33Neg(const Mat33V& a)
{
return Mat33V(V3Neg(a.col0), V3Neg(a.col1), V3Neg(a.col2));
}
PX_FORCE_INLINE Mat33V M33Abs(const Mat33V& a)
{
return Mat33V(V3Abs(a.col0), V3Abs(a.col1), V3Abs(a.col2));
}
PX_FORCE_INLINE Mat33V M33Inverse(const Mat33V& a)
{
const BoolV tfft = BTFFT();
const BoolV tttf = BTTTF();
const FloatV zero = V3Zero();
const Vec3V cross01 = V3Cross(a.col0, a.col1);
const Vec3V cross12 = V3Cross(a.col1, a.col2);
const Vec3V cross20 = V3Cross(a.col2, a.col0);
const FloatV dot = V3Dot(cross01, a.col2);
const FloatV invDet = _mm_rcp_ps(dot);
const Vec3V mergeh = _mm_unpacklo_ps(cross12, cross01);
const Vec3V mergel = _mm_unpackhi_ps(cross12, cross01);
Vec3V colInv0 = _mm_unpacklo_ps(mergeh, cross20);
colInv0 = _mm_or_ps(_mm_andnot_ps(tttf, zero), _mm_and_ps(tttf, colInv0));
const Vec3V zppd = _mm_shuffle_ps(mergeh, cross20, _MM_SHUFFLE(3, 0, 0, 2));
const Vec3V pbwp = _mm_shuffle_ps(cross20, mergeh, _MM_SHUFFLE(3, 3, 1, 0));
const Vec3V colInv1 = _mm_or_ps(_mm_andnot_ps(BTFFT(), pbwp), _mm_and_ps(BTFFT(), zppd));
const Vec3V xppd = _mm_shuffle_ps(mergel, cross20, _MM_SHUFFLE(3, 0, 0, 0));
const Vec3V pcyp = _mm_shuffle_ps(cross20, mergel, _MM_SHUFFLE(3, 1, 2, 0));
const Vec3V colInv2 = _mm_or_ps(_mm_andnot_ps(tfft, pcyp), _mm_and_ps(tfft, xppd));
return Mat33V(_mm_mul_ps(colInv0, invDet), _mm_mul_ps(colInv1, invDet), _mm_mul_ps(colInv2, invDet));
}
PX_FORCE_INLINE Mat33V M33Trnsps(const Mat33V& a)
{
Vec3V col0 = a.col0, col1 = a.col1, col2 = a.col2;
V3Transpose(col0, col1, col2);
return Mat33V(col0, col1, col2);
}
PX_FORCE_INLINE Mat33V M33Identity()
{
return Mat33V(V3UnitX(), V3UnitY(), V3UnitZ());
}
PX_FORCE_INLINE Mat33V M33Diagonal(const Vec3VArg d)
{
const FloatV x = V3Mul(V3UnitX(), d);
const FloatV y = V3Mul(V3UnitY(), d);
const FloatV z = V3Mul(V3UnitZ(), d);
return Mat33V(x, y, z);
}
//////////////////////////////////
// MAT34V
//////////////////////////////////
PX_FORCE_INLINE Vec3V M34MulV3(const Mat34V& a, const Vec3V b)
{
const FloatV x = V3GetX(b);
const FloatV y = V3GetY(b);
const FloatV z = V3GetZ(b);
const Vec3V v0 = V3Scale(a.col0, x);
const Vec3V v1 = V3Scale(a.col1, y);
const Vec3V v2 = V3Scale(a.col2, z);
const Vec3V v0PlusV1 = V3Add(v0, v1);
const Vec3V v0PlusV1Plusv2 = V3Add(v0PlusV1, v2);
return V3Add(v0PlusV1Plusv2, a.col3);
}
PX_FORCE_INLINE Vec3V M34Mul33V3(const Mat34V& a, const Vec3V b)
{
const FloatV x = V3GetX(b);
const FloatV y = V3GetY(b);
const FloatV z = V3GetZ(b);
const Vec3V v0 = V3Scale(a.col0, x);
const Vec3V v1 = V3Scale(a.col1, y);
const Vec3V v2 = V3Scale(a.col2, z);
const Vec3V v0PlusV1 = V3Add(v0, v1);
return V3Add(v0PlusV1, v2);
}
PX_FORCE_INLINE Vec3V M34TrnspsMul33V3(const Mat34V& a, const Vec3V b)
{
Vec3V v0 = V3Mul(a.col0, b);
Vec3V v1 = V3Mul(a.col1, b);
Vec3V v2 = V3Mul(a.col2, b);
V3Transpose(v0, v1, v2);
return V3Add(V3Add(v0, v1), v2);
}
PX_FORCE_INLINE Mat34V M34MulM34(const Mat34V& a, const Mat34V& b)
{
return Mat34V(M34Mul33V3(a, b.col0), M34Mul33V3(a, b.col1), M34Mul33V3(a, b.col2), M34MulV3(a, b.col3));
}
PX_FORCE_INLINE Mat33V M34MulM33(const Mat34V& a, const Mat33V& b)
{
return Mat33V(M34Mul33V3(a, b.col0), M34Mul33V3(a, b.col1), M34Mul33V3(a, b.col2));
}
PX_FORCE_INLINE Mat33V M34Mul33MM34(const Mat34V& a, const Mat34V& b)
{
return Mat33V(M34Mul33V3(a, b.col0), M34Mul33V3(a, b.col1), M34Mul33V3(a, b.col2));
}
PX_FORCE_INLINE Mat34V M34Add(const Mat34V& a, const Mat34V& b)
{
return Mat34V(V3Add(a.col0, b.col0), V3Add(a.col1, b.col1), V3Add(a.col2, b.col2), V3Add(a.col3, b.col3));
}
PX_FORCE_INLINE Mat34V M34Inverse(const Mat34V& a)
{
Mat34V aInv;
const BoolV tfft = BTFFT();
const BoolV tttf = BTTTF();
const FloatV zero = V3Zero();
const Vec3V cross01 = V3Cross(a.col0, a.col1);
const Vec3V cross12 = V3Cross(a.col1, a.col2);
const Vec3V cross20 = V3Cross(a.col2, a.col0);
const FloatV dot = V3Dot(cross01, a.col2);
const FloatV invDet = _mm_rcp_ps(dot);
const Vec3V mergeh = _mm_unpacklo_ps(cross12, cross01);
const Vec3V mergel = _mm_unpackhi_ps(cross12, cross01);
Vec3V colInv0 = _mm_unpacklo_ps(mergeh, cross20);
colInv0 = _mm_or_ps(_mm_andnot_ps(tttf, zero), _mm_and_ps(tttf, colInv0));
const Vec3V zppd = _mm_shuffle_ps(mergeh, cross20, _MM_SHUFFLE(3, 0, 0, 2));
const Vec3V pbwp = _mm_shuffle_ps(cross20, mergeh, _MM_SHUFFLE(3, 3, 1, 0));
const Vec3V colInv1 = _mm_or_ps(_mm_andnot_ps(BTFFT(), pbwp), _mm_and_ps(BTFFT(), zppd));
const Vec3V xppd = _mm_shuffle_ps(mergel, cross20, _MM_SHUFFLE(3, 0, 0, 0));
const Vec3V pcyp = _mm_shuffle_ps(cross20, mergel, _MM_SHUFFLE(3, 1, 2, 0));
const Vec3V colInv2 = _mm_or_ps(_mm_andnot_ps(tfft, pcyp), _mm_and_ps(tfft, xppd));
aInv.col0 = _mm_mul_ps(colInv0, invDet);
aInv.col1 = _mm_mul_ps(colInv1, invDet);
aInv.col2 = _mm_mul_ps(colInv2, invDet);
aInv.col3 = M34Mul33V3(aInv, V3Neg(a.col3));
return aInv;
}
PX_FORCE_INLINE Mat33V M34Trnsps33(const Mat34V& a)
{
Vec3V col0 = a.col0, col1 = a.col1, col2 = a.col2;
V3Transpose(col0, col1, col2);
return Mat33V(col0, col1, col2);
}
//////////////////////////////////
// MAT44V
//////////////////////////////////
PX_FORCE_INLINE Vec4V M44MulV4(const Mat44V& a, const Vec4V b)
{
const FloatV x = V4GetX(b);
const FloatV y = V4GetY(b);
const FloatV z = V4GetZ(b);
const FloatV w = V4GetW(b);
const Vec4V v0 = V4Scale(a.col0, x);
const Vec4V v1 = V4Scale(a.col1, y);
const Vec4V v2 = V4Scale(a.col2, z);
const Vec4V v3 = V4Scale(a.col3, w);
const Vec4V v0PlusV1 = V4Add(v0, v1);
const Vec4V v0PlusV1Plusv2 = V4Add(v0PlusV1, v2);
return V4Add(v0PlusV1Plusv2, v3);
}
PX_FORCE_INLINE Vec4V M44TrnspsMulV4(const Mat44V& a, const Vec4V b)
{
Vec4V v0 = V4Mul(a.col0, b);
Vec4V v1 = V4Mul(a.col1, b);
Vec4V v2 = V4Mul(a.col2, b);
Vec4V v3 = V4Mul(a.col3, b);
V4Transpose(v0, v1, v2, v3);
return V4Add(V4Add(v0, v1), V4Add(v2, v3));
}
PX_FORCE_INLINE Mat44V M44MulM44(const Mat44V& a, const Mat44V& b)
{
return Mat44V(M44MulV4(a, b.col0), M44MulV4(a, b.col1), M44MulV4(a, b.col2), M44MulV4(a, b.col3));
}
PX_FORCE_INLINE Mat44V M44Add(const Mat44V& a, const Mat44V& b)
{
return Mat44V(V4Add(a.col0, b.col0), V4Add(a.col1, b.col1), V4Add(a.col2, b.col2), V4Add(a.col3, b.col3));
}
PX_FORCE_INLINE Mat44V M44Trnsps(const Mat44V& a)
{
Vec4V col0 = a.col0, col1 = a.col1, col2 = a.col2, col3 = a.col3;
V4Transpose(col0, col1, col2, col3);
return Mat44V(col0, col1, col2, col3);
}
PX_FORCE_INLINE Mat44V M44Inverse(const Mat44V& a)
{
__m128 minor0, minor1, minor2, minor3;
__m128 row0, row1, row2, row3;
__m128 det, tmp1;
tmp1 = V4Zero();
row1 = V4Zero();
row3 = V4Zero();
row0 = a.col0;
row1 = _mm_shuffle_ps(a.col1, a.col1, _MM_SHUFFLE(1, 0, 3, 2));
row2 = a.col2;
row3 = _mm_shuffle_ps(a.col3, a.col3, _MM_SHUFFLE(1, 0, 3, 2));
tmp1 = _mm_mul_ps(row2, row3);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1);
minor0 = _mm_mul_ps(row1, tmp1);
minor1 = _mm_mul_ps(row0, tmp1);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E);
minor0 = _mm_sub_ps(_mm_mul_ps(row1, tmp1), minor0);
minor1 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor1);
minor1 = _mm_shuffle_ps(minor1, minor1, 0x4E);
tmp1 = _mm_mul_ps(row1, row2);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1);
minor0 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor0);
minor3 = _mm_mul_ps(row0, tmp1);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E);
minor0 = _mm_sub_ps(minor0, _mm_mul_ps(row3, tmp1));
minor3 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor3);
minor3 = _mm_shuffle_ps(minor3, minor3, 0x4E);
tmp1 = _mm_mul_ps(_mm_shuffle_ps(row1, row1, 0x4E), row3);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1);
row2 = _mm_shuffle_ps(row2, row2, 0x4E);
minor0 = _mm_add_ps(_mm_mul_ps(row2, tmp1), minor0);
minor2 = _mm_mul_ps(row0, tmp1);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E);
minor0 = _mm_sub_ps(minor0, _mm_mul_ps(row2, tmp1));
minor2 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor2);
minor2 = _mm_shuffle_ps(minor2, minor2, 0x4E);
tmp1 = _mm_mul_ps(row0, row1);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1);
minor2 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor2);
minor3 = _mm_sub_ps(_mm_mul_ps(row2, tmp1), minor3);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E);
minor2 = _mm_sub_ps(_mm_mul_ps(row3, tmp1), minor2);
minor3 = _mm_sub_ps(minor3, _mm_mul_ps(row2, tmp1));
tmp1 = _mm_mul_ps(row0, row3);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1);
minor1 = _mm_sub_ps(minor1, _mm_mul_ps(row2, tmp1));
minor2 = _mm_add_ps(_mm_mul_ps(row1, tmp1), minor2);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E);
minor1 = _mm_add_ps(_mm_mul_ps(row2, tmp1), minor1);
minor2 = _mm_sub_ps(minor2, _mm_mul_ps(row1, tmp1));
tmp1 = _mm_mul_ps(row0, row2);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1);
minor1 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor1);
minor3 = _mm_sub_ps(minor3, _mm_mul_ps(row1, tmp1));
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E);
minor1 = _mm_sub_ps(minor1, _mm_mul_ps(row3, tmp1));
minor3 = _mm_add_ps(_mm_mul_ps(row1, tmp1), minor3);
det = _mm_mul_ps(row0, minor0);
det = _mm_add_ps(_mm_shuffle_ps(det, det, 0x4E), det);
det = _mm_add_ss(_mm_shuffle_ps(det, det, 0xB1), det);
tmp1 = _mm_rcp_ss(det);
#if 0
det = _mm_sub_ss(_mm_add_ss(tmp1, tmp1), _mm_mul_ss(det, _mm_mul_ss(tmp1, tmp1)));
det = _mm_shuffle_ps(det, det, 0x00);
#else
det = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(0, 0, 0, 0));
#endif
minor0 = _mm_mul_ps(det, minor0);
minor1 = _mm_mul_ps(det, minor1);
minor2 = _mm_mul_ps(det, minor2);
minor3 = _mm_mul_ps(det, minor3);
Mat44V invTrans(minor0, minor1, minor2, minor3);
return M44Trnsps(invTrans);
}
PX_FORCE_INLINE Vec4V V4LoadXYZW(const PxF32& x, const PxF32& y, const PxF32& z, const PxF32& w)
{
return _mm_set_ps(w, z, y, x);
}
PX_FORCE_INLINE VecU32V V4U32Sel(const BoolV c, const VecU32V a, const VecU32V b)
{
return internalWindowsSimd::m128_I2F(
_mm_or_si128(_mm_andnot_si128(internalWindowsSimd::m128_F2I(c), internalWindowsSimd::m128_F2I(b)),
_mm_and_si128(internalWindowsSimd::m128_F2I(c), internalWindowsSimd::m128_F2I(a))));
}
PX_FORCE_INLINE VecU32V V4U32or(VecU32V a, VecU32V b)
{
return internalWindowsSimd::m128_I2F(_mm_or_si128(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b)));
}
PX_FORCE_INLINE VecU32V V4U32xor(VecU32V a, VecU32V b)
{
return internalWindowsSimd::m128_I2F(
_mm_xor_si128(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b)));
}
PX_FORCE_INLINE VecU32V V4U32and(VecU32V a, VecU32V b)
{
return internalWindowsSimd::m128_I2F(
_mm_and_si128(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b)));
}
PX_FORCE_INLINE VecU32V V4U32Andc(VecU32V a, VecU32V b)
{
return internalWindowsSimd::m128_I2F(
_mm_andnot_si128(internalWindowsSimd::m128_F2I(b), internalWindowsSimd::m128_F2I(a)));
}
PX_FORCE_INLINE VecI32V U4Load(const PxU32 i)
{
return _mm_load1_ps((PxF32*)&i);
}
PX_FORCE_INLINE VecU32V U4LoadU(const PxU32* i)
{
return _mm_loadu_ps((PxF32*)i);
}
PX_FORCE_INLINE VecU32V U4LoadA(const PxU32* i)
{
ASSERT_ISALIGNED16(i);
return _mm_load_ps((PxF32*)i);
}
PX_FORCE_INLINE VecI32V I4LoadXYZW(const PxI32& x, const PxI32& y, const PxI32& z, const PxI32& w)
{
return internalWindowsSimd::m128_I2F(_mm_set_epi32(w, z, y, x));
}
PX_FORCE_INLINE VecI32V I4Load(const PxI32 i)
{
return _mm_load1_ps((PxF32*)&i);
}
PX_FORCE_INLINE VecI32V I4LoadU(const PxI32* i)
{
return _mm_loadu_ps((PxF32*)i);
}
PX_FORCE_INLINE VecI32V I4LoadA(const PxI32* i)
{
ASSERT_ISALIGNED16(i);
return _mm_load_ps((PxF32*)i);
}
PX_FORCE_INLINE VecI32V VecI32V_Add(const VecI32VArg a, const VecI32VArg b)
{
return internalWindowsSimd::m128_I2F(
_mm_add_epi32(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b)));
}
PX_FORCE_INLINE VecI32V VecI32V_Sub(const VecI32VArg a, const VecI32VArg b)
{
return internalWindowsSimd::m128_I2F(
_mm_sub_epi32(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b)));
}
PX_FORCE_INLINE BoolV VecI32V_IsGrtr(const VecI32VArg a, const VecI32VArg b)
{
return internalWindowsSimd::m128_I2F(
_mm_cmpgt_epi32(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b)));
}
PX_FORCE_INLINE BoolV VecI32V_IsEq(const VecI32VArg a, const VecI32VArg b)
{
return internalWindowsSimd::m128_I2F(
_mm_cmpeq_epi32(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b)));
}
PX_FORCE_INLINE VecI32V V4I32Sel(const BoolV c, const VecI32V a, const VecI32V b)
{
return V4U32Sel(c, a, b);
}
PX_FORCE_INLINE VecI32V VecI32V_Zero()
{
return V4Zero();
}
PX_FORCE_INLINE VecI32V VecI32V_One()
{
return I4Load(1);
}
PX_FORCE_INLINE VecI32V VecI32V_Two()
{
return I4Load(2);
}
PX_FORCE_INLINE VecI32V VecI32V_MinusOne()
{
return I4Load(-1);
}
PX_FORCE_INLINE VecU32V U4Zero()
{
return U4Load(0);
}
PX_FORCE_INLINE VecU32V U4One()
{
return U4Load(1);
}
PX_FORCE_INLINE VecU32V U4Two()
{
return U4Load(2);
}
PX_FORCE_INLINE VecI32V VecI32V_Sel(const BoolV c, const VecI32VArg a, const VecI32VArg b)
{
PX_ASSERT(vecMathTests::allElementsEqualBoolV(c, BTTTT()) ||
vecMathTests::allElementsEqualBoolV(c, BFFFF()));
return _mm_or_ps(_mm_andnot_ps(c, b), _mm_and_ps(c, a));
}
PX_FORCE_INLINE VecShiftV VecI32V_PrepareShift(const VecI32VArg shift)
{
VecShiftV preparedShift;
preparedShift.shift = _mm_or_ps(_mm_andnot_ps(BTFFF(), VecI32V_Zero()), _mm_and_ps(BTFFF(), shift));
return preparedShift;
}
PX_FORCE_INLINE VecI32V VecI32V_LeftShift(const VecI32VArg a, const VecShiftVArg count)
{
return internalWindowsSimd::m128_I2F(
_mm_sll_epi32(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(count.shift)));
}
PX_FORCE_INLINE VecI32V VecI32V_RightShift(const VecI32VArg a, const VecShiftVArg count)
{
return internalWindowsSimd::m128_I2F(
_mm_srl_epi32(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(count.shift)));
}
PX_FORCE_INLINE VecI32V VecI32V_LeftShift(const VecI32VArg a, const PxU32 count)
{
return internalWindowsSimd::m128_I2F(
_mm_slli_epi32(internalWindowsSimd::m128_F2I(a), count));
}
PX_FORCE_INLINE VecI32V VecI32V_RightShift(const VecI32VArg a, const PxU32 count)
{
return internalWindowsSimd::m128_I2F(
_mm_srai_epi32(internalWindowsSimd::m128_F2I(a), count));
}
PX_FORCE_INLINE VecI32V VecI32V_And(const VecI32VArg a, const VecI32VArg b)
{
return internalWindowsSimd::m128_I2F(
_mm_and_si128(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b)));
}
PX_FORCE_INLINE VecI32V VecI32V_Or(const VecI32VArg a, const VecI32VArg b)
{
return internalWindowsSimd::m128_I2F(
_mm_or_si128(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b)));
}
PX_FORCE_INLINE VecI32V VecI32V_GetX(const VecI32VArg a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 0, 0, 0));
}
PX_FORCE_INLINE VecI32V VecI32V_GetY(const VecI32VArg a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(1, 1, 1, 1));
}
PX_FORCE_INLINE VecI32V VecI32V_GetZ(const VecI32VArg a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 2, 2, 2));
}
PX_FORCE_INLINE VecI32V VecI32V_GetW(const VecI32VArg a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 3, 3, 3));
}
PX_FORCE_INLINE void PxI32_From_VecI32V(const VecI32VArg a, PxI32* i)
{
_mm_store_ss((PxF32*)i, a);
}
PX_FORCE_INLINE VecI32V VecI32V_From_BoolV(const BoolVArg a)
{
return a;
}
PX_FORCE_INLINE VecU32V VecU32V_From_BoolV(const BoolVArg a)
{
return a;
}
PX_FORCE_INLINE VecI32V VecI32V_Merge(const VecI32VArg a, const VecI32VArg b, const VecI32VArg c, const VecI32VArg d)
{
const __m128 xw = _mm_move_ss(b, a); // y, y, y, x
const __m128 yz = _mm_move_ss(c, d); // z, z, z, w
return _mm_shuffle_ps(xw, yz, _MM_SHUFFLE(0, 2, 1, 0));
}
PX_FORCE_INLINE void V4U32StoreAligned(VecU32V val, VecU32V* address)
{
*address = val;
}
PX_FORCE_INLINE Vec4V V4Andc(const Vec4V a, const VecU32V b)
{
VecU32V result32(a);
result32 = V4U32Andc(result32, b);
return Vec4V(result32);
}
PX_FORCE_INLINE VecU32V V4IsGrtrV32u(const Vec4V a, const Vec4V b)
{
return V4IsGrtr(a, b);
}
PX_FORCE_INLINE VecU16V V4U16LoadAligned(VecU16V* addr)
{
return *addr;
}
PX_FORCE_INLINE VecU16V V4U16LoadUnaligned(VecU16V* addr)
{
return *addr;
}
// unsigned compares are not supported on x86
PX_FORCE_INLINE VecU16V V4U16CompareGt(VecU16V a, VecU16V b)
{
// _mm_cmpgt_epi16 doesn't work for unsigned values unfortunately
// return m128_I2F(_mm_cmpgt_epi16(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b)));
VecU16V result;
result.m128_u16[0] = PxU16((a).m128_u16[0] > (b).m128_u16[0]);
result.m128_u16[1] = PxU16((a).m128_u16[1] > (b).m128_u16[1]);
result.m128_u16[2] = PxU16((a).m128_u16[2] > (b).m128_u16[2]);
result.m128_u16[3] = PxU16((a).m128_u16[3] > (b).m128_u16[3]);
result.m128_u16[4] = PxU16((a).m128_u16[4] > (b).m128_u16[4]);
result.m128_u16[5] = PxU16((a).m128_u16[5] > (b).m128_u16[5]);
result.m128_u16[6] = PxU16((a).m128_u16[6] > (b).m128_u16[6]);
result.m128_u16[7] = PxU16((a).m128_u16[7] > (b).m128_u16[7]);
return result;
}
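// Note: a branch-free SSE2 alternative (sketch only, not used here) is to bias both operands by
// 0x8000 so the unsigned comparison can be expressed with the signed _mm_cmpgt_epi16. Unlike the
// scalar loop above, this yields 0xFFFF per true lane instead of 0/1:
//
//	const __m128i bias = _mm_set1_epi16(-32768);	// 0x8000 in every 16-bit lane
//	const __m128i biasedA = _mm_xor_si128(internalWindowsSimd::m128_F2I(a), bias);
//	const __m128i biasedB = _mm_xor_si128(internalWindowsSimd::m128_F2I(b), bias);
//	return internalWindowsSimd::m128_I2F(_mm_cmpgt_epi16(biasedA, biasedB));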
PX_FORCE_INLINE VecU16V V4I16CompareGt(VecU16V a, VecU16V b)
{
return internalWindowsSimd::m128_I2F(
_mm_cmpgt_epi16(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b)));
}
PX_FORCE_INLINE Vec4V Vec4V_From_VecU32V(VecU32V a)
{
Vec4V result = V4LoadXYZW(PxF32(a.m128_u32[0]), PxF32(a.m128_u32[1]), PxF32(a.m128_u32[2]), PxF32(a.m128_u32[3]));
return result;
}
PX_FORCE_INLINE Vec4V Vec4V_From_VecI32V(VecI32V a)
{
return _mm_cvtepi32_ps(internalWindowsSimd::m128_F2I(a));
}
PX_FORCE_INLINE VecI32V VecI32V_From_Vec4V(Vec4V a)
{
return internalWindowsSimd::m128_I2F(_mm_cvttps_epi32(a));
}
PX_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecU32V(VecU32V a)
{
return Vec4V(a);
}
PX_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecI32V(VecI32V a)
{
return Vec4V(a);
}
PX_FORCE_INLINE VecU32V VecU32V_ReinterpretFrom_Vec4V(Vec4V a)
{
return VecU32V(a);
}
PX_FORCE_INLINE VecI32V VecI32V_ReinterpretFrom_Vec4V(Vec4V a)
{
return VecI32V(a);
}
template <int index>
PX_FORCE_INLINE VecU32V V4U32SplatElement(VecU32V a)
{
return internalWindowsSimd::m128_I2F(
_mm_shuffle_epi32(internalWindowsSimd::m128_F2I(a), _MM_SHUFFLE(index, index, index, index)));
}
template <int index>
PX_FORCE_INLINE Vec4V V4SplatElement(Vec4V a)
{
return internalWindowsSimd::m128_I2F(
_mm_shuffle_epi32(internalWindowsSimd::m128_F2I(a), _MM_SHUFFLE(index, index, index, index)));
}
PX_FORCE_INLINE VecU32V U4LoadXYZW(PxU32 x, PxU32 y, PxU32 z, PxU32 w)
{
VecU32V result;
result.m128_u32[0] = x;
result.m128_u32[1] = y;
result.m128_u32[2] = z;
result.m128_u32[3] = w;
return result;
}
PX_FORCE_INLINE Vec4V V4ConvertFromI32V(const VecI32V in)
{
return _mm_cvtepi32_ps(internalWindowsSimd::m128_F2I(in));
}
} // namespace aos
} // namespace physx
#endif
| 89,777 | C | 27.276535 | 154 | 0.670305 |
NVIDIA-Omniverse/PhysX/physx/include/foundation/windows/PxWindowsMathIntrinsics.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_WINDOWS_MATH_INTRINSICS_H
#define PX_WINDOWS_MATH_INTRINSICS_H
#include "foundation/Px.h"
#include "foundation/PxAssert.h"
#if !PX_WINDOWS_FAMILY
#error "This file should only be included by Windows builds!!"
#endif
#include <math.h>
#include <float.h>
#if !PX_DOXYGEN
namespace physx
{
namespace intrinsics
{
#endif
//! \brief platform-specific absolute value
PX_CUDA_CALLABLE PX_FORCE_INLINE float abs(float a)
{
return ::fabsf(a);
}
//! \brief platform-specific select float
PX_CUDA_CALLABLE PX_FORCE_INLINE float fsel(float a, float b, float c)
{
return (a >= 0.0f) ? b : c;
}
//! \brief platform-specific sign
PX_CUDA_CALLABLE PX_FORCE_INLINE float sign(float a)
{
return (a >= 0.0f) ? 1.0f : -1.0f;
}
//! \brief platform-specific reciprocal
PX_CUDA_CALLABLE PX_FORCE_INLINE float recip(float a)
{
return 1.0f / a;
}
//! \brief platform-specific reciprocal estimate
PX_CUDA_CALLABLE PX_FORCE_INLINE float recipFast(float a)
{
return 1.0f / a;
}
//! \brief platform-specific square root
PX_CUDA_CALLABLE PX_FORCE_INLINE float sqrt(float a)
{
return ::sqrtf(a);
}
//! \brief platform-specific reciprocal square root
PX_CUDA_CALLABLE PX_FORCE_INLINE float recipSqrt(float a)
{
return 1.0f / ::sqrtf(a);
}
//! \brief platform-specific reciprocal square root estimate
PX_CUDA_CALLABLE PX_FORCE_INLINE float recipSqrtFast(float a)
{
return 1.0f / ::sqrtf(a);
}
//! \brief platform-specific sine
PX_CUDA_CALLABLE PX_FORCE_INLINE float sin(float a)
{
return ::sinf(a);
}
//! \brief platform-specific cosine
PX_CUDA_CALLABLE PX_FORCE_INLINE float cos(float a)
{
return ::cosf(a);
}
//! \brief platform-specific minimum
PX_CUDA_CALLABLE PX_FORCE_INLINE float selectMin(float a, float b)
{
return a < b ? a : b;
}
//! \brief platform-specific maximum
PX_CUDA_CALLABLE PX_FORCE_INLINE float selectMax(float a, float b)
{
return a > b ? a : b;
}
//! \brief platform-specific finiteness check (not INF or NAN)
PX_CUDA_CALLABLE PX_FORCE_INLINE bool isFinite(float a)
{
#ifdef __CUDACC__
return !!isfinite(a);
#else
return (0 == ((_FPCLASS_SNAN | _FPCLASS_QNAN | _FPCLASS_NINF | _FPCLASS_PINF) & _fpclass(a)));
#endif
}
//! \brief platform-specific finiteness check (not INF or NAN)
PX_CUDA_CALLABLE PX_FORCE_INLINE bool isFinite(double a)
{
#ifdef __CUDACC__
return !!isfinite(a);
#else
return (0 == ((_FPCLASS_SNAN | _FPCLASS_QNAN | _FPCLASS_NINF | _FPCLASS_PINF) & _fpclass(a)));
#endif
}
/*!
Sets \c count bytes starting at \c dst to zero.
*/
PX_FORCE_INLINE void* memZero(void* dest, uint32_t count)
{
return memset(dest, 0, count);
}
/*!
Sets \c count bytes starting at \c dst to \c c.
*/
PX_FORCE_INLINE void* memSet(void* dest, int32_t c, uint32_t count)
{
return memset(dest, c, count);
}
/*!
Copies \c count bytes from \c src to \c dst. Use memMove if the regions overlap.
*/
PX_FORCE_INLINE void* memCopy(void* dest, const void* src, uint32_t count)
{
return memcpy(dest, src, count);
}
/*!
Copies \c count bytes from \c src to \c dst. Supports overlapping regions.
*/
PX_FORCE_INLINE void* memMove(void* dest, const void* src, uint32_t count)
{
return memmove(dest, src, count);
}
#if !PX_DOXYGEN
} // namespace intrinsics
} // namespace physx
#endif
#endif
| 4,908 | C | 26.272222 | 95 | 0.723105 |
NVIDIA-Omniverse/PhysX/physx/include/foundation/windows/PxWindowsInclude.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_WINDOWS_INCLUDE_H
#define PX_WINDOWS_INCLUDE_H
#ifndef _WIN32
#error "This file should only be included by Windows builds!!"
#endif
#ifdef _WINDOWS_ // windows already included
#error "Only include windows.h through this file!!"
#endif
// We only support >= Windows 7, and we need this for critical section and
// Setting this hides some important APIs (e.g. LoadPackagedLibrary), so don't do it
#define _WIN32_WINNT 0x0601
// turn off as much as we can for windows. All we really need are the thread functions (critical sections/Interlocked*
// etc)
#define NOGDICAPMASKS
#define NOVIRTUALKEYCODES
#define NOWINMESSAGES
#define NOWINSTYLES
#define NOSYSMETRICS
#define NOMENUS
#define NOICONS
#define NOKEYSTATES
#define NOSYSCOMMANDS
#define NORASTEROPS
#define NOSHOWWINDOW
#define NOATOM
#define NOCLIPBOARD
#define NOCOLOR
#define NOCTLMGR
#define NODRAWTEXT
#define NOGDI
#define NOMB
#define NOMEMMGR
#define NOMETAFILE
#define NOMINMAX
#define NOOPENFILE
#define NOSCROLL
#define NOSERVICE
#define NOSOUND
#define NOTEXTMETRIC
#define NOWH
#define NOWINOFFSETS
#define NOCOMM
#define NOKANJI
#define NOHELP
#define NOPROFILER
#define NODEFERWINDOWPOS
#define NOMCX
#define WIN32_LEAN_AND_MEAN
// We need a slightly wider API surface for e.g. MultiByteToWideChar
#define NOUSER
#define NONLS
#define NOMSG
#pragma warning(push)
#pragma warning(disable : 4668) //'symbol' is not defined as a preprocessor macro, replacing with '0' for 'directives'
#include <windows.h>
#pragma warning(pop)
#if PX_SSE2
#include <xmmintrin.h>
#endif
#endif
| 3,258 | C | 32.597938 | 118 | 0.775629 |
NVIDIA-Omniverse/PhysX/physx/include/foundation/windows/PxWindowsAoS.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_WINDOWS_AOS_H
#define PX_WINDOWS_AOS_H
// no includes here! this file should be included from PxAOS.h only!!!
#if !COMPILE_VECTOR_INTRINSICS
#error Vector intrinsics should not be included when using scalar implementation.
#endif
#if !PX_DOXYGEN
namespace physx
{
#endif
namespace aos
{
typedef __m128 FloatV;
typedef __m128 Vec3V;
typedef __m128 Vec4V;
typedef __m128 BoolV;
typedef __m128 VecU32V;
typedef __m128 VecI32V;
typedef __m128 VecU16V;
typedef __m128 VecI16V;
typedef __m128 QuatV;
#define FloatVArg FloatV &
#define Vec3VArg Vec3V &
#define Vec4VArg Vec4V &
#define BoolVArg BoolV &
#define VecU32VArg VecU32V &
#define VecI32VArg VecI32V &
#define VecU16VArg VecU16V &
#define VecI16VArg VecI16V &
#define QuatVArg QuatV &
// Optimization for situations in which you cross product multiple vectors with the same vector.
// Avoids 2X shuffles per product
struct VecCrossV
{
Vec3V mL1;
Vec3V mR1;
};
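// Illustrative sketch of the intended use, assuming the V3PrepareCross / V3Cross(VecCrossV, Vec3V)
// overloads declared with the rest of the vector math API (those names are taken from that API, not
// from this file): prepare the shared operand once, then reuse it so the per-product shuffles are
// not repeated.
//
//	const VecCrossV vc = V3PrepareCross(sharedVec);	// shuffles computed once
//	const Vec3V c0 = V3Cross(vc, v0);
//	const Vec3V c1 = V3Cross(vc, v1);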
struct VecShiftV
{
VecI32V shift;
};
#define VecShiftVArg VecShiftV &
PX_ALIGN_PREFIX(16)
struct Mat33V
{
Mat33V()
{
}
Mat33V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2) : col0(c0), col1(c1), col2(c2)
{
}
Vec3V PX_ALIGN(16, col0);
Vec3V PX_ALIGN(16, col1);
Vec3V PX_ALIGN(16, col2);
} PX_ALIGN_SUFFIX(16);
PX_ALIGN_PREFIX(16)
struct Mat34V
{
Mat34V()
{
}
Mat34V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2, const Vec3V& c3) : col0(c0), col1(c1), col2(c2), col3(c3)
{
}
Vec3V PX_ALIGN(16, col0);
Vec3V PX_ALIGN(16, col1);
Vec3V PX_ALIGN(16, col2);
Vec3V PX_ALIGN(16, col3);
} PX_ALIGN_SUFFIX(16);
PX_ALIGN_PREFIX(16)
struct Mat43V
{
Mat43V()
{
}
Mat43V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2) : col0(c0), col1(c1), col2(c2)
{
}
Vec4V PX_ALIGN(16, col0);
Vec4V PX_ALIGN(16, col1);
Vec4V PX_ALIGN(16, col2);
} PX_ALIGN_SUFFIX(16);
PX_ALIGN_PREFIX(16)
struct Mat44V
{
Mat44V()
{
}
Mat44V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2, const Vec4V& c3) : col0(c0), col1(c1), col2(c2), col3(c3)
{
}
Vec4V PX_ALIGN(16, col0);
Vec4V PX_ALIGN(16, col1);
Vec4V PX_ALIGN(16, col2);
Vec4V PX_ALIGN(16, col3);
} PX_ALIGN_SUFFIX(16);
} // namespace aos
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif
| 3,892 | C | 26.034722 | 116 | 0.72739 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxSceneQuerySystemExt.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_SCENE_QUERY_SYSTEM_EXT_H
#define PX_SCENE_QUERY_SYSTEM_EXT_H
/** \addtogroup extensions
@{
*/
#include "PxSceneQuerySystem.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Creates an external scene query system.
An external SQ system is the part of a PxScene that deals with scene queries (SQ). This is usually taken care of
by an internal implementation inside PxScene, but it is also possible to re-route all SQ calls to an external
implementation, potentially opening the door to some customizations in behavior and features for advanced users.
The following external SQ system is an example of what an implementation could look like. It re-uses much of the
same code as the internal version, but it could be re-implemented in a completely different way to match users'
specific needs.
\param[in] desc Scene query descriptor
\param[in] contextID Context ID parameter, sent to the profiler
\return An external SQ system instance
@see PxSceneQuerySystem PxSceneQueryDesc
*/
PxSceneQuerySystem* PxCreateExternalSceneQuerySystem(const PxSceneQueryDesc& desc, PxU64 contextID);
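/*
Illustrative sketch (not part of this header's API): create the external system and hand it to a
scene. The PxSceneDesc::sceneQuerySystem hook used below is an assumption about how a scene picks
up the external system - consult PxSceneDesc for the actual member. sqDesc, contextID and sceneDesc
are assumed to be set up elsewhere.

	PxSceneQuerySystem* sqSystem = PxCreateExternalSceneQuerySystem(sqDesc, contextID);
	sceneDesc.sceneQuerySystem = sqSystem;	// route all scene queries through the external system
*/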
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 2,879 | C | 41.352941 | 113 | 0.768322 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxDefaultSimulationFilterShader.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_DEFAULT_SIMULATION_FILTER_SHADER_H
#define PX_DEFAULT_SIMULATION_FILTER_SHADER_H
/** \addtogroup extensions
@{
*/
#include "PxPhysXConfig.h"
#include "PxFiltering.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxActor;
/**
\brief 64-bit mask used for collision filtering.
The collision filtering equation for 2 objects o0 and o1 is:
<pre> (G0 op0 K0) op2 (G1 op1 K1) == b </pre>
with
<ul>
<li> G0 = PxGroupsMask for object o0. See PxSetGroupsMask </li>
<li> G1 = PxGroupsMask for object o1. See PxSetGroupsMask </li>
<li> K0 = filtering constant 0. See PxSetFilterConstants </li>
<li> K1 = filtering constant 1. See PxSetFilterConstants </li>
<li> b = filtering boolean. See PxSetFilterBool </li>
<li> op0, op1, op2 = filtering operations. See PxSetFilterOps </li>
</ul>
If the filtering equation is true, collision detection is enabled.
@see PxSetFilterOps()
*/
class PxGroupsMask
{
public:
PX_INLINE PxGroupsMask():bits0(0),bits1(0),bits2(0),bits3(0) {}
PX_INLINE ~PxGroupsMask() {}
PxU16 bits0, bits1, bits2, bits3;
};
/**
\brief Collision filtering operations.
@see PxGroupsMask
*/
struct PxFilterOp
{
enum Enum
{
PX_FILTEROP_AND,
PX_FILTEROP_OR,
PX_FILTEROP_XOR,
PX_FILTEROP_NAND,
PX_FILTEROP_NOR,
PX_FILTEROP_NXOR,
PX_FILTEROP_SWAP_AND
};
};
/**
\brief Implementation of a simple filter shader that emulates PhysX 2.8.x filtering
This shader provides the following logic:
\li If one of the two filter objects is a trigger, the pair is accepted and #PxPairFlag::eTRIGGER_DEFAULT will be used for trigger reports
\li Else, if the filter mask logic (see further below) discards the pair it will be suppressed (#PxFilterFlag::eSUPPRESS)
\li Else, the pair gets accepted and collision response gets enabled (#PxPairFlag::eCONTACT_DEFAULT)
Filter mask logic:
Given the two #PxFilterData structures fd0 and fd1 of two collision objects, the pair passes the filter if the following
conditions are met:
1) Collision groups of the pair are enabled
2) Collision filtering equation is satisfied
@see PxSimulationFilterShader
*/
PxFilterFlags PxDefaultSimulationFilterShader(
PxFilterObjectAttributes attributes0,
PxFilterData filterData0,
PxFilterObjectAttributes attributes1,
PxFilterData filterData1,
PxPairFlags& pairFlags,
const void* constantBlock,
PxU32 constantBlockSize);
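/**
A minimal usage sketch for the shader above. It assumes an initialized PxPhysics object named
physics and an existing CPU dispatcher; both names are illustrative:
\code
PxSceneDesc sceneDesc(physics.getTolerancesScale());
sceneDesc.gravity = PxVec3(0.0f, -9.81f, 0.0f);
sceneDesc.cpuDispatcher = cpuDispatcher;                  // assumed to have been created earlier
sceneDesc.filterShader = PxDefaultSimulationFilterShader; // install the 2.8.x-style filter shader
PxScene* scene = physics.createScene(sceneDesc);
\endcode
*/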
/**
\brief Determines if collision detection is performed between a pair of groups
\note Collision group is an integer between 0 and 31.
\param[in] group1 First Group
\param[in] group2 Second Group
\return True if the groups could collide
@see PxSetGroupCollisionFlag
*/
bool PxGetGroupCollisionFlag(const PxU16 group1, const PxU16 group2);
/**
\brief Specifies if collision should be performed by a pair of groups
\note Collision group is an integer between 0 and 31.
\param[in] group1 First Group
\param[in] group2 Second Group
\param[in] enable True to enable collision between the groups
@see PxGetGroupCollisionFlag
*/
void PxSetGroupCollisionFlag(const PxU16 group1, const PxU16 group2, const bool enable);
/**
\brief Retrieves the value set with PxSetGroup()
\note Collision group is an integer between 0 and 31.
\param[in] actor The actor
\return The collision group this actor belongs to
@see PxSetGroup
*/
PxU16 PxGetGroup(const PxActor& actor);
/**
\brief Sets which collision group this actor is part of
\note Collision group is an integer between 0 and 31.
\param[in] actor The actor
\param[in] collisionGroup Collision group this actor belongs to
@see PxGetGroup
*/
void PxSetGroup(PxActor& actor, const PxU16 collisionGroup);
/**
\brief Retrieves the filtering operations. See comments for PxGroupsMask
\param[out] op0 First filter operator.
\param[out] op1 Second filter operator.
\param[out] op2 Third filter operator.
@see PxSetFilterOps PxSetFilterBool PxSetFilterConstants
*/
void PxGetFilterOps(PxFilterOp::Enum& op0, PxFilterOp::Enum& op1, PxFilterOp::Enum& op2);
/**
\brief Sets up the filtering operations. See comments for PxGroupsMask
\param[in] op0 Filter op 0.
\param[in] op1 Filter op 1.
\param[in] op2 Filter op 2.
@see PxSetFilterBool PxSetFilterConstants
*/
void PxSetFilterOps(const PxFilterOp::Enum& op0, const PxFilterOp::Enum& op1, const PxFilterOp::Enum& op2);
/**
\brief Retrieves the filtering boolean value. See comments for PxGroupsMask
\return The boolean value used by the filtering equation.
@see PxSetFilterBool PxSetFilterConstants
*/
bool PxGetFilterBool();
/**
\brief Sets up the filtering boolean value. See comments for PxGroupsMask
\param[in] enable Boolean value for filter.
@see PxSetFilterOps PxSetFilterConstants
*/
void PxSetFilterBool(const bool enable);
/**
\brief Gets the filtering constants K0 and K1. See comments for PxGroupsMask
\param[out] c0 the filtering constants, as a mask. See #PxGroupsMask.
\param[out] c1 the filtering constants, as a mask. See #PxGroupsMask.
@see PxSetFilterOps PxSetFilterBool PxSetFilterConstants
*/
void PxGetFilterConstants(PxGroupsMask& c0, PxGroupsMask& c1);
/**
\brief Sets up the filtering constants K0 and K1. See comments for PxGroupsMask
\param[in] c0 The new group mask. See #PxGroupsMask.
\param[in] c1 The new group mask. See #PxGroupsMask.
@see PxSetFilterOps PxSetFilterBool PxGetFilterConstants
*/
void PxSetFilterConstants(const PxGroupsMask& c0, const PxGroupsMask& c1);
/**
\brief Gets 64-bit mask used for collision filtering. See comments for PxGroupsMask
\param[in] actor The actor
\return The group mask for the actor.
@see PxSetGroupsMask()
*/
PxGroupsMask PxGetGroupsMask(const PxActor& actor);
/**
\brief Sets 64-bit mask used for collision filtering. See comments for PxGroupsMask
\param[in] actor The actor
\param[in] mask The group mask to set for the actor.
@see PxGetGroupsMask()
*/
void PxSetGroupsMask(PxActor& actor, const PxGroupsMask& mask);
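/**
A sketch of how the group and mask API above can be combined, assuming two existing actors
actorA and actorB (illustrative names) and a scene that uses PxDefaultSimulationFilterShader:
\code
// Simple group-based filtering: group 1 never collides with group 2.
PxSetGroup(*actorA, 1);
PxSetGroup(*actorB, 2);
PxSetGroupCollisionFlag(1, 2, false);

// 64-bit mask equation: (G0 AND K0) AND (G1 AND K1) == true.
PxSetFilterOps(PxFilterOp::PX_FILTEROP_AND, PxFilterOp::PX_FILTEROP_AND, PxFilterOp::PX_FILTEROP_AND);
PxSetFilterBool(true);

PxGroupsMask all;
all.bits0 = all.bits1 = all.bits2 = all.bits3 = 0xffff;
PxSetFilterConstants(all, all); // K0 = K1 = all bits set
PxSetGroupsMask(*actorA, all);  // G0
PxSetGroupsMask(*actorB, all);  // G1
\endcode
*/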
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 7,594 | C | 27.98855 | 139 | 0.764288 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxRackAndPinionJoint.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_RACK_AND_PINION_JOINT_H
#define PX_RACK_AND_PINION_JOINT_H
/** \addtogroup extensions
@{
*/
#include "extensions/PxJoint.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxRackAndPinionJoint;
/**
\brief Create a rack & pinion Joint.
\param[in] physics The physics SDK
\param[in] actor0 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localFrame0 The position and orientation of the joint relative to actor0
\param[in] actor1 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localFrame1 The position and orientation of the joint relative to actor1
@see PxRackAndPinionJoint
*/
PxRackAndPinionJoint* PxRackAndPinionJointCreate(PxPhysics& physics, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1);
/**
\brief A joint that connects an existing revolute joint to an existing prismatic joint,
and constrains their relative angular/linear velocity and position with respect to each other.
@see PxRackAndPinionJointCreate PxJoint
*/
class PxRackAndPinionJoint : public PxJoint
{
public:
/**
\brief Set the hinge & prismatic joints connected by the rack & pinion joint.
The passed hinge joint can be either PxRevoluteJoint, PxD6Joint or PxArticulationJointReducedCoordinate.
The passed prismatic joint can be either PxPrismaticJoint or PxD6Joint.
Note that these joints are only used to compute the positional error correction term,
used to adjust potential drift between jointed actors. The rack & pinion joint can run without
calling this function, but in that case some visible overlap may develop over time between
the teeth of the rack & pinion meshes.
\note Calling this function resets the internal positional error correction term.
\param[in] hinge The hinge joint (pinion)
\param[in] prismatic The prismatic joint (rack)
\return true if success
*/
virtual bool setJoints(const PxBase* hinge, const PxBase* prismatic) = 0;
/**
\brief Get the hinge & prismatic joints connected by the rack & pinion joint.
\param[out] hinge The hinge joint (pinion)
\param[out] prismatic The prismatic joint (rack)
*/
virtual void getJoints(const PxBase*& hinge, const PxBase*& prismatic) const = 0;
/**
\brief Set the desired ratio directly.
\note You may need to use a negative gear ratio if the joint frames of involved actors are not oriented in the same direction.
\note Calling this function resets the internal positional error correction term.
\param[in] ratio Desired ratio between the hinge and the prismatic.
*/
virtual void setRatio(float ratio) = 0;
/**
\brief Get the ratio.
\return Current ratio
*/
virtual float getRatio() const = 0;
/**
\brief Set the desired ratio indirectly.
This is a simple helper function that computes the ratio from passed data:
ratio = (PI*2*nbRackTeeth)/(rackLength*nbPinionTeeth)
\note Calling this function resets the internal positional error correction term.
\param[in] nbRackTeeth Number of teeth on the rack (cannot be zero)
\param[in] nbPinionTeeth Number of teeth on the pinion (cannot be zero)
\param[in] rackLength Length of the rack
\return true if success
*/
virtual bool setData(PxU32 nbRackTeeth, PxU32 nbPinionTeeth, float rackLength) = 0;
virtual const char* getConcreteTypeName() const { return "PxRackAndPinionJoint"; }
protected:
PX_INLINE PxRackAndPinionJoint(PxType concreteType, PxBaseFlags baseFlags) : PxJoint(concreteType, baseFlags) {}
PX_INLINE PxRackAndPinionJoint(PxBaseFlags baseFlags) : PxJoint(baseFlags) {}
virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxRackAndPinionJoint", PxJoint); }
};
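/**
A usage sketch. It assumes the pinion actor already has a revolute joint and the rack actor a
prismatic joint; all variable names (physics, pinionActor, rackActor, revoluteJoint,
prismaticJoint) are illustrative:
\code
PxRackAndPinionJoint* rp = PxRackAndPinionJointCreate(physics,
	pinionActor, PxTransform(PxIdentity),
	rackActor,   PxTransform(PxIdentity));

// Let the joint compute its positional error correction term from the existing joints.
rp->setJoints(revoluteJoint, prismaticJoint);

// Either set the ratio directly...
rp->setRatio(10.0f);
// ...or derive it from the tooth counts and the rack length:
rp->setData(32, 8, 4.0f); // 32 rack teeth, 8 pinion teeth, rack length of 4
\endcode
*/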
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 5,631 | C | 37.841379 | 178 | 0.755994 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxRaycastCCD.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_RAYCAST_CCD_H
#define PX_RAYCAST_CCD_H
/** \addtogroup extensions
@{
*/
#include "common/PxPhysXCommonConfig.h"
#include "foundation/PxVec3.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxScene;
class PxShape;
class PxRigidDynamic;
class RaycastCCDManagerInternal;
/**
\brief Raycast-CCD manager.
Raycast-CCD is a simple and potentially cheaper alternative to the SDK's built-in continuous collision detection algorithm.
This implementation has some limitations:
- it is only implemented for PxRigidDynamic objects (not for PxArticulationLink)
- it is only implemented for simple actors with 1 shape (not for "compounds")
Also, since it is raycast-based, the solution is not perfect. In particular:
- small dynamic objects can still go through the static world if the ray goes through a crack between edges, or a small
hole in the world (like the keyhole from a door).
- dynamic-vs-dynamic CCD is very approximate. It only works well for fast-moving dynamic objects colliding against
slow-moving dynamic objects.
Finally, since it is using the SDK's scene queries under the hood, it only works provided the simulation shapes also have
scene-query shapes associated with them. That is, if the objects in the scene only use PxShapeFlag::eSIMULATION_SHAPE
(and no PxShapeFlag::eSCENE_QUERY_SHAPE), then the raycast-CCD system will not work.
*/
class RaycastCCDManager
{
public:
RaycastCCDManager(PxScene* scene);
~RaycastCCDManager();
/**
\brief Register dynamic object for raycast CCD.
\param[in] actor object's actor
\param[in] shape object's shape
\return True if success
*/
bool registerRaycastCCDObject(PxRigidDynamic* actor, PxShape* shape);
/**
\brief Unregister dynamic object for raycast CCD.
\param[in] actor object's actor
\param[in] shape object's shape
\return True if success
*/
bool unregisterRaycastCCDObject(PxRigidDynamic* actor, PxShape* shape);
/**
\brief Perform raycast CCD. Call this after your simulate/fetchResults calls.
\param[in] doDynamicDynamicCCD True to enable dynamic-vs-dynamic CCD (more expensive, not always needed)
*/
void doRaycastCCD(bool doDynamicDynamicCCD);
private:
RaycastCCDManagerInternal* mImpl;
};
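/**
A usage sketch, assuming a single-shape PxRigidDynamic named actor and an existing PxScene*
named scene (illustrative names):
\code
RaycastCCDManager ccd(scene);

PxShape* shape = NULL;
actor->getShapes(&shape, 1);
ccd.registerRaycastCCDObject(actor, shape);

// ... each frame, after the simulation results are available:
scene->simulate(dt);
scene->fetchResults(true);
ccd.doRaycastCCD(false); // pass true to also run the more expensive dynamic-vs-dynamic pass
\endcode
*/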
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 4,032 | C | 35.663636 | 124 | 0.755208 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxSphericalJoint.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_SPHERICAL_JOINT_H
#define PX_SPHERICAL_JOINT_H
/** \addtogroup extensions
@{
*/
#include "extensions/PxJoint.h"
#include "extensions/PxJointLimit.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxSphericalJoint;
/**
\brief Create a spherical joint.
\param[in] physics The physics SDK
\param[in] actor0 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localFrame0 The position and orientation of the joint relative to actor0
\param[in] actor1 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localFrame1 The position and orientation of the joint relative to actor1
@see PxSphericalJoint
*/
PxSphericalJoint* PxSphericalJointCreate(PxPhysics& physics, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1);
/**
\brief Flags specific to the spherical joint.
@see PxSphericalJoint
*/
struct PxSphericalJointFlag
{
enum Enum
{
eLIMIT_ENABLED = 1<<1 //!< the cone limit for the spherical joint is enabled
};
};
typedef PxFlags<PxSphericalJointFlag::Enum, PxU16> PxSphericalJointFlags;
PX_FLAGS_OPERATORS(PxSphericalJointFlag::Enum, PxU16)
/**
\brief A joint which behaves in a similar way to a ball and socket.
A spherical joint removes all linear degrees of freedom from two objects.
The position of the joint on each actor is specified by the origin of the body's joint frame.
A spherical joint may have a cone limit, to restrict the motion to within a certain range. In
addition, the bodies may be projected together if the distance between them exceeds a given threshold.
Projection, drive and limits are activated by setting the appropriate flags on the joint.
@see PxRevoluteJointCreate() PxJoint
*/
class PxSphericalJoint : public PxJoint
{
public:
/**
	\brief Get the limit cone.
	If enabled, the limit cone will constrain the angular movement of the joint to lie
	within an elliptical cone.
	\return the limit cone
	@see PxJointLimitCone setLimitCone()
*/
virtual PxJointLimitCone getLimitCone() const = 0;
/**
	\brief Set the limit cone.
	\param[in] limit the limit cone
	@see PxJointLimitCone getLimitCone()
*/
virtual void setLimitCone(const PxJointLimitCone& limit) = 0;
/**
\brief get the swing angle of the joint from the Y axis
*/
virtual PxReal getSwingYAngle() const = 0;
/**
\brief get the swing angle of the joint from the Z axis
*/
virtual PxReal getSwingZAngle() const = 0;
/**
\brief Set the flags specific to the Spherical Joint.
<b>Default</b> PxSphericalJointFlags(0)
\param[in] flags The joint flags.
@see PxSphericalJointFlag setFlag() getFlags()
*/
virtual void setSphericalJointFlags(PxSphericalJointFlags flags) = 0;
/**
\brief Set a single flag specific to a Spherical Joint to true or false.
\param[in] flag The flag to set or clear.
\param[in] value the value to which to set the flag
@see PxSphericalJointFlag, getFlags() setFlags()
*/
virtual void setSphericalJointFlag(PxSphericalJointFlag::Enum flag, bool value) = 0;
/**
\brief Get the flags specific to the Spherical Joint.
\return the joint flags
@see PxSphericalJoint::flags, PxSphericalJointFlag setFlag() setFlags()
*/
virtual PxSphericalJointFlags getSphericalJointFlags() const = 0;
/**
\brief Returns string name of PxSphericalJoint, used for serialization
*/
virtual const char* getConcreteTypeName() const { return "PxSphericalJoint"; }
protected:
//serialization
/**
\brief Constructor
*/
PX_INLINE PxSphericalJoint(PxType concreteType, PxBaseFlags baseFlags) : PxJoint(concreteType, baseFlags) {}
/**
\brief Deserialization constructor
*/
PX_INLINE PxSphericalJoint(PxBaseFlags baseFlags) : PxJoint(baseFlags) {}
/**
\brief Returns whether a given type name matches with the type of this instance
*/
virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxSphericalJoint", PxJoint); }
//~serialization
};
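/**
A usage sketch for the cone limit, assuming two existing rigid actors (actor0, actor1) and an
initialized PxPhysics object named physics:
\code
PxSphericalJoint* joint = PxSphericalJointCreate(physics,
	actor0, PxTransform(PxIdentity),
	actor1, PxTransform(PxIdentity));

// Constrain the swing to a 45/30 degree elliptical cone and enable the limit.
joint->setLimitCone(PxJointLimitCone(PxPi / 4, PxPi / 6));
joint->setSphericalJointFlag(PxSphericalJointFlag::eLIMIT_ENABLED, true);
\endcode
*/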
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 5,841 | C | 30.408602 | 169 | 0.751241 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxShapeExt.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_SHAPE_EXT_H
#define PX_SHAPE_EXT_H
/** \addtogroup extensions
@{
*/
#include "PxPhysXConfig.h"
#include "PxShape.h"
#include "PxRigidActor.h"
#include "geometry/PxGeometryQuery.h"
#include "PxQueryReport.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief utility functions for use with PxShape
@see PxShape
*/
class PxShapeExt
{
public:
/**
\brief Retrieves the world space pose of the shape.
\param[in] shape The shape for which to get the global pose.
\param[in] actor The actor to which the shape is attached
\return Global pose of shape.
*/
static PX_INLINE PxTransform getGlobalPose(const PxShape& shape, const PxRigidActor& actor)
{
// PT:: tag: scalar transform*transform
return actor.getGlobalPose() * shape.getLocalPose();
}
/**
\brief Raycast test against the shape.
\param[in] shape the shape
\param[in] actor the actor to which the shape is attached
\param[in] rayOrigin The origin of the ray to test the geometry object against
\param[in] rayDir The direction of the ray to test the geometry object against
\param[in] maxDist Maximum ray length
\param[in] hitFlags Specify which properties per hit should be computed and written to result hit array. Combination of #PxHitFlag flags
\param[in] maxHits max number of returned hits = size of 'rayHits' buffer
\param[out] rayHits Raycast hits information
\return Number of hits between the ray and the shape
@see PxRaycastHit PxTransform
*/
static PX_INLINE PxU32 raycast(const PxShape& shape, const PxRigidActor& actor,
const PxVec3& rayOrigin, const PxVec3& rayDir, PxReal maxDist, PxHitFlags hitFlags,
PxU32 maxHits, PxRaycastHit* rayHits)
{
return PxGeometryQuery::raycast(
rayOrigin, rayDir, shape.getGeometry(), getGlobalPose(shape, actor), maxDist, hitFlags, maxHits, rayHits);
}
/**
\brief Test overlap between the shape and a geometry object
\param[in] shape the shape
\param[in] actor the actor to which the shape is attached
\param[in] otherGeom The other geometry object to test overlap with
\param[in] otherGeomPose Pose of the other geometry object
\return True if the shape overlaps the geometry object
@see PxGeometry PxTransform
*/
static PX_INLINE bool overlap(const PxShape& shape, const PxRigidActor& actor,
const PxGeometry& otherGeom, const PxTransform& otherGeomPose)
{
return PxGeometryQuery::overlap(shape.getGeometry(), getGlobalPose(shape, actor), otherGeom, otherGeomPose);
}
/**
\brief Sweep a geometry object against the shape.
Currently only box, sphere, capsule and convex mesh shapes are supported, i.e. the swept geometry object must be one of those types.
\param[in] shape the shape
\param[in] actor the actor to which the shape is attached
\param[in] unitDir Normalized direction along which the geometry object should be swept.
\param[in] distance Sweep distance. Needs to be larger than 0.
\param[in] otherGeom The geometry object to sweep against the shape
\param[in] otherGeomPose Pose of the geometry object
\param[out] sweepHit The sweep hit information. Only valid if this method returns true.
\param[in] hitFlags Specify which properties per hit should be computed and written to result hit array. Combination of #PxHitFlag flags
\return True if the swept geometry object hits the shape
@see PxGeometry PxTransform PxSweepHit
*/
static PX_INLINE bool sweep(const PxShape& shape, const PxRigidActor& actor,
const PxVec3& unitDir, const PxReal distance, const PxGeometry& otherGeom, const PxTransform& otherGeomPose,
PxSweepHit& sweepHit, PxHitFlags hitFlags)
{
return PxGeometryQuery::sweep(unitDir, distance, otherGeom, otherGeomPose, shape.getGeometry(), getGlobalPose(shape, actor), sweepHit, hitFlags);
}
/**
\brief Retrieves the axis aligned bounding box enclosing the shape.
\return The shape's bounding box.
\param[in] shape the shape
\param[in] actor the actor to which the shape is attached
\param[in] inflation Scale factor for computed world bounds. Box extents are multiplied by this value.
@see PxBounds3
*/
static PX_INLINE PxBounds3 getWorldBounds(const PxShape& shape, const PxRigidActor& actor, float inflation=1.01f)
{
PxBounds3 bounds;
PxGeometryQuery::computeGeomBounds(bounds, shape.getGeometry(), getGlobalPose(shape, actor), 0.0f, inflation);
return bounds;
}
};
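/**
A usage sketch for the raycast helper above. shape and actor are assumed to be an attached
pair, and the ray parameters as well as processHit are illustrative:
\code
PxRaycastHit hit;
const PxU32 nbHits = PxShapeExt::raycast(*shape, *actor,
	PxVec3(0.0f, 10.0f, 0.0f), // ray origin
	PxVec3(0.0f, -1.0f, 0.0f), // normalized ray direction
	100.0f,                    // maximum distance
	PxHitFlag::ePOSITION | PxHitFlag::eNORMAL,
	1, &hit);
if(nbHits)
	processHit(hit.position, hit.distance); // placeholder for user code
\endcode
*/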
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 6,147 | C | 37.425 | 147 | 0.75484 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxParticleExt.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PARTICLE_EXT_H
#define PX_PARTICLE_EXT_H
/** \addtogroup extensions
@{
*/
#include "PxParticleSystem.h"
#include "PxParticleBuffer.h"
#include "foundation/PxArray.h"
#include "foundation/PxHashMap.h"
#include "foundation/PxUserAllocated.h"
#include "PxAttachment.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
namespace ExtGpu
{
/**
\brief Structure to define user-defined particle state when constructing a new particle system.
*/
struct PxParticleBufferDesc
{
PxVec4* positions;
PxVec4* velocities;
PxU32* phases;
PxParticleVolume* volumes;
PxU32 numActiveParticles;
PxU32 maxParticles;
PxU32 numVolumes;
PxU32 maxVolumes;
PxParticleBufferDesc() : positions(NULL), velocities(NULL), phases(NULL), volumes(NULL), numActiveParticles(0), maxParticles(0), numVolumes(0), maxVolumes(0) { }
};
/**
\brief Structure to define user-defined particle state when constructing a new particle system that includes diffuse particles.
*/
struct PxParticleAndDiffuseBufferDesc : public PxParticleBufferDesc
{
PxDiffuseParticleParams diffuseParams;
PxU32 maxDiffuseParticles;
PxU32 maxActiveDiffuseParticles;
PxParticleAndDiffuseBufferDesc() : PxParticleBufferDesc() { }
};
/**
\brief Structure to define user-defined particle state when constructing a new particle system that includes shape-matched rigid bodies.
*/
struct PxParticleRigidDesc
{
PxParticleRigidDesc() : rigidOffsets(NULL), rigidCoefficients(NULL), rigidTranslations(NULL), rigidRotations(NULL),
rigidLocalPositions(NULL), rigidLocalNormals(NULL), maxRigids(0), numActiveRigids(0) { }
PxU32* rigidOffsets;
PxReal* rigidCoefficients;
PxVec4* rigidTranslations;
PxQuat* rigidRotations;
PxVec4* rigidLocalPositions;
PxVec4* rigidLocalNormals;
PxU32 maxRigids;
PxU32 numActiveRigids;
};
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
\brief Helper class to manage PxParticleClothDesc buffers used for communicating particle based cloths to PxParticleClothBuffer.
*/
class PxParticleClothBufferHelper
{
public:
virtual void release() = 0;
virtual PxU32 getMaxCloths() const = 0; //!< \return The maximum number of cloths this PxParticleClothBufferHelper can hold.
virtual PxU32 getNumCloths() const = 0; //!< \return The current number of cloths in this PxParticleClothBufferHelper.
virtual PxU32 getMaxSprings() const = 0; //!< \return The maximum number of springs this PxParticleClothBufferHelper can hold.
virtual PxU32 getNumSprings() const = 0; //!< \return The current number of springs in this PxParticleClothBufferHelper.
virtual PxU32 getMaxTriangles() const = 0; //!< \return The maximum number of triangles this PxParticleClothBufferHelper can hold.
virtual PxU32 getNumTriangles() const = 0; //!< \return The current number of triangles in this PxParticleClothBufferHelper.
virtual PxU32 getMaxParticles() const = 0; //!< \return The maximum number of particles this PxParticleClothBufferHelper can hold.
virtual PxU32 getNumParticles() const = 0; //!< \return The current number of particles in this PxParticleClothBufferHelper.
/**
\brief Adds a PxParticleCloth to this PxParticleClothBufferHelper instance.
\param[in] particleCloth The PxParticleCloth to be added.
\param[in] triangles A pointer to the triangles
\param[in] numTriangles The number of triangles
\param[in] springs A pointer to the springs
\param[in] numSprings The number of springs
\param[in] restPositions A pointer to the particle rest positions
\param[in] numParticles The number of particles in this cloth
@see PxParticleCloth PxParticleSpring
*/
virtual void addCloth(const PxParticleCloth& particleCloth,
const PxU32* triangles, const PxU32 numTriangles,
const PxParticleSpring* springs, const PxU32 numSprings, const PxVec4* restPositions, const PxU32 numParticles) = 0;
/**
\brief Adds a cloth to this PxParticleClothBufferHelper instance.
Adds a cloth to this PxParticleClothBufferHelper instance. With this method the relevant parameters for inflatable simulation
(restVolume, pressure) can be set directly.
\param[in] blendScale This should be 1.f / (numPartitions + 1) if the springs are partitioned by the user. Otherwise this will be set during spring partitioning.
\param[in] restVolume The rest volume of the inflatable
\param[in] pressure The pressure of the inflatable. The target inflatable volume is defined as restVolume * pressure. Setting this to > 0.0 will enable inflatable simulation.
\param[in] triangles A pointer to the triangles
\param[in] numTriangles The number of triangles
\param[in] springs A pointer to the springs
\param[in] numSprings The number of springs
\param[in] restPositions A pointer to the particle rest positions
\param[in] numParticles The number of particles in this cloth
@see PxParticleSpring
*/
virtual void addCloth(const PxReal blendScale, const PxReal restVolume, const PxReal pressure,
const PxU32* triangles, const PxU32 numTriangles,
const PxParticleSpring* springs, const PxU32 numSprings,
const PxVec4* restPositions, const PxU32 numParticles) = 0;
/**
\brief Returns a PxParticleClothDesc for this PxParticleClothBufferHelper instance to be used for spring partitioning.
\return the PxParticleClothDesc.
@see PxCreateAndPopulateParticleClothBuffer, PxParticleClothPreProcessor::partitionSprings
*/
virtual PxParticleClothDesc& getParticleClothDesc() = 0;
protected:
virtual ~PxParticleClothBufferHelper() {}
};
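/**
A sketch of the intended workflow, under the assumption that triangles, springs and restPositions
for a single inflatable have been prepared elsewhere, and that a PxParticleClothPreProcessor is
available for spring partitioning (PxCreateParticleClothPreProcessor is assumed to be its creation
entry point); all variable names are illustrative:
\code
PxParticleClothBufferHelper* cloths =
	PxCreateParticleClothBufferHelper(1, numTriangles, numSprings, numParticles, cudaContextManager);
cloths->addCloth(1.0f /*blendScale, set during partitioning*/, restVolume, 1.5f /*pressure > 0 enables inflatable behavior*/,
	triangles, numTriangles, springs, numSprings, restPositions, numParticles);

PxPartitionedParticleCloth output;
PxParticleClothPreProcessor* preProcessor = PxCreateParticleClothPreProcessor(cudaContextManager);
preProcessor->partitionSprings(cloths->getParticleClothDesc(), output);

PxParticleClothBuffer* buffer = PxCreateAndPopulateParticleClothBuffer(
	bufferDesc, cloths->getParticleClothDesc(), output, cudaContextManager);
cloths->release();
\endcode
*/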
/**
\brief Helper struct that holds information about a specific mesh in a PxParticleVolumeBufferHelper.
*/
struct PxParticleVolumeMesh
{
PxU32 startIndex; //!< The index of the first triangle of this mesh in the triangle array of the PxParticleVolumeBufferHelper instance.
PxU32 count; //!< The number of triangles of this mesh.
};
/**
\brief Helper class to manage communicating PxParticleVolumes data to PxParticleBuffer.
*/
class PxParticleVolumeBufferHelper
{
public:
virtual void release() = 0;
virtual PxU32 getMaxVolumes() const = 0; //!< \return The maximum number of PxParticleVolume this PxParticleVolumeBufferHelper instance can hold.
virtual PxU32 getNumVolumes() const = 0; //!< \return The current number of PxParticleVolume in this PxParticleVolumeBufferHelper instance.
virtual PxU32 getMaxTriangles() const = 0; //!< \return The maximum number of triangles this PxParticleVolumeBufferHelper instance can hold.
virtual PxU32 getNumTriangles() const = 0; //!< \return The current number of triangles in this PxParticleVolumeBufferHelper instance.
virtual PxParticleVolume* getParticleVolumes() = 0; //!< \return A pointer to the PxParticleVolume s of this PxParticleVolumeBufferHelper instance.
virtual PxParticleVolumeMesh* getParticleVolumeMeshes() = 0; //!< \return A pointer to the PxParticleVolumeMesh structs describing the PxParticleVolumes of this PxParticleVolumeBufferHelper instance.
virtual PxU32* getTriangles() = 0; //!< \return A pointer to the triangle indices in this PxParticleVolumeBufferHelper instance.
/**
\brief Adds a PxParticleVolume with a PxParticleVolumeMesh
\param[in] volume The PxParticleVolume to be added.
\param[in] volumeMesh A PxParticleVolumeMesh that describes the volumes to be added. startIndex is the index into the triangle list of the PxParticleVolumeBufferHelper instance.
\param[in] triangles A pointer to the triangle indices of the PxParticleVolume to be added.
\param[in] numTriangles The number of triangles of the PxParticleVolume to be added.
*/
virtual void addVolume(const PxParticleVolume& volume, const PxParticleVolumeMesh& volumeMesh, const PxU32* triangles, const PxU32 numTriangles) = 0;
/**
\brief Adds a volume
\param[in] particleOffset The index of the first particle of the cloth that maps to this volume in the PxParticleClothBufferHelper instance.
\param[in] numParticles The number of particles of the cloth that maps to this volume in the PxParticleClothBufferHelper instance.
\param[in] triangles A pointer to the triangle indices of this volume.
\param[in] numTriangles The number of triangles in this volume.
*/
virtual void addVolume(const PxU32 particleOffset, const PxU32 numParticles, const PxU32* triangles, const PxU32 numTriangles) = 0;
protected:
virtual ~PxParticleVolumeBufferHelper() {}
};
/**
\brief Helper class to manage PxParticleRigidDesc buffers used for communicating particle-based rigids to PxParticleSystem.
*/
class PxParticleRigidBufferHelper
{
public:
virtual void release() = 0;
virtual PxU32 getMaxRigids() const = 0; //!< \return The maximum number of rigids this PxParticleRigidBufferHelper instance can hold.
virtual PxU32 getNumRigids() const = 0; //!< \return The current number of rigids in this PxParticleRigidBufferHelper instance.
virtual PxU32 getMaxParticles() const = 0; //!< \return The maximum number of particles this PxParticleRigidBufferHelper instance can hold.
virtual PxU32 getNumParticles() const = 0; //!< \return The current number of particles in this PxParticleRigidBufferHelper instance.
/**
\brief Adds a rigid.
\param[in] translation The world-space location of the rigid.
\param[in] rotation The world-space rotation of the rigid.
\param[in] coefficient The stiffness of the rigid.
\param[in] localPositions The particle positions in local space.
\param[in] localNormals The surface normal for all the particles in local space. Each PxVec4 has the normal in the first 3 components and the SDF in the last component.
\param[in] numParticles The number of particles in this rigid.
*/
virtual void addRigid(const PxVec3& translation, const PxQuat& rotation, const PxReal coefficient,
const PxVec4* localPositions, const PxVec4* localNormals, PxU32 numParticles) = 0;
/**
\brief Get the PxParticleRigidDesc for this buffer.
\returns A PxParticleRigidDesc.
*/
virtual PxParticleRigidDesc& getParticleRigidDesc() = 0;
protected:
virtual ~PxParticleRigidBufferHelper() {}
};
///////////////////////////////////////////////////////////////////////////////
/**
\brief Holds user-defined attachment data to attach particles to other bodies
*/
class PxParticleAttachmentBuffer : public PxUserAllocated
{
PxArray<PxParticleRigidAttachment> mAttachments;
PxArray<PxParticleRigidFilterPair> mFilters;
PxHashMap<PxRigidActor*, PxU32> mReferencedBodies;
PxArray<PxRigidActor*> mNewReferencedBodies;
PxArray<PxRigidActor*> mDestroyedRefrencedBodies;
PxParticleBuffer& mParticleBuffer;
PxParticleRigidAttachment* mDeviceAttachments;
PxParticleRigidFilterPair* mDeviceFilters;
PxU32 mNumDeviceAttachments;
PxU32 mNumDeviceFilters;
PxCudaContextManager* mCudaContextManager;
PxParticleSystem& mParticleSystem;
bool mDirty;
PX_NOCOPY(PxParticleAttachmentBuffer)
public:
PxParticleAttachmentBuffer(PxParticleBuffer& particleBuffer, PxParticleSystem& particleSystem);
~PxParticleAttachmentBuffer();
// adds attachment to attachment buffer - localPose is in actor space for attachments to all types of rigids.
void addRigidAttachment(PxRigidActor* rigidBody, const PxU32 particleID, const PxVec3& localPose, PxConeLimitedConstraint* coneLimit = NULL);
bool removeRigidAttachment(PxRigidActor* rigidBody, const PxU32 particleID);
void addRigidFilter(PxRigidActor* rigidBody, const PxU32 particleID);
bool removeRigidFilter(PxRigidActor* rigidBody, const PxU32 particleID);
void copyToDevice(CUstream stream = 0);
};
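/**
A usage sketch, assuming an existing particle buffer, particle system and rigid actor
(illustrative names):
\code
PxParticleAttachmentBuffer* attachments =
	PxCreateParticleAttachmentBuffer(*particleBuffer, *particleSystem);

// Pin particle 0 to a point given in the rigid actor's local frame.
attachments->addRigidAttachment(rigidActor, 0, PxVec3(0.0f, 0.5f, 0.0f));

// Upload the pending changes before the next simulation step.
attachments->copyToDevice();
\endcode
*/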
/**
\brief Creates a PxParticleRigidBufferHelper.
\param[in] maxRigids The maximum number of rigids this PxParticleRigidsBuffers instance should hold.
\param[in] maxParticles The maximum number of particles this PxParticleRigidBufferHelper instance should hold.
\param[in] cudaContextManager A pointer to a PxCudaContextManager.
\return A pointer to the new PxParticleRigidBufferHelper.
*/
PxParticleRigidBufferHelper* PxCreateParticleRigidBufferHelper(PxU32 maxRigids, PxU32 maxParticles, PxCudaContextManager* cudaContextManager);
/**
\brief Creates a PxParticleClothBufferHelper helper.
\param[in] maxCloths The maximum number of cloths this PxParticleClothBufferHelper should hold.
\param[in] maxTriangles The maximum number of triangles this PxParticleClothBufferHelper should hold.
\param[in] maxSprings The maximum number of springs this PxParticleClothBufferHelper should hold.
\param[in] maxParticles The maximum number of particles this PxParticleClothBufferHelper should hold.
\param[in] cudaContextManager A pointer to a PxCudaContextManager.
\return A pointer to the PxParticleClothBufferHelper that was created.
*/
PxParticleClothBufferHelper* PxCreateParticleClothBufferHelper(const PxU32 maxCloths, const PxU32 maxTriangles, const PxU32 maxSprings, const PxU32 maxParticles, PxCudaContextManager* cudaContextManager);
/**
\brief Creates a PxParticleVolumeBufferHelper.
\param[in] maxVolumes The maximum number of PxParticleVolume s this PxParticleVolumeBufferHelper instance should hold.
\param[in] maxTriangles The maximum number of triangles this PxParticleVolumeBufferHelper instance should hold.
\param[in] cudaContextManager A pointer to a PxCudaContextManager.
\return A pointer to the new PxParticleVolumeBufferHelper.
*/
PxParticleVolumeBufferHelper* PxCreateParticleVolumeBufferHelper(PxU32 maxVolumes, PxU32 maxTriangles, PxCudaContextManager* cudaContextManager);
/**
\brief Creates a particle attachment buffer
\param[in] particleBuffer The particle buffer that contains particles that should get attached to something
\param[in] particleSystem The particle system that is used to simulate the userBuffer
\return An attachment buffer ready to use
*/
PxParticleAttachmentBuffer* PxCreateParticleAttachmentBuffer(PxParticleBuffer& particleBuffer, PxParticleSystem& particleSystem);
/**
\brief Creates and populates a particle buffer
\param[in] desc The particle buffer descriptor
\param[in] cudaContextManager A cuda context manager
\return A fully populated particle buffer ready to use
*/
PxParticleBuffer* PxCreateAndPopulateParticleBuffer(const ExtGpu::PxParticleBufferDesc& desc, PxCudaContextManager* cudaContextManager);
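/**
A sketch of filling a PxParticleBufferDesc and creating a buffer from it. It assumes that
positions, velocities and phases for maxParticles particles have already been prepared in host
memory (typically pinned host memory allocated through the PxCudaContextManager), that
cudaContextManager is valid, and that particleSystem is an existing PxParticleSystem:
\code
PxParticleBufferDesc desc;
desc.positions          = positions;  // PxVec4 per particle, w = inverse mass
desc.velocities         = velocities;
desc.phases             = phases;
desc.volumes            = NULL;
desc.numActiveParticles = maxParticles;
desc.maxParticles       = maxParticles;
desc.numVolumes         = 0;
desc.maxVolumes         = 0;

PxParticleBuffer* buffer = PxCreateAndPopulateParticleBuffer(desc, cudaContextManager);
particleSystem->addParticleBuffer(buffer);
\endcode
*/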
/**
\brief Creates and populates a particle buffer that includes support for diffuse particles
\param[in] desc The particle buffer descriptor
\param[in] cudaContextManager A cuda context manager
\return A fully populated particle buffer ready to use
*/
PxParticleAndDiffuseBuffer* PxCreateAndPopulateParticleAndDiffuseBuffer(const ExtGpu::PxParticleAndDiffuseBufferDesc& desc, PxCudaContextManager* cudaContextManager);
/**
\brief Creates and populates a particle cloth buffer
\param[in] desc The particle buffer descriptor
\param[in] clothDesc The cloth descriptor
\param[out] output A cloth output object to further configure the behavior of the cloth
\param[in] cudaContextManager A cuda context manager
\return A fully populated particle cloth buffer ready to use
*/
PxParticleClothBuffer* PxCreateAndPopulateParticleClothBuffer(const ExtGpu::PxParticleBufferDesc& desc, const PxParticleClothDesc& clothDesc,
PxPartitionedParticleCloth& output, PxCudaContextManager* cudaContextManager);
/**
\brief Creates and populates a particle rigid buffer. Particle rigids are particles that try to keep their relative positions. They are slightly compressible, similar to softbodies.
\param[in] desc The particle buffer descriptor
\param[in] rigidDesc The rigid descriptor
\param[in] cudaContextManager A cuda context manager
\return A fully populated particle rigid buffer ready to use
*/
PxParticleRigidBuffer* PxCreateAndPopulateParticleRigidBuffer(const ExtGpu::PxParticleBufferDesc& desc, const ExtGpu::PxParticleRigidDesc& rigidDesc,
PxCudaContextManager* cudaContextManager);
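/**
A sketch combining the helper and the create function above. localPositions, localNormals,
numParticles, particleBufferDesc and cudaContextManager are assumed inputs:
\code
PxParticleRigidBufferHelper* rigids =
	PxCreateParticleRigidBufferHelper(1, numParticles, cudaContextManager);
rigids->addRigid(PxVec3(0.0f), PxQuat(PxIdentity), 1.0f,
	localPositions, localNormals, numParticles);

PxParticleRigidBuffer* buffer = PxCreateAndPopulateParticleRigidBuffer(
	particleBufferDesc, rigids->getParticleRigidDesc(), cudaContextManager);

rigids->release(); // the helper is only needed to build the descriptor
\endcode
*/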
} // namespace ExtGpu
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 17,570 | C | 43.483544 | 207 | 0.787991 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxTetrahedronMeshExt.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_TETRAHEDRON_MESH_EXT_H
#define PX_TETRAHEDRON_MESH_EXT_H
/** \addtogroup extensions
@{
*/
#include "foundation/PxVec3.h"
#include "foundation/PxArray.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxTetrahedronMesh;
/**
\brief utility functions for use with PxTetrahedronMesh and subclasses
*/
class PxTetrahedronMeshExt
{
public:
/** Returns the index of the tetrahedron that contains a point
\param[in] mesh The tetmesh
\param[in] point The point to find the enclosing tetrahedron for
	\param[out] bary The barycentric coordinates of the point inside the enclosing tetrahedron
	\param[in] tolerance Tolerance value used to classify points as inside if they lie exactly on a tetrahedron's surface
	\return The index of the tetrahedron containing the point, -1 if no tetrahedron contains the point
*/
static PxI32 findTetrahedronContainingPoint(const PxTetrahedronMesh* mesh, const PxVec3& point, PxVec4& bary, PxReal tolerance = 1e-6f);
/** Returns the index of the tetrahedron closest to a point
\param[in] mesh The tetmesh
\param[in] point The point to find the closest tetrahedron for
\param[out] bary The barycentric coordinates of the point in the tetrahedron
	\return The index of the tetrahedron closest to the point
*/
static PxI32 findTetrahedronClosestToPoint(const PxTetrahedronMesh* mesh, const PxVec3& point, PxVec4& bary);
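	/**
	A sketch of point location built on the two queries above; mesh and point are assumed inputs:
	\code
	PxVec4 bary;
	PxI32 tetId = PxTetrahedronMeshExt::findTetrahedronContainingPoint(mesh, point, bary);
	if(tetId < 0)
		tetId = PxTetrahedronMeshExt::findTetrahedronClosestToPoint(mesh, point, bary);
	// bary now holds the barycentric coordinates of the point with respect to tetrahedron tetId
	// and can be used to interpolate per-vertex quantities of that tetrahedron.
	\endcode
	*/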
/** Associates points with closest tetrahedra from input tetrahedral mesh
\param[in] tetMeshVertices The tetrahedral mesh vertices
	\param[in] tetMeshIndices The tetrahedral mesh indices
\param[in] pointsToEmbed The points for which the embedding should be created
	\param[out] barycentricCoordinates The output barycentric coordinates for each input point relative to its closest tetrahedron
	\param[out] tetLinks The output indices of the closest tetrahedron for each input point
*/
static void createPointsToTetrahedronMap(const PxArray<PxVec3>& tetMeshVertices, const PxArray<PxU32>& tetMeshIndices, const PxArray<PxVec3>& pointsToEmbed, PxArray<PxVec4>& barycentricCoordinates, PxArray<PxU32>& tetLinks);
/** Extracts the surface triangles of a tetmesh
The extracted triangle's vertex indices point to the vertex buffer of the tetmesh.
\param[in] tetrahedra The tetrahedra indices
\param[in] numTetrahedra The number of tetrahedra
\param[in] sixteenBitIndices If set to true, the tetrahedra indices are read as 16bit integers, otherwise 32bit integers are used
	\param[out] surfaceTriangles The resulting surface triangles
	\param[out] surfaceTriangleToTet Optional array to get the index of a tetrahedron that is adjacent to the surface triangle with the corresponding index
	\param[in] flipTriangleOrientation Reverses the orientation of the output triangles
*/
static void extractTetMeshSurface(const void* tetrahedra, PxU32 numTetrahedra, bool sixteenBitIndices, PxArray<PxU32>& surfaceTriangles, PxArray<PxU32>* surfaceTriangleToTet = NULL, bool flipTriangleOrientation = false);
/** Extracts the surface triangles of a tetmesh
The extracted triangle's vertex indices point to the vertex buffer of the tetmesh.
\param[in] mesh The mesh from which the surface shall be computed
	\param[out] surfaceTriangles The resulting surface triangles
	\param[out] surfaceTriangleToTet Optional array to get the index of a tetrahedron that is adjacent to the surface triangle with the corresponding index
	\param[in] flipTriangleOrientation Reverses the orientation of the output triangles
*/
static void extractTetMeshSurface(const PxTetrahedronMesh* mesh, PxArray<PxU32>& surfaceTriangles, PxArray<PxU32>* surfaceTriangleToTet = NULL, bool flipTriangleOrientation = false);
};
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 5,481 | C | 48.387387 | 226 | 0.77869 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxSmoothNormals.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_SMOOTH_NORMALS_H
#define PX_SMOOTH_NORMALS_H
/** \addtogroup extensions
@{
*/
#include "common/PxPhysXCommonConfig.h"
/**
\brief Builds smooth vertex normals over a mesh.
- "smooth" because smoothing groups are not supported here
- takes angles into account for correct cube normals computation
To use 32bit indices pass a pointer in dFaces and set wFaces to zero. Alternatively pass a pointer to
wFaces and set dFaces to zero.
\param[in] nbTris Number of triangles
\param[in] nbVerts Number of vertices
\param[in] verts Array of vertices
\param[in] dFaces Array of dword triangle indices, or null
\param[in] wFaces Array of word triangle indices, or null
\param[out] normals Array of computed normals (assumes nbVerts vectors)
\param[in] flip Flips the normals or not
\return True on success.
*/
PX_C_EXPORT bool PX_CALL_CONV PxBuildSmoothNormals(physx::PxU32 nbTris, physx::PxU32 nbVerts, const physx::PxVec3* verts,
const physx::PxU32* dFaces, const physx::PxU16* wFaces, physx::PxVec3* normals, bool flip);
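/**
A usage sketch with 32-bit indices (wFaces is therefore NULL); nbTris, nbVerts, verts and
indices32 are assumed inputs:
\code
std::vector<physx::PxVec3> normals(nbVerts); // one normal per vertex
const bool ok = PxBuildSmoothNormals(nbTris, nbVerts, verts,
	indices32,      // PxU32 triangle indices
	NULL,           // no 16-bit indices
	normals.data(), // receives the computed normals
	false);         // do not flip the normals
\endcode
*/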
/** @} */
#endif
| 2,761 | C | 45.033333 | 121 | 0.760594 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxBroadPhaseExt.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_BROAD_PHASE_EXT_H
#define PX_BROAD_PHASE_EXT_H
/** \addtogroup extensions
@{
*/
#include "PxPhysXConfig.h"
#include "common/PxPhysXCommonConfig.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxBroadPhaseExt
{
public:
/**
\brief Creates regions for PxSceneDesc, from a global box.
This helper simply subdivides the given global box into a 2D grid of smaller boxes. Each one of those smaller boxes
is a region of interest for the broadphase. There are nbSubdiv*nbSubdiv regions in the 2D grid. The function does not
subdivide along the given up axis.
This is the simplest setup one can use with PxBroadPhaseType::eMBP. A more sophisticated setup would try to cover
the game world with a non-uniform set of regions (i.e. not just a grid).
\param[out] regions Regions computed from the input global box
\param[in] globalBounds World-space box covering the game world
\param[in] nbSubdiv Grid subdivision level. The function will create nbSubdiv*nbSubdiv regions.
\param[in] upAxis Up axis (0 for X, 1 for Y, 2 for Z).
\return number of regions written out to the 'regions' array
@see PxSceneDesc PxBroadPhaseType
*/
static PxU32 createRegionsFromWorldBounds(PxBounds3* regions, const PxBounds3& globalBounds, PxU32 nbSubdiv, PxU32 upAxis=1);
};
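/**
A sketch of feeding the generated regions to a scene created with PxBroadPhaseType::eMBP.
The world bounds are arbitrary, and registering the regions through PxScene::addBroadPhaseRegion
with one PxBroadPhaseRegion per box is an assumption about the surrounding setup:
\code
PxBounds3 regions[16];
const PxBounds3 world(PxVec3(-1000.0f, -100.0f, -1000.0f), PxVec3(1000.0f, 100.0f, 1000.0f));
const PxU32 nbRegions = PxBroadPhaseExt::createRegionsFromWorldBounds(regions, world, 4, 1);

for(PxU32 i = 0; i < nbRegions; i++)
{
	PxBroadPhaseRegion region;
	region.mBounds   = regions[i];
	region.mUserData = NULL;
	scene->addBroadPhaseRegion(region);
}
\endcode
*/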
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 3,047 | C | 40.189189 | 126 | 0.761405 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxRevoluteJoint.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_REVOLUTE_JOINT_H
#define PX_REVOLUTE_JOINT_H
/** \addtogroup extensions
@{
*/
#include "extensions/PxJoint.h"
#include "extensions/PxJointLimit.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxRevoluteJoint;
/**
\brief Create a revolute joint.
\param[in] physics The physics SDK
\param[in] actor0 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localFrame0 The position and orientation of the joint relative to actor0
\param[in] actor1 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localFrame1 The position and orientation of the joint relative to actor1
@see PxRevoluteJoint
*/
PxRevoluteJoint* PxRevoluteJointCreate(PxPhysics& physics, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1);
/**
\brief Flags specific to the Revolute Joint.
@see PxRevoluteJoint
*/
struct PxRevoluteJointFlag
{
enum Enum
{
eLIMIT_ENABLED = 1<<0, //!< enable the limit
eDRIVE_ENABLED = 1<<1, //!< enable the drive
eDRIVE_FREESPIN = 1<<2 //!< if the existing velocity is beyond the drive velocity, do not add force
};
};
typedef PxFlags<PxRevoluteJointFlag::Enum, PxU16> PxRevoluteJointFlags;
PX_FLAGS_OPERATORS(PxRevoluteJointFlag::Enum, PxU16)
/**
\brief A joint which behaves in a similar way to a hinge or axle.
A hinge joint removes all but a single rotational degree of freedom from two objects.
The axis along which the two bodies may rotate is specified with a point and a direction
vector.
The position of the hinge on each body is specified by the origin of the body's joint frame.
The axis of the hinge is specified as the direction of the x-axis in the body's joint frame.
\image html revoluteJoint.png
A revolute joint can be given a motor, so that it can apply a force to rotate the attached actors.
It may also be given a limit, to restrict the revolute motion to within a certain range. In
addition, the bodies may be projected together if the distance or angle between them exceeds
a given threshold.
Projection, drive and limits are activated by setting the appropriate flags on the joint.
@see PxRevoluteJointCreate() PxJoint
*/
class PxRevoluteJoint : public PxJoint
{
public:
/**
\brief return the angle of the joint, in the range (-2*Pi, 2*Pi]
*/
virtual PxReal getAngle() const = 0;
/**
\brief return the velocity of the joint
*/
virtual PxReal getVelocity() const = 0;
/**
\brief set the joint limit parameters.
The limit is activated using the flag PxRevoluteJointFlag::eLIMIT_ENABLED
The limit angle range is (-2*Pi, 2*Pi).
\param[in] limits The joint limit parameters.
@see PxJointAngularLimitPair getLimit()
*/
virtual void setLimit(const PxJointAngularLimitPair& limits) = 0;
/**
\brief get the joint limit parameters.
\return the joint limit parameters
@see PxJointAngularLimitPair setLimit()
*/
virtual PxJointAngularLimitPair getLimit() const = 0;
/**
\brief set the target velocity for the drive model.
The motor will only be able to reach this velocity if the drive force limit (see setDriveForceLimit()) is sufficiently large.
If the joint is spinning faster than this velocity, the motor will actually try to brake
(see PxRevoluteJointFlag::eDRIVE_FREESPIN.)
The sign of this variable determines the rotation direction, with positive values going
the same way as positive joint angles. Setting a very large target velocity may cause
undesirable results.
\param[in] velocity the drive target velocity
\param[in] autowake Whether to wake up the joint's attached rigid bodies if they are asleep.
<b>Range:</b> (-PX_MAX_F32, PX_MAX_F32)<br>
<b>Default:</b> 0.0
@see PxRevoluteJointFlag::eDRIVE_FREESPIN
*/
virtual void setDriveVelocity(PxReal velocity, bool autowake = true) = 0;
/**
\brief gets the target velocity for the drive model.
\return the drive target velocity
@see setDriveVelocity()
*/
virtual PxReal getDriveVelocity() const = 0;
/**
\brief sets the maximum torque the drive can exert.
The value set here may be used either as an impulse limit or a force limit, depending on the flag PxConstraintFlag::eDRIVE_LIMITS_ARE_FORCES
<b>Range:</b> [0, PX_MAX_F32)<br>
<b>Default:</b> PX_MAX_F32
@see setDriveVelocity()
*/
virtual void setDriveForceLimit(PxReal limit) = 0;
/**
\brief gets the maximum torque the drive can exert.
\return the torque limit
@see setDriveVelocity()
*/
virtual PxReal getDriveForceLimit() const = 0;
/**
\brief sets the gear ratio for the drive.
When setting up the drive constraint, the velocity of the first actor is scaled by this value, and its response to drive torque is scaled down.
So if the drive target velocity is zero, the second actor will be driven to the velocity of the first scaled by the gear ratio
<b>Range:</b> [0, PX_MAX_F32)<br>
<b>Default:</b> 1.0
\param[in] ratio the drive gear ratio
@see getDriveGearRatio()
*/
virtual void setDriveGearRatio(PxReal ratio) = 0;
/**
\brief gets the gear ratio.
\return the drive gear ratio
@see setDriveGearRatio()
*/
virtual PxReal getDriveGearRatio() const = 0;
/**
\brief sets the flags specific to the Revolute Joint.
<b>Default</b> PxRevoluteJointFlags(0)
\param[in] flags The joint flags.
@see PxRevoluteJointFlag setFlag() getFlags()
*/
virtual void setRevoluteJointFlags(PxRevoluteJointFlags flags) = 0;
/**
\brief sets a single flag specific to a Revolute Joint.
\param[in] flag The flag to set or clear.
\param[in] value the value to which to set the flag
@see PxRevoluteJointFlag, getFlags() setFlags()
*/
virtual void setRevoluteJointFlag(PxRevoluteJointFlag::Enum flag, bool value) = 0;
/**
\brief gets the flags specific to the Revolute Joint.
\return the joint flags
@see PxRevoluteJoint::flags, PxRevoluteJointFlag setFlag() setFlags()
*/
virtual PxRevoluteJointFlags getRevoluteJointFlags() const = 0;
/**
\brief Returns string name of PxRevoluteJoint, used for serialization
*/
virtual const char* getConcreteTypeName() const { return "PxRevoluteJoint"; }
protected:
//serialization
/**
\brief Constructor
*/
PX_INLINE PxRevoluteJoint(PxType concreteType, PxBaseFlags baseFlags) : PxJoint(concreteType, baseFlags) {}
/**
\brief Deserialization constructor
*/
PX_INLINE PxRevoluteJoint(PxBaseFlags baseFlags) : PxJoint(baseFlags) {}
/**
\brief Returns whether a given type name matches with the type of this instance
*/
virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxRevoluteJoint", PxJoint); }
//~serialization
};
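/*
	Usage sketch (added for illustration, not part of the original header). It configures a limited,
	driven hinge; 'physics', 'actor0', 'actor1' and the local frames are placeholder assumptions.

	\code
	PxRevoluteJoint* joint = PxRevoluteJointCreate(physics, actor0, localFrame0, actor1, localFrame1);
	joint->setLimit(PxJointAngularLimitPair(-PxPi/4, PxPi/4));				// limit range in radians
	joint->setRevoluteJointFlag(PxRevoluteJointFlag::eLIMIT_ENABLED, true);
	joint->setDriveVelocity(1.0f);											// radians per second
	joint->setRevoluteJointFlag(PxRevoluteJointFlag::eDRIVE_ENABLED, true);
	\endcode
*/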
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 8,448 | C | 30.0625 | 167 | 0.745147 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxContactJoint.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_CONTACT_JOINT_H
#define PX_CONTACT_JOINT_H
#include "extensions/PxJoint.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxContactJoint;
/**
\brief Create a contact Joint.
\param[in] physics The physics SDK
\param[in] actor0 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localFrame0 The position and orientation of the joint relative to actor0
\param[in] actor1 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localFrame1 The position and orientation of the joint relative to actor1
@see PxContactJoint
*/
PX_DEPRECATED PxContactJoint* PxContactJointCreate(PxPhysics& physics, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1);
struct PX_DEPRECATED PxJacobianRow
{
PxVec3 linear0;
PxVec3 linear1;
PxVec3 angular0;
PxVec3 angular1;
PxJacobianRow(){}
PxJacobianRow(const PxVec3& lin0, const PxVec3& lin1, const PxVec3& ang0, const PxVec3& ang1) :
linear0(lin0), linear1(lin1), angular0(ang0), angular1(ang1)
{
}
void operator *= (const PxReal scale)
{
linear0 *= scale;
linear1 *= scale;
angular0 *= scale;
angular1 *= scale;
}
PxJacobianRow operator * (const PxReal scale) const
{
return PxJacobianRow(linear0*scale, linear1*scale, angular0*scale, angular1*scale);
}
};
/**
\brief PxContactJoint is best viewed as a helper function for the inverse dynamics of articulations. The expected use case
is to use PxContactJoint::getConstraint() in conjunction with PxArticulationReducedCoordinate::addLoopJoint().
@see PxContactJointCreate PxJoint
*/
PX_DEPRECATED class PxContactJoint : public PxJoint
{
public:
/**
\brief Set the current contact of the joint
*/
virtual void setContact(const PxVec3& contact) = 0;
/**
\brief Set the current contact normal of the joint
*/
virtual void setContactNormal(const PxVec3& contactNormal) = 0;
/**
\brief Set the current penetration of the joint
*/
virtual void setPenetration(const PxReal penetration) = 0;
/**
\brief Return the current contact of the joint
*/
virtual PxVec3 getContact() const = 0;
/**
\brief Return the current contact normal of the joint
*/
virtual PxVec3 getContactNormal() const = 0;
/**
\brief Return the current penetration value of the joint
*/
virtual PxReal getPenetration() const = 0;
virtual PxReal getRestitution() const = 0;
virtual void setRestitution(const PxReal restitution) = 0;
virtual PxReal getBounceThreshold() const = 0;
virtual void setBounceThreshold(const PxReal bounceThreshold) = 0;
/**
\brief Returns string name of PxContactJoint, used for serialization
*/
virtual const char* getConcreteTypeName() const { return "PxContactJoint"; }
virtual void computeJacobians(PxJacobianRow* jacobian) const = 0;
virtual PxU32 getNbJacobianRows() const = 0;
protected:
//serialization
/**
\brief Constructor
*/
PX_INLINE PxContactJoint(PxType concreteType, PxBaseFlags baseFlags) : PxJoint(concreteType, baseFlags) {}
/**
\brief Deserialization constructor
*/
PX_INLINE PxContactJoint(PxBaseFlags baseFlags) : PxJoint(baseFlags) {}
/**
\brief Returns whether a given type name matches with the type of this instance
*/
virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxContactJoint", PxJoint); }
//~serialization
};
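/*
	Usage sketch (added for illustration, not part of the original header). It retrieves the jacobian
	rows of a deprecated contact joint for use with articulation inverse dynamics; 'contactJoint' is
	assumed to be a valid PxContactJoint*.

	\code
	const PxU32 nbRows = contactJoint->getNbJacobianRows();
	PxArray<PxJacobianRow> rows(nbRows);			// requires foundation/PxArray.h
	contactJoint->computeJacobians(rows.begin());
	\endcode
*/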
#if !PX_DOXYGEN
}
#endif
#endif
| 5,340 | C | 31.766871 | 180 | 0.733146 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxD6Joint.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_D6_JOINT_H
#define PX_D6_JOINT_H
/** \addtogroup extensions
@{
*/
#include "extensions/PxJoint.h"
#include "extensions/PxJointLimit.h"
#include "foundation/PxFlags.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxD6Joint;
/**
\brief Create a D6 joint.
\param[in] physics The physics SDK
\param[in] actor0 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localFrame0 The position and orientation of the joint relative to actor0
\param[in] actor1 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localFrame1 The position and orientation of the joint relative to actor1
@see PxD6Joint
*/
PxD6Joint* PxD6JointCreate(PxPhysics& physics, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1);
/**
\brief Used to specify one of the degrees of freedom of a D6 joint.
@see PxD6Joint
*/
struct PxD6Axis
{
enum Enum
{
eX = 0, //!< motion along the X axis
eY = 1, //!< motion along the Y axis
eZ = 2, //!< motion along the Z axis
eTWIST = 3, //!< motion around the X axis
eSWING1 = 4, //!< motion around the Y axis
eSWING2 = 5, //!< motion around the Z axis
eCOUNT = 6
};
};
/**
\brief Used to specify the range of motions allowed for a degree of freedom in a D6 joint.
@see PxD6Joint
*/
struct PxD6Motion
{
enum Enum
{
eLOCKED, //!< The DOF is locked, it does not allow relative motion.
eLIMITED, //!< The DOF is limited, it only allows motion within a specific range.
eFREE //!< The DOF is free and has its full range of motion.
};
};
/**
\brief Used to specify which axes of a D6 joint are driven.
Each drive is an implicit force-limited damped spring:
force = spring * (target position - position) + damping * (targetVelocity - velocity)
Alternatively, the spring may be configured to generate a specified acceleration instead of a force.
A linear axis is affected by drive only if the corresponding drive flag is set. There are two possible models
for angular drive: swing/twist, which may be used to drive one or more angular degrees of freedom, or slerp,
which may only be used to drive all three angular degrees simultaneously.
@see PxD6Joint
*/
struct PxD6Drive
{
enum Enum
{
eX = 0, //!< drive along the X-axis
eY = 1, //!< drive along the Y-axis
eZ = 2, //!< drive along the Z-axis
eSWING = 3, //!< drive of displacement from the X-axis
eTWIST = 4, //!< drive of the displacement around the X-axis
eSLERP = 5, //!< drive of all three angular degrees along a SLERP-path
eCOUNT = 6
};
};
/**
\brief flags for configuring the drive model of a PxD6Joint
@see PxD6JointDrive PxD6Joint
*/
struct PxD6JointDriveFlag
{
enum Enum
{
eACCELERATION = 1 //!< drive spring is for the acceleration at the joint (rather than the force)
};
};
typedef PxFlags<PxD6JointDriveFlag::Enum, PxU32> PxD6JointDriveFlags;
PX_FLAGS_OPERATORS(PxD6JointDriveFlag::Enum, PxU32)
/**
\brief parameters for configuring the drive model of a PxD6Joint
@see PxD6Joint
*/
class PxD6JointDrive : public PxSpring
{
public:
PxReal forceLimit; //!< the force limit of the drive - may be an impulse or a force depending on PxConstraintFlag::eDRIVE_LIMITS_ARE_FORCES
PxD6JointDriveFlags flags; //!< the joint drive flags
/**
\brief default constructor for PxD6JointDrive.
*/
PxD6JointDrive(): PxSpring(0,0), forceLimit(PX_MAX_F32), flags(0) {}
/**
\brief constructor for a PxD6JointDrive.
\param[in] driveStiffness The stiffness of the drive spring.
\param[in] driveDamping The damping of the drive spring
\param[in] driveForceLimit The maximum impulse or force that can be exerted by the drive
\param[in] isAcceleration Whether the drive is an acceleration drive or a force drive
*/
PxD6JointDrive(PxReal driveStiffness, PxReal driveDamping, PxReal driveForceLimit, bool isAcceleration = false)
: PxSpring(driveStiffness, driveDamping)
, forceLimit(driveForceLimit)
, flags(isAcceleration?PxU32(PxD6JointDriveFlag::eACCELERATION) : 0)
{}
/**
\brief returns true if the drive is valid
*/
bool isValid() const
{
return PxIsFinite(stiffness) && stiffness>=0 &&
PxIsFinite(damping) && damping >=0 &&
PxIsFinite(forceLimit) && forceLimit >=0;
}
};
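/*
	Usage sketch (added for illustration, not part of the original header): an acceleration-based
	drive with stiffness 100, damping 10 and an unlimited force, ready to be passed to
	PxD6Joint::setDrive(). The numeric values are arbitrary.

	\code
	const PxD6JointDrive twistDrive(100.0f, 10.0f, PX_MAX_F32, true);
	\endcode
*/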
/**
\brief A D6 joint is a general constraint between two actors.
It allows the application to individually define the linear and rotational degrees of freedom,
and also to configure a variety of limits and driven degrees of freedom.
By default all degrees of freedom are locked. So to create a prismatic joint with free motion
along the x-axis:
\code
...
joint->setMotion(PxD6Axis::eX, PxD6Motion::eFREE);
...
\endcode
Or a revolute joint with free rotation around the x-axis:
\code
...
joint->setMotion(PxD6Axis::eTWIST, PxD6Motion::eFREE);
...
\endcode
Degrees of freedom may also be set to limited instead of locked.
There are two different kinds of linear limits available. The first kind is a single limit value
for all linear degrees of freedom, which may act as a linear, circular, or spherical limit depending
on which degrees of freedom are limited. This is similar to a distance limit. The second kind
supports a pair of limit values for each linear axis, which can be used to implement a traditional
prismatic joint for example.
If the twist degree of freedom is limited, it supports upper and lower limits. The two swing degrees
of freedom are limited with a cone limit.
@see PxD6JointCreate() PxJoint
*/
class PxD6Joint : public PxJoint
{
public:
/**
\brief Set the motion type around the specified axis.
Each axis may independently specify that the degree of freedom is locked (blocking relative movement
along or around this axis), limited by the corresponding limit, or free.
\param[in] axis the axis around which motion is specified
\param[in] type the motion type around the specified axis
<b>Default:</b> all degrees of freedom are locked
@see getMotion() PxD6Axis PxD6Motion
*/
virtual void setMotion(PxD6Axis::Enum axis, PxD6Motion::Enum type) = 0;
/**
\brief Get the motion type around the specified axis.
@see setMotion() PxD6Axis PxD6Motion
\param[in] axis the degree of freedom around which the motion type is specified
\return the motion type around the specified axis
*/
virtual PxD6Motion::Enum getMotion(PxD6Axis::Enum axis) const = 0;
/**
\brief get the twist angle of the joint, in the range (-2*Pi, 2*Pi]
*/
virtual PxReal getTwistAngle() const = 0;
/**
\brief get the twist angle of the joint
\deprecated Use getTwistAngle instead. Deprecated since PhysX version 4.0
*/
PX_DEPRECATED PX_FORCE_INLINE PxReal getTwist() const { return getTwistAngle(); }
/**
\brief get the swing angle of the joint from the Y axis
*/
virtual PxReal getSwingYAngle() const = 0;
/**
\brief get the swing angle of the joint from the Z axis
*/
virtual PxReal getSwingZAngle() const = 0;
/**
\brief Set the distance limit for the joint.
A single limit constraints all linear limited degrees of freedom, forming a linear, circular
or spherical constraint on motion depending on the number of limited degrees. This is similar
to a distance limit.
\param[in] limit the distance limit structure
@see getDistanceLimit() PxJointLinearLimit
*/
virtual void setDistanceLimit(const PxJointLinearLimit& limit) = 0;
/**
\brief Get the distance limit for the joint.
\return the distance limit structure
@see setDistanceLimit() PxJointLinearLimit
*/
virtual PxJointLinearLimit getDistanceLimit() const = 0;
/**
\deprecated Use setDistanceLimit instead. Deprecated since PhysX version 4.0
*/
PX_DEPRECATED PX_FORCE_INLINE void setLinearLimit(const PxJointLinearLimit& limit) { setDistanceLimit(limit); }
/**
\deprecated Use getDistanceLimit instead. Deprecated since PhysX version 4.0
*/
PX_DEPRECATED PX_FORCE_INLINE PxJointLinearLimit getLinearLimit() const { return getDistanceLimit(); }
/**
\brief Set the linear limit for a given linear axis.
This function extends the previous setDistanceLimit call with the following features:
- there can be a different limit for each linear axis
- each limit is defined by two values, i.e. it can now be asymmetric
This can be used to create prismatic joints similar to PxPrismaticJoint, or point-in-quad joints,
or point-in-box joints.
\param[in] axis The limited linear axis (must be PxD6Axis::eX, PxD6Axis::eY or PxD6Axis::eZ)
\param[in] limit The linear limit pair structure
@see getLinearLimit()
*/
virtual void setLinearLimit(PxD6Axis::Enum axis, const PxJointLinearLimitPair& limit) = 0;
/**
\brief Get the linear limit for a given linear axis.
\param[in] axis The limited linear axis (must be PxD6Axis::eX, PxD6Axis::eY or PxD6Axis::eZ)
\return the linear limit pair structure from desired axis
@see setLinearLimit() PxJointLinearLimit
*/
virtual PxJointLinearLimitPair getLinearLimit(PxD6Axis::Enum axis) const = 0;
/**
\brief Set the twist limit for the joint.
The twist limit controls the range of motion around the twist axis.
The limit angle range is (-2*Pi, 2*Pi).
\param[in] limit the twist limit structure
@see getTwistLimit() PxJointAngularLimitPair
*/
virtual void setTwistLimit(const PxJointAngularLimitPair& limit) = 0;
/**
\brief Get the twist limit for the joint.
\return the twist limit structure
@see setTwistLimit() PxJointAngularLimitPair
*/
virtual PxJointAngularLimitPair getTwistLimit() const = 0;
/**
\brief Set the swing cone limit for the joint.
The cone limit is used if either or both swing axes are limited. The extents are
symmetrical and measured in the frame of the parent. If only one swing degree of freedom
is limited, the corresponding value from the cone limit defines the limit range.
\param[in] limit the cone limit structure
@see getSwingLimit() PxJointLimitCone
*/
virtual void setSwingLimit(const PxJointLimitCone& limit) = 0;
/**
\brief Get the cone limit for the joint.
\return the swing limit structure
@see setSwingLimit() PxJointLimitCone
*/
virtual PxJointLimitCone getSwingLimit() const = 0;
/**
\brief Set a pyramidal swing limit for the joint.
The pyramid limits will only be used in the following cases:
- both swing Y and Z are limited. The limit shape is then a pyramid.
- Y is limited and Z is locked, or vice versa. The limit shape is an asymmetric angular section, similar to
what is supported for the twist axis.
The remaining cases (Y limited and Z is free, or vice versa) are not supported.
\param[in] limit the pyramid limit structure
@see getPyramidSwingLimit() PxJointLimitPyramid
*/
virtual void setPyramidSwingLimit(const PxJointLimitPyramid& limit) = 0;
/**
\brief Get the pyramidal swing limit for the joint.
\return the swing limit structure
@see setPyramidSwingLimit() PxJointLimitPyramid
*/
virtual PxJointLimitPyramid getPyramidSwingLimit() const = 0;
/**
\brief Set the drive parameters for the specified drive type.
\param[in] index the type of drive being specified
\param[in] drive the drive parameters
@see getDrive() PxD6JointDrive
<b>Default</b> The default drive spring and damping values are zero, the force limit is zero, and no flags are set.
*/
virtual void setDrive(PxD6Drive::Enum index, const PxD6JointDrive& drive) = 0;
/**
\brief Get the drive parameters for the specified drive type.
\param[in] index the specified drive type
@see setDrive() PxD6JointDrive
*/
virtual PxD6JointDrive getDrive(PxD6Drive::Enum index) const = 0;
/**
\brief Set the drive goal pose
The goal is relative to the constraint frame of actor[0]
<b>Default</b> the identity transform
\param[in] pose The goal drive pose if positional drive is in use.
\param[in] autowake If true and the attached actors are in a scene, this call wakes them up and increases their
wake counters to #PxSceneDesc::wakeCounterResetValue if the counter value is below the reset value.
@see getDrivePosition()
*/
virtual void setDrivePosition(const PxTransform& pose, bool autowake = true) = 0;
/**
\brief Get the drive goal pose.
@see setDrivePosition()
*/
virtual PxTransform getDrivePosition() const = 0;
/**
\brief Set the target goal velocity for drive.
The velocity is measured in the constraint frame of actor[0]
\param[in] linear The goal velocity for linear drive
\param[in] angular The goal velocity for angular drive
\param[in] autowake If true and the attached actors are in a scene, this call wakes them up and increases their
wake counters to #PxSceneDesc::wakeCounterResetValue if the counter value is below the reset value.
@see getDriveVelocity()
*/
virtual void setDriveVelocity(const PxVec3& linear, const PxVec3& angular, bool autowake = true) = 0;
/**
\brief Get the target goal velocity for joint drive.
\param[out] linear The goal velocity for linear drive
\param[out] angular The goal velocity for angular drive
@see setDriveVelocity()
*/
virtual void getDriveVelocity(PxVec3& linear, PxVec3& angular) const = 0;
/**
\brief Returns string name of PxD6Joint, used for serialization
*/
virtual const char* getConcreteTypeName() const { return "PxD6Joint"; }
protected:
//serialization
/**
\brief Constructor
*/
PX_INLINE PxD6Joint(PxType concreteType, PxBaseFlags baseFlags) : PxJoint(concreteType, baseFlags) {}
/**
\brief Deserialization constructor
*/
PX_INLINE PxD6Joint(PxBaseFlags baseFlags) : PxJoint(baseFlags) {}
/**
\brief Returns whether a given type name matches with the type of this instance
*/
virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxD6Joint", PxJoint); }
//~serialization
};
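/*
	Usage sketch (added for illustration, not part of the original header). It builds a driven,
	limited hinge out of a D6 joint; 'physics', the actors and the local frames are placeholder
	assumptions.

	\code
	PxD6Joint* d6 = PxD6JointCreate(physics, actor0, localFrame0, actor1, localFrame1);
	d6->setMotion(PxD6Axis::eTWIST, PxD6Motion::eLIMITED);
	d6->setTwistLimit(PxJointAngularLimitPair(-PxPi/4, PxPi/4));
	d6->setDrive(PxD6Drive::eTWIST, PxD6JointDrive(100.0f, 10.0f, PX_MAX_F32, true));
	d6->setDriveVelocity(PxVec3(0.0f), PxVec3(1.0f, 0.0f, 0.0f));	// spin around the twist (x) axis
	\endcode
*/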
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 15,691 | C | 30.70101 | 155 | 0.739468 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxSamplingExt.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_SAMPLING_EXT_H
#define PX_SAMPLING_EXT_H
/** \addtogroup extensions
@{
*/
#include "foundation/PxArray.h"
#include "geometry/PxGeometry.h"
#include "foundation/PxUserAllocated.h"
#include "geometry/PxSimpleTriangleMesh.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief utility functions to sample vertices on or inside a triangle mesh or other geometries
*/
class PxSamplingExt
{
public:
/** Computes samples on a triangle mesh's surface that are not closer to each other than a given distance. Optionally the mesh's interior can be filled with samples as well.
\param[in] mesh The triangle mesh
\param[in] r The closest distance two surface samples are allowed to have
\param[out] result Equally distributed samples on the mesh's surface and, if volume sampling is enabled, inside the mesh
\param[in] rVolume The average distance of samples inside the mesh. If set to zero, samples will only be placed on the mesh's surface
\param[out] triangleIds Optional output containing the index of the triangle for all samples on the mesh's surface. The array will contain fewer entries than output vertices if volume samples are active, since volume samples are not on the surface.
\param[out] barycentricCoordinates Optional output containing the barycentric coordinates for all samples on the mesh's surface. The array will contain fewer entries than output vertices if volume samples are active, since volume samples are not on the surface.
\param[in] axisAlignedBox A box that limits the space where samples can get created
\param[in] boxOrientation The orientation of the box that limits the space where samples can get created
\param[in] maxNumSamples If larger than zero, the sampler will stop when the sample count reaches maxNumSamples
\param[in] numSampleAttemptsAroundPoint Number of repetitions the underlying algorithm performs to find a new valid sample that matches all criteria like minimal distance to existing samples etc.
\return Returns true if the sampling was successful and false if there was a problem. Usually an internal overflow is the problem for very big meshes or very small sampling radii.
*/
static bool poissonSample(const PxSimpleTriangleMesh& mesh, PxReal r, PxArray<PxVec3>& result, PxReal rVolume = 0.0f, PxArray<PxI32>* triangleIds = NULL, PxArray<PxVec3>* barycentricCoordinates = NULL,
const PxBounds3* axisAlignedBox = NULL, const PxQuat* boxOrientation = NULL, PxU32 maxNumSamples = 0, PxU32 numSampleAttemptsAroundPoint = 30);
/** Computes samples on a geometry's surface that are not closer to each other than a given distance.
\param[in] geometry The geometry that defines the surface on which the samples get created
\param[in] transform The geometry's global pose
\param[in] worldBounds The geometry's bounding box
\param[in] r The closest distance two surface samples are allowed to have
\param[out] result Equally distributed samples on the geometry's surface and, if volume sampling is enabled, inside it
\param[in] rVolume The average distance of samples inside the mesh. If set to zero, samples will only be placed on the mesh's surface
\param[in] axisAlignedBox A box that limits the space where samples can get created
\param[in] boxOrientation The orientation of the box that limits the space where samples can get created
\param[in] maxNumSamples If larger than zero, the sampler will stop when the sample count reaches maxNumSamples
\param[in] numSampleAttemptsAroundPoint Number of repetitions the underlying algorithm performs to find a new valid sample that matches all criteria like minimal distance to existing samples etc.
\return Returns true if the sampling was successful and false if there was a problem. Usually an internal overflow is the problem for very big meshes or very small sampling radii.
*/
static bool poissonSample(const PxGeometry& geometry, const PxTransform& transform, const PxBounds3& worldBounds, PxReal r, PxArray<PxVec3>& result, PxReal rVolume = 0.0f,
const PxBounds3* axisAlignedBox = NULL, const PxQuat* boxOrientation = NULL, PxU32 maxNumSamples = 0, PxU32 numSampleAttemptsAroundPoint = 30);
};
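/*
	Usage sketch (added for illustration, not part of the original header). It samples the surface of
	a raw triangle soup with a minimum spacing of 0.1; 'vertices', 'numVertices', 'indices' and
	'numTriangles' are assumed to describe an existing mesh with 32-bit indices.

	\code
	PxSimpleTriangleMesh mesh;
	mesh.points.count = numVertices;
	mesh.points.stride = sizeof(PxVec3);
	mesh.points.data = vertices;
	mesh.triangles.count = numTriangles;
	mesh.triangles.stride = 3 * sizeof(PxU32);
	mesh.triangles.data = indices;

	PxArray<PxVec3> samples;
	const bool ok = PxSamplingExt::poissonSample(mesh, 0.1f, samples);
	\endcode
*/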
/**
\brief Sampler to generate Poisson Samples locally on a triangle mesh or a shape. For every local addition of new samples, an individual sampling density can be used.
*/
class PxPoissonSampler : public PxUserAllocated
{
public:
/** Sets the sampling radius
\param[in] samplingRadius The closest distance two surface samples are allowed to have. Changing the sampling radius is a fairly expensive operation.
\return Returns true if the sampling was successful and false if there was a problem. Usually an internal overflow is the problem for very big meshes or very small sampling radii.
*/
virtual bool setSamplingRadius(PxReal samplingRadius) = 0;
/** Adds samples
\param[in] samples The samples to add. Adding samples is a fairly expensive operation.
*/
virtual void addSamples(const PxArray<PxVec3>& samples) = 0;
/** Removes samples
\param[in] samples The samples to remove. Removing samples is a fairly expensive operation.
\return Returns the number of removed samples. If some samples were not found, then the number of actually removed samples will be smaller than the number of samples requested to remove
*/
virtual PxU32 removeSamples(const PxArray<PxVec3>& samples) = 0;
/** Adds new Poisson Samples inside the sphere specified
\param[in] sphereCenter The sphere's center. Used to define the region where new samples get added.
\param[in] sphereRadius The sphere's radius. Used to define the region where new samples get added.
\param[in] createVolumeSamples If set to true, samples will also get generated inside of the mesh, not just on its surface.
*/
virtual void addSamplesInSphere(const PxVec3& sphereCenter, PxReal sphereRadius, bool createVolumeSamples = false) = 0;
/** Adds new Poisson Samples inside the box specified
\param[in] axisAlignedBox The axis aligned bounding box. Used to define the region where new samples get added.
\param[in] boxOrientation The orientation making an oriented bounding box out of the axis aligned one. Used to define the region where new samples get added.
\param[in] createVolumeSamples If set to true, samples will also get generated inside of the mesh, not just on its surface.
*/
virtual void addSamplesInBox(const PxBounds3& axisAlignedBox, const PxQuat& boxOrientation, bool createVolumeSamples = false) = 0;
/** Gets the Poisson Samples
\return Returns the generated Poisson Samples
*/
virtual const PxArray<PxVec3>& getSamples() const = 0;
virtual ~PxPoissonSampler() { }
};
/** Creates a shape sampler
\param[in] geometry The shape that defines the surface on which the samples get created
\param[in] transform The shape's global pose
\param[in] worldBounds The shapes bounding box
\param[in] initialSamplingRadius The closest distance two surface samples are allowed to have
\param[in] numSampleAttemptsAroundPoint Number of repetitions the underlying algorithm performs to find a new valid sample that matches all criteria like minimal distance to existing samples etc.
\return Returns the sampler
*/
PxPoissonSampler* PxCreateShapeSampler(const PxGeometry& geometry, const PxTransform& transform, const PxBounds3& worldBounds, PxReal initialSamplingRadius, PxI32 numSampleAttemptsAroundPoint = 30);
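/*
	Usage sketch (added for illustration, not part of the original header). It creates a sampler for
	an existing shape and locally adds samples inside a unit sphere at the origin; 'geometry',
	'transform' and 'worldBounds' are placeholder assumptions, and releasing the sampler is left to
	the caller.

	\code
	PxPoissonSampler* sampler = PxCreateShapeSampler(geometry, transform, worldBounds, 0.05f);
	sampler->addSamplesInSphere(PxVec3(0.0f), 1.0f);
	const PxArray<PxVec3>& samples = sampler->getSamples();
	\endcode
*/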
/**
\brief Sampler to generate Poisson Samples on a triangle mesh.
*/
class PxTriangleMeshPoissonSampler : public virtual PxPoissonSampler
{
public:
/** Gets the Poisson Samples' triangle indices
\return Returns the generated Poisson Samples' triangle indices
*/
virtual const PxArray<PxI32>& getSampleTriangleIds() const = 0;
/** Gets the Poisson Samples' barycentric coordinates
\return Returns the generated Poisson Samples' barycentric coordinates
*/
virtual const PxArray<PxVec3>& getSampleBarycentrics() const = 0;
/** Checks whether a point is inside the triangle mesh
\return Returns true if the point is inside the triangle mesh
*/
virtual bool isPointInTriangleMesh(const PxVec3& p) = 0;
virtual ~PxTriangleMeshPoissonSampler() { }
};
/** Creates a triangle mesh sampler
\param[in] triangles The triangle indices of the mesh
\param[in] numTriangles The total number of triangles
\param[in] vertices The vertices of the mesh
\param[in] numVertices The total number of vertices
\param[in] initialSamplingRadius The closest distance two surface samples are allowed to have
\param[in] numSampleAttemptsAroundPoint Number of repetitions the underlying algorithm performs to find a new valid sample that matches all criteria like minimal distance to existing samples etc.
\return Returns the sampler
*/
PxTriangleMeshPoissonSampler* PxCreateTriangleMeshSampler(const PxU32* triangles, PxU32 numTriangles, const PxVec3* vertices, PxU32 numVertices, PxReal initialSamplingRadius, PxI32 numSampleAttemptsAroundPoint = 30);
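/*
	Usage sketch (added for illustration, not part of the original header). It samples a triangle mesh
	inside an axis-aligned box and reads back the per-sample triangle ids and barycentric coordinates;
	'indices', 'numTriangles', 'vertices' and 'numVertices' are placeholder assumptions.

	\code
	PxTriangleMeshPoissonSampler* sampler = PxCreateTriangleMeshSampler(indices, numTriangles, vertices, numVertices, 0.1f);
	sampler->addSamplesInBox(PxBounds3(PxVec3(-1.0f), PxVec3(1.0f)), PxQuat(PxIdentity));
	const PxArray<PxVec3>& positions = sampler->getSamples();
	const PxArray<PxI32>& triangleIds = sampler->getSampleTriangleIds();
	const PxArray<PxVec3>& barycentrics = sampler->getSampleBarycentrics();
	\endcode
*/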
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 10,597 | C | 55.978494 | 260 | 0.787676 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxExtensionsAPI.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_EXTENSIONS_API_H
#define PX_EXTENSIONS_API_H
/** \addtogroup extensions
@{
*/
#include "foundation/PxErrorCallback.h"
#include "extensions/PxDefaultAllocator.h"
#include "extensions/PxConstraintExt.h"
#include "extensions/PxDistanceJoint.h"
#include "extensions/PxContactJoint.h"
#include "extensions/PxFixedJoint.h"
#include "extensions/PxPrismaticJoint.h"
#include "extensions/PxRevoluteJoint.h"
#include "extensions/PxSphericalJoint.h"
#include "extensions/PxD6Joint.h"
#include "extensions/PxGearJoint.h"
#include "extensions/PxRackAndPinionJoint.h"
#include "extensions/PxDefaultSimulationFilterShader.h"
#include "extensions/PxDefaultErrorCallback.h"
#include "extensions/PxDefaultStreams.h"
#include "extensions/PxRigidActorExt.h"
#include "extensions/PxRigidBodyExt.h"
#include "extensions/PxShapeExt.h"
#include "extensions/PxTriangleMeshExt.h"
#include "extensions/PxSerialization.h"
#include "extensions/PxDefaultCpuDispatcher.h"
#include "extensions/PxSmoothNormals.h"
#include "extensions/PxSimpleFactory.h"
#include "extensions/PxStringTableExt.h"
#include "extensions/PxBroadPhaseExt.h"
#include "extensions/PxMassProperties.h"
#include "extensions/PxSceneQueryExt.h"
#include "extensions/PxSceneQuerySystemExt.h"
#include "extensions/PxCustomSceneQuerySystem.h"
#include "extensions/PxConvexMeshExt.h"
#include "extensions/PxSamplingExt.h"
#include "extensions/PxTetrahedronMeshExt.h"
#include "extensions/PxCustomGeometryExt.h"
#if PX_ENABLE_FEATURES_UNDER_CONSTRUCTION
#include "extensions/PxFEMClothExt.h"
#endif
/** \brief Initialize the PhysXExtensions library.
This should be called before calling any functions or methods in extensions which may require allocation.
\note This function does not need to be called before creating a PxDefaultAllocator object.
\param physics a PxPhysics object
\param pvd a PxPvd (PhysX Visual Debugger) object
@see PxCloseExtensions PxFoundation PxPhysics
*/
PX_C_EXPORT bool PX_CALL_CONV PxInitExtensions(physx::PxPhysics& physics, physx::PxPvd* pvd);
/** \brief Shut down the PhysXExtensions library.
This function should be called to cleanly shut down the PhysXExtensions library before application exit.
\note This function is required to be called to release foundation usage.
@see PxInitExtensions
*/
PX_C_EXPORT void PX_CALL_CONV PxCloseExtensions();
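/*
	Usage sketch (added for illustration, not part of the original header): the typical lifetime of the
	extensions library around the rest of the application code. 'physics' is assumed to be a valid
	PxPhysics* and 'pvd' an optional PxPvd* (it may be NULL).

	\code
	if(!PxInitExtensions(*physics, pvd))
		return;	// initialization failed

	// ... create joints, cook, serialize, run queries, etc. ...

	PxCloseExtensions();
	\endcode
*/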
/** @} */
#endif
| 4,057 | C | 40.835051 | 106 | 0.791225 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxCustomSceneQuerySystem.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_NEW_SCENE_QUERY_SYSTEM_H
#define PX_NEW_SCENE_QUERY_SYSTEM_H
/** \addtogroup extensions
@{
*/
#include "PxSceneQuerySystem.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief A custom scene query system.
This is an example of a custom scene query system. It augments the PxSceneQuerySystem API to support an arbitrary number
of "pruners", instead of the usual hardcoded two.
It might not be possible to support the whole PxSceneQuerySystem API in this context. See the source code for details.
@see PxSceneQuerySystem
*/
class PxCustomSceneQuerySystem : public PxSceneQuerySystem
{
public:
PxCustomSceneQuerySystem() {}
virtual ~PxCustomSceneQuerySystem() {}
/**
\brief Adds a pruner to the system.
The internal PhysX scene-query system uses two regular pruners (one for static shapes, one for dynamic shapes) and an optional
compound pruner. Our custom scene query system supports an arbitrary number of regular pruners.
This can be useful to reduce the load on each pruner, in particular during updates, when internal trees are rebuilt in the
background. On the other hand this implementation simply iterates over all created pruners to perform queries, so their cost
might increase if a large number of pruners is used.
In any case this serves as an example of how the PxSceneQuerySystem API can be used to customize scene queries.
\param[in] primaryType Desired primary (main) type for the new pruner
\param[in] secondaryType Secondary type when primary type is PxPruningStructureType::eDYNAMIC_AABB_TREE.
\param[in] preallocated Optional number of preallocated shapes in the new pruner
\return A pruner index
@see PxCustomSceneQuerySystem PxSceneQueryUpdateMode PxCustomSceneQuerySystemAdapter PxSceneDesc::sceneQuerySystem
*/
virtual PxU32 addPruner(PxPruningStructureType::Enum primaryType, PxDynamicTreeSecondaryPruner::Enum secondaryType, PxU32 preallocated=0) = 0;
/**
\brief Start custom build-steps for all pruners
This function is used in combination with customBuildstep() and finishCustomBuildstep() to let users take control
of the pruners' build-step & commit calls - basically the pruners' update functions. These functions should be used
with the PxSceneQueryUpdateMode::eBUILD_DISABLED_COMMIT_DISABLED update mode, otherwise the build-steps will happen
automatically in fetchResults. For N pruners it can be more efficient to use these custom build-step functions to
perform the updates in parallel:
- call startCustomBuildstep() first (one synchronous call)
- for each pruner, call customBuildstep() (asynchronous calls from multiple threads)
- once it is done, call finishCustomBuildstep() to finish the update (synchronous call)
The multi-threaded update is more efficient here than what it is in PxScene, because the "flushShapes()" call is
also multi-threaded (while it is not in PxScene).
Note that users are responsible for locks here, and these calls should not overlap with other SQ calls. In particular
one should not add new objects to the SQ system or perform queries while these calls are happening.
\return The number of pruners in the system.
@see customBuildstep finishCustomBuildstep PxSceneQueryUpdateMode
*/
virtual PxU32 startCustomBuildstep() = 0;
/**
\brief Perform a custom build-step for a given pruner.
\param[in] index Pruner index (should be between 0 and the number returned by startCustomBuildstep)
@see startCustomBuildstep finishCustomBuildstep
*/
virtual void customBuildstep(PxU32 index) = 0;
/**
\brief Finish custom build-steps
Call this function once after all the customBuildstep() calls are done.
@see startCustomBuildstep customBuildstep
*/
virtual void finishCustomBuildstep() = 0;
};
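/*
	Usage sketch (added for illustration, not part of the original header). It shows the manual
	build-step sequence described above; 'sqSystem' is assumed to be a valid PxCustomSceneQuerySystem*
	used with PxSceneQueryUpdateMode::eBUILD_DISABLED_COMMIT_DISABLED.

	\code
	const PxU32 nbPruners = sqSystem->startCustomBuildstep();
	for(PxU32 i=0; i<nbPruners; i++)
		sqSystem->customBuildstep(i);		// candidates for parallel execution, one call per pruner
	sqSystem->finishCustomBuildstep();
	\endcode
*/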
/**
\brief An adapter class to customize the object-to-pruner mapping.
In the regular PhysX code static shapes went to the static pruner, and dynamic shapes went to the
dynamic pruner.
This class is a replacement for this mapping when N user-defined pruners are involved.
*/
class PxCustomSceneQuerySystemAdapter
{
public:
PxCustomSceneQuerySystemAdapter() {}
virtual ~PxCustomSceneQuerySystemAdapter() {}
/**
\brief Gets a pruner index for an actor/shape.
This user-defined function tells the system in which pruner a given actor/shape should go.
\note The returned index must be valid, i.e. it must have been previously returned to users by PxCustomSceneQuerySystem::addPruner.
\param[in] actor The actor
\param[in] shape The shape
\return A pruner index for this actor/shape.
@see PxRigidActor PxShape PxCustomSceneQuerySystem::addPruner
*/
virtual PxU32 getPrunerIndex(const PxRigidActor& actor, const PxShape& shape) const = 0;
/**
\brief Pruner filtering callback.
This will be called for each query to validate whether it should process a given pruner.
\param[in] prunerIndex The index of the currently processed pruner
\param[in] context The query context
\param[in] filterData The query's filter data
\param[in] filterCall The query's filter callback
\return True to process the pruner, false to skip it entirely
*/
virtual bool processPruner(PxU32 prunerIndex, const PxQueryThreadContext* context, const PxQueryFilterData& filterData, PxQueryFilterCallback* filterCall) const = 0;
};
/**
\brief Creates a custom scene query system.
This is similar to PxCreateExternalSceneQuerySystem, except this function creates a PxCustomSceneQuerySystem object.
It can be plugged to PxScene the same way, via PxSceneDesc::sceneQuerySystem.
\param[in] sceneQueryUpdateMode Desired update mode
\param[in] contextID Context ID parameter, sent to the profiler
\param[in] adapter Adapter class implementing our extended API
\param[in] usesTreeOfPruners True to keep pruners themselves in a BVH, which might increase query performance if a lot of pruners are involved
\return A custom SQ system instance
@see PxCustomSceneQuerySystem PxSceneQueryUpdateMode PxCustomSceneQuerySystemAdapter PxSceneDesc::sceneQuerySystem
*/
PxCustomSceneQuerySystem* PxCreateCustomSceneQuerySystem(PxSceneQueryUpdateMode::Enum sceneQueryUpdateMode, PxU64 contextID, const PxCustomSceneQuerySystemAdapter& adapter, bool usesTreeOfPruners=false);
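/*
	Usage sketch (added for illustration, not part of the original header): a minimal adapter that
	sends static shapes to pruner 0 and everything else to pruner 1, and never skips a pruner.
	The pruner indices are assumed to be returned by addPruner() in creation order (0, then 1).

	\code
	class MyAdapter : public PxCustomSceneQuerySystemAdapter
	{
	public:
		virtual PxU32 getPrunerIndex(const PxRigidActor& actor, const PxShape&) const
		{
			return actor.getConcreteType() == PxConcreteType::eRIGID_STATIC ? 0 : 1;
		}

		virtual bool processPruner(PxU32, const PxQueryThreadContext*, const PxQueryFilterData&, PxQueryFilterCallback*) const
		{
			return true;
		}
	};

	static MyAdapter gAdapter;
	PxCustomSceneQuerySystem* sqSystem = PxCreateCustomSceneQuerySystem(PxSceneQueryUpdateMode::eBUILD_ENABLED_COMMIT_ENABLED, 0, gAdapter);
	sqSystem->addPruner(PxPruningStructureType::eDYNAMIC_AABB_TREE, PxDynamicTreeSecondaryPruner::eINCREMENTAL);	// pruner 0
	sqSystem->addPruner(PxPruningStructureType::eDYNAMIC_AABB_TREE, PxDynamicTreeSecondaryPruner::eINCREMENTAL);	// pruner 1
	\endcode
*/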
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 8,072 | C | 41.046875 | 204 | 0.776759 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxSceneQueryExt.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_SCENE_QUERY_EXT_H
#define PX_SCENE_QUERY_EXT_H
/** \addtogroup extensions
@{
*/
#include "PxPhysXConfig.h"
#include "PxScene.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
// These types have been deprecated (removed) in PhysX 3.4. We typedef them to the new types here for easy migration from 3.3 to 3.4.
typedef PxQueryHit PxSceneQueryHit;
typedef PxQueryFilterData PxSceneQueryFilterData;
typedef PxQueryFilterCallback PxSceneQueryFilterCallback;
typedef PxQueryCache PxSceneQueryCache;
typedef PxHitFlag PxSceneQueryFlag;
typedef PxHitFlags PxSceneQueryFlags;
/**
\brief utility functions for use with PxScene, related to scene queries.
Some of these functions have been deprecated (removed) in PhysX 3.4. We re-implement them here for easy migration from 3.3 to 3.4.
@see PxShape
*/
class PxSceneQueryExt
{
public:
/**
\brief Raycast returning any blocking hit, not necessarily the closest.
Returns whether any rigid actor is hit along the ray.
\note Shooting a ray from within an object leads to different results depending on the shape type. Please check the details in the SceneQuery section of the user guide. Users can ignore such objects by using one of the provided filter mechanisms.
\param[in] scene The scene
\param[in] origin Origin of the ray.
\param[in] unitDir Normalized direction of the ray.
\param[in] distance Length of the ray. Needs to be larger than 0.
\param[out] hit Raycast hit information.
\param[in] filterData Filtering data and simple logic.
\param[in] filterCall Custom filtering logic (optional). Only used if the corresponding #PxHitFlag flags are set. If NULL, all hits are assumed to be blocking.
\param[in] cache Cached hit shape (optional). Ray is tested against cached shape first. If no hit is found the ray gets queried against the scene.
Note: Filtering is not executed for a cached shape if supplied; instead, if a hit is found, it is assumed to be a blocking hit.
Note: Using past touching hits as cache will produce incorrect behavior since the cached hit will always be treated as blocking.
\return True if a blocking hit was found.
@see PxSceneQueryFilterData PxSceneQueryFilterCallback PxSceneQueryCache PxSceneQueryHit
*/
static bool raycastAny( const PxScene& scene,
const PxVec3& origin, const PxVec3& unitDir, const PxReal distance,
PxSceneQueryHit& hit, const PxSceneQueryFilterData& filterData = PxSceneQueryFilterData(),
PxSceneQueryFilterCallback* filterCall = NULL, const PxSceneQueryCache* cache = NULL);
/**
\brief Raycast returning a single result.
Returns the first rigid actor that is hit along the ray. Data for a blocking hit will be returned as specified by the outputFlags field. Touching hits will be ignored.
\note Shooting a ray from within an object leads to different results depending on the shape type. Please check the details in the SceneQuery section of the user guide. Users can ignore such objects by using one of the provided filter mechanisms.
\param[in] scene The scene
\param[in] origin Origin of the ray.
\param[in] unitDir Normalized direction of the ray.
\param[in] distance Length of the ray. Needs to be larger than 0.
\param[in] outputFlags Specifies which properties should be written to the hit information
\param[out] hit Raycast hit information.
\param[in] filterData Filtering data and simple logic.
\param[in] filterCall Custom filtering logic (optional). Only used if the corresponding #PxHitFlag flags are set. If NULL, all hits are assumed to be blocking.
\param[in] cache Cached hit shape (optional). Ray is tested against cached shape first then against the scene.
Note: Filtering is not executed for a cached shape if supplied; instead, if a hit is found, it is assumed to be a blocking hit.
Note: Using past touching hits as cache will produce incorrect behavior since the cached hit will always be treated as blocking.
\return True if a blocking hit was found.
@see PxSceneQueryFlags PxRaycastHit PxSceneQueryFilterData PxSceneQueryFilterCallback PxSceneQueryCache
*/
static bool raycastSingle( const PxScene& scene,
const PxVec3& origin, const PxVec3& unitDir, const PxReal distance,
PxSceneQueryFlags outputFlags, PxRaycastHit& hit,
const PxSceneQueryFilterData& filterData = PxSceneQueryFilterData(),
PxSceneQueryFilterCallback* filterCall = NULL, const PxSceneQueryCache* cache = NULL);
/**
\brief Raycast returning multiple results.
Find all rigid actors that get hit along the ray. Each result contains data as specified by the outputFlags field.
\note Touching hits are not ordered.
\note Shooting a ray from within an object leads to different results depending on the shape type. Please check the details in the SceneQuery section of the user guide. Users can ignore such objects by using one of the provided filter mechanisms.
\param[in] scene The scene
\param[in] origin Origin of the ray.
\param[in] unitDir Normalized direction of the ray.
\param[in] distance Length of the ray. Needs to be larger than 0.
\param[in] outputFlags Specifies which properties should be written to the hit information
\param[out] hitBuffer Raycast hit information buffer. If the buffer overflows, the blocking hit is returned as the last entry together with an arbitrary subset
of the nearer touching hits (typically the query should be restarted with a larger buffer).
\param[in] hitBufferSize Size of the hit buffer.
\param[out] blockingHit True if a blocking hit was found. If found, it is the last in the buffer, preceded by any touching hits which are closer. Otherwise the touching hits are listed.
\param[in] filterData Filtering data and simple logic.
\param[in] filterCall Custom filtering logic (optional). Only used if the corresponding #PxHitFlag flags are set. If NULL, all hits are assumed to be touching.
\param[in] cache Cached hit shape (optional). Ray is tested against cached shape first then against the scene.
Note: Filtering is not executed for a cached shape if supplied; instead, if a hit is found, it is assumed to be a blocking hit.
Note: Using past touching hits as cache will produce incorrect behavior since the cached hit will always be treated as blocking.
\return Number of hits in the buffer, or -1 if the buffer overflowed.
@see PxSceneQueryFlags PxRaycastHit PxSceneQueryFilterData PxSceneQueryFilterCallback PxSceneQueryCache
*/
static PxI32 raycastMultiple( const PxScene& scene,
const PxVec3& origin, const PxVec3& unitDir, const PxReal distance,
PxSceneQueryFlags outputFlags,
PxRaycastHit* hitBuffer, PxU32 hitBufferSize, bool& blockingHit,
const PxSceneQueryFilterData& filterData = PxSceneQueryFilterData(),
PxSceneQueryFilterCallback* filterCall = NULL, const PxSceneQueryCache* cache = NULL);
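/*
Usage sketch (illustrative only): a raycast collecting touching hits into a fixed-size buffer. The buffer
size of 32 is arbitrary; `scene`, `origin`, `unitDir` and `maxDist` are assumed to be provided by the
application.

	PxRaycastHit hitBuffer[32];
	bool blockingHit = false;
	const PxI32 nbHits = PxSceneQueryExt::raycastMultiple(scene, origin, unitDir, maxDist,
		PxHitFlags(PxHitFlag::eDEFAULT), hitBuffer, 32, blockingHit);
	if(nbHits == -1)
	{
		// buffer overflow: restart the query with a larger buffer
	}
	for(PxI32 i = 0; i < nbHits; i++)
	{
		// hitBuffer[i] holds a touching hit; if blockingHit is true the last entry is the blocking hit
	}
*/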
/**
\brief Sweep returning any blocking hit, not necessarily the closest.
Returns whether any rigid actor is hit along the sweep path.
\note If a shape from the scene is already overlapping with the query shape in its starting position, behavior is controlled by the PxSceneQueryFlag::eINITIAL_OVERLAP flag.
\param[in] scene The scene
\param[in] geometry Geometry of object to sweep (supported types are: box, sphere, capsule, convex).
\param[in] pose Pose of the sweep object.
\param[in] unitDir Normalized direction of the sweep.
\param[in] distance Sweep distance. Needs to be larger than 0. Will be clamped to PX_MAX_SWEEP_DISTANCE.
\param[in] queryFlags Combination of PxSceneQueryFlag defining the query behavior
\param[out] hit Sweep hit information.
\param[in] filterData Filtering data and simple logic.
\param[in] filterCall Custom filtering logic (optional). Only used if the corresponding #PxHitFlag flags are set. If NULL, all hits are assumed to be blocking.
\param[in] cache Cached hit shape (optional). Sweep is performed against cached shape first. If no hit is found the sweep gets queried against the scene.
Note: Filtering is not executed for a cached shape if supplied; instead, if a hit is found, it is assumed to be a blocking hit.
Note: Using past touching hits as cache will produce incorrect behavior since the cached hit will always be treated as blocking.
\param[in] inflation This parameter creates a skin around the swept geometry which increases its extents for sweeping. The sweep will register a hit as soon as the skin touches a shape, and will return the corresponding distance and normal.
\return True if a blocking hit was found.
@see PxSceneQueryFilterData PxSceneQueryFilterCallback PxSceneQueryHit PxSceneQueryCache
*/
static bool sweepAny( const PxScene& scene,
const PxGeometry& geometry, const PxTransform& pose, const PxVec3& unitDir, const PxReal distance,
PxSceneQueryFlags queryFlags,
PxSceneQueryHit& hit,
const PxSceneQueryFilterData& filterData = PxSceneQueryFilterData(),
PxSceneQueryFilterCallback* filterCall = NULL,
const PxSceneQueryCache* cache = NULL,
PxReal inflation = 0.0f);
/**
\brief Sweep returning a single result.
Returns the first rigid actor that is hit along the sweep path. Data for a blocking hit will be returned as specified by the outputFlags field. Touching hits will be ignored.
\note If a shape from the scene is already overlapping with the query shape in its starting position, behavior is controlled by the PxSceneQueryFlag::eINITIAL_OVERLAP flag.
\param[in] scene The scene
\param[in] geometry Geometry of object to sweep (supported types are: box, sphere, capsule, convex).
\param[in] pose Pose of the sweep object.
\param[in] unitDir Normalized direction of the sweep.
\param[in] distance Sweep distance. Needs to be larger than 0. Will be clamped to PX_MAX_SWEEP_DISTANCE.
\param[in] outputFlags Specifies which properties should be written to the hit information.
\param[out] hit Sweep hit information.
\param[in] filterData Filtering data and simple logic.
\param[in] filterCall Custom filtering logic (optional). Only used if the corresponding #PxHitFlag flags are set. If NULL, all hits are assumed to be blocking.
\param[in] cache Cached hit shape (optional). Sweep is performed against cached shape first then against the scene.
Note: Filtering is not executed for a cached shape if supplied; instead, if a hit is found, it is assumed to be a blocking hit.
Note: Using past touching hits as cache will produce incorrect behavior since the cached hit will always be treated as blocking.
\param[in] inflation This parameter creates a skin around the swept geometry which increases its extents for sweeping. The sweep will register a hit as soon as the skin touches a shape, and will return the corresponding distance and normal.
\return True if a blocking hit was found.
@see PxSceneQueryFlags PxSweepHit PxSceneQueryFilterData PxSceneQueryFilterCallback PxSceneQueryCache
*/
static bool sweepSingle(const PxScene& scene,
const PxGeometry& geometry, const PxTransform& pose, const PxVec3& unitDir, const PxReal distance,
PxSceneQueryFlags outputFlags,
PxSweepHit& hit,
const PxSceneQueryFilterData& filterData = PxSceneQueryFilterData(),
PxSceneQueryFilterCallback* filterCall = NULL,
const PxSceneQueryCache* cache = NULL,
PxReal inflation=0.0f);
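/*
Usage sketch (illustrative only): sweeping a sphere of radius 0.5 from `pose` along `unitDir`.
`scene`, `pose`, `unitDir` and `maxDist` are assumed to be provided by the application.

	PxSweepHit sweepHit;
	const bool blocked = PxSceneQueryExt::sweepSingle(scene, PxSphereGeometry(0.5f), pose, unitDir,
		maxDist, PxHitFlags(PxHitFlag::eDEFAULT), sweepHit);
	if(blocked)
	{
		// sweepHit.distance is the travel distance along unitDir to the blocking hit
	}
*/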
/**
\brief Sweep returning multiple results.
Find all rigid actors that get hit along the sweep. Each result contains data as specified by the outputFlags field.
\note Touching hits are not ordered.
\note If a shape from the scene is already overlapping with the query shape in its starting position, behavior is controlled by the PxSceneQueryFlag::eINITIAL_OVERLAP flag.
\param[in] scene The scene
\param[in] geometry Geometry of object to sweep (supported types are: box, sphere, capsule, convex).
\param[in] pose Pose of the sweep object.
\param[in] unitDir Normalized direction of the sweep.
\param[in] distance Sweep distance. Needs to be larger than 0. Will be clamped to PX_MAX_SWEEP_DISTANCE.
\param[in] outputFlags Specifies which properties should be written to the hit information.
\param[out] hitBuffer Sweep hit information buffer. If the buffer overflows, the blocking hit is returned as the last entry together with an arbitrary subset
of the nearer touching hits (typically the query should be restarted with a larger buffer).
\param[in] hitBufferSize Size of the hit buffer.
\param[out] blockingHit True if a blocking hit was found. If found, it is the last in the buffer, preceded by any touching hits which are closer. Otherwise the touching hits are listed.
\param[in] filterData Filtering data and simple logic.
\param[in] filterCall Custom filtering logic (optional). Only used if the corresponding #PxHitFlag flags are set. If NULL, all hits are assumed to be touching.
\param[in] cache Cached hit shape (optional). Sweep is performed against cached shape first then against the scene.
Note: Filtering is not executed for a cached shape if supplied; instead, if a hit is found, it is assumed to be a blocking hit.
Note: Using past touching hits as cache will produce incorrect behavior since the cached hit will always be treated as blocking.
\param[in] inflation This parameter creates a skin around the swept geometry which increases its extents for sweeping. The sweep will register a hit as soon as the skin touches a shape, and will return the corresponding distance and normal.
\return Number of hits in the buffer, or -1 if the buffer overflowed.
@see PxSceneQueryFlags PxSweepHit PxSceneQueryFilterData PxSceneQueryFilterCallback PxSceneQueryCache
*/
static PxI32 sweepMultiple( const PxScene& scene,
const PxGeometry& geometry, const PxTransform& pose, const PxVec3& unitDir, const PxReal distance,
PxSceneQueryFlags outputFlags, PxSweepHit* hitBuffer, PxU32 hitBufferSize, bool& blockingHit,
const PxSceneQueryFilterData& filterData = PxSceneQueryFilterData(),
PxSceneQueryFilterCallback* filterCall = NULL, const PxSceneQueryCache* cache = NULL,
PxReal inflation = 0.0f);
/**
\brief Test overlap between a geometry and objects in the scene.
\note Filtering: Overlap tests do not distinguish between touching and blocking hit types. Both get written to the hit buffer.
\note PxHitFlag::eMESH_MULTIPLE and PxHitFlag::eMESH_BOTH_SIDES have no effect in this case
\param[in] scene The scene
\param[in] geometry Geometry of object to check for overlap (supported types are: box, sphere, capsule, convex).
\param[in] pose Pose of the object.
\param[out] hitBuffer Buffer to store the overlapping objects to. If the buffer overflows, an arbitrary subset of overlapping objects is stored (typically the query should be restarted with a larger buffer).
\param[in] hitBufferSize Size of the hit buffer.
\param[in] filterData Filtering data and simple logic.
\param[in] filterCall Custom filtering logic (optional). Only used if the corresponding #PxHitFlag flags are set. If NULL, all hits are assumed to overlap.
\return Number of hits in the buffer, or -1 if the buffer overflowed.
@see PxSceneQueryFlags PxSceneQueryFilterData PxSceneQueryFilterCallback
*/
static PxI32 overlapMultiple( const PxScene& scene,
const PxGeometry& geometry, const PxTransform& pose,
PxOverlapHit* hitBuffer, PxU32 hitBufferSize,
const PxSceneQueryFilterData& filterData = PxSceneQueryFilterData(),
PxSceneQueryFilterCallback* filterCall = NULL);
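/*
Usage sketch (illustrative only): gathering everything overlapping a unit sphere at `pose`. The buffer
size of 16 is arbitrary; `scene` and `pose` are assumed to be provided by the application.

	PxOverlapHit touches[16];
	const PxI32 nb = PxSceneQueryExt::overlapMultiple(scene, PxSphereGeometry(1.0f), pose, touches, 16);
	for(PxI32 i = 0; i < nb; i++)
	{
		// touches[i].actor and touches[i].shape identify an overlapping object
	}
*/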
/**
\brief Test returning, for a given geometry, any overlapping object in the scene.
\note Filtering: Overlap tests do not distinguish between touching and blocking hit types. Both trigger a hit.
\note PxHitFlag::eMESH_MULTIPLE and PxHitFlag::eMESH_BOTH_SIDES have no effect in this case
\param[in] scene The scene
\param[in] geometry Geometry of object to check for overlap (supported types are: box, sphere, capsule, convex).
\param[in] pose Pose of the object.
\param[out] hit Pointer to store the overlapping object to.
\param[in] filterData Filtering data and simple logic.
\param[in] filterCall Custom filtering logic (optional). Only used if the corresponding #PxHitFlag flags are set. If NULL, all hits are assumed to overlap.
\return True if an overlap was found.
@see PxSceneQueryFlags PxSceneQueryFilterData PxSceneQueryFilterCallback
*/
static bool overlapAny( const PxScene& scene,
const PxGeometry& geometry, const PxTransform& pose,
PxOverlapHit& hit,
const PxSceneQueryFilterData& filterData = PxSceneQueryFilterData(),
PxSceneQueryFilterCallback* filterCall = NULL);
};
struct PxBatchQueryStatus
{
enum Enum
{
/**
\brief This is the initial state before a query starts.
*/
ePENDING = 0,
/**
\brief The query is finished; results have been written into the result and hit buffers.
*/
eSUCCESS,
/**
\brief The query results were incomplete due to touch hit buffer overflow. Blocking hit is still correct.
*/
eOVERFLOW
};
static PX_FORCE_INLINE Enum getStatus(const PxRaycastBuffer& r)
{
return (0xffffffff == r.nbTouches) ? ePENDING : (0xffffffff == r.maxNbTouches ? eOVERFLOW : eSUCCESS);
}
static PX_FORCE_INLINE Enum getStatus(const PxSweepBuffer& r)
{
return (0xffffffff == r.nbTouches) ? ePENDING : (0xffffffff == r.maxNbTouches ? eOVERFLOW : eSUCCESS);
}
static PX_FORCE_INLINE Enum getStatus(const PxOverlapBuffer& r)
{
return (0xffffffff == r.nbTouches) ? ePENDING : (0xffffffff == r.maxNbTouches ? eOVERFLOW : eSUCCESS);
}
};
class PxBatchQueryExt
{
public:
virtual void release() = 0;
/**
\brief Performs a raycast against objects in the scene.
\note Touching hits are not ordered.
\note Shooting a ray from within an object leads to different results depending on the shape type. Please check the SceneQuery section of the user guide for details. Users can ignore such objects by using one of the provided filter mechanisms.
\param[in] origin Origin of the ray.
\param[in] unitDir Normalized direction of the ray.
\param[in] distance Length of the ray. Needs to be larger than 0.
\param[in] maxNbTouches Maximum number of touching hits to record in the touch buffer for this query. The default of 0 reports a single blocking hit only; with maxNbTouches set to 0 all hits are treated as blocking.
\param[in] hitFlags Specifies which properties per hit should be computed and returned in hit array and blocking hit.
\param[in] filterData Filtering data passed to the filter shader. See #PxQueryFilterData #PxQueryFilterCallback
\param[in] cache Cached hit shape (optional). Query is tested against cached shape first. If no hit is found the ray gets queried against the scene.
Note: Filtering is not executed for a cached shape if supplied; instead, if a hit is found, it is assumed to be a blocking hit.
Note: Using past touching hits as cache will produce incorrect behavior since the cached hit will always be treated as blocking.
\note This query call writes to a list associated with the query object and is NOT thread safe (for performance reasons there is no lock
and overlapping writes from different threads may result in undefined behavior).
\return Returns a PxRaycastBuffer pointer that will store the result of the query after execute() is completed.
This will point either to an element of the buffer allocated on construction or to a user buffer passed to the constructor.
@see PxCreateBatchQueryExt
@see PxQueryFilterData PxQueryFilterCallback PxRaycastHit PxScene::raycast
*/
virtual PxRaycastBuffer* raycast(
const PxVec3& origin, const PxVec3& unitDir, const PxReal distance,
const PxU16 maxNbTouches = 0,
PxHitFlags hitFlags = PxHitFlags(PxHitFlag::eDEFAULT),
const PxQueryFilterData& filterData = PxQueryFilterData(),
const PxQueryCache* cache = NULL) = 0;
/**
\brief Performs a sweep test against objects in the scene.
\note Touching hits are not ordered.
\note If a shape from the scene is already overlapping with the query shape in its starting position,
the hit is returned unless eASSUME_NO_INITIAL_OVERLAP was specified.
\param[in] geometry Geometry of object to sweep (supported types are: box, sphere, capsule, convex).
\param[in] pose Pose of the sweep object.
\param[in] unitDir Normalized direction of the sweep.
\param[in] distance Sweep distance. Needs to be larger than 0. Will be clamped to PX_MAX_SWEEP_DISTANCE.
\param[in] maxNbTouches Maximum number of touching hits to record in the touch buffer for this query. The default of 0 reports a single blocking hit only; with maxNbTouches set to 0 all hits are treated as blocking.
\param[in] hitFlags Specifies which properties per hit should be computed and returned in hit array and blocking hit.
\param[in] filterData Filtering data and simple logic. See #PxQueryFilterData #PxQueryFilterCallback
\param[in] cache Cached hit shape (optional). Query is tested against cached shape first. If no hit is found the sweep gets queried against the scene.
Note: Filtering is not executed for a cached shape if supplied; instead, if a hit is found, it is assumed to be a blocking hit.
Note: Using past touching hits as cache will produce incorrect behavior since the cached hit will always be treated as blocking.
\param[in] inflation This parameter creates a skin around the swept geometry which increases its extents for sweeping. The sweep will register a hit as soon as the skin touches a shape, and will return the corresponding distance and normal.
Note: ePRECISE_SWEEP doesn't support inflation. Therefore the sweep will be performed with zero inflation.
\note This query call writes to a list associated with the query object and is NOT thread safe (for performance reasons there is no lock
and overlapping writes from different threads may result in undefined behavior).
\return Returns a PxSweepBuffer pointer that will store the result of the query after execute() is completed.
This will point either to an element of the buffer allocated on construction or to a user buffer passed to the constructor.
@see PxCreateBatchQueryExt
@see PxHitFlags PxQueryFilterData PxBatchQueryPreFilterShader PxBatchQueryPostFilterShader PxSweepHit
*/
virtual PxSweepBuffer* sweep(
const PxGeometry& geometry, const PxTransform& pose, const PxVec3& unitDir, const PxReal distance,
const PxU16 maxNbTouches = 0,
PxHitFlags hitFlags = PxHitFlags(PxHitFlag::eDEFAULT),
const PxQueryFilterData& filterData = PxQueryFilterData(),
const PxQueryCache* cache = NULL,
const PxReal inflation = 0.0f) = 0;
/**
\brief Performs an overlap test of a given geometry against objects in the scene.
\note Filtering: returning eBLOCK from user filter for overlap queries will cause a warning (see #PxQueryHitType).
\param[in] geometry Geometry of object to check for overlap (supported types are: box, sphere, capsule, convex).
\param[in] pose Pose of the object.
\param[in] maxNbTouches Maximum number of touching hits to record in the touch buffer for this query. The default of 0 reports a single blocking hit only; with maxNbTouches set to 0 all hits are treated as blocking.
\param[in] filterData Filtering data and simple logic. See #PxQueryFilterData #PxQueryFilterCallback
\param[in] cache Cached hit shape (optional). Query is tested against cached shape first. If no hit is found the overlap gets queried against the scene.
Note: Filtering is not executed for a cached shape if supplied; instead, if a hit is found, it is assumed to be a blocking hit.
Note: Using past touching hits as cache will produce incorrect behavior since the cached hit will always be treated as blocking.
\note eBLOCK should not be returned from user filters for overlap(). Doing so will result in undefined behavior, and a warning will be issued.
\note If the PxQueryFlag::eNO_BLOCK flag is set, the eBLOCK will instead be automatically converted to an eTOUCH and the warning suppressed.
\note This query call writes to a list associated with the query object and is NOT thread safe (for performance reasons there is no lock
and overlapping writes from different threads may result in undefined behavior).
\return Returns a PxOverlapBuffer pointer that will store the result of the query after execute() is completed.
This will point either to an element of the buffer allocated on construction or to a user buffer passed to the constructor.
@see PxCreateBatchQueryExt
@see PxQueryFilterData PxQueryFilterCallback
*/
virtual PxOverlapBuffer* overlap(
const PxGeometry& geometry, const PxTransform& pose,
PxU16 maxNbTouches = 0,
const PxQueryFilterData& filterData = PxQueryFilterData(),
const PxQueryCache* cache = NULL) = 0;
virtual void execute() = 0;
protected:
virtual ~PxBatchQueryExt() {}
};
/**
\brief Create a PxBatchQueryExt without the need for pre-allocated result or touch buffers.
\param[in] scene Queries will be performed against objects in the specified PxScene
\param[in] queryFilterCallback Filtering for all queries is performed using queryFilterCallback. A null pointer results in all shapes being considered.
\param[in] maxNbRaycasts A result buffer will be allocated that is large enough to accommodate maxNbRaycasts calls to PxBatchQueryExt::raycast()
\param[in] maxNbRaycastTouches A touch buffer will be allocated that is large enough to accommodate maxNbRaycastTouches touches for all raycasts in the batch.
\param[in] maxNbSweeps A result buffer will be allocated that is large enough to accommodate maxNbSweeps calls to PxBatchQueryExt::sweep()
\param[in] maxNbSweepTouches A touch buffer will be allocated that is large enough to accommodate maxNbSweepTouches touches for all sweeps in the batch.
\param[in] maxNbOverlaps A result buffer will be allocated that is large enough to accommodate maxNbOverlaps calls to PxBatchQueryExt::overlap()
\param[in] maxNbOverlapTouches A touch buffer will be allocated that is large enough to accommodate maxNbOverlapTouches touches for all overlaps in the batch.
\return Returns a PxBatchQueryExt instance. A NULL pointer will be returned if the subsequent allocations fail or if any of the arguments are illegal.
In the event that a NULL pointer is returned a corresponding error will be issued to the error stream.
*/
PxBatchQueryExt* PxCreateBatchQueryExt(
const PxScene& scene, PxQueryFilterCallback* queryFilterCallback,
const PxU32 maxNbRaycasts, const PxU32 maxNbRaycastTouches,
const PxU32 maxNbSweeps, const PxU32 maxNbSweepTouches,
const PxU32 maxNbOverlaps, const PxU32 maxNbOverlapTouches);
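/*
Usage sketch (illustrative only): a small batch of raycasts. The capacities and the ray data
(`origin0`, `dir0`, ...) are arbitrary placeholders; capacities of 1 are reserved for the unused
query types since it is not stated here whether zero capacities are accepted.

	PxBatchQueryExt* batch = PxCreateBatchQueryExt(scene, NULL, 2, 16, 1, 1, 1, 1);
	if(batch)
	{
		PxRaycastBuffer* r0 = batch->raycast(origin0, dir0, 100.0f, 8);
		PxRaycastBuffer* r1 = batch->raycast(origin1, dir1, 100.0f, 8);
		batch->execute();
		if(r0 && PxBatchQueryStatus::getStatus(*r0) == PxBatchQueryStatus::eSUCCESS)
		{
			// r0->hasBlock/r0->block and r0->getNbTouches()/r0->getTouch(i) are now valid
		}
		// ... inspect r1 the same way ...
		batch->release();
	}
*/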
/**
\brief Create a PxBatchQueryExt with user-supplied result and touch buffers.
\param[in] scene Queries will be performed against objects in the specified PxScene
\param[in] queryFilterCallback Filtering for all queries is performed using queryFilterCallback. A null pointer results in all shapes being considered.
\param[in] raycastBuffers This is the array that will be used to store the results of each raycast in a batch.
\param[in] maxNbRaycasts This is the length of the raycastBuffers array.
\param[in] raycastTouches This is the array that will be used to store the touches generated by all raycasts in a batch.
\param[in] maxNbRaycastTouches This is the length of the raycastTouches array.
\param[in] sweepBuffers This is the array that will be used to store the results of each sweep in a batch.
\param[in] maxNbSweeps This is the length of the sweepBuffers array.
\param[in] sweepTouches This is the array that will be used to store the touches generated by all sweeps in a batch.
\param[in] maxNbSweepTouches This is the length of the sweepTouches array.
\param[in] overlapBuffers This is the array that will be used to store the results of each overlap in a batch.
\param[in] maxNbOverlaps This is the length of the overlapBuffers array.
\param[in] overlapTouches This is the array that will be used to store the touches generated by all overlaps in a batch.
\param[in] maxNbOverlapTouches This is the length of the overlapTouches array.
\return Returns a PxBatchQueryExt instance. A NULL pointer will be returned if the subsequent allocations fail or if any of the arguments are illegal.
In the event that a NULL pointer is returned a corresponding error will be issued to the error stream.
*/
PxBatchQueryExt* PxCreateBatchQueryExt(
const PxScene& scene, PxQueryFilterCallback* queryFilterCallback,
PxRaycastBuffer* raycastBuffers, const PxU32 maxNbRaycasts, PxRaycastHit* raycastTouches, const PxU32 maxNbRaycastTouches,
PxSweepBuffer* sweepBuffers, const PxU32 maxNbSweeps, PxSweepHit* sweepTouches, const PxU32 maxNbSweepTouches,
PxOverlapBuffer* overlapBuffers, const PxU32 maxNbOverlaps, PxOverlapHit* overlapTouches, const PxU32 maxNbOverlapTouches);
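/*
Usage sketch (illustrative only): the same factory with user-supplied result and touch buffers. All
array sizes are arbitrary; the arrays should remain valid while the returned batch object is in use.

	PxRaycastBuffer raycastResults[4];	PxRaycastHit raycastTouches[32];
	PxSweepBuffer sweepResults[2];		PxSweepHit sweepTouches[16];
	PxOverlapBuffer overlapResults[2];	PxOverlapHit overlapTouches[16];

	PxBatchQueryExt* batch = PxCreateBatchQueryExt(scene, NULL,
		raycastResults, 4, raycastTouches, 32,
		sweepResults, 2, sweepTouches, 16,
		overlapResults, 2, overlapTouches, 16);
*/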
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 30,622 | C | 59.045098 | 242 | 0.775619 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxDistanceJoint.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_DISTANCE_JOINT_H
#define PX_DISTANCE_JOINT_H
/** \addtogroup extensions
@{
*/
#include "extensions/PxJoint.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxDistanceJoint;
/**
\brief Create a distance Joint.
\param[in] physics The physics SDK
\param[in] actor0 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localFrame0 The position and orientation of the joint relative to actor0
\param[in] actor1 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localFrame1 The position and orientation of the joint relative to actor1
@see PxDistanceJoint
*/
PxDistanceJoint* PxDistanceJointCreate(PxPhysics& physics, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1);
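/*
Usage sketch (illustrative only): keep `actor1` within 2 units of `actor0`, with a soft spring pulling
it back inside the limit. `physics`, `actor0` and `actor1` are assumed to exist; the identity local
frames and the numeric values are arbitrary.

	PxDistanceJoint* joint = PxDistanceJointCreate(physics,
		actor0, PxTransform(PxIdentity),
		actor1, PxTransform(PxIdentity));
	if(joint)
	{
		joint->setMaxDistance(2.0f);
		joint->setDistanceJointFlag(PxDistanceJointFlag::eMAX_DISTANCE_ENABLED, true);
		joint->setStiffness(100.0f);
		joint->setDamping(10.0f);
		joint->setDistanceJointFlag(PxDistanceJointFlag::eSPRING_ENABLED, true);
	}
*/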
/**
\brief flags for configuring the drive of a PxDistanceJoint
@see PxDistanceJoint
*/
struct PxDistanceJointFlag
{
enum Enum
{
eMAX_DISTANCE_ENABLED = 1<<1,
eMIN_DISTANCE_ENABLED = 1<<2,
eSPRING_ENABLED = 1<<3
};
};
typedef PxFlags<PxDistanceJointFlag::Enum, PxU16> PxDistanceJointFlags;
PX_FLAGS_OPERATORS(PxDistanceJointFlag::Enum, PxU16)
/**
\brief a joint that maintains an upper or lower bound (or both) on the distance between two points on different objects
@see PxDistanceJointCreate PxJoint
*/
class PxDistanceJoint : public PxJoint
{
public:
/**
\brief Return the current distance of the joint
*/
virtual PxReal getDistance() const = 0;
/**
\brief Set the allowed minimum distance for the joint.
The minimum distance must be no more than the maximum distance
<b>Default</b> 0.0f
<b>Range</b> [0, PX_MAX_F32)
\param[in] distance the minimum distance
@see PxDistanceJoint::minDistance, PxDistanceJointFlag::eMIN_DISTANCE_ENABLED getMinDistance()
*/
virtual void setMinDistance(PxReal distance) = 0;
/**
\brief Get the allowed minimum distance for the joint.
\return the allowed minimum distance
@see PxDistanceJoint::minDistance, PxDistanceJointFlag::eMIN_DISTANCE_ENABLED setMinDistance()
*/
virtual PxReal getMinDistance() const = 0;
/**
\brief Set the allowed maximum distance for the joint.
The maximum distance must be no less than the minimum distance.
<b>Default</b> 0.0f
<b>Range</b> [0, PX_MAX_F32)
\param[in] distance the maximum distance
@see PxDistanceJoint::maxDistance, PxDistanceJointFlag::eMAX_DISTANCE_ENABLED getMaxDistance()
*/
virtual void setMaxDistance(PxReal distance) = 0;
/**
\brief Get the allowed maximum distance for the joint.
\return the allowed maximum distance
@see PxDistanceJoint::maxDistance, PxDistanceJointFlag::eMAX_DISTANCE_ENABLED setMaxDistance()
*/
virtual PxReal getMaxDistance() const = 0;
/**
\brief Set the error tolerance of the joint.
\param[in] tolerance the distance beyond the allowed range at which the joint becomes active
@see PxDistanceJoint::tolerance, getTolerance()
*/
virtual void setTolerance(PxReal tolerance) = 0;
/**
\brief Get the error tolerance of the joint.
The tolerance is the distance beyond the joint's [min, max] range before the joint becomes active.
<b>Default</b> 0.25f * PxTolerancesScale::length
<b>Range</b> (0, PX_MAX_F32)
This value should be used to ensure that if the minimum distance is zero and the
spring function is in use, the rest length of the spring is non-zero.
@see PxDistanceJoint::tolerance, setTolerance()
*/
virtual PxReal getTolerance() const = 0;
/**
\brief Set the strength of the joint spring.
The spring is used if enabled, and the distance exceeds the range [min-error, max+error].
<b>Default</b> 0.0f
<b>Range</b> [0, PX_MAX_F32)
\param[in] stiffness the spring strength of the joint
@see PxDistanceJointFlag::eSPRING_ENABLED getStiffness()
*/
virtual void setStiffness(PxReal stiffness) = 0;
/**
\brief Get the strength of the joint spring.
\return the spring strength of the joint
@see PxDistanceJointFlag::eSPRING_ENABLED setStiffness()
*/
virtual PxReal getStiffness() const = 0;
/**
\brief Set the damping of the joint spring.
The spring is used if enabled, and the distance exceeds the range [min-error, max+error].
<b>Default</b> 0.0f
<b>Range</b> [0, PX_MAX_F32)
\param[in] damping the degree of damping of the joint spring
@see PxDistanceJointFlag::eSPRING_ENABLED getDamping()
*/
virtual void setDamping(PxReal damping) = 0;
/**
\brief Get the damping of the joint spring.
\return the degree of damping of the joint spring
@see PxDistanceJointFlag::eSPRING_ENABLED setDamping()
*/
virtual PxReal getDamping() const = 0;
/**
\brief Set the flags specific to the Distance Joint.
<b>Default</b> PxDistanceJointFlag::eMAX_DISTANCE_ENABLED
\param[in] flags The joint flags.
@see PxDistanceJointFlag setFlag() getFlags()
*/
virtual void setDistanceJointFlags(PxDistanceJointFlags flags) = 0;
/**
\brief Set a single flag specific to a Distance Joint to true or false.
\param[in] flag The flag to set or clear.
\param[in] value the value to which to set the flag
@see PxDistanceJointFlag, getFlags() setFlags()
*/
virtual void setDistanceJointFlag(PxDistanceJointFlag::Enum flag, bool value) = 0;
/**
\brief Get the flags specific to the Distance Joint.
\return the joint flags
@see PxDistanceJoint::flags, PxDistanceJointFlag setFlag() setFlags()
*/
virtual PxDistanceJointFlags getDistanceJointFlags() const = 0;
/**
\brief Returns string name of PxDistanceJoint, used for serialization
*/
virtual const char* getConcreteTypeName() const { return "PxDistanceJoint"; }
protected:
//serialization
/**
\brief Constructor
*/
PX_INLINE PxDistanceJoint(PxType concreteType, PxBaseFlags baseFlags) : PxJoint(concreteType, baseFlags) {}
/**
\brief Deserialization constructor
*/
PX_INLINE PxDistanceJoint(PxBaseFlags baseFlags) : PxJoint(baseFlags) {}
/**
\brief Returns whether a given type name matches with the type of this instance
*/
virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxDistanceJoint", PxJoint); }
//~serialization
};
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 8,026 | C | 28.840149 | 167 | 0.740718 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxPrismaticJoint.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PRISMATIC_JOINT_H
#define PX_PRISMATIC_JOINT_H
/** \addtogroup extensions
@{
*/
#include "extensions/PxJoint.h"
#include "extensions/PxJointLimit.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxPrismaticJoint;
/**
\brief Create a prismatic joint.
\param[in] physics The physics SDK
\param[in] actor0 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localFrame0 The position and orientation of the joint relative to actor0
\param[in] actor1 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localFrame1 The position and orientation of the joint relative to actor1
@see PxPrismaticJoint
*/
PxPrismaticJoint* PxPrismaticJointCreate(PxPhysics& physics, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1);
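/*
Usage sketch (illustrative only): let `actor1` slide +/-1 unit along the joint frame's x-axis relative
to `actor0`. `physics`, the actors and the local frames `frame0`/`frame1` are assumed to exist, and the
tolerances-scale constructor of PxJointLinearLimitPair (from PxJointLimit.h) is assumed here.

	PxPrismaticJoint* joint = PxPrismaticJointCreate(physics, actor0, frame0, actor1, frame1);
	if(joint)
	{
		joint->setLimit(PxJointLinearLimitPair(physics.getTolerancesScale(), -1.0f, 1.0f));
		joint->setPrismaticJointFlag(PxPrismaticJointFlag::eLIMIT_ENABLED, true);
	}
*/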
/**
\brief Flags specific to the prismatic joint.
@see PxPrismaticJoint
*/
struct PxPrismaticJointFlag
{
enum Enum
{
eLIMIT_ENABLED = 1<<1
};
};
typedef PxFlags<PxPrismaticJointFlag::Enum, PxU16> PxPrismaticJointFlags;
PX_FLAGS_OPERATORS(PxPrismaticJointFlag::Enum, PxU16)
/**
\brief A prismatic joint permits relative translational movement between two bodies along
an axis, but no relative rotational movement.
The axis on each body is defined as the line containing the origin of the joint frame and
extending along the x-axis of that frame.
\image html prismJoint.png
@see PxPrismaticJointCreate() PxJoint
*/
class PxPrismaticJoint : public PxJoint
{
public:
/**
\brief returns the displacement of the joint along its axis.
*/
virtual PxReal getPosition() const = 0;
/**
\brief returns the velocity of the joint along its axis
*/
virtual PxReal getVelocity() const = 0;
/**
\brief sets the joint limit parameters.
The limit range is [-PX_MAX_F32, PX_MAX_F32], but note that the width of the limit (upper-lower) must also be
a valid float.
@see PxJointLinearLimitPair getLimit()
*/
virtual void setLimit(const PxJointLinearLimitPair&) = 0;
/**
\brief gets the joint limit parameters.
@see PxJointLinearLimit getLimit()
*/
virtual PxJointLinearLimitPair getLimit() const = 0;
/**
\brief Set the flags specific to the Prismatic Joint.
<b>Default</b> PxPrismaticJointFlags(0)
\param[in] flags The joint flags.
@see PxPrismaticJointFlag setFlag() getFlags()
*/
virtual void setPrismaticJointFlags(PxPrismaticJointFlags flags) = 0;
/**
\brief Set a single flag specific to a Prismatic Joint to true or false.
\param[in] flag The flag to set or clear.
\param[in] value The value to which to set the flag
@see PxPrismaticJointFlag, getFlags() setFlags()
*/
virtual void setPrismaticJointFlag(PxPrismaticJointFlag::Enum flag, bool value) = 0;
/**
\brief Get the flags specific to the Prismatic Joint.
\return the joint flags
@see PxPrismaticJoint::flags, PxPrismaticJointFlag setFlag() setFlags()
*/
virtual PxPrismaticJointFlags getPrismaticJointFlags() const = 0;
/**
\brief Returns string name of PxPrismaticJoint, used for serialization
*/
virtual const char* getConcreteTypeName() const { return "PxPrismaticJoint"; }
protected:
//serialization
/**
\brief Constructor
*/
PX_INLINE PxPrismaticJoint(PxType concreteType, PxBaseFlags baseFlags) : PxJoint(concreteType, baseFlags) {}
/**
\brief Deserialization constructor
*/
PX_INLINE PxPrismaticJoint(PxBaseFlags baseFlags) : PxJoint(baseFlags) {}
/**
\brief Returns whether a given type name matches with the type of this instance
*/
virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxPrismaticJoint", PxJoint); }
//~serialization
};
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 5,545 | C | 29.98324 | 169 | 0.750586 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxRepXSimpleType.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_REPX_SIMPLE_TYPE_H
#define PX_REPX_SIMPLE_TYPE_H
/** \addtogroup extensions
@{
*/
#include "foundation/PxSimpleTypes.h"
#include "cooking/PxCooking.h"
#include "common/PxStringTable.h"
#include "common/PxSerialFramework.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Helper class containing the mapping of id to object, and type name.
\deprecated Xml serialization is deprecated. An alternative serialization system is provided through USD Physics.
*/
struct PX_DEPRECATED PxRepXObject
{
/**
\brief Identifies the extension meant to handle this object.
@see PxTypeInfo, PX_DEFINE_TYPEINFO, PxRepXSerializer
*/
const char* typeName;
/**
\brief Pointer to the serializable this was created from
*/
const void* serializable;
/**
\brief Id given to this object at some point
*/
PxSerialObjectId id;
PxRepXObject( const char* inTypeName = "", const void* inSerializable = NULL, const PxSerialObjectId inId = 0 )
: typeName( inTypeName )
, serializable( inSerializable )
, id( inId )
{
}
bool isValid() const { return serializable != NULL; }
};
/**
\brief Arguments required to instantiate a serializable object from RepX.
\deprecated Xml serialization is deprecated. An alternative serialization system is provided through USD Physics.
Extra arguments can be added to the object map under special ids.
@see PxRepXSerializer::objectToFile, PxRepXSerializer::fileToObject
*/
struct PX_DEPRECATED PxRepXInstantiationArgs
{
PxPhysics& physics;
const PxCookingParams* cooker;
PxStringTable* stringTable;
PxRepXInstantiationArgs( PxPhysics& inPhysics, const PxCookingParams* inCooking = NULL , PxStringTable* inStringTable = NULL )
: physics( inPhysics )
, cooker( inCooking )
, stringTable( inStringTable )
{
}
PxRepXInstantiationArgs& operator=(const PxRepXInstantiationArgs&);
};
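/*
Usage sketch (illustrative only, deprecated API): filling in the helper structs before handing an
object to a PxRepXSerializer. `physics`, `stringTable` and `material` are assumed to exist, and the
type name string is assumed to match the serializer registered for PxMaterial.

	PxCookingParams params(physics.getTolerancesScale());
	PxRepXInstantiationArgs args(physics, &params, stringTable);
	PxRepXObject ref("PxMaterial", material, 1);
	if(ref.isValid())
	{
		// pass `ref` and `args` to the appropriate PxRepXSerializer
	}
*/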
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 3,661 | C | 32.907407 | 129 | 0.746244 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxDefaultStreams.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_DEFAULT_STREAMS_H
#define PX_DEFAULT_STREAMS_H
/** \addtogroup extensions
@{
*/
#include <stdio.h>
#include "common/PxPhysXCommonConfig.h"
#include "foundation/PxIO.h"
#include "foundation/PxFoundation.h"
typedef FILE* PxFileHandle;
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief default implementation of a memory write stream
@see PxOutputStream
*/
class PxDefaultMemoryOutputStream: public PxOutputStream
{
public:
PxDefaultMemoryOutputStream(PxAllocatorCallback &allocator = *PxGetAllocatorCallback());
virtual ~PxDefaultMemoryOutputStream();
virtual PxU32 write(const void* src, PxU32 count);
virtual PxU32 getSize() const { return mSize; }
virtual PxU8* getData() const { return mData; }
private:
PxDefaultMemoryOutputStream(const PxDefaultMemoryOutputStream&);
PxDefaultMemoryOutputStream& operator=(const PxDefaultMemoryOutputStream&);
PxAllocatorCallback& mAllocator;
PxU8* mData;
PxU32 mSize;
PxU32 mCapacity;
};
/**
\brief default implementation of a memory read stream
@see PxInputData
*/
class PxDefaultMemoryInputData: public PxInputData
{
public:
PxDefaultMemoryInputData(PxU8* data, PxU32 length);
virtual PxU32 read(void* dest, PxU32 count);
virtual PxU32 getLength() const;
virtual void seek(PxU32 pos);
virtual PxU32 tell() const;
private:
PxU32 mSize;
const PxU8* mData;
PxU32 mPos;
};
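/*
Usage sketch (illustrative only): write bytes into a growable memory stream, then read them back.
`srcData` and `srcSize` stand in for application data (for example the output of a cooking call).

	PxDefaultMemoryOutputStream out;
	out.write(srcData, srcSize);

	PxDefaultMemoryInputData in(out.getData(), out.getSize());
	PxU8 header[16];
	const PxU32 bytesRead = in.read(header, 16);	// read() returns the number of bytes actually read
*/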
/**
\brief default implementation of a file write stream
@see PxOutputStream
*/
class PxDefaultFileOutputStream: public PxOutputStream
{
public:
PxDefaultFileOutputStream(const char* name);
virtual ~PxDefaultFileOutputStream();
virtual PxU32 write(const void* src, PxU32 count);
virtual bool isValid();
private:
PxFileHandle mFile;
};
/**
\brief default implementation of a file read stream
@see PxInputData
*/
class PxDefaultFileInputData: public PxInputData
{
public:
PxDefaultFileInputData(const char* name);
virtual ~PxDefaultFileInputData();
virtual PxU32 read(void* dest, PxU32 count);
virtual void seek(PxU32 pos);
virtual PxU32 tell() const;
virtual PxU32 getLength() const;
bool isValid() const;
private:
PxFileHandle mFile;
PxU32 mLength;
};
#if !PX_DOXYGEN
}
#endif
/** @} */
#endif
| 3,962 | C | 25.777027 | 94 | 0.748864 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxMassProperties.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_MASS_PROPERTIES_H
#define PX_MASS_PROPERTIES_H
/** \addtogroup extensions
@{
*/
#include "PxPhysXConfig.h"
#include "foundation/PxMath.h"
#include "foundation/PxMathUtils.h"
#include "foundation/PxVec3.h"
#include "foundation/PxMat33.h"
#include "foundation/PxQuat.h"
#include "foundation/PxTransform.h"
#include "geometry/PxGeometry.h"
#include "geometry/PxBoxGeometry.h"
#include "geometry/PxSphereGeometry.h"
#include "geometry/PxCapsuleGeometry.h"
#include "geometry/PxConvexMeshGeometry.h"
#include "geometry/PxConvexMesh.h"
#include "geometry/PxCustomGeometry.h"
#include "geometry/PxTriangleMeshGeometry.h"
#include "geometry/PxTriangleMesh.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Utility class to compute and manipulate mass and inertia tensor properties.
In most cases #PxRigidBodyExt::updateMassAndInertia(), #PxRigidBodyExt::setMassAndUpdateInertia() should be enough to
setup the mass properties of a rigid body. This utility class targets users that need to customize the mass properties
computation.
*/
class PxMassProperties
{
public:
/**
\brief Default constructor.
*/
PX_FORCE_INLINE PxMassProperties() : inertiaTensor(PxIdentity), centerOfMass(0.0f), mass(1.0f) {}
/**
\brief Construct from individual elements.
*/
PX_FORCE_INLINE PxMassProperties(const PxReal m, const PxMat33& inertiaT, const PxVec3& com) : inertiaTensor(inertiaT), centerOfMass(com), mass(m) {}
/**
\brief Compute mass properties based on a provided geometry structure.
This constructor assumes the geometry has a density of 1. Mass and inertia tensor scale linearly with density.
\param[in] geometry The geometry to compute the mass properties for. Supported geometry types are: sphere, box, capsule and convex mesh.
*/
PxMassProperties(const PxGeometry& geometry)
{
switch (geometry.getType())
{
case PxGeometryType::eSPHERE:
{
const PxSphereGeometry& s = static_cast<const PxSphereGeometry&>(geometry);
mass = (4.0f / 3.0f) * PxPi * s.radius * s.radius * s.radius;
inertiaTensor = PxMat33::createDiagonal(PxVec3(2.0f / 5.0f * mass * s.radius * s.radius));
centerOfMass = PxVec3(0.0f);
}
break;
case PxGeometryType::eBOX:
{
const PxBoxGeometry& b = static_cast<const PxBoxGeometry&>(geometry);
mass = b.halfExtents.x * b.halfExtents.y * b.halfExtents.z * 8.0f;
PxVec3 d2 = b.halfExtents.multiply(b.halfExtents);
inertiaTensor = PxMat33::createDiagonal(PxVec3(d2.y + d2.z, d2.x + d2.z, d2.x + d2.y)) * (mass * 1.0f / 3.0f);
centerOfMass = PxVec3(0.0f);
}
break;
case PxGeometryType::eCAPSULE:
{
const PxCapsuleGeometry& c = static_cast<const PxCapsuleGeometry&>(geometry);
PxReal r = c.radius, h = c.halfHeight;
mass = ((4.0f / 3.0f) * r + 2.0f * h) * PxPi * r * r;
PxReal a = r*r*r * (8.0f / 15.0f) + h*r*r * (3.0f / 2.0f) + h*h*r * (4.0f / 3.0f) + h*h*h * (2.0f / 3.0f);
PxReal b = r*r*r * (8.0f / 15.0f) + h*r*r;
inertiaTensor = PxMat33::createDiagonal(PxVec3(b, a, a) * PxPi * r * r);
centerOfMass = PxVec3(0.0f);
}
break;
case PxGeometryType::eCONVEXMESH:
{
const PxConvexMeshGeometry& c = static_cast<const PxConvexMeshGeometry&>(geometry);
PxVec3 unscaledCoM;
PxMat33 unscaledInertiaTensorNonCOM; // inertia tensor of convex mesh in mesh local space
PxMat33 unscaledInertiaTensorCOM;
PxReal unscaledMass;
c.convexMesh->getMassInformation(unscaledMass, unscaledInertiaTensorNonCOM, unscaledCoM);
// inertia tensor relative to center of mass
unscaledInertiaTensorCOM[0][0] = unscaledInertiaTensorNonCOM[0][0] - unscaledMass*PxReal((unscaledCoM.y*unscaledCoM.y+unscaledCoM.z*unscaledCoM.z));
unscaledInertiaTensorCOM[1][1] = unscaledInertiaTensorNonCOM[1][1] - unscaledMass*PxReal((unscaledCoM.z*unscaledCoM.z+unscaledCoM.x*unscaledCoM.x));
unscaledInertiaTensorCOM[2][2] = unscaledInertiaTensorNonCOM[2][2] - unscaledMass*PxReal((unscaledCoM.x*unscaledCoM.x+unscaledCoM.y*unscaledCoM.y));
unscaledInertiaTensorCOM[0][1] = unscaledInertiaTensorCOM[1][0] = (unscaledInertiaTensorNonCOM[0][1] + unscaledMass*PxReal(unscaledCoM.x*unscaledCoM.y));
unscaledInertiaTensorCOM[1][2] = unscaledInertiaTensorCOM[2][1] = (unscaledInertiaTensorNonCOM[1][2] + unscaledMass*PxReal(unscaledCoM.y*unscaledCoM.z));
unscaledInertiaTensorCOM[0][2] = unscaledInertiaTensorCOM[2][0] = (unscaledInertiaTensorNonCOM[0][2] + unscaledMass*PxReal(unscaledCoM.z*unscaledCoM.x));
const PxMeshScale& s = c.scale;
mass = unscaledMass * s.scale.x * s.scale.y * s.scale.z;
centerOfMass = s.transform(unscaledCoM);
inertiaTensor = scaleInertia(unscaledInertiaTensorCOM, s.rotation, s.scale);
}
break;
case PxGeometryType::eCUSTOM:
{
*this = PxMassProperties();
static_cast<const PxCustomGeometry&>(geometry).callbacks->computeMassProperties(geometry, *this);
}
break;
case PxGeometryType::eTRIANGLEMESH:
{
const PxTriangleMeshGeometry& g = static_cast<const PxTriangleMeshGeometry&>(geometry);
PxVec3 unscaledCoM;
PxMat33 unscaledInertiaTensorNonCOM; // inertia tensor of triangle mesh in mesh local space
PxMat33 unscaledInertiaTensorCOM;
PxReal unscaledMass;
g.triangleMesh->getMassInformation(unscaledMass, unscaledInertiaTensorNonCOM, unscaledCoM);
// inertia tensor relative to center of mass
unscaledInertiaTensorCOM[0][0] = unscaledInertiaTensorNonCOM[0][0] - unscaledMass * PxReal((unscaledCoM.y*unscaledCoM.y + unscaledCoM.z*unscaledCoM.z));
unscaledInertiaTensorCOM[1][1] = unscaledInertiaTensorNonCOM[1][1] - unscaledMass * PxReal((unscaledCoM.z*unscaledCoM.z + unscaledCoM.x*unscaledCoM.x));
unscaledInertiaTensorCOM[2][2] = unscaledInertiaTensorNonCOM[2][2] - unscaledMass * PxReal((unscaledCoM.x*unscaledCoM.x + unscaledCoM.y*unscaledCoM.y));
unscaledInertiaTensorCOM[0][1] = unscaledInertiaTensorCOM[1][0] = (unscaledInertiaTensorNonCOM[0][1] + unscaledMass * PxReal(unscaledCoM.x*unscaledCoM.y));
unscaledInertiaTensorCOM[1][2] = unscaledInertiaTensorCOM[2][1] = (unscaledInertiaTensorNonCOM[1][2] + unscaledMass * PxReal(unscaledCoM.y*unscaledCoM.z));
unscaledInertiaTensorCOM[0][2] = unscaledInertiaTensorCOM[2][0] = (unscaledInertiaTensorNonCOM[0][2] + unscaledMass * PxReal(unscaledCoM.z*unscaledCoM.x));
const PxMeshScale& s = g.scale;
mass = unscaledMass * s.scale.x * s.scale.y * s.scale.z;
centerOfMass = s.transform(unscaledCoM);
inertiaTensor = scaleInertia(unscaledInertiaTensorCOM, s.rotation, s.scale);
}
break;
default:
{
*this = PxMassProperties();
}
}
PX_ASSERT(inertiaTensor.column0.isFinite() && inertiaTensor.column1.isFinite() && inertiaTensor.column2.isFinite());
PX_ASSERT(centerOfMass.isFinite());
PX_ASSERT(PxIsFinite(mass));
}
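/*
Usage sketch (illustrative only): unit-density properties for a 1x1x1 box, scaled to a density of 1000,
then applied to a rigid body. `body` is assumed to be an existing PxRigidBody; the setMass,
setMassSpaceInertiaTensor and setCMassLocalPose calls belong to that class, not to this helper.

	PxMassProperties mp(PxBoxGeometry(0.5f, 0.5f, 0.5f));	// density 1
	mp = mp * 1000.0f;										// density 1000

	PxQuat massFrame;
	const PxVec3 diagInertia = PxMassProperties::getMassSpaceInertia(mp.inertiaTensor, massFrame);

	body.setMass(mp.mass);
	body.setMassSpaceInertiaTensor(diagInertia);
	body.setCMassLocalPose(PxTransform(mp.centerOfMass, massFrame));
*/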
/**
\brief Scale mass properties.
\param[in] scale The linear scaling factor to apply to the mass properties.
\return The scaled mass properties.
*/
PX_FORCE_INLINE PxMassProperties operator*(const PxReal scale) const
{
PX_ASSERT(PxIsFinite(scale));
return PxMassProperties(mass * scale, inertiaTensor * scale, centerOfMass);
}
/**
\brief Translate the center of mass by a given vector and adjust the inertia tensor accordingly.
\param[in] t The translation vector for the center of mass.
*/
PX_FORCE_INLINE void translate(const PxVec3& t)
{
PX_ASSERT(t.isFinite());
inertiaTensor = translateInertia(inertiaTensor, mass, t);
centerOfMass += t;
PX_ASSERT(inertiaTensor.column0.isFinite() && inertiaTensor.column1.isFinite() && inertiaTensor.column2.isFinite());
PX_ASSERT(centerOfMass.isFinite());
}
/**
\brief Get the entries of the diagonalized inertia tensor and the corresponding reference rotation.
\param[in] inertia The inertia tensor to diagonalize.
\param[out] massFrame The frame the diagonalized tensor refers to.
\return The entries of the diagonalized inertia tensor.
*/
PX_FORCE_INLINE static PxVec3 getMassSpaceInertia(const PxMat33& inertia, PxQuat& massFrame)
{
PX_ASSERT(inertia.column0.isFinite() && inertia.column1.isFinite() && inertia.column2.isFinite());
PxVec3 diagT = PxDiagonalize(inertia, massFrame);
PX_ASSERT(diagT.isFinite());
PX_ASSERT(massFrame.isFinite());
return diagT;
}
/**
\brief Translate an inertia tensor using the parallel axis theorem
\param[in] inertia The inertia tensor to translate.
\param[in] mass The mass of the object.
\param[in] t The relative frame to translate the inertia tensor to.
\return The translated inertia tensor.
*/
PX_FORCE_INLINE static PxMat33 translateInertia(const PxMat33& inertia, const PxReal mass, const PxVec3& t)
{
PX_ASSERT(inertia.column0.isFinite() && inertia.column1.isFinite() && inertia.column2.isFinite());
PX_ASSERT(PxIsFinite(mass));
PX_ASSERT(t.isFinite());
PxMat33 s( PxVec3(0,t.z,-t.y),
PxVec3(-t.z,0,t.x),
PxVec3(t.y,-t.x,0) );
PxMat33 translatedIT = s.getTranspose() * s * mass + inertia;
PX_ASSERT(translatedIT.column0.isFinite() && translatedIT.column1.isFinite() && translatedIT.column2.isFinite());
return translatedIT;
}
/**
\brief Rotate an inertia tensor around the center of mass
\param[in] inertia The inertia tensor to rotate.
\param[in] q The rotation from the new to the old coordinate frame, i.e. q.rotate(v) transforms
the coordinates of vector v from the old to the new coordinate frame.
\return The rotated inertia tensor.
*/
PX_FORCE_INLINE static PxMat33 rotateInertia(const PxMat33& inertia, const PxQuat& q)
{
PX_ASSERT(inertia.column0.isFinite() && inertia.column1.isFinite() && inertia.column2.isFinite());
PX_ASSERT(q.isUnit());
PxMat33 m(q);
PxMat33 rotatedIT = m * inertia * m.getTranspose();
PX_ASSERT(rotatedIT.column0.isFinite() && rotatedIT.column1.isFinite() && rotatedIT.column2.isFinite());
return rotatedIT;
}
/**
\brief Non-uniform scaling of the inertia tensor
\param[in] inertia The inertia tensor to scale.
\param[in] scaleRotation The rotation from the scaling frame to the frame that inertia is expressed in.
I.e. scaleRotation.rotate(v) transforms the coordinates of vertex v from inertia's frame to the scaling-axes frame.
\param[in] scale The scaling factor for each axis (relative to the frame specified with scaleRotation).
\return The scaled inertia tensor.
*/
static PxMat33 scaleInertia(const PxMat33& inertia, const PxQuat& scaleRotation, const PxVec3& scale)
{
PX_ASSERT(inertia.column0.isFinite() && inertia.column1.isFinite() && inertia.column2.isFinite());
PX_ASSERT(scaleRotation.isUnit());
PX_ASSERT(scale.isFinite());
PxMat33 localInertiaT = rotateInertia(inertia, scaleRotation); // rotate inertia into scaling frame
PxVec3 diagonal(localInertiaT[0][0], localInertiaT[1][1], localInertiaT[2][2]);
PxVec3 xyz2 = PxVec3(diagonal.dot(PxVec3(0.5f))) - diagonal; // original x^2, y^2, z^2
PxVec3 scaledxyz2 = xyz2.multiply(scale).multiply(scale);
PxReal xx = scaledxyz2.y + scaledxyz2.z,
yy = scaledxyz2.z + scaledxyz2.x,
zz = scaledxyz2.x + scaledxyz2.y;
PxReal xy = localInertiaT[0][1] * scale.x * scale.y,
xz = localInertiaT[0][2] * scale.x * scale.z,
yz = localInertiaT[1][2] * scale.y * scale.z;
PxMat33 scaledInertia( PxVec3(xx, xy, xz),
PxVec3(xy, yy, yz),
PxVec3(xz, yz, zz));
PxMat33 scaledIT = rotateInertia(scaledInertia * (scale.x * scale.y * scale.z), scaleRotation.getConjugate());
PX_ASSERT(scaledIT.column0.isFinite() && scaledIT.column1.isFinite() && scaledIT.column2.isFinite());
return scaledIT;
}
/**
\brief Sum up individual mass properties.
\param[in] props Array of mass properties to sum up.
\param[in] transforms Reference transforms for each mass properties entry.
\param[in] count The number of mass properties to sum up.
\return The summed up mass properties.
*/
static PxMassProperties sum(const PxMassProperties* props, const PxTransform* transforms, const PxU32 count)
{
PxReal combinedMass = 0.0f;
PxVec3 combinedCoM(0.0f);
PxMat33 combinedInertiaT = PxMat33(PxZero);
for(PxU32 i = 0; i < count; i++)
{
PX_ASSERT(props[i].inertiaTensor.column0.isFinite() && props[i].inertiaTensor.column1.isFinite() && props[i].inertiaTensor.column2.isFinite());
PX_ASSERT(props[i].centerOfMass.isFinite());
PX_ASSERT(PxIsFinite(props[i].mass));
combinedMass += props[i].mass;
const PxVec3 comTm = transforms[i].transform(props[i].centerOfMass);
combinedCoM += comTm * props[i].mass;
}
if(combinedMass > 0.f)
combinedCoM /= combinedMass;
for(PxU32 i = 0; i < count; i++)
{
const PxVec3 comTm = transforms[i].transform(props[i].centerOfMass);
combinedInertiaT += translateInertia(rotateInertia(props[i].inertiaTensor, transforms[i].q), props[i].mass, combinedCoM - comTm);
}
PX_ASSERT(combinedInertiaT.column0.isFinite() && combinedInertiaT.column1.isFinite() && combinedInertiaT.column2.isFinite());
PX_ASSERT(combinedCoM.isFinite());
PX_ASSERT(PxIsFinite(combinedMass));
return PxMassProperties(combinedMass, combinedInertiaT, combinedCoM);
}
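/**
A minimal usage sketch (illustrative): combining the mass properties of two parts whose reference frames are
one unit apart along Y. The masses and inertia values below are placeholders.
\code
PxMassProperties parts[2];
parts[0] = PxMassProperties(2.0f, PxMat33(PxIdentity), PxVec3(0.0f));
parts[1] = PxMassProperties(1.0f, PxMat33(PxIdentity), PxVec3(0.0f));
const PxTransform frames[2] = { PxTransform(PxVec3(0.0f)), PxTransform(PxVec3(0.0f, 1.0f, 0.0f)) };
const PxMassProperties combined = PxMassProperties::sum(parts, frames, 2);
\endcode
*/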
PxMat33 inertiaTensor; //!< The inertia tensor of the object.
PxVec3 centerOfMass; //!< The center of mass of the object.
PxReal mass; //!< The mass of the object.
};
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 15,019 | C | 40.038251 | 159 | 0.731873 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxTetMakerExt.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_TETMAKER_EXT_H
#define PX_TETMAKER_EXT_H
/** \addtogroup extensions
@{
*/
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxVec3.h"
#include "common/PxCoreUtilityTypes.h"
#include "foundation/PxArray.h"
#include "PxTriangleMeshAnalysisResult.h"
#include "PxTetrahedronMeshAnalysisResult.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxTriangleMesh;
class PxTetrahedronMeshDesc;
class PxSoftBodySimulationDataDesc;
struct PxTetMakerData;
class PxSimpleTriangleMesh;
/**
\brief Provides functionality to create a tetrahedral mesh from a triangle mesh.
*/
class PxTetMaker
{
public:
/**
\brief Create conforming tetrahedron mesh using TetMaker
\param[in] triangleMesh The description of the triangle mesh including vertices and indices
\param[out] outVertices The vertices to store the conforming tetrahedral mesh
\param[out] outTetIndices The indices to store the conforming tetrahedral mesh
\param[in] validate If set to true the input triangle mesh will get analyzed to find possible deficiencies
\param[in] volumeThreshold Tetrahedra with a volume smaller than the specified threshold will be removed from the mesh
\return True if success
*/
static bool createConformingTetrahedronMesh(const PxSimpleTriangleMesh& triangleMesh, physx::PxArray<physx::PxVec3>& outVertices, physx::PxArray<physx::PxU32>& outTetIndices,
const bool validate = true, PxReal volumeThreshold = 0.0f);
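/**
A minimal usage sketch (illustrative): running the tetmesher on an existing vertex/index buffer. The
variables vertices, numVertices, indices and numTriangles are placeholders for the application's mesh data.
\code
PxSimpleTriangleMesh surface;
surface.points.count = numVertices;
surface.points.stride = sizeof(PxVec3);
surface.points.data = vertices;
surface.triangles.count = numTriangles;
surface.triangles.stride = 3 * sizeof(PxU32);
surface.triangles.data = indices;
PxArray<PxVec3> tetVertices;
PxArray<PxU32> tetIndices;
const bool success = PxTetMaker::createConformingTetrahedronMesh(surface, tetVertices, tetIndices);
\endcode
*/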
/**
\brief Create voxel-based tetrahedron mesh using TetMaker
\param[in] tetMesh The description of the tetrahedral mesh including vertices and indices
\param[in] numVoxelsAlongLongestBoundingBoxAxis The number of voxels along the longest bounding box axis
\param[out] outVertices The vertices to store the voxel-based tetrahedral mesh
\param[out] outTetIndices The indices to store the voxel-based tetrahedral mesh
\param[out] inputPointToOutputTetIndex Buffer with the size of nbTetVerts that contains the tetrahedron index containing the input point with the same index
\param[in] anchorNodeIndices Some input vertices may not be referenced by any tetrahedron. They can be mapped to another input vertex that is used by a tetrahedron to support embedding of additional points.
\param[in] numTetsPerVoxel The number of tetrahedra used to fill a voxel. Only a value of 5 or 6 is supported. 5 is recommended because it mostly avoids mesh anisotropy.
\return True if success
*/
static bool createVoxelTetrahedronMesh(const PxTetrahedronMeshDesc& tetMesh, const PxU32 numVoxelsAlongLongestBoundingBoxAxis,
physx::PxArray<physx::PxVec3>& outVertices, physx::PxArray<physx::PxU32>& outTetIndices, PxI32* inputPointToOutputTetIndex = NULL, const PxU32* anchorNodeIndices = NULL, PxU32 numTetsPerVoxel = 5);
/**
\brief Create voxel-based tetrahedron mesh using TetMaker
\param[in] tetMesh The description of the tetrahedral mesh including vertices and indices
\param[in] voxelEdgeLength The edge length of a voxel. Can be adjusted slightly such that a multiple of it matches the input points' bounding box size
\param[out] outVertices The vertices to store the voxel-based tetrahedral mesh
\param[out] outTetIndices The indices to store the voxel-based tetrahedral mesh
\param[out] inputPointToOutputTetIndex Buffer with the size of nbTetVerts that contains the tetrahedron index containing the input point with the same index
\param[in] anchorNodeIndices Some input vertices may not be referenced by any tetrahedron. They can be mapped to another input vertex that is used by a tetrahedron to support embedding of additional points.
\param[in] numTetsPerVoxel The number of tetrahedra used to fill a voxel. Only a value of 5 or 6 is supported. 5 is recommended because it mostly avoids mesh anisotropy.
\return True if success
*/
static bool createVoxelTetrahedronMeshFromEdgeLength(const PxTetrahedronMeshDesc& tetMesh, const PxReal voxelEdgeLength,
physx::PxArray<physx::PxVec3>& outVertices, physx::PxArray<physx::PxU32>& outTetIndices, PxI32* inputPointToOutputTetIndex = NULL, const PxU32* anchorNodeIndices = NULL, PxU32 numTetsPerVoxel = 5);
/**
\brief Analyzes the triangle mesh to get a report about deficiencies. Some deficiencies can be handled by the tetmesher, others cannot.
\param[in] triangleMesh The description of the triangle mesh including vertices and indices
\param[in] minVolumeThreshold Minimum volume the mesh must have such that no volume warning is generated
\param[in] minTriangleAngleRadians Minimum angle allowed for triangles such that no angle warning is generated
\return Flags that describe the triangle mesh's deficiencies
*/
static PxTriangleMeshAnalysisResults validateTriangleMesh(const PxSimpleTriangleMesh& triangleMesh, const PxReal minVolumeThreshold = 1e-6f, const PxReal minTriangleAngleRadians = 10.0f*3.1415926535898f / 180.0f);
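/**
A small usage sketch (illustrative): rejecting meshes the tetmesher cannot process. The surface variable is
assumed to be a filled-in PxSimpleTriangleMesh.
\code
const PxTriangleMeshAnalysisResults analysis = PxTetMaker::validateTriangleMesh(surface);
if(analysis & PxTriangleMeshAnalysisResult::eMESH_IS_INVALID)
{
	// the mesh cannot be tetrahedralized as-is; inspect the remaining flags for the reason
}
\endcode
*/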
/**
\brief Analyzes the tetrahedron mesh to get a report about deficiencies. Some deficiencies can be handled by the softbody cooker, others cannot.
\param[in] points The mesh's points
\param[in] tetrahedra The mesh's tetrahedra (index buffer)
\param[in] minTetVolumeThreshold Minimum volume every tetrahedron in the mesh must have such that no volume warning is generated
\return Flags that describe the tetrahedron mesh's deficiencies
*/
static PxTetrahedronMeshAnalysisResults validateTetrahedronMesh(const PxBoundedData& points, const PxBoundedData& tetrahedra, const PxReal minTetVolumeThreshold = 1e-8f);
/**
\brief Simplifies (decimates) a triangle mesh using quadric simplification.
\param[in] inputVertices The vertices of the input triangle mesh
\param[in] inputIndices The indices of the input triangle mesh of the form (id0, id1, id2), (id0, id1, id2), ..
\param[in] targetTriangleCount Desired number of triangles in the output mesh
\param[in] maximalEdgeLength Edges below this length will not be collapsed. A value of zero means there is no limit.
\param[out] outputVertices The vertices of the output (decimated) triangle mesh
\param[out] outputIndices The indices of the output (decimated) triangle mesh of the form (id0, id1, id2), (id0, id1, id2), ..
\param[out] vertexMap Optional parameter which returns the mapping from input to output vertices. Note that multiple input vertices are typically collapsed into the same output vertex.
\param[in] edgeLengthCostWeight Factor to scale influence of edge length when prioritizing edge collapses. Has no effect if set to zero.
\param[in] flatnessDetectionThreshold Threshold used to detect edges in flat regions and to improve the placement of the collapsed point. If set to a large value it will have no effect.
\param[in] projectSimplifiedPointsOnInputMeshSurface If set to true, the simplified points will lie exactly on the original surface.
\param[out] outputVertexToInputTriangle Optional indices providing the triangle index per resulting vertex. Only available when projectSimplifiedPointsOnInputMeshSurface is set to true
\param[in] removeDisconnectedPatches Enables the optional removal of disconnected triangles in the mesh. Only the largest connected set/patch will be kept
*/
static void simplifyTriangleMesh(const PxArray<PxVec3>& inputVertices, const PxArray<PxU32>&inputIndices, int targetTriangleCount, PxF32 maximalEdgeLength,
PxArray<PxVec3>& outputVertices, PxArray<PxU32>& outputIndices,
PxArray<PxU32> *vertexMap = NULL, PxReal edgeLengthCostWeight = 0.1f, PxReal flatnessDetectionThreshold = 0.01f,
bool projectSimplifiedPointsOnInputMeshSurface = false, PxArray<PxU32>* outputVertexToInputTriangle = NULL, bool removeDisconnectedPatches = false);
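/**
A small usage sketch (illustrative): reducing a detailed render mesh to roughly 5000 triangles before
passing it to the tetmesher. inputVertices and inputIndices are placeholders for the application's mesh data.
\code
PxArray<PxVec3> simplifiedVertices;
PxArray<PxU32> simplifiedIndices;
PxTetMaker::simplifyTriangleMesh(inputVertices, inputIndices, 5000, 0.0f, simplifiedVertices, simplifiedIndices);
\endcode
*/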
/**
\brief Creates a new mesh from a given mesh. The input mesh is first voxelized. The new surface is created from the voxel surface and subsequent projection to the original mesh.
\param[in] inputVertices The vertices of the input triangle mesh
\param[in] inputIndices The indices of the input triangle mesh of the form (id0, id1, id2), (id0, id1, id2), ..
\param[in] gridResolution Size of the voxel grid (number of voxels along the longest dimension)
\param[out] outputVertices The vertices of the output (decimated) triangle mesh
\param[out] outputIndices The indices of the output (decimated) triangle mesh of the form (id0, id1, id2), (id0, id1, id2), ..
\param[out] vertexMap Optional parameter which returns a mapping from input to output vertices. Since the meshes are independent, the mapping returns an output vertex that is topologically close to the input vertex.
*/
static void remeshTriangleMesh(const PxArray<PxVec3>& inputVertices, const PxArray<PxU32>&inputIndices, PxU32 gridResolution,
PxArray<PxVec3>& outputVertices, PxArray<PxU32>& outputIndices, PxArray<PxU32> *vertexMap = NULL);
/**
\brief Creates a new mesh from a given mesh. The input mesh is first voxelized. The new surface is created from the voxel surface and subsequent projection to the original mesh.
\param[in] inputVertices The vertices of the input triangle mesh
\param[in] nbVertices The number of vertices of the input triangle mesh
\param[in] inputIndices The indices of the input triangle mesh of the form (id0, id1, id2), (id0, id1, id2), ..
\param[in] nbIndices The number of indices of the input triangle mesh (equal to three times the number of triangles)
\param[in] gridResolution Size of the voxel grid (number of voxels along the longest dimension)
\param[out] outputVertices The vertices of the output (decimated) triangle mesh
\param[out] outputIndices The indices of the output (decimated) triangle mesh of the form (id0, id1, id2), (id0, id1, id2), ..
\param[out] vertexMap Optional parameter which returns a mapping from input to output vertices. Since the meshes are independent, the mapping returns an output vertex that is topologically close to the input vertex.
*/
static void remeshTriangleMesh(const PxVec3* inputVertices, PxU32 nbVertices, const PxU32* inputIndices, PxU32 nbIndices, PxU32 gridResolution,
PxArray<PxVec3>& outputVertices, PxArray<PxU32>& outputIndices, PxArray<PxU32> *vertexMap = NULL);
/**
\brief Creates a tetrahedral mesh using an octree.
\param[in] inputVertices The vertices of the input triangle mesh
\param[in] inputIndices The indices of the input triangle mesh of the form (id0, id1, id2), (id0, id1, id2), ..
\param[in] useTreeNodes Using the nodes of the octree as tetrahedral vertices
\param[out] outputVertices The vertices of the output tetrahedral mesh
\param[out] outputIndices The indices of the output tetrahedral mesh of the form (id0, id1, id2, id3), (id0, id1, id2, id3), ..
\param[in] volumeThreshold Tetrahedra with a volume smaller than the specified threshold will be removed from the mesh
*/
static void createTreeBasedTetrahedralMesh(const PxArray<PxVec3>& inputVertices, const PxArray<PxU32>&inputIndices,
bool useTreeNodes, PxArray<PxVec3>& outputVertices, PxArray<PxU32>& outputIndices, PxReal volumeThreshold = 0.0f);
/**
\brief Creates a tetrahedral mesh by relaxing a voxel mesh around the input mesh
\param[in] inputVertices The vertices of the input triangle mesh
\param[in] inputIndices The indices of the input triangle mesh of the form (id0, id1, id2), (id0, id1, id2), ..
\param[out] outputVertices The vertices of the output tetrahedral mesh
\param[out] outputIndices The indices of the output tetrahedral mesh of the form (id0, id1, id2, id3), (id0, id1, id2, id3), ..
\param[in] resolution The grid spacing is computed as the diagonal of the bounding box of the input mesh divided by the resolution.
\param[in] numRelaxationIterations Number of iterations to pull the tetrahedral mesh towards the input mesh
\param[in] relMinTetVolume Constrains the volumes of the tetrahedra to stay above relMinTetVolume times the tetrahedron's rest volume.
*/
static void createRelaxedVoxelTetrahedralMesh(const PxArray<PxVec3>& inputVertices, const PxArray<PxU32>&inputIndices,
PxArray<PxVec3>& outputVertices, PxArray<PxU32>& outputIndices,
PxI32 resolution, PxI32 numRelaxationIterations = 5, PxF32 relMinTetVolume = 0.05f);
/**
\brief Detects islands (patches of connected triangles) in a triangle mesh
\param[in] triangles The indices of the input triangle mesh of the form (id0, id1, id2), (id0, id1, id2), ..
\param[in] numTriangles The number of triangles
\param[out] islandIndexPerTriangle Every triangle gets an island index assigned. Triangles with the same island index belong to the same patch of connected triangles.
*/
static void detectTriangleIslands(const PxI32* triangles, PxU32 numTriangles, PxArray<PxU32>& islandIndexPerTriangle);
/**
\brief Finds the island that contains the most triangles
\param[in] islandIndexPerTriangle An island marker per triangle. All triangles with the same marker belong to the same island. Can be computed using the method detectTriangleIslands.
\param[in] numTriangles The number of triangles
\return The marker value of the island that contains the most triangles
*/
static PxU32 findLargestIslandId(const PxU32* islandIndexPerTriangle, PxU32 numTriangles);
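/**
A small usage sketch (illustrative): finding the largest connected patch of a triangle mesh. The triangles
pointer and numTriangles count are placeholders.
\code
PxArray<PxU32> islandIndexPerTriangle;
PxTetMaker::detectTriangleIslands(triangles, numTriangles, islandIndexPerTriangle);
const PxU32 largestIsland = PxTetMaker::findLargestIslandId(islandIndexPerTriangle.begin(), numTriangles);
// triangles whose island index differs from largestIsland belong to smaller, disconnected patches
\endcode
*/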
};
#if !PX_DOXYGEN
}
#endif
/** @} */
#endif
| 14,854 | C | 64.730088 | 216 | 0.790359 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxTriangleMeshAnalysisResult.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
#ifndef PX_TRIANGLE_MESH_ANALYSIS_RESULT_H
#define PX_TRIANGLE_MESH_ANALYSIS_RESULT_H
#include "PxPhysXConfig.h"
#include "foundation/PxFlags.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief These flags indicate what kind of deficiencies a triangle mesh has and describe if the mesh is considered ok, problematic or invalid for tetmeshing
*/
class PxTriangleMeshAnalysisResult
{
public:
enum Enum
{
eVALID = 0,
eZERO_VOLUME = (1 << 0), //!< invalid: Flat mesh without meaningful amount of volume - cannot be meshed since a tetmesh is volumetric
eOPEN_BOUNDARIES = (1 << 1), //!< problematic: Open boundary means that the mesh is not watertight and that there are holes. The mesher can fill holes but the surface might have an unexpected shape where the hole was.
eSELF_INTERSECTIONS = (1 << 2), //!< problematic: The surface of the resulting mesh won't match exactly at locations of self-intersections. The tetmesh might be connected at self-intersections even if the input triangle mesh is not
eINCONSISTENT_TRIANGLE_ORIENTATION = (1 << 3), //!< invalid: It is not possible to distinguish what is inside and outside of the mesh. If there are no self-intersections and no edges shared by more than two triangles, a call to makeTriOrientationConsistent can fix this. Without fixing it, the output from the tetmesher will be incorrect
eCONTAINS_ACUTE_ANGLED_TRIANGLES = (1 << 4), //!< problematic: An ideal mesh for a softbody has triangles with similar angles and evenly distributed vertices. Acute angles can be handled but might lead to a poor quality tetmesh.
eEDGE_SHARED_BY_MORE_THAN_TWO_TRIANGLES = (1 << 5), //!< problematic: Border case of a self-intersecting mesh. The tetmesh might not match the surface exactly near such edges.
eCONTAINS_DUPLICATE_POINTS = (1 << 6), //!< ok: Duplicate points can be handled by the mesher without problems. The resulting tetmesh will only make use of the first unique point that is found; duplicate points will get mapped to that unique point in the tetmesh. Therefore the tetmesh can contain points that are not accessed by a tet.
eCONTAINS_INVALID_POINTS = (1 << 7), //!< invalid: Points contain NAN, infinity or similar values that will lead to an invalid mesh
eREQUIRES_32BIT_INDEX_BUFFER = (1 << 8), //!< invalid: Mesh contains more indices than a 16bit index buffer can address
eTRIANGLE_INDEX_OUT_OF_RANGE = (1 << 9), //!< invalid: A mesh triangle index is negative or larger than the size of the vertex buffer
eMESH_IS_PROBLEMATIC = (1 << 10), //!< flag is set if the mesh is categorized as problematic
eMESH_IS_INVALID = (1 << 11) //!< flag is set if the mesh is categorized as invalid
};
};
typedef PxFlags<PxTriangleMeshAnalysisResult::Enum, PxU32> PxTriangleMeshAnalysisResults;
PX_FLAGS_OPERATORS(PxTriangleMeshAnalysisResult::Enum, PxU32)
#if !PX_DOXYGEN
}
#endif
#endif
| 4,498 | C | 62.366196 | 343 | 0.749 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxCustomGeometryExt.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_CUSTOM_GEOMETRY_EXT_H
#define PX_CUSTOM_GEOMETRY_EXT_H
/** \addtogroup extensions
@{
*/
#include <geometry/PxCustomGeometry.h>
#include <geometry/PxGjkQuery.h>
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxGeometry;
class PxMassProperties;
class PxGeometryHolder;
struct PxContactPoint;
/**
\brief Pre-made custom geometry callbacks implementations.
*/
class PxCustomGeometryExt
{
public:
/// \cond PRIVATE
struct BaseConvexCallbacks : PxCustomGeometry::Callbacks, PxGjkQuery::Support
{
BaseConvexCallbacks(float _margin) : margin(_margin) {}
// override PxCustomGeometry::Callbacks
virtual PxBounds3 getLocalBounds(const PxGeometry& geometry) const;
virtual bool generateContacts(const PxGeometry& geom0, const PxGeometry& geom1, const PxTransform& pose0, const PxTransform& pose1,
const PxReal contactDistance, const PxReal meshContactMargin, const PxReal toleranceLength,
PxContactBuffer& contactBuffer) const;
virtual PxU32 raycast(const PxVec3& origin, const PxVec3& unitDir, const PxGeometry& geom, const PxTransform& pose,
PxReal maxDist, PxHitFlags hitFlags, PxU32 maxHits, PxGeomRaycastHit* rayHits, PxU32 stride, PxRaycastThreadContext*) const;
virtual bool overlap(const PxGeometry& geom0, const PxTransform& pose0, const PxGeometry& geom1, const PxTransform& pose1, PxOverlapThreadContext*) const;
virtual bool sweep(const PxVec3& unitDir, const PxReal maxDist,
const PxGeometry& geom0, const PxTransform& pose0, const PxGeometry& geom1, const PxTransform& pose1,
PxGeomSweepHit& sweepHit, PxHitFlags hitFlags, const PxReal inflation, PxSweepThreadContext*) const;
virtual bool usePersistentContactManifold(const PxGeometry& geometry, PxReal& breakingThreshold) const;
// override PxGjkQuery::Support
virtual PxReal getMargin() const { return margin; }
// set margin
void setMargin(float m);
protected:
// Shape margin
float margin;
// Substitute geometry
virtual bool useSubstituteGeometry(PxGeometryHolder& geom, PxTransform& preTransform, const PxContactPoint& p, const PxTransform& pose0) const = 0;
};
/// \endcond
/**
\brief Cylinder geometry callbacks
*/
struct CylinderCallbacks : BaseConvexCallbacks
{
/**
\brief Construct cylinder geometry callbacks object
\param[in] height The cylinder height.
\param[in] radius The cylinder radius.
\param[in] axis The cylinder axis (0 - X, 1 - Y, 2 - Z).
\param[in] margin The cylinder margin.
*/
CylinderCallbacks(float height, float radius, int axis = 0, float margin = 0);
/// \brief Set cylinder height
/// \param[in] h The cylinder height
void setHeight(float h);
/// \brief Get cylinder height
/// \return The cylinder height
float getHeight() const { return height; }
/// \brief Set cylinder radius
/// \param[in] r The cylinder radius.
void setRadius(float r);
/// \brief Get cylinder radius
/// \return The cylinder radius
float getRadius() const { return radius; }
/// \brief Set cylinder axis
/// \param[in] a The cylinder axis (0 - X, 1 - Y, 2 - Z).
void setAxis(int a);
/// \brief Get cylinder axis
/// \return The cylinder axis
int getAxis() const { return axis; }
/// \cond PRIVATE
// override PxCustomGeometry::Callbacks
DECLARE_CUSTOM_GEOMETRY_TYPE
virtual void visualize(const PxGeometry&, PxRenderOutput&, const PxTransform&, const PxBounds3&) const;
virtual void computeMassProperties(const PxGeometry& geometry, PxMassProperties& massProperties) const;
// override PxGjkQuery::Support
virtual PxVec3 supportLocal(const PxVec3& dir) const;
protected:
// Cylinder height
float height;
// Cylinder radius
float radius;
// Cylinder axis
int axis;
// Substitute geometry
virtual bool useSubstituteGeometry(PxGeometryHolder& geom, PxTransform& preTransform, const PxContactPoint& p, const PxTransform& pose0) const;
// Radius at height
float getRadiusAtHeight(float height) const;
/// \endcond
};
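/**
A minimal usage sketch (illustrative): creating a cylinder-shaped custom geometry shape. The gPhysics and
gMaterial pointers are placeholders for a valid PxPhysics and PxMaterial instance. The callbacks object must
outlive every shape created from it.
\code
static PxCustomGeometryExt::CylinderCallbacks cylinderCallbacks(2.0f, 0.5f, 1, 0.0f);
PxShape* cylinderShape = gPhysics->createShape(PxCustomGeometry(cylinderCallbacks), *gMaterial);
\endcode
*/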
/**
\brief Cone geometry callbacks
*/
struct ConeCallbacks : BaseConvexCallbacks
{
/**
\brief Construct cone geometry callbacks object
\param[in] height The cone height.
\param[in] radius The cone radius.
\param[in] axis The cone axis (0 - X, 1 - Y, 2 - Z).
\param[in] margin The cone margin.
*/
ConeCallbacks(float height, float radius, int axis = 0, float margin = 0);
/// \brief Set cone height
/// \param[in] h The cone height
void setHeight(float h);
/// \brief Get cone height
/// \return The cone height
float getHeight() const { return height; }
/// \brief Set cone radius
/// \param[in] r The cone radius
void setRadius(float r);
/// \brief Get cone radius
/// \return The cone radius
float getRadius() const { return radius; }
/// \brief Set cone axis
/// \param[in] a The cone axis
void setAxis(int a);
/// \brief Get cone axis
/// \return The cone axis
int getAxis() const { return axis; }
/// \cond PRIVATE
// override PxCustomGeometry::Callbacks
DECLARE_CUSTOM_GEOMETRY_TYPE
virtual void visualize(const PxGeometry&, PxRenderOutput&, const PxTransform&, const PxBounds3&) const;
virtual void computeMassProperties(const PxGeometry& geometry, PxMassProperties& massProperties) const;
// override PxGjkQuery::Support
virtual PxVec3 supportLocal(const PxVec3& dir) const;
protected:
// Cone height
float height;
// Cone radius
float radius;
// Cone axis
int axis;
// Substitute geometry
virtual bool useSubstituteGeometry(PxGeometryHolder& geom, PxTransform& preTransform, const PxContactPoint& p, const PxTransform& pose0) const;
// Radius at height
float getRadiusAtHeight(float height) const;
/// \endcond
};
};
/// \cond PRIVATE
// OmniPVD friendly aliases
typedef PxCustomGeometryExt::BaseConvexCallbacks PxCustomGeometryExtBaseConvexCallbacks;
typedef PxCustomGeometryExt::CylinderCallbacks PxCustomGeometryExtCylinderCallbacks;
typedef PxCustomGeometryExt::ConeCallbacks PxCustomGeometryExtConeCallbacks;
/// \endcond
#if !PX_DOXYGEN
}
#endif
/** @} */
#endif
| 7,757 | C | 33.327433 | 156 | 0.743329 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxJoint.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_JOINT_H
#define PX_JOINT_H
/** \addtogroup extensions
@{
*/
#include "foundation/PxTransform.h"
#include "PxRigidActor.h"
#include "PxConstraint.h"
#include "common/PxBase.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxRigidActor;
class PxScene;
class PxPhysics;
class PxConstraint;
/**
\brief an enumeration of PhysX' built-in joint types
@see PxJoint
*/
struct PxJointConcreteType
{
enum Enum
{
eSPHERICAL = PxConcreteType::eFIRST_PHYSX_EXTENSION,
eREVOLUTE,
ePRISMATIC,
eFIXED,
eDISTANCE,
eD6,
eCONTACT,
eGEAR,
eRACK_AND_PINION,
eLast
};
};
PX_DEFINE_TYPEINFO(PxJoint, PxConcreteType::eUNDEFINED)
PX_DEFINE_TYPEINFO(PxRackAndPinionJoint, PxJointConcreteType::eRACK_AND_PINION)
PX_DEFINE_TYPEINFO(PxGearJoint, PxJointConcreteType::eGEAR)
PX_DEFINE_TYPEINFO(PxD6Joint, PxJointConcreteType::eD6)
PX_DEFINE_TYPEINFO(PxDistanceJoint, PxJointConcreteType::eDISTANCE)
PX_DEFINE_TYPEINFO(PxContactJoint, PxJointConcreteType::eCONTACT)
PX_DEFINE_TYPEINFO(PxFixedJoint, PxJointConcreteType::eFIXED)
PX_DEFINE_TYPEINFO(PxPrismaticJoint, PxJointConcreteType::ePRISMATIC)
PX_DEFINE_TYPEINFO(PxRevoluteJoint, PxJointConcreteType::eREVOLUTE)
PX_DEFINE_TYPEINFO(PxSphericalJoint, PxJointConcreteType::eSPHERICAL)
/**
\brief an enumeration for specifying one or other of the actors referenced by a joint
@see PxJoint
*/
struct PxJointActorIndex
{
enum Enum
{
eACTOR0,
eACTOR1,
COUNT
};
};
/**
\brief a base interface providing common functionality for PhysX joints
*/
class PxJoint : public PxBase
{
public:
/**
\brief Set the actors for this joint.
An actor may be NULL to indicate the world frame. At most one of the actors may be NULL.
\param[in] actor0 the first actor.
\param[in] actor1 the second actor
@see getActors()
*/
virtual void setActors(PxRigidActor* actor0, PxRigidActor* actor1) = 0;
/**
\brief Get the actors for this joint.
\param[out] actor0 the first actor.
\param[out] actor1 the second actor
@see setActors()
*/
virtual void getActors(PxRigidActor*& actor0, PxRigidActor*& actor1) const = 0;
/**
\brief Set the joint local pose for an actor.
This is the relative pose which locates the joint frame relative to the actor.
\param[in] actor 0 for the first actor, 1 for the second actor.
\param[in] localPose the local pose of this joint relative to the actor
@see getLocalPose()
*/
virtual void setLocalPose(PxJointActorIndex::Enum actor, const PxTransform& localPose) = 0;
/**
\brief get the joint local pose for an actor.
\param[in] actor 0 for the first actor, 1 for the second actor.
\return the local pose for this joint
@see setLocalPose()
*/
virtual PxTransform getLocalPose(PxJointActorIndex::Enum actor) const = 0;
/**
\brief get the relative pose for this joint
This function returns the pose of the joint frame of actor1 relative to actor0
*/
virtual PxTransform getRelativeTransform() const = 0;
/**
\brief get the relative linear velocity of the joint
This function returns the linear velocity of the origin of the constraint frame of actor1, relative to the origin of the constraint
frame of actor0. The value is returned in the constraint frame of actor0
*/
virtual PxVec3 getRelativeLinearVelocity() const = 0;
/**
\brief get the relative angular velocity of the joint
This function returns the angular velocity of actor1 relative to actor0. The value is returned in the constraint frame of actor0
*/
virtual PxVec3 getRelativeAngularVelocity() const = 0;
/**
\brief set the break force for this joint.
if the constraint force or torque on the joint exceeds the specified values, the joint will break,
at which point it will not constrain the two actors and the flag PxConstraintFlag::eBROKEN will be set. The
force and torque are measured in the joint frame of the first actor
\param[in] force the maximum force the joint can apply before breaking
\param[in] torque the maximum torque the joint can apply before breaking
*/
virtual void setBreakForce(PxReal force, PxReal torque) = 0;
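/**
A small usage sketch (illustrative): making a joint breakable and checking later whether it broke. The joint
pointer is a placeholder for a previously created PxJoint.
\code
joint->setBreakForce(1000.0f, 500.0f);
// after simulating:
if(joint->getConstraintFlags() & PxConstraintFlag::eBROKEN)
{
	// the joint broke and no longer constrains its actors
}
\endcode
*/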
/**
\brief get the break force for this joint.
\param[out] force the maximum force the joint can apply before breaking
\param[out] torque the maximum torque the joint can apply before breaking
@see setBreakForce()
*/
virtual void getBreakForce(PxReal& force, PxReal& torque) const = 0;
/**
\brief set the constraint flags for this joint.
\param[in] flags the constraint flags
@see PxConstraintFlag
*/
virtual void setConstraintFlags(PxConstraintFlags flags) = 0;
/**
\brief set a constraint flag for this joint to a specified value.
\param[in] flag the constraint flag
\param[in] value the value to which to set the flag
@see PxConstraintFlag
*/
virtual void setConstraintFlag(PxConstraintFlag::Enum flag, bool value) = 0;
/**
\brief get the constraint flags for this joint.
\return the constraint flags
@see PxConstraintFlag
*/
virtual PxConstraintFlags getConstraintFlags() const = 0;
/**
\brief set the inverse mass scale for actor0.
\param[in] invMassScale the scale to apply to the inverse mass of actor 0 for resolving this constraint
@see getInvMassScale0
*/
virtual void setInvMassScale0(PxReal invMassScale) = 0;
/**
\brief get the inverse mass scale for actor0.
\return inverse mass scale for actor0
@see setInvMassScale0
*/
virtual PxReal getInvMassScale0() const = 0;
/**
\brief set the inverse inertia scale for actor0.
\param[in] invInertiaScale the scale to apply to the inverse inertia of actor0 for resolving this constraint
@see getInvInertiaScale0
*/
virtual void setInvInertiaScale0(PxReal invInertiaScale) = 0;
/**
\brief get the inverse inertia scale for actor0.
\return inverse inertia scale for actor0
@see setInvInertiaScale0
*/
virtual PxReal getInvInertiaScale0() const = 0;
/**
\brief set the inverse mass scale for actor1.
\param[in] invMassScale the scale to apply to the inverse mass of actor 1 for resolving this constraint
@see getInvMassScale1
*/
virtual void setInvMassScale1(PxReal invMassScale) = 0;
/**
\brief get the inverse mass scale for actor1.
\return inverse mass scale for actor1
@see setInvMassScale1
*/
virtual PxReal getInvMassScale1() const = 0;
/**
\brief set the inverse inertia scale for actor1.
\param[in] invInertiaScale the scale to apply to the inverse inertia of actor1 for resolving this constraint
@see getInvInertiaScale1
*/
virtual void setInvInertiaScale1(PxReal invInertiaScale) = 0;
/**
\brief get the inverse inertia scale for actor1.
\return inverse inertia scale for actor1
@see setInvInertiaScale1
*/
virtual PxReal getInvInertiaScale1() const = 0;
/**
\brief Retrieves the PxConstraint corresponding to this joint.
This can be used to determine, among other things, the force applied at the joint.
\return the constraint
*/
virtual PxConstraint* getConstraint() const = 0;
/**
\brief Sets a name string for the object that can be retrieved with getName().
This is for debugging and is not used by the SDK. The string is not copied by the SDK,
only the pointer is stored.
\param[in] name String to set the objects name to.
@see getName()
*/
virtual void setName(const char* name) = 0;
/**
\brief Retrieves the name string set with setName().
\return Name string associated with object.
@see setName()
*/
virtual const char* getName() const = 0;
/**
\brief Deletes the joint.
\note This call does not wake up the connected rigid bodies.
*/
virtual void release() = 0;
/**
\brief Retrieves the scene which this joint belongs to.
\return Owner Scene. NULL if not part of a scene.
@see PxScene
*/
virtual PxScene* getScene() const = 0;
void* userData; //!< user can assign this to whatever, usually to create a 1:1 relationship with a user object.
//serialization
/**
\brief Put class meta data in stream, used for serialization
*/
static void getBinaryMetaData(PxOutputStream& stream);
//~serialization
protected:
virtual ~PxJoint() {}
//serialization
/**
\brief Constructor
*/
PX_INLINE PxJoint(PxType concreteType, PxBaseFlags baseFlags) : PxBase(concreteType, baseFlags), userData(NULL) {}
/**
\brief Deserialization constructor
*/
PX_INLINE PxJoint(PxBaseFlags baseFlags) : PxBase(baseFlags) {}
/**
\brief Returns whether a given type name matches with the type of this instance
*/
virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxJoint", PxBase); }
//~serialization
};
class PxSpring
{
public:
PxReal stiffness; //!< the spring strength of the drive: that is, the force proportional to the position error
PxReal damping; //!< the damping strength of the drive: that is, the force proportional to the velocity error
PxSpring(PxReal stiffness_, PxReal damping_): stiffness(stiffness_), damping(damping_) {}
};
#if !PX_DOXYGEN
} // namespace physx
#endif
/** \brief Helper function to setup a joint's global frame
This replaces the following functions from previous SDK versions:
void NxJointDesc::setGlobalAnchor(const NxVec3& wsAnchor);
void NxJointDesc::setGlobalAxis(const NxVec3& wsAxis);
The function sets the joint's localPose using world-space input parameters.
\param[in] wsAnchor Global frame anchor point. <b>Range:</b> position vector
\param[in] wsAxis Global frame axis. <b>Range:</b> direction vector
\param[in,out] joint Joint having its global frame set.
*/
PX_C_EXPORT void PX_CALL_CONV PxSetJointGlobalFrame(physx::PxJoint& joint, const physx::PxVec3* wsAnchor, const physx::PxVec3* wsAxis);
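/**
A small usage sketch (illustrative): anchoring an existing joint at a world-space point with a world-space
axis. The joint pointer is a placeholder.
\code
const physx::PxVec3 wsAnchor(0.0f, 2.0f, 0.0f);
const physx::PxVec3 wsAxis(0.0f, 1.0f, 0.0f);
PxSetJointGlobalFrame(*joint, &wsAnchor, &wsAxis);
\endcode
*/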
/** @} */
#endif
| 11,402 | C | 26.744525 | 135 | 0.741361 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxCollectionExt.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_COLLECTION_EXT_H
#define PX_COLLECTION_EXT_H
/** \addtogroup extensions
@{
*/
#include "PxPhysXConfig.h"
#include "common/PxCollection.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxCollection;
class PxScene;
class PxCollectionExt
{
public:
/**
\brief Removes and releases all object from a collection.
The Collection itself is not released.
If the releaseExclusiveShapes flag is not set to true, release() will not be called on exclusive shapes.
It is assumed that the application holds a reference to each of the objects in the collection, with the exception of objects that are not releasable
(PxBase::isReleasable()). In general, objects that violate this assumption need to be removed from the collection prior to calling releaseObjects.
\note when a shape is created with PxRigidActor::createShape() or PxRigidActorExt::createExclusiveShape(), the only counted reference is held by the actor.
If such a shape and its actor are present in the collection, the reference count will be decremented once when the actor is released, and once when the
shape is released, resulting in undefined behavior. Shape reference counts can be incremented with PxShape::acquireReference().
\param[in] collection to remove and release all object from.
\param[in] releaseExclusiveShapes if this parameter is set to false, release() will not be called on exclusive shapes.
*/
static void releaseObjects(PxCollection& collection, bool releaseExclusiveShapes = true);
/**
\brief Removes objects of a given type from a collection, potentially adding them to another collection.
\param[in,out] collection Collection from which objects are removed
\param[in] concreteType PxConcreteType of sdk objects that should be removed
\param[in,out] to Optional collection to which the removed objects are added
@see PxCollection, PxConcreteType
*/
static void remove(PxCollection& collection, PxType concreteType, PxCollection* to = NULL);
/**
\brief Collects all objects in PxPhysics that are shareable across multiple scenes.
This function creates a new collection from all objects that are shareable across multiple
scenes. Instances of the following types are included: PxConvexMesh, PxTriangleMesh,
PxHeightField, PxShape and PxMaterial.
This is a helper function to ease the creation of collections for serialization.
\param[in] physics The physics SDK instance from which objects are collected. See #PxPhysics
\return Collection to which objects are added. See #PxCollection
@see PxCollection, PxPhysics
*/
static PxCollection* createCollection(PxPhysics& physics);
/**
\brief Collects all objects from a PxScene.
This function creates a new collection from all objects that were added to the specified
PxScene. Instances of the following types are included: PxActor, PxAggregate,
PxArticulationReducedCoordinate and PxJoint (other PxConstraint types are not included).
This is a helper function to ease the creation of collections for serialization.
The function PxSerialization.complete() can be used to complete the collection with required objects prior to
serialization.
\param[in] scene The PxScene instance from which objects are collected. See #PxScene
\return Collection to which objects are added. See #PxCollection
@see PxCollection, PxScene, PxSerialization.complete()
*/
static PxCollection* createCollection(PxScene& scene);
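/**
A small usage sketch (illustrative): gathering objects for serialization. The physics and scene references
are placeholders; PxSerialization.complete() can then be used to add any remaining dependencies before
serializing.
\code
PxCollection* sharedObjects = PxCollectionExt::createCollection(physics);
PxCollection* sceneObjects = PxCollectionExt::createCollection(scene);
\endcode
*/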
};
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 5,234 | C | 42.625 | 158 | 0.77073 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxGjkQueryExt.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_GJK_QUERY_EXT_H
#define PX_GJK_QUERY_EXT_H
#include "geometry/PxGjkQuery.h"
#include "geometry/PxGeometry.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxSphereGeometry;
class PxCapsuleGeometry;
class PxBoxGeometry;
class PxConvexMeshGeometry;
class PxContactBuffer;
class PxConvexMesh;
/**
\brief Pre-made support mapping for built-in convex geometry types.
*/
class PxGjkQueryExt
{
public:
/**
\brief Pre-made support mapping for a sphere
*/
struct SphereSupport : PxGjkQuery::Support
{
PxReal radius;
/**
\brief Default constructor
*/
SphereSupport();
/**
\brief Constructs a SphereSupport for a sphere radius
*/
SphereSupport(PxReal radius);
/**
\brief Constructs a SphereSupport for a PxSphereGeometry
*/
SphereSupport(const PxSphereGeometry& geom);
virtual PxReal getMargin() const;
virtual PxVec3 supportLocal(const PxVec3& dir) const;
};
/**
\brief Pre-made support mapping for a capsule
*/
struct CapsuleSupport : PxGjkQuery::Support
{
PxReal radius, halfHeight;
/**
\brief Default constructor
*/
CapsuleSupport();
/**
\brief Constructs a CapsuleSupport for capsule radius and halfHeight
*/
CapsuleSupport(PxReal radius, PxReal halfHeight);
/**
\brief Constructs a CapsuleSupport for a PxCapsuleGeometry
*/
CapsuleSupport(const PxCapsuleGeometry& geom);
virtual PxReal getMargin() const;
virtual PxVec3 supportLocal(const PxVec3& dir) const;
};
/**
\brief Pre-made support mapping for a box
*/
struct BoxSupport : PxGjkQuery::Support
{
PxVec3 halfExtents;
PxReal margin;
/**
\brief Default constructor
*/
BoxSupport();
/**
\brief Constructs a BoxSupport for a box halfExtents with optional margin
*/
BoxSupport(const PxVec3& halfExtents, PxReal margin = 0);
/**
\brief Constructs a BoxSupport for a PxBoxGeometry
*/
BoxSupport(const PxBoxGeometry& box, PxReal margin = 0);
virtual PxReal getMargin() const;
virtual PxVec3 supportLocal(const PxVec3& dir) const;
};
/**
\brief Pre-made support mapping for a convex mesh
*/
struct ConvexMeshSupport : PxGjkQuery::Support
{
const PxConvexMesh* convexMesh;
PxVec3 scale;
PxQuat scaleRotation;
PxReal margin;
/**
\brief Default constructor
*/
ConvexMeshSupport();
/**
\brief Constructs a ConvexMeshSupport for a PxConvexMesh
*/
ConvexMeshSupport(const PxConvexMesh& convexMesh, const PxVec3& scale = PxVec3(1), const PxQuat& scaleRotation = PxQuat(PxIdentity), PxReal margin = 0);
/**
\brief Constructs a ConvexMeshSupport for a PxConvexMeshGeometry
*/
ConvexMeshSupport(const PxConvexMeshGeometry& convexMesh, PxReal margin = 0);
virtual PxReal getMargin() const;
virtual PxVec3 supportLocal(const PxVec3& dir) const;
};
/**
\brief Pre-made support mapping for any of PhysX's built-in convex geometries (sphere, capsule, box, convex mesh)
*/
struct ConvexGeomSupport : PxGjkQuery::Support
{
/**
\brief Default constructor
*/
ConvexGeomSupport();
/**
\brief Constructs a ConvexGeomSupport for a PxGeometry
*/
ConvexGeomSupport(const PxGeometry& geom, PxReal margin = 0);
/**
\brief Destructor
*/
~ConvexGeomSupport();
/**
\brief Returns false if ConvexGeomSupport was constructed from non-convex geometry
*/
bool isValid() const;
virtual PxReal getMargin() const;
virtual PxVec3 supportLocal(const PxVec3& dir) const;
private:
PxGeometryType::Enum mType;
union {
void* alignment;
PxU8 sphere[sizeof(SphereSupport)];
PxU8 capsule[sizeof(CapsuleSupport)];
PxU8 box[sizeof(BoxSupport)];
PxU8 convexMesh[sizeof(ConvexMeshSupport)];
} mSupport;
};
/**
\brief Generates a contact point between two shapes using GJK-EPA algorithm
\param[in] a Shape A support mapping
\param[in] b Shape B support mapping
\param[in] poseA Shape A transformation
\param[in] poseB Shape B transformation
\param[in] contactDistance The distance at which contacts begin to be generated between the shapes
\param[in] toleranceLength The toleranceLength. Used for scaling distance-based thresholds internally to produce appropriate results given simulations in different units
\param[out] contactBuffer A buffer to store the contact
\return True if there is a contact.
*/
static bool generateContacts(const PxGjkQuery::Support& a, const PxGjkQuery::Support& b, const PxTransform& poseA, const PxTransform& poseB,
PxReal contactDistance, PxReal toleranceLength, PxContactBuffer& contactBuffer);
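/**
A minimal usage sketch (illustrative): generating a contact between a sphere and a box. The PxContactBuffer
type is only forward-declared in this header and is assumed to be fully defined at the call site.
\code
PxGjkQueryExt::SphereSupport sphere(0.5f);
PxGjkQueryExt::BoxSupport box(PxVec3(1.0f));
const PxTransform poseA(PxVec3(0.0f, 1.4f, 0.0f));
const PxTransform poseB(PxVec3(0.0f));
PxContactBuffer contacts;
contacts.reset();
if(PxGjkQueryExt::generateContacts(sphere, box, poseA, poseB, 0.01f, 1.0f, contacts))
{
	// contacts.count contact points were written to the buffer
}
\endcode
*/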
};
#if !PX_DOXYGEN
}
#endif
#endif
| 6,199 | C | 28.107981 | 170 | 0.742055 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxD6JointCreate.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_D6_JOINT_CREATE_H
#define PX_D6_JOINT_CREATE_H
#include "common/PxPhysXCommonConfig.h"
/** \addtogroup extensions
@{
*/
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxPhysics;
class PxRigidActor;
class PxJoint;
/**
\brief Helper function to create a fixed joint, using either a PxD6Joint or PxFixedJoint.
For fixed joints it is important that the joint frames have the same orientation. This helper function uses an identity rotation for both.
It is also important that the joint frames have an equivalent position in world space. The function does not check this, so it is up to users
to ensure that this is the case.
\param[in] physics The physics SDK
\param[in] actor0 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localPos0 The position of the joint relative to actor0
\param[in] actor1 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localPos1 The position of the joint relative to actor1
\param[in] useD6 True to use a PxD6Joint, false to use a PxFixedJoint;
\return The created joint.
@see PxD6Joint PxFixedJoint
*/
PxJoint* PxD6JointCreate_Fixed(PxPhysics& physics, PxRigidActor* actor0, const PxVec3& localPos0, PxRigidActor* actor1, const PxVec3& localPos1, bool useD6);
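/**
A small usage sketch (illustrative): welding two actors whose joint frames meet half a unit above actor0's
origin. The physics reference and the actor pointers are placeholders.
\code
PxJoint* weld = PxD6JointCreate_Fixed(physics, actor0, PxVec3(0.0f, 0.5f, 0.0f),
	actor1, PxVec3(0.0f, -0.5f, 0.0f), true);
\endcode
*/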
/**
\brief Helper function to create a distance joint, using either a PxD6Joint or PxDistanceJoint.
This helper function only supports a maximum distance constraint, because PxD6Joint does not support a minimum distance constraint (contrary
to PxDistanceJoint).
The distance is computed between the joint frames' world-space positions. The joint frames' orientations are irrelevant here so the function
sets them to identity.
\param[in] physics The physics SDK
\param[in] actor0 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localPos0 The position of the joint relative to actor0
\param[in] actor1 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localPos1 The position of the joint relative to actor1
\param[in] maxDist The maximum allowed distance
\param[in] useD6 True to use a PxD6Joint, false to use a PxDistanceJoint;
\return The created joint.
@see PxD6Joint PxDistanceJoint
*/
PxJoint* PxD6JointCreate_Distance(PxPhysics& physics, PxRigidActor* actor0, const PxVec3& localPos0, PxRigidActor* actor1, const PxVec3& localPos1, float maxDist, bool useD6);
/**
\brief Helper function to create a prismatic joint, using either a PxD6Joint or PxPrismaticJoint.
This function enforces that the joint frames have the same orientation, which is a local frame whose X is the desired translation axis.
This orientation is computed by the function, so users only have to define the desired translation axis (typically 1;0;0 or 0;1;0 or 0;0;1).
The translation can be limited. Limits are enforced if minLimit<maxLimit. If minLimit=maxLimit the axis is locked. If minLimit>maxLimit the
limits are not enforced and the axis is free. The limit values are computed relative to the position of actor0's joint frame.
The function creates hard limits, and uses PhysX's default contact distance parameter.
\param[in] physics The physics SDK
\param[in] actor0 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localPos0 The position of the joint relative to actor0
\param[in] actor1 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localPos1 The position of the joint relative to actor1
\param[in] axis The axis along which objects are allowed to move, expressed in the actors' local space
\param[in] minLimit The minimum allowed position along the axis
\param[in] maxLimit The maximum allowed position along the axis
\param[in] useD6 True to use a PxD6Joint, false to use a PxPrismaticJoint;
\return The created joint.
@see PxD6Joint PxPrismaticJoint
*/
PxJoint* PxD6JointCreate_Prismatic(PxPhysics& physics, PxRigidActor* actor0, const PxVec3& localPos0, PxRigidActor* actor1, const PxVec3& localPos1, const PxVec3& axis, float minLimit, float maxLimit, bool useD6);
/**
\brief Helper function to create a revolute joint, using either a PxD6Joint or PxRevoluteJoint.
This function enforces that the joint frames have the same orientation, which is a local frame whose X is the desired rotation axis.
This orientation is computed by the function, so users only have to define the desired rotation axis (typically 1;0;0 or 0;1;0 or 0;0;1).
The rotation can be limited. Limits are enforced if minLimit<maxLimit. If minLimit=maxLimit the axis is locked. If minLimit>maxLimit the
limits are not enforced and the axis is free. The limit values are computed relative to the rotation of actor0's joint frame.
The function creates hard limits, and uses PhysX's default contact distance parameter.
Limits are expressed in radians. Allowed range is ]-2*PI;+2*PI[
\param[in] physics The physics SDK
\param[in] actor0 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localPos0 The position of the joint relative to actor0
\param[in] actor1 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localPos1 The position of the joint relative to actor1
\param[in] axis The axis around which objects are allowed to move, expressed in the actors' local space
\param[in] minLimit The minimum allowed rotation along the axis
\param[in] maxLimit The maximum allowed rotation along the axis
\param[in] useD6 True to use a PxD6Joint, false to use a PxRevoluteJoint;
\return The created joint.
@see PxD6Joint PxRevoluteJoint
*/
PxJoint* PxD6JointCreate_Revolute(PxPhysics& physics, PxRigidActor* actor0, const PxVec3& localPos0, PxRigidActor* actor1, const PxVec3& localPos1, const PxVec3& axis, float minLimit, float maxLimit, bool useD6);
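// Example (illustrative sketch, not part of the original API reference): a hinge around the Y axis limited
// to +/- 45 degrees, implemented with a PxRevoluteJoint (useD6 = false). Assumes 'physics', 'actor0' and
// 'actor1' already exist.
//
//    PxJoint* revoluteJoint = PxD6JointCreate_Revolute(physics, actor0, PxVec3(0.0f), actor1, PxVec3(0.0f),
//        PxVec3(0.0f, 1.0f, 0.0f), -PxPi/4.0f, PxPi/4.0f, false);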
/**
\brief Helper function to create a spherical joint, using either a PxD6Joint or PxSphericalJoint.
This function supports a cone limit shape, defined by a cone axis and two angular limit values.
This function enforces that the joint frames have the same orientation: a local frame whose X axis is the desired cone axis.
This orientation is computed by the function, so users only have to define the desired cone axis (typically 1;0;0 or 0;1;0 or 0;0;1).
The rotations can be limited. Limits are enforced if limit1>0 and limit2>0. Otherwise the motion is free. The limit values define an ellipse,
which is the cross-section of the cone limit shape.
The function creates hard limits, and uses PhysX's default contact distance parameter.
Limits are expressed in radians. Allowed range is ]0;PI[. Limits are symmetric around the cone axis.
The cone axis is equivalent to the twist axis for the D6 joint. The twist motion is not limited.
\param[in] physics The physics SDK
\param[in] actor0 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localPos0 The position of the joint relative to actor0
\param[in] actor1 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localPos1 The position of the joint relative to actor1
\param[in] axis The cone axis, expressed in the actors' local space
\param[in] limit1 Max angular limit for the ellipse along the joint frame's second axis (first axis = cone axis)
\param[in] limit2 Max angular limit for the ellipse along the joint frame's third axis (first axis = cone axis)
\param[in] useD6 True to use a PxD6Joint, false to use a PxSphericalJoint.
\return The created joint.
@see PxD6Joint PxSphericalJoint
*/
PxJoint* PxD6JointCreate_Spherical(PxPhysics& physics, PxRigidActor* actor0, const PxVec3& localPos0, PxRigidActor* actor1, const PxVec3& localPos1, const PxVec3& axis, float limit1, float limit2, bool useD6);
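// Example (illustrative sketch, not part of the original API reference): a ball-and-socket joint with an
// elliptic cone limit around the X axis. Assumes 'physics', 'actor0' and 'actor1' already exist.
//
//    PxJoint* sphericalJoint = PxD6JointCreate_Spherical(physics, actor0, PxVec3(0.0f), actor1, PxVec3(0.0f),
//        PxVec3(1.0f, 0.0f, 0.0f), PxPi/6.0f, PxPi/4.0f, true);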
/**
\brief Helper function to create a spherical joint, using either a PxD6Joint or PxSphericalJoint.
This function supports a cone limit shape, defined by two pairs of angular limit values. This can be used to create an asymmetric cone. If the
angular limit values are symmetric (i.e. minLimit1=-maxLimit1 and minLimit2=-maxLimit2) then the cone axis is the X axis in actor0's space.
If the limits are not symmetric, the function rotates the cone axis accordingly so that limits remain symmetric for PhysX. If this happens,
the initial joint frames will be different for both actors. By default minLimit1/maxLimit1 are limits around the joint's Y axis, and
minLimit2/maxLimit2 are limits around the joint's Z axis.
The function creates hard limits, and uses PhysX's default contact distance parameter.
Limits are expressed in radians. Allowed range is ]-PI;PI[.
The cone axis is equivalent to the twist axis for the D6 joint. The twist motion is not limited.
The returned apiroty and apirotz values can later be added to the Y and Z swing angle values retrieved from the joint, in order to
remap the angle values to the given input range.
\param[out] apiroty Amount of rotation around Y used to setup actor0's joint frame
\param[out] apirotz Amount of rotation around Z used to setup actor0's joint frame
\param[in] physics The physics SDK
\param[in] actor0 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localPos0 The position of the joint relative to actor0
\param[in] actor1 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localPos1 The position of the joint relative to actor1
\param[in] minLimit1 Min angular limit along the joint frame's second axis (first axis = cone axis)
\param[in] maxLimit1 Max angular limit along the joint frame's second axis (first axis = cone axis)
\param[in] minLimit2 Min angular limit along the joint frame's third axis (first axis = cone axis)
\param[in] maxLimit2 Max angular limit along the joint frame's third axis (first axis = cone axis)
\param[in] useD6 True to use a PxD6Joint, false to use a PxSphericalJoint.
\return The created joint.
@see PxD6Joint PxSphericalJoint
*/
PxJoint* PxD6JointCreate_GenericCone(float& apiroty, float& apirotz, PxPhysics& physics, PxRigidActor* actor0, const PxVec3& localPos0, PxRigidActor* actor1, const PxVec3& localPos1, float minLimit1, float maxLimit1, float minLimit2, float maxLimit2, bool useD6);
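// Example (illustrative sketch, not part of the original API reference): an asymmetric cone limit. The
// returned apiroty/apirotz values can be kept around to remap swing angles read back from the joint.
// Assumes 'physics', 'actor0' and 'actor1' already exist.
//
//    float apiRotY = 0.0f, apiRotZ = 0.0f;
//    PxJoint* coneJoint = PxD6JointCreate_GenericCone(apiRotY, apiRotZ, physics, actor0, PxVec3(0.0f),
//        actor1, PxVec3(0.0f), -PxPi/6.0f, PxPi/3.0f, -PxPi/4.0f, PxPi/4.0f, true);
//    // later: add apiRotY/apiRotZ to the joint's reported Y/Z swing angles to express them in the input range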
/**
\brief Helper function to create a D6 joint with pyramidal swing limits.
This function supports a pyramid limit shape, defined by two pairs of angular limit values. This can be used to create an asymmetric pyramid. If the
angular limit values are symmetric (i.e. minLimit1=-maxLimit1 and minLimit2=-maxLimit2) then the pyramid axis is the X axis in actor0's space.
By default minLimit1/maxLimit1 are limits around the joint's Y axis, and minLimit2/maxLimit2 are limits around the joint's Z axis.
The function creates hard limits, and uses PhysX's default contact distance parameter.
Limits are expressed in radians. Allowed range is ]-PI;PI[.
The pyramid axis is equivalent to the twist axis for the D6 joint. The twist motion is not limited.
\param[in] physics The physics SDK
\param[in] actor0 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localPos0 The position of the joint relative to actor0
\param[in] actor1 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localPos1 The position of the joint relative to actor1
\param[in] axis The pyramid axis, expressed in the actors' local space
\param[in] minLimit1 Min angular limit along the joint frame's second axis (first axis = pyramid axis)
\param[in] maxLimit1 Max angular limit along the joint frame's second axis (first axis = pyramid axis)
\param[in] minLimit2 Min angular limit along the joint frame's third axis (first axis = pyramid axis)
\param[in] maxLimit2 Max angular limit along the joint frame's third axis (first axis = pyramid axis)
\return The created joint.
@see PxD6Joint
*/
PxJoint* PxD6JointCreate_Pyramid(PxPhysics& physics, PxRigidActor* actor0, const PxVec3& localPos0, PxRigidActor* actor1, const PxVec3& localPos1, const PxVec3& axis,
float minLimit1, float maxLimit1, float minLimit2, float maxLimit2);
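// Example (illustrative sketch, not part of the original API reference): an asymmetric pyramidal swing
// limit around the X axis. Assumes 'physics', 'actor0' and 'actor1' already exist.
//
//    PxJoint* pyramidJoint = PxD6JointCreate_Pyramid(physics, actor0, PxVec3(0.0f), actor1, PxVec3(0.0f),
//        PxVec3(1.0f, 0.0f, 0.0f), -PxPi/6.0f, PxPi/6.0f, -PxPi/4.0f, PxPi/4.0f);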
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 14,632 | C | 56.384314 | 263 | 0.774399 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxFixedJoint.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_FIXED_JOINT_H
#define PX_FIXED_JOINT_H
/** \addtogroup extensions
@{
*/
#include "extensions/PxJoint.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxFixedJoint;
/**
\brief Create a fixed joint.
\param[in] physics The physics SDK
\param[in] actor0 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localFrame0 The position and orientation of the joint relative to actor0
\param[in] actor1 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localFrame1 The position and orientation of the joint relative to actor1
@see PxFixedJoint
*/
PxFixedJoint* PxFixedJointCreate(PxPhysics& physics, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1);
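// Example (illustrative sketch, not part of the original API reference): gluing two actors together, with
// the joint anchored one unit above actor0's origin. Assumes 'physics', 'actor0' and 'actor1' already exist.
//
//    PxFixedJoint* fixedJoint = PxFixedJointCreate(physics, actor0, PxTransform(PxVec3(0.0f, 1.0f, 0.0f)),
//        actor1, PxTransform(PxIdentity));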
/**
\brief A fixed joint permits no relative movement between two bodies, i.e. the bodies are glued together.
\image html fixedJoint.png
@see PxFixedJointCreate() PxJoint
*/
class PxFixedJoint : public PxJoint
{
public:
/**
\brief Returns string name of PxFixedJoint, used for serialization
*/
virtual const char* getConcreteTypeName() const { return "PxFixedJoint"; }
protected:
//serialization
/**
\brief Constructor
*/
PX_INLINE PxFixedJoint(PxType concreteType, PxBaseFlags baseFlags) : PxJoint(concreteType, baseFlags) {}
/**
\brief Deserialization constructor
*/
PX_INLINE PxFixedJoint(PxBaseFlags baseFlags) : PxJoint(baseFlags) {}
/**
\brief Returns whether a given type name matches with the type of this instance
*/
virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxFixedJoint", PxJoint); }
//~serialization
};
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 3,528 | C | 33.940594 | 161 | 0.753118 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxDefaultAllocator.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_DEFAULT_ALLOCATOR_H
#define PX_DEFAULT_ALLOCATOR_H
/** \addtogroup extensions
@{
*/
#include "foundation/PxAllocatorCallback.h"
#include "foundation/PxAssert.h"
#include "foundation/PxMemory.h"
#include "common/PxPhysXCommonConfig.h"
#include <stdlib.h>
#if PX_WINDOWS_FAMILY || PX_LINUX_FAMILY || PX_SWITCH
#include <malloc.h>
#endif
#if !PX_DOXYGEN
namespace physx
{
#endif
#if PX_WINDOWS_FAMILY
// on win32 we only have 8-byte alignment guaranteed, but the CRT provides special aligned allocation functions
PX_FORCE_INLINE void* platformAlignedAlloc(size_t size)
{
return _aligned_malloc(size, 16);
}
PX_FORCE_INLINE void platformAlignedFree(void* ptr)
{
_aligned_free(ptr);
}
#elif PX_LINUX_FAMILY || PX_SWITCH
PX_FORCE_INLINE void* platformAlignedAlloc(size_t size)
{
return ::memalign(16, size);
}
PX_FORCE_INLINE void platformAlignedFree(void* ptr)
{
::free(ptr);
}
#else
// on all other platforms we get 16-byte alignment by default
PX_FORCE_INLINE void* platformAlignedAlloc(size_t size)
{
return ::malloc(size);
}
PX_FORCE_INLINE void platformAlignedFree(void* ptr)
{
::free(ptr);
}
#endif
/**
\brief default implementation of the allocator interface required by the SDK
*/
class PxDefaultAllocator : public PxAllocatorCallback
{
public:
virtual void* allocate(size_t size, const char*, const char*, int)
{
void* ptr = platformAlignedAlloc(size);
PX_ASSERT((size_t(ptr) & 15)==0);
#if PX_STOMP_ALLOCATED_MEMORY
if(ptr != NULL)
{
PxMemSet(ptr, PxI32(0xcd), PxU32(size));
}
#endif
return ptr;
}
virtual void deallocate(void* ptr)
{
platformAlignedFree(ptr);
}
};
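// Example (illustrative sketch, not part of the original header): the allocator is typically passed to the
// foundation at startup. Assumes the standard PxCreateFoundation entry point and the PxDefaultErrorCallback
// helper from the extensions library.
//
//    static PxDefaultAllocator gAllocator;
//    static PxDefaultErrorCallback gErrorCallback;
//    PxFoundation* foundation = PxCreateFoundation(PX_PHYSICS_VERSION, gAllocator, gErrorCallback);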
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 3,376 | C | 28.112069 | 105 | 0.744372 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxTriangleMeshExt.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_TRIANGLE_MESH_EXT_H
#define PX_TRIANGLE_MESH_EXT_H
/** \addtogroup extensions
@{
*/
#include "PxPhysXConfig.h"
#include "common/PxPhysXCommonConfig.h"
#include "foundation/PxArray.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxGeometry;
class PxTriangleMesh;
class PxTriangleMeshGeometry;
class PxHeightFieldGeometry;
/**
\brief Utility class to find mesh triangles touched by a specified geometry object.
This class is a helper calling PxMeshQuery::findOverlapTriangleMesh or PxMeshQuery::findOverlapHeightField under the hood,
while taking care of necessary memory management issues.
PxMeshQuery::findOverlapTriangleMesh and PxMeshQuery::findOverlapHeightField are the "raw" functions operating on user-provided fixed-size
buffers. These functions abort with an error code in case of buffer overflow. PxMeshOverlapUtil is a convenient helper class that checks
this error code and resizes its buffers appropriately until the desired call succeeds.
Returned triangle indices are stored within the class, and can be used with PxMeshQuery::getTriangle() to retrieve the triangle properties.
*/
class PxMeshOverlapUtil
{
public:
PxMeshOverlapUtil();
~PxMeshOverlapUtil();
/**
\brief Find the mesh triangles which touch the specified geometry object.
\param[in] geom The geometry object to test for mesh triangle overlaps. Supported geometries are #PxSphereGeometry, #PxCapsuleGeometry and #PxBoxGeometry
\param[in] geomPose Pose of the geometry object
\param[in] meshGeom The triangle mesh geometry to check overlap against
\param[in] meshPose Pose of the triangle mesh
\return Number of overlaps found. Triangle indices can then be accessed through the #getResults() function.
@see PxGeometry PxTransform PxTriangleMeshGeometry PxMeshQuery::findOverlapTriangleMesh
*/
PxU32 findOverlap(const PxGeometry& geom, const PxTransform& geomPose, const PxTriangleMeshGeometry& meshGeom, const PxTransform& meshPose);
/**
\brief Find the height field triangles which touch the specified geometry object.
\param[in] geom The geometry object to test for height field overlaps. Supported geometries are #PxSphereGeometry, #PxCapsuleGeometry and #PxBoxGeometry. The sphere and capsule queries are currently conservative estimates.
\param[in] geomPose Pose of the geometry object
\param[in] hfGeom The height field geometry to check overlap against
\param[in] hfPose Pose of the height field
\return Number of overlaps found. Triangle indices can then be accessed through the #getResults() function.
@see PxGeometry PxTransform PxHeightFieldGeometry PxMeshQuery::findOverlapHeightField
*/
PxU32 findOverlap(const PxGeometry& geom, const PxTransform& geomPose, const PxHeightFieldGeometry& hfGeom, const PxTransform& hfPose);
/**
\brief Retrieves array of triangle indices after a findOverlap call.
\return Indices of touched triangles
*/
PX_FORCE_INLINE const PxU32* getResults() const { return mResultsMemory; }
/**
\brief Retrieves number of triangle indices after a findOverlap call.
\return Number of touched triangles
*/
PX_FORCE_INLINE PxU32 getNbResults() const { return mNbResults; }
private:
PxU32* mResultsMemory;
PxU32 mResults[256];
PxU32 mNbResults;
PxU32 mMaxNbResults;
};
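// Example (illustrative sketch, not part of the original header): gathering all mesh triangles touched by a
// unit sphere and fetching their world-space vertices. Assumes 'spherePose', 'meshGeom' and 'meshPose' exist
// and that PxMeshQuery::getTriangle() (declared in PxMeshQuery.h) is available.
//
//    PxMeshOverlapUtil overlapUtil;
//    const PxU32 nbHits = overlapUtil.findOverlap(PxSphereGeometry(1.0f), spherePose, meshGeom, meshPose);
//    const PxU32* triIndices = overlapUtil.getResults();
//    for(PxU32 i = 0; i < nbHits; i++)
//    {
//        PxTriangle tri;
//        PxMeshQuery::getTriangle(meshGeom, meshPose, triIndices[i], tri);
//        // ... process tri.verts[0], tri.verts[1], tri.verts[2]
//    }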
/**
\brief Computes an approximate minimum translational distance (MTD) between a geometry object and a mesh.
This iterative function computes an approximate vector that can be used to depenetrate a geom object
from a triangle mesh. The returned depenetration vector should be applied to 'geom' to move it out of the mesh.
The function works best when the amount of overlap between the geom object and the mesh is small. If the
geom object's center goes inside the mesh, backface culling usually kicks in, no overlap is detected,
and the function does not compute an MTD vector.
The function early exits if no overlap is detected after a depenetration attempt. This means that if
maxIter = N, the code will attempt at most N iterations but it might exit earlier if depenetration has
been successful. Usually N = 4 gives good results.
\param[out] direction Computed MTD unit direction
\param[out] depth Penetration depth. Always positive or zero.
\param[in] geom The geometry object
\param[in] geomPose Pose for the geometry object
\param[in] meshGeom The mesh geometry
\param[in] meshPose Pose for the mesh
\param[in] maxIter Max number of iterations before returning.
\param[out] usedIter Number of depenetration attempts performed during the call. Will not be returned if the pointer is NULL.
\return True if the MTD has successfully been computed, i.e. if objects do overlap.
@see PxGeometry PxTransform PxTriangleMeshGeometry
*/
bool PxComputeTriangleMeshPenetration(PxVec3& direction,
PxReal& depth,
const PxGeometry& geom,
const PxTransform& geomPose,
const PxTriangleMeshGeometry& meshGeom,
const PxTransform& meshPose,
PxU32 maxIter,
PxU32* usedIter = NULL);
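// Example (illustrative sketch, not part of the original header): depenetrating a capsule from a mesh.
// Assumes 'capsuleGeom', 'capsulePose', 'meshGeom' and 'meshPose' already exist.
//
//    PxVec3 mtd;
//    PxReal depth;
//    if(PxComputeTriangleMeshPenetration(mtd, depth, capsuleGeom, capsulePose, meshGeom, meshPose, 4))
//        capsulePose.p += mtd * depth;    // push the capsule out of the mesh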
/**
\brief Computes an approximate minimum translational distance (MTD) between a geometry object and a heightfield.
This iterative function computes an approximate vector that can be used to depenetrate a geom object
from a heightfield. The returned depenetration vector should be applied to 'geom' to move it out of the heightfield.
The function works best when the amount of overlap between the geom object and the mesh is small. If the
geom object's center goes inside the heightfield, backface culling usually kicks in, no overlap is detected,
and the function does not compute an MTD vector.
The function early exits if no overlap is detected after a depenetration attempt. This means that if
maxIter = N, the code will attempt at most N iterations but it might exit earlier if depenetration has
been successful. Usually N = 4 gives good results.
\param[out] direction Computed MTD unit direction
\param[out] depth Penetration depth. Always positive or zero.
\param[in] geom The geometry object
\param[in] geomPose Pose for the geometry object
\param[in] heightFieldGeom The heightfield geometry
\param[in] heightFieldPose Pose for the heightfield
\param[in] maxIter Max number of iterations before returning.
\param[out] usedIter Number of depenetration attempts performed during the call. Will not be returned if the pointer is NULL.
\return True if the MTD has successfully been computed, i.e. if objects do overlap.
@see PxGeometry PxTransform PxHeightFieldGeometry
*/
bool PxComputeHeightFieldPenetration(PxVec3& direction,
PxReal& depth,
const PxGeometry& geom,
const PxTransform& geomPose,
const PxHeightFieldGeometry& heightFieldGeom,
const PxTransform& heightFieldPose,
PxU32 maxIter,
PxU32* usedIter = NULL);
/**
\brief Extracts an isosurface from the SDF of a mesh if the SDF is available.
\param[in] triangleMesh The triangle mesh
\param[out] isosurfaceVertices The vertices of the extracted isosurface
\param[out] isosurfaceTriangleIndices The triangles of the extracted isosurface
*/
bool PxExtractIsosurfaceFromSDF(const PxTriangleMesh& triangleMesh, PxArray<PxVec3>& isosurfaceVertices, PxArray<PxU32>& isosurfaceTriangleIndices);
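// Example (illustrative sketch, not part of the original header): extracting the isosurface of a mesh that
// was cooked with an SDF. Assumes 'triangleMesh' points to such a PxTriangleMesh.
//
//    PxArray<PxVec3> isoVertices;
//    PxArray<PxU32> isoTriangles;
//    if(PxExtractIsosurfaceFromSDF(*triangleMesh, isoVertices, isoTriangles))
//    {
//        // isoVertices/isoTriangles now describe the extracted surface
//    }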
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 9,134 | C | 45.136363 | 223 | 0.768448 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxJointLimit.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_JOINT_LIMIT_H
#define PX_JOINT_LIMIT_H
/** \addtogroup extensions
@{
*/
#include "foundation/PxMath.h"
#include "common/PxTolerancesScale.h"
#include "extensions/PxJoint.h"
#include "PxPhysXConfig.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Describes the parameters for a joint limit.
Limits are enabled or disabled by setting flags or other configuration parameters on joints; see the
documentation for specific joint types for details.
*/
class PxJointLimitParameters
{
public:
/**
\brief Controls the amount of bounce when the joint hits a limit.
A restitution value of 1.0 causes the joint to bounce back with the velocity with which it hit the limit.
A value of zero causes the joint to stop dead.
In situations where the joint has many locked DOFs (e.g. 5) the restitution may not be applied
correctly. This is due to a limitation in the solver which causes the restitution velocity to become zero
as the solver enforces constraints on the other DOFs.
This limitation applies to both angular and linear limits, however it is generally most apparent with limited
angular DOFs. Disabling joint projection and increasing the solver iteration count may improve this behavior
to some extent.
Also, combining soft joint limits with joint drives driving against those limits may affect stability.
<b>Range:</b> [0,1]<br>
<b>Default:</b> 0.0
*/
PxReal restitution;
/**
\brief Determines the minimum impact velocity which will cause the joint to bounce.
*/
PxReal bounceThreshold;
/**
\brief if greater than zero, the limit is soft, i.e. a spring pulls the joint back to the limit
<b>Range:</b> [0, PX_MAX_F32)<br>
<b>Default:</b> 0.0
*/
PxReal stiffness;
/**
\brief if stiffness is greater than zero, this is the damping of the limit spring
<b>Range:</b> [0, PX_MAX_F32)<br>
<b>Default:</b> 0.0
*/
PxReal damping;
PxJointLimitParameters() :
restitution (0.0f),
bounceThreshold (0.0f),
stiffness (0.0f),
damping (0.0f)
{
}
PxJointLimitParameters(const PxJointLimitParameters& p) :
restitution (p.restitution),
bounceThreshold (p.bounceThreshold),
stiffness (p.stiffness),
damping (p.damping)
{
}
/**
\brief Returns true if the current settings are valid.
\return true if the current settings are valid
*/
PX_INLINE bool isValid() const
{
return PxIsFinite(restitution) && restitution >= 0 && restitution <= 1 &&
PxIsFinite(stiffness) && stiffness >= 0 &&
PxIsFinite(damping) && damping >= 0 &&
PxIsFinite(bounceThreshold) && bounceThreshold >= 0;
}
PX_INLINE bool isSoft() const
{
return damping>0 || stiffness>0;
}
protected:
~PxJointLimitParameters() {}
};
/**
\brief Describes a one-sided linear limit.
*/
class PxJointLinearLimit : public PxJointLimitParameters
{
public:
/**
\brief the extent of the limit.
<b>Range:</b> (0, PX_MAX_F32) <br>
<b>Default:</b> PX_MAX_F32
*/
PxReal value;
/**
\brief construct a linear hard limit
\param[in] extent The extent of the limit
@see PxJointLimitParameters
*/
PxJointLinearLimit(PxReal extent) : value(extent)
{
}
/**
\brief construct a linear soft limit
\param[in] extent the extent of the limit
\param[in] spring the stiffness and damping parameters for the limit spring
@see PxJointLimitParameters
*/
PxJointLinearLimit(PxReal extent, const PxSpring& spring) : value(extent)
{
stiffness = spring.stiffness;
damping = spring.damping;
}
/**
\brief Returns true if the limit is valid
\return true if the current settings are valid
*/
PX_INLINE bool isValid() const
{
return PxJointLimitParameters::isValid() &&
PxIsFinite(value) &&
value > 0.0f;
}
};
/**
\brief Describes a two-sided limit.
*/
class PxJointLinearLimitPair : public PxJointLimitParameters
{
public:
/**
\brief the range of the limit. The upper limit must be no lower than the
lower limit, and if they are equal the limited degree of freedom will be treated as locked.
<b>Range:</b> See the joint on which the limit is used for details<br>
<b>Default:</b> lower = -PX_MAX_F32/3, upper = PX_MAX_F32/3
*/
PxReal upper, lower;
/**
\brief Construct a linear hard limit pair. The lower distance value must be less than the upper distance value.
\param[in] scale A PxTolerancesScale struct. Should be the same as used when creating the PxPhysics object.
\param[in] lowerLimit The lower distance of the limit
\param[in] upperLimit The upper distance of the limit
@see PxJointLimitParameters PxTolerancesScale
*/
PxJointLinearLimitPair(const PxTolerancesScale& scale, PxReal lowerLimit = -PX_MAX_F32/3.0f, PxReal upperLimit = PX_MAX_F32/3.0f) :
upper(upperLimit),
lower(lowerLimit)
{
bounceThreshold = 2.0f*scale.length;
}
/**
\brief construct a linear soft limit pair
\param[in] lowerLimit The lower distance of the limit
\param[in] upperLimit The upper distance of the limit
\param[in] spring The stiffness and damping parameters of the limit spring
@see PxJointLimitParameters
*/
PxJointLinearLimitPair(PxReal lowerLimit, PxReal upperLimit, const PxSpring& spring) :
upper(upperLimit),
lower(lowerLimit)
{
stiffness = spring.stiffness;
damping = spring.damping;
}
/**
\brief Returns true if the limit is valid.
\return true if the current settings are valid
*/
PX_INLINE bool isValid() const
{
return PxJointLimitParameters::isValid() &&
PxIsFinite(upper) && PxIsFinite(lower) && upper >= lower &&
PxIsFinite(upper - lower);
}
};
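// Example (illustrative sketch, not part of the original header): a hard two-sided limit on a prismatic
// joint. Assumes 'physics', 'actor0'/'actor1' and joint frames 'frame0'/'frame1' exist, and uses the
// PxPrismaticJoint API from this extensions library.
//
//    PxPrismaticJoint* prismatic = PxPrismaticJointCreate(physics, actor0, frame0, actor1, frame1);
//    prismatic->setLimit(PxJointLinearLimitPair(physics.getTolerancesScale(), -0.5f, 0.5f));
//    prismatic->setPrismaticJointFlag(PxPrismaticJointFlag::eLIMIT_ENABLED, true);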
class PxJointAngularLimitPair : public PxJointLimitParameters
{
public:
/**
\brief the range of the limit. The upper limit must be no lower than the lower limit.
<b>Unit:</b> Angular: Radians
<b>Range:</b> See the joint on which the limit is used for details<br>
<b>Default:</b> lower = -PI/2, upper = PI/2
*/
PxReal upper, lower;
/**
\brief construct an angular hard limit pair.
The lower value must be less than the upper value.
\param[in] lowerLimit The lower angle of the limit
\param[in] upperLimit The upper angle of the limit
@see PxJointLimitParameters
*/
PxJointAngularLimitPair(PxReal lowerLimit, PxReal upperLimit) :
upper(upperLimit),
lower(lowerLimit)
{
bounceThreshold = 0.5f;
}
/**
\brief construct an angular soft limit pair.
The lower value must be less than the upper value.
\param[in] lowerLimit The lower angle of the limit
\param[in] upperLimit The upper angle of the limit
\param[in] spring The stiffness and damping of the limit spring
@see PxJointLimitParameters
*/
PxJointAngularLimitPair(PxReal lowerLimit, PxReal upperLimit, const PxSpring& spring) :
upper(upperLimit),
lower(lowerLimit)
{
stiffness = spring.stiffness;
damping = spring.damping;
}
/**
\brief Returns true if the limit is valid.
\return true if the current settings are valid
*/
PX_INLINE bool isValid() const
{
return PxJointLimitParameters::isValid() &&
PxIsFinite(upper) && PxIsFinite(lower) && upper >= lower;
}
};
/**
\brief Describes an elliptical conical joint limit. Note that very small or highly elliptical limit cones may
result in jitter.
@see PxD6Joint PxSphericalJoint
*/
class PxJointLimitCone : public PxJointLimitParameters
{
public:
/**
\brief the maximum angle from the Y axis of the constraint frame.
<b>Unit:</b> Angular: Radians
<b>Range:</b> Angular: (0,PI)<br>
<b>Default:</b> PI/2
*/
PxReal yAngle;
/**
\brief the maximum angle from the Z-axis of the constraint frame.
<b>Unit:</b> Angular: Radians
<b>Range:</b> Angular: (0,PI)<br>
<b>Default:</b> PI/2
*/
PxReal zAngle;
/**
\brief Construct a cone hard limit.
\param[in] yLimitAngle The limit angle from the Y-axis of the constraint frame
\param[in] zLimitAngle The limit angle from the Z-axis of the constraint frame
@see PxJointLimitParameters
*/
PxJointLimitCone(PxReal yLimitAngle, PxReal zLimitAngle) :
yAngle(yLimitAngle),
zAngle(zLimitAngle)
{
bounceThreshold = 0.5f;
}
/**
\brief Construct a cone soft limit.
\param[in] yLimitAngle The limit angle from the Y-axis of the constraint frame
\param[in] zLimitAngle The limit angle from the Z-axis of the constraint frame
\param[in] spring The stiffness and damping of the limit spring
@see PxJointLimitParameters
*/
PxJointLimitCone(PxReal yLimitAngle, PxReal zLimitAngle, const PxSpring& spring) :
yAngle(yLimitAngle),
zAngle(zLimitAngle)
{
stiffness = spring.stiffness;
damping = spring.damping;
}
/**
\brief Returns true if the limit is valid.
\return true if the current settings are valid
*/
PX_INLINE bool isValid() const
{
return PxJointLimitParameters::isValid() &&
PxIsFinite(yAngle) && yAngle>0 && yAngle<PxPi &&
PxIsFinite(zAngle) && zAngle>0 && zAngle<PxPi;
}
};
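// Example (illustrative sketch, not part of the original header): a soft cone limit on a spherical joint.
// Assumes 'physics', 'actor0'/'actor1' and joint frames 'frame0'/'frame1' exist, and uses the
// PxSphericalJoint API from this extensions library.
//
//    PxSphericalJoint* spherical = PxSphericalJointCreate(physics, actor0, frame0, actor1, frame1);
//    spherical->setLimitCone(PxJointLimitCone(PxPi/4.0f, PxPi/6.0f, PxSpring(100.0f, 10.0f)));
//    spherical->setSphericalJointFlag(PxSphericalJointFlag::eLIMIT_ENABLED, true);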
/**
\brief Describes a pyramidal joint limit.
@see PxD6Joint
*/
class PxJointLimitPyramid : public PxJointLimitParameters
{
public:
/**
\brief the minimum angle from the Y axis of the constraint frame.
<b>Unit:</b> Angular: Radians
<b>Range:</b> Angular: (-PI,PI)<br>
<b>Default:</b> -PI/2
*/
PxReal yAngleMin;
/**
\brief the maximum angle from the Y axis of the constraint frame.
<b>Unit:</b> Angular: Radians
<b>Range:</b> Angular: (-PI,PI)<br>
<b>Default:</b> PI/2
*/
PxReal yAngleMax;
/**
\brief the minimum angle from the Z-axis of the constraint frame.
<b>Unit:</b> Angular: Radians
<b>Range:</b> Angular: (-PI,PI)<br>
<b>Default:</b> -PI/2
*/
PxReal zAngleMin;
/**
\brief the maximum angle from the Z-axis of the constraint frame.
<b>Unit:</b> Angular: Radians
<b>Range:</b> Angular: (-PI,PI)<br>
<b>Default:</b> PI/2
*/
PxReal zAngleMax;
/**
\brief Construct a pyramid hard limit.
\param[in] yLimitAngleMin The minimum limit angle from the Y-axis of the constraint frame
\param[in] yLimitAngleMax The maximum limit angle from the Y-axis of the constraint frame
\param[in] zLimitAngleMin The minimum limit angle from the Z-axis of the constraint frame
\param[in] zLimitAngleMax The maximum limit angle from the Z-axis of the constraint frame
@see PxJointLimitParameters
*/
PxJointLimitPyramid(PxReal yLimitAngleMin, PxReal yLimitAngleMax, PxReal zLimitAngleMin, PxReal zLimitAngleMax) :
yAngleMin(yLimitAngleMin),
yAngleMax(yLimitAngleMax),
zAngleMin(zLimitAngleMin),
zAngleMax(zLimitAngleMax)
{
bounceThreshold = 0.5f;
}
/**
\brief Construct a pyramid soft limit.
\param[in] yLimitAngleMin The minimum limit angle from the Y-axis of the constraint frame
\param[in] yLimitAngleMax The maximum limit angle from the Y-axis of the constraint frame
\param[in] zLimitAngleMin The minimum limit angle from the Z-axis of the constraint frame
\param[in] zLimitAngleMax The maximum limit angle from the Z-axis of the constraint frame
\param[in] spring The stiffness and damping of the limit spring
@see PxJointLimitParameters
*/
PxJointLimitPyramid(PxReal yLimitAngleMin, PxReal yLimitAngleMax, PxReal zLimitAngleMin, PxReal zLimitAngleMax, const PxSpring& spring) :
yAngleMin(yLimitAngleMin),
yAngleMax(yLimitAngleMax),
zAngleMin(zLimitAngleMin),
zAngleMax(zLimitAngleMax)
{
stiffness = spring.stiffness;
damping = spring.damping;
}
/**
\brief Returns true if the limit is valid.
\return true if the current settings are valid
*/
PX_INLINE bool isValid() const
{
return PxJointLimitParameters::isValid() &&
PxIsFinite(yAngleMin) && yAngleMin>-PxPi && yAngleMin<PxPi &&
PxIsFinite(yAngleMax) && yAngleMax>-PxPi && yAngleMax<PxPi &&
PxIsFinite(zAngleMin) && zAngleMin>-PxPi && zAngleMin<PxPi &&
PxIsFinite(zAngleMax) && zAngleMax>-PxPi && zAngleMax<PxPi &&
yAngleMax>=yAngleMin && zAngleMax>=zAngleMin;
}
};
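// Example (illustrative sketch, not part of the original header): an asymmetric pyramid limit on a D6 joint
// with both swing axes limited. Assumes 'physics', 'actor0'/'actor1' and joint frames 'frame0'/'frame1'
// exist, and uses the PxD6Joint API from this extensions library.
//
//    PxD6Joint* d6 = PxD6JointCreate(physics, actor0, frame0, actor1, frame1);
//    d6->setMotion(PxD6Axis::eSWING1, PxD6Motion::eLIMITED);
//    d6->setMotion(PxD6Axis::eSWING2, PxD6Motion::eLIMITED);
//    d6->setPyramidSwingLimit(PxJointLimitPyramid(-PxPi/4.0f, PxPi/4.0f, -PxPi/6.0f, PxPi/6.0f));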
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 13,452 | C | 26.567623 | 138 | 0.726881 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxSoftBodyExt.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_SOFT_BODY_EXT_H
#define PX_SOFT_BODY_EXT_H
/** \addtogroup extensions
@{
*/
#include "foundation/PxTransform.h"
#include "foundation/PxUserAllocated.h"
#include "PxSoftBody.h"
#include "PxSoftBodyFlag.h"
#include "cudamanager/PxCudaContextManager.h"
#include "cudamanager/PxCudaTypes.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
struct PxCookingParams;
class PxSimpleTriangleMesh;
class PxInsertionCallback;
class PxSoftBodyMesh;
/**
\brief Utility functions for use with PxSoftBody and subclasses
*/
class PxSoftBodyExt
{
public:
/**
\brief Computes the SoftBody's vertex masses from the provided density and the volume of the tetrahedra
The buffers affected by this operation can be obtained from the SoftBody using the methods getSimPositionInvMassBufferD() and getSimVelocityBufferD()
The inverse mass is stored in the 4th component (the first three components are x, y, z coordinates) of the simulation mesh's position buffer.
\param[in] softBody The soft body which will get its mass updated
\param[in] density The density to used to calculate the mass from the body's volume
\param[in] maxInvMassRatio Maximum allowed ratio defined as max(vertexMasses) / min(vertexMasses) where vertexMasses is a list of float values with a mass for every vertex in the simulation mesh
\param[in] simPositionsPinned A pointer to a pinned host memory buffer containing positions and inverse masses for each vertex of the simulation mesh.
@see PxSoftBody PxSoftBody::getSimPositionInvMassBufferD()
*/
static void updateMass(PxSoftBody& softBody, const PxReal density, const PxReal maxInvMassRatio, PxVec4* simPositionsPinned);
/**
\brief Computes the SoftBody's vertex masses such that the sum of all masses is equal to the provided mass
The buffers affected by this operation can be obtained from the SoftBody using the methods getSimPositionInvMassBufferD() and getSimVelocityBufferD()
The inverse mass is stored in the 4th component (the first three components are x, y, z coordinates) of the simulation mesh's position buffer.
\param[in] softBody The soft body which will get its mass updated
\param[in] mass The SoftBody's mass
\param[in] maxInvMassRatio Maximum allowed ratio defined as max(vertexMasses) / min(vertexMasses) where vertexMasses is a list of float values with a mass for every vertex in the simulation mesh
\param[in] simPositionsPinned A pointer to a pinned host memory buffer containing positions and inverse masses for each vertex of the simulation mesh.
@see PxSoftBody PxSoftBody::getSimPositionInvMassBufferD()
*/
static void setMass(PxSoftBody& softBody, const PxReal mass, const PxReal maxInvMassRatio, PxVec4* simPositionsPinned);
/**
\brief Transforms a SoftBody
The buffers affected by this operation can be obtained from the SoftBody using the methods getSimPositionInvMassBufferD() and getSimVelocityBufferD()
Applies a transformation to the simulation mesh's positions and velocities. Velocities only get rotated and scaled (translation is not applicable to direction vectors).
It does not modify the body's mass.
If the method is called multiple times, the transformation will compound with the ones previously applied.
\param[in] softBody The soft body which is transformed
\param[in] transform The transform to apply
\param[in] scale A scaling factor
\param[in] simPositionsPinned A pointer to a pinned host memory buffer containing positions and inverse masses for each vertex of the simulation mesh.
\param[in] simVelocitiesPinned A pointer to a pinned host memory buffer containing velocities for each vertex of the simulation mesh.
\param[in] collPositionsPinned A pointer to a pinned host memory buffer containing positions and inverse masses for each vertex of the collision mesh.
\param[in] restPositionsPinned A pointer to a pinned host memory buffer containing rest positions of the collision mesh.
@see PxSoftBody
*/
static void transform(PxSoftBody& softBody, const PxTransform& transform, const PxReal scale, PxVec4* simPositionsPinned, PxVec4* simVelocitiesPinned, PxVec4* collPositionsPinned, PxVec4* restPositionsPinned);
/**
\brief Updates the collision mesh's vertex positions to match the simulation mesh's transformation and scale.
The buffer affected by this operation can be obtained from the SoftBody using the method getPositionInvMassBufferD()
\param[in] softBody The soft body which will get its collision mesh vertices updated
\param[in] simPositionsPinned A pointer to a pinned host memory buffer containing positions and inverse masses for each vertex of the simulation mesh.
\param[in] collPositionsPinned A pointer to a pinned host memory buffer containing positions and inverse masses for each vertex of the collision mesh.
@see PxSoftBody
*/
static void updateEmbeddedCollisionMesh(PxSoftBody& softBody, PxVec4* simPositionsPinned, PxVec4* collPositionsPinned);
/**
\brief Uploads prepared SoftBody data to the GPU. It ensures that the embedded collision mesh matches the simulation mesh's transformation and scale.
\param[in] softBody The soft body which will perform the data upload
\param[in] flags Specifies which buffers the data transfer should include
\param[in] simPositionsPinned A pointer to a pinned host memory buffer containing positions and inverse masses for each vertex of the simulation mesh.
\param[in] simVelocitiesPinned A pointer to a pinned host memory buffer containing velocities for each vertex of the simulation mesh.
\param[in] collPositionsPinned A pointer to a pinned host memory buffer containing positions and inverse masses for each vertex of the collision mesh.
\param[in] restPositionsPinned A pointer to a pinned host memory buffer containing rest positions of the collision mesh.
\param[in] stream A cuda stream to perform the copies.
@see PxSoftBody
@deprecated Use copyToDevice() instead.
*/
PX_DEPRECATED static void commit(PxSoftBody& softBody, PxSoftBodyDataFlags flags, PxVec4* simPositionsPinned, PxVec4* simVelocitiesPinned, PxVec4* collPositionsPinned, PxVec4* restPositionsPinned, CUstream stream = CUstream(0));
/**
\brief Uploads prepared SoftBody data to the GPU. It ensures that the embedded collision mesh matches the simulation mesh's transformation and scale.
\param[in] softBody The soft body which will perform the data upload
\param[in] flags Specifies which buffers the data transfer should include
\param[in] simPositionsPinned A pointer to a pinned host memory buffer containing positions and inverse masses for each vertex of the simulation mesh.
\param[in] simVelocitiesPinned A pointer to a pinned host memory buffer containing velocities for each vertex of the simulation mesh.
\param[in] collPositionsPinned A pointer to a pinned host memory buffer containing positions and inverse masses for each vertex of the collision mesh.
\param[in] restPositionsPinned A pointer to a pinned host memory buffer containing rest positions of the collision mesh.
\param[in] stream A cuda stream to perform the copies.
@see PxSoftBody
*/
static void copyToDevice(PxSoftBody& softBody, PxSoftBodyDataFlags flags, PxVec4* simPositionsPinned, PxVec4* simVelocitiesPinned, PxVec4* collPositionsPinned, PxVec4* restPositionsPinned, CUstream stream = CUstream(0));
/**
\brief Creates a full SoftBody mesh matching the shape given as input. Uses a voxel mesh for FEM simulation and a surface-matching mesh for collision detection.
\param[in] params Cooking params instance required for mesh processing
\param[in] surfaceMesh Input triangle mesh that represents the surface of the SoftBody
\param[in] numVoxelsAlongLongestAABBAxis The number of voxels along the longest bounding box axis
\param[in] insertionCallback The insertion interface from PxPhysics
\param[in] validate If set to true the input triangle mesh will get analyzed to find possible deficiencies
\return SoftBody mesh if cooking was successful, NULL otherwise
@see PxSoftBodyMesh
*/
static PxSoftBodyMesh* createSoftBodyMesh(const PxCookingParams& params, const PxSimpleTriangleMesh& surfaceMesh, PxU32 numVoxelsAlongLongestAABBAxis, PxInsertionCallback& insertionCallback, const bool validate = true);
/**
\brief Creates a full SoftBody mesh matching the shape given as input. Uses the same surface-matching mesh for collision detection and FEM simulation.
\param[in] params Cooking params instance required for mesh processing
\param[in] surfaceMesh Input triangle mesh that represents the surface of the SoftBody
\param[in] insertionCallback The insertion interface from PxPhysics
\param[in] maxWeightRatioInTet Upper limit for the ratio of node weights that are adjacent to the same tetrahedron. The closer to one (while remaining larger than one), the more stable the simulation.
\param[in] validate If set to true the input triangle mesh will get analyzed to find possible deficiencies
\return SoftBody mesh if cooking was successful, NULL otherwise
@see PxSoftBodyMesh
*/
static PxSoftBodyMesh* createSoftBodyMeshNoVoxels(const PxCookingParams& params, const PxSimpleTriangleMesh& surfaceMesh, PxInsertionCallback& insertionCallback, PxReal maxWeightRatioInTet = 1.5f, const bool validate = true);
/**
\brief Creates a SoftBody instance from a SoftBody mesh
\param[in] softBodyMesh The SoftBody mesh
\param[in] transform The transform that defines initial position and orientation of the SoftBody
\param[in] material The material
\param[in] cudaContextManager A cuda context manager
\param[in] density The density used to compute the mass properties
\param[in] solverIterationCount The number of iterations the solver should apply during simulation
\param[in] femParams Additional parameters to specify e.g. damping
\param[in] scale The scaling of the SoftBody
\return SoftBody instance
@see PxSoftBodyMesh, PxSoftBody
*/
static PxSoftBody* createSoftBodyFromMesh(PxSoftBodyMesh* softBodyMesh, const PxTransform& transform,
const PxFEMSoftBodyMaterial& material, PxCudaContextManager& cudaContextManager, PxReal density = 100.0f, PxU32 solverIterationCount = 30,
const PxFEMParameters& femParams = PxFEMParameters(), PxReal scale = 1.0f);
/**
\brief Creates a SoftBody instance with a box shape
\param[in] transform The transform that defines initial position and orientation of the SoftBody
\param[in] boxDimensions The dimensions (side lengths) of the box shape
\param[in] material The material
\param[in] cudaContextManager A cuda context manager
\param[in] maxEdgeLength The maximal length of a triangle edge. Subdivision will get applied until the edge length criterion is met. -1 means no subdivision is applied.
\param[in] density The density used to compute the mass properties
\param[in] solverIterationCount The number of iterations the solver should apply during simulation
\param[in] femParams Additional parameters to specify e.g. damping
\param[in] numVoxelsAlongLongestAABBAxis The number of voxels to use for the simulation mesh along the longest bounding box dimension
\param[in] scale The scaling of the SoftBody
\return SoftBody instance
@see PxSoftBodyMesh, PxSoftBody
*/
static PxSoftBody* createSoftBodyBox(const PxTransform& transform, const PxVec3& boxDimensions, const PxFEMSoftBodyMaterial& material,
PxCudaContextManager& cudaContextManager, PxReal maxEdgeLength = -1.0f, PxReal density = 100.0f, PxU32 solverIterationCount = 30,
const PxFEMParameters& femParams = PxFEMParameters(), PxU32 numVoxelsAlongLongestAABBAxis = 10, PxReal scale = 1.0f);
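// Example (illustrative sketch, not part of the original header): creating a unit-sized soft body box two
// units above the origin and adding it to a scene. Assumes 'femMaterial', 'cudaContextManager' and 'scene'
// already exist.
//
//    PxSoftBody* softBody = PxSoftBodyExt::createSoftBodyBox(PxTransform(PxVec3(0.0f, 2.0f, 0.0f)),
//        PxVec3(1.0f), *femMaterial, *cudaContextManager);
//    if(softBody)
//        scene->addActor(*softBody);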
/**
\brief Allocates and initializes pinned host memory buffers from an actor with an attached shape.
\param[in] softBody A PxSoftBody that has a valid shape attached to it.
\param[in] cudaContextManager The PxCudaContextManager of the scene this soft body will be simulated in
\param[in] simPositionInvMassPinned A reference to a pointer for the return value of the simPositionInvMassPinned buffer, will be set by this function.
\param[in] simVelocityPinned A reference to a pointer for the return value of the simVelocityPinned buffer, will be set by this function.
\param[in] collPositionInvMassPinned A reference to a pointer for the return value of the collPositionInvMassPinned buffer, will be set by this function.
\param[in] restPositionPinned A reference to a pointer for the return value of the restPositionPinned buffer, will be set by this function.
@see PxSoftBody
*/
static void allocateAndInitializeHostMirror(PxSoftBody& softBody, PxCudaContextManager* cudaContextManager, PxVec4*& simPositionInvMassPinned, PxVec4*& simVelocityPinned, PxVec4*& collPositionInvMassPinned, PxVec4*& restPositionPinned);
/**
\brief Given a set of points and a set of tetrahedra, it finds the equilibrium state of the softbody. Every input point is either fixed or can move freely.
\param[in] verticesOriginal Mesh vertex positions in undeformed original state.
\param[in] verticesDeformed Mesh vertex positions in new deformed state. Only fixed vertices must have their final location, all other locations will get updated by the method.
\param[in] nbVertices The number of vertices.
\param[in] tetrahedra The tetrahedra.
\param[in] nbTetraheda The number of tetrahedra.
\param[in] vertexIsFixed Optional input that specifies which vertex is fixed and which one can move to relax the tension. If not provided, vertices from verticesOriginal which have a .w value of 0 will be considered fixed.
\param[in] numIterations The number of stress relaxation iterations to run.
*/
static void relaxSoftBodyMesh(const PxVec4* verticesOriginal, PxVec4* verticesDeformed, PxU32 nbVertices, const PxU32* tetrahedra, PxU32 nbTetraheda, const bool* vertexIsFixed = NULL, PxU32 numIterations = 200);
};
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 15,522 | C | 59.87451 | 237 | 0.799575 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxDefaultCpuDispatcher.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_DEFAULT_CPU_DISPATCHER_H
#define PX_DEFAULT_CPU_DISPATCHER_H
/** \addtogroup extensions
@{
*/
#include "common/PxPhysXCommonConfig.h"
#include "task/PxCpuDispatcher.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief A default implementation for a CPU task dispatcher.
@see PxDefaultCpuDispatcherCreate() PxCpuDispatcher
*/
class PxDefaultCpuDispatcher : public PxCpuDispatcher
{
public:
/**
\brief Deletes the dispatcher.
Do not keep a reference to the deleted instance.
@see PxDefaultCpuDispatcherCreate()
*/
virtual void release() = 0;
/**
\brief Enables profiling at task level.
\note By default enabled only in profiling builds.
\param[in] runProfiled True if tasks should be profiled.
*/
virtual void setRunProfiled(bool runProfiled) = 0;
/**
\brief Checks if profiling is enabled at task level.
\return True if tasks should be profiled.
*/
virtual bool getRunProfiled() const = 0;
};
/**
\brief If a thread ends up waiting for work it will find itself in a spin-wait loop until work becomes available.
Three strategies are available to limit wasted cycles.
The strategies are as follows:
a) wait until a work task signals the end of the spin-wait period.
b) yield the thread by providing a hint to reschedule thread execution, thereby allowing other threads to run.
c) yield the processor by informing it that it is waiting for work and requesting it to more efficiently use compute resources.
*/
struct PxDefaultCpuDispatcherWaitForWorkMode
{
enum Enum
{
eWAIT_FOR_WORK,
eYIELD_THREAD,
eYIELD_PROCESSOR
};
};
/**
\brief Create default dispatcher, extensions SDK needs to be initialized first.
\param[in] numThreads Number of worker threads the dispatcher should use.
\param[in] affinityMasks Array with affinity mask for each thread. If not defined, default masks will be used.
\param[in] mode is the strategy employed when a busy-wait is encountered.
\param[in] yieldProcessorCount specifies the number of times an OS-specific yield processor command will be executed
during each cycle of a busy-wait in the event that the specified mode is eYIELD_PROCESSOR
\note numThreads may be zero, in which case no worker threads are initialized and
simulation tasks will be executed on the thread that calls PxScene::simulate()
\note yieldProcessorCount must be greater than zero if eYIELD_PROCESSOR is the chosen mode and equal to zero for all other modes.
\note eYIELD_THREAD and eYIELD_PROCESSOR modes will use compute resources even if the simulation is not running.
It is left to users to keep threads inactive, if so desired, when no simulation is running.
@see PxDefaultCpuDispatcher
*/
PxDefaultCpuDispatcher* PxDefaultCpuDispatcherCreate(PxU32 numThreads, PxU32* affinityMasks = NULL, PxDefaultCpuDispatcherWaitForWorkMode::Enum mode = PxDefaultCpuDispatcherWaitForWorkMode::eWAIT_FOR_WORK, PxU32 yieldProcessorCount = 0);
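// Example (illustrative sketch, not part of the original header): a dispatcher with two worker threads,
// hooked into a scene descriptor before scene creation. Assumes a PxSceneDesc 'sceneDesc' exists.
//
//    PxDefaultCpuDispatcher* dispatcher = PxDefaultCpuDispatcherCreate(2);
//    sceneDesc.cpuDispatcher = dispatcher;
//    // ... create and simulate the scene ...
//    dispatcher->release();    // after the scene using it has been released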
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 4,656 | C | 36.556451 | 237 | 0.773196 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxTetrahedronMeshAnalysisResult.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
#ifndef PX_TETRAHEDRON_MESH_ANALYSIS_RESULT_H
#define PX_TETRAHEDRON_MESH_ANALYSIS_RESULT_H
#include "PxPhysXConfig.h"
#include "foundation/PxFlags.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief These flags indicate what kind of deficiencies a tetrahedron mesh has and describe if the mesh is considered ok, problematic or invalid for softbody cooking
*/
class PxTetrahedronMeshAnalysisResult
{
public:
enum Enum
{
eVALID = 0,
eDEGENERATE_TETRAHEDRON = (1 << 0), //!< At least one tetrahedron has zero or negative volume. This can happen when the input triangle mesh contains triangles that are very elongated, e.g. one edge is a lot shorter than the other two.
eMESH_IS_PROBLEMATIC = (1 << 1), //!< flag is set if the mesh is categorized as problematic
eMESH_IS_INVALID = (1 << 2) //!< flag is set if the mesh is categorized as invalid
};
};
typedef PxFlags<PxTetrahedronMeshAnalysisResult::Enum, PxU32> PxTetrahedronMeshAnalysisResults;
PX_FLAGS_OPERATORS(PxTetrahedronMeshAnalysisResult::Enum, PxU32)
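// Editor's example (sketch, not part of the original header): testing the combined analysis flags.
// The 'result' value is assumed to come from a tetrahedron mesh analysis/validation routine.
//
//   void handleAnalysisResult(PxTetrahedronMeshAnalysisResults result)
//   {
//       if(result & PxTetrahedronMeshAnalysisResult::eMESH_IS_INVALID)
//       {
//           // reject the mesh, softbody cooking is not expected to work
//       }
//       else if(result & PxTetrahedronMeshAnalysisResult::eMESH_IS_PROBLEMATIC)
//       {
//           // cooking may succeed, but simulation quality can suffer (e.g. degenerate tetrahedra)
//       }
//   }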
#if !PX_DOXYGEN
}
#endif
#endif
| 2,657 | C | 41.870967 | 243 | 0.756492 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxRepXSerializer.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_REPX_SERIALIZER_H
#define PX_REPX_SERIALIZER_H
/** \addtogroup Serializers
@{
*/
#include "common/PxBase.h"
#include "extensions/PxRepXSimpleType.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class XmlMemoryAllocator;
class XmlWriter;
class XmlReader;
class MemoryBuffer;
/**
\brief Serializer interface for RepX (Xml) serialization.
\deprecated Xml serialization is deprecated. An alternative serialization system is provided through USD Physics.
In order to serialize a class to RepX both a PxSerializer and
a PxRepXSerializer implementation are needed.
A RepX serializer provides the ability to capture a live
object to a descriptor or static state and the ability to
write that state out to a file. Objects allocated
by the Serializer using the allocator are freed when the
collection itself is freed.
SnRepXCoreSerializers.cpp implements a set of Serializers
for the core PhysX types.
\note Implementing a PxRepXSerializer is currently not practical without including the internal PhysXExtension header "SnRepXSerializerImpl.h".
@see PxSerializer, PX_NEW_REPX_SERIALIZER, PxSerializationRegistry::registerRepXSerializer
*/
class PX_DEPRECATED PxRepXSerializer
{
protected:
virtual ~PxRepXSerializer(){}
public:
/**
\brief The type this Serializer is meant to operate on.
@see PxRepXObject::typeName
*/
virtual const char* getTypeName() = 0;
/**
\brief Convert from a RepX object to a key-value pair hierarchy
\param[in] inLiveObject The object to convert to the passed in descriptor.
\param[in] inCollection The collection to use to find ids of references of this object.
\param[in] inWriter Interface to write data to.
\param[in] inTempBuffer Buffer used for temporary allocations.
\param[in] inArgs The arguments used in create resources and objects.
*/
virtual void objectToFile( const PxRepXObject& inLiveObject, PxCollection* inCollection, XmlWriter& inWriter, MemoryBuffer& inTempBuffer, PxRepXInstantiationArgs& inArgs ) = 0;
/**
\brief Convert from a descriptor to a live object. Must be an object of this Serializer type.
\param[in] inReader The inverse of the writer, a key-value pair database.
\param[in] inAllocator An allocator to use for temporary allocations. These will be freed after instantiation completes.
\param[in] inArgs The arguments used in create resources and objects.
\param[in] inCollection The collection used to find references.
\return The new live object. It can be an invalid object if the instantiation cannot take place.
*/
virtual PxRepXObject fileToObject( XmlReader& inReader, XmlMemoryAllocator& inAllocator, PxRepXInstantiationArgs& inArgs, PxCollection* inCollection ) = 0;
};
#if !PX_DOXYGEN
} // namespace physx
#endif
/**
\brief Inline helper template function to create PxRepXObject from TDataType type supporting PxTypeInfo<TDataType>::name.
\deprecated Xml serialization is deprecated. An alternative serialization system is provided through USD Physics.
*/
template<typename TDataType>
PX_DEPRECATED PX_INLINE physx::PxRepXObject PxCreateRepXObject(const TDataType* inType, const physx::PxSerialObjectId inId)
{
return physx::PxRepXObject(physx::PxTypeInfo<TDataType>::name(), inType, inId);
}
/**
\brief Inline helper function to create PxRepXObject from a PxBase instance.
\deprecated Xml serialization is deprecated. An alternative serialization system is provided through USD Physics.
*/
PX_DEPRECATED PX_INLINE physx::PxRepXObject PxCreateRepXObject(const physx::PxBase* inType, const physx::PxSerialObjectId inId)
{
PX_ASSERT(inType);
return physx::PxRepXObject(inType->getConcreteTypeName(), inType, inId);
}
/**
\brief Inline helper template function to create PxRepXObject from TDataType type using the inType pointer as a PxSerialObjectId id.
\deprecated Xml serialization is deprecated. An alternative serialization system is provided through USD Physics.
*/
template<typename TDataType>
PX_DEPRECATED PX_INLINE physx::PxRepXObject PxCreateRepXObject(const TDataType* inType)
{
return PxCreateRepXObject(inType, static_cast<physx::PxSerialObjectId>(size_t(inType)));
}
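// Editor's example (sketch, not part of the original header): wrapping an existing actor for
// RepX serialization. The 'actor' pointer is an assumption used only for illustration.
//
//   physx::PxRigidDynamic* actor = /* ... an existing actor ... */;
//   physx::PxRepXObject repxObject = PxCreateRepXObject(actor, physx::PxSerialObjectId(1));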
/**
\brief Preprocessor macro for RepX serializer creation.
\deprecated Xml serialization is deprecated. An alternative serialization system is provided through USD Physics.
*/
#define PX_NEW_REPX_SERIALIZER(T) \
*PX_PLACEMENT_NEW(PxGetAllocatorCallback()->allocate(sizeof(T), "PxRepXSerializer", PX_FL ), T)(*PxGetAllocatorCallback())
/**
\brief Preprocessor Macro to simplify RepX serializer delete.
\deprecated Xml serialization is deprecated. An alternative serialization system is provided through USD Physics.
*/
#define PX_DELETE_REPX_SERIALIZER(x) \
{ PxRepXSerializer* s = x; if (s) { PxGetAllocatorCallback()->deallocate(s); } }
/** @} */
#endif
| 6,513 | C | 40.490446 | 178 | 0.775986 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxRigidBodyExt.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_RIGID_BODY_EXT_H
#define PX_RIGID_BODY_EXT_H
/** \addtogroup extensions
@{
*/
#include "PxPhysXConfig.h"
#include "PxRigidBody.h"
#include "PxQueryReport.h"
#include "PxQueryFiltering.h"
#include "extensions/PxMassProperties.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxScene;
struct PxQueryCache;
class PxShape;
/**
\brief utility functions for use with PxRigidBody and subclasses
@see PxRigidBody PxRigidDynamic PxArticulationLink
*/
class PxRigidBodyExt
{
public:
/**
\brief Computation of mass properties for a rigid body actor
To simulate a dynamic rigid actor, the SDK needs a mass and an inertia tensor.
This method offers functionality to compute the necessary mass and inertia properties based on the shapes declared in
the PxRigidBody descriptor and some additionally specified parameters. For each shape, the shape geometry,
the shape positioning within the actor and the specified shape density are used to compute the body's mass and
inertia properties.
<ul>
<li>Shapes without PxShapeFlag::eSIMULATION_SHAPE set are ignored unless includeNonSimShapes is true.</li>
<li>Shapes with plane, triangle mesh or heightfield geometry and PxShapeFlag::eSIMULATION_SHAPE set are not allowed for PxRigidBody collision.</li>
</ul>
This method will set the mass, center of mass, and inertia tensor.
If no collision shapes are found, the inertia tensor is set to (1,1,1) and the mass to 1.
If massLocalPose is non-NULL, the rigid body's center of mass parameter will be set
to the user-provided value (massLocalPose) and the inertia tensor will be resolved at that point.
\note If all shapes of the actor have the same density then the overloaded method updateMassAndInertia() with a single density parameter can be used instead.
\param[in,out] body The rigid body.
\param[in] shapeDensities The per shape densities. There must be one entry for each shape which has the PxShapeFlag::eSIMULATION_SHAPE set (or for all shapes if includeNonSimShapes is set to true). Other shapes are ignored. The density values must be greater than 0.
\param[in] shapeDensityCount The number of provided density values.
\param[in] massLocalPose The center of mass relative to the actor frame. If set to null then (0,0,0) is assumed.
\param[in] includeNonSimShapes True if all kind of shapes (PxShapeFlag::eSCENE_QUERY_SHAPE, PxShapeFlag::eTRIGGER_SHAPE) should be taken into account.
\return Boolean. True on success else false.
@see PxRigidBody::setMassLocalPose PxRigidBody::setMassSpaceInertiaTensor PxRigidBody::setMass
*/
static bool updateMassAndInertia(PxRigidBody& body, const PxReal* shapeDensities, PxU32 shapeDensityCount, const PxVec3* massLocalPose = NULL, bool includeNonSimShapes = false);
/**
\brief Computation of mass properties for a rigid body actor
See previous method for details.
\param[in,out] body The rigid body.
\param[in] density The density of the body. Used to compute the mass of the body. The density must be greater than 0.
\param[in] massLocalPose The center of mass relative to the actor frame. If set to null then (0,0,0) is assumed.
\param[in] includeNonSimShapes True if all kind of shapes (PxShapeFlag::eSCENE_QUERY_SHAPE, PxShapeFlag::eTRIGGER_SHAPE) should be taken into account.
\return Boolean. True on success else false.
@see PxRigidBody::setMassLocalPose PxRigidBody::setMassSpaceInertiaTensor PxRigidBody::setMass
*/
static bool updateMassAndInertia(PxRigidBody& body, PxReal density, const PxVec3* massLocalPose = NULL, bool includeNonSimShapes = false);
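// Editor's example (sketch, not part of the original header): deriving mass, inertia and center
// of mass from a uniform density of 1000 (e.g. kg/m^3 when working in meters/kilograms).
// 'body' is assumed to be a valid PxRigidDynamic with simulation shapes attached.
//
//   PxRigidBodyExt::updateMassAndInertia(*body, 1000.0f);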
/**
\brief Computation of mass properties for a rigid body actor
This method sets the mass, inertia and center of mass of a rigid body. The mass is set to the sum of all user-supplied
shape mass values, and the inertia and center of mass are computed according to the rigid body's shapes and the per shape mass input values.
If no collision shapes are found, the inertia tensor is set to (1,1,1)
\note If a single mass value should be used for the actor as a whole then the overloaded method setMassAndUpdateInertia() with a single mass parameter can be used instead.
@see updateMassAndInertia for more details.
\param[in,out] body The rigid body for which to set the mass and centre of mass local pose properties.
\param[in] shapeMasses The per shape mass values. There must be one entry for each shape which has the PxShapeFlag::eSIMULATION_SHAPE set. Other shapes are ignored. The mass values must be greater than 0.
\param[in] shapeMassCount The number of provided mass values.
\param[in] massLocalPose The center of mass relative to the actor frame. If set to null then (0,0,0) is assumed.
\param[in] includeNonSimShapes True if all kind of shapes (PxShapeFlag::eSCENE_QUERY_SHAPE, PxShapeFlag::eTRIGGER_SHAPE) should be taken into account.
\return Boolean. True on success else false.
@see PxRigidBody::setCMassLocalPose PxRigidBody::setMassSpaceInertiaTensor PxRigidBody::setMass
*/
static bool setMassAndUpdateInertia(PxRigidBody& body, const PxReal* shapeMasses, PxU32 shapeMassCount, const PxVec3* massLocalPose = NULL, bool includeNonSimShapes = false);
/**
\brief Computation of mass properties for a rigid body actor
This method sets the mass, inertia and center of mass of a rigid body. The mass is set to the user-supplied
value, and the inertia and center of mass are computed according to the rigid body's shapes and the input mass.
If no collision shapes are found, the inertia tensor is set to (1,1,1)
@see updateMassAndInertia for more details.
\param[in,out] body The rigid body for which to set the mass and centre of mass local pose properties.
\param[in] mass The mass of the body. Must be greater than 0.
\param[in] massLocalPose The center of mass relative to the actor frame. If set to null then (0,0,0) is assumed.
\param[in] includeNonSimShapes True if all kind of shapes (PxShapeFlag::eSCENE_QUERY_SHAPE, PxShapeFlag::eTRIGGER_SHAPE) should be taken into account.
\return Boolean. True on success else false.
@see PxRigidBody::setCMassLocalPose PxRigidBody::setMassSpaceInertiaTensor PxRigidBody::setMass
*/
static bool setMassAndUpdateInertia(PxRigidBody& body, PxReal mass, const PxVec3* massLocalPose = NULL, bool includeNonSimShapes = false);
/**
\brief Compute the mass, inertia tensor and center of mass from a list of shapes.
\param[in] shapes The shapes to compute the mass properties from.
\param[in] shapeCount The number of provided shapes.
\return The mass properties from the combined shapes.
@see PxRigidBody::setCMassLocalPose PxRigidBody::setMassSpaceInertiaTensor PxRigidBody::setMass
*/
static PxMassProperties computeMassPropertiesFromShapes(const PxShape* const* shapes, PxU32 shapeCount);
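// Editor's example (sketch, not part of the original header): combining the mass properties of
// all shapes attached to an actor. 'actor' is assumed to be a valid PxRigidActor with at most
// 8 shapes for the purpose of this illustration.
//
//   PxShape* shapes[8];
//   const PxU32 nbShapes = actor->getShapes(shapes, 8);
//   const PxMassProperties props = PxRigidBodyExt::computeMassPropertiesFromShapes(shapes, nbShapes);
//   const PxReal combinedMass = props.mass;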
/**
\brief Applies a force (or impulse) defined in the global coordinate frame, acting at a particular
point in global coordinates, to the actor.
Note that if the force does not act along the center of mass of the actor, this
will also add the corresponding torque. Because forces are reset at the end of every timestep,
you can maintain a total external force on an object by calling this once every frame.
\note if this call is used to apply a force or impulse to an articulation link, only the link is updated, not the entire
articulation
::PxForceMode determines if the force is to be conventional or impulsive. Only eFORCE and eIMPULSE are supported, as the
force required to produce a given velocity change or acceleration is underdetermined given only the desired change at a
given point.
<b>Sleeping:</b> This call wakes the actor if it is sleeping and the wakeup parameter is true (default).
\param[in] body The rigid body to apply the force to.
\param[in] force Force/impulse to add, defined in the global frame. <b>Range:</b> force vector
\param[in] pos Position in the global frame to add the force at. <b>Range:</b> position vector
\param[in] mode The mode to use when applying the force/impulse(see #PxForceMode).
\param[in] wakeup Specify if the call should wake up the actor.
@see PxForceMode
@see addForceAtLocalPos() addLocalForceAtPos() addLocalForceAtLocalPos()
*/
static void addForceAtPos(PxRigidBody& body, const PxVec3& force, const PxVec3& pos, PxForceMode::Enum mode = PxForceMode::eFORCE, bool wakeup = true);
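// Editor's example (sketch, not part of the original header): applying an impulse at a
// world-space point, e.g. to emulate a localized hit. 'body' and the numeric values are
// assumptions used only for illustration.
//
//   const PxVec3 impulse(0.0f, 10.0f, 0.0f);
//   const PxVec3 hitPoint(1.0f, 0.5f, 0.0f);
//   PxRigidBodyExt::addForceAtPos(*body, impulse, hitPoint, PxForceMode::eIMPULSE);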
/**
\brief Applies a force (or impulse) defined in the global coordinate frame, acting at a particular
point in local coordinates, to the actor.
Note that if the force does not act along the center of mass of the actor, this
will also add the corresponding torque. Because forces are reset at the end of every timestep, you can maintain a
total external force on an object by calling this once every frame.
\note if this call is used to apply a force or impulse to an articulation link, only the link is updated, not the entire
articulation
::PxForceMode determines if the force is to be conventional or impulsive. Only eFORCE and eIMPULSE are supported, as the
force required to produce a given velocity change or acceleration is underdetermined given only the desired change at a
given point.
<b>Sleeping:</b> This call wakes the actor if it is sleeping and the wakeup parameter is true (default).
\param[in] body The rigid body to apply the force to.
\param[in] force Force/impulse to add, defined in the global frame. <b>Range:</b> force vector
\param[in] pos Position in the local frame to add the force at. <b>Range:</b> position vector
\param[in] mode The mode to use when applying the force/impulse(see #PxForceMode).
\param[in] wakeup Specify if the call should wake up the actor.
@see PxForceMode
@see addForceAtPos() addLocalForceAtPos() addLocalForceAtLocalPos()
*/
static void addForceAtLocalPos(PxRigidBody& body, const PxVec3& force, const PxVec3& pos, PxForceMode::Enum mode = PxForceMode::eFORCE, bool wakeup = true);
/**
\brief Applies a force (or impulse) defined in the actor local coordinate frame, acting at a
particular point in global coordinates, to the actor.
Note that if the force does not act along the center of mass of the actor, this
will also add the corresponding torque. Because forces are reset at the end of every timestep, you can maintain a
total external force on an object by calling this once every frame.
\note if this call is used to apply a force or impulse to an articulation link, only the link is updated, not the entire
articulation
::PxForceMode determines if the force is to be conventional or impulsive. Only eFORCE and eIMPULSE are supported, as the
force required to produce a given velocity change or acceleration is underdetermined given only the desired change at a
given point.
<b>Sleeping:</b> This call wakes the actor if it is sleeping and the wakeup parameter is true (default).
\param[in] body The rigid body to apply the force to.
\param[in] force Force/impulse to add, defined in the local frame. <b>Range:</b> force vector
\param[in] pos Position in the global frame to add the force at. <b>Range:</b> position vector
\param[in] mode The mode to use when applying the force/impulse(see #PxForceMode).
\param[in] wakeup Specify if the call should wake up the actor.
@see PxForceMode
@see addForceAtPos() addForceAtLocalPos() addLocalForceAtLocalPos()
*/
static void addLocalForceAtPos(PxRigidBody& body, const PxVec3& force, const PxVec3& pos, PxForceMode::Enum mode = PxForceMode::eFORCE, bool wakeup = true);
/**
\brief Applies a force (or impulse) defined in the actor local coordinate frame, acting at a
particular point in local coordinates, to the actor.
Note that if the force does not act along the center of mass of the actor, this
will also add the corresponding torque. Because forces are reset at the end of every timestep, you can maintain a
total external force on an object by calling this once every frame.
\note if this call is used to apply a force or impulse to an articulation link, only the link is updated, not the entire
articulation
::PxForceMode determines if the force is to be conventional or impulsive. Only eFORCE and eIMPULSE are supported, as the
force required to produce a given velocity change or acceleration is underdetermined given only the desired change at a
given point.
<b>Sleeping:</b> This call wakes the actor if it is sleeping and the wakeup parameter is true (default).
\param[in] body The rigid body to apply the force to.
\param[in] force Force/impulse to add, defined in the local frame. <b>Range:</b> force vector
\param[in] pos Position in the local frame to add the force at. <b>Range:</b> position vector
\param[in] mode The mode to use when applying the force/impulse(see #PxForceMode).
\param[in] wakeup Specify if the call should wake up the actor.
@see PxForceMode
@see addForceAtPos() addForceAtLocalPos() addLocalForceAtPos()
*/
static void addLocalForceAtLocalPos(PxRigidBody& body, const PxVec3& force, const PxVec3& pos, PxForceMode::Enum mode = PxForceMode::eFORCE, bool wakeup = true);
/**
\brief Computes the velocity of a point given in world coordinates if it were attached to the
specified body and moving with it.
\param[in] body The rigid body the point is attached to.
\param[in] pos Position we wish to determine the velocity for, defined in the global frame. <b>Range:</b> position vector
\return The velocity of point in the global frame.
@see getLocalPointVelocity()
*/
static PxVec3 getVelocityAtPos(const PxRigidBody& body, const PxVec3& pos);
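// Editor's example (sketch, not part of the original header): estimating the world-space
// velocity of a point rigidly attached to the body. Names and values are illustrative.
//
//   const PxVec3 contactPoint(0.0f, 1.0f, 0.0f);               // world-space position
//   const PxVec3 pointVelocity = PxRigidBodyExt::getVelocityAtPos(*body, contactPoint);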
/**
\brief Computes the velocity of a point given in local coordinates if it were attached to the
specified body and moving with it.
\param[in] body The rigid body the point is attached to.
\param[in] pos Position we wish to determine the velocity for, defined in the local frame. <b>Range:</b> position vector
\return The velocity of point in the local frame.
@see getLocalPointVelocity()
*/
static PxVec3 getLocalVelocityAtLocalPos(const PxRigidBody& body, const PxVec3& pos);
/**
\brief Computes the velocity of a point (offset from the origin of the body) given in world coordinates if it were attached to the
specified body and moving with it.
\param[in] body The rigid body the point is attached to.
\param[in] pos Position (offset from the origin of the body) we wish to determine the velocity for, defined in the global frame. <b>Range:</b> position vector
\return The velocity of point (offset from the origin of the body) in the global frame.
@see getLocalPointVelocity()
*/
static PxVec3 getVelocityAtOffset(const PxRigidBody& body, const PxVec3& pos);
/**
\brief Compute the change to linear and angular velocity that would occur if an impulsive force and torque were to be applied to a specified rigid body.
The rigid body is left unaffected unless a subsequent independent call is executed that actually applies the computed changes to velocity and angular velocity.
\note if this call is used to determine the velocity delta for an articulation link, only the mass properties of the link are taken into account.
@see PxRigidBody::getLinearVelocity, PxRigidBody::setLinearVelocity, PxRigidBody::getAngularVelocity, PxRigidBody::setAngularVelocity
\param[in] body The body under consideration.
\param[in] impulsiveForce The impulsive force that would be applied to the specified rigid body.
\param[in] impulsiveTorque The impulsive torque that would be applied to the specified rigid body.
\param[out] deltaLinearVelocity The change in linear velocity that would arise if impulsiveForce was to be applied to the specified rigid body.
\param[out] deltaAngularVelocity The change in angular velocity that would arise if impulsiveTorque was to be applied to the specified rigid body.
*/
static void computeVelocityDeltaFromImpulse(const PxRigidBody& body, const PxVec3& impulsiveForce, const PxVec3& impulsiveTorque, PxVec3& deltaLinearVelocity, PxVec3& deltaAngularVelocity);
/**
\brief Computes the linear and angular velocity change vectors for a given impulse at a world space position taking a mass and inertia scale into account
This function is useful for extracting the respective linear and angular velocity changes from a contact or joint when the mass/inertia ratios have been adjusted.
\note if this call is used to determine the velocity delta for an articulation link, only the mass properties of the link are taken into account.
\param[in] body The rigid body
\param[in] globalPose The body's world space transform
\param[in] point The point in world space where the impulse is applied
\param[in] impulse The impulse vector in world space
\param[in] invMassScale The inverse mass scale
\param[in] invInertiaScale The inverse inertia scale
\param[out] deltaLinearVelocity The linear velocity change
\param[out] deltaAngularVelocity The angular velocity change
*/
static void computeVelocityDeltaFromImpulse(const PxRigidBody& body, const PxTransform& globalPose, const PxVec3& point, const PxVec3& impulse, const PxReal invMassScale,
const PxReal invInertiaScale, PxVec3& deltaLinearVelocity, PxVec3& deltaAngularVelocity);
/**
\brief Computes the linear and angular impulse vectors for a given impulse at a world space position taking a mass and inertia scale into account
This function is useful for extracting the respective linear and angular impulses from a contact or joint when the mass/inertia ratios have been adjusted.
\param[in] body The rigid body
\param[in] globalPose The body's world space transform
\param[in] point The point in world space where the impulse is applied
\param[in] impulse The impulse vector in world space
\param[in] invMassScale The inverse mass scale
\param[in] invInertiaScale The inverse inertia scale
\param[out] linearImpulse The linear impulse
\param[out] angularImpulse The angular impulse
*/
static void computeLinearAngularImpulse(const PxRigidBody& body, const PxTransform& globalPose, const PxVec3& point, const PxVec3& impulse, const PxReal invMassScale,
const PxReal invInertiaScale, PxVec3& linearImpulse, PxVec3& angularImpulse);
/**
\brief Performs a linear sweep through space with the body's geometry objects.
\note Supported geometries are: box, sphere, capsule, convex. Other geometry types will be ignored.
\note If eTOUCH is returned from the filter callback, it will trigger an error and the hit will be discarded.
The function sweeps all shapes attached to a given rigid body through space and reports the nearest
object in the scene which intersects any of the shapes' swept paths.
Information about the closest intersection is written to a #PxSweepHit structure.
\param[in] body The rigid body to sweep.
\param[in] scene The scene object to process the query.
\param[in] unitDir Normalized direction of the sweep.
\param[in] distance Sweep distance. Needs to be larger than 0.
\param[in] outputFlags Specifies which properties should be written to the hit information.
\param[out] closestHit Closest hit result.
\param[out] shapeIndex Index of the body shape that caused the closest hit.
\param[in] filterData If any word in filterData.data is non-zero then filterData.data will be used for filtering,
otherwise shape->getQueryFilterData() will be used instead.
\param[in] filterCall Custom filtering logic (optional). Only used if the corresponding #PxQueryFlag flags are set. If NULL, all hits are assumed to be blocking.
\param[in] cache Cached hit shape (optional). Ray is tested against cached shape first then against the scene.
Note: Filtering is not executed for a cached shape if supplied; instead, if a hit is found, it is assumed to be a blocking hit.
\param[in] inflation This parameter creates a skin around the swept geometry which increases its extents for sweeping. The sweep will register a hit as soon as the skin touches a shape, and will return the corresponding distance and normal.
\return True if a blocking hit was found.
@see PxScene PxQueryFlags PxFilterData PxSweepHit
*/
static bool linearSweepSingle(
PxRigidBody& body, PxScene& scene, const PxVec3& unitDir, const PxReal distance,
PxHitFlags outputFlags,
PxSweepHit& closestHit, PxU32& shapeIndex,
const PxQueryFilterData& filterData = PxQueryFilterData(),
PxQueryFilterCallback* filterCall = NULL,
const PxQueryCache* cache = NULL,
const PxReal inflation=0.0f);
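// Editor's example (sketch, not part of the original header): sweeping a body's shapes two
// units along +X and reading the closest blocking hit. 'body' and 'scene' are assumed to be
// valid; the direction, distance and hit flags are illustrative.
//
//   PxSweepHit hit;
//   PxU32 hitShapeIndex;
//   const bool blocked = PxRigidBodyExt::linearSweepSingle(*body, *scene,
//       PxVec3(1.0f, 0.0f, 0.0f), 2.0f, PxHitFlag::eDEFAULT, hit, hitShapeIndex);
//   if(blocked)
//   {
//       // hit.distance, hit.position and hit.normal describe the closest blocking hit
//   }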
/**
\brief Performs a linear sweep through space with the body's geometry objects, returning all overlaps.
\note Supported geometries are: box, sphere, capsule, convex. Other geometry types will be ignored.
This function sweeps all shapes attached to a given rigid body through space and reports all
objects in the scene that intersect any of the shapes' swept paths until there are no more objects to report
or a blocking hit is encountered.
\param[in] body The rigid body to sweep.
\param[in] scene The scene object to process the query.
\param[in] unitDir Normalized direction of the sweep.
\param[in] distance Sweep distance. Needs to be larger than 0.
\param[in] outputFlags Specifies which properties should be written to the hit information.
\param[out] touchHitBuffer Sweep hit information buffer. If the buffer overflows, an arbitrary subset of touch hits
is returned (typically the query should be restarted with a larger buffer).
\param[out] touchHitShapeIndices After the query is completed, touchHitShapeIndices[i] will contain the index of the body shape that caused the hit stored in touchHitBuffer[i]
\param[in] touchHitBufferSize Size of both touch hit buffers in elements.
\param[out] block Closest blocking hit is returned via this reference.
\param[out] blockingShapeIndex Set to -1 if a blocking hit was not found, otherwise set to the closest blocking hit's shape index. The touching hits are reported separately in touchHitBuffer.
\param[out] overflow Set to true if touchHitBuffer didn't have enough space for all results. Touch hits will be incomplete if overflow occurred. Possible solution is to restart the query with a larger buffer.
\param[in] filterData If any word in filterData.data is non-zero then filterData.data will be used for filtering,
otherwise shape->getQueryFilterData() will be used instead.
\param[in] filterCall Custom filtering logic (optional). Only used if the corresponding #PxQueryFlag flags are set. If NULL, all hits are assumed to be blocking.
\param[in] cache Cached hit shape (optional). Ray is tested against cached shape first then against the scene.
Note: Filtering is not executed for a cached shape if supplied; instead, if a hit is found, it is assumed to be a blocking hit.
\param[in] inflation This parameter creates a skin around the swept geometry which increases its extents for sweeping. The sweep will register a hit as soon as the skin touches a shape, and will return the corresponding distance and normal.
\return the number of touching hits. If overflow is set to true, the results are incomplete. In case of overflow there are also no guarantees that all touching hits returned are closer than the blocking hit.
@see PxScene PxQueryFlags PxFilterData PxSweepHit
*/
static PxU32 linearSweepMultiple(
PxRigidBody& body, PxScene& scene, const PxVec3& unitDir, const PxReal distance,
PxHitFlags outputFlags,
PxSweepHit* touchHitBuffer, PxU32* touchHitShapeIndices, PxU32 touchHitBufferSize,
PxSweepHit& block, PxI32& blockingShapeIndex, bool& overflow,
const PxQueryFilterData& filterData = PxQueryFilterData(),
PxQueryFilterCallback* filterCall = NULL,
const PxQueryCache* cache = NULL, const PxReal inflation = 0.0f);
};
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 25,577 | C | 55.966592 | 267 | 0.77382 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxGearJoint.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_GEAR_JOINT_H
#define PX_GEAR_JOINT_H
/** \addtogroup extensions
@{
*/
#include "extensions/PxJoint.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxGearJoint;
/**
\brief Create a gear Joint.
\param[in] physics The physics SDK
\param[in] actor0 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localFrame0 The position and orientation of the joint relative to actor0
\param[in] actor1 An actor to which the joint is attached. NULL may be used to attach the joint to a specific point in the world frame
\param[in] localFrame1 The position and orientation of the joint relative to actor1
@see PxGearJoint
*/
PxGearJoint* PxGearJointCreate(PxPhysics& physics, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1);
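// Editor's example (sketch, not part of the original header): gearing two revolute joints
// together with a 2:1 ratio. The actors, joint frames and the existing hinge joints
// (hinge0/hinge1) are assumptions used only for illustration.
//
//   PxGearJoint* gear = PxGearJointCreate(*physics, gearActor0, frame0, gearActor1, frame1);
//   gear->setHinges(hinge0, hinge1);   // hinge0/hinge1: e.g. PxRevoluteJoint instances
//   gear->setGearRatio(2.0f);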
/**
\brief A joint that connects two existing revolute joints and constrains their relative angular velocity and position with respect to each other.
@see PxGearJointCreate PxJoint
*/
class PxGearJoint : public PxJoint
{
public:
/**
\brief Set the hinge/revolute joints connected by the gear joint.
The passed joints can be either PxRevoluteJoint, PxD6Joint or PxArticulationJointReducedCoordinate.
The joints must define degrees of freedom around the twist axis.
Note that these joints are only used to compute the positional error correction term,
used to adjust potential drift between jointed actors. The gear joint can run without
calling this function, but in that case some visible overlap may develop over time between
the teeth of the gear meshes.
\note Calling this function resets the internal positional error correction term.
\param[in] hinge0 The first hinge joint
\param[in] hinge1 The second hinge joint
\return true if success
*/
virtual bool setHinges(const PxBase* hinge0, const PxBase* hinge1) = 0;
/**
\brief Get the hinge/revolute joints connected by the gear joint.
\param[out] hinge0 The first hinge joint
\param[out] hinge1 The second hinge joint
*/
virtual void getHinges(const PxBase*& hinge0, const PxBase*& hinge1) const = 0;
/**
\brief Set the desired gear ratio.
For two gears with n0 and n1 teeth respectively, the gear ratio is n0/n1.
\note You may need to use a negative gear ratio if the joint frames of involved actors are not oriented in the same direction.
\note Calling this function resets the internal positional error correction term.
\param[in] ratio Desired ratio between the two hinges.
*/
virtual void setGearRatio(float ratio) = 0;
/**
\brief Get the gear ratio.
\return Current ratio
*/
virtual float getGearRatio() const = 0;
virtual const char* getConcreteTypeName() const { return "PxGearJoint"; }
protected:
PX_INLINE PxGearJoint(PxType concreteType, PxBaseFlags baseFlags) : PxJoint(concreteType, baseFlags) {}
PX_INLINE PxGearJoint(PxBaseFlags baseFlags) : PxJoint(baseFlags) {}
virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxGearJoint", PxJoint); }
};
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 4,893 | C | 36.646154 | 160 | 0.752708 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxDefaultErrorCallback.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_DEFAULT_ERROR_CALLBACK_H
#define PX_DEFAULT_ERROR_CALLBACK_H
/** \addtogroup extensions
@{
*/
#include "foundation/PxErrorCallback.h"
#include "PxPhysXConfig.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief default implementation of the error callback
This class is provided in order to enable the SDK to be started with the minimum of user code. Typically an application
will use its own error callback, and log the error to file or otherwise make it visible. Warnings and error messages from
the SDK are usually indicative that changes are required in order for PhysX to function correctly, and should not be ignored.
*/
class PxDefaultErrorCallback : public PxErrorCallback
{
public:
PxDefaultErrorCallback();
virtual ~PxDefaultErrorCallback();
virtual void reportError(PxErrorCode::Enum code, const char* message, const char* file, int line) PX_OVERRIDE;
};
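// Editor's example (sketch, not part of the original header): using the default error callback
// together with a default allocator to create the foundation object. Static storage is used
// because the foundation keeps references to both objects.
//
//   static PxDefaultAllocator gAllocator;
//   static PxDefaultErrorCallback gErrorCallback;
//   PxFoundation* foundation = PxCreateFoundation(PX_PHYSICS_VERSION, gAllocator, gErrorCallback);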
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 2,666 | C | 39.40909 | 126 | 0.762191 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxBinaryConverter.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_BINARY_CONVERTER_H
#define PX_BINARY_CONVERTER_H
/** \addtogroup extensions
@{
*/
#include "common/PxPhysXCommonConfig.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
struct PX_DEPRECATED PxConverterReportMode
{
enum Enum
{
eNONE, //!< Silent mode. If enabled, no information is sent to the error stream.
eNORMAL, //!< Normal mode. If enabled, only important information is sent to the error stream.
eVERBOSE //!< Verbose mode. If enabled, detailed information is sent to the error stream.
};
};
/**
\brief Binary converter for serialized streams.
\deprecated Binary conversion and binary meta data are deprecated.
The binary converter class is targeted at converting binary streams from authoring platforms,
such as windows, osx or linux to any game runtime platform supported by PhysX. Particularly
it is currently not supported to run the converter on a platforms that has an endian mismatch
with the platform corresponding to the source binary file and source meta data.
If you want to use multiple threads for batch conversions, please create one instance
of this class for each thread.
@see PxSerialization.createBinaryConverter
*/
class PX_DEPRECATED PxBinaryConverter
{
public:
/**
\brief Releases binary converter
*/
virtual void release() = 0;
/**
\brief Sets desired report mode.
\param[in] mode Report mode
*/
virtual void setReportMode(PxConverterReportMode::Enum mode) = 0;
/**
\brief Setups source and target meta-data streams
The source meta data provided needs to have the same endianness as the platform the converter is run on.
The meta data needs to be set before calling the conversion method.
\param[in] srcMetaData Source platform's meta-data stream
\param[in] dstMetaData Target platform's meta-data stream
\return True if success
@see PxSerialization::dumpBinaryMetaData
*/
virtual bool setMetaData(PxInputStream& srcMetaData, PxInputStream& dstMetaData) = 0;
/**
\brief Test utility function to compare two sets of meta data.
The meta data needs to be set before calling the compareMetaData method.
This method will issue PxErrorCode::eDEBUG_INFO messages if mismatches are encountered.
\return True if the meta data is equivalent
*/
virtual bool compareMetaData() const = 0;
/**
\brief Converts binary stream from source platform to target platform
The converter needs to be configured with source and destination meta data before calling the conversion method.
The source meta data needs to correspond to the same platform as the source binary data.
\param[in] srcStream Source stream
\param[in] srcSize Number of bytes to convert
\param[in] targetStream Target stream
\return True if success
*/
virtual bool convert(PxInputStream& srcStream, PxU32 srcSize, PxOutputStream& targetStream) = 0;
protected:
PxBinaryConverter() {}
virtual ~PxBinaryConverter() {}
};
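// Editor's example (sketch, not part of the original header): a typical conversion flow. The
// stream objects are placeholders; the converter itself is obtained through
// PxSerialization::createBinaryConverter(), which is deprecated along with this class.
//
//   PxBinaryConverter* converter = PxSerialization::createBinaryConverter();
//   if(converter && converter->setMetaData(srcMetaDataStream, dstMetaDataStream))
//       converter->convert(srcBinaryStream, srcBinarySize, dstBinaryStream);
//   if(converter)
//       converter->release();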
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 4,680 | C | 33.674074 | 114 | 0.75812 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxSimpleFactory.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_SIMPLE_FACTORY_H
#define PX_SIMPLE_FACTORY_H
/** \addtogroup extensions
@{
*/
#include "common/PxPhysXCommonConfig.h"
#include "foundation/PxTransform.h"
#include "foundation/PxPlane.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxPhysics;
class PxMaterial;
class PxRigidActor;
class PxRigidDynamic;
class PxRigidStatic;
class PxGeometry;
class PxShape;
/** \brief simple method to create a PxRigidDynamic actor with a single PxShape.
\param[in] sdk the PxPhysics object
\param[in] transform the global pose of the new object
\param[in] geometry the geometry of the new object's shape, which must be a sphere, capsule, box or convex
\param[in] material the material for the new object's shape
\param[in] density the density of the new object. Must be greater than zero.
\param[in] shapeOffset an optional offset for the new shape, defaults to identity
\return a new dynamic actor with the PxRigidBodyFlag, or NULL if it could
not be constructed
@see PxRigidDynamic PxShapeFlag
*/
PxRigidDynamic* PxCreateDynamic(PxPhysics& sdk,
const PxTransform& transform,
const PxGeometry& geometry,
PxMaterial& material,
PxReal density,
const PxTransform& shapeOffset = PxTransform(PxIdentity));
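// Editor's example (sketch, not part of the original header): a dynamic sphere of radius 0.5
// and density 10, created 10 units above the origin. 'physics', 'material' and 'scene' are
// assumed to be valid objects.
//
//   PxRigidDynamic* ball = PxCreateDynamic(*physics, PxTransform(PxVec3(0.0f, 10.0f, 0.0f)),
//       PxSphereGeometry(0.5f), *material, 10.0f);
//   scene->addActor(*ball);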
/** \brief simple method to create a PxRigidDynamic actor with a single PxShape.
\param[in] sdk the PxPhysics object
\param[in] transform the transform of the new object
\param[in] shape the shape of the new object
\param[in] density the density of the new object. Must be greater than zero.
\return a new dynamic actor with the PxRigidBodyFlag, or NULL if it could
not be constructed
@see PxRigidDynamic PxShapeFlag
*/
PxRigidDynamic* PxCreateDynamic(PxPhysics& sdk,
const PxTransform& transform,
PxShape& shape,
PxReal density);
/** \brief simple method to create a kinematic PxRigidDynamic actor with a single PxShape.
\param[in] sdk the PxPhysics object
\param[in] transform the global pose of the new object
\param[in] geometry the geometry of the new object's shape
\param[in] material the material for the new object's shape
\param[in] density the density of the new object. Must be greater than zero if the object is to participate in simulation.
\param[in] shapeOffset an optional offset for the new shape, defaults to identity
\note unlike PxCreateDynamic, the geometry is not restricted to box, capsule, sphere or convex. However,
kinematics of other geometry types may not participate in simulation collision and may be used only for
triggers or scene queries of moving objects under animation control. In this case the density parameter
will be ignored and the created shape will be set up as a scene query only shape (see #PxShapeFlag::eSCENE_QUERY_SHAPE)
\return a new dynamic actor with the PxRigidBodyFlag::eKINEMATIC set, or NULL if it could
not be constructed
@see PxRigidDynamic PxShapeFlag
*/
PxRigidDynamic* PxCreateKinematic(PxPhysics& sdk,
const PxTransform& transform,
const PxGeometry& geometry,
PxMaterial& material,
PxReal density,
const PxTransform& shapeOffset = PxTransform(PxIdentity));
/** \brief simple method to create a kinematic PxRigidDynamic actor with a single PxShape.
\param[in] sdk the PxPhysics object
\param[in] transform the global pose of the new object
\param[in] density the density of the new object. Must be greater than zero if the object is to participate in simulation.
\param[in] shape the shape of the new object
\note unlike PxCreateDynamic, the geometry is not restricted to box, capsule, sphere or convex. However,
kinematics of other geometry types may not participate in simulation collision and may be used only for
triggers or scene queries of moving objects under animation control. In this case the density parameter
will be ignored and the created shape will be set up as a scene query only shape (see #PxShapeFlag::eSCENE_QUERY_SHAPE)
\return a new dynamic actor with the PxRigidBodyFlag::eKINEMATIC set, or NULL if it could
not be constructed
@see PxRigidDynamic PxShapeFlag
*/
PxRigidDynamic* PxCreateKinematic(PxPhysics& sdk,
const PxTransform& transform,
PxShape& shape,
PxReal density);
/** \brief simple method to create a PxRigidStatic actor with a single PxShape.
\param[in] sdk the PxPhysics object
\param[in] transform the global pose of the new object
\param[in] geometry the geometry of the new object's shape
\param[in] material the material for the new object's shape
\param[in] shapeOffset an optional offset for the new shape, defaults to identity
\return a new static actor, or NULL if it could not be constructed
@see PxRigidStatic
*/
PxRigidStatic* PxCreateStatic(PxPhysics& sdk,
const PxTransform& transform,
const PxGeometry& geometry,
PxMaterial& material,
const PxTransform& shapeOffset = PxTransform(PxIdentity));
/** \brief simple method to create a PxRigidStatic actor with a single PxShape.
\param[in] sdk the PxPhysics object
\param[in] transform the global pose of the new object
\param[in] shape the new object's shape
\return a new static actor, or NULL if it could not be constructed
@see PxRigidStatic
*/
PxRigidStatic* PxCreateStatic(PxPhysics& sdk,
const PxTransform& transform,
PxShape& shape);
/**
\brief create a shape by copying attributes from another shape
The function clones a PxShape. The following properties are copied:
- geometry
- flags
- materials
- actor-local pose
- contact offset
- rest offset
- simulation filter data
- query filter data
- torsional patch radius
- minimum torsional patch radius
The following are not copied and retain their default values:
- name
- user data
\param[in] physicsSDK - the physics SDK used to allocate the shape
\param[in] shape the shape from which to take the attributes.
\param[in] isExclusive whether the new shape should be an exclusive or shared shape.
\return the newly-created shape
*/
PxShape* PxCloneShape(PxPhysics& physicsSDK,
const PxShape& shape,
bool isExclusive);
/**
\brief create a static body by copying attributes from another rigid actor
The function clones a PxRigidDynamic or PxRigidStatic as a PxRigidStatic. A uniform scale is applied. The following properties are copied:
- shapes
- actor flags
- owner client and client behavior bits
- dominance group
The following are not copied and retain their default values:
- name
- joints or observers
- aggregate or scene membership
- user data
\note Transforms are not copied with bit-exact accuracy.
\param[in] physicsSDK - the physics SDK used to allocate the rigid static
\param[in] actor the rigid actor from which to take the attributes.
\param[in] transform the transform of the new static.
\return the newly-created rigid static
*/
PxRigidStatic* PxCloneStatic(PxPhysics& physicsSDK,
const PxTransform& transform,
const PxRigidActor& actor);
/**
\brief create a dynamic body by copying attributes from an existing body
The following properties are copied:
- shapes
- actor flags, rigidDynamic flags and rigidDynamic lock flags
- mass, moment of inertia, and center of mass frame
- linear and angular velocity
- linear and angular damping
- maximum linear velocity
- maximum angular velocity
- position and velocity solver iterations
- maximum depenetration velocity
- sleep threshold
- contact report threshold
- dominance group
- owner client and client behavior bits
- name pointer
- kinematic target
The following are not copied and retain their default values:
- name
- joints or observers
- aggregate or scene membership
- sleep timer
- user data
\note Transforms are not copied with bit-exact accuracy.
\param[in] physicsSDK PxPhysics - the physics SDK used to allocate the rigid static
\param[in] body the rigid dynamic to clone.
\param[in] transform the transform of the new dynamic
\return the newly-created rigid static
*/
PxRigidDynamic* PxCloneDynamic(PxPhysics& physicsSDK,
const PxTransform& transform,
const PxRigidDynamic& body);
/** \brief create a plane actor. The plane equation is n.x + d = 0
\param[in] sdk the PxPhysics object
\param[in] plane a plane of the form n.x + d = 0
\param[in] material the material for the new object's shape
\return a new static actor, or NULL if it could not be constructed
@see PxRigidStatic
*/
PxRigidStatic* PxCreatePlane(PxPhysics& sdk,
const PxPlane& plane,
PxMaterial& material);
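// Editor's example (sketch, not part of the original header): a static ground plane with normal
// +Y passing through the origin. 'physics', 'material' and 'scene' are assumed to be valid.
//
//   PxRigidStatic* ground = PxCreatePlane(*physics, PxPlane(0.0f, 1.0f, 0.0f, 0.0f), *material);
//   scene->addActor(*ground);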
/**
\brief scale a rigid actor by a uniform scale
The geometry and relative positions of the actor are multiplied by the given scale value. If the actor is a rigid body or an
articulation link and the scaleMassProps value is true, the mass properties are scaled assuming the density is constant: the
center of mass is linearly scaled, the mass is multiplied by the cube of the scale, and the inertia tensor by the fifth power of the scale.
\param[in] actor a rigid actor
\param[in] scale the scale by which to multiply the actor. Must be >0.
\param[in] scaleMassProps whether to scale the mass properties
*/
void PxScaleRigidActor(PxRigidActor& actor, PxReal scale, bool scaleMassProps = true);
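// Editor's example (sketch, not part of the original header): uniformly doubling an actor's
// size and rescaling its mass properties accordingly. 'actor' is an assumption for illustration.
//
//   PxScaleRigidActor(*actor, 2.0f);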
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 11,038 | C | 36.043624 | 140 | 0.758108 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxRigidActorExt.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_RIGID_ACTOR_EXT_H
#define PX_RIGID_ACTOR_EXT_H
/** \addtogroup extensions
@{
*/
#include "PxPhysXConfig.h"
#include "PxPhysics.h"
#include "PxRigidActor.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxBVH;
/**
\brief utility functions for use with PxRigidActor and subclasses
@see PxRigidActor PxRigidStatic PxRigidBody PxRigidDynamic PxArticulationLink
*/
class PxRigidActorExt
{
public:
/**
\brief Creates a new shape with default properties and a list of materials and adds it to the list of shapes of this actor.
This is equivalent to the following
PxShape* shape(...) = PxGetPhysics().createShape(...); // reference count is 1
actor->attachShape(shape); // increments reference count
shape->release(); // releases user reference, leaving reference count at 1
As a consequence, detachShape() will result in the release of the last reference, and the shape will be deleted.
\note The default shape flags to be set are: eVISUALIZATION, eSIMULATION_SHAPE, eSCENE_QUERY_SHAPE (see #PxShapeFlag).
Triangle mesh, heightfield or plane geometry shapes configured as eSIMULATION_SHAPE are not supported for
non-kinematic PxRigidDynamic instances.
\note Creating compounds with a very large number of shapes may adversely affect performance and stability.
<b>Sleeping:</b> Does <b>NOT</b> wake the actor up automatically.
\param[in] actor the actor to which to attach the shape
\param[in] geometry the geometry of the shape
\param[in] materials a pointer to an array of material pointers
\param[in] materialCount the count of materials
\param[in] shapeFlags optional PxShapeFlags
\return The newly created shape.
@see PxShape PxShape::release(), PxPhysics::createShape(), PxRigidActor::attachShape()
*/
static PxShape* createExclusiveShape(PxRigidActor& actor, const PxGeometry& geometry, PxMaterial*const* materials, PxU16 materialCount,
PxShapeFlags shapeFlags = PxShapeFlag::eVISUALIZATION | PxShapeFlag::eSCENE_QUERY_SHAPE | PxShapeFlag::eSIMULATION_SHAPE)
{
PxShape* shape = PxGetPhysics().createShape(geometry, materials, materialCount, true, shapeFlags);
if(shape)
{
bool status = actor.attachShape(*shape); // attach can fail, if e.g. we try and attach a trimesh simulation shape to a dynamic actor
shape->release(); // if attach fails, we hold the only counted reference, and so this cleans up properly
if(!status)
shape = NULL;
}
return shape;
}
/**
\brief Creates a new shape with default properties and a single material, and adds it to the list of shapes of this actor.
This is equivalent to the following
PxShape* shape(...) = PxGetPhysics().createShape(...); // reference count is 1
actor->attachShape(shape); // increments reference count
shape->release(); // releases user reference, leaving reference count at 1
As a consequence, detachShape() will result in the release of the last reference, and the shape will be deleted.
\note The default shape flags to be set are: eVISUALIZATION, eSIMULATION_SHAPE, eSCENE_QUERY_SHAPE (see #PxShapeFlag).
Triangle mesh, heightfield or plane geometry shapes configured as eSIMULATION_SHAPE are not supported for
non-kinematic PxRigidDynamic instances.
\note Creating compounds with a very large number of shapes may adversely affect performance and stability.
<b>Sleeping:</b> Does <b>NOT</b> wake the actor up automatically.
\param[in] actor the actor to which to attach the shape
\param[in] geometry the geometry of the shape
\param[in] material the material for the shape
\param[in] shapeFlags optional PxShapeFlags
\return The newly created shape.
@see PxShape PxShape::release(), PxPhysics::createShape(), PxRigidActor::attachShape()
*/
static PX_FORCE_INLINE PxShape* createExclusiveShape(PxRigidActor& actor, const PxGeometry& geometry, const PxMaterial& material,
PxShapeFlags shapeFlags = PxShapeFlag::eVISUALIZATION | PxShapeFlag::eSCENE_QUERY_SHAPE | PxShapeFlag::eSIMULATION_SHAPE)
{
PxMaterial* materialPtr = const_cast<PxMaterial*>(&material);
return createExclusiveShape(actor, geometry, &materialPtr, 1, shapeFlags);
}
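	/*
		Illustrative usage sketch (not part of the SDK API): attaches an exclusive box shape with a single
		material to an actor. The variables actor and material are assumed to exist already.

		void addBoxShape(PxRigidActor& actor, const PxMaterial& material)
		{
			PxShape* shape = PxRigidActorExt::createExclusiveShape(actor, PxBoxGeometry(0.5f, 0.5f, 0.5f), material);
			PX_ASSERT(shape);	// NULL indicates the shape could not be attached, e.g. an unsupported geometry/actor combination
			PX_UNUSED(shape);
		}
	*/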
/**
\brief Gets a list of bounds based on the shapes in a rigid actor. This list can be used to cook/create
a bounding volume hierarchy through the PxCooking API.
\param[in] actor The actor from which the bounds list is retrieved.
\param[out] numBounds Number of bounds in returned list.
@see PxShape PxBVH PxCooking::createBVH PxCooking::cookBVH
*/
static PxBounds3* getRigidActorShapeLocalBoundsList(const PxRigidActor& actor, PxU32& numBounds);
/**
\brief Convenience function to create a PxBVH object from a PxRigidActor.
The computed PxBVH can then be used in PxScene::addActor() or PxAggregate::addActor().
After adding the actor & BVH to the scene/aggregate, release the PxBVH object by calling PxBVH::release().
\param[in] physics The physics object. The function will retrieve the insertion callback from it.
\param[in] actor The actor to compute a PxBVH for.
\return The PxBVH for this actor.
@see PxBVH PxScene::addActor PxAggregate::addActor
*/
static PxBVH* createBVHFromActor(PxPhysics& physics, const PxRigidActor& actor);
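	/*
		Illustrative usage sketch (not part of the SDK API): computes a BVH for an actor with many shapes and
		adds both to the scene. The variables physics, scene and actor are assumed to exist already.

		void addActorWithBVH(PxPhysics& physics, PxScene& scene, PxRigidActor& actor)
		{
			PxBVH* bvh = PxRigidActorExt::createBVHFromActor(physics, actor);
			scene.addActor(actor, bvh);	// the BVH accelerates scene queries against this actor's shapes
			if(bvh)
				bvh->release();	// the user reference is no longer needed after adding the actor
		}
	*/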
};
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 6,988 | C | 40.850299 | 143 | 0.75601 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxSerialization.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_SERIALIZATION_H
#define PX_SERIALIZATION_H
/** \addtogroup extensions
@{
*/
#include "PxPhysXConfig.h"
#include "common/PxBase.h"
#include "cooking/PxCooking.h"
#include "foundation/PxIO.h"
#include "common/PxTolerancesScale.h"
#include "common/PxTypeInfo.h"
#include "common/PxStringTable.h"
/**
PX_BINARY_SERIAL_VERSION is used to version the PhysX binary data and meta data. The global unique identifier of the PhysX SDK needs to match
the one in the data and meta data, otherwise they are considered incompatible. A 32 character wide GUID can be generated with https://www.guidgenerator.com/ for example.
*/
#define PX_BINARY_SERIAL_VERSION "F57A6B4570DF49E38116AB1E0284A98B"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxBinaryConverter;
/**
\brief Utility functions for serialization
@see PxCollection, PxSerializationRegistry
*/
class PxSerialization
{
public:
/**
\brief Additional PxScene and PxPhysics options stored in XML serialized data.
\deprecated Xml serialization is deprecated. An alternative serialization system is provided through USD Physics.
The PxXmlMiscParameter parameter can be serialized and deserialized along with PxCollection instances (XML only).
This is for application use only and has no impact on how objects are serialized or deserialized.
@see PxSerialization::createCollectionFromXml, PxSerialization::serializeCollectionToXml
*/
struct PX_DEPRECATED PxXmlMiscParameter
{
/**
\brief Up vector for the scene reference coordinate system.
*/
PxVec3 upVector;
/**
\brief Tolerances scale to be used for the scene.
*/
PxTolerancesScale scale;
PxXmlMiscParameter() : upVector(0) {}
PxXmlMiscParameter(PxVec3& inUpVector, PxTolerancesScale inScale) : upVector(inUpVector), scale(inScale) {}
};
/**
\brief Returns whether the collection is serializable with the externalReferences collection.
Some definitions to explain whether a collection can be serialized or not:
For definitions of <b>requires</b> and <b>complete</b> see #PxSerialization::complete
A serializable object is <b>subordinate</b> if it cannot be serialized on its own
The following objects are subordinate:
- articulation links
- articulation joints
- joints
A collection C can be serialized with external references collection D iff
- C is complete relative to D (no dangling references)
- Every object in D required by an object in C has a valid ID (no unnamed references)
- Every subordinate object in C is required by another object in C (no orphans)
\param[in] collection Collection to be checked
\param[in] sr PxSerializationRegistry instance with information about registered classes.
\param[in] externalReferences the external References collection
\return Whether the collection is serializable
@see PxSerialization::complete, PxSerialization::serializeCollectionToBinary, PxSerialization::serializeCollectionToXml, PxSerializationRegistry
*/
static bool isSerializable(PxCollection& collection, PxSerializationRegistry& sr, const PxCollection* externalReferences = NULL);
/**
\brief Adds to a collection all objects such that it can be successfully serialized.
A collection C is complete relative to an other collection D if every object required by C is either in C or D.
This function adds objects to a collection, such that it becomes complete with respect to the exceptFor collection.
Completeness is needed for serialization. See #PxSerialization::serializeCollectionToBinary,
#PxSerialization::serializeCollectionToXml.
SDK objects require other SDK objects according to the following rules:
- joints require their actors and constraint
- rigid actors require their shapes
- shapes require their material(s) and mesh (triangle mesh, convex mesh or height field), if any
- articulations require their links and joints
- aggregates require their actors
If followJoints is specified another rule is added:
- actors require their joints
Specifying followJoints will cause whole chains of jointed actors to be added to the collection. Following chains
is interrupted whenever an object in exceptFor is encountered.
\param[in,out] collection Collection which is completed
\param[in] sr PxSerializationRegistry instance with information about registered classes.
\param[in] exceptFor Optional exemption collection
\param[in] followJoints Specifies whether joints should be added for jointed actors
@see PxCollection, PxSerialization::serializeCollectionToBinary, PxSerialization::serializeCollectionToXml, PxSerializationRegistry
*/
static void complete(PxCollection& collection, PxSerializationRegistry& sr, const PxCollection* exceptFor = NULL, bool followJoints = false);
/**
\brief Creates PxSerialObjectId values for unnamed objects in a collection.
Creates PxSerialObjectId names for unnamed objects in a collection starting at a base value and incrementing,
skipping values that are already assigned to objects in the collection.
\param[in,out] collection Collection for which names are created
\param[in] base Start address for PxSerialObjectId names
@see PxCollection
*/
static void createSerialObjectIds(PxCollection& collection, const PxSerialObjectId base);
/**
\brief Creates a PxCollection from XML data.
\deprecated Xml serialization is deprecated. An alternative serialization system is provided through USD Physics.
\param inputData The input data containing the XML collection.
\param params Cooking parameters used for sdk object instantiation.
\param sr PxSerializationRegistry instance with information about registered classes.
\param externalRefs PxCollection used to resolve external references.
\param stringTable PxStringTable instance used for storing object names.
\param outArgs Optional parameters of physics and scene deserialized from XML. See #PxSerialization::PxXmlMiscParameter
\return a pointer to a PxCollection if successful or NULL if it failed.
@see PxCollection, PxSerializationRegistry, PxInputData, PxStringTable, PxCooking, PxSerialization::PxXmlMiscParameter
*/
PX_DEPRECATED static PxCollection* createCollectionFromXml(PxInputData& inputData, const PxCookingParams& params, PxSerializationRegistry& sr, const PxCollection* externalRefs = NULL, PxStringTable* stringTable = NULL, PxXmlMiscParameter* outArgs = NULL);
/**
\brief Deserializes a PxCollection from memory.
Creates a collection from memory. If the collection has external dependencies another collection
can be provided to resolve these.
The memory block provided has to be 128 bytes aligned and contain a contiguous serialized collection as written
by PxSerialization::serializeCollectionToBinary. The contained binary data needs to be compatible with the current binary format version
which is defined by "PX_PHYSICS_VERSION_MAJOR.PX_PHYSICS_VERSION_MINOR.PX_PHYSICS_VERSION_BUGFIX-PX_BINARY_SERIAL_VERSION".
For a list of compatible sdk releases refer to the documentation of PX_BINARY_SERIAL_VERSION.
\param[in] memBlock Pointer to memory block containing the serialized collection
\param[in] sr PxSerializationRegistry instance with information about registered classes.
\param[in] externalRefs Collection to resolve external dependencies
@see PxCollection, PxSerialization::complete, PxSerialization::serializeCollectionToBinary, PxSerializationRegistry, PX_BINARY_SERIAL_VERSION
*/
static PxCollection* createCollectionFromBinary(void* memBlock, PxSerializationRegistry& sr, const PxCollection* externalRefs = NULL);
/**
\brief Serializes a physics collection to an XML output stream.
\deprecated Xml serialization is deprecated. An alternative serialization system is provided through USD Physics.
The collection to be serialized needs to be complete, see #PxSerialization::complete.
Optionally the XML may contain meshes in binary cooked format for fast loading. It does this when a valid non-null cooking parameters pointer is provided.
\note Serialization of objects in a scene that is simultaneously being simulated is not supported and leads to undefined behavior.
\param outputStream Stream to save collection to.
\param collection PxCollection instance which is serialized. The collection needs to be complete with respect to the externalRefs collection.
\param sr PxSerializationRegistry instance with information about registered classes.
\param params Optional pointer to cooking params. If provided, cooked mesh data is cached for fast loading.
\param externalRefs Collection containing external references.
\param inArgs Optional parameters of physics and scene serialized to XML along with the collection. See #PxSerialization::PxXmlMiscParameter
\return true if the collection is successfully serialized.
@see PxCollection, PxOutputStream, PxSerializationRegistry, PxCooking, PxSerialization::PxXmlMiscParameter
*/
PX_DEPRECATED static bool serializeCollectionToXml(PxOutputStream& outputStream, PxCollection& collection, PxSerializationRegistry& sr, const PxCookingParams* params = NULL, const PxCollection* externalRefs = NULL, PxXmlMiscParameter* inArgs = NULL);
/**
\brief Serializes a collection to a binary stream.
Serializes a collection to a stream. In order to resolve external dependencies the externalReferences collection has to be provided.
Optionally, names of objects that were set, for example with #PxActor::setName, are serialized along with the objects.
The collection can be successfully serialized if isSerializable(collection) returns true. See #isSerializable.
The implementation of the output stream needs to fulfill the requirements on the memory block input taken by
PxSerialization::createCollectionFromBinary.
\note Serialization of objects in a scene that is simultaneously being simulated is not supported and leads to undefined behavior.
\param[out] outputStream into which the collection is serialized
\param[in] collection Collection to be serialized
\param[in] sr PxSerializationRegistry instance with information about registered classes.
\param[in] externalRefs Collection used to resolve external dependencies
\param[in] exportNames Specifies whether object names are serialized
\return Whether serialization was successful
@see PxCollection, PxOutputStream, PxSerialization::complete, PxSerialization::createCollectionFromBinary, PxSerializationRegistry
*/
static bool serializeCollectionToBinary(PxOutputStream& outputStream, PxCollection& collection, PxSerializationRegistry& sr, const PxCollection* externalRefs = NULL, bool exportNames = false );
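	/*
		Illustrative round-trip sketch (not part of the SDK API): completes a collection, serializes it to a
		memory stream and deserializes a new collection from a 128-byte aligned copy of that data. The
		variables physics and collection are assumed to exist; the aligned allocation is only sketched via a
		hypothetical platformAlignedAlloc helper.

		bool roundTrip(PxPhysics& physics, PxCollection& collection)
		{
			PxSerializationRegistry* sr = PxSerialization::createSerializationRegistry(physics);
			PxSerialization::complete(collection, *sr);	// pull in required objects, e.g. shapes and materials
			PxDefaultMemoryOutputStream outStream;
			const bool ok = PxSerialization::serializeCollectionToBinary(outStream, collection, *sr);
			if(ok)
			{
				// createCollectionFromBinary requires a 128-byte aligned, persistent memory block
				void* block = platformAlignedAlloc(outStream.getSize(), 128);	// hypothetical helper
				PxMemCopy(block, outStream.getData(), outStream.getSize());
				PxCollection* loaded = PxSerialization::createCollectionFromBinary(block, *sr);
				PX_UNUSED(loaded);	// the block must remain valid for the lifetime of the deserialized objects
			}
			sr->release();
			return ok;
		}
	*/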
/**
\brief Serializes a collection to a binary stream.
\deprecated Deterministic binary serialization is deprecated. PxSerialization::serializeCollectionToBinary might become deterministic in the future.
Convenience function that serializes a collection to a stream while rebasing memory addresses and handles
to achieve a deterministic output, independent of the PhysX runtime environment the objects have been created in.
The same functionality can be achieved by manually
- creating a binary data stream with PxSerialization::serializeCollectionToBinary
- producing the binary meta data of the current runtime platform with PxSerialization::dumpBinaryMetaData
- converting the binary data stream with the PxBinaryConverter, using the binary meta for both source and destination
@see PxSerialization::serializeCollectionToBinary, PxSerialization::dumpBinaryMetaData, PxBinaryConverter
*/
PX_DEPRECATED static bool serializeCollectionToBinaryDeterministic(PxOutputStream& outputStream, PxCollection& collection, PxSerializationRegistry& sr, const PxCollection* externalRefs = NULL, bool exportNames = false);
/**
\brief Dumps the binary meta-data to a stream.
\deprecated Binary conversion and binary meta data are deprecated.
A meta-data file contains information about the SDK's internal classes and about custom user types ready
for serialization. Such a file is needed to convert binary-serialized data from one platform to another (re-targeting).
The converter needs meta-data files for the source and target platforms to perform conversions.
Custom user types can be supported with PxSerializationRegistry::registerBinaryMetaDataCallback (see the guide for more information).
\param[out] outputStream Stream to write meta data to
\param[in] sr PxSerializationRegistry instance with information about registered classes used for conversion.
@see PxOutputStream, PxSerializationRegistry
*/
PX_DEPRECATED static void dumpBinaryMetaData(PxOutputStream& outputStream, PxSerializationRegistry& sr);
/**
\brief Creates binary converter for re-targeting binary-serialized data.
\deprecated Binary conversion and binary meta data are deprecated.
\return Binary converter instance.
*/
PX_DEPRECATED static PxBinaryConverter* createBinaryConverter();
/**
\brief Creates an application managed registry for serialization.
\param[in] physics Physics SDK to generate create serialization registry
\return PxSerializationRegistry instance.
@see PxSerializationRegistry
*/
static PxSerializationRegistry* createSerializationRegistry(PxPhysics& physics);
};
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 14,930 | C | 48.440397 | 256 | 0.804488 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxRemeshingExt.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_REMESHING_EXT_H
#define PX_REMESHING_EXT_H
/** \addtogroup extensions
@{
*/
#include "foundation/PxVec3.h"
#include "foundation/PxArray.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Provides methods to adjust the tessellation of meshes
*/
class PxRemeshingExt
{
public:
/**
\brief Processes a triangle mesh and makes sure that no triangle edge is longer than the maximal edge length specified
To shorten edges that are too long, additional points get inserted at their center leading to a subdivision of the input mesh.
This process is executed repeatedly until the maximum edge length criterion is satisfied
\param[in,out] triangles The triangles of the mesh where a maximum edge length should be enforced. They will be modified in place during the process.
\param[in,out] points The vertices of the mesh where a maximum edge length should be enforced. They will be modified in place during the process.
\param[in] maxEdgeLength The maximum edge length allowed after processing the input
\param[in] maxIterations The maximum number of subdivision iterations
\param[out] triangleMap An optional map that provides the index of the original triangle for every triangle after the subdivision
\param[in] triangleCountThreshold Optional limit to the number of triangles. Not guaranteed to match exactly, the algorithm will just stop as soon as possible after reaching the limit.
\return True if any remeshing was applied
*/
static bool limitMaxEdgeLength(PxArray<PxU32>& triangles, PxArray<PxVec3>& points, PxReal maxEdgeLength,
PxU32 maxIterations = 100, PxArray<PxU32>* triangleMap = NULL, PxU32 triangleCountThreshold = 0xFFFFFFFF);
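	/*
		Illustrative usage sketch (not part of the SDK API): subdivides a mesh until no edge is longer than
		0.1 length units. The arrays triangles and points are assumed to hold a valid triangle mesh and are
		modified in place.

		void refineMesh(PxArray<PxU32>& triangles, PxArray<PxVec3>& points)
		{
			PxArray<PxU32> triangleMap;	// optional: maps each output triangle back to its source triangle
			const bool changed = PxRemeshingExt::limitMaxEdgeLength(triangles, points, 0.1f, 100, &triangleMap);
			PX_UNUSED(changed);
		}
	*/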
/**
\brief Processes a triangle mesh and attempts to reduce the number of sliver triangles (triangles with a very poor aspect ratio)
To improve the triangle quality, additional points get inserted at edge centers, leading to a subdivision of the input mesh.
This process is executed repeatedly, up to the specified number of iterations, while respecting the maximal edge length specified
\param[in,out] triangles The triangles of the mesh where a maximum edge length should be enforced. They will be modified in place during the process.
\param[in,out] points The vertices of the mesh where a maximum edge length should be enforced. They will be modified in place during the process.
\param[in] maxEdgeLength The maximum edge length allowed after processing the input
\param[in] maxIterations The maximum number of subdivision iterations
\param[out] triangleMap An optional map that provides the index of the original triangle for every triangle after the subdivision
\param[in] triangleCountThreshold Optional limit to the number of triangles. Not guaranteed to match exactly, the algorithm will just stop as soon as possible after reaching the limit.
\return True if any remeshing was applied
*/
static bool reduceSliverTriangles(PxArray<PxU32>& triangles, PxArray<PxVec3>& points, PxReal maxEdgeLength,
PxU32 maxIterations = 3, PxArray<PxU32>* triangleMap = NULL, PxU32 triangleCountThreshold = 0xFFFFFFFF);
};
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 4,885 | C | 52.108695 | 186 | 0.77175 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxParticleClothCooker.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PARTICLE_CLOTH_COOKER_H
#define PX_PARTICLE_CLOTH_COOKER_H
/** \addtogroup extensions
@{
*/
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxVec4.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
namespace ExtGpu
{
/**
\brief Holds all the information for a particle cloth constraint used in the PxParticleClothCooker.
*/
struct PxParticleClothConstraint
{
enum
{
eTYPE_INVALID_CONSTRAINT = 0,
eTYPE_HORIZONTAL_CONSTRAINT = 1,
eTYPE_VERTICAL_CONSTRAINT = 2,
eTYPE_DIAGONAL_CONSTRAINT = 4,
eTYPE_BENDING_CONSTRAINT = 8,
eTYPE_DIAGONAL_BENDING_CONSTRAINT = 16,
eTYPE_ALL = eTYPE_HORIZONTAL_CONSTRAINT | eTYPE_VERTICAL_CONSTRAINT | eTYPE_DIAGONAL_CONSTRAINT | eTYPE_BENDING_CONSTRAINT | eTYPE_DIAGONAL_BENDING_CONSTRAINT
};
PxU32 particleIndexA; //!< The first particle index of this constraint.
PxU32 particleIndexB; //!< The second particle index of this constraint.
PxReal length; //!< The distance between particle A and B.
PxU32 constraintType; //!< The type of constraint, see the constraint type enum.
};
/**
\brief Generates PxParticleClothConstraint constraints that connect the individual particles of a particle cloth.
*/
class PxParticleClothCooker
{
public:
virtual void release() = 0;
/**
\brief Generate the constraint list and triangle index list.
\param[in] constraints A pointer to an array of PxParticleClothConstraint constraints. If NULL, the cooker will generate all the constraints. Otherwise, the user-provided constraints will be added.
\param[in] numConstraints The number of user-provided PxParticleClothConstraint s.
*/
virtual void cookConstraints(const PxParticleClothConstraint* constraints = NULL, const PxU32 numConstraints = 0) = 0;
virtual PxU32* getTriangleIndices() = 0; //!< \return A pointer to the triangle indices.
virtual PxU32 getTriangleIndicesCount() = 0; //!< \return The number of triangle indices.
virtual PxParticleClothConstraint* getConstraints() = 0; //!< \return A pointer to the PxParticleClothConstraint constraints.
virtual PxU32 getConstraintCount() = 0; //!< \return The number of constraints.
virtual void calculateMeshVolume() = 0; //!< Computes the volume of a closed mesh and the constraintScale. Expects vertices in local space - 'close' to the origin.
virtual PxReal getMeshVolume() = 0; //!< \return The mesh volume calculated by PxParticleClothCooker::calculateMeshVolume.
protected:
virtual ~PxParticleClothCooker() {}
};
} // namespace ExtGpu
/**
\brief Creates a PxParticleClothCooker.
\param[in] vertexCount The number of vertices of the particle cloth.
\param[in] inVertices The vertex positions of the particle cloth.
\param[in] triangleIndexCount The number of triangles of the cloth mesh.
\param[in] inTriangleIndices The triangle indices of the cloth mesh.
\param[in] constraintTypeFlags The types of constraints to generate. See PxParticleClothConstraint.
\param[in] verticalDirection The vertical direction of the cloth mesh. This is needed to generate the correct horizontal and vertical constraints to model shear stiffness.
\param[in] bendingConstraintMaxAngle The maximum angle (in radians) considered in the bending constraints.
\return A pointer to the new PxParticleClothCooker.
*/
ExtGpu::PxParticleClothCooker* PxCreateParticleClothCooker(PxU32 vertexCount, physx::PxVec4* inVertices, PxU32 triangleIndexCount, PxU32* inTriangleIndices,
PxU32 constraintTypeFlags = ExtGpu::PxParticleClothConstraint::eTYPE_ALL,
PxVec3 verticalDirection = PxVec3(0.0f, 1.0f, 0.0f), PxReal bendingConstraintMaxAngle = 20.0f*PxTwoPi/360.0f
);
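/*
	Illustrative usage sketch (not part of the SDK API): cooks cloth constraints for a triangle mesh. The
	vertex and index arrays are assumed to describe a valid cloth mesh, with PxVec4 vertices as expected by
	the cooker.

	void cookCloth(PxU32 nbVertices, PxVec4* vertices, PxU32 nbTriangleIndices, PxU32* triangleIndices)
	{
		ExtGpu::PxParticleClothCooker* cooker =
			PxCreateParticleClothCooker(nbVertices, vertices, nbTriangleIndices, triangleIndices);
		cooker->cookConstraints();	// generate all constraint types with default settings
		const PxU32 nbConstraints = cooker->getConstraintCount();
		const ExtGpu::PxParticleClothConstraint* constraints = cooker->getConstraints();
		// the constraints and triangle indices would now be fed into particle cloth creation
		PX_UNUSED(nbConstraints);
		PX_UNUSED(constraints);
		cooker->release();
	}
*/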
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 5,357 | C | 43.280991 | 199 | 0.770581 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxStringTableExt.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_STRING_TABLE_EXT_H
#define PX_STRING_TABLE_EXT_H
#include "foundation/Px.h"
#include "common/PxStringTable.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief a factory class for creating PxStringTable with a specific allocator.
@see PxStringTable
*/
class PxStringTableExt
{
public:
static PxStringTable& createStringTable( physx::PxAllocatorCallback& inAllocator );
};
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif
| 2,142 | C | 37.267856 | 84 | 0.764239 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxConstraintExt.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_CONSTRAINT_EXT_H
#define PX_CONSTRAINT_EXT_H
#include "foundation/PxPreprocessor.h"
/** \addtogroup extensions
@{
*/
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Unique identifiers for extensions classes which implement a constraint based on PxConstraint.
\note Users which want to create their own custom constraint types should choose an ID larger or equal to eNEXT_FREE_ID
and not eINVALID_ID.
@see PxConstraint PxSimulationEventCallback.onConstraintBreak()
*/
struct PxConstraintExtIDs
{
enum Enum
{
eJOINT,
eVEHICLE_SUSP_LIMIT_DEPRECATED,
eVEHICLE_STICKY_TYRE_DEPRECATED,
eVEHICLE_JOINT,
eNEXT_FREE_ID,
eINVALID_ID = 0x7fffffff
};
};
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 2,441 | C | 33.885714 | 119 | 0.759934 |
NVIDIA-Omniverse/PhysX/physx/include/extensions/PxConvexMeshExt.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_CONVEX_MESH_EXT_H
#define PX_CONVEX_MESH_EXT_H
/** \addtogroup extensions
@{
*/
#include "PxPhysXConfig.h"
#include "common/PxPhysXCommonConfig.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxConvexMeshGeometry;
/**
\brief Computes closest polygon of the convex hull geometry for a given impact point
and impact direction. When doing sweeps against a scene, one might want to delay
the rather expensive computation of the hit face index for convexes until it is clear
the information is really needed and then use this method to get the corresponding
face index.
\param[in] convexGeom The convex mesh geometry.
\param[in] geomPose Pose for the geometry object.
\param[in] impactPos Impact position.
\param[in] unitDir Normalized impact direction.
\return Closest face index of the convex geometry.
@see PxTransform PxConvexMeshGeometry
*/
PxU32 PxFindFaceIndex(const PxConvexMeshGeometry& convexGeom,
const PxTransform& geomPose,
const PxVec3& impactPos,
const PxVec3& unitDir);
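/*
	Illustrative usage sketch (not part of the SDK API): resolves the face index of a convex shape hit by a
	sweep. The geometry, its pose and the hit data are assumed to be available from the query that produced
	the hit.

	PxU32 resolveHitFace(const PxConvexMeshGeometry& convexGeom, const PxTransform& geomPose,
						 const PxVec3& hitPosition, const PxVec3& unitSweepDir)
	{
		// deferred, on-demand computation of the hit face; cheaper than requesting it for every sweep
		return PxFindFaceIndex(convexGeom, geomPose, hitPosition, unitSweepDir);
	}
*/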
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 2,815 | C | 38.111111 | 86 | 0.757016 |
NVIDIA-Omniverse/PhysX/physx/include/vehicle2/PxVehicleLimits.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#pragma once
/** \addtogroup vehicle2
@{
*/
#include "foundation/PxPreprocessor.h"
#if !PX_DOXYGEN
namespace physx
{
namespace vehicle2
{
#endif
struct PxVehicleLimits
{
enum Enum
{
eMAX_NB_WHEELS = 20,
eMAX_NB_AXLES = eMAX_NB_WHEELS
};
};
#if !PX_DOXYGEN
} // namespace vehicle2
} // namespace physx
#endif
/** @} */
| 2,030 | C | 34.017241 | 74 | 0.749261 |
NVIDIA-Omniverse/PhysX/physx/include/vehicle2/PxVehicleComponentSequence.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#pragma once
/** \addtogroup vehicle2
@{
*/
#include "foundation/PxAssert.h"
#include "foundation/PxErrors.h"
#include "foundation/PxFoundation.h"
#include "PxVehicleComponent.h"
#if !PX_DOXYGEN
namespace physx
{
namespace vehicle2
{
#endif
struct PxVehicleComponentSequenceLimits
{
enum Enum
{
eMAX_NB_SUBGROUPS = 16,
eMAX_NB_COMPONENTS = 64,
eMAX_NB_SUBGROUPELEMENTS = eMAX_NB_SUBGROUPS + eMAX_NB_COMPONENTS
};
};
struct PxVehicleComponentSequence
{
enum
{
eINVALID_SUBSTEP_GROUP = 0xff
};
PxVehicleComponentSequence()
: mNbComponents(0), mNbSubgroups(1), mNbSubGroupElements(0), mActiveSubgroup(0)
{
}
/**
\brief Add a component to the sequence.
\param[in] component The component to add to the sequence.
\return True on success, else false (for example due to component count limit being reached).
*/
PX_FORCE_INLINE bool add(PxVehicleComponent* component);
/**
\brief Start a substepping group.
\note All components added using #add() will be added to the new substepping group until either the group
is marked as complete with a call to #endSubstepGroup() or a subsequent substepping group is started with
a call to #beginSubstepGroup().
\note Groups can be nested with stacked calls to #beginSubstepGroup().
\note Each group opened by #beginSubstepGroup() must be closed with a complementary #endSubstepGroup() prior to calling #update().
\param[in] nbSubSteps is the number of substeps for the group's sequence. This can be changed with a call to #setSubsteps().
\return Handle for the substepping group on success, else eINVALID_SUBSTEP_GROUP
@see setSubsteps()
@see endSubstepGroup()
*/
PX_FORCE_INLINE PxU8 beginSubstepGroup(const PxU8 nbSubSteps = 1);
/**
\brief End a substepping group
\note The group most recently opened with #beginSubstepGroup() will be closed by this call.
@see setSubsteps()
@see beginSubstepGroup()
*/
PX_FORCE_INLINE void endSubstepGroup()
{
mActiveSubgroup = mSubGroups[mActiveSubgroup].parentGroup;
}
/**
\brief Set the number of substeps to perform for a specific substepping group.
\param[in] subGroupHandle specifies the substepping group
\param[in] nbSteps is the number of times to invoke the sequence of components and groups in the specified substepping group.
@see beginSubstepGroup()
@see endSubstepGroup()
*/
void setSubsteps(const PxU8 subGroupHandle, const PxU8 nbSteps)
{
PX_ASSERT(subGroupHandle < mNbSubgroups);
mSubGroups[subGroupHandle].nbSteps = nbSteps;
}
/**
\brief Update each component in the sequence.
\note If the update method of a component in the sequence returns false, the update process gets aborted.
\param[in] dt is the timestep of the update. The provided value has to be positive.
\param[in] context specifies global quantities of the simulation such as gravitational acceleration.
*/
void update(const PxReal dt, const PxVehicleSimulationContext& context)
{
PX_ASSERT(0 == mActiveSubgroup);
if (dt > 0.0f)
{
updateSubGroup(dt, context, 0, 1);
}
else
{
PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL,
"PxVehicleComponentSequence::update: The timestep must be positive!");
}
}
private:
enum
{
eINVALID_COMPONENT = 0xff,
eINVALID_SUB_GROUP_ELEMENT = 0xff
};
//Elements have the form of a linked list to allow traversal over a list of elements.
//Each element is either a single component or a subgroup.
struct SubGroupElement
{
SubGroupElement()
: childGroup(eINVALID_SUBSTEP_GROUP),
component(eINVALID_COMPONENT),
nextElement(eINVALID_SUB_GROUP_ELEMENT)
{
}
PxU8 childGroup;
PxU8 component;
PxU8 nextElement;
};
//A group is a linked list of elements to be processed in sequence.
//Each group stores the first element in the sequence.
//Each element in the sequence stores the next element in the sequence
//to allow traversal over the list of elements in the group.
struct Group
{
Group()
: parentGroup(eINVALID_SUBSTEP_GROUP),
firstElement(eINVALID_SUB_GROUP_ELEMENT),
nbSteps(1)
{
}
PxU8 parentGroup;
PxU8 firstElement;
PxU8 nbSteps;
};
PxVehicleComponent* mComponents[PxVehicleComponentSequenceLimits::eMAX_NB_COMPONENTS];
PxU8 mNbComponents;
Group mSubGroups[PxVehicleComponentSequenceLimits::eMAX_NB_SUBGROUPS];
PxU8 mNbSubgroups;
SubGroupElement mSubGroupElements[PxVehicleComponentSequenceLimits::eMAX_NB_SUBGROUPELEMENTS];
PxU8 mNbSubGroupElements;
PxU8 mActiveSubgroup;
bool updateSubGroup(const PxReal dt, const PxVehicleSimulationContext& context, const PxU8 groupId, const PxU8 parentStepMultiplier)
{
const PxU8 nbSteps = mSubGroups[groupId].nbSteps;
const PxU8 stepMultiplier = parentStepMultiplier * nbSteps;
const PxReal timestepForGroup = dt / PxReal(stepMultiplier);
for (PxU8 k = 0; k < nbSteps; k++)
{
PxU8 nextElement = mSubGroups[groupId].firstElement;
while (eINVALID_SUB_GROUP_ELEMENT != nextElement)
{
const SubGroupElement& e = mSubGroupElements[nextElement];
PX_ASSERT(e.component != eINVALID_COMPONENT || e.childGroup != eINVALID_SUBSTEP_GROUP);
if (eINVALID_COMPONENT != e.component)
{
PxVehicleComponent* c = mComponents[e.component];
if (!c->update(timestepForGroup, context))
return false;
}
else
{
PX_ASSERT(eINVALID_SUBSTEP_GROUP != e.childGroup);
if (!updateSubGroup(dt, context, e.childGroup, stepMultiplier))
return false;
}
nextElement = e.nextElement;
}
}
return true;
}
PxU8 getLastKnownElementInGroup(const PxU8 groupId) const
{
PxU8 currElement = mSubGroups[groupId].firstElement;
PxU8 nextElement = mSubGroups[groupId].firstElement;
while (nextElement != eINVALID_SUB_GROUP_ELEMENT)
{
currElement = nextElement;
nextElement = mSubGroupElements[nextElement].nextElement;
}
return currElement;
}
};
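/*
	Illustrative usage sketch (not part of the SDK API): builds a sequence in which two hypothetical
	components (suspension/tire and rigid body) are substepped relative to a hypothetical drivetrain
	component. All component instances are user-provided PxVehicleComponent implementations and are assumed
	to outlive the sequence.

	void buildSequence(PxVehicleComponentSequence& sequence,
					   PxVehicleComponent& drivetrainComponent,
					   PxVehicleComponent& suspensionTireComponent,
					   PxVehicleComponent& rigidBodyComponent)
	{
		sequence.add(&drivetrainComponent);	// runs once per update()
		const PxU8 groupHandle = sequence.beginSubstepGroup(3);	// runs 3 times per update()
		sequence.add(&suspensionTireComponent);
		sequence.add(&rigidBodyComponent);
		sequence.endSubstepGroup();
		sequence.setSubsteps(groupHandle, 8);	// the substep count can be retuned later, e.g. at high speed
	}
*/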
bool PxVehicleComponentSequence::add(PxVehicleComponent* c)
{
if (PxVehicleComponentSequenceLimits::eMAX_NB_COMPONENTS == mNbComponents)
return false;
if (PxVehicleComponentSequenceLimits::eMAX_NB_SUBGROUPELEMENTS == mNbSubGroupElements)
return false;
//Create a new element and point it at the component.
SubGroupElement& nextElementInGroup = mSubGroupElements[mNbSubGroupElements];
nextElementInGroup.childGroup = eINVALID_SUBSTEP_GROUP;
nextElementInGroup.component = mNbComponents;
nextElementInGroup.nextElement = eINVALID_SUB_GROUP_ELEMENT;
if (eINVALID_SUB_GROUP_ELEMENT == mSubGroups[mActiveSubgroup].firstElement)
{
//The group is empty so add the first element to it.
//Point the group at the new element because this will
//be the first element in the group.
mSubGroups[mActiveSubgroup].firstElement = mNbSubGroupElements;
}
else
{
//We are extending the sequence of element of the group.
//Add the new element to the end of the group's sequence.
mSubGroupElements[getLastKnownElementInGroup(mActiveSubgroup)].nextElement = mNbSubGroupElements;
}
//Increment the number of elements.
mNbSubGroupElements++;
//Record the component and increment the number of components.
mComponents[mNbComponents] = c;
mNbComponents++;
return true;
}
PxU8 PxVehicleComponentSequence::beginSubstepGroup(const PxU8 nbSubSteps)
{
if (mNbSubgroups == PxVehicleComponentSequenceLimits::eMAX_NB_SUBGROUPS)
return eINVALID_SUBSTEP_GROUP;
if (mNbSubGroupElements == PxVehicleComponentSequenceLimits::eMAX_NB_SUBGROUPELEMENTS)
return eINVALID_SUBSTEP_GROUP;
//We have a parent and child group relationship.
const PxU8 parentGroup = mActiveSubgroup;
const PxU8 childGroup = mNbSubgroups;
//Set up the child group.
mSubGroups[childGroup].parentGroup = parentGroup;
mSubGroups[childGroup].firstElement = eINVALID_SUB_GROUP_ELEMENT;
mSubGroups[childGroup].nbSteps = nbSubSteps;
//Create a new element to add to the parent group and point it at the child group.
SubGroupElement& nextElementInGroup = mSubGroupElements[mNbSubGroupElements];
nextElementInGroup.childGroup = childGroup;
nextElementInGroup.nextElement = eINVALID_SUB_GROUP_ELEMENT;
nextElementInGroup.component = eINVALID_COMPONENT;
//Add the new element to the parent group.
if (eINVALID_SUB_GROUP_ELEMENT == mSubGroups[parentGroup].firstElement)
{
//The parent group is empty so add the first element to it.
//Point the parent group at the new element because this will
//be the first element in the group.
mSubGroups[parentGroup].firstElement = mNbSubGroupElements;
}
else
{
//We are extending the sequence of elements of the parent group.
//Add the new element to the end of the group's sequence.
mSubGroupElements[getLastKnownElementInGroup(parentGroup)].nextElement = mNbSubGroupElements;
}
//Push the active group.
//All subsequent operations will now address the child group and we push or pop the group.
mActiveSubgroup = childGroup;
//Increment the number of elements.
mNbSubGroupElements++;
//Increment the number of groups.
mNbSubgroups++;
//Return the group id.
return mActiveSubgroup;
}
#if !PX_DOXYGEN
} // namespace vehicle2
} // namespace physx
#endif
/** @} */
| 10,775 | C | 31.954128 | 132 | 0.757865 |
NVIDIA-Omniverse/PhysX/physx/include/vehicle2/PxVehicleAPI.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#pragma once
/** \addtogroup vehicle2
@{
*/
#include "vehicle2/PxVehicleLimits.h"
#include "vehicle2/PxVehicleComponent.h"
#include "vehicle2/PxVehicleComponentSequence.h"
#include "vehicle2/PxVehicleParams.h"
#include "vehicle2/PxVehicleFunctions.h"
#include "vehicle2/PxVehicleMaths.h"
#include "vehicle2/braking/PxVehicleBrakingParams.h"
#include "vehicle2/braking/PxVehicleBrakingFunctions.h"
#include "vehicle2/commands/PxVehicleCommandParams.h"
#include "vehicle2/commands/PxVehicleCommandStates.h"
#include "vehicle2/commands/PxVehicleCommandHelpers.h"
#include "vehicle2/drivetrain/PxVehicleDrivetrainParams.h"
#include "vehicle2/drivetrain/PxVehicleDrivetrainStates.h"
#include "vehicle2/drivetrain/PxVehicleDrivetrainHelpers.h"
#include "vehicle2/drivetrain/PxVehicleDrivetrainFunctions.h"
#include "vehicle2/drivetrain/PxVehicleDrivetrainComponents.h"
#include "vehicle2/physxActor/PxVehiclePhysXActorStates.h"
#include "vehicle2/physxActor/PxVehiclePhysXActorHelpers.h"
#include "vehicle2/physxActor/PxVehiclePhysXActorFunctions.h"
#include "vehicle2/physxActor/PxVehiclePhysXActorComponents.h"
#include "vehicle2/physxConstraints/PxVehiclePhysXConstraintParams.h"
#include "vehicle2/physxConstraints/PxVehiclePhysXConstraintStates.h"
#include "vehicle2/physxConstraints/PxVehiclePhysXConstraintHelpers.h"
#include "vehicle2/physxConstraints/PxVehiclePhysXConstraintFunctions.h"
#include "vehicle2/physxConstraints/PxVehiclePhysXConstraintComponents.h"
#include "vehicle2/physxRoadGeometry/PxVehiclePhysXRoadGeometryState.h"
#include "vehicle2/physxRoadGeometry/PxVehiclePhysXRoadGeometryParams.h"
#include "vehicle2/physxRoadGeometry/PxVehiclePhysXRoadGeometryHelpers.h"
#include "vehicle2/physxRoadGeometry/PxVehiclePhysXRoadGeometryFunctions.h"
#include "vehicle2/physxRoadGeometry/PxVehiclePhysXRoadGeometryComponents.h"
#include "vehicle2/pvd/PxVehiclePvdHelpers.h"
#include "vehicle2/pvd/PxVehiclePvdFunctions.h"
#include "vehicle2/pvd/PxVehiclePvdComponents.h"
#include "vehicle2/rigidBody/PxVehicleRigidBodyParams.h"
#include "vehicle2/rigidBody/PxVehicleRigidBodyStates.h"
#include "vehicle2/rigidBody/PxVehicleRigidBodyFunctions.h"
#include "vehicle2/rigidBody/PxVehicleRigidBodyComponents.h"
#include "vehicle2/roadGeometry/PxVehicleRoadGeometryState.h"
#include "vehicle2/steering/PxVehicleSteeringParams.h"
#include "vehicle2/steering/PxVehicleSteeringFunctions.h"
#include "vehicle2/suspension/PxVehicleSuspensionParams.h"
#include "vehicle2/suspension/PxVehicleSuspensionStates.h"
#include "vehicle2/suspension/PxVehicleSuspensionHelpers.h"
#include "vehicle2/suspension/PxVehicleSuspensionFunctions.h"
#include "vehicle2/suspension/PxVehicleSuspensionComponents.h"
#include "vehicle2/tire/PxVehicleTireParams.h"
#include "vehicle2/tire/PxVehicleTireStates.h"
#include "vehicle2/tire/PxVehicleTireHelpers.h"
#include "vehicle2/tire/PxVehicleTireFunctions.h"
#include "vehicle2/tire/PxVehicleTireComponents.h"
#include "vehicle2/wheel/PxVehicleWheelParams.h"
#include "vehicle2/wheel/PxVehicleWheelStates.h"
#include "vehicle2/wheel/PxVehicleWheelHelpers.h"
#include "vehicle2/wheel/PxVehicleWheelFunctions.h"
#include "vehicle2/wheel/PxVehicleWheelComponents.h"
#if !PX_DOXYGEN
namespace physx
{
namespace vehicle2
{
#endif
/** \brief Initialize the PhysX Vehicle library.
This should be called before calling any functions or methods of the PhysX Vehicle library which may require allocation.
\note This function does not need to be called before creating a PxDefaultAllocator object.
\param foundation a PxFoundation object
@see PxCloseVehicleExtension PxFoundation
*/
PX_FORCE_INLINE bool PxInitVehicleExtension(physx::PxFoundation& foundation)
{
PX_UNUSED(foundation);
PX_CHECK_AND_RETURN_VAL(&PxGetFoundation() == &foundation, "Supplied foundation must match the one that will be used to perform allocations", false);
PxIncFoundationRefCount();
return true;
}
/** \brief Shut down the PhysX Vehicle library.
This function should be called to cleanly shut down the PhysX Vehicle library before application exit.
\note This function is required to be called to release foundation usage.
@see PxInitVehicleExtension
*/
PX_FORCE_INLINE void PxCloseVehicleExtension()
{
PxDecFoundationRefCount();
}
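/*
Example usage (an illustrative sketch, not part of the SDK): the extension is typically
initialized once after the PxFoundation and PxPhysics objects have been created and is
released again before shutdown. "gFoundation" is assumed to be the application's
PxFoundation instance.

PxInitVehicleExtension(*gFoundation);
// ... create and simulate vehicles ...
PxCloseVehicleExtension();
*/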
#if !PX_DOXYGEN
} // namespace vehicle2
} // namespace physx
#endif
/** @} */
| 6,044 | C | 39.844594 | 151 | 0.812376 |
NVIDIA-Omniverse/PhysX/physx/include/vehicle2/PxVehicleFunctions.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#pragma once
/** \addtogroup vehicle2
@{
*/
#include "foundation/PxTransform.h"
#include "foundation/PxMat33.h"
#include "foundation/PxSimpleTypes.h"
#include "PxRigidBody.h"
#include "PxVehicleParams.h"
#include "roadGeometry/PxVehicleRoadGeometryState.h"
#include "rigidBody/PxVehicleRigidBodyStates.h"
#include "physxRoadGeometry/PxVehiclePhysXRoadGeometryState.h"
#include "physxActor/PxVehiclePhysXActorStates.h"
#if !PX_DOXYGEN
namespace physx
{
namespace vehicle2
{
#endif
PX_FORCE_INLINE PxVec3 PxVehicleTransformFrameToFrame
(const PxVehicleFrame& srcFrame, const PxVehicleFrame& trgFrame, const PxVec3& v)
{
PxVec3 result = v;
if ((srcFrame.lngAxis != trgFrame.lngAxis) || (srcFrame.latAxis != trgFrame.latAxis) || (srcFrame.vrtAxis != trgFrame.vrtAxis))
{
const PxMat33 a = srcFrame.getFrame();
const PxMat33 r = trgFrame.getFrame();
result = (r * a.getTranspose() * v);
}
return result;
}
PX_FORCE_INLINE PxVec3 PxVehicleTransformFrameToFrame
(const PxVehicleFrame& srcFrame, const PxVehicleFrame& trgFrame,
const PxVehicleScale& srcScale, const PxVehicleScale& trgScale,
const PxVec3& v)
{
PxVec3 result = PxVehicleTransformFrameToFrame(srcFrame, trgFrame, v);
if (srcScale.scale != trgScale.scale)
result *= (trgScale.scale / srcScale.scale);
return result;
}
PX_FORCE_INLINE PxTransform PxVehicleTransformFrameToFrame
(const PxVehicleFrame& srcFrame, const PxVehicleFrame& trgFrame,
const PxVehicleScale& srcScale, const PxVehicleScale& trgScale,
const PxTransform& v)
{
PxTransform result(PxVehicleTransformFrameToFrame(srcFrame, trgFrame, srcScale, trgScale, v.p), v.q);
if ((srcFrame.lngAxis != trgFrame.lngAxis) || (srcFrame.latAxis != trgFrame.latAxis) || (srcFrame.vrtAxis != trgFrame.vrtAxis))
{
PxF32 angle;
PxVec3 axis;
v.q.toRadiansAndUnitAxis(angle, axis);
result.q = PxQuat(angle, PxVehicleTransformFrameToFrame(srcFrame, trgFrame, axis));
}
return result;
}
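/*
Example (an illustrative sketch): given two already configured PxVehicleFrame instances
"srcFrame" and "trgFrame", transform a point authored in metres into an equivalent point
in a centimetre-based target. The scales and the input point are purely illustrative.

PxVehicleScale metres;
metres.scale = 1.0f;
PxVehicleScale centimetres;
centimetres.scale = 100.0f;
const PxVec3 pointInSrc(1.0f, 0.0f, 0.5f);
const PxVec3 pointInTrg = PxVehicleTransformFrameToFrame(srcFrame, trgFrame, metres, centimetres, pointInSrc);
*/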
PX_FORCE_INLINE PxVec3 PxVehicleComputeTranslation(const PxVehicleFrame& frame, const PxReal lng, const PxReal lat, const PxReal vrt)
{
const PxVec3 v = frame.getFrame()*PxVec3(lng, lat, vrt);
return v;
}
PX_FORCE_INLINE PxQuat PxVehicleComputeRotation(const PxVehicleFrame& frame, const PxReal roll, const PxReal pitch, const PxReal yaw)
{
const PxMat33 m = frame.getFrame();
const PxVec3& lngAxis = m.column0;
const PxVec3& latAxis = m.column1;
const PxVec3& vrtAxis = m.column2;
const PxQuat quatPitch(pitch, latAxis);
const PxQuat quatRoll(roll, lngAxis);
const PxQuat quatYaw(yaw, vrtAxis);
const PxQuat result = quatYaw * quatRoll * quatPitch;
return result;
}
PX_FORCE_INLINE PxF32 PxVehicleComputeSign(const PxReal f)
{
return physx::intrinsics::fsel(f, physx::intrinsics::fsel(-f, 0.0f, 1.0f), -1.0f);
}
/**
\brief Shift the origin of a vehicle by the specified vector.
Call this method to adjust the internal data structures of vehicles to reflect the shifted origin location
(the shift vector will get subtracted from all world space spatial data).
\param[in] axleDesc is a description of the wheels on the vehicle.
\param[in] shift is the translation vector used to shift the origin.
\param[in,out] rigidBodyState stores the current position of the vehicle.
\param[in,out] roadGeometryStates stores the hit plane under each wheel.
\param[in,out] physxActor stores the PxRigidActor that is the vehicle's PhysX representation.
\param[in,out] physxQueryStates stores the hit point of the most recent execution of PxVehiclePhysXRoadGeometryQueryUpdate() for each wheel.
\note It is the user's responsibility to keep track of the summed total origin shift and adjust all input/output to/from the vehicle accordingly.
\note This call will not automatically shift the PhysX scene and its objects. PxScene::shiftOrigin() must be called separately to keep the systems in sync.
\note If there is no associated PxRigidActor then set physxActor to NULL.
\note If there is an associated PxRigidActor and it is already in a PxScene then the complementary call to PxScene::shiftOrigin() will take care of
shifting the associated PxRigidActor. This being the case, set physxActor to NULL. physxActor should be a non-NULL pointer only when there is an
associated PxRigidActor and it is not part of a PxScene. This can occur if the associated PxRigidActor is updated using PhysX immediate mode.
\note If scene queries are independent of PhysX geometry then set queryStates to NULL.
*/
PX_FORCE_INLINE void PxVehicleShiftOrigin
(const PxVehicleAxleDescription& axleDesc, const PxVec3& shift,
PxVehicleRigidBodyState& rigidBodyState, PxVehicleRoadGeometryState* roadGeometryStates,
PxVehiclePhysXActor* physxActor = NULL, PxVehiclePhysXRoadGeometryQueryState* physxQueryStates = NULL)
{
//Adjust the vehicle's internal pose.
rigidBodyState.pose.p -= shift;
//Optionally adjust the PxRigidActor pose.
if (physxActor && !physxActor->rigidBody->getScene())
{
const PxTransform oldPose = physxActor->rigidBody->getGlobalPose();
const PxTransform newPose(oldPose.p - shift, oldPose.q);
physxActor->rigidBody->setGlobalPose(newPose);
}
for (PxU32 i = 0; i < axleDesc.nbWheels; i++)
{
const PxU32 wheelId = axleDesc.wheelIdsInAxleOrder[i];
//Optionally adjust the hit position.
if (physxQueryStates && physxQueryStates[wheelId].actor)
physxQueryStates[wheelId].hitPosition -= shift;
//Adjust the hit plane.
if (roadGeometryStates[wheelId].hitState)
{
const PxPlane plane = roadGeometryStates[wheelId].plane;
PxU32 largestNormalComponentAxis = 0;
PxReal largestNormalComponent = 0.0f;
const PxF32 normalComponents[3] = { plane.n.x, plane.n.y, plane.n.z };
for (PxU32 k = 0; k < 3; k++)
{
if (PxAbs(normalComponents[k]) > largestNormalComponent)
{
largestNormalComponent = PxAbs(normalComponents[k]);
largestNormalComponentAxis = k;
}
}
PxVec3 pointInPlane(PxZero);
switch (largestNormalComponentAxis)
{
case 0:
pointInPlane.x = -plane.d / plane.n.x;
break;
case 1:
pointInPlane.y = -plane.d / plane.n.y;
break;
case 2:
pointInPlane.z = -plane.d / plane.n.z;
break;
default:
break;
}
roadGeometryStates[wheelId].plane.d = -plane.n.dot(pointInPlane - shift);
}
}
}
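/*
Example (an illustrative sketch, assuming the vehicle's actor already belongs to a
PxScene "gScene" and the per-wheel arrays are owned by the application): shift the
scene first, then adjust the vehicle's internal data with a NULL actor pointer so the
actor is not shifted twice.

gScene->shiftOrigin(shift);
PxVehicleShiftOrigin(axleDesc, shift, rigidBodyState, roadGeometryStates, NULL, physxQueryStates);
*/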
#if !PX_DOXYGEN
} // namespace vehicle2
} // namespace physx
#endif
/** @} */
| 7,998 | C | 38.995 | 155 | 0.759565 |
NVIDIA-Omniverse/PhysX/physx/include/vehicle2/PxVehicleMaths.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#pragma once
/** \addtogroup vehicle2
@{
*/
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxMemory.h"
#include "PxVehicleLimits.h"
#if !PX_DOXYGEN
namespace physx
{
namespace vehicle2
{
#endif
class PxVehicleVectorN
{
public:
enum
{
eMAX_SIZE = PxVehicleLimits::eMAX_NB_WHEELS + 3
};
PxVehicleVectorN(const PxU32 size)
: mSize(size)
{
PX_ASSERT(mSize <= PxVehicleVectorN::eMAX_SIZE);
PxMemZero(mValues, sizeof(PxReal)*PxVehicleVectorN::eMAX_SIZE);
}
~PxVehicleVectorN()
{
}
PxVehicleVectorN(const PxVehicleVectorN& src)
{
for (PxU32 i = 0; i < src.mSize; i++)
{
mValues[i] = src.mValues[i];
}
mSize = src.mSize;
}
PX_FORCE_INLINE PxVehicleVectorN& operator=(const PxVehicleVectorN& src)
{
for (PxU32 i = 0; i < src.mSize; i++)
{
mValues[i] = src.mValues[i];
}
mSize = src.mSize;
return *this;
}
PX_FORCE_INLINE PxReal& operator[] (const PxU32 i)
{
PX_ASSERT(i < mSize);
return (mValues[i]);
}
PX_FORCE_INLINE const PxReal& operator[] (const PxU32 i) const
{
//PX_ASSERT(i < mSize);
return (mValues[i]);
}
PX_FORCE_INLINE PxU32 getSize() const { return mSize; }
private:
PxReal mValues[PxVehicleVectorN::eMAX_SIZE];
PxU32 mSize;
};
class PxVehicleMatrixNN
{
public:
PxVehicleMatrixNN()
: mSize(0)
{
}
PxVehicleMatrixNN(const PxU32 size)
: mSize(size)
{
PX_ASSERT(mSize <= PxVehicleVectorN::eMAX_SIZE);
PxMemZero(mValues, sizeof(PxReal)*PxVehicleVectorN::eMAX_SIZE*PxVehicleVectorN::eMAX_SIZE);
}
PxVehicleMatrixNN(const PxVehicleMatrixNN& src)
{
for (PxU32 i = 0; i < src.mSize; i++)
{
for (PxU32 j = 0; j < src.mSize; j++)
{
mValues[i][j] = src.mValues[i][j];
}
}
mSize = src.mSize;
}
~PxVehicleMatrixNN()
{
}
PX_FORCE_INLINE PxVehicleMatrixNN& operator=(const PxVehicleMatrixNN& src)
{
for (PxU32 i = 0; i < src.mSize; i++)
{
for (PxU32 j = 0; j < src.mSize; j++)
{
mValues[i][j] = src.mValues[i][j];
}
}
mSize = src.mSize;
return *this;
}
PX_FORCE_INLINE PxReal get(const PxU32 i, const PxU32 j) const
{
PX_ASSERT(i < mSize);
PX_ASSERT(j < mSize);
return mValues[i][j];
}
PX_FORCE_INLINE void set(const PxU32 i, const PxU32 j, const PxReal val)
{
PX_ASSERT(i < mSize);
PX_ASSERT(j < mSize);
mValues[i][j] = val;
}
PX_FORCE_INLINE PxU32 getSize() const { return mSize; }
PX_FORCE_INLINE void setSize(const PxU32 size)
{
PX_ASSERT(size <= PxVehicleVectorN::eMAX_SIZE);
mSize = size;
}
public:
PxReal mValues[PxVehicleVectorN::eMAX_SIZE][PxVehicleVectorN::eMAX_SIZE];
PxU32 mSize;
};
/*
LUPQ decomposition
Based upon "Outer Product LU with Complete Pivoting," from Matrix Computations (4th Edition), Golub and Van Loan
Solve A*x = b using:
PxVehicleMatrixNNLUSolver solver;
solver.decomposeLU(A);
solver.solve(b, x);
*/
class PxVehicleMatrixNNLUSolver
{
private:
PxVehicleMatrixNN mLU;
PxU32 mP[PxVehicleVectorN::eMAX_SIZE - 1]; // Row permutation
PxU32 mQ[PxVehicleVectorN::eMAX_SIZE - 1]; // Column permutation
PxReal mDetM;
public:
PxVehicleMatrixNNLUSolver() {}
~PxVehicleMatrixNNLUSolver() {}
PxReal getDet() const { return mDetM; }
void decomposeLU(const PxVehicleMatrixNN& A);
//Given a matrix A and a vector b find x that satisfies Ax = b, where the matrix A is the matrix that was passed to #decomposeLU.
//Returns true if the LU decomposition indicates that the matrix has an inverse and x was successfully computed.
//Returns false if the LU decomposition resulted in a zero determinant i.e. the matrix has no inverse and no solution exists for x.
//Returns false if the size of either b or x doesn't match the size of the matrix passed to #decomposeLU.
//If false is returned then each relevant element of x is set to zero.
bool solve(const PxVehicleVectorN& b, PxVehicleVectorN& x) const;
};
class PxVehicleMatrixNGaussSeidelSolver
{
public:
void solve(const PxU32 maxIterations, const PxReal tolerance, const PxVehicleMatrixNN& A, const PxVehicleVectorN& b, PxVehicleVectorN& result) const;
};
class PxVehicleMatrix33Solver
{
public:
bool solve(const PxVehicleMatrixNN& A_, const PxVehicleVectorN& b_, PxVehicleVectorN& result) const;
};
#if !PX_DOXYGEN
} //namespace vehicle2
} //namespace physx
#endif
/** @} */
| 5,957 | C | 24.033613 | 150 | 0.715125 |
NVIDIA-Omniverse/PhysX/physx/include/vehicle2/PxVehicleParams.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#pragma once
/** \addtogroup vehicle2
@{
*/
#include "foundation/PxFoundation.h"
#include "foundation/PxAssert.h"
#include "foundation/PxMemory.h"
#include "foundation/PxVec3.h"
#include "foundation/PxMat33.h"
#include "PxVehicleLimits.h"
class OmniPvdWriter;
#if !PX_DOXYGEN
namespace physx
{
class PxConvexMesh;
class PxScene;
namespace vehicle2
{
#endif
struct PxVehicleAxleDescription
{
PX_FORCE_INLINE void setToDefault()
{
PxMemZero(this, sizeof(PxVehicleAxleDescription));
}
/**
\brief Add an axle to the vehicle by specifying the number of wheels on the axle and an array of wheel ids specifying each wheel on the axle.
\param[in] nbWheelsOnAxle is the number of wheels on the axle to be added.
\param[in] wheelIdsOnAxle is an array of wheel ids specifying all the wheels on the axle to be added.
*/
void addAxle(const PxU32 nbWheelsOnAxle, const PxU32* const wheelIdsOnAxle)
{
PX_ASSERT((nbWheels + nbWheelsOnAxle) < PxVehicleLimits::eMAX_NB_WHEELS);
PX_ASSERT(nbAxles < PxVehicleLimits::eMAX_NB_AXLES);
nbWheelsPerAxle[nbAxles] = nbWheelsOnAxle;
axleToWheelIds[nbAxles] = nbWheels;
for (PxU32 i = 0; i < nbWheelsOnAxle; i++)
{
wheelIdsInAxleOrder[nbWheels + i] = wheelIdsOnAxle[i];
}
nbWheels += nbWheelsOnAxle;
nbAxles++;
}
/**
\brief Return the number of axles on the vehicle.
\return The number of axles.
@see getNbWheelsOnAxle()
*/
PX_FORCE_INLINE PxU32 getNbAxles() const
{
return nbAxles;
}
/**
\brief Return the number of wheels on the ith axle.
\param[in] i specifies the axle to be queried for its wheel count.
\return The number of wheels on the specified axle.
@see getWheelOnAxle()
*/
PX_FORCE_INLINE PxU32 getNbWheelsOnAxle(const PxU32 i) const
{
return nbWheelsPerAxle[i];
}
/**
\brief Return the wheel id of the jth wheel on the ith axle.
\param[in] j specifies that the wheel id to be returned is the jth wheel in the list of wheels on the specified axle.
\param[in] i specifies the axle to be queried.
\return The wheel id of the jth wheel on the ith axle.
@see getNbWheelsOnAxle()
*/
PX_FORCE_INLINE PxU32 getWheelOnAxle(const PxU32 j, const PxU32 i) const
{
return wheelIdsInAxleOrder[axleToWheelIds[i] + j];
}
/**
\brief Return the number of wheels on the vehicle.
\return The number of wheels.
*/
PX_FORCE_INLINE PxU32 getNbWheels() const
{
return nbWheels;
}
/**
\brief Return the axle of a specified wheel.
\param[in] wheelId is the wheel whose axle is to be queried.
\return The axle of the specified wheel.
*/
PX_FORCE_INLINE PxU32 getAxle(const PxU32 wheelId) const
{
for (PxU32 i = 0; i < getNbAxles(); i++)
{
for (PxU32 j = 0; j < getNbWheelsOnAxle(i); j++)
{
if (getWheelOnAxle(j, i) == wheelId)
return i;
}
}
return 0xffffffff;
}
PX_FORCE_INLINE bool isValid() const
{
PX_CHECK_AND_RETURN_VAL(nbAxles > 0, "PxVehicleAxleDescription.nbAxles must be greater than zero", false);
PX_CHECK_AND_RETURN_VAL(nbWheels > 0, "PxVehicleAxleDescription.nbWheels must be greater than zero", false);
return true;
}
PxU32 nbAxles; //!< The number of axles on the vehicle
PxU32 nbWheelsPerAxle[PxVehicleLimits::eMAX_NB_AXLES]; //!< The number of wheels on each axle.
PxU32 axleToWheelIds[PxVehicleLimits::eMAX_NB_AXLES]; //!< The list of wheel ids for the ith axle begins at wheelIdsInAxleOrder[axleToWheelIds[i]]
PxU32 wheelIdsInAxleOrder[PxVehicleLimits::eMAX_NB_WHEELS]; //!< The list of all wheel ids on the vehicle.
PxU32 nbWheels; //!< The number of wheels on the vehicle.
PX_COMPILE_TIME_ASSERT(PxVehicleLimits::eMAX_NB_AXLES == PxVehicleLimits::eMAX_NB_WHEELS);
// It should be possible to support cases where each wheel is controlled individually and thus
// having a wheel per axle for up to the max wheel count.
};
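/*
Example (an illustrative sketch): a four-wheeled vehicle described as two axles with
wheels 0 and 1 on the front axle and wheels 2 and 3 on the rear axle. The wheel ids are
application choices.

PxVehicleAxleDescription axleDesc;
axleDesc.setToDefault();
const PxU32 frontWheelIds[2] = { 0, 1 };
const PxU32 rearWheelIds[2] = { 2, 3 };
axleDesc.addAxle(2, frontWheelIds);
axleDesc.addAxle(2, rearWheelIds);
*/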
struct PxVehicleAxes
{
enum Enum
{
ePosX = 0, //!< The +x axis
eNegX, //!< The -x axis
ePosY, //!< The +y axis
eNegY, //!< The -y axis
ePosZ, //!< The +z axis
eNegZ, //!< The -z axis
eMAX_NB_AXES
};
};
struct PxVehicleFrame
{
PxVehicleAxes::Enum lngAxis; //!< The axis defining the longitudinal (forward) direction of the vehicle.
PxVehicleAxes::Enum latAxis; //!< The axis defining the lateral (side) direction of the vehicle.
PxVehicleAxes::Enum vrtAxis; //!< The axis defining the vertical (up) direction of the vehicle.
PX_FORCE_INLINE void setToDefault()
{
lngAxis = PxVehicleAxes::ePosX;
latAxis = PxVehicleAxes::ePosY;
vrtAxis = PxVehicleAxes::ePosZ;
}
PX_FORCE_INLINE PxMat33 getFrame() const
{
const PxVec3 basisDirs[6] = { PxVec3(1,0,0), PxVec3(-1,0,0), PxVec3(0,1,0), PxVec3(0,-1,0), PxVec3(0,0,1), PxVec3(0,0,-1) };
const PxMat33 mat33(basisDirs[lngAxis], basisDirs[latAxis], basisDirs[vrtAxis]);
return mat33;
}
PX_FORCE_INLINE PxVec3 getLngAxis() const
{
const PxVec3 basisDirs[6] = { PxVec3(1,0,0), PxVec3(-1,0,0), PxVec3(0,1,0), PxVec3(0,-1,0), PxVec3(0,0,1), PxVec3(0,0,-1) };
return basisDirs[lngAxis];
}
PX_FORCE_INLINE PxVec3 getLatAxis() const
{
const PxVec3 basisDirs[6] = { PxVec3(1,0,0), PxVec3(-1,0,0), PxVec3(0,1,0), PxVec3(0,-1,0), PxVec3(0,0,1), PxVec3(0,0,-1) };
return basisDirs[latAxis];
}
PX_FORCE_INLINE PxVec3 getVrtAxis() const
{
const PxVec3 basisDirs[6] = { PxVec3(1,0,0), PxVec3(-1,0,0), PxVec3(0,1,0), PxVec3(0,-1,0), PxVec3(0,0,1), PxVec3(0,0,-1) };
return basisDirs[vrtAxis];
}
PX_FORCE_INLINE bool isValid() const
{
PX_CHECK_AND_RETURN_VAL(lngAxis < PxVehicleAxes::eMAX_NB_AXES, "PxVehicleFrame.lngAxis is invalid", false);
PX_CHECK_AND_RETURN_VAL(latAxis < PxVehicleAxes::eMAX_NB_AXES, "PxVehicleFrame.latAxis is invalid", false);
PX_CHECK_AND_RETURN_VAL(vrtAxis < PxVehicleAxes::eMAX_NB_AXES, "PxVehicleFrame.vrtAxis is invalid", false);
const PxMat33 frame = getFrame();
const PxQuat quat(frame);
PX_CHECK_AND_RETURN_VAL(quat.isFinite() && quat.isUnit() && quat.isSane(), "PxVehicleFrame is not a legal frame", false);
return true;
}
};
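/*
Example (an illustrative sketch): a vehicle authored with +z as the forward direction,
+x as the lateral direction and +y as the up direction configures its frame as follows.
The chosen axes must form a legal right-handed basis for isValid() to pass.

PxVehicleFrame frame;
frame.lngAxis = PxVehicleAxes::ePosZ;
frame.latAxis = PxVehicleAxes::ePosX;
frame.vrtAxis = PxVehicleAxes::ePosY;
PX_ASSERT(frame.isValid());
*/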
struct PxVehicleScale
{
PxReal scale; //!< The length scale used for the vehicle. For example, if 1.0 is considered meters, then 100.0 would be for centimeters.
PX_FORCE_INLINE void setToDefault()
{
scale = 1.0f;
}
PX_FORCE_INLINE bool isValid() const
{
PX_CHECK_AND_RETURN_VAL(scale > 0.0f, "PxVehicleScale.scale must be greater than zero", false);
return true;
}
};
/**
\brief Helper struct to pass array-type data to vehicle components and functions.
The Vehicle SDK gives the user some freedom in how parameters and states are stored.
This helper struct allows either an array of structs or an array of pointers to structs
to be passed into the provided vehicle components and functions.
*/
template<typename T>
struct PxVehicleArrayData
{
enum DataFormat
{
eARRAY_OF_STRUCTS = 0, //!< The data is provided as an array of structs and stored in #arrayOfStructs.
eARRAY_OF_POINTERS //!< The data is provided as an array of pointers and stored in #arrayOfPointers.
};
/**
\brief Set the data as an array of structs.
\param[in] data The data as an array of structs.
*/
PX_FORCE_INLINE void setData(T* data)
{
arrayOfStructs = data;
dataFormat = eARRAY_OF_STRUCTS;
}
/**
\brief Set the data as an array of pointers.
\param[in] data The data as an array of pointers.
*/
PX_FORCE_INLINE void setData(T*const* data)
{
arrayOfPointers = data;
dataFormat = eARRAY_OF_POINTERS;
}
PX_FORCE_INLINE PxVehicleArrayData()
{
}
PX_FORCE_INLINE explicit PxVehicleArrayData(T* data)
{
setData(data);
}
PX_FORCE_INLINE explicit PxVehicleArrayData(T*const* data)
{
setData(data);
}
/**
\brief Get the data entry at a given index.
\param[in] index The index to retrieve the data entry for.
\return Reference to the requested data entry.
*/
PX_FORCE_INLINE T& getData(PxU32 index)
{
if (dataFormat == eARRAY_OF_STRUCTS)
return arrayOfStructs[index];
else
return *arrayOfPointers[index];
}
PX_FORCE_INLINE T& operator[](PxU32 index)
{
return getData(index);
}
/**
\brief Get the data entry at a given index.
\param[in] index The index to retrieve the data entry for.
\return Reference to the requested data entry.
*/
PX_FORCE_INLINE const T& getData(PxU32 index) const
{
if (dataFormat == eARRAY_OF_STRUCTS)
return arrayOfStructs[index];
else
return *arrayOfPointers[index];
}
PX_FORCE_INLINE const T& operator[](PxU32 index) const
{
return getData(index);
}
/**
\brief Set as empty.
*/
PX_FORCE_INLINE void setEmpty()
{
arrayOfStructs = NULL;
}
/**
\brief Check if declared as empty.
\return True if empty, else false.
*/
PX_FORCE_INLINE bool isEmpty() const
{
return (arrayOfStructs == NULL);
}
/**
\brief Get a reference to the array but read only.
\return Read only version of the data.
*/
PX_FORCE_INLINE const PxVehicleArrayData<const T>& getConst() const
{
return reinterpret_cast<const PxVehicleArrayData<const T>&>(*this);
}
union
{
T* arrayOfStructs; //!< The data stored as an array of structs.
T*const* arrayOfPointers; //!< The data stored as an array of pointers.
};
PxU8 dataFormat;
};
template<typename T>
struct PxVehicleSizedArrayData : public PxVehicleArrayData<T>
{
/**
\brief Set the data as an array of structs and set the number of data entries.
\param[in] data The data as an array of structs.
\param[in] count The number of entries in the data array.
*/
PX_FORCE_INLINE void setDataAndCount(T* data, const PxU32 count)
{
PxVehicleArrayData<T>::setData(data);
size = count;
}
/**
\brief Set the data as an array of pointers and set the number of data entries.
\param[in] data The data as an array of pointers.
\param[in] count The number of entries in the data array.
*/
PX_FORCE_INLINE void setDataAndCount(T*const* data, const PxU32 count)
{
PxVehicleArrayData<T>::setData(data);
size = count;
}
/**
\brief Set as empty.
*/
PX_FORCE_INLINE void setEmpty()
{
PxVehicleArrayData<T>::setEmpty();
size = 0;
}
/**
\brief Check if declared as empty.
\return True if empty, else false.
*/
PX_FORCE_INLINE bool isEmpty() const
{
return ((size == 0) || PxVehicleArrayData<T>::isEmpty());
}
PxU32 size;
};
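/*
Example (an illustrative sketch): an application that stores per-wheel data as a plain
array of structs can hand it to vehicle components and functions without copying.
"MyWheelData" is a stand-in for any per-wheel parameter or state struct.

MyWheelData wheelData[4];
PxVehicleSizedArrayData<MyWheelData> wheelDataArray;
wheelDataArray.setDataAndCount(wheelData, 4);
MyWheelData& wheel0 = wheelDataArray[0];
*/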
/**
\brief Determine whether the PhysX actor associated with a vehicle is to be updated with a velocity change or an acceleration change.
A velocity change will be immediately reflected in linear and angular velocity queries against the vehicle. An acceleration change, on the other hand,
will leave the linear and angular velocities unchanged until the next PhysX scene update has applied the acceleration update to the actor's linear and
angular velocities.
@see PxVehiclePhysXActorEndComponent
@see PxVehicleWriteRigidBodyStateToPhysXActor
*/
struct PxVehiclePhysXActorUpdateMode
{
enum Enum
{
eAPPLY_VELOCITY = 0,
eAPPLY_ACCELERATION
};
};
/**
\brief Tire slip values are computed using ratios with potential for divide-by-zero errors. PxVehicleTireSlipParams
introduces a minimum value for the denominator of each of these ratios.
*/
struct PxVehicleTireSlipParams
{
/**
\brief The lateral slip angle is typically computed as a function of the ratio of lateral and longitudinal speeds
of the rigid body in the tire's frame. This leads to a divide-by-zero in the event that the longitudinal speed
approaches zero. The parameter minLatSlipDenominator sets a minimum denominator for the ratio of speeds used to
compute the lateral slip angle.
\note Larger timesteps typically require larger values of minLatSlipDenominator.
<b>Range:</b> (0, inf)<br>
<b>Unit:</b> velocity = length / time
*/
PxReal minLatSlipDenominator;
/**
\brief The longitudinal slip represents the difference between the longitudinal speed of the rigid body in the tire's
frame and the linear speed arising from the rotation of the wheel. This is typically normalized using the reciprocal
of the longitudinal speed of the rigid body in the tire's frame. This leads to a divide-by-zero in the event that the
longitudinal speed approaches zero. The parameter minPassiveLongSlipDenominator sets a minimum denominator for the normalized
longitudinal slip when the wheel experiences zero drive torque and zero brake torque and zero handbrake torque. The aim is
to bring the vehicle to rest without experiencing wheel rotational speeds that oscillate around zero.
\note The vehicle will come to rest more smoothly with larger values of minPassiveLongSlipDenominator, particularly
with large timesteps that often lead to oscillation in wheel rotation speeds when the wheel rotation speed approaches
zero.
\note It is recommended that minActiveLongSlipDenominator < minPassiveLongSlipDenominator.
<b>Range:</b> (0, inf)<br>
<b>Unit:</b> velocity = length / time
*/
PxReal minPassiveLongSlipDenominator;
/**
\brief The longitudinal slip represents the difference between the longitudinal speed of the rigid body in the tire's
frame and the linear speed arising from the rotation of the wheel. This is typically normalized using the reciprocal
of the longitudinal speed of the rigid body in the tire's frame. This leads to a divide-by-zero in the event that the
longitudinal speed approaches zero. The parameter minActiveLongSlipDenominator sets a minimum denominator for the normalized
longitudinal slip when the wheel experiences either a non-zero drive torque or a non-zero brake torque or a non-zero handbrake
torque.
\note Larger timesteps typically require larger values of minActiveLongSlipDenominator to avoid instabilities occurring when
the vehicle is aggressively throttled from rest.
\note It is recommended that minActiveLongSlipDenominator < minPassiveLongSlipDenominator.
<b>Range:</b> (0, inf)<br>
<b>Unit:</b> velocity = length / time
*/
PxReal minActiveLongSlipDenominator;
PX_FORCE_INLINE void setToDefault()
{
minLatSlipDenominator = 1.0f;
minActiveLongSlipDenominator = 0.1f;
minPassiveLongSlipDenominator = 4.0f;
}
PX_FORCE_INLINE PxVehicleTireSlipParams transformAndScale(
const PxVehicleFrame& srcFrame, const PxVehicleFrame& trgFrame, const PxVehicleScale& srcScale, const PxVehicleScale& trgScale) const
{
PX_UNUSED(srcFrame);
PX_UNUSED(trgFrame);
PxVehicleTireSlipParams p = *this;
const PxReal scaleRatio = trgScale.scale / srcScale.scale;
p.minLatSlipDenominator *= scaleRatio;
p.minPassiveLongSlipDenominator *= scaleRatio;
p.minActiveLongSlipDenominator *= scaleRatio;
return p;
}
PX_FORCE_INLINE bool isValid() const
{
PX_CHECK_AND_RETURN_VAL(minLatSlipDenominator > 0.0f, "PxVehicleTireSlipParams.minLatSlipDenominator must be greater than zero", false);
PX_CHECK_AND_RETURN_VAL(minPassiveLongSlipDenominator > 0.0f, "PxVehicleTireSlipParams.minPassiveLongSlipDenominator must be greater than zero", false);
PX_CHECK_AND_RETURN_VAL(minActiveLongSlipDenominator > 0.0f, "PxVehicleTireSlipParams.minActiveLongSlipDenominator must be greater than zero", false);
return true;
}
};
/**
\brief Tires have two important directions for the purposes of tire force computation: longitudinal and lateral.
*/
struct PxVehicleTireDirectionModes
{
enum Enum
{
eLONGITUDINAL = 0,
eLATERAL,
eMAX_NB_PLANAR_DIRECTIONS
};
};
/**
\brief The low speed regime often presents numerical difficulties for the tire model due to the potential for divide-by-zero errors.
This particularly affects scenarios where the vehicle is slowing down due to damping and drag. In scenarios where there is no
significant brake or drive torque, numerical error begins to dominate and it can be difficult to bring the vehicle to rest. A solution
to this problem is to recognise that the vehicle is close to rest and to replace the tire forces with velocity constraints that will
bring the vehicle to rest. This regime is known as the "sticky tire" regime. PxVehicleTireAxisStickyParams describes velocity and time
thresholds that categorise the "sticky tire" regime. It also describes the rate at which the velocity constraints approach zero speed.
*/
struct PxVehicleTireAxisStickyParams
{
/**
\brief A tire enters the "sticky tire" regime when it has been below a speed specified by #thresholdSpeed for a continuous time
specified by #thresholdTime.
<b>Range:</b> [0, inf)<br>
<b>Unit:</b> velocity = length / time
*/
PxReal thresholdSpeed;
/**
\brief A tire enters the "sticky tire" regime when it has been below a speed specified by #thresholdSpeed for a continuous time
specified by #thresholdTime.
<b>Range:</b> [0, inf)<br>
<b>Unit:</b> time
*/
PxReal thresholdTime;
/**
\brief The rate at which the velocity constraint approaches zero is controlled by the damping parameter.
\note Larger values of damping lead to faster approaches to zero. Since the damping behaves like a
stiffness with respect to the velocity, too large a value can lead to instabilities.
<b>Range:</b> [0, inf)<br>
<b>Unit:</b> 1 / time (acceleration instead of force based damping, thus not mass/time)
*/
PxReal damping;
PX_FORCE_INLINE PxVehicleTireAxisStickyParams transformAndScale(
const PxVehicleFrame& srcFrame, const PxVehicleFrame& trgFrame, const PxVehicleScale& srcScale, const PxVehicleScale& trgScale) const
{
PX_UNUSED(srcFrame);
PX_UNUSED(trgFrame);
PxVehicleTireAxisStickyParams p = *this;
const PxReal scaleRatio = trgScale.scale / srcScale.scale;
p.thresholdSpeed *= scaleRatio;
return p;
}
PX_FORCE_INLINE bool isValid() const
{
PX_CHECK_AND_RETURN_VAL(thresholdSpeed >= 0.0f, "PxVehicleTireAxisStickyParams.thresholdSpeed must be greater than or equal to zero", false);
PX_CHECK_AND_RETURN_VAL(thresholdTime >= 0.0f, "PxVehicleTireAxisStickyParams.thresholdTime must be greater than or equal to zero", false);
PX_CHECK_AND_RETURN_VAL(damping >= 0.0f, "PxVehicleTireAxisStickyParams.damping must be greater than or equal to zero", false);
return true;
}
};
/**
\brief For each tire, the forces of the tire model may be replaced by velocity constraints when the tire enters the "sticky tire"
regime. The "sticky tire" regime of the lateral and longitudinal directions of the tire are managed separately.
*/
struct PxVehicleTireStickyParams
{
/**
The "sticky tire" regime of the lateral and longitudinal directions of the tire are managed separately and are individually
parameterized.
*/
PxVehicleTireAxisStickyParams stickyParams[PxVehicleTireDirectionModes::eMAX_NB_PLANAR_DIRECTIONS];
PX_FORCE_INLINE void setToDefault()
{
stickyParams[PxVehicleTireDirectionModes::eLONGITUDINAL].thresholdSpeed = 0.2f;
stickyParams[PxVehicleTireDirectionModes::eLONGITUDINAL].thresholdTime = 1.0f;
stickyParams[PxVehicleTireDirectionModes::eLONGITUDINAL].damping = 1.0f;
stickyParams[PxVehicleTireDirectionModes::eLATERAL].thresholdSpeed = 0.2f;
stickyParams[PxVehicleTireDirectionModes::eLATERAL].thresholdTime = 1.0f;
stickyParams[PxVehicleTireDirectionModes::eLATERAL].damping = 0.1f;
}
PX_FORCE_INLINE PxVehicleTireStickyParams transformAndScale(
const PxVehicleFrame& srcFrame, const PxVehicleFrame& trgFrame, const PxVehicleScale& srcScale, const PxVehicleScale& trgScale) const
{
PxVehicleTireStickyParams p = *this;
p.stickyParams[PxVehicleTireDirectionModes::eLONGITUDINAL] =
stickyParams[PxVehicleTireDirectionModes::eLONGITUDINAL].transformAndScale(srcFrame, trgFrame, srcScale, trgScale);
p.stickyParams[PxVehicleTireDirectionModes::eLATERAL] =
stickyParams[PxVehicleTireDirectionModes::eLATERAL].transformAndScale(srcFrame, trgFrame, srcScale, trgScale);
return p;
}
PX_FORCE_INLINE bool isValid() const
{
if (!stickyParams[PxVehicleTireDirectionModes::eLONGITUDINAL].isValid())
return false;
if (!stickyParams[PxVehicleTireDirectionModes::eLATERAL].isValid())
return false;
return true;
}
};
struct PxVehicleSimulationContextType
{
enum Enum
{
eDEFAULT, //!< The simulation context inherits from PxVehicleSimulationContext
ePHYSX //!< The simulation context inherits from PxVehiclePhysXSimulationContext
};
};
/**
\brief Structure to support Omni PVD, the PhysX Visual Debugger.
*/
struct PxVehiclePvdContext
{
public:
PX_FORCE_INLINE void setToDefault()
{
attributeHandles = NULL;
writer = NULL;
}
/**
\brief The attribute handles used to reflect vehicle parameter and state data in omnipvd.
\note A null value will result in no values being reflected in omnipvd.
\note #attributeHandles and #writer both need to be non-NULL to reflect vehicle values in omnipvd.
@see PxVehiclePvdAttributesCreate
@see PxVehiclePvdAttributesRelease
@see PxVehiclePVDComponent
*/
const struct PxVehiclePvdAttributeHandles* attributeHandles;
/**
\brief An instance of OmniPvdWriter used to write vehicle parameter and state data to omnipvd.
\note A null value will result in no values being reflected in omnipvd.
\note #attributeHandles and #writer both need to be non-NULL to reflect vehicle values in omnipvd.
@see PxVehiclePvdAttributesCreate
@see PxVehiclePvdAttributesRelease
@see PxVehiclePVDComponent
*/
OmniPvdWriter* writer;
};
struct PxVehicleSimulationContext
{
PxVehicleSimulationContext()
: type(PxVehicleSimulationContextType::eDEFAULT)
{}
PxVec3 gravity;
PxVehicleFrame frame;
PxVehicleScale scale;
//Tire
PxVehicleTireSlipParams tireSlipParams;
PxVehicleTireStickyParams tireStickyParams;
/**
\brief Forward wheel speed below which the wheel rotation speed gets blended with the rolling speed.
The blended rotation speed is used to integrate the wheel rotation angle. At low forward wheel speed,
the wheel rotation speed can get unstable (depending on the tire model used) and, for example, oscillate.
\note If brake or throttle is applied, there will be no blending.
<b>Unit:</b> velocity = length / time
*/
PxReal thresholdForwardSpeedForWheelAngleIntegration;
/**
\brief Structure to support Omni PVD, the PhysX Visual Debugger.
*/
PxVehiclePvdContext pvdContext;
protected:
PxVehicleSimulationContextType::Enum type;
public:
PX_FORCE_INLINE PxVehicleSimulationContextType::Enum getType() const { return type; }
PX_FORCE_INLINE void setToDefault()
{
frame.setToDefault();
scale.setToDefault();
gravity = frame.getVrtAxis() * (-9.81f * scale.scale);
tireSlipParams.setToDefault();
tireStickyParams.setToDefault();
thresholdForwardSpeedForWheelAngleIntegration = 5.0f * scale.scale;
pvdContext.setToDefault();
}
PX_FORCE_INLINE PxVehicleSimulationContext transformAndScale(
const PxVehicleFrame& srcFrame, const PxVehicleFrame& trgFrame, const PxVehicleScale& srcScale, const PxVehicleScale& trgScale) const
{
PxVehicleSimulationContext c = *this;
const PxReal scaleRatio = trgScale.scale / srcScale.scale;
c.gravity = trgFrame.getFrame()*srcFrame.getFrame().getTranspose()*c.gravity;
c.gravity *= scaleRatio;
c.tireSlipParams = tireSlipParams.transformAndScale(srcFrame, trgFrame, srcScale, trgScale);
c.tireStickyParams = tireStickyParams.transformAndScale(srcFrame, trgFrame, srcScale, trgScale);
c.thresholdForwardSpeedForWheelAngleIntegration *= scaleRatio;
c.frame = trgFrame;
c.scale = trgScale;
return c;
}
};
struct PxVehiclePhysXSimulationContext : public PxVehicleSimulationContext
{
PxVehiclePhysXSimulationContext()
: PxVehicleSimulationContext()
{
type = PxVehicleSimulationContextType::ePHYSX;
}
//Road geometry queries to find the plane under the wheel.
const PxConvexMesh* physxUnitCylinderSweepMesh;
const PxScene* physxScene;
//PhysX actor update
PxVehiclePhysXActorUpdateMode::Enum physxActorUpdateMode;
/**
\brief Wake counter value to set on the physx actor if a reset is required.
Certain vehicle states should keep a physx actor of a vehicle awake. This
will be achieved by resetting the wake counter value if needed. The wake
counter value is the minimum simulation time that a physx actor will stay
awake.
<b>Unit:</b> time
@see physxActorWakeCounterThreshold PxVehiclePhysxActorKeepAwakeCheck
*/
PxReal physxActorWakeCounterResetValue;
/**
\brief Threshold below which to check whether the physx actor wake counter
should get reset.
<b>Unit:</b> time
@see physxActorWakeCounterResetValue PxVehiclePhysxActorKeepAwakeCheck
*/
PxReal physxActorWakeCounterThreshold;
PX_FORCE_INLINE void setToDefault()
{
PxVehicleSimulationContext::setToDefault();
physxUnitCylinderSweepMesh = NULL;
physxScene = NULL;
physxActorUpdateMode = PxVehiclePhysXActorUpdateMode::eAPPLY_VELOCITY;
physxActorWakeCounterResetValue = 20.0f * 0.02f; // 20 timesteps of size 0.02
physxActorWakeCounterThreshold = 0.5f * physxActorWakeCounterResetValue;
}
PX_FORCE_INLINE PxVehiclePhysXSimulationContext transformAndScale(
const PxVehicleFrame& srcFrame, const PxVehicleFrame& trgFrame, const PxVehicleScale& srcScale, const PxVehicleScale& trgScale) const
{
PxVehiclePhysXSimulationContext r = *this;
static_cast<PxVehicleSimulationContext&>(r) = PxVehicleSimulationContext::transformAndScale(srcFrame, trgFrame, srcScale, trgScale);
return r;
}
};
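/*
Example (an illustrative sketch, assuming the application owns a PxScene "gScene" and a
unit cylinder convex mesh "gUnitCylinderMesh" used for sweeps): populate the PhysX
simulation context once before stepping any vehicles.

PxVehiclePhysXSimulationContext vehicleContext;
vehicleContext.setToDefault();
vehicleContext.gravity = gScene->getGravity();
vehicleContext.physxScene = gScene;
vehicleContext.physxUnitCylinderSweepMesh = gUnitCylinderMesh;
vehicleContext.physxActorUpdateMode = PxVehiclePhysXActorUpdateMode::eAPPLY_ACCELERATION;
*/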
/**
* \brief Express a function as a sequence of points {(x, y)} that form a piecewise polynomial.
*/
template <class T, unsigned int NB_ELEMENTS>
class PxVehicleFixedSizeLookupTable
{
public:
PxVehicleFixedSizeLookupTable()
: nbDataPairs(0)
{
}
PxVehicleFixedSizeLookupTable(const PxVehicleFixedSizeLookupTable& src)
{
PxMemCopy(xVals, src.xVals, sizeof(PxReal)* src.nbDataPairs);
PxMemCopy(yVals, src.yVals, sizeof(T)*src.nbDataPairs);
nbDataPairs = src.nbDataPairs;
}
~PxVehicleFixedSizeLookupTable()
{
}
PxVehicleFixedSizeLookupTable& operator=(const PxVehicleFixedSizeLookupTable& src)
{
PxMemCopy(xVals, src.xVals, sizeof(PxReal)*src.nbDataPairs);
PxMemCopy(yVals, src.yVals, sizeof(T)*src.nbDataPairs);
nbDataPairs = src.nbDataPairs;
return *this;
}
/**
\brief Add one more point to create one more polynomial segment of a piecewise polynomial.
*/
PX_FORCE_INLINE bool addPair(const PxReal x, const T y)
{
PX_CHECK_AND_RETURN_VAL(nbDataPairs < NB_ELEMENTS, "PxVehicleFixedSizeLookupTable::addPair() exceeded fixed size capacity", false);
xVals[nbDataPairs] = x;
yVals[nbDataPairs] = y;
nbDataPairs++;
return true;
}
/**
\brief Identify the segment of the piecewise polynomial that includes x and compute the corresponding y value by linearly interpolating between the segment's endpoints.
\param[in] x is the value on the x-axis of the piecewise polynomial.
\return Returns the y value that corresponds to the input x.
*/
PX_FORCE_INLINE T interpolate(const PxReal x) const
{
if (0 == nbDataPairs)
{
return T(0);
}
if (1 == nbDataPairs || x < xVals[0])
{
return yVals[0];
}
PxReal x0 = xVals[0];
T y0 = yVals[0];
for (PxU32 i = 1; i < nbDataPairs; i++)
{
const PxReal x1 = xVals[i];
const T y1 = yVals[i];
if ((x >= x0) && (x < x1))
{
return (y0 + (y1 - y0) * (x - x0) / (x1 - x0));
}
x0 = x1;
y0 = y1;
}
PX_ASSERT(x >= xVals[nbDataPairs - 1]);
return yVals[nbDataPairs - 1];
}
void clear()
{
PxMemSet(xVals, 0, NB_ELEMENTS * sizeof(PxReal));
PxMemSet(yVals, 0, NB_ELEMENTS * sizeof(T));
nbDataPairs = 0;
}
PxReal xVals[NB_ELEMENTS];
T yVals[NB_ELEMENTS];
PxU32 nbDataPairs;
PX_FORCE_INLINE bool isValid() const
{
for (PxU32 i = 1; i < nbDataPairs; i++)
{
PX_CHECK_AND_RETURN_VAL(xVals[i] > xVals[i - 1], "PxVehicleFixedSizeLookupTable:: xVals[i+1] must be greater than xVals[i]", false);
}
return true;
}
};
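/*
Example (an illustrative sketch): a normalized torque curve sampled with interpolate().
The data points are purely illustrative.

PxVehicleFixedSizeLookupTable<PxReal, 3> torqueCurve;
torqueCurve.addPair(0.0f, 0.8f);
torqueCurve.addPair(0.33f, 1.0f);
torqueCurve.addPair(1.0f, 0.8f);
const PxReal torqueAtHalfRevs = torqueCurve.interpolate(0.5f);
*/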
#if !PX_DOXYGEN
} // namespace vehicle2
} // namespace physx
#endif
/** @} */
| 29,370 | C | 31.099454 | 165 | 0.746612 |
NVIDIA-Omniverse/PhysX/physx/include/vehicle2/PxVehicleComponent.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#pragma once
/** \addtogroup vehicle2
@{
*/
#include "foundation/PxSimpleTypes.h"
#if !PX_DOXYGEN
namespace physx
{
namespace vehicle2
{
#endif
struct PxVehicleSimulationContext;
class PxVehicleComponent
{
public:
virtual ~PxVehicleComponent() {}
/**
\brief Update function for a vehicle component.
\param[in] dt The timestep size to use for the update step.
\param[in] context Vehicle simulation context holding global data or data that usually applies to a
large group of vehicles.
\return True if subsequent components in a sequence should get updated, false if the sequence should
be aborted.
@see PxVehicleComponentSequence
*/
virtual bool update(const PxReal dt, const PxVehicleSimulationContext& context) = 0;
};
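/*
Example (an illustrative sketch): a custom component derives from PxVehicleComponent and
implements update(). Returning true allows subsequent components in the sequence to run.
The drag logic is left out and stands in for any user-defined behaviour.

class MyDragComponent : public PxVehicleComponent
{
public:
	virtual bool update(const PxReal dt, const PxVehicleSimulationContext& context)
	{
		PX_UNUSED(dt);
		PX_UNUSED(context);
		// ... read the rigid body state and accumulate a drag force here ...
		return true;
	}
};
*/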
#if !PX_DOXYGEN
} // namespace vehicle2
} // namespace physx
#endif
/** @} */
| 2,546 | C | 34.873239 | 101 | 0.752553 |
NVIDIA-Omniverse/PhysX/physx/include/vehicle2/physxActor/PxVehiclePhysXActorStates.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#pragma once
/** \addtogroup vehicle2
@{
*/
#include "foundation/PxPreprocessor.h"
#include "foundation/PxMemory.h"
#include "PxRigidBody.h"
#include "vehicle2/PxVehicleLimits.h"
#if !PX_DOXYGEN
namespace physx
{
class PxShape;
namespace vehicle2
{
#endif
/**
\brief A description of the PhysX actor and shapes that represent the vehicle in an associated PxScene.
*/
struct PxVehiclePhysXActor
{
/**
\brief The PhysX rigid body that represents the vehicle in the associated PhysX scene.
\note PxActorFlag::eDISABLE_GRAVITY must be set true on the PxRigidBody
*/
PxRigidBody* rigidBody;
/**
\brief An array of shapes with one shape pointer (or NULL) for each wheel.
*/
PxShape* wheelShapes[PxVehicleLimits::eMAX_NB_WHEELS];
PX_FORCE_INLINE void setToDefault()
{
PxMemZero(this, sizeof(PxVehiclePhysXActor));
}
};
#define PX_VEHICLE_UNSPECIFIED_STEER_STATE PX_MAX_F32
/**
\brief A description of the previous steer command applied to the vehicle.
*/
struct PxVehiclePhysXSteerState
{
/**
\brief The steer command that was most previously applied to the vehicle.
*/
PxReal previousSteerCommand;
PX_FORCE_INLINE void setToDefault()
{
previousSteerCommand = PX_VEHICLE_UNSPECIFIED_STEER_STATE;
}
};
#if !PX_DOXYGEN
} // namespace vehicle2
} // namespace physx
#endif
/** @} */
| 3,012 | C | 29.434343 | 103 | 0.753984 |
NVIDIA-Omniverse/PhysX/physx/include/vehicle2/physxActor/PxVehiclePhysXActorHelpers.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#pragma once
/** \addtogroup vehicle2
@{
*/
#include "PxFiltering.h"
#include "PxShape.h"
#if !PX_DOXYGEN
namespace physx
{
class PxGeometry;
class PxMaterial;
struct PxCookingParams;
namespace vehicle2
{
#endif
struct PxVehicleRigidBodyParams;
struct PxVehicleAxleDescription;
struct PxVehicleWheelParams;
struct PxVehiclePhysXActor;
struct PxVehicleFrame;
struct PxVehicleSuspensionParams;
class PxVehiclePhysXRigidActorParams
{
PX_NOCOPY(PxVehiclePhysXRigidActorParams)
public:
PxVehiclePhysXRigidActorParams(const PxVehicleRigidBodyParams& _physxActorRigidBodyParams, const char* _physxActorName)
: rigidBodyParams(_physxActorRigidBodyParams),
physxActorName(_physxActorName)
{
}
const PxVehicleRigidBodyParams& rigidBodyParams;
const char* physxActorName;
};
class PxVehiclePhysXRigidActorShapeParams
{
PX_NOCOPY(PxVehiclePhysXRigidActorShapeParams)
public:
PxVehiclePhysXRigidActorShapeParams
(const PxGeometry& _geometry, const PxTransform& _localPose, const PxMaterial& _material,
const PxShapeFlags _flags, const PxFilterData& _simulationFilterData, const PxFilterData& _queryFilterData)
: geometry(_geometry),
localPose(_localPose),
material(_material),
flags(_flags),
simulationFilterData(_simulationFilterData),
queryFilterData(_queryFilterData)
{
}
const PxGeometry& geometry;
const PxTransform& localPose;
const PxMaterial& material;
PxShapeFlags flags;
PxFilterData simulationFilterData;
PxFilterData queryFilterData;
};
class PxVehiclePhysXWheelParams
{
PX_NOCOPY(PxVehiclePhysXWheelParams)
public:
PxVehiclePhysXWheelParams(const PxVehicleAxleDescription& _axleDescription, const PxVehicleWheelParams* _wheelParams)
: axleDescription(_axleDescription),
wheelParams(_wheelParams)
{
}
const PxVehicleAxleDescription& axleDescription;
const PxVehicleWheelParams* wheelParams;
};
class PxVehiclePhysXWheelShapeParams
{
PX_NOCOPY(PxVehiclePhysXWheelShapeParams)
public:
PxVehiclePhysXWheelShapeParams(const PxMaterial& _material, const PxShapeFlags _flags, const PxFilterData _simulationFilterData, const PxFilterData _queryFilterData)
: material(_material),
flags(_flags),
simulationFilterData(_simulationFilterData),
queryFilterData(_queryFilterData)
{
}
const PxMaterial& material;
PxShapeFlags flags;
PxFilterData simulationFilterData;
PxFilterData queryFilterData;
};
/**
\brief Create a PxRigidDynamic instance, instantiate it with desired properties and populate it with PxShape instances.
\param[in] vehicleFrame describes the frame of the vehicle.
\param[in] rigidActorParams describes the mass and moment of inertia of the rigid body.
\param[in] rigidActorCmassLocalPose specifies the mapping between actor and rigid body frame.
\param[in] rigidActorShapeParams describes the collision geometry associated with the rigid body.
\param[in] wheelParams describes the radius and half-width of the wheels.
\param[in] wheelShapeParams describes the PxMaterial and PxShapeFlags to apply to the wheel shapes.
\param[in] physics is a PxPhysics instance.
\param[in] params is a PxCookingParams instance.
\param[out] vehiclePhysXActor is a record of the PxRigidDynamic and PxShape instances instantiated.
\note This is an alternative to PxVehiclePhysXArticulationLinkCreate.
\note PxVehiclePhysXActorCreate primarily serves as an illustration of the instantiation of the PhysX class instances
required to simulate a vehicle with a PxRigidDynamic.
@see PxVehiclePhysXActorDestroy
*/
void PxVehiclePhysXActorCreate
(const PxVehicleFrame& vehicleFrame,
const PxVehiclePhysXRigidActorParams& rigidActorParams, const PxTransform& rigidActorCmassLocalPose,
const PxVehiclePhysXRigidActorShapeParams& rigidActorShapeParams,
const PxVehiclePhysXWheelParams& wheelParams, const PxVehiclePhysXWheelShapeParams& wheelShapeParams,
PxPhysics& physics, const PxCookingParams& params,
PxVehiclePhysXActor& vehiclePhysXActor);
/**
\brief Configure an actor so that it is ready for vehicle simulation.
\param[in] rigidActorParams describes the mass and moment of inertia of the rigid body.
\param[in] rigidActorCmassLocalPose specifies the mapping between actor and rigid body frame.
\param[out] rigidBody is the body to be prepared for simulation.
*/
void PxVehiclePhysXActorConfigure
(const PxVehiclePhysXRigidActorParams& rigidActorParams, const PxTransform& rigidActorCmassLocalPose,
PxRigidBody& rigidBody);
/**
\brief Create a PxArticulationReducedCoordinate and a single PxArticulationLink,
instantiate the PxArticulationLink with desired properties and populate it with PxShape instances.
\param[in] vehicleFrame describes the frame of the vehicle.
\param[in] rigidActorParams describes the mass and moment of inertia of the rigid body.
\param[in] rigidActorCmassLocalPose specifies the mapping between actor and rigid body frame.
\param[in] rigidActorShapeParams describes the collision geometry associated with the rigid body.
\param[in] wheelParams describes the radius and half-width of the wheels.
\param[in] wheelShapeParams describes the PxMaterial and PxShapeFlags to apply to the wheel shapes.
\param[in] physics is a PxPhysics instance.
\param[in] params is a PxCookingParams instance.
\param[out] vehiclePhysXActor is a record of the PxArticulationReducedCoordinate, PxArticulationLink and PxShape instances instantiated.
\note This is an alternative to PxVehiclePhysXActorCreate.
\note PxVehiclePhysXArticulationLinkCreate primarily serves as an illustration of the instantiation of the PhysX class instances
required to simulate a vehicle as part of an articulated ensemble.
@see PxVehiclePhysXActorDestroy
*/
void PxVehiclePhysXArticulationLinkCreate
(const PxVehicleFrame& vehicleFrame,
const PxVehiclePhysXRigidActorParams& rigidActorParams, const PxTransform& rigidActorCmassLocalPose,
const PxVehiclePhysXRigidActorShapeParams& rigidActorShapeParams,
const PxVehiclePhysXWheelParams& wheelParams, const PxVehiclePhysXWheelShapeParams& wheelShapeParams,
PxPhysics& physics, const PxCookingParams& params,
PxVehiclePhysXActor& vehiclePhysXActor);
/**
\brief Release the PxRigidDynamic, PxArticulationReducedCoordinate, PxArticulationLink and PxShape instances
instantiated by PxVehiclePhysXActorCreate or PxVehiclePhysXArticulationLinkCreate.
\param[in] vehiclePhysXActor is a description of the PhysX instances to be released.
*/
void PxVehiclePhysXActorDestroy(PxVehiclePhysXActor& vehiclePhysXActor);
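/*
Example (an illustrative sketch of the create/simulate/destroy lifecycle, assuming the
parameter structs, the PxPhysics instance "gPhysics" and the PxCookingParams instance
"cookingParams" have already been set up by the application):

PxVehiclePhysXActor physxActor;
physxActor.setToDefault();
PxVehiclePhysXActorCreate(
	vehicleFrame, rigidActorParams, rigidActorCmassLocalPose, rigidActorShapeParams,
	wheelParams, wheelShapeParams, *gPhysics, cookingParams, physxActor);
// ... add the actor to a scene and simulate ...
PxVehiclePhysXActorDestroy(physxActor);
*/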
#if !PX_DOXYGEN
} // namespace vehicle2
} // namespace physx
#endif
/** @} */
| 8,225 | C | 38.171428 | 166 | 0.814225 |
NVIDIA-Omniverse/PhysX/physx/include/vehicle2/physxActor/PxVehiclePhysXActorFunctions.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#pragma once
/** \addtogroup vehicle2
@{
*/
#include "foundation/PxSimpleTypes.h"
#include "vehicle2/PxVehicleParams.h"
#if !PX_DOXYGEN
namespace physx
{
class PxRigidBody;
class PxShape;
namespace vehicle2
{
#endif
struct PxVehicleCommandState;
struct PxVehicleEngineDriveTransmissionCommandState;
struct PxVehicleEngineParams;
struct PxVehicleEngineState;
struct PxVehicleGearboxParams;
struct PxVehicleGearboxState;
struct PxVehicleRigidBodyState;
struct PxVehiclePhysXConstraints;
struct PxVehicleWheelLocalPose;
struct PxVehicleWheelParams;
struct PxVehicleWheelRigidBody1dState;
struct PxVehiclePhysXSteerState;
/**
\brief Wake up the physx actor if the actor is asleep and the commands signal an intent to
change the state of the vehicle.
\param[in] commands are the brake, throttle and steer values that will drive the vehicle.
\param[in] transmissionCommands are the target gear and clutch values that will control
the transmission. If the target gear is different from the current gearbox
target gear, then the physx actor will get woken up. Can be set to NULL if the
vehicle does not have a gearbox or if this is not a desired behavior. If
specified, then gearParams and gearState have to be specified too.
\param[in] gearParams The gearbox parameters. Can be set to NULL if the vehicle does
not have a gearbox and transmissionCommands is NULL.
\param[in] gearState The state of the gearbox. Can be set to NULL if the vehicle does
not have a gearbox and transmissionCommands is NULL.
\param[in] physxActor is the PxRigidBody instance associated with the vehicle.
\param[in,out] physxSteerState holds the steer command from the previous call and is compared against commands
to determine if the steering state has changed since the last call to PxVehiclePhysxActorWakeup().
\note If the steering has changed, the actor will be woken up.
\note On exit from PxVehiclePhysxActorWakeup, physxSteerState.previousSteerCommand is assigned to the value
of commands.steer so that the steer state may be propagated to the subsequent call to PxVehiclePhysxActorWakeup().
\note If physxSteerState.previousSteerCommand has value PX_VEHICLE_UNSPECIFIED_STEER_STATE, the steering state
is treated as though it has not changed.
*/
void PxVehiclePhysxActorWakeup(
const PxVehicleCommandState& commands,
const PxVehicleEngineDriveTransmissionCommandState* transmissionCommands,
const PxVehicleGearboxParams* gearParams,
const PxVehicleGearboxState* gearState,
PxRigidBody& physxActor,
PxVehiclePhysXSteerState& physxSteerState);
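// Illustrative usage sketch (hypothetical caller code): variable names are placeholders; only the
// signature comes from the declaration above. For a vehicle without a gearbox, the three pointer
// arguments may all be NULL.
//
//   PxVehiclePhysxActorWakeup(commands, &transmissionCommands, &gearboxParams, &gearboxState,
//                             *vehiclePhysXActor.rigidBody, physxSteerState);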
/**
\brief Check if the physx actor is sleeping and clear certain vehicle states if it is.
\param[in] axleDescription identifies the wheels on each axle.
\param[in] physxActor is the PxRigidBody instance associated with the vehicle.
\param[in] engineParams The engine parameters. Can be set to NULL if the vehicle does
not have an engine. Must be specified if engineState is specified.
\param[in,out] rigidBodyState is the state of the rigid body used by the Vehicle SDK.
\param[in,out] physxConstraints The state of the suspension limit and low speed tire constraints.
If the vehicle actor is sleeping and constraints are active, they will be
deactivated and marked as dirty.
\param[in,out] wheelRigidBody1dStates describes the angular speed of the wheels.
\param[out] engineState The engine state. Can be set to NULL if the vehicle does
not have an engine. If specified, then engineParams has to be specified too.
The engine rotation speed will get set to the idle rotation speed if
the actor is sleeping.
\return True if the actor was sleeping, else false.
*/
bool PxVehiclePhysxActorSleepCheck
(const PxVehicleAxleDescription& axleDescription,
const PxRigidBody& physxActor,
const PxVehicleEngineParams* engineParams,
PxVehicleRigidBodyState& rigidBodyState,
PxVehiclePhysXConstraints& physxConstraints,
PxVehicleArrayData<PxVehicleWheelRigidBody1dState>& wheelRigidBody1dStates,
PxVehicleEngineState* engineState);
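// Illustrative usage sketch (hypothetical caller code): a typical caller skips the remainder of
// the vehicle update when the actor is asleep, as PxVehiclePhysXActorBeginComponent does.
//
//   if (PxVehiclePhysxActorSleepCheck(axleDesc, *vehiclePhysXActor.rigidBody, &engineParams,
//                                     rigidBodyState, physxConstraints,
//                                     wheelRigidBody1dStates, &engineState))
//       return;  // asleep: states have been cleared, nothing further to simulate this step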
/**
\brief Check if the physx actor has to be kept awake.
Certain criteria should keep the vehicle physx actor awake, for example, if the
(mass normalized) rotational kinetic energy of the wheels is above a certain
threshold or if a gear change is pending or if throttle is applied.
This method will reset the wake counter of the physx actor to a specified value,
if any of the mentioned criteria are met.
\note The physx actor's sleep threshold will be used as threshold to test against
for the energy criteria.
\param[in] axleDescription identifies the wheels on each axle.
\param[in] wheelParams describes the radius, mass etc. of the wheels.
\param[in] wheelRigidBody1dStates describes the angular speed of the wheels.
\param[in] wakeCounterThreshold Once the wake counter of the physx actor falls
below this threshold, the method will start testing if the wake
counter needs to be reset.
\param[in] wakeCounterResetValue The value to set the physx actor wake counter
to, if any of the criteria to do so are met.
\param[in] gearState The gear state. Can be set to NULL if the vehicle does
not have gears or if the mentioned behavior is not desired.
\param[in] throttle The throttle command state (see #PxVehicleCommandState).
Can be set to NULL if the vehicle is not controlled through
PxVehicleCommandState or if the mentioned behavior is not desired.
\param[in] physxActor is the PxRigidBody instance associated with the vehicle.
*/
void PxVehiclePhysxActorKeepAwakeCheck
(const PxVehicleAxleDescription& axleDescription,
const PxVehicleArrayData<const PxVehicleWheelParams>& wheelParams,
const PxVehicleArrayData<const PxVehicleWheelRigidBody1dState>& wheelRigidBody1dStates,
const PxReal wakeCounterThreshold,
const PxReal wakeCounterResetValue,
const PxVehicleGearboxState* gearState,
const PxReal* throttle,
PxRigidBody& physxActor);
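// Illustrative usage sketch (hypothetical caller code): the two wake counter values are typically
// read from a PxVehiclePhysXSimulationContext (physxActorWakeCounterThreshold and
// physxActorWakeCounterResetValue); throttleCommand is a placeholder PxReal holding the throttle
// command.
//
//   PxVehiclePhysxActorKeepAwakeCheck(axleDesc, wheelParams, wheelRigidBody1dStates,
//                                     physxContext.physxActorWakeCounterThreshold,
//                                     physxContext.physxActorWakeCounterResetValue,
//                                     &gearboxState, &throttleCommand,
//                                     *vehiclePhysXActor.rigidBody);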
/**
\brief Read the rigid body state from a PhysX actor.
\param[in] physxActor is a reference to a PhysX actor.
\param[out] rigidBodyState is the state of the rigid body used by the Vehicle SDK.
*/
void PxVehicleReadRigidBodyStateFromPhysXActor
(const PxRigidBody& physxActor,
PxVehicleRigidBodyState& rigidBodyState);
/**
\brief Update the local pose of a PxShape that is associated with a wheel.
\param[in] wheelLocalPose describes the local pose of each wheel in the rigid body frame.
\param[in] wheelShapeLocalPose describes the local pose to apply to the PxShape instance in the wheel's frame.
\param[in] shape is the target PxShape.
*/
void PxVehicleWriteWheelLocalPoseToPhysXWheelShape
(const PxTransform& wheelLocalPose, const PxTransform& wheelShapeLocalPose, PxShape* shape);
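// Illustrative per-wheel loop (hypothetical caller code), analogous to the loop in
// PxVehiclePhysXActorEndComponent:
//
//   for (PxU32 i = 0; i < axleDesc.nbWheels; i++)
//   {
//       const PxU32 wheelId = axleDesc.wheelIdsInAxleOrder[i];
//       PxVehicleWriteWheelLocalPoseToPhysXWheelShape(wheelLocalPoses[wheelId].localPose,
//                                                     wheelShapeLocalPoses[wheelId],
//                                                     vehiclePhysXActor.wheelShapes[wheelId]);
//   }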
/**
\brief Write the rigid body state to a PhysX actor.
\param[in] physxActorUpdateMode controls whether the PhysX actor is to be updated with
instantaneous velocity changes or with accumulated accelerations to be applied in
the next simulation step of the associated PxScene.
\param[in] rigidBodyState is the state of the rigid body.
\param[in] dt is the simulation time that has elapsed since the last call to
PxVehicleWriteRigidBodyStateToPhysXActor().
\param[out] physXActor is a reference to the PhysX actor.
*/
void PxVehicleWriteRigidBodyStateToPhysXActor
(const PxVehiclePhysXActorUpdateMode::Enum physxActorUpdateMode,
const PxVehicleRigidBodyState& rigidBodyState,
const PxReal dt,
PxRigidBody& physXActor);
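// Illustrative usage sketch (hypothetical caller code): the actor state is read at the start of
// the step and written back once the new rigid body state has been computed; the update mode is
// typically taken from a PxVehiclePhysXSimulationContext, as done by PxVehiclePhysXActorEndComponent.
//
//   PxVehicleReadRigidBodyStateFromPhysXActor(*vehiclePhysXActor.rigidBody, rigidBodyState);
//   // ... run the vehicle simulation update for this timestep ...
//   PxVehicleWriteRigidBodyStateToPhysXActor(physxContext.physxActorUpdateMode,
//                                            rigidBodyState, dt, *vehiclePhysXActor.rigidBody);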
#if !PX_DOXYGEN
} // namespace vehicle2
} // namespace physx
#endif
/** @} */
| 9,175 | C | 44.88 | 114 | 0.795095 |
NVIDIA-Omniverse/PhysX/physx/include/vehicle2/physxActor/PxVehiclePhysXActorComponents.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#pragma once
/** \addtogroup vehicle2
@{
*/
#include "vehicle2/PxVehicleParams.h"
#include "vehicle2/PxVehicleComponent.h"
#include "vehicle2/wheel/PxVehicleWheelStates.h"
#include "PxVehiclePhysXActorFunctions.h"
#include "PxVehiclePhysXActorStates.h"
#include "common/PxProfileZone.h"
#if !PX_DOXYGEN
namespace physx
{
namespace vehicle2
{
#endif
/**
\brief Work items at the beginning of an update step for a PhysX actor based vehicle.
Includes:
- Waking the actor up if it is sleeping and a throttle or steer command is issued.
- Clearing certain states if the actor is sleeping.
- Reading the state from the PhysX actor and copy to the vehicle internal state.
@see PxVehiclePhysxActorWakeup PxVehiclePhysxActorSleepCheck PxVehicleReadRigidBodyStateFromPhysXActor
*/
class PxVehiclePhysXActorBeginComponent : public PxVehicleComponent
{
public:
PxVehiclePhysXActorBeginComponent() : PxVehicleComponent() {}
virtual ~PxVehiclePhysXActorBeginComponent() {}
/**
\brief Provide vehicle data items for this component.
\param[out] axleDescription identifies the wheels on each axle.
\param[out] commands are the brake, throttle and steer values that will drive the vehicle.
\param[out] transmissionCommands are the target gear and clutch values that will control
the transmission. Can be set to NULL if the vehicle does not have a gearbox. If
specified, then gearParams and gearState have to be specified too.
\param[out] gearParams The gearbox parameters. Can be set to NULL if the vehicle does
not have a gearbox and transmissionCommands is NULL.
\param[out] gearState The state of the gearbox. Can be set to NULL if the vehicle does
not have a gearbox and transmissionCommands is NULL.
\param[out] engineParams The engine parameters. Can be set to NULL if the vehicle does
not have an engine. Must be specified if engineState is specified.
\param[out] physxActor is the PxRigidBody instance associated with the vehicle.
\param[out] physxSteerState is the previous steer state and is used to determine if the
steer command has changed by comparing with PxVehicleCommandState::steer.
\param[out] physxConstraints The state of the suspension limit and low speed tire constraints.
If the vehicle actor is sleeping and constraints are active, they will be
deactivated and marked as dirty.
\param[out] rigidBodyState is the state of the rigid body used by the Vehicle SDK.
\param[out] wheelRigidBody1dStates describes the angular speed of each wheel.
\param[out] engineState The engine state. Can be set to NULL if the vehicle does
not have an engine. If specified, then engineParams has to be specified too.
*/
virtual void getDataForPhysXActorBeginComponent(
const PxVehicleAxleDescription*& axleDescription,
const PxVehicleCommandState*& commands,
const PxVehicleEngineDriveTransmissionCommandState*& transmissionCommands,
const PxVehicleGearboxParams*& gearParams,
const PxVehicleGearboxState*& gearState,
const PxVehicleEngineParams*& engineParams,
PxVehiclePhysXActor*& physxActor,
PxVehiclePhysXSteerState*& physxSteerState,
PxVehiclePhysXConstraints*& physxConstraints,
PxVehicleRigidBodyState*& rigidBodyState,
PxVehicleArrayData<PxVehicleWheelRigidBody1dState>& wheelRigidBody1dStates,
PxVehicleEngineState*& engineState) = 0;
virtual bool update(const PxReal dt, const PxVehicleSimulationContext& context)
{
PX_UNUSED(dt);
PX_UNUSED(context);
PX_PROFILE_ZONE("PxVehiclePhysXActorBeginComponent::update", 0);
const PxVehicleAxleDescription* axleDescription;
const PxVehicleCommandState* commands;
const PxVehicleEngineDriveTransmissionCommandState* transmissionCommands;
const PxVehicleGearboxParams* gearParams;
const PxVehicleGearboxState* gearState;
const PxVehicleEngineParams* engineParams;
PxVehiclePhysXActor* physxActor;
PxVehiclePhysXSteerState* physxSteerState;
PxVehiclePhysXConstraints* physxConstraints;
PxVehicleRigidBodyState* rigidBodyState;
PxVehicleArrayData<PxVehicleWheelRigidBody1dState> wheelRigidBody1dStates;
PxVehicleEngineState* engineState;
getDataForPhysXActorBeginComponent(axleDescription, commands, transmissionCommands,
gearParams, gearState, engineParams,
physxActor, physxSteerState, physxConstraints,
rigidBodyState, wheelRigidBody1dStates, engineState);
if (physxActor->rigidBody->getScene()) // Considering case where actor is not in a scene and constraints get solved via immediate mode
{
PxVehiclePhysxActorWakeup(*commands, transmissionCommands, gearParams, gearState,
*physxActor->rigidBody, *physxSteerState);
if (PxVehiclePhysxActorSleepCheck(*axleDescription, *physxActor->rigidBody, engineParams,
*rigidBodyState, *physxConstraints, wheelRigidBody1dStates, engineState))
{
return false;
}
}
PxVehicleReadRigidBodyStateFromPhysXActor(*physxActor->rigidBody, *rigidBodyState);
return true;
}
};
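/*
Illustrative sketch (hypothetical integration code, not part of the SDK): a concrete vehicle class
typically stores the parameter and state structures as members and hands out pointers to them. The
member names below are placeholders; the signature is copied from the pure virtual function above.
This hypothetical vehicle has neither gearbox nor engine, so the corresponding pointers are
returned as NULL.

	class MyVehicle : public PxVehiclePhysXActorBeginComponent
	{
	public:
		virtual void getDataForPhysXActorBeginComponent(
			const PxVehicleAxleDescription*& axleDescription,
			const PxVehicleCommandState*& commands,
			const PxVehicleEngineDriveTransmissionCommandState*& transmissionCommands,
			const PxVehicleGearboxParams*& gearParams,
			const PxVehicleGearboxState*& gearState,
			const PxVehicleEngineParams*& engineParams,
			PxVehiclePhysXActor*& physxActor,
			PxVehiclePhysXSteerState*& physxSteerState,
			PxVehiclePhysXConstraints*& physxConstraints,
			PxVehicleRigidBodyState*& rigidBodyState,
			PxVehicleArrayData<PxVehicleWheelRigidBody1dState>& wheelRigidBody1dStates,
			PxVehicleEngineState*& engineState)
		{
			axleDescription = &mAxleDescription;
			commands = &mCommands;
			transmissionCommands = NULL;
			gearParams = NULL;
			gearState = NULL;
			engineParams = NULL;
			physxActor = &mPhysXActor;
			physxSteerState = &mPhysXSteerState;
			physxConstraints = &mPhysXConstraints;
			rigidBodyState = &mRigidBodyState;
			wheelRigidBody1dStates = mWheelRigidBody1dStates;
			engineState = NULL;
		}

		// ... members mAxleDescription, mCommands, mPhysXActor, etc. owned by this class ...
	};
*/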
/**
\brief Work items at the end of an update step for a PhysX actor based vehicle.
Includes:
- Writing vehicle internal state to the PhysX actor.
- Keeping the vehicle awake if certain criteria are met.
*/
class PxVehiclePhysXActorEndComponent : public PxVehicleComponent
{
public:
PxVehiclePhysXActorEndComponent() : PxVehicleComponent() {}
virtual ~PxVehiclePhysXActorEndComponent() {}
/**
\brief Provide vehicle data items for this component.
\param[out] axleDescription identifies the wheels on each axle.
\param[out] rigidBodyState is the state of the rigid body used by the Vehicle SDK.
\param[out] wheelParams describes the radius, mass etc. of the wheels.
\param[out] wheelShapeLocalPoses are the local poses in the wheel's frame to apply to the PxShape instances that represent the wheel.
\param[out] wheelRigidBody1dStates describes the angular speed of the wheels.
\param[out] wheelLocalPoses describes the local poses of the wheels in the rigid body frame.
\param[out] gearState The gear state. Can be set to NULL if the vehicle does
not have gears.
\param[out] throttle The throttle command state (see #PxVehicleCommandState).
Can be set to NULL if the vehicle is not controlled through
PxVehicleCommandState.
\param[out] physxActor is the PxRigidBody instance associated with the vehicle.
*/
virtual void getDataForPhysXActorEndComponent(
const PxVehicleAxleDescription*& axleDescription,
const PxVehicleRigidBodyState*& rigidBodyState,
PxVehicleArrayData<const PxVehicleWheelParams>& wheelParams,
PxVehicleArrayData<const PxTransform>& wheelShapeLocalPoses,
PxVehicleArrayData<const PxVehicleWheelRigidBody1dState>& wheelRigidBody1dStates,
PxVehicleArrayData<const PxVehicleWheelLocalPose>& wheelLocalPoses,
const PxVehicleGearboxState*& gearState,
const PxReal*& throttle,
PxVehiclePhysXActor*& physxActor) = 0;
virtual bool update(const PxReal dt, const PxVehicleSimulationContext& context)
{
PX_UNUSED(dt);
PX_PROFILE_ZONE("PxVehiclePhysXActorEndComponent::update", 0);
const PxVehicleAxleDescription* axleDescription;
const PxVehicleRigidBodyState* rigidBodyState;
PxVehicleArrayData<const PxVehicleWheelParams> wheelParams;
PxVehicleArrayData<const PxTransform> wheelShapeLocalPoses;
PxVehicleArrayData<const PxVehicleWheelRigidBody1dState> wheelRigidBody1dStates;
PxVehicleArrayData<const PxVehicleWheelLocalPose> wheelLocalPoses;
const PxVehicleGearboxState* gearState;
const PxReal* throttle;
PxVehiclePhysXActor* physxActor;
getDataForPhysXActorEndComponent(axleDescription, rigidBodyState,
wheelParams, wheelShapeLocalPoses, wheelRigidBody1dStates, wheelLocalPoses, gearState, throttle,
physxActor);
for (PxU32 i = 0; i < axleDescription->nbWheels; i++)
{
const PxU32 wheelId = axleDescription->wheelIdsInAxleOrder[i];
PxVehicleWriteWheelLocalPoseToPhysXWheelShape(wheelLocalPoses[wheelId].localPose, wheelShapeLocalPoses[wheelId],
physxActor->wheelShapes[wheelId]);
}
if (context.getType() == PxVehicleSimulationContextType::ePHYSX)
{
const PxVehiclePhysXSimulationContext& physxContext = static_cast<const PxVehiclePhysXSimulationContext&>(context);
PxVehicleWriteRigidBodyStateToPhysXActor(physxContext.physxActorUpdateMode, *rigidBodyState, dt, *physxActor->rigidBody);
PxVehiclePhysxActorKeepAwakeCheck(*axleDescription, wheelParams, wheelRigidBody1dStates,
physxContext.physxActorWakeCounterThreshold, physxContext.physxActorWakeCounterResetValue, gearState, throttle,
*physxActor->rigidBody);
}
else
{
PX_ALWAYS_ASSERT();
}
return true;
}
};
#if !PX_DOXYGEN
} // namespace vehicle2
} // namespace physx
#endif
/** @} */
| 10,366 | C | 41.487705 | 137 | 0.791434 |
NVIDIA-Omniverse/PhysX/physx/include/vehicle2/rigidBody/PxVehicleRigidBodyStates.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#pragma once
/** \addtogroup vehicle2
@{
*/
#include "foundation/PxTransform.h"
#include "foundation/PxVec3.h"
#include "vehicle2/PxVehicleParams.h"
#if !PX_DOXYGEN
namespace physx
{
namespace vehicle2
{
#endif
struct PxVehicleRigidBodyState
{
PxTransform pose; //!< the body's pose (in world space)
PxVec3 linearVelocity; //!< the body's linear velocity (in world space)
PxVec3 angularVelocity; //!< the body's angular velocity (in world space)
PxVec3 previousLinearVelocity; //!< the previous linear velocity of the body (in world space)
PxVec3 previousAngularVelocity; //!< the previous angular velocity of the body (in world space)
PxVec3 externalForce; //!< external force (in world space) affecting the rigid body (usually excluding gravitational force)
PxVec3 externalTorque; //!< external torque (in world space) affecting the rigid body
PX_FORCE_INLINE void setToDefault()
{
pose = PxTransform(PxIdentity);
linearVelocity = PxVec3(PxZero);
angularVelocity = PxVec3(PxZero);
externalForce = PxVec3(PxZero);
externalTorque = PxVec3(PxZero);
}
/**
\brief Compute the vertical speed of the rigid body transformed to the world frame.
\param[in] frame describes the axes of the vehicle
*/
PX_FORCE_INLINE PxReal getVerticalSpeed(const PxVehicleFrame& frame) const
{
return linearVelocity.dot(pose.q.rotate(frame.getVrtAxis()));
}
/**
\brief Compute the lateral speed of the rigid body transformed to the world frame.
\param[in] frame describes the axes of the vehicle
*/
PX_FORCE_INLINE PxReal getLateralSpeed(const PxVehicleFrame& frame) const
{
return linearVelocity.dot(pose.q.rotate(frame.getLatAxis()));
}
/**
\brief Compute the longitudinal speed of the rigid body transformed to the world frame.
\param[in] frame describes the axes of the vehicle
*/
PX_FORCE_INLINE PxReal getLongitudinalSpeed(const PxVehicleFrame& frame) const
{
return linearVelocity.dot(pose.q.rotate(frame.getLngAxis()));
}
};
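// Illustrative sketch (hypothetical caller code): the signed forward speed of the vehicle is
// obtained by projecting the linear velocity onto the longitudinal axis of the vehicle frame,
// which is exactly what getLongitudinalSpeed computes:
//
//   const PxReal forwardSpeed = rigidBodyState.getLongitudinalSpeed(frame);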
#if !PX_DOXYGEN
} // namespace vehicle2
} // namespace physx
#endif
/** @} */
| 3,754 | C | 36.929293 | 126 | 0.75333 |
NVIDIA-Omniverse/PhysX/physx/include/vehicle2/rigidBody/PxVehicleRigidBodyParams.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#pragma once
/** \addtogroup vehicle2
@{
*/
#include "foundation/PxFoundation.h"
#include "vehicle2/PxVehicleParams.h"
#include "vehicle2/PxVehicleFunctions.h"
#if !PX_DOXYGEN
namespace physx
{
namespace vehicle2
{
#endif
/**
\brief The properties of the rigid body.
*/
struct PxVehicleRigidBodyParams
{
/**
\brief The mass of the rigid body.
<b>Range:</b> (0, inf)<br>
<b>Unit:</b> mass
*/
PxReal mass;
/**
\brief The moment of inertia of the rigid body.
<b>Range:</b> (0, inf)<br>
<b>Unit:</b> mass * (length^2)
*/
PxVec3 moi;
PX_FORCE_INLINE PxVehicleRigidBodyParams transformAndScale(
const PxVehicleFrame& srcFrame, const PxVehicleFrame& trgFrame, const PxVehicleScale& srcScale, const PxVehicleScale& trgScale) const
{
PxVehicleRigidBodyParams r = *this;
r.moi = PxVehicleTransformFrameToFrame(srcFrame, trgFrame, moi).abs();
const PxReal scale = trgScale.scale/srcScale.scale;
r.moi *= (scale*scale);
return r;
}
PX_FORCE_INLINE bool isValid() const
{
PX_CHECK_AND_RETURN_VAL(mass > 0.0f, "PxVehicleRigidBodyParams.mass must be greater than zero", false);
PX_CHECK_AND_RETURN_VAL(moi.x > 0.0f && moi.y > 0.0f && moi.z > 0.0f, "PxVehicleRigidBodyParams.moi must be greater than zero", false);
return true;
}
};
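// Illustrative sketch (hypothetical values): a chassis approximated as a solid box of mass m and
// dimensions w x h x l has moments of inertia (1/12)*m*(a^2 + b^2) about each axis, where a and b
// are the two dimensions perpendicular to that axis. The mapping of the PxVec3 components to the
// vehicle's lateral/vertical/longitudinal axes depends on the chosen PxVehicleFrame; the numbers
// below are placeholders.
//
//   const PxReal m = 1500.0f, w = 2.0f, h = 1.5f, l = 5.0f;
//   PxVehicleRigidBodyParams rigidBodyParams;
//   rigidBodyParams.mass = m;
//   rigidBodyParams.moi = PxVec3(h*h + l*l, w*w + l*l, w*w + h*h) * (m / 12.0f);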
#if !PX_DOXYGEN
} // namespace vehicle2
} // namespace physx
#endif
/** @} */
| 3,042 | C | 32.076087 | 136 | 0.737015 |
NVIDIA-Omniverse/PhysX/physx/include/vehicle2/rigidBody/PxVehicleRigidBodyComponents.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#pragma once
/** \addtogroup vehicle2
@{
*/
#include "vehicle2/PxVehicleParams.h"
#include "vehicle2/PxVehicleComponent.h"
#include "PxVehicleRigidBodyFunctions.h"
#include "common/PxProfileZone.h"
#if !PX_DOXYGEN
namespace physx
{
namespace vehicle2
{
#endif
/**
\brief Forward integrate the momentum and pose of the vehicle's rigid body after applying forces and torques
from the suspension, tires and anti-roll bars.
*/
class PxVehicleRigidBodyComponent : public PxVehicleComponent
{
public:
PxVehicleRigidBodyComponent() : PxVehicleComponent() {}
virtual ~PxVehicleRigidBodyComponent() {}
/**
\brief Retrieve pointers to the parameter and state data required to update the dynamic state of a rigid body.
\param[out] axleDescription must be returned as a non-null pointer to a single PxVehicleAxleDescription instance that describes the wheels and axles
of the vehicle.
\param[out] rigidBodyParams must be returned as a non-null pointer to a single PxVehicleRigidBodyParams instance that describes the mass and moment of
inertia of the rigid body.
\param[out] suspensionForces must be returned as a non-null pointer to an array of suspension forces and torques in the world frame.
The suspension forces and torques will be applied to the rigid body to update *rigidBodyState*.
\param[out] tireForces must be returned as a non-null pointer to an array of tire forces and torques in the world frame.
The tire forces and torques will be applied to the rigid body to update *rigidBodyState*.
\param[out] antiRollTorque may be returned as an optionally non-null pointer to a single PxVehicleAntiRollTorque instance that contains the accumulated anti-roll
torque to apply to the rigid body.
\param[out] rigidBodyState must be returned as a non-null pointer to a single PxVehicleRigidBodyState instance that is to be forward integrated.
\note The suspensionForces array must contain an entry for each wheel listed as an active wheel in axleDescription.
\note The tireForces array must contain an entry for each wheel listed as an active wheel in axleDescription.
\note If antiRollTorque is returned as a null pointer then zero anti-roll torque will be applied to the rigid body.
*/
virtual void getDataForRigidBodyComponent(
const PxVehicleAxleDescription*& axleDescription,
const PxVehicleRigidBodyParams*& rigidBodyParams,
PxVehicleArrayData<const PxVehicleSuspensionForce>& suspensionForces,
PxVehicleArrayData<const PxVehicleTireForce>& tireForces,
const PxVehicleAntiRollTorque*& antiRollTorque,
PxVehicleRigidBodyState*& rigidBodyState) = 0;
virtual bool update(const PxReal dt, const PxVehicleSimulationContext& context)
{
PX_PROFILE_ZONE("PxVehicleRigidBodyComponent::update", 0);
const PxVehicleAxleDescription* axleDescription;
const PxVehicleRigidBodyParams* rigidBodyParams;
PxVehicleArrayData<const PxVehicleSuspensionForce> suspensionForces;
PxVehicleArrayData<const PxVehicleTireForce> tireForces;
const PxVehicleAntiRollTorque* antiRollTorque;
PxVehicleRigidBodyState* rigidBodyState;
getDataForRigidBodyComponent(axleDescription, rigidBodyParams,
suspensionForces, tireForces, antiRollTorque,
rigidBodyState);
PxVehicleRigidBodyUpdate(
*axleDescription, *rigidBodyParams,
suspensionForces, tireForces, antiRollTorque,
dt, context.gravity,
*rigidBodyState);
return true;
}
};
#if !PX_DOXYGEN
} // namespace vehicle2
} // namespace physx
#endif
/** @} */
| 5,162 | C | 43.895652 | 159 | 0.78826 |
NVIDIA-Omniverse/PhysX/physx/include/vehicle2/rigidBody/PxVehicleRigidBodyFunctions.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#pragma once
/** \addtogroup vehicle2
@{
*/
#include "foundation/PxVec3.h"
#include "foundation/PxSimpleTypes.h"
#include "vehicle2/PxVehicleParams.h"
#if !PX_DOXYGEN
namespace physx
{
namespace vehicle2
{
#endif
struct PxVehicleRigidBodyParams;
struct PxVehicleSuspensionForce;
struct PxVehicleTireForce;
struct PxVehicleAntiRollTorque;
struct PxVehicleRigidBodyState;
/**
\brief Forward integrate rigid body state.
\param[in] axleDescription is a description of the axles of the vehicle and the wheels on each axle.
\param[in] rigidBodyParams is a description of rigid body mass and moment of inertia.
\param[in] suspensionForces is an array of suspension forces and torques in the world frame to be applied to the rigid body.
\param[in] tireForces is an array of tire forces and torques in the world frame to be applied to the rigid body.
\param[in] antiRollTorque is an optional pointer to a single PxVehicleAntiRollTorque instance that contains the accumulated anti-roll
torque to apply to the rigid body.
\param[in] dt is the timestep of the forward integration.
\param[in] gravity is gravitational acceleration.
\param[in,out] rigidBodyState is the rigid body state that is to be updated.
\note The suspensionForces array must contain an entry for each wheel listed as an active wheel in axleDescription.
\note The tireForces array must contain an entry for each wheel listed as an active wheel in axleDescription.
\note If antiRollTorque is a null pointer then zero anti-roll torque will be applied to the rigid body.
*/
void PxVehicleRigidBodyUpdate
(const PxVehicleAxleDescription& axleDescription, const PxVehicleRigidBodyParams& rigidBodyParams,
const PxVehicleArrayData<const PxVehicleSuspensionForce>& suspensionForces,
const PxVehicleArrayData<const PxVehicleTireForce>& tireForces,
const PxVehicleAntiRollTorque* antiRollTorque,
const PxReal dt, const PxVec3& gravity,
PxVehicleRigidBodyState& rigidBodyState);
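// Illustrative usage sketch (hypothetical caller code), mirroring what
// PxVehicleRigidBodyComponent::update does; passing NULL instead of &antiRollTorque applies zero
// anti-roll torque.
//
//   PxVehicleRigidBodyUpdate(axleDesc, rigidBodyParams,
//                            suspensionForces, tireForces, &antiRollTorque,
//                            dt, gravity, rigidBodyState);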
#if !PX_DOXYGEN
} // namespace vehicle2
} // namespace physx
#endif
/** @} */
| 3,720 | C | 44.938271 | 133 | 0.787366 |
NVIDIA-Omniverse/PhysX/physx/include/vehicle2/braking/PxVehicleBrakingFunctions.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#pragma once
/** \addtogroup vehicle2
@{
*/
#include "foundation/PxPreprocessor.h"
#include "foundation/PxMath.h"
#include "PxVehicleBrakingParams.h"
#include "../commands/PxVehicleCommandStates.h"
#include "../commands/PxVehicleCommandHelpers.h"
#include "../drivetrain/PxVehicleDrivetrainParams.h"
#if !PX_DOXYGEN
namespace physx
{
namespace vehicle2
{
#endif
/**
\brief Compute the brake torque response to an array of brake commands.
\param[in] brakeCommands is the array of input brake commands to be applied to the vehicle.
\param[in] nbBrakeCommands is the number of input brake commands to be applied to the vehicle.
\param[in] longitudinalSpeed is the longitudinal speed of the vehicle.
\param[in] wheelId specifies the wheel that is to have its brake response computed.
\param[in] brakeResponseParams specifies the per wheel brake torque response to each brake command as a nonlinear function of brake command and longitudinal speed.
\param[out] brakeResponseState is the brake torque response to the input brake command.
\note commands.brakes[i] and brakeResponseParams[i] are treated as pairs of brake command and brake command response.
*/
PX_FORCE_INLINE void PxVehicleBrakeCommandResponseUpdate
(const PxReal* brakeCommands, const PxU32 nbBrakeCommands, const PxReal longitudinalSpeed,
const PxU32 wheelId, const PxVehicleSizedArrayData<const PxVehicleBrakeCommandResponseParams>& brakeResponseParams,
PxReal& brakeResponseState)
{
PX_CHECK_AND_RETURN(nbBrakeCommands <= brakeResponseParams.size, "PxVehicleBrakeCommandResponseUpdate: nbBrakeCommands must be less than or equal to brakeResponseParams.size");
PxReal sum = 0.0f;
for (PxU32 i = 0; i < nbBrakeCommands; i++)
{
sum += PxVehicleNonLinearResponseCompute(brakeCommands[i], longitudinalSpeed, wheelId, brakeResponseParams[i]);
}
brakeResponseState = sum;
}
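// Illustrative usage sketch (hypothetical caller code): with two brake commands (for example a
// main brake and a handbrake) stored in PxVehicleCommandState::brakes and two matching entries in
// brakeResponseParams, the response of a single wheel is computed as
//
//   PxReal brakeTorque = 0.0f;
//   PxVehicleBrakeCommandResponseUpdate(commands.brakes, 2, longitudinalSpeed,
//                                       wheelId, brakeResponseParams, brakeTorque);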
#if !PX_DOXYGEN
} // namespace vehicle2
} // namespace physx
#endif
/** @} */
| 3,618 | C | 44.237499 | 168 | 0.778883 |
NVIDIA-Omniverse/PhysX/physx/include/vehicle2/braking/PxVehicleBrakingParams.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#pragma once
/** \addtogroup vehicle2
@{
*/
#include "foundation/PxFoundation.h"
#include "vehicle2/PxVehicleParams.h"
#include "vehicle2/commands/PxVehicleCommandParams.h"
#if !PX_DOXYGEN
namespace physx
{
namespace vehicle2
{
#endif
/**
\brief Distribute a brake response to the wheels of a vehicle.
\note The brake torque applied to the ith wheel is brakeCommand * maxResponse * wheelResponseMultipliers[i].
\note A typical use case is to set maxResponse to be the vehicle's maximum achievable brake torque
that occurs when the brake command is equal to 1.0. The array wheelResponseMultipliers[i] would then be used
to specify the maximum achievable brake torque per wheel as a fractional multiplier of the vehicle's maximum achievable brake torque.
*/
struct PxVehicleBrakeCommandResponseParams : public PxVehicleCommandResponseParams
{
PX_FORCE_INLINE PxVehicleBrakeCommandResponseParams transformAndScale(
const PxVehicleFrame& srcFrame, const PxVehicleFrame& trgFrame, const PxVehicleScale& srcScale, const PxVehicleScale& trgScale) const
{
PX_UNUSED(srcFrame);
PX_UNUSED(trgFrame);
PxVehicleBrakeCommandResponseParams r = *this;
const PxReal scale = trgScale.scale/srcScale.scale;
r.maxResponse *= (scale*scale); //maxResponse is a torque!
return r;
}
PX_FORCE_INLINE bool isValid(const PxVehicleAxleDescription& axleDesc) const
{
if (!axleDesc.isValid())
return false;
PX_CHECK_AND_RETURN_VAL(maxResponse >= 0.0f, "PxVehicleBrakeCommandResponseParams.maxResponse must be greater than or equal to zero", false);
for (PxU32 i = 0; i < axleDesc.nbWheels; i++)
{
PX_CHECK_AND_RETURN_VAL(wheelResponseMultipliers[axleDesc.wheelIdsInAxleOrder[i]] >= 0.0f, "PxVehicleBrakeCommandResponseParams.wheelResponseMultipliers[i] must be greater than or equal to zero.", false);
}
return true;
}
};
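// Illustrative sketch (hypothetical values): a four-wheeled vehicle with a maximum brake torque of
// 2000 (torque units) on the front wheels (wheel ids 0 and 1) and half of that on the rear wheels
// (wheel ids 2 and 3) could be described as
//
//   PxVehicleBrakeCommandResponseParams brakeResponseParams;
//   brakeResponseParams.maxResponse = 2000.0f;
//   brakeResponseParams.wheelResponseMultipliers[0] = 1.0f;
//   brakeResponseParams.wheelResponseMultipliers[1] = 1.0f;
//   brakeResponseParams.wheelResponseMultipliers[2] = 0.5f;
//   brakeResponseParams.wheelResponseMultipliers[3] = 0.5f;
//
// A brake command of 0.5 then yields 1000 torque units at each front wheel and 500 at each rear
// wheel, following brakeCommand * maxResponse * wheelResponseMultipliers[i].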
#if !PX_DOXYGEN
} // namespace vehicle2
} // namespace physx
#endif
/** @} */
| 3,626 | C | 40.689655 | 207 | 0.768064 |
NVIDIA-Omniverse/PhysX/physx/include/vehicle2/roadGeometry/PxVehicleRoadGeometryState.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#pragma once
/** \addtogroup vehicle2
@{
*/
#include "foundation/PxPlane.h"
#include "foundation/PxMemory.h"
#if !PX_DOXYGEN
namespace physx
{
namespace vehicle2
{
#endif
struct PxVehicleRoadGeometryState
{
PxPlane plane; //!< the plane under the wheel
PxReal friction; //!< the friction to be used by the tire model
PxVec3 velocity; //!< the velocity of the road geometry
bool hitState; //!< true if a plane is found, false if there is no plane.
PX_FORCE_INLINE void setToDefault()
{
PxMemZero(this, sizeof(PxVehicleRoadGeometryState));
}
};
#if !PX_DOXYGEN
} // namespace vehicle2
} // namespace physx
#endif
/** @} */
| 2,355 | C | 35.812499 | 79 | 0.744798 |
NVIDIA-Omniverse/PhysX/physx/include/vehicle2/wheel/PxVehicleWheelFunctions.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#pragma once
/** \addtogroup vehicle2
@{
*/
#include "foundation/PxSimpleTypes.h"
#if !PX_DOXYGEN
namespace physx
{
namespace vehicle2
{
#endif
struct PxVehicleWheelParams;
struct PxVehicleWheelActuationState;
struct PxVehicleSuspensionState;
struct PxVehicleTireSpeedState;
struct PxVehicleScale;
struct PxVehicleWheelRigidBody1dState;
/**
\brief Forward integrate the rotation angle of a wheel
\note The rotation angle of the wheel plays no role in simulation but is important to compute the pose of the wheel for rendering.
\param[in] wheelParams describes the radius and half-width of the wheel
\param[in] actuationState describes whether the wheel has drive or brake torque applied to it.
\param[in] suspensionState describes whether the wheel touches the ground.
\param[in] tireSpeedState describes the components of rigid body velocity at the ground contact point along the tire's lateral and longitudinal directions.
\param[in] thresholdForwardSpeedForWheelAngleIntegration Forward wheel speed below which the wheel rotation speed gets blended with the rolling
speed (based on the forward wheel speed) which is then used to integrate the wheel rotation angle. At low forward wheel speed, the wheel
rotation speed can get unstable (depending on the tire model used) and, for example, oscillate. If brake or throttle is applied, there
will be no blending.
\param[in] dt is the simulation time that has elapsed since the last call to PxVehicleWheelRotationAngleUpdate
\param[in,out] wheelRigidBody1dState describes the current angular speed and angle of the wheel.
\note At low speeds and large timesteps, wheel rotation speed can become noisy due to singularities in the tire slip computations.
At low speeds, therefore, the wheel speed used for integrating the angle is a blend of current angular speed and rolling angular speed if the
wheel experiences neither brake nor drive torque and can be placed on the ground. The blended rotation speed gets stored in
PxVehicleWheelRigidBody1dState::correctedRotationSpeed.
*/
void PxVehicleWheelRotationAngleUpdate
(const PxVehicleWheelParams& wheelParams,
const PxVehicleWheelActuationState& actuationState, const PxVehicleSuspensionState& suspensionState, const PxVehicleTireSpeedState& tireSpeedState,
const PxReal thresholdForwardSpeedForWheelAngleIntegration, const PxReal dt,
PxVehicleWheelRigidBody1dState& wheelRigidBody1dState);
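// Illustrative usage sketch (hypothetical caller code): the threshold is typically taken from the
// simulation context (see PxVehicleSimulationContext::thresholdForwardSpeedForWheelAngleIntegration).
//
//   PxVehicleWheelRotationAngleUpdate(wheelParams, actuationState, suspensionState, tireSpeedState,
//                                     context.thresholdForwardSpeedForWheelAngleIntegration, dt,
//                                     wheelRigidBody1dState);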
#if !PX_DOXYGEN
} // namespace vehicle2
} // namespace physx
#endif
/** @} */
| 4,201 | C | 50.243902 | 155 | 0.794335 |
NVIDIA-Omniverse/PhysX/physx/include/vehicle2/wheel/PxVehicleWheelStates.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#pragma once
/** \addtogroup vehicle2
@{
*/
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxTransform.h"
#include "foundation/PxMemory.h"
#if !PX_DOXYGEN
namespace physx
{
namespace vehicle2
{
#endif
/**
\brief It is useful to know if a brake or drive torque is to be applied to a wheel.
*/
struct PxVehicleWheelActuationState
{
bool isBrakeApplied; //!< True if a brake torque is applied, false if not.
bool isDriveApplied; //!< True if a drive torque is applied, false if not.
PX_FORCE_INLINE void setToDefault()
{
PxMemZero(this, sizeof(PxVehicleWheelActuationState));
}
};
struct PxVehicleWheelRigidBody1dState
{
/**
\brief The rotation speed of the wheel around the lateral axis.
<b>Unit:</b> radians / time
*/
PxReal rotationSpeed;
/**
\brief The corrected rotation speed of the wheel around the lateral axis in radians per second.
At low forward wheel speed, the wheel rotation speed can get unstable (depending on the tire
model used) and, for example, oscillate. To integrate the wheel rotation angle, a (potentially)
blended rotation speed is used which gets stored in #correctedRotationSpeed.
<b>Unit:</b> radians / time
@see PxVehicleSimulationContext::thresholdForwardSpeedForWheelAngleIntegration
*/
PxReal correctedRotationSpeed;
/**
\brief The accumulated angle of the wheel around the lateral axis in radians in range (-2*Pi,2*Pi)
*/
PxReal rotationAngle;
PX_FORCE_INLINE void setToDefault()
{
PxMemZero(this, sizeof(PxVehicleWheelRigidBody1dState));
}
};
struct PxVehicleWheelLocalPose
{
PxTransform localPose; //!< The pose of the wheel in the rigid body frame.
PX_FORCE_INLINE void setToDefault()
{
localPose = PxTransform(PxIdentity);
}
};
#if !PX_DOXYGEN
} // namespace vehicle2
} // namespace physx
#endif
/** @} */
| 3,511 | C | 31.220183 | 99 | 0.753062 |
NVIDIA-Omniverse/PhysX/physx/include/vehicle2/wheel/PxVehicleWheelParams.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#pragma once
/** \addtogroup vehicle2
@{
*/
#include "foundation/PxFoundation.h"
#include "vehicle2/PxVehicleParams.h"
#if !PX_DOXYGEN
namespace physx
{
namespace vehicle2
{
#endif
struct PxVehicleWheelParams
{
/**
\brief Radius of unit that includes metal wheel plus rubber tire.
<b>Range:</b> (0, inf)<br>
<b>Unit:</b> length
*/
PxReal radius;
/**
\brief Half-width of unit that includes wheel plus tire.
<b>Range:</b> (0, inf)<br>
<b>Unit:</b> length
*/
PxReal halfWidth;
/**
\brief Mass of unit that includes wheel plus tire.
<b>Range:</b> (0, inf)<br>
<b>Unit:</b> mass
*/
PxReal mass;
/**
\brief Moment of inertia of unit that includes wheel plus tire about the rolling axis.
<b>Range:</b> (0, inf)<br>
<b>Unit:</b> mass * (length^2)
*/
PxReal moi;
/**
\brief Damping rate applied to wheel.
<b>Range:</b> [0, inf)<br>
<b>Unit:</b> torque * time = mass * (length^2) / time
*/
PxReal dampingRate;
PX_FORCE_INLINE PxVehicleWheelParams transformAndScale(
const PxVehicleFrame& srcFrame, const PxVehicleFrame& trgFrame, const PxVehicleScale& srcScale, const PxVehicleScale& trgScale) const
{
PX_UNUSED(srcFrame);
PX_UNUSED(trgFrame);
PxVehicleWheelParams r = *this;
const PxReal scale = trgScale.scale/srcScale.scale;
r.radius *= scale;
r.halfWidth *= scale;
r.moi *= (scale*scale);
r.dampingRate *= (scale*scale);
return r;
}
PX_FORCE_INLINE bool isValid() const
{
PX_CHECK_AND_RETURN_VAL(radius > 0.0f, "PxVehicleWheelParams.radius must be greater than zero", false);
PX_CHECK_AND_RETURN_VAL(halfWidth > 0.0f, "PxVehicleWheelParams.halfWidth must be greater than zero", false);
PX_CHECK_AND_RETURN_VAL(mass > 0.0f, "PxVehicleWheelParams.mass must be greater than zero", false);
PX_CHECK_AND_RETURN_VAL(moi > 0.0f, "PxVehicleWheelParams.moi must be greater than zero", false);
PX_CHECK_AND_RETURN_VAL(dampingRate >= 0.0f, "PxVehicleWheelParams.dampingRate must be greater than or equal to zero", false);
return true;
}
};
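// Illustrative sketch (hypothetical values): for a wheel of mass m and radius r, a common starting
// point for the rolling-axis moment of inertia is the solid-cylinder value 0.5 * m * r * r.
//
//   PxVehicleWheelParams wheelParams;
//   wheelParams.radius      = 0.35f;
//   wheelParams.halfWidth   = 0.15f;
//   wheelParams.mass        = 20.0f;
//   wheelParams.moi         = 0.5f * 20.0f * 0.35f * 0.35f;
//   wheelParams.dampingRate = 0.25f;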
#if !PX_DOXYGEN
} // namespace vehicle2
} // namespace physx
#endif
/** @} */
| 3,789 | C | 30.583333 | 135 | 0.724202 |