NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsVecMathAoSScalar.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_PHYSICS_COMMON_VECMATH_INLINE_SCALAR #define NV_PHYSICS_COMMON_VECMATH_INLINE_SCALAR #if COMPILE_VECTOR_INTRINSICS #error Scalar version should not be included when using vector intrinsics. #endif //Remove this define when all platforms use simd solver. 
#define NV_SUPPORT_SIMD struct VecU8V; struct VecI16V; struct VecU16V; struct VecI32V; struct VecU32V; struct Vec4V; typedef Vec4V QuatV; NV_ALIGN_PREFIX(16) struct FloatV { float x; float pad[3]; FloatV(){} FloatV(const float _x) : x(_x) { } } NV_ALIGN_SUFFIX(16); NV_ALIGN_PREFIX(16) struct Vec4V { float x, y, z, w; Vec4V(){} Vec4V(const float _x, const float _y, const float _z, const float _w) : x(_x), y(_y), z(_z), w(_w) { } } NV_ALIGN_SUFFIX(16); NV_ALIGN_PREFIX(16) struct Vec3V { float x,y,z; float pad; Vec3V(){} Vec3V(const float _x, const float _y, const float _z) : x(_x), y(_y), z(_z), pad(0.0f) { } } NV_ALIGN_SUFFIX(16); NV_ALIGN_PREFIX(16) struct BoolV { uint32_t ux, uy, uz, uw; BoolV(){} BoolV(const uint32_t _x, const uint32_t _y, const uint32_t _z, const uint32_t _w) : ux(_x), uy(_y), uz(_z), uw(_w) { } } NV_ALIGN_SUFFIX(16); struct Mat33V { Mat33V(){} Mat33V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2) : col0(c0), col1(c1), col2(c2) { } Vec3V col0; Vec3V col1; Vec3V col2; }; struct Mat34V { Mat34V(){} Mat34V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2, const Vec3V& c3) : col0(c0), col1(c1), col2(c2), col3(c3) { } Vec3V col0; Vec3V col1; Vec3V col2; Vec3V col3; }; struct Mat43V { Mat43V(){} Mat43V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2) : col0(c0), col1(c1), col2(c2) { } Vec4V col0; Vec4V col1; Vec4V col2; }; struct Mat44V { Mat44V(){} Mat44V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2, const Vec4V& c3) : col0(c0), col1(c1), col2(c2), col3(c3) { } Vec4V col0; Vec4V col1; Vec4V col2; Vec4V col3; }; NV_ALIGN_PREFIX(16) struct VecU32V { uint32_t u32[4]; NV_FORCE_INLINE VecU32V() {} NV_FORCE_INLINE VecU32V(uint32_t a, uint32_t b, uint32_t c, uint32_t d) { u32[0] = a; u32[1] = b; u32[2] = c; u32[3] = d; } } NV_ALIGN_SUFFIX(16); NV_ALIGN_PREFIX(16) struct VecI32V { int32_t i32[4]; NV_FORCE_INLINE VecI32V() {} NV_FORCE_INLINE VecI32V(int32_t a, int32_t b, int32_t c, int32_t d) { i32[0] = a; i32[1] = b; i32[2] = c; i32[3] = d; } } NV_ALIGN_SUFFIX(16); NV_ALIGN_PREFIX(16) struct VecI16V { int16_t i16[8]; NV_FORCE_INLINE VecI16V() {} NV_FORCE_INLINE VecI16V(int16_t a, int16_t b, int16_t c, int16_t d, int16_t e, int16_t f, int16_t g, int16_t h) { i16[0] = a; i16[1] = b; i16[2] = c; i16[3] = d; i16[4] = e; i16[5] = f; i16[6] = g; i16[7] = h; } } NV_ALIGN_SUFFIX(16); NV_ALIGN_PREFIX(16) struct VecU16V { union { uint16_t u16[8]; int16_t i16[8]; }; NV_FORCE_INLINE VecU16V() {} NV_FORCE_INLINE VecU16V(uint16_t a, uint16_t b, uint16_t c, uint16_t d, uint16_t e, uint16_t f, uint16_t g, uint16_t h) { u16[0] = a; u16[1] = b; u16[2] = c; u16[3] = d; u16[4] = e; u16[5] = f; u16[6] = g; u16[7] = h; } } NV_ALIGN_SUFFIX(16); NV_ALIGN_PREFIX(16) struct VecU8V { uint8_t u8[8]; NV_FORCE_INLINE VecU8V() {} NV_FORCE_INLINE VecU8V(uint8_t a, uint8_t b, uint8_t c, uint8_t d) { u8[0] = a; u8[1] = b; u8[2] = c; u8[3] = d; } } NV_ALIGN_SUFFIX(16); #define FloatVArg FloatV& #define Vec3VArg Vec3V& #define Vec4VArg Vec4V& #define BoolVArg BoolV& #define VecU32VArg VecU32V& #define VecI32VArg VecI32V& #define VecU16VArg VecU16V& #define VecI16VArg VecI16V& #define VecU8VArg VecU8V& #define QuatVArg QuatV& #define VecCrossV Vec3V typedef VecI32V VecShiftV; #define VecShiftVArg VecShiftV& #endif //NV_PHYSICS_COMMON_VECMATH_INLINE_SCALAR
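The structs above are the scalar stand-ins for SIMD registers: every type is padded and aligned to 16 bytes so the fallback path keeps the same memory footprint as the intrinsics path (Vec3V carries an explicit pad so it occupies a full four-float lane). A minimal standard-C++ sketch of that layout idea follows; the mirror types are illustrative only and are not part of the SDK.

#include <cstdint>
#include <cstdio>

// Illustration only: standard-C++ mirrors of the layout used by the scalar AoS types.
struct alignas(16) ScalarFloatV { float x; float pad[3]; };   // mirrors FloatV
struct alignas(16) ScalarVec3V  { float x, y, z; float pad; }; // mirrors Vec3V
struct alignas(16) ScalarVec4V  { float x, y, z, w; };         // mirrors Vec4V

static_assert(sizeof(ScalarFloatV) == 16 && alignof(ScalarFloatV) == 16, "one full 16-byte lane");
static_assert(sizeof(ScalarVec3V) == sizeof(ScalarVec4V), "Vec3V pads out to a full lane");

int main()
{
    ScalarVec3V v{1.0f, 2.0f, 3.0f, 0.0f};
    std::printf("sizeof(Vec3V mirror) = %zu\n", sizeof v); // 16 on typical targets
    return 0;
}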
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsInlineAoS.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.

#ifndef PS_INLINE_AOS_H
#define PS_INLINE_AOS_H

#include "NvPreprocessor.h"

#if NV_WINDOWS_FAMILY
#include "platform/windows/NsWindowsTrigConstants.h"
#include "platform/windows/NsWindowsInlineAoS.h"
#elif NV_X360
#include "xbox360/NsXbox360InlineAoS.h"
#elif (NV_LINUX || NV_ANDROID || NV_APPLE || NV_PS4 || (NV_WINRT && NV_NEON))
#include "platform/unix/NsUnixTrigConstants.h"
#include "platform/unix/NsUnixInlineAoS.h"
#elif NV_PS3
#include "ps3/NsPS3InlineAoS.h"
#elif NV_PSP2
#include "psp2/NsPSP2InlineAoS.h"
#elif NV_XBOXONE
#include "XboxOne/NsXboxOneTrigConstants.h"
#include "XboxOne/NsXboxOneInlineAoS.h"
#else
#error "Platform not supported!"
#endif

#endif
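NsInlineAoS.h itself only dispatches to a per-platform SIMD implementation; the scalar headers in this directory are the fallback for builds without vector intrinsics. A hedged sketch of the selection pattern, assuming the usual COMPILE_VECTOR_INTRINSICS switch (the actual selection lives in the SDK's umbrella vector-math header and is not shown here):

// Sketch only: how a consumer of this directory typically picks a path.
#if COMPILE_VECTOR_INTRINSICS
#include "NsInlineAoS.h"               // per-platform SIMD implementations
#else
#include "NsVecMathAoSScalar.h"        // scalar type definitions (previous file)
#include "NsVecMathAoSScalarInline.h"  // scalar implementations (next file)
#endif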
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsVecMathAoSScalarInline.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_PHYSICS_COMMON_VECMATH_SCALAR_INLINE #define NV_PHYSICS_COMMON_VECMATH_SCALAR_INLINE #if COMPILE_VECTOR_INTRINSICS #error Scalar version should not be included when using vector intrinsics. 
#endif ///////////////////////////////////////////////////////////////////// ////INTERNAL USE ONLY AND TESTS ///////////////////////////////////////////////////////////////////// namespace internalScalarSimd { NV_FORCE_INLINE bool hasZeroElementInFloatV(const FloatV a) { return (0==a.x); } NV_FORCE_INLINE bool hasZeroElementInVec3V(const Vec3V a) { return (0==a.x || 0==a.y || 0==a.z); } NV_FORCE_INLINE bool hasZeroElementInVec4V(const Vec4V a) { return (0==a.x || 0==a.y || 0==a.z || 0==a.w); } } namespace _VecMathTests { NV_FORCE_INLINE bool allElementsEqualFloatV(const FloatV a, const FloatV b) { return (a.x==b.x); } NV_FORCE_INLINE bool allElementsEqualVec3V(const Vec3V a, const Vec3V b) { return (a.x==b.x && a.y==b.y && a.z==b.z); } NV_FORCE_INLINE bool allElementsEqualVec4V(const Vec4V a, const Vec4V b) { return (a.x==b.x && a.y==b.y && a.z==b.z && a.w==b.w); } NV_FORCE_INLINE bool allElementsEqualBoolV(const BoolV a, const BoolV b) { return (a.ux==b.ux && a.uy==b.uy && a.uz==b.uz && a.uw==b.uw); } NV_FORCE_INLINE bool allElementsEqualVecU32V(const VecU32V a, const VecU32V b) { return (a.u32[0]==b.u32[0] && a.u32[1]==b.u32[1] && a.u32[2]==b.u32[2] && a.u32[3]==b.u32[3]); } NV_FORCE_INLINE bool allElementsEqualVecI32V(const VecI32V a, const VecI32V b) { return (a.i32[0]==b.i32[0] && a.i32[1]==b.i32[1] && a.i32[2]==b.i32[2] && a.i32[3]==b.i32[3]); } #define VECMATH_AOS_EPSILON (1e-3f) NV_FORCE_INLINE bool allElementsNearEqualFloatV(const FloatV a, const FloatV b) { const float cx=a.x-b.x; return (cx>-VECMATH_AOS_EPSILON && cx<VECMATH_AOS_EPSILON); } NV_FORCE_INLINE bool allElementsNearEqualVec3V(const Vec3V a, const Vec3V b) { const float cx=a.x-b.x; const float cy=a.y-b.y; const float cz=a.z-b.z; return ( cx>-VECMATH_AOS_EPSILON && cx<VECMATH_AOS_EPSILON && cy>-VECMATH_AOS_EPSILON && cy<VECMATH_AOS_EPSILON && cz>-VECMATH_AOS_EPSILON && cz<VECMATH_AOS_EPSILON ); } NV_FORCE_INLINE bool allElementsNearEqualVec4V(const Vec4V a, const Vec4V b) { const float cx=a.x-b.x; const float cy=a.y-b.y; const float cz=a.z-b.z; const float cw=a.w-b.w; return ( cx>-VECMATH_AOS_EPSILON && cx<VECMATH_AOS_EPSILON && cy>-VECMATH_AOS_EPSILON && cy<VECMATH_AOS_EPSILON && cz>-VECMATH_AOS_EPSILON && cz<VECMATH_AOS_EPSILON && cw>-VECMATH_AOS_EPSILON && cw<VECMATH_AOS_EPSILON ); } } /////////////////////////////////////////////////////// NV_FORCE_INLINE bool isValidVec3V(const Vec3V a) { return a.pad == 0.f; } NV_FORCE_INLINE bool isFiniteFloatV(const FloatV a) { return NvIsFinite(a.x); } NV_FORCE_INLINE bool isFiniteVec3V(const Vec3V a) { return NvIsFinite(a.x) && NvIsFinite(a.y) && NvIsFinite(a.z); } NV_FORCE_INLINE bool isFiniteVec4V(const Vec4V a) { return NvIsFinite(a.x) && NvIsFinite(a.y) && NvIsFinite(a.z) && NvIsFinite(a.w); } ///////////////////////////////////////////////////////////////////// ////VECTORISED FUNCTION IMPLEMENTATIONS ///////////////////////////////////////////////////////////////////// NV_FORCE_INLINE FloatV FLoad(const float f) { return FloatV(f); } NV_FORCE_INLINE Vec3V V3Load(const float f) { return Vec3V(f,f,f); } NV_FORCE_INLINE Vec4V V4Load(const float f) { return Vec4V(f,f,f,f); } NV_FORCE_INLINE BoolV BLoad(const bool f) { #if NV_ARM // SD: Android ARM builds fail if this is done with a cast. // Might also fail because of something else but the select // operator here seems to fix everything that failed in release builds. return f ? 
BTTTT() : BFFFF(); #else uint32_t i=-(int32_t)f; return BoolV(i,i,i,i); #endif } NV_FORCE_INLINE Vec3V V3LoadA(const NvVec3& f) { VECMATHAOS_ASSERT(0 == (reinterpret_cast<uint64_t>(&f) & 0x0f)); return Vec3V(f.x,f.y,f.z); } NV_FORCE_INLINE Vec3V V3LoadU(const NvVec3& f) { return Vec3V(f.x,f.y,f.z); } NV_FORCE_INLINE Vec3V V3LoadUnsafeA(const NvVec3& f) { return Vec3V(f.x,f.y,f.z); } NV_FORCE_INLINE Vec3V V3LoadA(const float* const f) { return Vec3V(f[0], f[1], f[2]); } NV_FORCE_INLINE Vec3V V3LoadU(const float* const f) { return Vec3V(f[0], f[1], f[2]); } NV_FORCE_INLINE Vec3V Vec3V_From_Vec4V(Vec4V f) { return Vec3V(f.x,f.y,f.z); } NV_FORCE_INLINE Vec3V Vec3V_From_Vec4V_WUndefined(const Vec4V v) { return Vec3V(v.x, v.y, v.z); } NV_FORCE_INLINE Vec4V Vec4V_From_Vec3V(Vec3V f) { return Vec4V(f.x,f.y,f.z, 0.0f); } NV_FORCE_INLINE Vec4V Vec4V_From_FloatV(FloatV f) { return Vec4V(f.x,f.x,f.x,f.x); } NV_FORCE_INLINE Vec3V Vec3V_From_FloatV(FloatV f) { return Vec3V(f.x,f.x,f.x); } NV_FORCE_INLINE Vec3V Vec3V_From_FloatV_WUndefined(FloatV f) { return Vec3V(f.x,f.x,f.x); } NV_FORCE_INLINE Vec4V V4LoadA(const float* const f) { VECMATHAOS_ASSERT(0 == ((uint64_t)f & 0x0f)); return Vec4V(f[0],f[1],f[2],f[3]); } NV_FORCE_INLINE void V4StoreA(const Vec4V a, float* f) { VECMATHAOS_ASSERT(0 == ((uint64_t)f & 0x0f)); *reinterpret_cast<Vec4V*>(f) = a; } NV_FORCE_INLINE void V4StoreU(const Vec4V a, float* f) { *reinterpret_cast<Vec4V*>(f) = a; } NV_FORCE_INLINE void BStoreA(const BoolV a, uint32_t* f) { VECMATHAOS_ASSERT(0 == ((uint64_t)f & 0x0f)); *reinterpret_cast<BoolV*>(f) = a; } NV_FORCE_INLINE void U4StoreA(const VecU32V uv, uint32_t* u) { VECMATHAOS_ASSERT(0 == ((uint64_t)u & 0x0f)); *reinterpret_cast<VecU32V*>(u) = uv; } NV_FORCE_INLINE void I4StoreA(const VecI32V iv, int32_t* i) { VECMATHAOS_ASSERT(0 == ((uint64_t)i & 0x0f)); *reinterpret_cast<VecI32V*>(i) = iv; } NV_FORCE_INLINE Vec4V V4LoadU(const float* const f) { return Vec4V(f[0],f[1],f[2],f[3]); } NV_FORCE_INLINE Vec4V Vec4V_From_NvVec3_WUndefined(const NvVec3& f) { return Vec4V(f[0],f[1],f[2],0.f); } NV_FORCE_INLINE BoolV BLoad(const bool* const f) { return BoolV(-(int32_t)f[0],-(int32_t)f[1],-(int32_t)f[2],-(int32_t)f[3]); } NV_FORCE_INLINE float FStore(const FloatV a) { return a.x; } NV_FORCE_INLINE void FStore(const FloatV a, float* NV_RESTRICT f) { *f = a.x; } NV_FORCE_INLINE void V3StoreA(const Vec3V a, NvVec3& f) { f=NvVec3(a.x,a.y,a.z); } NV_FORCE_INLINE void V3StoreU(const Vec3V a, NvVec3& f) { f=NvVec3(a.x,a.y,a.z); } ////////////////////////// //FLOATV ////////////////////////// NV_FORCE_INLINE FloatV FZero() { return FLoad(0.0f); } NV_FORCE_INLINE FloatV FOne() { return FLoad(1.0f); } NV_FORCE_INLINE FloatV FHalf() { return FLoad(0.5f); } NV_FORCE_INLINE FloatV FEps() { return FLoad(NV_EPS_REAL); } NV_FORCE_INLINE FloatV FEps6() { return FLoad(1e-6f); } NV_FORCE_INLINE FloatV FMax() { return FLoad(NV_MAX_REAL); } NV_FORCE_INLINE FloatV FNegMax() { return FLoad(-NV_MAX_REAL); } NV_FORCE_INLINE FloatV FNeg(const FloatV f) { return FloatV(-f.x); } NV_FORCE_INLINE FloatV FAdd(const FloatV a, const FloatV b) { return FloatV(a.x+b.x); } NV_FORCE_INLINE FloatV FSub(const FloatV a, const FloatV b) { return FloatV(a.x-b.x); } NV_FORCE_INLINE FloatV FMul(const FloatV a, const FloatV b) { return FloatV(a.x*b.x); } NV_FORCE_INLINE FloatV FDiv(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(!internalScalarSimd::hasZeroElementInFloatV(b)); return FloatV(a.x/b.x); } NV_FORCE_INLINE FloatV FDivFast(const FloatV a, const FloatV b) { 
VECMATHAOS_ASSERT(!internalScalarSimd::hasZeroElementInFloatV(b)); return FloatV(a.x/b.x); } NV_FORCE_INLINE FloatV FRecip(const FloatV a) { VECMATHAOS_ASSERT(!internalScalarSimd::hasZeroElementInFloatV(a)); return (1.0f/a.x); } NV_FORCE_INLINE FloatV FRecipFast(const FloatV a) { VECMATHAOS_ASSERT(!internalScalarSimd::hasZeroElementInFloatV(a)); return (1.0f/a.x); } NV_FORCE_INLINE FloatV FRsqrt(const FloatV a) { VECMATHAOS_ASSERT(!internalScalarSimd::hasZeroElementInFloatV(a)); return NvRecipSqrt(a.x); } NV_FORCE_INLINE FloatV FSqrt(const FloatV a) { VECMATHAOS_ASSERT(!internalScalarSimd::hasZeroElementInFloatV(a)); return NvSqrt(a.x); } NV_FORCE_INLINE FloatV FRsqrtFast(const FloatV a) { VECMATHAOS_ASSERT(!internalScalarSimd::hasZeroElementInFloatV(a)); return NvRecipSqrt(a.x); } NV_FORCE_INLINE FloatV FScaleAdd(const FloatV a, const FloatV b, const FloatV c) { return FAdd(FMul(a,b),c); } NV_FORCE_INLINE FloatV FNegScaleSub(const FloatV a, const FloatV b, const FloatV c) { return FSub(c,FMul(a,b)); } NV_FORCE_INLINE FloatV FAbs(const FloatV a) { return FloatV(NvAbs(a.x)); } NV_FORCE_INLINE FloatV FSel(const BoolV c, const FloatV a, const FloatV b) { return FloatV(c.ux ? a.x : b.x); } NV_FORCE_INLINE BoolV FIsGrtr(const FloatV a, const FloatV b) { return BLoad(a.x>b.x); } NV_FORCE_INLINE BoolV FIsGrtrOrEq(const FloatV a, const FloatV b) { return BLoad(a.x>=b.x); } NV_FORCE_INLINE BoolV FIsEq(const FloatV a, const FloatV b) { return BLoad(a.x==b.x); } NV_FORCE_INLINE FloatV FMax(const FloatV a, const FloatV b) { return (a.x>b.x ? FloatV(a.x) : FloatV(b.x)); } NV_FORCE_INLINE FloatV FMin(const FloatV a, const FloatV b) { return (a.x>b.x ? FloatV(b.x) : FloatV(a.x)); } NV_FORCE_INLINE FloatV FClamp(const FloatV a, const FloatV minV, const FloatV maxV) { return FMax(FMin(a,maxV),minV); } NV_FORCE_INLINE uint32_t FAllGrtr(const FloatV a, const FloatV b) { return (a.x > b.x); } NV_FORCE_INLINE uint32_t FAllGrtrOrEq(const FloatV a, const FloatV b) { return (a.x >= b.x); } NV_FORCE_INLINE uint32_t FAllEq(const FloatV a, const FloatV b) { return(a.x == b.x); } NV_FORCE_INLINE FloatV FRound(const FloatV a) { return floor(a.x + 0.5f); } NV_FORCE_INLINE FloatV FSin(const FloatV a) { return sinf(a.x); } NV_FORCE_INLINE FloatV FCos(const FloatV a) { return cosf(a.x); } NV_FORCE_INLINE uint32_t FOutOfBounds(const FloatV a, const FloatV min, const FloatV max) { return (a.x>max.x || a.x<min.x); } NV_FORCE_INLINE uint32_t FInBounds(const FloatV a, const FloatV min, const FloatV max) { return (a.x>=min.x && a.x<=max.x); } NV_FORCE_INLINE uint32_t FOutOfBounds(const FloatV a, const FloatV bounds) { return FOutOfBounds(a, FNeg(bounds), bounds); } NV_FORCE_INLINE uint32_t FInBounds(const FloatV a, const FloatV bounds) { return FInBounds(a, FNeg(bounds), bounds); } ///////////////////// //VEC3V ///////////////////// NV_FORCE_INLINE Vec3V V3Splat(const FloatV f) { return Vec3V(f.x,f.x,f.x); } NV_FORCE_INLINE Vec3V V3Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z) { return Vec3V(x.x,y.x,z.x); } NV_FORCE_INLINE Vec3V V3UnitX() { return Vec3V(1.0f,0.0f,0.0f); } NV_FORCE_INLINE Vec3V V3UnitY() { return Vec3V(0.0f,1.0f,0.0f); } NV_FORCE_INLINE Vec3V V3UnitZ() { return Vec3V(0.0f,0.0f,1.0f); } NV_FORCE_INLINE FloatV V3GetX(const Vec3V f) { return FloatV(f.x); } NV_FORCE_INLINE FloatV V3GetY(const Vec3V f) { return FloatV(f.y); } NV_FORCE_INLINE FloatV V3GetZ(const Vec3V f) { return FloatV(f.z); } NV_FORCE_INLINE Vec3V V3SetX(const Vec3V v, const FloatV f) { return Vec3V(f.x,v.y,v.z); } NV_FORCE_INLINE 
Vec3V V3SetY(const Vec3V v, const FloatV f) { return Vec3V(v.x,f.x,v.z); } NV_FORCE_INLINE Vec3V V3SetZ(const Vec3V v, const FloatV f) { return Vec3V(v.x,v.y,f.x); } NV_FORCE_INLINE Vec3V V3ColX(const Vec3V a, const Vec3V b, const Vec3V c) { return Vec3V(a.x,b.x,c.x); } NV_FORCE_INLINE Vec3V V3ColY(const Vec3V a, const Vec3V b, const Vec3V c) { return Vec3V(a.y,b.y,c.y); } NV_FORCE_INLINE Vec3V V3ColZ(const Vec3V a, const Vec3V b, const Vec3V c) { return Vec3V(a.z,b.z,c.z); } NV_FORCE_INLINE Vec3V V3Zero() { return V3Load(0.0f); } NV_FORCE_INLINE Vec3V V3One() { return V3Load(1.0f); } NV_FORCE_INLINE Vec3V V3Eps() { return V3Load(NV_EPS_REAL); } NV_FORCE_INLINE Vec3V V3Neg(const Vec3V c) { return Vec3V(-c.x,-c.y,-c.z); } NV_FORCE_INLINE Vec3V V3Add(const Vec3V a, const Vec3V b) { return Vec3V(a.x+b.x,a.y+b.y,a.z+b.z); } NV_FORCE_INLINE Vec3V V3Sub(const Vec3V a, const Vec3V b) { return Vec3V(a.x-b.x,a.y-b.y,a.z-b.z); } NV_FORCE_INLINE Vec3V V3Scale(const Vec3V a, const FloatV b) { return Vec3V(a.x*b.x,a.y*b.x,a.z*b.x); } NV_FORCE_INLINE Vec3V V3Mul(const Vec3V a, const Vec3V b) { return Vec3V(a.x*b.x,a.y*b.y,a.z*b.z); } NV_FORCE_INLINE Vec3V V3ScaleInv(const Vec3V a, const FloatV b) { const float bInv=1.0f/b.x; return Vec3V(a.x*bInv,a.y*bInv,a.z*bInv); } NV_FORCE_INLINE Vec3V V3Div(const Vec3V a, const Vec3V b) { return Vec3V(a.x/b.x,a.y/b.y,a.z/b.z); } NV_FORCE_INLINE Vec3V V3ScaleInvFast(const Vec3V a, const FloatV b) { const float bInv=1.0f/b.x; return Vec3V(a.x*bInv,a.y*bInv,a.z*bInv); } NV_FORCE_INLINE Vec3V V3DivFast(const Vec3V a, const Vec3V b) { return Vec3V(a.x/b.x,a.y/b.y,a.z/b.z); } NV_FORCE_INLINE Vec3V V3Recip(const Vec3V a) { return Vec3V(1.0f/a.x,1.0f/a.y,1.0f/a.z); } NV_FORCE_INLINE Vec3V V3RecipFast(const Vec3V a) { return Vec3V(1.0f/a.x,1.0f/a.y,1.0f/a.z); } NV_FORCE_INLINE Vec3V V3Rsqrt(const Vec3V a) { return Vec3V(NvRecipSqrt(a.x),NvRecipSqrt(a.y),NvRecipSqrt(a.z)); } NV_FORCE_INLINE Vec3V V3RsqrtFast(const Vec3V a) { return Vec3V(NvRecipSqrt(a.x),NvRecipSqrt(a.y),NvRecipSqrt(a.z)); } NV_FORCE_INLINE Vec3V V3ScaleAdd(const Vec3V a, const FloatV b, const Vec3V c) { return V3Add(V3Scale(a,b),c); } NV_FORCE_INLINE Vec3V V3NegScaleSub(const Vec3V a, const FloatV b, const Vec3V c) { return V3Sub(c,V3Scale(a,b)); } NV_FORCE_INLINE Vec3V V3MulAdd(const Vec3V a, const Vec3V b, const Vec3V c) { return V3Add(V3Mul(a,b),c); } NV_FORCE_INLINE Vec3V V3NegMulSub(const Vec3V a, const Vec3V b, const Vec3V c) { return V3Sub(c,V3Mul(a,b)); } NV_FORCE_INLINE FloatV V3Dot(const Vec3V a, const Vec3V b) { return FloatV(a.x*b.x+a.y*b.y+a.z*b.z); } NV_FORCE_INLINE VecCrossV V3PrepareCross(const Vec3VArg normal) { return normal; } NV_FORCE_INLINE Vec3V V3Cross(const Vec3V a, const Vec3V b) { return Vec3V ( a.y*b.z-a.z*b.y, a.z*b.x-a.x*b.z, a.x*b.y-a.y*b.x ); } NV_FORCE_INLINE FloatV V3Length(const Vec3V a) { return FloatV(NvSqrt(a.x*a.x + a.y*a.y + a.z*a.z)); } NV_FORCE_INLINE FloatV V3LengthSq(const Vec3V a) { return FloatV(a.x*a.x + a.y*a.y + a.z*a.z); } NV_FORCE_INLINE Vec3V V3Normalize(const Vec3V a) { VECMATHAOS_ASSERT(a.x!=0 || a.y!=0 || a.z!=0); const float lengthInv=1.0f/(NvSqrt(a.x*a.x + a.y*a.y + a.z*a.z)); return Vec3V(a.x*lengthInv,a.y*lengthInv,a.z*lengthInv); } NV_FORCE_INLINE Vec3V V3NormalizeSafe(const Vec3V a) { const float length=NvSqrt(a.x*a.x + a.y*a.y + a.z*a.z); if(NV_EPS_REAL >= length) { return Vec3V(0.0f,0.0f,0.0f); } else { const float lengthInv=1.0f/length; return Vec3V(a.x*lengthInv,a.y*lengthInv,a.z*lengthInv); } } NV_FORCE_INLINE Vec3V V3NormalizeFast(const Vec3V 
a) { VECMATHAOS_ASSERT(a.x!=0 || a.y!=0 || a.z!=0); const float lengthInv=1.0f/(NvSqrt(a.x*a.x + a.y*a.y + a.z*a.z)); return Vec3V(a.x*lengthInv,a.y*lengthInv,a.z*lengthInv); } NV_FORCE_INLINE Vec3V V3Sel(const BoolV c, const Vec3V a, const Vec3V b) { return Vec3V(c.ux ? a.x : b.x, c.uy ? a.y : b.y, c.uz ? a.z : b.z); } NV_FORCE_INLINE BoolV V3IsGrtr(const Vec3V a, const Vec3V b) { return BoolV(a.x>b.x ? -1 : 0, a.y>b.y ? -1 : 0, a.z>b.z ? -1 : 0, 0); } NV_FORCE_INLINE BoolV V3IsGrtrOrEq(const Vec3V a, const Vec3V b) { return BoolV(a.x>=b.x ? (uint32_t)-1 : 0, a.y>=b.y ? (uint32_t)-1 : 0, a.z>=b.z ? (uint32_t)-1 : 0, (uint32_t)-1); } NV_FORCE_INLINE BoolV V3IsEq(const Vec3V a, const Vec3V b) { return BoolV(a.x==b.x ? (uint32_t)-1 : 0, a.y==b.y ? (uint32_t)-1 : 0, a.z==b.z ? (uint32_t)-1 : 0, (uint32_t)-1); } NV_FORCE_INLINE Vec3V V3Max(const Vec3V a, const Vec3V b) { return Vec3V(a.x>b.x ? a.x : b.x, a.y>b.y ? a.y : b.y, a.z>b.z ? a.z : b.z); } NV_FORCE_INLINE Vec3V V3Min(const Vec3V a, const Vec3V b) { return Vec3V(a.x<b.x ? a.x : b.x, a.y<b.y ? a.y : b.y, a.z<b.z ? a.z : b.z); } //Extract the maximum value from a NV_FORCE_INLINE FloatV V3ExtractMax(const Vec3V a) { const float t0 = (a.x >= a.y) ? a.x : a.y; return t0 >= a.z ? t0 : a.z; } //Extract the maximum value from a NV_FORCE_INLINE FloatV V3ExtractMin(const Vec3V a) { const float t0 = (a.x <= a.y) ? a.x : a.y; return t0 <= a.z ? t0 : a.z; } //return (a >= 0.0f) ? 1.0f : -1.0f; NV_FORCE_INLINE Vec3V V3Sign(const Vec3V a) { return Vec3V((a.x >= 0.f ? 1.f : -1.f), (a.y >= 0.f ? 1.f : -1.f), (a.z >= 0.f ? 1.f : -1.f)); } NV_FORCE_INLINE Vec3V V3Clamp(const Vec3V a, const Vec3V minV, const Vec3V maxV) { return V3Max(V3Min(a,maxV),minV); } NV_FORCE_INLINE Vec3V V3Abs(const Vec3V a) { return V3Max(a,V3Neg(a)); } NV_FORCE_INLINE uint32_t V3AllGrtr(const Vec3V a, const Vec3V b) { return ((a.x > b.x) & (a.y > b.y) & (a.z > b.z)) ? 1 : 0; } NV_FORCE_INLINE uint32_t V3AllGrtrOrEq(const Vec3V a, const Vec3V b) { return ((a.x >= b.x) & (a.y >= b.y) & (a.z >= b.z)) ? 1 : 0; } NV_FORCE_INLINE uint32_t V3AllEq(const Vec3V a, const Vec3V b) { return ((a.x == b.x) & (a.y == b.y) & (a.z == b.z)) ? 
1 : 0; } NV_FORCE_INLINE Vec3V V3Round(const Vec3V a) { return Vec3V(floor(a.x + 0.5f), floor(a.y + 0.5f), floor(a.z + 0.5f)); } NV_FORCE_INLINE Vec3V V3Sin(const Vec3V a) { return Vec3V(sinf(a.x), sinf(a.y), sinf(a.z)); } NV_FORCE_INLINE Vec3V V3Cos(const Vec3V a) { return Vec3V(cosf(a.x), cosf(a.y), cosf(a.z)); } NV_FORCE_INLINE Vec3V V3PermYZZ(const Vec3V a) { return Vec3V(a.y,a.z,a.z); } NV_FORCE_INLINE Vec3V V3PermXYX(const Vec3V a) { return Vec3V(a.x,a.y,a.x); } NV_FORCE_INLINE Vec3V V3PermYZX(const Vec3V a) { return Vec3V(a.y,a.z,a.x); } NV_FORCE_INLINE Vec3V V3PermZXY(const Vec3V a) { return Vec3V(a.z,a.x,a.y); } NV_FORCE_INLINE Vec3V V3PermZZY(const Vec3V a) { return Vec3V(a.z,a.z,a.y); } NV_FORCE_INLINE Vec3V V3PermYXX(const Vec3V a) { return Vec3V(a.y,a.x,a.x); } NV_FORCE_INLINE Vec3V V3Perm_Zero_1Z_0Y(const Vec3V v0, const Vec3V v1) { return Vec3V(0.0f, v1.z, v0.y); } NV_FORCE_INLINE Vec3V V3Perm_0Z_Zero_1X(const Vec3V v0, const Vec3V v1) { return Vec3V(v0.z, 0.0f, v1.x); } NV_FORCE_INLINE Vec3V V3Perm_1Y_0X_Zero(const Vec3V v0, const Vec3V v1) { return Vec3V(v1.y, v0.x, 0.0f); } NV_FORCE_INLINE FloatV V3SumElems(const Vec3V a) { return FloatV(a.x + a.y + a.z); } NV_FORCE_INLINE uint32_t V3OutOfBounds(const Vec3V a, const Vec3V min, const Vec3V max) { return (a.x>max.x || a.y>max.y || a.z>max.z || a.x<min.x || a.y<min.y || a.z<min.z); } NV_FORCE_INLINE uint32_t V3InBounds(const Vec3V a, const Vec3V min, const Vec3V max) { return (a.x<=max.x && a.y<=max.y && a.z<=max.z && a.x>=min.x && a.y>=min.y && a.z>=min.z); } NV_FORCE_INLINE uint32_t V3OutOfBounds(const Vec3V a, const Vec3V bounds) { return V3OutOfBounds(a, V3Neg(bounds), bounds); } NV_FORCE_INLINE uint32_t V3InBounds(const Vec3V a, const Vec3V bounds) { return V3InBounds(a, V3Neg(bounds), bounds); } ///////////////////////// //VEC4V ///////////////////////// NV_FORCE_INLINE Vec4V V4Splat(const FloatV f) { return Vec4V(f.x,f.x,f.x,f.x); } NV_FORCE_INLINE Vec4V V4Merge(const FloatV* const floatVArray) { return Vec4V(floatVArray[0].x,floatVArray[1].x,floatVArray[2].x,floatVArray[3].x); } NV_FORCE_INLINE Vec4V V4Merge(const FloatVArg x,const FloatVArg y, const FloatVArg z, const FloatVArg w) { return Vec4V(x.x,y.x,z.x,w.x); } NV_FORCE_INLINE Vec4V V4MergeW(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w) { return Vec4V(x.w, y.w, z.w, w.w); } NV_FORCE_INLINE Vec4V V4MergeZ(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w) { return Vec4V(x.z, y.z, z.z, w.z); } NV_FORCE_INLINE Vec4V V4MergeY(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w) { return Vec4V(x.y, y.y, z.y, w.y); } NV_FORCE_INLINE Vec4V V4MergeX(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w) { return Vec4V(x.x, y.x, z.x, w.x); } NV_FORCE_INLINE Vec4V V4UnpackXY(const Vec4VArg a, const Vec4VArg b) { return Vec4V(a.x, b.x, a.y, b.y); } NV_FORCE_INLINE Vec4V V4UnpackZW(const Vec4VArg a, const Vec4VArg b) { return Vec4V(a.z, b.z, a.w, b.w); } NV_FORCE_INLINE Vec4V V4UnitX() { return Vec4V(1.0f,0.0f,0.0f,0.0f); } NV_FORCE_INLINE Vec4V V4UnitY() { return Vec4V(0.0f,1.0f,0.0f,0.0f); } NV_FORCE_INLINE Vec4V V4UnitZ() { return Vec4V(0.0f,0.0f,1.0f,0.0f); } NV_FORCE_INLINE Vec4V V4UnitW() { return Vec4V(0.0f,0.0f,0.0f,1.0f); } NV_FORCE_INLINE FloatV V4GetX(const Vec4V f) { return FloatV(f.x); } NV_FORCE_INLINE FloatV V4GetY(const Vec4V f) { return FloatV(f.y); } NV_FORCE_INLINE FloatV V4GetZ(const Vec4V f) { return FloatV(f.z); } NV_FORCE_INLINE FloatV V4GetW(const Vec4V f) { return 
FloatV(f.w); } NV_FORCE_INLINE Vec4V V4SetX(const Vec4V v, const FloatV f) { return Vec4V(f.x,v.y,v.z,v.w); } NV_FORCE_INLINE Vec4V V4SetY(const Vec4V v, const FloatV f) { return Vec4V(v.x,f.x,v.z,v.w); } NV_FORCE_INLINE Vec4V V4SetZ(const Vec4V v, const FloatV f) { return Vec4V(v.x,v.y,f.x,v.w); } NV_FORCE_INLINE Vec4V V4SetW(const Vec4V v, const FloatV f) { return Vec4V(v.x,v.y,v.z,f.x); } NV_FORCE_INLINE Vec4V V4SetW(const Vec3V v, const FloatV f) { return Vec4V(v.x,v.y,v.z,f.x); } NV_FORCE_INLINE Vec4V V4ClearW(const Vec4V v) { return Vec4V(v.x,v.y,v.z,0); } NV_FORCE_INLINE Vec4V V4Perm_YXWZ(const Vec4V v) { return Vec4V(v.y, v.x, v.w, v.z); } NV_FORCE_INLINE Vec4V V4Perm_XZXZ(const Vec4V v) { return Vec4V(v.x, v.z, v.x, v.z); } NV_FORCE_INLINE Vec4V V4Perm_YWYW(const Vec4V v) { return Vec4V(v.y, v.w, v.y, v.w); } template<uint8_t _x, uint8_t _y, uint8_t _z, uint8_t _w> NV_FORCE_INLINE Vec4V V4Perm(const Vec4V v) { const float f[4] = {v.x,v.y,v.z,v.w}; return Vec4V(f[_x], f[_y], f[_z], f[_w]); } NV_FORCE_INLINE Vec4V V4Zero() { return V4Load(0.0f); } NV_FORCE_INLINE Vec4V V4One() { return V4Load(1.0f); } NV_FORCE_INLINE Vec4V V4Eps() { return V4Load(NV_EPS_REAL); } NV_FORCE_INLINE Vec4V V4Neg(const Vec4V c) { return Vec4V(-c.x,-c.y,-c.z,-c.w); } NV_FORCE_INLINE Vec4V V4Add(const Vec4V a, const Vec4V b) { return Vec4V(a.x+b.x,a.y+b.y,a.z+b.z,a.w+b.w); } NV_FORCE_INLINE Vec4V V4Sub(const Vec4V a, const Vec4V b) { return Vec4V(a.x-b.x,a.y-b.y,a.z-b.z,a.w-b.w); } NV_FORCE_INLINE Vec4V V4Scale(const Vec4V a, const FloatV b) { return Vec4V(a.x*b.x,a.y*b.x,a.z*b.x,a.w*b.x); } NV_FORCE_INLINE Vec4V V4Mul(const Vec4V a, const Vec4V b) { return Vec4V(a.x*b.x,a.y*b.y,a.z*b.z,a.w*b.w); } NV_FORCE_INLINE Vec4V V4ScaleInv(const Vec4V a, const FloatV b) { const float bInv=1.0f/b.x; return Vec4V(a.x*bInv,a.y*bInv,a.z*bInv,a.w*bInv); } NV_FORCE_INLINE Vec4V V4Div(const Vec4V a, const Vec4V b) { VECMATHAOS_ASSERT(b.x!=0 && b.y!=0 && b.z!=0 && b.w!=0); return Vec4V(a.x/b.x,a.y/b.y,a.z/b.z,a.w/b.w); } NV_FORCE_INLINE Vec4V V4ScaleInvFast(const Vec4V a, const FloatV b) { const float bInv=1.0f/b.x; return Vec4V(a.x*bInv,a.y*bInv,a.z*bInv,a.w*bInv); } NV_FORCE_INLINE Vec4V V4DivFast(const Vec4V a, const Vec4V b) { return Vec4V(a.x/b.x,a.y/b.y,a.z/b.z,a.w/b.w); } NV_FORCE_INLINE Vec4V V4Recip(const Vec4V a) { return Vec4V(1.0f/a.x,1.0f/a.y,1.0f/a.z,1.0f/a.w); } NV_FORCE_INLINE Vec4V V4RecipFast(const Vec4V a) { return Vec4V(1.0f/a.x,1.0f/a.y,1.0f/a.z,1.0f/a.w); } NV_FORCE_INLINE Vec4V V4Rsqrt(const Vec4V a) { return Vec4V(NvRecipSqrt(a.x),NvRecipSqrt(a.y),NvRecipSqrt(a.z),NvRecipSqrt(a.w)); } NV_FORCE_INLINE Vec4V V4RsqrtFast(const Vec4V a) { return Vec4V(NvRecipSqrt(a.x),NvRecipSqrt(a.y),NvRecipSqrt(a.z),NvRecipSqrt(a.w)); } NV_FORCE_INLINE Vec4V V4Sqrt(const Vec4V a) { return Vec4V(NvSqrt(a.x),NvSqrt(a.y),NvSqrt(a.z),NvSqrt(a.w)); } NV_FORCE_INLINE Vec4V V4ScaleAdd(const Vec4V a, const FloatV b, const Vec4V c) { return V4Add(V4Scale(a,b),c); } NV_FORCE_INLINE Vec4V V4NegScaleSub(const Vec4V a, const FloatV b, const Vec4V c) { return V4Sub(c,V4Scale(a,b)); } NV_FORCE_INLINE Vec4V V4MulAdd(const Vec4V a, const Vec4V b, const Vec4V c) { return V4Add(V4Mul(a,b),c); } NV_FORCE_INLINE Vec4V V4NegMulSub(const Vec4V a, const Vec4V b, const Vec4V c) { return V4Sub(c,V4Mul(a,b)); } NV_FORCE_INLINE FloatV V4SumElements(const Vec4V a) { return FloatV(a.x + a.y + a.z + a.w); } NV_FORCE_INLINE FloatV V4Dot(const Vec4V a, const Vec4V b) { return FloatV(a.x*b.x+a.y*b.y+a.z*b.z+a.w*b.w); } NV_FORCE_INLINE FloatV 
V4Length(const Vec4V a) { return FloatV(NvSqrt(a.x*a.x + a.y*a.y +a.z*a.z + a.w*a.w)); } NV_FORCE_INLINE FloatV V4LengthSq(const Vec4V a) { return V4Dot(a,a); } NV_FORCE_INLINE Vec4V V4Normalize(const Vec4V a) { VECMATHAOS_ASSERT(0!=a.x || 0!=a.y || 0!=a.z || 0!=a.w); const FloatV length=FloatV(V4Length(a)); return V4ScaleInv(a,length); } NV_FORCE_INLINE Vec4V V4NormalizeSafe(const Vec4V a) { const FloatV length=FloatV(V4Length(a)); if(NV_EPS_REAL>=length.x) { return Vec4V(0.0f,0.0f,0.0f,0.0f); } else { return V4ScaleInv(a,length); } } NV_FORCE_INLINE Vec4V V4NormalizeFast(const Vec4V a) { VECMATHAOS_ASSERT(0!=a.x || 0!=a.y || 0!=a.z || 0!=a.w); const FloatV length=FloatV(V4Length(a)); return V4ScaleInv(a,length); } NV_FORCE_INLINE Vec4V V4Sel(const BoolV c, const Vec4V a, const Vec4V b) { return Vec4V(c.ux ? a.x : b.x, c.uy ? a.y : b.y, c.uz ? a.z : b.z, c.uw ? a.w : b.w); } NV_FORCE_INLINE BoolV V4IsGrtr(const Vec4V a, const Vec4V b) { return BoolV(a.x>b.x ? -1 : 0, a.y>b.y ? -1 : 0, a.z>b.z ? -1 : 0, a.w>b.w ? -1 : 0); }; NV_FORCE_INLINE BoolV V4IsGrtrOrEq(const Vec4V a, const Vec4V b) { return BoolV(a.x>=b.x ? -1 : 0, a.y>=b.y ? -1 : 0, a.z>=b.z ? -1 : 0, a.w>=b.w ? -1 : 0); } NV_FORCE_INLINE BoolV V4IsEq(const Vec4V a, const Vec4V b) { return BoolV(a.x==b.x ? -1 : 0, a.y==b.y ? -1 : 0, a.z==b.z ? -1 : 0, a.w==b.w ? -1 : 0); } NV_FORCE_INLINE Vec4V V4Max(const Vec4V a, const Vec4V b) { return Vec4V(a.x>b.x ? a.x : b.x, a.y>b.y ? a.y : b.y, a.z>b.z ? a.z : b.z, a.w>b.w ? a.w : b.w); } NV_FORCE_INLINE Vec4V V4Min(const Vec4V a, const Vec4V b) { return Vec4V(a.x<b.x ? a.x : b.x, a.y<b.y ? a.y : b.y, a.z<b.z ? a.z : b.z, a.w<b.w ? a.w : b.w); } //Extract the maximum value from a NV_FORCE_INLINE FloatV V4ExtractMax(const Vec4V a) { const float t0 = (a.x >= a.y) ? a.x : a.y; const float t1 = (a.z >= a.w) ? a.x : a.w; return t0 >= t1 ? t0 : t1; } //Extract the maximum value from a NV_FORCE_INLINE FloatV V4ExtractMin(const Vec4V a) { const float t0 = (a.x <= a.y) ? a.x : a.y; const float t1 = (a.z <= a.w) ? a.x : a.w; return t0 <= t1 ? t0 : t1; } NV_FORCE_INLINE Vec4V V4Clamp(const Vec4V a, const Vec4V minV, const Vec4V maxV) { return V4Max(V4Min(a,maxV),minV); } NV_FORCE_INLINE Vec4V V4Round(const Vec4V a) { return Vec4V(floor(a.x + 0.5f), floor(a.y + 0.5f), floor(a.z + 0.5f), floor(a.w + 0.5f)); } NV_FORCE_INLINE Vec4V V4Sin(const Vec4V a) { return Vec4V(sinf(a.x), sinf(a.y), sinf(a.z), sinf(a.w)); } NV_FORCE_INLINE Vec4V V4Cos(const Vec4V a) { return Vec4V(cosf(a.x), cosf(a.y), cosf(a.z), cosf(a.w)); } NV_FORCE_INLINE uint32_t V4AllGrtr(const Vec4V a, const Vec4V b) { return ((a.x > b.x) & (a.y > b.y) & (a.z > b.z) & (a.w > b.w)) ? 1 : 0; } NV_FORCE_INLINE uint32_t V4AllGrtrOrEq(const Vec4V a, const Vec4V b) { return ((a.x >= b.x) & (a.y >= b.y) & (a.z >= b.z) & (a.w >= b.w)) ? 1 : 0; } NV_FORCE_INLINE uint32_t V4AllEq(const Vec4V a, const Vec4V b) { return ((a.x == b.x) & (a.y == b.y) & (a.z == b.z) & (a.w == b.w)) ? 
1 : 0; } NV_FORCE_INLINE void V4Transpose(Vec4V& col0, Vec4V& col1, Vec4V& col2, Vec4V& col3) { const float t01 = col0.y, t02 = col0.z, t03 = col0.w; const float t12 = col1.z, t13 = col1.w; const float t23 = col2.w; col0.y = col1.x; col0.z = col2.x; col0.w = col3.x; col1.z = col2.y; col1.w = col3.y; col2.w = col3.z; col1.x = t01; col2.x = t02; col3.x = t03; col2.y = t12; col3.y = t13; col3.z = t23; } NV_FORCE_INLINE BoolV BFFFF() { return BoolV(0, 0, 0, 0); } NV_FORCE_INLINE BoolV BFFFT() { return BoolV(0, 0, 0, (uint32_t)-1); } NV_FORCE_INLINE BoolV BFFTF() { return BoolV(0, 0, (uint32_t)-1, 0); } NV_FORCE_INLINE BoolV BFFTT() { return BoolV(0, 0, (uint32_t)-1, (uint32_t)-1); } NV_FORCE_INLINE BoolV BFTFF() { return BoolV(0, (uint32_t)-1, 0, 0); } NV_FORCE_INLINE BoolV BFTFT() { return BoolV(0, (uint32_t)-1, 0, (uint32_t)-1); } NV_FORCE_INLINE BoolV BFTTF() { return BoolV(0, (uint32_t)-1, (uint32_t)-1, 0); } NV_FORCE_INLINE BoolV BFTTT() { return BoolV(0, (uint32_t)-1, (uint32_t)-1, (uint32_t)-1); } NV_FORCE_INLINE BoolV BTFFF() { return BoolV((uint32_t)-1, 0, 0, 0); } NV_FORCE_INLINE BoolV BTFFT() { return BoolV((uint32_t)-1, 0, 0, (uint32_t)-1); } NV_FORCE_INLINE BoolV BTFTF() { return BoolV ((uint32_t)-1, 0, (uint32_t)-1, 0); } NV_FORCE_INLINE BoolV BTFTT() { return BoolV((uint32_t)-1, 0, (uint32_t)-1, (uint32_t)-1); } NV_FORCE_INLINE BoolV BTTFF() { return BoolV((uint32_t)-1, (uint32_t)-1, 0, 0); } NV_FORCE_INLINE BoolV BTTFT() { return BoolV((uint32_t)-1, (uint32_t)-1, 0, (uint32_t)-1); } NV_FORCE_INLINE BoolV BTTTF() { return BoolV((uint32_t)-1, (uint32_t)-1, (uint32_t)-1, 0); } NV_FORCE_INLINE BoolV BTTTT() { return BoolV((uint32_t)-1, (uint32_t)-1, (uint32_t)-1, (uint32_t)-1); } NV_FORCE_INLINE BoolV BXMask() {return BTFFF();} NV_FORCE_INLINE BoolV BYMask() {return BFTFF();} NV_FORCE_INLINE BoolV BZMask() {return BFFTF();} NV_FORCE_INLINE BoolV BWMask() {return BFFFT();} NV_FORCE_INLINE BoolV BGetX(const BoolV a) { return BoolV(a.ux, a.ux, a.ux, a.ux); } NV_FORCE_INLINE BoolV BGetY(const BoolV a) { return BoolV(a.uy, a.uy, a.uy, a.uy); } NV_FORCE_INLINE BoolV BGetZ(const BoolV a) { return BoolV(a.uz, a.uz, a.uz, a.uz); } NV_FORCE_INLINE BoolV BGetW(const BoolV a) { return BoolV(a.uw, a.uw, a.uw, a.uw); } NV_FORCE_INLINE BoolV BSetX(const BoolV v, const BoolV f) { return BoolV(f.ux,v.uy,v.uz,v.uw); } NV_FORCE_INLINE BoolV BSetY(const BoolV v, const BoolV f) { return BoolV(v.ux, f.uy, v.uz, v.uw); } NV_FORCE_INLINE BoolV BSetZ(const BoolV v, const BoolV f) { return BoolV(v.ux, v.uy, f.uz, v.uw); } NV_FORCE_INLINE BoolV BSetW(const BoolV v, const BoolV f) { return BoolV(v.ux, v.uy, v.uz, f.uw); } template<int index> BoolV BSplatElement(BoolV a) { uint32_t* b=(uint32_t*)&a; return BoolV(b[index], b[index], b[index], b[index]); } NV_FORCE_INLINE BoolV BAnd(const BoolV a, const BoolV b) { return BoolV(a.ux && b.ux ? (uint32_t)-1 : 0, a.uy && b.uy ? (uint32_t)-1 : 0, a.uz && b.uz ? (uint32_t)-1 : 0, a.uw && b.uw ? (uint32_t)-1 : 0); } NV_FORCE_INLINE BoolV BAndNot(const BoolV a, const BoolV b) { return BoolV(a.ux & ~b.ux, a.uy & ~b.uy, a.uz & ~b.uz, a.uw & ~b.uw); } NV_FORCE_INLINE BoolV BNot(const BoolV a) { return BoolV(~a.ux, ~a.uy, ~a.uz, ~a.uw); } NV_FORCE_INLINE BoolV BOr(const BoolV a, const BoolV b) { return BoolV(a.ux || b.ux ? (uint32_t)-1 : 0, a.uy || b.uy ? (uint32_t)-1 : 0, a.uz || b.uz ? (uint32_t)-1 : 0, a.uw || b.uw ? (uint32_t)-1 : 0); } NV_FORCE_INLINE uint32_t BAllEq(const BoolV a, const BoolV b) { return (a.ux==b.ux && a.uy==b.uy && a.uz==b.uz && a.uw==b.uw ? 
1 : 0); } NV_FORCE_INLINE BoolV BAllTrue4(const BoolV a) { return (a.ux & a.uy & a.uz & a.uw) ? BTTTT() : BFFFF(); } NV_FORCE_INLINE BoolV BAnyTrue4(const BoolV a) { return (a.ux | a.uy | a.uz | a.uw) ? BTTTT() : BFFFF(); } NV_FORCE_INLINE BoolV BAllTrue3(const BoolV a) { return (a.ux & a.uy & a.uz) ? BTTTT() : BFFFF(); } NV_FORCE_INLINE BoolV BAnyTrue3(const BoolV a) { return (a.ux | a.uy | a.uz) ? BTTTT() : BFFFF(); } NV_FORCE_INLINE uint32_t BAllEqTTTT(const BoolV a) { return BAllEq(a, BTTTT()); } NV_FORCE_INLINE uint32_t BAllEqFFFF(const BoolV a) { return BAllEq(a, BFFFF()); } NV_FORCE_INLINE uint32_t BGetBitMask(const BoolV a) { return (a.ux & 1) | (a.uy & 2) | (a.uz & 4) | (a.uw & 8); } ////////////////////////////////// //MAT33V ////////////////////////////////// NV_FORCE_INLINE Vec3V M33MulV3(const Mat33V& a, const Vec3V b) { return Vec3V ( a.col0.x*b.x + a.col1.x*b.y + a.col2.x*b.z, a.col0.y*b.x + a.col1.y*b.y + a.col2.y*b.z, a.col0.z*b.x + a.col1.z*b.y + a.col2.z*b.z ); } NV_FORCE_INLINE Vec3V M33TrnspsMulV3(const Mat33V& a, const Vec3V b) { return Vec3V ( a.col0.x*b.x + a.col0.y*b.y + a.col0.z*b.z, a.col1.x*b.x + a.col1.y*b.y + a.col1.z*b.z, a.col2.x*b.x + a.col2.y*b.y + a.col2.z*b.z ); } NV_FORCE_INLINE Vec3V M33MulV3AddV3(const Mat33V& A, const Vec3V b, const Vec3V c) { const FloatV x=V3GetX(b); const FloatV y=V3GetY(b); const FloatV z=V3GetZ(b); Vec3V result = V3ScaleAdd(A.col0, x, c); result = V3ScaleAdd(A.col1, y, result); return V3ScaleAdd(A.col2, z, result); } NV_FORCE_INLINE Mat33V M33MulM33(const Mat33V& a, const Mat33V& b) { return Mat33V(M33MulV3(a,b.col0),M33MulV3(a,b.col1),M33MulV3(a,b.col2)); } NV_FORCE_INLINE Mat33V M33Add(const Mat33V& a, const Mat33V& b) { return Mat33V(V3Add(a.col0,b.col0),V3Add(a.col1,b.col1),V3Add(a.col2,b.col2)); } NV_FORCE_INLINE Mat33V M33Scale(const Mat33V& a, const FloatV& b) { return Mat33V(V3Scale(a.col0,b),V3Scale(a.col1,b),V3Scale(a.col2,b)); } NV_FORCE_INLINE Mat33V M33Sub(const Mat33V& a, const Mat33V& b) { return Mat33V(V3Sub(a.col0,b.col0),V3Sub(a.col1,b.col1),V3Sub(a.col2,b.col2)); } NV_FORCE_INLINE Mat33V M33Neg(const Mat33V& a) { return Mat33V(V3Neg(a.col0),V3Neg(a.col1),V3Neg(a.col2)); } NV_FORCE_INLINE Mat33V M33Abs(const Mat33V& a) { return Mat33V(V3Abs(a.col0),V3Abs(a.col1),V3Abs(a.col2)); } NV_FORCE_INLINE Mat33V M33Diagonal(const Vec3VArg d) { const Vec3V x = V3Mul(V3UnitX(), d); const Vec3V y = V3Mul(V3UnitY(), d); const Vec3V z = V3Mul(V3UnitZ(), d); return Mat33V(x, y, z); } NV_FORCE_INLINE Mat33V M33Inverse(const Mat33V& a) { const float det = a.col0.x*(a.col1.y*a.col2.z - a.col1.z*a.col2.y) -a.col1.x*(a.col0.y*a.col2.z - a.col2.y*a.col0.z) +a.col2.x*(a.col0.y*a.col1.z - a.col1.y*a.col0.z); const float invDet = 1.0f/det; Mat33V ret; ret.col0.x = invDet*(a.col1.y*a.col2.z - a.col2.y*a.col1.z); ret.col0.y = invDet*(a.col2.y*a.col0.z - a.col0.y*a.col2.z); ret.col0.z = invDet*(a.col0.y*a.col1.z - a.col1.y*a.col0.z); ret.col1.x = invDet*(a.col2.x*a.col1.z - a.col1.x*a.col2.z); ret.col1.y = invDet*(a.col0.x*a.col2.z - a.col2.x*a.col0.z); ret.col1.z = invDet*(a.col1.x*a.col0.z - a.col0.x*a.col1.z); ret.col2.x = invDet*(a.col1.x*a.col2.y - a.col2.x*a.col1.y); ret.col2.y = invDet*(a.col2.x*a.col0.y - a.col0.x*a.col2.y); ret.col2.z = invDet*(a.col0.x*a.col1.y - a.col1.x*a.col0.y); return ret; } NV_FORCE_INLINE Mat33V Mat33V_From_NvMat33(const NvMat33 &m) { return Mat33V(V3LoadU(m.column0), V3LoadU(m.column1), V3LoadU(m.column2)); } NV_FORCE_INLINE void NvMat33_From_Mat33V(const Mat33V &m, NvMat33 &out) { 
NV_ASSERT((size_t(&out)&15)==0); V3StoreU(m.col0, out.column0); V3StoreU(m.col1, out.column1); V3StoreU(m.col2, out.column2); } NV_FORCE_INLINE Mat33V M33Trnsps(const Mat33V& a) { return Mat33V(Vec3V(a.col0.x,a.col1.x,a.col2.x),Vec3V(a.col0.y,a.col1.y,a.col2.y),Vec3V(a.col0.z,a.col1.z,a.col2.z)); } NV_FORCE_INLINE Mat33V M33Identity() { return Mat33V ( V3UnitX(), V3UnitY(), V3UnitZ() ); } ////////////////////////////////// //MAT34V ////////////////////////////////// NV_FORCE_INLINE Vec3V M34MulV3(const Mat34V& a, const Vec3V b) { return Vec3V ( a.col0.x*b.x + a.col1.x*b.y + a.col2.x*b.z + a.col3.x, a.col0.y*b.x + a.col1.y*b.y + a.col2.y*b.z + a.col3.y, a.col0.z*b.x + a.col1.z*b.y + a.col2.z*b.z + a.col3.z ); } NV_FORCE_INLINE Vec3V M34Mul33V3(const Mat34V& a, const Vec3V b) { return Vec3V ( a.col0.x*b.x + a.col1.x*b.y + a.col2.x*b.z, a.col0.y*b.x + a.col1.y*b.y + a.col2.y*b.z, a.col0.z*b.x + a.col1.z*b.y + a.col2.z*b.z ); } NV_FORCE_INLINE Vec3V M34TrnspsMul33V3(const Mat34V& a, const Vec3V b) { return Vec3V ( a.col0.x*b.x + a.col0.y*b.y + a.col0.z*b.z, a.col1.x*b.x + a.col1.y*b.y + a.col1.z*b.z, a.col2.x*b.x + a.col2.y*b.y + a.col2.z*b.z ); } NV_FORCE_INLINE Mat34V M34MulM34(const Mat34V& a, const Mat34V& b) { return Mat34V(M34Mul33V3(a,b.col0),M34Mul33V3(a,b.col1),M34Mul33V3(a,b.col2),M34MulV3(a,b.col3)); } NV_FORCE_INLINE Mat33V M34MulM33(const Mat34V& a, const Mat33V& b) { return Mat33V(M34Mul33V3(a,b.col0),M34Mul33V3(a,b.col1),M34Mul33V3(a,b.col2)); } NV_FORCE_INLINE Mat33V M34Mul33V3(const Mat34V& a, const Mat33V& b) { return Mat33V(M34Mul33V3(a,b.col0),M34Mul33V3(a,b.col1),M34Mul33V3(a,b.col2)); } NV_FORCE_INLINE Mat33V M34Mul33MM34(const Mat34V& a, const Mat34V& b) { return Mat33V(M34Mul33V3(a,b.col0),M34Mul33V3(a,b.col1),M34Mul33V3(a,b.col2)); } NV_FORCE_INLINE Mat34V M34Add(const Mat34V& a, const Mat34V& b) { return Mat34V(V3Add(a.col0,b.col0),V3Add(a.col1,b.col1),V3Add(a.col2,b.col2),V3Add(a.col3,b.col3)); } NV_FORCE_INLINE Mat33V M34Trnsps33(const Mat34V& a) { return Mat33V(Vec3V(a.col0.x,a.col1.x,a.col2.x),Vec3V(a.col0.y,a.col1.y,a.col2.y),Vec3V(a.col0.z,a.col1.z,a.col2.z)); } ////////////////////////////////// //MAT44V ////////////////////////////////// NV_FORCE_INLINE Vec4V M44MulV4(const Mat44V& a, const Vec4V b) { return Vec4V ( a.col0.x*b.x + a.col1.x*b.y + a.col2.x*b.z + a.col3.x*b.w, a.col0.y*b.x + a.col1.y*b.y + a.col2.y*b.z + a.col3.y*b.w, a.col0.z*b.x + a.col1.z*b.y + a.col2.z*b.z + a.col3.z*b.w, a.col0.w*b.x + a.col1.w*b.y + a.col2.w*b.z + a.col3.w*b.w ); } NV_FORCE_INLINE Vec4V M44TrnspsMulV4(const Mat44V& a, const Vec4V b) { return Vec4V ( a.col0.x*b.x + a.col0.y*b.y + a.col0.z*b.z + a.col0.w*b.w, a.col1.x*b.x + a.col1.y*b.y + a.col1.z*b.z + a.col1.w*b.w, a.col2.x*b.x + a.col2.y*b.y + a.col2.z*b.z + a.col2.w*b.w, a.col3.x*b.x + a.col3.y*b.y + a.col3.z*b.z + a.col3.w*b.w ); } NV_FORCE_INLINE Mat44V M44MulM44(const Mat44V& a, const Mat44V& b) { return Mat44V(M44MulV4(a,b.col0),M44MulV4(a,b.col1),M44MulV4(a,b.col2),M44MulV4(a,b.col3)); } NV_FORCE_INLINE Mat44V M44Add(const Mat44V& a, const Mat44V& b) { return Mat44V(V4Add(a.col0,b.col0),V4Add(a.col1,b.col1),V4Add(a.col2,b.col2),V4Add(a.col3,b.col3)); } NV_FORCE_INLINE Mat44V M44Inverse(const Mat44V& a) { float tmp[12]; float dst[16]; float det; const float src[16] = { a.col0.x, a.col0.y, a.col0.z, a.col0.w, a.col1.x, a.col1.y, a.col1.z, a.col1.w, a.col2.x, a.col2.y, a.col2.z, a.col2.w, a.col3.x, a.col3.y, a.col3.z, a.col3.w }; tmp[0] = src[10] * src[15]; tmp[1] = src[11] * src[14]; tmp[2] = src[9] * src[15]; tmp[3] = 
src[11] * src[13]; tmp[4] = src[9] * src[14]; tmp[5] = src[10] * src[13]; tmp[6] = src[8] * src[15]; tmp[7] = src[11] * src[12]; tmp[8] = src[8] * src[14]; tmp[9] = src[10] * src[12]; tmp[10] = src[8] * src[13]; tmp[11] = src[9] * src[12]; dst[0] = tmp[0]*src[5] + tmp[3]*src[6] + tmp[4]*src[7]; dst[0] -= tmp[1]*src[5] + tmp[2]*src[6] + tmp[5]*src[7]; dst[1] = tmp[1]*src[4] + tmp[6]*src[6] + tmp[9]*src[7]; dst[1] -= tmp[0]*src[4] + tmp[7]*src[6] + tmp[8]*src[7]; dst[2] = tmp[2]*src[4] + tmp[7]*src[5] + tmp[10]*src[7]; dst[2] -= tmp[3]*src[4] + tmp[6]*src[5] + tmp[11]*src[7]; dst[3] = tmp[5]*src[4] + tmp[8]*src[5] + tmp[11]*src[6]; dst[3] -= tmp[4]*src[4] + tmp[9]*src[5] + tmp[10]*src[6]; dst[4] = tmp[1]*src[1] + tmp[2]*src[2] + tmp[5]*src[3]; dst[4] -= tmp[0]*src[1] + tmp[3]*src[2] + tmp[4]*src[3]; dst[5] = tmp[0]*src[0] + tmp[7]*src[2] + tmp[8]*src[3]; dst[5] -= tmp[1]*src[0] + tmp[6]*src[2] + tmp[9]*src[3]; dst[6] = tmp[3]*src[0] + tmp[6]*src[1] + tmp[11]*src[3]; dst[6] -= tmp[2]*src[0] + tmp[7]*src[1] + tmp[10]*src[3]; dst[7] = tmp[4]*src[0] + tmp[9]*src[1] + tmp[10]*src[2]; dst[7] -= tmp[5]*src[0] + tmp[8]*src[1] + tmp[11]*src[2]; tmp[0] = src[2]*src[7]; tmp[1] = src[3]*src[6]; tmp[2] = src[1]*src[7]; tmp[3] = src[3]*src[5]; tmp[4] = src[1]*src[6]; tmp[5] = src[2]*src[5]; tmp[6] = src[0]*src[7]; tmp[7] = src[3]*src[4]; tmp[8] = src[0]*src[6]; tmp[9] = src[2]*src[4]; tmp[10] = src[0]*src[5]; tmp[11] = src[1]*src[4]; dst[8] = tmp[0]*src[13] + tmp[3]*src[14] + tmp[4]*src[15]; dst[8] -= tmp[1]*src[13] + tmp[2]*src[14] + tmp[5]*src[15]; dst[9] = tmp[1]*src[12] + tmp[6]*src[14] + tmp[9]*src[15]; dst[9] -= tmp[0]*src[12] + tmp[7]*src[14] + tmp[8]*src[15]; dst[10] = tmp[2]*src[12] + tmp[7]*src[13] + tmp[10]*src[15]; dst[10]-= tmp[3]*src[12] + tmp[6]*src[13] + tmp[11]*src[15]; dst[11] = tmp[5]*src[12] + tmp[8]*src[13] + tmp[11]*src[14]; dst[11]-= tmp[4]*src[12] + tmp[9]*src[13] + tmp[10]*src[14]; dst[12] = tmp[2]*src[10] + tmp[5]*src[11] + tmp[1]*src[9]; dst[12]-= tmp[4]*src[11] + tmp[0]*src[9] + tmp[3]*src[10]; dst[13] = tmp[8]*src[11] + tmp[0]*src[8] + tmp[7]*src[10]; dst[13]-= tmp[6]*src[10] + tmp[9]*src[11] + tmp[1]*src[8]; dst[14] = tmp[6]*src[9] + tmp[11]*src[11] + tmp[3]*src[8]; dst[14]-= tmp[10]*src[11] + tmp[2]*src[8] + tmp[7]*src[9]; dst[15] = tmp[10]*src[10] + tmp[4]*src[8] + tmp[9]*src[9]; dst[15]-= tmp[8]*src[9] + tmp[11]*src[10] + tmp[5]*src[8]; det=src[0]*dst[0]+src[1]*dst[1]+src[2]*dst[2]+src[3]*dst[3]; det = 1.0f/det; for(uint32_t j=0;j<16;j++) { dst[j] *= det; } return Mat44V ( Vec4V(dst[0],dst[4],dst[8],dst[12]), Vec4V(dst[1],dst[5],dst[9],dst[13]), Vec4V(dst[2],dst[6],dst[10],dst[14]), Vec4V(dst[3],dst[7],dst[11],dst[15]) ); } NV_FORCE_INLINE Mat44V M44Trnsps(const Mat44V& a) { return Mat44V ( Vec4V(a.col0.x,a.col1.x,a.col2.x,a.col3.x), Vec4V(a.col0.y,a.col1.y,a.col2.y,a.col3.y), Vec4V(a.col0.z,a.col1.z,a.col2.z,a.col3.z), Vec4V(a.col0.w,a.col1.w,a.col2.w,a.col3.w) ); } NV_FORCE_INLINE Vec4V V4LoadXYZW(const float& x, const float& y, const float& z, const float& w) { return Vec4V(x, y, z, w); } /* NV_FORCE_INLINE VecU16V V4U32PK(VecU32V a, VecU32V b) { return VecU16V( uint16_t(NvClamp<uint32_t>((a).u32[0], 0, 0xFFFF)), uint16_t(NvClamp<uint32_t>((a).u32[1], 0, 0xFFFF)), uint16_t(NvClamp<uint32_t>((a).u32[2], 0, 0xFFFF)), uint16_t(NvClamp<uint32_t>((a).u32[3], 0, 0xFFFF)), uint16_t(NvClamp<uint32_t>((b).u32[0], 0, 0xFFFF)), uint16_t(NvClamp<uint32_t>((b).u32[1], 0, 0xFFFF)), uint16_t(NvClamp<uint32_t>((b).u32[2], 0, 0xFFFF)), uint16_t(NvClamp<uint32_t>((b).u32[3], 0, 
0xFFFF))); } */ NV_FORCE_INLINE VecU32V V4U32Sel(const BoolV c, const VecU32V a, const VecU32V b) { return VecU32V( c.ux ? a.u32[0] : b.u32[0], c.uy ? a.u32[1] : b.u32[1], c.uz ? a.u32[2] : b.u32[2], c.uw ? a.u32[3] : b.u32[3] ); } NV_FORCE_INLINE VecU32V V4U32or(VecU32V a, VecU32V b) { return VecU32V((a).u32[0]|(b).u32[0], (a).u32[1]|(b).u32[1], (a).u32[2]|(b).u32[2], (a).u32[3]|(b).u32[3]); } NV_FORCE_INLINE VecU32V V4U32and(VecU32V a, VecU32V b) { return VecU32V((a).u32[0]&(b).u32[0], (a).u32[1]&(b).u32[1], (a).u32[2]&(b).u32[2], (a).u32[3]&(b).u32[3]); } NV_FORCE_INLINE VecU32V V4U32Andc(VecU32V a, VecU32V b) { return VecU32V((a).u32[0]&~(b).u32[0], (a).u32[1]&~(b).u32[1], (a).u32[2]&~(b).u32[2], (a).u32[3]&~(b).u32[3]); } /* NV_FORCE_INLINE VecU16V V4U16Or(VecU16V a, VecU16V b) { return VecU16V( (a).u16[0]|(b).u16[0], (a).u16[1]|(b).u16[1], (a).u16[2]|(b).u16[2], (a).u16[3]|(b).u16[3], (a).u16[4]|(b).u16[4], (a).u16[5]|(b).u16[5], (a).u16[6]|(b).u16[6], (a).u16[7]|(b).u16[7]); } */ /* NV_FORCE_INLINE VecU16V V4U16And(VecU16V a, VecU16V b) { return VecU16V( (a).u16[0]&(b).u16[0], (a).u16[1]&(b).u16[1], (a).u16[2]&(b).u16[2], (a).u16[3]&(b).u16[3], (a).u16[4]&(b).u16[4], (a).u16[5]&(b).u16[5], (a).u16[6]&(b).u16[6], (a).u16[7]&(b).u16[7]); } */ /* NV_FORCE_INLINE VecU16V V4U16Andc(VecU16V a, VecU16V b) { return VecU16V( (a).u16[0]&~(b).u16[0], (a).u16[1]&~(b).u16[1], (a).u16[2]&~(b).u16[2], (a).u16[3]&~(b).u16[3], (a).u16[4]&~(b).u16[4], (a).u16[5]&~(b).u16[5], (a).u16[6]&~(b).u16[6], (a).u16[7]&~(b).u16[7]); } */ /* template<int a> NV_FORCE_INLINE VecI32V V4ISplat() { return VecI32V(a, a, a, a); } template<uint32_t a> NV_FORCE_INLINE VecU32V V4USplat() { return VecU32V(a, a, a, a); } */ /* NV_FORCE_INLINE void V4U16StoreAligned(VecU16V val, VecU16V* address) { *address = val; } */ NV_FORCE_INLINE void V4U32StoreAligned(VecU32V val, VecU32V* address) { *address = val; } NV_FORCE_INLINE Vec4V V4Andc(const Vec4V a, const VecU32V b) { VecU32V r = V4U32Andc(*reinterpret_cast<const VecU32V*>(&a),b); return (*reinterpret_cast<const Vec4V*>(&r)); } NV_FORCE_INLINE VecU32V V4IsGrtrV32u(const Vec4V a, const Vec4V b) { return VecU32V( a.x > b.x ? 0xFFFFffff : 0, a.y > b.y ? 0xFFFFffff : 0, a.z > b.z ? 0xFFFFffff : 0, a.w > b.w ? 
0xFFFFffff : 0); } NV_FORCE_INLINE VecU16V V4U16LoadAligned(VecU16V* addr) { return *addr; } NV_FORCE_INLINE VecU16V V4U16LoadUnaligned(VecU16V* addr) { return *addr; } NV_FORCE_INLINE VecU16V V4U16CompareGt(VecU16V a, VecU16V b) { return VecU16V( (a).u16[0]>(b).u16[0], (a).u16[1]>(b).u16[1], (a).u16[2]>(b).u16[2], (a).u16[3]>(b).u16[3], (a).u16[4]>(b).u16[4], (a).u16[5]>(b).u16[5], (a).u16[6]>(b).u16[6], (a).u16[7]>(b).u16[7]); } NV_FORCE_INLINE VecU16V V4I16CompareGt(VecU16V a, VecU16V b) { return VecU16V( (a).i16[0]>(b).i16[0], (a).i16[1]>(b).i16[1], (a).i16[2]>(b).i16[2], (a).i16[3]>(b).i16[3], (a).i16[4]>(b).i16[4], (a).i16[5]>(b).i16[5], (a).i16[6]>(b).i16[6], (a).i16[7]>(b).i16[7]); } NV_FORCE_INLINE Vec4V Vec4V_From_VecU32V(VecU32V a) { return Vec4V(float((a).u32[0]), float((a).u32[1]), float((a).u32[2]), float((a).u32[3])); } NV_FORCE_INLINE Vec4V Vec4V_From_VecI32V(VecI32V a) { return Vec4V(float((a).i32[0]), float((a).i32[1]), float((a).i32[2]), float((a).i32[3])); } NV_FORCE_INLINE VecI32V VecI32V_From_Vec4V(Vec4V a) { float* data = (float*)&a; return VecI32V(int32_t(data[0]), int32_t(data[1]), int32_t(data[2]), int32_t(data[3])); } NV_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecU32V(VecU32V a) { Vec4V b = *reinterpret_cast<Vec4V*>(&a); return b; } NV_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecI32V(VecI32V a) { Vec4V b = *reinterpret_cast<Vec4V*>(&a); return b; } NV_FORCE_INLINE VecU32V VecU32V_ReinterpretFrom_Vec4V(Vec4V a) { VecU32V b = *reinterpret_cast<VecU32V*>(&a); return b; } NV_FORCE_INLINE VecI32V VecI32V_ReinterpretFrom_Vec4V(Vec4V a) { VecI32V b= *reinterpret_cast<VecI32V*>(&a); return b; } template<int index> NV_FORCE_INLINE VecU32V V4U32SplatElement(VecU32V a) { return VecU32V((a).u32[index], (a).u32[index], (a).u32[index], (a).u32[index]); } template<int index> NV_FORCE_INLINE VecU32V V4U32SplatElement(BoolV a) { const uint32_t u = (&a.ux)[index]; return VecU32V(u, u, u, u); } template<int index> NV_FORCE_INLINE Vec4V V4SplatElement(Vec4V a) { float* data = (float*)&a; return Vec4V(data[index], data[index], data[index], data[index]); } template<int index> NV_FORCE_INLINE VecU16V V4U16SplatElement(VecU16V a) { return VecU16V( (a).u16[index], (a).u16[index], (a).u16[index], (a).u16[index], (a).u16[index], (a).u16[index], (a).u16[index], (a).u16[index]); } template<int imm> NV_FORCE_INLINE VecI16V V4I16SplatImmediate() { return VecI16V(imm, imm, imm, imm, imm, imm, imm, imm); } template<uint16_t imm> NV_FORCE_INLINE VecU16V V4U16SplatImmediate() { return VecU16V(imm, imm, imm, imm, imm, imm, imm, imm); } NV_FORCE_INLINE VecU16V V4U16SubtractModulo(VecU16V a, VecU16V b) { return VecU16V( (a).u16[0] - (b).u16[0], (a).u16[1] - (b).u16[1], (a).u16[2] - (b).u16[2], (a).u16[3] - (b).u16[3], (a).u16[4] - (b).u16[4], (a).u16[5] - (b).u16[5], (a).u16[6] - (b).u16[6], (a).u16[7] - (b).u16[7]); } NV_FORCE_INLINE VecU16V V4U16AddModulo(VecU16V a, VecU16V b) { return VecU16V( (a).u16[0] + (b).u16[0], (a).u16[1] + (b).u16[1], (a).u16[2] + (b).u16[2], (a).u16[3] + (b).u16[3], (a).u16[4] + (b).u16[4], (a).u16[5] + (b).u16[5], (a).u16[6] + (b).u16[6], (a).u16[7] + (b).u16[7]); } NV_FORCE_INLINE VecU32V V4U16GetLo16(VecU16V a) { return VecU32V((a).u16[0], (a).u16[2], (a).u16[4], (a).u16[6]); } NV_FORCE_INLINE VecU32V V4U16GetHi16(VecU16V a) { return VecU32V((a).u16[1], (a).u16[3], (a).u16[5], (a).u16[7]); } NV_FORCE_INLINE VecU32V VecU32VLoadXYZW(uint32_t x, uint32_t y, uint32_t z, uint32_t w) { return VecU32V(x, y, z, w); } NV_FORCE_INLINE Vec4V V4Abs(const Vec4V a) { return 
V4Max(a,V4Neg(a)); } NV_FORCE_INLINE BoolV V4IsEqU32(const VecU32V a, const VecU32V b) { return BoolV(a.u32[0]==b.u32[0] ? -1 : 0, a.u32[1]==b.u32[1] ? -1 : 0, a.u32[2]==b.u32[2] ? -1 : 0, a.u32[3]==b.u32[3] ? -1 : 0); } NV_FORCE_INLINE VecU32V U4Load(const uint32_t i) { return VecU32V(i, i, i, i); } NV_FORCE_INLINE VecU32V U4LoadU(const uint32_t* i) { return VecU32V(i[0], i[1], i[2], i[3]); } NV_FORCE_INLINE VecU32V U4LoadA(const uint32_t* i) { return VecU32V(i[0], i[1], i[2], i[3]); } NV_FORCE_INLINE VecI32V I4Load(const int32_t i) { return VecI32V(i, i, i, i); } NV_FORCE_INLINE VecI32V I4LoadU(const int32_t* i) { return VecI32V(i[0], i[1], i[2], i[3]); } NV_FORCE_INLINE VecI32V I4LoadA(const int32_t* i) { return VecI32V(i[0], i[1], i[2], i[3]); } NV_FORCE_INLINE VecI32V VecI32V_Add(const VecI32VArg a, const VecI32VArg b) { return VecI32V(a.i32[0] + b.i32[0], a.i32[1] + b.i32[1], a.i32[2] + b.i32[2], a.i32[3] + b.i32[3]); } NV_FORCE_INLINE VecI32V VecI32V_Sub(const VecI32VArg a, const VecI32VArg b) { return VecI32V(a.i32[0] - b.i32[0], a.i32[1] - b.i32[1], a.i32[2] - b.i32[2], a.i32[3] - b.i32[3]); } NV_FORCE_INLINE BoolV VecI32V_IsGrtr(const VecI32VArg a, const VecI32VArg b) { return BoolV(a.i32[0] > b.i32[0] ? -1 : 0, a.i32[1] > b.i32[1] ? -1 : 0, a.i32[2] > b.i32[2] ? -1 : 0, a.i32[3] > b.i32[3] ? -1 : 0); } NV_FORCE_INLINE BoolV VecI32V_IsEq(const VecI32VArg a, const VecI32VArg b) { return BoolV(a.i32[0] == b.i32[0] ? -1 : 0, a.i32[1] == b.i32[1] ? -1 : 0, a.i32[2] == b.i32[2] ? -1 : 0, a.i32[3] == b.i32[3] ? -1 : 0); } NV_FORCE_INLINE VecI32V V4I32Sel(const BoolV c, const VecI32V a, const VecI32V b) { return VecI32V( c.ux ? a.i32[0] : b.i32[0], c.uy ? a.i32[1] : b.i32[1], c.uz ? a.i32[2] : b.i32[2], c.uw ? a.i32[3] : b.i32[3] ); } NV_FORCE_INLINE VecI32V VecI32V_Zero() { return VecI32V(0,0,0,0); } NV_FORCE_INLINE VecI32V VecI32V_One() { return VecI32V(1,1,1,1); } NV_FORCE_INLINE VecI32V VecI32V_Two() { return VecI32V(2,2,2,2); } NV_FORCE_INLINE VecI32V VecI32V_MinusOne() { return VecI32V(-1,-1,-1,-1); } NV_FORCE_INLINE VecU32V U4Zero() { return VecU32V(0,0,0,0); } NV_FORCE_INLINE VecU32V U4One() { return VecU32V(1,1,1,1); } NV_FORCE_INLINE VecU32V U4Two() { return VecU32V(2,2,2,2); } NV_FORCE_INLINE VecShiftV VecI32V_PrepareShift(const VecI32VArg shift) { return shift; } NV_FORCE_INLINE VecI32V VecI32V_LeftShift(const VecI32VArg a, const VecShiftVArg count) { return VecI32V(a.i32[0] << count.i32[0], a.i32[1] << count.i32[1], a.i32[2] << count.i32[2], a.i32[3] << count.i32[3]); } NV_FORCE_INLINE VecI32V VecI32V_RightShift(const VecI32VArg a, const VecShiftVArg count) { return VecI32V(a.i32[0] >> count.i32[0], a.i32[1] >> count.i32[1], a.i32[2] >> count.i32[2], a.i32[3] >> count.i32[3]); } NV_FORCE_INLINE VecI32V VecI32V_And(const VecI32VArg a, const VecI32VArg b) { return VecI32V(a.i32[0]&b.i32[0], a.i32[1]&b.i32[1], a.i32[2]&b.i32[2], a.i32[3]&b.i32[3]); } NV_FORCE_INLINE VecI32V VecI32V_Or(const VecI32VArg a, const VecI32VArg b) { return VecI32V(a.i32[0]|b.i32[0], a.i32[1]|b.i32[1], a.i32[2]|b.i32[2], a.i32[3]|b.i32[3]); } NV_FORCE_INLINE VecI32V VecI32V_GetX(const VecI32VArg a) { return VecI32V(a.i32[0], a.i32[0], a.i32[0], a.i32[0]); } NV_FORCE_INLINE VecI32V VecI32V_GetY(const VecI32VArg a) { return VecI32V(a.i32[1], a.i32[1], a.i32[1], a.i32[1]); } NV_FORCE_INLINE VecI32V VecI32V_GetZ(const VecI32VArg a) { return VecI32V(a.i32[2], a.i32[2], a.i32[2], a.i32[2]); } NV_FORCE_INLINE VecI32V VecI32V_GetW(const VecI32VArg a) { return VecI32V(a.i32[3], a.i32[3], a.i32[3], a.i32[3]); } 
NV_FORCE_INLINE VecI32V VecI32V_Sel(const BoolV c, const VecI32VArg a, const VecI32VArg b) { return VecI32V(c.ux ? a.i32[0] : b.i32[0], c.uy ? a.i32[1] : b.i32[1], c.uz ? a.i32[2] : b.i32[2], c.uw ? a.i32[3] : b.i32[3]); } NV_FORCE_INLINE VecI32V VecI32V_Merge(const VecI32VArg a, const VecI32VArg b, const VecI32VArg c, const VecI32VArg d) { return VecI32V(a.i32[0], b.i32[0], c.i32[0], d.i32[0]); } NV_FORCE_INLINE void NvI32_From_VecI32V(const VecI32VArg a, int32_t* i) { *i = a.i32[0]; } NV_FORCE_INLINE VecI32V VecI32V_From_BoolV(const BoolVArg b) { return VecI32V(b.ux, b.uy, b.uz, b.uw); } NV_FORCE_INLINE VecU32V VecU32V_From_BoolV(const BoolVArg b) { return VecU32V(b.ux, b.uy, b.uz, b.uw); } //not used /* NV_FORCE_INLINE Vec4V V4LoadAligned(Vec4V* addr) { return *addr; } */ /* NV_FORCE_INLINE Vec4V V4LoadUnaligned(Vec4V* addr) { return *addr; } */ /* NV_FORCE_INLINE Vec4V V4Ceil(const Vec4V a) { return Vec4V(NvCeil(a.x), NvCeil(a.y), NvCeil(a.z), NvCeil(a.w)); } NV_FORCE_INLINE Vec4V V4Floor(const Vec4V a) { return Vec4V(NvFloor(a.x), NvFloor(a.y), NvFloor(a.z), NvFloor(a.w)); } */ /* NV_FORCE_INLINE VecU32V V4ConvertToU32VSaturate(const Vec4V a, uint32_t power) { NV_ASSERT(power == 0 && "Non-zero power not supported in convertToU32VSaturate"); NV_UNUSED(power); // prevent warning in release builds float ffffFFFFasFloat = float(0xFFFF0000); return VecU32V( uint32_t(NvClamp<float>((a).x, 0.0f, ffffFFFFasFloat)), uint32_t(NvClamp<float>((a).y, 0.0f, ffffFFFFasFloat)), uint32_t(NvClamp<float>((a).z, 0.0f, ffffFFFFasFloat)), uint32_t(NvClamp<float>((a).w, 0.0f, ffffFFFFasFloat))); } */ #endif //NV_PHYSICS_COMMON_VECMATH_SCALAR_INLINE
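A minimal usage sketch of the scalar integer-vector path above (not part of the repository), assuming a client includes NsVecMath.h, which selects this non-SIMD implementation when COMPILE_VECTOR_INTRINSICS is 0, and opens the nvidia::shdfnd::aos namespace. Only the demo function name is invented; every call is to a function defined or declared in these headers, and the lane-wise behaviour shown is the same in the SIMD build.

#include "NsVecMath.h"
#include <cstdio>

using namespace nvidia::shdfnd::aos;

// Illustrative only: exercises the lane-wise VecI32V helpers defined above.
static void scalarVecI32Demo()
{
    const VecI32V a = I4Load(3);                            // (3,3,3,3)
    const VecI32V b = I4Load(4);                            // (4,4,4,4)
    const VecI32V sum = VecI32V_Add(a, b);                  // lane-wise add -> (7,7,7,7)

    // Every lane of the shift vector must hold the same count (see VecI32V_PrepareShift).
    const VecShiftV two = VecI32V_PrepareShift(I4Load(2));
    const VecI32V shifted = VecI32V_LeftShift(sum, two);    // (28,28,28,28)

    // Keep the larger of 'shifted' and 'b' per lane via the integer select helper.
    const VecI32V maxed = V4I32Sel(VecI32V_IsGrtr(shifted, b), shifted, b);

    int32_t lane0;
    NvI32_From_VecI32V(maxed, &lane0);                      // reads lane 0 -> 28
    printf("lane0 = %d\n", lane0);
}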
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsAlignedMalloc.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NSFOUNDATION_NSALIGNEDMALLOC_H #define NV_NSFOUNDATION_NSALIGNEDMALLOC_H #include "NsUserAllocated.h" /*! Allocate aligned memory. Alignment must be a power of 2! -- should be templated by a base allocator */ namespace nvidia { namespace shdfnd { /** Allocator, which is used to access the global NvAllocatorCallback instance (used for dynamic data types template instantiation), which can align memory */ // SCS: AlignedMalloc with 3 params not found, seems not used on PC either // disabled for now to avoid GCC error template <uint32_t N, typename BaseAllocator = NonTrackingAllocator> class AlignedAllocator : public BaseAllocator { public: AlignedAllocator(const BaseAllocator& base = BaseAllocator()) : BaseAllocator(base) { } void* allocate(size_t size, const char* file, int line) { size_t pad = N - 1 + sizeof(size_t); // store offset for delete. uint8_t* base = reinterpret_cast<uint8_t*>(BaseAllocator::allocate(size + pad, file, line)); if(!base) return NULL; uint8_t* ptr = reinterpret_cast<uint8_t*>(size_t(base + pad) & ~(size_t(N) - 1)); // aligned pointer, ensuring N is a size_t // wide mask reinterpret_cast<size_t*>(ptr)[-1] = size_t(ptr - base); // store offset return ptr; } void deallocate(void* ptr) { if(ptr == NULL) return; uint8_t* base = reinterpret_cast<uint8_t*>(ptr) - reinterpret_cast<size_t*>(ptr)[-1]; BaseAllocator::deallocate(base); } }; } // namespace shdfnd } // namespace nvidia #endif // #ifndef NV_NSFOUNDATION_NSALIGNEDMALLOC_H
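A short, hypothetical example of how AlignedAllocator is meant to be used, assuming the default NonTrackingAllocator base and NvAssert.h are available from the surrounding foundation headers; only the demo function name is invented. It illustrates the scheme above: over-allocate by N - 1 + sizeof(size_t), round the pointer up to an N-byte boundary, and store the offset immediately below the returned pointer so deallocate() can recover the original allocation.

#include "NsAlignedMalloc.h"
#include "NvAssert.h"

using nvidia::shdfnd::AlignedAllocator;

// Illustrative only: requests a 64-byte (cache-line) aligned block through the
// global allocator callback that the default BaseAllocator routes to.
static void alignedAllocDemo()
{
    AlignedAllocator<64> alloc;                             // N must be a power of two
    void* p = alloc.allocate(256, __FILE__, __LINE__);      // 256 usable bytes, 64-byte aligned
    NV_ASSERT((reinterpret_cast<size_t>(p) & 63) == 0);     // guaranteed by the round-up in allocate()
    alloc.deallocate(p);                                    // reads the stored offset to free the base block
}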
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsVecQuat.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_PHYSICS_COMMON_VECQUAT #define NV_PHYSICS_COMMON_VECQUAT //#include "NsInlineAoS.h" #ifndef NV_PIDIV2 #define NV_PIDIV2 1.570796327f #endif ////////////////////////////////// //QuatV ////////////////////////////////// NV_FORCE_INLINE QuatV QuatVLoadXYZW(const float x, const float y, const float z, const float w) { return V4LoadXYZW(x, y, z, w); } NV_FORCE_INLINE QuatV QuatVLoadU(const float* v) { return V4LoadU(v); } NV_FORCE_INLINE QuatV QuatVLoadA(const float* v) { return V4LoadA(v); } NV_FORCE_INLINE QuatV QuatV_From_RotationAxisAngle(const Vec3V u, const FloatV a) { //q = cos(a/2) + u*sin(a/2) const FloatV half = FLoad(0.5f); const FloatV hangle = FMul(a, half); const FloatV piByTwo(FLoad(NV_PIDIV2)); const FloatV PiByTwoMinHangle(FSub(piByTwo, hangle)); const Vec4V hangle2(Vec4V_From_Vec3V(V3Merge(hangle, PiByTwoMinHangle, hangle))); /*const FloatV sina = FSin(hangle); const FloatV cosa = FCos(hangle);*/ const Vec4V _sina = V4Sin(hangle2); const FloatV sina = V4GetX(_sina); const FloatV cosa = V4GetY(_sina); const Vec3V v = V3Scale(u, sina); //return V4Sel(BTTTF(), Vec4V_From_Vec3V(v), V4Splat(cosa)); return V4SetW(Vec4V_From_Vec3V(v) , cosa); } //Normalize NV_FORCE_INLINE QuatV QuatNormalize(const QuatV q) { return V4Normalize(q); } NV_FORCE_INLINE FloatV QuatLength(const QuatV q) { return V4Length(q); } NV_FORCE_INLINE FloatV QuatLengthSq(const QuatV q) { return V4LengthSq(q); } NV_FORCE_INLINE FloatV QuatDot(const QuatV a, const QuatV b) // convert this NvQuat to a unit quaternion { return V4Dot(a, b); } NV_FORCE_INLINE QuatV QuatConjugate(const QuatV q) { return V4SetW(V4Neg(q), V4GetW(q)); } NV_FORCE_INLINE Vec3V QuatGetImaginaryPart(const QuatV q) { return Vec3V_From_Vec4V(q); } /** brief computes rotation of x-axis */ NV_FORCE_INLINE Vec3V QuatGetBasisVector0(const QuatV q) { /*const float x2 = x*2.0f; const float w2 = 
w*2.0f; return NvVec3( (w * w2) - 1.0f + x*x2, (z * w2) + y*x2, (-y * w2) + z*x2);*/ const FloatV two = FLoad(2.f); const FloatV w = V4GetW(q); const Vec3V u = Vec3V_From_Vec4V(q); const FloatV x2 = FMul(V3GetX(u), two); const FloatV w2 = FMul(w, two); const Vec3V a = V3Scale(u, x2); const Vec3V tmp = V3Merge(w, V3GetZ(u), FNeg(V3GetY(u))); //const Vec3V b = V3Scale(tmp, w2); //const Vec3V ab = V3Add(a, b); const Vec3V ab = V3ScaleAdd(tmp, w2, a); return V3SetX(ab, FSub(V3GetX(ab), FOne())); } /** brief computes rotation of y-axis */ NV_FORCE_INLINE Vec3V QuatGetBasisVector1(const QuatV q) { /*const float y2 = y*2.0f; const float w2 = w*2.0f; return NvVec3( (-z * w2) + x*y2, (w * w2) - 1.0f + y*y2, (x * w2) + z*y2);*/ const FloatV two = FLoad(2.f); const FloatV w = V4GetW(q); const Vec3V u = Vec3V_From_Vec4V(q); const FloatV y2 = FMul(V3GetY(u), two); const FloatV w2 = FMul(w, two); const Vec3V a = V3Scale(u, y2); const Vec3V tmp = V3Merge(FNeg(V3GetZ(u)),w, V3GetX(u)); //const Vec3V b = V3Scale(tmp, w2); //const Vec3V ab = V3Add(a, b); const Vec3V ab = V3ScaleAdd(tmp, w2, a); return V3SetY(ab, FSub(V3GetY(ab), FOne())); } /** brief computes rotation of z-axis */ NV_FORCE_INLINE Vec3V QuatGetBasisVector2(const QuatV q) { /*const float z2 = z*2.0f; const float w2 = w*2.0f; return NvVec3( (y * w2) + x*z2, (-x * w2) + y*z2, (w * w2) - 1.0f + z*z2);*/ const FloatV two = FLoad(2.f); const FloatV w = V4GetW(q); const Vec3V u = Vec3V_From_Vec4V(q); const FloatV z2 = FMul(V3GetZ(u), two); const FloatV w2 = FMul(w, two); const Vec3V a = V3Scale(u, z2); const Vec3V tmp = V3Merge(V3GetY(u), FNeg(V3GetX(u)), w); /*const Vec3V b = V3Scale(tmp, w2); const Vec3V ab = V3Add(a, b);*/ const Vec3V ab = V3ScaleAdd(tmp, w2, a); return V3SetZ(ab, FSub(V3GetZ(ab), FOne())); } NV_FORCE_INLINE Vec3V QuatRotate(const QuatV q, const Vec3V v) { /* const NvVec3 qv(x,y,z); return (v*(w*w-0.5f) + (qv.cross(v))*w + qv*(qv.dot(v)))*2; */ const FloatV two = FLoad(2.f); //const FloatV half = FloatV_From_F32(0.5f); const FloatV nhalf = FLoad(-0.5f); const Vec3V u = Vec3V_From_Vec4V(q); const FloatV w = V4GetW(q); //const FloatV w2 = FSub(FMul(w, w), half); const FloatV w2 = FScaleAdd(w, w, nhalf); const Vec3V a = V3Scale(v, w2); //const Vec3V b = V3Scale(V3Cross(u, v), w); //const Vec3V c = V3Scale(u, V3Dot(u, v)); //return V3Scale(V3Add(V3Add(a, b), c), two); const Vec3V temp = V3ScaleAdd(V3Cross(u, v), w, a); return V3Scale(V3ScaleAdd(u, V3Dot(u, v), temp), two); } NV_FORCE_INLINE Vec3V QuatTransform(const QuatV q, const Vec3V p, const Vec3V v) { //p + q.rotate(v) const FloatV two = FLoad(2.f); //const FloatV half = FloatV_From_F32(0.5f); const FloatV nhalf = FLoad(-0.5f); const Vec3V u = Vec3V_From_Vec4V(q); const FloatV w = V4GetW(q); //const FloatV w2 = FSub(FMul(w, w), half); const FloatV w2 = FScaleAdd(w, w, nhalf); const Vec3V a = V3Scale(v, w2); /*const Vec3V b = V3Scale(V3Cross(u, v), w); const Vec3V c = V3Scale(u, V3Dot(u, v)); return V3ScaleAdd(V3Add(V3Add(a, b), c), two, p);*/ const Vec3V temp = V3ScaleAdd(V3Cross(u, v), w, a); const Vec3V z = V3ScaleAdd(u, V3Dot(u, v), temp); return V3ScaleAdd(z, two, p); } NV_FORCE_INLINE Vec3V QuatRotateInv(const QuatV q, const Vec3V v) { // const NvVec3 qv(x,y,z); // return (v*(w*w-0.5f) - (qv.cross(v))*w + qv*(qv.dot(v)))*2; const FloatV two = FLoad(2.f); const FloatV nhalf = FLoad(-0.5f); const Vec3V u = Vec3V_From_Vec4V(q); const FloatV w = V4GetW(q); const FloatV w2 = FScaleAdd(w, w, nhalf); const Vec3V a = V3Scale(v, w2); /*const Vec3V b = V3Scale(V3Cross(u, v), w); 
const Vec3V c = V3Scale(u, V3Dot(u, v)); return V3Scale(V3Add(V3Sub(a, b), c), two);*/ const Vec3V temp = V3NegScaleSub(V3Cross(u, v), w, a); return V3Scale(V3ScaleAdd(u, V3Dot(u, v), temp), two); } NV_FORCE_INLINE QuatV QuatMul(const QuatV a, const QuatV b) { const Vec3V imagA = Vec3V_From_Vec4V(a); const Vec3V imagB = Vec3V_From_Vec4V(b); const FloatV rA = V4GetW(a); const FloatV rB = V4GetW(b); const FloatV real = FSub(FMul(rA, rB), V3Dot(imagA, imagB)); const Vec3V v0 = V3Scale(imagA, rB); const Vec3V v1 = V3Scale(imagB, rA); const Vec3V v2 = V3Cross(imagA, imagB); const Vec3V imag = V3Add(V3Add(v0, v1), v2); return V4SetW(Vec4V_From_Vec3V(imag), real); } NV_FORCE_INLINE QuatV QuatAdd(const QuatV a, const QuatV b) { return V4Add(a, b); } NV_FORCE_INLINE QuatV QuatNeg(const QuatV q) { return V4Neg(q); } NV_FORCE_INLINE QuatV QuatSub(const QuatV a, const QuatV b) { return V4Sub(a, b); } NV_FORCE_INLINE QuatV QuatScale(const QuatV a, const FloatV b) { return V4Scale(a, b); } NV_FORCE_INLINE QuatV QuatMerge(const FloatV* const floatVArray) { return V4Merge(floatVArray); } NV_FORCE_INLINE QuatV QuatMerge(const FloatVArg x, const FloatVArg y, const FloatVArg z, const FloatVArg w) { return V4Merge(x, y, z, w); } NV_FORCE_INLINE QuatV QuatIdentity() { return V4SetW(V4Zero(), FOne()); } NV_FORCE_INLINE bool isFiniteQuatV(const QuatV q) { return isFiniteVec4V(q); } NV_FORCE_INLINE bool isValidQuatV(const QuatV q) { const FloatV unitTolerance = FLoad((float)1e-4); const FloatV tmp = FAbs(FSub(QuatLength(q), FOne())); const BoolV con = FIsGrtr(unitTolerance, tmp); return isFiniteVec4V(q) & (BAllEq(con, BTTTT())==1); } NV_FORCE_INLINE bool isSaneQuatV(const QuatV q) { const FloatV unitTolerance = FLoad((float)1e-2); const FloatV tmp = FAbs(FSub(QuatLength(q), FOne())); const BoolV con = FIsGrtr(unitTolerance, tmp); return isFiniteVec4V(q) & (BAllEq(con, BTTTT())==1); } NV_FORCE_INLINE void QuatGetMat33V(const QuatVArg q, Vec3V& column0, Vec3V& column1, Vec3V& column2) { const FloatV one = FOne(); const FloatV x = V4GetX(q); const FloatV y = V4GetY(q); const FloatV z = V4GetZ(q); const FloatV w = V4GetW(q); const FloatV x2 = FAdd(x, x); const FloatV y2 = FAdd(y, y); const FloatV z2 = FAdd(z, z); const FloatV xx = FMul(x2,x); const FloatV yy = FMul(y2,y); const FloatV zz = FMul(z2,z); const FloatV xy = FMul(x2,y); const FloatV xz = FMul(x2,z); const FloatV xw = FMul(x2,w); const FloatV yz = FMul(y2,z); const FloatV yw = FMul(y2,w); const FloatV zw = FMul(z2,w); const FloatV v = FSub(one, xx); column0 = V3Merge(FSub(FSub(one, yy), zz), FAdd(xy, zw), FSub(xz, yw)); column1 = V3Merge(FSub(xy, zw), FSub(v ,zz), FAdd(yz, xw)); column2 = V3Merge(FAdd(xz, yw), FSub(yz, xw), FSub(v, yy)); } NV_FORCE_INLINE Mat33V QuatGetMat33V(const QuatVArg q) { //const FloatV two = FloatV_From_F32(2.f); //const FloatV one = FOne(); //const FloatV x = V4GetX(q); //const FloatV y = V4GetY(q); //const FloatV z = V4GetZ(q); //const Vec4V _q = V4Mul(q, two); // ////const FloatV w = V4GetW(q); //const Vec4V t0 = V4Mul(_q, x); // 2xx, 2xy, 2xz, 2xw //const Vec4V t1 = V4Mul(_q, y); // 2xy, 2yy, 2yz, 2yw //const Vec4V t2 = V4Mul(_q, z); // 2xz, 2yz, 2zz, 2zw ////const Vec4V t3 = V4Mul(_q, w); // 2xw, 2yw, 2zw, 2ww //const FloatV xx2 = V4GetX(t0); //const FloatV xy2 = V4GetY(t0); //const FloatV xz2 = V4GetZ(t0); //const FloatV xw2 = V4GetW(t0); //const FloatV yy2 = V4GetY(t1); //const FloatV yz2 = V4GetZ(t1); //const FloatV yw2 = V4GetW(t1); //const FloatV zz2 = V4GetZ(t2); //const FloatV zw2 = V4GetW(t2); ////const FloatV ww2 = 
V4GetW(t3); //const FloatV c00 = FSub(one, FAdd(yy2, zz2)); //const FloatV c01 = FSub(xy2, zw2); //const FloatV c02 = FAdd(xz2, yw2); //const FloatV c10 = FAdd(xy2, zw2); //const FloatV c11 = FSub(one, FAdd(xx2, zz2)); //const FloatV c12 = FSub(yz2, xw2); //const FloatV c20 = FSub(xz2, yw2); //const FloatV c21 = FAdd(yz2, xw2); //const FloatV c22 = FSub(one, FAdd(xx2, yy2)); //const Vec3V c0 = V3Merge(c00, c10, c20); //const Vec3V c1 = V3Merge(c01, c11, c21); //const Vec3V c2 = V3Merge(c02, c12, c22); //return Mat33V(c0, c1, c2); const FloatV one = FOne(); const FloatV x = V4GetX(q); const FloatV y = V4GetY(q); const FloatV z = V4GetZ(q); const FloatV w = V4GetW(q); const FloatV x2 = FAdd(x, x); const FloatV y2 = FAdd(y, y); const FloatV z2 = FAdd(z, z); const FloatV xx = FMul(x2,x); const FloatV yy = FMul(y2,y); const FloatV zz = FMul(z2,z); const FloatV xy = FMul(x2,y); const FloatV xz = FMul(x2,z); const FloatV xw = FMul(x2,w); const FloatV yz = FMul(y2,z); const FloatV yw = FMul(y2,w); const FloatV zw = FMul(z2,w); const FloatV v = FSub(one, xx); const Vec3V column0 = V3Merge(FSub(FSub(one, yy), zz), FAdd(xy, zw), FSub(xz, yw)); const Vec3V column1 = V3Merge(FSub(xy, zw), FSub(v ,zz), FAdd(yz, xw)); const Vec3V column2 = V3Merge(FAdd(xz, yw), FSub(yz, xw), FSub(v, yy)); return Mat33V(column0, column1, column2); } NV_FORCE_INLINE QuatV Mat33GetQuatV(const Mat33V& a) { const FloatV one = FOne(); const FloatV zero = FZero(); const FloatV half = FLoad(0.5f); const FloatV two = FLoad(2.f); const FloatV scale = FLoad(0.25f); const FloatV a00 = V3GetX(a.col0); const FloatV a11 = V3GetY(a.col1); const FloatV a22 = V3GetZ(a.col2); const FloatV a21 = V3GetZ(a.col1);//row=2, col=1; const FloatV a12 = V3GetY(a.col2);//row=1, col=2; const FloatV a02 = V3GetX(a.col2);//row=0, col=2; const FloatV a20 = V3GetZ(a.col0);//row=2, col=0; const FloatV a10 = V3GetY(a.col0);//row=1, col=0; const FloatV a01 = V3GetX(a.col1);//row=0, col=1; const Vec3V vec0 = V3Merge(a21, a02, a10); const Vec3V vec1 = V3Merge(a12, a20, a01); const Vec3V v = V3Sub(vec0, vec1); const Vec3V g = V3Add(vec0, vec1); const FloatV trace = FAdd(a00, FAdd(a11, a22)); if(FAllGrtrOrEq(trace, zero)) { const FloatV h = FSqrt(FAdd(trace, one)); const FloatV w = FMul(half, h); const FloatV s = FMul(half, FRecip(h)); const Vec3V u = V3Scale(v, s); return V4SetW(Vec4V_From_Vec3V(u), w); } else { const FloatV ntrace = FNeg(trace); const Vec3V d= V3Merge(a00, a11, a22); const BoolV con0 = BAllTrue3(V3IsGrtrOrEq(V3Splat(a00), d)); const BoolV con1 = BAllTrue3(V3IsGrtrOrEq(V3Splat(a11), d)); const FloatV t0 = FAdd(one, FScaleAdd(a00, two, ntrace)); const FloatV t1 = FAdd(one, FScaleAdd(a11, two, ntrace)); const FloatV t2 = FAdd(one, FScaleAdd(a22, two, ntrace)); const FloatV t = FSel(con0, t0, FSel(con1, t1, t2)); const FloatV h = FMul(two, FSqrt(t)); const FloatV s = FRecip(h); const FloatV g0 = FMul(scale, h); const Vec3V vs = V3Scale(v, s); const Vec3V gs = V3Scale(g, s); const FloatV gsx = V3GetX(gs); const FloatV gsy = V3GetY(gs); const FloatV gsz = V3GetZ(gs); //vs.x= (a21 - a12)*s; vs.y=(a02 - a20)*s; vs.z=(a10 - a01)*s; //gs.x= (a21 + a12)*s; gs.y=(a02 + a20)*s; gs.z=(a10 + a01)*s; const Vec4V v0 = V4Merge(g0, gsz, gsy, V3GetX(vs)); const Vec4V v1 = V4Merge(gsz, g0, gsx, V3GetY(vs)); const Vec4V v2 = V4Merge(gsy, gsx, g0, V3GetZ(vs)); return V4Sel(con0, v0, V4Sel(con1, v1, v2)); } } #endif
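A small usage sketch of the QuatV helpers above (not part of the original file), assuming NsVecMath.h and the NvFoundation math headers it pulls in are available; the demo function name and the tolerance are invented. It builds a 90-degree rotation about the z-axis, rotates the x-axis with QuatRotate, and checks that the first column of QuatGetMat33V points the same way, since column 0 of the rotation matrix is the image of the x-axis.

#include "NsVecMath.h"
#include <cmath>

using namespace nvidia::shdfnd::aos;

// Illustrative only: axis-angle quaternion, vector rotation, matrix conversion.
static void quatVDemo()
{
    const Vec3V axis = V3UnitZ();
    const FloatV halfPi = FLoad(1.570796327f);                   // pi/2, i.e. a 90 degree rotation
    const QuatV q = QuatV_From_RotationAxisAngle(axis, halfPi);  // q = cos(a/2) + axis*sin(a/2)

    const Vec3V rotated = QuatRotate(q, V3UnitX());              // expect roughly (0, 1, 0)

    Vec3V col0, col1, col2;
    QuatGetMat33V(q, col0, col1, col2);                          // the same rotation as a 3x3 matrix

    nvidia::NvVec3 r, c;
    V3StoreU(rotated, r);
    V3StoreU(col0, c);                                           // column 0 is the rotated x-axis
    const bool match = std::fabs(r.x - c.x) < 1e-5f &&
                       std::fabs(r.y - c.y) < 1e-5f &&
                       std::fabs(r.z - c.z) < 1e-5f;
    (void)match;                                                 // expected true up to floating point error
}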
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsBitUtils.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NSFOUNDATION_NSBITUTILS_H #define NV_NSFOUNDATION_NSBITUTILS_H #include "NvIntrinsics.h" #include "NsIntrinsics.h" #include "NvAssert.h" #include "Ns.h" namespace nvidia { namespace shdfnd { NV_INLINE uint32_t bitCount(uint32_t v) { // from http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel uint32_t const w = v - ((v >> 1) & 0x55555555); uint32_t const x = (w & 0x33333333) + ((w >> 2) & 0x33333333); return (((x + (x >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24; } NV_INLINE bool isPowerOfTwo(uint32_t x) { return x != 0 && (x & (x - 1)) == 0; } // "Next Largest Power of 2 // Given a binary integer value x, the next largest power of 2 can be computed by a SWAR algorithm // that recursively "folds" the upper bits into the lower bits. This process yields a bit vector with // the same most significant 1 as x, but all 1's below it. Adding 1 to that value yields the next // largest power of 2. For a 32-bit value:" NV_INLINE uint32_t nextPowerOfTwo(uint32_t x) { x |= (x >> 1); x |= (x >> 2); x |= (x >> 4); x |= (x >> 8); x |= (x >> 16); return x + 1; } /*! Return the index of the highest set bit. Not valid for zero arg. */ NV_INLINE uint32_t lowestSetBit(uint32_t x) { NV_ASSERT(x); return lowestSetBitUnsafe(x); } /*! Return the index of the highest set bit. Not valid for zero arg. */ NV_INLINE uint32_t highestSetBit(uint32_t x) { NV_ASSERT(x); return highestSetBitUnsafe(x); } // Helper function to approximate log2 of an integer value // assumes that the input is actually power of two. // todo: replace 2 usages with 'highestSetBit' NV_INLINE uint32_t ilog2(uint32_t num) { for(uint32_t i = 0; i < 32; i++) { num >>= 1; if(num == 0) return i; } NV_ASSERT(0); return uint32_t(-1); } } // namespace shdfnd } // namespace nvidia #endif // #ifndef NV_NSFOUNDATION_NSBITUTILS_H
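A brief sketch of the bit helpers above, added for illustration; the demo function name is invented, the rest is the API from this header. One behaviour worth calling out: nextPowerOfTwo() folds the top set bit downwards and then adds one, so it returns the power of two strictly greater than its argument (nextPowerOfTwo(8) == 16); passing x - 1 rounds x itself up to a power of two.

#include "NsBitUtils.h"

using namespace nvidia::shdfnd;

// Illustrative only: expected results worked out from the definitions above.
static void bitUtilsDemo()
{
    NV_ASSERT(bitCount(0xF0u) == 4);          // four bits set
    NV_ASSERT(isPowerOfTwo(64u));
    NV_ASSERT(!isPowerOfTwo(48u));
    NV_ASSERT(nextPowerOfTwo(5u) == 8u);      // 5 -> 8
    NV_ASSERT(nextPowerOfTwo(8u) == 16u);     // an exact power of two is doubled
    NV_ASSERT(nextPowerOfTwo(8u - 1u) == 8u); // use x - 1 to round x itself up
    NV_ASSERT(ilog2(64u) == 6u);              // only meaningful for powers of two
}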
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsVecMath.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_PHYSICS_COMMON_VECMATH #define NV_PHYSICS_COMMON_VECMATH #include "Ns.h" #include "NsIntrinsics.h" #include "NvPreprocessor.h" #include "NvVec3.h" #include "NvVec4.h" #include "NvMat33.h" #include "NvUnionCast.h" //We can activate asserts in vectorised functions for testing. //NEVER submit with asserts activated. //Only activate asserts for local testing. #define AOS_ASSERTS_ON 0 //We can opt to use the scalar version of vectorised functions. //This can catch type safety issues and might even work out more optimal on pc. //It will also be useful for benchmarking and testing. //NEVER submit with vector intrinsics deactivated without good reason. //AM: deactivating SIMD for debug win64 just so autobuild will also exercise //non-SIMD path, until a dedicated non-SIMD platform sich as Arm comes online. //TODO: dima: reference all platforms with SIMD support here, //all unknown/experimental cases should better default to NO SIMD. #if NV_X86 || NV_X64 || NV_WINRT || NV_PS3 || NV_X360 || (NV_LINUX && (NV_X86 || NV_X64)) || (NV_ANDROID && NV_NEON) || NV_XBOXONE #define COMPILE_VECTOR_INTRINSICS 1 // use SIMD #else #define COMPILE_VECTOR_INTRINSICS 0 // do not use SIMD #endif #if AOS_ASSERTS_ON #define VECMATHAOS_ASSERT NV_ASSERT #else #define VECMATHAOS_ASSERT(x) {} #endif #if COMPILE_VECTOR_INTRINSICS && (NV_X86 || NV_X64) && (NV_LINUX || NV_ANDROID || NV_APPLE || NV_PS4 || (NV_WINRT && NV_NEON)) // only SSE2 compatible platforms should reach this #include <xmmintrin.h> #endif namespace nvidia { namespace shdfnd { namespace aos { //Basic AoS types are //FloatV - 16-byte aligned representation of float. //Vec3V - 16-byte aligned representation of NvVec3 stored as (x y z 0). //Vec4V - 16-byte aligned representation of vector of 4 floats stored as (x y z w). //BoolV - 16-byte aligned representation of vector of 4 bools stored as (x y z w). 
//VecU32V - 16-byte aligned representation of 4 unsigned ints stored as (x y z w). //VecI32V - 16-byte aligned representation of 4 signed ints stored as (x y z w). //Mat33V - 16-byte aligned representation of any 3x3 matrix. //Mat34V - 16-byte aligned representation of transformation matrix (rotation in col1,col2,col3 and translation in col4). //Mat44V - 16-byte aligned representation of any 4x4 matrix. #if COMPILE_VECTOR_INTRINSICS #include "NsAoS.h" #else #include "NsVecMathAoSScalar.h" #endif ////////////////////////////////////////// //Construct a simd type from a scalar type ////////////////////////////////////////// //FloatV //(f,f,f,f) NV_FORCE_INLINE FloatV FLoad(const float f); //Vec3V //(f,f,f,0) NV_FORCE_INLINE Vec3V V3Load(const float f); //(f.x,f.y,f.z,0) NV_FORCE_INLINE Vec3V V3LoadU(const NvVec3& f); //(f.x,f.y,f.z,0), f must be 16-byte aligned NV_FORCE_INLINE Vec3V V3LoadA(const NvVec3& f); //(f.x,f.y,f.z,w_undefined), f must be 16-byte aligned NV_FORCE_INLINE Vec3V V3LoadUnsafeA(const NvVec3& f); //(f.x,f.y,f.z,0) NV_FORCE_INLINE Vec3V V3LoadU(const float* f); //(f.x,f.y,f.z,0), f must be 16-byte aligned NV_FORCE_INLINE Vec3V V3LoadA(const float* f); //Vec4V //(f,f,f,f) NV_FORCE_INLINE Vec4V V4Load(const float f); //(f[0],f[1],f[2],f[3]) NV_FORCE_INLINE Vec4V V4LoadU(const float* const f); //(f[0],f[1],f[2],f[3]), f must be 16-byte aligned NV_FORCE_INLINE Vec4V V4LoadA(const float* const f); //(x,y,z,w) NV_FORCE_INLINE Vec4V V4LoadXYZW(const float& x, const float& y, const float& z, const float& w); //BoolV //(f,f,f,f) NV_FORCE_INLINE BoolV BLoad(const bool f); //(f[0],f[1],f[2],f[3]) NV_FORCE_INLINE BoolV BLoad(const bool* const f); //VecU32V //(f,f,f,f) NV_FORCE_INLINE VecU32V U4Load(const uint32_t f); //(f[0],f[1],f[2],f[3]) NV_FORCE_INLINE VecU32V U4LoadU(const uint32_t* f); //(f[0],f[1],f[2],f[3]), f must be 16-byte aligned NV_FORCE_INLINE VecU32V U4LoadA(const uint32_t* f); //((U32)x, (U32)y, (U32)z, (U32)w) NV_DEPRECATED NV_FORCE_INLINE VecU32V VecU32VLoadXYZW(uint32_t x, uint32_t y, uint32_t z, uint32_t w); //VecI32V //(i,i,i,i) NV_FORCE_INLINE VecI32V I4Load(const int32_t i); //(i,i,i,i) NV_FORCE_INLINE VecI32V I4LoadU(const int32_t* i); //(i,i,i,i) NV_FORCE_INLINE VecI32V I4LoadA(const int32_t* i); //QuatV //(x = v[0], y=v[1], z=v[2], w=v3[3]) and array don't need to aligned NV_FORCE_INLINE QuatV QuatVLoadU(const float* v); //(x = v[0], y=v[1], z=v[2], w=v3[3]) and array need to aligned, fast load NV_FORCE_INLINE QuatV QuatVLoadA(const float* v); //(x, y, z, w) NV_DEPRECATED NV_FORCE_INLINE QuatV QuatVLoadXYZW(const float x, const float y, const float z, const float w); //not added to public api Vec4V Vec4V_From_NvVec3_WUndefined(const NvVec3& v); /////////////////////////////////////////////////// //Construct a simd type from a different simd type /////////////////////////////////////////////////// //Vec3V //(v.x,v.y,v.z,0) NV_FORCE_INLINE Vec3V Vec3V_From_Vec4V(Vec4V v) ; //(v.x,v.y,v.z,undefined) NV_FORCE_INLINE Vec3V Vec3V_From_Vec4V_WUndefined(const Vec4V v); //Vec4V //(f.x,f.y,f.z,f.w) NV_FORCE_INLINE Vec4V Vec4V_From_Vec3V(Vec3V f); //((float)f.x, (float)f.y, (float)f.z, (float)f.w) NV_FORCE_INLINE Vec4V Vec4V_From_VecU32V(VecU32V a); //((float)f.x, (float)f.y, (float)f.z, (float)f.w) NV_FORCE_INLINE Vec4V Vec4V_From_VecI32V(VecI32V a); //(*(reinterpret_cast<float*>(&f.x), (reinterpret_cast<float*>(&f.y), (reinterpret_cast<float*>(&f.z), (reinterpret_cast<float*>(&f.w)) NV_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecU32V(VecU32V a); 
//(*(reinterpret_cast<float*>(&f.x), (reinterpret_cast<float*>(&f.y), (reinterpret_cast<float*>(&f.z), (reinterpret_cast<float*>(&f.w)) NV_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecI32V(VecI32V a); //VecU32V //(*(reinterpret_cast<uint32_t*>(&f.x), (reinterpret_cast<uint32_t*>(&f.y), (reinterpret_cast<uint32_t*>(&f.z), (reinterpret_cast<uint32_t*>(&f.w)) NV_FORCE_INLINE VecU32V VecU32V_ReinterpretFrom_Vec4V(Vec4V a); //(b[0], b[1], b[2], b[3]) NV_DEPRECATED NV_FORCE_INLINE VecU32V VecU32V_From_BoolV(const BoolVArg b); //VecI32V //(*(reinterpret_cast<int32_t*>(&f.x), (reinterpret_cast<int32_t*>(&f.y), (reinterpret_cast<int32_t*>(&f.z), (reinterpret_cast<int32_t*>(&f.w)) NV_FORCE_INLINE VecI32V VecI32V_ReinterpretFrom_Vec4V(Vec4V a); //((I32)a.x, (I32)a.y, (I32)a.z, (I32)a.w) NV_DEPRECATED NV_FORCE_INLINE VecI32V VecI32V_From_Vec4V(Vec4V a); //((I32)b.x, (I32)b.y, (I32)b.z, (I32)b.w) NV_DEPRECATED NV_FORCE_INLINE VecI32V VecI32V_From_BoolV(const BoolVArg b); /////////////////////////////////////////////////// //Convert from a simd type back to a scalar type /////////////////////////////////////////////////// //FloatV //a.x NV_DEPRECATED NV_FORCE_INLINE float FStore(const FloatV a); //a.x NV_FORCE_INLINE void FStore(const FloatV a, float* NV_RESTRICT f); //Vec3V //(a.x,a.y,a.z) NV_FORCE_INLINE void V3StoreA(const Vec3V a, NvVec3& f); //(a.x,a.y,a.z) NV_FORCE_INLINE void V3StoreU(const Vec3V a, NvVec3& f); //Vec4V NV_FORCE_INLINE void V4StoreA(const Vec4V a, float* f); NV_FORCE_INLINE void V4StoreU(const Vec4V a, float* f); //BoolV NV_FORCE_INLINE void BStoreA(const BoolV b, uint32_t* f); //VecU32V NV_FORCE_INLINE void U4StoreA(const VecU32V uv, uint32_t* u); //VecI32V NV_FORCE_INLINE void I4StoreA(const VecI32V iv, int32_t* i); ////////////////////////////////////////////////////////////////// //Test that simd types have elements in the floating point range ////////////////////////////////////////////////////////////////// //check for each component is valid ie in floating point range NV_FORCE_INLINE bool isFiniteFloatV(const FloatV a); //check for each component is valid ie in floating point range NV_FORCE_INLINE bool isFiniteVec3V(const Vec3V a); //check for each component is valid ie in floating point range NV_FORCE_INLINE bool isFiniteVec4V(const Vec4V a); //Check that w-component is zero. NV_FORCE_INLINE bool isValidVec3V(const Vec3V a); ////////////////////////////////////////////////////////////////// //Tests that all elements of two 16-byte types are completely equivalent. //Use these tests for unit testing and asserts only. 
////////////////////////////////////////////////////////////////// namespace _VecMathTests { NV_FORCE_INLINE bool allElementsEqualFloatV(const FloatV a, const FloatV b); NV_FORCE_INLINE bool allElementsEqualVec3V(const Vec3V a, const Vec3V b); NV_FORCE_INLINE bool allElementsEqualVec4V(const Vec4V a, const Vec4V b); NV_FORCE_INLINE bool allElementsEqualBoolV(const BoolV a, const BoolV b); NV_FORCE_INLINE bool allElementsEqualVecU32V(const VecU32V a, const VecU32V b); NV_FORCE_INLINE bool allElementsEqualVecI32V(const VecI32V a, const VecI32V b); NV_FORCE_INLINE bool allElementsEqualMat33V(const Mat33V& a, const Mat33V& b) { return ( allElementsEqualVec3V(a.col0,b.col0) && allElementsEqualVec3V(a.col1,b.col1) && allElementsEqualVec3V(a.col2,b.col2) ); } NV_FORCE_INLINE bool allElementsEqualMat34V(const Mat34V& a, const Mat34V& b) { return ( allElementsEqualVec3V(a.col0,b.col0) && allElementsEqualVec3V(a.col1,b.col1) && allElementsEqualVec3V(a.col2,b.col2) && allElementsEqualVec3V(a.col3,b.col3) ); } NV_FORCE_INLINE bool allElementsEqualMat44V(const Mat44V& a, const Mat44V& b) { return ( allElementsEqualVec4V(a.col0,b.col0) && allElementsEqualVec4V(a.col1,b.col1) && allElementsEqualVec4V(a.col2,b.col2) && allElementsEqualVec4V(a.col3,b.col3) ); } NV_FORCE_INLINE bool allElementsNearEqualFloatV(const FloatV a, const FloatV b); NV_FORCE_INLINE bool allElementsNearEqualVec3V(const Vec3V a, const Vec3V b); NV_FORCE_INLINE bool allElementsNearEqualVec4V(const Vec4V a, const Vec4V b); NV_FORCE_INLINE bool allElementsNearEqualMat33V(const Mat33V& a, const Mat33V& b) { return ( allElementsNearEqualVec3V(a.col0,b.col0) && allElementsNearEqualVec3V(a.col1,b.col1) && allElementsNearEqualVec3V(a.col2,b.col2) ); } NV_FORCE_INLINE bool allElementsNearEqualMat34V(const Mat34V& a, const Mat34V& b) { return ( allElementsNearEqualVec3V(a.col0,b.col0) && allElementsNearEqualVec3V(a.col1,b.col1) && allElementsNearEqualVec3V(a.col2,b.col2) && allElementsNearEqualVec3V(a.col3,b.col3) ); } NV_FORCE_INLINE bool allElementsNearEqualMat44V(const Mat44V& a, const Mat44V& b) { return ( allElementsNearEqualVec4V(a.col0,b.col0) && allElementsNearEqualVec4V(a.col1,b.col1) && allElementsNearEqualVec4V(a.col2,b.col2) && allElementsNearEqualVec4V(a.col3,b.col3) ); } }; ////////////////////////////////////////////////////////////////// //Math operations on FloatV ////////////////////////////////////////////////////////////////// //(0,0,0,0) NV_FORCE_INLINE FloatV FZero(); //(1,1,1,1) NV_FORCE_INLINE FloatV FOne(); //(0.5,0.5,0.5,0.5) NV_FORCE_INLINE FloatV FHalf(); //(NV_EPS_REAL,NV_EPS_REAL,NV_EPS_REAL,NV_EPS_REAL) NV_FORCE_INLINE FloatV FEps(); //(NV_MAX_REAL, NV_MAX_REAL, NV_MAX_REAL NV_MAX_REAL) NV_FORCE_INLINE FloatV FMax(); //(-NV_MAX_REAL, -NV_MAX_REAL, -NV_MAX_REAL -NV_MAX_REAL) NV_FORCE_INLINE FloatV FNegMax(); //(1e-6f, 1e-6f, 1e-6f, 1e-6f) NV_FORCE_INLINE FloatV FEps6(); //((float*)&1, (float*)&1, (float*)&1, (float*)&1) //-f (per component) NV_FORCE_INLINE FloatV FNeg(const FloatV f) ; //a+b (per component) NV_FORCE_INLINE FloatV FAdd(const FloatV a, const FloatV b); //a-b (per component) NV_FORCE_INLINE FloatV FSub(const FloatV a, const FloatV b) ; //a*b (per component) NV_FORCE_INLINE FloatV FMul(const FloatV a, const FloatV b) ; //a/b (per component) NV_FORCE_INLINE FloatV FDiv(const FloatV a, const FloatV b); //a/b (per component) NV_FORCE_INLINE FloatV FDivFast(const FloatV a, const FloatV b); //1.0f/a NV_FORCE_INLINE FloatV FRecip(const FloatV a); //1.0f/a NV_FORCE_INLINE FloatV FRecipFast(const FloatV a); 
//1.0f/sqrt(a) NV_FORCE_INLINE FloatV FRsqrt(const FloatV a); //1.0f/sqrt(a) NV_FORCE_INLINE FloatV FRsqrtFast(const FloatV a); //sqrt(a) NV_FORCE_INLINE FloatV FSqrt(const FloatV a); //a*b+c NV_FORCE_INLINE FloatV FScaleAdd(const FloatV a, const FloatV b, const FloatV c); //c-a*b NV_FORCE_INLINE FloatV FNegScaleSub(const FloatV a, const FloatV b, const FloatV c); //fabs(a) NV_FORCE_INLINE FloatV FAbs(const FloatV a); //c ? a : b (per component) NV_FORCE_INLINE FloatV FSel(const BoolV c, const FloatV a, const FloatV b); //a>b (per component) NV_FORCE_INLINE BoolV FIsGrtr(const FloatV a, const FloatV b); //a>=b (per component) NV_FORCE_INLINE BoolV FIsGrtrOrEq(const FloatV a, const FloatV b); //a==b (per component) NV_FORCE_INLINE BoolV FIsEq(const FloatV a, const FloatV b); //Max(a,b) (per component) NV_FORCE_INLINE FloatV FMax(const FloatV a, const FloatV b); //Min(a,b) (per component) NV_FORCE_INLINE FloatV FMin(const FloatV a, const FloatV b); //Clamp(a,b) (per component) NV_FORCE_INLINE FloatV FClamp(const FloatV a, const FloatV minV, const FloatV maxV); //a*b+c NV_DEPRECATED NV_FORCE_INLINE FloatV FMulAdd(const FloatV a, const FloatV b, const FloatV c) { return FScaleAdd(a,b,c); } //c-a*b NV_DEPRECATED NV_FORCE_INLINE FloatV FNegMulSub(const FloatV a, const FloatV b, const FloatV c) { return FNegScaleSub(a,b,c); } //a.x>b.x NV_FORCE_INLINE uint32_t FAllGrtr(const FloatV a, const FloatV b); //a.x>=b.x NV_FORCE_INLINE uint32_t FAllGrtrOrEq(const FloatV a, const FloatV b); //a.x==b.x NV_FORCE_INLINE uint32_t FAllEq(const FloatV a, const FloatV b); //a<min || a>max NV_FORCE_INLINE uint32_t FOutOfBounds(const FloatV a, const FloatV min, const FloatV max); //a>=min && a<=max NV_FORCE_INLINE uint32_t FInBounds(const FloatV a, const FloatV min, const FloatV max); //a<-bounds || a>bounds NV_FORCE_INLINE uint32_t FOutOfBounds(const FloatV a, const FloatV bounds); //a>=-bounds && a<=bounds NV_FORCE_INLINE uint32_t FInBounds(const FloatV a, const FloatV bounds); //round float a to the near int NV_FORCE_INLINE FloatV FRound(const FloatV a); //calculate the sin of float a NV_FORCE_INLINE FloatV FSin(const FloatV a); //calculate the cos of float b NV_FORCE_INLINE FloatV FCos(const FloatV a); ////////////////////////////////////////////////////////////////// //Math operations on Vec3V ////////////////////////////////////////////////////////////////// //(f,f,f,f) NV_FORCE_INLINE Vec3V V3Splat(const FloatV f); //(x,y,z) NV_FORCE_INLINE Vec3V V3Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z); //(1,0,0,0) NV_FORCE_INLINE Vec3V V3UnitX(); //(0,1,0,0) NV_FORCE_INLINE Vec3V V3UnitY(); //(0,0,1,0) NV_FORCE_INLINE Vec3V V3UnitZ(); //(f.x,f.x,f.x,f.x) NV_FORCE_INLINE FloatV V3GetX(const Vec3V f); //(f.y,f.y,f.y,f.y) NV_FORCE_INLINE FloatV V3GetY(const Vec3V f); //(f.z,f.z,f.z,f.z) NV_FORCE_INLINE FloatV V3GetZ(const Vec3V f); //(f,v.y,v.z,v.w) NV_FORCE_INLINE Vec3V V3SetX(const Vec3V v, const FloatV f); //(v.x,f,v.z,v.w) NV_FORCE_INLINE Vec3V V3SetY(const Vec3V v, const FloatV f); //(v.x,v.y,f,v.w) NV_FORCE_INLINE Vec3V V3SetZ(const Vec3V v, const FloatV f); //v.x=f NV_FORCE_INLINE void V3WriteX(Vec3V& v, const float f); //v.y=f NV_FORCE_INLINE void V3WriteY(Vec3V& v, const float f); //v.z=f NV_FORCE_INLINE void V3WriteZ(Vec3V& v, const float f); //v.x=f.x, v.y=f.y, v.z=f.z NV_FORCE_INLINE void V3WriteXYZ(Vec3V& v, const NvVec3& f); //return v.x NV_FORCE_INLINE float V3ReadX(const Vec3V& v); //return v.y NV_FORCE_INLINE float V3ReadY(const Vec3V& v); //return v.y NV_FORCE_INLINE float 
V3ReadZ(const Vec3V& v); //return (v.x,v.y,v.z) NV_FORCE_INLINE const NvVec3& V3ReadXYZ(const Vec3V& v); //(a.x, b.x, c.x) NV_FORCE_INLINE Vec3V V3ColX(const Vec3V a, const Vec3V b, const Vec3V c); //(a.y, b.y, c.y) NV_FORCE_INLINE Vec3V V3ColY(const Vec3V a, const Vec3V b, const Vec3V c); //(a.z, b.z, c.z) NV_FORCE_INLINE Vec3V V3ColZ(const Vec3V a, const Vec3V b, const Vec3V c); //(0,0,0,0) NV_FORCE_INLINE Vec3V V3Zero(); //(1,1,1,1) NV_FORCE_INLINE Vec3V V3One(); //(NV_EPS_REAL,NV_EPS_REAL,NV_EPS_REAL,NV_EPS_REAL) NV_FORCE_INLINE Vec3V V3Eps(); //-c (per component) NV_FORCE_INLINE Vec3V V3Neg(const Vec3V c); //a+b (per component) NV_FORCE_INLINE Vec3V V3Add(const Vec3V a, const Vec3V b); //a-b (per component) NV_FORCE_INLINE Vec3V V3Sub(const Vec3V a, const Vec3V b); //a*b (per component) NV_FORCE_INLINE Vec3V V3Scale(const Vec3V a, const FloatV b); //a*b (per component) NV_FORCE_INLINE Vec3V V3Mul(const Vec3V a, const Vec3V b); //a/b (per component) NV_FORCE_INLINE Vec3V V3ScaleInv(const Vec3V a, const FloatV b); //a/b (per component) NV_FORCE_INLINE Vec3V V3Div(const Vec3V a, const Vec3V b); //a/b (per component) NV_FORCE_INLINE Vec3V V3ScaleInvFast(const Vec3V a, const FloatV b); //a/b (per component) NV_FORCE_INLINE Vec3V V3DivFast(const Vec3V a, const Vec3V b); //1.0f/a NV_FORCE_INLINE Vec3V V3Recip(const Vec3V a); //1.0f/a NV_FORCE_INLINE Vec3V V3RecipFast(const Vec3V a); //1.0f/sqrt(a) NV_FORCE_INLINE Vec3V V3Rsqrt(const Vec3V a); //1.0f/sqrt(a) NV_FORCE_INLINE Vec3V V3RsqrtFast(const Vec3V a); //a*b+c NV_FORCE_INLINE Vec3V V3ScaleAdd(const Vec3V a, const FloatV b, const Vec3V c); //c-a*b NV_FORCE_INLINE Vec3V V3NegScaleSub(const Vec3V a, const FloatV b, const Vec3V c); //a*b+c NV_FORCE_INLINE Vec3V V3MulAdd(const Vec3V a, const Vec3V b, const Vec3V c); //c-a*b NV_FORCE_INLINE Vec3V V3NegMulSub(const Vec3V a, const Vec3V b, const Vec3V c); //fabs(a) NV_FORCE_INLINE Vec3V V3Abs(const Vec3V a); //a.b NV_FORCE_INLINE FloatV V3Dot(const Vec3V a, const Vec3V b); //aXb NV_FORCE_INLINE Vec3V V3Cross(const Vec3V a, const Vec3V b); //|a.a|^1/2 NV_FORCE_INLINE FloatV V3Length(const Vec3V a); //a.a NV_FORCE_INLINE FloatV V3LengthSq(const Vec3V a); //a*|a.a|^-1/2 NV_FORCE_INLINE Vec3V V3Normalize(const Vec3V a); //a.a>0 ? a*|a.a|^-1/2 : (0,0,0,0) NV_FORCE_INLINE FloatV V3Length(const Vec3V a); //a*|a.a|^-1/2 NV_FORCE_INLINE Vec3V V3NormalizeSafe(const Vec3V a); //a.x + a.y +a.z NV_FORCE_INLINE FloatV V3SumElems(const Vec3V a); //c ? a : b (per component) NV_FORCE_INLINE Vec3V V3Sel(const BoolV c, const Vec3V a, const Vec3V b); //a>b (per component) NV_FORCE_INLINE BoolV V3IsGrtr(const Vec3V a, const Vec3V b); //a>=b (per component) NV_FORCE_INLINE BoolV V3IsGrtrOrEq(const Vec3V a, const Vec3V b); //a==b (per component) NV_FORCE_INLINE BoolV V3IsEq(const Vec3V a, const Vec3V b); //Max(a,b) (per component) NV_FORCE_INLINE Vec3V V3Max(const Vec3V a, const Vec3V b); //Min(a,b) (per component) NV_FORCE_INLINE Vec3V V3Min(const Vec3V a, const Vec3V b); //Extract the maximum value from a NV_FORCE_INLINE FloatV V3ExtractMax(const Vec3V a); //Extract the maximum value from a NV_FORCE_INLINE FloatV V3ExtractMin(const Vec3V a); //Clamp(a,b) (per component) NV_FORCE_INLINE Vec3V V3Clamp(const Vec3V a, const Vec3V minV, const Vec3V maxV); //Extract the sign for each component NV_FORCE_INLINE Vec3V V3Sign(const Vec3V a); //Test all components. 
//(a.x>b.x && a.y>b.y && a.z>b.z) NV_FORCE_INLINE uint32_t V3AllGrtr(const Vec3V a, const Vec3V b); //(a.x>=b.x && a.y>=b.y && a.z>=b.z) NV_FORCE_INLINE uint32_t V3AllGrtrOrEq(const Vec3V a, const Vec3V b); //(a.x==b.x && a.y==b.y && a.z==b.z) NV_FORCE_INLINE uint32_t V3AllEq(const Vec3V a, const Vec3V b); //a.x<min.x || a.y<min.y || a.z<min.z || a.x>max.x || a.y>max.y || a.z>max.z NV_FORCE_INLINE uint32_t V3OutOfBounds(const Vec3V a, const Vec3V min, const Vec3V max); //a.x>=min.x && a.y>=min.y && a.z>=min.z && a.x<=max.x && a.y<=max.y && a.z<=max.z NV_FORCE_INLINE uint32_t V3InBounds(const Vec3V a, const Vec3V min, const Vec3V max); //a.x<-bounds.x || a.y<=-bounds.y || a.z<bounds.z || a.x>bounds.x || a.y>bounds.y || a.z>bounds.z NV_FORCE_INLINE uint32_t V3OutOfBounds(const Vec3V a, const Vec3V bounds); //a.x>=-bounds.x && a.y>=-bounds.y && a.z>=-bounds.z && a.x<=bounds.x && a.y<=bounds.y && a.z<=bounds.z NV_FORCE_INLINE uint32_t V3InBounds(const Vec3V a, const Vec3V bounds); //(floor(a.x + 0.5f), floor(a.y + 0.5f), floor(a.z + 0.5f)) NV_FORCE_INLINE Vec3V V3Round(const Vec3V a); //(sinf(a.x), sinf(a.y), sinf(a.z)) NV_FORCE_INLINE Vec3V V3Sin(const Vec3V a); //(cosf(a.x), cosf(a.y), cosf(a.z)) NV_FORCE_INLINE Vec3V V3Cos(const Vec3V a); //(a.y,a.z,a.z) NV_FORCE_INLINE Vec3V V3PermYZZ(const Vec3V a); //(a.x,a.y,a.x) NV_FORCE_INLINE Vec3V V3PermXYX(const Vec3V a); //(a.y,a.z,a.x) NV_FORCE_INLINE Vec3V V3PermYZX(const Vec3V a); //(a.z, a.x, a.y) NV_FORCE_INLINE Vec3V V3PermZXY(const Vec3V a); //(a.z,a.z,a.y) NV_FORCE_INLINE Vec3V V3PermZZY(const Vec3V a); //(a.y,a.x,a.x) NV_FORCE_INLINE Vec3V V3PermYXX(const Vec3V a); //(0, v1.z, v0.y) NV_FORCE_INLINE Vec3V V3Perm_Zero_1Z_0Y(const Vec3V v0, const Vec3V v1); //(v0.z, 0, v1.x) NV_FORCE_INLINE Vec3V V3Perm_0Z_Zero_1X(const Vec3V v0, const Vec3V v1); //(v1.y, v0.x, 0) NV_FORCE_INLINE Vec3V V3Perm_1Y_0X_Zero(const Vec3V v0, const Vec3V v1); ////////////////////////////////////////////////////////////////// //Math operations on Vec4V ////////////////////////////////////////////////////////////////// //(f,f,f,f) NV_FORCE_INLINE Vec4V V4Splat(const FloatV f); //(f[0],f[1],f[2],f[3]) NV_FORCE_INLINE Vec4V V4Merge(const FloatV* const f); //(x,y,z,w) NV_FORCE_INLINE Vec4V V4Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z, const FloatVArg w); //(x.w, y.w, z.w, w.w) NV_FORCE_INLINE Vec4V V4MergeW(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w); //(x.z, y.z, z.z, w.z) NV_FORCE_INLINE Vec4V V4MergeZ(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w); //(x.y, y.y, z.y, w.y) NV_FORCE_INLINE Vec4V V4MergeY(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w); //(x.x, y.x, z.x, w.x) NV_FORCE_INLINE Vec4V V4MergeX(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w); //(a.x, b.x, a.y, b.y) NV_FORCE_INLINE Vec4V V4UnpackXY(const Vec4VArg a, const Vec4VArg b); //(a.z, b.z, a.w, b.w) NV_FORCE_INLINE Vec4V V4UnpackZW(const Vec4VArg a, const Vec4VArg b); //(1,0,0,0) NV_FORCE_INLINE Vec4V V4UnitW(); //(0,1,0,0) NV_FORCE_INLINE Vec4V V4UnitY(); //(0,0,1,0) NV_FORCE_INLINE Vec4V V4UnitZ(); //(0,0,0,1) NV_FORCE_INLINE Vec4V V4UnitW(); //(f.x,f.x,f.x,f.x) NV_FORCE_INLINE FloatV V4GetX(const Vec4V f); //(f.y,f.y,f.y,f.y) NV_FORCE_INLINE FloatV V4GetY(const Vec4V f); //(f.z,f.z,f.z,f.z) NV_FORCE_INLINE FloatV V4GetZ(const Vec4V f); //(f.w,f.w,f.w,f.w) NV_FORCE_INLINE FloatV V4GetW(const Vec4V f); //(f,v.y,v.z,v.w) NV_FORCE_INLINE Vec4V V4SetX(const Vec4V v, const FloatV f); 
//(v.x,f,v.z,v.w) NV_FORCE_INLINE Vec4V V4SetY(const Vec4V v, const FloatV f); //(v.x,v.y,f,v.w) NV_FORCE_INLINE Vec4V V4SetZ(const Vec4V v, const FloatV f); //(v.x,v.y,v.z,f) NV_FORCE_INLINE Vec4V V4SetW(const Vec4V v, const FloatV f); //(v.x,v.y,v.z,0) NV_FORCE_INLINE Vec4V V4ClearW(const Vec4V v); //(a[elementIndex], a[elementIndex], a[elementIndex], a[elementIndex]) template<int elementIndex> NV_FORCE_INLINE Vec4V V4SplatElement(Vec4V a); //v.x=f NV_FORCE_INLINE void V4WriteX(Vec4V& v, const float f); //v.y=f NV_FORCE_INLINE void V4WriteY(Vec4V& v, const float f); //v.z=f NV_FORCE_INLINE void V4WriteZ(Vec4V& v, const float f); //v.w=f NV_FORCE_INLINE void V4WriteW(Vec4V& v, const float f); //v.x=f.x, v.y=f.y, v.z=f.z NV_FORCE_INLINE void V4WriteXYZ(Vec4V& v, const NvVec3& f); //return v.x NV_FORCE_INLINE float V4ReadX(const Vec4V& v); //return v.y NV_FORCE_INLINE float V4ReadY(const Vec4V& v); //return v.z NV_FORCE_INLINE float V4ReadZ(const Vec4V& v); //return v.w NV_FORCE_INLINE float V4ReadW(const Vec4V& v); //return (v.x,v.y,v.z) NV_FORCE_INLINE const NvVec3& V4ReadXYZ(const Vec4V& v); //(0,0,0,0) NV_FORCE_INLINE Vec4V V4Zero(); //(1,1,1,1) NV_FORCE_INLINE Vec4V V4One(); //(NV_EPS_REAL,NV_EPS_REAL,NV_EPS_REAL,NV_EPS_REAL) NV_FORCE_INLINE Vec4V V4Eps(); //-c (per component) NV_FORCE_INLINE Vec4V V4Neg(const Vec4V c); //a+b (per component) NV_FORCE_INLINE Vec4V V4Add(const Vec4V a, const Vec4V b); //a-b (per component) NV_FORCE_INLINE Vec4V V4Sub(const Vec4V a, const Vec4V b); //a*b (per component) NV_FORCE_INLINE Vec4V V4Scale(const Vec4V a, const FloatV b); //a*b (per component) NV_FORCE_INLINE Vec4V V4Mul(const Vec4V a, const Vec4V b); //a/b (per component) NV_FORCE_INLINE Vec4V V4ScaleInv(const Vec4V a, const FloatV b); //a/b (per component) NV_FORCE_INLINE Vec4V V4Div(const Vec4V a, const Vec4V b); //a/b (per component) NV_FORCE_INLINE Vec4V V4ScaleInvFast(const Vec4V a, const FloatV b); //a/b (per component) NV_FORCE_INLINE Vec4V V4DivFast(const Vec4V a, const Vec4V b); //1.0f/a NV_FORCE_INLINE Vec4V V4Recip(const Vec4V a); //1.0f/a NV_FORCE_INLINE Vec4V V4RecipFast(const Vec4V a); //1.0f/sqrt(a) NV_FORCE_INLINE Vec4V V4Rsqrt(const Vec4V a); //1.0f/sqrt(a) NV_FORCE_INLINE Vec4V V4RsqrtFast(const Vec4V a); //a*b+c NV_FORCE_INLINE Vec4V V4ScaleAdd(const Vec4V a, const FloatV b, const Vec4V c); //c-a*b NV_FORCE_INLINE Vec4V V4NegScaleSub(const Vec4V a, const FloatV b, const Vec4V c); //a*b+c NV_FORCE_INLINE Vec4V V4MulAdd(const Vec4V a, const Vec4V b, const Vec4V c); //c-a*b NV_FORCE_INLINE Vec4V V4NegMulSub(const Vec4V a, const Vec4V b, const Vec4V c); //fabs(a) NV_FORCE_INLINE Vec4V V4Abs(const Vec4V a); //bitwise a & ~b NV_FORCE_INLINE Vec4V V4Andc(const Vec4V a, const VecU32V b); //a.b NV_FORCE_INLINE FloatV V4Dot(const Vec4V a, const Vec4V b); //|a.a|^1/2 NV_FORCE_INLINE FloatV V4Length(const Vec4V a); //a.a NV_FORCE_INLINE FloatV V4LengthSq(const Vec4V a); //a*|a.a|^-1/2 NV_FORCE_INLINE Vec4V V4Normalize(const Vec4V a); //a.a>0 ? a*|a.a|^-1/2 : (0,0,0,0) NV_FORCE_INLINE Vec4V V4NormalizeSafe(const Vec4V a); //a*|a.a|^-1/2 NV_FORCE_INLINE Vec4V V4NormalizeFast(const Vec4V a); //c ? 
a : b (per component) NV_FORCE_INLINE Vec4V V4Sel(const BoolV c, const Vec4V a, const Vec4V b); //a>b (per component) NV_FORCE_INLINE BoolV V4IsGrtr(const Vec4V a, const Vec4V b); //a>=b (per component) NV_FORCE_INLINE BoolV V4IsGrtrOrEq(const Vec4V a, const Vec4V b); //a==b (per component) NV_FORCE_INLINE BoolV V4IsEq(const Vec4V a, const Vec4V b); //Max(a,b) (per component) NV_FORCE_INLINE Vec4V V4Max(const Vec4V a, const Vec4V b); //Min(a,b) (per component) NV_FORCE_INLINE Vec4V V4Min(const Vec4V a, const Vec4V b); //Get the maximum component from a NV_FORCE_INLINE FloatV V4ExtractMax(const Vec4V a); //Get the minimum component from a NV_FORCE_INLINE FloatV V4ExtractMin(const Vec4V a); //Clamp(a,b) (per component) NV_FORCE_INLINE Vec4V V4Clamp(const Vec4V a, const Vec4V minV, const Vec4V maxV); //return 1 if all components of a are greater than all components of b. NV_FORCE_INLINE uint32_t V4AllGrtr(const Vec4V a, const Vec4V b); //return 1 if all components of a are greater than or equal to all components of b NV_FORCE_INLINE uint32_t V4AllGrtrOrEq(const Vec4V a, const Vec4V b); //return 1 if all components of a are equal to all components of b NV_FORCE_INLINE uint32_t V4AllEq(const Vec4V a, const Vec4V b); // round(a)(per component) NV_FORCE_INLINE Vec4V V4Round(const Vec4V a); // sin(a) (per component) NV_FORCE_INLINE Vec4V V4Sin(const Vec4V a); //cos(a) (per component) NV_FORCE_INLINE Vec4V V4Cos(const Vec4V a); //Permute v into a new vec4v with YXWZ format NV_FORCE_INLINE Vec4V V4Perm_YXWZ(const Vec4V v); //Permute v into a new vec4v with XZXZ format NV_FORCE_INLINE Vec4V V4Perm_XZXZ(const Vec4V v); //Permute v into a new vec4v with YWYW format NV_FORCE_INLINE Vec4V V4Perm_YWYW(const Vec4V v); //Permute v into a new vec4v with format {a[x], a[y], a[z], a[w]} //V4Perm<1,3,1,3> is equal to V4Perm_YWYW //V4Perm<0,2,0,2> is equal to V4Perm_XZXZ //V3Perm<1,0,3,2> is equal to V4Perm_YXWZ template<uint8_t x, uint8_t y, uint8_t z, uint8_t w> NV_FORCE_INLINE Vec4V V4Perm(const Vec4V a); //q = cos(a/2) + u*sin(a/2) NV_FORCE_INLINE QuatV QuatV_From_RotationAxisAngle(const Vec3V u, const FloatV a); // convert q to a unit quaternion NV_FORCE_INLINE QuatV QuatNormalize(const QuatV q); //|q.q|^1/2 NV_FORCE_INLINE FloatV QuatLength(const QuatV q); //q.q NV_FORCE_INLINE FloatV QuatLengthSq(const QuatV q); //a.b NV_FORCE_INLINE FloatV QuatDot(const QuatV a, const QuatV b); //(-q.x, -q.y, -q.z, q.w) NV_FORCE_INLINE QuatV QuatConjugate(const QuatV q); //(q.x, q.y, q.z) NV_FORCE_INLINE Vec3V QuatGetImaginaryPart(const QuatV q); //convert quaterion to matrix 33 NV_FORCE_INLINE Mat33V QuatGetMat33V(const QuatVArg q); //convert matrix 33 to quaterion NV_FORCE_INLINE QuatV Mat33GetQuatV(const Mat33V& a); // brief computes rotation of x-axis NV_FORCE_INLINE Vec3V QuatGetBasisVector0(const QuatV q); // brief computes rotation of y-axis NV_FORCE_INLINE Vec3V QuatGetBasisVector1(const QuatV q); // brief computes rotation of z-axis NV_FORCE_INLINE Vec3V QuatGetBasisVector2(const QuatV q); // calculate the rotation vector from q and v NV_FORCE_INLINE Vec3V QuatRotate(const QuatV q, const Vec3V v); // calculate the roation vector from the conjuate quaterion and v NV_FORCE_INLINE Vec3V QuatRotateInv(const QuatV q, const Vec3V v); // quaternion multiplication NV_FORCE_INLINE QuatV QuatMul(const QuatV a, const QuatV b); // quaternion add NV_FORCE_INLINE QuatV QuatAdd(const QuatV a, const QuatV b); // (-q.x, -q.y, -q.z, -q.w) NV_FORCE_INLINE QuatV QuatNeg(const QuatV q); // (a.x - b.x, a.y-b.y, a.z-b.z, a.w-b.w ) 
NV_FORCE_INLINE QuatV QuatSub(const QuatV a, const QuatV b); // (a.x*b, a.y*b, a.z*b, a.w*b) NV_FORCE_INLINE QuatV QuatScale(const QuatV a, const FloatV b); // (x = v[0], y = v[1], z = v[2], w =v[3]) NV_FORCE_INLINE QuatV QuatMerge(const FloatV* const v); // (x = v[0], y = v[1], z = v[2], w =v[3]) NV_FORCE_INLINE QuatV QuatMerge(const FloatVArg x, const FloatVArg y, const FloatVArg z, const FloatVArg w); // (x = 0.f, y = 0.f, z = 0.f, w = 1.f) NV_FORCE_INLINE QuatV QuatIdentity(); //check for each component is valid NV_FORCE_INLINE bool isFiniteQuatV(const QuatV q); //check for each component is valid NV_FORCE_INLINE bool isValidQuatV(const QuatV q); //check for each component is valid NV_FORCE_INLINE bool isSaneQuatV(const QuatV q); //transpose 4x4 matrix represented by its columns NV_FORCE_INLINE void V4Transpose(Vec4V& col0, Vec4V& col1, Vec4V& col2, Vec4V& col3); //Math operations on 16-byte aligned booleans. //x=false y=false z=false w=false NV_FORCE_INLINE BoolV BFFFF(); //x=false y=false z=false w=true NV_FORCE_INLINE BoolV BFFFT(); //x=false y=false z=true w=false NV_FORCE_INLINE BoolV BFFTF(); //x=false y=false z=true w=true NV_FORCE_INLINE BoolV BFFTT(); //x=false y=true z=false w=false NV_FORCE_INLINE BoolV BFTFF(); //x=false y=true z=false w=true NV_FORCE_INLINE BoolV BFTFT(); //x=false y=true z=true w=false NV_FORCE_INLINE BoolV BFTTF(); //x=false y=true z=true w=true NV_FORCE_INLINE BoolV BFTTT(); //x=true y=false z=false w=false NV_FORCE_INLINE BoolV BTFFF(); //x=true y=false z=false w=true NV_FORCE_INLINE BoolV BTFFT(); //x=true y=false z=true w=false NV_FORCE_INLINE BoolV BTFTF(); //x=true y=false z=true w=true NV_FORCE_INLINE BoolV BTFTT(); //x=true y=true z=false w=false NV_FORCE_INLINE BoolV BTTFF(); //x=true y=true z=false w=true NV_FORCE_INLINE BoolV BTTFT(); //x=true y=true z=true w=false NV_FORCE_INLINE BoolV BTTTF(); //x=true y=true z=true w=true NV_FORCE_INLINE BoolV BTTTT(); //x=false y=false z=false w=true NV_FORCE_INLINE BoolV BWMask(); //x=true y=false z=false w=false NV_FORCE_INLINE BoolV BXMask(); //x=false y=true z=false w=false NV_FORCE_INLINE BoolV BYMask(); //x=false y=false z=true w=false NV_FORCE_INLINE BoolV BZMask(); //get x component NV_FORCE_INLINE BoolV BGetX(const BoolV f); //get y component NV_FORCE_INLINE BoolV BGetY(const BoolV f); //get z component NV_FORCE_INLINE BoolV BGetZ(const BoolV f); //get w component NV_FORCE_INLINE BoolV BGetW(const BoolV f); //Use elementIndex to splat xxxx or yyyy or zzzz or wwww template<int elementIndex> NV_FORCE_INLINE BoolV BSplatElement(Vec4V a); //component-wise && (AND) NV_FORCE_INLINE BoolV BAnd(const BoolV a, const BoolV b); //component-wise || (OR) NV_FORCE_INLINE BoolV BOr(const BoolV a, const BoolV b); //component-wise not NV_FORCE_INLINE BoolV BNot(const BoolV a); //if all four components are true, return true, otherwise return false NV_FORCE_INLINE BoolV BAllTrue4(const BoolV a); //if any four components is true, return true, otherwise return false NV_FORCE_INLINE BoolV BAnyTrue4(const BoolV a); //if all three(0, 1, 2) components are true, return true, otherwise return false NV_FORCE_INLINE BoolV BAllTrue3(const BoolV a); //if any three (0, 1, 2) components is true, return true, otherwise return false NV_FORCE_INLINE BoolV BAnyTrue3(const BoolV a); //Return 1 if all components equal, zero otherwise. 
NV_FORCE_INLINE uint32_t BAllEq(const BoolV a, const BoolV b); // Specialized/faster BAllEq function for b==TTTT NV_FORCE_INLINE uint32_t BAllEqTTTT(const BoolV a); // Specialized/faster BAllEq function for b==FFFF NV_FORCE_INLINE uint32_t BAllEqFFFF(const BoolV a); /// Get BoolV as bits set in an uint32_t. A bit in the output is set if the element is 'true' in the input. /// There is a bit for each element in a, with element 0s value held in bit0, element 1 in bit 1s and so forth. /// If nothing is true in the input it will return 0, and if all are true if will return 0xf. /// NOTE! That performance of the function varies considerably by platform, thus it is recommended to use /// where your algorithm really needs a BoolV in an integer variable. NV_FORCE_INLINE uint32_t BGetBitMask(const BoolV a); //VecI32V stuff NV_FORCE_INLINE VecI32V VecI32V_Zero(); NV_FORCE_INLINE VecI32V VecI32V_One(); NV_FORCE_INLINE VecI32V VecI32V_Two(); NV_FORCE_INLINE VecI32V VecI32V_MinusOne(); //Compute a shift parameter for VecI32V_LeftShift and VecI32V_RightShift //Each element of shift must be identical ie the vector must have form {count, count, count, count} with count>=0 NV_FORCE_INLINE VecShiftV VecI32V_PrepareShift(const VecI32VArg shift); //Shift each element of a leftwards by the same amount //Compute shift with VecI32V_PrepareShift //{a.x<<shift[0], a.y<<shift[0], a.z<<shift[0], a.w<<shift[0]} NV_FORCE_INLINE VecI32V VecI32V_LeftShift(const VecI32VArg a, const VecShiftVArg shift); //Shift each element of a rightwards by the same amount //Compute shift with VecI32V_PrepareShift //{a.x>>shift[0], a.y>>shift[0], a.z>>shift[0], a.w>>shift[0]} NV_FORCE_INLINE VecI32V VecI32V_RightShift(const VecI32VArg a, const VecShiftVArg shift); NV_FORCE_INLINE VecI32V VecI32V_Add(const VecI32VArg a, const VecI32VArg b); NV_FORCE_INLINE VecI32V VecI32V_Or(const VecI32VArg a, const VecI32VArg b); NV_FORCE_INLINE VecI32V VecI32V_GetX(const VecI32VArg a); NV_FORCE_INLINE VecI32V VecI32V_GetY(const VecI32VArg a); NV_FORCE_INLINE VecI32V VecI32V_GetZ(const VecI32VArg a); NV_FORCE_INLINE VecI32V VecI32V_GetW(const VecI32VArg a); NV_FORCE_INLINE VecI32V VecI32V_Sub(const VecI32VArg a, const VecI32VArg b); NV_FORCE_INLINE BoolV VecI32V_IsGrtr(const VecI32VArg a, const VecI32VArg b); NV_FORCE_INLINE BoolV VecI32V_IsEq(const VecI32VArg a, const VecI32VArg b); NV_FORCE_INLINE VecI32V V4I32Sel(const BoolV c, const VecI32V a, const VecI32V b); //VecU32V stuff NV_FORCE_INLINE VecU32V U4Zero(); NV_FORCE_INLINE VecU32V U4One(); NV_FORCE_INLINE VecU32V U4Two(); NV_FORCE_INLINE BoolV V4IsEqU32(const VecU32V a, const VecU32V b); NV_FORCE_INLINE VecU32V V4U32Sel(const BoolV c, const VecU32V a, const VecU32V b); NV_FORCE_INLINE VecU32V V4U32or(VecU32V a, VecU32V b); NV_FORCE_INLINE VecU32V V4U32and(VecU32V a, VecU32V b); NV_FORCE_INLINE VecU32V V4U32Andc(VecU32V a, VecU32V b); //VecU32 - why does this not return a bool? 
NV_FORCE_INLINE VecU32V V4IsGrtrV32u(const Vec4V a, const Vec4V b); //Math operations on 16-byte aligned Mat33s (represents any 3x3 matrix) //a*b NV_FORCE_INLINE Vec3V M33MulV3(const Mat33V& a, const Vec3V b); //A*x + b NV_FORCE_INLINE Vec3V M33MulV3AddV3(const Mat33V& A, const Vec3V b, const Vec3V c); //transpose(a) * b NV_FORCE_INLINE Vec3V M33TrnspsMulV3(const Mat33V& a, const Vec3V b); //a*b NV_FORCE_INLINE Mat33V M33MulM33(const Mat33V& a, const Mat33V& b); //a+b NV_FORCE_INLINE Mat33V M33Add(const Mat33V& a, const Mat33V& b); //a+b NV_FORCE_INLINE Mat33V M33Sub(const Mat33V& a, const Mat33V& b); //-a NV_FORCE_INLINE Mat33V M33Neg(const Mat33V& a); //absolute value of the matrix NV_FORCE_INLINE Mat33V M33Abs(const Mat33V& a); //inverse mat NV_FORCE_INLINE Mat33V M33Inverse(const Mat33V& a); //transpose(a) NV_FORCE_INLINE Mat33V M33Trnsps(const Mat33V& a); //create an identity matrix NV_FORCE_INLINE Mat33V M33Identity(); //create a vec3 to store the diagonal element of the M33 NV_FORCE_INLINE Mat33V M33Diagonal(const Vec3VArg); //Not implemented //return 1 if all components of a are equal to all components of b //NV_FORCE_INLINE uint32_t V4U32AllEq(const VecU32V a, const VecU32V b); //v.w=f //NV_FORCE_INLINE void V3WriteW(Vec3V& v, const float f); //NV_FORCE_INLINE float V3ReadW(const Vec3V& v); //Not used //NV_FORCE_INLINE Vec4V V4LoadAligned(Vec4V* addr); //NV_FORCE_INLINE Vec4V V4LoadUnaligned(Vec4V* addr); //floor(a)(per component) //NV_FORCE_INLINE Vec4V V4Floor(Vec4V a); //ceil(a) (per component) //NV_FORCE_INLINE Vec4V V4Ceil(Vec4V a); //NV_FORCE_INLINE VecU32V V4ConvertToU32VSaturate(const Vec4V a, uint32_t power); //Math operations on 16-byte aligned Mat34s (represents transformation matrix - rotation and translation). //namespace _Mat34V //{ // //a*b // NV_FORCE_INLINE Vec3V multiplyV(const Mat34V& a, const Vec3V b); // //a_rotation * b // NV_FORCE_INLINE Vec3V multiply3X3V(const Mat34V& a, const Vec3V b); // //transpose(a_rotation)*b // NV_FORCE_INLINE Vec3V multiplyTranspose3X3V(const Mat34V& a, const Vec3V b); // //a*b // NV_FORCE_INLINE Mat34V multiplyV(const Mat34V& a, const Mat34V& b); // //a_rotation*b // NV_FORCE_INLINE Mat33V multiply3X3V(const Mat34V& a, const Mat33V& b); // //a_rotation*b_rotation // NV_FORCE_INLINE Mat33V multiply3X3V(const Mat34V& a, const Mat34V& b); // //a+b // NV_FORCE_INLINE Mat34V addV(const Mat34V& a, const Mat34V& b); // //a^-1 // NV_FORCE_INLINE Mat34V getInverseV(const Mat34V& a); // //transpose(a_rotation) // NV_FORCE_INLINE Mat33V getTranspose3X3(const Mat34V& a); //}; //namespace _Mat34V //a*b //#define M34MulV3(a,b) (M34MulV3(a,b)) ////a_rotation * b //#define M34Mul33V3(a,b) (M34Mul33V3(a,b)) ////transpose(a_rotation)*b //#define M34TrnspsMul33V3(a,b) (M34TrnspsMul33V3(a,b)) ////a*b //#define M34MulM34(a,b) (_Mat34V::multiplyV(a,b)) //a_rotation*b //#define M34MulM33(a,b) (M34MulM33(a,b)) //a_rotation*b_rotation //#define M34Mul33MM34(a,b) (M34MulM33(a,b)) //a+b //#define M34Add(a,b) (M34Add(a,b)) ////a^-1 //#define M34Inverse(a,b) (M34Inverse(a)) //transpose(a_rotation) //#define M34Trnsps33(a) (M33Trnsps3X3(a)) //Math operations on 16-byte aligned Mat44s (represents any 4x4 matrix) //namespace _Mat44V //{ // //a*b // NV_FORCE_INLINE Vec4V multiplyV(const Mat44V& a, const Vec4V b); // //transpose(a)*b // NV_FORCE_INLINE Vec4V multiplyTransposeV(const Mat44V& a, const Vec4V b); // //a*b // NV_FORCE_INLINE Mat44V multiplyV(const Mat44V& a, const Mat44V& b); // //a+b // NV_FORCE_INLINE Mat44V addV(const Mat44V& a, const Mat44V& b); // 
//a&-1 // NV_FORCE_INLINE Mat44V getInverseV(const Mat44V& a); // //transpose(a) // NV_FORCE_INLINE Mat44V getTransposeV(const Mat44V& a); //}; //namespace _Mat44V //namespace _VecU32V //{ // // pack 8 U32s to 8 U16s with saturation // NV_FORCE_INLINE VecU16V pack2U32VToU16VSaturate(VecU32V a, VecU32V b); // NV_FORCE_INLINE VecU32V orV(VecU32V a, VecU32V b); // NV_FORCE_INLINE VecU32V andV(VecU32V a, VecU32V b); // NV_FORCE_INLINE VecU32V andcV(VecU32V a, VecU32V b); // // conversion from integer to float // NV_FORCE_INLINE Vec4V convertToVec4V(VecU32V a); // // splat a[elementIndex] into all fields of a // template<int elementIndex> // NV_FORCE_INLINE VecU32V splatElement(VecU32V a); // NV_FORCE_INLINE void storeAligned(VecU32V a, VecU32V* address); //}; //namespace _VecI32V //{ // template<int a> NV_FORCE_INLINE VecI32V splatI32(); //}; // //namespace _VecU16V //{ // NV_FORCE_INLINE VecU16V orV(VecU16V a, VecU16V b); // NV_FORCE_INLINE VecU16V andV(VecU16V a, VecU16V b); // NV_FORCE_INLINE VecU16V andcV(VecU16V a, VecU16V b); // NV_FORCE_INLINE void storeAligned(VecU16V val, VecU16V *address); // NV_FORCE_INLINE VecU16V loadAligned(VecU16V* addr); // NV_FORCE_INLINE VecU16V loadUnaligned(VecU16V* addr); // NV_FORCE_INLINE VecU16V compareGt(VecU16V a, VecU16V b); // template<int elementIndex> // NV_FORCE_INLINE VecU16V splatElement(VecU16V a); // NV_FORCE_INLINE VecU16V subtractModulo(VecU16V a, VecU16V b); // NV_FORCE_INLINE VecU16V addModulo(VecU16V a, VecU16V b); // NV_FORCE_INLINE VecU32V getLo16(VecU16V a); // [0,2,4,6] 16-bit values to [0,1,2,3] 32-bit vector // NV_FORCE_INLINE VecU32V getHi16(VecU16V a); // [1,3,5,7] 16-bit values to [0,1,2,3] 32-bit vector //}; // //namespace _VecI16V //{ // template <int val> NV_FORCE_INLINE VecI16V splatImmediate(); //}; // //namespace _VecU8V //{ //}; //a*b //#define M44MulV4(a,b) (M44MulV4(a,b)) ////transpose(a)*b //#define M44TrnspsMulV4(a,b) (M44TrnspsMulV4(a,b)) ////a*b //#define M44MulM44(a,b) (M44MulM44(a,b)) ////a+b //#define M44Add(a,b) (M44Add(a,b)) ////a&-1 //#define M44Inverse(a) (M44Inverse(a)) ////transpose(a) //#define M44Trnsps(a) (M44Trnsps(a)) // dsequeira: these used to be assert'd out in SIMD builds, but they're necessary if // we want to be able to write some scalar functions which run using SIMD data structures NV_FORCE_INLINE void V3WriteX(Vec3V& v, const float f) { ((NvVec3 &)v).x=f; } NV_FORCE_INLINE void V3WriteY(Vec3V& v, const float f) { ((NvVec3 &)v).y=f; } NV_FORCE_INLINE void V3WriteZ(Vec3V& v, const float f) { ((NvVec3 &)v).z=f; } NV_FORCE_INLINE void V3WriteXYZ(Vec3V& v, const NvVec3& f) { (NvVec3 &)v = f; } NV_FORCE_INLINE float V3ReadX(const Vec3V& v) { return ((NvVec3 &)v).x; } NV_FORCE_INLINE float V3ReadY(const Vec3V& v) { return ((NvVec3 &)v).y; } NV_FORCE_INLINE float V3ReadZ(const Vec3V& v) { return ((NvVec3 &)v).z; } NV_FORCE_INLINE const NvVec3& V3ReadXYZ(const Vec3V& v) { return (NvVec3&)v; } NV_FORCE_INLINE void V4WriteX(Vec4V& v, const float f) { ((NvVec4&)v).x=f; } NV_FORCE_INLINE void V4WriteY(Vec4V& v, const float f) { ((NvVec4&)v).y=f; } NV_FORCE_INLINE void V4WriteZ(Vec4V& v, const float f) { ((NvVec4&)v).z=f; } NV_FORCE_INLINE void V4WriteW(Vec4V& v, const float f) { ((NvVec4&)v).w=f; } NV_FORCE_INLINE void V4WriteXYZ(Vec4V& v, const NvVec3& f) { ((NvVec3&)v)=f; } NV_FORCE_INLINE float V4ReadX(const Vec4V& v) { return ((NvVec4&)v).x; } NV_FORCE_INLINE float V4ReadY(const Vec4V& v) { return ((NvVec4&)v).y; } NV_FORCE_INLINE float V4ReadZ(const Vec4V& v) { return ((NvVec4&)v).z; } NV_FORCE_INLINE 
float V4ReadW(const Vec4V& v) { return ((NvVec4&)v).w; } NV_FORCE_INLINE const NvVec3& V4ReadXYZ(const Vec4V& v) { return (NvVec3&)v; } //this macro transposes 4 Vec4V into 3 Vec4V (assuming that the W component can be ignored) #define NV_TRANSPOSE_44_34(inA, inB, inC, inD, outA, outB, outC) \ outA = V4UnpackXY(inA, inC); \ inA = V4UnpackZW(inA, inC); \ inC = V4UnpackXY(inB, inD); \ inB = V4UnpackZW(inB, inD); \ outB = V4UnpackZW(outA, inC); \ outA = V4UnpackXY(outA, inC); \ outC = V4UnpackXY(inA, inB); //this macro transposes 3 Vec4V into 4 Vec4V (with W components as garbage!) #define NV_TRANSPOSE_34_44(inA, inB, inC, outA, outB, outC, outD) \ outA = V4UnpackXY(inA, inC); \ inA = V4UnpackZW(inA, inC); \ outC = V4UnpackXY(inB, inB); \ inC = V4UnpackZW(inB, inB); \ outB = V4UnpackZW(outA, outC); \ outA = V4UnpackXY(outA, outC); \ outC = V4UnpackXY(inA, inC); \ outD = V4UnpackZW(inA, inC); #define NV_TRANSPOSE_44(inA, inB, inC, inD, outA, outB, outC, outD) \ outA = V4UnpackXY(inA, inC); \ inA = V4UnpackZW(inA, inC); \ inC = V4UnpackXY(inB, inD); \ inB = V4UnpackZW(inB, inD); \ outB = V4UnpackZW(outA, inC); \ outA = V4UnpackXY(outA, inC); \ outC = V4UnpackXY(inA, inB); \ outD = V4UnpackZW(inA, inB); //On all platforms except 360, this is a fast way of calculating 4 dot products at once. On 360, it may be faster to call V3Dot 4 times because there is an //instruction to perform a dot product that completes in 14 cycles //It returns a Vec4V, where each element is the dot product of one pair of Vec3Vs NV_FORCE_INLINE Vec4V V3Dot4(const Vec3VArg a0, const Vec3VArg b0, const Vec3VArg a1, const Vec3VArg b1, const Vec3VArg a2, const Vec3VArg b2, const Vec3VArg a3, const Vec3VArg b3) { Vec4V a0b0 = Vec4V_From_Vec3V(V3Mul(a0, b0)); Vec4V a1b1 = Vec4V_From_Vec3V(V3Mul(a1, b1)); Vec4V a2b2 = Vec4V_From_Vec3V(V3Mul(a2, b2)); Vec4V a3b3 = Vec4V_From_Vec3V(V3Mul(a3, b3)); Vec4V aTrnsps, bTrnsps, cTrnsps; NV_TRANSPOSE_44_34(a0b0, a1b1, a2b2, a3b3, aTrnsps, bTrnsps, cTrnsps); return V4Add(V4Add(aTrnsps, bTrnsps), cTrnsps); } //Now for the cross-platform implementations of the 16-byte aligned maths functions (win32/360/ppu/spu etc). #if COMPILE_VECTOR_INTRINSICS #include "NsInlineAoS.h" #else // #if COMPILE_VECTOR_INTRINSICS #include "NsVecMathAoSScalarInline.h" #endif // #if !COMPILE_VECTOR_INTRINSICS #include "NsVecQuat.h" } // namespace aos } // namespace shdfnd } // namespace nvidia #endif //PS_VECMATH_H
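The V3Dot4 helper above batches four dot products into a single Vec4V. A minimal usage sketch (not part of the original source), assuming the umbrella vec-math header that pulls in these declarations is included and nvidia::shdfnd::aos is in scope; the pa/pb arrays are placeholder input data:

    using namespace nvidia::shdfnd::aos;
    NvVec3 pa[4], pb[4];                       // placeholder inputs, filled elsewhere
    const Vec4V dots = V3Dot4(V3LoadU(pa[0]), V3LoadU(pb[0]),
                              V3LoadU(pa[1]), V3LoadU(pb[1]),
                              V3LoadU(pa[2]), V3LoadU(pb[2]),
                              V3LoadU(pa[3]), V3LoadU(pb[3]));
    NV_ALIGN(16, float) d[4];
    V4StoreA(dots, d);                         // d[i] == dot(pa[i], pb[i])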
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsHashSet.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NSFOUNDATION_NSHASHSET_H #define NV_NSFOUNDATION_NSHASHSET_H #include "NsHashInternals.h" // TODO: make this doxy-format // This header defines two hash sets. Hash sets // * support custom initial table sizes (rounded up internally to power-of-2) // * support custom static allocator objects // * auto-resize, based on a load factor (i.e. a 64-entry .75 load factor hash will resize // when the 49th element is inserted) // * are based on open hashing // // Sets have STL-like copying semantics, and properly initialize and destruct copies of objects // // There are two forms of set: coalesced and uncoalesced. Coalesced sets keep the entries in the // initial segment of an array, so are fast to iterate over; however deletion is approximately // twice as expensive. 
// // HashSet<T>: // bool insert(const T& k) amortized O(1) (exponential resize policy) // bool contains(const T& k) const; O(1) // bool erase(const T& k); O(1) // uint32_t size() const; constant // void reserve(uint32_t size); O(MAX(size, currentOccupancy)) // void clear(); O(currentOccupancy) (with zero constant for objects without // destructors) // Iterator getIterator(); // // Use of iterators: // // for(HashSet::Iterator iter = test.getIterator(); !iter.done(); ++iter) // myFunction(*iter); // // CoalescedHashSet<T> does not support getIterator, but instead supports // const Key *getEntries(); // // insertion into a set already containing the element fails returning false, as does // erasure of an element not in the set // namespace nvidia { namespace shdfnd { template <class Key, class HashFn = Hash<Key>, class Allocator = NonTrackingAllocator> class HashSet : public internal::HashSetBase<Key, HashFn, Allocator, false> { public: typedef internal::HashSetBase<Key, HashFn, Allocator, false> HashSetBase; typedef typename HashSetBase::Iterator Iterator; HashSet(uint32_t initialTableSize = 64, float loadFactor = 0.75f) : HashSetBase(initialTableSize, loadFactor) { } HashSet(uint32_t initialTableSize, float loadFactor, const Allocator& alloc) : HashSetBase(initialTableSize, loadFactor, alloc) { } HashSet(const Allocator& alloc) : HashSetBase(64, 0.75f, alloc) { } Iterator getIterator() { return Iterator(HashSetBase::mBase); } }; template <class Key, class HashFn = Hash<Key>, class Allocator = NonTrackingAllocator> class CoalescedHashSet : public internal::HashSetBase<Key, HashFn, Allocator, true> { public: typedef typename internal::HashSetBase<Key, HashFn, Allocator, true> HashSetBase; CoalescedHashSet(uint32_t initialTableSize = 64, float loadFactor = 0.75f) : HashSetBase(initialTableSize, loadFactor) { } CoalescedHashSet(uint32_t initialTableSize, float loadFactor, const Allocator& alloc) : HashSetBase(initialTableSize, loadFactor, alloc) { } CoalescedHashSet(const Allocator& alloc) : HashSetBase(64, 0.75f, alloc) { } const Key* getEntries() const { return HashSetBase::mBase.getEntries(); } }; } // namespace shdfnd } // namespace nvidia #endif // #ifndef NV_NSFOUNDATION_NSHASHSET_H
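A short, hypothetical usage sketch for the two containers declared above (not from the original header); it assumes the default hash and allocator template parameters:

    #include "NsHashSet.h"

    void hashSetExample()
    {
        nvidia::shdfnd::HashSet<uint32_t> set(16, 0.75f);   // initial table size, load factor
        bool fresh = set.insert(42);                        // true: 42 was not present
        fresh = set.insert(42);                             // false: duplicate insertions fail
        if(set.contains(42))
            set.erase(42);                                  // true: element was present and removed

        // The coalesced variant trades a slower erase for array-style iteration.
        nvidia::shdfnd::CoalescedHashSet<uint32_t> cset;
        cset.insert(1); cset.insert(2); cset.insert(3);
        const uint32_t* entries = cset.getEntries();
        for(uint32_t i = 0; i < cset.size(); ++i)
            (void)entries[i];                               // visit each entry
    }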
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsGlobals.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NSFOUNDATION_NSGLOBALS_H #define NV_NSFOUNDATION_NSGLOBALS_H #include "NvErrors.h" namespace nvidia { class NvAssertHandler; class NvErrorCallback; class NvAllocatorCallback; class NvProfilerCallback; namespace shdfnd { // note: it's illegal to initialize the shared foundation twice without terminating in between NV_FOUNDATION_API void initializeSharedFoundation(uint32_t version, NvAllocatorCallback&, NvErrorCallback&); NV_FOUNDATION_API bool sharedFoundationIsInitialized(); NV_FOUNDATION_API void terminateSharedFoundation(); // number of times foundation has been init'd. 0 means never initialized, so if we wrap we go from UINT32_MAX to 1. Used // for things that happen at most once (e.g. some warnings) NV_FOUNDATION_API uint32_t getInitializationCount(); NV_FOUNDATION_API NvAllocatorCallback& getAllocator(); NV_FOUNDATION_API NvErrorCallback& getErrorCallback(); // on some platforms (notably 360) the CRT does non-recoverable allocations when asked for type names. Hence // we provide a mechanism to disable this capability NV_FOUNDATION_API void setReflectionAllocatorReportsNames(bool val); NV_FOUNDATION_API bool getReflectionAllocatorReportsNames(); NV_FOUNDATION_API NvProfilerCallback *getProfilerCallback(); NV_FOUNDATION_API void setProfilerCallback(NvProfilerCallback *profiler); } } #endif // #ifndef NV_NSFOUNDATION_NSGLOBALS_H
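A hedged sketch of the expected call sequence for the functions declared above (not part of the original header); the version argument and the two callback references are placeholders supplied by the application:

    #include "NsGlobals.h"

    void bootFoundation(uint32_t sdkVersion,                    // placeholder version constant
                        nvidia::NvAllocatorCallback& allocator, // application-provided
                        nvidia::NvErrorCallback& errorCallback) // application-provided
    {
        // illegal to call twice without terminating in between (see note above)
        nvidia::shdfnd::initializeSharedFoundation(sdkVersion, allocator, errorCallback);
        // ... allocate via nvidia::shdfnd::getAllocator(), report via getErrorCallback() ...
        nvidia::shdfnd::terminateSharedFoundation();
    }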
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/platform/unix/NsUnixTrigConstants.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef PS_UNIX_TRIG_CONSTANTS_H #define PS_UNIX_TRIG_CONSTANTS_H //#define NV_GLOBALCONST extern const __declspec(selectany) #if NV_WINRT #define NV_GLOBALCONST extern const __declspec(selectany) #else #define NV_GLOBALCONST extern const __attribute__((weak)) #endif NV_ALIGN_PREFIX(16) struct NV_VECTORF32 { float f[4]; } NV_ALIGN_SUFFIX(16); NV_GLOBALCONST NV_VECTORF32 g_NVSinCoefficients0 = {{1.0f, -0.166666667f, 8.333333333e-3f, -1.984126984e-4f}}; NV_GLOBALCONST NV_VECTORF32 g_NVSinCoefficients1 = {{2.755731922e-6f, -2.505210839e-8f, 1.605904384e-10f, -7.647163732e-13f}}; NV_GLOBALCONST NV_VECTORF32 g_NVSinCoefficients2 = {{2.811457254e-15f, -8.220635247e-18f, 1.957294106e-20f, -3.868170171e-23f}}; NV_GLOBALCONST NV_VECTORF32 g_NVCosCoefficients0 = {{1.0f, -0.5f, 4.166666667e-2f, -1.388888889e-3f}}; NV_GLOBALCONST NV_VECTORF32 g_NVCosCoefficients1 = {{2.480158730e-5f, -2.755731922e-7f, 2.087675699e-9f, -1.147074560e-11f}}; NV_GLOBALCONST NV_VECTORF32 g_NVCosCoefficients2 = {{4.779477332e-14f, -1.561920697e-16f, 4.110317623e-19f, -8.896791392e-22f}}; NV_GLOBALCONST NV_VECTORF32 g_NVTanCoefficients0 = {{1.0f, 0.333333333f, 0.133333333f, 5.396825397e-2f}}; NV_GLOBALCONST NV_VECTORF32 g_NVTanCoefficients1 = {{2.186948854e-2f, 8.863235530e-3f, 3.592128167e-3f, 1.455834485e-3f}}; NV_GLOBALCONST NV_VECTORF32 g_NVTanCoefficients2 = {{5.900274264e-4f, 2.391290764e-4f, 9.691537707e-5f, 3.927832950e-5f}}; NV_GLOBALCONST NV_VECTORF32 g_NVASinCoefficients0 = {{-0.05806367563904f, -0.41861972469416f, 0.22480114791621f, 2.17337241360606f}}; NV_GLOBALCONST NV_VECTORF32 g_NVASinCoefficients1 = {{0.61657275907170f, 4.29696498283455f, -1.18942822255452f, -6.53784832094831f}}; NV_GLOBALCONST NV_VECTORF32 g_NVASinCoefficients2 = {{-1.36926553863413f, -4.48179294237210f, 1.41810672941833f, 5.48179257935713f}}; NV_GLOBALCONST NV_VECTORF32 g_NVATanCoefficients0 = 
{{1.0f, 0.333333334f, 0.2f, 0.142857143f}}; NV_GLOBALCONST NV_VECTORF32 g_NVATanCoefficients1 = {{1.111111111e-1f, 9.090909091e-2f, 7.692307692e-2f, 6.666666667e-2f}}; NV_GLOBALCONST NV_VECTORF32 g_NVATanCoefficients2 = {{5.882352941e-2f, 5.263157895e-2f, 4.761904762e-2f, 4.347826087e-2f}}; NV_GLOBALCONST NV_VECTORF32 g_NVSinEstCoefficients = {{1.0f, -1.66521856991541e-1f, 8.199913018755e-3f, -1.61475937228e-4f}}; NV_GLOBALCONST NV_VECTORF32 g_NVCosEstCoefficients = {{1.0f, -4.95348008918096e-1f, 3.878259962881e-2f, -9.24587976263e-4f}}; NV_GLOBALCONST NV_VECTORF32 g_NVTanEstCoefficients = {{2.484f, -1.954923183e-1f, 2.467401101f, NvInvPi}}; NV_GLOBALCONST NV_VECTORF32 g_NVATanEstCoefficients = {{7.689891418951e-1f, 1.104742493348f, 8.661844266006e-1f, NvPiDivTwo}}; NV_GLOBALCONST NV_VECTORF32 g_NVASinEstCoefficients = {{-1.36178272886711f, 2.37949493464538f, -8.08228565650486e-1f, 2.78440142746736e-1f}}; NV_GLOBALCONST NV_VECTORF32 g_NVASinEstConstants = {{1.00000011921f, NvPiDivTwo, 0.0f, 0.0f}}; NV_GLOBALCONST NV_VECTORF32 g_NVPiConstants0 = {{NvPi, NvTwoPi, NvInvPi, NvInvTwoPi}}; NV_GLOBALCONST NV_VECTORF32 g_NVReciprocalTwoPi = {{NvInvTwoPi, NvInvTwoPi, NvInvTwoPi, NvInvTwoPi}}; NV_GLOBALCONST NV_VECTORF32 g_NVTwoPi = {{NvTwoPi, NvTwoPi, NvTwoPi, NvTwoPi}}; #endif
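These tables appear to be precomputed Maclaurin-series coefficients: g_NVSinCoefficients0 holds 1, -1/3!, 1/5!, -1/7! (sin x = x - x^3/3! + x^5/5! - ...), g_NVCosCoefficients0 holds 1, -1/2!, 1/4!, -1/6!, and the ATan tables hold the reciprocals of the odd integers from atan's series. A throwaway spot-check of that reading (illustrative only, not part of the original file):

    #include <cstdio>
    int main()
    {
        std::printf("%.9f vs -0.166666667\n", -1.0 / 6.0);       // -1/3!, second sin term
        std::printf("%.9e vs 8.333333333e-3\n", 1.0 / 120.0);    //  1/5!, third sin term
        std::printf("%.9e vs -1.388888889e-3\n", -1.0 / 720.0);  // -1/6!, fourth cos term
        return 0;
    }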
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/platform/unix/NsUnixAoS.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef PS_UNIX_AOS_H #define PS_UNIX_AOS_H // no includes here! this file should be included from NvcVecMath.h only!!! #if !COMPILE_VECTOR_INTRINSICS #error Vector intrinsics should not be included when using scalar implementation. #endif #if NV_X86 || NV_X64 # include "sse2/NsUnixSse2AoS.h" #elif NV_NEON # include "neon/NsUnixNeonAoS.h" #else # error No SIMD implementation for this unix platform. #endif #endif //PS_UNIX_AOS_H
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/platform/unix/NsUnixFPU.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_UNIX_NSUNIXFPU_H #define NV_UNIX_NSUNIXFPU_H #include "NvPreprocessor.h" #if NV_LINUX || NV_PS4 || NV_OSX #if NV_X86 || NV_X64 #include <xmmintrin.h> #elif NV_NEON #include <arm_neon.h> #endif NV_INLINE nvidia::shdfnd::SIMDGuard::SIMDGuard() { mControlWord = _mm_getcsr(); // set default (disable exceptions: _MM_MASK_MASK) and FTZ (_MM_FLUSH_ZERO_ON), DAZ (_MM_DENORMALS_ZERO_ON: (1<<6)) _mm_setcsr(_MM_MASK_MASK | _MM_FLUSH_ZERO_ON | (1 << 6)); } NV_INLINE nvidia::shdfnd::SIMDGuard::~SIMDGuard() { // restore control word and clear exception flags // (setting exception state flags cause exceptions on the first following fp operation) _mm_setcsr(mControlWord & ~_MM_EXCEPT_MASK); } #else #error No SIMD implementation for this unix platform. #endif // NV_LINUX || NV_PS4 || NV_OSX #endif // #ifndef NV_UNIX_NSUNIXFPU_H
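A minimal sketch of how the guard implemented above is typically used, assuming SIMDGuard acts as a scope guard (its class declaration lives in the cross-platform FPU header):

    void simdHeavyFunction()
    {
        nvidia::shdfnd::SIMDGuard guard; // saves MXCSR, masks FP exceptions, enables FTZ/DAZ
        // ... SSE math here runs with the adjusted control word ...
    } // destructor restores the caller's control word with exception flags cleared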
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/platform/unix/NsUnixInlineAoS.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef PS_UNIX_INLINE_AOS_H #define PS_UNIX_INLINE_AOS_H #if !COMPILE_VECTOR_INTRINSICS #error Vector intrinsics should not be included when using scalar implementation. #endif //Remove this define when all platforms use simd solver. #define NV_SUPPORT_SIMD #if NV_X86 || NV_X64 # include "sse2/NsUnixSse2InlineAoS.h" #elif NV_NEON # include "neon/NsUnixNeonInlineAoS.h" #else # error No SIMD implementation for this unix platform. #endif #endif //PS_UNIX_INLINE_AOS_H
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/platform/unix/NsUnixIntrinsics.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_UNIX_NSUNIXINTRINSICS_H #define NV_UNIX_NSUNIXINTRINSICS_H #include "Ns.h" #include "NvAssert.h" #include <math.h> #if NV_ANDROID #include <signal.h> // for Ns::debugBreak() { raise(SIGTRAP); } #endif #if 0 #include <libkern/OSAtomic.h> #endif // this file is for internal intrinsics - that is, intrinsics that are used in // cross platform code but do not appear in the API #if !(NV_LINUX || NV_ANDROID || NV_PS4 || NV_APPLE_FAMILY) #error "This file should only be included by unix builds!!" #endif namespace nvidia { namespace shdfnd { NV_FORCE_INLINE void memoryBarrier() { __sync_synchronize(); } /*! Return the index of the highest set bit. Undefined for zero arg. */ NV_INLINE uint32_t highestSetBitUnsafe(uint32_t v) { return 31 -__builtin_clz(v); } /*! Return the index of the lowest set bit. Undefined for zero arg. */ NV_INLINE int32_t lowestSetBitUnsafe(uint32_t v) { return __builtin_ctz(v); } /*! Returns the number of leading zeros. Undefined for zero arg. */ NV_INLINE uint32_t countLeadingZeros(uint32_t v) { return __builtin_clz(v); } /*! Prefetch aligned 64B x86, 32b ARM around \c ptr+offset. */ NV_FORCE_INLINE void prefetchLine(const void* ptr, uint32_t offset = 0) { __builtin_prefetch(reinterpret_cast<const char * NV_RESTRICT>(ptr) + offset, 0, 3); } /*! Prefetch \c count bytes starting at \c ptr.
*/ #if NV_ANDROID || NV_IOS NV_FORCE_INLINE void prefetch(const void* ptr, uint32_t count = 1) { const char* cp = static_cast<const char*>(ptr); size_t p = reinterpret_cast<size_t>(ptr); uint32_t startLine = uint32_t(p >> 5), endLine = uint32_t((p + count - 1) >> 5); uint32_t lines = endLine - startLine + 1; do { prefetchLine(cp); cp += 32; } while(--lines); } #else NV_FORCE_INLINE void prefetch(const void* ptr, uint32_t count = 1) { const char* cp = reinterpret_cast<const char*>(ptr); uint64_t p = size_t(ptr); uint64_t startLine = p >> 6, endLine = (p + count - 1) >> 6; uint64_t lines = endLine - startLine + 1; do { prefetchLine(cp); cp += 64; } while(--lines); } #endif //! \brief platform-specific reciprocal NV_CUDA_CALLABLE NV_FORCE_INLINE float recipFast(float a) { return 1.0f / a; } //! \brief platform-specific fast reciprocal square root NV_CUDA_CALLABLE NV_FORCE_INLINE float recipSqrtFast(float a) { return 1.0f / ::sqrtf(a); } //! \brief platform-specific floor NV_CUDA_CALLABLE NV_FORCE_INLINE float floatFloor(float x) { return ::floorf(x); } #define NS_EXPECT_TRUE(x) x #define NS_EXPECT_FALSE(x) x } // namespace shdfnd } // namespace nvidia #endif // #ifndef NV_UNIX_NSUNIXINTRINSICS_H
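An illustrative helper built on the bit-scan intrinsics above (hypothetical, not from the original header); the zero guard matters because the *Unsafe variants are undefined for a zero argument:

    // round x up to the nearest power of two >= x (valid for 1 < x <= 2^31)
    inline uint32_t nextPowerOfTwo(uint32_t x)
    {
        if(x <= 1)
            return 1;
        const uint32_t msb = nvidia::shdfnd::highestSetBitUnsafe(x - 1); // guard keeps the arg non-zero
        return 1u << (msb + 1);
    }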
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/platform/unix/sse2/NsUnixSse2InlineAoS.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef PS_UNIX_SSE2_INLINE_AOS_H #define PS_UNIX_SSE2_INLINE_AOS_H #if !COMPILE_VECTOR_INTRINSICS #error Vector intrinsics should not be included when using scalar implementation. #endif //Remove this define when all platforms use simd solver. #define NV_SUPPORT_SIMD #ifdef __SSE4_2__ #include "smmintrin.h" #endif #define _NV_FPCLASS_SNAN 0x0001 /* signaling NaN */ #define _NV_FPCLASS_QNAN 0x0002 /* quiet NaN */ #define _NV_FPCLASS_NINF 0x0004 /* negative infinity */ #define _NV_FPCLASS_PINF 0x0200 /* positive infinity */ NV_FORCE_INLINE __m128 m128_I2F(__m128i n) { return _mm_castsi128_ps(n); } NV_FORCE_INLINE __m128i m128_F2I(__m128 n) { return _mm_castps_si128(n); } namespace internalUnitSSE2Simd { NV_FORCE_INLINE uint32_t BAllTrue4_R(const BoolV a) { const int32_t moveMask = _mm_movemask_ps(a); return moveMask == (0xf); } NV_FORCE_INLINE uint32_t BAnyTrue4_R(const BoolV a) { const int32_t moveMask = _mm_movemask_ps(a); return moveMask != (0x0); } NV_FORCE_INLINE uint32_t BAllTrue3_R(const BoolV a) { const int32_t moveMask = _mm_movemask_ps(a); return (moveMask & 0x7) == (0x7); } NV_FORCE_INLINE uint32_t BAnyTrue3_R(const BoolV a) { const int32_t moveMask = _mm_movemask_ps(a); return (moveMask & 0x7) != (0x0); } NV_FORCE_INLINE uint32_t FiniteTestEq(const Vec4V a, const Vec4V b) { //This is a bit of a bodge. //_mm_comieq_ss returns 1 if either value is nan so we need to re-cast a and b with true encoded as a non-nan number. //There must be a better way of doing this in sse. 
const BoolV one = FOne(); const BoolV zero = FZero(); const BoolV a1 =V4Sel(a,one,zero); const BoolV b1 =V4Sel(b,one,zero); return ( _mm_comieq_ss(a1, b1) && _mm_comieq_ss(_mm_shuffle_ps(a1, a1, _MM_SHUFFLE(1,1,1,1)),_mm_shuffle_ps(b1, b1, _MM_SHUFFLE(1,1,1,1))) && _mm_comieq_ss(_mm_shuffle_ps(a1, a1, _MM_SHUFFLE(2,2,2,2)),_mm_shuffle_ps(b1, b1, _MM_SHUFFLE(2,2,2,2))) && _mm_comieq_ss(_mm_shuffle_ps(a1, a1, _MM_SHUFFLE(3,3,3,3)),_mm_shuffle_ps(b1, b1, _MM_SHUFFLE(3,3,3,3))) ); } const NV_ALIGN(16, uint32_t gMaskXYZ[4])={0xffffffff, 0xffffffff, 0xffffffff, 0}; } namespace _VecMathTests { NV_FORCE_INLINE bool allElementsEqualFloatV(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return(_mm_comieq_ss(a,b)!=0); } NV_FORCE_INLINE bool allElementsEqualVec3V(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); return V3AllEq(a, b) != 0; } NV_FORCE_INLINE bool allElementsEqualVec4V(const Vec4V a, const Vec4V b) { return V4AllEq(a, b) != 0; } NV_FORCE_INLINE bool allElementsEqualBoolV(const BoolV a, const BoolV b) { return internalUnitSSE2Simd::BAllTrue4_R(VecI32V_IsEq(a, b)) != 0; } NV_FORCE_INLINE bool allElementsEqualVecU32V(const VecU32V a, const VecU32V b) { return internalUnitSSE2Simd::BAllTrue4_R(V4IsEqU32(a, b)) != 0; } NV_FORCE_INLINE bool allElementsEqualVecI32V(const VecI32V a, const VecI32V b) { BoolV c = m128_I2F(_mm_cmpeq_epi32(m128_F2I(a), m128_F2I(b))); return internalUnitSSE2Simd::BAllTrue4_R(c) != 0; } #define VECMATH_AOS_EPSILON (1e-3f) NV_FORCE_INLINE bool allElementsNearEqualFloatV(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); const FloatV c=FSub(a,b); const FloatV minError=FLoad(-VECMATH_AOS_EPSILON); const FloatV maxError=FLoad(VECMATH_AOS_EPSILON); return (_mm_comigt_ss(c,minError) && _mm_comilt_ss(c,maxError)); } NV_FORCE_INLINE bool allElementsNearEqualVec3V(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); const Vec3V c=V3Sub(a,b); const Vec3V minError=V3Load(-VECMATH_AOS_EPSILON); const Vec3V maxError=V3Load(VECMATH_AOS_EPSILON); return ( _mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0,0,0,0)),minError) && _mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0,0,0,0)),maxError) && _mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1,1,1,1)),minError) && _mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1,1,1,1)),maxError) && _mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2,2,2,2)),minError) && _mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2,2,2,2)),maxError) ); } NV_FORCE_INLINE bool allElementsNearEqualVec4V(const Vec4V a, const Vec4V b) { const Vec4V c=V4Sub(a,b); const Vec4V minError=V4Load(-VECMATH_AOS_EPSILON); const Vec4V maxError=V4Load(VECMATH_AOS_EPSILON); return ( _mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0,0,0,0)),minError) && _mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0,0,0,0)),maxError) && _mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1,1,1,1)),minError) && _mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1,1,1,1)),maxError) && _mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2,2,2,2)),minError) && _mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2,2,2,2)),maxError) && _mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(3,3,3,3)),minError) && _mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(3,3,3,3)),maxError) ); } } ///////////////////////////////////////////////////////////////////// ////FUNCTIONS USED ONLY FOR ASSERTS IN VECTORISED 
IMPLEMENTATIONS ///////////////////////////////////////////////////////////////////// NV_FORCE_INLINE bool isValidFloatV(const FloatV a) { return ( _mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0)),_mm_shuffle_ps(a, a, _MM_SHUFFLE(1,1,1,1))) && _mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0)),_mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2))) && _mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0)),_mm_shuffle_ps(a, a, _MM_SHUFFLE(3,3,3,3))) ); } NV_FORCE_INLINE bool isValidVec3V(const Vec3V a) { return (_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(3,3,3,3)),FZero()) ? true : false); } NV_FORCE_INLINE bool isFiniteFloatV(const FloatV a) { float badNumber = nvidia::NvUnionCast<float, uint32_t>(_NV_FPCLASS_SNAN | _NV_FPCLASS_QNAN | _NV_FPCLASS_NINF | _NV_FPCLASS_PINF); const FloatV vBadNum = FLoad((float&)badNumber); const BoolV vMask = BAnd(vBadNum, a); return internalUnitSSE2Simd::FiniteTestEq(vMask, BFFFF()) == 1; } NV_FORCE_INLINE bool isFiniteVec3V(const Vec3V a) { float badNumber = nvidia::NvUnionCast<float, uint32_t>(_NV_FPCLASS_SNAN | _NV_FPCLASS_QNAN | _NV_FPCLASS_NINF | _NV_FPCLASS_PINF); const Vec3V vBadNum = V3Load((float&)badNumber); const BoolV vMask = BAnd(BAnd(vBadNum, a), BTTTF()); return internalUnitSSE2Simd::FiniteTestEq(vMask, BFFFF()) == 1; } NV_FORCE_INLINE bool isFiniteVec4V(const Vec4V a) { /*Vec4V a; NV_ALIGN(16, float f[4]); F32Array_Aligned_From_Vec4V(a, f); return NvIsFinite(f[0]) && NvIsFinite(f[1]) && NvIsFinite(f[2]) && NvIsFinite(f[3]);*/ float badNumber = nvidia::NvUnionCast<float, uint32_t>(_NV_FPCLASS_SNAN | _NV_FPCLASS_QNAN | _NV_FPCLASS_NINF | _NV_FPCLASS_PINF); const Vec4V vBadNum = V4Load((float&)badNumber); const BoolV vMask = BAnd(vBadNum, a); return internalUnitSSE2Simd::FiniteTestEq(vMask, BFFFF()) == 1; } NV_FORCE_INLINE bool hasZeroElementinFloatV(const FloatV a) { VECMATHAOS_ASSERT(isValidFloatV(a)); return (_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0)),FZero()) ? 
true : false); } NV_FORCE_INLINE bool hasZeroElementInVec3V(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); return ( _mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0)),FZero()) || _mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(1,1,1,1)),FZero()) || _mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2)),FZero()) ); } NV_FORCE_INLINE bool hasZeroElementInVec4V(const Vec4V a) { return ( _mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0)),FZero()) || _mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(1,1,1,1)),FZero()) || _mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2)),FZero()) || _mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(3,3,3,3)),FZero()) ); } ///////////////////////////////////////////////////////////////////// ////VECTORISED FUNCTION IMPLEMENTATIONS ///////////////////////////////////////////////////////////////////// NV_FORCE_INLINE FloatV FLoad(const float f) { return (_mm_load1_ps(&f)); } NV_FORCE_INLINE Vec3V V3Load(const float f) { return _mm_set_ps(0.0f,f,f,f); } NV_FORCE_INLINE Vec4V V4Load(const float f) { return (_mm_load1_ps(&f)); } NV_FORCE_INLINE BoolV BLoad(const bool f) { const uint32_t i=-(int32_t)f; return _mm_load1_ps((float*)&i); } NV_FORCE_INLINE Vec3V V3LoadA(const NvVec3& f) { VECMATHAOS_ASSERT(0 == ((size_t)&f & 0x0f)); return _mm_and_ps((Vec3V&)f, (VecI32V&)internalUnitSSE2Simd::gMaskXYZ); } NV_FORCE_INLINE Vec3V V3LoadU(const NvVec3& f) { return (_mm_set_ps(0.0f,f.z,f.y,f.x)); } NV_FORCE_INLINE Vec3V V3LoadUnsafeA(const NvVec3& f) { return (_mm_set_ps(0.0f,f.z,f.y,f.x)); } NV_FORCE_INLINE Vec3V V3LoadA(const float* const f) { VECMATHAOS_ASSERT(0 == ((uint64_t)f & 0x0f)); return _mm_and_ps((Vec3V&)*f, (VecI32V&)internalUnitSSE2Simd::gMaskXYZ); } NV_FORCE_INLINE Vec3V V3LoadU(const float* const i) { return (_mm_set_ps(0.0f,i[2],i[1],i[0])); } NV_FORCE_INLINE Vec3V Vec3V_From_Vec4V(Vec4V v) { return V4ClearW(v); } NV_FORCE_INLINE Vec3V Vec3V_From_Vec4V_WUndefined(const Vec4V v) { return v; } NV_FORCE_INLINE Vec4V Vec4V_From_Vec3V(Vec3V f) { return f; //ok if it is implemented as the same type. 
} NV_FORCE_INLINE Vec4V Vec4V_From_NvVec3_WUndefined(const NvVec3& f) { return (_mm_set_ps(0.0f,f.z,f.y,f.x)); } NV_FORCE_INLINE Vec4V Vec4V_From_FloatV(FloatV f) { return f; } NV_FORCE_INLINE Vec3V Vec3V_From_FloatV(FloatV f) { return Vec3V_From_Vec4V(Vec4V_From_FloatV(f)); } NV_FORCE_INLINE Vec3V Vec3V_From_FloatV_WUndefined(FloatV f) { return Vec3V_From_Vec4V_WUndefined(Vec4V_From_FloatV(f)); } NV_FORCE_INLINE Mat33V Mat33V_From_NvMat33(const NvMat33 &m) { return Mat33V(V3LoadU(m.column0), V3LoadU(m.column1), V3LoadU(m.column2)); } NV_FORCE_INLINE void NvMat33_From_Mat33V(const Mat33V &m, NvMat33 &out) { NV_ASSERT((size_t(&out)&15)==0); V3StoreU(m.col0, out.column0); V3StoreU(m.col1, out.column1); V3StoreU(m.col2, out.column2); } NV_FORCE_INLINE Vec4V V4LoadA(const float* const f) { VECMATHAOS_ASSERT(0 == ((uint64_t)f & 0x0f)); return (_mm_load_ps(f)); } NV_FORCE_INLINE void V4StoreA(Vec4V a, float* f) { VECMATHAOS_ASSERT(0 == ((uint64_t)f & 0x0f)); _mm_store_ps(f,a); } NV_FORCE_INLINE void V4StoreU(const Vec4V a, float* f) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(0 == ((int)&a & 0x0F)); _mm_storeu_ps(f,a); } NV_FORCE_INLINE void BStoreA(const BoolV a, uint32_t* f) { VECMATHAOS_ASSERT(0 == ((uint64_t)f & 0x0f)); _mm_store_ps((float*)f,a); } NV_FORCE_INLINE void U4StoreA(const VecU32V uv, uint32_t* u) { VECMATHAOS_ASSERT(0 == ((uint64_t)u & 0x0f)); _mm_store_ps((float*)u,uv); } NV_FORCE_INLINE void I4StoreA(const VecI32V iv, int32_t* i) { VECMATHAOS_ASSERT(0 == ((uint64_t)i & 0x0f)); _mm_store_ps((float*)i,iv); } NV_FORCE_INLINE Vec4V V4LoadU(const float* const f) { return (_mm_loadu_ps(f)); } NV_FORCE_INLINE BoolV BLoad(const bool* const f) { const NV_ALIGN(16, int32_t) b[4]={-(int32_t)f[0],-(int32_t)f[1],-(int32_t)f[2],-(int32_t)f[3]}; return _mm_load_ps((float*)&b); } NV_FORCE_INLINE float FStore(const FloatV a) { VECMATHAOS_ASSERT(isValidFloatV(a)); float f; _mm_store_ss(&f,a); return f; } NV_FORCE_INLINE void FStore(const FloatV a, float* NV_RESTRICT f) { VECMATHAOS_ASSERT(isValidFloatV(a)); _mm_store_ss(f,a); } NV_FORCE_INLINE void V3StoreA(const Vec3V a, NvVec3& f) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(0 == ((int)&a & 0x0F)); VECMATHAOS_ASSERT(0 == ((int)&f & 0x0F)); NV_ALIGN(16,float) f2[4]; _mm_store_ps(f2,a); f=NvVec3(f2[0],f2[1],f2[2]); } NV_FORCE_INLINE void V3StoreU(const Vec3V a, NvVec3& f) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(0 == ((int)&a & 0x0F)); NV_ALIGN(16,float) f2[4]; _mm_store_ps(f2,a); f=NvVec3(f2[0],f2[1],f2[2]); } NV_FORCE_INLINE VecI32V U4Load(const uint32_t i) { return (_mm_load1_ps((float*)&i)); } NV_FORCE_INLINE VecU32V U4LoadU(const uint32_t* i) { return _mm_loadu_ps((float*)i); } NV_FORCE_INLINE VecU32V U4LoadA(const uint32_t* i) { VECMATHAOS_ASSERT(0==((size_t)i & 0x0f)); return _mm_load_ps((float*)i); } ////////////////////////////////// //FLOATV ////////////////////////////////// NV_FORCE_INLINE FloatV FZero() { return FLoad(0.0f); } NV_FORCE_INLINE FloatV FOne() { return FLoad(1.0f); } NV_FORCE_INLINE FloatV FHalf() { return FLoad(0.5f); } NV_FORCE_INLINE FloatV FEps() { return FLoad(NV_EPS_REAL); } NV_FORCE_INLINE FloatV FEps6() { return FLoad(1e-6f); } NV_FORCE_INLINE FloatV FMax() { return FLoad(NV_MAX_REAL); } NV_FORCE_INLINE FloatV FNegMax() { return FLoad(-NV_MAX_REAL); } NV_FORCE_INLINE FloatV IZero() { const uint32_t zero = 0; return _mm_load1_ps((float*)&zero); } NV_FORCE_INLINE FloatV IOne() { const uint32_t one = 1; return _mm_load1_ps((float*)&one); } NV_FORCE_INLINE FloatV ITwo() { const 
uint32_t two = 2; return _mm_load1_ps((float*)&two); } NV_FORCE_INLINE FloatV IThree() { const uint32_t three = 3; return _mm_load1_ps((float*)&three); } NV_FORCE_INLINE FloatV IFour() { uint32_t four = 4; return _mm_load1_ps((float*)&four); } NV_FORCE_INLINE FloatV FNeg(const FloatV f) { VECMATHAOS_ASSERT(isValidFloatV(f)); return _mm_sub_ps( _mm_setzero_ps(), f); } NV_FORCE_INLINE FloatV FAdd(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return _mm_add_ps(a,b); } NV_FORCE_INLINE FloatV FSub(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return _mm_sub_ps(a,b); } NV_FORCE_INLINE FloatV FMul(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return _mm_mul_ps(a,b); } NV_FORCE_INLINE FloatV FDiv(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return _mm_div_ps(a,b); } NV_FORCE_INLINE FloatV FDivFast(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return _mm_mul_ps(a,_mm_rcp_ps(b)); } NV_FORCE_INLINE FloatV FRecip(const FloatV a) { VECMATHAOS_ASSERT(isValidFloatV(a)); return _mm_div_ps(FOne(),a); } NV_FORCE_INLINE FloatV FRecipFast(const FloatV a) { return _mm_rcp_ps(a); } NV_FORCE_INLINE FloatV FRsqrt(const FloatV a) { VECMATHAOS_ASSERT(isValidFloatV(a)); return _mm_div_ps(FOne(),_mm_sqrt_ps(a)); } NV_FORCE_INLINE FloatV FSqrt(const FloatV a) { VECMATHAOS_ASSERT(isValidFloatV(a)); return _mm_sqrt_ps(a); } NV_FORCE_INLINE FloatV FRsqrtFast(const FloatV a) { return _mm_rsqrt_ps(a); } NV_FORCE_INLINE FloatV FScaleAdd(const FloatV a, const FloatV b, const FloatV c) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); VECMATHAOS_ASSERT(isValidFloatV(c)); return FAdd(FMul(a,b),c); } NV_FORCE_INLINE FloatV FNegScaleSub(const FloatV a, const FloatV b, const FloatV c) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); VECMATHAOS_ASSERT(isValidFloatV(c)); return FSub(c,FMul(a,b)); } NV_FORCE_INLINE FloatV FAbs(const FloatV a) { VECMATHAOS_ASSERT(isValidFloatV(a)); NV_ALIGN(16,const uint32_t) absMask[4] = {0x7fFFffFF, 0x7fFFffFF, 0x7fFFffFF, 0x7fFFffFF}; return _mm_and_ps(a, _mm_load_ps((float*)absMask)); } NV_FORCE_INLINE FloatV FSel(const BoolV c, const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(_VecMathTests::allElementsEqualBoolV(c,BTTTT()) || _VecMathTests::allElementsEqualBoolV(c,BFFFF())); VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return _mm_or_ps(_mm_andnot_ps(c, b), _mm_and_ps(c, a)); } NV_FORCE_INLINE BoolV FIsGrtr(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return _mm_cmpgt_ps(a,b); } NV_FORCE_INLINE BoolV FIsGrtrOrEq(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return _mm_cmpge_ps(a,b); } NV_FORCE_INLINE BoolV FIsEq(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return _mm_cmpeq_ps(a,b); } NV_FORCE_INLINE FloatV FMax(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return _mm_max_ps(a, b); } NV_FORCE_INLINE FloatV FMin(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return _mm_min_ps(a, b); } NV_FORCE_INLINE 
FloatV FClamp(const FloatV a, const FloatV minV, const FloatV maxV) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(minV)); VECMATHAOS_ASSERT(isValidFloatV(maxV)); return FMax(FMin(a,maxV),minV); } NV_FORCE_INLINE uint32_t FAllGrtr(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return(_mm_comigt_ss(a,b)); } NV_FORCE_INLINE uint32_t FAllGrtrOrEq(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return(_mm_comige_ss(a,b)); } NV_FORCE_INLINE uint32_t FAllEq(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return(_mm_comieq_ss(a,b)); } NV_FORCE_INLINE FloatV FRound(const FloatV a) { #ifdef __SSE4_2__ return _mm_round_ps( a, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC ); #else //return _mm_round_ps(a, 0x0); const FloatV half = FLoad(0.5f); const __m128 signBit = _mm_cvtepi32_ps(_mm_srli_epi32(_mm_cvtps_epi32(a), 31)); const FloatV aRound = FSub(FAdd(a, half), signBit); __m128i tmp = _mm_cvttps_epi32(aRound); return _mm_cvtepi32_ps(tmp); #endif } NV_FORCE_INLINE FloatV FSin(const FloatV a) { //Vec4V V1, V2, V3, V5, V7, V9, V11, V13, V15, V17, V19, V21, V23; //Vec4V S1, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11; FloatV Result; // Modulo the range of the given angles such that -XM_2PI <= Angles < XM_2PI const FloatV recipTwoPi = V4LoadA(g_NVReciprocalTwoPi.f); const FloatV twoPi = V4LoadA(g_NVTwoPi.f); const FloatV tmp = FMul(a, recipTwoPi); const FloatV b = FRound(tmp); const FloatV V1 = FNegMulSub(twoPi, b, a); // sin(V) ~= V - V^3 / 3! + V^5 / 5! - V^7 / 7! + V^9 / 9! - V^11 / 11! + V^13 / 13! - // V^15 / 15! + V^17 / 17! - V^19 / 19! + V^21 / 21! - V^23 / 23! 
(for -PI <= V < PI) const FloatV V2 = FMul(V1, V1); const FloatV V3 = FMul(V2, V1); const FloatV V5 = FMul(V3, V2); const FloatV V7 = FMul(V5, V2); const FloatV V9 = FMul(V7, V2); const FloatV V11 = FMul(V9, V2); const FloatV V13 = FMul(V11, V2); const FloatV V15 = FMul(V13, V2); const FloatV V17 = FMul(V15, V2); const FloatV V19 = FMul(V17, V2); const FloatV V21 = FMul(V19, V2); const FloatV V23 = FMul(V21, V2); const Vec4V sinCoefficients0 = V4LoadA(g_NVSinCoefficients0.f); const Vec4V sinCoefficients1 = V4LoadA(g_NVSinCoefficients1.f); const Vec4V sinCoefficients2 = V4LoadA(g_NVSinCoefficients2.f); const FloatV S1 = V4GetY(sinCoefficients0); const FloatV S2 = V4GetZ(sinCoefficients0); const FloatV S3 = V4GetW(sinCoefficients0); const FloatV S4 = V4GetX(sinCoefficients1); const FloatV S5 = V4GetY(sinCoefficients1); const FloatV S6 = V4GetZ(sinCoefficients1); const FloatV S7 = V4GetW(sinCoefficients1); const FloatV S8 = V4GetX(sinCoefficients2); const FloatV S9 = V4GetY(sinCoefficients2); const FloatV S10 = V4GetZ(sinCoefficients2); const FloatV S11 = V4GetW(sinCoefficients2); Result = FMulAdd(S1, V3, V1); Result = FMulAdd(S2, V5, Result); Result = FMulAdd(S3, V7, Result); Result = FMulAdd(S4, V9, Result); Result = FMulAdd(S5, V11, Result); Result = FMulAdd(S6, V13, Result); Result = FMulAdd(S7, V15, Result); Result = FMulAdd(S8, V17, Result); Result = FMulAdd(S9, V19, Result); Result = FMulAdd(S10, V21, Result); Result = FMulAdd(S11, V23, Result); return Result; } NV_FORCE_INLINE FloatV FCos(const FloatV a) { //XMVECTOR V1, V2, V4, V6, V8, V10, V12, V14, V16, V18, V20, V22; //XMVECTOR C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11; FloatV Result; // Modulo the range of the given angles such that -XM_2PI <= Angles < XM_2PI const FloatV recipTwoPi = V4LoadA(g_NVReciprocalTwoPi.f); const FloatV twoPi = V4LoadA(g_NVTwoPi.f); const FloatV tmp = FMul(a, recipTwoPi); const FloatV b = FRound(tmp); const FloatV V1 = FNegMulSub(twoPi, b, a); // cos(V) ~= 1 - V^2 / 2! + V^4 / 4! - V^6 / 6! + V^8 / 8! - V^10 / 10! + V^12 / 12! - // V^14 / 14! + V^16 / 16! - V^18 / 18! + V^20 / 20! - V^22 / 22! 
(for -PI <= V < PI) const FloatV V2 = FMul(V1, V1); const FloatV V4 = FMul(V2, V2); const FloatV V6 = FMul(V4, V2); const FloatV V8 = FMul(V4, V4); const FloatV V10 = FMul(V6, V4); const FloatV V12 = FMul(V6, V6); const FloatV V14 = FMul(V8, V6); const FloatV V16 = FMul(V8, V8); const FloatV V18 = FMul(V10, V8); const FloatV V20 = FMul(V10, V10); const FloatV V22 = FMul(V12, V10); const Vec4V cosCoefficients0 = V4LoadA(g_NVCosCoefficients0.f); const Vec4V cosCoefficients1 = V4LoadA(g_NVCosCoefficients1.f); const Vec4V cosCoefficients2 = V4LoadA(g_NVCosCoefficients2.f); const FloatV C1 = V4GetY(cosCoefficients0); const FloatV C2 = V4GetZ(cosCoefficients0); const FloatV C3 = V4GetW(cosCoefficients0); const FloatV C4 = V4GetX(cosCoefficients1); const FloatV C5 = V4GetY(cosCoefficients1); const FloatV C6 = V4GetZ(cosCoefficients1); const FloatV C7 = V4GetW(cosCoefficients1); const FloatV C8 = V4GetX(cosCoefficients2); const FloatV C9 = V4GetY(cosCoefficients2); const FloatV C10 = V4GetZ(cosCoefficients2); const FloatV C11 = V4GetW(cosCoefficients2); Result = FMulAdd(C1, V2, V4One()); Result = FMulAdd(C2, V4, Result); Result = FMulAdd(C3, V6, Result); Result = FMulAdd(C4, V8, Result); Result = FMulAdd(C5, V10, Result); Result = FMulAdd(C6, V12, Result); Result = FMulAdd(C7, V14, Result); Result = FMulAdd(C8, V16, Result); Result = FMulAdd(C9, V18, Result); Result = FMulAdd(C10, V20, Result); Result = FMulAdd(C11, V22, Result); return Result; } NV_FORCE_INLINE uint32_t FOutOfBounds(const FloatV a, const FloatV min, const FloatV max) { const BoolV ffff = BFFFF(); const BoolV c = BOr(FIsGrtr(a, max), FIsGrtr(min, a)); return !BAllEq(c, ffff); } NV_FORCE_INLINE uint32_t FInBounds(const FloatV a, const FloatV min, const FloatV max) { const BoolV tttt = BTTTT(); const BoolV c = BAnd(FIsGrtrOrEq(a, min), FIsGrtrOrEq(max, a)); return BAllEq(c, tttt); } NV_FORCE_INLINE uint32_t FOutOfBounds(const FloatV a, const FloatV bounds) { return FOutOfBounds(a, FNeg(bounds), bounds); } NV_FORCE_INLINE uint32_t FInBounds(const FloatV a, const FloatV bounds) { return FInBounds(a, FNeg(bounds), bounds); } ////////////////////////////////// //VEC3V ////////////////////////////////// NV_FORCE_INLINE Vec3V V3Splat(const FloatV f) { VECMATHAOS_ASSERT(isValidFloatV(f)); const __m128 zero=FZero(); const __m128 fff0 = _mm_move_ss(f, zero); return _mm_shuffle_ps(fff0, fff0, _MM_SHUFFLE(0,1,2,3)); } NV_FORCE_INLINE Vec3V V3Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z) { VECMATHAOS_ASSERT(isValidFloatV(x)); VECMATHAOS_ASSERT(isValidFloatV(y)); VECMATHAOS_ASSERT(isValidFloatV(z)); // static on zero causes compiler crash on x64 debug_opt const __m128 zero=FZero(); const __m128 xy = _mm_move_ss(x, y); const __m128 z0 = _mm_move_ss(zero, z); return _mm_shuffle_ps(xy, z0, _MM_SHUFFLE(1,0,0,1)); } NV_FORCE_INLINE Vec3V V3UnitX() { const NV_ALIGN(16,float) x[4]={1.0f,0.0f,0.0f,0.0f}; const __m128 x128=_mm_load_ps(x); return x128; } NV_FORCE_INLINE Vec3V V3UnitY() { const NV_ALIGN(16,float) y[4]={0.0f,1.0f,0.0f,0.0f}; const __m128 y128=_mm_load_ps(y); return y128; } NV_FORCE_INLINE Vec3V V3UnitZ() { const NV_ALIGN(16,float) z[4]={0.0f,0.0f,1.0f,0.0f}; const __m128 z128=_mm_load_ps(z); return z128; } NV_FORCE_INLINE FloatV V3GetX(const Vec3V f) { VECMATHAOS_ASSERT(isValidVec3V(f)); return _mm_shuffle_ps(f, f, _MM_SHUFFLE(0,0,0,0)); } NV_FORCE_INLINE FloatV V3GetY(const Vec3V f) { VECMATHAOS_ASSERT(isValidVec3V(f)); return _mm_shuffle_ps(f, f, _MM_SHUFFLE(1,1,1,1)); } NV_FORCE_INLINE FloatV V3GetZ(const Vec3V f) { 
VECMATHAOS_ASSERT(isValidVec3V(f)); return _mm_shuffle_ps(f, f, _MM_SHUFFLE(2,2,2,2)); } NV_FORCE_INLINE Vec3V V3SetX(const Vec3V v, const FloatV f) { VECMATHAOS_ASSERT(isValidVec3V(v)); VECMATHAOS_ASSERT(isValidFloatV(f)); return V3Sel(BFTTT(),v,f); } NV_FORCE_INLINE Vec3V V3SetY(const Vec3V v, const FloatV f) { VECMATHAOS_ASSERT(isValidVec3V(v)); VECMATHAOS_ASSERT(isValidFloatV(f)); return V3Sel(BTFTT(),v,f); } NV_FORCE_INLINE Vec3V V3SetZ(const Vec3V v, const FloatV f) { VECMATHAOS_ASSERT(isValidVec3V(v)); VECMATHAOS_ASSERT(isValidFloatV(f)); return V3Sel(BTTFT(),v,f); } NV_FORCE_INLINE Vec3V V3ColX(const Vec3V a, const Vec3V b, const Vec3V c) { Vec3V r = _mm_shuffle_ps(a,c,_MM_SHUFFLE(3,0,3,0)); return V3SetY(r, V3GetX(b)); } NV_FORCE_INLINE Vec3V V3ColY(const Vec3V a, const Vec3V b, const Vec3V c) { Vec3V r = _mm_shuffle_ps(a,c,_MM_SHUFFLE(3,1,3,1)); return V3SetY(r, V3GetY(b)); } NV_FORCE_INLINE Vec3V V3ColZ(const Vec3V a, const Vec3V b, const Vec3V c) { Vec3V r = _mm_shuffle_ps(a,c,_MM_SHUFFLE(3,2,3,2)); return V3SetY(r, V3GetZ(b)); } NV_FORCE_INLINE Vec3V V3Zero() { return V3Load(0.0f); } NV_FORCE_INLINE Vec3V V3Eps() { return V3Load(NV_EPS_REAL); } NV_FORCE_INLINE Vec3V V3One() { return V3Load(1.0f); } NV_FORCE_INLINE Vec3V V3Neg(const Vec3V f) { VECMATHAOS_ASSERT(isValidVec3V(f)); return _mm_sub_ps( _mm_setzero_ps(), f); } NV_FORCE_INLINE Vec3V V3Add(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); return _mm_add_ps(a,b); } NV_FORCE_INLINE Vec3V V3Sub(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); return _mm_sub_ps(a,b); } NV_FORCE_INLINE Vec3V V3Scale(const Vec3V a, const FloatV b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return _mm_mul_ps(a,b); } NV_FORCE_INLINE Vec3V V3Mul(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); return _mm_mul_ps(a,b); } NV_FORCE_INLINE Vec3V V3ScaleInv(const Vec3V a, const FloatV b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return _mm_div_ps(a,b); } NV_FORCE_INLINE Vec3V V3Div(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); const __m128 one=V3One(); const __m128 tttf=BTTTF(); const __m128 b1=V3Sel(tttf,b,one); return _mm_div_ps(a,b1); } NV_FORCE_INLINE Vec3V V3ScaleInvFast(const Vec3V a, const FloatV b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return _mm_mul_ps(a,_mm_rcp_ps(b)); } NV_FORCE_INLINE Vec3V V3DivFast(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); const __m128 one=V3One(); const __m128 tttf=BTTTF(); const __m128 b1=V3Sel(tttf,b,one); return _mm_mul_ps(a,_mm_rcp_ps(b1)); } NV_FORCE_INLINE Vec3V V3Recip(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); const __m128 zero=V3Zero(); const __m128 tttf=BTTTF(); const __m128 recipA=_mm_div_ps(V3One(),a); return V3Sel(tttf,recipA,zero); } NV_FORCE_INLINE Vec3V V3RecipFast(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); const __m128 zero=V3Zero(); const __m128 tttf=BTTTF(); const __m128 recipA=_mm_rcp_ps(a); return V3Sel(tttf,recipA,zero); } NV_FORCE_INLINE Vec3V V3Rsqrt(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); const __m128 zero=V3Zero(); const __m128 tttf=BTTTF(); const __m128 recipA=_mm_div_ps(V3One(),_mm_sqrt_ps(a)); return V3Sel(tttf,recipA,zero); } NV_FORCE_INLINE Vec3V V3RsqrtFast(const Vec3V a) { 
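// Note: _mm_rsqrt_ps is only an approximation (relative error on the order of 2^-12), which is why this routine carries the Fast suffix; callers needing full precision would normally add a Newton-Raphson refinement step, which is not performed here.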
VECMATHAOS_ASSERT(isValidVec3V(a)); const __m128 zero=V3Zero(); const __m128 tttf=BTTTF(); const __m128 recipA=_mm_rsqrt_ps(a); return V3Sel(tttf,recipA,zero); } NV_FORCE_INLINE Vec3V V3ScaleAdd(const Vec3V a, const FloatV b, const Vec3V c) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); VECMATHAOS_ASSERT(isValidVec3V(c)); return V3Add(V3Scale(a,b),c); } NV_FORCE_INLINE Vec3V V3NegScaleSub(const Vec3V a, const FloatV b, const Vec3V c) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); VECMATHAOS_ASSERT(isValidVec3V(c)); return V3Sub(c,V3Scale(a,b)); } NV_FORCE_INLINE Vec3V V3MulAdd(const Vec3V a, const Vec3V b, const Vec3V c) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); VECMATHAOS_ASSERT(isValidVec3V(c)); return V3Add(V3Mul(a,b),c); } NV_FORCE_INLINE Vec3V V3NegMulSub(const Vec3V a, const Vec3V b, const Vec3V c) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); VECMATHAOS_ASSERT(isValidVec3V(c)); return V3Sub(c, V3Mul(a,b)); } NV_FORCE_INLINE Vec3V V3Abs(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); return V3Max(a,V3Neg(a)); } NV_FORCE_INLINE FloatV V3Dot(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); #ifdef __SSE4_2__ return _mm_dp_ps(a, b, 0x7f); #else __m128 dot1 = _mm_mul_ps(a, b); //w,z,y,x __m128 shuf1 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(0,0,0,0)); //z,y,x,w __m128 shuf2 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(1,1,1,1)); //y,x,w,z __m128 shuf3 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(2,2,2,2)); //x,w,z,y return _mm_add_ps(_mm_add_ps(shuf1, shuf2), shuf3); #endif } NV_FORCE_INLINE Vec3V V3Cross(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); __m128 r1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 0, 2)); //z,x,y,w __m128 r2 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1)); //y,z,x,w __m128 l1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1)); //y,z,x,w __m128 l2 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 1, 0, 2)); //z,x,y,w return _mm_sub_ps(_mm_mul_ps(l1, l2), _mm_mul_ps(r1,r2)); } NV_FORCE_INLINE VecCrossV V3PrepareCross(const Vec3V a) { VecCrossV v; v.mR1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 0, 2)); //z,x,y,w v.mL1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1)); //y,z,x,w return v; } NV_FORCE_INLINE Vec3V V3Cross(const VecCrossV& a, const Vec3V b) { __m128 r2 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1)); //y,z,x,w __m128 l2 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 1, 0, 2)); //z,x,y,w return _mm_sub_ps(_mm_mul_ps(a.mL1, l2), _mm_mul_ps(a.mR1, r2)); } NV_FORCE_INLINE Vec3V V3Cross(const Vec3V a, const VecCrossV& b) { __m128 r2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1)); //y,z,x,w __m128 l2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 0, 2)); //z,x,y,w return _mm_sub_ps(_mm_mul_ps(b.mR1, r2), _mm_mul_ps(b.mL1, l2)); } NV_FORCE_INLINE Vec3V V3Cross(const VecCrossV& a, const VecCrossV& b) { return _mm_sub_ps(_mm_mul_ps(a.mL1, b.mR1), _mm_mul_ps(a.mR1, b.mL1)); } NV_FORCE_INLINE FloatV V3Length(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); return _mm_sqrt_ps(V3Dot(a,a)); } NV_FORCE_INLINE FloatV V3LengthSq(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); return V3Dot(a,a); } NV_FORCE_INLINE Vec3V V3Normalize(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(V3Dot(a,a)!=FZero()) return V3ScaleInv(a, _mm_sqrt_ps(V3Dot(a,a))); } NV_FORCE_INLINE Vec3V V3NormalizeFast(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); return V3Mul(a, 
_mm_rsqrt_ps(V3Dot(a,a))); } NV_FORCE_INLINE Vec3V V3NormalizeSafe(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); const __m128 zero=V3Zero(); const __m128 eps=V3Eps(); const __m128 length=V3Length(a); const __m128 isGreaterThanZero=FIsGrtr(length,eps); return V3Sel(isGreaterThanZero,V3ScaleInv(a,length),zero); } NV_FORCE_INLINE Vec3V V3Sel(const BoolV c, const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); return _mm_or_ps(_mm_andnot_ps(c, b), _mm_and_ps(c, a)); } NV_FORCE_INLINE BoolV V3IsGrtr(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); return _mm_cmpgt_ps(a,b); } NV_FORCE_INLINE BoolV V3IsGrtrOrEq(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); return _mm_cmpge_ps(a,b); } NV_FORCE_INLINE BoolV V3IsEq(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); return _mm_cmpeq_ps(a,b); } NV_FORCE_INLINE Vec3V V3Max(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); return _mm_max_ps(a, b); } NV_FORCE_INLINE Vec3V V3Min(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); return _mm_min_ps(a, b); } //Extract the maximum value from a NV_FORCE_INLINE FloatV V3ExtractMax(const Vec3V a) { const __m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0)); const __m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1,1,1,1)); const __m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2)); return _mm_max_ps(_mm_max_ps(shuf1, shuf2), shuf3); } //Extract the minimum value from a NV_FORCE_INLINE FloatV V3ExtractMin(const Vec3V a) { const __m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0)); const __m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1,1,1,1)); const __m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2)); return _mm_min_ps(_mm_min_ps(shuf1, shuf2), shuf3); } //return (a >= 0.0f) ?
1.0f : -1.0f; NV_FORCE_INLINE Vec3V V3Sign(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); const __m128 zero = V3Zero(); const __m128 one = V3One(); const __m128 none = V3Neg(one); return V3Sel(V3IsGrtrOrEq(a, zero), one, none); } NV_FORCE_INLINE Vec3V V3Clamp(const Vec3V a, const Vec3V minV, const Vec3V maxV) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(minV)); VECMATHAOS_ASSERT(isValidVec3V(maxV)); return V3Max(V3Min(a,maxV),minV); } NV_FORCE_INLINE uint32_t V3AllGrtr(const Vec3V a, const Vec3V b) { return internalUnitSSE2Simd::BAllTrue3_R(V4IsGrtr(a, b)); } NV_FORCE_INLINE uint32_t V3AllGrtrOrEq(const Vec3V a, const Vec3V b) { return internalUnitSSE2Simd::BAllTrue3_R(V4IsGrtrOrEq(a, b)); } NV_FORCE_INLINE uint32_t V3AllEq(const Vec3V a, const Vec3V b) { return internalUnitSSE2Simd::BAllTrue3_R(V4IsEq(a, b)); } NV_FORCE_INLINE Vec3V V3Round(const Vec3V a) { #ifdef __SSE4_2__ return _mm_round_ps( a, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC ); #else //return _mm_round_ps(a, 0x0); const Vec3V half = V3Load(0.5f); const __m128 signBit = _mm_cvtepi32_ps(_mm_srli_epi32(_mm_cvtps_epi32(a), 31)); const Vec3V aRound = V3Sub(V3Add(a, half), signBit); __m128i tmp = _mm_cvttps_epi32(aRound); return _mm_cvtepi32_ps(tmp); #endif } NV_FORCE_INLINE Vec3V V3Sin(const Vec3V a) { //Vec4V V1, V2, V3, V5, V7, V9, V11, V13, V15, V17, V19, V21, V23; //Vec4V S1, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11; Vec3V Result; // Modulo the range of the given angles such that -XM_2PI <= Angles < XM_2PI const Vec3V recipTwoPi = V4LoadA(g_NVReciprocalTwoPi.f); const Vec3V twoPi = V4LoadA(g_NVTwoPi.f); const Vec3V tmp = V3Mul(a, recipTwoPi); const Vec3V b = V3Round(tmp); const Vec3V V1 = V3NegMulSub(twoPi, b, a); // sin(V) ~= V - V^3 / 3! + V^5 / 5! - V^7 / 7! + V^9 / 9! - V^11 / 11! + V^13 / 13! - // V^15 / 15! + V^17 / 17! - V^19 / 19! + V^21 / 21! - V^23 / 23! 
(for -PI <= V < PI) const Vec3V V2 = V3Mul(V1, V1); const Vec3V V3 = V3Mul(V2, V1); const Vec3V V5 = V3Mul(V3, V2); const Vec3V V7 = V3Mul(V5, V2); const Vec3V V9 = V3Mul(V7, V2); const Vec3V V11 = V3Mul(V9, V2); const Vec3V V13 = V3Mul(V11, V2); const Vec3V V15 = V3Mul(V13, V2); const Vec3V V17 = V3Mul(V15, V2); const Vec3V V19 = V3Mul(V17, V2); const Vec3V V21 = V3Mul(V19, V2); const Vec3V V23 = V3Mul(V21, V2); const Vec4V sinCoefficients0 = V4LoadA(g_NVSinCoefficients0.f); const Vec4V sinCoefficients1 = V4LoadA(g_NVSinCoefficients1.f); const Vec4V sinCoefficients2 = V4LoadA(g_NVSinCoefficients2.f); const FloatV S1 = V4GetY(sinCoefficients0); const FloatV S2 = V4GetZ(sinCoefficients0); const FloatV S3 = V4GetW(sinCoefficients0); const FloatV S4 = V4GetX(sinCoefficients1); const FloatV S5 = V4GetY(sinCoefficients1); const FloatV S6 = V4GetZ(sinCoefficients1); const FloatV S7 = V4GetW(sinCoefficients1); const FloatV S8 = V4GetX(sinCoefficients2); const FloatV S9 = V4GetY(sinCoefficients2); const FloatV S10 = V4GetZ(sinCoefficients2); const FloatV S11 = V4GetW(sinCoefficients2); Result = V3MulAdd(S1, V3, V1); Result = V3MulAdd(S2, V5, Result); Result = V3MulAdd(S3, V7, Result); Result = V3MulAdd(S4, V9, Result); Result = V3MulAdd(S5, V11, Result); Result = V3MulAdd(S6, V13, Result); Result = V3MulAdd(S7, V15, Result); Result = V3MulAdd(S8, V17, Result); Result = V3MulAdd(S9, V19, Result); Result = V3MulAdd(S10, V21, Result); Result = V3MulAdd(S11, V23, Result); return Result; } NV_FORCE_INLINE Vec3V V3Cos(const Vec3V a) { //XMVECTOR V1, V2, V4, V6, V8, V10, V12, V14, V16, V18, V20, V22; //XMVECTOR C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11; Vec3V Result; // Modulo the range of the given angles such that -XM_2PI <= Angles < XM_2PI const Vec3V recipTwoPi = V4LoadA(g_NVReciprocalTwoPi.f); const Vec3V twoPi = V4LoadA(g_NVTwoPi.f); const Vec3V tmp = V3Mul(a, recipTwoPi); const Vec3V b = V3Round(tmp); const Vec3V V1 = V3NegMulSub(twoPi, b, a); // cos(V) ~= 1 - V^2 / 2! + V^4 / 4! - V^6 / 6! + V^8 / 8! - V^10 / 10! + V^12 / 12! - // V^14 / 14! + V^16 / 16! - V^18 / 18! + V^20 / 20! - V^22 / 22! 
(for -PI <= V < PI) const Vec3V V2 = V3Mul(V1, V1); const Vec3V V4 = V3Mul(V2, V2); const Vec3V V6 = V3Mul(V4, V2); const Vec3V V8 = V3Mul(V4, V4); const Vec3V V10 = V3Mul(V6, V4); const Vec3V V12 = V3Mul(V6, V6); const Vec3V V14 = V3Mul(V8, V6); const Vec3V V16 = V3Mul(V8, V8); const Vec3V V18 = V3Mul(V10, V8); const Vec3V V20 = V3Mul(V10, V10); const Vec3V V22 = V3Mul(V12, V10); const Vec4V cosCoefficients0 = V4LoadA(g_NVCosCoefficients0.f); const Vec4V cosCoefficients1 = V4LoadA(g_NVCosCoefficients1.f); const Vec4V cosCoefficients2 = V4LoadA(g_NVCosCoefficients2.f); const FloatV C1 = V4GetY(cosCoefficients0); const FloatV C2 = V4GetZ(cosCoefficients0); const FloatV C3 = V4GetW(cosCoefficients0); const FloatV C4 = V4GetX(cosCoefficients1); const FloatV C5 = V4GetY(cosCoefficients1); const FloatV C6 = V4GetZ(cosCoefficients1); const FloatV C7 = V4GetW(cosCoefficients1); const FloatV C8 = V4GetX(cosCoefficients2); const FloatV C9 = V4GetY(cosCoefficients2); const FloatV C10 = V4GetZ(cosCoefficients2); const FloatV C11 = V4GetW(cosCoefficients2); Result = V3MulAdd(C1, V2, V4One()); Result = V3MulAdd(C2, V4, Result); Result = V3MulAdd(C3, V6, Result); Result = V3MulAdd(C4, V8, Result); Result = V3MulAdd(C5, V10, Result); Result = V3MulAdd(C6, V12, Result); Result = V3MulAdd(C7, V14, Result); Result = V3MulAdd(C8, V16, Result); Result = V3MulAdd(C9, V18, Result); Result = V3MulAdd(C10, V20, Result); Result = V3MulAdd(C11, V22, Result); return Result; } NV_FORCE_INLINE Vec3V V3PermYZZ(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); return _mm_shuffle_ps(a,a,_MM_SHUFFLE(3,2,2,1)); } NV_FORCE_INLINE Vec3V V3PermXYX(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); return _mm_shuffle_ps(a,a,_MM_SHUFFLE(3,0,1,0)); } NV_FORCE_INLINE Vec3V V3PermYZX(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); return _mm_shuffle_ps(a,a,_MM_SHUFFLE(3,0,2,1)); } NV_FORCE_INLINE Vec3V V3PermZXY(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3,1,0,2)); } NV_FORCE_INLINE Vec3V V3PermZZY(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3,1,2,2)); } NV_FORCE_INLINE Vec3V V3PermYXX(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3,0,0,1)); } NV_FORCE_INLINE Vec3V V3Perm_Zero_1Z_0Y(const Vec3V v0, const Vec3V v1) { VECMATHAOS_ASSERT(isValidVec3V(v0)); VECMATHAOS_ASSERT(isValidVec3V(v1)); return _mm_shuffle_ps(v1, v0, _MM_SHUFFLE(3,1,2,3)); } NV_FORCE_INLINE Vec3V V3Perm_0Z_Zero_1X(const Vec3V v0, const Vec3V v1) { VECMATHAOS_ASSERT(isValidVec3V(v0)); VECMATHAOS_ASSERT(isValidVec3V(v1)); return _mm_shuffle_ps(v0, v1, _MM_SHUFFLE(3,0,3,2)); } NV_FORCE_INLINE Vec3V V3Perm_1Y_0X_Zero(const Vec3V v0, const Vec3V v1) { VECMATHAOS_ASSERT(isValidVec3V(v0)); VECMATHAOS_ASSERT(isValidVec3V(v1)); //There must be a better way to do this. 
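// Hedged sketch of a shuffle-only alternative (untested; local name "t" and the explicit zero are illustrative only, and it assumes the w lane may simply be left at zero, as the Set/Get sequence below also produces):
//   const __m128 zero = _mm_setzero_ps();
//   const __m128 t = _mm_shuffle_ps(v1, v0, _MM_SHUFFLE(3,0,3,1)); // v1.y, v1.w, v0.x, v0.w
//   return _mm_shuffle_ps(t, zero, _MM_SHUFFLE(3,3,2,0));          // v1.y, v0.x, 0, 0
// The existing Set/Get sequence below is kept unchanged.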
Vec3V v2=V3Zero(); FloatV y1=V3GetY(v1); FloatV x0=V3GetX(v0); v2=V3SetX(v2,y1); return V3SetY(v2,x0); } NV_FORCE_INLINE FloatV V3SumElems(const Vec3V a) { #ifdef __SSE4_2__ Vec3V r = _mm_hadd_ps(a,a); r = _mm_hadd_ps(r,r); return r; #else VECMATHAOS_ASSERT(isValidVec3V(a)); __m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0)); //z,y,x,w __m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1,1,1,1)); //y,x,w,z __m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2)); //x,w,z,y return _mm_add_ps(_mm_add_ps(shuf1, shuf2), shuf3); #endif } NV_FORCE_INLINE uint32_t V3OutOfBounds(const Vec3V a, const Vec3V min, const Vec3V max) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(min)); VECMATHAOS_ASSERT(isValidVec3V(max)); const BoolV ffff = BFFFF(); const BoolV c = BOr(V3IsGrtr(a, max), V3IsGrtr(min, a)); return !BAllEq(c, ffff); } NV_FORCE_INLINE uint32_t V3InBounds(const Vec3V a, const Vec3V min, const Vec3V max) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(min)); VECMATHAOS_ASSERT(isValidVec3V(max)); const BoolV tttt = BTTTT(); const BoolV c = BAnd(V3IsGrtrOrEq(a, min), V3IsGrtrOrEq(max, a)); return BAllEq(c, tttt); } NV_FORCE_INLINE uint32_t V3OutOfBounds(const Vec3V a, const Vec3V bounds) { return V3OutOfBounds(a, V3Neg(bounds), bounds); } NV_FORCE_INLINE uint32_t V3InBounds(const Vec3V a, const Vec3V bounds) { return V3InBounds(a, V3Neg(bounds), bounds); } ////////////////////////////////// //VEC4V ////////////////////////////////// NV_FORCE_INLINE Vec4V V4Splat(const FloatV f) { VECMATHAOS_ASSERT(isValidFloatV(f)); //return _mm_shuffle_ps(f, f, _MM_SHUFFLE(0,0,0,0)); return f; } NV_FORCE_INLINE Vec4V V4Merge(const FloatV* const floatVArray) { VECMATHAOS_ASSERT(isValidFloatV(floatVArray[0])); VECMATHAOS_ASSERT(isValidFloatV(floatVArray[1])); VECMATHAOS_ASSERT(isValidFloatV(floatVArray[2])); VECMATHAOS_ASSERT(isValidFloatV(floatVArray[3])); __m128 xw = _mm_move_ss(floatVArray[1], floatVArray[0]); //y, y, y, x __m128 yz = _mm_move_ss(floatVArray[2], floatVArray[3]); //z, z, z, w return (_mm_shuffle_ps(xw,yz,_MM_SHUFFLE(0,2,1,0))); } NV_FORCE_INLINE Vec4V V4Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z, const FloatVArg w) { VECMATHAOS_ASSERT(isValidFloatV(x)); VECMATHAOS_ASSERT(isValidFloatV(y)); VECMATHAOS_ASSERT(isValidFloatV(z)); VECMATHAOS_ASSERT(isValidFloatV(w)); __m128 xw = _mm_move_ss(y, x); //y, y, y, x __m128 yz = _mm_move_ss(z, w); //z, z, z, w return (_mm_shuffle_ps(xw,yz,_MM_SHUFFLE(0,2,1,0))); } NV_FORCE_INLINE Vec4V V4MergeW(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w) { const Vec4V xz = _mm_unpackhi_ps(x, z); const Vec4V yw = _mm_unpackhi_ps(y, w); return _mm_unpackhi_ps(xz, yw); } NV_FORCE_INLINE Vec4V V4MergeZ(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w) { const Vec4V xz = _mm_unpackhi_ps(x, z); const Vec4V yw = _mm_unpackhi_ps(y, w); return _mm_unpacklo_ps(xz, yw); } NV_FORCE_INLINE Vec4V V4MergeY(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w) { const Vec4V xz = _mm_unpacklo_ps(x, z); const Vec4V yw = _mm_unpacklo_ps(y, w); return _mm_unpackhi_ps(xz, yw); } NV_FORCE_INLINE Vec4V V4MergeX(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w) { const Vec4V xz = _mm_unpacklo_ps(x, z); const Vec4V yw = _mm_unpacklo_ps(y, w); return _mm_unpacklo_ps(xz, yw); } NV_FORCE_INLINE Vec4V V4UnpackXY(const Vec4VArg a, const Vec4VArg b) { return _mm_unpacklo_ps(a, b); } NV_FORCE_INLINE Vec4V V4UnpackZW(const Vec4VArg a, const Vec4VArg 
b) { return _mm_unpackhi_ps(a, b); } NV_FORCE_INLINE Vec4V V4UnitW() { const NV_ALIGN(16,float) w[4]={0.0f,0.0f,0.0f,1.0f}; const __m128 w128=_mm_load_ps(w); return w128; } NV_FORCE_INLINE Vec4V V4UnitX() { const NV_ALIGN(16,float) x[4]={1.0f,0.0f,0.0f,0.0f}; const __m128 x128=_mm_load_ps(x); return x128; } NV_FORCE_INLINE Vec4V V4UnitY() { const NV_ALIGN(16,float) y[4]={0.0f,1.0f,0.0f,0.0f}; const __m128 y128=_mm_load_ps(y); return y128; } NV_FORCE_INLINE Vec4V V4UnitZ() { const NV_ALIGN(16,float) z[4]={0.0f,0.0f,1.0f,0.0f}; const __m128 z128=_mm_load_ps(z); return z128; } NV_FORCE_INLINE FloatV V4GetW(const Vec4V f) { return _mm_shuffle_ps(f, f, _MM_SHUFFLE(3,3,3,3)); } NV_FORCE_INLINE FloatV V4GetX(const Vec4V f) { return _mm_shuffle_ps(f, f, _MM_SHUFFLE(0,0,0,0)); } NV_FORCE_INLINE FloatV V4GetY(const Vec4V f) { return _mm_shuffle_ps(f, f, _MM_SHUFFLE(1,1,1,1)); } NV_FORCE_INLINE FloatV V4GetZ(const Vec4V f) { return _mm_shuffle_ps(f, f, _MM_SHUFFLE(2,2,2,2)); } NV_FORCE_INLINE Vec4V V4SetW(const Vec4V v, const FloatV f) { VECMATHAOS_ASSERT(isValidFloatV(f)); return V4Sel(BTTTF(),v,f); } NV_FORCE_INLINE Vec4V V4SetX(const Vec4V v, const FloatV f) { VECMATHAOS_ASSERT(isValidFloatV(f)); return V4Sel(BFTTT(),v,f); } NV_FORCE_INLINE Vec4V V4SetY(const Vec4V v, const FloatV f) { VECMATHAOS_ASSERT(isValidFloatV(f)); return V4Sel(BTFTT(),v,f); } NV_FORCE_INLINE Vec4V V4SetZ(const Vec4V v, const FloatV f) { VECMATHAOS_ASSERT(isValidVec3V(v)); VECMATHAOS_ASSERT(isValidFloatV(f)); return V4Sel(BTTFT(),v,f); } NV_FORCE_INLINE Vec4V V4ClearW(const Vec4V v) { return _mm_and_ps(v, (VecI32V&)internalUnitSSE2Simd::gMaskXYZ); } NV_FORCE_INLINE Vec4V V4Perm_YXWZ(const Vec4V a) { return _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,3,0,1)); } NV_FORCE_INLINE Vec4V V4Perm_XZXZ(const Vec4V a) { return _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,0,2,0)); } NV_FORCE_INLINE Vec4V V4Perm_YWYW(const Vec4V a) { return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3,1,3,1)); } template<uint8_t x, uint8_t y, uint8_t z, uint8_t w> NV_FORCE_INLINE Vec4V V4Perm(const Vec4V a) { return _mm_shuffle_ps(a, a, _MM_SHUFFLE(w, z, y, x)); } NV_FORCE_INLINE Vec4V V4Zero() { return V4Load(0.0f); } NV_FORCE_INLINE Vec4V V4One() { return V4Load(1.0f); } NV_FORCE_INLINE Vec4V V4Eps() { return V4Load(NV_EPS_REAL); } NV_FORCE_INLINE Vec4V V4Neg(const Vec4V f) { return _mm_sub_ps( _mm_setzero_ps(), f); } NV_FORCE_INLINE Vec4V V4Add(const Vec4V a, const Vec4V b) { return _mm_add_ps(a,b); } NV_FORCE_INLINE Vec4V V4Sub(const Vec4V a, const Vec4V b) { return _mm_sub_ps(a,b); } NV_FORCE_INLINE Vec4V V4Scale(const Vec4V a, const FloatV b) { return _mm_mul_ps(a,b); } NV_FORCE_INLINE Vec4V V4Mul(const Vec4V a, const Vec4V b) { return _mm_mul_ps(a,b); } NV_FORCE_INLINE Vec4V V4ScaleInv(const Vec4V a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(b)); return _mm_div_ps(a,b); } NV_FORCE_INLINE Vec4V V4Div(const Vec4V a, const Vec4V b) { return _mm_div_ps(a,b); } NV_FORCE_INLINE Vec4V V4ScaleInvFast(const Vec4V a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(b)); return _mm_mul_ps(a,_mm_rcp_ps(b)); } NV_FORCE_INLINE Vec4V V4DivFast(const Vec4V a, const Vec4V b) { return _mm_mul_ps(a,_mm_rcp_ps(b)); } NV_FORCE_INLINE Vec4V V4Recip(const Vec4V a) { return _mm_div_ps(V4One(),a); } NV_FORCE_INLINE Vec4V V4RecipFast(const Vec4V a) { return _mm_rcp_ps(a); } NV_FORCE_INLINE Vec4V V4Rsqrt(const Vec4V a) { return _mm_div_ps(V4One(),_mm_sqrt_ps(a)); } NV_FORCE_INLINE Vec4V V4RsqrtFast(const Vec4V a) { return _mm_rsqrt_ps(a); } NV_FORCE_INLINE Vec4V V4Sqrt(const Vec4V a) { 
return _mm_sqrt_ps(a); } NV_FORCE_INLINE Vec4V V4ScaleAdd(const Vec4V a, const FloatV b, const Vec4V c) { VECMATHAOS_ASSERT(isValidFloatV(b)); return V4Add(V4Scale(a,b),c); } NV_FORCE_INLINE Vec4V V4NegScaleSub(const Vec4V a, const FloatV b, const Vec4V c) { VECMATHAOS_ASSERT(isValidFloatV(b)); return V4Sub(c,V4Scale(a,b)); } NV_FORCE_INLINE Vec4V V4MulAdd(const Vec4V a, const Vec4V b, const Vec4V c) { return V4Add(V4Mul(a,b),c); } NV_FORCE_INLINE Vec4V V4NegMulSub(const Vec4V a, const Vec4V b, const Vec4V c) { return V4Sub(c,V4Mul(a,b)); } NV_FORCE_INLINE Vec4V V4Abs(const Vec4V a) { return V4Max(a,V4Neg(a)); } NV_FORCE_INLINE FloatV V4SumElements(const Vec4V a) { #ifdef __SSE4_2__ Vec4V r = _mm_hadd_ps(a,a); r = _mm_hadd_ps(r,r); return r; #else const Vec4V xy = V4UnpackXY(a, a); //x,x,y,y const Vec4V zw = V4UnpackZW(a, a); //z,z,w,w const Vec4V xz_yw = V4Add(xy, zw); //x+z,x+z,y+w,y+w const FloatV xz = V4GetX(xz_yw); //x+z const FloatV yw = V4GetZ(xz_yw); //y+w return FAdd(xz, yw); //sum #endif } NV_FORCE_INLINE FloatV V4Dot(const Vec4V a, const Vec4V b) { #ifdef __SSE4_2__ return _mm_dp_ps(a, b, 0xff); #else __m128 dot1 = _mm_mul_ps(a, b); //x,y,z,w __m128 shuf1 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(2,1,0,3)); //w,x,y,z __m128 shuf2 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(1,0,3,2)); //z,w,x,y __m128 shuf3 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(0,3,2,1)); //y,z,w,x return _mm_add_ps(_mm_add_ps(shuf2, shuf3), _mm_add_ps(dot1,shuf1)); #endif } NV_FORCE_INLINE FloatV V4Length(const Vec4V a) { return _mm_sqrt_ps(V4Dot(a,a)); } NV_FORCE_INLINE FloatV V4LengthSq(const Vec4V a) { return V4Dot(a,a); } NV_FORCE_INLINE Vec4V V4Normalize(const Vec4V a) { VECMATHAOS_ASSERT(V4Dot(a,a)!=FZero()) return V4ScaleInv(a,_mm_sqrt_ps(V4Dot(a,a))); } NV_FORCE_INLINE Vec4V V4NormalizeFast(const Vec4V a) { return V4ScaleInvFast(a,_mm_sqrt_ps(V4Dot(a,a))); } NV_FORCE_INLINE Vec4V V4NormalizeSafe(const Vec4V a) { const __m128 zero=FZero(); const __m128 eps=V3Eps(); const __m128 length=V4Length(a); const __m128 isGreaterThanZero=V4IsGrtr(length,eps); return V4Sel(isGreaterThanZero,V4ScaleInv(a,length),zero); } NV_FORCE_INLINE BoolV V4IsEqU32(const VecU32V a, const VecU32V b) { return m128_I2F(_mm_cmpeq_epi32(m128_F2I(a), m128_F2I(b))); } NV_FORCE_INLINE Vec4V V4Sel(const BoolV c, const Vec4V a, const Vec4V b) { return _mm_or_ps(_mm_andnot_ps(c, b), _mm_and_ps(c, a)); } NV_FORCE_INLINE BoolV V4IsGrtr(const Vec4V a, const Vec4V b) { return _mm_cmpgt_ps(a,b); } NV_FORCE_INLINE BoolV V4IsGrtrOrEq(const Vec4V a, const Vec4V b) { return _mm_cmpge_ps(a,b); } NV_FORCE_INLINE BoolV V4IsEq(const Vec4V a, const Vec4V b) { return _mm_cmpeq_ps(a,b); } NV_FORCE_INLINE Vec4V V4Max(const Vec4V a, const Vec4V b) { return _mm_max_ps(a, b); } NV_FORCE_INLINE Vec4V V4Min(const Vec4V a, const Vec4V b) { return _mm_min_ps(a, b); } NV_FORCE_INLINE FloatV V4ExtractMax(const Vec4V a) { __m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,1,0,3)); __m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1,0,3,2)); __m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0,3,2,1)); return _mm_max_ps(_mm_max_ps(a, shuf1), _mm_max_ps(shuf2, shuf3)); } NV_FORCE_INLINE FloatV V4ExtractMin(const Vec4V a) { __m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,1,0,3)); __m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1,0,3,2)); __m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0,3,2,1)); return _mm_min_ps(_mm_min_ps(a, shuf1), _mm_min_ps(shuf2, shuf3)); } NV_FORCE_INLINE Vec4V V4Clamp(const Vec4V a, const Vec4V minV, const Vec4V maxV) { return 
V4Max(V4Min(a,maxV),minV); } NV_FORCE_INLINE uint32_t V4AllGrtr(const Vec4V a, const Vec4V b) { return internalUnitSSE2Simd::BAllTrue4_R(V4IsGrtr(a, b)); } NV_FORCE_INLINE uint32_t V4AllGrtrOrEq(const Vec4V a, const Vec4V b) { return internalUnitSSE2Simd::BAllTrue4_R(V4IsGrtrOrEq(a, b)); } NV_FORCE_INLINE uint32_t V4AllEq(const Vec4V a, const Vec4V b) { return internalUnitSSE2Simd::BAllTrue4_R(V4IsEq(a, b)); } NV_FORCE_INLINE Vec4V V4Round(const Vec4V a) { #ifdef __SSE4_2__ return _mm_round_ps( a, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC ); #else //return _mm_round_ps(a, 0x0); const Vec4V half = V4Load(0.5f); const __m128 signBit = _mm_cvtepi32_ps(_mm_srli_epi32(_mm_cvtps_epi32(a), 31)); const Vec4V aRound = V4Sub(V4Add(a, half), signBit); __m128i tmp = _mm_cvttps_epi32(aRound); return _mm_cvtepi32_ps(tmp); #endif } NV_FORCE_INLINE Vec4V V4Sin(const Vec4V a) { //Vec4V V1, V2, V3, V5, V7, V9, V11, V13, V15, V17, V19, V21, V23; //Vec4V S1, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11; Vec4V Result; const Vec4V recipTwoPi = V4LoadA(g_NVReciprocalTwoPi.f); const Vec4V twoPi = V4LoadA(g_NVTwoPi.f); const Vec4V tmp = V4Mul(a, recipTwoPi); const Vec4V b = V4Round(tmp); const Vec4V V1 = V4NegMulSub(twoPi, b, a); // sin(V) ~= V - V^3 / 3! + V^5 / 5! - V^7 / 7! + V^9 / 9! - V^11 / 11! + V^13 / 13! - // V^15 / 15! + V^17 / 17! - V^19 / 19! + V^21 / 21! - V^23 / 23! (for -PI <= V < PI) const Vec4V V2 = V4Mul(V1, V1); const Vec4V V3 = V4Mul(V2, V1); const Vec4V V5 = V4Mul(V3, V2); const Vec4V V7 = V4Mul(V5, V2); const Vec4V V9 = V4Mul(V7, V2); const Vec4V V11 = V4Mul(V9, V2); const Vec4V V13 = V4Mul(V11, V2); const Vec4V V15 = V4Mul(V13, V2); const Vec4V V17 = V4Mul(V15, V2); const Vec4V V19 = V4Mul(V17, V2); const Vec4V V21 = V4Mul(V19, V2); const Vec4V V23 = V4Mul(V21, V2); const Vec4V sinCoefficients0 = V4LoadA(g_NVSinCoefficients0.f); const Vec4V sinCoefficients1 = V4LoadA(g_NVSinCoefficients1.f); const Vec4V sinCoefficients2 = V4LoadA(g_NVSinCoefficients2.f); const FloatV S1 = V4GetY(sinCoefficients0); const FloatV S2 = V4GetZ(sinCoefficients0); const FloatV S3 = V4GetW(sinCoefficients0); const FloatV S4 = V4GetX(sinCoefficients1); const FloatV S5 = V4GetY(sinCoefficients1); const FloatV S6 = V4GetZ(sinCoefficients1); const FloatV S7 = V4GetW(sinCoefficients1); const FloatV S8 = V4GetX(sinCoefficients2); const FloatV S9 = V4GetY(sinCoefficients2); const FloatV S10 = V4GetZ(sinCoefficients2); const FloatV S11 = V4GetW(sinCoefficients2); Result = V4MulAdd(S1, V3, V1); Result = V4MulAdd(S2, V5, Result); Result = V4MulAdd(S3, V7, Result); Result = V4MulAdd(S4, V9, Result); Result = V4MulAdd(S5, V11, Result); Result = V4MulAdd(S6, V13, Result); Result = V4MulAdd(S7, V15, Result); Result = V4MulAdd(S8, V17, Result); Result = V4MulAdd(S9, V19, Result); Result = V4MulAdd(S10, V21, Result); Result = V4MulAdd(S11, V23, Result); return Result; } NV_FORCE_INLINE Vec4V V4Cos(const Vec4V a) { //XMVECTOR V1, V2, V4, V6, V8, V10, V12, V14, V16, V18, V20, V22; //XMVECTOR C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11; Vec4V Result; const Vec4V recipTwoPi = V4LoadA(g_NVReciprocalTwoPi.f); const Vec4V twoPi = V4LoadA(g_NVTwoPi.f); const Vec4V tmp = V4Mul(a, recipTwoPi); const Vec4V b = V4Round(tmp); const Vec4V V1 = V4NegMulSub(twoPi, b, a); // cos(V) ~= 1 - V^2 / 2! + V^4 / 4! - V^6 / 6! + V^8 / 8! - V^10 / 10! + V^12 / 12! - // V^14 / 14! + V^16 / 16! - V^18 / 18! + V^20 / 20! - V^22 / 22! 
(for -PI <= V < PI) const Vec4V V2 = V4Mul(V1, V1); const Vec4V V4 = V4Mul(V2, V2); const Vec4V V6 = V4Mul(V4, V2); const Vec4V V8 = V4Mul(V4, V4); const Vec4V V10 = V4Mul(V6, V4); const Vec4V V12 = V4Mul(V6, V6); const Vec4V V14 = V4Mul(V8, V6); const Vec4V V16 = V4Mul(V8, V8); const Vec4V V18 = V4Mul(V10, V8); const Vec4V V20 = V4Mul(V10, V10); const Vec4V V22 = V4Mul(V12, V10); const Vec4V cosCoefficients0 = V4LoadA(g_NVCosCoefficients0.f); const Vec4V cosCoefficients1 = V4LoadA(g_NVCosCoefficients1.f); const Vec4V cosCoefficients2 = V4LoadA(g_NVCosCoefficients2.f); const FloatV C1 = V4GetY(cosCoefficients0); const FloatV C2 = V4GetZ(cosCoefficients0); const FloatV C3 = V4GetW(cosCoefficients0); const FloatV C4 = V4GetX(cosCoefficients1); const FloatV C5 = V4GetY(cosCoefficients1); const FloatV C6 = V4GetZ(cosCoefficients1); const FloatV C7 = V4GetW(cosCoefficients1); const FloatV C8 = V4GetX(cosCoefficients2); const FloatV C9 = V4GetY(cosCoefficients2); const FloatV C10 = V4GetZ(cosCoefficients2); const FloatV C11 = V4GetW(cosCoefficients2); Result = V4MulAdd(C1, V2, V4One()); Result = V4MulAdd(C2, V4, Result); Result = V4MulAdd(C3, V6, Result); Result = V4MulAdd(C4, V8, Result); Result = V4MulAdd(C5, V10, Result); Result = V4MulAdd(C6, V12, Result); Result = V4MulAdd(C7, V14, Result); Result = V4MulAdd(C8, V16, Result); Result = V4MulAdd(C9, V18, Result); Result = V4MulAdd(C10, V20, Result); Result = V4MulAdd(C11, V22, Result); return Result; } NV_FORCE_INLINE void V4Transpose(Vec4V& col0, Vec4V& col1, Vec4V& col2, Vec4V& col3) { Vec4V tmp0 = _mm_unpacklo_ps(col0, col1); Vec4V tmp2 = _mm_unpacklo_ps(col2, col3); Vec4V tmp1 = _mm_unpackhi_ps(col0, col1); Vec4V tmp3 = _mm_unpackhi_ps(col2, col3); col0 = _mm_movelh_ps(tmp0, tmp2); col1 = _mm_movehl_ps(tmp2, tmp0); col2 = _mm_movelh_ps(tmp1, tmp3); col3 = _mm_movehl_ps(tmp3, tmp1); } ////////////////////////////////// //BoolV ////////////////////////////////// NV_FORCE_INLINE BoolV BFFFF() { return _mm_setzero_ps(); } NV_FORCE_INLINE BoolV BFFFT() { /*const NV_ALIGN(16, uint32_t f[4])={0,0,0,0xFFFFFFFF}; const __m128 ffft=_mm_load_ps((float*)&f); return ffft;*/ return m128_I2F(_mm_set_epi32(-1, 0, 0, 0)); } NV_FORCE_INLINE BoolV BFFTF() { /*const NV_ALIGN(16, uint32_t f[4])={0,0,0xFFFFFFFF,0}; const __m128 fftf=_mm_load_ps((float*)&f); return fftf;*/ return m128_I2F(_mm_set_epi32(0, -1, 0, 0)); } NV_FORCE_INLINE BoolV BFFTT() { /*const NV_ALIGN(16, uint32_t f[4])={0,0,0xFFFFFFFF,0xFFFFFFFF}; const __m128 fftt=_mm_load_ps((float*)&f); return fftt;*/ return m128_I2F(_mm_set_epi32(-1, -1, 0, 0)); } NV_FORCE_INLINE BoolV BFTFF() { /*const NV_ALIGN(16, uint32_t f[4])={0,0xFFFFFFFF,0,0}; const __m128 ftff=_mm_load_ps((float*)&f); return ftff;*/ return m128_I2F(_mm_set_epi32(0, 0, -1, 0)); } NV_FORCE_INLINE BoolV BFTFT() { /*const NV_ALIGN(16, uint32_t f[4])={0,0xFFFFFFFF,0,0xFFFFFFFF}; const __m128 ftft=_mm_load_ps((float*)&f); return ftft;*/ return m128_I2F(_mm_set_epi32(-1, 0, -1, 0)); } NV_FORCE_INLINE BoolV BFTTF() { /*const NV_ALIGN(16, uint32_t f[4])={0,0xFFFFFFFF,0xFFFFFFFF,0}; const __m128 fttf=_mm_load_ps((float*)&f); return fttf;*/ return m128_I2F(_mm_set_epi32(0, -1, -1, 0)); } NV_FORCE_INLINE BoolV BFTTT() { /*const NV_ALIGN(16, uint32_t f[4])={0,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF}; const __m128 fttt=_mm_load_ps((float*)&f); return fttt;*/ return m128_I2F(_mm_set_epi32(-1, -1, -1, 0)); } NV_FORCE_INLINE BoolV BTFFF() { //const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0,0,0}; //const __m128 tfff=_mm_load_ps((float*)&f); //return tfff; 
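// Note on the constants used by the B???? helpers below: _mm_set_epi32 takes its arguments high-lane first (w, z, y, x), so a trailing -1 fills only the x lane; e.g. BTFFF maps to _mm_set_epi32(0, 0, 0, -1).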
return m128_I2F(_mm_set_epi32(0, 0, 0, -1)); } NV_FORCE_INLINE BoolV BTFFT() { /*const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0,0,0xFFFFFFFF}; const __m128 tfft=_mm_load_ps((float*)&f); return tfft;*/ return m128_I2F(_mm_set_epi32(-1, 0, 0, -1)); } NV_FORCE_INLINE BoolV BTFTF() { /*const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0,0xFFFFFFFF,0}; const __m128 tftf=_mm_load_ps((float*)&f); return tftf;*/ return m128_I2F(_mm_set_epi32(0, -1, 0, -1)); } NV_FORCE_INLINE BoolV BTFTT() { /*const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0,0xFFFFFFFF,0xFFFFFFFF}; const __m128 tftt=_mm_load_ps((float*)&f); return tftt;*/ return m128_I2F(_mm_set_epi32(-1, -1, 0, -1)); } NV_FORCE_INLINE BoolV BTTFF() { /*const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0xFFFFFFFF,0,0}; const __m128 ttff=_mm_load_ps((float*)&f); return ttff;*/ return m128_I2F(_mm_set_epi32(0, 0, -1, -1)); } NV_FORCE_INLINE BoolV BTTFT() { /*const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0xFFFFFFFF,0,0xFFFFFFFF}; const __m128 ttft=_mm_load_ps((float*)&f); return ttft;*/ return m128_I2F(_mm_set_epi32(-1, 0, -1, -1)); } NV_FORCE_INLINE BoolV BTTTF() { /*const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0}; const __m128 tttf=_mm_load_ps((float*)&f); return tttf;*/ return m128_I2F(_mm_set_epi32(0, -1, -1, -1)); } NV_FORCE_INLINE BoolV BTTTT() { /*const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF}; const __m128 tttt=_mm_load_ps((float*)&f); return tttt;*/ return m128_I2F(_mm_set_epi32(-1, -1, -1, -1)); } NV_FORCE_INLINE BoolV BXMask() { /*const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0,0,0}; const __m128 tfff=_mm_load_ps((float*)&f); return tfff;*/ return m128_I2F(_mm_set_epi32(0, 0, 0, -1)); } NV_FORCE_INLINE BoolV BYMask() { /*const NV_ALIGN(16, uint32_t f[4])={0,0xFFFFFFFF,0,0}; const __m128 ftff=_mm_load_ps((float*)&f); return ftff;*/ return m128_I2F(_mm_set_epi32(0, 0, -1, 0)); } NV_FORCE_INLINE BoolV BZMask() { /*const NV_ALIGN(16, uint32_t f[4])={0,0,0xFFFFFFFF,0}; const __m128 fftf=_mm_load_ps((float*)&f); return fftf;*/ return m128_I2F(_mm_set_epi32(0, -1, 0, 0)); } NV_FORCE_INLINE BoolV BWMask() { /*const NV_ALIGN(16, uint32_t f[4])={0,0,0,0xFFFFFFFF}; const __m128 ffft=_mm_load_ps((float*)&f); return ffft;*/ return m128_I2F(_mm_set_epi32(-1, 0, 0, 0)); } NV_FORCE_INLINE BoolV BGetX(const BoolV f) { return _mm_shuffle_ps(f, f, _MM_SHUFFLE(0,0,0,0)); } NV_FORCE_INLINE BoolV BGetY(const BoolV f) { return _mm_shuffle_ps(f, f, _MM_SHUFFLE(1,1,1,1)); } NV_FORCE_INLINE BoolV BGetZ(const BoolV f) { return _mm_shuffle_ps(f, f, _MM_SHUFFLE(2,2,2,2)); } NV_FORCE_INLINE BoolV BGetW(const BoolV f) { return _mm_shuffle_ps(f, f, _MM_SHUFFLE(3,3,3,3)); } NV_FORCE_INLINE BoolV BSetX(const BoolV v, const BoolV f) { return V4Sel(BFTTT(),v,f); } NV_FORCE_INLINE BoolV BSetY(const BoolV v, const BoolV f) { return V4Sel(BTFTT(),v,f); } NV_FORCE_INLINE BoolV BSetZ(const BoolV v, const BoolV f) { return V4Sel(BTTFT(),v,f); } NV_FORCE_INLINE BoolV BSetW(const BoolV v, const BoolV f) { return V4Sel(BTTTF(),v,f); } NV_FORCE_INLINE BoolV BAnd(const BoolV a, const BoolV b) { return (_mm_and_ps(a,b)); } NV_FORCE_INLINE BoolV BNot(const BoolV a) { const BoolV bAllTrue(BTTTT()); return _mm_xor_ps(a, bAllTrue); } NV_FORCE_INLINE BoolV BAndNot(const BoolV a, const BoolV b) { return (_mm_andnot_ps(b,a)); } NV_FORCE_INLINE BoolV BOr(const BoolV a, const BoolV b) { return (_mm_or_ps(a,b)); } NV_FORCE_INLINE BoolV BAllTrue4(const BoolV a) { const BoolV bTmp = _mm_and_ps(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,1,0,1)), 
_mm_shuffle_ps(a, a, _MM_SHUFFLE(2,3,2,3))); return _mm_and_ps(_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(0,0,0,0)), _mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(1,1,1,1))); } NV_FORCE_INLINE BoolV BAnyTrue4(const BoolV a) { const BoolV bTmp = _mm_or_ps(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,1,0,1)), _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,3,2,3))); return _mm_or_ps(_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(0,0,0,0)), _mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(1,1,1,1))); } NV_FORCE_INLINE BoolV BAllTrue3(const BoolV a) { const BoolV bTmp = _mm_and_ps(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,1,0,1)), _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2))); return _mm_and_ps(_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(0,0,0,0)), _mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(1,1,1,1))); } NV_FORCE_INLINE BoolV BAnyTrue3(const BoolV a) { const BoolV bTmp = _mm_or_ps(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,1,0,1)), _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2))); return _mm_or_ps(_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(0,0,0,0)), _mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(1,1,1,1))); } NV_FORCE_INLINE uint32_t BAllEq(const BoolV a, const BoolV b) { const BoolV bTest = m128_I2F(_mm_cmpeq_epi32(m128_F2I(a), m128_F2I(b))); return internalUnitSSE2Simd::BAllTrue4_R(bTest); } NV_FORCE_INLINE uint32_t BAllEqTTTT(const BoolV a) { return uint32_t(_mm_movemask_ps(a)==15); } NV_FORCE_INLINE uint32_t BAllEqFFFF(const BoolV a) { return uint32_t(_mm_movemask_ps(a)==0); } NV_FORCE_INLINE uint32_t BGetBitMask(const BoolV a) { return uint32_t(_mm_movemask_ps(a)); } ////////////////////////////////// //MAT33V ////////////////////////////////// NV_FORCE_INLINE Vec3V M33MulV3(const Mat33V& a, const Vec3V b) { const FloatV x=V3GetX(b); const FloatV y=V3GetY(b); const FloatV z=V3GetZ(b); const Vec3V v0=V3Scale(a.col0,x); const Vec3V v1=V3Scale(a.col1,y); const Vec3V v2=V3Scale(a.col2,z); const Vec3V v0PlusV1=V3Add(v0,v1); return V3Add(v0PlusV1,v2); } NV_FORCE_INLINE Vec3V M33TrnspsMulV3(const Mat33V& a, const Vec3V b) { const FloatV x=V3Dot(a.col0,b); const FloatV y=V3Dot(a.col1,b); const FloatV z=V3Dot(a.col2,b); return V3Merge(x,y,z); } NV_FORCE_INLINE Vec3V M33MulV3AddV3(const Mat33V& A, const Vec3V b, const Vec3V c) { const FloatV x=V3GetX(b); const FloatV y=V3GetY(b); const FloatV z=V3GetZ(b); Vec3V result = V3MulAdd(A.col0, x, c); result = V3MulAdd(A.col1, y, result); return V3MulAdd(A.col2, z, result); } NV_FORCE_INLINE Mat33V M33MulM33(const Mat33V& a, const Mat33V& b) { return Mat33V(M33MulV3(a,b.col0),M33MulV3(a,b.col1),M33MulV3(a,b.col2)); } NV_FORCE_INLINE Mat33V M33Add(const Mat33V& a, const Mat33V& b) { return Mat33V(V3Add(a.col0,b.col0),V3Add(a.col1,b.col1),V3Add(a.col2,b.col2)); } NV_FORCE_INLINE Mat33V M33Scale(const Mat33V& a, const FloatV& b) { return Mat33V(V3Scale(a.col0,b),V3Scale(a.col1,b),V3Scale(a.col2,b)); } NV_FORCE_INLINE Mat33V M33Inverse(const Mat33V& a) { const BoolV tfft=BTFFT(); const BoolV tttf=BTTTF(); const FloatV zero=FZero(); const Vec3V cross01 = V3Cross(a.col0,a.col1); const Vec3V cross12 = V3Cross(a.col1,a.col2); const Vec3V cross20 = V3Cross(a.col2,a.col0); const FloatV dot = V3Dot(cross01,a.col2); const FloatV invDet = _mm_rcp_ps(dot); const Vec3V mergeh = _mm_unpacklo_ps(cross12,cross01); const Vec3V mergel = _mm_unpackhi_ps(cross12,cross01); Vec3V colInv0 = _mm_unpacklo_ps(mergeh,cross20); colInv0 = _mm_or_ps(_mm_andnot_ps(tttf, zero), _mm_and_ps(tttf, colInv0)); const Vec3V zppd=_mm_shuffle_ps(mergeh,cross20,_MM_SHUFFLE(3,0,0,2)); const Vec3V pbwp=_mm_shuffle_ps(cross20,mergeh,_MM_SHUFFLE(3,3,1,0)); const Vec3V 
colInv1=_mm_or_ps(_mm_andnot_ps(BTFFT(), pbwp), _mm_and_ps(BTFFT(), zppd)); const Vec3V xppd=_mm_shuffle_ps(mergel,cross20,_MM_SHUFFLE(3,0,0,0)); const Vec3V pcyp=_mm_shuffle_ps(cross20,mergel,_MM_SHUFFLE(3,1,2,0)); const Vec3V colInv2=_mm_or_ps(_mm_andnot_ps(tfft, pcyp), _mm_and_ps(tfft, xppd)); return Mat33V ( _mm_mul_ps(colInv0,invDet), _mm_mul_ps(colInv1,invDet), _mm_mul_ps(colInv2,invDet) ); } NV_FORCE_INLINE Mat33V M33Trnsps(const Mat33V& a) { return Mat33V ( V3Merge(V3GetX(a.col0),V3GetX(a.col1),V3GetX(a.col2)), V3Merge(V3GetY(a.col0),V3GetY(a.col1),V3GetY(a.col2)), V3Merge(V3GetZ(a.col0),V3GetZ(a.col1),V3GetZ(a.col2)) ); } NV_FORCE_INLINE Mat33V M33Identity() { return Mat33V ( V3UnitX(), V3UnitY(), V3UnitZ() ); } NV_FORCE_INLINE Mat33V M33Sub(const Mat33V& a, const Mat33V& b) { return Mat33V(V3Sub(a.col0,b.col0),V3Sub(a.col1,b.col1),V3Sub(a.col2,b.col2)); } NV_FORCE_INLINE Mat33V M33Neg(const Mat33V& a) { return Mat33V(V3Neg(a.col0),V3Neg(a.col1),V3Neg(a.col2)); } NV_FORCE_INLINE Mat33V M33Abs(const Mat33V& a) { return Mat33V(V3Abs(a.col0),V3Abs(a.col1),V3Abs(a.col2)); } NV_FORCE_INLINE Mat33V PromoteVec3V(const Vec3V v) { const BoolV bTFFF = BTFFF(); const BoolV bFTFF = BFTFF(); const BoolV bFFTF = BFFTF(); const Vec3V zero = V3Zero(); return Mat33V( V3Sel(bTFFF, v, zero), V3Sel(bFTFF, v, zero), V3Sel(bFFTF, v, zero)); } NV_FORCE_INLINE Mat33V M33Diagonal(const Vec3VArg d) { const FloatV x = V3Mul(V3UnitX(), d); const FloatV y = V3Mul(V3UnitY(), d); const FloatV z = V3Mul(V3UnitZ(), d); return Mat33V(x, y, z); } ////////////////////////////////// //MAT34V ////////////////////////////////// NV_FORCE_INLINE Vec3V M34MulV3(const Mat34V& a, const Vec3V b) { const FloatV x=V3GetX(b); const FloatV y=V3GetY(b); const FloatV z=V3GetZ(b); const Vec3V v0=V3Scale(a.col0,x); const Vec3V v1=V3Scale(a.col1,y); const Vec3V v2=V3Scale(a.col2,z); const Vec3V v0PlusV1=V3Add(v0,v1); const Vec3V v0PlusV1Plusv2=V3Add(v0PlusV1,v2); return (V3Add(v0PlusV1Plusv2,a.col3)); } NV_FORCE_INLINE Vec3V M34Mul33V3(const Mat34V& a, const Vec3V b) { const FloatV x=V3GetX(b); const FloatV y=V3GetY(b); const FloatV z=V3GetZ(b); const Vec3V v0=V3Scale(a.col0,x); const Vec3V v1=V3Scale(a.col1,y); const Vec3V v2=V3Scale(a.col2,z); const Vec3V v0PlusV1=V3Add(v0,v1); return V3Add(v0PlusV1,v2); } NV_FORCE_INLINE Vec3V M34TrnspsMul33V3(const Mat34V& a, const Vec3V b) { const FloatV x=V3Dot(a.col0,b); const FloatV y=V3Dot(a.col1,b); const FloatV z=V3Dot(a.col2,b); return V3Merge(x,y,z); } NV_FORCE_INLINE Mat34V M34MulM34(const Mat34V& a, const Mat34V& b) { return Mat34V(M34Mul33V3(a,b.col0), M34Mul33V3(a,b.col1),M34Mul33V3(a,b.col2),M34MulV3(a,b.col3)); } NV_FORCE_INLINE Mat33V M34MulM33(const Mat34V& a, const Mat33V& b) { return Mat33V(M34Mul33V3(a,b.col0),M34Mul33V3(a,b.col1),M34Mul33V3(a,b.col2)); } NV_FORCE_INLINE Mat33V M34Mul33MM34(const Mat34V& a, const Mat34V& b) { return Mat33V(M34Mul33V3(a,b.col0),M34Mul33V3(a,b.col1),M34Mul33V3(a,b.col2)); } NV_FORCE_INLINE Mat34V M34Add(const Mat34V& a, const Mat34V& b) { return Mat34V(V3Add(a.col0,b.col0),V3Add(a.col1,b.col1),V3Add(a.col2,b.col2),V3Add(a.col3,b.col3)); } NV_FORCE_INLINE Mat33V M34Trnsps33(const Mat34V& a) { return Mat33V ( V3Merge(V3GetX(a.col0),V3GetX(a.col1),V3GetX(a.col2)), V3Merge(V3GetY(a.col0),V3GetY(a.col1),V3GetY(a.col2)), V3Merge(V3GetZ(a.col0),V3GetZ(a.col1),V3GetZ(a.col2)) ); } ////////////////////////////////// //MAT44V ////////////////////////////////// NV_FORCE_INLINE Vec4V M44MulV4(const Mat44V& a, const Vec4V b) { const FloatV x=V4GetX(b); const
FloatV y=V4GetY(b); const FloatV z=V4GetZ(b); const FloatV w=V4GetW(b); const Vec4V v0=V4Scale(a.col0,x); const Vec4V v1=V4Scale(a.col1,y); const Vec4V v2=V4Scale(a.col2,z); const Vec4V v3=V4Scale(a.col3,w); const Vec4V v0PlusV1=V4Add(v0,v1); const Vec4V v0PlusV1Plusv2=V4Add(v0PlusV1,v2); return (V4Add(v0PlusV1Plusv2,v3)); } NV_FORCE_INLINE Vec4V M44TrnspsMulV4(const Mat44V& a, const Vec4V b) { NV_ALIGN(16,FloatV) dotProdArray[4]= { V4Dot(a.col0,b), V4Dot(a.col1,b), V4Dot(a.col2,b), V4Dot(a.col3,b) }; return V4Merge(dotProdArray); } NV_FORCE_INLINE Mat44V M44MulM44(const Mat44V& a, const Mat44V& b) { return Mat44V(M44MulV4(a,b.col0),M44MulV4(a,b.col1),M44MulV4(a,b.col2),M44MulV4(a,b.col3)); } NV_FORCE_INLINE Mat44V M44Add(const Mat44V& a, const Mat44V& b) { return Mat44V(V4Add(a.col0,b.col0),V4Add(a.col1,b.col1),V4Add(a.col2,b.col2),V4Add(a.col3,b.col3)); } NV_FORCE_INLINE Mat44V M44Trnsps(const Mat44V& a) { const Vec4V v0 = _mm_unpacklo_ps(a.col0, a.col2); const Vec4V v1 = _mm_unpackhi_ps(a.col0, a.col2); const Vec4V v2 = _mm_unpacklo_ps(a.col1, a.col3); const Vec4V v3 = _mm_unpackhi_ps(a.col1, a.col3); return Mat44V( _mm_unpacklo_ps(v0, v2),_mm_unpackhi_ps(v0, v2),_mm_unpacklo_ps(v1, v3),_mm_unpackhi_ps(v1, v3)); } NV_FORCE_INLINE Mat44V M44Inverse(const Mat44V& a) { __m128 minor0, minor1, minor2, minor3; __m128 row0, row1, row2, row3; __m128 det, tmp1; tmp1=V4Zero(); row1=V4Zero(); row3=V4Zero(); row0=a.col0; row1=_mm_shuffle_ps(a.col1,a.col1,_MM_SHUFFLE(1,0,3,2)); row2=a.col2; row3=_mm_shuffle_ps(a.col3,a.col3,_MM_SHUFFLE(1,0,3,2)); tmp1 = _mm_mul_ps(row2, row3); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1); minor0 = _mm_mul_ps(row1, tmp1); minor1 = _mm_mul_ps(row0, tmp1); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E); minor0 = _mm_sub_ps(_mm_mul_ps(row1, tmp1), minor0); minor1 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor1); minor1 = _mm_shuffle_ps(minor1, minor1, 0x4E); tmp1 = _mm_mul_ps(row1, row2); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1); minor0 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor0); minor3 = _mm_mul_ps(row0, tmp1); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E); minor0 = _mm_sub_ps(minor0, _mm_mul_ps(row3, tmp1)); minor3 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor3); minor3 = _mm_shuffle_ps(minor3, minor3, 0x4E); tmp1 = _mm_mul_ps(_mm_shuffle_ps(row1, row1, 0x4E), row3); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1); row2 = _mm_shuffle_ps(row2, row2, 0x4E); minor0 = _mm_add_ps(_mm_mul_ps(row2, tmp1), minor0); minor2 = _mm_mul_ps(row0, tmp1); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E); minor0 = _mm_sub_ps(minor0, _mm_mul_ps(row2, tmp1)); minor2 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor2); minor2 = _mm_shuffle_ps(minor2, minor2, 0x4E); tmp1 = _mm_mul_ps(row0, row1); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1); minor2 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor2); minor3 = _mm_sub_ps(_mm_mul_ps(row2, tmp1), minor3); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E); minor2 = _mm_sub_ps(_mm_mul_ps(row3, tmp1), minor2); minor3 = _mm_sub_ps(minor3, _mm_mul_ps(row2, tmp1)); tmp1 = _mm_mul_ps(row0, row3); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1); minor1 = _mm_sub_ps(minor1, _mm_mul_ps(row2, tmp1)); minor2 = _mm_add_ps(_mm_mul_ps(row1, tmp1), minor2); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E); minor1 = _mm_add_ps(_mm_mul_ps(row2, tmp1), minor1); minor2 = _mm_sub_ps(minor2, _mm_mul_ps(row1, tmp1)); tmp1 = _mm_mul_ps(row0, row2); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1); minor1 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor1); minor3 = _mm_sub_ps(minor3, _mm_mul_ps(row1, tmp1)); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E); minor1 = 
_mm_sub_ps(minor1, _mm_mul_ps(row3, tmp1)); minor3 = _mm_add_ps(_mm_mul_ps(row1, tmp1), minor3); det = _mm_mul_ps(row0, minor0); det = _mm_add_ps(_mm_shuffle_ps(det, det, 0x4E), det); det = _mm_add_ss(_mm_shuffle_ps(det, det, 0xB1), det); tmp1 = _mm_rcp_ss(det); #if 0 det = _mm_sub_ss(_mm_add_ss(tmp1, tmp1), _mm_mul_ss(det, _mm_mul_ss(tmp1, tmp1))); det = _mm_shuffle_ps(det, det, 0x00); #else det= _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(0,0,0,0)); #endif minor0 = _mm_mul_ps(det, minor0); minor1 = _mm_mul_ps(det, minor1); minor2 = _mm_mul_ps(det, minor2); minor3 = _mm_mul_ps(det, minor3); Mat44V invTrans(minor0,minor1,minor2,minor3); return M44Trnsps(invTrans); } NV_FORCE_INLINE Vec4V V4LoadXYZW(const float& x, const float& y, const float& z, const float& w) { return _mm_set_ps(w, z, y, x); } /* // AP: work in progress - use proper SSE intrinsics where possible NV_FORCE_INLINE VecU16V V4U32PK(VecU32V a, VecU32V b) { VecU16V result; result.m128_u16[0] = uint16_t(NvClamp<uint32_t>((a).m128_u32[0], 0, 0xFFFF)); result.m128_u16[1] = uint16_t(NvClamp<uint32_t>((a).m128_u32[1], 0, 0xFFFF)); result.m128_u16[2] = uint16_t(NvClamp<uint32_t>((a).m128_u32[2], 0, 0xFFFF)); result.m128_u16[3] = uint16_t(NvClamp<uint32_t>((a).m128_u32[3], 0, 0xFFFF)); result.m128_u16[4] = uint16_t(NvClamp<uint32_t>((b).m128_u32[0], 0, 0xFFFF)); result.m128_u16[5] = uint16_t(NvClamp<uint32_t>((b).m128_u32[1], 0, 0xFFFF)); result.m128_u16[6] = uint16_t(NvClamp<uint32_t>((b).m128_u32[2], 0, 0xFFFF)); result.m128_u16[7] = uint16_t(NvClamp<uint32_t>((b).m128_u32[3], 0, 0xFFFF)); return result; } */ NV_FORCE_INLINE VecU32V V4U32Sel(const BoolV c, const VecU32V a, const VecU32V b) { return m128_I2F(_mm_or_si128( _mm_andnot_si128(m128_F2I(c), m128_F2I(b)), _mm_and_si128(m128_F2I(c), m128_F2I(a)) )); } NV_FORCE_INLINE VecU32V V4U32or(VecU32V a, VecU32V b) { return m128_I2F(_mm_or_si128(m128_F2I(a), m128_F2I(b))); } NV_FORCE_INLINE VecU32V V4U32and(VecU32V a, VecU32V b) { return m128_I2F(_mm_and_si128(m128_F2I(a), m128_F2I(b))); } NV_FORCE_INLINE VecU32V V4U32Andc(VecU32V a, VecU32V b) { return m128_I2F(_mm_andnot_si128(m128_F2I(b), m128_F2I(a))); } /* NV_FORCE_INLINE VecU16V V4U16Or(VecU16V a, VecU16V b) { return m128_I2F(_mm_or_si128(m128_F2I(a), m128_F2I(b))); } */ /* NV_FORCE_INLINE VecU16V V4U16And(VecU16V a, VecU16V b) { return m128_I2F(_mm_and_si128(m128_F2I(a), m128_F2I(b))); } */ /* NV_FORCE_INLINE VecU16V V4U16Andc(VecU16V a, VecU16V b) { return m128_I2F(_mm_andnot_si128(m128_F2I(b), m128_F2I(a))); } */ NV_FORCE_INLINE VecI32V I4Load(const int32_t i) { return (_mm_load1_ps((float*)&i)); } NV_FORCE_INLINE VecI32V I4LoadU(const int32_t* i) { return _mm_loadu_ps((float*)i); } NV_FORCE_INLINE VecI32V I4LoadA(const int32_t* i) { return _mm_load_ps((float*)i); } NV_FORCE_INLINE VecI32V VecI32V_Add(const VecI32VArg a, const VecI32VArg b) { return m128_I2F(_mm_add_epi32(m128_F2I(a), m128_F2I(b))); } NV_FORCE_INLINE VecI32V VecI32V_Sub(const VecI32VArg a, const VecI32VArg b) { return m128_I2F(_mm_sub_epi32(m128_F2I(a), m128_F2I(b))); } NV_FORCE_INLINE BoolV VecI32V_IsGrtr(const VecI32VArg a, const VecI32VArg b) { return m128_I2F(_mm_cmpgt_epi32(m128_F2I(a), m128_F2I(b))); } NV_FORCE_INLINE BoolV VecI32V_IsEq(const VecI32VArg a, const VecI32VArg b) { return m128_I2F(_mm_cmpeq_epi32(m128_F2I(a), m128_F2I(b))); } NV_FORCE_INLINE VecI32V V4I32Sel(const BoolV c, const VecI32V a, const VecI32V b) { return V4U32Sel(c, a, b); } NV_FORCE_INLINE VecI32V VecI32V_Zero() { return V4Zero(); } NV_FORCE_INLINE VecI32V VecI32V_One() { return 
I4Load(1); } NV_FORCE_INLINE VecI32V VecI32V_Two() { return I4Load(2); } NV_FORCE_INLINE VecI32V VecI32V_MinusOne() { return I4Load(-1); } NV_FORCE_INLINE VecU32V U4Zero() { return U4Load(0); } NV_FORCE_INLINE VecU32V U4One() { return U4Load(1); } NV_FORCE_INLINE VecU32V U4Two() { return U4Load(2); } NV_FORCE_INLINE VecI32V VecI32V_Sel(const BoolV c, const VecI32VArg a, const VecI32VArg b) { VECMATHAOS_ASSERT(_VecMathTests::allElementsEqualBoolV(c,BTTTT()) || _VecMathTests::allElementsEqualBoolV(c,BFFFF())); return _mm_or_ps(_mm_andnot_ps(c, b), _mm_and_ps(c, a)); } NV_FORCE_INLINE VecShiftV VecI32V_PrepareShift(const VecI32VArg shift) { VecShiftV s; s.shift = VecI32V_Sel(BTFFF(), shift, VecI32V_Zero()); return s; } NV_FORCE_INLINE VecI32V VecI32V_LeftShift(const VecI32VArg a, const VecShiftVArg count) { return m128_I2F(_mm_sll_epi32(m128_F2I(a), m128_F2I(count.shift))); } NV_FORCE_INLINE VecI32V VecI32V_RightShift(const VecI32VArg a, const VecShiftVArg count) { return m128_I2F(_mm_srl_epi32(m128_F2I(a), m128_F2I(count.shift))); } NV_FORCE_INLINE VecI32V VecI32V_And(const VecI32VArg a, const VecI32VArg b) { return _mm_and_ps(a, b); } NV_FORCE_INLINE VecI32V VecI32V_Or(const VecI32VArg a, const VecI32VArg b) { return _mm_or_ps(a, b); } NV_FORCE_INLINE VecI32V VecI32V_GetX(const VecI32VArg a) { return _mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0)); } NV_FORCE_INLINE VecI32V VecI32V_GetY(const VecI32VArg a) { return _mm_shuffle_ps(a, a, _MM_SHUFFLE(1,1,1,1)); } NV_FORCE_INLINE VecI32V VecI32V_GetZ(const VecI32VArg a) { return _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2)); } NV_FORCE_INLINE VecI32V VecI32V_GetW(const VecI32VArg a) { return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3,3,3,3)); } NV_FORCE_INLINE void NvI32_From_VecI32V(const VecI32VArg a, int32_t* i) { _mm_store_ss((float*)i,a); } NV_FORCE_INLINE VecI32V VecI32V_Merge(const VecI32VArg a, const VecI32VArg b, const VecI32VArg c, const VecI32VArg d) { return V4Merge(a, b, c, d); } NV_FORCE_INLINE VecI32V VecI32V_From_BoolV(const BoolVArg a) { return a; } NV_FORCE_INLINE VecU32V VecU32V_From_BoolV(const BoolVArg a) { return a; } /* template<int a> NV_FORCE_INLINE VecI32V V4ISplat() { VecI32V result; result.m128_i32[0] = a; result.m128_i32[1] = a; result.m128_i32[2] = a; result.m128_i32[3] = a; return result; } template<uint32_t a> NV_FORCE_INLINE VecU32V V4USplat() { VecU32V result; result.m128_u32[0] = a; result.m128_u32[1] = a; result.m128_u32[2] = a; result.m128_u32[3] = a; return result; } */ /* NV_FORCE_INLINE void V4U16StoreAligned(VecU16V val, VecU16V* address) { *address = val; } */ NV_FORCE_INLINE void V4U32StoreAligned(VecU32V val, VecU32V* address) { *address = val; } NV_FORCE_INLINE Vec4V V4LoadAligned(Vec4V* addr) { return *addr; } NV_FORCE_INLINE Vec4V V4LoadUnaligned(Vec4V* addr) { return V4LoadU((float*)addr); } NV_FORCE_INLINE Vec4V V4Andc(const Vec4V a, const VecU32V b) { VecU32V result32(a); result32 = V4U32Andc(result32, b); return Vec4V(result32); } NV_FORCE_INLINE VecU32V V4IsGrtrV32u(const Vec4V a, const Vec4V b) { return V4IsGrtr(a, b); } NV_FORCE_INLINE VecU16V V4U16LoadAligned(VecU16V* addr) { return *addr; } NV_FORCE_INLINE VecU16V V4U16LoadUnaligned(VecU16V* addr) { return *addr; } NV_FORCE_INLINE VecU16V V4U16CompareGt(VecU16V a, VecU16V b) { // _mm_cmpgt_epi16 doesn't work for unsigned values unfortunately // return m128_I2F(_mm_cmpgt_epi16(m128_F2I(a), m128_F2I(b))); VecU16V result; result.m128_u16[0] = (a).m128_u16[0]>(b).m128_u16[0]; result.m128_u16[1] = (a).m128_u16[1]>(b).m128_u16[1]; result.m128_u16[2] = 
(a).m128_u16[2]>(b).m128_u16[2]; result.m128_u16[3] = (a).m128_u16[3]>(b).m128_u16[3]; result.m128_u16[4] = (a).m128_u16[4]>(b).m128_u16[4]; result.m128_u16[5] = (a).m128_u16[5]>(b).m128_u16[5]; result.m128_u16[6] = (a).m128_u16[6]>(b).m128_u16[6]; result.m128_u16[7] = (a).m128_u16[7]>(b).m128_u16[7]; return result; } NV_FORCE_INLINE VecU16V V4I16CompareGt(VecU16V a, VecU16V b) { return m128_I2F(_mm_cmpgt_epi16(m128_F2I(a), m128_F2I(b))); } NV_FORCE_INLINE Vec4V Vec4V_From_VecU32V(VecU32V a) { Vec4V result = V4LoadXYZW(float(a.m128_u32[0]), float(a.m128_u32[1]), float(a.m128_u32[2]), float(a.m128_u32[3])); return result; } NV_FORCE_INLINE Vec4V Vec4V_From_VecI32V(VecI32V in) { return _mm_cvtepi32_ps(m128_F2I(in)); } NV_FORCE_INLINE VecI32V VecI32V_From_Vec4V(Vec4V a) { return _mm_cvttps_epi32(a); } NV_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecU32V(VecU32V a) { return Vec4V(a); } NV_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecI32V(VecI32V a) { return Vec4V(a); } NV_FORCE_INLINE VecU32V VecU32V_ReinterpretFrom_Vec4V(Vec4V a) { return VecU32V(a); } NV_FORCE_INLINE VecI32V VecI32V_ReinterpretFrom_Vec4V(Vec4V a) { return VecI32V(a); } /* template<int index> NV_FORCE_INLINE BoolV BSplatElement(BoolV a) { BoolV result; result[0] = result[1] = result[2] = result[3] = a[index]; return result; } */ template<int index> BoolV BSplatElement(BoolV a) { float* data = (float*)&a; return V4Load(data[index]); } template<int index> NV_FORCE_INLINE VecU32V V4U32SplatElement(VecU32V a) { VecU32V result; result.m128_u32[0] = result.m128_u32[1] = result.m128_u32[2] = result.m128_u32[3] = a.m128_u32[index]; return result; } template<int index> NV_FORCE_INLINE Vec4V V4SplatElement(Vec4V a) { float* data = (float*)&a; return V4Load(data[index]); } template<int index> NV_FORCE_INLINE VecU16V V4U16SplatElement(VecU16V a) { VecU16V result; for (int i = 0; i < 8; i ++) result.m128_u16[i] = a.m128_u16[index]; return result; } template<int imm> NV_FORCE_INLINE VecI16V V4I16SplatImmediate() { VecI16V result; result.m128_i16[0] = imm; result.m128_i16[1] = imm; result.m128_i16[2] = imm; result.m128_i16[3] = imm; result.m128_i16[4] = imm; result.m128_i16[5] = imm; result.m128_i16[6] = imm; result.m128_i16[7] = imm; return result; } template<uint16_t imm> NV_FORCE_INLINE VecU16V V4U16SplatImmediate() { VecU16V result; result.m128_u16[0] = imm; result.m128_u16[1] = imm; result.m128_u16[2] = imm; result.m128_u16[3] = imm; result.m128_u16[4] = imm; result.m128_u16[5] = imm; result.m128_u16[6] = imm; result.m128_u16[7] = imm; return result; } NV_FORCE_INLINE VecU16V V4U16SubtractModulo(VecU16V a, VecU16V b) { return m128_I2F(_mm_sub_epi16(m128_F2I(a), m128_F2I(b))); } NV_FORCE_INLINE VecU16V V4U16AddModulo(VecU16V a, VecU16V b) { return m128_I2F(_mm_add_epi16(m128_F2I(a), m128_F2I(b))); } NV_FORCE_INLINE VecU32V V4U16GetLo16(VecU16V a) { VecU32V result; result.m128_u32[0] = a.m128_u16[0]; result.m128_u32[1] = a.m128_u16[2]; result.m128_u32[2] = a.m128_u16[4]; result.m128_u32[3] = a.m128_u16[6]; return result; } NV_FORCE_INLINE VecU32V V4U16GetHi16(VecU16V a) { VecU32V result; result.m128_u32[0] = a.m128_u16[1]; result.m128_u32[1] = a.m128_u16[3]; result.m128_u32[2] = a.m128_u16[5]; result.m128_u32[3] = a.m128_u16[7]; return result; } NV_FORCE_INLINE VecU32V VecU32VLoadXYZW(uint32_t x, uint32_t y, uint32_t z, uint32_t w) { VecU32V result; result.m128_u32[0] = x; result.m128_u32[1] = y; result.m128_u32[2] = z; result.m128_u32[3] = w; return result; } NV_FORCE_INLINE Vec4V V4Ceil(const Vec4V in) { UnionM128 a(in); return 
V4LoadXYZW(NvCeil(a.m128_f32[0]), NvCeil(a.m128_f32[1]), NvCeil(a.m128_f32[2]), NvCeil(a.m128_f32[3])); } NV_FORCE_INLINE Vec4V V4Floor(const Vec4V in) { UnionM128 a(in); return V4LoadXYZW(NvFloor(a.m128_f32[0]), NvFloor(a.m128_f32[1]), NvFloor(a.m128_f32[2]), NvFloor(a.m128_f32[3])); } NV_FORCE_INLINE VecU32V V4ConvertToU32VSaturate(const Vec4V in, uint32_t power) { NV_ASSERT(power == 0 && "Non-zero power not supported in convertToU32VSaturate"); NV_UNUSED(power); // prevent warning in release builds float ffffFFFFasFloat = float(0xFFFF0000); UnionM128 a(in); VecU32V result; result.m128_u32[0] = uint32_t(NvClamp<float>((a).m128_f32[0], 0.0f, ffffFFFFasFloat)); result.m128_u32[1] = uint32_t(NvClamp<float>((a).m128_f32[1], 0.0f, ffffFFFFasFloat)); result.m128_u32[2] = uint32_t(NvClamp<float>((a).m128_f32[2], 0.0f, ffffFFFFasFloat)); result.m128_u32[3] = uint32_t(NvClamp<float>((a).m128_f32[3], 0.0f, ffffFFFFasFloat)); return result; } #endif //PS_UNIX_SSE2_INLINE_AOS_H
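#if 0 // Minimal usage sketch (illustration only); the function name `exampleQuantizeToU32` is hypothetical.
// Shows how the rounding and saturating-conversion helpers defined above compose,
// assuming this header has been pulled in through the usual vec-math include path.
static VecU32V exampleQuantizeToU32(const Vec4V v)
{
    // Round each lane toward negative infinity, then convert to unsigned integers,
    // clamping to the [0, 0xFFFF0000] range enforced by V4ConvertToU32VSaturate.
    const Vec4V floored = V4Floor(v);
    return V4ConvertToU32VSaturate(floored, 0 /* only power == 0 is supported */);
}
#endif // usage sketch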
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/platform/unix/sse2/NsUnixSse2AoS.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef PS_UNIX_SSE2_AOS_H #define PS_UNIX_SSE2_AOS_H // no includes here! this file should be included from NvcVecMath.h only!!! #if !COMPILE_VECTOR_INTRINSICS #error Vector intrinsics should not be included when using scalar implementation. #endif typedef union UnionM128 { UnionM128(){} UnionM128(__m128 in) { m128 = in; } UnionM128(__m128i in) { m128i = in; } operator __m128() { return m128; } operator const __m128() const { return m128; } float m128_f32[4]; __int8_t m128_i8[16]; __int16_t m128_i16[8]; __int32_t m128_i32[4]; __int64_t m128_i64[2]; __uint16_t m128_u16[8]; __uint32_t m128_u32[4]; __uint64_t m128_u64[2]; __m128 m128; __m128i m128i; } UnionM128; typedef __m128 FloatV; typedef __m128 Vec3V; typedef __m128 Vec4V; typedef __m128 BoolV; typedef __m128 QuatV; //typedef __m128 VecU32V; //typedef __m128 VecI32V; //typedef __m128 VecU16V; //typedef __m128 VecI16V; //typedef __m128 VecU8V; typedef UnionM128 VecU32V; typedef UnionM128 VecI32V; typedef UnionM128 VecU16V; typedef UnionM128 VecI16V; typedef UnionM128 VecU8V; #define FloatVArg FloatV& #define Vec3VArg Vec3V& #define Vec4VArg Vec4V& #define BoolVArg BoolV& #define VecU32VArg VecU32V& #define VecI32VArg VecI32V& #define VecU16VArg VecU16V& #define VecI16VArg VecI16V& #define VecU8VArg VecU8V& #define QuatVArg QuatV& //Optimization for situations in which you cross product multiple vectors with the same vector. 
//Avoids 2X shuffles per product struct VecCrossV { Vec3V mL1; Vec3V mR1; }; struct VecShiftV { VecI32V shift; }; #define VecShiftVArg VecShiftV& NV_ALIGN_PREFIX(16) struct Mat33V { Mat33V(){} Mat33V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2) : col0(c0), col1(c1), col2(c2) { } Vec3V NV_ALIGN(16,col0); Vec3V NV_ALIGN(16,col1); Vec3V NV_ALIGN(16,col2); }NV_ALIGN_SUFFIX(16); NV_ALIGN_PREFIX(16) struct Mat34V { Mat34V(){} Mat34V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2, const Vec3V& c3) : col0(c0), col1(c1), col2(c2), col3(c3) { } Vec3V NV_ALIGN(16,col0); Vec3V NV_ALIGN(16,col1); Vec3V NV_ALIGN(16,col2); Vec3V NV_ALIGN(16,col3); }NV_ALIGN_SUFFIX(16); NV_ALIGN_PREFIX(16) struct Mat43V { Mat43V(){} Mat43V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2) : col0(c0), col1(c1), col2(c2) { } Vec4V NV_ALIGN(16,col0); Vec4V NV_ALIGN(16,col1); Vec4V NV_ALIGN(16,col2); }NV_ALIGN_SUFFIX(16); NV_ALIGN_PREFIX(16) struct Mat44V { Mat44V(){} Mat44V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2, const Vec4V& c3) : col0(c0), col1(c1), col2(c2), col3(c3) { } Vec4V NV_ALIGN(16,col0); Vec4V NV_ALIGN(16,col1); Vec4V NV_ALIGN(16,col2); Vec4V NV_ALIGN(16,col3); }NV_ALIGN_SUFFIX(16); #endif //PS_UNIX_SSE2_AOS_H
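#if 0 // Minimal usage sketch (illustration only); the function name `exampleSumLanesU32` is hypothetical.
// The UnionM128-based typedefs above exist so the integer vector types (VecU32V, VecI32V, ...)
// can be read lane by lane while still converting implicitly to __m128 for the SSE2 intrinsics.
static uint32_t exampleSumLanesU32(const VecU32V v)
{
    // Direct lane access through the union members declared above.
    return v.m128_u32[0] + v.m128_u32[1] + v.m128_u32[2] + v.m128_u32[3];
}
#endif // usage sketch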
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/platform/unix/neon/NsUnixNeonAoS.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef PS_UNIX_NEON_AOS_H #define PS_UNIX_NEON_AOS_H // no includes here! this file should be included from NvcVecMath.h only!!! #if !COMPILE_VECTOR_INTRINSICS #error Vector intrinsics should not be included when using scalar implementation. 
#endif // only ARM NEON compatible platforms should reach this #include <arm_neon.h> typedef float32x2_t FloatV; typedef float32x4_t Vec3V; typedef float32x4_t Vec4V; typedef uint32x4_t BoolV; typedef float32x4_t QuatV; typedef uint32x4_t VecU32V; typedef int32x4_t VecI32V; typedef uint16x8_t VecU16V; typedef int16x8_t VecI16V; typedef uint8x16_t VecU8V; #define FloatVArg FloatV& #define Vec3VArg Vec3V& #define Vec4VArg Vec4V& #define BoolVArg BoolV& #define VecU32VArg VecU32V& #define VecI32VArg VecI32V& #define VecU16VArg VecU16V& #define VecI16VArg VecI16V& #define VecU8VArg VecU8V& #define QuatVArg QuatV& //KS - TODO - make an actual VecCrossV type for NEON #define VecCrossV Vec3V typedef VecI32V VecShiftV; #define VecShiftVArg VecShiftV& NV_ALIGN_PREFIX(16) struct Mat33V { Mat33V(){} Mat33V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2) : col0(c0), col1(c1), col2(c2) { } Vec3V NV_ALIGN(16,col0); Vec3V NV_ALIGN(16,col1); Vec3V NV_ALIGN(16,col2); }NV_ALIGN_SUFFIX(16); NV_ALIGN_PREFIX(16) struct Mat34V { Mat34V(){} Mat34V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2, const Vec3V& c3) : col0(c0), col1(c1), col2(c2), col3(c3) { } Vec3V NV_ALIGN(16,col0); Vec3V NV_ALIGN(16,col1); Vec3V NV_ALIGN(16,col2); Vec3V NV_ALIGN(16,col3); }NV_ALIGN_SUFFIX(16); NV_ALIGN_PREFIX(16) struct Mat43V { Mat43V(){} Mat43V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2) : col0(c0), col1(c1), col2(c2) { } Vec4V NV_ALIGN(16,col0); Vec4V NV_ALIGN(16,col1); Vec4V NV_ALIGN(16,col2); }NV_ALIGN_SUFFIX(16); NV_ALIGN_PREFIX(16) struct Mat44V { Mat44V(){} Mat44V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2, const Vec4V& c3) : col0(c0), col1(c1), col2(c2), col3(c3) { } Vec4V NV_ALIGN(16,col0); Vec4V NV_ALIGN(16,col1); Vec4V NV_ALIGN(16,col2); Vec4V NV_ALIGN(16,col3); }NV_ALIGN_SUFFIX(16); #endif //PS_UNIX_NEON_AOS_H
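#if 0 // Minimal usage sketch (illustration only); `exampleIdentityMat33` is a hypothetical name.
// The matrix wrappers above are plain column-major aggregates of NEON registers, so a
// matrix can be assembled directly from float32x4_t columns built with raw intrinsics.
static Mat33V exampleIdentityMat33()
{
    const float32x4_t zero = vdupq_n_f32(0.0f);
    const Vec3V col0 = vsetq_lane_f32(1.0f, zero, 0); // (1, 0, 0, 0)
    const Vec3V col1 = vsetq_lane_f32(1.0f, zero, 1); // (0, 1, 0, 0)
    const Vec3V col2 = vsetq_lane_f32(1.0f, zero, 2); // (0, 0, 1, 0)
    return Mat33V(col0, col1, col2);
}
#endif // usage sketch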
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/platform/unix/neon/NsUnixNeonInlineAoS.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef PS_UNIX_NEON_INLINE_AOS_H #define PS_UNIX_NEON_INLINE_AOS_H #if !COMPILE_VECTOR_INTRINSICS #error Vector intrinsics should not be included when using scalar implementation. #endif // improved estimates #define VRECIPEQ recipq_newton<1> #define VRECIPE recip_newton<1> #define VRECIPSQRTEQ rsqrtq_newton<1> #define VRECIPSQRTE rsqrt_newton<1> // "exact" #define VRECIPQ recipq_newton<4> #define VRECIP recip_newton<4> #define VRECIPSQRTQ rsqrtq_newton<4> #define VRECIPSQRT rsqrt_newton<4> #define VECMATH_AOS_EPSILON (1e-3f) //Remove this define when all platforms use simd solver. 
#define NV_SUPPORT_SIMD namespace internalUnitNeonSimd { NV_FORCE_INLINE uint32_t BAllTrue4_R(const BoolV a) { const uint16x4_t dHigh = vget_high_u16(vreinterpretq_u16_u32(a)); const uint16x4_t dLow = vmovn_u32(a); uint16x8_t combined = vcombine_u16(dLow, dHigh); const uint32x2_t finalReduce = vreinterpret_u32_u8(vmovn_u16(combined)); return uint32_t(vget_lane_u32(finalReduce, 0) == 0xffffFFFF); } NV_FORCE_INLINE uint32_t BAnyTrue4_R(const BoolV a) { const uint16x4_t dHigh = vget_high_u16(vreinterpretq_u16_u32(a)); const uint16x4_t dLow = vmovn_u32(a); uint16x8_t combined = vcombine_u16(dLow, dHigh); const uint32x2_t finalReduce = vreinterpret_u32_u8(vmovn_u16(combined)); return uint32_t(vget_lane_u32(finalReduce, 0) != 0x0); } NV_FORCE_INLINE uint32_t BAllTrue3_R(const BoolV a) { const uint16x4_t dHigh = vget_high_u16(vreinterpretq_u16_u32(a)); const uint16x4_t dLow = vmovn_u32(a); uint16x8_t combined = vcombine_u16(dLow, dHigh); const uint32x2_t finalReduce = vreinterpret_u32_u8(vmovn_u16(combined)); return uint32_t((vget_lane_u32(finalReduce, 0) & 0xffFFff) == 0xffFFff); } NV_FORCE_INLINE uint32_t BAnyTrue3_R(const BoolV a) { const uint16x4_t dHigh = vget_high_u16(vreinterpretq_u16_u32(a)); const uint16x4_t dLow = vmovn_u32(a); uint16x8_t combined = vcombine_u16(dLow, dHigh); const uint32x2_t finalReduce = vreinterpret_u32_u8(vmovn_u16(combined)); return uint32_t((vget_lane_u32(finalReduce, 0) & 0xffFFff) != 0); } } namespace _VecMathTests { NV_FORCE_INLINE bool allElementsEqualFloatV(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return vget_lane_u32(vceq_f32(a, b), 0) != 0; } NV_FORCE_INLINE bool allElementsEqualVec3V(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); return V3AllEq(a, b) != 0; } NV_FORCE_INLINE bool allElementsEqualVec4V(const Vec4V a, const Vec4V b) { return V4AllEq(a, b) != 0; } NV_FORCE_INLINE bool allElementsEqualBoolV(const BoolV a, const BoolV b) { return internalUnitNeonSimd::BAllTrue4_R(vceqq_u32(a, b)) != 0; } NV_FORCE_INLINE uint32_t V4U32AllEq(const VecU32V a, const VecU32V b) { return internalUnitNeonSimd::BAllTrue4_R(V4IsEqU32(a, b)); } NV_FORCE_INLINE bool allElementsEqualVecU32V(const VecU32V a, const VecU32V b) { return V4U32AllEq(a, b) != 0; } NV_FORCE_INLINE BoolV V4IsEqI32(const VecI32V a, const VecI32V b) { return vceqq_s32(a, b); } NV_FORCE_INLINE uint32_t V4I32AllEq(const VecI32V a, const VecI32V b) { return internalUnitNeonSimd::BAllTrue4_R(V4IsEqI32(a, b)); } NV_FORCE_INLINE bool allElementsEqualVecI32V(const VecI32V a, const VecI32V b) { return V4I32AllEq(a, b) != 0; } NV_FORCE_INLINE bool allElementsNearEqualFloatV(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); const float32x2_t c = vsub_f32(a, b); const float32x2_t error = vdup_n_f32(VECMATH_AOS_EPSILON); // absolute compare abs(error) > abs(c) #if NV_WINRT const uint32x2_t greater = vacgt_f32(error, c); #else const uint32x2_t greater = vcagt_f32(error, c); #endif const uint32x2_t min = vpmin_u32(greater, greater); return vget_lane_u32(min, 0) != 0x0; } NV_FORCE_INLINE bool allElementsNearEqualVec3V(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); const float32x4_t c = vsubq_f32(a, b); const float32x4_t error = vdupq_n_f32(VECMATH_AOS_EPSILON); // absolute compare abs(error) > abs(c) #if NV_WINRT const uint32x4_t greater = vacgtq_f32(error, c); #else const 
uint32x4_t greater = vcagtq_f32(error, c); #endif return internalUnitNeonSimd::BAllTrue3_R(greater) != 0; } NV_FORCE_INLINE bool allElementsNearEqualVec4V(const Vec4V a, const Vec4V b) { const float32x4_t c = vsubq_f32(a, b); const float32x4_t error = vdupq_n_f32(VECMATH_AOS_EPSILON); // absolute compare abs(error) > abs(c) #if NV_WINRT const uint32x4_t greater = vacgtq_f32(error, c); #else const uint32x4_t greater = vcagtq_f32(error, c); #endif return internalUnitNeonSimd::BAllTrue4_R(greater) != 0x0; } } #if 0 // debugging printfs #include <stdio.h> NV_FORCE_INLINE void printVec(const float32x4_t& v, const char* name) { NV_ALIGN(16, float32_t) data[4]; vst1q_f32(data, v); printf("%s: (%f, %f, %f, %f)\n", name, data[0], data[1], data[2], data[3]); } NV_FORCE_INLINE void printVec(const float32x2_t& v, const char* name) { NV_ALIGN(16, float32_t) data[2]; vst1_f32(data, v); printf("%s: (%f, %f)\n", name, data[0], data[1]); } NV_FORCE_INLINE void printVec(const uint32x4_t& v, const char* name) { NV_ALIGN(16, uint32_t) data[4]; vst1q_u32(data, v); printf("%s: (0x%x, 0x%x, 0x%x, 0x%x)\n", name, data[0], data[1], data[2], data[3]); } NV_FORCE_INLINE void printVec(const uint16x8_t& v, const char* name) { NV_ALIGN(16, uint16_t) data[8]; vst1q_u16(data, v); printf("%s: (0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n", name, data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7]); } NV_FORCE_INLINE void printVec(const int32x4_t& v, const char* name) { NV_ALIGN(16, int32_t) data[4]; vst1q_s32(data, v); printf("%s: (0x%x, 0x%x, 0x%x, 0x%x)\n", name, data[0], data[1], data[2], data[3]); } NV_FORCE_INLINE void printVec(const int16x8_t& v, const char* name) { NV_ALIGN(16, int16_t) data[8]; vst1q_s16(data, v); printf("%s: (0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n", name, data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7]); } NV_FORCE_INLINE void printVec(const uint16x4_t& v, const char* name) { NV_ALIGN(16, uint16_t) data[4]; vst1_u16(data, v); printf("%s: (0x%x, 0x%x, 0x%x, 0x%x)\n", name, data[0], data[1], data[2], data[3]); } NV_FORCE_INLINE void printVec(const uint32x2_t& v, const char* name) { NV_ALIGN(16, uint32_t) data[2]; vst1_u32(data, v); printf("%s: (0x%x, 0x%x)\n", name, data[0], data[1]); } NV_FORCE_INLINE void printVar(const uint32_t v, const char* name) { printf("%s: 0x%x\n", name, v); } NV_FORCE_INLINE void printVar(const float v, const char* name) { printf("%s: %f\n", name, v); } #define PRINT_VAR(X) printVar((X), #X) #define PRINT_VEC(X) printVec((X), #X) #define PRINT_VEC_TITLE(TITLE, X) printVec((X), TITLE #X) #endif // debugging printf ///////////////////////////////////////////////////////////////////// ////FUNCTIONS USED ONLY FOR ASSERTS IN VECTORISED IMPLEMENTATIONS ///////////////////////////////////////////////////////////////////// NV_FORCE_INLINE bool isValidFloatV(const FloatV a) { NV_ALIGN(16,float) data[4]; vst1_f32((float32_t*)data, a); if(isFiniteFloatV(a)) return data[0] == data[1]; else { uint32_t* intData = (uint32_t*)data; return intData[0] == intData[1]; } } NV_FORCE_INLINE bool isValidVec3V(const Vec3V a) { const float32_t w = vgetq_lane_f32(a, 3); if(isFiniteVec3V(a)) return w == 0.0f; else { NV_ALIGN(16,float) data[4]; vst1q_f32((float32_t*)data, a); uint32_t* intData = (uint32_t*)data; return !intData[3] || ((intData[0] == intData[1]) && (intData[0] == intData[2]) && (intData[0] == intData[3])); } } NV_FORCE_INLINE bool isFiniteFloatV(const FloatV a) { NV_ALIGN(16,float) data[4]; vst1_f32((float32_t*)data, a); return 
NvIsFinite(data[0]) && NvIsFinite(data[1]); } NV_FORCE_INLINE bool isFiniteVec3V(const Vec3V a) { NV_ALIGN(16,float) data[4]; vst1q_f32((float32_t*)data, a); return NvIsFinite(data[0]) && NvIsFinite(data[1]) && NvIsFinite(data[2]); } NV_FORCE_INLINE bool isFiniteVec4V(const Vec4V a) { NV_ALIGN(16,float) data[4]; vst1q_f32((float32_t*)data, a); return NvIsFinite(data[0]) && NvIsFinite(data[1]) && NvIsFinite(data[2]) && NvIsFinite(data[3]); } NV_FORCE_INLINE bool hasZeroElementinFloatV(const FloatV a) { VECMATHAOS_ASSERT(isValidFloatV(a)); return vget_lane_u32(vreinterpret_u32_f32(a), 0) == 0; } NV_FORCE_INLINE bool hasZeroElementInVec3V(const Vec3V a) { const uint32x2_t dLow = vget_low_u32(vreinterpretq_u32_f32(a)); const uint32x2_t dMin = vpmin_u32(dLow, dLow); return vget_lane_u32(dMin, 0) == 0 || vgetq_lane_u32(vreinterpretq_u32_f32(a), 2) == 0; } NV_FORCE_INLINE bool hasZeroElementInVec4V(const Vec4V a) { const uint32x2_t dHigh = vget_high_u32(vreinterpretq_u32_f32(a)); const uint32x2_t dLow = vget_low_u32(vreinterpretq_u32_f32(a)); const uint32x2_t dMin = vmin_u32(dHigh, dLow); const uint32x2_t pairMin = vpmin_u32(dMin, dMin); return vget_lane_u32(pairMin, 0) == 0; } ///////////////////////////////////////////////////////////////////// ////VECTORISED FUNCTION IMPLEMENTATIONS ///////////////////////////////////////////////////////////////////// NV_FORCE_INLINE FloatV FLoad(const float f) { return vdup_n_f32(reinterpret_cast<const float32_t&>(f)); } NV_FORCE_INLINE FloatV FLoadA(const float* const f) { VECMATHAOS_ASSERT(0 == ((uint64_t)f & 0x0f)); return vld1_f32((const float32_t*)f); } NV_FORCE_INLINE Vec3V V3Load(const float f) { NV_ALIGN(16, float) data[4] = {f, f, f, 0.0f}; return V4LoadA(data); } NV_FORCE_INLINE Vec4V V4Load(const float f) { return vdupq_n_f32(reinterpret_cast<const float32_t&>(f)); } NV_FORCE_INLINE BoolV BLoad(const bool f) { const uint32_t i=uint32_t(-(int32_t)f); return vdupq_n_u32(i); } NV_FORCE_INLINE Vec3V V3LoadA(const NvVec3& f) { VECMATHAOS_ASSERT(0 == ((size_t)&f & 0x0f)); NV_ALIGN(16, float) data[4] = {f.x, f.y, f.z, 0.0f}; return V4LoadA(data); } NV_FORCE_INLINE Vec3V V3LoadU(const NvVec3& f) { NV_ALIGN(16, float) data[4] = {f.x, f.y, f.z, 0.0f}; return V4LoadA(data); } NV_FORCE_INLINE Vec3V V3LoadUnsafeA(const NvVec3& f) { NV_ALIGN(16, float) data[4] = {f.x, f.y, f.z, 0.0f}; return V4LoadA(data); } NV_FORCE_INLINE Vec3V V3LoadA(const float* f) { VECMATHAOS_ASSERT(0 == ((size_t)&f & 0x0f)); NV_ALIGN(16, float) data[4] = {f[0], f[1], f[2], 0.0f}; return V4LoadA(data); } NV_FORCE_INLINE Vec3V V3LoadU(const float* f) { NV_ALIGN(16, float) data[4] = {f[0], f[1], f[2], 0.0f}; return V4LoadA(data); } NV_FORCE_INLINE Vec3V Vec3V_From_Vec4V(Vec4V v) { return vsetq_lane_f32(0.0f, v, 3); } NV_FORCE_INLINE Vec3V Vec3V_From_Vec4V_WUndefined(Vec4V v) { return v; } NV_FORCE_INLINE Vec4V Vec4V_From_Vec3V(Vec3V f) { return f; //ok if it is implemented as the same type. 
} NV_FORCE_INLINE Vec4V Vec4V_From_FloatV(FloatV f) { return vcombine_f32(f, f); } NV_FORCE_INLINE Vec3V Vec3V_From_FloatV(FloatV f) { return Vec3V_From_Vec4V(Vec4V_From_FloatV(f)); } NV_FORCE_INLINE Vec3V Vec3V_From_FloatV_WUndefined(FloatV f) { return Vec3V_From_Vec4V_WUndefined(Vec4V_From_FloatV(f)); } NV_FORCE_INLINE Vec4V Vec4V_From_NvVec3_WUndefined(const NvVec3& f) { NV_ALIGN(16, float) data[4] = {f.x, f.y, f.z, 0.0f}; return V4LoadA(data); } NV_FORCE_INLINE Mat33V Mat33V_From_NvMat33(const NvMat33 &m) { return Mat33V(V3LoadU(m.column0), V3LoadU(m.column1), V3LoadU(m.column2)); } NV_FORCE_INLINE void NvMat33_From_Mat33V(const Mat33V &m, NvMat33 &out) { NV_ASSERT((size_t(&out)&15)==0); V3StoreU(m.col0, out.column0); V3StoreU(m.col1, out.column1); V3StoreU(m.col2, out.column2); } NV_FORCE_INLINE Vec4V V4LoadA(const float* const f) { VECMATHAOS_ASSERT(0 == ((uint64_t)f & 0x0f)); return vld1q_f32((const float32_t*)f); } NV_FORCE_INLINE void V4StoreA(Vec4V a, float* f) { VECMATHAOS_ASSERT(0 == ((uint64_t)f & 0x0f)); vst1q_f32((float32_t*)f,a); } NV_FORCE_INLINE void V4StoreU(const Vec4V a, float* f) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(0 == ((int)&a & 0x0F)); NV_ALIGN(16,float) f2[4]; vst1q_f32((float32_t*)f2, a); f[0] = f2[0]; f[1] = f2[1]; f[2] = f2[2]; f[3] = f2[3]; } NV_FORCE_INLINE void BStoreA(const BoolV a, uint32_t* u) { VECMATHAOS_ASSERT(0 == ((uint64_t)u & 0x0f)); vst1q_u32((uint32_t*)u,a); } NV_FORCE_INLINE void U4StoreA(const VecU32V uv, uint32_t* u) { VECMATHAOS_ASSERT(0 == ((uint64_t)u & 0x0f)); vst1q_u32((uint32_t*)u,uv); } NV_FORCE_INLINE void I4StoreA(const VecI32V iv, int32_t* i) { VECMATHAOS_ASSERT(0 == ((uint64_t)i & 0x0f)); vst1q_s32((int32_t*)i,iv); } NV_FORCE_INLINE Vec4V V4LoadU(const float* const f) { return vld1q_f32((const float32_t*)f); } NV_FORCE_INLINE BoolV BLoad(const bool* const f) { const NV_ALIGN(16, uint32_t) b[4]={(uint32_t)(-(int32_t)f[0]), (uint32_t)(-(int32_t)f[1]), (uint32_t)(-(int32_t)f[2]), (uint32_t)(-(int32_t)f[3])}; return vld1q_u32(b); } NV_FORCE_INLINE float FStore(const FloatV a) { VECMATHAOS_ASSERT(isValidFloatV(a)); float f = vget_lane_f32(a, 0); return f; } NV_FORCE_INLINE void FStore(const FloatV a, float* NV_RESTRICT f) { VECMATHAOS_ASSERT(isValidFloatV(a)); //vst1q_lane_f32(f, a, 0); // causes vst1 alignment bug *f = vget_lane_f32(a, 0); } NV_FORCE_INLINE void V3StoreA(const Vec3V a, NvVec3& f) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(0 == ((int)&a & 0x0F)); VECMATHAOS_ASSERT(0 == ((int)&f & 0x0F)); NV_ALIGN(16,float) f2[4]; vst1q_f32((float32_t*)f2, a); f = NvVec3(f2[0], f2[1], f2[2]); } NV_FORCE_INLINE void V3StoreU(const Vec3V a, NvVec3& f) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(0 == ((int)&a & 0x0F)); NV_ALIGN(16,float) f2[4]; vst1q_f32((float32_t*)f2, a); f = NvVec3(f2[0], f2[1], f2[2]); } ////////////////////////////////// //FLOATV ////////////////////////////////// NV_FORCE_INLINE FloatV FZero() { return FLoad(0.0f); } NV_FORCE_INLINE FloatV FOne() { return FLoad(1.0f); } NV_FORCE_INLINE FloatV FHalf() { return FLoad(0.5f); } NV_FORCE_INLINE FloatV FEps() { return FLoad(NV_EPS_REAL); } NV_FORCE_INLINE FloatV FEps6() { return FLoad(1e-6f); } NV_FORCE_INLINE FloatV FMax() { return FLoad(NV_MAX_REAL); } NV_FORCE_INLINE FloatV FNegMax() { return FLoad(-NV_MAX_REAL); } NV_FORCE_INLINE FloatV IZero() { return vreinterpret_f32_u32(vdup_n_u32(0)); } NV_FORCE_INLINE FloatV IOne() { return vreinterpret_f32_u32(vdup_n_u32(1)); } NV_FORCE_INLINE FloatV ITwo() { return 
vreinterpret_f32_u32(vdup_n_u32(2)); } NV_FORCE_INLINE FloatV IThree() { return vreinterpret_f32_u32(vdup_n_u32(3)); } NV_FORCE_INLINE FloatV IFour() { return vreinterpret_f32_u32(vdup_n_u32(4)); } NV_FORCE_INLINE FloatV FNeg(const FloatV f) { VECMATHAOS_ASSERT(isValidFloatV(f)); return vneg_f32(f); } NV_FORCE_INLINE FloatV FAdd(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return vadd_f32(a, b); } NV_FORCE_INLINE FloatV FSub(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return vsub_f32(a, b); } NV_FORCE_INLINE FloatV FMul(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return vmul_f32(a, b); } template <int n> NV_FORCE_INLINE float32x2_t recip_newton(const float32x2_t& in) { float32x2_t recip = vrecpe_f32(in); for(int i=0; i<n; ++i) recip = vmul_f32(recip, vrecps_f32(in, recip)); return recip; } template <int n> NV_FORCE_INLINE float32x4_t recipq_newton(const float32x4_t& in) { float32x4_t recip = vrecpeq_f32(in); for(int i=0; i<n; ++i) recip = vmulq_f32(recip, vrecpsq_f32(recip, in)); return recip; } template <int n> NV_FORCE_INLINE float32x2_t rsqrt_newton(const float32x2_t& in) { float32x2_t rsqrt = vrsqrte_f32(in); for(int i=0; i<n; ++i) rsqrt = vmul_f32(rsqrt, vrsqrts_f32(vmul_f32(rsqrt, rsqrt), in)); return rsqrt; } template <int n> NV_FORCE_INLINE float32x4_t rsqrtq_newton(const float32x4_t& in) { float32x4_t rsqrt = vrsqrteq_f32(in); for(int i=0; i<n; ++i) rsqrt = vmulq_f32(rsqrt, vrsqrtsq_f32(vmulq_f32(rsqrt, rsqrt), in)); return rsqrt; } NV_FORCE_INLINE FloatV FDiv(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return vmul_f32(a, VRECIP(b)); } NV_FORCE_INLINE FloatV FDivFast(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return vmul_f32(a, VRECIPE(b)); } NV_FORCE_INLINE FloatV FRecip(const FloatV a) { return VRECIP(a); } NV_FORCE_INLINE FloatV FRecipFast(const FloatV a) { return VRECIPE(a); } NV_FORCE_INLINE FloatV FRsqrt(const FloatV a) { return VRECIPSQRT(a); } NV_FORCE_INLINE FloatV FSqrt(const FloatV a) { return vmul_f32(a, VRECIPSQRT(a)); } NV_FORCE_INLINE FloatV FRsqrtFast(const FloatV a) { return VRECIPSQRTE(a); } NV_FORCE_INLINE FloatV FScaleAdd(const FloatV a, const FloatV b, const FloatV c) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); VECMATHAOS_ASSERT(isValidFloatV(c)); return vmla_f32(c, a, b); } NV_FORCE_INLINE FloatV FNegScaleSub(const FloatV a, const FloatV b, const FloatV c) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); VECMATHAOS_ASSERT(isValidFloatV(c)); return vmls_f32(c, a, b); } NV_FORCE_INLINE FloatV FAbs(const FloatV a) { VECMATHAOS_ASSERT(isValidFloatV(a)); return vabs_f32(a); } NV_FORCE_INLINE FloatV FSel(const BoolV c, const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(_VecMathTests::allElementsEqualBoolV(c,BTTTT()) || _VecMathTests::allElementsEqualBoolV(c,BFFFF())); VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return vbsl_f32(vget_low_u32(c), a, b); } NV_FORCE_INLINE BoolV FIsGrtr(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return vdupq_lane_u32(vcgt_f32(a, b), 0); } NV_FORCE_INLINE BoolV FIsGrtrOrEq(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); 
VECMATHAOS_ASSERT(isValidFloatV(b)); return vdupq_lane_u32(vcge_f32(a, b), 0); } NV_FORCE_INLINE BoolV FIsEq(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return vdupq_lane_u32(vceq_f32(a, b), 0); } NV_FORCE_INLINE FloatV FMax(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return vmax_f32(a, b); } NV_FORCE_INLINE FloatV FMin(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return vmin_f32(a, b); } NV_FORCE_INLINE FloatV FClamp(const FloatV a, const FloatV minV, const FloatV maxV) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(minV)); VECMATHAOS_ASSERT(isValidFloatV(maxV)); return FMax(FMin(a,maxV),minV); } NV_FORCE_INLINE uint32_t FAllGrtr(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return vget_lane_u32(vcgt_f32(a, b), 0); } NV_FORCE_INLINE uint32_t FAllGrtrOrEq(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return vget_lane_u32(vcge_f32(a, b), 0); } NV_FORCE_INLINE uint32_t FAllEq(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return vget_lane_u32(vceq_f32(a, b), 0); } NV_FORCE_INLINE FloatV FRound(const FloatV a) { //truncate(a + (0.5f - sign(a))) const float32x2_t half = vdup_n_f32(0.5f); const float32x2_t sign = vcvt_f32_u32((vshr_n_u32(vreinterpret_u32_f32(a), 31))); const float32x2_t aPlusHalf = vadd_f32(a, half); const float32x2_t aRound = vsub_f32(aPlusHalf, sign); int32x2_t tmp = vcvt_s32_f32(aRound); return vcvt_f32_s32(tmp); } NV_FORCE_INLINE FloatV FSin(const FloatV a) { //Vec4V V1, V2, V3, V5, V7, V9, V11, V13, V15, V17, V19, V21, V23; //Vec4V S1, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11; FloatV Result; // Modulo the range of the given angles such that -XM_2PI <= Angles < XM_2PI const FloatV recipTwoPi = FLoadA(g_NVReciprocalTwoPi.f); const FloatV twoPi = FLoadA(g_NVTwoPi.f); const FloatV tmp = FMul(a, recipTwoPi); const FloatV b = FRound(tmp); const FloatV V1 = FNegMulSub(twoPi, b, a); // sin(V) ~= V - V^3 / 3! + V^5 / 5! - V^7 / 7! + V^9 / 9! - V^11 / 11! + V^13 / 13! - // V^15 / 15! + V^17 / 17! - V^19 / 19! + V^21 / 21! - V^23 / 23! 
(for -PI <= V < PI) const FloatV V2 = FMul(V1, V1); const FloatV V3 = FMul(V2, V1); const FloatV V5 = FMul(V3, V2); const FloatV V7 = FMul(V5, V2); const FloatV V9 = FMul(V7, V2); const FloatV V11 = FMul(V9, V2); const FloatV V13 = FMul(V11, V2); const FloatV V15 = FMul(V13, V2); const FloatV V17 = FMul(V15, V2); const FloatV V19 = FMul(V17, V2); const FloatV V21 = FMul(V19, V2); const FloatV V23 = FMul(V21, V2); const Vec4V sinCoefficients0 = V4LoadA(g_NVSinCoefficients0.f); const Vec4V sinCoefficients1 = V4LoadA(g_NVSinCoefficients1.f); const Vec4V sinCoefficients2 = V4LoadA(g_NVSinCoefficients2.f); const FloatV S1 = V4GetY(sinCoefficients0); const FloatV S2 = V4GetZ(sinCoefficients0); const FloatV S3 = V4GetW(sinCoefficients0); const FloatV S4 = V4GetX(sinCoefficients1); const FloatV S5 = V4GetY(sinCoefficients1); const FloatV S6 = V4GetZ(sinCoefficients1); const FloatV S7 = V4GetW(sinCoefficients1); const FloatV S8 = V4GetX(sinCoefficients2); const FloatV S9 = V4GetY(sinCoefficients2); const FloatV S10 = V4GetZ(sinCoefficients2); const FloatV S11 = V4GetW(sinCoefficients2); Result = FMulAdd(S1, V3, V1); Result = FMulAdd(S2, V5, Result); Result = FMulAdd(S3, V7, Result); Result = FMulAdd(S4, V9, Result); Result = FMulAdd(S5, V11, Result); Result = FMulAdd(S6, V13, Result); Result = FMulAdd(S7, V15, Result); Result = FMulAdd(S8, V17, Result); Result = FMulAdd(S9, V19, Result); Result = FMulAdd(S10, V21, Result); Result = FMulAdd(S11, V23, Result); return Result; } NV_FORCE_INLINE FloatV FCos(const FloatV a) { //XMVECTOR V1, V2, V4, V6, V8, V10, V12, V14, V16, V18, V20, V22; //XMVECTOR C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11; FloatV Result; // Modulo the range of the given angles such that -XM_2PI <= Angles < XM_2PI const FloatV recipTwoPi = FLoadA(g_NVReciprocalTwoPi.f); const FloatV twoPi = FLoadA(g_NVTwoPi.f); const FloatV tmp = FMul(a, recipTwoPi); const FloatV b = FRound(tmp); const FloatV V1 = FNegMulSub(twoPi, b, a); // cos(V) ~= 1 - V^2 / 2! + V^4 / 4! - V^6 / 6! + V^8 / 8! - V^10 / 10! + V^12 / 12! - // V^14 / 14! + V^16 / 16! - V^18 / 18! + V^20 / 20! - V^22 / 22! 
(for -PI <= V < PI) const FloatV V2 = FMul(V1, V1); const FloatV V4 = FMul(V2, V2); const FloatV V6 = FMul(V4, V2); const FloatV V8 = FMul(V4, V4); const FloatV V10 = FMul(V6, V4); const FloatV V12 = FMul(V6, V6); const FloatV V14 = FMul(V8, V6); const FloatV V16 = FMul(V8, V8); const FloatV V18 = FMul(V10, V8); const FloatV V20 = FMul(V10, V10); const FloatV V22 = FMul(V12, V10); const Vec4V cosCoefficients0 = V4LoadA(g_NVCosCoefficients0.f); const Vec4V cosCoefficients1 = V4LoadA(g_NVCosCoefficients1.f); const Vec4V cosCoefficients2 = V4LoadA(g_NVCosCoefficients2.f); const FloatV C1 = V4GetY(cosCoefficients0); const FloatV C2 = V4GetZ(cosCoefficients0); const FloatV C3 = V4GetW(cosCoefficients0); const FloatV C4 = V4GetX(cosCoefficients1); const FloatV C5 = V4GetY(cosCoefficients1); const FloatV C6 = V4GetZ(cosCoefficients1); const FloatV C7 = V4GetW(cosCoefficients1); const FloatV C8 = V4GetX(cosCoefficients2); const FloatV C9 = V4GetY(cosCoefficients2); const FloatV C10 = V4GetZ(cosCoefficients2); const FloatV C11 = V4GetW(cosCoefficients2); Result = FMulAdd(C1, V2, FOne()); Result = FMulAdd(C2, V4, Result); Result = FMulAdd(C3, V6, Result); Result = FMulAdd(C4, V8, Result); Result = FMulAdd(C5, V10, Result); Result = FMulAdd(C6, V12, Result); Result = FMulAdd(C7, V14, Result); Result = FMulAdd(C8, V16, Result); Result = FMulAdd(C9, V18, Result); Result = FMulAdd(C10, V20, Result); Result = FMulAdd(C11, V22, Result); return Result; } NV_FORCE_INLINE uint32_t FOutOfBounds(const FloatV a, const FloatV min, const FloatV max) { const BoolV ffff = BFFFF(); const BoolV c = BOr(FIsGrtr(a, max), FIsGrtr(min, a)); return uint32_t(!BAllEq(c, ffff)); } NV_FORCE_INLINE uint32_t FInBounds(const FloatV a, const FloatV min, const FloatV max) { const BoolV tttt = BTTTT(); const BoolV c = BAnd(FIsGrtrOrEq(a, min), FIsGrtrOrEq(max, a)); return uint32_t(BAllEq(c, tttt)); } NV_FORCE_INLINE uint32_t FOutOfBounds(const FloatV a, const FloatV bounds) { #if NV_WINRT const uint32x2_t greater = vacgt_f32(a, bounds); #else const uint32x2_t greater = vcagt_f32(a, bounds); #endif return vget_lane_u32(greater, 0); } NV_FORCE_INLINE uint32_t FInBounds(const FloatV a, const FloatV bounds) { #if NV_WINRT const uint32x2_t geq = vacge_f32(bounds, a); #else const uint32x2_t geq = vcage_f32(bounds, a); #endif return vget_lane_u32(geq, 0); } ////////////////////////////////// //VEC3V ////////////////////////////////// NV_FORCE_INLINE Vec3V V3Splat(const FloatV f) { VECMATHAOS_ASSERT(isValidFloatV(f)); #if NV_WINRT const uint32x2_t mask = { 0x00000000ffffFFFFULL }; #else const uint32x2_t mask = {0xffffFFFF, 0x0}; #endif const uint32x2_t uHigh = vreinterpret_u32_f32(f); const float32x2_t dHigh = vreinterpret_f32_u32(vand_u32(uHigh, mask)); return vcombine_f32(f, dHigh); } NV_FORCE_INLINE Vec3V V3Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z) { VECMATHAOS_ASSERT(isValidFloatV(x)); VECMATHAOS_ASSERT(isValidFloatV(y)); VECMATHAOS_ASSERT(isValidFloatV(z)); #if NV_WINRT const uint32x2_t mask = { 0x00000000ffffFFFFULL }; #else const uint32x2_t mask = {0xffffFFFF, 0x0}; #endif const uint32x2_t dHigh = vand_u32(vreinterpret_u32_f32(z), mask); const uint32x2_t dLow = vext_u32(vreinterpret_u32_f32(x), vreinterpret_u32_f32(y), 1); return vreinterpretq_f32_u32(vcombine_u32(dLow, dHigh)); } NV_FORCE_INLINE Vec3V V3UnitX() { #if NV_WINRT const float32x4_t x = { 0x000000003f800000ULL, 0x0ULL}; #else const float32x4_t x = { 1.0f, 0.0f, 0.0f, 0.0f}; #endif // NV_WINRT return x; } NV_FORCE_INLINE Vec3V V3UnitY() { #if NV_WINRT 
const float32x4_t y = { 0x3f80000000000000ULL, 0x0ULL}; #else const float32x4_t y = {0, 1.0f, 0, 0}; #endif return y; } NV_FORCE_INLINE Vec3V V3UnitZ() { #if NV_WINRT const float32x4_t z = { 0x0ULL, 0x000000003f800000ULL }; #else const float32x4_t z = {0, 0, 1.0f, 0}; #endif return z; } NV_FORCE_INLINE FloatV V3GetX(const Vec3V f) { const float32x2_t fLow = vget_low_f32(f); return vdup_lane_f32(fLow, 0); } NV_FORCE_INLINE FloatV V3GetY(const Vec3V f) { const float32x2_t fLow = vget_low_f32(f); return vdup_lane_f32(fLow, 1); } NV_FORCE_INLINE FloatV V3GetZ(const Vec3V f) { const float32x2_t fhigh = vget_high_f32(f); return vdup_lane_f32(fhigh, 0); } NV_FORCE_INLINE Vec3V V3SetX(const Vec3V v, const FloatV f) { VECMATHAOS_ASSERT(isValidVec3V(v)); VECMATHAOS_ASSERT(isValidFloatV(f)); return V3Sel(BFTTT(),v, vcombine_f32(f, f)); } NV_FORCE_INLINE Vec3V V3SetY(const Vec3V v, const FloatV f) { VECMATHAOS_ASSERT(isValidVec3V(v)); VECMATHAOS_ASSERT(isValidFloatV(f)); return V3Sel(BTFTT(),v,vcombine_f32(f, f)); } NV_FORCE_INLINE Vec3V V3SetZ(const Vec3V v, const FloatV f) { VECMATHAOS_ASSERT(isValidVec3V(v)); VECMATHAOS_ASSERT(isValidFloatV(f)); return V3Sel(BTTFT(),v,vcombine_f32(f, f)); } NV_FORCE_INLINE Vec3V V3ColX(const Vec3V a, const Vec3V b, const Vec3V c) { const float32x2_t aLow = vget_low_f32(a); const float32x2_t bLow = vget_low_f32(b); const float32x2_t cLow = vget_low_f32(c); const float32x2_t zero = vdup_n_f32(0.0f); const float32x2x2_t zipL = vzip_f32(aLow, bLow); const float32x2x2_t zipH = vzip_f32(cLow, zero); return vcombine_f32(zipL.val[0], zipH.val[0]); } NV_FORCE_INLINE Vec3V V3ColY(const Vec3V a, const Vec3V b, const Vec3V c) { const float32x2_t aLow = vget_low_f32(a); const float32x2_t bLow = vget_low_f32(b); const float32x2_t cLow = vget_low_f32(c); const float32x2_t zero = vdup_n_f32(0.0f); const float32x2x2_t zipL = vzip_f32(aLow, bLow); const float32x2x2_t zipH = vzip_f32(cLow, zero); return vcombine_f32(zipL.val[1], zipH.val[1]); } NV_FORCE_INLINE Vec3V V3ColZ(const Vec3V a, const Vec3V b, const Vec3V c) { const float32x2_t aHi = vget_high_f32(a); const float32x2_t bHi = vget_high_f32(b); const float32x2_t cHi = vget_high_f32(c); const float32x2x2_t zipL = vzip_f32(aHi, bHi); return vcombine_f32(zipL.val[0], cHi); } NV_FORCE_INLINE Vec3V V3Zero() { return vdupq_n_f32(0.0f); } NV_FORCE_INLINE Vec3V V3Eps() { return V3Load(NV_EPS_REAL); } NV_FORCE_INLINE Vec3V V3One() { return V3Load(1.0f); } NV_FORCE_INLINE Vec3V V3Neg(const Vec3V f) { VECMATHAOS_ASSERT(isValidVec3V(f)); return vnegq_f32(f); } NV_FORCE_INLINE Vec3V V3Add(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return vaddq_f32(a, b); } NV_FORCE_INLINE Vec3V V3Add(const Vec3V a, const FloatV b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); return vaddq_f32(a, Vec3V_From_FloatV(b)); } NV_FORCE_INLINE Vec3V V3Sub(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); return vsubq_f32(a, b); } NV_FORCE_INLINE Vec3V V3Sub(const Vec3V a, const FloatV b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return vsubq_f32(a, Vec3V_From_FloatV(b)); } NV_FORCE_INLINE Vec3V V3Scale(const Vec3V a, const FloatV b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return vmulq_lane_f32(a, b, 0); } NV_FORCE_INLINE Vec3V V3Mul(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); return vmulq_f32(a, 
b); } NV_FORCE_INLINE Vec3V V3ScaleInv(const Vec3V a, const FloatV b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); float32x2_t invB = VRECIP(b); return vsetq_lane_f32(0.0f, vmulq_lane_f32(a, invB, 0), 3); } NV_FORCE_INLINE Vec3V V3Div(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); float32x4_t invB = VRECIPQ(b); invB = vsetq_lane_f32(0.0f, invB, 3); return vmulq_f32(a, invB); } NV_FORCE_INLINE Vec3V V3ScaleInvFast(const Vec3V a, const FloatV b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); const float32x2_t invB = VRECIPE(b); return vmulq_lane_f32(a, invB, 0); } NV_FORCE_INLINE Vec3V V3DivFast(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); float32x4_t invB = VRECIPEQ(b); invB = vsetq_lane_f32(0.0f, invB, 3); return vmulq_f32(a, invB); } NV_FORCE_INLINE Vec3V V3Recip(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); const float32x4_t recipA = VRECIPQ(a); return vsetq_lane_f32(0.0f, recipA, 3); } NV_FORCE_INLINE Vec3V V3RecipFast(const Vec3V a) { const float32x4_t recipA = VRECIPEQ(a); return vsetq_lane_f32(0.0f, recipA, 3); } NV_FORCE_INLINE Vec3V V3Rsqrt(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); const float32x4_t rSqrA = VRECIPSQRTQ(a); return vsetq_lane_f32(0.0f, rSqrA, 3); } NV_FORCE_INLINE Vec3V V3RsqrtFast(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); const float32x4_t rSqrA = VRECIPSQRTEQ(a); return vsetq_lane_f32(0.0f, rSqrA, 3); } NV_FORCE_INLINE Vec3V V3ScaleAdd(const Vec3V a, const FloatV b, const Vec3V c) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); VECMATHAOS_ASSERT(isValidVec3V(c)); return vmlaq_lane_f32(c, a, b, 0); } NV_FORCE_INLINE Vec3V V3NegScaleSub(const Vec3V a, const FloatV b, const Vec3V c) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); VECMATHAOS_ASSERT(isValidVec3V(c)); return vmlsq_lane_f32(c, a, b, 0); } NV_FORCE_INLINE Vec3V V3MulAdd(const Vec3V a, const Vec3V b, const Vec3V c) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); VECMATHAOS_ASSERT(isValidVec3V(c)); return vmlaq_f32(c, a, b); } NV_FORCE_INLINE Vec3V V3NegMulSub(const Vec3V a, const Vec3V b, const Vec3V c) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); VECMATHAOS_ASSERT(isValidVec3V(c)); return vmlsq_f32(c, a, b); } NV_FORCE_INLINE Vec3V V3Abs(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); return vabsq_f32(a); } NV_FORCE_INLINE FloatV V3Dot(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); NV_ASSERT(isValidVec3V(a)); NV_ASSERT(isValidVec3V(b)); // const uint32x2_t mask = {0xffffFFFF, 0x0}; const float32x4_t tmp = vmulq_f32(a, b); const float32x2_t low = vget_low_f32(tmp); const float32x2_t high = vget_high_f32(tmp); // const float32x2_t high = vreinterpret_f32_u32(vand_u32(vreinterpret_u32_f32(high_), mask)); const float32x2_t sumTmp = vpadd_f32(low, high); // = {0+z, x+y} const float32x2_t sum0ZYX = vpadd_f32(sumTmp, sumTmp); // = {x+y+z, x+y+z} return sum0ZYX; } NV_FORCE_INLINE Vec3V V3Cross(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); #if NV_WINRT const uint32x2_t TF = { 0x00000000ffffFFFFULL }; #else const uint32x2_t TF = {0xffffFFFF, 0x0}; #endif const float32x2_t ay_ax = vget_low_f32(a); // d2 const float32x2_t aw_az = vget_high_f32(a); // d3 const float32x2_t by_bx = 
vget_low_f32(b); // d4 const float32x2_t bw_bz = vget_high_f32(b); // d5 // Hi, Lo const float32x2_t bz_by = vext_f32(by_bx, bw_bz, 1); // bz, by const float32x2_t az_ay = vext_f32(ay_ax, aw_az, 1); // az, ay const float32x2_t azbx = vmul_f32(aw_az, by_bx); // 0, az*bx const float32x2_t aybz_axby = vmul_f32(ay_ax, bz_by); // ay*bz, ax*by const float32x2_t azbxSUBaxbz = vmls_f32(azbx, bw_bz, ay_ax); // 0, az*bx-ax*bz const float32x2_t aybzSUBazby_axbySUBaybx = vmls_f32(aybz_axby, by_bx, az_ay); // ay*bz-az*by, ax*by-ay*bx const float32x2_t retLow = vext_f32(aybzSUBazby_axbySUBaybx, azbxSUBaxbz, 1); // az*bx-ax*bz, ay*bz-az*by const uint32x2_t retHigh = vand_u32(TF, vreinterpret_u32_f32(aybzSUBazby_axbySUBaybx)); // 0, ax*by-ay*bx return vcombine_f32(retLow, vreinterpret_f32_u32(retHigh)); } NV_FORCE_INLINE VecCrossV V3PrepareCross(const Vec3V a) { return a; } NV_FORCE_INLINE FloatV V3Length(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); NV_ASSERT(isValidVec3V(a)); // const uint32x2_t mask = {0xffffFFFF, 0x0}; const float32x4_t tmp = vmulq_f32(a, a); const float32x2_t low = vget_low_f32(tmp); const float32x2_t high = vget_high_f32(tmp); // const float32x2_t high = vreinterpret_f32_u32(vand_u32(vreinterpret_u32_f32(high_), mask)); const float32x2_t sumTmp = vpadd_f32(low, high); // = {0+z, x+y} const float32x2_t sum0ZYX = vpadd_f32(sumTmp, sumTmp); // = {x+y+z, x+y+z} const float32x2_t len = vmul_f32(VRECIPSQRTE(sum0ZYX), sum0ZYX); return len; } NV_FORCE_INLINE FloatV V3LengthSq(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); return V3Dot(a,a); } NV_FORCE_INLINE Vec3V V3Normalize(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); return V3ScaleInv(a, V3Length(a)); } NV_FORCE_INLINE Vec3V V3NormalizeFast(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); return V3Scale(a, VRECIPSQRTE(V3Dot(a,a))); } NV_FORCE_INLINE Vec3V V3NormalizeSafe(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); const FloatV zero = vdup_n_f32(0.0f); const FloatV length = V3Length(a); const uint32x4_t isGreaterThanZero = FIsGrtr(length, zero); return V3Sel(isGreaterThanZero, V3ScaleInv(a, length), vdupq_lane_f32(zero, 0)); } NV_FORCE_INLINE Vec3V V3Sel(const BoolV c, const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); return vbslq_f32(c, a, b); } NV_FORCE_INLINE BoolV V3IsGrtr(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); return vcgtq_f32(a, b); } NV_FORCE_INLINE BoolV V3IsGrtrOrEq(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); return vcgeq_f32(a, b); } NV_FORCE_INLINE BoolV V3IsEq(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); return vceqq_f32(a, b); } NV_FORCE_INLINE Vec3V V3Max(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); return vmaxq_f32(a, b); } NV_FORCE_INLINE Vec3V V3Min(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); return vminq_f32(a, b); } //Extract the maximum value from a NV_FORCE_INLINE FloatV V3ExtractMax(const Vec3V a) { const float32x2_t low = vget_low_f32(a); const float32x2_t high = vget_high_f32(a); const float32x2_t zz = vdup_lane_f32(high, 0); const float32x2_t max0 = vpmax_f32(zz, low); const float32x2_t max1 = vpmax_f32(max0, max0); return max1; } //Extract the maximum value from a NV_FORCE_INLINE FloatV V3ExtractMin(const Vec3V a) { const 
float32x2_t low = vget_low_f32(a); const float32x2_t high = vget_high_f32(a); const float32x2_t zz = vdup_lane_f32(high, 0); const float32x2_t min0 = vpmin_f32(zz, low); const float32x2_t min1 = vpmin_f32(min0, min0); return min1; } //return (a >= 0.0f) ? 1.0f : -1.0f; NV_FORCE_INLINE Vec3V V3Sign(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); const Vec3V zero = V3Zero(); const Vec3V one = V3One(); const Vec3V none = V3Neg(one); return V3Sel(V3IsGrtrOrEq(a, zero), one, none); } NV_FORCE_INLINE Vec3V V3Clamp(const Vec3V a, const Vec3V minV, const Vec3V maxV) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(minV)); VECMATHAOS_ASSERT(isValidVec3V(maxV)); return V3Max(V3Min(a,maxV),minV); } NV_FORCE_INLINE uint32_t V3AllGrtr(const Vec3V a, const Vec3V b) { return internalUnitNeonSimd::BAllTrue3_R(V4IsGrtr(a, b)); } NV_FORCE_INLINE uint32_t V3AllGrtrOrEq(const Vec3V a, const Vec3V b) { return internalUnitNeonSimd::BAllTrue3_R(V4IsGrtrOrEq(a, b)); } NV_FORCE_INLINE uint32_t V3AllEq(const Vec3V a, const Vec3V b) { return internalUnitNeonSimd::BAllTrue3_R(V4IsEq(a, b)); } NV_FORCE_INLINE Vec3V V3Round(const Vec3V a) { //truncate(a + (0.5f - sign(a))) const Vec3V half = V3Load(0.5f); const float32x4_t sign = vcvtq_f32_u32((vshrq_n_u32(vreinterpretq_u32_f32(a), 31))); const Vec3V aPlusHalf = V3Add(a, half); const Vec3V aRound = V3Sub(aPlusHalf, sign); return vcvtq_f32_s32(vcvtq_s32_f32(aRound)); } NV_FORCE_INLINE Vec3V V3Sin(const Vec3V a) { //Vec4V V1, V2, V3, V5, V7, V9, V11, V13, V15, V17, V19, V21, V23; //Vec4V S1, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11; Vec3V Result; // Modulo the range of the given angles such that -XM_2PI <= Angles < XM_2PI const Vec3V recipTwoPi = V4LoadA(g_NVReciprocalTwoPi.f); const Vec3V twoPi = V4LoadA(g_NVTwoPi.f); const Vec3V tmp = V3Mul(a, recipTwoPi); const Vec3V b = V3Round(tmp); const Vec3V V1 = V3NegMulSub(twoPi, b, a); // sin(V) ~= V - V^3 / 3! + V^5 / 5! - V^7 / 7! + V^9 / 9! - V^11 / 11! + V^13 / 13! - // V^15 / 15! + V^17 / 17! - V^19 / 19! + V^21 / 21! - V^23 / 23! 
(for -PI <= V < PI) const Vec3V V2 = V3Mul(V1, V1); const Vec3V V3 = V3Mul(V2, V1); const Vec3V V5 = V3Mul(V3, V2); const Vec3V V7 = V3Mul(V5, V2); const Vec3V V9 = V3Mul(V7, V2); const Vec3V V11 = V3Mul(V9, V2); const Vec3V V13 = V3Mul(V11, V2); const Vec3V V15 = V3Mul(V13, V2); const Vec3V V17 = V3Mul(V15, V2); const Vec3V V19 = V3Mul(V17, V2); const Vec3V V21 = V3Mul(V19, V2); const Vec3V V23 = V3Mul(V21, V2); const Vec4V sinCoefficients0 = V4LoadA(g_NVSinCoefficients0.f); const Vec4V sinCoefficients1 = V4LoadA(g_NVSinCoefficients1.f); const Vec4V sinCoefficients2 = V4LoadA(g_NVSinCoefficients2.f); const FloatV S1 = V4GetY(sinCoefficients0); const FloatV S2 = V4GetZ(sinCoefficients0); const FloatV S3 = V4GetW(sinCoefficients0); const FloatV S4 = V4GetX(sinCoefficients1); const FloatV S5 = V4GetY(sinCoefficients1); const FloatV S6 = V4GetZ(sinCoefficients1); const FloatV S7 = V4GetW(sinCoefficients1); const FloatV S8 = V4GetX(sinCoefficients2); const FloatV S9 = V4GetY(sinCoefficients2); const FloatV S10 = V4GetZ(sinCoefficients2); const FloatV S11 = V4GetW(sinCoefficients2); Result = V3ScaleAdd(V3, S1, V1); Result = V3ScaleAdd(V5, S2, Result); Result = V3ScaleAdd(V7, S3, Result); Result = V3ScaleAdd(V9, S4, Result); Result = V3ScaleAdd(V11, S5, Result); Result = V3ScaleAdd(V13, S6, Result); Result = V3ScaleAdd(V15, S7, Result); Result = V3ScaleAdd(V17, S8, Result); Result = V3ScaleAdd(V19, S9, Result); Result = V3ScaleAdd(V21, S10,Result); Result = V3ScaleAdd(V23, S11,Result); return Result; } NV_FORCE_INLINE Vec3V V3Cos(const Vec3V a) { //XMVECTOR V1, V2, V4, V6, V8, V10, V12, V14, V16, V18, V20, V22; //XMVECTOR C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11; Vec3V Result; // Modulo the range of the given angles such that -XM_2PI <= Angles < XM_2PI const Vec3V recipTwoPi = V4LoadA(g_NVReciprocalTwoPi.f); const Vec3V twoPi = V4LoadA(g_NVTwoPi.f); const Vec3V tmp = V3Mul(a, recipTwoPi); const Vec3V b = V3Round(tmp); const Vec3V V1 = V3NegMulSub(twoPi, b, a); // cos(V) ~= 1 - V^2 / 2! + V^4 / 4! - V^6 / 6! + V^8 / 8! - V^10 / 10! + V^12 / 12! - // V^14 / 14! + V^16 / 16! - V^18 / 18! + V^20 / 20! - V^22 / 22! 
(for -PI <= V < PI) const Vec3V V2 = V3Mul(V1, V1); const Vec3V V4 = V3Mul(V2, V2); const Vec3V V6 = V3Mul(V4, V2); const Vec3V V8 = V3Mul(V4, V4); const Vec3V V10 = V3Mul(V6, V4); const Vec3V V12 = V3Mul(V6, V6); const Vec3V V14 = V3Mul(V8, V6); const Vec3V V16 = V3Mul(V8, V8); const Vec3V V18 = V3Mul(V10, V8); const Vec3V V20 = V3Mul(V10, V10); const Vec3V V22 = V3Mul(V12, V10); const Vec4V cosCoefficients0 = V4LoadA(g_NVCosCoefficients0.f); const Vec4V cosCoefficients1 = V4LoadA(g_NVCosCoefficients1.f); const Vec4V cosCoefficients2 = V4LoadA(g_NVCosCoefficients2.f); const FloatV C1 = V4GetY(cosCoefficients0); const FloatV C2 = V4GetZ(cosCoefficients0); const FloatV C3 = V4GetW(cosCoefficients0); const FloatV C4 = V4GetX(cosCoefficients1); const FloatV C5 = V4GetY(cosCoefficients1); const FloatV C6 = V4GetZ(cosCoefficients1); const FloatV C7 = V4GetW(cosCoefficients1); const FloatV C8 = V4GetX(cosCoefficients2); const FloatV C9 = V4GetY(cosCoefficients2); const FloatV C10 = V4GetZ(cosCoefficients2); const FloatV C11 = V4GetW(cosCoefficients2); Result = V3ScaleAdd(V2, C1, V4One()); Result = V3ScaleAdd(V4, C2, Result); Result = V3ScaleAdd(V6, C3, Result); Result = V3ScaleAdd(V8, C4, Result); Result = V3ScaleAdd(V10, C5, Result); Result = V3ScaleAdd(V12, C6, Result); Result = V3ScaleAdd(V14, C7, Result); Result = V3ScaleAdd(V16, C8, Result); Result = V3ScaleAdd(V18, C9, Result); Result = V3ScaleAdd(V20, C10,Result); Result = V3ScaleAdd(V22, C11,Result); return Result; } NV_FORCE_INLINE Vec3V V3PermYZZ(const Vec3V a) { const float32x2_t xy = vget_low_f32(a); const float32x2_t zw = vget_high_f32(a); const float32x2_t yz = vext_f32(xy, zw, 1); return vcombine_f32(yz, zw); } NV_FORCE_INLINE Vec3V V3PermXYX(const Vec3V a) { #if NV_WINRT const uint32x2_t mask = { 0x00000000ffffFFFFULL }; #else const uint32x2_t mask = {0xffffFFFF, 0x0}; #endif const uint32x2_t xy = vget_low_u32(vreinterpretq_u32_f32(a)); const uint32x2_t xw = vand_u32(xy, mask); return vreinterpretq_f32_u32(vcombine_u32(xy, xw)); } NV_FORCE_INLINE Vec3V V3PermYZX(const Vec3V a) { #if NV_WINRT const uint32x2_t mask = { 0x00000000ffffFFFFULL }; #else const uint32x2_t mask = {0xffffFFFF, 0x0}; #endif const uint32x2_t xy = vget_low_u32(vreinterpretq_u32_f32(a)); const uint32x2_t zw = vget_high_u32(vreinterpretq_u32_f32(a)); const uint32x2_t yz = vext_u32(xy, zw, 1); const uint32x2_t xw = vand_u32(xy, mask); return vreinterpretq_f32_u32(vcombine_u32(yz, xw)); } NV_FORCE_INLINE Vec3V V3PermZXY(const Vec3V a) { const uint32x2_t xy = vget_low_u32(vreinterpretq_u32_f32(a)); const uint32x2_t zw = vget_high_u32(vreinterpretq_u32_f32(a)); const uint32x2_t wz = vrev64_u32(zw); const uint32x2_t zx = vext_u32(wz, xy, 1); const uint32x2_t yw = vext_u32(xy, wz, 1); return vreinterpretq_f32_u32(vcombine_u32(zx, yw)); } NV_FORCE_INLINE Vec3V V3PermZZY(const Vec3V a) { const uint32x2_t xy = vget_low_u32(vreinterpretq_u32_f32(a)); const uint32x2_t zw = vget_high_u32(vreinterpretq_u32_f32(a)); const uint32x2_t wz = vrev64_u32(zw); const uint32x2_t yw = vext_u32(xy, wz, 1); const uint32x2_t zz = vdup_lane_u32(wz, 1); return vreinterpretq_f32_u32(vcombine_u32(zz, yw)); } NV_FORCE_INLINE Vec3V V3PermYXX(const Vec3V a) { #if NV_WINRT const uint32x2_t mask = { 0x00000000ffffFFFFULL }; #else const uint32x2_t mask = {0xffffFFFF, 0x0}; #endif const uint32x2_t xy = vget_low_u32(vreinterpretq_u32_f32(a)); const uint32x2_t yx = vrev64_u32(xy); const uint32x2_t xw = vand_u32(xy, mask); return vreinterpretq_f32_u32(vcombine_u32(yx, xw)); } NV_FORCE_INLINE Vec3V 
V3Perm_Zero_1Z_0Y(const Vec3V v0, const Vec3V v1) { const uint32x2_t xy = vget_low_u32(vreinterpretq_u32_f32(v0)); const uint32x2_t zw = vget_high_u32(vreinterpretq_u32_f32(v1)); const uint32x2_t wz = vrev64_u32(zw); const uint32x2_t yw = vext_u32(xy, wz, 1); return vreinterpretq_f32_u32(vcombine_u32(wz, yw)); } NV_FORCE_INLINE Vec3V V3Perm_0Z_Zero_1X(const Vec3V v0, const Vec3V v1) { #if NV_WINRT const uint32x2_t mask = { 0x00000000ffffFFFFULL }; #else const uint32x2_t mask = {0xffffFFFF, 0x0}; #endif const uint32x2_t zw = vget_high_u32(vreinterpretq_u32_f32(v0)); const uint32x2_t xy = vget_low_u32(vreinterpretq_u32_f32(v1)); const uint32x2_t xw = vand_u32(xy, mask); return vreinterpretq_f32_u32(vcombine_u32(zw, xw)); } NV_FORCE_INLINE Vec3V V3Perm_1Y_0X_Zero(const Vec3V v0, const Vec3V v1) { const uint32x2_t axy = vget_low_u32(vreinterpretq_u32_f32(v0)); const uint32x2_t bxy = vget_low_u32(vreinterpretq_u32_f32(v1)); const uint32x2_t byax = vext_u32(bxy, axy, 1); const uint32x2_t ww = vdup_n_u32(0); return vreinterpretq_f32_u32(vcombine_u32(byax, ww)); } NV_FORCE_INLINE FloatV V3SumElems(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); NV_ASSERT(isValidVec3V(a)); //const uint32x2_t mask = {0xffffFFFF, 0x0}; const float32x2_t low = vget_low_f32(a); const float32x2_t high = vget_high_f32(a); //const float32x2_t high = vreinterpret_f32_u32(vand_u32(vreinterpret_u32_f32(high_), mask)); const float32x2_t sumTmp = vpadd_f32(low, high); // = {0+z, x+y} const float32x2_t sum0ZYX = vpadd_f32(sumTmp, sumTmp); // = {x+y+z, x+y+z} return sum0ZYX; } NV_FORCE_INLINE uint32_t V3OutOfBounds(const Vec3V a, const Vec3V min, const Vec3V max) { const BoolV c = BOr(V3IsGrtr(a, max), V3IsGrtr(min, a)); return internalUnitNeonSimd::BAnyTrue3_R(c); } NV_FORCE_INLINE uint32_t V3InBounds(const Vec3V a, const Vec3V min, const Vec3V max) { const BoolV c = BAnd(V3IsGrtrOrEq(a, min), V3IsGrtrOrEq(max, a)); return internalUnitNeonSimd::BAllTrue4_R(c); } NV_FORCE_INLINE uint32_t V3OutOfBounds(const Vec3V a, const Vec3V bounds) { #if NV_WINRT const uint32x4_t greater = vacgtq_f32(a, bounds); #else const uint32x4_t greater = vcagtq_f32(a, bounds); #endif return internalUnitNeonSimd::BAnyTrue3_R(greater); } NV_FORCE_INLINE uint32_t V3InBounds(const Vec3V a, const Vec3V bounds) { #if NV_WINRT const uint32x4_t geq = vacgeq_f32(bounds, a); #else const uint32x4_t geq = vcageq_f32(bounds, a); #endif return internalUnitNeonSimd::BAllTrue4_R(geq); } ////////////////////////////////// //VEC4V ////////////////////////////////// NV_FORCE_INLINE Vec4V V4Splat(const FloatV f) { VECMATHAOS_ASSERT(isValidFloatV(f)); return vcombine_f32(f, f); } NV_FORCE_INLINE Vec4V V4Merge(const FloatV* const floatVArray) { VECMATHAOS_ASSERT(isValidFloatV(floatVArray[0])); VECMATHAOS_ASSERT(isValidFloatV(floatVArray[1])); VECMATHAOS_ASSERT(isValidFloatV(floatVArray[2])); VECMATHAOS_ASSERT(isValidFloatV(floatVArray[3])); const uint32x2_t xLow = vreinterpret_u32_f32(floatVArray[0]); const uint32x2_t yLow = vreinterpret_u32_f32(floatVArray[1]); const uint32x2_t zLow = vreinterpret_u32_f32(floatVArray[2]); const uint32x2_t wLow = vreinterpret_u32_f32(floatVArray[3]); const uint32x2_t dLow = vext_u32(xLow, yLow, 1); const uint32x2_t dHigh = vext_u32(zLow, wLow, 1); return vreinterpretq_f32_u32(vcombine_u32(dLow, dHigh)); } NV_FORCE_INLINE Vec4V V4Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z, const FloatVArg w) { VECMATHAOS_ASSERT(isValidFloatV(x)); VECMATHAOS_ASSERT(isValidFloatV(y)); VECMATHAOS_ASSERT(isValidFloatV(z)); 
VECMATHAOS_ASSERT(isValidFloatV(w)); const uint32x2_t xLow = vreinterpret_u32_f32(x); const uint32x2_t yLow = vreinterpret_u32_f32(y); const uint32x2_t zLow = vreinterpret_u32_f32(z); const uint32x2_t wLow = vreinterpret_u32_f32(w); const uint32x2_t dLow = vext_u32(xLow, yLow, 1); const uint32x2_t dHigh = vext_u32(zLow, wLow, 1); return vreinterpretq_f32_u32(vcombine_u32(dLow, dHigh)); } NV_FORCE_INLINE Vec4V V4MergeW(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w) { const float32x2_t xx = vget_high_f32(x); const float32x2_t yy = vget_high_f32(y); const float32x2_t zz = vget_high_f32(z); const float32x2_t ww = vget_high_f32(w); const float32x2x2_t zipL = vzip_f32(xx, yy); const float32x2x2_t zipH = vzip_f32(zz, ww); return vcombine_f32(zipL.val[1], zipH.val[1]); } NV_FORCE_INLINE Vec4V V4MergeZ(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w) { const float32x2_t xx = vget_high_f32(x); const float32x2_t yy = vget_high_f32(y); const float32x2_t zz = vget_high_f32(z); const float32x2_t ww = vget_high_f32(w); const float32x2x2_t zipL = vzip_f32(xx, yy); const float32x2x2_t zipH = vzip_f32(zz, ww); return vcombine_f32(zipL.val[0], zipH.val[0]); } NV_FORCE_INLINE Vec4V V4MergeY(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w) { const float32x2_t xx = vget_low_f32(x); const float32x2_t yy = vget_low_f32(y); const float32x2_t zz = vget_low_f32(z); const float32x2_t ww = vget_low_f32(w); const float32x2x2_t zipL = vzip_f32(xx, yy); const float32x2x2_t zipH = vzip_f32(zz, ww); return vcombine_f32(zipL.val[1], zipH.val[1]); } NV_FORCE_INLINE Vec4V V4MergeX(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w) { const float32x2_t xx = vget_low_f32(x); const float32x2_t yy = vget_low_f32(y); const float32x2_t zz = vget_low_f32(z); const float32x2_t ww = vget_low_f32(w); const float32x2x2_t zipL = vzip_f32(xx, yy); const float32x2x2_t zipH = vzip_f32(zz, ww); return vcombine_f32(zipL.val[0], zipH.val[0]); } NV_FORCE_INLINE Vec4V V4UnpackXY(const Vec4VArg a, const Vec4VArg b) { return vzipq_f32(a, b).val[0]; } NV_FORCE_INLINE Vec4V V4UnpackZW(const Vec4VArg a, const Vec4VArg b) { return vzipq_f32(a, b).val[1]; } NV_FORCE_INLINE Vec4V V4UnitW() { const float32x2_t zeros = vreinterpret_f32_u32(vmov_n_u32(0)); const float32x2_t ones = vmov_n_f32(1.0f); const float32x2_t zo = vext_f32(zeros, ones, 1); return vcombine_f32(zeros, zo); } NV_FORCE_INLINE Vec4V V4UnitX() { const float32x2_t zeros = vreinterpret_f32_u32(vmov_n_u32(0)); const float32x2_t ones = vmov_n_f32(1.0f); const float32x2_t oz = vext_f32(ones, zeros, 1); return vcombine_f32(oz, zeros); } NV_FORCE_INLINE Vec4V V4UnitY() { const float32x2_t zeros = vreinterpret_f32_u32(vmov_n_u32(0)); const float32x2_t ones = vmov_n_f32(1.0f); const float32x2_t zo = vext_f32(zeros, ones, 1); return vcombine_f32(zo, zeros); } NV_FORCE_INLINE Vec4V V4UnitZ() { const float32x2_t zeros = vreinterpret_f32_u32(vmov_n_u32(0)); const float32x2_t ones = vmov_n_f32(1.0f); const float32x2_t oz = vext_f32(ones, zeros, 1); return vcombine_f32(zeros, oz); } NV_FORCE_INLINE FloatV V4GetW(const Vec4V f) { const float32x2_t fhigh = vget_high_f32(f); return vdup_lane_f32(fhigh, 1); } NV_FORCE_INLINE FloatV V4GetX(const Vec4V f) { const float32x2_t fLow = vget_low_f32(f); return vdup_lane_f32(fLow, 0); } NV_FORCE_INLINE FloatV V4GetY(const Vec4V f) { const float32x2_t fLow = vget_low_f32(f); return vdup_lane_f32(fLow, 1); } NV_FORCE_INLINE FloatV V4GetZ(const Vec4V f) { const float32x2_t fhigh 
= vget_high_f32(f); return vdup_lane_f32(fhigh, 0); } NV_FORCE_INLINE Vec4V V4SetW(const Vec4V v, const FloatV f) { VECMATHAOS_ASSERT(isValidFloatV(f)); return V4Sel(BTTTF(), v, vcombine_f32(f, f)); } NV_FORCE_INLINE Vec4V V4SetX(const Vec4V v, const FloatV f) { VECMATHAOS_ASSERT(isValidFloatV(f)); return V4Sel(BFTTT(), v, vcombine_f32(f, f)); } NV_FORCE_INLINE Vec4V V4SetY(const Vec4V v, const FloatV f) { VECMATHAOS_ASSERT(isValidFloatV(f)); return V4Sel(BTFTT(), v, vcombine_f32(f, f)); } NV_FORCE_INLINE Vec4V V4SetZ(const Vec4V v, const FloatV f) { VECMATHAOS_ASSERT(isValidVec3V(v)); VECMATHAOS_ASSERT(isValidFloatV(f)); return V4Sel(BTTFT(), v, vcombine_f32(f, f)); } NV_FORCE_INLINE Vec4V V4ClearW(const Vec4V v) { VECMATHAOS_ASSERT(isValidVec3V(v)); return V4Sel(BTTTF(), v, V4Zero()); } NV_FORCE_INLINE Vec4V V4Perm_YXWZ(const Vec4V a) { const float32x2_t xy = vget_low_f32(a); const float32x2_t zw = vget_high_f32(a); const float32x2_t yx = vext_f32(xy, xy, 1); const float32x2_t wz = vext_f32(zw, zw, 1); return vcombine_f32(yx, wz); } NV_FORCE_INLINE Vec4V V4Perm_XZXZ(const Vec4V a) { const float32x2_t xy = vget_low_f32(a); const float32x2_t zw = vget_high_f32(a); const float32x2x2_t xzyw = vzip_f32(xy, zw); return vcombine_f32(xzyw.val[0], xzyw.val[0]); } NV_FORCE_INLINE Vec4V V4Perm_YWYW(const Vec4V a) { const float32x2_t xy = vget_low_f32(a); const float32x2_t zw = vget_high_f32(a); const float32x2x2_t xzyw = vzip_f32(xy, zw); return vcombine_f32(xzyw.val[1], xzyw.val[1]); } template<uint8_t E0, uint8_t E1, uint8_t E2, uint8_t E3> NV_FORCE_INLINE Vec4V V4Perm(const Vec4V V) { static const uint32_t ControlElement[ 4 ] = { #if 1 0x03020100, // XM_SWIZZLE_X 0x07060504, // XM_SWIZZLE_Y 0x0B0A0908, // XM_SWIZZLE_Z 0x0F0E0D0C, // XM_SWIZZLE_W #else 0x00010203, // XM_SWIZZLE_X 0x04050607, // XM_SWIZZLE_Y 0x08090A0B, // XM_SWIZZLE_Z 0x0C0D0E0F, // XM_SWIZZLE_W #endif }; uint8x8x2_t tbl; tbl.val[0] = vreinterpret_u8_f32(vget_low_f32(V)); tbl.val[1] = vreinterpret_u8_f32(vget_high_f32(V)); uint8x8_t idx = vcreate_u8( ((uint64_t)ControlElement[E0]) | (((uint64_t)ControlElement[E1]) << 32) ); const uint8x8_t rL = vtbl2_u8( tbl, idx ); idx = vcreate_u8( ((uint64_t)ControlElement[E2]) | (((uint64_t)ControlElement[E3]) << 32) ); const uint8x8_t rH = vtbl2_u8( tbl, idx ); return vreinterpretq_f32_u8(vcombine_u8( rL, rH )); } NV_FORCE_INLINE Vec4V V4Zero() { return vreinterpretq_f32_u32(vmovq_n_u32(0)); } NV_FORCE_INLINE Vec4V V4One() { return vmovq_n_f32(1.0f); } NV_FORCE_INLINE Vec4V V4Eps() { return V4Load(NV_EPS_REAL); } NV_FORCE_INLINE Vec4V V4Neg(const Vec4V f) { return vnegq_f32(f); } NV_FORCE_INLINE Vec4V V4Add(const Vec4V a, const Vec4V b) { return vaddq_f32(a, b); } NV_FORCE_INLINE Vec4V V4Sub(const Vec4V a, const Vec4V b) { return vsubq_f32(a, b); } NV_FORCE_INLINE Vec4V V4Scale(const Vec4V a, const FloatV b) { return vmulq_lane_f32(a, b, 0); } NV_FORCE_INLINE Vec4V V4Mul(const Vec4V a, const Vec4V b) { return vmulq_f32(a, b); } NV_FORCE_INLINE Vec4V V4ScaleInv(const Vec4V a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(b)); const float32x2_t invB = VRECIP(b); return vmulq_lane_f32(a, invB, 0); } NV_FORCE_INLINE Vec4V V4Div(const Vec4V a, const Vec4V b) { const float32x4_t invB = VRECIPQ(b); return vmulq_f32(a, invB); } NV_FORCE_INLINE Vec4V V4ScaleInvFast(const Vec4V a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(b)); const float32x2_t invB = VRECIPE(b); return vmulq_lane_f32(a, invB, 0); } NV_FORCE_INLINE Vec4V V4DivFast(const Vec4V a, const Vec4V b) { const float32x4_t invB = 
VRECIPEQ(b); return vmulq_f32(a, invB); } NV_FORCE_INLINE Vec4V V4Recip(const Vec4V a) { return VRECIPQ(a); } NV_FORCE_INLINE Vec4V V4RecipFast(const Vec4V a) { return VRECIPEQ(a); } NV_FORCE_INLINE Vec4V V4Rsqrt(const Vec4V a) { return VRECIPSQRTQ(a); } NV_FORCE_INLINE Vec4V V4RsqrtFast(const Vec4V a) { return VRECIPSQRTEQ(a); } NV_FORCE_INLINE Vec4V V4Sqrt(const Vec4V a) { return V4Mul(a, VRECIPSQRTQ(a)); } NV_FORCE_INLINE Vec4V V4ScaleAdd(const Vec4V a, const FloatV b, const Vec4V c) { VECMATHAOS_ASSERT(isValidFloatV(b)); return vmlaq_lane_f32(c, a, b, 0); } NV_FORCE_INLINE Vec4V V4NegScaleSub(const Vec4V a, const FloatV b, const Vec4V c) { VECMATHAOS_ASSERT(isValidFloatV(b)); return vmlsq_lane_f32(c, a, b, 0); } NV_FORCE_INLINE Vec4V V4MulAdd(const Vec4V a, const Vec4V b, const Vec4V c) { return vmlaq_f32(c, a, b); } NV_FORCE_INLINE Vec4V V4NegMulSub(const Vec4V a, const Vec4V b, const Vec4V c) { return vmlsq_f32(c, a, b); } NV_FORCE_INLINE Vec4V V4Abs(const Vec4V a) { return vabsq_f32(a); } NV_FORCE_INLINE FloatV V4SumElements(const Vec4V a) { const Vec4V xy = V4UnpackXY(a, a); //x,x,y,y const Vec4V zw = V4UnpackZW(a, a); //z,z,w,w const Vec4V xz_yw = V4Add(xy, zw); //x+z,x+z,y+w,y+w const FloatV xz = V4GetX(xz_yw); //x+z const FloatV yw = V4GetZ(xz_yw); //y+w return FAdd(xz, yw); //sum } NV_FORCE_INLINE FloatV V4Dot(const Vec4V a, const Vec4V b) { const float32x4_t tmp = vmulq_f32(a, b); const float32x2_t low = vget_low_f32(tmp); const float32x2_t high = vget_high_f32(tmp); const float32x2_t sumTmp = vpadd_f32(low, high); // = {z+w, x+y} const float32x2_t sumWZYX = vpadd_f32(sumTmp, sumTmp); // = {x+y+z+w, x+y+z+w} return sumWZYX; } NV_FORCE_INLINE FloatV V4Length(const Vec4V a) { const float32x4_t tmp = vmulq_f32(a, a); const float32x2_t low = vget_low_f32(tmp); const float32x2_t high = vget_high_f32(tmp); const float32x2_t sumTmp = vpadd_f32(low, high); // = {0+z, x+y} const float32x2_t sumWZYX = vpadd_f32(sumTmp, sumTmp); // = {x+y+z, x+y+z} const float32x2_t len = vmul_f32(VRECIPSQRTE(sumWZYX), sumWZYX); return len; } NV_FORCE_INLINE FloatV V4LengthSq(const Vec4V a) { return V4Dot(a,a); } NV_FORCE_INLINE Vec4V V4Normalize(const Vec4V a) { return V4ScaleInv(a, V4Length(a)); } NV_FORCE_INLINE Vec4V V4NormalizeFast(const Vec4V a) { return V4Scale(a, FRsqrtFast(V4Dot(a,a))); } NV_FORCE_INLINE Vec4V V4NormalizeSafe(const Vec4V a) { const FloatV zero = FZero(); const FloatV length = V4Length(a); const uint32x4_t isGreaterThanZero = FIsGrtr(length, zero); return V4Sel(isGreaterThanZero, V4ScaleInv(a, length), vcombine_f32(zero, zero)); } NV_FORCE_INLINE BoolV V4IsEqU32(const VecU32V a, const VecU32V b) { return vceqq_u32(a, b); } NV_FORCE_INLINE Vec4V V4Sel(const BoolV c, const Vec4V a, const Vec4V b) { return vbslq_f32(c, a, b); } NV_FORCE_INLINE BoolV V4IsGrtr(const Vec4V a, const Vec4V b) { return vcgtq_f32(a, b); } NV_FORCE_INLINE BoolV V4IsGrtrOrEq(const Vec4V a, const Vec4V b) { return vcgeq_f32(a, b); } NV_FORCE_INLINE BoolV V4IsEq(const Vec4V a, const Vec4V b) { return vceqq_f32(a, b); } NV_FORCE_INLINE Vec4V V4Max(const Vec4V a, const Vec4V b) { return vmaxq_f32(a, b); } NV_FORCE_INLINE Vec4V V4Min(const Vec4V a, const Vec4V b) { return vminq_f32(a, b); } NV_FORCE_INLINE FloatV V4ExtractMax(const Vec4V a) { const float32x2_t low = vget_low_f32(a); const float32x2_t high = vget_high_f32(a); const float32x2_t max0 = vpmax_f32(high, low); const float32x2_t max1 = vpmax_f32(max0, max0); return max1; } NV_FORCE_INLINE FloatV V4ExtractMin(const Vec4V a) { const float32x2_t low = 
vget_low_f32(a); const float32x2_t high = vget_high_f32(a); const float32x2_t min0 = vpmin_f32(high, low); const float32x2_t min1 = vpmin_f32(min0, min0); return min1; } NV_FORCE_INLINE Vec4V V4Clamp(const Vec4V a, const Vec4V minV, const Vec4V maxV) { return V4Max(V4Min(a,maxV),minV); } NV_FORCE_INLINE uint32_t V4AllGrtr(const Vec4V a, const Vec4V b) { return internalUnitNeonSimd::BAllTrue4_R(V4IsGrtr(a, b)); } NV_FORCE_INLINE uint32_t V4AllGrtrOrEq(const Vec4V a, const Vec4V b) { return internalUnitNeonSimd::BAllTrue4_R(V4IsGrtrOrEq(a, b)); } NV_FORCE_INLINE uint32_t V4AllEq(const Vec4V a, const Vec4V b) { return internalUnitNeonSimd::BAllTrue4_R(V4IsEq(a, b)); } NV_FORCE_INLINE Vec4V V4Round(const Vec4V a) { //truncate(a + (0.5f - sign(a))) const Vec4V half = V4Load(0.5f); const float32x4_t sign = vcvtq_f32_u32((vshrq_n_u32(vreinterpretq_u32_f32(a), 31))); const Vec4V aPlusHalf = V4Add(a, half); const Vec4V aRound = V4Sub(aPlusHalf, sign); return vcvtq_f32_s32(vcvtq_s32_f32(aRound)); } NV_FORCE_INLINE Vec4V V4Sin(const Vec4V a) { //Vec4V V1, V2, V3, V5, V7, V9, V11, V13, V15, V17, V19, V21, V23; //Vec4V S1, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11; Vec4V Result; const Vec4V recipTwoPi = V4LoadA(g_NVReciprocalTwoPi.f); const Vec4V twoPi = V4LoadA(g_NVTwoPi.f); const Vec4V tmp = V4Mul(a, recipTwoPi); const Vec4V b = V4Round(tmp); const Vec4V V1 = V4NegMulSub(twoPi, b, a); // sin(V) ~= V - V^3 / 3! + V^5 / 5! - V^7 / 7! + V^9 / 9! - V^11 / 11! + V^13 / 13! - // V^15 / 15! + V^17 / 17! - V^19 / 19! + V^21 / 21! - V^23 / 23! (for -PI <= V < PI) const Vec4V V2 = V4Mul(V1, V1); const Vec4V V3 = V4Mul(V2, V1); const Vec4V V5 = V4Mul(V3, V2); const Vec4V V7 = V4Mul(V5, V2); const Vec4V V9 = V4Mul(V7, V2); const Vec4V V11 = V4Mul(V9, V2); const Vec4V V13 = V4Mul(V11, V2); const Vec4V V15 = V4Mul(V13, V2); const Vec4V V17 = V4Mul(V15, V2); const Vec4V V19 = V4Mul(V17, V2); const Vec4V V21 = V4Mul(V19, V2); const Vec4V V23 = V4Mul(V21, V2); const Vec4V sinCoefficients0 = V4LoadA(g_NVSinCoefficients0.f); const Vec4V sinCoefficients1 = V4LoadA(g_NVSinCoefficients1.f); const Vec4V sinCoefficients2 = V4LoadA(g_NVSinCoefficients2.f); const FloatV S1 = V4GetY(sinCoefficients0); const FloatV S2 = V4GetZ(sinCoefficients0); const FloatV S3 = V4GetW(sinCoefficients0); const FloatV S4 = V4GetX(sinCoefficients1); const FloatV S5 = V4GetY(sinCoefficients1); const FloatV S6 = V4GetZ(sinCoefficients1); const FloatV S7 = V4GetW(sinCoefficients1); const FloatV S8 = V4GetX(sinCoefficients2); const FloatV S9 = V4GetY(sinCoefficients2); const FloatV S10 = V4GetZ(sinCoefficients2); const FloatV S11 = V4GetW(sinCoefficients2); Result = V4ScaleAdd(V3, S1, V1); Result = V4ScaleAdd(V5, S2, Result); Result = V4ScaleAdd(V7, S3, Result); Result = V4ScaleAdd(V9, S4, Result); Result = V4ScaleAdd(V11, S5, Result); Result = V4ScaleAdd(V13, S6, Result); Result = V4ScaleAdd(V15, S7, Result); Result = V4ScaleAdd(V17, S8, Result); Result = V4ScaleAdd(V19, S9, Result); Result = V4ScaleAdd(V21, S10,Result); Result = V4ScaleAdd(V23, S11,Result); return Result; } NV_FORCE_INLINE Vec4V V4Cos(const Vec4V a) { //XMVECTOR V1, V2, V4, V6, V8, V10, V12, V14, V16, V18, V20, V22; //XMVECTOR C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11; Vec4V Result; const Vec4V recipTwoPi = V4LoadA(g_NVReciprocalTwoPi.f); const Vec4V twoPi = V4LoadA(g_NVTwoPi.f); const Vec4V tmp = V4Mul(a, recipTwoPi); const Vec4V b = V4Round(tmp); const Vec4V V1 = V4NegMulSub(twoPi, b, a); // cos(V) ~= 1 - V^2 / 2! + V^4 / 4! - V^6 / 6! + V^8 / 8! - V^10 / 10! + V^12 / 12! 
- // V^14 / 14! + V^16 / 16! - V^18 / 18! + V^20 / 20! - V^22 / 22! (for -PI <= V < PI) const Vec4V V2 = V4Mul(V1, V1); const Vec4V V4 = V4Mul(V2, V2); const Vec4V V6 = V4Mul(V4, V2); const Vec4V V8 = V4Mul(V4, V4); const Vec4V V10 = V4Mul(V6, V4); const Vec4V V12 = V4Mul(V6, V6); const Vec4V V14 = V4Mul(V8, V6); const Vec4V V16 = V4Mul(V8, V8); const Vec4V V18 = V4Mul(V10, V8); const Vec4V V20 = V4Mul(V10, V10); const Vec4V V22 = V4Mul(V12, V10); const Vec4V cosCoefficients0 = V4LoadA(g_NVCosCoefficients0.f); const Vec4V cosCoefficients1 = V4LoadA(g_NVCosCoefficients1.f); const Vec4V cosCoefficients2 = V4LoadA(g_NVCosCoefficients2.f); const FloatV C1 = V4GetY(cosCoefficients0); const FloatV C2 = V4GetZ(cosCoefficients0); const FloatV C3 = V4GetW(cosCoefficients0); const FloatV C4 = V4GetX(cosCoefficients1); const FloatV C5 = V4GetY(cosCoefficients1); const FloatV C6 = V4GetZ(cosCoefficients1); const FloatV C7 = V4GetW(cosCoefficients1); const FloatV C8 = V4GetX(cosCoefficients2); const FloatV C9 = V4GetY(cosCoefficients2); const FloatV C10 = V4GetZ(cosCoefficients2); const FloatV C11 = V4GetW(cosCoefficients2); Result = V4ScaleAdd(V2, C1, V4One()); Result = V4ScaleAdd(V4, C2, Result); Result = V4ScaleAdd(V6, C3, Result); Result = V4ScaleAdd(V8, C4, Result); Result = V4ScaleAdd(V10, C5, Result); Result = V4ScaleAdd(V12, C6, Result); Result = V4ScaleAdd(V14, C7, Result); Result = V4ScaleAdd(V16, C8, Result); Result = V4ScaleAdd(V18, C9, Result); Result = V4ScaleAdd(V20, C10,Result); Result = V4ScaleAdd(V22, C11,Result); return Result; } NV_FORCE_INLINE void V4Transpose(Vec4V& col0, Vec4V& col1, Vec4V& col2, Vec4V& col3) { const float32x4x2_t v0v1 = vzipq_f32(col0, col2); const float32x4x2_t v2v3 = vzipq_f32(col1, col3); const float32x4x2_t zip0 = vzipq_f32(v0v1.val[0], v2v3.val[0]); const float32x4x2_t zip1 = vzipq_f32(v0v1.val[1], v2v3.val[1]); col0 = zip0.val[0]; col1 = zip0.val[1]; col2 = zip1.val[0]; col3 = zip1.val[1]; } ////////////////////////////////// //VEC4V ////////////////////////////////// NV_FORCE_INLINE BoolV BFFFF() { return vmovq_n_u32(0); } NV_FORCE_INLINE BoolV BFFFT() { const uint32x2_t zeros = vmov_n_u32(0); const uint32x2_t ones = vmov_n_u32(0xffffFFFF); const uint32x2_t zo = vext_u32(zeros, ones, 1); return vcombine_u32(zeros, zo); } NV_FORCE_INLINE BoolV BFFTF() { const uint32x2_t zeros = vmov_n_u32(0); const uint32x2_t ones = vmov_n_u32(0xffffFFFF); const uint32x2_t oz = vext_u32(ones, zeros, 1); return vcombine_u32(zeros, oz); } NV_FORCE_INLINE BoolV BFFTT() { const uint32x2_t zeros = vmov_n_u32(0); const uint32x2_t ones = vmov_n_u32(0xffffFFFF); return vcombine_u32(zeros, ones); } NV_FORCE_INLINE BoolV BFTFF() { const uint32x2_t zeros = vmov_n_u32(0); const uint32x2_t ones = vmov_n_u32(0xffffFFFF); const uint32x2_t zo = vext_u32(zeros, ones, 1); return vcombine_u32(zo, zeros); } NV_FORCE_INLINE BoolV BFTFT() { const uint32x2_t zeros = vmov_n_u32(0); const uint32x2_t ones = vmov_n_u32(0xffffFFFF); const uint32x2_t zo = vext_u32(zeros, ones, 1); return vcombine_u32(zo, zo); } NV_FORCE_INLINE BoolV BFTTF() { const uint32x2_t zeros = vmov_n_u32(0); const uint32x2_t ones = vmov_n_u32(0xffffFFFF); const uint32x2_t zo = vext_u32(zeros, ones, 1); const uint32x2_t oz = vext_u32(ones, zeros, 1); return vcombine_u32(zo, oz); } NV_FORCE_INLINE BoolV BFTTT() { const uint32x2_t zeros = vmov_n_u32(0); const uint32x2_t ones = vmov_n_u32(0xffffFFFF); const uint32x2_t zo = vext_u32(zeros, ones, 1); return vcombine_u32(zo, ones); } NV_FORCE_INLINE BoolV BTFFF() { const uint32x2_t 
zeros = vmov_n_u32(0); const uint32x2_t ones = vmov_n_u32(0xffffFFFF); //const uint32x2_t zo = vext_u32(zeros, ones, 1); const uint32x2_t oz = vext_u32(ones, zeros, 1); return vcombine_u32(oz, zeros); } NV_FORCE_INLINE BoolV BTFFT() { const uint32x2_t zeros = vmov_n_u32(0); const uint32x2_t ones = vmov_n_u32(0xffffFFFF); const uint32x2_t zo = vext_u32(zeros, ones, 1); const uint32x2_t oz = vext_u32(ones, zeros, 1); return vcombine_u32(oz, zo); } NV_FORCE_INLINE BoolV BTFTF() { const uint32x2_t zeros = vmov_n_u32(0); const uint32x2_t ones = vmov_n_u32(0xffffFFFF); const uint32x2_t oz = vext_u32(ones, zeros, 1); return vcombine_u32(oz, oz); } NV_FORCE_INLINE BoolV BTFTT() { const uint32x2_t zeros = vmov_n_u32(0); const uint32x2_t ones = vmov_n_u32(0xffffFFFF); const uint32x2_t oz = vext_u32(ones, zeros, 1); return vcombine_u32(oz, ones); } NV_FORCE_INLINE BoolV BTTFF() { const uint32x2_t zeros = vmov_n_u32(0); const uint32x2_t ones = vmov_n_u32(0xffffFFFF); return vcombine_u32(ones, zeros); } NV_FORCE_INLINE BoolV BTTFT() { const uint32x2_t zeros = vmov_n_u32(0); const uint32x2_t ones = vmov_n_u32(0xffffFFFF); const uint32x2_t zo = vext_u32(zeros, ones, 1); return vcombine_u32(ones, zo); } NV_FORCE_INLINE BoolV BTTTF() { const uint32x2_t zeros = vmov_n_u32(0); const uint32x2_t ones = vmov_n_u32(0xffffFFFF); const uint32x2_t oz = vext_u32(ones, zeros, 1); return vcombine_u32(ones, oz); } NV_FORCE_INLINE BoolV BTTTT() { return vmovq_n_u32(0xffffFFFF); } NV_FORCE_INLINE BoolV BXMask() { return BTFFF(); } NV_FORCE_INLINE BoolV BYMask() { return BFTFF(); } NV_FORCE_INLINE BoolV BZMask() { return BFFTF(); } NV_FORCE_INLINE BoolV BWMask() { return BFFFT(); } NV_FORCE_INLINE BoolV BGetX(const BoolV f) { const uint32x2_t fLow = vget_low_u32(f); return vdupq_lane_u32(fLow, 0); } NV_FORCE_INLINE BoolV BGetY(const BoolV f) { const uint32x2_t fLow = vget_low_u32(f); return vdupq_lane_u32(fLow, 1); } NV_FORCE_INLINE BoolV BGetZ(const BoolV f) { const uint32x2_t fHigh = vget_high_u32(f); return vdupq_lane_u32(fHigh, 0); } NV_FORCE_INLINE BoolV BGetW(const BoolV f) { const uint32x2_t fHigh = vget_high_u32(f); return vdupq_lane_u32(fHigh, 1); } NV_FORCE_INLINE BoolV BSetX(const BoolV v, const BoolV f) { return vbslq_u32(BFTTT(), v, f); } NV_FORCE_INLINE BoolV BSetY(const BoolV v, const BoolV f) { return vbslq_u32(BTFTT(), v, f); } NV_FORCE_INLINE BoolV BSetZ(const BoolV v, const BoolV f) { return vbslq_u32(BTTFT(), v, f); } NV_FORCE_INLINE BoolV BSetW(const BoolV v, const BoolV f) { return vbslq_u32(BTTTF(), v, f); } NV_FORCE_INLINE BoolV BAnd(const BoolV a, const BoolV b) { return vandq_u32(a, b); } NV_FORCE_INLINE BoolV BNot(const BoolV a) { return vmvnq_u32(a); } NV_FORCE_INLINE BoolV BAndNot(const BoolV a, const BoolV b) { //return vbicq_u32(a, b); return vandq_u32(a, vmvnq_u32(b)); } NV_FORCE_INLINE BoolV BOr(const BoolV a, const BoolV b) { return vorrq_u32(a, b); } NV_FORCE_INLINE BoolV BAllTrue4(const BoolV a) { const uint32x2_t allTrue = vmov_n_u32(0xffffFFFF); const uint16x4_t dHigh = vget_high_u16(vreinterpretq_u16_u32(a)); const uint16x4_t dLow = vmovn_u32(a); uint16x8_t combined = vcombine_u16(dLow, dHigh); const uint32x2_t finalReduce = vreinterpret_u32_u8(vmovn_u16(combined)); const uint32x2_t result = vceq_u32(finalReduce, allTrue); return vdupq_lane_u32(result, 0); } NV_FORCE_INLINE BoolV BAnyTrue4(const BoolV a) { const uint32x2_t allTrue = vmov_n_u32(0xffffFFFF); const uint16x4_t dHigh = vget_high_u16(vreinterpretq_u16_u32(a)); const uint16x4_t dLow = vmovn_u32(a); uint16x8_t combined = 
vcombine_u16(dLow, dHigh); const uint32x2_t finalReduce = vreinterpret_u32_u8(vmovn_u16(combined)); const uint32x2_t result = vtst_u32(finalReduce, allTrue); return vdupq_lane_u32(result, 0); } NV_FORCE_INLINE BoolV BAllTrue3(const BoolV a) { const uint32x2_t allTrue3 = vmov_n_u32(0x00ffFFFF); const uint16x4_t dHigh = vget_high_u16(vreinterpretq_u16_u32(a)); const uint16x4_t dLow = vmovn_u32(a); uint16x8_t combined = vcombine_u16(dLow, dHigh); const uint32x2_t finalReduce = vreinterpret_u32_u8(vmovn_u16(combined)); const uint32x2_t result = vceq_u32(vand_u32(finalReduce,allTrue3), allTrue3); return vdupq_lane_u32(result, 0); } NV_FORCE_INLINE BoolV BAnyTrue3(const BoolV a) { const uint32x2_t allTrue3 = vmov_n_u32(0x00ffFFFF); const uint16x4_t dHigh = vget_high_u16(vreinterpretq_u16_u32(a)); const uint16x4_t dLow = vmovn_u32(a); uint16x8_t combined = vcombine_u16(dLow, dHigh); const uint32x2_t finalReduce = vreinterpret_u32_u8(vmovn_u16(combined)); const uint32x2_t result = vtst_u32(vand_u32(finalReduce,allTrue3), allTrue3); return vdupq_lane_u32(result, 0); } NV_FORCE_INLINE uint32_t BAllEq(const BoolV a, const BoolV b) { const BoolV bTest = vceqq_u32(a, b); return internalUnitNeonSimd::BAllTrue4_R(bTest); } NV_FORCE_INLINE uint32_t BAllEqTTTT(const BoolV a) { return BAllEq(a, BTTTT()); } NV_FORCE_INLINE uint32_t BAllEqFFFF(const BoolV a) { return BAllEq(a, BFFFF()); } NV_FORCE_INLINE uint32_t BGetBitMask(const BoolV a) { static NV_ALIGN(16, const uint32_t) bitMaskData[4] = { 1, 2, 4, 8 }; const uint32x4_t bitMask = *(reinterpret_cast<const uint32x4_t*>(bitMaskData)); const uint32x4_t t0 = vandq_u32(a, bitMask); const uint32x2_t t1 = vpadd_u32(vget_low_u32(t0), vget_high_u32(t0)); // Pairwise add (0 + 1), (2 + 3) return uint32_t(vget_lane_u32(vpadd_u32(t1, t1), 0)); } ////////////////////////////////// //MAT33V ////////////////////////////////// NV_FORCE_INLINE Vec3V M33MulV3(const Mat33V& a, const Vec3V b) { const FloatV x=V3GetX(b); const FloatV y=V3GetY(b); const FloatV z=V3GetZ(b); const Vec3V v0=V3Scale(a.col0,x); const Vec3V v1=V3Scale(a.col1,y); const Vec3V v2=V3Scale(a.col2,z); const Vec3V v0PlusV1=V3Add(v0,v1); return V3Add(v0PlusV1,v2); } NV_FORCE_INLINE Vec3V M33TrnspsMulV3(const Mat33V& a, const Vec3V b) { const FloatV x=V3Dot(a.col0,b); const FloatV y=V3Dot(a.col1,b); const FloatV z=V3Dot(a.col2,b); return V3Merge(x,y,z); } NV_FORCE_INLINE Vec3V M33MulV3AddV3(const Mat33V& A, const Vec3V b, const Vec3V c) { const FloatV x=V3GetX(b); const FloatV y=V3GetY(b); const FloatV z=V3GetZ(b); Vec3V result = V3ScaleAdd(A.col0, x, c); result = V3ScaleAdd(A.col1, y, result); return V3ScaleAdd(A.col2, z, result); } NV_FORCE_INLINE Mat33V M33MulM33(const Mat33V& a, const Mat33V& b) { return Mat33V(M33MulV3(a,b.col0),M33MulV3(a,b.col1),M33MulV3(a,b.col2)); } NV_FORCE_INLINE Mat33V M33Add(const Mat33V& a, const Mat33V& b) { return Mat33V(V3Add(a.col0,b.col0),V3Add(a.col1,b.col1),V3Add(a.col2,b.col2)); } NV_FORCE_INLINE Mat33V M33Scale(const Mat33V& a, const FloatV& b) { return Mat33V(V3Scale(a.col0,b),V3Scale(a.col1,b),V3Scale(a.col2,b)); } NV_FORCE_INLINE Mat33V M33Inverse(const Mat33V& a) { const float32x2_t zeros = vreinterpret_f32_u32(vmov_n_u32(0)); const BoolV btttf = BTTTF(); const Vec3V cross01 = V3Cross(a.col0,a.col1); const Vec3V cross12 = V3Cross(a.col1,a.col2); const Vec3V cross20 = V3Cross(a.col2,a.col0); const FloatV dot = V3Dot(cross01,a.col2); const FloatV invDet = FRecipFast(dot); const float32x4x2_t merge = vzipq_f32(cross12, cross01); const float32x4_t mergeh = 
merge.val[0]; const float32x4_t mergel = merge.val[1]; //const Vec3V colInv0 = XMVectorPermute(mergeh,cross20,NvPermuteControl(0,4,1,7)); const float32x4_t colInv0_xxyy = vzipq_f32(mergeh, cross20).val[0]; const float32x4_t colInv0 = vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(colInv0_xxyy), btttf)); //const Vec3V colInv1 = XMVectorPermute(mergeh,cross20,NvPermuteControl(2,5,3,7)); const float32x2_t zw0 = vget_high_f32(mergeh); const float32x2_t xy1 = vget_low_f32(cross20); const float32x2_t yzero1 = vext_f32(xy1, zeros, 1); const float32x2x2_t merge1 = vzip_f32(zw0, yzero1); const float32x4_t colInv1 = vcombine_f32(merge1.val[0], merge1.val[1]); //const Vec3V colInv2 = XMVectorPermute(mergel,cross20,NvPermuteControl(0,6,1,7)); const float32x2_t x0y0 = vget_low_f32(mergel); const float32x2_t z1w1 = vget_high_f32(cross20); const float32x2x2_t merge2 = vzip_f32(x0y0, z1w1); const float32x4_t colInv2 = vcombine_f32(merge2.val[0], merge2.val[1]); return Mat33V ( vmulq_lane_f32(colInv0, invDet, 0), vmulq_lane_f32(colInv1, invDet, 0), vmulq_lane_f32(colInv2, invDet, 0) ); } NV_FORCE_INLINE Mat33V M33Trnsps(const Mat33V& a) { return Mat33V ( V3Merge(V3GetX(a.col0),V3GetX(a.col1),V3GetX(a.col2)), V3Merge(V3GetY(a.col0),V3GetY(a.col1),V3GetY(a.col2)), V3Merge(V3GetZ(a.col0),V3GetZ(a.col1),V3GetZ(a.col2)) ); } NV_FORCE_INLINE Mat33V M33Identity() { return Mat33V ( V3UnitX(), V3UnitY(), V3UnitZ() ); } NV_FORCE_INLINE Mat33V M33Sub(const Mat33V& a, const Mat33V& b) { return Mat33V(V3Sub(a.col0,b.col0),V3Sub(a.col1,b.col1),V3Sub(a.col2,b.col2)); } NV_FORCE_INLINE Mat33V M33Neg(const Mat33V& a) { return Mat33V(V3Neg(a.col0),V3Neg(a.col1),V3Neg(a.col2)); } NV_FORCE_INLINE Mat33V M33Abs(const Mat33V& a) { return Mat33V(V3Abs(a.col0),V3Abs(a.col1),V3Abs(a.col2)); } NV_FORCE_INLINE Mat33V PromoteVec3V(const Vec3V v) { const BoolV bTFFF = BTFFF(); const BoolV bFTFF = BFTFF(); const BoolV bFFTF = BTFTF(); const Vec3V zero = V3Zero(); return Mat33V( V3Sel(bTFFF, v, zero), V3Sel(bFTFF, v, zero), V3Sel(bFFTF, v, zero)); } NV_FORCE_INLINE Mat33V M33Diagonal(const Vec3VArg d) { const Vec3V x = V3Mul(V3UnitX(), d); const Vec3V y = V3Mul(V3UnitY(), d); const Vec3V z = V3Mul(V3UnitZ(), d); return Mat33V(x, y, z); } ////////////////////////////////// //MAT34V ////////////////////////////////// NV_FORCE_INLINE Vec3V M34MulV3(const Mat34V& a, const Vec3V b) { const FloatV x=V3GetX(b); const FloatV y=V3GetY(b); const FloatV z=V3GetZ(b); const Vec3V v0=V3Scale(a.col0,x); const Vec3V v1=V3Scale(a.col1,y); const Vec3V v2=V3Scale(a.col2,z); const Vec3V v0PlusV1=V3Add(v0,v1); const Vec3V v0PlusV1Plusv2=V3Add(v0PlusV1,v2); return (V3Add(v0PlusV1Plusv2,a.col3)); } NV_FORCE_INLINE Vec3V M34Mul33V3(const Mat34V& a, const Vec3V b) { const FloatV x=V3GetX(b); const FloatV y=V3GetY(b); const FloatV z=V3GetZ(b); const Vec3V v0=V3Scale(a.col0,x); const Vec3V v1=V3Scale(a.col1,y); const Vec3V v2=V3Scale(a.col2,z); const Vec3V v0PlusV1=V3Add(v0,v1); return V3Add(v0PlusV1,v2); } NV_FORCE_INLINE Vec3V M34TrnspsMul33V3(const Mat34V& a, const Vec3V b) { const FloatV x=V3Dot(a.col0,b); const FloatV y=V3Dot(a.col1,b); const FloatV z=V3Dot(a.col2,b); return V3Merge(x,y,z); } NV_FORCE_INLINE Mat34V M34MulM34(const Mat34V& a, const Mat34V& b) { return Mat34V(M34Mul33V3(a,b.col0), M34Mul33V3(a,b.col1),M34Mul33V3(a,b.col2),M34MulV3(a,b.col3)); } NV_FORCE_INLINE Mat33V M34MulM33(const Mat34V& a, const Mat33V& b) { return Mat33V(M34Mul33V3(a,b.col0),M34Mul33V3(a,b.col1),M34Mul33V3(a,b.col2)); } NV_FORCE_INLINE Mat33V M34Mul33MM34(const 
Mat34V& a, const Mat34V& b) { return Mat33V(M34Mul33V3(a,b.col0),M34Mul33V3(a,b.col1),M34Mul33V3(a,b.col2)); } NV_FORCE_INLINE Mat34V M34Add(const Mat34V& a, const Mat34V& b) { return Mat34V(V3Add(a.col0,b.col0),V3Add(a.col1,b.col1),V3Add(a.col2,b.col2),V3Add(a.col3,b.col3)); } NV_FORCE_INLINE Mat33V M34Trnsps33(const Mat34V& a) { return Mat33V ( V3Merge(V3GetX(a.col0),V3GetX(a.col1),V3GetX(a.col2)), V3Merge(V3GetY(a.col0),V3GetY(a.col1),V3GetY(a.col2)), V3Merge(V3GetZ(a.col0),V3GetZ(a.col1),V3GetZ(a.col2)) ); } ////////////////////////////////// //MAT44V ////////////////////////////////// NV_FORCE_INLINE Vec4V M44MulV4(const Mat44V& a, const Vec4V b) { const FloatV x=V4GetX(b); const FloatV y=V4GetY(b); const FloatV z=V4GetZ(b); const FloatV w=V4GetW(b); const Vec4V v0=V4Scale(a.col0,x); const Vec4V v1=V4Scale(a.col1,y); const Vec4V v2=V4Scale(a.col2,z); const Vec4V v3=V4Scale(a.col3,w); const Vec4V v0PlusV1=V4Add(v0,v1); const Vec4V v0PlusV1Plusv2=V4Add(v0PlusV1,v2); return (V4Add(v0PlusV1Plusv2,v3)); } NV_FORCE_INLINE Vec4V M44TrnspsMulV4(const Mat44V& a, const Vec4V b) { return V4Merge(V4Dot(a.col0,b), V4Dot(a.col1,b), V4Dot(a.col2,b), V4Dot(a.col3,b)); } NV_FORCE_INLINE Mat44V M44MulM44(const Mat44V& a, const Mat44V& b) { return Mat44V(M44MulV4(a,b.col0),M44MulV4(a,b.col1),M44MulV4(a,b.col2),M44MulV4(a,b.col3)); } NV_FORCE_INLINE Mat44V M44Add(const Mat44V& a, const Mat44V& b) { return Mat44V(V4Add(a.col0,b.col0),V4Add(a.col1,b.col1),V4Add(a.col2,b.col2),V4Add(a.col3,b.col3)); } NV_FORCE_INLINE Mat44V M44Trnsps(const Mat44V& a) { // asm volatile( // "vzip.f32 %q0, %q2 \n\t" // "vzip.f32 %q1, %q3 \n\t" // "vzip.f32 %q0, %q1 \n\t" // "vzip.f32 %q2, %q3 \n\t" // : "+w" (a.col0), "+w" (a.col1), "+w" (a.col2), "+w" a.col3)); const float32x4x2_t v0v1 = vzipq_f32(a.col0, a.col2); const float32x4x2_t v2v3 = vzipq_f32(a.col1, a.col3); const float32x4x2_t zip0 = vzipq_f32(v0v1.val[0], v2v3.val[0]); const float32x4x2_t zip1 = vzipq_f32(v0v1.val[1], v2v3.val[1]); return Mat44V(zip0.val[0], zip0.val[1], zip1.val[0], zip1.val[1]); } NV_FORCE_INLINE Mat44V M44Inverse(const Mat44V& a) { float32x4_t minor0, minor1, minor2, minor3; float32x4_t row0, row1, row2, row3; float32x4_t det, tmp1; tmp1 = vmovq_n_f32(0.0f); row1 = vmovq_n_f32(0.0f); row3 = vmovq_n_f32(0.0f); row0 = a.col0; row1 = vextq_f32(a.col1, a.col1, 2); row2 = a.col2; row3 = vextq_f32(a.col3, a.col3, 2); tmp1 = vmulq_f32(row2, row3); tmp1 = vrev64q_f32(tmp1); minor0 = vmulq_f32(row1, tmp1); minor1 = vmulq_f32(row0, tmp1); tmp1 = vextq_f32(tmp1, tmp1, 2); minor0 = vsubq_f32(vmulq_f32(row1, tmp1), minor0); minor1 = vsubq_f32(vmulq_f32(row0, tmp1), minor1); minor1 = vextq_f32(minor1, minor1, 2); tmp1 = vmulq_f32(row1, row2); tmp1 = vrev64q_f32(tmp1); minor0 = vaddq_f32(vmulq_f32(row3, tmp1), minor0); minor3 = vmulq_f32(row0, tmp1); tmp1 = vextq_f32(tmp1, tmp1, 2); minor0 = vsubq_f32(minor0, vmulq_f32(row3, tmp1)); minor3 = vsubq_f32(vmulq_f32(row0, tmp1), minor3); minor3 = vextq_f32(minor3, minor3, 2); tmp1 = vmulq_f32(vextq_f32(row1, row1, 2), row3); tmp1 = vrev64q_f32(tmp1); row2 = vextq_f32(row2, row2, 2); minor0 = vaddq_f32(vmulq_f32(row2, tmp1), minor0); minor2 = vmulq_f32(row0, tmp1); tmp1 = vextq_f32(tmp1, tmp1, 2); minor0 = vsubq_f32(minor0, vmulq_f32(row2, tmp1)); minor2 = vsubq_f32(vmulq_f32(row0, tmp1), minor2); minor2 = vextq_f32(minor2, minor2, 2); tmp1 = vmulq_f32(row0, row1); tmp1 = vrev64q_f32(tmp1); minor2 = vaddq_f32(vmulq_f32(row3, tmp1), minor2); minor3 = vsubq_f32(vmulq_f32(row2, tmp1), minor3); tmp1 = vextq_f32(tmp1, 
tmp1, 2); minor2 = vsubq_f32(vmulq_f32(row3, tmp1), minor2); minor3 = vsubq_f32(minor3, vmulq_f32(row2, tmp1)); tmp1 = vmulq_f32(row0, row3); tmp1 = vrev64q_f32(tmp1); minor1 = vsubq_f32(minor1, vmulq_f32(row2, tmp1)); minor2 = vaddq_f32(vmulq_f32(row1, tmp1), minor2); tmp1 = vextq_f32(tmp1, tmp1, 2); minor1 = vaddq_f32(vmulq_f32(row2, tmp1), minor1); minor2 = vsubq_f32(minor2, vmulq_f32(row1, tmp1)); tmp1 = vmulq_f32(row0, row2); tmp1 = vrev64q_f32(tmp1); minor1 = vaddq_f32(vmulq_f32(row3, tmp1), minor1); minor3 = vsubq_f32(minor3, vmulq_f32(row1, tmp1)); tmp1 = vextq_f32(tmp1, tmp1, 2); minor1 = vsubq_f32(minor1, vmulq_f32(row3, tmp1)); minor3 = vaddq_f32(vmulq_f32(row1, tmp1), minor3); det = vmulq_f32(row0, minor0); det = vaddq_f32(vextq_f32(det, det, 2), det); det = vaddq_f32(vrev64q_f32(det), det); det = vdupq_lane_f32(VRECIPE(vget_low_f32(det)), 0); minor0 = vmulq_f32(det, minor0); minor1 = vmulq_f32(det, minor1); minor2 = vmulq_f32(det, minor2); minor3 = vmulq_f32(det, minor3); Mat44V invTrans(minor0,minor1,minor2,minor3); return M44Trnsps(invTrans); } NV_FORCE_INLINE Vec4V V4LoadXYZW(const float& x, const float& y, const float& z, const float& w) { #if NV_WINRT NV_ALIGN(16,float) r[4] = {x, y, z ,w}; return vld1q_f32((const float32_t*)r); #else const float32x4_t ret = {x, y, z, w}; return ret; #endif // NV_WINRT } /* NV_FORCE_INLINE VecU16V V4U32PK(VecU32V a, VecU32V b) { return vcombine_u16(vqmovn_u32(a), vqmovn_u32(b)); } */ NV_FORCE_INLINE VecU32V V4U32Sel(const BoolV c, const VecU32V a, const VecU32V b) { return vbslq_u32(c, a, b); } NV_FORCE_INLINE VecU32V V4U32or(VecU32V a, VecU32V b) { return vorrq_u32(a, b); } NV_FORCE_INLINE VecU32V V4U32and(VecU32V a, VecU32V b) { return vandq_u32(a, b); } NV_FORCE_INLINE VecU32V V4U32Andc(VecU32V a, VecU32V b) { //return vbicq_u32(a, b); // creates gcc compiler bug in RTreeQueries.cpp return vandq_u32(a, vmvnq_u32(b)); } /* NV_FORCE_INLINE VecU16V V4U16Or(VecU16V a, VecU16V b) { return vorrq_u16(a, b); } */ /* NV_FORCE_INLINE VecU16V V4U16And(VecU16V a, VecU16V b) { return vandq_u16(a, b); } */ /* NV_FORCE_INLINE VecU16V V4U16Andc(VecU16V a, VecU16V b) { return vbicq_u16(a, b); } */ NV_FORCE_INLINE VecI32V I4Load(const int32_t i) { return vdupq_n_s32(i); } NV_FORCE_INLINE VecI32V I4LoadU(const int32_t* i) { return vld1q_s32(i); } NV_FORCE_INLINE VecI32V I4LoadA(const int32_t* i) { return vld1q_s32(i); } NV_FORCE_INLINE VecI32V VecI32V_Add(const VecI32VArg a, const VecI32VArg b) { return vaddq_s32(a, b); } NV_FORCE_INLINE VecI32V VecI32V_Sub(const VecI32VArg a, const VecI32VArg b) { return vsubq_s32(a, b); } NV_FORCE_INLINE BoolV VecI32V_IsGrtr(const VecI32VArg a, const VecI32VArg b) { return vcgtq_s32(a, b); } NV_FORCE_INLINE BoolV VecI32V_IsEq(const VecI32VArg a, const VecI32VArg b) { return vceqq_s32(a, b); } NV_FORCE_INLINE VecI32V V4I32Sel(const BoolV c, const VecI32V a, const VecI32V b) { return vbslq_s32(c, a, b); } NV_FORCE_INLINE VecI32V VecI32V_Zero() { return vdupq_n_s32(0); } NV_FORCE_INLINE VecI32V VecI32V_One() { return vdupq_n_s32(1); } NV_FORCE_INLINE VecI32V VecI32V_Two() { return vdupq_n_s32(2); } NV_FORCE_INLINE VecI32V VecI32V_MinusOne() { return vdupq_n_s32(-1); } NV_FORCE_INLINE VecU32V U4Zero() { return U4Load(0); } NV_FORCE_INLINE VecU32V U4One() { return U4Load(1); } NV_FORCE_INLINE VecU32V U4Two() { return U4Load(2); } NV_FORCE_INLINE VecShiftV VecI32V_PrepareShift(const VecI32VArg shift) { return shift; } NV_FORCE_INLINE VecI32V VecI32V_LeftShift(const VecI32VArg a, const VecShiftVArg count) { return 
vshlq_s32(a, count); } NV_FORCE_INLINE VecI32V VecI32V_RightShift(const VecI32VArg a, const VecShiftVArg count) { return vshlq_s32(a, VecI32V_Sub(I4Load(0), count)); } NV_FORCE_INLINE VecI32V VecI32V_And(const VecI32VArg a, const VecI32VArg b) { return vandq_s32(a, b); } NV_FORCE_INLINE VecI32V VecI32V_Or(const VecI32VArg a, const VecI32VArg b) { return vorrq_s32(a, b); } NV_FORCE_INLINE VecI32V VecI32V_GetX(const VecI32VArg f) { const int32x2_t fLow = vget_low_s32(f); return vdupq_lane_s32(fLow, 0); } NV_FORCE_INLINE VecI32V VecI32V_GetY(const VecI32VArg f) { const int32x2_t fLow = vget_low_s32(f); return vdupq_lane_s32(fLow, 1); } NV_FORCE_INLINE VecI32V VecI32V_GetZ(const VecI32VArg f) { const int32x2_t fHigh = vget_high_s32(f); return vdupq_lane_s32(fHigh, 0); } NV_FORCE_INLINE VecI32V VecI32V_GetW(const VecI32VArg f) { const int32x2_t fHigh = vget_high_s32(f); return vdupq_lane_s32(fHigh, 1); } NV_FORCE_INLINE VecI32V VecI32V_Sel(const BoolV c, const VecI32VArg a, const VecI32VArg b) { VECMATHAOS_ASSERT(_VecMathTests::allElementsEqualBoolV(c,BTTTT()) || _VecMathTests::allElementsEqualBoolV(c,BFFFF())); return vbslq_s32(c, a, b); } NV_FORCE_INLINE void NvI32_From_VecI32V(const VecI32VArg a, int32_t* i) { *i = vgetq_lane_s32(a, 0); } NV_FORCE_INLINE VecI32V VecI32V_Merge(const VecI32VArg a, const VecI32VArg b, const VecI32VArg c, const VecI32VArg d) { const int32x2_t aLow = vget_low_s32(a); const int32x2_t bLow = vget_low_s32(b); const int32x2_t cLow = vget_low_s32(c); const int32x2_t dLow = vget_low_s32(d); const int32x2_t low = vext_s32(aLow, bLow, 1); const int32x2_t high = vext_s32(cLow, dLow, 1); return vcombine_s32(low, high); } NV_FORCE_INLINE VecI32V VecI32V_From_BoolV(const BoolVArg a) { return reinterpret_cast<const int32x4_t&>(a); } NV_FORCE_INLINE VecU32V VecU32V_From_BoolV(const BoolVArg a) { return reinterpret_cast<const uint32x4_t&>(a); } /* template<int a> NV_FORCE_INLINE VecI32V V4ISplat() { return vdupq_n_s32(a); } template<uint32_t a> NV_FORCE_INLINE VecU32V V4USplat() { return vdupq_n_u32(a); } */ /* NV_FORCE_INLINE void V4U16StoreAligned(VecU16V val, VecU16V* address) { vst1q_u16((uint16_t*)address, val); } */ NV_FORCE_INLINE void V4U32StoreAligned(VecU32V val, VecU32V* address) { vst1q_u32((uint32_t*)address, val); } NV_FORCE_INLINE Vec4V V4LoadAligned(Vec4V* addr) { return vld1q_f32((float32_t*)addr); } NV_FORCE_INLINE Vec4V V4LoadUnaligned(Vec4V* addr) { return vld1q_f32((float32_t*)addr); } NV_FORCE_INLINE Vec4V V4Andc(const Vec4V a, const VecU32V b) { return vreinterpretq_f32_u32(V4U32Andc(vreinterpretq_u32_f32(a), b)); } NV_FORCE_INLINE VecU32V V4IsGrtrV32u(const Vec4V a, const Vec4V b) { return V4IsGrtr(a, b); } NV_FORCE_INLINE VecU16V V4U16LoadAligned(VecU16V* addr) { return vld1q_u16((uint16_t*)addr); } NV_FORCE_INLINE VecU16V V4U16LoadUnaligned(VecU16V* addr) { return vld1q_u16((uint16_t*)addr); } NV_FORCE_INLINE VecU16V V4U16CompareGt(VecU16V a, VecU16V b) { return vcgtq_u16(a, b); } NV_FORCE_INLINE VecU16V V4I16CompareGt(VecU16V a, VecU16V b) { return vcgtq_s16((VecI16V&)a, (VecI16V&)b); } NV_FORCE_INLINE Vec4V Vec4V_From_VecU32V(VecU32V a) { return vcvtq_f32_u32(a); } NV_FORCE_INLINE Vec4V Vec4V_From_VecI32V(VecI32V a) { return vcvtq_f32_s32(a); } NV_FORCE_INLINE VecI32V VecI32V_From_Vec4V(Vec4V a) { return vcvtq_s32_f32(a); } NV_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecU32V(VecU32V a) { return vreinterpretq_f32_u32(a); } NV_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecI32V(VecI32V a) { return vreinterpretq_f32_s32(a); } NV_FORCE_INLINE VecU32V 
VecU32V_ReinterpretFrom_Vec4V(Vec4V a) { return vreinterpretq_u32_f32(a); } NV_FORCE_INLINE VecI32V VecI32V_ReinterpretFrom_Vec4V(Vec4V a) { return vreinterpretq_s32_f32(a); } template<int index> NV_FORCE_INLINE BoolV BSplatElement(BoolV a) { #if NV_WINRT if(index == 0) { return vdupq_lane_u32(vget_low_u32(a), 0); } else if (index == 1) { return vdupq_lane_u32(vget_low_u32(a), 1); } #else if(index < 2) { return vdupq_lane_u32(vget_low_u32(a), index); } #endif else if(index == 2) { return vdupq_lane_u32(vget_high_u32(a), 0); } else if(index == 3) { return vdupq_lane_u32(vget_high_u32(a), 1); } } template<int index> NV_FORCE_INLINE VecU32V V4U32SplatElement(VecU32V a) { const int highIndex = index-2; #if NV_WINRT if(index == 0) { return vdupq_lane_u32(vget_low_u32(a), 0); } else if (index == 1) { return vdupq_lane_u32(vget_low_u32(a), 1); } #else if(index < 2) { return vdupq_lane_u32(vget_low_u32(a), index); } #endif else if(index == 2) { return vdupq_lane_u32(vget_high_u32(a), 0); } else if(index == 3) { return vdupq_lane_u32(vget_high_u32(a), 1); } } template<int index> NV_FORCE_INLINE Vec4V V4SplatElement(Vec4V a) { #if NV_WINRT if(index == 0) { return vdupq_lane_f32(vget_low_f32(a), 0); } else if (index == 1) { return vdupq_lane_f32(vget_low_f32(a), 1); } #else if(index < 2) { return vdupq_lane_f32(vget_low_f32(a), index); } #endif else if(index == 2) { return vdupq_lane_f32(vget_high_f32(a), 0); } else if(index == 3) { return vdupq_lane_f32(vget_high_f32(a), 1); } } template<int index> NV_FORCE_INLINE VecU16V V4U16SplatElement(VecU16V a) { #if NV_WINRT if(index == 0) { return vdupq_lane_u16(vget_low_u16(a), 0); } else if(index == 1) { return vdupq_lane_u16(vget_low_u16(a), 1); } else if(index == 2) { return vdupq_lane_u16(vget_low_u16(a), 2); } else if(index == 3) { return vdupq_lane_u16(vget_low_u16(a), 3); } #else if(index < 4) { return vdupq_lane_u16(vget_low_u16(a),index); } #endif else if(index == 4) { return vdupq_lane_u16(vget_high_u16(a), 0); } else if(index == 5) { return vdupq_lane_u16(vget_high_u16(a), 1); } else if(index == 6) { return vdupq_lane_u16(vget_high_u16(a), 2); } else if(index == 7) { return vdupq_lane_u16(vget_high_u16(a), 3); } } template<int imm> NV_FORCE_INLINE VecI16V V4I16SplatImmediate() { return vdupq_n_s16(imm); } template<uint16_t imm> NV_FORCE_INLINE VecU16V V4U16SplatImmediate() { return vdupq_n_u16(imm); } NV_FORCE_INLINE VecU16V V4U16SubtractModulo(VecU16V a, VecU16V b) { return vsubq_u16(a, b); } NV_FORCE_INLINE VecU16V V4U16AddModulo(VecU16V a, VecU16V b) { return vaddq_u16(a, b); } NV_FORCE_INLINE VecU32V V4U16GetLo16(VecU16V a) { const uint16x4x2_t ret = vuzp_u16(vget_low_u16(a), vget_high_u16(a)); return vmovl_u16(ret.val[0]); } NV_FORCE_INLINE VecU32V V4U16GetHi16(VecU16V a) { const uint16x4x2_t ret = vuzp_u16(vget_low_u16(a), vget_high_u16(a)); return vmovl_u16(ret.val[1]); } NV_FORCE_INLINE VecU32V VecU32VLoadXYZW(uint32_t x, uint32_t y, uint32_t z, uint32_t w) { #if NV_WINRT NV_ALIGN(16,uint32_t) r[4] = {x, y, z ,w}; return vld1q_u32((const uint32_t*)r); #else const uint32x4_t ret = {x, y, z, w}; return ret; #endif } NV_FORCE_INLINE VecU32V U4Load(const uint32_t i) { return vdupq_n_u32(i); } NV_FORCE_INLINE VecU32V U4LoadU(const uint32_t* i) { return vld1q_u32(i); } NV_FORCE_INLINE VecU32V U4LoadA(const uint32_t* i) { return vld1q_u32(i); } NV_FORCE_INLINE Vec4V V4Ceil(const Vec4V in) { const float32x4_t ones = vdupq_n_f32(1.0f); const float32x4_t rdToZero = vcvtq_f32_s32(vcvtq_s32_f32(in)); const float32x4_t rdToZeroPlusOne = 
vaddq_f32(rdToZero, ones); const uint32x4_t gt = vcgtq_f32(in, rdToZero); return vbslq_f32(gt, rdToZeroPlusOne, rdToZero); } NV_FORCE_INLINE Vec4V V4Floor(const Vec4V in) { const float32x4_t ones = vdupq_n_f32(1.0f); const float32x4_t rdToZero = vcvtq_f32_s32(vcvtq_s32_f32(in)); const float32x4_t rdToZeroMinusOne = vsubq_f32(rdToZero, ones); const uint32x4_t lt = vcltq_f32(in, rdToZero); return vbslq_f32(lt, rdToZeroMinusOne, rdToZero); } NV_FORCE_INLINE VecU32V V4ConvertToU32VSaturate(const Vec4V in, uint32_t power) { NV_ASSERT(power == 0 && "Non-zero power not supported in convertToU32VSaturate"); NV_UNUSED(power); // prevent warning in release builds return vcvtq_u32_f32(in); } #endif //PS_UNIX_NEON_INLINE_AOS_H
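// --- Editorial usage sketch (not part of the original header) ------------------
// A minimal example of how the AoS layer above is typically driven from scalar
// code. It assumes only entry points declared in this header family (V3LoadU,
// V3Dot, FStore); the NvVec3 inputs and the helper name are illustrative and not
// part of the library.
static NV_FORCE_INLINE float exampleProjectedLength(const NvVec3& p, const NvVec3& axis)
{
    const Vec3V vp    = V3LoadU(p);       // unaligned load, w lane cleared
    const Vec3V vaxis = V3LoadU(axis);
    const FloatV d    = V3Dot(vp, vaxis); // dot product, splatted to every lane
    return FStore(d);                     // read the scalar result back
}
// -------------------------------------------------------------------------------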
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/platform/windows/NsWindowsAoS.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef PS_WINDOWS_AOS_H #define PS_WINDOWS_AOS_H // no includes here! this file should be included from NvcVecMath.h only!!! #if !COMPILE_VECTOR_INTRINSICS #error Vector intrinsics should not be included when using scalar implementation. #endif typedef __m128 FloatV; typedef __m128 Vec3V; typedef __m128 Vec4V; typedef __m128 BoolV; typedef __m128 VecU32V; typedef __m128 VecI32V; typedef __m128 VecU16V; typedef __m128 VecI16V; typedef __m128 VecU8V; typedef __m128 QuatV; #define FloatVArg FloatV& #define Vec3VArg Vec3V& #define Vec4VArg Vec4V& #define BoolVArg BoolV& #define VecU32VArg VecU32V& #define VecI32VArg VecI32V& #define VecU16VArg VecU16V& #define VecI16VArg VecI16V& #define VecU8VArg VecU8V& #define QuatVArg QuatV& //Optimization for situations in which you cross product multiple vectors with the same vector. 
//Avoids 2X shuffles per product struct VecCrossV { Vec3V mL1; Vec3V mR1; }; struct VecShiftV { VecI32V shift; }; #define VecShiftVArg VecShiftV& NV_ALIGN_PREFIX(16) struct Mat33V { Mat33V(){} Mat33V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2) : col0(c0), col1(c1), col2(c2) { } Vec3V NV_ALIGN(16,col0); Vec3V NV_ALIGN(16,col1); Vec3V NV_ALIGN(16,col2); }NV_ALIGN_SUFFIX(16); NV_ALIGN_PREFIX(16) struct Mat34V { Mat34V(){} Mat34V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2, const Vec3V& c3) : col0(c0), col1(c1), col2(c2), col3(c3) { } Vec3V NV_ALIGN(16,col0); Vec3V NV_ALIGN(16,col1); Vec3V NV_ALIGN(16,col2); Vec3V NV_ALIGN(16,col3); }NV_ALIGN_SUFFIX(16); NV_ALIGN_PREFIX(16) struct Mat43V { Mat43V(){} Mat43V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2) : col0(c0), col1(c1), col2(c2) { } Vec4V NV_ALIGN(16,col0); Vec4V NV_ALIGN(16,col1); Vec4V NV_ALIGN(16,col2); }NV_ALIGN_SUFFIX(16); NV_ALIGN_PREFIX(16) struct Mat44V { Mat44V(){} Mat44V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2, const Vec4V& c3) : col0(c0), col1(c1), col2(c2), col3(c3) { } Vec4V NV_ALIGN(16,col0); Vec4V NV_ALIGN(16,col1); Vec4V NV_ALIGN(16,col2); Vec4V NV_ALIGN(16,col3); }NV_ALIGN_SUFFIX(16); #endif //PS_WINDOWS_AOS_H
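// --- Editorial usage sketch (not part of the original header) ------------------
// The column-major Mat33V declared above is meant to be assembled from three
// Vec3V columns and applied with M33MulV3 / M33TrnspsMulV3, which are implemented
// in the per-platform "InlineAoS" headers. The NvVec3 columns and the helper name
// below are illustrative only.
static NV_FORCE_INLINE Vec3V exampleRotate(const NvVec3& col0, const NvVec3& col1,
                                           const NvVec3& col2, const Vec3V v)
{
    const Mat33V rot(V3LoadU(col0), V3LoadU(col1), V3LoadU(col2)); // columns, not rows
    return M33MulV3(rot, v);                                       // rot * v
}
// -------------------------------------------------------------------------------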
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/platform/windows/NsWindowsTrigConstants.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef PS_WINDOWS_TRIG_CONSTANTS_H #define PS_WINDOWS_TRIG_CONSTANTS_H //#include "NvMath.h" #define NV_GLOBALCONST extern const __declspec(selectany) __declspec(align(16)) struct NV_VECTORF32 { float f[4]; }; //#define NV_PI 3.141592654f //#define NV_2PI 6.283185307f //#define NV_1DIVPI 0.318309886f //#define NV_1DIV2PI 0.159154943f //#define NV_PIDIV2 1.570796327f //#define NV_PIDIV4 0.785398163f NV_GLOBALCONST NV_VECTORF32 g_NVSinCoefficients0 = {{1.0f, -0.166666667f, 8.333333333e-3f, -1.984126984e-4f}}; NV_GLOBALCONST NV_VECTORF32 g_NVSinCoefficients1 = {{2.755731922e-6f, -2.505210839e-8f, 1.605904384e-10f, -7.647163732e-13f}}; NV_GLOBALCONST NV_VECTORF32 g_NVSinCoefficients2 = {{2.811457254e-15f, -8.220635247e-18f, 1.957294106e-20f, -3.868170171e-23f}}; NV_GLOBALCONST NV_VECTORF32 g_NVCosCoefficients0 = {{1.0f, -0.5f, 4.166666667e-2f, -1.388888889e-3f}}; NV_GLOBALCONST NV_VECTORF32 g_NVCosCoefficients1 = {{2.480158730e-5f, -2.755731922e-7f, 2.087675699e-9f, -1.147074560e-11f}}; NV_GLOBALCONST NV_VECTORF32 g_NVCosCoefficients2 = {{4.779477332e-14f, -1.561920697e-16f, 4.110317623e-19f, -8.896791392e-22f}}; NV_GLOBALCONST NV_VECTORF32 g_NVTanCoefficients0 = {{1.0f, 0.333333333f, 0.133333333f, 5.396825397e-2f}}; NV_GLOBALCONST NV_VECTORF32 g_NVTanCoefficients1 = {{2.186948854e-2f, 8.863235530e-3f, 3.592128167e-3f, 1.455834485e-3f}}; NV_GLOBALCONST NV_VECTORF32 g_NVTanCoefficients2 = {{5.900274264e-4f, 2.391290764e-4f, 9.691537707e-5f, 3.927832950e-5f}}; NV_GLOBALCONST NV_VECTORF32 g_NVASinCoefficients0 = {{-0.05806367563904f, -0.41861972469416f, 0.22480114791621f, 2.17337241360606f}}; NV_GLOBALCONST NV_VECTORF32 g_NVASinCoefficients1 = {{0.61657275907170f, 4.29696498283455f, -1.18942822255452f, -6.53784832094831f}}; NV_GLOBALCONST NV_VECTORF32 g_NVASinCoefficients2 = {{-1.36926553863413f, -4.48179294237210f, 1.41810672941833f, 
5.48179257935713f}}; NV_GLOBALCONST NV_VECTORF32 g_NVATanCoefficients0 = {{1.0f, 0.333333334f, 0.2f, 0.142857143f}}; NV_GLOBALCONST NV_VECTORF32 g_NVATanCoefficients1 = {{1.111111111e-1f, 9.090909091e-2f, 7.692307692e-2f, 6.666666667e-2f}}; NV_GLOBALCONST NV_VECTORF32 g_NVATanCoefficients2 = {{5.882352941e-2f, 5.263157895e-2f, 4.761904762e-2f, 4.347826087e-2f}}; NV_GLOBALCONST NV_VECTORF32 g_NVSinEstCoefficients = {{1.0f, -1.66521856991541e-1f, 8.199913018755e-3f, -1.61475937228e-4f}}; NV_GLOBALCONST NV_VECTORF32 g_NVCosEstCoefficients = {{1.0f, -4.95348008918096e-1f, 3.878259962881e-2f, -9.24587976263e-4f}}; NV_GLOBALCONST NV_VECTORF32 g_NVTanEstCoefficients = {{2.484f, -1.954923183e-1f, 2.467401101f, NvInvPi}}; NV_GLOBALCONST NV_VECTORF32 g_NVATanEstCoefficients = {{7.689891418951e-1f, 1.104742493348f, 8.661844266006e-1f, NvPiDivTwo}}; NV_GLOBALCONST NV_VECTORF32 g_NVASinEstCoefficients = {{-1.36178272886711f, 2.37949493464538f, -8.08228565650486e-1f, 2.78440142746736e-1f}}; NV_GLOBALCONST NV_VECTORF32 g_NVASinEstConstants = {{1.00000011921f, NvPiDivTwo, 0.0f, 0.0f}}; NV_GLOBALCONST NV_VECTORF32 g_NVPiConstants0 = {{NvPi, NvTwoPi, NvInvPi, NvInvTwoPi}}; NV_GLOBALCONST NV_VECTORF32 g_NVReciprocalTwoPi = {{NvInvTwoPi, NvInvTwoPi, NvInvTwoPi, NvInvTwoPi}}; NV_GLOBALCONST NV_VECTORF32 g_NVTwoPi = {{NvTwoPi, NvTwoPi, NvTwoPi, NvTwoPi}}; #endif
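// --- Editorial cross-check sketch (not part of the original header) ------------
// The g_NVSinCoefficients* tables above are simply the Taylor terms -1/3!, 1/5!,
// ... that V4Sin applies after range-reducing its argument to [-pi, pi). The
// scalar reference below mirrors that evaluation; it is only a sanity check for
// the constants and is not part of the library.
static inline float sinSeriesReference(float x)            // expects |x| <= pi
{
    const float c[11] = { -0.166666667f,      8.333333333e-3f, -1.984126984e-4f,
                           2.755731922e-6f,  -2.505210839e-8f,  1.605904384e-10f,
                          -7.647163732e-13f,  2.811457254e-15f, -8.220635247e-18f,
                           1.957294106e-20f, -3.868170171e-23f };
    const float x2 = x * x;
    float term = x;                                        // x^1
    float result = x;                                      // leading 1.0f * x
    for (int i = 0; i < 11; ++i)
    {
        term *= x2;                                        // x^(2i+3)
        result += c[i] * term;                             // accumulate Taylor term
    }
    return result;
}
// -------------------------------------------------------------------------------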
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/platform/windows/NsWindowsFPU.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_WINDOWS_NSWINDOWSFPU_H #define NV_WINDOWS_NSWINDOWSFPU_H NV_INLINE nvidia::shdfnd::SIMDGuard::SIMDGuard() { #if !NV_ARM mControlWord = _mm_getcsr(); // set default (disable exceptions: _MM_MASK_MASK) and FTZ (_MM_FLUSH_ZERO_ON), DAZ (_MM_DENORMALS_ZERO_ON: (1<<6)) _mm_setcsr(_MM_MASK_MASK | _MM_FLUSH_ZERO_ON | (1 << 6)); #endif } NV_INLINE nvidia::shdfnd::SIMDGuard::~SIMDGuard() { #if !NV_ARM // restore control word and clear any exception flags // (setting exception state flags cause exceptions on the first following fp operation) _mm_setcsr(mControlWord & ~_MM_EXCEPT_MASK); #endif } #endif // #ifndef NV_WINDOWS_NSWINDOWSFPU_H
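// --- Editorial usage sketch (not part of the original header) ------------------
// SIMDGuard is an RAII scope guard: the constructor saves MXCSR, masks SSE
// exceptions and enables flush-to-zero / denormals-are-zero, and the destructor
// restores the saved control word. The intended usage is simply to place one on
// the stack around SIMD-heavy work; the function name below is illustrative.
void runSimdSection()
{
    nvidia::shdfnd::SIMDGuard guard;   // saves MXCSR, masks exceptions, sets FTZ/DAZ
    // ... SIMD-heavy work goes here (placeholder) ...
}                                      // destructor restores the saved control word
// -------------------------------------------------------------------------------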
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/platform/windows/NsWindowsInlineAoS.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef PS_WINDOWS_INLINE_AOS_H #define PS_WINDOWS_INLINE_AOS_H #if !COMPILE_VECTOR_INTRINSICS #error Vector intrinsics should not be included when using scalar implementation. #endif //Remove this define when all platforms use simd solver. #define NV_SUPPORT_SIMD ///////////////////////////////////////////////////////////////////// ////FUNCTIONS USED ONLY FOR ASSERTS IN VECTORISED IMPLEMENTATIONS ///////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////// //USED ONLY INTERNALLY ////////////////////////////////////////////////////////////////////// namespace internalWindowsSimd { NV_FORCE_INLINE __m128 m128_I2F(__m128i n) { return _mm_castsi128_ps(n); } NV_FORCE_INLINE __m128i m128_F2I(__m128 n) { return _mm_castps_si128(n); } NV_FORCE_INLINE uint32_t BAllTrue4_R(const BoolV a) { const int32_t moveMask = _mm_movemask_ps(a); return uint32_t(moveMask == (0xf)); } NV_FORCE_INLINE uint32_t BAnyTrue4_R(const BoolV a) { const int32_t moveMask = _mm_movemask_ps(a); return uint32_t(moveMask != (0x0)); } NV_FORCE_INLINE uint32_t BAllTrue3_R(const BoolV a) { const int32_t moveMask = _mm_movemask_ps(a); return uint32_t(((moveMask & 0x7) == (0x7))); } NV_FORCE_INLINE uint32_t BAnyTrue3_R(const BoolV a) { const int32_t moveMask = _mm_movemask_ps(a); return uint32_t(((moveMask & 0x7) != (0x0))); } NV_FORCE_INLINE uint32_t FiniteTestEq(const Vec4V a, const Vec4V b) { //This is a bit of a bodge. //_mm_comieq_ss returns 1 if either value is nan so we need to re-cast a and b with true encoded as a non-nan number. //There must be a better way of doing this in sse. 
const BoolV one = FOne(); const BoolV zero = FZero(); const BoolV a1 =V4Sel(a,one,zero); const BoolV b1 =V4Sel(b,one,zero); return ( uint32_t(_mm_comieq_ss(a1, b1) && _mm_comieq_ss(_mm_shuffle_ps(a1, a1, _MM_SHUFFLE(1,1,1,1)),_mm_shuffle_ps(b1, b1, _MM_SHUFFLE(1,1,1,1))) && _mm_comieq_ss(_mm_shuffle_ps(a1, a1, _MM_SHUFFLE(2,2,2,2)),_mm_shuffle_ps(b1, b1, _MM_SHUFFLE(2,2,2,2))) && _mm_comieq_ss(_mm_shuffle_ps(a1, a1, _MM_SHUFFLE(3,3,3,3)),_mm_shuffle_ps(b1, b1, _MM_SHUFFLE(3,3,3,3)))) ); } NV_FORCE_INLINE bool hasZeroElementinFloatV(const FloatV a) { VECMATHAOS_ASSERT(isValidFloatV(a)); return (_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0)),FZero()) ? true : false); } NV_FORCE_INLINE bool hasZeroElementInVec3V(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); return ( _mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0)),FZero()) || _mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(1,1,1,1)),FZero()) || _mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2)),FZero()) ); } NV_FORCE_INLINE bool hasZeroElementInVec4V(const Vec4V a) { return ( _mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0)),FZero()) || _mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(1,1,1,1)),FZero()) || _mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2)),FZero()) || _mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(3,3,3,3)),FZero()) ); } const NV_ALIGN(16, uint32_t gMaskXYZ[4])={0xffffffff, 0xffffffff, 0xffffffff, 0}; } namespace _VecMathTests { NV_FORCE_INLINE bool allElementsEqualFloatV(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return(_mm_comieq_ss(a,b)!=0); } NV_FORCE_INLINE bool allElementsEqualVec3V(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); return V3AllEq(a, b) != 0; } NV_FORCE_INLINE bool allElementsEqualVec4V(const Vec4V a, const Vec4V b) { return V4AllEq(a, b) != 0; } NV_FORCE_INLINE bool allElementsEqualBoolV(const BoolV a, const BoolV b) { return internalWindowsSimd::BAllTrue4_R(VecI32V_IsEq(a, b)) != 0; } NV_FORCE_INLINE bool allElementsEqualVecU32V(const VecU32V a, const VecU32V b) { return internalWindowsSimd::BAllTrue4_R(V4IsEqU32(a, b)) != 0; } NV_FORCE_INLINE bool allElementsEqualVecI32V(const VecI32V a, const VecI32V b) { BoolV c = internalWindowsSimd::m128_I2F(_mm_cmpeq_epi32(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b))); return internalWindowsSimd::BAllTrue4_R(c) != 0; } #define VECMATH_AOS_EPSILON (1e-3f) static const FloatV minFError=FLoad(-VECMATH_AOS_EPSILON); static const FloatV maxFError=FLoad(VECMATH_AOS_EPSILON); static const Vec3V minV3Error=V3Load(-VECMATH_AOS_EPSILON); static const Vec3V maxV3Error=V3Load(VECMATH_AOS_EPSILON); static const Vec4V minV4Error=V4Load(-VECMATH_AOS_EPSILON); static const Vec4V maxV4Error=V4Load(VECMATH_AOS_EPSILON); NV_FORCE_INLINE bool allElementsNearEqualFloatV(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); const FloatV c=FSub(a,b); return (_mm_comigt_ss(c,minFError) && _mm_comilt_ss(c,maxFError)); } NV_FORCE_INLINE bool allElementsNearEqualVec3V(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); const Vec3V c=V3Sub(a,b); return ( _mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0,0,0,0)),minV3Error) && _mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0,0,0,0)),maxV3Error) && _mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1,1,1,1)),minV3Error) && _mm_comilt_ss(_mm_shuffle_ps(c, c, 
_MM_SHUFFLE(1,1,1,1)),maxV3Error) && _mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2,2,2,2)),minV3Error) && _mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2,2,2,2)),maxV3Error) ); } NV_FORCE_INLINE bool allElementsNearEqualVec4V(const Vec4V a, const Vec4V b) { const Vec4V c=V4Sub(a,b); return ( _mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0,0,0,0)),minV4Error) && _mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0,0,0,0)),maxV4Error) && _mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1,1,1,1)),minV4Error) && _mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1,1,1,1)),maxV4Error) && _mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2,2,2,2)),minV4Error) && _mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2,2,2,2)),maxV4Error) && _mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(3,3,3,3)),minV4Error) && _mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(3,3,3,3)),maxV4Error) ); } } NV_FORCE_INLINE bool isValidFloatV(const FloatV a) { return ( _mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0)),_mm_shuffle_ps(a, a, _MM_SHUFFLE(1,1,1,1))) && _mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0)),_mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2))) && _mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0)),_mm_shuffle_ps(a, a, _MM_SHUFFLE(3,3,3,3))) ); } NV_FORCE_INLINE bool isValidVec3V(const Vec3V a) { return (_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(3,3,3,3)),FZero()) ? true : false); } NV_FORCE_INLINE bool isFiniteFloatV(const FloatV a) { return NvIsFinite(FStore(a)); /* const uint32_t badNumber = (_FPCLASS_SNAN | _FPCLASS_QNAN | _FPCLASS_NINF | _FPCLASS_PINF); const FloatV vBadNum = FloatV_From_F32((float&)badNumber); const BoolV vMask = BAnd(vBadNum, a); return FiniteTestEq(vMask, BFFFF()) == 1; */ } NV_FORCE_INLINE bool isFiniteVec3V(const Vec3V a) { NV_ALIGN(16, float f[4]); V4StoreA((Vec4V&)a, f); return NvIsFinite(f[0]) && NvIsFinite(f[1]) && NvIsFinite(f[2]); /* const uint32_t badNumber = (_FPCLASS_SNAN | _FPCLASS_QNAN | _FPCLASS_NINF | _FPCLASS_PINF); const Vec3V vBadNum = Vec3V_From_F32((float&)badNumber); const BoolV vMask = BAnd(BAnd(vBadNum, a), BTTTF()); return FiniteTestEq(vMask, BFFFF()) == 1; */ } NV_FORCE_INLINE bool isFiniteVec4V(const Vec4V a) { NV_ALIGN(16, float f[4]); V4StoreA(a, f); return NvIsFinite(f[0]) && NvIsFinite(f[1]) && NvIsFinite(f[2]) && NvIsFinite(f[3]); /* const uint32_t badNumber = (_FPCLASS_SNAN | _FPCLASS_QNAN | _FPCLASS_NINF | _FPCLASS_PINF); const Vec4V vBadNum = Vec4V_From_U32((float&)badNumber); const BoolV vMask = BAnd(vBadNum, a); return FiniteTestEq(vMask, BFFFF()) == 1; */ } ///////////////////////////////////////////////////////////////////// ////VECTORISED FUNCTION IMPLEMENTATIONS ///////////////////////////////////////////////////////////////////// NV_FORCE_INLINE FloatV FLoad(const float f) { return (_mm_load1_ps(&f)); } NV_FORCE_INLINE Vec3V V3Load(const float f) { return _mm_set_ps(0.0f,f,f,f); } NV_FORCE_INLINE Vec4V V4Load(const float f) { return (_mm_load1_ps(&f)); } NV_FORCE_INLINE BoolV BLoad(const bool f) { const uint32_t i=uint32_t(-(int32_t)f); return _mm_load1_ps((float*)&i); } NV_FORCE_INLINE Vec3V V3LoadA(const NvVec3& f) { VECMATHAOS_ASSERT(0 == ((size_t)&f & 0x0f)); return _mm_and_ps(_mm_load_ps(&f.x), reinterpret_cast<const Vec4V&>(internalWindowsSimd::gMaskXYZ)); } NV_FORCE_INLINE Vec3V V3LoadU(const NvVec3& f) { return (_mm_set_ps(0.0f,f.z,f.y,f.x)); } // w component of result is undefined NV_FORCE_INLINE Vec3V V3LoadUnsafeA(const NvVec3& f) { VECMATHAOS_ASSERT(0 == ((uint64_t)&f & 0x0f)); return _mm_load_ps(&f.x); } NV_FORCE_INLINE 
Vec3V V3LoadA(const float* const f) { VECMATHAOS_ASSERT(0 == ((uint64_t)f & 0x0f)); return V4ClearW(_mm_load_ps(f)); } NV_FORCE_INLINE Vec3V V3LoadU(const float* const i) { return (_mm_set_ps(0.0f,i[2],i[1],i[0])); } NV_FORCE_INLINE Vec3V Vec3V_From_Vec4V(Vec4V v) { return V4ClearW(v); } NV_FORCE_INLINE Vec3V Vec3V_From_Vec4V_WUndefined(const Vec4V v) { return v; } NV_FORCE_INLINE Vec4V Vec4V_From_Vec3V(Vec3V f) { return f; //ok if it is implemented as the same type. } NV_FORCE_INLINE Vec4V Vec4V_From_FloatV(FloatV f) { return f; } NV_FORCE_INLINE Vec3V Vec3V_From_FloatV(FloatV f) { return Vec3V_From_Vec4V(Vec4V_From_FloatV(f)); } NV_FORCE_INLINE Vec3V Vec3V_From_FloatV_WUndefined(FloatV f) { return Vec3V_From_Vec4V_WUndefined(Vec4V_From_FloatV(f)); } NV_FORCE_INLINE Vec4V Vec4V_From_NvVec3_WUndefined(const NvVec3& f) { return (_mm_set_ps(0.0f,f.z,f.y,f.x)); } NV_FORCE_INLINE Vec4V V4LoadA(const float* const f) { VECMATHAOS_ASSERT(0 == ((uint64_t)f & 0x0f)); return (_mm_load_ps(f)); } NV_FORCE_INLINE void V4StoreA(const Vec4V a, float* f) { VECMATHAOS_ASSERT(0 == ((uint64_t)f & 0x0f)); _mm_store_ps(f,a); } NV_FORCE_INLINE void V4StoreU(const Vec4V a, float* f) { _mm_storeu_ps(f,a); } NV_FORCE_INLINE void BStoreA(const BoolV a, uint32_t* f) { VECMATHAOS_ASSERT(0 == ((uint64_t)f & 0x0f)); _mm_store_ps((float*)f,a); } NV_FORCE_INLINE void U4StoreA(const VecU32V uv, uint32_t* u) { VECMATHAOS_ASSERT(0 == ((uint64_t)u & 0x0f)); _mm_store_ps((float*)u,uv); } NV_FORCE_INLINE void I4StoreA(const VecI32V iv, int32_t* i) { VECMATHAOS_ASSERT(0 == ((uint64_t)i & 0x0f)); _mm_store_ps((float*)i,iv); } NV_FORCE_INLINE Vec4V V4LoadU(const float* const f) { return (_mm_loadu_ps(f)); } NV_FORCE_INLINE BoolV BLoad(const bool* const f) { const NV_ALIGN(16, uint32_t b[4])={uint32_t(-(int32_t)f[0]), uint32_t(-(int32_t)f[1]), uint32_t(-(int32_t)f[2]), uint32_t(-(int32_t)f[3])}; return _mm_load_ps((float*)&b); } NV_FORCE_INLINE float FStore(const FloatV a) { VECMATHAOS_ASSERT(isValidFloatV(a)); float f; _mm_store_ss(&f,a); return f; } NV_FORCE_INLINE void FStore(const FloatV a, float* NV_RESTRICT f) { VECMATHAOS_ASSERT(isValidFloatV(a)); _mm_store_ss(f,a); } NV_FORCE_INLINE void V3StoreA(const Vec3V a, NvVec3& f) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(0 == ((int)&a & 0x0F)); VECMATHAOS_ASSERT(0 == ((int)&f & 0x0F)); NV_ALIGN(16, float f2[4]); _mm_store_ps(f2,a); f=NvVec3(f2[0],f2[1],f2[2]); } NV_FORCE_INLINE void Store_From_BoolV(const BoolV b, uint32_t* b2) { _mm_store_ss((float*)b2,b); } NV_FORCE_INLINE void V3StoreU(const Vec3V a, NvVec3& f) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(0 == ((int)&a & 0x0F)); NV_ALIGN(16, float f2[4]); _mm_store_ps(f2,a); f=NvVec3(f2[0],f2[1],f2[2]); } NV_FORCE_INLINE Mat33V Mat33V_From_NvMat33(const NvMat33 &m) { return Mat33V(V3LoadU(m.column0), V3LoadU(m.column1), V3LoadU(m.column2)); } NV_FORCE_INLINE void NvMat33_From_Mat33V(const Mat33V &m, NvMat33 &out) { NV_ASSERT((size_t(&out)&15)==0); V3StoreU(m.col0, out.column0); V3StoreU(m.col1, out.column1); V3StoreU(m.col2, out.column2); } ////////////////////////////////// //FLOATV ////////////////////////////////// NV_FORCE_INLINE FloatV FZero() { //return FloatV_From_F32(0.0f); return _mm_setzero_ps(); } NV_FORCE_INLINE FloatV FOne() { return FLoad(1.0f); } NV_FORCE_INLINE FloatV FHalf() { return FLoad(0.5f); } NV_FORCE_INLINE FloatV FEps() { return FLoad(NV_EPS_REAL); } NV_FORCE_INLINE FloatV FEps6() { return FLoad(1e-6f); } NV_FORCE_INLINE FloatV FMax() { return FLoad(NV_MAX_REAL); } 
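// Added note: by convention a FloatV holds one scalar replicated into all four
// SSE lanes (isValidFloatV asserts this), while a Vec3V keeps its unused w
// lane at zero (isValidVec3V asserts w == 0). That is why the FloatV helpers
// in this section use full-width _mm_*_ps operations. A small usage sketch,
// assuming a caller-provided NvVec3 named p:
//
//   const FloatV half = FLoad(0.5f);      // 0.5f replicated into x,y,z,w
//   const Vec3V  v    = V3LoadU(p);       // (p.x, p.y, p.z, 0)
//   const Vec3V  h    = V3Scale(v, half); // full-width multiply, w stays 0
//   NvVec3 out; V3StoreU(h, out);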
NV_FORCE_INLINE FloatV FNegMax() { return FLoad(-NV_MAX_REAL); } NV_FORCE_INLINE FloatV IZero() { const uint32_t zero = 0; return _mm_load1_ps((float*)&zero); } NV_FORCE_INLINE FloatV IOne() { const uint32_t one = 1; return _mm_load1_ps((float*)&one); } NV_FORCE_INLINE FloatV ITwo() { const uint32_t two = 2; return _mm_load1_ps((float*)&two); } NV_FORCE_INLINE FloatV IThree() { const uint32_t three = 3; return _mm_load1_ps((float*)&three); } NV_FORCE_INLINE FloatV IFour() { uint32_t four = 4; return _mm_load1_ps((float*)&four); } NV_FORCE_INLINE FloatV FNeg(const FloatV f) { VECMATHAOS_ASSERT(isValidFloatV(f)); return _mm_sub_ps( _mm_setzero_ps(), f); } NV_FORCE_INLINE FloatV FAdd(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return _mm_add_ps(a,b); } NV_FORCE_INLINE FloatV FSub(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return _mm_sub_ps(a,b); } NV_FORCE_INLINE FloatV FMul(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return _mm_mul_ps(a,b); } NV_FORCE_INLINE FloatV FDiv(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return _mm_div_ps(a,b); } NV_FORCE_INLINE FloatV FDivFast(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return _mm_mul_ps(a,_mm_rcp_ps(b)); } NV_FORCE_INLINE FloatV FRecip(const FloatV a) { VECMATHAOS_ASSERT(isValidFloatV(a)); return _mm_div_ps(FOne(),a); } NV_FORCE_INLINE FloatV FRecipFast(const FloatV a) { return _mm_rcp_ps(a); } NV_FORCE_INLINE FloatV FRsqrt(const FloatV a) { VECMATHAOS_ASSERT(isValidFloatV(a)); return _mm_div_ps(FOne(),_mm_sqrt_ps(a)); } NV_FORCE_INLINE FloatV FSqrt(const FloatV a) { VECMATHAOS_ASSERT(isValidFloatV(a)); return _mm_sqrt_ps(a); } NV_FORCE_INLINE FloatV FRsqrtFast(const FloatV a) { return _mm_rsqrt_ps(a); } NV_FORCE_INLINE FloatV FScaleAdd(const FloatV a, const FloatV b, const FloatV c) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); VECMATHAOS_ASSERT(isValidFloatV(c)); return FAdd(FMul(a,b),c); } NV_FORCE_INLINE FloatV FNegScaleSub(const FloatV a, const FloatV b, const FloatV c) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); VECMATHAOS_ASSERT(isValidFloatV(c)); return FSub(c,FMul(a,b)); } NV_FORCE_INLINE FloatV FAbs(const FloatV a) { VECMATHAOS_ASSERT(isValidFloatV(a)); NV_ALIGN(16, const static uint32_t absMask[4]) = {0x7fFFffFF, 0x7fFFffFF, 0x7fFFffFF, 0x7fFFffFF}; return _mm_and_ps(a, _mm_load_ps((float*)absMask)); } NV_FORCE_INLINE FloatV FSel(const BoolV c, const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(_VecMathTests::allElementsEqualBoolV(c,BTTTT()) || _VecMathTests::allElementsEqualBoolV(c,BFFFF())); VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return _mm_or_ps(_mm_andnot_ps(c, b), _mm_and_ps(c, a)); } NV_FORCE_INLINE BoolV FIsGrtr(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return _mm_cmpgt_ps(a,b); } NV_FORCE_INLINE BoolV FIsGrtrOrEq(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return _mm_cmpge_ps(a,b); } NV_FORCE_INLINE BoolV FIsEq(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return _mm_cmpeq_ps(a,b); } NV_FORCE_INLINE FloatV FMax(const FloatV a, 
const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return _mm_max_ps(a, b); } NV_FORCE_INLINE FloatV FMin(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return _mm_min_ps(a, b); } NV_FORCE_INLINE FloatV FClamp(const FloatV a, const FloatV minV, const FloatV maxV) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(minV)); VECMATHAOS_ASSERT(isValidFloatV(maxV)); return FMax(FMin(a,maxV),minV); } NV_FORCE_INLINE uint32_t FAllGrtr(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return uint32_t(_mm_comigt_ss(a,b)); } NV_FORCE_INLINE uint32_t FAllGrtrOrEq(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return uint32_t(_mm_comige_ss(a,b)); } NV_FORCE_INLINE uint32_t FAllEq(const FloatV a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return uint32_t(_mm_comieq_ss(a,b)); } NV_FORCE_INLINE FloatV FRound(const FloatV a) { //return _mm_round_ps(a, 0x0); const FloatV half = FLoad(0.5f); const __m128 signBit = _mm_cvtepi32_ps(_mm_srli_epi32(_mm_cvtps_epi32(a), 31)); const FloatV aRound = FSub(FAdd(a, half), signBit); __m128i tmp = _mm_cvttps_epi32(aRound); return _mm_cvtepi32_ps(tmp); } NV_FORCE_INLINE FloatV FSin(const FloatV a) { //Vec4V V1, V2, V3, V5, V7, V9, V11, V13, V15, V17, V19, V21, V23; //Vec4V S1, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11; FloatV Result; // Modulo the range of the given angles such that -XM_2PI <= Angles < XM_2PI const FloatV recipTwoPi = V4LoadA(g_NVReciprocalTwoPi.f); const FloatV twoPi = V4LoadA(g_NVTwoPi.f); const FloatV tmp = FMul(a, recipTwoPi); const FloatV b = FRound(tmp); const FloatV V1 = FNegMulSub(twoPi, b, a); // sin(V) ~= V - V^3 / 3! + V^5 / 5! - V^7 / 7! + V^9 / 9! - V^11 / 11! + V^13 / 13! - // V^15 / 15! + V^17 / 17! - V^19 / 19! + V^21 / 21! - V^23 / 23! 
(for -PI <= V < PI) const FloatV V2 = FMul(V1, V1); const FloatV V3 = FMul(V2, V1); const FloatV V5 = FMul(V3, V2); const FloatV V7 = FMul(V5, V2); const FloatV V9 = FMul(V7, V2); const FloatV V11 = FMul(V9, V2); const FloatV V13 = FMul(V11, V2); const FloatV V15 = FMul(V13, V2); const FloatV V17 = FMul(V15, V2); const FloatV V19 = FMul(V17, V2); const FloatV V21 = FMul(V19, V2); const FloatV V23 = FMul(V21, V2); const Vec4V sinCoefficients0 = V4LoadA(g_NVSinCoefficients0.f); const Vec4V sinCoefficients1 = V4LoadA(g_NVSinCoefficients1.f); const Vec4V sinCoefficients2 = V4LoadA(g_NVSinCoefficients2.f); const FloatV S1 = V4GetY(sinCoefficients0); const FloatV S2 = V4GetZ(sinCoefficients0); const FloatV S3 = V4GetW(sinCoefficients0); const FloatV S4 = V4GetX(sinCoefficients1); const FloatV S5 = V4GetY(sinCoefficients1); const FloatV S6 = V4GetZ(sinCoefficients1); const FloatV S7 = V4GetW(sinCoefficients1); const FloatV S8 = V4GetX(sinCoefficients2); const FloatV S9 = V4GetY(sinCoefficients2); const FloatV S10 = V4GetZ(sinCoefficients2); const FloatV S11 = V4GetW(sinCoefficients2); Result = FMulAdd(S1, V3, V1); Result = FMulAdd(S2, V5, Result); Result = FMulAdd(S3, V7, Result); Result = FMulAdd(S4, V9, Result); Result = FMulAdd(S5, V11, Result); Result = FMulAdd(S6, V13, Result); Result = FMulAdd(S7, V15, Result); Result = FMulAdd(S8, V17, Result); Result = FMulAdd(S9, V19, Result); Result = FMulAdd(S10, V21, Result); Result = FMulAdd(S11, V23, Result); return Result; } NV_FORCE_INLINE FloatV FCos(const FloatV a) { //XMVECTOR V1, V2, V4, V6, V8, V10, V12, V14, V16, V18, V20, V22; //XMVECTOR C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11; FloatV Result; // Modulo the range of the given angles such that -XM_2PI <= Angles < XM_2PI const FloatV recipTwoPi = V4LoadA(g_NVReciprocalTwoPi.f); const FloatV twoPi = V4LoadA(g_NVTwoPi.f); const FloatV tmp = FMul(a, recipTwoPi); const FloatV b = FRound(tmp); const FloatV V1 = FNegMulSub(twoPi, b, a); // cos(V) ~= 1 - V^2 / 2! + V^4 / 4! - V^6 / 6! + V^8 / 8! - V^10 / 10! + V^12 / 12! - // V^14 / 14! + V^16 / 16! - V^18 / 18! + V^20 / 20! - V^22 / 22! 
(for -PI <= V < PI) const FloatV V2 = FMul(V1, V1); const FloatV V4 = FMul(V2, V2); const FloatV V6 = FMul(V4, V2); const FloatV V8 = FMul(V4, V4); const FloatV V10 = FMul(V6, V4); const FloatV V12 = FMul(V6, V6); const FloatV V14 = FMul(V8, V6); const FloatV V16 = FMul(V8, V8); const FloatV V18 = FMul(V10, V8); const FloatV V20 = FMul(V10, V10); const FloatV V22 = FMul(V12, V10); const Vec4V cosCoefficients0 = V4LoadA(g_NVCosCoefficients0.f); const Vec4V cosCoefficients1 = V4LoadA(g_NVCosCoefficients1.f); const Vec4V cosCoefficients2 = V4LoadA(g_NVCosCoefficients2.f); const FloatV C1 = V4GetY(cosCoefficients0); const FloatV C2 = V4GetZ(cosCoefficients0); const FloatV C3 = V4GetW(cosCoefficients0); const FloatV C4 = V4GetX(cosCoefficients1); const FloatV C5 = V4GetY(cosCoefficients1); const FloatV C6 = V4GetZ(cosCoefficients1); const FloatV C7 = V4GetW(cosCoefficients1); const FloatV C8 = V4GetX(cosCoefficients2); const FloatV C9 = V4GetY(cosCoefficients2); const FloatV C10 = V4GetZ(cosCoefficients2); const FloatV C11 = V4GetW(cosCoefficients2); Result = FMulAdd(C1, V2, V4One()); Result = FMulAdd(C2, V4, Result); Result = FMulAdd(C3, V6, Result); Result = FMulAdd(C4, V8, Result); Result = FMulAdd(C5, V10, Result); Result = FMulAdd(C6, V12, Result); Result = FMulAdd(C7, V14, Result); Result = FMulAdd(C8, V16, Result); Result = FMulAdd(C9, V18, Result); Result = FMulAdd(C10, V20, Result); Result = FMulAdd(C11, V22, Result); return Result; } NV_FORCE_INLINE uint32_t FOutOfBounds(const FloatV a, const FloatV min, const FloatV max) { const BoolV ffff = BFFFF(); const BoolV c = BOr(FIsGrtr(a, max), FIsGrtr(min, a)); return uint32_t(!BAllEq(c, ffff)); } NV_FORCE_INLINE uint32_t FInBounds(const FloatV a, const FloatV min, const FloatV max) { const BoolV tttt = BTTTT(); const BoolV c = BAnd(FIsGrtrOrEq(a, min), FIsGrtrOrEq(max, a)); return BAllEq(c, tttt); } NV_FORCE_INLINE uint32_t FOutOfBounds(const FloatV a, const FloatV bounds) { return FOutOfBounds(a, FNeg(bounds), bounds); } NV_FORCE_INLINE uint32_t FInBounds(const FloatV a, const FloatV bounds) { return FInBounds(a, FNeg(bounds), bounds); } ////////////////////////////////// //VEC3V ////////////////////////////////// NV_FORCE_INLINE Vec3V V3Splat(const FloatV f) { VECMATHAOS_ASSERT(isValidFloatV(f)); const __m128 zero=V3Zero(); const __m128 fff0 = _mm_move_ss(f, zero); return _mm_shuffle_ps(fff0, fff0, _MM_SHUFFLE(0,1,2,3)); } NV_FORCE_INLINE Vec3V V3Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z) { VECMATHAOS_ASSERT(isValidFloatV(x)); VECMATHAOS_ASSERT(isValidFloatV(y)); VECMATHAOS_ASSERT(isValidFloatV(z)); // static on zero causes compiler crash on x64 debug_opt const __m128 zero=V3Zero(); const __m128 xy = _mm_move_ss(x, y); const __m128 z0 = _mm_move_ss(zero, z); return _mm_shuffle_ps(xy, z0, _MM_SHUFFLE(1,0,0,1)); } NV_FORCE_INLINE Vec3V V3UnitX() { const NV_ALIGN(16, float x[4])={1.0f,0.0f,0.0f,0.0f}; const __m128 x128=_mm_load_ps(x); return x128; } NV_FORCE_INLINE Vec3V V3UnitY() { const NV_ALIGN(16, float y[4])={0.0f,1.0f,0.0f,0.0f}; const __m128 y128=_mm_load_ps(y); return y128; } NV_FORCE_INLINE Vec3V V3UnitZ() { const NV_ALIGN(16, float z[4])={0.0f,0.0f,1.0f,0.0f}; const __m128 z128=_mm_load_ps(z); return z128; } NV_FORCE_INLINE FloatV V3GetX(const Vec3V f) { VECMATHAOS_ASSERT(isValidVec3V(f)); return _mm_shuffle_ps(f, f, _MM_SHUFFLE(0,0,0,0)); } NV_FORCE_INLINE FloatV V3GetY(const Vec3V f) { VECMATHAOS_ASSERT(isValidVec3V(f)); return _mm_shuffle_ps(f, f, _MM_SHUFFLE(1,1,1,1)); } NV_FORCE_INLINE FloatV V3GetZ(const 
Vec3V f) { VECMATHAOS_ASSERT(isValidVec3V(f)); return _mm_shuffle_ps(f, f, _MM_SHUFFLE(2,2,2,2)); } NV_FORCE_INLINE Vec3V V3SetX(const Vec3V v, const FloatV f) { VECMATHAOS_ASSERT(isValidVec3V(v)); VECMATHAOS_ASSERT(isValidFloatV(f)); return V3Sel(BFTTT(),v,f); } NV_FORCE_INLINE Vec3V V3SetY(const Vec3V v, const FloatV f) { VECMATHAOS_ASSERT(isValidVec3V(v)); VECMATHAOS_ASSERT(isValidFloatV(f)); return V3Sel(BTFTT(),v,f); } NV_FORCE_INLINE Vec3V V3SetZ(const Vec3V v, const FloatV f) { VECMATHAOS_ASSERT(isValidVec3V(v)); VECMATHAOS_ASSERT(isValidFloatV(f)); return V3Sel(BTTFT(),v,f); } NV_FORCE_INLINE Vec3V V3ColX(const Vec3V a, const Vec3V b, const Vec3V c) { Vec3V r = _mm_shuffle_ps(a,c,_MM_SHUFFLE(3,0,3,0)); return V3SetY(r, V3GetX(b)); } NV_FORCE_INLINE Vec3V V3ColY(const Vec3V a, const Vec3V b, const Vec3V c) { Vec3V r = _mm_shuffle_ps(a,c,_MM_SHUFFLE(3,1,3,1)); return V3SetY(r, V3GetY(b)); } NV_FORCE_INLINE Vec3V V3ColZ(const Vec3V a, const Vec3V b, const Vec3V c) { Vec3V r = _mm_shuffle_ps(a,c,_MM_SHUFFLE(3,2,3,2)); return V3SetY(r, V3GetZ(b)); } NV_FORCE_INLINE Vec3V V3Zero() { return _mm_setzero_ps(); } NV_FORCE_INLINE Vec3V V3One() { return V3Load(1.0f); } NV_FORCE_INLINE Vec3V V3Eps() { return V3Load(NV_EPS_REAL); } NV_FORCE_INLINE Vec3V V3Neg(const Vec3V f) { VECMATHAOS_ASSERT(isValidVec3V(f)); return _mm_sub_ps( _mm_setzero_ps(), f); } NV_FORCE_INLINE Vec3V V3Add(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); return _mm_add_ps(a,b); } NV_FORCE_INLINE Vec3V V3Sub(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); return _mm_sub_ps(a,b); } NV_FORCE_INLINE Vec3V V3Scale(const Vec3V a, const FloatV b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return _mm_mul_ps(a,b); } NV_FORCE_INLINE Vec3V V3Mul(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); return _mm_mul_ps(a,b); } NV_FORCE_INLINE Vec3V V3ScaleInv(const Vec3V a, const FloatV b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return _mm_div_ps(a,b); } NV_FORCE_INLINE Vec3V V3Div(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); // why are these here? 
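// Added note (assumption): the commented-out lines below look like a leftover
// guard that replaced the divisor's w lane with 1.0f so the unused w component
// (0 by Vec3V convention) would not produce 0/0 = NaN; the current code simply
// divides all four lanes, while V3DivFast below still applies that w-lane
// substitution before taking the reciprocal.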
//static const __m128 one=V3One(); //static const __m128 tttf=BTTTF(); //const __m128 b1=V3Sel(tttf,b,one); return _mm_div_ps(a,b); } NV_FORCE_INLINE Vec3V V3ScaleInvFast(const Vec3V a, const FloatV b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); return _mm_mul_ps(a,_mm_rcp_ps(b)); } NV_FORCE_INLINE Vec3V V3DivFast(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); const __m128 one=V3One(); const __m128 tttf=BTTTF(); const __m128 b1=V3Sel(tttf,b,one); return _mm_mul_ps(a,_mm_rcp_ps(b1)); } NV_FORCE_INLINE Vec3V V3Recip(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); const __m128 zero=V3Zero(); const __m128 tttf=BTTTF(); const __m128 recipA=_mm_div_ps(V3One(),a); return V3Sel(tttf,recipA,zero); } NV_FORCE_INLINE Vec3V V3RecipFast(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); const __m128 zero=V3Zero(); const __m128 tttf=BTTTF(); const __m128 recipA=_mm_rcp_ps(a); return V3Sel(tttf,recipA,zero); } NV_FORCE_INLINE Vec3V V3Rsqrt(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); const __m128 zero=V3Zero(); const __m128 tttf=BTTTF(); const __m128 recipA=_mm_div_ps(V3One(),_mm_sqrt_ps(a)); return V3Sel(tttf,recipA,zero); } NV_FORCE_INLINE Vec3V V3RsqrtFast(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); const __m128 zero=V3Zero(); const __m128 tttf=BTTTF(); const __m128 recipA=_mm_rsqrt_ps(a); return V3Sel(tttf,recipA,zero); } NV_FORCE_INLINE Vec3V V3ScaleAdd(const Vec3V a, const FloatV b, const Vec3V c) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); VECMATHAOS_ASSERT(isValidVec3V(c)); return V3Add(V3Scale(a,b),c); } NV_FORCE_INLINE Vec3V V3NegScaleSub(const Vec3V a, const FloatV b, const Vec3V c) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidFloatV(b)); VECMATHAOS_ASSERT(isValidVec3V(c)); return V3Sub(c,V3Scale(a,b)); } NV_FORCE_INLINE Vec3V V3MulAdd(const Vec3V a, const Vec3V b, const Vec3V c) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); VECMATHAOS_ASSERT(isValidVec3V(c)); return V3Add(V3Mul(a,b),c); } NV_FORCE_INLINE Vec3V V3NegMulSub(const Vec3V a, const Vec3V b, const Vec3V c) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); VECMATHAOS_ASSERT(isValidVec3V(c)); return V3Sub(c,V3Mul(a,b)); } NV_FORCE_INLINE Vec3V V3Abs(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); return V3Max(a,V3Neg(a)); } NV_FORCE_INLINE FloatV V3Dot(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); __m128 dot1 = _mm_mul_ps(a, b); //w,z,y,x //__m128 shuf1 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(2,1,0,3)); //z,y,x,w //__m128 shuf2 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(1,0,3,2)); //y,x,w,z //__m128 shuf3 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(0,3,2,1)); //x,w,z,y //return _mm_add_ps(_mm_add_ps(shuf2, shuf3), _mm_add_ps(dot1,shuf1)); __m128 shuf1 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(0,0,0,0)); //z,y,x,w __m128 shuf2 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(1,1,1,1)); //y,x,w,z __m128 shuf3 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(2,2,2,2)); //x,w,z,y return _mm_add_ps(_mm_add_ps(shuf1, shuf2), shuf3); } NV_FORCE_INLINE Vec3V V3Cross(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); __m128 r1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 0, 2)); //z,x,y,w __m128 r2 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1)); //y,z,x,w __m128 l1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1)); //y,z,x,w __m128 l2 = 
_mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 1, 0, 2)); //z,x,y,w return _mm_sub_ps(_mm_mul_ps(l1, l2), _mm_mul_ps(r1,r2)); } NV_FORCE_INLINE VecCrossV V3PrepareCross(const Vec3V a) { VecCrossV v; v.mR1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 0, 2)); //z,x,y,w v.mL1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1)); //y,z,x,w return v; } NV_FORCE_INLINE Vec3V V3Cross(const VecCrossV& a, const Vec3V b) { __m128 r2 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1)); //y,z,x,w __m128 l2 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 1, 0, 2)); //z,x,y,w return _mm_sub_ps(_mm_mul_ps(a.mL1, l2), _mm_mul_ps(a.mR1, r2)); } NV_FORCE_INLINE Vec3V V3Cross(const Vec3V a, const VecCrossV& b) { __m128 r2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1)); //y,z,x,w __m128 l2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 0, 2)); //z,x,y,w return _mm_sub_ps(_mm_mul_ps(b.mR1, r2), _mm_mul_ps(b.mL1, l2)); } NV_FORCE_INLINE Vec3V V3Cross(const VecCrossV& a, const VecCrossV& b) { return _mm_sub_ps(_mm_mul_ps(a.mL1, b.mR1), _mm_mul_ps(a.mR1, b.mL1)); } NV_FORCE_INLINE FloatV V3Length(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); return _mm_sqrt_ps(V3Dot(a,a)); } NV_FORCE_INLINE FloatV V3LengthSq(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); return V3Dot(a,a); } NV_FORCE_INLINE Vec3V V3Normalize(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(!FAllEq(V3Dot(a,a), FZero())); return V3ScaleInv(a, _mm_sqrt_ps(V3Dot(a,a))); } NV_FORCE_INLINE Vec3V V3NormalizeFast(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); return V3Mul(a, _mm_rsqrt_ps(V3Dot(a,a))); } NV_FORCE_INLINE Vec3V V3NormalizeSafe(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); const __m128 zero=V3Zero(); const __m128 eps=FEps(); const __m128 length=V3Length(a); const __m128 isGreaterThanZero=FIsGrtr(length,eps); return V3Sel(isGreaterThanZero,V3ScaleInv(a,length),zero); } NV_FORCE_INLINE Vec3V V3Sel(const BoolV c, const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); return _mm_or_ps(_mm_andnot_ps(c, b), _mm_and_ps(c, a)); } NV_FORCE_INLINE BoolV V3IsGrtr(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); return _mm_cmpgt_ps(a,b); } NV_FORCE_INLINE BoolV V3IsGrtrOrEq(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); return _mm_cmpge_ps(a,b); } NV_FORCE_INLINE BoolV V3IsEq(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); return _mm_cmpeq_ps(a,b); } NV_FORCE_INLINE Vec3V V3Max(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); return _mm_max_ps(a, b); } NV_FORCE_INLINE Vec3V V3Min(const Vec3V a, const Vec3V b) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(b)); return _mm_min_ps(a, b); } //Extract the maximum value from a NV_FORCE_INLINE FloatV V3ExtractMax(const Vec3V a) { const __m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0)); const __m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1,1,1,1)); const __m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2)); return _mm_max_ps(_mm_max_ps(shuf1, shuf2), shuf3); } //Extract the maximum value from a NV_FORCE_INLINE FloatV V3ExtractMin(const Vec3V a) { const __m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0)); const __m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1,1,1,1)); const __m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2)); return _mm_min_ps(_mm_min_ps(shuf1, shuf2), shuf3); } //// 
if(a > 0.0f) return 1.0f; else if a == 0.f return 0.f, else return -1.f; //NV_FORCE_INLINE Vec3V V3MathSign(const Vec3V a) //{ // VECMATHAOS_ASSERT(isValidVec3V(a)); // // const __m128i ai = _mm_cvtps_epi32(a); // const __m128i bi = _mm_cvtps_epi32(V3Neg(a)); // const __m128 aa = _mm_cvtepi32_ps(_mm_srai_epi32(ai, 31)); // const __m128 bb = _mm_cvtepi32_ps(_mm_srai_epi32(bi, 31)); // return _mm_or_ps(aa, bb); //} //return (a >= 0.0f) ? 1.0f : -1.0f; NV_FORCE_INLINE Vec3V V3Sign(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); const __m128 zero = V3Zero(); const __m128 one = V3One(); const __m128 none = V3Neg(one); return V3Sel(V3IsGrtrOrEq(a, zero), one, none); } NV_FORCE_INLINE Vec3V V3Clamp(const Vec3V a, const Vec3V minV, const Vec3V maxV) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(minV)); VECMATHAOS_ASSERT(isValidVec3V(maxV)); return V3Max(V3Min(a,maxV),minV); } NV_FORCE_INLINE uint32_t V3AllGrtr(const Vec3V a, const Vec3V b) { return internalWindowsSimd::BAllTrue3_R(V4IsGrtr(a, b)); } NV_FORCE_INLINE uint32_t V3AllGrtrOrEq(const Vec3V a, const Vec3V b) { return internalWindowsSimd::BAllTrue3_R(V4IsGrtrOrEq(a, b)); } NV_FORCE_INLINE uint32_t V3AllEq(const Vec3V a, const Vec3V b) { return internalWindowsSimd::BAllTrue3_R(V4IsEq(a, b)); } NV_FORCE_INLINE Vec3V V3Round(const Vec3V a) { //return _mm_round_ps(a, 0x0); const Vec3V half = V3Load(0.5f); const __m128 signBit = _mm_cvtepi32_ps(_mm_srli_epi32(_mm_cvtps_epi32(a), 31)); const Vec3V aRound = V3Sub(V3Add(a, half), signBit); __m128i tmp = _mm_cvttps_epi32(aRound); return _mm_cvtepi32_ps(tmp); } NV_FORCE_INLINE Vec3V V3Sin(const Vec3V a) { //Vec4V V1, V2, V3, V5, V7, V9, V11, V13, V15, V17, V19, V21, V23; //Vec4V S1, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11; Vec3V Result; // Modulo the range of the given angles such that -XM_2PI <= Angles < XM_2PI const Vec3V recipTwoPi = V4LoadA(g_NVReciprocalTwoPi.f); const Vec3V twoPi = V4LoadA(g_NVTwoPi.f); const Vec3V tmp = V3Mul(a, recipTwoPi); const Vec3V b = V3Round(tmp); const Vec3V V1 = V3NegMulSub(twoPi, b, a); // sin(V) ~= V - V^3 / 3! + V^5 / 5! - V^7 / 7! + V^9 / 9! - V^11 / 11! + V^13 / 13! - // V^15 / 15! + V^17 / 17! - V^19 / 19! + V^21 / 21! - V^23 / 23! 
(for -PI <= V < PI) const Vec3V V2 = V3Mul(V1, V1); const Vec3V V3 = V3Mul(V2, V1); const Vec3V V5 = V3Mul(V3, V2); const Vec3V V7 = V3Mul(V5, V2); const Vec3V V9 = V3Mul(V7, V2); const Vec3V V11 = V3Mul(V9, V2); const Vec3V V13 = V3Mul(V11, V2); const Vec3V V15 = V3Mul(V13, V2); const Vec3V V17 = V3Mul(V15, V2); const Vec3V V19 = V3Mul(V17, V2); const Vec3V V21 = V3Mul(V19, V2); const Vec3V V23 = V3Mul(V21, V2); const Vec4V sinCoefficients0 = V4LoadA(g_NVSinCoefficients0.f); const Vec4V sinCoefficients1 = V4LoadA(g_NVSinCoefficients1.f); const Vec4V sinCoefficients2 = V4LoadA(g_NVSinCoefficients2.f); const FloatV S1 = V4GetY(sinCoefficients0); const FloatV S2 = V4GetZ(sinCoefficients0); const FloatV S3 = V4GetW(sinCoefficients0); const FloatV S4 = V4GetX(sinCoefficients1); const FloatV S5 = V4GetY(sinCoefficients1); const FloatV S6 = V4GetZ(sinCoefficients1); const FloatV S7 = V4GetW(sinCoefficients1); const FloatV S8 = V4GetX(sinCoefficients2); const FloatV S9 = V4GetY(sinCoefficients2); const FloatV S10 = V4GetZ(sinCoefficients2); const FloatV S11 = V4GetW(sinCoefficients2); Result = V3MulAdd(S1, V3, V1); Result = V3MulAdd(S2, V5, Result); Result = V3MulAdd(S3, V7, Result); Result = V3MulAdd(S4, V9, Result); Result = V3MulAdd(S5, V11, Result); Result = V3MulAdd(S6, V13, Result); Result = V3MulAdd(S7, V15, Result); Result = V3MulAdd(S8, V17, Result); Result = V3MulAdd(S9, V19, Result); Result = V3MulAdd(S10, V21, Result); Result = V3MulAdd(S11, V23, Result); return Result; } NV_FORCE_INLINE Vec3V V3Cos(const Vec3V a) { //XMVECTOR V1, V2, V4, V6, V8, V10, V12, V14, V16, V18, V20, V22; //XMVECTOR C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11; Vec3V Result; // Modulo the range of the given angles such that -XM_2PI <= Angles < XM_2PI const Vec3V recipTwoPi = V4LoadA(g_NVReciprocalTwoPi.f); const Vec3V twoPi = V4LoadA(g_NVTwoPi.f); const Vec3V tmp = V3Mul(a, recipTwoPi); const Vec3V b = V3Round(tmp); const Vec3V V1 = V3NegMulSub(twoPi, b, a); // cos(V) ~= 1 - V^2 / 2! + V^4 / 4! - V^6 / 6! + V^8 / 8! - V^10 / 10! + V^12 / 12! - // V^14 / 14! + V^16 / 16! - V^18 / 18! + V^20 / 20! - V^22 / 22! 
(for -PI <= V < PI) const Vec3V V2 = V3Mul(V1, V1); const Vec3V V4 = V3Mul(V2, V2); const Vec3V V6 = V3Mul(V4, V2); const Vec3V V8 = V3Mul(V4, V4); const Vec3V V10 = V3Mul(V6, V4); const Vec3V V12 = V3Mul(V6, V6); const Vec3V V14 = V3Mul(V8, V6); const Vec3V V16 = V3Mul(V8, V8); const Vec3V V18 = V3Mul(V10, V8); const Vec3V V20 = V3Mul(V10, V10); const Vec3V V22 = V3Mul(V12, V10); const Vec4V cosCoefficients0 = V4LoadA(g_NVCosCoefficients0.f); const Vec4V cosCoefficients1 = V4LoadA(g_NVCosCoefficients1.f); const Vec4V cosCoefficients2 = V4LoadA(g_NVCosCoefficients2.f); const FloatV C1 = V4GetY(cosCoefficients0); const FloatV C2 = V4GetZ(cosCoefficients0); const FloatV C3 = V4GetW(cosCoefficients0); const FloatV C4 = V4GetX(cosCoefficients1); const FloatV C5 = V4GetY(cosCoefficients1); const FloatV C6 = V4GetZ(cosCoefficients1); const FloatV C7 = V4GetW(cosCoefficients1); const FloatV C8 = V4GetX(cosCoefficients2); const FloatV C9 = V4GetY(cosCoefficients2); const FloatV C10 = V4GetZ(cosCoefficients2); const FloatV C11 = V4GetW(cosCoefficients2); Result = V3MulAdd(C1, V2, V4One()); Result = V3MulAdd(C2, V4, Result); Result = V3MulAdd(C3, V6, Result); Result = V3MulAdd(C4, V8, Result); Result = V3MulAdd(C5, V10, Result); Result = V3MulAdd(C6, V12, Result); Result = V3MulAdd(C7, V14, Result); Result = V3MulAdd(C8, V16, Result); Result = V3MulAdd(C9, V18, Result); Result = V3MulAdd(C10, V20, Result); Result = V3MulAdd(C11, V22, Result); return Result; } NV_FORCE_INLINE Vec3V V3PermYZZ(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); return _mm_shuffle_ps(a,a,_MM_SHUFFLE(3,2,2,1)); } NV_FORCE_INLINE Vec3V V3PermXYX(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); return _mm_shuffle_ps(a,a,_MM_SHUFFLE(3,0,1,0)); } NV_FORCE_INLINE Vec3V V3PermYZX(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); return _mm_shuffle_ps(a,a,_MM_SHUFFLE(3,0,2,1)); } NV_FORCE_INLINE Vec3V V3PermZXY(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3,1,0,2)); } NV_FORCE_INLINE Vec3V V3PermZZY(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3,1,2,2)); } NV_FORCE_INLINE Vec3V V3PermYXX(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3,0,0,1)); } NV_FORCE_INLINE Vec3V V3Perm_Zero_1Z_0Y(const Vec3V v0, const Vec3V v1) { VECMATHAOS_ASSERT(isValidVec3V(v0)); VECMATHAOS_ASSERT(isValidVec3V(v1)); return _mm_shuffle_ps(v1, v0, _MM_SHUFFLE(3,1,2,3)); } NV_FORCE_INLINE Vec3V V3Perm_0Z_Zero_1X(const Vec3V v0, const Vec3V v1) { VECMATHAOS_ASSERT(isValidVec3V(v0)); VECMATHAOS_ASSERT(isValidVec3V(v1)); return _mm_shuffle_ps(v0, v1, _MM_SHUFFLE(3,0,3,2)); } NV_FORCE_INLINE Vec3V V3Perm_1Y_0X_Zero(const Vec3V v0, const Vec3V v1) { VECMATHAOS_ASSERT(isValidVec3V(v0)); VECMATHAOS_ASSERT(isValidVec3V(v1)); //There must be a better way to do this. 
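// Added note: the sequence below builds the result (v1.y, v0.x, 0, 0) by
// starting from a zero vector and inserting the two wanted lanes with
// V3SetX/V3SetY rather than with a single shuffle.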
Vec3V v2=V3Zero(); FloatV y1=V3GetY(v1); FloatV x0=V3GetX(v0); v2=V3SetX(v2,y1); return V3SetY(v2,x0); } NV_FORCE_INLINE FloatV V3SumElems(const Vec3V a) { VECMATHAOS_ASSERT(isValidVec3V(a)); __m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0)); //z,y,x,w __m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1,1,1,1)); //y,x,w,z __m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2)); //x,w,z,y return _mm_add_ps(_mm_add_ps(shuf1, shuf2), shuf3); } NV_FORCE_INLINE uint32_t V3OutOfBounds(const Vec3V a, const Vec3V min, const Vec3V max) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(min)); VECMATHAOS_ASSERT(isValidVec3V(max)); const BoolV ffff = BFFFF(); const BoolV c = BOr(V3IsGrtr(a, max), V3IsGrtr(min, a)); return uint32_t(!BAllEq(c, ffff)); } NV_FORCE_INLINE uint32_t V3InBounds(const Vec3V a, const Vec3V min, const Vec3V max) { VECMATHAOS_ASSERT(isValidVec3V(a)); VECMATHAOS_ASSERT(isValidVec3V(min)); VECMATHAOS_ASSERT(isValidVec3V(max)); const BoolV tttt = BTTTT(); const BoolV c = BAnd(V3IsGrtrOrEq(a, min), V3IsGrtrOrEq(max, a)); return BAllEq(c, tttt); } NV_FORCE_INLINE uint32_t V3OutOfBounds(const Vec3V a, const Vec3V bounds) { return V3OutOfBounds(a, V3Neg(bounds), bounds); } NV_FORCE_INLINE uint32_t V3InBounds(const Vec3V a, const Vec3V bounds) { return V3InBounds(a, V3Neg(bounds), bounds); } ////////////////////////////////// //VEC4V ////////////////////////////////// NV_FORCE_INLINE Vec4V V4Splat(const FloatV f) { VECMATHAOS_ASSERT(isValidFloatV(f)); //return _mm_shuffle_ps(f, f, _MM_SHUFFLE(0,0,0,0)); return f; } NV_FORCE_INLINE Vec4V V4Merge(const FloatV* const floatVArray) { VECMATHAOS_ASSERT(isValidFloatV(floatVArray[0])); VECMATHAOS_ASSERT(isValidFloatV(floatVArray[1])); VECMATHAOS_ASSERT(isValidFloatV(floatVArray[2])); VECMATHAOS_ASSERT(isValidFloatV(floatVArray[3])); __m128 xw = _mm_move_ss(floatVArray[1], floatVArray[0]); //y, y, y, x __m128 yz = _mm_move_ss(floatVArray[2], floatVArray[3]); //z, z, z, w return (_mm_shuffle_ps(xw,yz,_MM_SHUFFLE(0,2,1,0))); } NV_FORCE_INLINE Vec4V V4Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z, const FloatVArg w) { VECMATHAOS_ASSERT(isValidFloatV(x)); VECMATHAOS_ASSERT(isValidFloatV(y)); VECMATHAOS_ASSERT(isValidFloatV(z)); VECMATHAOS_ASSERT(isValidFloatV(w)); __m128 xw = _mm_move_ss(y, x); //y, y, y, x __m128 yz = _mm_move_ss(z, w); //z, z, z, w return (_mm_shuffle_ps(xw,yz,_MM_SHUFFLE(0,2,1,0))); } NV_FORCE_INLINE Vec4V V4MergeW(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w) { const Vec4V xz = _mm_unpackhi_ps(x, z); const Vec4V yw = _mm_unpackhi_ps(y, w); return _mm_unpackhi_ps(xz, yw); } NV_FORCE_INLINE Vec4V V4MergeZ(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w) { const Vec4V xz = _mm_unpackhi_ps(x, z); const Vec4V yw = _mm_unpackhi_ps(y, w); return _mm_unpacklo_ps(xz, yw); } NV_FORCE_INLINE Vec4V V4MergeY(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w) { const Vec4V xz = _mm_unpacklo_ps(x, z); const Vec4V yw = _mm_unpacklo_ps(y, w); return _mm_unpackhi_ps(xz, yw); } NV_FORCE_INLINE Vec4V V4MergeX(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w) { const Vec4V xz = _mm_unpacklo_ps(x, z); const Vec4V yw = _mm_unpacklo_ps(y, w); return _mm_unpacklo_ps(xz, yw); } NV_FORCE_INLINE Vec4V V4UnpackXY(const Vec4VArg a, const Vec4VArg b) { return _mm_unpacklo_ps(a, b); } NV_FORCE_INLINE Vec4V V4UnpackZW(const Vec4VArg a, const Vec4VArg b) { return _mm_unpackhi_ps(a, b); } NV_FORCE_INLINE Vec4V V4Perm_YXWZ(const 
Vec4V a) { return _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,3,0,1)); } NV_FORCE_INLINE Vec4V V4Perm_XZXZ(const Vec4V a) { return _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,0,2,0)); } NV_FORCE_INLINE Vec4V V4Perm_YWYW(const Vec4V a) { return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3,1,3,1)); } template<uint8_t x, uint8_t y, uint8_t z, uint8_t w> NV_FORCE_INLINE Vec4V V4Perm(const Vec4V a) { return _mm_shuffle_ps(a, a, _MM_SHUFFLE(w, z, y, x)); } NV_FORCE_INLINE Vec4V V4UnitW() { const NV_ALIGN(16, float w[4])={0.0f,0.0f,0.0f,1.0f}; const __m128 w128=_mm_load_ps(w); return w128; } NV_FORCE_INLINE Vec4V V4UnitX() { const NV_ALIGN(16, float x[4])={1.0f,0.0f,0.0f,0.0f}; const __m128 x128=_mm_load_ps(x); return x128; } NV_FORCE_INLINE Vec4V V4UnitY() { const NV_ALIGN(16, float y[4])={0.0f,1.0f,0.0f,0.0f}; const __m128 y128=_mm_load_ps(y); return y128; } NV_FORCE_INLINE Vec4V V4UnitZ() { const NV_ALIGN(16, float z[4])={0.0f,0.0f,1.0f,0.0f}; const __m128 z128=_mm_load_ps(z); return z128; } NV_FORCE_INLINE FloatV V4GetW(const Vec4V f) { return _mm_shuffle_ps(f, f, _MM_SHUFFLE(3,3,3,3)); } NV_FORCE_INLINE FloatV V4GetX(const Vec4V f) { return _mm_shuffle_ps(f, f, _MM_SHUFFLE(0,0,0,0)); } NV_FORCE_INLINE FloatV V4GetY(const Vec4V f) { return _mm_shuffle_ps(f, f, _MM_SHUFFLE(1,1,1,1)); } NV_FORCE_INLINE FloatV V4GetZ(const Vec4V f) { return _mm_shuffle_ps(f, f, _MM_SHUFFLE(2,2,2,2)); } NV_FORCE_INLINE Vec4V V4SetW(const Vec4V v, const FloatV f) { VECMATHAOS_ASSERT(isValidFloatV(f)); return V4Sel(BTTTF(),v,f); } NV_FORCE_INLINE Vec4V V4ClearW(const Vec4V v) { return _mm_and_ps(v, (VecI32V&)internalWindowsSimd::gMaskXYZ); } NV_FORCE_INLINE Vec4V V4SetX(const Vec4V v, const FloatV f) { VECMATHAOS_ASSERT(isValidFloatV(f)); return V4Sel(BFTTT(),v,f); } NV_FORCE_INLINE Vec4V V4SetY(const Vec4V v, const FloatV f) { VECMATHAOS_ASSERT(isValidFloatV(f)); return V4Sel(BTFTT(),v,f); } NV_FORCE_INLINE Vec4V V4SetZ(const Vec4V v, const FloatV f) { VECMATHAOS_ASSERT(isValidVec3V(v)); VECMATHAOS_ASSERT(isValidFloatV(f)); return V4Sel(BTTFT(),v,f); } NV_FORCE_INLINE Vec4V V4Zero() { return _mm_setzero_ps(); } NV_FORCE_INLINE Vec4V V4One() { return V4Load(1.0f); } NV_FORCE_INLINE Vec4V V4Eps() { return V4Load(NV_EPS_REAL); } NV_FORCE_INLINE Vec4V V4Neg(const Vec4V f) { return _mm_sub_ps( _mm_setzero_ps(), f); } NV_FORCE_INLINE Vec4V V4Add(const Vec4V a, const Vec4V b) { return _mm_add_ps(a,b); } NV_FORCE_INLINE Vec4V V4Sub(const Vec4V a, const Vec4V b) { return _mm_sub_ps(a,b); } NV_FORCE_INLINE Vec4V V4Scale(const Vec4V a, const FloatV b) { return _mm_mul_ps(a,b); } NV_FORCE_INLINE Vec4V V4Mul(const Vec4V a, const Vec4V b) { return _mm_mul_ps(a,b); } NV_FORCE_INLINE Vec4V V4ScaleInv(const Vec4V a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(b)); return _mm_div_ps(a,b); } NV_FORCE_INLINE Vec4V V4Div(const Vec4V a, const Vec4V b) { return _mm_div_ps(a,b); } NV_FORCE_INLINE Vec4V V4ScaleInvFast(const Vec4V a, const FloatV b) { VECMATHAOS_ASSERT(isValidFloatV(b)); return _mm_mul_ps(a,_mm_rcp_ps(b)); } NV_FORCE_INLINE Vec4V V4DivFast(const Vec4V a, const Vec4V b) { return _mm_mul_ps(a,_mm_rcp_ps(b)); } NV_FORCE_INLINE Vec4V V4Recip(const Vec4V a) { return _mm_div_ps(V4One(),a); } NV_FORCE_INLINE Vec4V V4RecipFast(const Vec4V a) { return _mm_rcp_ps(a); } NV_FORCE_INLINE Vec4V V4Rsqrt(const Vec4V a) { return _mm_div_ps(V4One(),_mm_sqrt_ps(a)); } NV_FORCE_INLINE Vec4V V4RsqrtFast(const Vec4V a) { return _mm_rsqrt_ps(a); } NV_FORCE_INLINE Vec4V V4Sqrt(const Vec4V a) { return _mm_sqrt_ps(a); } NV_FORCE_INLINE Vec4V V4ScaleAdd(const Vec4V a, 
const FloatV b, const Vec4V c) { VECMATHAOS_ASSERT(isValidFloatV(b)); return V4Add(V4Scale(a,b),c); } NV_FORCE_INLINE Vec4V V4NegScaleSub(const Vec4V a, const FloatV b, const Vec4V c) { VECMATHAOS_ASSERT(isValidFloatV(b)); return V4Sub(c,V4Scale(a,b)); } NV_FORCE_INLINE Vec4V V4MulAdd(const Vec4V a, const Vec4V b, const Vec4V c) { return V4Add(V4Mul(a,b),c); } NV_FORCE_INLINE Vec4V V4NegMulSub(const Vec4V a, const Vec4V b, const Vec4V c) { return V4Sub(c,V4Mul(a,b)); } NV_FORCE_INLINE Vec4V V4Abs(const Vec4V a) { return V4Max(a,V4Neg(a)); } NV_FORCE_INLINE FloatV V4SumElements(const Vec4V a) { const Vec4V xy = V4UnpackXY(a, a); //x,x,y,y const Vec4V zw = V4UnpackZW(a, a); //z,z,w,w const Vec4V xz_yw = V4Add(xy, zw); //x+z,x+z,y+w,y+w const FloatV xz = V4GetX(xz_yw); //x+z const FloatV yw = V4GetZ(xz_yw); //y+w return FAdd(xz, yw); //sum } NV_FORCE_INLINE FloatV V4Dot(const Vec4V a, const Vec4V b) { __m128 dot1 = _mm_mul_ps(a, b); //x,y,z,w __m128 shuf1 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(2,1,0,3)); //w,x,y,z __m128 shuf2 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(1,0,3,2)); //z,w,x,y __m128 shuf3 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(0,3,2,1)); //y,z,w,x return _mm_add_ps(_mm_add_ps(shuf2, shuf3), _mm_add_ps(dot1,shuf1)); } NV_FORCE_INLINE FloatV V4Length(const Vec4V a) { return _mm_sqrt_ps(V4Dot(a,a)); } NV_FORCE_INLINE FloatV V4LengthSq(const Vec4V a) { return V4Dot(a,a); } NV_FORCE_INLINE Vec4V V4Normalize(const Vec4V a) { VECMATHAOS_ASSERT(!FAllEq(V4Dot(a,a), FZero())); return V4ScaleInv(a,_mm_sqrt_ps(V4Dot(a,a))); } NV_FORCE_INLINE Vec4V V4NormalizeFast(const Vec4V a) { return V4ScaleInvFast(a,_mm_sqrt_ps(V4Dot(a,a))); } NV_FORCE_INLINE Vec4V V4NormalizeSafe(const Vec4V a) { const __m128 zero=FZero(); const __m128 eps=V3Eps(); const __m128 length=V4Length(a); const __m128 isGreaterThanZero=V4IsGrtr(length,eps); return V4Sel(isGreaterThanZero,V4ScaleInv(a,length),zero); } NV_FORCE_INLINE Vec4V V4Sel(const BoolV c, const Vec4V a, const Vec4V b) { return _mm_or_ps(_mm_andnot_ps(c, b), _mm_and_ps(c, a)); } NV_FORCE_INLINE BoolV V4IsGrtr(const Vec4V a, const Vec4V b) { return _mm_cmpgt_ps(a,b); } NV_FORCE_INLINE BoolV V4IsGrtrOrEq(const Vec4V a, const Vec4V b) { return _mm_cmpge_ps(a,b); } NV_FORCE_INLINE BoolV V4IsEq(const Vec4V a, const Vec4V b) { return _mm_cmpeq_ps(a,b); } NV_FORCE_INLINE BoolV V4IsEqU32(const VecU32V a, const VecU32V b) { return internalWindowsSimd::m128_I2F(_mm_cmpeq_epi32(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b))); } NV_FORCE_INLINE Vec4V V4Max(const Vec4V a, const Vec4V b) { return _mm_max_ps(a, b); } NV_FORCE_INLINE Vec4V V4Min(const Vec4V a, const Vec4V b) { return _mm_min_ps(a, b); } //Extract the maximum value from a NV_FORCE_INLINE FloatV V4ExtractMax(const Vec4V a) { __m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,1,0,3)); __m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1,0,3,2)); __m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0,3,2,1)); return _mm_max_ps(_mm_max_ps(a, shuf1), _mm_max_ps(shuf2, shuf3)); } //Extract the maximum value from a NV_FORCE_INLINE FloatV V4ExtractMin(const Vec4V a) { __m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,1,0,3)); __m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1,0,3,2)); __m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0,3,2,1)); return _mm_min_ps(_mm_min_ps(a, shuf1), _mm_min_ps(shuf2, shuf3)); } NV_FORCE_INLINE Vec4V V4Clamp(const Vec4V a, const Vec4V minV, const Vec4V maxV) { return V4Max(V4Min(a,maxV),minV); } NV_FORCE_INLINE uint32_t V4AllGrtr(const Vec4V a, const Vec4V b) { return 
internalWindowsSimd::BAllTrue4_R(V4IsGrtr(a, b)); } NV_FORCE_INLINE uint32_t V4AllGrtrOrEq(const Vec4V a, const Vec4V b) { return internalWindowsSimd::BAllTrue4_R(V4IsGrtrOrEq(a, b)); } NV_FORCE_INLINE uint32_t V4AllEq(const Vec4V a, const Vec4V b) { return internalWindowsSimd::BAllTrue4_R(V4IsEq(a, b)); } NV_FORCE_INLINE Vec4V V4Round(const Vec4V a) { //return _mm_round_ps(a, 0x0); const Vec4V half = V4Load(0.5f); const __m128 signBit = _mm_cvtepi32_ps(_mm_srli_epi32(_mm_cvtps_epi32(a), 31)); const Vec4V aRound = V4Sub(V4Add(a, half), signBit); __m128i tmp = _mm_cvttps_epi32(aRound); return _mm_cvtepi32_ps(tmp); } NV_FORCE_INLINE Vec4V V4Sin(const Vec4V a) { //Vec4V V1, V2, V3, V5, V7, V9, V11, V13, V15, V17, V19, V21, V23; //Vec4V S1, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11; Vec4V Result; const Vec4V recipTwoPi = V4LoadA(g_NVReciprocalTwoPi.f); const Vec4V twoPi = V4LoadA(g_NVTwoPi.f); const Vec4V tmp = V4Mul(a, recipTwoPi); const Vec4V b = V4Round(tmp); const Vec4V V1 = V4NegMulSub(twoPi, b, a); // sin(V) ~= V - V^3 / 3! + V^5 / 5! - V^7 / 7! + V^9 / 9! - V^11 / 11! + V^13 / 13! - // V^15 / 15! + V^17 / 17! - V^19 / 19! + V^21 / 21! - V^23 / 23! (for -PI <= V < PI) const Vec4V V2 = V4Mul(V1, V1); const Vec4V V3 = V4Mul(V2, V1); const Vec4V V5 = V4Mul(V3, V2); const Vec4V V7 = V4Mul(V5, V2); const Vec4V V9 = V4Mul(V7, V2); const Vec4V V11 = V4Mul(V9, V2); const Vec4V V13 = V4Mul(V11, V2); const Vec4V V15 = V4Mul(V13, V2); const Vec4V V17 = V4Mul(V15, V2); const Vec4V V19 = V4Mul(V17, V2); const Vec4V V21 = V4Mul(V19, V2); const Vec4V V23 = V4Mul(V21, V2); const Vec4V sinCoefficients0 = V4LoadA(g_NVSinCoefficients0.f); const Vec4V sinCoefficients1 = V4LoadA(g_NVSinCoefficients1.f); const Vec4V sinCoefficients2 = V4LoadA(g_NVSinCoefficients2.f); const FloatV S1 = V4GetY(sinCoefficients0); const FloatV S2 = V4GetZ(sinCoefficients0); const FloatV S3 = V4GetW(sinCoefficients0); const FloatV S4 = V4GetX(sinCoefficients1); const FloatV S5 = V4GetY(sinCoefficients1); const FloatV S6 = V4GetZ(sinCoefficients1); const FloatV S7 = V4GetW(sinCoefficients1); const FloatV S8 = V4GetX(sinCoefficients2); const FloatV S9 = V4GetY(sinCoefficients2); const FloatV S10 = V4GetZ(sinCoefficients2); const FloatV S11 = V4GetW(sinCoefficients2); Result = V4MulAdd(S1, V3, V1); Result = V4MulAdd(S2, V5, Result); Result = V4MulAdd(S3, V7, Result); Result = V4MulAdd(S4, V9, Result); Result = V4MulAdd(S5, V11, Result); Result = V4MulAdd(S6, V13, Result); Result = V4MulAdd(S7, V15, Result); Result = V4MulAdd(S8, V17, Result); Result = V4MulAdd(S9, V19, Result); Result = V4MulAdd(S10, V21, Result); Result = V4MulAdd(S11, V23, Result); return Result; } NV_FORCE_INLINE Vec4V V4Cos(const Vec4V a) { //XMVECTOR V1, V2, V4, V6, V8, V10, V12, V14, V16, V18, V20, V22; //XMVECTOR C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11; Vec4V Result; const Vec4V recipTwoPi = V4LoadA(g_NVReciprocalTwoPi.f); const FloatV twoPi = V4LoadA(g_NVTwoPi.f); const Vec4V tmp = V4Mul(a, recipTwoPi); const Vec4V b = V4Round(tmp); const Vec4V V1 = V4NegMulSub(twoPi, b, a); // cos(V) ~= 1 - V^2 / 2! + V^4 / 4! - V^6 / 6! + V^8 / 8! - V^10 / 10! + V^12 / 12! - // V^14 / 14! + V^16 / 16! - V^18 / 18! + V^20 / 20! - V^22 / 22! 
(for -PI <= V < PI) const Vec4V V2 = V4Mul(V1, V1); const Vec4V V4 = V4Mul(V2, V2); const Vec4V V6 = V4Mul(V4, V2); const Vec4V V8 = V4Mul(V4, V4); const Vec4V V10 = V4Mul(V6, V4); const Vec4V V12 = V4Mul(V6, V6); const Vec4V V14 = V4Mul(V8, V6); const Vec4V V16 = V4Mul(V8, V8); const Vec4V V18 = V4Mul(V10, V8); const Vec4V V20 = V4Mul(V10, V10); const Vec4V V22 = V4Mul(V12, V10); const Vec4V cosCoefficients0 = V4LoadA(g_NVCosCoefficients0.f); const Vec4V cosCoefficients1 = V4LoadA(g_NVCosCoefficients1.f); const Vec4V cosCoefficients2 = V4LoadA(g_NVCosCoefficients2.f); const FloatV C1 = V4GetY(cosCoefficients0); const FloatV C2 = V4GetZ(cosCoefficients0); const FloatV C3 = V4GetW(cosCoefficients0); const FloatV C4 = V4GetX(cosCoefficients1); const FloatV C5 = V4GetY(cosCoefficients1); const FloatV C6 = V4GetZ(cosCoefficients1); const FloatV C7 = V4GetW(cosCoefficients1); const FloatV C8 = V4GetX(cosCoefficients2); const FloatV C9 = V4GetY(cosCoefficients2); const FloatV C10 = V4GetZ(cosCoefficients2); const FloatV C11 = V4GetW(cosCoefficients2); Result = V4MulAdd(C1, V2, V4One()); Result = V4MulAdd(C2, V4, Result); Result = V4MulAdd(C3, V6, Result); Result = V4MulAdd(C4, V8, Result); Result = V4MulAdd(C5, V10, Result); Result = V4MulAdd(C6, V12, Result); Result = V4MulAdd(C7, V14, Result); Result = V4MulAdd(C8, V16, Result); Result = V4MulAdd(C9, V18, Result); Result = V4MulAdd(C10, V20, Result); Result = V4MulAdd(C11, V22, Result); return Result; } NV_FORCE_INLINE void V4Transpose(Vec4V& col0, Vec4V& col1, Vec4V& col2, Vec4V& col3) { Vec4V tmp0 = _mm_unpacklo_ps(col0, col1); Vec4V tmp2 = _mm_unpacklo_ps(col2, col3); Vec4V tmp1 = _mm_unpackhi_ps(col0, col1); Vec4V tmp3 = _mm_unpackhi_ps(col2, col3); col0 = _mm_movelh_ps(tmp0, tmp2); col1 = _mm_movehl_ps(tmp2, tmp0); col2 = _mm_movelh_ps(tmp1, tmp3); col3 = _mm_movehl_ps(tmp3, tmp1); } ////////////////////////////////// //BoolV ////////////////////////////////// NV_FORCE_INLINE BoolV BFFFF() { return _mm_setzero_ps(); } NV_FORCE_INLINE BoolV BFFFT() { /*const NV_ALIGN(16, uint32_t f[4])={0,0,0,0xFFFFFFFF}; const __m128 ffft=_mm_load_ps((float*)&f); return ffft;*/ return internalWindowsSimd::m128_I2F(_mm_set_epi32(-1, 0, 0, 0)); } NV_FORCE_INLINE BoolV BFFTF() { /*const NV_ALIGN(16, uint32_t f[4])={0,0,0xFFFFFFFF,0}; const __m128 fftf=_mm_load_ps((float*)&f); return fftf;*/ return internalWindowsSimd::m128_I2F(_mm_set_epi32(0, -1, 0, 0)); } NV_FORCE_INLINE BoolV BFFTT() { /*const NV_ALIGN(16, uint32_t f[4])={0,0,0xFFFFFFFF,0xFFFFFFFF}; const __m128 fftt=_mm_load_ps((float*)&f); return fftt;*/ return internalWindowsSimd::m128_I2F(_mm_set_epi32(-1, -1, 0, 0)); } NV_FORCE_INLINE BoolV BFTFF() { /*const NV_ALIGN(16, uint32_t f[4])={0,0xFFFFFFFF,0,0}; const __m128 ftff=_mm_load_ps((float*)&f); return ftff;*/ return internalWindowsSimd::m128_I2F(_mm_set_epi32(0, 0, -1, 0)); } NV_FORCE_INLINE BoolV BFTFT() { /*const NV_ALIGN(16, uint32_t f[4])={0,0xFFFFFFFF,0,0xFFFFFFFF}; const __m128 ftft=_mm_load_ps((float*)&f); return ftft;*/ return internalWindowsSimd::m128_I2F(_mm_set_epi32(-1, 0, -1, 0)); } NV_FORCE_INLINE BoolV BFTTF() { /*const NV_ALIGN(16, uint32_t f[4])={0,0xFFFFFFFF,0xFFFFFFFF,0}; const __m128 fttf=_mm_load_ps((float*)&f); return fttf;*/ return internalWindowsSimd::m128_I2F(_mm_set_epi32(0, -1, -1, 0)); } NV_FORCE_INLINE BoolV BFTTT() { /*const NV_ALIGN(16, uint32_t f[4])={0,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF}; const __m128 fttt=_mm_load_ps((float*)&f); return fttt;*/ return internalWindowsSimd::m128_I2F(_mm_set_epi32(-1, -1, -1, 0)); } 
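// Added note: the BoolV constant constructors are named B<x><y><z><w>, where T
// means the corresponding lane is all ones (0xFFFFFFFF) and F means it is
// zero. _mm_set_epi32 takes its arguments in (e3, e2, e1, e0) = (w, z, y, x)
// order, which is why the literals read right-to-left relative to the name,
// e.g. BFTTT() -> _mm_set_epi32(-1, -1, -1, 0) sets y, z and w and clears x.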
NV_FORCE_INLINE BoolV BTFFF() { //const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0,0,0}; //const __m128 tfff=_mm_load_ps((float*)&f); //return tfff; return internalWindowsSimd::m128_I2F(_mm_set_epi32(0, 0, 0, -1)); } NV_FORCE_INLINE BoolV BTFFT() { /*const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0,0,0xFFFFFFFF}; const __m128 tfft=_mm_load_ps((float*)&f); return tfft;*/ return internalWindowsSimd::m128_I2F(_mm_set_epi32(-1, 0, 0, -1)); } NV_FORCE_INLINE BoolV BTFTF() { /*const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0,0xFFFFFFFF,0}; const __m128 tftf=_mm_load_ps((float*)&f); return tftf;*/ return internalWindowsSimd::m128_I2F(_mm_set_epi32(0, -1, 0, -1)); } NV_FORCE_INLINE BoolV BTFTT() { /*const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0,0xFFFFFFFF,0xFFFFFFFF}; const __m128 tftt=_mm_load_ps((float*)&f); return tftt;*/ return internalWindowsSimd::m128_I2F(_mm_set_epi32(-1, -1, 0, -1)); } NV_FORCE_INLINE BoolV BTTFF() { /*const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0xFFFFFFFF,0,0}; const __m128 ttff=_mm_load_ps((float*)&f); return ttff;*/ return internalWindowsSimd::m128_I2F(_mm_set_epi32(0, 0, -1, -1)); } NV_FORCE_INLINE BoolV BTTFT() { /*const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0xFFFFFFFF,0,0xFFFFFFFF}; const __m128 ttft=_mm_load_ps((float*)&f); return ttft;*/ return internalWindowsSimd::m128_I2F(_mm_set_epi32(-1, 0, -1, -1)); } NV_FORCE_INLINE BoolV BTTTF() { /*const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0}; const __m128 tttf=_mm_load_ps((float*)&f); return tttf;*/ return internalWindowsSimd::m128_I2F(_mm_set_epi32(0, -1, -1, -1)); } NV_FORCE_INLINE BoolV BTTTT() { /*const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF}; const __m128 tttt=_mm_load_ps((float*)&f); return tttt;*/ return internalWindowsSimd::m128_I2F(_mm_set_epi32(-1, -1, -1, -1)); } NV_FORCE_INLINE BoolV BXMask() { /*const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0,0,0}; const __m128 tfff=_mm_load_ps((float*)&f); return tfff;*/ return internalWindowsSimd::m128_I2F(_mm_set_epi32(0, 0, 0, -1)); } NV_FORCE_INLINE BoolV BYMask() { /*const NV_ALIGN(16, uint32_t f[4])={0,0xFFFFFFFF,0,0}; const __m128 ftff=_mm_load_ps((float*)&f); return ftff;*/ return internalWindowsSimd::m128_I2F(_mm_set_epi32(0, 0, -1, 0)); } NV_FORCE_INLINE BoolV BZMask() { /*const NV_ALIGN(16, uint32_t f[4])={0,0,0xFFFFFFFF,0}; const __m128 fftf=_mm_load_ps((float*)&f); return fftf;*/ return internalWindowsSimd::m128_I2F(_mm_set_epi32(0, -1, 0, 0)); } NV_FORCE_INLINE BoolV BWMask() { /*const NV_ALIGN(16, uint32_t f[4])={0,0,0,0xFFFFFFFF}; const __m128 ffft=_mm_load_ps((float*)&f); return ffft;*/ return internalWindowsSimd::m128_I2F(_mm_set_epi32(-1, 0, 0, 0)); } NV_FORCE_INLINE BoolV BGetX(const BoolV f) { return _mm_shuffle_ps(f, f, _MM_SHUFFLE(0,0,0,0)); } NV_FORCE_INLINE BoolV BGetY(const BoolV f) { return _mm_shuffle_ps(f, f, _MM_SHUFFLE(1,1,1,1)); } NV_FORCE_INLINE BoolV BGetZ(const BoolV f) { return _mm_shuffle_ps(f, f, _MM_SHUFFLE(2,2,2,2)); } NV_FORCE_INLINE BoolV BGetW(const BoolV f) { return _mm_shuffle_ps(f, f, _MM_SHUFFLE(3,3,3,3)); } NV_FORCE_INLINE BoolV BSetX(const BoolV v, const BoolV f) { return V4Sel(BFTTT(),v,f); } NV_FORCE_INLINE BoolV BSetY(const BoolV v, const BoolV f) { return V4Sel(BTFTT(),v,f); } NV_FORCE_INLINE BoolV BSetZ(const BoolV v, const BoolV f) { return V4Sel(BTTFT(),v,f); } NV_FORCE_INLINE BoolV BSetW(const BoolV v, const BoolV f) { return V4Sel(BTTTF(),v,f); } template<int index> BoolV BSplatElement(BoolV a) { return 
internalWindowsSimd::m128_I2F(_mm_shuffle_epi32(internalWindowsSimd::m128_F2I(a), _MM_SHUFFLE(index, index, index, index))); } NV_FORCE_INLINE BoolV BAnd(const BoolV a, const BoolV b) { return (_mm_and_ps(a,b)); } NV_FORCE_INLINE BoolV BNot(const BoolV a) { const BoolV bAllTrue(BTTTT()); return _mm_xor_ps(a, bAllTrue); } NV_FORCE_INLINE BoolV BAndNot(const BoolV a, const BoolV b) { return (_mm_andnot_ps(b, a)); } NV_FORCE_INLINE BoolV BOr(const BoolV a, const BoolV b) { return (_mm_or_ps(a,b)); } NV_FORCE_INLINE BoolV BAllTrue4(const BoolV a) { const BoolV bTmp = _mm_and_ps(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,1,0,1)), _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,3,2,3))); return _mm_and_ps(_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(0,0,0,0)), _mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(1,1,1,1))); } NV_FORCE_INLINE BoolV BAnyTrue4(const BoolV a) { const BoolV bTmp = _mm_or_ps(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,1,0,1)), _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,3,2,3))); return _mm_or_ps(_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(0,0,0,0)), _mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(1,1,1,1))); } NV_FORCE_INLINE BoolV BAllTrue3(const BoolV a) { const BoolV bTmp = _mm_and_ps(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,1,0,1)), _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2))); return _mm_and_ps(_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(0,0,0,0)), _mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(1,1,1,1))); } NV_FORCE_INLINE BoolV BAnyTrue3(const BoolV a) { const BoolV bTmp = _mm_or_ps(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,1,0,1)), _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2))); return _mm_or_ps(_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(0,0,0,0)), _mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(1,1,1,1))); } NV_FORCE_INLINE uint32_t BAllEq(const BoolV a, const BoolV b) { const BoolV bTest = internalWindowsSimd::m128_I2F(_mm_cmpeq_epi32(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b))); return internalWindowsSimd::BAllTrue4_R(bTest); } NV_FORCE_INLINE uint32_t BAllEqTTTT(const BoolV a) { return uint32_t(_mm_movemask_ps(a)==15); } NV_FORCE_INLINE uint32_t BAllEqFFFF(const BoolV a) { return uint32_t(_mm_movemask_ps(a)==0); } NV_FORCE_INLINE uint32_t BGetBitMask(const BoolV a) { return uint32_t(_mm_movemask_ps(a)); } ////////////////////////////////// //MAT33V ////////////////////////////////// NV_FORCE_INLINE Vec3V M33MulV3(const Mat33V& a, const Vec3V b) { const FloatV x=V3GetX(b); const FloatV y=V3GetY(b); const FloatV z=V3GetZ(b); const Vec3V v0=V3Scale(a.col0,x); const Vec3V v1=V3Scale(a.col1,y); const Vec3V v2=V3Scale(a.col2,z); const Vec3V v0PlusV1=V3Add(v0,v1); return V3Add(v0PlusV1,v2); } NV_FORCE_INLINE Vec3V M33TrnspsMulV3(const Mat33V& a, const Vec3V b) { const FloatV x=V3Dot(a.col0,b); const FloatV y=V3Dot(a.col1,b); const FloatV z=V3Dot(a.col2,b); return V3Merge(x,y,z); } NV_FORCE_INLINE Vec3V M33MulV3AddV3(const Mat33V& A, const Vec3V b, const Vec3V c) { const FloatV x=V3GetX(b); const FloatV y=V3GetY(b); const FloatV z=V3GetZ(b); Vec3V result = V3MulAdd(A.col0, x, c); result = V3MulAdd(A.col1, y, result); return V3MulAdd(A.col2, z, result); } NV_FORCE_INLINE Mat33V M33MulM33(const Mat33V& a, const Mat33V& b) { return Mat33V(M33MulV3(a,b.col0),M33MulV3(a,b.col1),M33MulV3(a,b.col2)); } NV_FORCE_INLINE Mat33V M33Add(const Mat33V& a, const Mat33V& b) { return Mat33V(V3Add(a.col0,b.col0),V3Add(a.col1,b.col1),V3Add(a.col2,b.col2)); } NV_FORCE_INLINE Mat33V M33Scale(const Mat33V& a, const FloatV& b) { return Mat33V(V3Scale(a.col0,b),V3Scale(a.col1,b),V3Scale(a.col2,b)); } NV_FORCE_INLINE Mat33V M33Sub(const Mat33V& a, const Mat33V& b) { return 
Mat33V(V3Sub(a.col0,b.col0),V3Sub(a.col1,b.col1),V3Sub(a.col2,b.col2)); } NV_FORCE_INLINE Mat33V M33Neg(const Mat33V& a) { return Mat33V(V3Neg(a.col0),V3Neg(a.col1),V3Neg(a.col2)); } NV_FORCE_INLINE Mat33V M33Abs(const Mat33V& a) { return Mat33V(V3Abs(a.col0),V3Abs(a.col1),V3Abs(a.col2)); } NV_FORCE_INLINE Mat33V M33Inverse(const Mat33V& a) { const BoolV tfft=BTFFT(); const BoolV tttf=BTTTF(); const FloatV zero=V3Zero(); const Vec3V cross01 = V3Cross(a.col0,a.col1); const Vec3V cross12 = V3Cross(a.col1,a.col2); const Vec3V cross20 = V3Cross(a.col2,a.col0); const FloatV dot = V3Dot(cross01,a.col2); const FloatV invDet = _mm_rcp_ps(dot); const Vec3V mergeh = _mm_unpacklo_ps(cross12,cross01); const Vec3V mergel = _mm_unpackhi_ps(cross12,cross01); Vec3V colInv0 = _mm_unpacklo_ps(mergeh,cross20); colInv0 = _mm_or_ps(_mm_andnot_ps(tttf, zero), _mm_and_ps(tttf, colInv0)); const Vec3V zppd=_mm_shuffle_ps(mergeh,cross20,_MM_SHUFFLE(3,0,0,2)); const Vec3V pbwp=_mm_shuffle_ps(cross20,mergeh,_MM_SHUFFLE(3,3,1,0)); const Vec3V colInv1=_mm_or_ps(_mm_andnot_ps(BTFFT(), pbwp), _mm_and_ps(BTFFT(), zppd)); const Vec3V xppd=_mm_shuffle_ps(mergel,cross20,_MM_SHUFFLE(3,0,0,0)); const Vec3V pcyp=_mm_shuffle_ps(cross20,mergel,_MM_SHUFFLE(3,1,2,0)); const Vec3V colInv2=_mm_or_ps(_mm_andnot_ps(tfft, pcyp), _mm_and_ps(tfft, xppd)); return Mat33V ( _mm_mul_ps(colInv0,invDet), _mm_mul_ps(colInv1,invDet), _mm_mul_ps(colInv2,invDet) ); } NV_FORCE_INLINE Mat33V M33Trnsps(const Mat33V& a) { return Mat33V ( V3Merge(V3GetX(a.col0),V3GetX(a.col1),V3GetX(a.col2)), V3Merge(V3GetY(a.col0),V3GetY(a.col1),V3GetY(a.col2)), V3Merge(V3GetZ(a.col0),V3GetZ(a.col1),V3GetZ(a.col2)) ); } NV_FORCE_INLINE Mat33V M33Identity() { return Mat33V ( V3UnitX(), V3UnitY(), V3UnitZ() ); } NV_FORCE_INLINE Mat33V M33Diagonal(const Vec3VArg d) { const FloatV x = V3Mul(V3UnitX(), d); const FloatV y = V3Mul(V3UnitY(), d); const FloatV z = V3Mul(V3UnitZ(), d); return Mat33V(x, y, z); } ////////////////////////////////// //MAT34V ////////////////////////////////// NV_FORCE_INLINE Vec3V M34MulV3(const Mat34V& a, const Vec3V b) { const FloatV x=V3GetX(b); const FloatV y=V3GetY(b); const FloatV z=V3GetZ(b); const Vec3V v0=V3Scale(a.col0,x); const Vec3V v1=V3Scale(a.col1,y); const Vec3V v2=V3Scale(a.col2,z); const Vec3V v0PlusV1=V3Add(v0,v1); const Vec3V v0PlusV1Plusv2=V3Add(v0PlusV1,v2); return (V3Add(v0PlusV1Plusv2,a.col3)); } NV_FORCE_INLINE Vec3V M34Mul33V3(const Mat34V& a, const Vec3V b) { const FloatV x=V3GetX(b); const FloatV y=V3GetY(b); const FloatV z=V3GetZ(b); const Vec3V v0=V3Scale(a.col0,x); const Vec3V v1=V3Scale(a.col1,y); const Vec3V v2=V3Scale(a.col2,z); const Vec3V v0PlusV1=V3Add(v0,v1); return V3Add(v0PlusV1,v2); } NV_FORCE_INLINE Vec3V M34TrnspsMul33V3(const Mat34V& a, const Vec3V b) { const FloatV x=V3Dot(a.col0,b); const FloatV y=V3Dot(a.col1,b); const FloatV z=V3Dot(a.col2,b); return V3Merge(x,y,z); } NV_FORCE_INLINE Mat34V M34MulM34(const Mat34V& a, const Mat34V& b) { return Mat34V(M34Mul33V3(a,b.col0), M34Mul33V3(a,b.col1),M34Mul33V3(a,b.col2),M34MulV3(a,b.col3)); } NV_FORCE_INLINE Mat33V M34MulM33(const Mat34V& a, const Mat33V& b) { return Mat33V(M34Mul33V3(a,b.col0),M34Mul33V3(a,b.col1),M34Mul33V3(a,b.col2)); } NV_FORCE_INLINE Mat33V M34Mul33MM34(const Mat34V& a, const Mat34V& b) { return Mat33V(M34Mul33V3(a,b.col0),M34Mul33V3(a,b.col1),M34Mul33V3(a,b.col2)); } NV_FORCE_INLINE Mat34V M34Add(const Mat34V& a, const Mat34V& b) { return Mat34V(V3Add(a.col0,b.col0),V3Add(a.col1,b.col1),V3Add(a.col2,b.col2),V3Add(a.col3,b.col3)); } 
NV_FORCE_INLINE Mat34V M34Inverse(const Mat34V& a) { Mat34V aInv; const BoolV tfft=BTFFT(); const BoolV tttf=BTTTF(); const FloatV zero=V3Zero(); const Vec3V cross01 = V3Cross(a.col0,a.col1); const Vec3V cross12 = V3Cross(a.col1,a.col2); const Vec3V cross20 = V3Cross(a.col2,a.col0); const FloatV dot = V3Dot(cross01,a.col2); const FloatV invDet = _mm_rcp_ps(dot); const Vec3V mergeh = _mm_unpacklo_ps(cross12,cross01); const Vec3V mergel = _mm_unpackhi_ps(cross12,cross01); Vec3V colInv0 = _mm_unpacklo_ps(mergeh,cross20); colInv0 = _mm_or_ps(_mm_andnot_ps(tttf, zero), _mm_and_ps(tttf, colInv0)); const Vec3V zppd=_mm_shuffle_ps(mergeh,cross20,_MM_SHUFFLE(3,0,0,2)); const Vec3V pbwp=_mm_shuffle_ps(cross20,mergeh,_MM_SHUFFLE(3,3,1,0)); const Vec3V colInv1=_mm_or_ps(_mm_andnot_ps(BTFFT(), pbwp), _mm_and_ps(BTFFT(), zppd)); const Vec3V xppd=_mm_shuffle_ps(mergel,cross20,_MM_SHUFFLE(3,0,0,0)); const Vec3V pcyp=_mm_shuffle_ps(cross20,mergel,_MM_SHUFFLE(3,1,2,0)); const Vec3V colInv2=_mm_or_ps(_mm_andnot_ps(tfft, pcyp), _mm_and_ps(tfft, xppd)); aInv.col0=_mm_mul_ps(colInv0,invDet); aInv.col1=_mm_mul_ps(colInv1,invDet); aInv.col2=_mm_mul_ps(colInv2,invDet); aInv.col3=M34Mul33V3(aInv,V3Neg(a.col3)); return aInv; } NV_FORCE_INLINE Mat33V M34Trnsps33(const Mat34V& a) { return Mat33V ( V3Merge(V3GetX(a.col0),V3GetX(a.col1),V3GetX(a.col2)), V3Merge(V3GetY(a.col0),V3GetY(a.col1),V3GetY(a.col2)), V3Merge(V3GetZ(a.col0),V3GetZ(a.col1),V3GetZ(a.col2)) ); } ////////////////////////////////// //MAT44V ////////////////////////////////// NV_FORCE_INLINE Vec4V M44MulV4(const Mat44V& a, const Vec4V b) { const FloatV x=V4GetX(b); const FloatV y=V4GetY(b); const FloatV z=V4GetZ(b); const FloatV w=V4GetW(b); const Vec4V v0=V4Scale(a.col0,x); const Vec4V v1=V4Scale(a.col1,y); const Vec4V v2=V4Scale(a.col2,z); const Vec4V v3=V4Scale(a.col3,w); const Vec4V v0PlusV1=V4Add(v0,v1); const Vec4V v0PlusV1Plusv2=V4Add(v0PlusV1,v2); return (V4Add(v0PlusV1Plusv2,v3)); } NV_FORCE_INLINE Vec4V M44TrnspsMulV4(const Mat44V& a, const Vec4V b) { NV_ALIGN(16, FloatV dotProdArray[4])= { V4Dot(a.col0,b), V4Dot(a.col1,b), V4Dot(a.col2,b), V4Dot(a.col3,b) }; return V4Merge(dotProdArray); } NV_FORCE_INLINE Mat44V M44MulM44(const Mat44V& a, const Mat44V& b) { return Mat44V(M44MulV4(a,b.col0),M44MulV4(a,b.col1),M44MulV4(a,b.col2),M44MulV4(a,b.col3)); } NV_FORCE_INLINE Mat44V M44Add(const Mat44V& a, const Mat44V& b) { return Mat44V(V4Add(a.col0,b.col0),V4Add(a.col1,b.col1),V4Add(a.col2,b.col2),V4Add(a.col3,b.col3)); } NV_FORCE_INLINE Mat44V M44Trnsps(const Mat44V& a) { const Vec4V v0 = _mm_unpacklo_ps(a.col0, a.col2); const Vec4V v1 = _mm_unpackhi_ps(a.col0, a.col2); const Vec4V v2 = _mm_unpacklo_ps(a.col1, a.col3); const Vec4V v3 = _mm_unpackhi_ps(a.col1, a.col3); return Mat44V( _mm_unpacklo_ps(v0, v2),_mm_unpackhi_ps(v0, v2),_mm_unpacklo_ps(v1, v3),_mm_unpackhi_ps(v1, v3)); } NV_FORCE_INLINE Mat44V M44Inverse(const Mat44V& a) { __m128 minor0, minor1, minor2, minor3; __m128 row0, row1, row2, row3; __m128 det, tmp1; tmp1=V4Zero(); row1=V4Zero(); row3=V4Zero(); row0=a.col0; row1=_mm_shuffle_ps(a.col1,a.col1,_MM_SHUFFLE(1,0,3,2)); row2=a.col2; row3=_mm_shuffle_ps(a.col3,a.col3,_MM_SHUFFLE(1,0,3,2)); tmp1 = _mm_mul_ps(row2, row3); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1); minor0 = _mm_mul_ps(row1, tmp1); minor1 = _mm_mul_ps(row0, tmp1); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E); minor0 = _mm_sub_ps(_mm_mul_ps(row1, tmp1), minor0); minor1 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor1); minor1 = _mm_shuffle_ps(minor1, minor1, 0x4E); tmp1 = _mm_mul_ps(row1, 
row2); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1); minor0 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor0); minor3 = _mm_mul_ps(row0, tmp1); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E); minor0 = _mm_sub_ps(minor0, _mm_mul_ps(row3, tmp1)); minor3 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor3); minor3 = _mm_shuffle_ps(minor3, minor3, 0x4E); tmp1 = _mm_mul_ps(_mm_shuffle_ps(row1, row1, 0x4E), row3); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1); row2 = _mm_shuffle_ps(row2, row2, 0x4E); minor0 = _mm_add_ps(_mm_mul_ps(row2, tmp1), minor0); minor2 = _mm_mul_ps(row0, tmp1); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E); minor0 = _mm_sub_ps(minor0, _mm_mul_ps(row2, tmp1)); minor2 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor2); minor2 = _mm_shuffle_ps(minor2, minor2, 0x4E); tmp1 = _mm_mul_ps(row0, row1); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1); minor2 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor2); minor3 = _mm_sub_ps(_mm_mul_ps(row2, tmp1), minor3); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E); minor2 = _mm_sub_ps(_mm_mul_ps(row3, tmp1), minor2); minor3 = _mm_sub_ps(minor3, _mm_mul_ps(row2, tmp1)); tmp1 = _mm_mul_ps(row0, row3); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1); minor1 = _mm_sub_ps(minor1, _mm_mul_ps(row2, tmp1)); minor2 = _mm_add_ps(_mm_mul_ps(row1, tmp1), minor2); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E); minor1 = _mm_add_ps(_mm_mul_ps(row2, tmp1), minor1); minor2 = _mm_sub_ps(minor2, _mm_mul_ps(row1, tmp1)); tmp1 = _mm_mul_ps(row0, row2); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1); minor1 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor1); minor3 = _mm_sub_ps(minor3, _mm_mul_ps(row1, tmp1)); tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E); minor1 = _mm_sub_ps(minor1, _mm_mul_ps(row3, tmp1)); minor3 = _mm_add_ps(_mm_mul_ps(row1, tmp1), minor3); det = _mm_mul_ps(row0, minor0); det = _mm_add_ps(_mm_shuffle_ps(det, det, 0x4E), det); det = _mm_add_ss(_mm_shuffle_ps(det, det, 0xB1), det); tmp1 = _mm_rcp_ss(det); #if 0 det = _mm_sub_ss(_mm_add_ss(tmp1, tmp1), _mm_mul_ss(det, _mm_mul_ss(tmp1, tmp1))); det = _mm_shuffle_ps(det, det, 0x00); #else det= _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(0,0,0,0)); #endif minor0 = _mm_mul_ps(det, minor0); minor1 = _mm_mul_ps(det, minor1); minor2 = _mm_mul_ps(det, minor2); minor3 = _mm_mul_ps(det, minor3); Mat44V invTrans(minor0,minor1,minor2,minor3); return M44Trnsps(invTrans); } NV_FORCE_INLINE Vec4V V4LoadXYZW(const float& x, const float& y, const float& z, const float& w) { return _mm_set_ps(w, z, y, x); } /* // AP: work in progress - use proper SSE intrinsics where possible NV_FORCE_INLINE VecU16V V4U32PK(VecU32V a, VecU32V b) { VecU16V result; result.m128_u16[0] = uint16_t(NvClamp<uint32_t>((a).m128_u32[0], 0, 0xFFFF)); result.m128_u16[1] = uint16_t(NvClamp<uint32_t>((a).m128_u32[1], 0, 0xFFFF)); result.m128_u16[2] = uint16_t(NvClamp<uint32_t>((a).m128_u32[2], 0, 0xFFFF)); result.m128_u16[3] = uint16_t(NvClamp<uint32_t>((a).m128_u32[3], 0, 0xFFFF)); result.m128_u16[4] = uint16_t(NvClamp<uint32_t>((b).m128_u32[0], 0, 0xFFFF)); result.m128_u16[5] = uint16_t(NvClamp<uint32_t>((b).m128_u32[1], 0, 0xFFFF)); result.m128_u16[6] = uint16_t(NvClamp<uint32_t>((b).m128_u32[2], 0, 0xFFFF)); result.m128_u16[7] = uint16_t(NvClamp<uint32_t>((b).m128_u32[3], 0, 0xFFFF)); return result; } */ NV_FORCE_INLINE VecU32V V4U32Sel(const BoolV c, const VecU32V a, const VecU32V b) { return internalWindowsSimd::m128_I2F(_mm_or_si128( _mm_andnot_si128(internalWindowsSimd::m128_F2I(c), internalWindowsSimd::m128_F2I(b)), _mm_and_si128(internalWindowsSimd::m128_F2I(c), internalWindowsSimd::m128_F2I(a)) )); } NV_FORCE_INLINE VecU32V 
V4U32or(VecU32V a, VecU32V b) { return internalWindowsSimd::m128_I2F(_mm_or_si128(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b))); } NV_FORCE_INLINE VecU32V V4U32and(VecU32V a, VecU32V b) { return internalWindowsSimd::m128_I2F(_mm_and_si128(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b))); } NV_FORCE_INLINE VecU32V V4U32Andc(VecU32V a, VecU32V b) { return internalWindowsSimd::m128_I2F(_mm_andnot_si128(internalWindowsSimd::m128_F2I(b), internalWindowsSimd::m128_F2I(a))); } /* NV_FORCE_INLINE VecU16V V4U16Or(VecU16V a, VecU16V b) { return internalWindowsSimd::m128_I2F(_mm_or_si128(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b))); } */ /* NV_FORCE_INLINE VecU16V V4U16And(VecU16V a, VecU16V b) { return internalWindowsSimd::m128_I2F(_mm_and_si128(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b))); } */ /* NV_FORCE_INLINE VecU16V V4U16Andc(VecU16V a, VecU16V b) { return internalWindowsSimd::m128_I2F(_mm_andnot_si128(internalWindowsSimd::m128_F2I(b), internalWindowsSimd::m128_F2I(a))); } */ NV_FORCE_INLINE VecI32V U4Load(const uint32_t i) { return (_mm_load1_ps((float*)&i)); } NV_FORCE_INLINE VecU32V U4LoadU(const uint32_t* i) { return _mm_loadu_ps((float*)i); } NV_FORCE_INLINE VecU32V U4LoadA(const uint32_t* i) { VECMATHAOS_ASSERT(0==((size_t)i & 0x0f)); return _mm_load_ps((float*)i); } NV_FORCE_INLINE VecI32V I4Load(const int32_t i) { return (_mm_load1_ps((float*)&i)); } NV_FORCE_INLINE VecI32V I4LoadU(const int32_t* i) { return _mm_loadu_ps((float*)i); } NV_FORCE_INLINE VecI32V I4LoadA(const int32_t* i) { return _mm_load_ps((float*)i); } NV_FORCE_INLINE VecI32V VecI32V_Add(const VecI32VArg a, const VecI32VArg b) { return internalWindowsSimd::m128_I2F(_mm_add_epi32(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b))); } NV_FORCE_INLINE VecI32V VecI32V_Sub(const VecI32VArg a, const VecI32VArg b) { return internalWindowsSimd::m128_I2F(_mm_sub_epi32(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b))); } NV_FORCE_INLINE BoolV VecI32V_IsGrtr(const VecI32VArg a, const VecI32VArg b) { return internalWindowsSimd::m128_I2F(_mm_cmpgt_epi32(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b))); } NV_FORCE_INLINE BoolV VecI32V_IsEq(const VecI32VArg a, const VecI32VArg b) { return internalWindowsSimd::m128_I2F(_mm_cmpeq_epi32(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b))); } NV_FORCE_INLINE VecI32V V4I32Sel(const BoolV c, const VecI32V a, const VecI32V b) { return V4U32Sel(c, a, b); } NV_FORCE_INLINE VecI32V VecI32V_Zero() { return V4Zero(); } NV_FORCE_INLINE VecI32V VecI32V_One() { return I4Load(1); } NV_FORCE_INLINE VecI32V VecI32V_Two() { return I4Load(2); } NV_FORCE_INLINE VecI32V VecI32V_MinusOne() { return I4Load(-1); } NV_FORCE_INLINE VecU32V U4Zero() { return U4Load(0); } NV_FORCE_INLINE VecU32V U4One() { return U4Load(1); } NV_FORCE_INLINE VecU32V U4Two() { return U4Load(2); } NV_FORCE_INLINE VecI32V VecI32V_Sel(const BoolV c, const VecI32VArg a, const VecI32VArg b) { VECMATHAOS_ASSERT(_VecMathTests::allElementsEqualBoolV(c,BTTTT()) || _VecMathTests::allElementsEqualBoolV(c,BFFFF())); return _mm_or_ps(_mm_andnot_ps(c, b), _mm_and_ps(c, a)); } NV_FORCE_INLINE VecShiftV VecI32V_PrepareShift(const VecI32VArg shift) { VecShiftV preparedShift; preparedShift.shift = VecI32V_Sel(BTFFF(), shift, VecI32V_Zero()); return preparedShift; } NV_FORCE_INLINE VecI32V VecI32V_LeftShift(const VecI32VArg a, const VecShiftVArg count) { return 
internalWindowsSimd::m128_I2F(_mm_sll_epi32(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(count.shift))); } NV_FORCE_INLINE VecI32V VecI32V_RightShift(const VecI32VArg a, const VecShiftVArg count) { return internalWindowsSimd::m128_I2F(_mm_srl_epi32(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(count.shift))); } NV_FORCE_INLINE VecI32V VecI32V_And(const VecI32VArg a, const VecI32VArg b) { return _mm_and_ps(a, b); } NV_FORCE_INLINE VecI32V VecI32V_Or(const VecI32VArg a, const VecI32VArg b) { return _mm_or_ps(a, b); } NV_FORCE_INLINE VecI32V VecI32V_GetX(const VecI32VArg a) { return _mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0)); } NV_FORCE_INLINE VecI32V VecI32V_GetY(const VecI32VArg a) { return _mm_shuffle_ps(a, a, _MM_SHUFFLE(1,1,1,1)); } NV_FORCE_INLINE VecI32V VecI32V_GetZ(const VecI32VArg a) { return _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2)); } NV_FORCE_INLINE VecI32V VecI32V_GetW(const VecI32VArg a) { return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3,3,3,3)); } NV_FORCE_INLINE void NvI32_From_VecI32V(const VecI32VArg a, int32_t* i) { _mm_store_ss((float*)i,a); } NV_FORCE_INLINE VecI32V VecI32V_From_BoolV(const BoolVArg a) { return a; } NV_FORCE_INLINE VecU32V VecU32V_From_BoolV(const BoolVArg a) { return a; } NV_FORCE_INLINE VecI32V VecI32V_Merge(const VecI32VArg a, const VecI32VArg b, const VecI32VArg c, const VecI32VArg d) { return V4Merge(a, b, c, d); } /* template<int a> NV_FORCE_INLINE VecI32V V4ISplat() { VecI32V result; result.m128_i32[0] = a; result.m128_i32[1] = a; result.m128_i32[2] = a; result.m128_i32[3] = a; return result; } template<uint32_t a> NV_FORCE_INLINE VecU32V V4USplat() { VecU32V result; result.m128_u32[0] = a; result.m128_u32[1] = a; result.m128_u32[2] = a; result.m128_u32[3] = a; return result; } */ /* NV_FORCE_INLINE void V4U16StoreAligned(VecU16V val, VecU16V* address) { *address = val; } */ NV_FORCE_INLINE void V4U32StoreAligned(VecU32V val, VecU32V* address) { *address = val; } NV_FORCE_INLINE Vec4V V4Andc(const Vec4V a, const VecU32V b) { VecU32V result32(a); result32 = V4U32Andc(result32, b); return Vec4V(result32); } NV_FORCE_INLINE VecU32V V4IsGrtrV32u(const Vec4V a, const Vec4V b) { return V4IsGrtr(a, b); } NV_FORCE_INLINE VecU16V V4U16LoadAligned(VecU16V* addr) { return *addr; } NV_FORCE_INLINE VecU16V V4U16LoadUnaligned(VecU16V* addr) { return *addr; } // unsigned compares are not supported on x86 NV_FORCE_INLINE VecU16V V4U16CompareGt(VecU16V a, VecU16V b) { // _mm_cmpgt_epi16 doesn't work for unsigned values unfortunately // return m128_I2F(_mm_cmpgt_epi16(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b))); VecU16V result; result.m128_u16[0] = uint16_t((a).m128_u16[0]>(b).m128_u16[0]); result.m128_u16[1] = uint16_t((a).m128_u16[1]>(b).m128_u16[1]); result.m128_u16[2] = uint16_t((a).m128_u16[2]>(b).m128_u16[2]); result.m128_u16[3] = uint16_t((a).m128_u16[3]>(b).m128_u16[3]); result.m128_u16[4] = uint16_t((a).m128_u16[4]>(b).m128_u16[4]); result.m128_u16[5] = uint16_t((a).m128_u16[5]>(b).m128_u16[5]); result.m128_u16[6] = uint16_t((a).m128_u16[6]>(b).m128_u16[6]); result.m128_u16[7] = uint16_t((a).m128_u16[7]>(b).m128_u16[7]); return result; } NV_FORCE_INLINE VecU16V V4I16CompareGt(VecU16V a, VecU16V b) { return internalWindowsSimd::m128_I2F(_mm_cmpgt_epi16(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b))); } NV_FORCE_INLINE Vec4V Vec4V_From_VecU32V(VecU32V a) { Vec4V result = V4LoadXYZW(float(a.m128_u32[0]), float(a.m128_u32[1]), float(a.m128_u32[2]), float(a.m128_u32[3])); return 
result; } NV_FORCE_INLINE Vec4V Vec4V_From_VecI32V(VecI32V a) { return _mm_cvtepi32_ps(internalWindowsSimd::m128_F2I(a)); } NV_FORCE_INLINE VecI32V VecI32V_From_Vec4V(Vec4V a) { return internalWindowsSimd::m128_I2F(_mm_cvttps_epi32(a)); } NV_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecU32V(VecU32V a) { return Vec4V(a); } NV_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecI32V(VecI32V a) { return Vec4V(a); } NV_FORCE_INLINE VecU32V VecU32V_ReinterpretFrom_Vec4V(Vec4V a) { return VecU32V(a); } NV_FORCE_INLINE VecI32V VecI32V_ReinterpretFrom_Vec4V(Vec4V a) { return VecI32V(a); } template<int index> NV_FORCE_INLINE VecU32V V4U32SplatElement(VecU32V a) { return internalWindowsSimd::m128_I2F(_mm_shuffle_epi32(internalWindowsSimd::m128_F2I(a), _MM_SHUFFLE(index, index, index, index))); } template<int index> NV_FORCE_INLINE Vec4V V4SplatElement(Vec4V a) { return internalWindowsSimd::m128_I2F(_mm_shuffle_epi32(internalWindowsSimd::m128_F2I(a), _MM_SHUFFLE(index, index, index, index))); } template<int index> NV_FORCE_INLINE VecU16V V4U16SplatElement(VecU16V a) { VecU16V result = a; //AM: initializing to avoid nonsensical warning 4701 here with VC10. for (int i = 0; i < 8; i ++) result.m128_u16[i] = a.m128_u16[index]; return result; } template<int imm> NV_FORCE_INLINE VecI16V V4I16SplatImmediate() { VecI16V result; result.m128_i16[0] = imm; result.m128_i16[1] = imm; result.m128_i16[2] = imm; result.m128_i16[3] = imm; result.m128_i16[4] = imm; result.m128_i16[5] = imm; result.m128_i16[6] = imm; result.m128_i16[7] = imm; return result; } template<uint16_t imm> NV_FORCE_INLINE VecU16V V4U16SplatImmediate() { VecU16V result; result.m128_u16[0] = imm; result.m128_u16[1] = imm; result.m128_u16[2] = imm; result.m128_u16[3] = imm; result.m128_u16[4] = imm; result.m128_u16[5] = imm; result.m128_u16[6] = imm; result.m128_u16[7] = imm; return result; } NV_FORCE_INLINE VecU16V V4U16SubtractModulo(VecU16V a, VecU16V b) { return internalWindowsSimd::m128_I2F(_mm_sub_epi16(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b))); } NV_FORCE_INLINE VecU16V V4U16AddModulo(VecU16V a, VecU16V b) { return internalWindowsSimd::m128_I2F(_mm_add_epi16(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b))); } NV_FORCE_INLINE VecU32V V4U16GetLo16(VecU16V a) { VecU32V result; result.m128_u32[0] = a.m128_u16[0]; result.m128_u32[1] = a.m128_u16[2]; result.m128_u32[2] = a.m128_u16[4]; result.m128_u32[3] = a.m128_u16[6]; return result; } NV_FORCE_INLINE VecU32V V4U16GetHi16(VecU16V a) { VecU32V result; result.m128_u32[0] = a.m128_u16[1]; result.m128_u32[1] = a.m128_u16[3]; result.m128_u32[2] = a.m128_u16[5]; result.m128_u32[3] = a.m128_u16[7]; return result; } NV_FORCE_INLINE VecU32V VecU32VLoadXYZW(uint32_t x, uint32_t y, uint32_t z, uint32_t w) { VecU32V result; result.m128_u32[0] = x; result.m128_u32[1] = y; result.m128_u32[2] = z; result.m128_u32[3] = w; return result; } NV_FORCE_INLINE Vec4V V4ConvertFromI32V(const VecI32V in) { return _mm_cvtepi32_ps(internalWindowsSimd::m128_F2I(in)); } //not used /* NV_FORCE_INLINE Vec4V V4LoadAligned(Vec4V* addr) { return *addr; } */ /* NV_FORCE_INLINE Vec4V V4LoadUnaligned(Vec4V* addr) { return Vec4V_From_F32Array((float*)addr); } */ /* NV_FORCE_INLINE Vec4V V4Ceil(const Vec4V a) { return Vec4V_From_XYZW(NvCeil(a.m128_f32[0]), NvCeil(a.m128_f32[1]), NvCeil(a.m128_f32[2]), NvCeil(a.m128_f32[3])); } */ /* NV_FORCE_INLINE Vec4V V4Floor(const Vec4V a) { return Vec4V_From_XYZW(NvFloor(a.m128_f32[0]), NvFloor(a.m128_f32[1]), NvFloor(a.m128_f32[2]), 
NvFloor(a.m128_f32[3])); } */ /* NV_FORCE_INLINE VecU32V V4ConvertToU32VSaturate(const Vec4V a, uint32_t power) { NV_ASSERT(power == 0 && "Non-zero power not supported in convertToU32VSaturate"); NV_UNUSED(power); // prevent warning in release builds float ffffFFFFasFloat = float(0xFFFF0000); VecU32V result; result.m128_u32[0] = uint32_t(NvClamp<float>((a).m128_f32[0], 0.0f, ffffFFFFasFloat)); result.m128_u32[1] = uint32_t(NvClamp<float>((a).m128_f32[1], 0.0f, ffffFFFFasFloat)); result.m128_u32[2] = uint32_t(NvClamp<float>((a).m128_f32[2], 0.0f, ffffFFFFasFloat)); result.m128_u32[3] = uint32_t(NvClamp<float>((a).m128_f32[3], 0.0f, ffffFFFFasFloat)); return result; } */ #endif //PS_WINDOWS_INLINE_AOS_H
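// --- Illustrative usage sketch (editor addition, not part of the original header) ---
// A minimal example of how the BoolV lane masks, the select helpers, and the Mat33V operations
// defined above are typically combined. The two functions below are hypothetical; V4Sel, BTTFF(),
// M33TrnspsMulV3() and friends are the helpers declared in this header. Wrapped in '#if 0' so it
// stays purely illustrative.
#if 0
// Blend two vectors per lane: BTTFF() is true in the x and y lanes, so the result
// takes x,y from 'a' and z,w from 'b'.
NV_FORCE_INLINE Vec4V blendXYfromA_ZWfromB(const Vec4V a, const Vec4V b)
{
    return V4Sel(BTTFF(), a, b);
}

// Multiply a vector by the transpose of a 3x3 matrix without materializing the transpose:
// M33TrnspsMulV3(m, v) is equivalent to M33MulV3(M33Trnsps(m), v).
NV_FORCE_INLINE Vec3V mulByTranspose(const Mat33V& m, const Vec3V v)
{
    return M33TrnspsMulV3(m, v);
}
#endif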
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/platform/windows/NsWindowsIntrinsics.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_WINDOWS_NSWINDOWSINTRINSICS_H #define NV_WINDOWS_NSWINDOWSINTRINSICS_H #include "Ns.h" #include "NvAssert.h" // this file is for internal intrinsics - that is, intrinsics that are used in // cross platform code but do not appear in the API #if !(NV_WINDOWS_FAMILY || NV_WINRT) #error "This file should only be included by Windows or WIN8ARM builds!!" #endif #pragma warning(push) //'symbol' is not defined as a preprocessor macro, replacing with '0' for 'directives' #pragma warning(disable : 4668) #if NV_VC == 10 #pragma warning(disable : 4987) // nonstandard extension used: 'throw (...)' #endif #include <intrin.h> #pragma warning(pop) #pragma warning(push) #pragma warning(disable : 4985) // 'symbol name': attributes not present on previous declaration #include <math.h> #pragma warning(pop) #include <float.h> #include <mmintrin.h> #pragma intrinsic(_BitScanForward) #pragma intrinsic(_BitScanReverse) namespace nvidia { namespace shdfnd { /* * Implements a memory barrier */ NV_FORCE_INLINE void memoryBarrier() { _ReadWriteBarrier(); /* long Barrier; __asm { xchg Barrier, eax }*/ } /*! Returns the index of the highest set bit. Not valid for zero arg. */ NV_FORCE_INLINE uint32_t highestSetBitUnsafe(uint32_t v) { unsigned long retval; _BitScanReverse(&retval, v); return retval; } /*! Returns the index of the lowest set bit. Undefined for zero arg. */ NV_FORCE_INLINE uint32_t lowestSetBitUnsafe(uint32_t v) { unsigned long retval; _BitScanForward(&retval, v); return retval; } /*! Returns the number of leading zeros in v. Returns 32 for v=0. */ NV_FORCE_INLINE uint32_t countLeadingZeros(uint32_t v) { if(v) { unsigned long bsr = (unsigned long)-1; _BitScanReverse(&bsr, v); return 31 - bsr; } else return 32; } /*! Prefetch the cache line containing \c ptr+offset.
*/ #if !NV_ARM NV_FORCE_INLINE void prefetchLine(const void* ptr, uint32_t offset = 0) { // cache line on X86/X64 is 64-bytes so a 128-byte prefetch would require 2 prefetches. // However, we can only dispatch a limited number of prefetch instructions so we opt to prefetch just 1 cache line /*_mm_prefetch(((const char*)ptr + offset), _MM_HINT_T0);*/ // We get slightly better performance prefetching to non-temporal addresses instead of all cache levels _mm_prefetch(((const char*)ptr + offset), _MM_HINT_NTA); } #else NV_FORCE_INLINE void prefetchLine(const void* ptr, uint32_t offset = 0) { // arm does have 32b cache line size __prefetch(((const char*)ptr + offset)); } #endif /*! Prefetch \c count bytes starting at \c ptr. */ #if !NV_ARM NV_FORCE_INLINE void prefetch(const void* ptr, uint32_t count = 1) { const char* cp = (char*)ptr; uint64_t p = size_t(ptr); uint64_t startLine = p >> 6, endLine = (p + count - 1) >> 6; uint64_t lines = endLine - startLine + 1; do { prefetchLine(cp); cp += 64; } while(--lines); } #else NV_FORCE_INLINE void prefetch(const void* ptr, uint32_t count = 1) { const char* cp = (char*)ptr; uint32_t p = size_t(ptr); uint32_t startLine = p >> 5, endLine = (p + count - 1) >> 5; uint32_t lines = endLine - startLine + 1; do { prefetchLine(cp); cp += 32; } while(--lines); } #endif //! \brief platform-specific reciprocal NV_CUDA_CALLABLE NV_FORCE_INLINE float recipFast(float a) { return 1.0f / a; } //! \brief platform-specific fast reciprocal square root NV_CUDA_CALLABLE NV_FORCE_INLINE float recipSqrtFast(float a) { return 1.0f / ::sqrtf(a); } //! \brief platform-specific floor NV_CUDA_CALLABLE NV_FORCE_INLINE float floatFloor(float x) { return ::floorf(x); } #define NS_EXPECT_TRUE(x) x #define NS_EXPECT_FALSE(x) x } // namespace shdfnd } // namespace nvidia #endif // #ifndef NV_WINDOWS_NSWINDOWSINTRINSICS_H
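// --- Illustrative usage sketch (editor addition, not part of the original header) ---
// Shows how the bit-scan helpers above are typically used. nextPowerOfTwo() is a hypothetical
// utility, not part of this file; it relies only on highestSetBitUnsafe() as documented above
// (argument must be nonzero). Note the identity countLeadingZeros(v) == 31 - highestSetBitUnsafe(v)
// for any nonzero v.
#if 0
NV_FORCE_INLINE uint32_t nextPowerOfTwo(uint32_t v)
{
    if(v <= 1)
        return 1u;
    // v - 1 is nonzero here, so highestSetBitUnsafe is well defined (assumes v <= 1u << 31).
    return 1u << (nvidia::shdfnd::highestSetBitUnsafe(v - 1) + 1);
}
#endif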
NVIDIA-Omniverse/PhysX/blast/source/shared/utils/AssetGenerator.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef ASSETGENERATOR_H #define ASSETGENERATOR_H #include "NvBlast.h" #include <vector> #include <cmath> class GeneratorAsset { public: struct Vec3 { float x, y, z; Vec3() {} Vec3(float x_, float y_, float z_) : x(x_), y(y_), z(z_) {} Vec3 operator * (float v) const { return Vec3(x * v, y * v, z * v); } Vec3 operator * (const Vec3& v) const { return Vec3(x * v.x, y * v.y, z * v.z); } Vec3 operator + (const Vec3& v) const { return Vec3(x + v.x, y + v.y, z + v.z); } Vec3 operator - (const Vec3& v) const { return Vec3(x - v.x, y - v.y, z - v.z); } Vec3 getNormalized() const { return (*this)*(1.0f / sqrt(x*x + y*y + z*z)); } }; struct BlastChunkCube { BlastChunkCube(Vec3 position_, Vec3 extents_) { position = position_; extents = extents_; } Vec3 position; Vec3 extents; }; std::vector<NvBlastChunkDesc> solverChunks; std::vector<NvBlastBondDesc> solverBonds; std::vector<BlastChunkCube> chunks; Vec3 extents; }; class CubeAssetGenerator { public: struct DepthInfo { DepthInfo(GeneratorAsset::Vec3 slices = GeneratorAsset::Vec3(1, 1, 1), NvBlastChunkDesc::Flags flag_ = NvBlastChunkDesc::Flags::NoFlags) : slicesPerAxis(slices), flag(flag_) {} GeneratorAsset::Vec3 slicesPerAxis; NvBlastChunkDesc::Flags flag; }; enum BondFlags { NO_BONDS = 0, X_BONDS = 1 << 0, Y_BONDS = 1 << 1, Z_BONDS = 1 << 2, X_PLUS_WORLD_BONDS = 1 << 3, X_MINUS_WORLD_BONDS = 1 << 4, Y_PLUS_WORLD_BONDS = 1 << 5, Y_MINUS_WORLD_BONDS = 1 << 6, Z_PLUS_WORLD_BONDS = 1 << 7, Z_MINUS_WORLD_BONDS = 1 << 8, ALL_INTERNAL_BONDS = X_BONDS | Y_BONDS | Z_BONDS }; struct Settings { Settings() : bondFlags(BondFlags::ALL_INTERNAL_BONDS) {} std::vector<DepthInfo> depths; GeneratorAsset::Vec3 extents; BondFlags bondFlags; }; static void generate(GeneratorAsset& asset, const Settings& settings); private: static void fillBondDesc(std::vector<NvBlastBondDesc>& bondDescs, uint32_t id0, uint32_t id1, GeneratorAsset::Vec3 pos0, GeneratorAsset::Vec3 pos1, GeneratorAsset::Vec3 size, float area); }; inline CubeAssetGenerator::BondFlags operator 
| (CubeAssetGenerator::BondFlags a, CubeAssetGenerator::BondFlags b) { return static_cast<CubeAssetGenerator::BondFlags>(static_cast<int>(a) | static_cast<int>(b)); } #endif // #ifndef ASSETGENERATOR_H
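// --- Illustrative usage sketch (editor addition, not part of the original header) ---
// A typical CubeAssetGenerator setup: one root chunk at depth 0, sliced into 2x2x2 support
// chunks at depth 1, bonded internally along all three axes. buildTestCube() is hypothetical;
// everything it calls is declared above (SupportFlag comes from NvBlast.h).
#if 0
inline void buildTestCube(GeneratorAsset& asset)
{
    CubeAssetGenerator::Settings settings;
    settings.extents = GeneratorAsset::Vec3(2.0f, 2.0f, 2.0f);
    settings.bondFlags = CubeAssetGenerator::BondFlags::ALL_INTERNAL_BONDS;
    settings.depths.push_back(CubeAssetGenerator::DepthInfo(GeneratorAsset::Vec3(1, 1, 1)));    // root chunk
    settings.depths.push_back(CubeAssetGenerator::DepthInfo(GeneratorAsset::Vec3(2, 2, 2),
                                                            NvBlastChunkDesc::Flags::SupportFlag)); // support chunks
    CubeAssetGenerator::generate(asset, settings);
    // asset.solverChunks and asset.solverBonds are now ready to be passed on to asset creation.
}
#endif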
NVIDIA-Omniverse/PhysX/blast/source/shared/utils/AssetGenerator.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "AssetGenerator.h" #include <cstring> void CubeAssetGenerator::generate(GeneratorAsset& asset, const Settings& settings) { // cleanup asset.solverChunks.clear(); asset.solverBonds.clear(); asset.chunks.clear(); // initial params std::vector<uint32_t> depthStartIDs; std::vector<GeneratorAsset::Vec3> depthSlicesPerAxisTotal; uint32_t currentID = 0; GeneratorAsset::Vec3 extents = settings.extents; asset.extents = extents; // Iterate over depths and create children for (uint32_t depth = 0; depth < settings.depths.size(); depth++) { GeneratorAsset::Vec3 slicesPerAxis = settings.depths[depth].slicesPerAxis; GeneratorAsset::Vec3 slicesPerAxisTotal = (depth == 0) ? slicesPerAxis : slicesPerAxis * (depthSlicesPerAxisTotal[depth - 1]); depthSlicesPerAxisTotal.push_back(slicesPerAxisTotal); depthStartIDs.push_back(currentID); extents.x /= slicesPerAxis.x; extents.y /= slicesPerAxis.y; extents.z /= slicesPerAxis.z; for (uint32_t z = 0; z < (uint32_t)slicesPerAxisTotal.z; ++z) { uint32_t parent_z = z / (uint32_t)slicesPerAxis.z; for (uint32_t y = 0; y < (uint32_t)slicesPerAxisTotal.y; ++y) { uint32_t parent_y = y / (uint32_t)slicesPerAxis.y; for (uint32_t x = 0; x < (uint32_t)slicesPerAxisTotal.x; ++x) { uint32_t parent_x = x / (uint32_t)slicesPerAxis.x; uint32_t parentID = depth == 0 ? 
UINT32_MAX : depthStartIDs[depth - 1] + parent_x + (uint32_t)depthSlicesPerAxisTotal[depth - 1].x*(parent_y + (uint32_t)depthSlicesPerAxisTotal[depth - 1].y*parent_z); GeneratorAsset::Vec3 position; position.x = ((float)x - (slicesPerAxisTotal.x / 2) + 0.5f) * extents.x; position.y = ((float)y - (slicesPerAxisTotal.y / 2) + 0.5f) * extents.y; position.z = ((float)z - (slicesPerAxisTotal.z / 2) + 0.5f) * extents.z; NvBlastChunkDesc chunkDesc; memcpy(chunkDesc.centroid, &position.x, 3 * sizeof(float)); chunkDesc.volume = extents.x * extents.y * extents.z; chunkDesc.flags = settings.depths[depth].flag; chunkDesc.userData = currentID++; chunkDesc.parentChunkDescIndex = parentID; asset.solverChunks.push_back(chunkDesc); if (settings.depths[depth].flag & NvBlastChunkDesc::Flags::SupportFlag) { // Internal bonds // x-neighbor if (x > 0 && (settings.bondFlags & BondFlags::X_BONDS)) { GeneratorAsset::Vec3 xNeighborPosition = position - GeneratorAsset::Vec3(extents.x, 0, 0); uint32_t neighborID = chunkDesc.userData - 1; fillBondDesc(asset.solverBonds, chunkDesc.userData, neighborID, position, xNeighborPosition, extents, extents.y * extents.z); } // y-neighbor if (y > 0 && (settings.bondFlags & BondFlags::Y_BONDS)) { GeneratorAsset::Vec3 yNeighborPosition = position - GeneratorAsset::Vec3(0, extents.y, 0); uint32_t neighborID = chunkDesc.userData - (uint32_t)slicesPerAxisTotal.x; fillBondDesc(asset.solverBonds, chunkDesc.userData, neighborID, position, yNeighborPosition, extents, extents.z * extents.x); } // z-neighbor if (z > 0 && (settings.bondFlags & BondFlags::Z_BONDS)) { GeneratorAsset::Vec3 zNeighborPosition = position - GeneratorAsset::Vec3(0, 0, extents.z); uint32_t neighborID = chunkDesc.userData - (uint32_t)slicesPerAxisTotal.x*(uint32_t)slicesPerAxisTotal.y; fillBondDesc(asset.solverBonds, chunkDesc.userData, neighborID, position, zNeighborPosition, extents, extents.x * extents.y); } // World bonds (only one per chunk is enough, otherwise they will be removed as duplicated, thus 'else if') // -x world bond if (x == 0 && (settings.bondFlags & BondFlags::X_MINUS_WORLD_BONDS)) { GeneratorAsset::Vec3 xNeighborPosition = position - GeneratorAsset::Vec3(extents.x, 0, 0); fillBondDesc(asset.solverBonds, chunkDesc.userData, UINT32_MAX, position, xNeighborPosition, extents, extents.y * extents.z); } // +x world bond else if (x == slicesPerAxisTotal.x - 1 && (settings.bondFlags & BondFlags::X_PLUS_WORLD_BONDS)) { GeneratorAsset::Vec3 xNeighborPosition = position + GeneratorAsset::Vec3(extents.x, 0, 0); fillBondDesc(asset.solverBonds, chunkDesc.userData, UINT32_MAX, position, xNeighborPosition, extents, extents.y * extents.z); } // -y world bond else if (y == 0 && (settings.bondFlags & BondFlags::Y_MINUS_WORLD_BONDS)) { GeneratorAsset::Vec3 yNeighborPosition = position - GeneratorAsset::Vec3(0, extents.y, 0); fillBondDesc(asset.solverBonds, chunkDesc.userData, UINT32_MAX, position, yNeighborPosition, extents, extents.z * extents.x); } // +y world bond else if (y == slicesPerAxisTotal.y - 1 && (settings.bondFlags & BondFlags::Y_PLUS_WORLD_BONDS)) { GeneratorAsset::Vec3 yNeighborPosition = position + GeneratorAsset::Vec3(0, extents.y, 0); fillBondDesc(asset.solverBonds, chunkDesc.userData, UINT32_MAX, position, yNeighborPosition, extents, extents.z * extents.x); } // -z world bond else if (z == 0 && (settings.bondFlags & BondFlags::Z_MINUS_WORLD_BONDS)) { GeneratorAsset::Vec3 zNeighborPosition = position - GeneratorAsset::Vec3(0, 0, extents.z); fillBondDesc(asset.solverBonds, chunkDesc.userData, 
UINT32_MAX, position, zNeighborPosition, extents, extents.x * extents.y); } // +z world bond else if (z == slicesPerAxisTotal.z - 1 && (settings.bondFlags & BondFlags::Z_PLUS_WORLD_BONDS)) { GeneratorAsset::Vec3 zNeighborPosition = position + GeneratorAsset::Vec3(0, 0, extents.z); fillBondDesc(asset.solverBonds, chunkDesc.userData, UINT32_MAX, position, zNeighborPosition, extents, extents.x * extents.y); } } asset.chunks.push_back(GeneratorAsset::BlastChunkCube(position, extents/*isStatic*/)); } } } } // Reorder chunks std::vector<uint32_t> chunkReorderMap(asset.solverChunks.size()); std::vector<char> scratch(asset.solverChunks.size() * sizeof(NvBlastChunkDesc)); NvBlastBuildAssetDescChunkReorderMap(chunkReorderMap.data(), asset.solverChunks.data(), (uint32_t)asset.solverChunks.size(), scratch.data(), nullptr); std::vector<GeneratorAsset::BlastChunkCube> chunksTemp = asset.chunks; for (uint32_t i = 0; i < chunkReorderMap.size(); ++i) { asset.chunks[chunkReorderMap[i]] = chunksTemp[i]; } NvBlastApplyAssetDescChunkReorderMapInPlace(asset.solverChunks.data(), (uint32_t)asset.solverChunks.size(), asset.solverBonds.data(), (uint32_t)asset.solverBonds.size(), chunkReorderMap.data(), true, scratch.data(), nullptr); } void CubeAssetGenerator::fillBondDesc(std::vector<NvBlastBondDesc>& bondDescs, uint32_t id0, uint32_t id1, GeneratorAsset::Vec3 pos0, GeneratorAsset::Vec3 pos1, GeneratorAsset::Vec3 size, float area) { NV_UNUSED(size); NvBlastBondDesc bondDesc = NvBlastBondDesc(); bondDesc.chunkIndices[0] = id0; bondDesc.chunkIndices[1] = id1; bondDesc.bond.area = area; GeneratorAsset::Vec3 centroid = (pos0 + pos1) * 0.5f; bondDesc.bond.centroid[0] = centroid.x; bondDesc.bond.centroid[1] = centroid.y; bondDesc.bond.centroid[2] = centroid.z; GeneratorAsset::Vec3 normal = (pos0 - pos1).getNormalized(); bondDesc.bond.normal[0] = normal.x; bondDesc.bond.normal[1] = normal.y; bondDesc.bond.normal[2] = normal.z; bondDescs.push_back(bondDesc); }
NVIDIA-Omniverse/PhysX/blast/include/extensions/authoring/NvBlastExtAuthoringFractureTool.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. //! @file //! //! @brief Defines the API for the NvBlastExtAuthoring blast sdk extension's FractureTool #ifndef NVBLASTAUTHORINGFRACTURETOOL_H #define NVBLASTAUTHORINGFRACTURETOOL_H #include "NvBlastExtAuthoringTypes.h" namespace Nv { namespace Blast { class SpatialAccelerator; class Triangulator; class Mesh; class CutoutSet; /* Transform used for chunk scaling (uniform scale + translation only) */ struct TransformST { NvcVec3 t; // Translation float s; // Uniform scale static TransformST identity() { return {{0.0f, 0.0f, 0.0f}, 1.0f}; } /* Point and vector transformations. Note, normals are invariant (up to normalization) under TransformST transformations. */ NvcVec3 transformPos(const NvcVec3& p) const { return {s * p.x + t.x, s * p.y + t.y, s * p.z + t.z}; } NvcVec3 transformDir(const NvcVec3& d) const { return {s * d.x, s * d.y, s * d.z}; } NvcVec3 invTransformPos(const NvcVec3& p) const { return {(p.x - t.x) / s, (p.y - t.y) / s, (p.z - t.z) / s}; } NvcVec3 invTransformDir(const NvcVec3& d) const { return {d.x / s, d.y / s, d.z / s}; } }; /* Chunk data, chunks with parentChunkId == -1 are the source meshes. */ struct ChunkInfo { ChunkInfo(); enum ChunkFlags { NO_FLAGS = 0, APPROXIMATE_BONDING = 1 // Created by island splitting or chunk merge, etc. and should check for inexact bonds }; protected: /** * The mesh is transformed to fit within a unit cube centered at the origin. * This transform puts the mesh back into its original space. * These fields are protected so that only an authoring class can access them. * It is important that the tmToWorld be set based upon the mesh bounds and parent tmToWorld. 
*/ TransformST tmToWorld; Mesh* meshData; /** * Parent ID is set to this value initially, as opposed to -1 (which is a valid parent ID denoting "no parent") */ enum { UninitializedID = INT32_MIN }; public: int32_t parentChunkId; int32_t chunkId; uint32_t flags; bool isLeaf; bool isChanged; const TransformST& getTmToWorld() const { return tmToWorld; } Mesh* getMesh() const { return meshData; } }; inline ChunkInfo::ChunkInfo() : tmToWorld(TransformST::identity()), meshData(nullptr), parentChunkId(UninitializedID), chunkId(-1), flags(NO_FLAGS), isLeaf(false), isChanged(true) { } /** Abstract base class for user-defined random value generator. */ class RandomGeneratorBase { public: // Generates uniformly distributed value in [0, 1] range. virtual float getRandomValue() = 0; // Seeds random value generator virtual void seed(int32_t seed) = 0; virtual ~RandomGeneratorBase(){}; }; /* Noise fracturing configuration for chunks' faces */ struct NoiseConfiguration { /** Noisy slicing configuration: amplitude of cutting surface noise. If it is 0, noise is disabled. */ float amplitude = 0.f; /** Frequency of cutting surface noise. */ float frequency = 1.f; /** Octave number in slicing surface noise. */ uint32_t octaveNumber = 1; /** Sampling interval for surface grid. */ NvcVec3 samplingInterval = { 1, 1, 1 }; }; /* Slicing fracturing configuration */ struct SlicingConfiguration { /** Number of slices in each direction */ int32_t x_slices = 1, y_slices = 1, z_slices = 1; /** Offset variation, value in [0, 1] */ float offset_variations = 0.f; /** Angle variation, value in [0, 1] */ float angle_variations = 0.f; /* Noise parameters for faces between sliced chunks */ NoiseConfiguration noise; }; /** Cutout fracturing configuration */ struct CutoutConfiguration { /** Set of grouped convex loop patterns for cutout in normal direction. Not required for PLANE_ONLY mode */ CutoutSet* cutoutSet = nullptr; /** Transform for initial pattern position and orientation. By default the 2d pattern lies in the XY plane (Y is up) and the center of the pattern is (0, 0) */ NvcTransform transform = {{0, 0, 0, 1}, {0, 0, 0}}; /** Scale for pattern. Unscaled pattern has size (1, 1). For a negative scale the pattern will be placed at the center of the chunk and scaled with the max distance between points of its AABB */ NvcVec2 scale = { -1, -1 }; /** Conic aperture in degrees; for a cylindrical cutout set it to 0. */ float aperture = 0.f; /** If a relative transform is set, position will be a displacement vector from the chunk's center; otherwise from the global origin. */ bool isRelativeTransform = true; /** Add generated faces to the same smoothing group as the original face without noise */ bool useSmoothing = false; /** Noise parameters for cutout surface, see NoiseConfiguration. */ NoiseConfiguration noise; }; /** Class for voronoi sites generation inside supplied mesh. */ class VoronoiSitesGenerator { public: virtual ~VoronoiSitesGenerator() {} /** Release VoronoiSitesGenerator memory */ virtual void release() = 0; /** Set base fracture mesh */ virtual void setBaseMesh(const Mesh* mesh) = 0; /** Access to generated voronoi sites. \param[out] sites Pointer to generated voronoi sites \return Count of generated voronoi sites.
*/ virtual uint32_t getVoronoiSites(const NvcVec3*& sites) = 0; /** Add a site at a particular point \param[in] site Site coordinates */ virtual void addSite(const NvcVec3& site) = 0; /** Uniformly generate sites inside the mesh \param[in] numberOfSites Number of generated sites */ virtual void uniformlyGenerateSitesInMesh(uint32_t numberOfSites) = 0; /** Generate sites in clustered fashion \param[in] numberOfClusters Number of generated clusters \param[in] sitesPerCluster Number of sites in each cluster \param[in] clusterRadius Voronoi cells cluster radius */ virtual void clusteredSitesGeneration(uint32_t numberOfClusters, uint32_t sitesPerCluster, float clusterRadius) = 0; /** Radial pattern of sites generation \param[in] center Center of generated pattern \param[in] normal Normal to plane in which sites are generated \param[in] radius Pattern radius \param[in] angularSteps Number of angular steps \param[in] radialSteps Number of radial steps \param[in] angleOffset Angle offset at each radial step \param[in] variability Randomness of sites distribution */ virtual void radialPattern(const NvcVec3& center, const NvcVec3& normal, float radius, int32_t angularSteps, int32_t radialSteps, float angleOffset = 0.0f, float variability = 0.0f) = 0; /** Generate sites inside sphere \param[in] count Count of generated sites \param[in] radius Radius of sphere \param[in] center Center of sphere */ virtual void generateInSphere(const uint32_t count, const float radius, const NvcVec3& center) = 0; /** Set stencil mesh. With a stencil mesh set, sites are generated only inside both the fracture and stencil meshes. \param[in] stencil Stencil mesh. */ virtual void setStencil(const Mesh* stencil) = 0; /** Removes stencil mesh */ virtual void clearStencil() = 0; /** Deletes sites inside supplied sphere \param[in] radius Radius of sphere \param[in] center Center of sphere \param[in] eraserProbability Probability of removing some particular site */ virtual void deleteInSphere(const float radius, const NvcVec3& center, const float eraserProbability = 1) = 0; }; /** FractureTool class provides methods to fracture provided mesh and generate Blast asset data */ class FractureTool { public: virtual ~FractureTool() {} /** Release FractureTool memory */ virtual void release() = 0; /** Reset FractureTool state. */ virtual void reset() = 0; /** Set input meshes which will be fractured, FractureTool will be reset. If ids != nullptr, it must point to an array of length meshesSize. Each mesh will be assigned to a chunk with ID given by the corresponding element in ids. If the corresponding element is negative, or ids is NULL, then the chunk will be assigned an arbitrary (but currently unused) ID. Returns true iff all meshes were assigned chunks with valid IDs. */ virtual bool setSourceMeshes(Mesh const * const * meshes, uint32_t meshesSize, const int32_t* ids = nullptr) = 0; /** Set chunk mesh, parentId should be valid, return id of new chunk. */ virtual int32_t setChunkMesh(const Mesh* mesh, int32_t parentId, int32_t chunkId = -1) = 0; /** Set the material id to use for new interior faces. Defaults to kMaterialInteriorId */ virtual void setInteriorMaterialId(int32_t materialId) = 0; /** Gets the material id to use for new interior faces */ virtual int32_t getInteriorMaterialId() const = 0; /** Replaces a material id on faces with a new one */ virtual void replaceMaterialId(int32_t oldMaterialId, int32_t newMaterialId) = 0; /** Get chunk mesh in polygonal representation. User's code should release it after usage.
This function welds vertices based upon vertex position and normal. If splitUVs == true, UV coordinates are also considered in vertex welding. */ virtual Mesh* createChunkMesh(int32_t chunkInfoIndex, bool splitUVs = true) = 0; /** Fractures the specified chunk with the voronoi method. \param[in] chunkId Chunk to fracture \param[in] cellCount Number of voronoi sites \param[in] cellPoints Array of voronoi sites \param[in] replaceChunk if 'true', newly generated chunks will replace source chunk, if 'false', newly generated chunks will be at next depth level, source chunk will be parent for them. The case replaceChunk == true && chunkId == 0 is considered invalid input \return If 0, fracturing is successful. */ virtual int32_t voronoiFracturing(uint32_t chunkId, uint32_t cellCount, const NvcVec3* cellPoints, bool replaceChunk) = 0; /** Fractures the specified chunk with the voronoi method. Cells can be scaled along x,y,z axes. \param[in] chunkId Chunk to fracture \param[in] cellCount Number of voronoi sites \param[in] cellPoints Array of voronoi sites \param[in] scale Voronoi cells scaling factor \param[in] rotation Voronoi cells rotation. Has no effect without cells scale factor \param[in] replaceChunk if 'true', newly generated chunks will replace source chunk, if 'false', newly generated chunks will be at next depth level, source chunk will be parent for them. The case replaceChunk == true && chunkId == 0 is considered invalid input \return If 0, fracturing is successful. */ virtual int32_t voronoiFracturing(uint32_t chunkId, uint32_t cellCount, const NvcVec3* cellPoints, const NvcVec3& scale, const NvcQuat& rotation, bool replaceChunk) = 0; /** Fractures the specified chunk with the slicing method. \param[in] chunkId Chunk to fracture \param[in] conf Slicing parameters, see SlicingConfiguration. \param[in] replaceChunk if 'true', newly generated chunks will replace source chunk, if 'false', newly generated chunks will be at next depth level, source chunk will be parent for them. The case replaceChunk == true && chunkId == 0 is considered invalid input \param[in] rnd User-supplied random number generator \return If 0, fracturing is successful. */ virtual int32_t slicing(uint32_t chunkId, const SlicingConfiguration& conf, bool replaceChunk, RandomGeneratorBase* rnd) = 0; /** Cut chunk with a plane. \param[in] chunkId Chunk to fracture \param[in] normal Plane normal \param[in] position Point on plane \param[in] noise Noise configuration for plane-chunk intersection, see NoiseConfiguration. \param[in] replaceChunk if 'true', newly generated chunks will replace source chunk, if 'false', newly generated chunks will be at next depth level, source chunk will be parent for them. The case replaceChunk == true && chunkId == 0 is considered invalid input \param[in] rnd User-supplied random number generator \return If 0, fracturing is successful. */ virtual int32_t cut(uint32_t chunkId, const NvcVec3& normal, const NvcVec3& position, const NoiseConfiguration& noise, bool replaceChunk, RandomGeneratorBase* rnd) = 0; /** Cutout fracture for specified chunk. \param[in] chunkId Chunk to fracture \param[in] conf Cutout parameters, see CutoutConfiguration. \param[in] replaceChunk if 'true', newly generated chunks will replace source chunk, if 'false', newly generated chunks will be at next depth level, source chunk will be parent for them.
The case replaceChunk == true && chunkId == 0 is considered invalid input \param[in] rnd User-supplied random number generator \return If 0, fracturing is successful. */ virtual int32_t cutout(uint32_t chunkId, CutoutConfiguration conf, bool replaceChunk, RandomGeneratorBase* rnd) = 0; /** Creates resulting fractured mesh geometry from intermediate format */ virtual void finalizeFracturing() = 0; /** Returns overall number of chunks in fracture. */ virtual uint32_t getChunkCount() const = 0; /** Get chunk information */ virtual const ChunkInfo& getChunkInfo(int32_t chunkInfoIndex) = 0; /** Get percentage of mesh overlap. The percentage is computed as volume(intersection(meshA, meshB)) / volume(meshA) \param[in] meshA Mesh A \param[in] meshB Mesh B \return mesh overlap percentage */ virtual float getMeshOverlap(const Mesh& meshA, const Mesh& meshB) = 0; /** Get chunk base mesh \param[in] chunkIndex Chunk index \param[out] output Array of triangles to be filled \return number of triangles in base mesh */ virtual uint32_t getBaseMesh(int32_t chunkIndex, Triangle*& output) = 0; /** Update chunk base mesh \note Doesn't allocate the output array; Triangle* output should be preallocated by the user \param[in] chunkIndex Chunk index \param[out] output Array of triangles to be filled \return number of triangles in base mesh */ virtual uint32_t updateBaseMesh(int32_t chunkIndex, Triangle* output) = 0; /** Return info index of chunk with specified chunkId \param[in] chunkId Chunk ID \return Chunk info index in internal buffer; -1 is returned if it does not exist. */ virtual int32_t getChunkInfoIndex(int32_t chunkId) const = 0; /** Return id of chunk with specified info index. \param[in] chunkInfoIndex Chunk info index \return Chunk id or -1 if there is no such chunk. */ virtual int32_t getChunkId(int32_t chunkInfoIndex) const = 0; /** Return depth level of the given chunk \param[in] chunkId Chunk ID \return Chunk depth or -1 if there is no such chunk. */ virtual int32_t getChunkDepth(int32_t chunkId) const = 0; /** Return array of chunk IDs at the given depth. \param[in] depth Chunk depth \param[out] chunkIds Pointer to array of chunk IDs \return Number of chunks in array */ virtual uint32_t getChunksIdAtDepth(uint32_t depth, int32_t*& chunkIds) const = 0; /** Get result geometry without noise as vertex and index buffers, where index buffers contain series of triplets which represent triangles. \param[out] vertexBuffer Array of vertices to be filled \param[out] indexBuffer Array of indices to be filled \param[out] indexBufferOffsets Array of offsets in indexBuffer for each base mesh. Contains getChunkCount() + 1 elements. Last one is indexBuffer size \return Number of vertices in vertexBuffer */ virtual uint32_t getBufferedBaseMeshes(Vertex*& vertexBuffer, uint32_t*& indexBuffer, uint32_t*& indexBufferOffsets) = 0; /** Set automatic island removal. May cause instabilities. \param[in] isRemoveIslands Flag indicating whether or not to remove islands. */ virtual void setRemoveIslands(bool isRemoveIslands) = 0; /** Try to find islands and remove them on a specific chunk. If the chunk has children, island removal can lead to wrong results! Apply it before further chunk splitting. \param[in] chunkId Chunk ID which should be checked for islands \return Number of islands found */ virtual int32_t islandDetectionAndRemoving(int32_t chunkId, bool createAtNewDepth = false) = 0; /** Check if input mesh contains open edges. Open edges can lead to wrong fracturing results.
\return true if mesh contains open edges */ virtual bool isMeshContainOpenEdges(const Mesh* input) = 0; /** Delete all children for specified chunk (also recursively delete children of children). \param[in] chunkId Chunk ID whose children should be deleted \param[in] deleteRoot (optional) If true, deletes the given chunk too \return true if one or more chunks were removed */ virtual bool deleteChunkSubhierarchy(int32_t chunkId, bool deleteRoot = false) = 0; /** Optimize chunk hierarchy for better runtime performance. It tries to unite chunks into groups of some size in order to transform a flat hierarchy (all chunks are children of a single root) into a tree-like hierarchy with a limited number of children for each chunk. \param[in] threshold If the number of children of some chunk is less than this threshold, it is considered already optimized and skipped. \param[in] targetClusterSize Target number of children for processed chunks. \param[in] chunksToMerge Which chunks are merge candidates. If NULL, all chunks will be merge candidates. \param[in] mergeChunkCount Size of chunksToMerge array, if chunksToMerge != NULL. \param[in] adjChunks Optional index pairs to describe chunk adjacency. May be NULL. \param[in] adjChunksSize If 'adjChunks' is not NULL, the number of index pairs in the adjChunks array. \param[in] removeOriginalChunks If true, original chunks that are merged are removed. */ virtual void uniteChunks(uint32_t threshold, uint32_t targetClusterSize, const uint32_t* chunksToMerge, uint32_t mergeChunkCount, const NvcVec2i* adjChunks, uint32_t adjChunksSize, bool removeOriginalChunks = false) = 0; /** Set the APPROXIMATE_BONDING flag in the chunk's ChunkInfo \param[in] chunkInfoIndex chunk info index - use getChunkInfoIndex(ID) \param[in] useApproximateBonding value of flag to set \return true if the chunk ID is found, false otherwise */ virtual bool setApproximateBonding(uint32_t chunkInfoIndex, bool useApproximateBonding) = 0; /** Rescale interior uv coordinates of given chunk to fit a square of given size. \param[in] side Size of square side \param[in] chunkId Chunk ID for which UVs should be scaled. */ virtual void fitUvToRect(float side, uint32_t chunkId) = 0; /** Rescale interior uv coordinates of all existing chunks to fit a square of given size; relative sizes will be preserved. \param[in] side Size of square side */ virtual void fitAllUvToRect(float side) = 0; }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTAUTHORINGFRACTURETOOL_H
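A minimal usage sketch for the voronoi fracturing path above (not part of the SDK headers). It assumes a FractureTool* whose source mesh has already been set via the setter documented earlier in this header, and uses only methods declared in this interface; the include path and helper name are illustrative.

#include <vector>
#include <cstdint>
#include "NvBlastExtAuthoringFractureTool.h"  // path assumed from the include guard above

// Hypothetical helper: fracture the root chunk (id 0) with user-supplied voronoi sites,
// finalize the result, and count the triangles in all resulting base meshes.
uint32_t fractureAndCountTriangles(Nv::Blast::FractureTool* tool, const std::vector<NvcVec3>& sites)
{
    uint32_t totalTriangles = 0;
    // voronoiFracturing returns 0 on success; replaceChunk == false keeps chunk 0 as the parent.
    if (tool->voronoiFracturing(0, static_cast<uint32_t>(sites.size()), sites.data(), false) == 0)
    {
        tool->finalizeFracturing();  // build the fractured geometry from the intermediate format
        for (uint32_t i = 0; i < tool->getChunkCount(); ++i)
        {
            Nv::Blast::Triangle* tris = nullptr;
            totalTriangles += tool->getBaseMesh(static_cast<int32_t>(i), tris);
        }
    }
    return totalTriangles;
}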
NVIDIA-Omniverse/PhysX/blast/include/extensions/authoring/NvBlastExtAuthoringBooleanTool.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. //! @file //! //! @brief Defines the API for the NvBlastExtAuthoring blast sdk extension's BooleanTool #ifndef NVBLASTAUTHORINGBOOLEANTOOL_H #define NVBLASTAUTHORINGBOOLEANTOOL_H #include "NvBlastExtAuthoringTypes.h" namespace Nv { namespace Blast { // Forward declaration class Mesh; class SpatialAccelerator; /** Tool for performing boolean operations on polygonal meshes. Tool supports only closed meshes. Performing boolean on meshes with holes can lead to unexpected behavior, e.g. holes in result geometry. */ class BooleanTool { public: virtual ~BooleanTool() {} /** * Release BooleanTool memory */ virtual void release() = 0; /** * Operation to perform */ enum Op { Intersection, Union, Difference }; /** * Perform boolean operation on two polygonal meshes (A and B). * \param[in] meshA Mesh A * \param[in] accelA Spatial accelerator for meshA. Can be nullptr. * \param[in] meshB Mesh B * \param[in] accelB Spatial accelerator for meshB. Can be nullptr. * \param[in] op Boolean operation type (see BooleanTool::Op) * \return new mesh result of the boolean operation. If nullptr, result is the empty set. */ virtual Mesh* performBoolean(const Mesh* meshA, SpatialAccelerator* accelA, const Mesh* meshB, SpatialAccelerator* accelB, Op op) = 0; /** * Test whether point contained in mesh. * \param[in] mesh Mesh geometry * \param[in] accel Spatial accelerator for mesh. Can be nullptr. * \param[in] point Point which should be tested * \return true iff point is inside of mesh */ virtual bool pointInMesh(const Mesh* mesh, SpatialAccelerator* accel, const NvcVec3& point) = 0; }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTAUTHORINGBOOLEANTOOL_H
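A brief sketch of driving the BooleanTool interface above, assuming two valid closed Nv::Blast::Mesh objects created elsewhere; the factory NvBlastExtAuthoringCreateBooleanTool() is declared in NvBlastExtAuthoring.h.

// Intersect meshA and meshB; a null result means the intersection is the empty set.
Nv::Blast::Mesh* intersectMeshes(Nv::Blast::Mesh* meshA, Nv::Blast::Mesh* meshB)
{
    Nv::Blast::BooleanTool* boolTool = NvBlastExtAuthoringCreateBooleanTool();
    // Passing nullptr accelerators lets the tool run without spatial acceleration structures.
    Nv::Blast::Mesh* result =
        boolTool->performBoolean(meshA, nullptr, meshB, nullptr, Nv::Blast::BooleanTool::Intersection);
    boolTool->release();
    return result;  // may be nullptr (empty set)
}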
NVIDIA-Omniverse/PhysX/blast/include/extensions/authoring/NvBlastExtAuthoringBondGenerator.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. //! @file //! //! @brief Defines the API for the NvBlastExtAuthoring blast sdk extension's BlastBondGenerator #ifndef NVBLASTEXTAUTHORINGBONDGENERATOR_H #define NVBLASTEXTAUTHORINGBONDGENERATOR_H #include "NvBlastExtAuthoringTypes.h" struct NvBlastBondDesc; struct NvBlastChunkDesc; struct NvBlastBond; namespace Nv { namespace Blast { // Forward declarations class FractureTool; class TriangleProcessor; struct PlaneChunkIndexer; /** Bond interface generation configuration EXACT - common surface will be searched AVERAGE - Inerface is approximated by projections or intersecitons with midplane maxSeparation - for AVERAGE mode. Maximum distance between chunks and midplane used in decision whether create bond or chunks are too far from each other. */ struct BondGenerationConfig { enum BondGenMode { EXACT, AVERAGE }; float maxSeparation; BondGenMode bondMode; }; struct PlaneChunkIndexer { int32_t chunkId; int32_t trId; NvcPlane plane; }; /** Tool for gathering bond information from provided mesh geometry */ class BlastBondGenerator { public: virtual ~BlastBondGenerator() {} /** Release BlastBondGenerator memory */ virtual void release() = 0; /** This method based on marking triangles during fracture process, so can be used only with internally fractured meshes. \note User should call NVBLAST_FREE for resultBondDescs when it not needed anymore \param[in] tool FractureTool which contains chunks representation, tool->finalizeFracturing() should be called before. \param[in] chunkIsSupport Pointer to array of flags, if true - chunk is support. Array size should be equal to chunk count in tool. \param[out] resultBondDescs Pointer to array of created bond descriptors. \param[out] resultChunkDescriptors Pointer to array of created chunk descriptors. 
\return Number of created bonds */ virtual int32_t buildDescFromInternalFracture(FractureTool* tool, const bool* chunkIsSupport, NvBlastBondDesc*& resultBondDescs, NvBlastChunkDesc*& resultChunkDescriptors) = 0; /** Creates bond description between two meshes \param[in] meshACount Number of triangles in mesh A \param[in] meshA Pointer to array of triangles of mesh A. \param[in] meshBCount Number of triangles in mesh B \param[in] meshB Pointer to array of triangles of mesh B. \param[out] resultBond Result bond description. \param[in] conf Bond creation mode. \return 0 if success */ virtual int32_t createBondBetweenMeshes(uint32_t meshACount, const Triangle* meshA, uint32_t meshBCount, const Triangle* meshB, NvBlastBond& resultBond, BondGenerationConfig conf) = 0; /** Creates bond description between number of meshes \note User should call NVBLAST_FREE for resultBondDescs when it not needed anymore \param[in] meshCount Number of meshes \param[in] geometryOffset Pointer to array of triangle offsets for each mesh. Containts meshCount + 1 element, last one is total number of triangles in geometry \param[in] geometry Pointer to array of triangles. Triangles from geometryOffset[i] to geometryOffset[i+1] correspond to i-th mesh. \param[in] overlapsCount Number of overlaps \param[in] overlaps Pointer to array of pairs - indexes of chunks, for which bond should be created. \param[out] resultBond Pointer to array of result bonds. \param[in] cfg Bond creation mode. \return Number of created bonds */ virtual int32_t createBondBetweenMeshes(uint32_t meshCount, const uint32_t* geometryOffset, const Triangle* geometry, uint32_t overlapsCount, const uint32_t* overlapsA, const uint32_t* overlapsB, NvBlastBondDesc*& resultBond, BondGenerationConfig cfg) = 0; /** Creates bond description for prefractured meshes, when there is no info about which chunks should be connected with bond. \note User should call NVBLAST_FREE for resultBondDescs when it not needed anymore \param[in] meshCount Number of meshes \param[in] geometryOffset Pointer to array of triangle offsets for each mesh. Containts meshCount + 1 element, last one is total number of triangles in geometry \param[in] geometry Pointer to array of triangles. Triangles from geometryOffset[i] to geometryOffset[i+1] correspond to i-th mesh. \param[in] chunkIsSupport Pointer to array of flags, if true - chunk is support. Array size should be equal to chunk count in tool. \param[out] resultBondDescs Pointer to array of result bonds. \param[in] conf Bond creation mode. \return Number of created bonds */ virtual int32_t bondsFromPrefractured(uint32_t meshCount, const uint32_t* geometryOffset, const Triangle* geometry, const bool* chunkIsSupport, NvBlastBondDesc*& resultBondDescs, BondGenerationConfig conf) = 0; /** Creates bond description for prefractured meshes, when there is no info about which chunks should be connected with bond. This uses the same process as bondsFromPrefractured using the BondGenMode::AVERAGE mode however the existing collision data is used. \note User should call NVBLAST_FREE for resultBondDescs when it not needed anymore. \param[in] meshCount Number of meshes \param[in] convexHullOffset Pointer to array of convex hull offsets for each mesh. Containts meshCount + 1 element, last one is total number of hulls in the geometry \param[in] chunkHulls Pointer to array of convex hulls. Hulls from convexHullOffset[i] to convexHullOffset[i+1] correspond to i-th mesh. \param[in] chunkIsSupport Pointer to array of flags, if true - chunk is support. 
Array size should be equal to chunk count in tool. \param[in] meshGroups Pointer to array of group ids for each mesh, bonds will not be generated between meshs of the same group. If null each mesh is assumed to be in it's own group. \param[out] resultBondDescs Pointer to array of result bonds. \return Number of created bonds */ virtual int32_t bondsFromPrefractured(uint32_t meshCount, const uint32_t* convexHullOffset, const CollisionHull** chunkHulls, const bool* chunkIsSupport, const uint32_t* meshGroups, NvBlastBondDesc*& resultBondDescs, float maxSeparation) = 0; }; } // namespace Blast } // namespace Nv #endif // NVBLASTEXTAUTHORINGBONDGENERATOR_H
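A sketch of the internal-fracture path above, assuming 'tool' is a FractureTool on which finalizeFracturing() has been called and 'chunkIsSupport' holds one flag per chunk; ownership of the returned descriptor arrays follows the NVBLAST_FREE note in the method documentation.

// Generate chunk/bond descriptors from an internally fractured tool.
int32_t buildDescriptors(Nv::Blast::BlastBondGenerator* gen,
                         Nv::Blast::FractureTool* tool,
                         const bool* chunkIsSupport,
                         NvBlastBondDesc*& outBonds,
                         NvBlastChunkDesc*& outChunks)
{
    outBonds = nullptr;
    outChunks = nullptr;
    const int32_t bondCount = gen->buildDescFromInternalFracture(tool, chunkIsSupport, outBonds, outChunks);
    // Caller frees the returned arrays with NVBLAST_FREE when they are no longer needed.
    return bondCount;
}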
NVIDIA-Omniverse/PhysX/blast/include/extensions/authoring/NvBlastExtAuthoringCutout.h
// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, // MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. // // Information and code furnished is believed to be accurate and reliable. // However, NVIDIA Corporation assumes no responsibility for the consequences of use of such // information or for any infringement of patents or other rights of third parties that may // result from its use. No license is granted by implication or otherwise under any patent // or patent rights of NVIDIA Corporation. Details are subject to change without notice. // This code supersedes and replaces all information previously supplied. // NVIDIA Corporation products are not authorized for use as critical // components in life support devices or systems without express written approval of // NVIDIA Corporation. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. //! @file //! //! @brief Defines the API for the NvBlastExtAuthoring blast sdk extension's CutoutSet, used for cutout fracturing #ifndef NVBLASTAUTHORINGCUTOUT_H #define NVBLASTAUTHORINGCUTOUT_H #include "NvBlastExtAuthoringTypes.h" namespace Nv { namespace Blast { /** Interface to a "cutout set," used with chippable fracturing. A cutout set is created from a bitmap. The result is turned into cutouts which are applied to the mesh. For example, a bitmap which looks like a brick pattern will generate a cutout for each "brick," forming the cutout set. Each cutout is a 2D entity, meant to be projected onto various faces of a mesh. They are represented by a set of 2D vertices, which form closed loops. More than one loop may represent a single cutout, if the loops are forced to be convex. Otherwise, a cutout is represented by a single loop. */ class CutoutSet { public: /** Returns the number of cutouts in the set. */ virtual uint32_t getCutoutCount() const = 0; /** Applies to the cutout indexed by cutoutIndex: Returns the number of vertices in the cutout. */ virtual uint32_t getCutoutVertexCount(uint32_t cutoutIndex, uint32_t loopIndex) const = 0; /** Applies to the cutout indexed by cutoutIndex: Returns the number of loops in this cutout. */ virtual uint32_t getCutoutLoopCount(uint32_t cutoutIndex) const = 0; /** Applies to the cutout indexed by cutoutIndex: Returns the vertex indexed by vertexIndex. (Only the X and Y coordinates are used.) */ virtual const NvcVec3& getCutoutVertex(uint32_t cutoutIndex, uint32_t loopIndex, uint32_t vertexIndex) const = 0; /** If smoothing group should be changed for adjacent to this vertex faces return true */ virtual bool isCutoutVertexToggleSmoothingGroup(uint32_t cutoutIndex, uint32_t loopIndex, uint32_t vertexIndex) const = 0; /** Whether or not this cutout set is to be tiled. */ virtual bool isPeriodic() const = 0; /** The dimensions of the fracture map used to create the cutout set. */ virtual const NvcVec2& getDimensions() const = 0; /** Releases all memory and deletes itself. */ virtual void release() = 0; protected: /** Protected destructor. Use the release() method. */ virtual ~CutoutSet() {} }; } // namespace Blast } // namespace Nv #endif // idndef NVBLASTAUTHORINGCUTOUT_H
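A small sketch that walks a built CutoutSet using the accessors above; 'cutoutSet' is assumed to have been created and built with the corresponding NvBlastExtAuthoring factory/build functions.

// Count every loop vertex in the cutout set (only x and y of each vertex are meaningful).
uint32_t countCutoutVertices(const Nv::Blast::CutoutSet& cutoutSet)
{
    uint32_t total = 0;
    for (uint32_t c = 0; c < cutoutSet.getCutoutCount(); ++c)
    {
        for (uint32_t l = 0; l < cutoutSet.getCutoutLoopCount(c); ++l)
        {
            total += cutoutSet.getCutoutVertexCount(c, l);
        }
    }
    return total;
}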
NVIDIA-Omniverse/PhysX/blast/include/extensions/authoring/NvBlastExtAuthoring.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. //! @file //! //! @brief Defines the API for the NvBlastExtAuthoring blast sdk extension #ifndef NVBLASTAUTHORING_H #define NVBLASTAUTHORING_H #include "NvBlastExtAuthoringTypes.h" namespace Nv { namespace Blast { class Mesh; class VoronoiSitesGenerator; class CutoutSet; class RandomGeneratorBase; class FractureTool; class ConvexMeshBuilder; class BlastBondGenerator; class MeshCleaner; class PatternGenerator; class SpatialGrid; class SpatialAccelerator; class BooleanTool; } // namespace Blast } // namespace Nv struct NvBlastExtAssetUtilsBondDesc; /** Constructs mesh object from array of triangles. User should call release() after usage. \param[in] positions Array for vertex positions, 3 * verticesCount floats will be read \param[in] normals Array for vertex normals, 3 * verticesCount floats will be read \param[in] uv Array for vertex uv coordinates, 2 * verticesCount floats will be read \param[in] verticesCount Number of vertices in mesh \param[in] indices Array of vertex indices. Indices contain vertex index triplets which form a mesh triangle. \param[in] indicesCount Indices count (should be equal to numberOfTriangles * 3) \return pointer to Nv::Blast::Mesh if it was created succefully otherwise return nullptr */ NV_C_API Nv::Blast::Mesh* NvBlastExtAuthoringCreateMesh(const NvcVec3* positions, const NvcVec3* normals, const NvcVec2* uv, uint32_t verticesCount, const uint32_t* indices, uint32_t indicesCount); /** Constructs mesh object from triangles represented as arrays of vertices, indices and per facet material. User should call Mesh::release() after usage. \param[in] vertices Array for vertex positions, 3 * verticesCount floats will be read \param[in] verticesCount Number of vertices in mesh \param[in] indices Array of vertex indices. Indices contain vertex index triplets which form a mesh triangle. \param[in] indicesCount Indices count (should be equal to numberOfTriangles * 3) \param[in] materials Array of material indices per triangle. 
If not set default material (0) will be assigned. \param[in] materialStride Stride for material indices \return pointer to Nv::Blast::Mesh if it was created succefully otherwise return nullptr */ NV_C_API Nv::Blast::Mesh* NvBlastExtAuthoringCreateMeshOnlyTriangles(const void* vertices, uint32_t verticesCount, uint32_t* indices, uint32_t indexCount, void* materials = nullptr, uint32_t materialStride = 4); /** Constructs mesh object from array of vertices, edges and facets. User should call release() after usage. \param[in] vertices Array for Nv::Blast::Vertex \param[in] edges Array for Nv::Blast::Edge \param[in] facets Array for Nv::Blast::Facet \param[in] verticesCount Number of vertices in mesh \param[in] edgesCount Number of edges in mesh \param[in] facetsCount Number of facets in mesh \return pointer to Nv::Blast::Mesh if it was created succefully otherwise return nullptr */ NV_C_API Nv::Blast::Mesh* NvBlastExtAuthoringCreateMeshFromFacets(const void* vertices, const void* edges, const void* facets, uint32_t verticesCount, uint32_t edgesCount, uint32_t facetsCount); /** Voronoi sites should not be generated outside of the fractured mesh, so VoronoiSitesGenerator should be supplied with fracture mesh. \param[in] mesh Fracture mesh \param[in] rnd User supplied random value generator. \return Pointer to VoronoiSitesGenerator. User's code should release it after usage. */ NV_C_API Nv::Blast::VoronoiSitesGenerator* NvBlastExtAuthoringCreateVoronoiSitesGenerator(Nv::Blast::Mesh* mesh, Nv::Blast::RandomGeneratorBase* rng); /** Instantiates a blank CutoutSet */ NV_C_API Nv::Blast::CutoutSet* NvBlastExtAuthoringCreateCutoutSet(); /** Builds a cutout set (which must have been initially created by createCutoutSet()). Uses a bitmap described by pixelBuffer, bufferWidth, and bufferHeight. Each pixel is represented by one byte in the buffer. \param cutoutSet the CutoutSet to build \param pixelBuffer pointer to be beginning of the pixel buffer \param bufferWidth the width of the buffer in pixels \param bufferHeight the height of the buffer in pixels \param segmentationErrorThreshold Reduce the number of vertices on curve untill segmentation error is smaller then specified. By default set it to 0.001. \param snapThreshold the pixel distance at which neighboring cutout vertices and segments may be fudged into alignment. By default set it to 1. \param periodic whether or not to use periodic boundary conditions when creating cutouts from the map \param expandGaps expand cutout regions to gaps or keep it as is */ NV_C_API void NvBlastExtAuthoringBuildCutoutSet(Nv::Blast::CutoutSet& cutoutSet, const uint8_t* pixelBuffer, uint32_t bufferWidth, uint32_t bufferHeight, float segmentationErrorThreshold, float snapThreshold, bool periodic, bool expandGaps); /** Create FractureTool object. \return Pointer to create FractureTool. User's code should release it after usage. */ NV_C_API Nv::Blast::FractureTool* NvBlastExtAuthoringCreateFractureTool(); /** Create BlastBondGenerator \return Pointer to created BlastBondGenerator. User's code should release it after usage. */ NV_C_API Nv::Blast::BlastBondGenerator* NvBlastExtAuthoringCreateBondGenerator(Nv::Blast::ConvexMeshBuilder* builder); /** Build convex mesh decomposition. \param[in] mesh Triangle mesh to decompose. \param[in] triangleCount Number of triangles in mesh. \param[in] params Parameters for convex mesh decomposition builder. \param[out] convexes The resulting convex hulls. \return Number of created convex hulls. 
*/ NV_C_API int32_t NvBlastExtAuthoringBuildMeshConvexDecomposition(Nv::Blast::ConvexMeshBuilder* cmb, const Nv::Blast::Triangle* mesh, uint32_t triangleCount, const Nv::Blast::ConvexDecompositionParams& params, Nv::Blast::CollisionHull**& convexes); /** Convex geometry trimming. Using slicing with noised slicing surface can result in intersecting collision geometry. It leads to unstable behaviour of rigid body simulation. This method trims all intersecting parts of collision geometry. As a drawback, trimming collision geometry can lead to penetrating render meshes during simulation. \param[in] chunksCount Number of chunks \param[in,out] in ConvexHull geometry which should be clipped. \param[in] chunkDepth Array of depth levels of convex hulls corresponding chunks. */ NV_C_API void NvBlastExtAuthoringTrimCollisionGeometry(Nv::Blast::ConvexMeshBuilder* cmb, uint32_t chunksCount, Nv::Blast::CollisionHull** in, const uint32_t* chunkDepth); /** Transforms collision hull in place using scale, rotation, transform. \param[in, out] hull Pointer to the hull to be transformed (modified). \param[in] scale Pointer to scale to be applied. Can be nullptr. \param[in] rotation Pointer to rotation to be applied. Can be nullptr. \param[in] translation Pointer to translation to be applied. Can be nullptr. */ NV_C_API void NvBlastExtAuthoringTransformCollisionHullInPlace(Nv::Blast::CollisionHull* hull, const NvcVec3* scaling, const NvcQuat* rotation, const NvcVec3* translation); /** Transforms collision hull in place using scale, rotation, transform. \param[in] hull Pointer to the hull to be transformed (modified). \param[in] scale Pointer to scale to be applied. Can be nullptr. \param[in] rotation Pointer to rotation to be applied. Can be nullptr. \param[in] translation Pointer to translation to be applied. Can be nullptr. */ NV_C_API Nv::Blast::CollisionHull* NvBlastExtAuthoringTransformCollisionHull(const Nv::Blast::CollisionHull* hull, const NvcVec3* scaling, const NvcQuat* rotation, const NvcVec3* translation); /** Performs pending fractures and generates fractured asset, render and collision geometry \param[in] fTool Fracture tool created by NvBlastExtAuthoringCreateFractureTool \param[in] bondGenerator Bond generator created by NvBlastExtAuthoringCreateBondGenerator \param[in] collisionBuilder Collision builder created by NvBlastExtAuthoringCreateConvexMeshBuilder \param[in] defaultSupportDepth All new chunks will be marked as support if its depth equal to defaultSupportDepth. By default leaves (chunks without children) marked as support. \param[in] collisionParam Parameters of collision hulls generation. \return Authoring result */ NV_C_API Nv::Blast::AuthoringResult* NvBlastExtAuthoringProcessFracture(Nv::Blast::FractureTool& fTool, Nv::Blast::BlastBondGenerator& bondGenerator, Nv::Blast::ConvexMeshBuilder& collisionBuilder, const Nv::Blast::ConvexDecompositionParams& collisionParam, int32_t defaultSupportDepth = -1); /** Releases collision data for AuthoringResult. AuthoringResult should be created by NvBlast. */ NV_C_API void NvBlastExtAuthoringReleaseAuthoringResultCollision(Nv::Blast::ConvexMeshBuilder& collisionBuilder, Nv::Blast::AuthoringResult* ar); /** Releases AuthoringResult data. AuthoringResult should be created by NvBlast. 
*/ NV_C_API void NvBlastExtAuthoringReleaseAuthoringResult(Nv::Blast::ConvexMeshBuilder& collisionBuilder, Nv::Blast::AuthoringResult* ar); /** Updates graphics mesh only \param[in] fTool Fracture tool created by NvBlastExtAuthoringCreateFractureTool \param[out] ares AuthoringResult object which contains chunks, for which rendermeshes will be updated (e.g. to tweak UVs). Initially should be created by NvBlastExtAuthoringProcessFracture. */ NV_C_API void NvBlastExtAuthoringUpdateGraphicsMesh(Nv::Blast::FractureTool& fTool, Nv::Blast::AuthoringResult& ares); /** Build collision meshes \param[in,out] ares AuthoringResult object which contains chunks, for which collision meshes will be built. \param[in] collisionBuilder Reference to ConvexMeshBuilder instance. \param[in] collisionParam Parameters of collision hulls generation. \param[in] chunksToProcessCount Number of chunk indices in chunksToProcess memory buffer. \param[in] chunksToProcess Chunk indices for which collision mesh should be built. */ NV_C_API void NvBlastExtAuthoringBuildCollisionMeshes(Nv::Blast::AuthoringResult& ares, Nv::Blast::ConvexMeshBuilder& collisionBuilder, const Nv::Blast::ConvexDecompositionParams& collisionParam, uint32_t chunksToProcessCount, uint32_t* chunksToProcess); /** Creates MeshCleaner object \return pointer to Nv::Blast::Mesh if it was created succefully otherwise return nullptr */ NV_C_API Nv::Blast::MeshCleaner* NvBlastExtAuthoringCreateMeshCleaner(); /** Finds bonds connecting chunks in a list of assets New bond descriptors may be given to bond support chunks from different components. An NvBlastAsset may appear more than once in the components array. NOTE: This function allocates memory using the allocator in NvBlastGlobals, to create the new bond descriptor arrays returned. The user must free this memory after use with NVBLAST_FREE \param[in] components An array of assets to merge, of size componentCount. \param[in] scales If not NULL, an array of size componentCount of scales to apply to the geometric data in the chunks and bonds. If NULL, no scaling is applied. \param[in] rotations If not NULL, an array of size componentCount of rotations to apply to the geometric data in the chunks and bonds. The quaternions MUST be normalized. If NULL, no rotations are applied. \param[in] translations If not NULL, an array of of size componentCount of translations to apply to the geometric data in the chunks and bonds. If NULL, no translations are applied. \param[in] convexHullOffsets For each component, an array of chunkSize+1 specifying the start of the convex hulls for that chunk inside the chunkHulls array for that component. \param[in] chunkHulls For each component, an array of CollisionHull* specifying the collision geometry for the chunks in that component. \param[in] componentCount The size of the components and relativeTransforms arrays. \param[out] newBondDescs Descriptors of type NvBlastExtAssetUtilsBondDesc for new bonds between components. \param[in] maxSeparation Maximal distance between chunks which can be connected by bond. \return the number of bonds in newBondDescs */ NV_C_API uint32_t NvBlastExtAuthoringFindAssetConnectingBonds( const NvBlastAsset** components, const NvcVec3* scales, const NvcQuat* rotations, const NvcVec3* translations, const uint32_t** convexHullOffsets, const Nv::Blast::CollisionHull*** chunkHulls, uint32_t componentCount, NvBlastExtAssetUtilsBondDesc*& newBondDescs, float maxSeparation = 0.0f); /** Returns pattern generator used for generating fracture patterns. 
*/ NV_C_API Nv::Blast::PatternGenerator* NvBlastExtAuthoringCreatePatternGenerator(); /** Create spatial grid for mesh. Release using Nv::Blast::SpatialGrid::release() */ NV_C_API Nv::Blast::SpatialGrid* NvBlastExtAuthoringCreateSpatialGrid(uint32_t resolution, const Nv::Blast::Mesh* m); /** Create GridAccelerator - SpatialAccelerator which use Grid for faster mesh sampling. Release using Nv::Blast::SpatialAccelerator::release() */ NV_C_API Nv::Blast::SpatialAccelerator* NvBlastExtAuthoringCreateGridAccelerator(Nv::Blast::SpatialGrid* parent); /** Create SweepingAccelerator - SpatialAccelerator which uses a sweep algorithm. Release using Nv::Blast::SpatialAccelerator::release() */ NV_C_API Nv::Blast::SpatialAccelerator* NvBlastExtAuthoringCreateSweepingAccelerator(const Nv::Blast::Mesh* m); /** Create BBoxBasedAccelerator - SpatialAccelerator which uses a bbox/grid algorithm. Release using Nv::Blast::SpatialAccelerator::release() */ NV_C_API Nv::Blast::SpatialAccelerator* NvBlastExtAuthoringCreateBBoxBasedAccelerator(uint32_t resolution, const Nv::Blast::Mesh* m); #define kBBoxBasedAcceleratorDefaultResolution 10 /** Create BooleanTool object. \return Pointer to created BooleanTool. User's code should release it after usage. */ NV_C_API Nv::Blast::BooleanTool* NvBlastExtAuthoringCreateBooleanTool(); #endif // ifndef NVBLASTAUTHORING_H
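A sketch tying a few of the factory functions above together, assuming raw triangle-mesh arrays are available; the fracturing and NvBlastExtAuthoringProcessFracture steps are only indicated in comments because they require additional setup, and the release() calls follow the "user should release" notes in the factory documentation.

// Create an authoring mesh and the fracture tool typically used with it.
void authoringSetupSketch(const NvcVec3* positions, const NvcVec3* normals, const NvcVec2* uvs,
                          uint32_t vertexCount, const uint32_t* indices, uint32_t indexCount)
{
    Nv::Blast::Mesh* mesh = NvBlastExtAuthoringCreateMesh(positions, normals, uvs, vertexCount, indices, indexCount);
    if (mesh == nullptr)
        return;  // creation failed (e.g. malformed input arrays)

    Nv::Blast::FractureTool* tool = NvBlastExtAuthoringCreateFractureTool();
    // ... set 'mesh' as the tool's source mesh, fracture it, then pass the tool together with a
    //     BlastBondGenerator and ConvexMeshBuilder to NvBlastExtAuthoringProcessFracture(...) ...
    tool->release();
    mesh->release();
}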
NVIDIA-Omniverse/PhysX/blast/include/extensions/authoring/NvBlastExtAuthoringMeshCleaner.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. //! @file //! //! @brief Defines the API for the NvBlastExtAuthoring blast sdk extension's MeshCleaner utility #ifndef NVBLASTEXTAUTHORINGMESHCLEANER_H #define NVBLASTEXTAUTHORINGMESHCLEANER_H #include "NvBlastExtAuthoringTypes.h" /** FractureTool has requirements to input meshes to fracture them successfully: 1) Mesh should be closed (watertight) 2) There should not be self-intersections and open-edges. */ /** Mesh cleaner input is closed mesh with self-intersections and open-edges (only in the interior). It tries to track outer hull to make input mesh solid and meet requierements of FractureTool. If mesh contained some internal cavities they will be removed. */ namespace Nv { namespace Blast { class Mesh; class MeshCleaner { public: virtual ~MeshCleaner() {} /** Tries to remove self intersections and open edges in interior of mesh. \param[in] mesh Mesh to be cleaned. \return Cleaned mesh or nullptr if failed. */ virtual Mesh* cleanMesh(const Mesh* mesh) = 0; virtual void release() = 0; }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTEXTAUTHORINGMESHCLEANER_H
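A short sketch of the intended MeshCleaner use, assuming 'input' is a closed mesh with possible interior self-intersections as described above; the factory NvBlastExtAuthoringCreateMeshCleaner() is declared in NvBlastExtAuthoring.h.

// Clean a mesh before fracturing; fall back to the original if cleaning fails.
Nv::Blast::Mesh* cleanOrPassThrough(Nv::Blast::Mesh* input)
{
    Nv::Blast::MeshCleaner* cleaner = NvBlastExtAuthoringCreateMeshCleaner();
    Nv::Blast::Mesh* cleaned = cleaner->cleanMesh(input);  // returns nullptr on failure
    cleaner->release();
    return cleaned != nullptr ? cleaned : input;
}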
NVIDIA-Omniverse/PhysX/blast/include/extensions/assetutils/NvBlastExtAssetUtils.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. //! @file //! //! @brief Defines the API for the NvBlastExtAssetUtils blast sdk extension #ifndef NVBLASTEXTASSETUTILS_H #define NVBLASTEXTASSETUTILS_H #include "NvBlastTypes.h" #include "NvCTypes.h" #include <stdint.h> /** Reauthor the provided asset to create external bonds in the specified support chunks. \param[in] asset Pointer to the original asset. Won't be modified. \param[in] externalBoundChunks Array of support chunk indices which are to be bound to the external body. \param[in] externalBoundChunksCount Size of externalBoundChunks array. \param[in] bondDirections Array of normals for each bond (size externalBoundChunksCount) \param[in] bondUserData Array of user data values for the new bonds, of size externalBoundChunksCount. May be NULL. If NULL, bond user data will be set to zero. \return a new asset with added bonds if successful, NULL otherwise. */ NV_C_API NvBlastAsset* NvBlastExtAssetUtilsAddExternalBonds ( const NvBlastAsset* asset, const uint32_t* externalBoundChunks, uint32_t externalBoundChunkCount, const NvcVec3* bondDirections, const uint32_t* bondUserData ); // DEPRICATED: remove on next major version bump #define NvBlastExtAssetUtilsAddWorldBonds NvBlastExtAssetUtilsAddExternalBonds /** Bond descriptor used to merge assets. In addition to the NvBlastBondDesc fields, adds "component" indices to indicate to which component asset the chunk indices in NvBlastBondDesc refer. Used in the function NvBlastExtAssetUtilsMergeAssets. */ struct NvBlastExtAssetUtilsBondDesc : public NvBlastBondDesc { uint32_t componentIndices[2]; //!< The asset component for the corresponding chunkIndices[2] value. }; /** Creates an asset descriptor from an asset. NOTE: This function allocates memory using the allocator in NvBlastGlobals, to create the new chunk and bond descriptor arrays referenced in the returned NvBlastAssetDesc. The user must free this memory after use with NVBLAST_FREE appied to the pointers in the returned NvBlastAssetDesc. 
\param[in] asset The asset from which to create a descriptor. \return an asset descriptor that will build an exact duplicate of the input asset. */ NV_C_API NvBlastAssetDesc NvBlastExtAssetUtilsCreateDesc(const NvBlastAsset* asset); /** Creates an asset descriptor which will build an asset that merges several assets. Each asset (or component) is given a transform, applied to the geometric information in the chunk and bond descriptors. New bond descriptors may be given to bond support chunks from different components. An NvBlastAsset may appear more than once in the components array. This function will call NvBlastEnsureAssetExactSupportCoverage on the returned chunk descriptors. It will also call NvBlastReorderAssetDescChunks if the user passes in valid arrays for chunkReorderMap and chunkReorderMapSize. Otherwise, the user must ensure that the returned chunk descriptors are in a valid order is valid before using them. NOTE: This function allocates memory using the allocator in NvBlastGlobals, to create the new chunk and bond descriptor arrays referenced in the returned NvBlastAssetDesc. The user must free this memory after use with NVBLAST_FREE appied to the pointers in the returned NvBlastAssetDesc. \param[in] components An array of assets to merge, of size componentCount. \param[in] scales An array of scales to apply to the geometric data in the chunks and bonds. If NULL, no scales are applied. If not NULL, the array must be of size componentCount. \param[in] rotations An array of rotations to apply to the geometric data in the chunks and bonds, stored quaternion format. The quaternions MUST be normalized. If NULL, no rotations are applied. If not NULL, the array must be of size componentCount. \param[in] translations An array of translations to apply to the geometric data in the chunks and bonds. If NULL, no translations are applied. If not NULL, the array must be of size componentCount. \param[in] componentCount The size of the components and relativeTransforms arrays. \param[in] newBondDescs Descriptors of type NvBlastExtAssetUtilsBondDesc for new bonds between components, of size newBondCount. If NULL, newBondCount must be 0. \param[in] newBondCount The size of the newBondDescs array. \param[in] chunkIndexOffsets If not NULL, must point to a uin32_t array of size componentCount. It will be filled with the starting elements in chunkReorderMap corresponding to each component. \param[in] chunkReorderMap If not NULL, the returned descriptor is run through NvBlastReorderAssetDescChunks, to ensure that it is a valid asset descriptor. In the process, chunks may be reordered (in addition to their natural re-indexing due to them all being placed in one array). To map from the old chunk indexing for the various component assets to the chunk indexing used in the returned descriptor, set chunkReorderMap to point to a uin32_t array of size equal to the total number of chunks in all components, and pass in a non-NULL value to chunkIndexOffsets as described above. Then, for component index c and chunk index k within that component, the new chunk index is given by: index = chunkReorderMap[ k + chunkIndexOffsets[c] ]. \param[in] chunkReorderMapSize The size of the array passed into chunkReorderMap, if chunkReorderMap is not NULL. This is for safety, so that this function does not overwrite chunkReorderMap. \return an asset descriptor that will build an asset which merges the components, using NvBlastCreateAsset. 
*/ NV_C_API NvBlastAssetDesc NvBlastExtAssetUtilsMergeAssets ( const NvBlastAsset** components, const NvcVec3* scales, const NvcQuat* rotations, const NvcVec3* translations, uint32_t componentCount, const NvBlastExtAssetUtilsBondDesc* newBondDescs, uint32_t newBondCount, uint32_t* chunkIndexOffsets, uint32_t* chunkReorderMap, uint32_t chunkReorderMapSize ); /** Transforms asset in place using scale, rotation, transform. Chunk centroids, chunk bond centroids and bond normals are being transformed. Chunk volume and bond area are changed accordingly. \param[in, out] asset Pointer to the asset to be transformed (modified). \param[in] scale Pointer to scale to be applied. Can be nullptr. \param[in] rotation Pointer to rotation to be applied. Can be nullptr. \param[in] translation Pointer to translation to be applied. Can be nullptr. */ NV_C_API void NvBlastExtAssetTransformInPlace ( NvBlastAsset* asset, const NvcVec3* scale, const NvcQuat* rotation, const NvcVec3* translation ); #endif // ifndef NVBLASTEXTASSETUTILS_H
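A sketch of NvBlastExtAssetUtilsAddExternalBonds, assuming 'asset' is a valid NvBlastAsset and that the listed chunk indices are support chunks in that asset.

// Bind two support chunks to the external body along +Z; bond user data defaults to zero.
NvBlastAsset* addGroundBonds(const NvBlastAsset* asset)
{
    const uint32_t boundChunks[] = { 1, 2 };                        // assumed support chunk indices
    const NvcVec3 bondDirections[] = { { 0.0f, 0.0f, 1.0f },
                                       { 0.0f, 0.0f, 1.0f } };
    // Returns a new asset with the added bonds, or NULL on failure; the input asset is unchanged.
    return NvBlastExtAssetUtilsAddExternalBonds(asset, boundChunks, 2, bondDirections, nullptr);
}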
NVIDIA-Omniverse/PhysX/blast/include/extensions/stress/NvBlastExtStressSolver.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. //! @file //! //! @brief NvBlastExtStressSolver blast extension, provides functionality to calculate stress on a destructible #ifndef NVBLASTEXTSTRESSSOLVER_H #define NVBLASTEXTSTRESSSOLVER_H #include "NvBlastTypes.h" #include "NvCTypes.h" namespace Nv { namespace Blast { /** Stress Solver Settings Stress on every bond is calculated with these components: compression/tension (parallel to bond normal) shear (perpendicular to bond normal) Damage is done based on the limits defined in this structure to simulate micro bonds in the material breaking Units for all limits are in pascals Support graph reduction: graphReductionLevel is the number of node merge passes. The resulting graph will be roughly 2^graphReductionLevel times smaller than the original. NOTE: the reduction is currently fairly random and can lead to interlocked actors when solver bonds break. If we are going to keep the feature, the algorithm for combining bonds should be revisited to take locality into account. */ struct ExtStressSolverSettings { uint32_t maxSolverIterationsPerFrame;//!< the maximum number of iterations to perform per frame uint32_t graphReductionLevel; //!< graph reduction level // stress pressure limits float compressionElasticLimit; //!< below this compression pressure no damage is done to bonds. Also used as the default for shear and tension if they aren't provided. float compressionFatalLimit; //!< above this compression pressure the bond is immediately broken. Also used as the default for shear and tension if they aren't provided. float tensionElasticLimit; //!< below this tension pressure no damage is done to bonds. Use a negative value to fall back on compression limit. float tensionFatalLimit; //!< above this tension pressure the bond is immediately broken. Use a negative value to fall back on compression limit. float shearElasticLimit; //!< below this shear pressure no damage is done to bonds. Use a negative value to fall back on compression limit. 
float shearFatalLimit; //!< above this shear pressure the bond is immediately broken. Use a negative value to fall back on compression limit. ExtStressSolverSettings() : maxSolverIterationsPerFrame(25), graphReductionLevel(0), // stress pressure limits compressionElasticLimit(1.0f), compressionFatalLimit(2.0f), tensionElasticLimit(-1.0f), tensionFatalLimit(-1.0f), shearElasticLimit(-1.0f), shearFatalLimit(-1.0f) {} }; /** Parameter to addForce() calls, determines the exact operation that is carried out. @see ExtStressSolver.addForce() */ struct ExtForceMode { enum Enum { FORCE, //!< parameter has unit of mass * distance / time^2 ACCELERATION, //!< parameter has unit of distance / time^2, i.e. the effect is mass independent }; }; /** Stress Solver. Uses NvBlastFamily, allocates and prepares its graph once when it's created. Then it's being quickly updated on every actor split. It uses NvBlastAsset support graph, you can apply forces on nodes and stress on bonds will be calculated as the result. When stress on bond exceeds it's health bond is considered broken (overstressed). Basic usage: 1. Create it with create function once for family 2. Fill node info for every node in support graph or use setAllNodesInfoFromLL() function. 3. Use notifyActorCreated / notifyActorDestroyed whenever actors are created and destroyed in family. 4. Every frame: Apply forces (there are different functions for it see @addForce) 5. Every frame: Call update() for actual solver to process. 6. If getOverstressedBondCount() > 0 use generateFractureCommands() functions to get FractureCommands with bonds fractured */ class NV_DLL_EXPORT ExtStressSolver { public: //////// creation //////// /** Create a new ExtStressSolver. \param[in] family The NvBlastFamily instance to calculate stress on. \param[in] settings The settings to be set on ExtStressSolver. \return the new ExtStressSolver if successful, NULL otherwise. */ static ExtStressSolver* create(const NvBlastFamily& family, const ExtStressSolverSettings& settings = ExtStressSolverSettings()); //////// interface //////// /** Release this stress solver. */ virtual void release() = 0; /** Set node info. All the required info per node for stress solver is set with this function. Call it for every node in graph or use setAllNodesInfoFromLL(). \param[in] graphNodeIndex Index of the node in support graph. see NvBlastSupportGraph. \param[in] mass Node mass. For static node it is must be zero. \param[in] volume Node volume. For static node it is irrelevant. \param[in] localPosition Node local position. */ virtual void setNodeInfo(uint32_t graphNodeIndex, float mass, float volume, NvcVec3 localPosition) = 0; /** Set all nodes info using low level NvBlastAsset data. Uses NvBlastChunk's centroid and volume. Uses 'external' node to mark nodes as static. \param[in] density Density. Used to convert volume to mass. */ virtual void setAllNodesInfoFromLL(float density = 1.0f) = 0; /** Set stress solver settings. Changing graph reduction level will lead to graph being rebuilt (which is fast, but still not recommended). All other settings are applied instantly and can be changed every frame. \param[in] settings The settings to be set on ExtStressSolver. */ virtual void setSettings(const ExtStressSolverSettings& settings) = 0; /** Get stress solver settings. \return the pointer to stress solver settings currently set. */ virtual const ExtStressSolverSettings& getSettings() const = 0; /** Notify stress solver on newly created actor. 
Call this function for all initial actors present in family and later upon every actor split. \param[in] actor The actor created. \return true if actor will take part in stress solver process. false if actor doesn't contain any bonds. */ virtual bool notifyActorCreated(const NvBlastActor& actor) = 0; /** Notify stress solver on destroyed actor. Call this function when actor is destroyed (split futher) or deactivated. \param[in] actor The actor destroyed. */ virtual void notifyActorDestroyed(const NvBlastActor& actor) = 0; /** Apply external impulse on particular actor of family. This function will find nearest actor's graph node to apply impulse on. \param[in] actor The actor to apply impulse on. \param[in] localPosition Local position in actor's coordinates to apply impulse on. \param[in] localForce Force to apply in local actor's coordinates. \param[in] mode The mode to use when applying the force/impulse(see #ExtForceMode) \return true iff node was found and force applied. */ virtual bool addForce(const NvBlastActor& actor, NvcVec3 localPosition, NvcVec3 localForce, ExtForceMode::Enum mode = ExtForceMode::FORCE) = 0; /** Apply external impulse on particular node. \param[in] graphNodeIndex The graph node index to apply impulse on. See #NvBlastSupportGraph. \param[in] localForce Force to apply in local actor's coordinates. \param[in] mode The mode to use when applying the force/impulse(see #ExtForceMode) */ virtual void addForce(uint32_t graphNodeIndex, NvcVec3 localForce, ExtForceMode::Enum mode = ExtForceMode::FORCE) = 0; /** Apply external gravity on particular actor of family. This function applies gravity on every node withing actor, so it makes sense only for static actors. \param[in] actor The actor to apply gravitational acceleration on. \param[in] localGravity Gravity to apply in local actor's coordinates. ExtForceMode::ACCELERATION is used. \return true iff acceleration was applied on at least one node. */ virtual bool addGravity(const NvBlastActor& actor, NvcVec3 localGravity) = 0; /** Apply centrifugal acceleration produced by actor's angular movement. \param[in] actor The actor to apply impulse on. \param[in] localCenterMass Actor's local center of mass. \param[in] localAngularVelocity Local angular velocity of an actor. \return true iff force was applied on at least one node. */ virtual bool addCentrifugalAcceleration(const NvBlastActor& actor, NvcVec3 localCenterMass, NvcVec3 localAngularVelocity) = 0; /** Update stress solver. Actual performance of stress calculation happens there. Call it after all relevant forces were applied, usually every frame. */ virtual void update() = 0; /** Get overstressed/broken bonds count. This count is updated after every update() call. Number of overstressed bond directly hints if any bond fracture is recommended by stress solver. \return the overstressed bonds count. */ virtual uint32_t getOverstressedBondCount() const = 0; /** Generate fracture commands for particular actor. Calling this function if getOverstressedBondCount() == 0 or actor has no bond doesn't make sense, bondFractureCount will be '0'. Filled fracture commands buffer can be passed directly to NvBlastActorApplyFracture. IMPORTANT: NvBlastFractureBuffers::bondFractures will point to internal stress solver memory which will be valid till next call of any of generateFractureCommands() functions or stress solver release() call. \param[in] actor The actor to fill fracture commands for. \param[in] commands Pointer to command buffer to fill. 
*/ virtual void generateFractureCommands(const NvBlastActor& actor, NvBlastFractureBuffers& commands) = 0; /** Generate fracture commands for every actor in family. Actors and commands buffer must be passed in order to be filled. It's recommended for bufferSize to be the count of actor with more then one bond in family. Calling this function if getOverstressedBondCount() == 0 or actor has no bond doesn't make sense, '0' will be returned. IMPORTANT: NvBlastFractureBuffers::bondFractures will point to internal stress solver memory which will be valid till next call of any of generateFractureCommands() functions or stress solver release() call. \param[out] buffer A user-supplied array of NvBlastActor pointers to fill. \param[out] commandsBuffer A user-supplied array of NvBlastFractureBuffers to fill. \param[in] bufferSize The number of elements available to write into buffer. \return the number of actors and command buffers written to the buffer. */ virtual uint32_t generateFractureCommandsPerActor(const NvBlastActor** actorBuffer, NvBlastFractureBuffers* commandsBuffer, uint32_t bufferSize) = 0; /** Reset stress solver. Stress solver uses warm start internally, calling this function will flush all previous data calculated and also zeros frame count. This function is to be used for debug purposes. */ virtual void reset() = 0; /** Get stress solver linear error. \return the total linear error of stress calculation. */ virtual float getStressErrorLinear() const = 0; /** Get stress solver angular error. \return the total angular error of stress calculation. */ virtual float getStressErrorAngular() const = 0; /** Whether or not the solver converged to a solution within the desired error. \return true iff the solver converged. */ virtual bool converged() const = 0; /** Get stress solver total frames count (update() calls) since it was created (or reset). \return the frames count. */ virtual uint32_t getFrameCount() const = 0; /** Get stress solver bonds count, after graph reduction was applied. \return the bonds count. */ virtual uint32_t getBondCount() const = 0; /** Get stress solver excess force related to broken bonds for the given actor. This is intended to be called after damage is applied to bonds and actors are split, but before the next call to 'update()'. Force is intended to be applied to the center of mass, torque due to linear forces that happen away from the COM are converted to torque as part of this function. \return true if data was gathered, false otherwise. */ virtual bool getExcessForces(uint32_t actorIndex, const NvcVec3& com, NvcVec3& force, NvcVec3& torque) = 0; /** Debug Render Mode */ enum DebugRenderMode { STRESS_PCT_MAX = 0, //!< render the maximum of the compression, tension, and shear stress percentages STRESS_PCT_COMPRESSION = 1, //!< render the compression stress percentage STRESS_PCT_TENSION = 2, //!< render the tension stress percentage STRESS_PCT_SHEAR = 3, //!< render the shear stress percentage }; /** Used to store a single line and colour for debug rendering. */ struct DebugLine { DebugLine(const NvcVec3& p0, const NvcVec3& p1, const uint32_t& c) : pos0(p0), color0(c), pos1(p1), color1(c) {} NvcVec3 pos0; uint32_t color0; NvcVec3 pos1; uint32_t color1; }; /** Debug Buffer */ struct DebugBuffer { const DebugLine* lines; uint32_t lineCount; }; /** Fill debug render for passed array of support graph nodes. NOTE: Returned DebugBuffer points into internal memory which is valid till next fillDebugRender() call. 
\param[in] nodes Indices of the support graph nodes to render debug data for. \param[in] nodeCount The number of node indices. \param[in] mode Debug render mode. \param[in] scale Scale to apply to impulses. \return a debug buffer containing an array of lines. */ virtual const DebugBuffer fillDebugRender(const uint32_t* nodes, uint32_t nodeCount, DebugRenderMode mode, float scale = 1.0f) = 0; }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTEXTSTRESSSOLVER_H
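A minimal usage sketch of the stress solver API declared in this header follows. It assumes the interface is the ExtStressSolver class created via the factory function declared earlier in this header, that `actor` is a static actor already reported through notifyActorCreated(), and that the caller applies the resulting commands through the low-level fracture entry point mentioned in the comments above; the function name `stepStressSolver` is illustrative.

#include "NvBlastExtStressSolver.h"   // this header (include path assumed)

// Per-frame driver: accumulate loads, solve, then harvest fracture commands.
void stepStressSolver(Nv::Blast::ExtStressSolver& solver, const NvBlastActor& actor)
{
    // Gravity in the actor's local frame (addGravity uses ExtForceMode::ACCELERATION).
    solver.addGravity(actor, NvcVec3{ 0.0f, -9.81f, 0.0f });

    // Run the stress calculation over all forces accumulated this frame.
    solver.update();

    // Only ask for commands when the solver actually reports overstressed bonds.
    if (solver.getOverstressedBondCount() > 0)
    {
        NvBlastFractureBuffers commands;
        solver.generateFractureCommands(actor, commands);
        if (commands.bondFractureCount > 0)
        {
            // 'commands' points into solver-owned memory: apply it (e.g. via
            // NvBlastActorApplyFracture) before the next generateFractureCommands()
            // or release() call, as noted in the documentation above.
        }
    }
}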
NVIDIA-Omniverse/PhysX/blast/include/extensions/shaders/NvBlastExtDamageShaders.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. //! @file //! //! @brief NvBlastExtDamageShaders blast extension, provides damage shaders for applying damage to destructibles #ifndef NVBLASTEXTDAMAGESHADERS_H #define NVBLASTEXTDAMAGESHADERS_H #include "NvBlastTypes.h" #include "NvBlastDebugRender.h" /** A few example damage shader implementations. */ /////////////////////////////////////////////////////////////////////////////// // Damage Accelerator /////////////////////////////////////////////////////////////////////////////// class NvBlastExtDamageAccelerator { public: virtual void release() = 0; virtual Nv::Blast::DebugBuffer fillDebugRender(int depth = -1, bool segments = false) = 0; }; NV_C_API NvBlastExtDamageAccelerator* NvBlastExtDamageAcceleratorCreate(const NvBlastAsset* asset, int type); /////////////////////////////////////////////////////////////////////////////// // Damage Program /////////////////////////////////////////////////////////////////////////////// /** Damage program params. Custom user params to be passed in shader functions. This structure hints recommended parameters layout, but it doesn't required to be this way. The idea of this 'hint' is that damage parameters are basically 2 entities: material + damage description. 1. Material is something that describes an actor properties (e.g. mass, stiffness, fragility) which are not expected to be changed often. 2. Damage description is something that describes particular damage event (e.g. position, radius and force of explosion). Also this damage program hints that there could be more than one damage event happening and processed per one shader call (for efficiency reasons). So different damage descriptions can be stacked and passed in one shader call (while material is kept the same obviously). 
*/ struct NvBlastExtProgramParams { NvBlastExtProgramParams(const void* desc, const void* material_ = nullptr, NvBlastExtDamageAccelerator* accelerator_ = nullptr) : damageDesc(desc), material(material_), accelerator(accelerator_) {} const void* damageDesc; //!< array of damage descriptions const void* material; //!< pointer to material NvBlastExtDamageAccelerator* accelerator; }; /////////////////////////////////////////////////////////////////////////////// // Common Material /////////////////////////////////////////////////////////////////////////////// /** Example of simple material. It is passed into damage shader, thus it is not used currently in any of them. The user can use it to filter and normalize before applying. Material function implementers may choose their own set. */ struct NvBlastExtMaterial { NvBlastExtMaterial() : health(100.f), minDamageThreshold(0.0f), maxDamageThreshold(1.0f) {} float health; //!< health float minDamageThreshold; //!< min damage fraction threshold to be applied. Range [0, 1]. For example 0.1 filters all damage below 10% of health. float maxDamageThreshold; //!< max damage fraction threshold to be applied. Range [0, 1]. For example 0.8 won't allow more then 80% of health damage to be applied. /** Helper to normalize damage. Pass damage defined in health, damage in range [0, 1] is returned, where 0 basically indicates that the threshold wasn't reached and there is no point in applying it. \param[in] damageInHealth Damage defined in terms of health amount to be reduced. \return normalized damage */ float getNormalizedDamage(float damageInHealth) const { const float damage = health > 0.f ? damageInHealth / health : 1.0f; return damage > minDamageThreshold ? (damage < maxDamageThreshold ? damage : maxDamageThreshold) : 0.f; } }; /////////////////////////////////////////////////////////////////////////////// // Point Radial Damage /////////////////////////////////////////////////////////////////////////////// /** Radial Damage Desc */ struct NvBlastExtRadialDamageDesc { float damage; //!< normalized damage amount, range: [0, 1] (maximum health value to be reduced) float position[3]; //!< origin of damage action float minRadius; //!< inner radius of damage action float maxRadius; //!< outer radius of damage action }; /** Radial Falloff and Radial Cutter damage for both graph and subgraph shaders. NOTE: The signature of shader functions are equal to NvBlastGraphShaderFunction and NvBlastSubgraphShaderFunction respectively. They are not expected to be called directly. 
@see NvBlastGraphShaderFunction, NvBlastSubgraphShaderFunction */ NV_C_API void NvBlastExtFalloffGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params); NV_C_API void NvBlastExtFalloffSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params); NV_C_API void NvBlastExtCutterGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params); NV_C_API void NvBlastExtCutterSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params); /////////////////////////////////////////////////////////////////////////////// // Capsule Radial Damage /////////////////////////////////////////////////////////////////////////////// /** Capsule Radial Damage Desc */ struct NvBlastExtCapsuleRadialDamageDesc { float damage; //!< normalized damage amount, range: [0, 1] (maximum health value to be reduced) float position0[3]; //!< damage segment point A position float position1[3]; //!< damage segment point B position float minRadius; //!< inner radius of damage action float maxRadius; //!< outer radius of damage action }; /** Capsule Radial Falloff damage for both graph and subgraph shaders. For every bond/chunk damage is calculated from the distance to line segment AB described in NvBlastExtCapsuleRadialDamageDesc. If distance is smaller then minRadius, full compressive amount of damage is applied. From minRadius to maxRaidus it linearly falls off to zero. NOTE: The signature of shader functions are equal to NvBlastGraphShaderFunction and NvBlastSubgraphShaderFunction respectively. They are not expected to be called directly. @see NvBlastGraphShaderFunction, NvBlastSubgraphShaderFunction */ NV_C_API void NvBlastExtCapsuleFalloffGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params); NV_C_API void NvBlastExtCapsuleFalloffSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params); /////////////////////////////////////////////////////////////////////////////// // Shear Damage /////////////////////////////////////////////////////////////////////////////// /** Shear Damage Desc */ struct NvBlastExtShearDamageDesc { float damage; //!< normalized damage amount, range: [0, 1] (maximum health value to be reduced) float normal[3]; //!< directional damage component float position[3]; //!< origin of damage action float minRadius; //!< inner radius of damage action float maxRadius; //!< outer radius of damage action }; /** Shear Damage Shaders NOTE: The signature of shader functions are equal to NvBlastGraphShaderFunction and NvBlastSubgraphShaderFunction respectively. They are not expected to be called directly. 
@see NvBlastGraphShaderFunction, NvBlastSubgraphShaderFunction */ NV_C_API void NvBlastExtShearGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params); NV_C_API void NvBlastExtShearSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params); /////////////////////////////////////////////////////////////////////////////// // Triangle Intersection Damage /////////////////////////////////////////////////////////////////////////////// /** Triangle Intersection Damage Desc */ struct NvBlastExtTriangleIntersectionDamageDesc { float damage; //!< normalized damage amount, range: [0, 1] (maximum health value to be reduced) NvcVec3 position0; //!< triangle point A position NvcVec3 position1; //!< triangle point B position NvcVec3 position2; //!< triangle point C position }; /** Triangle Intersection damage for both graph and subgraph shaders. Every bond is considered to be a segment connecting two chunk centroids. For every bond (segment) intersection with passed triangle is checked. If intersects full damage is applied on bond. For subgraph shader segments are formed as connections between it's subchunks centroids. Intersection is check in the same fashion. The idea is that if you want to cut an object say with the laser sword, you can form a triangle by taking the position of a sword on this timeframe and on previous one. So that nothing will be missed in terms of space and time. By sweeping sword through whole object it will be cut in halves inevitably, since all bonds segments form connected graph. NOTE: The signature of shader functions are equal to NvBlastGraphShaderFunction and NvBlastSubgraphShaderFunction respectively. They are not expected to be called directly. @see NvBlastGraphShaderFunction, NvBlastSubgraphShaderFunction */ NV_C_API void NvBlastExtTriangleIntersectionGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params); NV_C_API void NvBlastExtTriangleIntersectionSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params); /////////////////////////////////////////////////////////////////////////////// // Impact Spread /////////////////////////////////////////////////////////////////////////////// /** Impact Spread Damage Desc */ struct NvBlastExtImpactSpreadDamageDesc { float damage; //!< normalized damage amount, range: [0, 1] (maximum health value to be reduced) float position[3]; //!< origin of damage action float minRadius; //!< inner radius of damage action float maxRadius; //!< outer radius of damage action }; /** Impact Spread Damage Shaders. It assumes that position is somewhere on the chunk and looks for nearest chunk to this position and damages it. Then it does breadth-first support graph traversal. For radial falloff metric distance is measured along the edges of the graph. That allows to avoid damaging parts which are near in space but disjointed topologically. For example if you hit one column of an arc it would take much bigger radius for damage to travel to the other column than in the simple radial damage. Shader is designed to be used with impact damage, where it is know in advance that actual hit happened. This shader requires NvBlastExtDamageAccelerator passed in, it request scratch memory from it, therefore it is also designed to work only in single threaded mode. 
This could easily be changed by passing scratch memory as part of NvBlastExtProgramParams if required. NOTE: The signatures of the shader functions match NvBlastGraphShaderFunction and NvBlastSubgraphShaderFunction respectively. They are not expected to be called directly. @see NvBlastGraphShaderFunction, NvBlastSubgraphShaderFunction */ NV_C_API void NvBlastExtImpactSpreadGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params); NV_C_API void NvBlastExtImpactSpreadSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params); #endif // NVBLASTEXTDAMAGESHADERS_H
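As an illustration of the material/descriptor split described above, the sketch below normalizes a damage value with NvBlastExtMaterial and packages a radial descriptor for the falloff shaders declared in this header; the numeric values are arbitrary.

// Normalize 25 health points of damage against the default material (health = 100).
NvBlastExtMaterial material;
const float normalizedDamage = material.getNormalizedDamage(25.0f);   // -> 0.25f

// One radial damage event; several descs could be stacked in one array instead.
NvBlastExtRadialDamageDesc radialDesc;
radialDesc.damage      = normalizedDamage;
radialDesc.position[0] = 0.0f;
radialDesc.position[1] = 0.0f;
radialDesc.position[2] = 0.0f;
radialDesc.minRadius   = 1.0f;
radialDesc.maxRadius   = 5.0f;

// This params object is what NvBlastExtFalloffGraphShader /
// NvBlastExtFalloffSubgraphShader expect to receive through their 'params' argument.
NvBlastExtProgramParams programParams(&radialDesc, &material);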
NVIDIA-Omniverse/PhysX/blast/include/extensions/serialization/NvBlastExtSerialization.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. //! @file //! //! @brief Defines basic types for the NvBlastExtSerialization blast extension, which handles versioned serialization of blast objects in different formats #pragma once #include "NvBlastGlobals.h" namespace Nv { namespace Blast { /** Serialization manager interface */ class ExtSerialization { public: /** Standard Encoding IDs */ struct EncodingID { enum Enum { CapnProtoBinary = NVBLAST_FOURCC('C', 'P', 'N', 'B'), RawBinary = NVBLAST_FOURCC('R', 'A', 'W', ' '), }; }; /** Buffer provider API, used to request a buffer for serialization. */ class BufferProvider { public: virtual void* requestBuffer(size_t size) = 0; }; /** Set the serialization encoding to use. (See EncodingID.) \return true iff successful. */ virtual bool setSerializationEncoding(uint32_t encodingID) = 0; /** Retrieve the current serialization encoding being used. Note, by default this is set to the encoding of the first serializer registered by a module. Currently this is done automatically by the NvBlastExtLlExtension module. \return the current serialization encoding (zero if none is set). */ virtual uint32_t getSerializationEncoding() const = 0; /** Set the buffer provider callback to use. (See BufferProvider.) If not set, a default provider using NVBLAST_ALLOC (see NvBlastGlobals.h) is used, which may be freed using NvBLAST_FREE. \param[in] bufferProvider Buffer provider callback to use. If NULL, uses the default provider using the allocator given in NvBlastGlobals. */ virtual void setBufferProvider(BufferProvider* bufferProvider) = 0; /** Reads information from a buffer, returning the contained object type ID, encoding ID, and data size. \param[out] objectTypeID If not NULL, the object type ID is written to *objectTypeID. \param[out] encodingID If not NULL, the encoding ID is written to *encodingID. \param[out] dataSize If not NULL, the data size is written to *dataSize. (Does not include the size of the header.) \param[in] buffer Pointer to the buffer to read. 
\param[in] bufferSize Size of the buffer to read. \return true iff the header is successfully read. */ virtual bool peekHeader(uint32_t* objectTypeID, uint32_t* encodingID, uint64_t* dataSize, const void* buffer, uint64_t bufferSize) = 0; /** Determines the current object in the buffer and returns the position in the buffer immediately after the object. \param[in, out] bufferSize Size of the buffer to read on input, on output the remaining buffer size given the return buffer value. \param[in] buffer Pointer to the buffer to read. \return a pointer to the new position in the buffer after the skipped object if successful, NULL otherwise. The bufferSize field is only updated if a valid pointer is returned. */ virtual const void* skipObject(uint64_t& bufferSize, const void* buffer) = 0; /** Deserialize from a buffer into a newly allocated object. \param[in] buffer Pointer to the buffer to read. \param[in] bufferSize Size of the buffer to read. \param[out] objectTypeIDPtr Optional, if not NULL then *objectTypeIDPtr will be filled with the deserialized object's type ID if deserialization is successful, or 0 if unsuccessful. \return object pointer; returns null if failed to deserialize. */ virtual void* deserializeFromBuffer(const void* buffer, uint64_t bufferSize, uint32_t* objectTypeIDPtr = nullptr) = 0; /** Serialize into a buffer. Allocates the buffer internally using the callack set in setBufferProvider. \param[out] buffer Pointer to the buffer created. \param[in] object Object pointer. \param[in] objectTypeID Object type ID. \return the number of bytes serialized into the buffer (zero if unsuccessful). */ virtual uint64_t serializeIntoBuffer(void*& buffer, const void* object, uint32_t objectTypeID) = 0; /** Release the serialization manager and all contained objects. */ virtual void release() = 0; protected: /** Destructor is virtual and not public - use the release() method instead of explicitly deleting the serialization manager */ virtual ~ExtSerialization() {} }; } // namespace Blast } // namespace Nv //////// Global API to create serialization //////// /** Create a new serialization manager. To release it, use its release() method. This uses the global allocator set in NvBlastGlobals.h. \return a new serialization manager. */ NV_C_API Nv::Blast::ExtSerialization* NvBlastExtSerializationCreate();
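A round-trip sketch of the manager API declared above. `object` and `objectTypeID` are assumed to come from one of the serializer modules (for example the low-level or Tk modules in the following headers), and the default buffer provider is assumed, so the buffer is freed with NVBLAST_FREE.

Nv::Blast::ExtSerialization* ser = NvBlastExtSerializationCreate();
ser->setSerializationEncoding(Nv::Blast::ExtSerialization::EncodingID::CapnProtoBinary);

// Serialize: the manager allocates 'buffer' through its buffer provider.
void* buffer = nullptr;
const uint64_t size = ser->serializeIntoBuffer(buffer, object, objectTypeID);

if (size > 0)
{
    // Inspect the header without consuming the object...
    uint32_t typeID = 0, encodingID = 0;
    uint64_t dataSize = 0;
    if (ser->peekHeader(&typeID, &encodingID, &dataSize, buffer, size))
    {
        // ...then deserialize into a newly allocated object.
        void* readBack = ser->deserializeFromBuffer(buffer, size, &typeID);
        // Release 'readBack' with the release function appropriate to its type.
    }
    NVBLAST_FREE(buffer);   // default provider allocates with NVBLAST_ALLOC
}
ser->release();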
NVIDIA-Omniverse/PhysX/blast/include/extensions/serialization/NvBlastExtTkSerialization.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. //! @file //! //! @brief Defines blast toolkit (Tk) serialization support for the NvBlastExtSerialization blast extension #pragma once #include "NvBlastGlobals.h" /** Blast High-level serialization support. Contains serializers which can be used by the ExtSerialization manager. */ namespace Nv { namespace Blast { // Forward declarations class ExtSerialization; class TkFramework; class TkAsset; /** Standard Object Type IDs */ struct TkObjectTypeID { enum Enum { Asset = NVBLAST_FOURCC('T', 'K', 'A', 'S'), }; }; } // namespace Blast } // namespace Nv /** Load all high-level serializers into the ExtSerialization manager. It does no harm to call this function more than once; serializers already loaded will not be loaded again. \param[in] serialization Serialization manager into which to load serializers. \return the number of serializers loaded. */ NV_C_API size_t NvBlastExtTkSerializerLoadSet(Nv::Blast::TkFramework& framework, Nv::Blast::ExtSerialization& serialization); /** Utility wrapper function to serialize a TkAsset. Allocates the buffer internally using the callack set in ExtSerialization::setBufferProvider. Equivalent to: serialization.serializeIntoBuffer(buffer, asset, Nv::Blast::TkObjectTypeID::Asset); \param[out] buffer Pointer to the buffer created. \param[in] serialization Serialization manager. \param[in] asset Pointer to the TkAsset to serialize. \return the number of bytes serialized into the buffer (zero if unsuccessful). */ NV_C_API uint64_t NvBlastExtSerializationSerializeTkAssetIntoBuffer(void*& buffer, Nv::Blast::ExtSerialization& serialization, const Nv::Blast::TkAsset* asset);
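A short sketch of using the Tk serializer set declared above, assuming `framework`, `serialization`, and `tkAsset` already exist (unlike the low-level serializers, the Tk serializers are not loaded automatically).

// Register the Tk serializers with the manager (safe to call more than once).
NvBlastExtTkSerializerLoadSet(framework, serialization);

// Serialize a TkAsset; equivalent to serializeIntoBuffer() with TkObjectTypeID::Asset.
void* buffer = nullptr;
const uint64_t bytes =
    NvBlastExtSerializationSerializeTkAssetIntoBuffer(buffer, serialization, tkAsset);
// 'bytes' is zero on failure; the buffer comes from the manager's buffer provider.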
NVIDIA-Omniverse/PhysX/blast/include/extensions/serialization/NvBlastExtLlSerialization.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. //! @file //! //! @brief Defines low-level serialization support for the NvBlastExtSerialization blast extension #pragma once #include "NvBlastGlobals.h" /** Blast Low-level serialization support. Contains serializers which can be used by the ExtSerialization manager. */ // Forward declarations struct NvBlastAsset; struct NvBlastFamily; namespace Nv { namespace Blast { // Forward declarations class ExtSerialization; /** Standard Object Type IDs */ struct LlObjectTypeID { enum Enum { Asset = NVBLAST_FOURCC('L', 'L', 'A', 'S'), Family = NVBLAST_FOURCC('L', 'L', 'F', 'A'), }; }; } // namespace Blast } // namespace Nv /** Load all low-level serializers into the ExtSerialization manager. *N.B.* This is done automatically when the ExtSerialization manager is created via NvBlastExtSerializationCreate(), so currently this public function is unnecessary. Note also that other modules' serializers (e.g. ExtTkSerialization) are _not_ loaded automatically, and need to be explicitly loaded by the user using their respective load functions. It does no harm to call this function more than once; serializers already loaded will not be loaded again. \param[in] serialization Serialization manager into which to load serializers. \return the number of serializers loaded. */ NV_C_API size_t NvBlastExtLlSerializerLoadSet(Nv::Blast::ExtSerialization& serialization); /** Utility wrapper function to serialize an NvBlastAsset. Allocates the buffer internally using the callack set in ExtSerialization::setBufferProvider. Equivalent to: serialization.serializeIntoBuffer(buffer, asset, Nv::Blast::LlObjectTypeID::Asset); \param[out] buffer Pointer to the buffer created. \param[in] serialization Serialization manager. \param[in] asset Pointer to the NvBlastAsset to serialize. \return the number of bytes serialized into the buffer (zero if unsuccessful). 
*/ NV_C_API uint64_t NvBlastExtSerializationSerializeAssetIntoBuffer(void*& buffer, Nv::Blast::ExtSerialization& serialization, const NvBlastAsset* asset); /** Utility wrapper function to serialize an NvBlastFamily. Allocates the buffer internally using the callback set in ExtSerialization::setBufferProvider. Equivalent to: serialization.serializeIntoBuffer(buffer, family, Nv::Blast::LlObjectTypeID::Family); \param[out] buffer Pointer to the buffer created. \param[in] serialization Serialization manager. \param[in] family Pointer to the NvBlastFamily to serialize. \return the number of bytes serialized into the buffer (zero if unsuccessful). */ NV_C_API uint64_t NvBlastExtSerializationSerializeFamilyIntoBuffer(void*& buffer, Nv::Blast::ExtSerialization& serialization, const NvBlastFamily* family);
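A brief sketch of the low-level wrappers above, assuming `ser`, `asset`, and `family` already exist; the object type ID returned on deserialization distinguishes the two object kinds.

void* assetBuffer = nullptr;
const uint64_t assetBytes =
    NvBlastExtSerializationSerializeAssetIntoBuffer(assetBuffer, *ser, asset);

void* familyBuffer = nullptr;
const uint64_t familyBytes =
    NvBlastExtSerializationSerializeFamilyIntoBuffer(familyBuffer, *ser, family);

// When reading back, the type ID tells assets and families apart:
uint32_t typeID = 0;
void* obj = ser->deserializeFromBuffer(assetBuffer, assetBytes, &typeID);
// typeID == Nv::Blast::LlObjectTypeID::Asset on success.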
NVIDIA-Omniverse/PhysX/blast/include/extensions/authoringCommon/NvBlastExtAuthoringAccelerator.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. //! @file //! //! @brief Defines the SpatialAccelerator API used by the BooleanTool #ifndef NVBLASTEXTAUTHORINGACCELERATOR_H #define NVBLASTEXTAUTHORINGACCELERATOR_H #include "NvBlastExtAuthoringTypes.h" namespace Nv { namespace Blast { class Mesh; /** Acceleration structure interface. */ class SpatialAccelerator { public: /** Set state of accelerator to return all facets which possibly can intersect given facet bound. \param[in] pos Vertex buffer \param[in] ed Edge buffer \param[in] fc Facet which should be tested. */ virtual void setState(const NvcBounds3* bounds) = 0; /** Set state of accelerator to return all facets which possibly can intersect given facet. \param[in] pos Vertex buffer \param[in] ed Edge buffer \param[in] fc Facet which should be tested. */ virtual void setState(const Vertex* pos, const Edge* ed, const Facet& fc) = 0; /** Set state of accelerator to return all facets which possibly can cover given point. Needed for testing whether point is inside mesh. \param[in] point Point which should be tested. */ virtual void setState(const NvcVec3& point) = 0; /** Recieve next facet for setted state. \return Next facet index, or -1 if no facets left. */ virtual int32_t getNextFacet() = 0; virtual void setPointCmpDirection(int32_t dir) = 0; virtual void release() = 0; virtual ~SpatialAccelerator() {} }; /** Used for some implementations of spatial accelerators. */ class SpatialGrid { public: virtual void setMesh(const Nv::Blast::Mesh* m) = 0; virtual void release() = 0; }; } // namespace Blast } // namsepace Nv #endif // ifndef NVBLASTEXTAUTHORINGACCELERATOR_H
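A sketch of how a consumer of the SpatialAccelerator interface might be driven, assuming `accel` is some SpatialAccelerator implementation and `mesh` is an Nv::Blast::Mesh (declared in the next header) whose facet `queryFacet` is being tested.

// Narrow the candidate set to facets whose bounds may intersect the query facet...
accel->setState(mesh->getVertices(), mesh->getEdges(), *mesh->getFacet(queryFacet));

// ...then pull candidates until the accelerator is exhausted.
for (int32_t f = accel->getNextFacet(); f >= 0; f = accel->getNextFacet())
{
    // 'f' is the index of a facet that potentially intersects the query facet;
    // the exact intersection test is still the caller's responsibility.
}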
NVIDIA-Omniverse/PhysX/blast/include/extensions/authoringCommon/NvBlastExtAuthoringMesh.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. //! @file //! //! @brief Defines the Mesh API used by the authoring tools #ifndef NVBLASTAUTHORINGMESH_H #define NVBLASTAUTHORINGMESH_H #include "NvBlastExtAuthoringTypes.h" namespace Nv { namespace Blast { /** Class for internal mesh representation */ class Mesh { public: virtual ~Mesh() {} /** Release Mesh memory */ virtual void release() = 0; /** Return true if mesh is valid */ virtual bool isValid() const = 0; /** Return writable pointer on vertices array */ virtual Vertex* getVerticesWritable() = 0; /** Return pointer on vertices array */ virtual const Vertex* getVertices() const = 0; /** Return writable pointer on edges array */ virtual Edge* getEdgesWritable() = 0; /** Return pointer on edges array */ virtual const Edge* getEdges() const = 0; /** Return writable pointer on facets array */ virtual Facet* getFacetsBufferWritable() = 0; /** Return pointer on facets array */ virtual const Facet* getFacetsBuffer() const = 0; /** Return writable pointer on specified facet */ virtual Facet* getFacetWritable(int32_t facet) = 0; /** Return pointer on specified facet */ virtual const Facet* getFacet(int32_t facet) const = 0; /** Return edges count */ virtual uint32_t getEdgesCount() const = 0; /** Return vertices count */ virtual uint32_t getVerticesCount() const = 0; /** Return facet count */ virtual uint32_t getFacetCount() const = 0; /** Return reference on mesh bounding box. */ virtual const NvcBounds3& getBoundingBox() const = 0; /** Return writable reference on mesh bounding box. */ virtual NvcBounds3& getBoundingBoxWritable() = 0; /** Set per-facet material id. */ virtual void setMaterialId(const int32_t* materialIds) = 0; /** Replaces an material id on faces with a new one */ virtual void replaceMaterialId(int32_t oldMaterialId, int32_t newMaterialId) = 0; /** Set per-facet smoothing group. */ virtual void setSmoothingGroup(const int32_t* smoothingGroups) = 0; /** Recalculate bounding box */ virtual void recalculateBoundingBox() = 0; /** Compute mesh volume and centroid. 
Assumes the mesh has outward normals and no holes. */ virtual float getMeshVolumeAndCentroid(NvcVec3& centroid) const = 0; /** Calculate per-facet bounding boxes. */ virtual void calcPerFacetBounds() = 0; /** Get a pointer to a facet's bounding box; returns nullptr if bounds have not been calculated. */ virtual const NvcBounds3* getFacetBound(uint32_t index) const = 0; }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTAUTHORINGMESH_H
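The facet/edge/vertex layout implied by the accessors above can be walked as in the following sketch (each facet owns a contiguous run of edges, and each edge indexes the vertex buffer); `mesh` is assumed to be a valid Mesh pointer.

const Nv::Blast::Vertex* verts = mesh->getVertices();
const Nv::Blast::Edge*   edges = mesh->getEdges();

for (uint32_t f = 0; f < mesh->getFacetCount(); ++f)
{
    const Nv::Blast::Facet* facet = mesh->getFacet(int32_t(f));
    for (uint32_t e = 0; e < facet->edgesCount; ++e)
    {
        const Nv::Blast::Edge& edge = edges[uint32_t(facet->firstEdgeNumber) + e];
        const NvcVec3& a = verts[edge.s].p;   // edge start position
        const NvcVec3& b = verts[edge.e].p;   // edge end position
        // ... e.g. accumulate edge lengths or build wireframe lines ...
    }
}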
NVIDIA-Omniverse/PhysX/blast/include/extensions/authoringCommon/NvBlastExtAuthoringPatternGenerator.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. //! @file //! //! @brief Defines the PatternGenerator API used by the authoring tools, allowing the user to create fracture patterns #ifndef NVBLASTEXTAUTHORINGPATTERNGENERATOR_H #define NVBLASTEXTAUTHORINGPATTERNGENERATOR_H #include "NvBlastGlobals.h" namespace Nv { namespace Blast { typedef float (*RNG_CALLBACK)(void); struct PatternDescriptor { RNG_CALLBACK RNG = nullptr; uint32_t interiorMaterialId = 1000; }; struct UniformPatternDesc : public PatternDescriptor { uint32_t cellsCount = 2; float radiusMin = 0.0f; float radiusMax = 1.0f; float radiusDistr = 1.0f; float debrisRadiusMult = 1.0f; }; struct BeamPatternDesc : public PatternDescriptor { uint32_t cellsCount; float radiusMin; float radiusMax; }; struct RegularRadialPatternDesc : public PatternDescriptor { float radiusMin = 0.0f; float radiusMax = 1.0f; uint32_t radialSteps = 3; uint32_t angularSteps = 8; float aperture = .0f; float angularNoiseAmplitude = 0.0f; float radialNoiseAmplitude = 0.0f; float radialNoiseFrequency = 0.0f; float debrisRadiusMult = 1.0f; }; struct DamagePattern { /** Used to compute activated chunks. */ float activationRadius; float angle; // For cone shape activation enum ActivationDistanceType { Point = 0, Line, Cone }; ActivationDistanceType activationType = Point; // ---------------------------------------------- uint32_t cellsCount; class Mesh** cellsMeshes = nullptr; virtual void release() = 0; }; class PatternGenerator { public: virtual DamagePattern* generateUniformPattern(const UniformPatternDesc* desc) = 0; virtual DamagePattern* generateBeamPattern(const BeamPatternDesc* desc) = 0; virtual DamagePattern* generateRegularRadialPattern(const RegularRadialPatternDesc* desc) = 0; virtual DamagePattern* generateVoronoiPattern(uint32_t pointCount, const NvcVec3* points, int32_t interiorMaterialId) = 0; virtual void release() = 0; }; NV_C_API void savePatternToObj(DamagePattern* pattern); } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTEXTAUTHORINGMESHCLEANER_H
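A sketch of driving the pattern generator declared above, assuming a PatternGenerator instance `generator` obtained from the authoring extension's factory (not shown in this header) and a user-supplied RNG returning values in [0, 1].

#include <cstdlib>   // std::rand / RAND_MAX, used only for this illustration

Nv::Blast::UniformPatternDesc desc;
desc.RNG        = []() { return float(std::rand()) / float(RAND_MAX); };
desc.cellsCount = 16;
desc.radiusMin  = 0.0f;
desc.radiusMax  = 2.0f;

Nv::Blast::DamagePattern* pattern = generator->generateUniformPattern(&desc);
if (pattern)
{
    // pattern->cellsCount cell meshes are available through pattern->cellsMeshes.
    pattern->release();
}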
NVIDIA-Omniverse/PhysX/blast/include/extensions/authoringCommon/NvBlastExtAuthoringConvexMeshBuilder.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. //! @file //! //! @brief Defines the ConvexMeshBuilder API used by the authoring tools, allowing the user to specify a convex hull description for a chunk #ifndef NVBLASTEXTAUTHORINGCONVEXMESHBUILDER_H #define NVBLASTEXTAUTHORINGCONVEXMESHBUILDER_H #include "NvCTypes.h" namespace Nv { namespace Blast { struct CollisionHull; /** ConvexMeshBuilder provides routine to build collision hulls from array of vertices. Collision hull is built as convex hull of provided point set. If due to some reason building of convex hull is failed, collision hull is built as bounding box of vertex set. */ class ConvexMeshBuilder { public: /** Release ConvexMeshBuilder memory */ virtual void release() = 0; /** Method creates CollisionHull from provided array of vertices. \param[in] verticesCount Number of vertices \param[in] vertexData Vertex array of some object, for which collision geometry should be built \param[out] output Reference on CollisionHull object in which generated geometry should be saved */ virtual CollisionHull* buildCollisionGeometry(uint32_t verticesCount, const NvcVec3* vertexData) = 0; /** Release CollisionHull memory. */ virtual void releaseCollisionHull(CollisionHull* hull) const = 0; }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTEXTAUTHORINGCONVEXMESHBUILDER_H
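A sketch of the expected call pattern for ConvexMeshBuilder, assuming `builder` implements the interface and `points`/`pointCount` describe one chunk's vertex positions (CollisionHull itself is defined in NvBlastExtAuthoringTypes.h, shown next).

Nv::Blast::CollisionHull* hull = builder->buildCollisionGeometry(pointCount, points);
if (hull)
{
    // On convex-hull failure the builder falls back to a bounding box, so a
    // non-null result is still usable as collision geometry.
    // ... copy out hull->points / hull->indices / hull->polygonData as needed ...
    builder->releaseCollisionHull(hull);
}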
NVIDIA-Omniverse/PhysX/blast/include/extensions/authoringCommon/NvBlastExtAuthoringTypes.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. //! @file //! //! @brief Defines basic types used by NvBlastExtAuthoring #ifndef NVBLASTAUTHORINGTYPES_H #define NVBLASTAUTHORINGTYPES_H #include "NvBlastTypes.h" #include "NvCTypes.h" namespace Nv { namespace Blast { /** Default material id assigned to interior faces (faces which created between 2 fractured chunks) */ const uint32_t kMaterialInteriorId = 1000; /** Default smoothing group id assigned to interior faces */ const uint32_t kSmoothingGroupInteriorId = 1000; /** Vertex index which considired by NvBlast as not valid. */ const uint32_t kNotValidVertexIndex = UINT32_MAX; /** Edge representation */ struct Edge { Edge(uint32_t s = kNotValidVertexIndex, uint32_t e = kNotValidVertexIndex) : s(s), e(e) {} uint32_t s; uint32_t e; }; /** Mesh vertex representation */ struct Vertex { Vertex() {}; Vertex(const NvcVec3& p, const NvcVec3& n, const NvcVec2& _uv) : p(p), n(n) { uv[0] = _uv; } NvcVec3 p; // Position NvcVec3 n; // Normal NvcVec2 uv[1]; // UV-coordinates array, currently supported only one UV coordinate. 
}; /** Mesh triangle representation */ struct Triangle { Triangle() {}; Triangle(const Vertex& a, const Vertex& b, const Vertex& c, int32_t ud = 0, int32_t mid = 0, int32_t sid = 0) : a(a), b(b), c(c), userData(ud), materialId(mid), smoothingGroup(sid) {} Vertex a, b, c; int32_t userData; int32_t materialId; int32_t smoothingGroup; }; /** Index based triangle */ struct TriangleIndexed { TriangleIndexed(uint32_t ea, uint32_t eb, uint32_t ec, int32_t mid = 0, int32_t sid = 0, int32_t ud = 0) : ea(ea), eb(eb), ec(ec), materialId(mid), smoothingGroup(sid), userData(ud) {} uint32_t ea, eb, ec; int32_t materialId; int32_t smoothingGroup; int32_t userData; }; /** Mesh facet representation */ struct Facet { Facet(int32_t fen = 0, uint32_t ec = 0, int64_t ud = 0, int32_t mid = 0, int32_t sid = 0) : firstEdgeNumber(fen), edgesCount(ec), userData(ud), materialId(mid), smoothingGroup(sid) {} int32_t firstEdgeNumber; uint32_t edgesCount; int64_t userData; int32_t materialId; int32_t smoothingGroup; }; /** Collision hull geometry format. */ struct HullPolygon { // Polygon base plane float plane[4]; // Number vertices in polygon uint16_t vertexCount; // First index in CollisionHull.indices array for this polygon uint16_t indexBase; }; /** Collsion hull geometry. */ struct CollisionHull { uint32_t pointsCount; uint32_t indicesCount; uint32_t polygonDataCount; NvcVec3* points; uint32_t* indices; HullPolygon* polygonData; }; /** Authoring results. Which contains NvBlastAsset, render and collision meshes. If it was created by NvBlast it should be released with NvBlastExtAuthoringReleaseAuthoringResult For releasing just collsion geometry call NvBlastExtAuthoringReleaseAuthoringResultCollision */ struct AuthoringResult { uint32_t chunkCount; // Number of chunks in Blast asset uint32_t bondCount; // Number of bonds in Blast asset NvBlastAsset* asset; // Blast asset /** assetToFractureChunkIdMap used for getting internal FractureChunkId with FractureTool::getChunkId. FractureChunkId = FractureTool.getChunkId(aResult.assetToFractureChunkIdMap(AssetChunkId); */ uint32_t* assetToFractureChunkIdMap; /** Offsets for render mesh geometry. Contains chunkCount + 1 element. First triangle for i-th chunk: aResult.geometry[aResult.geometryOffset[i]] aResult.geometryOffset[chunkCount+1] is total number of triangles in geometry */ uint32_t* geometryOffset; Triangle* geometry; // Raw array of Triangle for all chunks NvBlastChunkDesc* chunkDescs; // Array of chunk descriptors. Contains chunkCount elements NvBlastBondDesc* bondDescs; // Array of bond descriptors. Contains bondCount elements /** Collision hull offsets. Contains chunkCount + 1 element. First collision hull for i-th chunk: aResult.collisionHull[aResult.collisionHullOffset[i]] aResult.collisionHullOffset[chunkCount+1] is total number of collision hulls in collisionHull */ uint32_t* collisionHullOffset; CollisionHull** collisionHull; // Raw array of pointers to collision hull for all chunks. /** Array of material names. */ const char** materialNames; /** Size of array of material names. */ uint32_t materialCount; }; struct ConvexDecompositionParams { uint32_t maximumNumberOfHulls = 8; // Maximum number of convex hull generated for one chunk. If equal to 1 convex // decomposition is disabled. 
uint32_t maximumNumberOfVerticesPerHull = 64; // Controls the maximum number of vertices per convex hull // (default=64, range=4-1024) uint32_t voxelGridResolution = 1000000; // Voxel grid resolution used for chunk convex decomposition // (default=1,000,000, range=10,000-16,000,000). float concavity = 0.0025f; // Value between 0 and 1; controls how accurate hull generation is }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTAUTHORINGTYPES_H
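The offset arrays documented in AuthoringResult can be consumed as in this sketch, which walks the per-chunk render triangles and collision hulls; `aResult` is assumed to be a populated AuthoringResult.

for (uint32_t chunk = 0; chunk < aResult.chunkCount; ++chunk)
{
    // Render triangles for this chunk: a contiguous slice of aResult.geometry.
    const Nv::Blast::Triangle* tris = aResult.geometry + aResult.geometryOffset[chunk];
    const uint32_t triCount =
        aResult.geometryOffset[chunk + 1] - aResult.geometryOffset[chunk];

    // Collision hulls for this chunk, sliced the same way.
    Nv::Blast::CollisionHull* const* hulls =
        aResult.collisionHull + aResult.collisionHullOffset[chunk];
    const uint32_t hullCount =
        aResult.collisionHullOffset[chunk + 1] - aResult.collisionHullOffset[chunk];

    // ... hand 'tris'/'triCount' to rendering and 'hulls'/'hullCount' to physics ...
}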
NVIDIA-Omniverse/PhysX/blast/include/toolkit/NvBlastTkActor.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. //! @file //! //! @brief Defines the API for the NvBlastExtTkActor class #ifndef NVBLASTTKACTOR_H #define NVBLASTTKACTOR_H #include "NvBlastTkObject.h" #include "NvBlastTypes.h" // Forward declarations struct NvBlastActor; struct NvBlastFamily; namespace Nv { namespace Blast { // Forward declarations class TkAsset; class TkFamily; class TkGroup; class TkJoint; /** The BlastTk entity which encapsulates an NvBlastActor. Every TkActor represents a group of chunks which may correspond to a single physical rigid body. TkActors are created using TkFramework::createActor. */ class TkActor : public TkObject { public: /** Access to underlying low-level actor. \return a pointer to the (const) low-level NvBlastActor object. */ virtual const NvBlastActor* getActorLL() const = 0; /** Every actor is part of an actor family, even if that family contains a single actor. This function returns a reference to the actor's TkFamily. \return a pointer to the actor's TkFamily. */ virtual TkFamily& getFamily() const = 0; /** Every actor has a unique index within a family. This function returns that index. */ virtual uint32_t getIndex() const = 0; /** Actors may be part of (no more than) one group. See TkGroup for the functions to add and remove actors. This function returns a pointer to the actor's group, or NULL if it is not in a group. */ virtual TkGroup* getGroup() const = 0; /** Remove this actor from its group, if it is in one. \return the actor's former group if successful, NULL otherwise. */ virtual TkGroup* removeFromGroup() = 0; /** Every actor has an associated asset. \return a pointer to the (const) TkAsset object. */ virtual const TkAsset* getAsset() const = 0; /** Get the number of visible chunks for this actor. May be used in conjunction with getVisibleChunkIndices. NOTE: Wrapper function over low-level function call, see NvBlastActorGetVisibleChunkCount for details. \return the number of visible chunk indices for the actor. 
*/ virtual uint32_t getVisibleChunkCount() const = 0; /** Retrieve a list of visible chunk indices for the actor into the given array. NOTE: Wrapper function over low-level function call, see NvBlastActorGetVisibleChunkIndices for details. \param[in] visibleChunkIndices User-supplied array to be filled in with indices of visible chunks for this actor. \param[in] visibleChunkIndicesSize The size of the visibleChunkIndices array. To receive all visible chunk indices, the size must be at least that given by getVisibleChunkCount(). \return the number of indices written to visibleChunkIndices. This will not exceed visibleChunkIndicesSize. */ virtual uint32_t getVisibleChunkIndices(uint32_t* visibleChunkIndices, uint32_t visibleChunkIndicesSize) const = 0; /** Get the number of graph nodes for this actor. May be used in conjunction with getGraphNodeIndices. NOTE: Wrapper function over low-level function call, see NvBlastActorGetGraphNodeCount for details. \return the number of graph node indices for the actor. */ virtual uint32_t getGraphNodeCount() const = 0; /** Retrieve a list of graph node indices for the actor into the given array. NOTE: Wrapper function over low-level function call, see NvBlastActorGetGraphNodeIndices for details. \param[in] graphNodeIndices User-supplied array to be filled in with indices of graph nodes for this actor. \param[in] graphNodeIndicesSize The size of the graphNodeIndices array. To receive all graph node indices, the size must be at least that given by getGraphNodeCount(). \return the number of indices written to graphNodeIndices. This will not exceed graphNodeIndicesSize. */ virtual uint32_t getGraphNodeIndices(uint32_t* graphNodeIndices, uint32_t graphNodeIndicesSize) const = 0; /** Access the bond health data for an actor. NOTE: Wrapper function over low-level function call, see NvBlastActorGetBondHealths for details. \return the array of bond healths for the actor's family, or NULL if the actor is invalid. */ virtual const float* getBondHealths() const = 0; /** Returns the upper-bound number of actors which can be created by splitting this actor. NOTE: Wrapper function over low-level function call, see NvBlastActorGetMaxActorCountForSplit for details. \return the upper-bound number of actors which can be created by splitting this actor. */ virtual uint32_t getSplitMaxActorCount() const = 0; /** Report whether this actor is in 'pending' state. Being in 'pending' state leads to actor being processed by group. \return true iff actor is in 'pending' state. */ virtual bool isPending() const = 0; /** Apply damage to this actor. Actual damage processing is deferred till the group worker process() call. Sets actor in 'pending' state. It's the user's responsibility to keep programParams pointer alive until the group endProcess() call. \param[in] program A NvBlastDamageProgram containing damage shaders. \param[in] programParams Parameters for the NvBlastDamageProgram. */ virtual void damage(const NvBlastDamageProgram& program, const void* programParams) = 0; /** Creates fracture commands for the actor using an NvBlastMaterialFunction. Cannot be called during group processing, in that case a warning will be raised and function will do nothing. NOTE: Wrapper function over low-level function call, see NvBlastActorGenerateFracture for details. \param[in,out] commands Target buffers to hold generated commands. To avoid data loss, provide an entry for every support chunk and every bond in the original actor. 
\param[in] program A NvBlastDamageProgram containing damage shaders. \param[in] programParams Parameters for the NvBlastDamageProgram. */ virtual void generateFracture(NvBlastFractureBuffers* commands, const NvBlastDamageProgram& program, const void* programParams) const = 0; /** Function applies the direct fracture and breaks graph bonds/edges as necessary. Sets actor in 'pending' state if any bonds or chunks were damaged. Dispatches FractureCommand events. NOTE: Calls NvBlastActorApplyFracture internally. see NvBlastActorApplyFracture for details. \param[in,out] eventBuffers Target buffers to hold applied fracture events. May be NULL, in which case events are not reported. To avoid data loss, provide an entry for every lower-support chunk and every bond in the original actor. \param[in] commands The fracture commands to process. */ virtual void applyFracture(NvBlastFractureBuffers* eventBuffers, const NvBlastFractureBuffers* commands) = 0; /** The number of joints currently attached to this actor. \return the number of TkJoints that are currently attached to this actor. */ virtual uint32_t getJointCount() const = 0; /** Retrieve an array of pointers (into the user-supplied buffer) to joints. \param[out] joints A user-supplied array of TkJoint pointers. \param[in] jointsSize The number of elements available to write into the joints array. \return the number of TkJoint pointers written to the joints array. */ virtual uint32_t getJoints(TkJoint** joints, uint32_t jointsSize) const = 0; /** Whether or not this actor is bound to an external body using a bond with an invalid chunk index to represent the NRF. NOTE: Wrapper function over low-level function call NvBlastActorHasExternalBonds. \return true iff this actor contains the "external" support graph node, created when a bond contains the UINT32_MAX value for one of their chunkIndices. */ virtual bool hasExternalBonds() const = 0; // DEPRICATED: remove on next major version bump inline bool isBoundToWorld() const { return this->hasExternalBonds(); }; }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKACTOR_H
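A sketch of the count-then-fill query pattern and of deferred damage on a TkActor, assuming `actor` is in a TkGroup and that `program`/`programParams` were prepared elsewhere (for example with the damage shaders extension); the standard-library buffer is only illustrative.

#include <vector>

// Two-call pattern: query the count, then fill a buffer of at least that size.
const uint32_t visibleCount = actor.getVisibleChunkCount();
std::vector<uint32_t> visibleChunks(visibleCount);
actor.getVisibleChunkIndices(visibleChunks.data(), visibleCount);

// Deferred damage: this only marks the actor 'pending'; the actual fracture work
// happens during group processing, and programParams must remain alive until the
// group's endProcess() call (see the damage() documentation above).
actor.damage(program, &programParams);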
NVIDIA-Omniverse/PhysX/blast/include/toolkit/NvBlastTkType.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKTYPE_H #define NVBLASTTKTYPE_H #include "NvBlastTypes.h" //! @file //! //! @brief Defines the API for the NvBlastExtTkType class namespace Nv { namespace Blast { /** Interface for static (class) type data. This data is used for identification in streams, class-specific object queries in TkFramework, etc. Only classes derived from TkIdentifiable use TkType data. */ class TkType { public: /** The class name. \return the class name. */ virtual const char* getName() const = 0; /** The data format version for this class. When deserializing, this version must match the current version. If not, the user may convert the data format using the format conversion extension. \return the version number. */ virtual uint32_t getVersion() const = 0; /** Test for equality. This type is used in static (per-class) data, so types are equal exactly when their addresses are equal. \param[in] type The TkType to compare with this TkType. \return true if this type equals the input type, false otherwise. */ bool operator == (const TkType& type) const { return &type == this; } }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKTYPE_H
NVIDIA-Omniverse/PhysX/blast/include/toolkit/NvBlastTkJoint.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. //! @file //! //! @brief Defines the API for the NvBlastExtTkJoint class #ifndef NVBLASTTKJOINT_H #define NVBLASTTKJOINT_H #include "NvBlastTkObject.h" #include "NvVec3.h" namespace Nv { namespace Blast { /** The data contained in a TkJoint. */ struct TkJointData { TkActor* actors[2]; //!< The TkActor objects joined by the joint uint32_t chunkIndices[2]; //!< The chunk indices within the corresponding TkActor objects joined by the joint. The indexed chunks will be support chunks. nvidia::NvVec3 attachPositions[2]; //!< The position of the joint relative to each TkActor }; /** A TkJoint may join two different TkActors, or be created internally within a single TkActor. When a TkActor is created from a TkAsset with jointed bonds (the asset is created using a TkAssetDesc with joint flags on bonds, see TkAssetDesc) then internal TkJoint objects are created and associated with every TkActor created from that TkAsset. The user only gets notification of the internal TkJoint objects when the TkActor is split into separate TkActor objects that hold the support chunks joined by an internal TkJoint. The user will be notified when the TkActor objects that are attached to TkJoint objects change, or are released. In that case, a TkEvent with a TkJointUpdateEvent payload is dispatched to TkEventListener objects registered with the TkFamily objects to which the actors belong. */ class TkJoint : public TkObject { public: /** Retrieve the data in this joint. \return a TkJointData containing this joint's data. */ virtual const TkJointData getData() const = 0; }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKJOINT_H
NVIDIA-Omniverse/PhysX/blast/include/toolkit/NvBlastTkObject.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. //! @file //! //! @brief Defines the API for the NvBlastExtTkObject abstract base class #ifndef NVBLASTTKOBJECT_H #define NVBLASTTKOBJECT_H namespace Nv { namespace Blast { /** Base class for all objects in Tk. All TkObjects are releasable. */ class TkObject { public: /** Constructor clears userData. */ TkObject() : userData(nullptr) {} // Object API /** Release this object and free associated memory. */ virtual void release() = 0; protected: /** Destructor is virtual and not public - use the release() method instead of explicitly deleting a TkObject */ virtual ~TkObject() {} public: // Data /** Pointer field available to the user. */ void* userData; }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKOBJECT_H
NVIDIA-Omniverse/PhysX/blast/include/toolkit/NvBlastTkFramework.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. //! @file //! //! @brief Defines the API for the NvBlastExtTkFramework class #ifndef NVBLASTTKFRAMEWORK_H #define NVBLASTTKFRAMEWORK_H #include "NvBlastTkType.h" #include "NvBlastTkEvent.h" #include "NvPreprocessor.h" #include "NvBlastTypes.h" #include "NvVec3.h" namespace Nv { namespace Blast { // Forward declarations class TkObject; class TkEventDispatcher; class TkAsset; struct TkGroupDesc; class TkGroup; class TkActor; class TkJoint; class TkIdentifiable; struct TkAssetJointDesc; /** Descriptor for asset creation Used to create a TkAsset. This may be used by an authoring tool to describe the asset to be created. The TkAssetDesc is a simple extension of the low-level NvBlastAsset descriptor, NvBlastAssetDesc. */ struct TkAssetDesc : public NvBlastAssetDesc { /** Flags which may be associated with each bond described in the base NvBlastAssetDesc. */ enum BondFlags { NoFlags = 0, /** If this flag is set then a TkJoint will be created joining the support chunks jointed by the bond. These joints will remain "quiet" until the actor is split in such a way that the joint joins two different actors. In that case, a TkJointUpdateEvent will be dispatched with subtype External. (See TkJointUpdateEvent.) */ BondJointed = (1 << 0) }; /** An array of size bondCount, see BondFlags. If NULL, all flags are assumed to be NoFlags. */ const uint8_t* bondFlags; /** Constructor sets sane default values. The zero chunkCount will cause TkFramework::createAsset(...) to fail, though gracefully. */ TkAssetDesc() : bondFlags(nullptr) { chunkCount = bondCount = 0; chunkDescs = nullptr; bondDescs = nullptr; } }; /** Descriptor for actor creation. The TkActorDesc is a simple extension of the low-level NvBlastActor descriptor, NvBlastActorDesc. 
*/ struct TkActorDesc : public NvBlastActorDesc { const TkAsset* asset; //!< The TkAsset to instance /** Constructor sets sane default values */ TkActorDesc(const TkAsset* inAsset = nullptr) : asset(inAsset) { uniformInitialBondHealth = uniformInitialLowerSupportChunkHealth = 1.0f; initialBondHealths = initialSupportChunkHealths = nullptr; } }; /** Descriptor for joint creation. */ struct TkJointDesc { TkFamily* families[2]; //!< The TkFamily objects containing the chunks joined by the joint uint32_t chunkIndices[2]; //!< The chunk indices within the corresponding TkFamily objects joined by the joint. The indexed chunks will be support chunks. nvidia::NvVec3 attachPositions[2]; //!< The position of the joint relative to each TkActor which owns the chunks jointed by this joint }; /** Struct-enum to index object types handled by the framework */ struct TkTypeIndex { enum Enum { Asset = 0, //!< TkAsset object type Family, //!< TkFamily object type Group, //!< TkGroup object type TypeCount }; }; /** BlastTk Framework. The framework exists as a singleton and is used to create objects, deserialize object streams, and hold references to identified objects (TkAsset, TkFamily, and TkGroup) which may be recalled by their GUIDs. */ class TkFramework { public: /** Release this framework and all contained objects. Global singleton is set to NULL. */ virtual void release() = 0; /** To find the type information for a given TkIdentifiable-derived class, use this function with the TkTypeIndex::Enum corresponding to the desired class name. \param[in] typeIndex Enumerated object type (see TkTypeIndex). \return the type object associated with the object's class. */ virtual const TkType* getType(TkTypeIndex::Enum typeIndex) const = 0; /** Look up an object derived from TkIdentifiable by its ID. \param[in] id The ID of the object to look up (see NvBlastID). \return a pointer to the object if it exists, NULL otherwise. */ virtual TkIdentifiable* findObjectByID(const NvBlastID& id) const = 0; /** The number of TkIdentifiable-derived objects in the framework of the given type. \param[in] type The type object for the given type. \return the number of objects that currently exist of the given type. */ virtual uint32_t getObjectCount(const TkType& type) const = 0; /** Retrieve an array of pointers (into the user-supplied buffer) to TkIdentifiable-derived objects of the given type. \param[out] buffer A user-supplied array of TkIdentifiable pointers. \param[in] bufferSize The number of elements available to write into buffer. \param[in] type The type object for the given type. \param[in] indexStart The starting index of the object. \return the number of TkIdentifiable pointers written to the buffer. */ virtual uint32_t getObjects(TkIdentifiable** buffer, uint32_t bufferSize, const TkType& type, uint32_t indexStart = 0) const = 0; //////// Asset creation //////// /** Helper function to build and apply a chunk reorder map, so that chunk descriptors are properly ordered for the createAsset function. This is a convenience wrapper for the low-level NvBlastReorderAssetDescChunks function. This function may modify both the chunkDescs and bondDescs array, since rearranging chunk descriptors requires re-indexing within the bond descriptors. \param[in] chunkDescs Array of chunk descriptors of size chunkCount. It will be updated accordingly. \param[in] chunkCount The number of chunk descriptors. \param[in] bondDescs Array of bond descriptors of size bondCount. It will be updated accordingly.
\param[in] bondCount The number of bond descriptors. \param[in] chunkReorderMap If not NULL, must be a pointer to a uint32_t array of size chunkCount. Maps old chunk indices to the reordered chunk indices. \param[in] keepBondNormalChunkOrder If true, bond normals will be flipped if their chunk index order was reversed by the reorder map. \return true iff the chunks did not require reordering (chunkReorderMap is the identity map). */ virtual bool reorderAssetDescChunks(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, NvBlastBondDesc* bondDescs, uint32_t bondCount, uint32_t* chunkReorderMap = nullptr, bool keepBondNormalChunkOrder = false) const = 0; /** Helper function to ensure (check and update) support coverage of chunks, required for asset creation via the createAsset function. This is a convenience wrapper for the low-level NvBlastEnsureAssetExactSupportCoverage function. The chunk descriptors may have their support flags modified to ensure exact coverage. \param[in] chunkDescs An array of chunk descriptors. \param[in] chunkCount The size of the chunkDescs array. \return true iff coverage was already exact. */ virtual bool ensureAssetExactSupportCoverage(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount) const = 0; /** Create an asset from the given descriptor. \param[in] desc The asset descriptor (see TkAssetDesc). \return the created asset, if the descriptor was valid and memory was available for the operation. Otherwise, returns NULL. */ virtual TkAsset* createAsset(const TkAssetDesc& desc) = 0; /** Create an asset from a low-level NvBlastAsset. \param[in] assetLL The low-level NvBlastAsset to encapsulate. \param[in] jointDescs Optional joint descriptors to add to the new asset. \param[in] jointDescCount The number of joint descriptors in the jointDescs array. If non-zero, jointDescs cannot be NULL. \param[in] ownsAsset Whether this TkAsset owns the NvBlastAsset and is thus responsible for freeing it. \return the created asset, if memory was available for the operation. Otherwise, returns NULL. */ virtual TkAsset* createAsset(const NvBlastAsset* assetLL, Nv::Blast::TkAssetJointDesc* jointDescs = nullptr, uint32_t jointDescCount = 0, bool ownsAsset = false) = 0; //////// Group creation //////// /** Create a group from the given descriptor. A group is a processing unit, to which the user may add TkActors. New actors generated from splitting a TkActor are automatically put into the same group. However, any actor may be removed from its group and optionally placed into another group, or left groupless. \param[in] desc The group descriptor (see TkGroupDesc). \return the created group, if the descriptor was valid and memory was available for the operation. Otherwise, returns NULL. */ virtual TkGroup* createGroup(const TkGroupDesc& desc) = 0; //////// Actor creation //////// /** Create an actor from the given descriptor. The actor will be the first member of a new TkFamily. \param[in] desc The actor descriptor (see TkActorDesc). \return the created actor, if the descriptor was valid and memory was available for the operation. Otherwise, returns NULL. */ virtual TkActor* createActor(const TkActorDesc& desc) = 0; //////// Joint creation //////// /** Create a joint from the given descriptor. The following restrictions apply: * Only one joint may be created between any two support chunks. * A joint cannot be created between chunks within the same actor using this method. See TkAssetDesc for a description of bond joint flags, which will create internal joints within an actor.
\param[in] desc The joint descriptor (see TkJointDesc). \return the created joint, if the descriptor was valid and memory was available for the operation. Otherwise, returns NULL. */ virtual TkJoint* createJoint(const TkJointDesc& desc) = 0; protected: /** Destructor is virtual and not public - use the release() method instead of explicitly deleting the TkFramework */ virtual ~TkFramework() {} }; } // namespace Blast } // namespace Nv //////// Global API to Create and Access Framework //////// /** Create a new TkFramework. This creates a global singleton, and will fail if a TkFramework object already exists. \return the new TkFramework if successful, NULL otherwise. */ NV_C_API Nv::Blast::TkFramework* NvBlastTkFrameworkCreate(); /** Retrieve a pointer to the global TkFramework singleton (if it exists). \return the pointer to the global TkFramework (NULL if none exists). */ NV_C_API Nv::Blast::TkFramework* NvBlastTkFrameworkGet(); #endif // ifndef NVBLASTTKFRAMEWORK_H
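// Usage sketch: the typical asset / actor creation flow using the framework API above.
// It assumes the chunk and bond descriptors have already been authored elsewhere; their
// construction is outside the scope of this sketch.
#include "NvBlastTkFramework.h"

inline Nv::Blast::TkActor* createActorFromDescriptors(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount,
                                                      NvBlastBondDesc* bondDescs, uint32_t bondCount)
{
    using namespace Nv::Blast;

    // Use the existing global framework, or create it if this is the first call.
    TkFramework* framework = NvBlastTkFrameworkGet();
    if (framework == nullptr)
        framework = NvBlastTkFrameworkCreate();
    if (framework == nullptr)
        return nullptr;

    // Put the descriptors into the order createAsset expects and make support coverage
    // exact.  Both helpers may modify the descriptor arrays.
    framework->reorderAssetDescChunks(chunkDescs, chunkCount, bondDescs, bondCount);
    framework->ensureAssetExactSupportCoverage(chunkDescs, chunkCount);

    TkAssetDesc assetDesc;
    assetDesc.chunkCount = chunkCount;
    assetDesc.chunkDescs = chunkDescs;
    assetDesc.bondCount  = bondCount;
    assetDesc.bondDescs  = bondDescs;
    TkAsset* asset = framework->createAsset(assetDesc);
    if (asset == nullptr)
        return nullptr;

    // Instance the asset; the new actor becomes the first member of a new TkFamily.
    TkActorDesc actorDesc(asset);
    return framework->createActor(actorDesc);
}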
NVIDIA-Omniverse/PhysX/blast/include/toolkit/NvBlastTkGroup.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. //! @file //! //! @brief Defines the API for the NvBlastExtTkGroup class #ifndef NVBLASTTKGROUP_H #define NVBLASTTKGROUP_H #include "NvBlastTkIdentifiable.h" namespace Nv { namespace Blast { // Forward declarations class TkActor; /** Descriptor for a TkGroup. TkGroup uses a number of TkGroupWorker to process its actors. @see TkGroupWorker, TkGroup::setWorkerCount */ struct TkGroupDesc { uint32_t workerCount; //!< The number of expected TkWorkers to process the TkGroup concurrently. }; /** Used to collect internal counters using TkGroup::getStats (for profile builds only) @see TkGroup::getStats() */ struct TkGroupStats { NvBlastTimers timers; //!< Accumulated time spent in blast low-level functions, see NvBlastTimers uint32_t processedActorsCount; //!< Accumulated number of processed actors in all TkWorker int64_t workerTime; //!< Accumulated time spent executing TkWorker::run. Unit is ticks, see NvBlastTimers. }; /** A worker as provided by TkGroup::acquireWorker(). It manages the necessary memory for parallel processing. The group can be processed concurrently by calling process() from different threads using a different TkGroupWorker each. TkActors that have been damaged with applyFracture() such that they may be split into separate actors are split during group processing. TkActors that have damage queued through the actor's damage() function will be fractured and split during group processing. */ class TkGroupWorker { public: /** Process a job of this worker's TkGroup. \param[in] jobId a job id in the range [0, TkGroup::startProcess()) */ virtual void process(uint32_t jobId) = 0; }; /** A group is a processing unit, to which the user may add TkActors. New actors generated from splitting a TkActor are automatically put into the same group. However, any actor may be removed from its group and placed into another group (or no group) by the user's choice.
When the group's process function is called, all actors' damage buffers will be processed and turned into fracture events, and the actors are split if applicable. This work can be done in multiple threads with the help of TkGroupWorker: instead of calling the process function, commence the procedure with startProcess, which returns the number of jobs to process. Each concurrent thread uses an acquired TkGroupWorker to process the jobs. Over the whole procedure, each job must be processed once and only once. Jobs can be processed in any order. TkGroupWorkers can be returned and acquired later by another task. After processing every job and returning all the workers to the group, endProcess concludes the procedure. */ class TkGroup : public TkIdentifiable { public: /** Add the actor to this group, if the actor does not currently belong to a group. \param[in] actor The actor to add. \return true if successful, false otherwise. */ virtual bool addActor(TkActor& actor) = 0; /** The number of actors currently in this group. \return the number of TkActors that currently exist in this group. */ virtual uint32_t getActorCount() const = 0; /** Retrieve an array of pointers (into the user-supplied buffer) to actors. \param[out] buffer A user-supplied array of TkActor pointers. \param[in] bufferSize The number of elements available to write into buffer. \param[in] indexStart The starting index of the actor. \return the number of TkActor pointers written to the buffer. */ virtual uint32_t getActors(TkActor** buffer, uint32_t bufferSize, uint32_t indexStart = 0) const = 0; /** Lock this group for processing concurrently with TkGroupWorker. The group is unlocked again with the endProcess() function. \return The number of jobs to process. TkGroupWorker::process must be called once for each jobID from 0 to this number-1. See TkGroup::process for a single threaded example. */ virtual uint32_t startProcess() = 0; /** Unlock this group after all jobs were processed with TkGroupWorker. All workers must have been returned with returnWorker(). This function gathers the results of the split operations on the actors in this group. Events will be dispatched to notify listeners of new and deleted actors. Note that groups concurrently dispatching events for the same TkFamily require synchronization in the TkFamily's listener. However, concurrent use of endProcess is not recommended in this version. It should be called from the main thread. \return true if the group was processing. */ virtual bool endProcess() = 0; /** Set the expected number of worker threads that will process this group concurrently. */ virtual void setWorkerCount(uint32_t workerCount) = 0; /** \return The total number of workers allocated for this group. */ virtual uint32_t getWorkerCount() const = 0; /** Acquire one worker to process the group concurrently on a thread. The worker must be returned with returnWorker() before endProcess() is called on its group. \return A worker for this group (at most getWorkerCount) or nullptr if none is available. */ virtual TkGroupWorker* acquireWorker() = 0; /** Return a worker previously acquired with acquireWorker() to this TkGroup. \param[in] worker The TkGroupWorker previously acquired from this TkGroup. */ virtual void returnWorker(TkGroupWorker* worker) = 0; /** Helper function to process the group synchronously on a single thread. */ void process(); /** For profile builds only, request stats of the last successful processing. Inactive in other builds.
The times and counters reported are accumulated over all the TkGroupWorker objects taking part in the processing. \param[out] stats The struct to be filled in. */ virtual void getStats(TkGroupStats& stats) const = 0; }; } // namespace Blast } // namespace Nv NV_INLINE void Nv::Blast::TkGroup::process() { uint32_t jobCount = startProcess(); if (jobCount > 0) { TkGroupWorker* worker = acquireWorker(); for (uint32_t i = 0; i < jobCount; i++) { worker->process(i); } returnWorker(worker); } endProcess(); } #endif // ifndef NVBLASTTKGROUP_H
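// Usage sketch: multi-threaded group processing with the startProcess / acquireWorker /
// process / returnWorker / endProcess procedure described above.  std::thread is used
// purely for illustration; any task system (e.g. TkGroupTaskManager) can drive the same
// sequence of calls.
#include <atomic>
#include <thread>
#include <vector>
#include "NvBlastTkGroup.h"

inline void processGroupConcurrently(Nv::Blast::TkGroup& group, uint32_t threadCount)
{
    group.setWorkerCount(threadCount);

    const uint32_t jobCount = group.startProcess();
    if (jobCount > 0)
    {
        std::atomic<uint32_t> nextJob(0);
        std::vector<std::thread> threads;
        for (uint32_t t = 0; t < threadCount; ++t)
        {
            threads.emplace_back([&group, &nextJob, jobCount]()
            {
                Nv::Blast::TkGroupWorker* worker = group.acquireWorker();
                if (worker == nullptr)
                    return; // more threads than available workers

                // A shared counter hands out job ids so each job is processed exactly once.
                for (uint32_t job = nextJob.fetch_add(1); job < jobCount; job = nextJob.fetch_add(1))
                    worker->process(job);

                group.returnWorker(worker);
            });
        }
        for (std::thread& thread : threads)
            thread.join();
    }

    // Gather split results and dispatch events; called from the main thread as recommended.
    group.endProcess();
}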
NVIDIA-Omniverse/PhysX/blast/include/toolkit/NvBlastTkAsset.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. //! @file //! //! @brief Defines the API for the NvBlastExtTkAsset class #ifndef NVBLASTTKASSET_H #define NVBLASTTKASSET_H #include "NvBlastTkIdentifiable.h" #include "NvBlastTypes.h" #include "NvVec3.h" // Forward declarations struct NvBlastAsset; namespace Nv { namespace Blast { /** A descriptor stored by a TkAsset for an internal joint. Internal joints are created when a TkAsset is instanced into a TkActor. */ struct TkAssetJointDesc { uint32_t nodeIndices[2]; //!< The graph node indices corresponding to the support chunks joined by a joint nvidia::NvVec3 attachPositions[2]; //!< The joint's attachment positions in asset-local space }; /** The static data associated with a destructible actor. TkAsset encapsulates an NvBlastAsset. In addition to the NvBlastAsset, the TkAsset stores joint descriptors (see TkAssetJointDesc). */ class TkAsset : public TkIdentifiable { public: /** Access to underlying low-level asset. \return a pointer to the (const) low-level NvBlastAsset object. */ virtual const NvBlastAsset* getAssetLL() const = 0; /** Get the number of chunks in this asset. NOTE: Wrapper function over low-level function call, see NvBlastAssetGetChunkCount for details. \return the number of chunks in the asset. */ virtual uint32_t getChunkCount() const = 0; /** Get the number of leaf chunks in the given asset. NOTE: Wrapper function over low-level function call, see NvBlastAssetGetLeafChunkCount for details. \return the number of leaf chunks in the asset. */ virtual uint32_t getLeafChunkCount() const = 0; /** Get the number of bonds in the given asset. NOTE: Wrapper function over low-level function call, see NvBlastAssetGetBondCount for details. \return the number of bonds in the asset. */ virtual uint32_t getBondCount() const = 0; /** Access an array of chunks of the given asset. NOTE: Wrapper function over low-level function call, see NvBlastAssetGetChunks for details. \return a pointer to an array of chunks of the asset. 
*/ virtual const NvBlastChunk* getChunks() const = 0; /** Access an array of bonds of the given asset. NOTE: Wrapper function over low-level function call, see NvBlastAssetGetBonds for details. \return a pointer to an array of bonds of the asset. */ virtual const NvBlastBond* getBonds() const = 0; /** Access the support graph for the given asset. NOTE: Wrapper function over low-level function call, see NvBlastAssetGetSupportGraph for details. \return the support graph struct for the given asset. */ virtual const NvBlastSupportGraph getGraph() const = 0; /** Retrieve the size (in bytes) of the LL asset. NOTE: Wrapper function over low-level function call, see NvBlastAssetGetSize for details. \return the size of the data block (in bytes). */ virtual uint32_t getDataSize() const = 0; /** The number of internal TkJoint objects that will be created when this asset is instanced into a TkActor (see TkFramework::createActor). These joints will not trigger TkJointUpdateEvent events until the actor is split into actors such that a joint connects two actors. At this time the actor's family will dispatch a TkJointUpdateEvent::External event during a call to TkGroup::endProcess() (see TkGroup). \return the number of descriptors for internal joints. */ virtual uint32_t getJointDescCount() const = 0; /** The descriptors for the internal joints created when this asset is instanced. (See getJointDescCount.) \return a pointer to the array of descriptors for internal joints. */ virtual const TkAssetJointDesc* getJointDescs() const = 0; }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKASSET_H
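// Usage sketch: reading back the counts and internal joint descriptors stored in a
// TkAsset.  Only the accessors declared above are used; the printf formatting is
// purely illustrative.
#include <cstdio>
#include "NvBlastTkAsset.h"

inline void printAssetJointDescs(const Nv::Blast::TkAsset& asset)
{
    const uint32_t jointDescCount = asset.getJointDescCount();
    const Nv::Blast::TkAssetJointDesc* jointDescs = asset.getJointDescs();

    std::printf("asset: %u chunks, %u bonds, %u internal joints\n",
                asset.getChunkCount(), asset.getBondCount(), jointDescCount);

    for (uint32_t i = 0; i < jointDescCount; ++i)
    {
        const Nv::Blast::TkAssetJointDesc& desc = jointDescs[i];
        std::printf("  joint %u: graph nodes (%u, %u), attach (%.3f, %.3f, %.3f) / (%.3f, %.3f, %.3f)\n",
                    i, desc.nodeIndices[0], desc.nodeIndices[1],
                    desc.attachPositions[0].x, desc.attachPositions[0].y, desc.attachPositions[0].z,
                    desc.attachPositions[1].x, desc.attachPositions[1].y, desc.attachPositions[1].z);
    }
}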
NVIDIA-Omniverse/PhysX/blast/include/toolkit/NvBlastTkIdentifiable.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. //! @file //! //! @brief Defines the API for the NvBlastExtTkIdentifiable abstract base class #ifndef NVBLASTTKIDENTIFIABLE_H #define NVBLASTTKIDENTIFIABLE_H #include "NvBlastTkObject.h" #include "NvBlastTypes.h" namespace Nv { namespace Blast { // Forward declarations class TkType; /** TkIdentifiable objects have getID and setID methods for individual objects. They also have a type (class) identification. */ class TkIdentifiable : public TkObject { public: // Identifiable API /** Return the ID associated with this object. \return the ID for this object. */ virtual const NvBlastID& getID() const = 0; /** Set the ID for this object. */ virtual void setID(const NvBlastID& id) = 0; /** Access to the static (class) type data for this object. \return the static type data for this object type. */ virtual const TkType& getType() const = 0; /** Integer field available to the user which may be serialized. */ uint64_t userIntData; }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKIDENTIFIABLE_H
NVIDIA-Omniverse/PhysX/blast/include/toolkit/NvBlastTkGroupTaskManager.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. //! @file //! //! @brief Defines a task manager API for multithreading Tk operations #ifndef NVBLASTTKGROUPTASKMANAGER_H #define NVBLASTTKGROUPTASKMANAGER_H #include "NvBlastTypes.h" // Forward declarations namespace nvidia { namespace task { class NvTaskManager; } } namespace Nv { namespace Blast { // Forward declarations class TkGroup; /** Uses an nvidia::task::NvTaskManager to process a TkGroup concurrently. */ class NV_DLL_EXPORT TkGroupTaskManager { protected: virtual ~TkGroupTaskManager() {} public: /** Construct using an existing nvidia::task::NvTaskManager and TkGroup. The TkGroup can be set later with setGroup(). */ static TkGroupTaskManager* create(nvidia::task::NvTaskManager&, TkGroup* = nullptr); /** Set the group to process. Cannot be changed while a group is being processed. */ virtual void setGroup(TkGroup*) = 0; /** Start processing the group. The parallelizing strategy is to have all worker tasks running concurrently. The number of tasks started may be smaller than the requested value if the task manager's dispatcher thread count or the number of group jobs is smaller. \param[in] workerCount The number of worker tasks to start, 0 uses the dispatcher's worker thread count. \return The number of worker tasks started. If 0, processing did not start and wait() will never return true. */ virtual uint32_t process(uint32_t workerCount = 0) = 0; /** Wait for the group to end processing. When processing has finished, TkGroup::endProcess is executed. \param[in] block true: does not return until the group has been processed. false: return immediately if workers are still processing the group. \return true if group processing was completed (and the group was actually processing). */ virtual bool wait(bool block = true) = 0; /** Release this object. */ virtual void release() = 0; }; } // namespace Blast } // namespace Nv #endif // NVBLASTTKGROUPTASKMANAGER_H
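// Usage sketch: driving a TkGroup with the task manager wrapper above.  The
// nvidia::task::NvTaskManager instance is assumed to be created and owned by the
// application; its construction is not covered here.
#include "NvBlastTkGroupTaskManager.h"

inline void processGroupWithTaskManager(nvidia::task::NvTaskManager& taskManager, Nv::Blast::TkGroup& group)
{
    Nv::Blast::TkGroupTaskManager* groupTaskManager =
        Nv::Blast::TkGroupTaskManager::create(taskManager, &group);
    if (groupTaskManager == nullptr)
        return;

    // Kick off worker tasks (0 = use the dispatcher's worker thread count) ...
    if (groupTaskManager->process() > 0)
    {
        // ... and block until the group has finished processing; per the documentation
        // above, TkGroup::endProcess is executed when processing has finished.
        groupTaskManager->wait(true);
    }

    groupTaskManager->release();
}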
NVIDIA-Omniverse/PhysX/blast/include/toolkit/NvBlastTkFamily.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. //! @file //! //! @brief Defines the API for the NvBlastExtTkFamily class #ifndef NVBLASTTKFAMILY_H #define NVBLASTTKFAMILY_H #include "NvBlastTkIdentifiable.h" // Forward declarations struct NvBlastFamily; namespace Nv { namespace Blast { // Forward declarations class TkActor; class TkAsset; class TkGroup; class TkEventListener; /** The TkFamily is associated with the TkActor that is instanced from a TkAsset, as well as all descendant TkActors generated by splitting TkActors within the family. It encapsulates an NvBlastFamily, and also holds a material which will be used by default on all TkActors during damage functions. */ class TkFamily : public TkIdentifiable { public: /** Access to underlying low-level family. \return a pointer to the (const) low-level NvBlastFamily object. */ virtual const NvBlastFamily* getFamilyLL() const = 0; /** Every family has an associated asset (the TkAsset which was instanced to create the first member of the family). \return a pointer to the (const) TkAsset object. */ virtual const TkAsset* getAsset() const = 0; /** The number of actors currently in this family. \return the number of TkActors that currently exist in this family. */ virtual uint32_t getActorCount() const = 0; /** Retrieve an array of pointers (into the user-supplied buffer) to actors. \param[out] buffer A user-supplied array of TkActor pointers. \param[in] bufferSize The number of elements available to write into buffer. \param[in] indexStart The starting index of the actor. \return the number of TkActor pointers written to the buffer. */ virtual uint32_t getActors(TkActor** buffer, uint32_t bufferSize, uint32_t indexStart = 0) const = 0; /** Add a user implementation of TkEventListener to this family's list of listeners. These listeners will receive all split and fracture events generated by TkActor objects in this family. They will also receive joint update events when TkJoint objects that are (or were) associated with a TkActor in this family are updated.
\param[in] l The event listener to add. */ virtual void addListener(TkEventListener& l) = 0; /** Remove a TkEventListener from this family's list of listeners. \param[in] l The event listener to remove. */ virtual void removeListener(TkEventListener& l) = 0; /** This function applies fracture buffers to the relevant actors (those which contain the corresponding bond/chunk) in the family. \param[in] commands The fracture commands to process. */ virtual void applyFracture(const NvBlastFractureBuffers* commands) = 0; /** A function to reinitialize this family with a new family. The new family must be created from the same low-level asset, but can be in any other state. As a result, split events (TkEvent::Split) will be dispatched reflecting the resulting changes (created and removed actors). Afterwards the family will contain a copy of the new family and all actors' low-level actor pointers will be updated. \param[in] newFamily The NvBlastFamily to use to reinitialize this family. \param[in] group The group for new actors to be placed in. */ virtual void reinitialize(const NvBlastFamily* newFamily, TkGroup* group = nullptr) = 0; }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKFAMILY_H
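// Usage sketch: the user-supplied-buffer pattern used by getActors() above.  The same
// pattern applies to TkGroup::getActors and TkFramework::getObjects.
#include <vector>
#include "NvBlastTkFamily.h"

inline std::vector<Nv::Blast::TkActor*> gatherFamilyActors(const Nv::Blast::TkFamily& family)
{
    // Size the buffer from the current actor count, then let getActors fill it.
    std::vector<Nv::Blast::TkActor*> actors(family.getActorCount());
    if (!actors.empty())
    {
        const uint32_t written = family.getActors(actors.data(), static_cast<uint32_t>(actors.size()));
        actors.resize(written);
    }
    return actors;
}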
NVIDIA-Omniverse/PhysX/blast/include/toolkit/NvBlastTk.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. //! @file //! //! @brief Single include file to bring in headers for the blast sdk toolkit (tk) library #ifndef NVBLASTTK_H #define NVBLASTTK_H /** This is the main include header for the BlastTk SDK, for users who want to use a single #include file. Alternatively, one can instead directly #include a subset of the below files. */ #include "NvBlastTkFramework.h" #include "NvBlastTkAsset.h" #include "NvBlastTkActor.h" #include "NvBlastTkJoint.h" #include "NvBlastTkFamily.h" #include "NvBlastTkGroup.h" #endif // ifndef NVBLASTTK_H
NVIDIA-Omniverse/PhysX/blast/include/toolkit/NvBlastTkEvent.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. //! @file //! //! @brief Defines the API for the NvBlastExtTkEvent class #ifndef NVBLASTTKEVENT_H #define NVBLASTTKEVENT_H #include <vector> #include "NvBlastTypes.h" namespace Nv { namespace Blast { // Forward declarations class TkObject; class TkActor; class TkFamily; class TkJoint; /** By the time events are dispatched, a specific TkActor may have been split and become invalid. This structure caches the state before invalidation happened. */ struct TkActorData { TkFamily* family; //!< TkFamily of the originating TkActor void* userData; //!< TkActor.userData of the originating TkActor uint32_t index; //!< TkActor::getIndex() of the originating TkActor }; /** Event data dispatched to TkEventListener objects. The user may implement the abstract TkEventListener interface and pass the listener object to a BlastTk object which dispatches events. (See for example TkFamily.) */ struct TkEvent { // Enums enum Type { Split, //!< Sent when a TkActor is split. See TkSplitEvent. FractureCommand, //!< Sent when a TkActor generates fracture commands using TkActor::generateFracture. FractureEvent, //!< Sent when a TkActor is fractured using TkActor::applyFracture. JointUpdate, //!< Sent when TkJoints change their attachment state. See TkJointUpdateEvent. TypeCount }; // Data const void* payload; //!< Type-dependent payload data Type type; //!< See the Type enum, above /** Casts the payload data into its type-dependent format. \return the payload for an event of type T */ template<typename T> const T* getPayload() const { return reinterpret_cast<const T*>(payload); } }; /** Payload for TkEvent::Split events When this event is sent, the parent TkActor that was split is no longer valid. Therefore it is not referenced directly in the event data. Instead, its TkFamily, index within the TkFamily, and userData are stored. In addition, this event gives the child TkActors generated by the split.
*/ struct TkSplitEvent { enum { EVENT_TYPE = TkEvent::Split }; TkActorData parentData; //!< The data of the parent TkActor that was split uint32_t numChildren; //!< The number of children into which the parent TkActor was split TkActor** children; //!< An array of pointers to the children into which the TkActor was split }; /** Payload for the TkEvent::FractureCommand events Fracture commands used to apply fracture to a TkActor. */ struct TkFractureCommands { enum { EVENT_TYPE = TkEvent::FractureCommand }; TkActorData tkActorData; //!< The data of the TkActor that received the fracture command NvBlastFractureBuffers buffers; //!< The fracture commands used to modify the TkActor }; /** Payload for the TkEvent::FractureEvent events Fracture events resulting from applying fracture to a TkActor. */ struct TkFractureEvents { enum { EVENT_TYPE = TkEvent::FractureEvent }; TkActorData tkActorData; //!< The data of the TkActor that received the fracture command NvBlastFractureBuffers buffers; //!< The fracture result of the modified TkActor uint32_t bondsDamaged; //!< number of damaged bonds (health remains) uint32_t bondsBroken; //!< number of broken bonds (health exhausted) uint32_t chunksDamaged; //!< number of damaged chunks (health remains) including child chunks uint32_t chunksBroken; //!< number of broken chunks (health exhausted) including child chunks }; /** Payload for the TkEvent::JointUpdate events Event type sent when a TkJoint's TkActor references change. This may indicate a joint becoming external, simply changing actors when split events occur on one or both of the actors, or when one or both of the actors are destroyed. */ struct TkJointUpdateEvent { enum { EVENT_TYPE = TkEvent::JointUpdate }; enum Subtype { External, //!< A joint that used to be internal to a single TkActor now joins two different TkActors Changed, //!< One or both of the joint's attached TkActors has changed. The previous TkActors were already distinct, differentiating this from the External case Unreferenced //!< The joint's actors have been set to NULL. The joint will not be used again, and the user may release the TkJoint at this time }; TkJoint* joint; //!< The joint being updated Subtype subtype; //!< The type of update event this is (see Subtype) }; /** Interface for a listener of TkEvent data. The user must implement this interface and pass it to the object which will dispatch the events. */ class TkEventListener { public: /** Interface to be implemented by the user. Events will be sent by BlastTk through a call to this function. \param[in] events The array of events being dispatched. \param[in] eventCount The number of events in the array. */ virtual void receive(const TkEvent* events, uint32_t eventCount) = 0; }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKEVENT_H
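// Usage sketch: a minimal TkEventListener implementation dispatching on the event types
// defined above.  The handler bodies are placeholders for application logic; releasing an
// unreferenced joint follows the note on TkJointUpdateEvent::Unreferenced.
#include <cstdio>
#include "NvBlastTkEvent.h"
#include "NvBlastTkJoint.h"

class ExampleListener : public Nv::Blast::TkEventListener
{
public:
    void receive(const Nv::Blast::TkEvent* events, uint32_t eventCount) override
    {
        using namespace Nv::Blast;
        for (uint32_t i = 0; i < eventCount; ++i)
        {
            const TkEvent& e = events[i];
            switch (e.type)
            {
            case TkEvent::Split:
            {
                const TkSplitEvent* split = e.getPayload<TkSplitEvent>();
                std::printf("actor %u split into %u children\n",
                            split->parentData.index, split->numChildren);
                break;
            }
            case TkEvent::FractureEvent:
            {
                const TkFractureEvents* fracture = e.getPayload<TkFractureEvents>();
                std::printf("fracture: %u bonds broken, %u chunks broken\n",
                            fracture->bondsBroken, fracture->chunksBroken);
                break;
            }
            case TkEvent::JointUpdate:
            {
                const TkJointUpdateEvent* jointUpdate = e.getPayload<TkJointUpdateEvent>();
                if (jointUpdate->subtype == TkJointUpdateEvent::Unreferenced)
                    jointUpdate->joint->release(); // joint will never be used again
                break;
            }
            default:
                break;
            }
        }
    }
};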
NVIDIA-Omniverse/PhysX/blast/include/globals/NvBlastAllocator.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. //! @file //! //! @brief Allocator utility API in the NvBlastGlobals library #ifndef NVBLASTALLOCATOR_H #define NVBLASTALLOCATOR_H #include "NvAllocatorCallback.h" #include "NvBlastGlobals.h" /** This file contains nvidia::NvAllocatorCallback wrappers compatible with NvShared containers. */ namespace Nv { namespace Blast { /** Allocator uses global nvidia::NvAllocatorCallback. */ class Allocator { public: Allocator(const char* = 0) { } void* allocate(size_t size, const char* filename, int line) { return NvBlastGlobalGetAllocatorCallback()->allocate(size, nullptr, filename, line); } void deallocate(void* ptr) { NvBlastGlobalGetAllocatorCallback()->deallocate(ptr); } }; } // namespace Blast } // namespace Nv #endif // #ifndef NVBLASTALLOCATOR_H
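// Usage sketch: the wrapper above simply forwards to the global allocator callback, so a
// raw allocate/deallocate pair looks like this.  In practice it is intended to be plugged
// in as the allocator type of NvShared-compatible containers, per the note above.
#include "NvBlastAllocator.h"

inline void allocatorExample()
{
    Nv::Blast::Allocator allocator("allocatorExample");
    void* memory = allocator.allocate(256, __FILE__, __LINE__);
    // ... use the 256-byte block ...
    allocator.deallocate(memory);
}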
NVIDIA-Omniverse/PhysX/blast/include/globals/NvBlastDebugRender.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. //! @file //! //! @brief Debug render utility API in the NvBlastGlobals library #pragma once #include <NvCTypes.h> namespace Nv { namespace Blast { /** Used to store a single line and colour for debug rendering. */ struct DebugLine { DebugLine(const NvcVec3& p0, const NvcVec3& p1, uint32_t c) : pos0(p0), color0(c), pos1(p1), color1(c) {} NvcVec3 pos0; uint32_t color0; NvcVec3 pos1; uint32_t color1; }; /** Debug Buffer */ struct DebugBuffer { const DebugLine* lines; uint32_t lineCount; }; } // namespace Blast } // namespace Nv
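// Usage sketch: filling a DebugBuffer from a growable line list.  The 0xAARRGGBB color
// packing shown here is an assumption; the structs above do not prescribe an encoding.
#include <vector>
#include "NvBlastDebugRender.h"

inline Nv::Blast::DebugBuffer buildAxisCross(std::vector<Nv::Blast::DebugLine>& lines)
{
    const NvcVec3 origin = { 0.0f, 0.0f, 0.0f };
    const NvcVec3 xAxis  = { 1.0f, 0.0f, 0.0f };
    const NvcVec3 yAxis  = { 0.0f, 1.0f, 0.0f };
    const NvcVec3 zAxis  = { 0.0f, 0.0f, 1.0f };

    lines.push_back(Nv::Blast::DebugLine(origin, xAxis, 0xffff0000)); // x axis, red
    lines.push_back(Nv::Blast::DebugLine(origin, yAxis, 0xff00ff00)); // y axis, green
    lines.push_back(Nv::Blast::DebugLine(origin, zAxis, 0xff0000ff)); // z axis, blue

    // The buffer just views the line storage; it stays valid as long as 'lines' does.
    Nv::Blast::DebugBuffer buffer;
    buffer.lines = lines.data();
    buffer.lineCount = static_cast<uint32_t>(lines.size());
    return buffer;
}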
NVIDIA-Omniverse/PhysX/blast/include/globals/NvCMath.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. //! @file //! //! @brief Vector math utility functions #ifndef NVCMATH_H #define NVCMATH_H #include "NvCTypes.h" /** * Some basic operators for NvcVec2 and NvcVec3 */ /* NvcVec2 operators */ // Vector sum inline NvcVec2 operator + (const NvcVec2& v, const NvcVec2& w) { return { v.x + w.x, v.y + w.y }; } // Vector difference inline NvcVec2 operator - (const NvcVec2& v, const NvcVec2& w) { return { v.x - w.x, v.y - w.y }; } // Vector component product inline NvcVec2 operator * (const NvcVec2& v, const NvcVec2& w) { return { v.x * w.x, v.y * w.y }; } // Vector component quotient inline NvcVec2 operator / (const NvcVec2& v, const NvcVec2& w) { return { v.x / w.x, v.y / w.y }; } // Vector product with scalar (on right) inline NvcVec2 operator * (const NvcVec2& v, float f) { return { v.x * f, v.y * f }; } // Vector product with scalar (on left) inline NvcVec2 operator * (float f, const NvcVec2& v) { return { f * v.x, f * v.y }; } // Vector quotient with scalar (on right) inline NvcVec2 operator / (const NvcVec2& v, float f) { return { v.x / f, v.y / f }; } // Vector quotient with scalar (on left) inline NvcVec2 operator / (float f, const NvcVec2& v) { return { f / v.x, f / v.y }; } // Inner product inline float operator | (const NvcVec2& v, const NvcVec2& w) { return v.x * w.x + v.y * w.y; } // Vector negation inline NvcVec2 operator - (const NvcVec2& v) { return { -v.x, -v.y }; } /* NvcVec2 assignment operators */ // Vector sum with assignment inline NvcVec2& operator += (NvcVec2& v, const NvcVec2& w) { return v = v + w; } // Vector difference with assignment inline NvcVec2& operator -= (NvcVec2& v, const NvcVec2& w) { return v = v - w; } // Vector component product with assignment inline NvcVec2& operator *= (NvcVec2& v, const NvcVec2& w) { return v = v * w; } // Vector component quotient with assignment inline NvcVec2& operator /= (NvcVec2& v, const NvcVec2& w) { return v = v / w; } // Vector product with scalar with assignment inline NvcVec2& operator *= (NvcVec2& v, float f) { 
return v = v * f; } // Vector quotient with scalar with assignment inline NvcVec2& operator /= (NvcVec2& v, float f) { return v = v / f; } /* NvcVec3 operators */ // Vector sum inline NvcVec3 operator + (const NvcVec3& v, const NvcVec3& w) { return { v.x + w.x, v.y + w.y, v.z + w.z }; } // Vector difference inline NvcVec3 operator - (const NvcVec3& v, const NvcVec3& w) { return { v.x - w.x, v.y - w.y, v.z - w.z }; } // Vector component product inline NvcVec3 operator * (const NvcVec3& v, const NvcVec3& w) { return { v.x * w.x, v.y * w.y, v.z * w.z }; } // Vector component quotient inline NvcVec3 operator / (const NvcVec3& v, const NvcVec3& w) { return { v.x / w.x, v.y / w.y, v.z / w.z }; } // Vector product with scalar (on right) inline NvcVec3 operator * (const NvcVec3& v, float f) { return { v.x * f, v.y * f, v.z * f }; } // Vector product with scalar (on left) inline NvcVec3 operator * (float f, const NvcVec3& v) { return { f * v.x, f * v.y, f * v.z }; } // Vector quotient with scalar (on right) inline NvcVec3 operator / (const NvcVec3& v, float f) { return { v.x / f, v.y / f, v.z / f }; } // Vector quotient with scalar (on left) inline NvcVec3 operator / (float f, const NvcVec3& v) { return { f / v.x, f / v.y, f / v.z }; } // Inner product inline float operator | (const NvcVec3& v, const NvcVec3& w) { return v.x * w.x + v.y * w.y + v.z * w.z; } // Cross product inline NvcVec3 operator ^ (const NvcVec3& v, const NvcVec3& w) { return { v.y * w.z - v.z * w.y, v.z * w.x - v.x * w.z, v.x * w.y - v.y * w.x }; } // Vector negation inline NvcVec3 operator - (const NvcVec3& v) { return { -v.x, -v.y, -v.z }; } /* NvcVec3 assignment operators */ // Vector sum with assignment inline NvcVec3& operator += (NvcVec3& v, const NvcVec3& w) { return v = v + w; } // Vector difference with assignment inline NvcVec3& operator -= (NvcVec3& v, const NvcVec3& w) { return v = v - w; } // Vector component product with assignment inline NvcVec3& operator *= (NvcVec3& v, const NvcVec3& w) { return v = v * w; } // Vector component quotient with assignment inline NvcVec3& operator /= (NvcVec3& v, const NvcVec3& w) { return v = v / w; } // Vector product with scalar with assignment inline NvcVec3& operator *= (NvcVec3& v, float f) { return v = v * f; } // Vector quotient with scalar with assignment inline NvcVec3& operator /= (NvcVec3& v, float f) { return v = v / f; } #endif // #ifndef NVCMATH_H
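A brief usage sketch of the operators defined above. This is illustrative only, assuming the header is included as "NvCMath.h"; the helper functions faceNormal and triangleArea are hypothetical, not part of the library.

#include "NvCMath.h"
#include <cmath>

// Hypothetical helper: face normal of triangle (a, b, c), not normalized.
inline NvcVec3 faceNormal(const NvcVec3& a, const NvcVec3& b, const NvcVec3& c)
{
    return (b - a) ^ (c - a);   // operator ^ is the cross product defined above
}

// Hypothetical helper: triangle area = half the length of the cross product.
inline float triangleArea(const NvcVec3& a, const NvcVec3& b, const NvcVec3& c)
{
    const NvcVec3 n = faceNormal(a, b, c);
    return 0.5f * std::sqrt(n | n);   // operator | is the inner product, so (n | n) is the squared length
}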
NVIDIA-Omniverse/PhysX/blast/include/globals/NvBlastGlobals.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTGLOBALS_H #define NVBLASTGLOBALS_H #include <new> #include "NvBlastTypes.h" #include "NvAllocatorCallback.h" #include "NvErrorCallback.h" #include "NvProfiler.h" //! @file //! //! @brief API for the NvBlastGlobals library //////// Global API to Access Global nvidia::NvAllocatorCallback, nvidia::NvErrorCallback, and nvidia::NvProfilerCallback //////// /** Retrieve a pointer to the global nvidia::NvAllocatorCallback. Default implementation with std allocator is used if user didn't provide their own. It always exists, 'nullptr' will never be returned. \return the pointer to the global nvidia::NvAllocatorCallback. */ NV_C_API nvidia::NvAllocatorCallback* NvBlastGlobalGetAllocatorCallback(); /** Set global nvidia::NvAllocatorCallback. If 'nullptr' is passed the default nvidia::NvAllocatorCallback with std allocator is set. */ NV_C_API void NvBlastGlobalSetAllocatorCallback(nvidia::NvAllocatorCallback* allocatorCallback); /** Retrieve a pointer to the global nvidia::NvErrorCallback. Default implementation which writes messages to stdout is used if user didn't provide their own. It always exists, 'nullptr' will never be returned. \return the pointer to the global nvidia::NvErrorCallback. */ NV_C_API nvidia::NvErrorCallback* NvBlastGlobalGetErrorCallback(); /** Set global nvidia::NvErrorCallback. If 'nullptr' is passed the default nvidia::NvErrorCallback that writes messages to stdout is set. */ NV_C_API void NvBlastGlobalSetErrorCallback(nvidia::NvErrorCallback* errorCallback); /** Retrieve a pointer to the global nvidia::NvProfilerCallback. Returns nullptr if none is set. \return the pointer to the global nvidia::NvProfilerCallback. */ NV_C_API nvidia::NvProfilerCallback* NvBlastGlobalGetProfilerCallback(); /** Set a custom profiler callback. May be nullptr (the default). 
*/ NV_C_API void NvBlastGlobalSetProfilerCallback(nvidia::NvProfilerCallback* profilerCallback); //////// Helper Global Functions //////// namespace Nv { namespace Blast { /** Logging wrapper compatible with NvBlastLog. @see NvBlastLog. Pass this function to LowLevel function calls in order to get logging into the global nvidia::NvErrorCallback. */ NV_INLINE void logLL(int type, const char* msg, const char* file, int line) { nvidia::NvErrorCode::Enum errorCode = nvidia::NvErrorCode::eNO_ERROR; switch (type) { case NvBlastMessage::Error: errorCode = nvidia::NvErrorCode::eINVALID_OPERATION; break; case NvBlastMessage::Warning: errorCode = nvidia::NvErrorCode::eDEBUG_WARNING; break; case NvBlastMessage::Info: errorCode = nvidia::NvErrorCode::eDEBUG_INFO; break; case NvBlastMessage::Debug: errorCode = nvidia::NvErrorCode::eNO_ERROR; break; } NvBlastGlobalGetErrorCallback()->reportError(errorCode, msg, file, line); } } // namespace Blast } // namespace Nv //////// Allocator macros //////// /** Alloc/Free macros that use the global nvidia::NvAllocatorCallback. Memory allocated this way is 16-byte aligned. */ #define NVBLAST_ALLOC(_size) NvBlastGlobalGetAllocatorCallback()->allocate(_size, nullptr, __FILE__, __LINE__) #define NVBLAST_ALLOC_NAMED(_size, _name) NvBlastGlobalGetAllocatorCallback()->allocate(_size, _name, __FILE__, __LINE__) #define NVBLAST_FREE(_mem) NvBlastGlobalGetAllocatorCallback()->deallocate(_mem) /** Placement new. Example: Foo* foo = NVBLAST_NEW(Foo) (params); */ #define NVBLAST_NEW(T) new (NvBlastGlobalGetAllocatorCallback()->allocate(sizeof(T), #T, __FILE__, __LINE__)) T /** Matching delete for NVBLAST_NEW. The obj pointer may be NULL (to match the behavior of standard C++ delete). Example: NVBLAST_DELETE(foo, Foo); */ #define NVBLAST_DELETE(obj, T) \ do \ { \ if ((obj) != nullptr) \ { \ (obj)->~T(); \ NvBlastGlobalGetAllocatorCallback()->deallocate(obj); \ } \ } while (false) //////// Log macros //////// /** Logging macros that use the global nvidia::NvErrorCallback. */ #define NVBLAST_LOG(_code, _msg) NvBlastGlobalGetErrorCallback()->reportError(_code, _msg, __FILE__, __LINE__) #define NVBLAST_LOG_ERROR(_msg) NVBLAST_LOG(nvidia::NvErrorCode::eINVALID_OPERATION, _msg) #define NVBLAST_LOG_WARNING(_msg) NVBLAST_LOG(nvidia::NvErrorCode::eDEBUG_WARNING, _msg) #define NVBLAST_LOG_INFO(_msg) NVBLAST_LOG(nvidia::NvErrorCode::eDEBUG_INFO, _msg) #define NVBLAST_LOG_DEBUG(_msg) NVBLAST_LOG(nvidia::NvErrorCode::eNO_ERROR, _msg) /** Check macros that use the global nvidia::NvErrorCallback. Pass an expression to check; if it evaluates to false, the macro logs the message and runs the '_onFail' code you passed. */ #define NVBLAST_CHECK(_code, _expr, _msg, _onFail) \ { \ if(!(_expr)) \ { \ NVBLAST_LOG(_code, _msg); \ { _onFail; }; \ } \ } #define NVBLAST_CHECK_ERROR(_expr, _msg, _onFail) NVBLAST_CHECK(nvidia::NvErrorCode::eINVALID_OPERATION, _expr, _msg, _onFail) #define NVBLAST_CHECK_WARNING(_expr, _msg, _onFail) NVBLAST_CHECK(nvidia::NvErrorCode::eDEBUG_WARNING, _expr, _msg, _onFail) #define NVBLAST_CHECK_INFO(_expr, _msg, _onFail) NVBLAST_CHECK(nvidia::NvErrorCode::eDEBUG_INFO, _expr, _msg, _onFail) #define NVBLAST_CHECK_DEBUG(_expr, _msg, _onFail) NVBLAST_CHECK(nvidia::NvErrorCode::eNO_ERROR, _expr, _msg, _onFail) //////// Misc //////// // Macro to load a uint32_t (or larger) with four characters #define NVBLAST_FOURCC(_a, _b, _c, _d) ( (uint32_t)(_a) | (uint32_t)(_b)<<8 | (uint32_t)(_c)<<16 | (uint32_t)(_d)<<24 ) #endif // ifndef NVBLASTGLOBALS_H
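A short usage sketch of the globals and macros above, assuming the header is included as "NvBlastGlobals.h". The Widget type and the example function are made up for illustration; the macros and global accessors are the ones declared in this header.

#include "NvBlastGlobals.h"

// Hypothetical type allocated through the Blast global allocator.
struct Widget
{
    explicit Widget(int id) : mId(id) {}
    int mId;
};

void exampleUsage(int id)
{
    // Log through the global nvidia::NvErrorCallback and bail out on bad input.
    NVBLAST_CHECK_ERROR(id >= 0, "exampleUsage: id must be non-negative", return);

    // Placement new through the 16-byte-aligned global allocator.
    Widget* w = NVBLAST_NEW(Widget) (id);

    // ... use the widget ...

    // Matching delete; passing nullptr would be a no-op.
    NVBLAST_DELETE(w, Widget);
}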
NVIDIA-Omniverse/PhysX/blast/include/shared/NvFoundation/NvCTypes.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_C_TYPES_H #define NV_C_TYPES_H #include "NvPreprocessor.h" #ifdef _MSC_VER #ifndef _INTPTR #define _INTPTR 0 #endif #endif #include <stdint.h> /** C type for 2-float vectors */ typedef struct { float x, y; } NvcVec2; /** C type for 3-float vectors */ typedef struct { float x, y, z; } NvcVec3; /** C type for 4-float vectors */ typedef struct { float x, y, z, w; } NvcVec4; /** C type for quaternions */ typedef struct { float x, y, z, w; } NvcQuat; /** C type for transforms */ typedef struct { NvcQuat q; NvcVec3 p; } NvcTransform; /** C type for 3x4 matrices */ typedef struct { NvcVec3 column0, column1, column2, column3; } NvcMat34; /** C type for 3x3 matrices */ typedef struct { NvcVec3 column0, column1, column2; } NvcMat33; /** C type for 4x4 matrices */ typedef struct { NvcVec4 column0, column1, column2, column3; } NvcMat44; /** C type for 3d bounding box */ typedef struct { NvcVec3 minimum; NvcVec3 maximum; } NvcBounds3; /** C type for a plane */ typedef struct { NvcVec3 n; float d; } NvcPlane; /** C type for 2-integer vectors */ typedef struct { int32_t x, y; } NvcVec2i; /** C type for 3-integer vectors */ typedef struct { int32_t x, y, z; } NvcVec3i; /** C type for 4-integer vectors */ typedef struct { int32_t x, y, z, w; } NvcVec4i; /** @} */ #endif // NV_C_TYPES_H
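Because these are plain C structs, they can be brace-initialized from both C and C++. A tiny illustrative sketch (the variable names are made up):

#include "NvCTypes.h"

// Axis-aligned box spanning (-1,-1,-1) to (1,1,1).
static const NvcBounds3 unitBounds = { { -1.0f, -1.0f, -1.0f }, { 1.0f, 1.0f, 1.0f } };

// Identity transform: unit quaternion (x, y, z, w) = (0, 0, 0, 1), zero translation.
static const NvcTransform identityTransform = { { 0.0f, 0.0f, 0.0f, 1.0f }, { 0.0f, 0.0f, 0.0f } };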
NVIDIA-Omniverse/PhysX/blast/include/shared/NvFoundation/NvIO.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NVFOUNDATION_NVIO_H #define NV_NVFOUNDATION_NVIO_H /** \addtogroup common @{ */ #include "NvSimpleTypes.h" #if !NV_DOXYGEN namespace nvidia { #endif /** \brief Input stream class for I/O. The user needs to supply a NvInputStream implementation to a number of methods to allow the SDK to read data. */ class NvInputStream { public: /** \brief read from the stream. The number of bytes read may be less than the number requested. \param[in] dest the destination address to which the data will be read \param[in] count the number of bytes requested \return the number of bytes read from the stream. */ virtual uint32_t read(void* dest, uint32_t count) = 0; virtual ~NvInputStream() { } }; /** \brief Input data class for I/O which provides random read access. The user needs to supply a NvInputData implementation to a number of methods to allow the SDK to read data. */ class NvInputData : public NvInputStream { public: /** \brief return the length of the input data \return size in bytes of the input data */ virtual uint32_t getLength() const = 0; /** \brief seek to the given offset from the start of the data. \param[in] offset the offset to seek to. If greater than the length of the data, this call is equivalent to seek(length); */ virtual void seek(uint32_t offset) = 0; /** \brief return the current offset from the start of the data \return the offset to seek to. */ virtual uint32_t tell() const = 0; virtual ~NvInputData() { } }; /** \brief Output stream class for I/O. The user needs to supply a NvOutputStream implementation to a number of methods to allow the SDK to write data. */ class NvOutputStream { public: /** \brief write to the stream. The number of bytes written may be less than the number sent. 
\param[in] src the source address from which the data will be written \param[in] count the number of bytes to be written \return the number of bytes written to the stream by this call. */ virtual uint32_t write(const void* src, uint32_t count) = 0; virtual ~NvOutputStream() { } }; #if !NV_DOXYGEN } // namespace nvidia #endif /** @} */ #endif // #ifndef NV_NVFOUNDATION_NVIO_H
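A minimal memory-backed sketch of the stream interfaces above, assuming the header is included as "NvIO.h". The class names are hypothetical and error handling is kept to simple clamping.

#include "NvIO.h"
#include <cstdint>
#include <cstring>
#include <vector>

// Random-access reader over a fixed memory buffer.
class MemoryInputData : public nvidia::NvInputData
{
public:
    MemoryInputData(const void* data, uint32_t length)
        : mData(static_cast<const uint8_t*>(data)), mLength(length), mOffset(0) {}

    uint32_t read(void* dest, uint32_t count) override
    {
        const uint32_t remaining = mLength - mOffset;
        const uint32_t copied = count < remaining ? count : remaining;
        std::memcpy(dest, mData + mOffset, copied);
        mOffset += copied;
        return copied;   // may be less than requested, as documented above
    }

    uint32_t getLength() const override { return mLength; }
    void seek(uint32_t offset) override { mOffset = offset < mLength ? offset : mLength; }
    uint32_t tell() const override { return mOffset; }

private:
    const uint8_t* mData;
    uint32_t mLength;
    uint32_t mOffset;
};

// Writer that appends to a growable in-memory buffer.
class MemoryOutputStream : public nvidia::NvOutputStream
{
public:
    uint32_t write(const void* src, uint32_t count) override
    {
        const uint8_t* bytes = static_cast<const uint8_t*>(src);
        mBuffer.insert(mBuffer.end(), bytes, bytes + count);
        return count;
    }

    const std::vector<uint8_t>& buffer() const { return mBuffer; }

private:
    std::vector<uint8_t> mBuffer;
};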
NVIDIA-Omniverse/PhysX/blast/include/shared/NvFoundation/NvProfiler.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_PROFILER_H #define NV_PROFILER_H #include <NvSimpleTypes.h> namespace nvidia { class NvProfilerCallback; namespace shdfnd { NV_FOUNDATION_API NvProfilerCallback *getProfilerCallback(); NV_FOUNDATION_API void setProfilerCallback(NvProfilerCallback *profiler); } } namespace nvidia { struct NvProfileContext { enum Enum { eNONE = 0 //!< value for no specific profile context. \see NvProfilerCallback::zoneAt }; }; /** \brief The pure virtual callback interface for general purpose instrumentation and profiling of GameWorks modules as well as applications */ class NvProfilerCallback { protected: virtual ~NvProfilerCallback() {} public: /************************************************************************************************************************** Instrumented profiling events ***************************************************************************************************************************/ /** \brief Mark the beginning of a nested profile block \param[in] eventName Event name. Must be a persistent const char * \param[in] detached True for cross thread events \param[in] contextId the context id of this zone. Zones with the same id belong to the same group. 0 is used for no specific group. \return Returns implementation-specific profiler data for this event */ virtual void* zoneStart(const char* eventName, bool detached, uint64_t contextId) = 0; /** \brief Mark the end of a nested profile block \param[in] profilerData The data returned by the corresponding zoneStart call (or NULL if not available) \param[in] eventName The name of the zone ending, must match the corresponding name passed with 'zoneStart'. Must be a persistent const char *. \param[in] detached True for cross thread events. Should match the value passed to zoneStart. \param[in] contextId The context of this zone. Should match the value passed to zoneStart. 
\note eventName plus contextId can be used to uniquely match up start and end of a zone. */ virtual void zoneEnd(void* profilerData, const char* eventName, bool detached, uint64_t contextId) = 0; }; class NvProfileScoped { public: NV_FORCE_INLINE NvProfileScoped(const char* eventName, bool detached, uint64_t contextId) : mCallback(nvidia::shdfnd::getProfilerCallback()) { if (mCallback) { mEventName = eventName; mDetached = detached; mContextId = contextId; mProfilerData = mCallback->zoneStart(mEventName, mDetached, mContextId); } } ~NvProfileScoped(void) { if (mCallback) { mCallback->zoneEnd(mProfilerData, mEventName, mDetached, mContextId); } } nvidia::NvProfilerCallback* mCallback; void* mProfilerData; const char* mEventName; bool mDetached; uint64_t mContextId; }; } // end of NVIDIA namespace #if NV_DEBUG || NV_CHECKED || NV_PROFILE #define NV_PROFILE_ZONE(name,context_id) nvidia::NvProfileScoped NV_CONCAT(_scoped,__LINE__)(name,false,context_id) #define NV_PROFILE_START_CROSSTHREAD(name,context_id) if ( nvidia::shdfnd::getProfilerCallback() ) nvidia::shdfnd::getProfilerCallback()->zoneStart(name,true,context_id) #define NV_PROFILE_STOP_CROSSTHREAD(name,context_id) if ( nvidia::shdfnd::getProfilerCallback() ) nvidia::shdfnd::getProfilerCallback()->zoneEnd(nullptr,name,true,context_id) #else #define NV_PROFILE_ZONE(name,context_id) #define NV_PROFILE_START_CROSSTHREAD(name,context_id) #define NV_PROFILE_STOP_CROSSTHREAD(name,context_id) #endif #define NV_PROFILE_POINTER_TO_U64( pointer ) static_cast<uint64_t>(reinterpret_cast<size_t>(pointer)) #endif
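A minimal profiler callback sketch, assuming the header is included as "NvProfiler.h" and the application links against the foundation library that implements nvidia::shdfnd::setProfilerCallback. The class name and the printf output format are made up; profiling zones only compile in when NV_DEBUG, NV_CHECKED, or NV_PROFILE is defined, as shown above.

#include "NvProfiler.h"
#include <cstdio>

// Prints zone begin/end events and returns no per-zone data.
class PrintingProfiler : public nvidia::NvProfilerCallback
{
public:
    void* zoneStart(const char* eventName, bool detached, uint64_t contextId) override
    {
        std::printf("begin %s (detached=%d, context=%llu)\n",
                    eventName, int(detached), (unsigned long long)contextId);
        return nullptr;
    }

    void zoneEnd(void* /*profilerData*/, const char* eventName, bool detached, uint64_t contextId) override
    {
        std::printf("end   %s (detached=%d, context=%llu)\n",
                    eventName, int(detached), (unsigned long long)contextId);
    }
};

void profiledWork()
{
    static PrintingProfiler profiler;
    nvidia::shdfnd::setProfilerCallback(&profiler);

    NV_PROFILE_ZONE("profiledWork", nvidia::NvProfileContext::eNONE);
    // ... work to be measured ...
}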
NVIDIA-Omniverse/PhysX/blast/include/shared/NvFoundation/NvVec3.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NVFOUNDATION_NVVEC3_H #define NV_NVFOUNDATION_NVVEC3_H /** \addtogroup foundation @{ */ #include "NvMath.h" #if !NV_DOXYGEN namespace nvidia { #endif /** \brief 3 Element vector class. This is a 3-dimensional vector class with public data members. */ class NvVec3 { public: /** \brief default constructor leaves data uninitialized. */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3() { } /** \brief zero constructor. */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3(NvZERO r) : x(0.0f), y(0.0f), z(0.0f) { NV_UNUSED(r); } /** \brief Assigns scalar parameter to all elements. Useful to initialize to zero or one. \param[in] a Value to assign to elements. */ explicit NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3(float a) : x(a), y(a), z(a) { } /** \brief Initializes from 3 scalar parameters. \param[in] nx Value to initialize X component. \param[in] ny Value to initialize Y component. \param[in] nz Value to initialize Z component. */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3(float nx, float ny, float nz) : x(nx), y(ny), z(nz) { } /** \brief Copy ctor. */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3(const NvVec3& v) : x(v.x), y(v.y), z(v.z) { } // Operators /** \brief Assignment operator */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3& operator=(const NvVec3& p) { x = p.x; y = p.y; z = p.z; return *this; } /** \brief element access */ NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE float& operator[](unsigned int index) { NV_ASSERT(index <= 2); return reinterpret_cast<float*>(this)[index]; } /** \brief element access */ NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE const float& operator[](unsigned int index) const { NV_ASSERT(index <= 2); return reinterpret_cast<const float*>(this)[index]; } /** \brief returns true if the two vectors are exactly equal. 
*/ NV_CUDA_CALLABLE NV_FORCE_INLINE bool operator==(const NvVec3& v) const { return x == v.x && y == v.y && z == v.z; } /** \brief returns true if the two vectors are not exactly equal. */ NV_CUDA_CALLABLE NV_FORCE_INLINE bool operator!=(const NvVec3& v) const { return x != v.x || y != v.y || z != v.z; } /** \brief tests for exact zero vector */ NV_CUDA_CALLABLE NV_FORCE_INLINE bool isZero() const { return x == 0.0f && y == 0.0f && z == 0.0f; } /** \brief returns true if all 3 elems of the vector are finite (not NAN or INF, etc.) */ NV_CUDA_CALLABLE NV_INLINE bool isFinite() const { return NvIsFinite(x) && NvIsFinite(y) && NvIsFinite(z); } /** \brief is normalized - used by API parameter validation */ NV_CUDA_CALLABLE NV_FORCE_INLINE bool isNormalized() const { const float unitTolerance = 1e-4f; return isFinite() && NvAbs(magnitude() - 1) < unitTolerance; } /** \brief returns the squared magnitude Avoids calling NvSqrt()! */ NV_CUDA_CALLABLE NV_FORCE_INLINE float magnitudeSquared() const { return x * x + y * y + z * z; } /** \brief returns the magnitude */ NV_CUDA_CALLABLE NV_FORCE_INLINE float magnitude() const { return NvSqrt(magnitudeSquared()); } /** \brief negation */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 operator-() const { return NvVec3(-x, -y, -z); } /** \brief vector addition */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 operator+(const NvVec3& v) const { return NvVec3(x + v.x, y + v.y, z + v.z); } /** \brief vector difference */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 operator-(const NvVec3& v) const { return NvVec3(x - v.x, y - v.y, z - v.z); } /** \brief scalar post-multiplication */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 operator*(float f) const { return NvVec3(x * f, y * f, z * f); } /** \brief scalar division */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 operator/(float f) const { f = 1.0f / f; return NvVec3(x * f, y * f, z * f); } /** \brief vector addition */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3& operator+=(const NvVec3& v) { x += v.x; y += v.y; z += v.z; return *this; } /** \brief vector difference */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3& operator-=(const NvVec3& v) { x -= v.x; y -= v.y; z -= v.z; return *this; } /** \brief scalar multiplication */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3& operator*=(float f) { x *= f; y *= f; z *= f; return *this; } /** \brief scalar division */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3& operator/=(float f) { f = 1.0f / f; x *= f; y *= f; z *= f; return *this; } /** \brief returns the scalar product of this and other. */ NV_CUDA_CALLABLE NV_FORCE_INLINE float dot(const NvVec3& v) const { return x * v.x + y * v.y + z * v.z; } /** \brief cross product */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 cross(const NvVec3& v) const { return NvVec3(y * v.z - z * v.y, z * v.x - x * v.z, x * v.y - y * v.x); } /** return a unit vector */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 getNormalized() const { const float m = magnitudeSquared(); return m > 0.0f ? *this * NvRecipSqrt(m) : NvVec3(0, 0, 0); } /** \brief normalizes the vector in place */ NV_CUDA_CALLABLE NV_FORCE_INLINE float normalize() { const float m = magnitude(); if(m > 0.0f) *this /= m; return m; } /** \brief normalizes the vector in place. Does nothing if vector magnitude is under NV_NORMALIZATION_EPSILON. Returns vector magnitude if >= NV_NORMALIZATION_EPSILON and 0.0f otherwise. */ NV_CUDA_CALLABLE NV_FORCE_INLINE float normalizeSafe() { const float mag = magnitude(); if(mag < NV_NORMALIZATION_EPSILON) return 0.0f; *this *= 1.0f / mag; return mag; } /** \brief normalizes the vector in place. 
Asserts if vector magnitude is under NV_NORMALIZATION_EPSILON. returns vector magnitude. */ NV_CUDA_CALLABLE NV_FORCE_INLINE float normalizeFast() { const float mag = magnitude(); NV_ASSERT(mag >= NV_NORMALIZATION_EPSILON); *this *= 1.0f / mag; return mag; } /** \brief a[i] * b[i], for all i. */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 multiply(const NvVec3& a) const { return NvVec3(x * a.x, y * a.y, z * a.z); } /** \brief element-wise minimum */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 minimum(const NvVec3& v) const { return NvVec3(NvMin(x, v.x), NvMin(y, v.y), NvMin(z, v.z)); } /** \brief returns MIN(x, y, z); */ NV_CUDA_CALLABLE NV_FORCE_INLINE float minElement() const { return NvMin(x, NvMin(y, z)); } /** \brief element-wise maximum */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 maximum(const NvVec3& v) const { return NvVec3(NvMax(x, v.x), NvMax(y, v.y), NvMax(z, v.z)); } /** \brief returns MAX(x, y, z); */ NV_CUDA_CALLABLE NV_FORCE_INLINE float maxElement() const { return NvMax(x, NvMax(y, z)); } /** \brief returns absolute values of components; */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 abs() const { return NvVec3(NvAbs(x), NvAbs(y), NvAbs(z)); } float x, y, z; }; NV_CUDA_CALLABLE static NV_FORCE_INLINE NvVec3 operator*(float f, const NvVec3& v) { return NvVec3(f * v.x, f * v.y, f * v.z); } #if !NV_DOXYGEN } // namespace nvidia #endif /** @} */ #endif // #ifndef NV_NVFOUNDATION_NVVEC3_H
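A brief usage sketch of NvVec3 (the helper functions are hypothetical): projecting a vector onto a direction and computing a triangle's unit normal.

#include "NvVec3.h"

using nvidia::NvVec3;

// Component of v along dir; dir does not need to be unit length.
inline NvVec3 project(const NvVec3& v, const NvVec3& dir)
{
    const NvVec3 n = dir.getNormalized();   // a zero direction yields the zero vector
    return n * v.dot(n);
}

// Unit normal of triangle (a, b, c); zero if the triangle is degenerate.
inline NvVec3 triangleNormal(const NvVec3& a, const NvVec3& b, const NvVec3& c)
{
    return (b - a).cross(c - a).getNormalized();
}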
NVIDIA-Omniverse/PhysX/blast/include/shared/NvFoundation/NvErrorCallback.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NVFOUNDATION_NVERRORCALLBACK_H #define NV_NVFOUNDATION_NVERRORCALLBACK_H /** \addtogroup foundation @{ */ #include "NvErrors.h" #if !NV_DOXYGEN namespace nvidia { #endif /** \brief User-defined interface class. Used by the library to emit debug information. \note The SDK state should not be modified from within any error reporting functions. <b>Threading:</b> The SDK sequences its calls to the output stream using a mutex, so the class need not be implemented in a thread-safe manner if the SDK is the only client. */ class NvErrorCallback { public: virtual ~NvErrorCallback() { } /** \brief Reports an error code. \param code Error code, see #NvErrorCode \param message Message to display. \param file File the error occurred in. \param line Line number the error occurred on. */ virtual void reportError(NvErrorCode::Enum code, const char* message, const char* file, int line) = 0; }; #if !NV_DOXYGEN } // namespace nvidia #endif /** @} */ #endif // #ifndef NV_NVFOUNDATION_NVERRORCALLBACK_H
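A minimal error callback sketch that writes to stderr; the class name and message format are illustrative. Such a callback could, for example, be installed with NvBlastGlobalSetErrorCallback from NvBlastGlobals.h above.

#include "NvErrorCallback.h"
#include <cstdio>

class StderrErrorCallback : public nvidia::NvErrorCallback
{
public:
    void reportError(nvidia::NvErrorCode::Enum code, const char* message,
                     const char* file, int line) override
    {
        std::fprintf(stderr, "[NvError %d] %s (%s:%d)\n", int(code), message, file, line);
    }
};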
NVIDIA-Omniverse/PhysX/blast/include/shared/NvFoundation/NvAssert.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NVFOUNDATION_NVASSERT_H #define NV_NVFOUNDATION_NVASSERT_H /** \addtogroup foundation @{ */ #include "Nv.h" #if !NV_DOXYGEN namespace nvidia { #endif /* Base class to handle assert failures */ class NvAssertHandler { public: virtual ~NvAssertHandler() { } virtual void operator()(const char* exp, const char* file, int line, bool& ignore) = 0; }; NV_FOUNDATION_API NvAssertHandler& NvGetAssertHandler(); NV_FOUNDATION_API void NvSetAssertHandler(NvAssertHandler& handler); #if !NV_DOXYGEN } // namespace nvidia #endif #if !NV_ENABLE_ASSERTS #define NV_ASSERT(exp) ((void)0) #define NV_ALWAYS_ASSERT_MESSAGE(exp) ((void)0) #define NV_ASSERT_WITH_MESSAGE(condition, message) ((void)0) #elif NV_SPU #include "ps3/NvPS3Assert.h" #else #if NV_VC #define NV_CODE_ANALYSIS_ASSUME(exp) \ __analysis_assume(!!(exp)) // This macro will be used to get rid of analysis warning messages if a NV_ASSERT is used // to "guard" illegal mem access, for example. #else #define NV_CODE_ANALYSIS_ASSUME(exp) #endif #define NV_ASSERT(exp) \ { \ static bool _ignore = false; \ ((void)((!!(exp)) || (!_ignore && (nvidia::NvGetAssertHandler()(#exp, __FILE__, __LINE__, _ignore), false)))); \ NV_CODE_ANALYSIS_ASSUME(exp); \ } #define NV_ALWAYS_ASSERT_MESSAGE(exp) \ { \ static bool _ignore = false; \ if(!_ignore) \ nvidia::NvGetAssertHandler()(exp, __FILE__, __LINE__, _ignore); \ } #define NV_ASSERT_WITH_MESSAGE(exp, message) \ { \ static bool _ignore = false; \ ((void)((!!(exp)) || (!_ignore && (nvidia::NvGetAssertHandler()(message, __FILE__, __LINE__, _ignore), false)))); \ NV_CODE_ANALYSIS_ASSUME(exp); \ } #endif #define NV_ALWAYS_ASSERT() NV_ASSERT(0) /** @} */ #endif // #ifndef NV_NVFOUNDATION_NVASSERT_H
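A sketch of a custom assert handler installed through nvidia::NvSetAssertHandler (the class and function names here are made up). Setting the ignore flag to true suppresses further reports from the same NV_ASSERT site, matching the static _ignore flag in the macros above.

#include "NvAssert.h"
#include <cstdio>

class LoggingAssertHandler : public nvidia::NvAssertHandler
{
public:
    void operator()(const char* exp, const char* file, int line, bool& ignore) override
    {
        std::fprintf(stderr, "assert failed: %s (%s:%d)\n", exp, file, line);
        ignore = true;   // do not report this assert site again
    }
};

void installAssertHandler()
{
    static LoggingAssertHandler handler;
    nvidia::NvSetAssertHandler(handler);
}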
NVIDIA-Omniverse/PhysX/blast/include/shared/NvFoundation/NvMath.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NVFOUNDATION_NVMATH_H #define NV_NVFOUNDATION_NVMATH_H /** \addtogroup foundation @{ */ #include "NvPreprocessor.h" #if NV_VC #pragma warning(push) #pragma warning(disable : 4985) // 'symbol name': attributes not present on previous declaration #endif #include <math.h> #if NV_VC #pragma warning(pop) #endif #include <float.h> #include "NvIntrinsics.h" #include "NvAssert.h" #if !NV_DOXYGEN namespace nvidia { #endif // constants static const float NvPi = float(3.141592653589793); static const float NvHalfPi = float(1.57079632679489661923); static const float NvTwoPi = float(6.28318530717958647692); static const float NvInvPi = float(0.31830988618379067154); static const float NvInvTwoPi = float(0.15915494309189533577); static const float NvPiDivTwo = float(1.57079632679489661923); static const float NvPiDivFour = float(0.78539816339744830962); /** \brief The return value is the greater of the two specified values. */ template <class T> NV_CUDA_CALLABLE NV_FORCE_INLINE T NvMax(T a, T b) { return a < b ? b : a; } //! overload for float to use fsel on xbox template <> NV_CUDA_CALLABLE NV_FORCE_INLINE float NvMax(float a, float b) { return intrinsics::selectMax(a, b); } /** \brief The return value is the lesser of the two specified values. */ template <class T> NV_CUDA_CALLABLE NV_FORCE_INLINE T NvMin(T a, T b) { return a < b ? a : b; } template <> //! overload for float to use fsel on xbox NV_CUDA_CALLABLE NV_FORCE_INLINE float NvMin(float a, float b) { return intrinsics::selectMin(a, b); } /* Many of these are just implemented as NV_CUDA_CALLABLE NV_FORCE_INLINE calls to the C lib right now, but later we could replace some of them with some approximations or more clever stuff. */ /** \brief abs returns the absolute value of its argument. 
*/ NV_CUDA_CALLABLE NV_FORCE_INLINE float NvAbs(float a) { return intrinsics::abs(a); } NV_CUDA_CALLABLE NV_FORCE_INLINE bool NvEquals(float a, float b, float eps) { return (NvAbs(a - b) < eps); } /** \brief abs returns the absolute value of its argument. */ NV_CUDA_CALLABLE NV_FORCE_INLINE double NvAbs(double a) { return ::fabs(a); } /** \brief abs returns the absolute value of its argument. */ NV_CUDA_CALLABLE NV_FORCE_INLINE int32_t NvAbs(int32_t a) { return ::abs(a); } /** \brief Clamps v to the range [hi,lo] */ template <class T> NV_CUDA_CALLABLE NV_FORCE_INLINE T NvClamp(T v, T lo, T hi) { NV_ASSERT(lo <= hi); return NvMin(hi, NvMax(lo, v)); } //! \brief Square root. NV_CUDA_CALLABLE NV_FORCE_INLINE float NvSqrt(float a) { return intrinsics::sqrt(a); } //! \brief Square root. NV_CUDA_CALLABLE NV_FORCE_INLINE double NvSqrt(double a) { return ::sqrt(a); } //! \brief reciprocal square root. NV_CUDA_CALLABLE NV_FORCE_INLINE float NvRecipSqrt(float a) { return intrinsics::recipSqrt(a); } //! \brief reciprocal square root. NV_CUDA_CALLABLE NV_FORCE_INLINE double NvRecipSqrt(double a) { return 1 / ::sqrt(a); } //! trigonometry -- all angles are in radians. //! \brief Sine of an angle ( <b>Unit:</b> Radians ) NV_CUDA_CALLABLE NV_FORCE_INLINE float NvSin(float a) { return intrinsics::sin(a); } //! \brief Sine of an angle ( <b>Unit:</b> Radians ) NV_CUDA_CALLABLE NV_FORCE_INLINE double NvSin(double a) { return ::sin(a); } //! \brief Cosine of an angle (<b>Unit:</b> Radians) NV_CUDA_CALLABLE NV_FORCE_INLINE float NvCos(float a) { return intrinsics::cos(a); } //! \brief Cosine of an angle (<b>Unit:</b> Radians) NV_CUDA_CALLABLE NV_FORCE_INLINE double NvCos(double a) { return ::cos(a); } /** \brief Tangent of an angle. <b>Unit:</b> Radians */ NV_CUDA_CALLABLE NV_FORCE_INLINE float NvTan(float a) { return ::tanf(a); } /** \brief Tangent of an angle. <b>Unit:</b> Radians */ NV_CUDA_CALLABLE NV_FORCE_INLINE double NvTan(double a) { return ::tan(a); } /** \brief Arcsine. Returns angle between -PI/2 and PI/2 in radians <b>Unit:</b> Radians */ NV_CUDA_CALLABLE NV_FORCE_INLINE float NvAsin(float f) { return ::asinf(NvClamp(f, -1.0f, 1.0f)); } /** \brief Arcsine. Returns angle between -PI/2 and PI/2 in radians <b>Unit:</b> Radians */ NV_CUDA_CALLABLE NV_FORCE_INLINE double NvAsin(double f) { return ::asin(NvClamp(f, -1.0, 1.0)); } /** \brief Arccosine. Returns angle between 0 and PI in radians <b>Unit:</b> Radians */ NV_CUDA_CALLABLE NV_FORCE_INLINE float NvAcos(float f) { return ::acosf(NvClamp(f, -1.0f, 1.0f)); } /** \brief Arccosine. Returns angle between 0 and PI in radians <b>Unit:</b> Radians */ NV_CUDA_CALLABLE NV_FORCE_INLINE double NvAcos(double f) { return ::acos(NvClamp(f, -1.0, 1.0)); } /** \brief ArcTangent. Returns angle between -PI/2 and PI/2 in radians <b>Unit:</b> Radians */ NV_CUDA_CALLABLE NV_FORCE_INLINE float NvAtan(float a) { return ::atanf(a); } /** \brief ArcTangent. Returns angle between -PI/2 and PI/2 in radians <b>Unit:</b> Radians */ NV_CUDA_CALLABLE NV_FORCE_INLINE double NvAtan(double a) { return ::atan(a); } /** \brief Arctangent of (x/y) with correct sign. Returns angle between -PI and PI in radians <b>Unit:</b> Radians */ NV_CUDA_CALLABLE NV_FORCE_INLINE float NvAtan2(float x, float y) { return ::atan2f(x, y); } /** \brief Arctangent of (x/y) with correct sign. Returns angle between -PI and PI in radians <b>Unit:</b> Radians */ NV_CUDA_CALLABLE NV_FORCE_INLINE double NvAtan2(double x, double y) { return ::atan2(x, y); } //! 
\brief returns true if the passed number is a finite floating point number as opposed to INF, NAN, etc. NV_CUDA_CALLABLE NV_FORCE_INLINE bool NvIsFinite(float f) { return intrinsics::isFinite(f); } //! \brief returns true if the passed number is a finite floating point number as opposed to INF, NAN, etc. NV_CUDA_CALLABLE NV_FORCE_INLINE bool NvIsFinite(double f) { return intrinsics::isFinite(f); } NV_CUDA_CALLABLE NV_FORCE_INLINE float NvFloor(float a) { return ::floorf(a); } NV_CUDA_CALLABLE NV_FORCE_INLINE float NvExp(float a) { return ::expf(a); } NV_CUDA_CALLABLE NV_FORCE_INLINE float NvCeil(float a) { return ::ceilf(a); } NV_CUDA_CALLABLE NV_FORCE_INLINE float NvSign(float a) { return nvidia::intrinsics::sign(a); } NV_CUDA_CALLABLE NV_FORCE_INLINE float NvPow(float x, float y) { return ::powf(x, y); } NV_CUDA_CALLABLE NV_FORCE_INLINE float NvLog(float x) { return ::logf(x); } #if !NV_DOXYGEN } // namespace nvidia #endif /** @} */ #endif // #ifndef NV_NVFOUNDATION_NVMATH_H
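A small numeric sketch using the helpers above (the functions are hypothetical). Note that NvAsin/NvAcos clamp their argument to [-1, 1] internally, so an angle computed from a dot product that drifts slightly out of range (e.g. 1.0000001f) does not produce NaN.

#include "NvMath.h"

// Angle in radians between two unit-length vectors, given their dot product.
inline float angleFromUnitDot(float unitDot)
{
    return nvidia::NvAcos(unitDot);   // internal clamp guards against rounding drift
}

// Wraps an angle to the range [-Pi, Pi).
inline float wrapAngle(float radians)
{
    float a = radians;
    while (a >= nvidia::NvPi)  a -= nvidia::NvTwoPi;
    while (a < -nvidia::NvPi)  a += nvidia::NvTwoPi;
    return a;
}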
NVIDIA-Omniverse/PhysX/blast/include/shared/NvFoundation/NvMat44.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NVFOUNDATION_NVMAT44_H #define NV_NVFOUNDATION_NVMAT44_H /** \addtogroup foundation @{ */ #include "NvQuat.h" #include "NvVec4.h" #include "NvMat33.h" #include "NvTransform.h" #if !NV_DOXYGEN namespace nvidia { #endif /*! \brief 4x4 matrix class This class is layout-compatible with D3D and OpenGL matrices. More notes on layout are given in the NvMat33 @see NvMat33 NvTransform */ class NvMat44 { public: //! Default constructor NV_CUDA_CALLABLE NV_INLINE NvMat44() { } //! identity constructor NV_CUDA_CALLABLE NV_INLINE NvMat44(NvIDENTITY r) : column0(1.0f, 0.0f, 0.0f, 0.0f) , column1(0.0f, 1.0f, 0.0f, 0.0f) , column2(0.0f, 0.0f, 1.0f, 0.0f) , column3(0.0f, 0.0f, 0.0f, 1.0f) { NV_UNUSED(r); } //! zero constructor NV_CUDA_CALLABLE NV_INLINE NvMat44(NvZERO r) : column0(NvZero), column1(NvZero), column2(NvZero), column3(NvZero) { NV_UNUSED(r); } //! Construct from four 4-vectors NV_CUDA_CALLABLE NvMat44(const NvVec4& col0, const NvVec4& col1, const NvVec4& col2, const NvVec4& col3) : column0(col0), column1(col1), column2(col2), column3(col3) { } //! constructor that generates a multiple of the identity matrix explicit NV_CUDA_CALLABLE NV_INLINE NvMat44(float r) : column0(r, 0.0f, 0.0f, 0.0f) , column1(0.0f, r, 0.0f, 0.0f) , column2(0.0f, 0.0f, r, 0.0f) , column3(0.0f, 0.0f, 0.0f, r) { } //! Construct from three base vectors and a translation NV_CUDA_CALLABLE NvMat44(const NvVec3& col0, const NvVec3& col1, const NvVec3& col2, const NvVec3& col3) : column0(col0, 0), column1(col1, 0), column2(col2, 0), column3(col3, 1.0f) { } //! Construct from float[16] explicit NV_CUDA_CALLABLE NV_INLINE NvMat44(float values[]) : column0(values[0], values[1], values[2], values[3]) , column1(values[4], values[5], values[6], values[7]) , column2(values[8], values[9], values[10], values[11]) , column3(values[12], values[13], values[14], values[15]) { } //! 
Construct from a quaternion explicit NV_CUDA_CALLABLE NV_INLINE NvMat44(const NvQuat& q) { const float x = q.x; const float y = q.y; const float z = q.z; const float w = q.w; const float x2 = x + x; const float y2 = y + y; const float z2 = z + z; const float xx = x2 * x; const float yy = y2 * y; const float zz = z2 * z; const float xy = x2 * y; const float xz = x2 * z; const float xw = x2 * w; const float yz = y2 * z; const float yw = y2 * w; const float zw = z2 * w; column0 = NvVec4(1.0f - yy - zz, xy + zw, xz - yw, 0.0f); column1 = NvVec4(xy - zw, 1.0f - xx - zz, yz + xw, 0.0f); column2 = NvVec4(xz + yw, yz - xw, 1.0f - xx - yy, 0.0f); column3 = NvVec4(0.0f, 0.0f, 0.0f, 1.0f); } //! Construct from a diagonal vector explicit NV_CUDA_CALLABLE NV_INLINE NvMat44(const NvVec4& diagonal) : column0(diagonal.x, 0.0f, 0.0f, 0.0f) , column1(0.0f, diagonal.y, 0.0f, 0.0f) , column2(0.0f, 0.0f, diagonal.z, 0.0f) , column3(0.0f, 0.0f, 0.0f, diagonal.w) { } //! Construct from Mat33 and a translation NV_CUDA_CALLABLE NvMat44(const NvMat33& axes, const NvVec3& position) : column0(axes.column0, 0.0f), column1(axes.column1, 0.0f), column2(axes.column2, 0.0f), column3(position, 1.0f) { } NV_CUDA_CALLABLE NvMat44(const NvTransform& t) { *this = NvMat44(NvMat33(t.q), t.p); } /** \brief returns true if the two matrices are exactly equal */ NV_CUDA_CALLABLE NV_INLINE bool operator==(const NvMat44& m) const { return column0 == m.column0 && column1 == m.column1 && column2 == m.column2 && column3 == m.column3; } //! Copy constructor NV_CUDA_CALLABLE NV_INLINE NvMat44(const NvMat44& other) : column0(other.column0), column1(other.column1), column2(other.column2), column3(other.column3) { } //! Assignment operator NV_CUDA_CALLABLE NV_INLINE const NvMat44& operator=(const NvMat44& other) { column0 = other.column0; column1 = other.column1; column2 = other.column2; column3 = other.column3; return *this; } //! Get transposed matrix NV_CUDA_CALLABLE NV_INLINE NvMat44 getTranspose() const { return NvMat44( NvVec4(column0.x, column1.x, column2.x, column3.x), NvVec4(column0.y, column1.y, column2.y, column3.y), NvVec4(column0.z, column1.z, column2.z, column3.z), NvVec4(column0.w, column1.w, column2.w, column3.w)); } //! Unary minus NV_CUDA_CALLABLE NV_INLINE NvMat44 operator-() const { return NvMat44(-column0, -column1, -column2, -column3); } //! Add NV_CUDA_CALLABLE NV_INLINE NvMat44 operator+(const NvMat44& other) const { return NvMat44(column0 + other.column0, column1 + other.column1, column2 + other.column2, column3 + other.column3); } //! Subtract NV_CUDA_CALLABLE NV_INLINE NvMat44 operator-(const NvMat44& other) const { return NvMat44(column0 - other.column0, column1 - other.column1, column2 - other.column2, column3 - other.column3); } //! Scalar multiplication NV_CUDA_CALLABLE NV_INLINE NvMat44 operator*(float scalar) const { return NvMat44(column0 * scalar, column1 * scalar, column2 * scalar, column3 * scalar); } friend NvMat44 operator*(float, const NvMat44&); //! Matrix multiplication NV_CUDA_CALLABLE NV_INLINE NvMat44 operator*(const NvMat44& other) const { // Rows from this <dot> columns from other // column0 = transform(other.column0) etc return NvMat44(transform(other.column0), transform(other.column1), transform(other.column2), transform(other.column3)); } // a <op>= b operators //! Equals-add NV_CUDA_CALLABLE NV_INLINE NvMat44& operator+=(const NvMat44& other) { column0 += other.column0; column1 += other.column1; column2 += other.column2; column3 += other.column3; return *this; } //! 
Equals-sub NV_CUDA_CALLABLE NV_INLINE NvMat44& operator-=(const NvMat44& other) { column0 -= other.column0; column1 -= other.column1; column2 -= other.column2; column3 -= other.column3; return *this; } //! Equals scalar multiplication NV_CUDA_CALLABLE NV_INLINE NvMat44& operator*=(float scalar) { column0 *= scalar; column1 *= scalar; column2 *= scalar; column3 *= scalar; return *this; } //! Equals matrix multiplication NV_CUDA_CALLABLE NV_INLINE NvMat44& operator*=(const NvMat44& other) { *this = *this * other; return *this; } //! Element access, mathematical way! NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE float operator()(unsigned int row, unsigned int col) const { return (*this)[col][row]; } //! Element access, mathematical way! NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE float& operator()(unsigned int row, unsigned int col) { return (*this)[col][row]; } //! Transform vector by matrix, equal to v' = M*v NV_CUDA_CALLABLE NV_INLINE NvVec4 transform(const NvVec4& other) const { return column0 * other.x + column1 * other.y + column2 * other.z + column3 * other.w; } //! Transform vector by matrix, equal to v' = M*v NV_CUDA_CALLABLE NV_INLINE NvVec3 transform(const NvVec3& other) const { return transform(NvVec4(other, 1.0f)).getXYZ(); } //! Rotate vector by matrix, equal to v' = M*v NV_CUDA_CALLABLE NV_INLINE const NvVec4 rotate(const NvVec4& other) const { return column0 * other.x + column1 * other.y + column2 * other.z; // + column3*0; } //! Rotate vector by matrix, equal to v' = M*v NV_CUDA_CALLABLE NV_INLINE const NvVec3 rotate(const NvVec3& other) const { return rotate(NvVec4(other, 1.0f)).getXYZ(); } NV_CUDA_CALLABLE NV_INLINE NvVec3 getBasis(int num) const { NV_ASSERT(num >= 0 && num < 3); return (&column0)[num].getXYZ(); } NV_CUDA_CALLABLE NV_INLINE NvVec3 getPosition() const { return column3.getXYZ(); } NV_CUDA_CALLABLE NV_INLINE void setPosition(const NvVec3& position) { column3.x = position.x; column3.y = position.y; column3.z = position.z; } NV_CUDA_CALLABLE NV_FORCE_INLINE const float* front() const { return &column0.x; } NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec4& operator[](unsigned int num) { return (&column0)[num]; } NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE const NvVec4& operator[](unsigned int num) const { return (&column0)[num]; } NV_CUDA_CALLABLE NV_INLINE void scale(const NvVec4& p) { column0 *= p.x; column1 *= p.y; column2 *= p.z; column3 *= p.w; } NV_CUDA_CALLABLE NV_INLINE NvMat44 inverseRT(void) const { NvVec3 r0(column0.x, column1.x, column2.x), r1(column0.y, column1.y, column2.y), r2(column0.z, column1.z, column2.z); return NvMat44(r0, r1, r2, -(r0 * column3.x + r1 * column3.y + r2 * column3.z)); } NV_CUDA_CALLABLE NV_INLINE bool isFinite() const { return column0.isFinite() && column1.isFinite() && column2.isFinite() && column3.isFinite(); } // Data, see above for format! NvVec4 column0, column1, column2, column3; // the four base vectors }; // implementation from NvTransform.h NV_CUDA_CALLABLE NV_FORCE_INLINE NvTransform::NvTransform(const NvMat44& m) { NvVec3 column0 = NvVec3(m.column0.x, m.column0.y, m.column0.z); NvVec3 column1 = NvVec3(m.column1.x, m.column1.y, m.column1.z); NvVec3 column2 = NvVec3(m.column2.x, m.column2.y, m.column2.z); q = NvQuat(NvMat33(column0, column1, column2)); p = NvVec3(m.column3.x, m.column3.y, m.column3.z); } #if !NV_DOXYGEN } // namespace nvidia #endif /** @} */ #endif // #ifndef NV_NVFOUNDATION_NVMAT44_H
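A brief usage sketch of NvMat44 (the function and variable names are illustrative): building a rigid-body matrix from a rotation quaternion and a position, then using transform() for points and rotate() for directions.

#include "NvMat44.h"

using namespace nvidia;

void localToWorld(const NvQuat& rotation, const NvVec3& position,
                  const NvVec3& localPoint, const NvVec3& localDir,
                  NvVec3& worldPoint, NvVec3& worldDir)
{
    NvMat44 m(rotation);       // rotation part built from the quaternion (constructor above)
    m.setPosition(position);   // translation goes into column3

    worldPoint = m.transform(localPoint);   // applies rotation and translation
    worldDir   = m.rotate(localDir);        // applies rotation only
}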
NVIDIA-Omniverse/PhysX/blast/include/shared/NvFoundation/NvAllocatorCallback.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NVFOUNDATION_NVALLOCATORCALLBACK_H #define NV_NVFOUNDATION_NVALLOCATORCALLBACK_H /** \addtogroup foundation @{ */ #include "Nv.h" #if !NV_DOXYGEN namespace nvidia { #endif /** \brief Abstract base class for an application defined memory allocator that can be used by the Nv library. \note The SDK state should not be modified from within any allocation/free function. <b>Threading:</b> All methods of this class should be thread safe as it can be called from the user thread or the physics processing thread(s). */ class NvAllocatorCallback { public: /** \brief destructor */ virtual ~NvAllocatorCallback() { } /** \brief Allocates size bytes of memory, which must be 16-byte aligned. This method should never return NULL. If you run out of memory, then you should terminate the app or take some other appropriate action. <b>Threading:</b> This function should be thread safe as it can be called in the context of the user thread and physics processing thread(s). \param size Number of bytes to allocate. \param typeName Name of the datatype that is being allocated \param filename The source file which allocated the memory \param line The source line which allocated the memory \return The allocated block of memory. */ virtual void* allocate(size_t size, const char* typeName, const char* filename, int line) = 0; /** \brief Frees memory previously allocated by allocate(). <b>Threading:</b> This function should be thread safe as it can be called in the context of the user thread and physics processing thread(s). \param ptr Memory to free. */ virtual void deallocate(void* ptr) = 0; }; #if !NV_DOXYGEN } // namespace nvidia #endif /** @} */ #endif // #ifndef NV_NVFOUNDATION_NVALLOCATORCALLBACK_H
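A sketch of a 16-byte-aligned allocator implementation (the class is hypothetical; a production version should also be thread safe, per the note above). It over-allocates, aligns the returned pointer, and stashes the original malloc pointer just before the aligned block so deallocate() can recover it.

#include "NvAllocatorCallback.h"
#include <cstdint>
#include <cstdlib>

class AlignedAllocator : public nvidia::NvAllocatorCallback
{
public:
    void* allocate(size_t size, const char* /*typeName*/, const char* /*filename*/, int /*line*/) override
    {
        // Reserve room for the alignment shift plus the stored original pointer.
        void* raw = std::malloc(size + 15 + sizeof(void*));
        if (!raw)
            return nullptr;   // the contract says this should not happen; handle out-of-memory as appropriate
        const uintptr_t aligned = (reinterpret_cast<uintptr_t>(raw) + sizeof(void*) + 15) & ~uintptr_t(15);
        reinterpret_cast<void**>(aligned)[-1] = raw;   // stash the original pointer just below the aligned block
        return reinterpret_cast<void*>(aligned);
    }

    void deallocate(void* ptr) override
    {
        if (ptr)
            std::free(reinterpret_cast<void**>(ptr)[-1]);
    }
};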
NVIDIA-Omniverse/PhysX/blast/include/shared/NvFoundation/NvMat33.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NVFOUNDATION_NVMAT33_H #define NV_NVFOUNDATION_NVMAT33_H /** \addtogroup foundation @{ */ #include "NvVec3.h" #include "NvQuat.h" #if !NV_DOXYGEN namespace nvidia { #endif /*! \brief 3x3 matrix class Some clarifications, as there has been much confusion about matrix formats etc. in the past. Short: - Matrices have base vectors in columns (vectors are column matrices, 3x1 matrices). - The matrix is physically stored in column major format - Matrices are concatenated from the left Long: Given three base vectors a, b and c the matrix is stored as |a.x b.x c.x| |a.y b.y c.y| |a.z b.z c.z| Vectors are treated as columns, so the vector v is |x| |y| |z| And matrices are applied _before_ the vector (pre-multiplication) v' = M*v |x'| |a.x b.x c.x| |x| |a.x*x + b.x*y + c.x*z| |y'| = |a.y b.y c.y| * |y| = |a.y*x + b.y*y + c.y*z| |z'| |a.z b.z c.z| |z| |a.z*x + b.z*y + c.z*z| Physical storage and indexing: To be compatible with popular 3d rendering APIs (read D3d and OpenGL) the physical indexing is |0 3 6| |1 4 7| |2 5 8| index = column*3 + row which in C++ translates to M[column][row] The mathematical indexing is M_row,column and this is what is used for _-notation so _12 is 1st row, second column and operator(row, column)! */ class NvMat33 { public: //! Default constructor NV_CUDA_CALLABLE NV_FORCE_INLINE NvMat33() { } //! identity constructor NV_CUDA_CALLABLE NV_INLINE NvMat33(NvIDENTITY r) : column0(1.0f, 0.0f, 0.0f), column1(0.0f, 1.0f, 0.0f), column2(0.0f, 0.0f, 1.0f) { NV_UNUSED(r); } //! zero constructor NV_CUDA_CALLABLE NV_INLINE NvMat33(NvZERO r) : column0(0.0f), column1(0.0f), column2(0.0f) { NV_UNUSED(r); } //! Construct from three base vectors NV_CUDA_CALLABLE NvMat33(const NvVec3& col0, const NvVec3& col1, const NvVec3& col2) : column0(col0), column1(col1), column2(col2) { } //!
constructor from a scalar, which generates a multiple of the identity matrix explicit NV_CUDA_CALLABLE NV_INLINE NvMat33(float r) : column0(r, 0.0f, 0.0f), column1(0.0f, r, 0.0f), column2(0.0f, 0.0f, r) { } //! Construct from float[9] explicit NV_CUDA_CALLABLE NV_INLINE NvMat33(float values[]) : column0(values[0], values[1], values[2]) , column1(values[3], values[4], values[5]) , column2(values[6], values[7], values[8]) { } //! Construct from a quaternion explicit NV_CUDA_CALLABLE NV_FORCE_INLINE NvMat33(const NvQuat& q) { const float x = q.x; const float y = q.y; const float z = q.z; const float w = q.w; const float x2 = x + x; const float y2 = y + y; const float z2 = z + z; const float xx = x2 * x; const float yy = y2 * y; const float zz = z2 * z; const float xy = x2 * y; const float xz = x2 * z; const float xw = x2 * w; const float yz = y2 * z; const float yw = y2 * w; const float zw = z2 * w; column0 = NvVec3(1.0f - yy - zz, xy + zw, xz - yw); column1 = NvVec3(xy - zw, 1.0f - xx - zz, yz + xw); column2 = NvVec3(xz + yw, yz - xw, 1.0f - xx - yy); } //! Copy constructor NV_CUDA_CALLABLE NV_INLINE NvMat33(const NvMat33& other) : column0(other.column0), column1(other.column1), column2(other.column2) { } //! Assignment operator NV_CUDA_CALLABLE NV_FORCE_INLINE NvMat33& operator=(const NvMat33& other) { column0 = other.column0; column1 = other.column1; column2 = other.column2; return *this; } //! Construct from diagonal, off-diagonals are zero. NV_CUDA_CALLABLE NV_INLINE static NvMat33 createDiagonal(const NvVec3& d) { return NvMat33(NvVec3(d.x, 0.0f, 0.0f), NvVec3(0.0f, d.y, 0.0f), NvVec3(0.0f, 0.0f, d.z)); } /** \brief returns true if the two matrices are exactly equal */ NV_CUDA_CALLABLE NV_INLINE bool operator==(const NvMat33& m) const { return column0 == m.column0 && column1 == m.column1 && column2 == m.column2; } //! Get transposed matrix NV_CUDA_CALLABLE NV_FORCE_INLINE NvMat33 getTranspose() const { const NvVec3 v0(column0.x, column1.x, column2.x); const NvVec3 v1(column0.y, column1.y, column2.y); const NvVec3 v2(column0.z, column1.z, column2.z); return NvMat33(v0, v1, v2); } //! Get the real inverse NV_CUDA_CALLABLE NV_INLINE NvMat33 getInverse() const { const float det = getDeterminant(); NvMat33 inverse; if(det != 0) { const float invDet = 1.0f / det; inverse.column0.x = invDet * (column1.y * column2.z - column2.y * column1.z); inverse.column0.y = invDet * -(column0.y * column2.z - column2.y * column0.z); inverse.column0.z = invDet * (column0.y * column1.z - column0.z * column1.y); inverse.column1.x = invDet * -(column1.x * column2.z - column1.z * column2.x); inverse.column1.y = invDet * (column0.x * column2.z - column0.z * column2.x); inverse.column1.z = invDet * -(column0.x * column1.z - column0.z * column1.x); inverse.column2.x = invDet * (column1.x * column2.y - column1.y * column2.x); inverse.column2.y = invDet * -(column0.x * column2.y - column0.y * column2.x); inverse.column2.z = invDet * (column0.x * column1.y - column1.x * column0.y); return inverse; } else { return NvMat33(NvIdentity); } } //! Get determinant NV_CUDA_CALLABLE NV_INLINE float getDeterminant() const { return column0.dot(column1.cross(column2)); } //! Unary minus NV_CUDA_CALLABLE NV_INLINE NvMat33 operator-() const { return NvMat33(-column0, -column1, -column2); } //! Add NV_CUDA_CALLABLE NV_INLINE NvMat33 operator+(const NvMat33& other) const { return NvMat33(column0 + other.column0, column1 + other.column1, column2 + other.column2); } //! 
Subtract NV_CUDA_CALLABLE NV_INLINE NvMat33 operator-(const NvMat33& other) const { return NvMat33(column0 - other.column0, column1 - other.column1, column2 - other.column2); } //! Scalar multiplication NV_CUDA_CALLABLE NV_INLINE NvMat33 operator*(float scalar) const { return NvMat33(column0 * scalar, column1 * scalar, column2 * scalar); } friend NvMat33 operator*(float, const NvMat33&); //! Matrix vector multiplication (returns 'this->transform(vec)') NV_CUDA_CALLABLE NV_INLINE NvVec3 operator*(const NvVec3& vec) const { return transform(vec); } // a <op>= b operators //! Matrix multiplication NV_CUDA_CALLABLE NV_FORCE_INLINE NvMat33 operator*(const NvMat33& other) const { // Rows from this <dot> columns from other // column0 = transform(other.column0) etc return NvMat33(transform(other.column0), transform(other.column1), transform(other.column2)); } //! Equals-add NV_CUDA_CALLABLE NV_INLINE NvMat33& operator+=(const NvMat33& other) { column0 += other.column0; column1 += other.column1; column2 += other.column2; return *this; } //! Equals-sub NV_CUDA_CALLABLE NV_INLINE NvMat33& operator-=(const NvMat33& other) { column0 -= other.column0; column1 -= other.column1; column2 -= other.column2; return *this; } //! Equals scalar multiplication NV_CUDA_CALLABLE NV_INLINE NvMat33& operator*=(float scalar) { column0 *= scalar; column1 *= scalar; column2 *= scalar; return *this; } //! Equals matrix multiplication NV_CUDA_CALLABLE NV_INLINE NvMat33& operator*=(const NvMat33& other) { *this = *this * other; return *this; } //! Element access, mathematical way! NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE float operator()(unsigned int row, unsigned int col) const { return (*this)[col][row]; } //! Element access, mathematical way! NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE float& operator()(unsigned int row, unsigned int col) { return (*this)[col][row]; } // Transform etc //! Transform vector by matrix, equal to v' = M*v NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 transform(const NvVec3& other) const { return column0 * other.x + column1 * other.y + column2 * other.z; } //! Transform vector by matrix transpose, v' = M^t*v NV_CUDA_CALLABLE NV_INLINE NvVec3 transformTranspose(const NvVec3& other) const { return NvVec3(column0.dot(other), column1.dot(other), column2.dot(other)); } NV_CUDA_CALLABLE NV_FORCE_INLINE const float* front() const { return &column0.x; } NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3& operator[](unsigned int num) { return (&column0)[num]; } NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE const NvVec3& operator[](unsigned int num) const { return (&column0)[num]; } // Data, see above for format! 
NvVec3 column0, column1, column2; // the three base vectors }; // implementation from NvQuat.h NV_CUDA_CALLABLE NV_INLINE NvQuat::NvQuat(const NvMat33& m) { if (m.column2.z < 0) { if (m.column0.x > m.column1.y) { float t = 1 + m.column0.x - m.column1.y - m.column2.z; *this = NvQuat(t, m.column0.y + m.column1.x, m.column2.x + m.column0.z, m.column1.z - m.column2.y) * (0.5f / NvSqrt(t)); } else { float t = 1 - m.column0.x + m.column1.y - m.column2.z; *this = NvQuat(m.column0.y + m.column1.x, t, m.column1.z + m.column2.y, m.column2.x - m.column0.z) * (0.5f / NvSqrt(t)); } } else { if (m.column0.x < -m.column1.y) { float t = 1 - m.column0.x - m.column1.y + m.column2.z; *this = NvQuat(m.column2.x + m.column0.z, m.column1.z + m.column2.y, t, m.column0.y - m.column1.x) * (0.5f / NvSqrt(t)); } else { float t = 1 + m.column0.x + m.column1.y + m.column2.z; *this = NvQuat(m.column1.z - m.column2.y, m.column2.x - m.column0.z, m.column0.y - m.column1.x, t) * (0.5f / NvSqrt(t)); } } } #if !NV_DOXYGEN } // namespace nvidia #endif /** @} */ #endif // #ifndef NV_NVFOUNDATION_NVMAT33_H
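// Editorial sketch (not part of the original header): spells out the storage convention
// documented in the class comment above. Columns are the base vectors, element (row, col)
// maps to M[col][row], and vectors are transformed by pre-multiplication v' = M*v.
static void nvMat33ConventionSketch()
{
    const nvidia::NvVec3 a(1.0f, 2.0f, 3.0f); // column 0
    const nvidia::NvVec3 b(4.0f, 5.0f, 6.0f); // column 1
    const nvidia::NvVec3 c(7.0f, 8.0f, 9.0f); // column 2
    nvidia::NvMat33 m(a, b, c);

    // Mathematical indexing: row 0, column 1 is b.x == 4.
    const float m01 = m(0, 1);      // deprecated but illustrative accessor
    const float same = m.column1.x; // equivalent direct member access

    // v' = M*v == a*x + b*y + c*z, so transforming the x unit vector returns column 0.
    const nvidia::NvVec3 v(1.0f, 0.0f, 0.0f);
    const nvidia::NvVec3 vPrime = m.transform(v); // equals (1, 2, 3)

    NV_UNUSED(m01); NV_UNUSED(same); NV_UNUSED(vPrime);
}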
NVIDIA-Omniverse/PhysX/blast/include/shared/NvFoundation/NvPlane.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NVFOUNDATION_NVPLANE_H #define NV_NVFOUNDATION_NVPLANE_H /** \addtogroup foundation @{ */ #include "NvMath.h" #include "NvVec3.h" #if !NV_DOXYGEN namespace nvidia { #endif /** \brief Representation of a plane. 
Plane equation used: n.dot(v) + d = 0 */ class NvPlane { public: /** \brief Constructor */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvPlane() { } /** \brief Constructor from a normal and a distance */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvPlane(float nx, float ny, float nz, float distance) : n(nx, ny, nz), d(distance) { } /** \brief Constructor from a normal and a distance */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvPlane(const NvVec3& normal, float distance) : n(normal), d(distance) { } /** \brief Constructor from a point on the plane and a normal */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvPlane(const NvVec3& point, const NvVec3& normal) : n(normal), d(-point.dot(n)) // p satisfies normal.dot(p) + d = 0 { } /** \brief Constructor from three points */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvPlane(const NvVec3& p0, const NvVec3& p1, const NvVec3& p2) { n = (p1 - p0).cross(p2 - p0).getNormalized(); d = -p0.dot(n); } /** \brief returns true if the two planes are exactly equal */ NV_CUDA_CALLABLE NV_INLINE bool operator==(const NvPlane& p) const { return n == p.n && d == p.d; } NV_CUDA_CALLABLE NV_FORCE_INLINE float distance(const NvVec3& p) const { return p.dot(n) + d; } NV_CUDA_CALLABLE NV_FORCE_INLINE bool contains(const NvVec3& p) const { return NvAbs(distance(p)) < (1.0e-7f); } /** \brief projects p into the plane */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 project(const NvVec3& p) const { return p - n * distance(p); } /** \brief find an arbitrary point in the plane */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 pointInPlane() const { return -n * d; } /** \brief equivalent plane with unit normal */ NV_CUDA_CALLABLE NV_FORCE_INLINE void normalize() { float denom = 1.0f / n.magnitude(); n *= denom; d *= denom; } NvVec3 n; //!< The normal to the plane float d; //!< The distance from the origin }; #if !NV_DOXYGEN } // namespace nvidia #endif /** @} */ #endif // #ifndef NV_NVFOUNDATION_NVPLANE_H
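// Editorial sketch (not part of the original header): NvPlane stores (n, d) with the
// convention n.dot(v) + d = 0 for points v on the plane, so distance() is a signed distance
// along the normal and project() drops a point onto the plane.
static void nvPlaneUsageSketch()
{
    // A plane through the origin spanned by the X and Z axes; the normal points along +Y or
    // -Y depending on the winding of the three points.
    const nvidia::NvVec3 p0(0.0f, 0.0f, 0.0f);
    const nvidia::NvVec3 p1(1.0f, 0.0f, 0.0f);
    const nvidia::NvVec3 p2(0.0f, 0.0f, 1.0f);
    const nvidia::NvPlane plane(p0, p1, p2);

    const nvidia::NvVec3 q(2.0f, 5.0f, 3.0f);
    const float signedDist = plane.distance(q);      // +/-5 depending on the normal direction
    const nvidia::NvVec3 onPlane = plane.project(q); // (2, 0, 3)

    NV_UNUSED(signedDist); NV_UNUSED(onPlane);
}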
NVIDIA-Omniverse/PhysX/blast/include/shared/NvFoundation/Nv.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NVFOUNDATION_NV_H #define NV_NVFOUNDATION_NV_H /** \addtogroup foundation @{ */ #include "NvSimpleTypes.h" /** files to always include */ #include <string.h> #include <stdlib.h> #if !NV_DOXYGEN namespace nvidia { #endif class NvAllocatorCallback; class NvErrorCallback; struct NvErrorCode; class NvAssertHandler; class NvInputStream; class NvInputData; class NvOutputStream; class NvVec2; class NvVec3; class NvVec4; class NvMat33; class NvMat44; class NvPlane; class NvQuat; class NvTransform; class NvBounds3; /** enum for empty constructor tag*/ enum NvEMPTY { NvEmpty }; /** enum for zero constructor tag for vectors and matrices */ enum NvZERO { NvZero }; /** enum for identity constructor flag for quaternions, transforms, and matrices */ enum NvIDENTITY { NvIdentity }; #if !NV_DOXYGEN } // namespace nvidia #endif /** @} */ #endif // #ifndef NV_NVFOUNDATION_NV_H
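// Editorial sketch (not part of the original header): the NvZERO / NvIDENTITY / NvEMPTY tags
// declared above are consumed by constructors elsewhere in these headers, e.g. NvVec2(NvZERO)
// and NvMat33(NvIDENTITY), to request zero- or identity-initialization without clashing with
// the scalar constructors.
static void nvTagConstructorSketch()
{
    const nvidia::NvVec2 zero(nvidia::NvZero);           // (0, 0)
    const nvidia::NvMat33 identity(nvidia::NvIdentity);  // identity matrix
    NV_UNUSED(zero); NV_UNUSED(identity);
}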
NVIDIA-Omniverse/PhysX/blast/include/shared/NvFoundation/NvVec2.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NVFOUNDATION_NVVEC2_H #define NV_NVFOUNDATION_NVVEC2_H /** \addtogroup foundation @{ */ #include "NvMath.h" #if !NV_DOXYGEN namespace nvidia { #endif /** \brief 2 Element vector class. This is a 2-dimensional vector class with public data members. */ class NvVec2 { public: /** \brief default constructor leaves data uninitialized. */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2() { } /** \brief zero constructor. */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2(NvZERO r) : x(0.0f), y(0.0f) { NV_UNUSED(r); } /** \brief Assigns scalar parameter to all elements. Useful to initialize to zero or one. \param[in] a Value to assign to elements. */ explicit NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2(float a) : x(a), y(a) { } /** \brief Initializes from 2 scalar parameters. \param[in] nx Value to initialize X component. \param[in] ny Value to initialize Y component. */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2(float nx, float ny) : x(nx), y(ny) { } /** \brief Copy ctor. */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2(const NvVec2& v) : x(v.x), y(v.y) { } // Operators /** \brief Assignment operator */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2& operator=(const NvVec2& p) { x = p.x; y = p.y; return *this; } /** \brief element access */ NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE float& operator[](int index) { NV_ASSERT(index >= 0 && index <= 1); return reinterpret_cast<float*>(this)[index]; } /** \brief element access */ NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE const float& operator[](int index) const { NV_ASSERT(index >= 0 && index <= 1); return reinterpret_cast<const float*>(this)[index]; } /** \brief returns true if the two vectors are exactly equal. */ NV_CUDA_CALLABLE NV_FORCE_INLINE bool operator==(const NvVec2& v) const { return x == v.x && y == v.y; } /** \brief returns true if the two vectors are not exactly equal. 
*/ NV_CUDA_CALLABLE NV_FORCE_INLINE bool operator!=(const NvVec2& v) const { return x != v.x || y != v.y; } /** \brief tests for exact zero vector */ NV_CUDA_CALLABLE NV_FORCE_INLINE bool isZero() const { return x == 0.0f && y == 0.0f; } /** \brief returns true if all 2 elems of the vector are finite (not NAN or INF, etc.) */ NV_CUDA_CALLABLE NV_INLINE bool isFinite() const { return NvIsFinite(x) && NvIsFinite(y); } /** \brief is normalized - used by API parameter validation */ NV_CUDA_CALLABLE NV_FORCE_INLINE bool isNormalized() const { const float unitTolerance = 1e-4f; return isFinite() && NvAbs(magnitude() - 1) < unitTolerance; } /** \brief returns the squared magnitude Avoids calling NvSqrt()! */ NV_CUDA_CALLABLE NV_FORCE_INLINE float magnitudeSquared() const { return x * x + y * y; } /** \brief returns the magnitude */ NV_CUDA_CALLABLE NV_FORCE_INLINE float magnitude() const { return NvSqrt(magnitudeSquared()); } /** \brief negation */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2 operator-() const { return NvVec2(-x, -y); } /** \brief vector addition */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2 operator+(const NvVec2& v) const { return NvVec2(x + v.x, y + v.y); } /** \brief vector difference */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2 operator-(const NvVec2& v) const { return NvVec2(x - v.x, y - v.y); } /** \brief scalar post-multiplication */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2 operator*(float f) const { return NvVec2(x * f, y * f); } /** \brief scalar division */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2 operator/(float f) const { f = 1.0f / f; // PT: inconsistent notation with operator /= return NvVec2(x * f, y * f); } /** \brief vector addition */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2& operator+=(const NvVec2& v) { x += v.x; y += v.y; return *this; } /** \brief vector difference */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2& operator-=(const NvVec2& v) { x -= v.x; y -= v.y; return *this; } /** \brief scalar multiplication */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2& operator*=(float f) { x *= f; y *= f; return *this; } /** \brief scalar division */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2& operator/=(float f) { f = 1.0f / f; // PT: inconsistent notation with operator / x *= f; y *= f; return *this; } /** \brief returns the scalar product of this and other. */ NV_CUDA_CALLABLE NV_FORCE_INLINE float dot(const NvVec2& v) const { return x * v.x + y * v.y; } /** return a unit vector */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2 getNormalized() const { const float m = magnitudeSquared(); return m > 0.0f ? *this * NvRecipSqrt(m) : NvVec2(0, 0); } /** \brief normalizes the vector in place */ NV_CUDA_CALLABLE NV_FORCE_INLINE float normalize() { const float m = magnitude(); if(m > 0.0f) *this /= m; return m; } /** \brief a[i] * b[i], for all i. 
*/ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2 multiply(const NvVec2& a) const { return NvVec2(x * a.x, y * a.y); } /** \brief element-wise minimum */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2 minimum(const NvVec2& v) const { return NvVec2(NvMin(x, v.x), NvMin(y, v.y)); } /** \brief returns MIN(x, y); */ NV_CUDA_CALLABLE NV_FORCE_INLINE float minElement() const { return NvMin(x, y); } /** \brief element-wise maximum */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2 maximum(const NvVec2& v) const { return NvVec2(NvMax(x, v.x), NvMax(y, v.y)); } /** \brief returns MAX(x, y); */ NV_CUDA_CALLABLE NV_FORCE_INLINE float maxElement() const { return NvMax(x, y); } float x, y; }; NV_CUDA_CALLABLE static NV_FORCE_INLINE NvVec2 operator*(float f, const NvVec2& v) { return NvVec2(f * v.x, f * v.y); } #if !NV_DOXYGEN } // namespace nvidia #endif /** @} */ #endif // #ifndef NV_NVFOUNDATION_NVVEC2_H
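// Editorial sketch (not part of the original header): typical NvVec2 use. getNormalized()
// returns a unit-length copy (or zero for a zero vector), while normalize() works in place
// and returns the original magnitude.
static void nvVec2UsageSketch()
{
    nvidia::NvVec2 v(3.0f, 4.0f);
    const float dot = v.dot(nvidia::NvVec2(1.0f, 0.0f)); // 3
    const nvidia::NvVec2 unit = v.getNormalized();       // (0.6, 0.8), v unchanged
    const float len = v.normalize();                     // 5; v is now (0.6, 0.8)
    NV_UNUSED(dot); NV_UNUSED(unit); NV_UNUSED(len);
}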
NVIDIA-Omniverse/PhysX/blast/include/shared/NvFoundation/NvFlags.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NVFOUNDATION_NVFLAGS_H #define NV_NVFOUNDATION_NVFLAGS_H /** \addtogroup foundation @{ */ #include "Nv.h" #if !NV_DOXYGEN namespace nvidia { #endif /** \brief Container for bitfield flag variables associated with a specific enum type. This allows for type safe manipulation for bitfields. <h3>Example</h3> // enum that defines each bit... struct MyEnum { enum Enum { eMAN = 1, eBEAR = 2, ePIG = 4, }; }; // implements some convenient global operators. 
NV_FLAGS_OPERATORS(MyEnum::Enum, uint8_t); NvFlags<MyEnum::Enum, uint8_t> myFlags; myFlags |= MyEnum::eMAN; myFlags |= MyEnum::eBEAR | MyEnum::ePIG; if(myFlags & MyEnum::eBEAR) { doSomething(); } */ template <typename enumtype, typename storagetype = uint32_t> class NvFlags { public: typedef storagetype InternalType; NV_INLINE explicit NvFlags(const NvEMPTY) { } NV_INLINE NvFlags(void); NV_INLINE NvFlags(enumtype e); NV_INLINE NvFlags(const NvFlags<enumtype, storagetype>& f); NV_INLINE explicit NvFlags(storagetype b); NV_INLINE bool isSet(enumtype e) const; NV_INLINE NvFlags<enumtype, storagetype>& set(enumtype e); NV_INLINE bool operator==(enumtype e) const; NV_INLINE bool operator==(const NvFlags<enumtype, storagetype>& f) const; NV_INLINE bool operator==(bool b) const; NV_INLINE bool operator!=(enumtype e) const; NV_INLINE bool operator!=(const NvFlags<enumtype, storagetype>& f) const; NV_INLINE NvFlags<enumtype, storagetype>& operator=(const NvFlags<enumtype, storagetype>& f); NV_INLINE NvFlags<enumtype, storagetype>& operator=(enumtype e); NV_INLINE NvFlags<enumtype, storagetype>& operator|=(enumtype e); NV_INLINE NvFlags<enumtype, storagetype>& operator|=(const NvFlags<enumtype, storagetype>& f); NV_INLINE NvFlags<enumtype, storagetype> operator|(enumtype e) const; NV_INLINE NvFlags<enumtype, storagetype> operator|(const NvFlags<enumtype, storagetype>& f) const; NV_INLINE NvFlags<enumtype, storagetype>& operator&=(enumtype e); NV_INLINE NvFlags<enumtype, storagetype>& operator&=(const NvFlags<enumtype, storagetype>& f); NV_INLINE NvFlags<enumtype, storagetype> operator&(enumtype e) const; NV_INLINE NvFlags<enumtype, storagetype> operator&(const NvFlags<enumtype, storagetype>& f) const; NV_INLINE NvFlags<enumtype, storagetype>& operator^=(enumtype e); NV_INLINE NvFlags<enumtype, storagetype>& operator^=(const NvFlags<enumtype, storagetype>& f); NV_INLINE NvFlags<enumtype, storagetype> operator^(enumtype e) const; NV_INLINE NvFlags<enumtype, storagetype> operator^(const NvFlags<enumtype, storagetype>& f) const; NV_INLINE NvFlags<enumtype, storagetype> operator~(void) const; NV_INLINE operator bool(void) const; NV_INLINE operator uint8_t(void) const; NV_INLINE operator uint16_t(void) const; NV_INLINE operator uint32_t(void) const; NV_INLINE void clear(enumtype e); public: friend NV_INLINE NvFlags<enumtype, storagetype> operator&(enumtype a, NvFlags<enumtype, storagetype>& b) { NvFlags<enumtype, storagetype> out; out.mBits = a & b.mBits; return out; } private: storagetype mBits; }; #define NV_FLAGS_OPERATORS(enumtype, storagetype) \ NV_INLINE NvFlags<enumtype, storagetype> operator|(enumtype a, enumtype b) \ { \ NvFlags<enumtype, storagetype> r(a); \ r |= b; \ return r; \ } \ NV_INLINE NvFlags<enumtype, storagetype> operator&(enumtype a, enumtype b) \ { \ NvFlags<enumtype, storagetype> r(a); \ r &= b; \ return r; \ } \ NV_INLINE NvFlags<enumtype, storagetype> operator~(enumtype a) \ { \ return ~NvFlags<enumtype, storagetype>(a); \ } #define NV_FLAGS_TYPEDEF(x, y) \ typedef NvFlags<x::Enum, y> x##s; \ NV_FLAGS_OPERATORS(x::Enum, y) template <typename enumtype, typename storagetype> NV_INLINE NvFlags<enumtype, storagetype>::NvFlags(void) { mBits = 0; } template <typename enumtype, typename storagetype> NV_INLINE NvFlags<enumtype, storagetype>::NvFlags(enumtype e) { mBits = static_cast<storagetype>(e); } template <typename enumtype, typename storagetype> NV_INLINE NvFlags<enumtype, storagetype>::NvFlags(const NvFlags<enumtype, storagetype>& f) { mBits = f.mBits; } template <typename 
enumtype, typename storagetype> NV_INLINE NvFlags<enumtype, storagetype>::NvFlags(storagetype b) { mBits = b; } template <typename enumtype, typename storagetype> NV_INLINE bool NvFlags<enumtype, storagetype>::isSet(enumtype e) const { return (mBits & static_cast<storagetype>(e)) == static_cast<storagetype>(e); } template <typename enumtype, typename storagetype> NV_INLINE NvFlags<enumtype, storagetype>& NvFlags<enumtype, storagetype>::set(enumtype e) { mBits = static_cast<storagetype>(e); return *this; } template <typename enumtype, typename storagetype> NV_INLINE bool NvFlags<enumtype, storagetype>::operator==(enumtype e) const { return mBits == static_cast<storagetype>(e); } template <typename enumtype, typename storagetype> NV_INLINE bool NvFlags<enumtype, storagetype>::operator==(const NvFlags<enumtype, storagetype>& f) const { return mBits == f.mBits; } template <typename enumtype, typename storagetype> NV_INLINE bool NvFlags<enumtype, storagetype>::operator==(bool b) const { return bool(*this) == b; } template <typename enumtype, typename storagetype> NV_INLINE bool NvFlags<enumtype, storagetype>::operator!=(enumtype e) const { return mBits != static_cast<storagetype>(e); } template <typename enumtype, typename storagetype> NV_INLINE bool NvFlags<enumtype, storagetype>::operator!=(const NvFlags<enumtype, storagetype>& f) const { return mBits != f.mBits; } template <typename enumtype, typename storagetype> NV_INLINE NvFlags<enumtype, storagetype>& NvFlags<enumtype, storagetype>::operator=(enumtype e) { mBits = static_cast<storagetype>(e); return *this; } template <typename enumtype, typename storagetype> NV_INLINE NvFlags<enumtype, storagetype>& NvFlags<enumtype, storagetype>::operator=(const NvFlags<enumtype, storagetype>& f) { mBits = f.mBits; return *this; } template <typename enumtype, typename storagetype> NV_INLINE NvFlags<enumtype, storagetype>& NvFlags<enumtype, storagetype>::operator|=(enumtype e) { mBits |= static_cast<storagetype>(e); return *this; } template <typename enumtype, typename storagetype> NV_INLINE NvFlags<enumtype, storagetype>& NvFlags<enumtype, storagetype>:: operator|=(const NvFlags<enumtype, storagetype>& f) { mBits |= f.mBits; return *this; } template <typename enumtype, typename storagetype> NV_INLINE NvFlags<enumtype, storagetype> NvFlags<enumtype, storagetype>::operator|(enumtype e) const { NvFlags<enumtype, storagetype> out(*this); out |= e; return out; } template <typename enumtype, typename storagetype> NV_INLINE NvFlags<enumtype, storagetype> NvFlags<enumtype, storagetype>:: operator|(const NvFlags<enumtype, storagetype>& f) const { NvFlags<enumtype, storagetype> out(*this); out |= f; return out; } template <typename enumtype, typename storagetype> NV_INLINE NvFlags<enumtype, storagetype>& NvFlags<enumtype, storagetype>::operator&=(enumtype e) { mBits &= static_cast<storagetype>(e); return *this; } template <typename enumtype, typename storagetype> NV_INLINE NvFlags<enumtype, storagetype>& NvFlags<enumtype, storagetype>:: operator&=(const NvFlags<enumtype, storagetype>& f) { mBits &= f.mBits; return *this; } template <typename enumtype, typename storagetype> NV_INLINE NvFlags<enumtype, storagetype> NvFlags<enumtype, storagetype>::operator&(enumtype e) const { NvFlags<enumtype, storagetype> out = *this; out.mBits &= static_cast<storagetype>(e); return out; } template <typename enumtype, typename storagetype> NV_INLINE NvFlags<enumtype, storagetype> NvFlags<enumtype, storagetype>:: operator&(const NvFlags<enumtype, storagetype>& f) const { 
NvFlags<enumtype, storagetype> out = *this; out.mBits &= f.mBits; return out; } template <typename enumtype, typename storagetype> NV_INLINE NvFlags<enumtype, storagetype>& NvFlags<enumtype, storagetype>::operator^=(enumtype e) { mBits ^= static_cast<storagetype>(e); return *this; } template <typename enumtype, typename storagetype> NV_INLINE NvFlags<enumtype, storagetype>& NvFlags<enumtype, storagetype>:: operator^=(const NvFlags<enumtype, storagetype>& f) { mBits ^= f.mBits; return *this; } template <typename enumtype, typename storagetype> NV_INLINE NvFlags<enumtype, storagetype> NvFlags<enumtype, storagetype>::operator^(enumtype e) const { NvFlags<enumtype, storagetype> out = *this; out.mBits ^= static_cast<storagetype>(e); return out; } template <typename enumtype, typename storagetype> NV_INLINE NvFlags<enumtype, storagetype> NvFlags<enumtype, storagetype>:: operator^(const NvFlags<enumtype, storagetype>& f) const { NvFlags<enumtype, storagetype> out = *this; out.mBits ^= f.mBits; return out; } template <typename enumtype, typename storagetype> NV_INLINE NvFlags<enumtype, storagetype> NvFlags<enumtype, storagetype>::operator~(void) const { NvFlags<enumtype, storagetype> out; out.mBits = storagetype(~mBits); return out; } template <typename enumtype, typename storagetype> NV_INLINE NvFlags<enumtype, storagetype>::operator bool(void) const { return mBits ? true : false; } template <typename enumtype, typename storagetype> NV_INLINE NvFlags<enumtype, storagetype>::operator uint8_t(void) const { return static_cast<uint8_t>(mBits); } template <typename enumtype, typename storagetype> NV_INLINE NvFlags<enumtype, storagetype>::operator uint16_t(void) const { return static_cast<uint16_t>(mBits); } template <typename enumtype, typename storagetype> NV_INLINE NvFlags<enumtype, storagetype>::operator uint32_t(void) const { return static_cast<uint32_t>(mBits); } template <typename enumtype, typename storagetype> NV_INLINE void NvFlags<enumtype, storagetype>::clear(enumtype e) { mBits &= ~static_cast<storagetype>(e); } #if !NV_DOXYGEN } // namespace nvidia #endif /** @} */ #endif // #ifndef NV_NVFOUNDATION_NVFLAGS_H
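// Editorial sketch (not part of the original header): completes the MyEnum example from the
// class documentation using NV_FLAGS_TYPEDEF, which both declares the MyEnums flag type and
// defines the global enum operators. Placed inside namespace nvidia so the unqualified
// NvFlags references inside the macros resolve; MyEnum itself is a hypothetical enum.
namespace nvidia
{
struct MyEnum
{
    enum Enum
    {
        eMAN  = 1,
        eBEAR = 2,
        ePIG  = 4
    };
};
NV_FLAGS_TYPEDEF(MyEnum, uint8_t) // typedef NvFlags<MyEnum::Enum, uint8_t> MyEnums; plus operators

static void nvFlagsUsageSketch()
{
    MyEnums flags(MyEnum::eMAN);
    flags |= MyEnum::eBEAR | MyEnum::ePIG; // operator| comes from NV_FLAGS_OPERATORS
    if(flags.isSet(MyEnum::ePIG))
        flags.clear(MyEnum::ePIG);         // remove a single bit
    NV_UNUSED(flags);
}
} // namespace nvidia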
NVIDIA-Omniverse/PhysX/blast/include/shared/NvFoundation/NvSimpleTypes.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NVFOUNDATION_NVSIMPLETYPES_H #define NV_NVFOUNDATION_NVSIMPLETYPES_H /** \addtogroup foundation @{ */ // Platform specific types: // Design note: Its OK to use int for general loop variables and temps. #include "NvPreprocessor.h" #if NV_VC #pragma warning(push) #pragma warning(disable : 4668) // suppressing warning generated by Microsoft Visual Studio when including this standard // header #endif #if NV_LINUX #define __STDC_LIMIT_MACROS #endif #include <stdint.h> #if NV_VC #pragma warning(pop) #endif // Type ranges // These are here because we sometimes have non-IEEE compliant platforms to deal with. // Removal is under consideration (issue GWSD-34) #define NV_MAX_F32 3.4028234663852885981170418348452e+38F // maximum possible float value #define NV_MAX_F64 DBL_MAX // maximum possible double value #define NV_EPS_F32 FLT_EPSILON // maximum relative error of float rounding #define NV_EPS_F64 DBL_EPSILON // maximum relative error of double rounding #define NV_MAX_REAL NV_MAX_F32 #define NV_EPS_REAL NV_EPS_F32 #define NV_NORMALIZATION_EPSILON float(1e-20f) /** @} */ #endif // #ifndef NV_NVFOUNDATION_NVSIMPLETYPES_H
NVIDIA-Omniverse/PhysX/blast/include/shared/NvFoundation/NvPreprocessor.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NVFOUNDATION_NVPREPROCESSOR_H #define NV_NVFOUNDATION_NVPREPROCESSOR_H #include <stddef.h> /** \addtogroup foundation @{ */ /* The following preprocessor identifiers specify compiler, OS, and architecture. All definitions have a value of 1 or 0, use '#if' instead of '#ifdef'. 
*/ /** Compiler defines, see http://sourceforge.net/p/predef/wiki/Compilers/ */ #if defined(_MSC_VER) #if _MSC_VER >= 1900 #define NV_VC 14 #elif _MSC_VER >= 1800 #define NV_VC 12 #elif _MSC_VER >= 1700 #define NV_VC 11 #elif _MSC_VER >= 1600 #define NV_VC 10 #elif _MSC_VER >= 1500 #define NV_VC 9 #else #error "Unknown VC version" #endif #elif defined(__clang__) #define NV_CLANG 1 #elif defined(__SNC__) #define NV_SNC 1 #elif defined(__ghs__) #define NV_GHS 1 #elif defined(__GNUC__) // note: __clang__, __SNC__, or __ghs__ imply __GNUC__ #define NV_GCC 1 #else #error "Unknown compiler" #endif /** Operating system defines, see http://sourceforge.net/p/predef/wiki/OperatingSystems/ */ #if defined(WINAPI_FAMILY) && WINAPI_FAMILY == WINAPI_PARTITION_APP #define NV_WINRT 1 // Windows Runtime, either on Windows RT or Windows 8 #elif defined(XBOXONE) #define NV_XBOXONE 1 #elif defined(_WIN64) // note: XBOXONE implies _WIN64 #define NV_WIN64 1 #elif defined(_M_PPC) #define NV_X360 1 #elif defined(_WIN32) // note: _M_PPC implies _WIN32 #define NV_WIN32 1 #elif defined(__ANDROID__) #define NV_ANDROID 1 #elif defined(__linux__) // note: __ANDROID__ implies __linux__ #define NV_LINUX 1 #elif defined(__APPLE__) && (defined(__arm__) || defined(__arm64__)) #define NV_IOS 1 #elif defined(__APPLE__) #define NV_OSX 1 #elif defined(__CELLOS_LV2__) #define NV_PS3 1 #elif defined(__ORBIS__) #define NV_PS4 1 #elif defined(__SNC__) && defined(__arm__) #define NV_PSP2 1 #elif defined(__ghs__) #define NV_WIIU 1 #else #error "Unknown operating system" #endif /** Architecture defines, see http://sourceforge.net/p/predef/wiki/Architectures/ */ #if defined(__x86_64__) || defined(_M_X64) // ps4 compiler defines _M_X64 without value #define NV_X64 1 #elif defined(__i386__) || defined(_M_IX86) #define NV_X86 1 #elif defined(__arm64__) || defined(__aarch64__) #define NV_A64 1 #elif defined(__arm__) || defined(_M_ARM) #define NV_ARM 1 #elif defined(__SPU__) #define NV_SPU 1 #elif defined(__ppc__) || defined(_M_PPC) || defined(__CELLOS_LV2__) #define NV_PPC 1 #else #error "Unknown architecture" #endif /** SIMD defines */ #if defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) #define NV_SSE2 1 #endif #if defined(_M_ARM) || defined(__ARM_NEON__) #define NV_NEON 1 #endif #if defined(_M_PPC) || defined(__CELLOS_LV2__) #define NV_VMX 1 #endif /** define anything not defined on this platform to 0 */ #ifndef NV_VC #define NV_VC 0 #endif #ifndef NV_CLANG #define NV_CLANG 0 #endif #ifndef NV_SNC #define NV_SNC 0 #endif #ifndef NV_GHS #define NV_GHS 0 #endif #ifndef NV_GCC #define NV_GCC 0 #endif #ifndef NV_WINRT #define NV_WINRT 0 #endif #ifndef NV_XBOXONE #define NV_XBOXONE 0 #endif #ifndef NV_WIN64 #define NV_WIN64 0 #endif #ifndef NV_X360 #define NV_X360 0 #endif #ifndef NV_WIN32 #define NV_WIN32 0 #endif #ifndef NV_ANDROID #define NV_ANDROID 0 #endif #ifndef NV_LINUX #define NV_LINUX 0 #endif #ifndef NV_IOS #define NV_IOS 0 #endif #ifndef NV_OSX #define NV_OSX 0 #endif #ifndef NV_PS3 #define NV_PS3 0 #endif #ifndef NV_PS4 #define NV_PS4 0 #endif #ifndef NV_PSP2 #define NV_PSP2 0 #endif #ifndef NV_WIIU #define NV_WIIU 0 #endif #ifndef NV_X64 #define NV_X64 0 #endif #ifndef NV_X86 #define NV_X86 0 #endif #ifndef NV_A64 #define NV_A64 0 #endif #ifndef NV_ARM #define NV_ARM 0 #endif #ifndef NV_SPU #define NV_SPU 0 #endif #ifndef NV_PPC #define NV_PPC 0 #endif #ifndef NV_SSE2 #define NV_SSE2 0 #endif #ifndef NV_NEON #define NV_NEON 0 #endif #ifndef NV_VMX #define NV_VMX 0 #endif /* define anything not 
defined through the command line to 0 */ #ifndef NV_DEBUG #define NV_DEBUG 0 #endif #ifndef NV_CHECKED #define NV_CHECKED 0 #endif #ifndef NV_PROFILE #define NV_PROFILE 0 #endif #ifndef NV_NVTX #define NV_NVTX 0 #endif #ifndef NV_DOXYGEN #define NV_DOXYGEN 0 #endif /** family shortcuts */ // compiler #define NV_GCC_FAMILY (NV_CLANG || NV_SNC || NV_GHS || NV_GCC) // os #define NV_WINDOWS_FAMILY (NV_WINRT || NV_WIN32 || NV_WIN64) #define NV_MICROSOFT_FAMILY (NV_XBOXONE || NV_X360 || NV_WINDOWS_FAMILY) #define NV_LINUX_FAMILY (NV_LINUX || NV_ANDROID) #define NV_APPLE_FAMILY (NV_IOS || NV_OSX) // equivalent to #if __APPLE__ #define NV_UNIX_FAMILY (NV_LINUX_FAMILY || NV_APPLE_FAMILY) // shortcut for unix/posix platforms // architecture #define NV_INTEL_FAMILY (NV_X64 || NV_X86) #define NV_ARM_FAMILY (NV_ARM || NV_A64) #define NV_P64_FAMILY (NV_X64 || NV_A64) // shortcut for 64-bit architectures // shortcut for PS3 PPU #define NV_PPU (NV_PS3&& NV_PPC) /** Assert macro */ #ifndef NV_ENABLE_ASSERTS #if NV_DEBUG && !defined(__CUDACC__) #define NV_ENABLE_ASSERTS 1 #else #define NV_ENABLE_ASSERTS 0 #endif #endif /** DLL export macros */ #ifndef NV_C_EXPORT #if NV_WINDOWS_FAMILY || NV_LINUX #define NV_C_EXPORT extern "C" #else #define NV_C_EXPORT #endif #endif #if NV_UNIX_FAMILY&& __GNUC__ >= 4 #define NV_UNIX_EXPORT __attribute__((visibility("default"))) #else #define NV_UNIX_EXPORT #endif #if NV_WINDOWS_FAMILY #define NV_DLL_EXPORT __declspec(dllexport) #define NV_DLL_IMPORT __declspec(dllimport) #else #define NV_DLL_EXPORT NV_UNIX_EXPORT #define NV_DLL_IMPORT #endif /** Define API function declaration NV_FOUNDATION_DLL=1 - used by the DLL library (PhysXCommon) to export the API NV_FOUNDATION_DLL=0 - for windows configurations where the NV_FOUNDATION_API is linked through standard static linking no definition - this will allow DLLs and libraries to use the exported API from PhysXCommon */ #if NV_WINDOWS_FAMILY && !NV_ARM_FAMILY || NV_WINRT #ifndef NV_FOUNDATION_DLL #define NV_FOUNDATION_API NV_DLL_IMPORT #elif NV_FOUNDATION_DLL #define NV_FOUNDATION_API NV_DLL_EXPORT #endif #elif NV_UNIX_FAMILY #ifdef NV_FOUNDATION_DLL #define NV_FOUNDATION_API NV_UNIX_EXPORT #endif #endif #ifndef NV_FOUNDATION_API #define NV_FOUNDATION_API #endif /** Calling convention */ #ifndef NV_CALL_CONV #if NV_MICROSOFT_FAMILY #define NV_CALL_CONV __cdecl #else #define NV_CALL_CONV #endif #endif /** Pack macros - disabled on SPU because they are not supported */ #if NV_VC #define NV_PUSH_PACK_DEFAULT __pragma(pack(push, 8)) #define NV_POP_PACK __pragma(pack(pop)) #elif NV_GCC_FAMILY && !NV_SPU #define NV_PUSH_PACK_DEFAULT _Pragma("pack(push, 8)") #define NV_POP_PACK _Pragma("pack(pop)") #else #define NV_PUSH_PACK_DEFAULT #define NV_POP_PACK #endif /** Inline macro */ #define NV_INLINE inline #if NV_MICROSOFT_FAMILY #pragma inline_depth(255) #endif /** Force inline macro */ #if NV_VC #define NV_FORCE_INLINE __forceinline #elif NV_LINUX // Workaround; Fedora Core 3 do not agree with force inline and NvcPool #define NV_FORCE_INLINE inline #elif NV_GCC_FAMILY #define NV_FORCE_INLINE inline __attribute__((always_inline)) #else #define NV_FORCE_INLINE inline #endif /** Noinline macro */ #if NV_MICROSOFT_FAMILY #define NV_NOINLINE __declspec(noinline) #elif NV_GCC_FAMILY #define NV_NOINLINE __attribute__((noinline)) #else #define NV_NOINLINE #endif /** Restrict macro */ #if defined(__CUDACC__) #define NV_RESTRICT __restrict__ #else #define NV_RESTRICT __restrict #endif /** Noalias macro */ #if NV_MICROSOFT_FAMILY #define NV_NOALIAS 
__declspec(noalias) #else #define NV_NOALIAS #endif /** Alignment macros NV_ALIGN_PREFIX and NV_ALIGN_SUFFIX can be used for type alignment instead of aligning individual variables as follows: NV_ALIGN_PREFIX(16) struct A { ... } NV_ALIGN_SUFFIX(16); This declaration style is parsed correctly by Visual Assist. */ #ifndef NV_ALIGN #if NV_MICROSOFT_FAMILY #define NV_ALIGN(alignment, decl) __declspec(align(alignment)) decl #define NV_ALIGN_PREFIX(alignment) __declspec(align(alignment)) #define NV_ALIGN_SUFFIX(alignment) #elif NV_GCC_FAMILY #define NV_ALIGN(alignment, decl) decl __attribute__((aligned(alignment))) #define NV_ALIGN_PREFIX(alignment) #define NV_ALIGN_SUFFIX(alignment) __attribute__((aligned(alignment))) #else #define NV_ALIGN(alignment, decl) #define NV_ALIGN_PREFIX(alignment) #define NV_ALIGN_SUFFIX(alignment) #endif #endif /** Deprecated macro - To deprecate a function: Place NV_DEPRECATED at the start of the function header (leftmost word). - To deprecate a 'typedef', a 'struct' or a 'class': Place NV_DEPRECATED directly after the keywords ('typdef', 'struct', 'class'). Use these macro definitions to create warnings for deprecated functions #define NV_DEPRECATED __declspec(deprecated) // Microsoft #define NV_DEPRECATED __attribute__((deprecated())) // GCC */ #define NV_DEPRECATED /** General defines */ // static assert #if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)) || defined(__ORBIS__) #define NV_COMPILE_TIME_ASSERT(exp) typedef char NvCompileTimeAssert_Dummy[(exp) ? 1 : -1] __attribute__((unused)) #else #define NV_COMPILE_TIME_ASSERT(exp) typedef char NvCompileTimeAssert_Dummy[(exp) ? 1 : -1] #endif #if NV_GCC_FAMILY && !NV_SNC && !NV_GHS #define NV_OFFSET_OF(X, Y) __builtin_offsetof(X, Y) #else #define NV_OFFSET_OF(X, Y) offsetof(X, Y) #endif #define NV_OFFSETOF_BASE 0x100 // casting the null ptr takes a special-case code path, which we don't want #define NV_OFFSET_OF_RT(Class, Member) \ (reinterpret_cast<size_t>(&reinterpret_cast<Class*>(NV_OFFSETOF_BASE)->Member) - size_t(NV_OFFSETOF_BASE)) // check that exactly one of NDEBUG and _DEBUG is defined #if !defined(NDEBUG) ^ defined(_DEBUG) #error Exactly one of NDEBUG and _DEBUG needs to be defined! #endif // make sure NV_CHECKED is defined in all _DEBUG configurations as well #if !NV_CHECKED && NV_DEBUG #error NV_CHECKED must be defined when NV_DEBUG is defined #endif #ifdef __CUDACC__ #define NV_CUDA_CALLABLE __host__ __device__ #else #define NV_CUDA_CALLABLE #endif // avoid unreferenced parameter warning // preferred solution: omit the parameter's name from the declaration template <class T> NV_CUDA_CALLABLE NV_INLINE void NV_UNUSED(T const&) { } // Ensure that the application hasn't tweaked the pack value to less than 8, which would break // matching between the API headers and the binaries // This assert works on win32/win64/360/ps3, but may need further specialization on other platforms. // Some GCC compilers need the compiler flag -malign-double to be set. // Apparently the apple-clang-llvm compiler doesn't support malign-double. 
#if NV_PS4 || NV_APPLE_FAMILY struct NvPackValidation { char _; long a; }; #elif NV_ANDROID struct NvPackValidation { char _; double a; }; #else struct NvPackValidation { char _; long long a; }; #endif #if !NV_APPLE_FAMILY NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvPackValidation, a) == 8); #endif // use in a cpp file to suppress LNK4221 #if NV_VC #define NV_DUMMY_SYMBOL \ namespace \ { \ char NvDummySymbol; \ } #else #define NV_DUMMY_SYMBOL #endif #if NV_GCC_FAMILY && !NV_GHS #define NV_WEAK_SYMBOL __attribute__((weak)) // this is to support SIMD constant merging in template specialization #else #define NV_WEAK_SYMBOL #endif // Macro for avoiding default assignment and copy, because doing this by inheritance can increase class size on some // platforms. #define NV_NOCOPY(Class) \ \ protected: \ Class(const Class&); \ Class& operator=(const Class&); #define NV_STRINGIZE_HELPER(X) #X #define NV_STRINGIZE(X) NV_STRINGIZE_HELPER(X) #define NV_CONCAT_HELPER(X, Y) X##Y #define NV_CONCAT(X, Y) NV_CONCAT_HELPER(X, Y) // C-style API declaration. #define NV_C_API NV_C_EXPORT NV_DLL_EXPORT /** @} */ #endif // #ifndef NV_NVFOUNDATION_NVPREPROCESSOR_H
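// Editorial sketch (not part of the original header): the alignment macros are used as a
// prefix/suffix pair around a type declaration, and NV_COMPILE_TIME_ASSERT / NV_OFFSET_OF can
// then verify the resulting layout at compile time. AlignedBlob is a hypothetical example type.
NV_ALIGN_PREFIX(16)
struct AlignedBlob
{
    float values[4];
} NV_ALIGN_SUFFIX(16);

NV_COMPILE_TIME_ASSERT(sizeof(AlignedBlob) == 16);
NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(AlignedBlob, values) == 0);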
NVIDIA-Omniverse/PhysX/blast/include/shared/NvFoundation/NvErrors.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NVFOUNDATION_NVERRORS_H #define NV_NVFOUNDATION_NVERRORS_H /** \addtogroup foundation @{ */ #include "Nv.h" #if !NV_DOXYGEN namespace nvidia { #endif /** \brief Error codes These error codes are passed to #NvErrorCallback @see NvErrorCallback */ struct NvErrorCode { enum Enum { eNO_ERROR = 0, //! \brief An informational message. eDEBUG_INFO = 1, //! \brief a warning message for the user to help with debugging eDEBUG_WARNING = 2, //! \brief method called with invalid parameter(s) eINVALID_PARAMETER = 4, //! \brief method was called at a time when an operation is not possible eINVALID_OPERATION = 8, //! \brief method failed to allocate some memory eOUT_OF_MEMORY = 16, /** \brief The library failed for some reason. Possibly you have passed invalid values like NaNs, which are not checked for. */ eINTERNAL_ERROR = 32, //! \brief An unrecoverable error, execution should be halted and log output flushed eABORT = 64, //! \brief The SDK has determined that an operation may result in poor performance. ePERF_WARNING = 128, //! \brief A bit mask for including all errors eMASK_ALL = -1 }; }; #if !NV_DOXYGEN } // namespace nvidia #endif /** @} */ #endif // #ifndef NV_NVFOUNDATION_NVERRORS_H
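// Editorial sketch (not part of the original header): NvErrorCode values are distinct bits
// (apart from eNO_ERROR and the eMASK_ALL mask), so they can be tested or combined as masks;
// an error-reporting hook will typically switch on them, for example to produce a label.
static const char* errorCodeNameSketch(nvidia::NvErrorCode::Enum code)
{
    switch(code)
    {
    case nvidia::NvErrorCode::eNO_ERROR:          return "no error";
    case nvidia::NvErrorCode::eDEBUG_INFO:        return "debug info";
    case nvidia::NvErrorCode::eDEBUG_WARNING:     return "debug warning";
    case nvidia::NvErrorCode::eINVALID_PARAMETER: return "invalid parameter";
    case nvidia::NvErrorCode::eINVALID_OPERATION: return "invalid operation";
    case nvidia::NvErrorCode::eOUT_OF_MEMORY:     return "out of memory";
    case nvidia::NvErrorCode::eINTERNAL_ERROR:    return "internal error";
    case nvidia::NvErrorCode::eABORT:             return "abort";
    case nvidia::NvErrorCode::ePERF_WARNING:      return "performance warning";
    default:                                      return "unknown";
    }
}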
NVIDIA-Omniverse/PhysX/blast/include/shared/NvFoundation/NvIntrinsics.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NVFOUNDATION_NVINTRINSICS_H #define NV_NVFOUNDATION_NVINTRINSICS_H #include "NvPreprocessor.h" #if NV_WINDOWS_FAMILY #include "platform/windows/NvWindowsIntrinsics.h" #elif NV_X360 #include "xbox360/NvXbox360Intrinsics.h" #elif(NV_LINUX || NV_ANDROID || NV_APPLE_FAMILY || NV_PS4) #include "platform/unix/NvUnixIntrinsics.h" #elif NV_PS3 #include "ps3/NvPS3Intrinsics.h" #elif NV_PSP2 #include "psp2/NvPSP2Intrinsics.h" #elif NV_WIIU #include "wiiu/NvWiiUIntrinsics.h" #elif NV_XBOXONE #include "XboxOne/NvXboxOneIntrinsics.h" #else #error "Platform not supported!" #endif #endif // #ifndef NV_NVFOUNDATION_NVINTRINSICS_H
NVIDIA-Omniverse/PhysX/blast/include/shared/NvFoundation/NvBounds3.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NVFOUNDATION_NVBOUNDS3_H #define NV_NVFOUNDATION_NVBOUNDS3_H /** \addtogroup foundation @{ */ #include "NvTransform.h" #include "NvMat33.h" #if !NV_DOXYGEN namespace nvidia { #endif // maximum extents defined such that floating point exceptions are avoided for standard use cases #define NV_MAX_BOUNDS_EXTENTS (NV_MAX_REAL * 0.25f) /** \brief Class representing 3D range or axis aligned bounding box. Stored as minimum and maximum extent corners. Alternate representation would be center and dimensions. May be empty or nonempty. For nonempty bounds, minimum <= maximum has to hold for all axes. Empty bounds have to be represented as minimum = NV_MAX_BOUNDS_EXTENTS and maximum = -NV_MAX_BOUNDS_EXTENTS for all axes. All other representations are invalid and the behavior is undefined. */ class NvBounds3 { public: /** \brief Default constructor, not performing any initialization for performance reason. \remark Use empty() function below to construct empty bounds. */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvBounds3() { } /** \brief Construct from two bounding points */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvBounds3(const NvVec3& minimum, const NvVec3& maximum); /** \brief Return empty bounds. */ static NV_CUDA_CALLABLE NV_FORCE_INLINE NvBounds3 empty(); /** \brief returns the AABB containing v0 and v1. \param v0 first point included in the AABB. \param v1 second point included in the AABB. */ static NV_CUDA_CALLABLE NV_FORCE_INLINE NvBounds3 boundsOfPoints(const NvVec3& v0, const NvVec3& v1); /** \brief returns the AABB from center and extents vectors. 
\param center Center vector \param extent Extents vector */ static NV_CUDA_CALLABLE NV_FORCE_INLINE NvBounds3 centerExtents(const NvVec3& center, const NvVec3& extent); /** \brief Construct from center, extent, and (not necessarily orthogonal) basis */ static NV_CUDA_CALLABLE NV_INLINE NvBounds3 basisExtent(const NvVec3& center, const NvMat33& basis, const NvVec3& extent); /** \brief Construct from pose and extent */ static NV_CUDA_CALLABLE NV_INLINE NvBounds3 poseExtent(const NvTransform& pose, const NvVec3& extent); /** \brief gets the transformed bounds of the passed AABB (resulting in a bigger AABB). This version is safe to call for empty bounds. \param[in] matrix Transform to apply, can contain scaling as well \param[in] bounds The bounds to transform. */ static NV_CUDA_CALLABLE NV_INLINE NvBounds3 transformSafe(const NvMat33& matrix, const NvBounds3& bounds); /** \brief gets the transformed bounds of the passed AABB (resulting in a bigger AABB). Calling this method for empty bounds leads to undefined behavior. Use #transformSafe() instead. \param[in] matrix Transform to apply, can contain scaling as well \param[in] bounds The bounds to transform. */ static NV_CUDA_CALLABLE NV_INLINE NvBounds3 transformFast(const NvMat33& matrix, const NvBounds3& bounds); /** \brief gets the transformed bounds of the passed AABB (resulting in a bigger AABB). This version is safe to call for empty bounds. \param[in] transform Transform to apply, can contain scaling as well \param[in] bounds The bounds to transform. */ static NV_CUDA_CALLABLE NV_INLINE NvBounds3 transformSafe(const NvTransform& transform, const NvBounds3& bounds); /** \brief gets the transformed bounds of the passed AABB (resulting in a bigger AABB). Calling this method for empty bounds leads to undefined behavior. Use #transformSafe() instead. \param[in] transform Transform to apply, can contain scaling as well \param[in] bounds The bounds to transform. */ static NV_CUDA_CALLABLE NV_INLINE NvBounds3 transformFast(const NvTransform& transform, const NvBounds3& bounds); /** \brief Sets empty to true */ NV_CUDA_CALLABLE NV_FORCE_INLINE void setEmpty(); /** \brief Sets the bounds to maximum size [-NV_MAX_BOUNDS_EXTENTS, NV_MAX_BOUNDS_EXTENTS]. */ NV_CUDA_CALLABLE NV_FORCE_INLINE void setMaximal(); /** \brief expands the volume to include v \param v Point to expand to. */ NV_CUDA_CALLABLE NV_FORCE_INLINE void include(const NvVec3& v); /** \brief expands the volume to include b. \param b Bounds to perform union with. */ NV_CUDA_CALLABLE NV_FORCE_INLINE void include(const NvBounds3& b); NV_CUDA_CALLABLE NV_FORCE_INLINE bool isEmpty() const; /** \brief indicates whether the intersection of this and b is empty or not. \param b Bounds to test for intersection. */ NV_CUDA_CALLABLE NV_FORCE_INLINE bool intersects(const NvBounds3& b) const; /** \brief computes the 1D-intersection between two AABBs, on a given axis. \param a the other AABB \param axis the axis (0, 1, 2) */ NV_CUDA_CALLABLE NV_FORCE_INLINE bool intersects1D(const NvBounds3& a, uint32_t axis) const; /** \brief indicates if these bounds contain v. \param v Point to test against bounds. */ NV_CUDA_CALLABLE NV_FORCE_INLINE bool contains(const NvVec3& v) const; /** \brief checks a box is inside another box. \param box the other AABB */ NV_CUDA_CALLABLE NV_FORCE_INLINE bool isInside(const NvBounds3& box) const; /** \brief returns the center of this axis aligned box. 
*/ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 getCenter() const; /** \brief get component of the box's center along a given axis */ NV_CUDA_CALLABLE NV_FORCE_INLINE float getCenter(uint32_t axis) const; /** \brief get component of the box's extents along a given axis */ NV_CUDA_CALLABLE NV_FORCE_INLINE float getExtents(uint32_t axis) const; /** \brief returns the dimensions (width/height/depth) of this axis aligned box. */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 getDimensions() const; /** \brief returns the extents, which are half of the width/height/depth. */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 getExtents() const; /** \brief scales the AABB. This version is safe to call for empty bounds. \param scale Factor to scale AABB by. */ NV_CUDA_CALLABLE NV_FORCE_INLINE void scaleSafe(float scale); /** \brief scales the AABB. Calling this method for empty bounds leads to undefined behavior. Use #scaleSafe() instead. \param scale Factor to scale AABB by. */ NV_CUDA_CALLABLE NV_FORCE_INLINE void scaleFast(float scale); /** fattens the AABB in all 3 dimensions by the given distance. This version is safe to call for empty bounds. */ NV_CUDA_CALLABLE NV_FORCE_INLINE void fattenSafe(float distance); /** fattens the AABB in all 3 dimensions by the given distance. Calling this method for empty bounds leads to undefined behavior. Use #fattenSafe() instead. */ NV_CUDA_CALLABLE NV_FORCE_INLINE void fattenFast(float distance); /** checks that the AABB values are not NaN */ NV_CUDA_CALLABLE NV_FORCE_INLINE bool isFinite() const; /** checks that the AABB values describe a valid configuration. */ NV_CUDA_CALLABLE NV_FORCE_INLINE bool isValid() const; NvVec3 minimum, maximum; }; NV_CUDA_CALLABLE NV_FORCE_INLINE NvBounds3::NvBounds3(const NvVec3& minimum_, const NvVec3& maximum_) : minimum(minimum_), maximum(maximum_) { } NV_CUDA_CALLABLE NV_FORCE_INLINE NvBounds3 NvBounds3::empty() { return NvBounds3(NvVec3(NV_MAX_BOUNDS_EXTENTS), NvVec3(-NV_MAX_BOUNDS_EXTENTS)); } NV_CUDA_CALLABLE NV_FORCE_INLINE bool NvBounds3::isFinite() const { return minimum.isFinite() && maximum.isFinite(); } NV_CUDA_CALLABLE NV_FORCE_INLINE NvBounds3 NvBounds3::boundsOfPoints(const NvVec3& v0, const NvVec3& v1) { return NvBounds3(v0.minimum(v1), v0.maximum(v1)); } NV_CUDA_CALLABLE NV_FORCE_INLINE NvBounds3 NvBounds3::centerExtents(const NvVec3& center, const NvVec3& extent) { return NvBounds3(center - extent, center + extent); } NV_CUDA_CALLABLE NV_INLINE NvBounds3 NvBounds3::basisExtent(const NvVec3& center, const NvMat33& basis, const NvVec3& extent) { // extended basis vectors NvVec3 c0 = basis.column0 * extent.x; NvVec3 c1 = basis.column1 * extent.y; NvVec3 c2 = basis.column2 * extent.z; NvVec3 w; // find combination of base vectors that produces max. 
distance for each component = sum of abs() w.x = NvAbs(c0.x) + NvAbs(c1.x) + NvAbs(c2.x); w.y = NvAbs(c0.y) + NvAbs(c1.y) + NvAbs(c2.y); w.z = NvAbs(c0.z) + NvAbs(c1.z) + NvAbs(c2.z); return NvBounds3(center - w, center + w); } NV_CUDA_CALLABLE NV_INLINE NvBounds3 NvBounds3::poseExtent(const NvTransform& pose, const NvVec3& extent) { return basisExtent(pose.p, NvMat33(pose.q), extent); } NV_CUDA_CALLABLE NV_FORCE_INLINE void NvBounds3::setEmpty() { minimum = NvVec3(NV_MAX_BOUNDS_EXTENTS); maximum = NvVec3(-NV_MAX_BOUNDS_EXTENTS); } NV_CUDA_CALLABLE NV_FORCE_INLINE void NvBounds3::setMaximal() { minimum = NvVec3(-NV_MAX_BOUNDS_EXTENTS); maximum = NvVec3(NV_MAX_BOUNDS_EXTENTS); } NV_CUDA_CALLABLE NV_FORCE_INLINE void NvBounds3::include(const NvVec3& v) { NV_ASSERT(isValid()); minimum = minimum.minimum(v); maximum = maximum.maximum(v); } NV_CUDA_CALLABLE NV_FORCE_INLINE void NvBounds3::include(const NvBounds3& b) { NV_ASSERT(isValid()); minimum = minimum.minimum(b.minimum); maximum = maximum.maximum(b.maximum); } NV_CUDA_CALLABLE NV_FORCE_INLINE bool NvBounds3::isEmpty() const { NV_ASSERT(isValid()); return minimum.x > maximum.x; } NV_CUDA_CALLABLE NV_FORCE_INLINE bool NvBounds3::intersects(const NvBounds3& b) const { NV_ASSERT(isValid() && b.isValid()); return !(b.minimum.x > maximum.x || minimum.x > b.maximum.x || b.minimum.y > maximum.y || minimum.y > b.maximum.y || b.minimum.z > maximum.z || minimum.z > b.maximum.z); } NV_CUDA_CALLABLE NV_FORCE_INLINE bool NvBounds3::intersects1D(const NvBounds3& a, uint32_t axis) const { NV_ASSERT(isValid() && a.isValid()); return maximum[axis] >= a.minimum[axis] && a.maximum[axis] >= minimum[axis]; } NV_CUDA_CALLABLE NV_FORCE_INLINE bool NvBounds3::contains(const NvVec3& v) const { NV_ASSERT(isValid()); return !(v.x < minimum.x || v.x > maximum.x || v.y < minimum.y || v.y > maximum.y || v.z < minimum.z || v.z > maximum.z); } NV_CUDA_CALLABLE NV_FORCE_INLINE bool NvBounds3::isInside(const NvBounds3& box) const { NV_ASSERT(isValid() && box.isValid()); if(box.minimum.x > minimum.x) return false; if(box.minimum.y > minimum.y) return false; if(box.minimum.z > minimum.z) return false; if(box.maximum.x < maximum.x) return false; if(box.maximum.y < maximum.y) return false; if(box.maximum.z < maximum.z) return false; return true; } NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 NvBounds3::getCenter() const { NV_ASSERT(isValid()); return (minimum + maximum) * 0.5f; } NV_CUDA_CALLABLE NV_FORCE_INLINE float NvBounds3::getCenter(uint32_t axis) const { NV_ASSERT(isValid()); return (minimum[axis] + maximum[axis]) * 0.5f; } NV_CUDA_CALLABLE NV_FORCE_INLINE float NvBounds3::getExtents(uint32_t axis) const { NV_ASSERT(isValid()); return (maximum[axis] - minimum[axis]) * 0.5f; } NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 NvBounds3::getDimensions() const { NV_ASSERT(isValid()); return maximum - minimum; } NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 NvBounds3::getExtents() const { NV_ASSERT(isValid()); return getDimensions() * 0.5f; } NV_CUDA_CALLABLE NV_FORCE_INLINE void NvBounds3::scaleSafe(float scale) { NV_ASSERT(isValid()); if(!isEmpty()) scaleFast(scale); } NV_CUDA_CALLABLE NV_FORCE_INLINE void NvBounds3::scaleFast(float scale) { NV_ASSERT(isValid()); *this = centerExtents(getCenter(), getExtents() * scale); } NV_CUDA_CALLABLE NV_FORCE_INLINE void NvBounds3::fattenSafe(float distance) { NV_ASSERT(isValid()); if(!isEmpty()) fattenFast(distance); } NV_CUDA_CALLABLE NV_FORCE_INLINE void NvBounds3::fattenFast(float distance) { NV_ASSERT(isValid()); minimum.x -= distance; minimum.y -= 
distance; minimum.z -= distance; maximum.x += distance; maximum.y += distance; maximum.z += distance; } NV_CUDA_CALLABLE NV_INLINE NvBounds3 NvBounds3::transformSafe(const NvMat33& matrix, const NvBounds3& bounds) { NV_ASSERT(bounds.isValid()); return !bounds.isEmpty() ? transformFast(matrix, bounds) : bounds; } NV_CUDA_CALLABLE NV_INLINE NvBounds3 NvBounds3::transformFast(const NvMat33& matrix, const NvBounds3& bounds) { NV_ASSERT(bounds.isValid()); return NvBounds3::basisExtent(matrix * bounds.getCenter(), matrix, bounds.getExtents()); } NV_CUDA_CALLABLE NV_INLINE NvBounds3 NvBounds3::transformSafe(const NvTransform& transform, const NvBounds3& bounds) { NV_ASSERT(bounds.isValid()); return !bounds.isEmpty() ? transformFast(transform, bounds) : bounds; } NV_CUDA_CALLABLE NV_INLINE NvBounds3 NvBounds3::transformFast(const NvTransform& transform, const NvBounds3& bounds) { NV_ASSERT(bounds.isValid()); return NvBounds3::basisExtent(transform.transform(bounds.getCenter()), NvMat33(transform.q), bounds.getExtents()); } NV_CUDA_CALLABLE NV_FORCE_INLINE bool NvBounds3::isValid() const { return (isFinite() && (((minimum.x <= maximum.x) && (minimum.y <= maximum.y) && (minimum.z <= maximum.z)) || ((minimum.x == NV_MAX_BOUNDS_EXTENTS) && (minimum.y == NV_MAX_BOUNDS_EXTENTS) && (minimum.z == NV_MAX_BOUNDS_EXTENTS) && (maximum.x == -NV_MAX_BOUNDS_EXTENTS) && (maximum.y == -NV_MAX_BOUNDS_EXTENTS) && (maximum.z == -NV_MAX_BOUNDS_EXTENTS)))); } #if !NV_DOXYGEN } // namespace nvidia #endif /** @} */ #endif // #ifndef NV_NVFOUNDATION_NVBOUNDS3_H
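A brief usage sketch (illustrative only, assuming the header is available as NvBounds3.h): building bounds from points with the empty()/include() pattern described above, then testing overlap against a box built from center and half-extents.

#include "NvBounds3.h"

// Hypothetical helper: returns true if the AABB of two points overlaps a unit box at the origin.
inline bool overlapsUnitBoxAtOrigin(const nvidia::NvVec3& a, const nvidia::NvVec3& b)
{
    // Start from the canonical empty representation, then grow it to cover both points.
    nvidia::NvBounds3 bounds = nvidia::NvBounds3::empty();
    bounds.include(a);
    bounds.include(b);

    // A unit box centered at the origin, built from center and half-extents.
    const nvidia::NvBounds3 unitBox =
        nvidia::NvBounds3::centerExtents(nvidia::NvVec3(0.0f, 0.0f, 0.0f), nvidia::NvVec3(0.5f, 0.5f, 0.5f));

    // Both operands are valid and non-empty here, so intersects() may be called directly.
    return bounds.intersects(unitBox);
}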
NVIDIA-Omniverse/PhysX/blast/include/shared/NvFoundation/NvTransform.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NVFOUNDATION_NVTRANSFORM_H #define NV_NVFOUNDATION_NVTRANSFORM_H /** \addtogroup foundation @{ */ #include "NvQuat.h" #include "NvPlane.h" #if !NV_DOXYGEN namespace nvidia { #endif /*! \brief class representing a rigid euclidean transform as a quaternion and a vector */ class NvTransform { public: NvQuat q; NvVec3 p; NV_CUDA_CALLABLE NV_FORCE_INLINE NvTransform() { } NV_CUDA_CALLABLE NV_FORCE_INLINE explicit NvTransform(const NvVec3& position) : q(NvIdentity), p(position) { } NV_CUDA_CALLABLE NV_FORCE_INLINE explicit NvTransform(NvIDENTITY r) : q(NvIdentity), p(NvZero) { NV_UNUSED(r); } NV_CUDA_CALLABLE NV_FORCE_INLINE explicit NvTransform(const NvQuat& orientation) : q(orientation), p(0) { NV_ASSERT(orientation.isSane()); } NV_CUDA_CALLABLE NV_FORCE_INLINE NvTransform(float x, float y, float z, NvQuat aQ = NvQuat(NvIdentity)) : q(aQ), p(x, y, z) { } NV_CUDA_CALLABLE NV_FORCE_INLINE NvTransform(const NvVec3& p0, const NvQuat& q0) : q(q0), p(p0) { NV_ASSERT(q0.isSane()); } NV_CUDA_CALLABLE NV_FORCE_INLINE explicit NvTransform(const NvMat44& m); // defined in NvMat44.h /** \brief returns true if the two transforms are exactly equal */ NV_CUDA_CALLABLE NV_INLINE bool operator==(const NvTransform& t) const { return p == t.p && q == t.q; } NV_CUDA_CALLABLE NV_FORCE_INLINE NvTransform operator*(const NvTransform& x) const { NV_ASSERT(x.isSane()); return transform(x); } //! 
Equals matrix multiplication NV_CUDA_CALLABLE NV_INLINE NvTransform& operator*=(NvTransform& other) { *this = *this * other; return *this; } NV_CUDA_CALLABLE NV_FORCE_INLINE NvTransform getInverse() const { NV_ASSERT(isFinite()); return NvTransform(q.rotateInv(-p), q.getConjugate()); } NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 transform(const NvVec3& input) const { NV_ASSERT(isFinite()); return q.rotate(input) + p; } NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 transformInv(const NvVec3& input) const { NV_ASSERT(isFinite()); return q.rotateInv(input - p); } NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 rotate(const NvVec3& input) const { NV_ASSERT(isFinite()); return q.rotate(input); } NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 rotateInv(const NvVec3& input) const { NV_ASSERT(isFinite()); return q.rotateInv(input); } //! Transform transform to parent (returns compound transform: first src, then *this) NV_CUDA_CALLABLE NV_FORCE_INLINE NvTransform transform(const NvTransform& src) const { NV_ASSERT(src.isSane()); NV_ASSERT(isSane()); // src = [srct, srcr] -> [r*srct + t, r*srcr] return NvTransform(q.rotate(src.p) + p, q * src.q); } /** \brief returns true if finite and q is a unit quaternion */ NV_CUDA_CALLABLE bool isValid() const { return p.isFinite() && q.isFinite() && q.isUnit(); } /** \brief returns true if finite and quat magnitude is reasonably close to unit to allow for some accumulation of error vs isValid */ NV_CUDA_CALLABLE bool isSane() const { return isFinite() && q.isSane(); } /** \brief returns true if all elems are finite (not NAN or INF, etc.) */ NV_CUDA_CALLABLE NV_FORCE_INLINE bool isFinite() const { return p.isFinite() && q.isFinite(); } //! Transform transform from parent (returns compound transform: first src, then this->inverse) NV_CUDA_CALLABLE NV_FORCE_INLINE NvTransform transformInv(const NvTransform& src) const { NV_ASSERT(src.isSane()); NV_ASSERT(isFinite()); // src = [srct, srcr] -> [r^-1*(srct-t), r^-1*srcr] NvQuat qinv = q.getConjugate(); return NvTransform(qinv.rotate(src.p - p), qinv * src.q); } /** \brief transform plane */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvPlane transform(const NvPlane& plane) const { NvVec3 transformedNormal = rotate(plane.n); return NvPlane(transformedNormal, plane.d - p.dot(transformedNormal)); } /** \brief inverse-transform plane */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvPlane inverseTransform(const NvPlane& plane) const { NvVec3 transformedNormal = rotateInv(plane.n); return NvPlane(transformedNormal, plane.d + p.dot(plane.n)); } /** \brief return a normalized transform (i.e. one in which the quaternion has unit magnitude) */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvTransform getNormalized() const { return NvTransform(p, q.getNormalized()); } }; #if !NV_DOXYGEN } // namespace nvidia #endif /** @} */ #endif // #ifndef NV_NVFOUNDATION_NVTRANSFORM_H
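A short usage sketch (illustrative only): composing a local pose with its parent pose and mapping a point into the parent's space, following the composition order documented on transform() above.

#include "NvTransform.h"

// Hypothetical helper: world pose of a child given its parent pose and local pose,
// plus a point expressed in the child's local frame mapped to world space.
inline nvidia::NvVec3 localPointToWorld(const nvidia::NvTransform& parentPose,
                                        const nvidia::NvTransform& localPose,
                                        const nvidia::NvVec3& pointInLocal)
{
    // transform(src) applies src first, then *this: localPose followed by parentPose.
    const nvidia::NvTransform worldPose = parentPose.transform(localPose);

    // Rotate then translate the point; worldPose.transformInv() would map it back.
    return worldPose.transform(pointInLocal);
}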
NVIDIA-Omniverse/PhysX/blast/include/shared/NvFoundation/NvQuat.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NVFOUNDATION_NVQUAT_H #define NV_NVFOUNDATION_NVQUAT_H /** \addtogroup foundation @{ */ #include "NvVec3.h" #if !NV_DOXYGEN namespace nvidia { #endif /** \brief This is a quaternion class. For more information on quaternion mathematics consult a mathematics source on complex numbers. */ class NvQuat { public: /** \brief Default constructor, does not do any initialization. */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat() { } //! identity constructor NV_CUDA_CALLABLE NV_INLINE NvQuat(NvIDENTITY r) : x(0.0f), y(0.0f), z(0.0f), w(1.0f) { NV_UNUSED(r); } /** \brief Constructor from a scalar: sets the real part w to the scalar value, and the imaginary parts (x,y,z) to zero */ explicit NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat(float r) : x(0.0f), y(0.0f), z(0.0f), w(r) { } /** \brief Constructor. Take note of the order of the elements! */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat(float nx, float ny, float nz, float nw) : x(nx), y(ny), z(nz), w(nw) { } /** \brief Creates from angle-axis representation. Axis must be normalized! Angle is in radians! <b>Unit:</b> Radians */ NV_CUDA_CALLABLE NV_INLINE NvQuat(float angleRadians, const NvVec3& unitAxis) { NV_ASSERT(NvAbs(1.0f - unitAxis.magnitude()) < 1e-3f); const float a = angleRadians * 0.5f; const float s = NvSin(a); w = NvCos(a); x = unitAxis.x * s; y = unitAxis.y * s; z = unitAxis.z * s; } /** \brief Copy ctor. */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat(const NvQuat& v) : x(v.x), y(v.y), z(v.z), w(v.w) { } /** \brief Creates from orientation matrix. \param[in] m Rotation matrix to extract quaternion from. 
*/ NV_CUDA_CALLABLE NV_INLINE explicit NvQuat(const NvMat33& m); /* defined in NvMat33.h */ /** \brief returns true if quat is identity */ NV_CUDA_CALLABLE NV_FORCE_INLINE bool isIdentity() const { return x==0.0f && y==0.0f && z==0.0f && w==1.0f; } /** \brief returns true if all elements are finite (not NAN or INF, etc.) */ NV_CUDA_CALLABLE bool isFinite() const { return NvIsFinite(x) && NvIsFinite(y) && NvIsFinite(z) && NvIsFinite(w); } /** \brief returns true if finite and magnitude is close to unit */ NV_CUDA_CALLABLE bool isUnit() const { const float unitTolerance = 1e-4f; return isFinite() && NvAbs(magnitude() - 1) < unitTolerance; } /** \brief returns true if finite and magnitude is reasonably close to unit to allow for some accumulation of error vs isValid */ NV_CUDA_CALLABLE bool isSane() const { const float unitTolerance = 1e-2f; return isFinite() && NvAbs(magnitude() - 1) < unitTolerance; } /** \brief returns true if the two quaternions are exactly equal */ NV_CUDA_CALLABLE NV_INLINE bool operator==(const NvQuat& q) const { return x == q.x && y == q.y && z == q.z && w == q.w; } /** \brief converts this quaternion to angle-axis representation */ NV_CUDA_CALLABLE NV_INLINE void toRadiansAndUnitAxis(float& angle, NvVec3& axis) const { const float quatEpsilon = 1.0e-8f; const float s2 = x * x + y * y + z * z; if(s2 < quatEpsilon * quatEpsilon) // can't extract a sensible axis { angle = 0.0f; axis = NvVec3(1.0f, 0.0f, 0.0f); } else { const float s = NvRecipSqrt(s2); axis = NvVec3(x, y, z) * s; angle = NvAbs(w) < quatEpsilon ? NvPi : NvAtan2(s2 * s, w) * 2.0f; } } /** \brief Gets the angle between this quat and the identity quaternion. <b>Unit:</b> Radians */ NV_CUDA_CALLABLE NV_INLINE float getAngle() const { return NvAcos(w) * 2.0f; } /** \brief Gets the angle between this quat and the argument <b>Unit:</b> Radians */ NV_CUDA_CALLABLE NV_INLINE float getAngle(const NvQuat& q) const { return NvAcos(dot(q)) * 2.0f; } /** \brief This is the squared 4D vector length, should be 1 for unit quaternions. */ NV_CUDA_CALLABLE NV_FORCE_INLINE float magnitudeSquared() const { return x * x + y * y + z * z + w * w; } /** \brief returns the scalar product of this and other. */ NV_CUDA_CALLABLE NV_FORCE_INLINE float dot(const NvQuat& v) const { return x * v.x + y * v.y + z * v.z + w * v.w; } NV_CUDA_CALLABLE NV_INLINE NvQuat getNormalized() const { const float s = 1.0f / magnitude(); return NvQuat(x * s, y * s, z * s, w * s); } NV_CUDA_CALLABLE NV_INLINE float magnitude() const { return NvSqrt(magnitudeSquared()); } // modifiers: /** \brief maps to the closest unit quaternion. */ NV_CUDA_CALLABLE NV_INLINE float normalize() // convert this NvQuat to a unit quaternion { const float mag = magnitude(); if(mag != 0.0f) { const float imag = 1.0f / mag; x *= imag; y *= imag; z *= imag; w *= imag; } return mag; } /* \brief returns the conjugate. \note for unit quaternions, this is the inverse. */ NV_CUDA_CALLABLE NV_INLINE NvQuat getConjugate() const { return NvQuat(-x, -y, -z, w); } /* \brief returns imaginary part. 
*/ NV_CUDA_CALLABLE NV_INLINE NvVec3 getImaginaryPart() const { return NvVec3(x, y, z); } /** brief computes rotation of x-axis */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 getBasisVector0() const { const float x2 = x * 2.0f; const float w2 = w * 2.0f; return NvVec3((w * w2) - 1.0f + x * x2, (z * w2) + y * x2, (-y * w2) + z * x2); } /** brief computes rotation of y-axis */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 getBasisVector1() const { const float y2 = y * 2.0f; const float w2 = w * 2.0f; return NvVec3((-z * w2) + x * y2, (w * w2) - 1.0f + y * y2, (x * w2) + z * y2); } /** brief computes rotation of z-axis */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 getBasisVector2() const { const float z2 = z * 2.0f; const float w2 = w * 2.0f; return NvVec3((y * w2) + x * z2, (-x * w2) + y * z2, (w * w2) - 1.0f + z * z2); } /** rotates passed vec by this (assumed unitary) */ NV_CUDA_CALLABLE NV_FORCE_INLINE const NvVec3 rotate(const NvVec3& v) const { const float vx = 2.0f * v.x; const float vy = 2.0f * v.y; const float vz = 2.0f * v.z; const float w2 = w * w - 0.5f; const float dot2 = (x * vx + y * vy + z * vz); return NvVec3((vx * w2 + (y * vz - z * vy) * w + x * dot2), (vy * w2 + (z * vx - x * vz) * w + y * dot2), (vz * w2 + (x * vy - y * vx) * w + z * dot2)); } /** inverse rotates passed vec by this (assumed unitary) */ NV_CUDA_CALLABLE NV_FORCE_INLINE const NvVec3 rotateInv(const NvVec3& v) const { const float vx = 2.0f * v.x; const float vy = 2.0f * v.y; const float vz = 2.0f * v.z; const float w2 = w * w - 0.5f; const float dot2 = (x * vx + y * vy + z * vz); return NvVec3((vx * w2 - (y * vz - z * vy) * w + x * dot2), (vy * w2 - (z * vx - x * vz) * w + y * dot2), (vz * w2 - (x * vy - y * vx) * w + z * dot2)); } /** \brief Assignment operator */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat& operator=(const NvQuat& p) { x = p.x; y = p.y; z = p.z; w = p.w; return *this; } NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat& operator*=(const NvQuat& q) { const float tx = w * q.x + q.w * x + y * q.z - q.y * z; const float ty = w * q.y + q.w * y + z * q.x - q.z * x; const float tz = w * q.z + q.w * z + x * q.y - q.x * y; w = w * q.w - q.x * x - y * q.y - q.z * z; x = tx; y = ty; z = tz; return *this; } NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat& operator+=(const NvQuat& q) { x += q.x; y += q.y; z += q.z; w += q.w; return *this; } NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat& operator-=(const NvQuat& q) { x -= q.x; y -= q.y; z -= q.z; w -= q.w; return *this; } NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat& operator*=(const float s) { x *= s; y *= s; z *= s; w *= s; return *this; } /** quaternion multiplication */ NV_CUDA_CALLABLE NV_INLINE NvQuat operator*(const NvQuat& q) const { return NvQuat(w * q.x + q.w * x + y * q.z - q.y * z, w * q.y + q.w * y + z * q.x - q.z * x, w * q.z + q.w * z + x * q.y - q.x * y, w * q.w - x * q.x - y * q.y - z * q.z); } /** quaternion addition */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat operator+(const NvQuat& q) const { return NvQuat(x + q.x, y + q.y, z + q.z, w + q.w); } /** quaternion subtraction */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat operator-() const { return NvQuat(-x, -y, -z, -w); } NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat operator-(const NvQuat& q) const { return NvQuat(x - q.x, y - q.y, z - q.z, w - q.w); } NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat operator*(float r) const { return NvQuat(x * r, y * r, z * r, w * r); } /** the quaternion elements */ float x, y, z, w; }; #if !NV_DOXYGEN } // namespace nvidia #endif /** @} */ #endif // #ifndef NV_NVFOUNDATION_NVQUAT_H
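A small usage sketch (illustrative only): creating a rotation from the angle-axis constructor and rotating a vector; per the notes above, the axis must be normalized and rotate() assumes a unit quaternion.

#include "NvQuat.h"

// Hypothetical helper: rotate a vector about the world Y axis by the given angle (radians).
inline nvidia::NvVec3 rotateAboutY(const nvidia::NvVec3& v, float angleRadians)
{
    // The axis passed to the angle-axis constructor must be normalized.
    const nvidia::NvQuat rotation(angleRadians, nvidia::NvVec3(0.0f, 1.0f, 0.0f));

    // For a unit quaternion, getConjugate() gives the inverse rotation if needed.
    return rotation.rotate(v);
}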
NVIDIA-Omniverse/PhysX/blast/include/shared/NvFoundation/NvVec4.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NVFOUNDATION_NVVEC4_H #define NV_NVFOUNDATION_NVVEC4_H /** \addtogroup foundation @{ */ #include "NvMath.h" #include "NvVec3.h" #include "NvAssert.h" /** \brief 4 Element vector class. This is a 4-dimensional vector class with public data members. */ #if !NV_DOXYGEN namespace nvidia { #endif class NvVec4 { public: /** \brief default constructor leaves data uninitialized. */ NV_CUDA_CALLABLE NV_INLINE NvVec4() { } /** \brief zero constructor. */ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec4(NvZERO r) : x(0.0f), y(0.0f), z(0.0f), w(0.0f) { NV_UNUSED(r); } /** \brief Assigns scalar parameter to all elements. Useful to initialize to zero or one. \param[in] a Value to assign to elements. */ explicit NV_CUDA_CALLABLE NV_INLINE NvVec4(float a) : x(a), y(a), z(a), w(a) { } /** \brief Initializes from 3 scalar parameters. \param[in] nx Value to initialize X component. \param[in] ny Value to initialize Y component. \param[in] nz Value to initialize Z component. \param[in] nw Value to initialize W component. */ NV_CUDA_CALLABLE NV_INLINE NvVec4(float nx, float ny, float nz, float nw) : x(nx), y(ny), z(nz), w(nw) { } /** \brief Initializes from 3 scalar parameters. \param[in] v Value to initialize the X, Y, and Z components. \param[in] nw Value to initialize W component. */ NV_CUDA_CALLABLE NV_INLINE NvVec4(const NvVec3& v, float nw) : x(v.x), y(v.y), z(v.z), w(nw) { } /** \brief Initializes from an array of scalar parameters. \param[in] v Value to initialize with. */ explicit NV_CUDA_CALLABLE NV_INLINE NvVec4(const float v[]) : x(v[0]), y(v[1]), z(v[2]), w(v[3]) { } /** \brief Copy ctor. 
*/ NV_CUDA_CALLABLE NV_INLINE NvVec4(const NvVec4& v) : x(v.x), y(v.y), z(v.z), w(v.w) { } // Operators /** \brief Assignment operator */ NV_CUDA_CALLABLE NV_INLINE NvVec4& operator=(const NvVec4& p) { x = p.x; y = p.y; z = p.z; w = p.w; return *this; } /** \brief element access */ NV_DEPRECATED NV_CUDA_CALLABLE NV_INLINE float& operator[](unsigned int index) { NV_ASSERT(index <= 3); return reinterpret_cast<float*>(this)[index]; } /** \brief element access */ NV_DEPRECATED NV_CUDA_CALLABLE NV_INLINE const float& operator[](unsigned int index) const { NV_ASSERT(index <= 3); return reinterpret_cast<const float*>(this)[index]; } /** \brief returns true if the two vectors are exactly equal. */ NV_CUDA_CALLABLE NV_INLINE bool operator==(const NvVec4& v) const { return x == v.x && y == v.y && z == v.z && w == v.w; } /** \brief returns true if the two vectors are not exactly equal. */ NV_CUDA_CALLABLE NV_INLINE bool operator!=(const NvVec4& v) const { return x != v.x || y != v.y || z != v.z || w != v.w; } /** \brief tests for exact zero vector */ NV_CUDA_CALLABLE NV_INLINE bool isZero() const { return x == 0 && y == 0 && z == 0 && w == 0; } /** \brief returns true if all 3 elems of the vector are finite (not NAN or INF, etc.) */ NV_CUDA_CALLABLE NV_INLINE bool isFinite() const { return NvIsFinite(x) && NvIsFinite(y) && NvIsFinite(z) && NvIsFinite(w); } /** \brief is normalized - used by API parameter validation */ NV_CUDA_CALLABLE NV_INLINE bool isNormalized() const { const float unitTolerance = 1e-4f; return isFinite() && NvAbs(magnitude() - 1) < unitTolerance; } /** \brief returns the squared magnitude Avoids calling NvSqrt()! */ NV_CUDA_CALLABLE NV_INLINE float magnitudeSquared() const { return x * x + y * y + z * z + w * w; } /** \brief returns the magnitude */ NV_CUDA_CALLABLE NV_INLINE float magnitude() const { return NvSqrt(magnitudeSquared()); } /** \brief negation */ NV_CUDA_CALLABLE NV_INLINE NvVec4 operator-() const { return NvVec4(-x, -y, -z, -w); } /** \brief vector addition */ NV_CUDA_CALLABLE NV_INLINE NvVec4 operator+(const NvVec4& v) const { return NvVec4(x + v.x, y + v.y, z + v.z, w + v.w); } /** \brief vector difference */ NV_CUDA_CALLABLE NV_INLINE NvVec4 operator-(const NvVec4& v) const { return NvVec4(x - v.x, y - v.y, z - v.z, w - v.w); } /** \brief scalar post-multiplication */ NV_CUDA_CALLABLE NV_INLINE NvVec4 operator*(float f) const { return NvVec4(x * f, y * f, z * f, w * f); } /** \brief scalar division */ NV_CUDA_CALLABLE NV_INLINE NvVec4 operator/(float f) const { f = 1.0f / f; return NvVec4(x * f, y * f, z * f, w * f); } /** \brief vector addition */ NV_CUDA_CALLABLE NV_INLINE NvVec4& operator+=(const NvVec4& v) { x += v.x; y += v.y; z += v.z; w += v.w; return *this; } /** \brief vector difference */ NV_CUDA_CALLABLE NV_INLINE NvVec4& operator-=(const NvVec4& v) { x -= v.x; y -= v.y; z -= v.z; w -= v.w; return *this; } /** \brief scalar multiplication */ NV_CUDA_CALLABLE NV_INLINE NvVec4& operator*=(float f) { x *= f; y *= f; z *= f; w *= f; return *this; } /** \brief scalar division */ NV_CUDA_CALLABLE NV_INLINE NvVec4& operator/=(float f) { f = 1.0f / f; x *= f; y *= f; z *= f; w *= f; return *this; } /** \brief returns the scalar product of this and other. */ NV_CUDA_CALLABLE NV_INLINE float dot(const NvVec4& v) const { return x * v.x + y * v.y + z * v.z + w * v.w; } /** return a unit vector */ NV_CUDA_CALLABLE NV_INLINE NvVec4 getNormalized() const { float m = magnitudeSquared(); return m > 0.0f ? 
*this * NvRecipSqrt(m) : NvVec4(0, 0, 0, 0); } /** \brief normalizes the vector in place */ NV_CUDA_CALLABLE NV_INLINE float normalize() { float m = magnitude(); if(m > 0.0f) *this /= m; return m; } /** \brief a[i] * b[i], for all i. */ NV_CUDA_CALLABLE NV_INLINE NvVec4 multiply(const NvVec4& a) const { return NvVec4(x * a.x, y * a.y, z * a.z, w * a.w); } /** \brief element-wise minimum */ NV_CUDA_CALLABLE NV_INLINE NvVec4 minimum(const NvVec4& v) const { return NvVec4(NvMin(x, v.x), NvMin(y, v.y), NvMin(z, v.z), NvMin(w, v.w)); } /** \brief element-wise maximum */ NV_CUDA_CALLABLE NV_INLINE NvVec4 maximum(const NvVec4& v) const { return NvVec4(NvMax(x, v.x), NvMax(y, v.y), NvMax(z, v.z), NvMax(w, v.w)); } NV_CUDA_CALLABLE NV_INLINE NvVec3 getXYZ() const { return NvVec3(x, y, z); } /** \brief set vector elements to zero */ NV_CUDA_CALLABLE NV_INLINE void setZero() { x = y = z = w = 0.0f; } float x, y, z, w; }; NV_CUDA_CALLABLE static NV_INLINE NvVec4 operator*(float f, const NvVec4& v) { return NvVec4(f * v.x, f * v.y, f * v.z, f * v.w); } #if !NV_DOXYGEN } // namespace nvidia #endif /** @} */ #endif // #ifndef NV_NVFOUNDATION_NVVEC4_H
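A brief usage sketch (illustrative only): packing an NvVec3 with an explicit w component, normalizing, and dropping back to three components with getXYZ().

#include "NvVec4.h"

// Hypothetical helper: normalize a direction carried in a 4-component vector with w = 0.
inline nvidia::NvVec3 normalizedDirection(const nvidia::NvVec3& dir)
{
    const nvidia::NvVec4 v(dir, 0.0f);

    // getNormalized() returns the zero vector when the magnitude is zero.
    return v.getNormalized().getXYZ();
}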
NVIDIA-Omniverse/PhysX/blast/include/shared/NvFoundation/platform/unix/NvUnixIntrinsics.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_UNIX_NVUNIXINTRINSICS_H #define NV_UNIX_NVUNIXINTRINSICS_H #include "Nv.h" #include "NvAssert.h" #if !(NV_LINUX || NV_ANDROID || NV_PS4 || NV_APPLE_FAMILY) #error "This file should only be included by Unix builds!!" #endif #include <math.h> #include <float.h> namespace nvidia { namespace intrinsics { //! \brief platform-specific absolute value NV_CUDA_CALLABLE NV_FORCE_INLINE float abs(float a) { return ::fabsf(a); } //! \brief platform-specific select float NV_CUDA_CALLABLE NV_FORCE_INLINE float fsel(float a, float b, float c) { return (a >= 0.0f) ? b : c; } //! \brief platform-specific sign NV_CUDA_CALLABLE NV_FORCE_INLINE float sign(float a) { return (a >= 0.0f) ? 1.0f : -1.0f; } //! \brief platform-specific reciprocal NV_CUDA_CALLABLE NV_FORCE_INLINE float recip(float a) { return 1.0f / a; } //! \brief platform-specific reciprocal estimate NV_CUDA_CALLABLE NV_FORCE_INLINE float recipFast(float a) { return 1.0f / a; } //! \brief platform-specific square root NV_CUDA_CALLABLE NV_FORCE_INLINE float sqrt(float a) { return ::sqrtf(a); } //! \brief platform-specific reciprocal square root NV_CUDA_CALLABLE NV_FORCE_INLINE float recipSqrt(float a) { return 1.0f / ::sqrtf(a); } NV_CUDA_CALLABLE NV_FORCE_INLINE float recipSqrtFast(float a) { return 1.0f / ::sqrtf(a); } //! \brief platform-specific sine NV_CUDA_CALLABLE NV_FORCE_INLINE float sin(float a) { return ::sinf(a); } //! \brief platform-specific cosine NV_CUDA_CALLABLE NV_FORCE_INLINE float cos(float a) { return ::cosf(a); } //! \brief platform-specific minimum NV_CUDA_CALLABLE NV_FORCE_INLINE float selectMin(float a, float b) { return a < b ? a : b; } //! \brief platform-specific maximum NV_CUDA_CALLABLE NV_FORCE_INLINE float selectMax(float a, float b) { return a > b ? a : b; } //! 
\brief platform-specific finiteness check (not INF or NAN) NV_CUDA_CALLABLE NV_FORCE_INLINE bool isFinite(float a) { return !!isfinite(a); } //! \brief platform-specific finiteness check (not INF or NAN) NV_CUDA_CALLABLE NV_FORCE_INLINE bool isFinite(double a) { return !!isfinite(a); } /*! Sets \c count bytes starting at \c dst to zero. */ NV_FORCE_INLINE void* memZero(void* NV_RESTRICT dest, uint32_t count) { return memset(dest, 0, count); } /*! Sets \c count bytes starting at \c dst to \c c. */ NV_FORCE_INLINE void* memSet(void* NV_RESTRICT dest, int32_t c, uint32_t count) { return memset(dest, c, count); } /*! Copies \c count bytes from \c src to \c dst. Use memMove if regions overlap. */ NV_FORCE_INLINE void* memCopy(void* NV_RESTRICT dest, const void* NV_RESTRICT src, uint32_t count) { return memcpy(dest, src, count); } /*! Copies \c count bytes from \c src to \c dst. Supports overlapping regions. */ NV_FORCE_INLINE void* memMove(void* NV_RESTRICT dest, const void* NV_RESTRICT src, uint32_t count) { return memmove(dest, src, count); } /*! Set 128B to zero starting at \c dst+offset. Must be aligned. */ NV_FORCE_INLINE void memZero128(void* NV_RESTRICT dest, uint32_t offset = 0) { NV_ASSERT(((size_t(dest) + offset) & 0x7f) == 0); memSet(reinterpret_cast<char * NV_RESTRICT>(dest) + offset, 0, 128); } } // namespace intrinsics } // namespace nvidia #endif // #ifndef NV_UNIX_NVUNIXINTRINSICS_H
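A small usage sketch (illustrative only): branch-free clamping with the select helpers defined above. Client code would normally reach these through NvIntrinsics.h, which on Unix-family platforms resolves to this header.

#include "NvIntrinsics.h"

// Hypothetical helper: clamp a value to [0, 1] using the platform select functions.
inline float clamp01(float a)
{
    return nvidia::intrinsics::selectMax(0.0f, nvidia::intrinsics::selectMin(a, 1.0f));
}

// Note: memZero128 above additionally requires (dest + offset) to be 128-byte aligned,
// which the NV_ASSERT in its body enforces in debug builds.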
NVIDIA-Omniverse/PhysX/blast/include/shared/NvFoundation/platform/windows/NvWindowsIntrinsics.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_WINDOWS_NVWINDOWSINTRINSICS_H #define NV_WINDOWS_NVWINDOWSINTRINSICS_H #include "Nv.h" #include "NvAssert.h" #if !NV_WINDOWS_FAMILY #error "This file should only be included by Windows or WIN8ARM builds!!" #endif #include <math.h> #include <float.h> #if !NV_DOXYGEN namespace nvidia { namespace intrinsics { #endif //! \brief platform-specific absolute value NV_CUDA_CALLABLE NV_FORCE_INLINE float abs(float a) { return ::fabsf(a); } //! \brief platform-specific select float NV_CUDA_CALLABLE NV_FORCE_INLINE float fsel(float a, float b, float c) { return (a >= 0.0f) ? b : c; } //! \brief platform-specific sign NV_CUDA_CALLABLE NV_FORCE_INLINE float sign(float a) { return (a >= 0.0f) ? 1.0f : -1.0f; } //! \brief platform-specific reciprocal NV_CUDA_CALLABLE NV_FORCE_INLINE float recip(float a) { return 1.0f / a; } //! \brief platform-specific reciprocal estimate NV_CUDA_CALLABLE NV_FORCE_INLINE float recipFast(float a) { return 1.0f / a; } //! \brief platform-specific square root NV_CUDA_CALLABLE NV_FORCE_INLINE float sqrt(float a) { return ::sqrtf(a); } //! \brief platform-specific reciprocal square root NV_CUDA_CALLABLE NV_FORCE_INLINE float recipSqrt(float a) { return 1.0f / ::sqrtf(a); } //! \brief platform-specific reciprocal square root estimate NV_CUDA_CALLABLE NV_FORCE_INLINE float recipSqrtFast(float a) { return 1.0f / ::sqrtf(a); } //! \brief platform-specific sine NV_CUDA_CALLABLE NV_FORCE_INLINE float sin(float a) { return ::sinf(a); } //! \brief platform-specific cosine NV_CUDA_CALLABLE NV_FORCE_INLINE float cos(float a) { return ::cosf(a); } //! \brief platform-specific minimum NV_CUDA_CALLABLE NV_FORCE_INLINE float selectMin(float a, float b) { return a < b ? a : b; } //! \brief platform-specific maximum NV_CUDA_CALLABLE NV_FORCE_INLINE float selectMax(float a, float b) { return a > b ? a : b; } //! 
\brief platform-specific finiteness check (not INF or NAN) NV_CUDA_CALLABLE NV_FORCE_INLINE bool isFinite(float a) { #ifdef __CUDACC__ return !!isfinite(a); #else return (0 == ((_FPCLASS_SNAN | _FPCLASS_QNAN | _FPCLASS_NINF | _FPCLASS_PINF) & _fpclass(a))); #endif } //! \brief platform-specific finiteness check (not INF or NAN) NV_CUDA_CALLABLE NV_FORCE_INLINE bool isFinite(double a) { #ifdef __CUDACC__ return !!isfinite(a); #else return (0 == ((_FPCLASS_SNAN | _FPCLASS_QNAN | _FPCLASS_NINF | _FPCLASS_PINF) & _fpclass(a))); #endif } /*! Sets \c count bytes starting at \c dst to zero. */ NV_FORCE_INLINE void* memZero(void* NV_RESTRICT dest, uint32_t count) { return memset(dest, 0, count); } /*! Sets \c count bytes starting at \c dst to \c c. */ NV_FORCE_INLINE void* memSet(void* NV_RESTRICT dest, int32_t c, uint32_t count) { return memset(dest, c, count); } /*! Copies \c count bytes from \c src to \c dst. Use memMove if regions overlap. */ NV_FORCE_INLINE void* memCopy(void* NV_RESTRICT dest, const void* NV_RESTRICT src, uint32_t count) { return memcpy(dest, src, count); } /*! Copies \c count bytes from \c src to \c dst. Supports overlapping regions. */ NV_FORCE_INLINE void* memMove(void* NV_RESTRICT dest, const void* NV_RESTRICT src, uint32_t count) { return memmove(dest, src, count); } /*! Set 128B to zero starting at \c dst+offset. Must be aligned. */ NV_FORCE_INLINE void memZero128(void* NV_RESTRICT dest, uint32_t offset = 0) { NV_ASSERT(((size_t(dest) + offset) & 0x7f) == 0); memSet((char * NV_RESTRICT)dest + offset, 0, 128); } #if !NV_DOXYGEN } // namespace intrinsics } // namespace nvidia #endif #endif // #ifndef NV_WINDOWS_NVWINDOWSINTRINSICS_H
NVIDIA-Omniverse/PhysX/blast/include/lowlevel/NvBlast.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. //! @file //! //! @brief Defines the API for the low-level blast library. #ifndef NVBLAST_H #define NVBLAST_H #include "NvBlastTypes.h" /////////////////////////////////////////////////////////////////////////////// // NvBlastAsset functions /////////////////////////////////////////////////////////////////////////////// ///@{ /** Calculates the memory requirements for an asset based upon its descriptor. Use this function when building an asset with NvBlastCreateAsset. \param[in] desc Asset descriptor (see NvBlastAssetDesc). Used to calculate node count. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the memory size (in bytes) required for the asset, or zero if desc is invalid. */ NV_C_API size_t NvBlastGetAssetMemorySize(const NvBlastAssetDesc* desc, NvBlastLog logFn); /** Calculates the memory requirements for an asset based upon supplied sized data. Used primarily with serialization. \param[in] sizeData Alternate form where all size data is already known. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the memory size (in bytes) required for the asset, or zero if data is invalid. */ NV_C_API size_t NvBlastGetAssetMemorySizeFromSizeData(const NvBlastAssetMemSizeData& sizeData, NvBlastLog logFn); /** Returns the number of bytes of scratch memory that the user must supply to NvBlastCreateAsset, based upon the descriptor that will be passed into that function. \param[in] desc The asset descriptor that will be passed into NvBlastCreateAsset. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the number of bytes of scratch memory required for a call to NvBlastCreateAsset with that descriptor. */ NV_C_API size_t NvBlastGetRequiredScratchForCreateAsset(const NvBlastAssetDesc* desc, NvBlastLog logFn); /** Asset-building function. Constructs an NvBlastAsset in-place at the address given by the user. 
The address must point to a block of memory of at least the size given by NvBlastGetAssetMemorySize(desc, logFn), and must be 16-byte aligned. Support chunks (marked in the NvBlastChunkDesc struct) must provide full coverage over the asset. This means that from any leaf chunk to the root node, exactly one chunk must be support. If this condition is not met the function fails to create an asset. Any bonds described by NvBlastBondDesc descriptors that reference non-support chunks will be removed. Duplicate bonds will be removed as well (bonds that are between the same chunk pairs). Chunks in the asset should be arranged such that sibling chunks (chunks with the same parent) are contiguous. Chunks are also should be arranged such that leaf chunks (chunks with no children) are at the end of the chunk list. If chunks aren't arranged properly the function fails to create an asset. \param[in] mem Pointer to block of memory of at least the size given by NvBlastGetAssetMemorySize(desc, logFn). Must be 16-byte aligned. \param[in] desc Asset descriptor (see NvBlastAssetDesc). \param[in] scratch User-supplied scratch memory of size NvBlastGetRequiredScratchForCreateAsset(desc) bytes. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return pointer to new NvBlastAsset (will be the same address as mem), or NULL if unsuccessful. */ NV_C_API NvBlastAsset* NvBlastCreateAsset(void* mem, const NvBlastAssetDesc* desc, void* scratch, NvBlastLog logFn); /** Calculates the memory requirements for a family based upon an asset. Use this function when building a family with NvBlastAssetCreateFamily. \param[in] asset Asset used to build the family (see NvBlastAsset). \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the memory size (in bytes) required for the family, or zero if asset is invalid. */ NV_C_API size_t NvBlastAssetGetFamilyMemorySize(const NvBlastAsset* asset, NvBlastLog logFn); /** Calculates the memory requirements for a family based upon supplied sized data. Used primarily with serialization. \param[in] sizeData Alternate form where all size data is already known. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the memory size (in bytes) required for the family, or zero if data is invalid. */ NV_C_API size_t NvBlastAssetGetFamilyMemorySizeFromSizeData(const NvBlastAssetMemSizeData& sizeData, NvBlastLog logFn); /** Fill out the size data from the provided asset \param[in] asset Asset to pull the size data from (see NvBlastAsset). \return Filled out size data struct. */ NV_C_API NvBlastAssetMemSizeData NvBlastAssetMemSizeDataFromAsset(const NvBlastAsset* asset); /** Family-building function. Constructs an NvBlastFamily in-place at the address given by the user. The address must point to a block of memory of at least the size given by NvBlastAssetGetFamilyMemorySize(asset, logFn), and must be 16-byte aligned. \param[in] mem Pointer to block of memory of at least the size given by NvBlastAssetGetFamilyMemorySize(asset, logFn). Must be 16-byte aligned. \param[in] asset Asset to instance. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the family. */ NV_C_API NvBlastFamily* NvBlastAssetCreateFamily(void* mem, const NvBlastAsset* asset, NvBlastLog logFn); /** Family-building function. Constructs an NvBlastFamily in-place at the address given by the user. 
The address must point to a block of memory of at least the size given by NvBlastAssetGetFamilyMemorySize(sizeData, logFn), and must be 16-byte aligned. \param[in] mem Pointer to block of memory of at least the size given by NvBlastAssetGetFamilyMemorySize(asset, logFn). Must be 16-byte aligned. \param[in] sizeData Data used to init buffer sizes. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the family. */ NV_C_API NvBlastFamily* NvBlastAssetCreateFamilyFromSizeData(void* mem, const NvBlastAssetMemSizeData& sizeData, NvBlastLog logFn); /** Retrieve the asset ID. \param[in] asset The given asset. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the ID of the asset. */ NV_C_API NvBlastID NvBlastAssetGetID(const NvBlastAsset* asset, NvBlastLog logFn); /** Set an asset's ID \param[in] asset The given asset. \param[in] id A pointer to the id to copy into the asset. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return true iff the id is successfully set. */ NV_C_API bool NvBlastAssetSetID(NvBlastAsset* asset, const NvBlastID* id, NvBlastLog logFn); /** Retrieve the data format version for the given asset \param[in] asset The asset. Cannot be NULL. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the data format version (NvBlastAssetDataFormat). */ NV_C_API uint32_t NvBlastAssetGetFormatVersion(const NvBlastAsset* asset, NvBlastLog logFn); /** Retrieve the memory size (in bytes) of the given data asset \param[in] asset The asset. Cannot be NULL. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the memory size of the asset (in bytes). */ NV_C_API uint32_t NvBlastAssetGetSize(const NvBlastAsset* asset, NvBlastLog logFn); /** Get the number of chunks in the given asset. \param[in] asset The asset. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the number of chunks in the asset. */ NV_C_API uint32_t NvBlastAssetGetChunkCount(const NvBlastAsset* asset, NvBlastLog logFn); /** Get the number of support chunks in the given asset. This will equal the number of graph nodes in NvBlastSupportGraph::nodeCount returned by NvBlastAssetGetSupportGraph only if no extra "external" node was created. If such bonds were created, then an extra "external" graph node is added, and this function will return NvBlastSupportGraph::nodeCount - 1. \param[in] asset The asset. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the number of chunks in the asset. */ NV_C_API uint32_t NvBlastAssetGetSupportChunkCount(const NvBlastAsset* asset, NvBlastLog logFn); /** Get the number of leaf chunks in the given asset. \param[in] asset The asset. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the number of leaf chunks in the asset. */ NV_C_API uint32_t NvBlastAssetGetLeafChunkCount(const NvBlastAsset* asset, NvBlastLog logFn); /** Get the first subsupport chunk index in the given asset. Chunks are sorted such that subsupport chunks come last. This is the first subsupport chunk index. Equals to total chunk count if there are no subsupport chunks. \param[in] asset The asset. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the first subsupport chunk index in the asset. 
*/ NV_C_API uint32_t NvBlastAssetGetFirstSubsupportChunkIndex(const NvBlastAsset* asset, NvBlastLog logFn); /** Get the number of bonds in the given asset. \param[in] asset The asset. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the number of bonds in the asset. */ NV_C_API uint32_t NvBlastAssetGetBondCount(const NvBlastAsset* asset, NvBlastLog logFn); /** Access the support graph for the given asset. \param[in] asset The asset. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return a struct describing the support graph for the given asset. */ NV_C_API const NvBlastSupportGraph NvBlastAssetGetSupportGraph(const NvBlastAsset* asset, NvBlastLog logFn); /** Access a map from chunk index to graph node index. The returned map is valid in the domain [0, NvBlastAssetGetChunkCount(asset, logFn)). Non-support chunks are mapped to the invalid index 0xFFFFFFFF. \param[in] asset The asset. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return an array of uint32_t values defining the map, of size NvBlastAssetGetChunkCount(asset, logFn). */ NV_C_API const uint32_t* NvBlastAssetGetChunkToGraphNodeMap(const NvBlastAsset* asset, NvBlastLog logFn); /** Access an array of chunks of the given asset. \param[in] asset The asset. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return a pointer to an array of chunks of the asset. */ NV_C_API const NvBlastChunk* NvBlastAssetGetChunks(const NvBlastAsset* asset, NvBlastLog logFn); /** Access an array of bonds of the given asset. \param[in] asset The asset. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return a pointer to an array of bonds of the asset. */ NV_C_API const NvBlastBond* NvBlastAssetGetBonds(const NvBlastAsset* asset, NvBlastLog logFn); /** A buffer size sufficient to serialize an actor instanced from a given asset. This function is faster than NvBlastActorGetSerializationSize, and can be used to create a reusable buffer for actor serialization. \param[in] asset The asset. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the required buffer size in bytes. */ NV_C_API uint32_t NvBlastAssetGetActorSerializationSizeUpperBound(const NvBlastAsset* asset, NvBlastLog logFn); ///@} End NvBlastAsset functions /////////////////////////////////////////////////////////////////////////////// // NvBlastAsset helper functions /////////////////////////////////////////////////////////////////////////////// ///@{ /** Function to ensure (check and update) support coverage of chunks. Support chunks (marked in the NvBlastChunkDesc struct) must provide full coverage over the asset. This means that from any leaf chunk to the root node, exactly one chunk must be support. If this condition is not met, the actual support chunks will be adjusted accordingly. Chunk order depends on support coverage, so this function should be called before chunk reordering. \param[in] chunkDescs Array of chunk descriptors of size chunkCount. It will be updated accordingly. \param[in] chunkCount The number of chunk descriptors. \param[in] scratch User-supplied scratch storage, must point to chunkCount valid bytes of memory. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return true iff coverage was already exact.
*/ NV_C_API bool NvBlastEnsureAssetExactSupportCoverage(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, void* scratch, NvBlastLog logFn); /** Build chunk reorder map. The NvBlastCreateAsset function requires the NvBlastChunkDesc array to be in the correct order: 1. Root chunks (chunks with invalid parent index) must be first in the asset's chunk list. 2. Chunks in the asset must be arranged such that sibling chunks (chunks with the same parent) are contiguous. 3. Chunks must be arranged such that upper-support chunks (support chunks and their parent chunks) go first in the chunk list. This function builds a chunk reorder map which can be used to order chunk descriptors. Reordering chunk descriptors according to the generated map places them in the correct order for NvBlastCreateAsset to succeed. If the chunks are already ordered correctly, the function returns true and the identity chunk reorder map; otherwise false is returned. \param[out] chunkReorderMap User-supplied map of size chunkCount to fill. For every chunk index this array will contain the new chunk position (index). \param[in] chunkDescs Array of chunk descriptors of size chunkCount. \param[in] chunkCount The number of chunk descriptors. \param[in] scratch User-supplied scratch storage, must point to 3 * chunkCount * sizeof(uint32_t) valid bytes of memory. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return true iff the chunks did not require reordering (chunkReorderMap is the identity map). */ NV_C_API bool NvBlastBuildAssetDescChunkReorderMap(uint32_t* chunkReorderMap, const NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, void* scratch, NvBlastLog logFn); /** Apply chunk reorder map. This function applies the reorder map to NvBlastChunkDesc and NvBlastBondDesc arrays. It reorders chunks and replaces their 'parentChunkIndex' field with new indices. Bonds are kept in the same order, but their 'chunkIndices' field is updated with proper indices. @see NvBlastBuildAssetDescChunkReorderMap \param[out] reorderedChunkDescs User-supplied array of size chunkCount to fill with new reordered NvBlastChunkDesc's. \param[in] chunkDescs Array of chunk descriptors of size chunkCount. \param[in] chunkCount The number of chunk descriptors. \param[in] bondDescs Array of bond descriptors of size bondCount. It will be updated accordingly. \param[in] bondCount The number of bond descriptors. \param[in] chunkReorderMap Chunk reorder map to use, must be of size chunkCount. \param[in] keepBondNormalChunkOrder If true, bond normals will be flipped if their chunk index order was reversed by the reorder map. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. */ NV_C_API void NvBlastApplyAssetDescChunkReorderMap ( NvBlastChunkDesc* reorderedChunkDescs, const NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, NvBlastBondDesc* bondDescs, uint32_t bondCount, const uint32_t* chunkReorderMap, bool keepBondNormalChunkOrder, NvBlastLog logFn ); /** Apply chunk reorder map. This function applies the reorder map to NvBlastChunkDesc and NvBlastBondDesc arrays. It reorders chunks and replaces their 'parentChunkIndex' field with new indices. Bonds are kept in the same order, but their 'chunkIndices' field is updated with proper indices. This overload reorders chunks in place. @see NvBlastBuildAssetDescChunkReorderMap \param[in] chunkDescs Array of chunk descriptors of size chunkCount. It will be updated accordingly. \param[in] chunkCount The number of chunk descriptors.
\param[in] bondDescs Array of bond descriptors of size bondCount. It will be updated accordingly. \param[in] bondCount The number of bond descriptors. \param[in] chunkReorderMap Chunk reorder map to use, must be of size chunkCount. \param[in] keepBondNormalChunkOrder If true, bond normals will be flipped if their chunk index order was reversed by the reorder map. \param[in] scratch User-supplied scratch storage, must point to chunkCount * sizeof(NvBlastChunkDesc) valid bytes of memory. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. */ NV_C_API void NvBlastApplyAssetDescChunkReorderMapInPlace ( NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, NvBlastBondDesc* bondDescs, uint32_t bondCount, const uint32_t* chunkReorderMap, bool keepBondNormalChunkOrder, void* scratch, NvBlastLog logFn ); /** Build and apply chunk reorder map. This function simply calls NvBlastBuildAssetDescChunkReorderMap and NvBlastApplyAssetDescChunkReorderMap; it is provided for convenience. \param[in] chunkDescs Array of chunk descriptors of size chunkCount. It will be updated accordingly. \param[in] chunkCount The number of chunk descriptors. \param[in] bondDescs Array of bond descriptors of size bondCount. It will be updated accordingly. \param[in] bondCount The number of bond descriptors. \param[in] chunkReorderMap Chunk reorder map to fill, must be of size chunkCount. \param[in] keepBondNormalChunkOrder If true, bond normals will be flipped if their chunk index order was reversed by the reorder map. \param[in] scratch User-supplied scratch storage, must point to chunkCount * sizeof(NvBlastChunkDesc) valid bytes of memory. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return true iff the chunks did not require reordering (chunkReorderMap is the identity map). */ NV_C_API bool NvBlastReorderAssetDescChunks ( NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, NvBlastBondDesc* bondDescs, uint32_t bondCount, uint32_t* chunkReorderMap, bool keepBondNormalChunkOrder, void* scratch, NvBlastLog logFn ); ///@} End NvBlastAsset helper functions /////////////////////////////////////////////////////////////////////////////// // NvBlastFamily functions /////////////////////////////////////////////////////////////////////////////// ///@{ /** Retrieve the data format version for the given family. \param[in] family The family. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the family format version. */ NV_C_API uint32_t NvBlastFamilyGetFormatVersion(const NvBlastFamily* family, NvBlastLog logFn); /** Retrieve the asset of the given family. \param[in] family The family. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return pointer to the asset associated with the family. */ NV_C_API const NvBlastAsset* NvBlastFamilyGetAsset(const NvBlastFamily* family, NvBlastLog logFn); /** Set the asset for the family. It should be the same asset as the one the family was created from (same ID). \param[in] family The family. \param[in] asset Asset to instance. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. */ NV_C_API void NvBlastFamilySetAsset(NvBlastFamily* family, const NvBlastAsset* asset, NvBlastLog logFn); /** Retrieve the size (in bytes) of the given family. \param[in] family The family. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the size of the family (in bytes).
*/ NV_C_API uint32_t NvBlastFamilyGetSize(const NvBlastFamily* family, NvBlastLog logFn); /** Retrieve the asset ID of the given family. \param[in] family The family. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the ID of the asset associated with the family. */ NV_C_API NvBlastID NvBlastFamilyGetAssetID(const NvBlastFamily* family, NvBlastLog logFn); /** Returns the number of bytes of scratch memory that the user must supply to NvBlastFamilyCreateFirstActor. \param[in] family The family from which the first actor will be instanced. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the number of bytes of scratch memory required for a call to NvBlastFamilyCreateFirstActor. */ NV_C_API size_t NvBlastFamilyGetRequiredScratchForCreateFirstActor(const NvBlastFamily* family, NvBlastLog logFn); /** Instance the family's asset into a new, unfractured actor. \param[in] family Family in which to create a new actor. The family must have no other actors in it. (See NvBlastAssetCreateFamily.) \param[in] desc Actor descriptor (see NvBlastActorDesc). \param[in] scratch User-supplied scratch memory of size NvBlastFamilyGetRequiredScratchForCreateFirstActor(asset) bytes, where 'asset' is the NvBlastAsset from which the family was created. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return pointer to new NvBlastActor if successful (the actor was successfully inserted into the family), or NULL if unsuccessful. */ NV_C_API NvBlastActor* NvBlastFamilyCreateFirstActor(NvBlastFamily* family, const NvBlastActorDesc* desc, void* scratch, NvBlastLog logFn); /** Retrieve the number of active actors associated with the given family. \param[in] family The family. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the number of active actors in the family. */ NV_C_API uint32_t NvBlastFamilyGetActorCount(const NvBlastFamily* family, NvBlastLog logFn); /** Deserialize a single Actor from a buffer into the given family. The actor will be inserted if it is compatible with the current family state. That is, it must not share any chunks or internal IDs with the actors already present in the family. \param[in] family Family in which to deserialize the actor. \param[in] buffer User-supplied buffer containing the actor to deserialize. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the deserialized actor if successful, NULL otherwise. */ NV_C_API NvBlastActor* NvBlastFamilyDeserializeActor(NvBlastFamily* family, const void* buffer, NvBlastLog logFn); /** Retrieve the active actors associated with the given family. \param[out] actors User-supplied array to be filled with the returned actor pointers. \param[out] actorsSize The size of the actors array. To receive all actor pointers, the size must be at least that given by NvBlastFamilyGetActorCount(family). \param[in] family The family. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the number of actor pointers written to actors. This will not exceed actorsSize. */ NV_C_API uint32_t NvBlastFamilyGetActors(NvBlastActor** actors, uint32_t actorsSize, const NvBlastFamily* family, NvBlastLog logFn); /** Retrieve the actor associated with the given actor index. \param[in] family The family. \param[in] actorIndex The index of actor. 
\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return pointer to actor associated with given actor index. NULL if there is no such actor or it is inactive. */ NV_C_API NvBlastActor* NvBlastFamilyGetActorByIndex(const NvBlastFamily* family, uint32_t actorIndex, NvBlastLog logFn); /** Retrieve the actor associated with the given chunk. \param[in] family The family. \param[in] chunkIndex The index of the chunk. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return pointer to actor associated with given chunk. NULL if there is no such actor. */ NV_C_API NvBlastActor* NvBlastFamilyGetChunkActor(const NvBlastFamily* family, uint32_t chunkIndex, NvBlastLog logFn); /** Retrieve the actor indices associated with chunks. NOTE: the returned array size equals the number of support chunks in the asset. \param[in] family The family. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return pointer to the array of actor indices associated with the family's support chunks. */ NV_C_API uint32_t* NvBlastFamilyGetChunkActorIndices(const NvBlastFamily* family, NvBlastLog logFn); /** Retrieve the maximum number of active actors the family could have. \param[in] family The family. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the maximum number of active actors the family could have. */ NV_C_API uint32_t NvBlastFamilyGetMaxActorCount(const NvBlastFamily* family, NvBlastLog logFn); ///@} End NvBlastFamily functions /////////////////////////////////////////////////////////////////////////////////////// // NvBlastActor accessor, serialization, and deactivation functions /////////////////////////////////////////////////////////////////////////////////////// ///@{ /** Get the number of visible chunks for this actor. May be used in conjunction with NvBlastActorGetVisibleChunkIndices. \param[in] actor The actor. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the number of visible chunk indices for the actor. */ NV_C_API uint32_t NvBlastActorGetVisibleChunkCount(const NvBlastActor* actor, NvBlastLog logFn); /** Retrieve a list of visible chunk indices for the actor into the given array. \param[in] visibleChunkIndices User-supplied array to be filled in with indices of visible chunks for this actor. \param[in] visibleChunkIndicesSize The size of the visibleChunkIndices array. To receive all visible chunk indices, the size must be at least that given by NvBlastActorGetVisibleChunkCount(actor). \param[in] actor The actor. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the number of indices written to visibleChunkIndices. This will not exceed visibleChunkIndicesSize. */ NV_C_API uint32_t NvBlastActorGetVisibleChunkIndices(uint32_t* visibleChunkIndices, uint32_t visibleChunkIndicesSize, const NvBlastActor* actor, NvBlastLog logFn); /** Get the number of graph nodes for this actor. May be used in conjunction with NvBlastActorGetGraphNodeIndices. \param[in] actor The actor. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the number of graph node indices for the actor. */ NV_C_API uint32_t NvBlastActorGetGraphNodeCount(const NvBlastActor* actor, NvBlastLog logFn); /** Retrieve a list of graph node indices for the actor into the given array.
\param[in] graphNodeIndices User-supplied array to be filled in with indices of graph nodes for this actor. \param[in] graphNodeIndicesSize The size of the graphNodeIndices array. To receive all graph node indices, the size must be at least that given by NvBlastActorGetGraphNodeCount(actor). \param[in] actor The actor. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the number of indices written to graphNodeIndices. This will not exceed graphNodeIndicesSize. */ NV_C_API uint32_t NvBlastActorGetGraphNodeIndices(uint32_t* graphNodeIndices, uint32_t graphNodeIndicesSize, const NvBlastActor* actor, NvBlastLog logFn); /** Access the bond health data for an actor. This function returns a pointer to the head of an array of bond healths (floats). This array is the same for any actor that has been created from repeated fracturing of the same original instance of an asset (in the same instance family). The indices obtained from NvBlastSupportGraph::adjacentBondIndices in the asset may be used to access this array. The size of the array returned is NvBlastAssetGetBondCount(asset, logFn), where 'asset' is the NvBlastAsset that was used to create the actor. This array is valid as long as any actor in the instance family for the input actor exists. If the input actor is invalid, NULL will be returned. \param[in] actor The actor. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the array of bond healths for the actor's instance family, or NULL if the actor is invalid. */ NV_C_API const float* NvBlastActorGetBondHealths(const NvBlastActor* actor, NvBlastLog logFn); /** Access the cached bond health data for an actor. It is intended to be populated with pre-damage health values. This function returns a pointer to the head of an array of bond healths (floats). This array is the same for any actor that has been created from repeated fracturing of the same original instance of an asset (in the same instance family). The indices obtained from NvBlastSupportGraph::adjacentBondIndices in the asset may be used to access this array. The size of the array returned is NvBlastAssetGetBondCount(asset, logFn), where 'asset' is the NvBlastAsset that was used to create the actor. This array is valid as long as any actor in the instance family for the input actor exists. If the input actor is invalid, NULL will be returned. \param[in] actor The actor. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the array of bond healths for the actor's instance family, or NULL if the actor is invalid. */ NV_C_API const float* NvBlastActorGetCachedBondHeaths(const NvBlastActor* actor, NvBlastLog logFn); /** Tell the system to cache the bond health for the given bond index. \param[in] actor The actor. \param[in] bondIndex The bond to cache the health value. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return true if value was cached, false otherwise */ NV_C_API bool NvBlastActorCacheBondHeath(const NvBlastActor* actor, uint32_t bondIndex, NvBlastLog logFn); /** The buffer size needed to serialize a single actor. This will give the exact size needed. For an upper bound on the buffer size needed for any actor instanced from an NvBlastAsset, use NvBlastAssetGetActorSerializationSizeUpperBound. \param[in] actor The actor. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. 
\return the required buffer size in bytes. */ NV_C_API uint32_t NvBlastActorGetSerializationSize(const NvBlastActor* actor, NvBlastLog logFn); /** Serialize a single actor to a buffer. \param[out] buffer User-supplied buffer, must be at least of size given by NvBlastActorGetSerializationSize(actor). \param[in] bufferSize The size of the user-supplied buffer. \param[in] actor The actor. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the number of bytes written to the buffer, or 0 if there is an error (such as an under-sized buffer). */ NV_C_API uint32_t NvBlastActorSerialize(void* buffer, uint32_t bufferSize, const NvBlastActor* actor, NvBlastLog logFn); /** Access to an actor's family. \param[in] actor The actor. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the family with which the actor is associated. */ NV_C_API NvBlastFamily* NvBlastActorGetFamily(const NvBlastActor* actor, NvBlastLog logFn); /** Access to an actor's internal index. \param[in] actor The actor. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the actor's internal index in the family. */ NV_C_API uint32_t NvBlastActorGetIndex(const NvBlastActor* actor, NvBlastLog logFn); /** Deactivate an actor within its family. Conceptually this is "destroying" the actor; however, memory will not be released until the family is released. \param[in] actor Points to a user-supplied actor struct. May be NULL, in which case this function no-ops. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return true iff successful (actor was active). */ NV_C_API bool NvBlastActorDeactivate(NvBlastActor* actor, NvBlastLog logFn); ///@} End NvBlastActor accessor, serialization, and deactivation functions /////////////////////////////////////////////////////////////////////////////// // NvBlastActor damage and fracturing functions /////////////////////////////////////////////////////////////////////////////// ///@{ /** Creates fracture commands for the actor using a damage program and program parameters (material and damage descriptions). \param[in,out] commandBuffers Target buffers to hold generated commands. To avoid data loss, provide an entry for every support chunk and every bond in the original actor. \param[in] actor The NvBlastActor to create fracture commands for. \param[in] program A NvBlastDamageProgram containing damage shaders. \param[in] programParams Parameters for the NvBlastDamageProgram. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \param[in,out] timers If non-NULL this struct will be filled out with profiling information for the step, in profile build configurations. Interpretation of NvBlastFractureBuffers: As input: Counters denote available entries for FractureData. Chunk and Bond userdata are not used. Health values are not used. As output: Counters denote valid entries in FractureData arrays. Chunks and Bond userdata reflect the respective userdata set during asset initialization, where implemented by the material function. Health values denote how much damage is to be applied. */ NV_C_API void NvBlastActorGenerateFracture ( NvBlastFractureBuffers* commandBuffers, const NvBlastActor* actor, const NvBlastDamageProgram program, const void* programParams, NvBlastLog logFn, NvBlastTimers* timers ); /** Applies the direct fracture and breaks graph bonds/edges as necessary.
Chunks damaged beyond their respective health fracture their children recursively, creating a NvBlastChunkFractureData for each. \param[in,out] eventBuffers Target buffers to hold applied fracture events. May be NULL, in which case events are not reported. To avoid data loss, provide an entry for every lower-support chunk and every bond in the original actor. \param[in,out] actor The NvBlastActor to apply fracture to. \param[in] commands The fracture commands to process. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \param[in,out] timers If non-NULL this struct will be filled out with profiling information for the step, in profile build configurations. Interpretation of NvBlastFractureBuffers: commands: Counters denote the number of command entries to process. Chunk and Bond userdata are not used. Health values denote the amount of damage to apply, as a positive value. eventBuffers as input: Counters denote available entries for FractureData. Chunk and Bond userdata are not used. Health values are not used. eventBuffers as output: Counters denote valid entries in FractureData arrays. Chunks and Bond userdata reflect the respective userdata set during asset initialization. Health values denote how much health is remaining for the damaged element. Broken elements report a negative value corresponding to the superfluous health damage. commands and eventBuffers may point to the same memory. */ NV_C_API void NvBlastActorApplyFracture ( NvBlastFractureBuffers* eventBuffers, NvBlastActor* actor, const NvBlastFractureBuffers* commands, NvBlastLog logFn, NvBlastTimers* timers ); /** Releases the input actor and creates its child actors (newActors) if necessary. \param[out] result The list of deleted and created NvBlastActor objects. \param[in] actor The actor to split. \param[in] newActorsMaxCount Number of available NvBlastActor slots. In the worst case, one NvBlastActor may be created for every chunk in the asset. \param[in] scratch Scratch memory used during processing. NvBlastActorGetRequiredScratchForSplit provides the necessary size. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \param[in,out] timers If non-NULL this struct will be filled out with profiling information for the step, in profile build configurations. \return 1..n: new actors were created \return 0: the actor is unchanged */ NV_C_API uint32_t NvBlastActorSplit ( NvBlastActorSplitEvent* result, NvBlastActor* actor, uint32_t newActorsMaxCount, void* scratch, NvBlastLog logFn, NvBlastTimers* timers ); /** Returns the number of bytes of scratch memory that the user must supply to NvBlastActorSplit, based upon the actor that will be passed into that function. \param[in] actor The actor that will be passed into NvBlastActorSplit. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the number of bytes of scratch memory required for a call to NvBlastActorSplit with that actor. */ NV_C_API size_t NvBlastActorGetRequiredScratchForSplit(const NvBlastActor* actor, NvBlastLog logFn); /** Returns the upper-bound number of actors which can be created by calling NvBlastActorSplit with that actor; this value cannot exceed the asset's chunk count. \param[in] actor The actor. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the upper-bound number of actors which can be created by calling NvBlastActorSplit with that actor.
*/ NV_C_API uint32_t NvBlastActorGetMaxActorCountForSplit(const NvBlastActor* actor, NvBlastLog logFn); /** Determines if the actor can fracture further. \param[in] actor The actor potentially being fractured. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return true if any result can be expected from fracturing the actor. false if no further change to the actor is possible. */ NV_C_API bool NvBlastActorCanFracture(const NvBlastActor* actor, NvBlastLog logFn); /** Determines if the actor is damaged (was fractured) and a split call is required. An actor becomes damaged by calling NvBlastActorApplyFracture, after which a call to NvBlastActorSplit is expected. This function gives a hint that NvBlastActorSplit will have work to do and the actor could potentially be split. If the actor is not damaged, calling NvBlastActorSplit will have no effect. \return true iff a split call is required for this actor. */ NV_C_API bool NvBlastActorIsSplitRequired(const NvBlastActor* actor, NvBlastLog logFn); /** \return true iff this actor contains the "external" support graph node, created when a bond contains the UINT32_MAX value for one of its chunkIndices. */ NV_C_API bool NvBlastActorHasExternalBonds(const NvBlastActor* actor, NvBlastLog logFn); // DEPRECATED: remove on next major version bump #define NvBlastActorIsBoundToWorld NvBlastActorHasExternalBonds ///@} End NvBlastActor damage and fracturing functions /////////////////////////////////////////////////////////////////////////////// // NvBlastTimers functions and helpers /////////////////////////////////////////////////////////////////////////////// ///@{ /** Resets all values in the given NvBlastTimers struct to zero. \param[in] timers The NvBlastTimers to set to zero. */ NV_C_API void NvBlastTimersReset(NvBlastTimers* timers); /** Convert a tick value from NvBlastTimers to seconds. \param[in] ticks The tick value. \return the seconds corresponding to the input tick value. */ NV_C_API double NvBlastTicksToSeconds(int64_t ticks); ///@} End NvBlastTimers functions and helpers #endif // ifndef NVBLAST_H
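// --- Illustrative usage sketch (not part of the SDK header above) ---
// This sketch shows one way the functions documented above might be chained together: size the asset and
// family blocks, construct them in user-allocated 16-byte-aligned memory, create the first actor, then run
// the generate-fracture / apply-fracture / split loop. The alignedAlloc16 helper, the exact signature of
// NvBlastGetRequiredScratchForCreateAsset, and the damage program/params passed in are assumptions made
// for the example; ownership and cleanup of the asset/family memory are omitted for brevity.

#include <cstdlib>
#include <vector>
// #include "NvBlast.h"      // assumed include for the declarations above
// #include "NvBlastTypes.h" // assumed include for the descriptor and buffer types

static void* alignedAlloc16(size_t size)
{
    // std::aligned_alloc requires the size to be a multiple of the alignment, so round up.
    size = (size + 15) & ~size_t(15);
    return std::aligned_alloc(16, size);
}

static NvBlastActor* buildFirstActor(const NvBlastAssetDesc& assetDesc)
{
    // Asset: query the memory and scratch sizes, then construct in place (16-byte aligned).
    void* assetMem = alignedAlloc16(NvBlastGetAssetMemorySize(&assetDesc, nullptr));
    std::vector<char> scratch(NvBlastGetRequiredScratchForCreateAsset(&assetDesc, nullptr));
    NvBlastAsset* asset = NvBlastCreateAsset(assetMem, &assetDesc, scratch.data(), nullptr);
    if (asset == nullptr)
        return nullptr; // e.g. bad chunk order or support coverage

    // Family: another user-owned, 16-byte-aligned block sized from the asset.
    void* familyMem = alignedAlloc16(NvBlastAssetGetFamilyMemorySize(asset, nullptr));
    NvBlastFamily* family = NvBlastAssetCreateFamily(familyMem, asset, nullptr);

    // First (unfractured) actor with uniform initial bond and chunk healths.
    NvBlastActorDesc actorDesc = {};
    actorDesc.uniformInitialBondHealth = 1.0f;
    actorDesc.initialBondHealths = nullptr;
    actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
    actorDesc.initialSupportChunkHealths = nullptr;
    scratch.resize(NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, nullptr));
    return NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), nullptr);
}

// Hypothetical damage step: 'program' and 'params' stand for a user-written NvBlastDamageProgram and its
// parameter block; buffer sizing follows the "one entry per support chunk and per bond" advice above.
static void damageAndSplit(NvBlastActor* actor, NvBlastDamageProgram program, const void* params,
                           uint32_t supportChunkCount, uint32_t bondCount)
{
    std::vector<NvBlastChunkFractureData> chunkBuf(supportChunkCount);
    std::vector<NvBlastBondFractureData> bondBuf(bondCount);
    NvBlastFractureBuffers buffers = { bondCount, supportChunkCount, bondBuf.data(), chunkBuf.data() };

    NvBlastActorGenerateFracture(&buffers, actor, program, params, nullptr, nullptr);
    NvBlastActorApplyFracture(nullptr, actor, &buffers, nullptr, nullptr); // NULL: events not reported

    if (NvBlastActorIsSplitRequired(actor, nullptr))
    {
        const uint32_t maxNewActors = NvBlastActorGetMaxActorCountForSplit(actor, nullptr);
        std::vector<NvBlastActor*> newActors(maxNewActors);
        NvBlastActorSplitEvent splitEvent = { nullptr, newActors.data() };
        std::vector<char> scratch(NvBlastActorGetRequiredScratchForSplit(actor, nullptr));
        const uint32_t newActorCount =
            NvBlastActorSplit(&splitEvent, actor, maxNewActors, scratch.data(), nullptr, nullptr);
        (void)newActorCount; // hand the new actors (and splitEvent.deletedActor) to game code here
    }
}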
NVIDIA-Omniverse/PhysX/blast/include/lowlevel/NvBlastTypes.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. //! @file //! //! @brief Basic data types for the blast sdk APIs #ifndef NVBLASTTYPES_H #define NVBLASTTYPES_H #include "NvPreprocessor.h" #include <float.h> #include <stdint.h> /////////////////////////////////////////////////////////////////////////////// // NvBlast common types /////////////////////////////////////////////////////////////////////////////// ///@{ /** Types of log messages. */ struct NvBlastMessage { enum Type { Error, //!< Error messages Warning, //!< Warning messages Info, //!< Information messages Debug //!< Used only in debug version of dll }; }; /** Function pointer type for logging. When a function with this signature is passed into Blast functions with an NvBlastLog argument, Blast will use it to report errors, warnings, and other information. */ typedef void(*NvBlastLog)(int type, const char* msg, const char* file, int line); /** ID used to identify assets. */ struct NvBlastID { char data[16]; }; /** Time spent (in ticks) in various parts of Blast. These values may be filled in during the execution of various API functions. To convert to seconds, use NvBlastTicksToSeconds(ticks). In profile build configurations, if a pointer to an instance of this struct is passed into Blast functions with an NvBlastTimers argument, then Blast will add to appropriate fields the time measured in corresponding sections of code. The user must clear the timer fields with NvBlastTimersReset to initialize or reset. */ struct NvBlastTimers { int64_t material; //!< Time spent in material function int64_t fracture; //!< Time spent applying damage int64_t island; //!< Time spent discovering islands int64_t partition; //!< Time spent partitioning the graph int64_t visibility; //!< Time spent updating visibility }; /** Generic data block header for all data blocks. 
*/ struct NvBlastDataBlock { /** Enum of data block types */ enum Type { AssetDataBlock, FamilyDataBlock, Count }; /** The data type, holding a value from the Type enum */ uint32_t dataType; /** A number which is incremented every time the data layout changes. The data format used depends on dataType; see the NvBlastAssetDataFormat and NvBlastFamilyDataFormat enums. */ uint32_t formatVersion; /** The size of the data block, including this header. Memory sizes are restricted to 32-bit representable values. */ uint32_t size; /** Reserved to be possibly used in future versions */ uint32_t reserved; }; ///@} End NvBlast common types /////////////////////////////////////////////////////////////////////////////// // NvBlastAsset related types /////////////////////////////////////////////////////////////////////////////// ///@{ /** Represents a piece of a destructible asset which may be realized as an entity with a physical and graphical component. Chunks may form a hierarchical decomposition of the asset. They contain parent and child chunk index information which defines the hierarchy. The parent and child chunk indices are their positions within the NvBlastAsset::chunks array. Child chunk indices are contiguous, starting at firstChildIndex and ending with childIndexStop - 1. */ struct NvBlastChunk { /** Central position for the chunk's volume */ float centroid[3]; /** Volume of the chunk */ float volume; /** Index of parent (UINT32_MAX denotes no parent) */ uint32_t parentChunkIndex; /** Index of first child */ uint32_t firstChildIndex; /** Stop for child indices */ uint32_t childIndexStop; /** Field for user to associate with external data */ uint32_t userData; }; /** Represents the interface between two chunks. At most one bond is created for a chunk pair. */ struct NvBlastBond { /** Interface average normal */ float normal[3]; /** Area of interface */ float area; /** Central position on the interface between chunks */ float centroid[3]; /** Extra data associated with bond, e.g. whether or not to create a joint */ uint32_t userData; }; /** Describes the connectivity between support chunks via bonds. Vertices in the support graph are termed "nodes," and represent particular chunks (NvBlastChunk) in an NvBlastAsset. The indexing for nodes is not the same as that for chunks. Only some chunks are represented by nodes in the graph, and these chunks are called "support chunks." Adjacent node indices and adjacent bond indices are stored for each node, and therefore each bond is represented twice in this graph, going from node[i] -> node[j] and from node[j] -> node[i]. Therefore the size of the adjacentNodeIndices and adjacentBondIndices arrays is twice the number of bonds stored in the corresponding NvBlastAsset. The graph is used as follows. Given a NvBlastSupportGraph "graph" and node index i (0 <= i < graph.nodeCount), one may find all adjacent bonds and nodes using: // adj is the lookup value in graph.adjacentNodeIndices and graph.adjacentBondIndices for (uint32_t adj = graph.adjacencyPartition[i]; adj < graph.adjacencyPartition[i+1]; ++adj) { // An adjacent node: uint32_t adjacentNodeIndex = graph.adjacentNodeIndices[adj]; // The corresponding bond (that connects node index i with node index adjacentNodeIndex): uint32_t adjacentBondIndex = graph.adjacentBondIndices[adj]; } For a graph node with index i, the corresponding asset chunk index is found using graph.chunkIndices[i].
The reverse mapping (obtaining a graph node index from an asset chunk index) can be done using the NvBlastAssetGetChunkToGraphNodeMap(asset, logFn) function. See the documentation for its use. The returned "node index" for a non-support chunk is the invalid value 0xFFFFFFFF. */ struct NvBlastSupportGraph { /** Total number of nodes in the support graph. */ uint32_t nodeCount; /** Indices of chunks represented by the nodes, an array of size nodeCount. */ uint32_t* chunkIndices; /** Partitions both the adjacentNodeIndices and the adjacentBondIndices arrays into subsets corresponding to each node. The size of this array is nodeCount+1. For 0 <= i < nodeCount, adjacencyPartition[i] is the index of the first element in adjacentNodeIndices (or adjacentBondIndices) for nodes adjacent to the node with index i. adjacencyPartition[nodeCount] is the size of the adjacentNodeIndices and adjacentBondIndices arrays. This allows one to easily count the number of nodes adjacent to a node with index i, using adjacencyPartition[i+1] - adjacencyPartition[i]. */ uint32_t* adjacencyPartition; /** Array composed of subarrays holding the indices of nodes adjacent to a given node. The subarrays may be accessed through the adjacencyPartition array. */ uint32_t* adjacentNodeIndices; /** Array composed of subarrays holding the indices of bonds (NvBlastBond) for a given node. The subarrays may be accessed through the adjacencyPartition array. */ uint32_t* adjacentBondIndices; }; /** Asset (opaque) Static destructible data, used to create actor families. Pointer to this struct can be created with NvBlastCreateAsset. The NvBlastAsset includes a ID which may be used to match it with physics and graphics data. */ struct NvBlastAsset {}; /** Chunk descriptor used to build an asset. See NvBlastAssetDesc. */ struct NvBlastChunkDesc { enum Flags { NoFlags = 0, /** If this flag is set then the chunk will become a support chunk, unless an ancestor chunk is also marked as support. */ SupportFlag = (1 << 0) }; /** Central position in chunk. */ float centroid[3]; /** Volume of chunk. */ float volume; /** Index of this chunk's parent. If this is a root chunk, then this value must be UINT32_MAX. */ uint32_t parentChunkDescIndex; /** See Flags enum for possible flags. */ uint32_t flags; /** User-supplied data which will be accessible to the user in chunk fracture events. */ uint32_t userData; }; /** Chunk bond descriptor used to build an asset. See NvBlastAssetDesc. */ struct NvBlastBondDesc { /** Bond data (see NvBlastBond). */ NvBlastBond bond; /** The indices of the chunks linked by this bond. They must be different support chunk indices. If one of the chunk indices is the invalid index (UINT32_MAX), then this will create a bond between the chunk indexed by the other index (which must be valid) and something external. Any actor containing this bond will cause the function NvBlastActorHasExternalBonds to return true. */ uint32_t chunkIndices[2]; }; /** Asset descriptor, used to build an asset with NvBlastCreateAsset A valid asset descriptor must have a non-zero chunkCount and valid chunkDescs. The user may create an asset with no bonds (e.g. a single-chunk asset). In this case bondCount should be zero and bondDescs is ignored. */ struct NvBlastAssetDesc { /** The number of chunk descriptors. */ uint32_t chunkCount; /** Array of chunk descriptors of size chunkCount. */ const NvBlastChunkDesc* chunkDescs; /** The number of bond descriptors. */ uint32_t bondCount; /** Array of bond descriptors of size bondCount. 
*/ const NvBlastBondDesc* bondDescs; }; /** Info used to construct an Asset or Family instance */ struct NvBlastAssetMemSizeData { public: uint32_t bondCount; uint32_t chunkCount; uint32_t nodeCount; uint32_t lowerSupportChunkCount; uint32_t upperSupportChunkCount; }; ///@} End NvBlastAsset related types /////////////////////////////////////////////////////////////////////////////// // NvBlastActor related types /////////////////////////////////////////////////////////////////////////////// ///@{ /** Family (opaque) A family can be created by the NvBlastAssetCreateFamily function. A family is needed to create the first actor. All subsequent actors created with the NvBlastActorSplit function (as a result of fracture) will share the same family block. NvBlastFamilyGetActorCount can be used to determine if the family can be safely released. */ struct NvBlastFamily {}; /** Actor (opaque) Actors can be generated by the NvBlastFamilyCreateFirstActor and NvBlastActorSplit functions. Opaque NvBlastActor pointers reference data within the family generated during NvBlastFamilyCreateFirstActor, and represent the actor in all actor-related API functions. */ struct NvBlastActor {}; namespace Nv { namespace Blast { const float kUnbreakableLimit = (0.5f * FLT_MAX); } } inline bool canTakeDamage(float health) { return (health > 0.0f && health < Nv::Blast::kUnbreakableLimit); } /** Actor descriptor, used to create an instance of an NvBlastAsset with NvBlastFamilyCreateFirstActor. See NvBlastFamilyCreateFirstActor. */ struct NvBlastActorDesc { /** Initial health of all bonds, if initialBondHealths is NULL (see initialBondHealths). */ float uniformInitialBondHealth; /** Initial bond healths. If not NULL, this array must be of length NvBlastAssetGetBondCount(asset, logFn). Setting it above Nv::Blast::kUnbreakableLimit will make the bond unbreakable. If NULL, uniformInitialBondHealth must be set. */ const float* initialBondHealths; /** Initial health of all lower-support chunks, if initialSupportChunkHealths is NULL (see initialSupportChunkHealths). */ float uniformInitialLowerSupportChunkHealth; /** Initial health of all support chunks. If not NULL, this must be of length NvBlastAssetGetSupportChunkCount(asset, logFn). The elements in the initialSupportChunkHealth array will correspond to the chunk indices in the NvBlastAssetGetSupportGraph(asset, logFn).chunkIndices array. Every descendant of a support chunk will have its health initialized to its ancestor support chunk's health, so this initializes all lower-support chunk healths. Setting it above Nv::Blast::kUnbreakableLimit will make the chunk unbreakable. If NULL, uniformInitialLowerSupportChunkHealth must be set. */ const float* initialSupportChunkHealths; }; ///@} End NvBlastActor related types /////////////////////////////////////////////////////////////////////////////// // Types used for damage and fracturing /////////////////////////////////////////////////////////////////////////////// ///@{ /** Fracture Data for Chunks Data interpretation varies depending on the function used. @see NvBlastActorGenerateFracture NvBlastActorApplyFracture NvBlastFractureBuffers */ struct NvBlastChunkFractureData { uint32_t userdata; //!< chunk's user data uint32_t chunkIndex; //!< asset chunk index float health; //!< health value (damage or remains) }; /** Fracture Data for Bonds Data interpretation varies depending on the function used.
@see NvBlastActorGenerateFracture NvBlastActorApplyFracture NvBlastFractureBuffers */ struct NvBlastBondFractureData { uint32_t userdata; //!< bond's user data uint32_t nodeIndex0; //!< graph node index of bond uint32_t nodeIndex1; //!< pair graph node index of bond float health; //!< health value (damage or remains) }; /** Memory to be used by fracture functions. Used as input and output target. @see NvBlastActorGenerateFracture NvBlastActorApplyFracture */ struct NvBlastFractureBuffers { uint32_t bondFractureCount; //!< available elements in bondFractures uint32_t chunkFractureCount; //!< available elements in chunkFractures NvBlastBondFractureData* bondFractures; //!< memory to be filled by fracture functions NvBlastChunkFractureData* chunkFractures; //!< memory to be filled by fracture functions }; /** Description of a NvBlastActorSplit result. This tells the user about changes in the actor, or creation of children. */ struct NvBlastActorSplitEvent { NvBlastActor* deletedActor; //!< deleted actor or nullptr if actor has not changed NvBlastActor** newActors; //!< list of created actors }; /** A single actor's representation used by NvBlastGraphShaderFunction. */ struct NvBlastGraphShaderActor { uint32_t actorIndex; //!< Actor's index. uint32_t graphNodeCount; //!< Actor's graph node count. uint32_t assetNodeCount; //!< Asset node count. uint32_t firstGraphNodeIndex; //!< Entry index for graphNodeIndexLinks const uint32_t* graphNodeIndexLinks; //!< Linked index list of connected nodes. Traversable with nextIndex = graphNodeIndexLinks[currentIndex], terminates with 0xFFFFFFFF. const uint32_t* chunkIndices; //!< Graph's map from node index to support chunk index. const uint32_t* adjacencyPartition; //!< See NvBlastSupportGraph::adjacencyPartition. const uint32_t* adjacentNodeIndices; //!< See NvBlastSupportGraph::adjacentNodeIndices. const uint32_t* adjacentBondIndices; //!< See NvBlastSupportGraph::adjacentBondIndices. const NvBlastBond* assetBonds; //!< NvBlastBonds geometry in the NvBlastAsset. const NvBlastChunk* assetChunks; //!< NvBlastChunks geometry in the NvBlastAsset. const float* familyBondHealths; //!< Actual bond health values for broken bond detection. const float* supportChunkHealths; //!< Actual chunk health values for dead chunk detection. const uint32_t* nodeActorIndices; //!< Family's map from node index to actor index. }; /** A single actor's representation used by NvBlastSubgraphShaderFunction. */ struct NvBlastSubgraphShaderActor { uint32_t chunkIndex; //!< Index of chunk represented by this actor. const NvBlastChunk* assetChunks; //!< NvBlastChunks geometry in the NvBlastAsset. }; /** Damage shader for actors with more than one node in the support graph. From input actor data (NvBlastGraphShaderActor) and user-supplied custom data (programParams), creates a list of fracture commands to be applied to the respective NvBlastActor. \param[in,out] commandBuffers The resulting health damage to apply. Typically requires an array of size (number of support chunks) + (number of bonds) of the processed asset but may depend on the actual implementation. \param[in] actor The actor representation used for creating commands. \param[in] programParams A set of parameters defined by the damage shader implementer. Interpretation of NvBlastFractureBuffers: As input: Counters denote available entries for FractureData. Chunk and Bond userdata are not used. Health values are not used. As output: Counters denote valid entries in FractureData arrays.
Chunks and Bond userdata reflect the respective userdata set during asset initialization. Health values denote how much damage is to be applied. @see NvBlastFractureBuffers NvBlastGraphShaderActor */ typedef void(*NvBlastGraphShaderFunction)(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* programParams); /** Damage shader for actors with a single chunk. From input actor data (NvBlastSubgraphShaderActor) and user-supplied custom data (programParams), creates a list of fracture commands to be applied to the respective NvBlastActor. \param[in,out] commandBuffers The resulting health damage to apply. Typically requires an array of size (number of support chunks) + (number of bonds) of the processed asset but may depend on the actual implementation. \param[in] actor The actor representation used for creating commands. \param[in] programParams A set of parameters defined by the damage shader implementer. Interpretation of NvBlastFractureBuffers: As input: Counters denote available entries for FractureData. Chunk and Bond userdata are not used. Health values are not used. As output: Counters denote valid entries in FractureData arrays. Chunks and Bond userdata reflect the respective userdata set during asset initialization. Health values denote how much damage is to be applied. @see NvBlastFractureBuffers NvBlastSubgraphShaderActor */ typedef void(*NvBlastSubgraphShaderFunction)(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* programParams); /** Damage Program. Contains both graph and subgraph shaders. When used on an actor, the appropriate shader will be called. Either shader can be nullptr, in which case it is skipped. @see NvBlastGraphShaderFunction NvBlastSubgraphShaderFunction */ struct NvBlastDamageProgram { NvBlastGraphShaderFunction graphShaderFunction; NvBlastSubgraphShaderFunction subgraphShaderFunction; }; ///@} End of types used for damage and fracturing #endif // ifndef NVBLASTTYPES_H
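// --- Illustrative sketch (not part of the SDK header above) ---
// A minimal damage program built from the shader typedefs above: it applies a flat amount of damage to
// every bond (graph case) or to the single chunk (subgraph case). The ExampleDamageParams struct is a
// made-up parameter block for this sketch; real shaders (such as those in NvBlastExtDamageShaders) compute
// per-bond and per-chunk damage from impact position, radius, and similar inputs.

struct ExampleDamageParams
{
    float damage; // positive health amount to subtract from every element
};

static void exampleGraphShader(NvBlastFractureBuffers* commandBuffers,
                               const NvBlastGraphShaderActor* actor, const void* programParams)
{
    const float damage = static_cast<const ExampleDamageParams*>(programParams)->damage;
    uint32_t bondCommandCount = 0;

    // Walk the actor's nodes via the linked index list (terminated by 0xFFFFFFFF), then walk each node's
    // adjacency as described for NvBlastSupportGraph. Each bond appears twice, so emit it once (node0 < node1).
    for (uint32_t node0 = actor->firstGraphNodeIndex; node0 != 0xFFFFFFFF;
         node0 = actor->graphNodeIndexLinks[node0])
    {
        for (uint32_t adj = actor->adjacencyPartition[node0]; adj < actor->adjacencyPartition[node0 + 1]; ++adj)
        {
            const uint32_t node1 = actor->adjacentNodeIndices[adj];
            if (node0 >= node1)
                continue;
            const uint32_t bondIndex = actor->adjacentBondIndices[adj];
            if (!canTakeDamage(actor->familyBondHealths[bondIndex]))
                continue; // skip already-broken or unbreakable bonds
            if (bondCommandCount >= commandBuffers->bondFractureCount)
                break;    // respect the available entry count passed in

            NvBlastBondFractureData& command = commandBuffers->bondFractures[bondCommandCount++];
            command.userdata = actor->assetBonds[bondIndex].userData;
            command.nodeIndex0 = node0;
            command.nodeIndex1 = node1;
            command.health = damage; // damage to apply
        }
    }

    // On output the counters must hold the number of valid entries written.
    commandBuffers->bondFractureCount = bondCommandCount;
    commandBuffers->chunkFractureCount = 0;
}

static void exampleSubgraphShader(NvBlastFractureBuffers* commandBuffers,
                                  const NvBlastSubgraphShaderActor* actor, const void* programParams)
{
    const float damage = static_cast<const ExampleDamageParams*>(programParams)->damage;
    uint32_t chunkCommandCount = 0;
    if (commandBuffers->chunkFractureCount > 0)
    {
        NvBlastChunkFractureData& command = commandBuffers->chunkFractures[chunkCommandCount++];
        command.userdata = actor->assetChunks[actor->chunkIndex].userData;
        command.chunkIndex = actor->chunkIndex;
        command.health = damage;
    }
    commandBuffers->bondFractureCount = 0;
    commandBuffers->chunkFractureCount = chunkCommandCount;
}

// Package both shaders; this program could then be passed to NvBlastActorGenerateFracture along with a
// pointer to an ExampleDamageParams instance as programParams.
static const NvBlastDamageProgram exampleDamageProgram = { exampleGraphShader, exampleSubgraphShader };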
NVIDIA-Omniverse/PhysX/blast/docs/CHANGELOG.md
# Changelog ## [5.0.4] - 22-January-2024 ### Bugfixes - Fixed issue https://github.com/NVIDIA-Omniverse/PhysX/issues/207, Island removal doesn't work as expected ## [5.0.3] - 1-November-2023 ### Bugfixes - Fixed memory leak in NvBlastExtAuthoringFindAssetConnectingBonds reported in issue #185. ## [5.0.2] - 25-July-2023 ### Bugfixes - Fixed slice fracturing bug which set the local chunk transform to the identity in some cases ## [5.0.1] - 22-June-2023 ### Bugfixes - Use proper constructors for NvTransform and NvVec3 to avoid using garbage data ## [5.0.0] - 23-Jan-2023 ### Changes - Removed all PhysX dependencies from code outside of the ExtPx extension - Replaced Px types with NvShared types - NvFoundation headers in include/shared/NvFoundation - Includes NvPreprocessor.h and NvcTypes.h (formerly in include/lowlevel) - Include basic Nv types, such as NvVec3 (used by the Tk library) - Consolidated header structure - include/lowlevel/NvBlastPreprocessor.h is gone - Previously-defined NVBLAST_API has been renamed NV_C_API and is now defined in NvPreprocessor.h ## [4.0.2] - 31-Aug-2022 ### Bugfixes - Stress solver Linux crash fix. Explicitly allocating aligned data buffers for use with simd data. ## [4.0.1] - 10-Aug-2022 ### Bugfixes - Stress solver fixes: - More robust conversion from angular pressures to linear pressures. - Better error tolerance checking. - Force sign consistency. ## [4.0.0] - 31-May-2022 ### New Features - Fully integrated stress-damage system. A stress solver is used to determine how bond forces react to impacts and other externally-supplied accelerations. Stress limits (elastic and fatal) determine how bond health (area) deteriorates with bond force. When bonds break and new actors are generated, excess forces are applied to the previously joined bodies. Using a new stress solver with better convergence properties. - Documentation publishing. ## [3.1.3] - 28-Feb-2022 ### Changes - Update triangulation ear clipping algorithm to avoid outputting triangle slivers. ## [3.1.2] - 24-Feb-2022 ### Changes - Change ExtStressSolver::create() (and downstream functions/classes) to take const NvBlastFamily. - Change the order colors are compressed in PxVec4ToU32Color() ### Bug Fixes - Fixed triangulation ear clipping bug that could cause input verts to not be used in output triangulation, leading to T junctions. ## [3.1.1] - 2022-01-12 ### Changes - Exposing NvcVec2 and NvcVec3 operators in new file include/globals/NvCMath.h. ## [3.1.0] - 2022-01-10 ### Changes - Exposing boolean tool API, along with spatial accelerators used with the tool. - include/extensions/authoring/NvBlastExtAuthoringBooleanTool.h contains virtual API class Nv::Blast::BooleanTool. - include/extensions/authoringCommon/NvBlastExtAuthoringAccelerator.h contains virtual API classes Nv::Blast::SpatialAccelerator and SpatialGrid. - include/extensions/authoring/NvBlastExtAuthoring.h has global functions: - NvBlastExtAuthoringCreateBooleanTool() - NvBlastExtAuthoringCreateSpatialGrid(...) - NvBlastExtAuthoringCreateGridAccelerator(...) - NvBlastExtAuthoringCreateSweepingAccelerator(...) - NvBlastExtAuthoringCreateBBoxBasedAccelerator(...) ## [3.0.0] - 2021-10-13 ### Changes - Rearranged folder layout. Public include files are now under a top-level "include" folder. ## [2.1.7] - 2021-07-18 ### Bug Fixes - Fixed edge case with convex hull overlap test in processWithMidplanes(), so that (0,0,0) normals aren't generated. 
- Prevented crash when no viable chunk is found by findClosestNode(), leading to lookup by invalid chunk index in damage shaders. ## [2.1.6] - 2021-06-24 ### Changes - Prioritize convex hulls over triangles in processWithMidplanes() - Store local convex hulls longer in createFullBondListAveraged() so they can be used in processWithMidplanes() - Fix bug with buildDescFromInternalFracture() when there are multiple root chunks. ## [2.1.5] - 2021-05-10 ### Changes - Bond centroid and normal calculations improved when splitting a child chunk. The normal will only be different from before with non-planar splitting surfaces. - Mesh facet user data, which stores a splitting plane (or surface) identifier, will stay unique if the split mesh is fed back into a new instance of the fracture tool. The new IDs generated will be larger than any ID input using FractureTool::setChunkMesh(...). ## [2.1.4] - 2021-04-08 ### Bug Fixes - OM-29933: Crash fracturing dynamic attachment ## [2.1.3] - 2021-04-05 ### Bug Fixes - Bond area calculation was producing values twice the correct value. - Fixed exception in TkGroupImpl. ## [2.1.2] - 2021-03-15 ### Bug Fixes - MR #18: Fix asset joint serialization (BlastTk) - MR #19: Fix index out of bounds (BlastTk) ## [2.1.1] - 2021-03-02 ### Changes - Added Cap'n Proto serialization path for Family to match Asset. - Fix bug with BlastAsset::getSupportChunkHealthMax() returning the wrong value. - Add get/set/size data interface to FixedBoolArray. - Allocate asset memory based on how much space it needs, not serialized data size. - Release asset memory if deserialization fails. - Removed FamilyHeader::getActorBufferSize(), use FamilyHeader::getActorsArraySize() instead. ## [2.0.1] - 2021-03-01 ### Changes - Added .pdb files to windows package. - Bumping version to update dependency chain with linux built from gcc 5.5.0 (for CentOS7 compatibility). ## [2.0.0] - 2021-02-19 ### Changes - Add optional chunkId params to FractureTool::setSourceMeshes() and FractureTool::setChunkMesh() - Rename functions and variables to better indicate what the indices are used for instead of using generic "chunkIndex" for everything ## [1.4.7] - 2020-10-20 ### Changes - Don't include bonds that can't take damage (already broken or unbreakable) in results when applying damage ### Bug Fixes - Make sure all fields (specifically userData) on NvBlastBondDesc are initialized when creating bonds ## [1.4.6] - 2020-10-08 ### Changes - Updated license file - Updated copyright dates ### Bug Fixes - Pull request #15 "Fix Blast bond generation" - Pull request #16 "Fix invalid pointer access in authoring tools" ## [1.4.5] - 2020-09-30 ### Bug Fixes - Allocate on heap instead of stack in importerHullsInProximityApexFree() to prevent crash ## [1.4.4] - 2020-09-29 ### Changes - Support unbreakable bonds and chunks by setting their health above Nv::Blast::kUnbreakableLimit - Consolidate code when node is removed ## [1.4.3] - 2020-09-26 ### Changes - Per-chunk internal scaling. 
ChunkInfo contains the struct TransformST (scale & translation) ### Bug Fixes - Fixes many fracturing instabilities with per-chunk scaling ## [1.4.2] - 2020-08-28 ### Bug Fixes - Fixed mesh generation bug when using FractureToolImpl::createChunkMesh ## [1.4.1] - 2020-06-26 ### Changes - Change API references to 'external' instead of 'world' bonds - Deprecate 'world' versions, should be removed on next major version bump ## [1.2.0] - 2020-01-23 ### Changes - Removed BlastTool - Removed ApexImporter tool - Removed ExtImport extension (for Apex) ### New Features - Reenabling runtime fracture ### Known Issues - Damage shaders in extensions can miss bonds if the damage volume is too small. - Authoring code does not use the user-defined allocator (NvBlastGlobals) exclusively. ## [1.1.5] - 2019-09-16 ### Changes - Extensions API refactored to eliminate use of Px types. - Numerous API ### changes to meet new coding conventions. - Packman package manager updated to v. 5.7.2, cleaned up dependency files. - Chunks created from islands use padded bounds to determine connectivity. - FractureTool::deleteAllChildrenOfChunk renamed FractureTool::deleteChunkSubhierarchy, added ability to delete chunks. - NvBlastAsset::testForValidChunkOrder (used when creating an NvBlastAsset) is now more strict, requiring parent chunk descriptors to come before their children. It is still less strict than the order created by NvBlastBuildAssetDescChunkReorderMap. ### New Features - Authoring tools: - Ability to pass chunk connectivity info to uniteChunks function, enabling chunks split by island detection to be united. - Option to remove original merged chunks in uniteChunks function. - The function uniteChunks allows the user to specify a chunk set to merge. Chunks from that set, and all descendants, are considered for merging. - Ability to delete chunks (see note about FractureTool::deleteChunkSubhierarchy in ### Changes section, above). - Added FractureTool::setApproximateBonding function. Signals the tool to create bonds by proximity instead of just using cut plane data. ### Bug Fixes - Authoring tools: - Fixed chunk reordering bug in BlastTool. - Chunks which have been merged using the uniteChunks function may be merged again - Restored chunk volume calculation - NvBlastBuildAssetDescChunkReorderMap failure cases fixed. ### Known Issues - Damage shaders in extensions can miss bonds if the damage volume is too small. - Authoring code does not use the user-defined allocator (NvBlastGlobals) exclusively. ## [1.1.4] - 2018-10-24 ### Changes - Unity plugin example updated to work with latest Blast SDK. ### New Features - Authoring tools: - Island detection function islandDetectionAndRemoving has a new parameter, createAtNewDepth. - Bonds created between island-based chunks. - Added "agg" (aggregate) commandline switch to AuthoringTool. This allows multiple convex hulls per chunk to be generated. - Damage pattern authoring interface. ### Bug Fixes - Build working on later C++ versions (e.g. deprecated UINT32_MAX removed). - Authoring tools: - Fixed .obj material loading when obj folder is same as working directory. - Degenerate face generation fix. - Fixed memory leak in FractureTool. - Proper memory releasing in samples. - Single-actor serialization bugfix when actor has world bonds. - Updated PhysX package for Win64 (vc14 and vc15) and Linux64 to 3.4.24990349, improving GRB behavior and fixing GRB crash/failure on Volta and Turing. - Documented JSON collision export option introduced in previous version. 
### Known Issues - Damage shaders in extensions can miss bonds if the damage volume is too small. - Authoring code does not use the user-defined allocator (NvBlastGlobals) exclusively. ## [1.1.3] - 2018-05-30 ### Changes - No longer testing Win32 project scripts. Note generate_projects_vc14win32.bat has been renamed generate_projects_vc14win32_untested.bat. - Using a PhysX Packman package that no longer includes APEX. - Updated documentation: - Authoring documentation mentions restrictions for meshes to be fractured. - Added BlastTool reference to README.md. - Updated documentation paths in README.md. - Using Packman5 for external packages. - Authoring tools: - In NoiseConfiguration, surfaceResolution changed to samplingInterval. The latter is the reciprocal of the resolution and is defined for all 3 axes. - Improved cutout robustness. - Exporter (used by both authoring tools and ApexImporter) has a JSON collision export option. ### New Features - VC15 Win64 project scripts. Run generate_projects_vc15win64.bat. - Authoring tools: - Noisy cutout fracture. - Conic cutout option (tapers cut planes relative to central point). - Cutout option "useSmoothing." Adds generated faces to the same smoothing group as the original face without noise. - Periodic cutout boundary conditions. ### Bug Fixes - Packman target platform dependencies no longer pulling Windows packages into other platforms. - Fixed bond generation for cutout fracture. ### Known Issues - Damage shaders in extensions can miss bonds if the damage volume is too small. - Authoring code does not use the user-defined allocator (NvBlastGlobals) exclusively. ## [1.1.2] - 2018-01-26 ### Changes - Improvements to uniteChunks for hierarchy optimization. - NvBlastExtAuthoringFindAssetConnectingBonds optimized. - APEX dependency has been removed (ExtImport used it). Now ExtImport has a built-in NvParameterized read that can load an APEX Destructible asset. ### New Features - FractureTool::setChunkMesh method. - Distance threshold added to NvBlastExtAuthoringFindAssetConnectingBonds. - NvBlastExtExporter: IMeshFileWriter::setInteriorIndex function, for control of interior material. - Cutout and cut fracture methods: NvBlastExtAuthoringCreateCutoutSet and Nv::Blast::CutoutSet API, FractureTool::cut and FractureTool::cutout APIs. - NvBlastExtAuthoring: - NvBlastExtAuthoringCreateMeshFromFacets function. - NvBlastExtUpdateGraphicsMesh function. - NvBlastExtAuthoringBuildCollisionMeshes function. - UV fitting on interior materials using new FractureTool::fitUvToRect and FractureTool::fitAllUvToRect functions. - Multi-material support in OBJ file format. ### Bug Fixes - Fixed bug causing normals on every other depth level to be flipped when exporting Blast meshes. - Fixed bug where faces are missed after hierarchy optimization on a sliced mesh. - Fixed subtree chunk count generated in Nv::Blast::Asset::Create (led to a crash in authoring tools, fracturing a pre-fractured mesh). - Fixed a crash when loading an obj with bad material indices. - Fixed Actor::split so that visibility lists are correctly updated even when the number of split actors exceeds newActorsMaxCount. ### Known Issues - Damage shaders in extensions can miss bonds if the damage volume is too small. - Authoring code does not use the user-defined allocator (NvBlastGlobals) exclusively. 
## [1.1.1] - 2017-10-10 ### Changes - NvBlastProgramParams moved to NvBlastExtDamageShaders - Materials removed from NvBlastTk ### New Features - Damage shader acceleration structure - Extended support structures via new asset merge functions in NvBlastExtAssetUtils - Ability to scale asset components when merging assets with NvBlastExtAssetUtilsMergeAssets - NvBlastExtAuthoring - Option to fit multiple convex hulls to a chunk (uses VHACD) - deleteAllChildrenOfChunk and uniteChunks APIs - Triangle damage shader for swept segments - Impact damage spread shaders ### Bug Fixes - Linux build fixes - NvBlastExtAuthoring - Fracturing tools chunk index fix - VoronoiSitesGeneratorImpl::generateInSphere fix - More consistent use of NVBLAST_ALLOC and NVBLAST_FREE - Boolean tool bug fix ### Known Issues - Damage shaders in extensions can miss bonds if the damage volume is too small. - Authoring code does not use the user-defined allocator (NvBlastGlobals) exclusively. ## [1.1.0] - 2017-08-28 ### Changes - VC12 is no longer supported. - New license header, consistent with PhysX license header. - New serialization extension. NvBlastExtSerialization is now a modular serialization manager. It loads serializers sets for low-level, Tk, and ExtPx. Each serializer handles a particular file format and object type. Currently the universally available format for all object types is Cap'n Proto binary. The file format is universal, as it uses a header to inform the serialization manager which serializer is needed to deserialize the contained data. All authoring and import tools write using this format to files with a ".blast" filename extension. - Corresponding to the new serialization, the old formats have been deprecated. In particular, the DataConverter tool has been removed. Instead see LegacyConverter in the ### New Features section. - TkSerializable virtual base class has been removed. TkAsset and TkFamily are now derived directly from TkIdentifiable. Serialization functions have been removed, replaced by the new serialization extension. - ExtPxAsset serialization functions have been removed, replaced by the new serialization extension. - World bonds. A bond descriptor can now take the invalid index for one of its chunkIndices. This will cause an additional support graph node to be created within an asset being created with this descriptor. This node will not correspond to any chunk (it maps to the invalid index in the graph's chunkIndices array). Actors that contain this new "world node" may be kept static by the user, emulating world attachment. This is easily tested using the new low-level function NvBlastActorIsBoundToWorld. - With the addition of world bonds (see above), the NvBlastExtImport extension no longer creates an extra "earth chunk" to bind chunks to the world. Instead, it creates world bonds. - ExtPxAsset now contains an NvBlastActorDesc, which is used as the default actor descriptor when creating an ExtPxFamily from the asset. - TkFramework no longer has its own allocator and message handler. Instead, this is part of a new NvBlastGlobals API. This way, extensions and TkFramework may share the same allocator. - SampleAssetViewer - Physics simulation now runs concurrently with graphics and some of the sample/blast logic. - New Damage tool added: line segment damage - Damage tool radius can be set individually for each tool (radial, cutter, line segment, hierarchical). - Cubes now removed when a scene is reloaded. - Cube throw velocity can be "charged" by holding down the 'F' key. 
- New damage system built around "health," see API changes in NvBlastExtShaders and changes in NvBlastExtImpactDamageManager. - NvBlastExtShearGraphShader uses a chunk-based method to find the closest graph node, improving performance. - TkGroup no longer uses the physx::PxTaskManager interface for task management. Instead, a TkGroupWorker interface has been added. The NvBlastExtPhysX extension uses the physx::PxTaskManager to implement this interface. - Better error handling in AuthoringTool (stderr and user error handler). - More consistent commandline switches in AuthoringTool and ApexImporter (--ll, --tk, --px flags). - Various small clean-ups. ### New Features - NvBlastExtAssetUtils extension - Merge multiple assets into one. - Add "world bonds" to an asset (see "World bonds" in the Changes section). - Transform an NvBlastAsset's geometric data in-place. - NvBlastExtAuthoring - Open edge detection. - Rotation of Voronoi cells used for fracturing. - "Globals" code (under sdk/globals). Includes a global allocator, message handler, and profiler API used by TkFramework and extensions. - NvBlastExtStress extension, a PhysX-independent API for performing stress calculations with low-level Blast actors. - NvBlastActorIsSplitRequired() function for low-level actors. If this function returns false, NvBlastActorSplit() may be skipped as it will have no effect (see the sketch following this entry). - NvBlastExtShaders - New "Segment Radial Damage" shader. Damages everything within a given distance of a line segment. - New NvBlastExtExporter extension, used commonly by import and authoring tools. Allows collision data to be stored in one of three ways: - JSON format. - FBX mesh format (separate file). - FBX mesh format in a second "collision" layer, alongside the graphics mesh nodes corresponding to Blast chunks. - LegacyConverter tool has been added, which converts .llasset, .tkasset, .bpxa, .pllasset, .ptkasset, and .pbpxa asset files to the new .blast format using the universal serialization scheme in the new NvBlastExtSerialization extension. - NvBlastExtAuthoring - Mesh cleaner, tries to remove self-intersections and open edges in the interior of a mesh. - Ability to set interior material to existing (external) material, or a new material id. - Material ID remapping API. ### Bug Fixes - NvBlastExtAuthoring - Slicing normals fix. - Various instances of &array[0] to get the data buffer from a std::vector now use the data() member function. This had led to some crashes with empty vectors. - SampleAssetViewer - Fixed dragging kinematic actor. - Now loads the commandline-defined asset also when sample resources were not downloaded yet. - Serialization documented. - Fixed smoothing groups in FBX exporter code. - Impulse passing from parent to child chunks fixed. - Reading unskinned fbx meshes correctly. - Collision hull generation from fbx meshes fixed. - Win32/64 PerfTest crash fix. ### Known Issues - Damage shaders in extensions can miss bonds if the damage volume is too small. - Authoring extension does not perform convex decomposition to fit chunks with multiple collision hulls. - Authoring code does not use the user-defined allocator (NvBlastGlobals) exclusively. 
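A minimal sketch of two of the low-level additions described in the 1.1.0 entry above: a bond descriptor that uses the invalid index to create the extra "world" node, and the NvBlastActorIsSplitRequired() early-out before splitting. The struct field names, the UINT32_MAX convention for the invalid index, and the helper shown here are assumptions recalled from the NvBlast low-level headers, not verbatim SDK code; check them against NvBlast.h before use.

```cpp
#include <cstdint>
#include "NvBlast.h"  // assumed include name for the low-level API

// Hypothetical helper: one bond attaching a support chunk to the world.
static NvBlastBondDesc makeWorldBondDesc(uint32_t supportChunkIndex)
{
    NvBlastBondDesc desc = {};
    desc.chunkIndices[0] = supportChunkIndex;
    desc.chunkIndices[1] = UINT32_MAX;  // assumed invalid-index value -> creates the extra "world" graph node
    desc.bond.normal[1]  = 1.0f;        // bond normal (roughly "up")
    desc.bond.area       = 1.0f;        // nonzero area so damage shaders can act on it
    return desc;
}

// Actors containing the world node can be kept static/kinematic by the application:
//   if (NvBlastActorIsBoundToWorld(actor, logFn)) { /* keep the physics body kinematic */ }

// New in 1.1.0: skip the split entirely when no fracture is pending.
static bool trySplit(NvBlastActor* actor, NvBlastLog logFn)
{
    if (!NvBlastActorIsSplitRequired(actor, logFn))
        return false;  // nothing broke since the last split

    // ...size scratch (NvBlastActorGetRequiredScratchForSplit) and call
    //    NvBlastActorSplit() as usual; details omitted in this sketch...
    return true;
}
```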
## [1.0.0] - 2017-02-24 ### Changes - tclap and imgui moved to Packman packages - Models and textures for the sample application have been moved to Packman - Packman packages with platform-specific sections have been split into platform-specific packages - Improvements to fracturing tools - TkJoint events no longer contain actor data - API cleanup: - NvBlastActorCreate -> NvBlastFamilyCreateFirstActor - NvBlastActorRelease -> NvBlastActorDeactivate - NvBlastActorDeserialize -> NvBlastFamilyDeserializeActor - Functions that operate on an object start with NvBlast[ObjectName] - Functions that create an object purely from a desc start with NvBlastCreate - Functions that get scratch start with NvBlast[Object]GetScratchFor[functionname], etc. - Object functions take the object as the first input parameter (non-optional output parameters always come first); a sketch following these conventions appears at the end of this changelog - Removal of NvBlastCommon.h - More consistent parameter checking in low-level API - NvBlastAlloc and NvBlastFree functions have been removed. Blast low-level no longer does (de)allocation. All memory is passed in and managed by the user - All Blast low-level functions take a log (NvBlastLog) function pointer (which may still be NULL) - Authoring tool now handles FBX mesh format - Constructor for TkAssetDesc sets sane defaults - Sample uses skinning for the 38k tower, for perf improvement - Further optimizations to the sample, including using 4 instead of 2 CPU cores and capping the actor count at 40k - Linux build (SDK and tests) - Renamed TkJointUpdateEvent::eventSubtype -> TkJointUpdateEvent::subtype - "LowLevel" extension renamed "ConvertLL" - Renamed TkEventReceiver -> TkEventListener ### New Features - Serialization enabled for XBoxOne ### Bug Fixes - Can change worker thread count in CPU dispatcher - TkJoints created from the TkFramework::createJoint function are now released when the TkFramework is released - Various fixes to unit tests - Crash fix in CPU dispatcher - Returning enough buffer space to handle hierarchical fracturing cases ### Known Issues - Serialization requires documentation ## [1.0.0-beta] - 2017-01-24 ### Changes - Material API simplified (NvBlastProgramParams) - Nv::Blast::ExtPhysics renamed Nv::Blast::ExtPx - Various small changes to the low-level API (function renaming, argument list changes, etc.) - Extensions libraries reconfigured according to major dependencies and functionality: - Authoring - Import (depends on PhysX and APEX) - PhysX (depends on PhysX) - Serialization (depends on PhysX and Cap'n Proto) - Shaders - Source folder reorganization: low-level, Tk, and extensions all under an sdk folder ### New Features - TkFamily serialization - Versioned data serialization extensions for both low-level and Tk, based on Cap'n Proto - TkJoint API, can create joints at runtime, attachments to Newtonian Reference Frame supported - CMake projects - PackMan used for dependencies - Per-bond and per-chunk health initialization - XBoxOne and Windows support for perf zones - Timers in Tk - Stress solver (automatic bond breaking) - ExtPx asset serialization, combined TkAsset + PhysX collision meshes (.bpxa files) ### Removed Features - TkComposite objects. 
Composites may be created using the new TkJoint API in the TkFramework ### Known Issues - Serialization requires documentation ## [1.0.0-alpha] - 2016-10-21 ### Features - Blast (low-level) library - BlastTk (high-level) library - BlastExt (extensions) library including: - AssetAuthoring - DataConverter - BlastID Utilities - ApexImporter Utilities - Materials - Physics Manager - Sync Layer - Tools: - ApexImporter - DataConverter - AuthoringTool - Samples: - SampleAssetViewer ### Known Issues - Documentation incomplete - TkFamily cannot be serialized - Data conversion utility for Tk library does not exist - Material API is still changing
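As referenced from the 1.0.0 notes above, a hedged sketch of the renamed creation path and its parameter conventions (object first, caller-provided scratch, optional NvBlastLog), with the 1.4.4 unbreakable-health idea folded in. The NvBlastActorDesc field names and the family/scratch query functions are recalled from memory of NvBlast.h and should be treated as assumptions to verify against the SDK.

```cpp
#include <cfloat>
#include <vector>
#include "NvBlast.h"  // assumed include name for the low-level API

// Sketch only: create a family and its first actor from an existing NvBlastAsset.
static NvBlastActor* createFirstActorSketch(const NvBlastAsset* asset,
                                            std::vector<char>& familyMem,  // must outlive the returned actor
                                            NvBlastLog logFn)
{
    // Blast low-level performs no allocation: the caller sizes and owns the family buffer.
    familyMem.resize(NvBlastAssetGetFamilyMemorySize(asset, logFn));
    NvBlastFamily* family = NvBlastAssetCreateFamily(familyMem.data(), asset, logFn);

    NvBlastActorDesc actorDesc = {};               // field names assumed from NvBlastTypes.h
    actorDesc.uniformInitialBondHealth = FLT_MAX;  // anything above Nv::Blast::kUnbreakableLimit
                                                   // marks bonds unbreakable (per the 1.4.4 entry)
    actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;

    // Scratch is sized by the matching NvBlast[Object]GetScratchFor... query and passed in by the caller.
    std::vector<char> scratch(NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, logFn));
    return NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), logFn);
}
```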
NVIDIA-Omniverse/PhysX/flow/build_aarch64.sh
#! /bin/bash ./generate_projects_aarch64.sh cd _compiler/gmake2 make config=debug_aarch64 make config=release_aarch64 cd ../..
NVIDIA-Omniverse/PhysX/flow/PACKAGE-INFO.yaml
Package : nvflow Maintainers : [email protected] Description : Flow SDK SWIPAT NvBug : Repository : P4://sw/devrel/libdev/turbulence2/NvFlow2/dev/main/
NVIDIA-Omniverse/PhysX/flow/build.bat
@echo off call generate_projects.bat cd _compiler\vs2017 if defined NVFLOW_MSBUILD2017 ( echo Using provided NVFLOW_MSBUILD2017 ) else ( echo NVFLOW_MSBUILD2017 not set, attempting default VS2017 install path set NVFLOW_MSBUILD2017="%ProgramFiles% (x86)\Microsoft Visual Studio\2017\Professional\MSBuild\15.0\Bin\amd64\MSBuild.exe" ) call %NVFLOW_MSBUILD2017% nvflow.sln /p:Configuration=debug call %NVFLOW_MSBUILD2017% nvflow.sln /p:Configuration=release cd ..\..
NVIDIA-Omniverse/PhysX/flow/build.sh
#! /bin/bash ./generate_projects.sh cd _compiler/gmake2 make config=debug_x86_64 make config=release_x86_64 cd ../..
NVIDIA-Omniverse/PhysX/flow/generate_projects_aarch64.sh
#!/bin/bash ./external/premake/linux64/premake5_aarch64 gmake2
NVIDIA-Omniverse/PhysX/flow/generate_projects.sh
#!/bin/bash ./external/premake/linux64/premake5 gmake2
NVIDIA-Omniverse/PhysX/flow/premake5.lua
workspace "nvflow" configurations { "debug", "release" } filter { "system:linux" } platforms {"x86_64", "aarch64"} filter { "system:windows" } platforms {"x86_64"} filter {} startproject "nvfloweditor" local platform = "%{cfg.system}-%{cfg.platform}" local workspaceDir = "_compiler/" .. _ACTION local targetDir = "_build/" .. platform .. "/%{cfg.buildcfg}" local generatedDir = "_generated/" .. platform .. "/%{cfg.buildcfg}" location(workspaceDir) targetdir(targetDir) objdir("_build/intermediate/" ..platform.. "/%{prj.name}") staticruntime "On" cppdialect "C++11" exceptionhandling "Off" rtti "Off" flags { "FatalCompileWarnings", "NoPCH", "NoIncrementalLink" } floatingpoint "Fast" includedirs { "shared", "include/nvflow", "include/nvflow/shaders", generatedDir } filter { "platforms:x86_64" } architecture "x86_64" filter { "system:linux", "platforms:aarch64" } architecture "aarch64" filter { "configurations:debug" } optimize "Off" symbols "On" defines { "_DEBUG" } filter { "configurations:release" } optimize "On" symbols "On" defines { "NDEBUG" } filter { "system:windows" } systemversion "10.0.17763.0" filter { "system:linux", "platforms:x86_64"} buildoptions { "-msse4" } filter { "system:linux" } linkoptions { "-Wl,-rpath,'$$ORIGIN',--no-as-needed", "-ldl", "-lpthread" } filter { } function copy_to_targetdir(filePath) local relativeTargetDir = "../../../" .. targetDir local relativeFilePath = "../../../" .. filePath postbuildcommands { "{COPY} \"" .. relativeFilePath .. "\" \"" .. relativeTargetDir .. "\"" } end function prebuildShaderTool(shaderProjectFile) local relativeTargetDir = "../../../" .. targetDir local generatedPath = "../../../" .. generatedDir local shaderProjectPath = "../../../" .. shaderProjectFile filter { "system:windows" } prebuildcommands { "\"" .. relativeTargetDir .. "/nvflowshadertool.exe\" \"" .. generatedPath .. "\" \"" .. shaderProjectPath .. "\"" } filter { "system:linux" } prebuildcommands { "\"" .. relativeTargetDir .. "/nvflowshadertool\" \"" .. generatedPath .. "\" \"" .. shaderProjectPath .. "\"" } filter{} end function postbuildShaderTool() local generatedPath = "../../../" .. generatedDir .. "/%{prj.name}" postbuildcommands { "{MOVE} \"" .. generatedPath .. "/generated.cpp\" \"" .. generatedPath .. "/generatedOld.cpp\"" } end function addSourceDir(path) files { "include/%{prj.name}/*.h", "include/%{prj.name}/shaders/*.h", "include/%{prj.name}/shaders/*.hlsli", "shared/*.cpp", "shared/*.h", path .. "/*.cpp", path .. "/*.c", path .. "/*.h", path .. "/shaders/*.hlsl", path .. "/shaders/*.hlsli", path .. "/shaders/*.h", generatedDir .. "/%{prj.name}/generated.cpp", generatedDir .. "/%{prj.name}/generated_cpu.cpp", generatedDir .. "/%{prj.name}/*.h", } filter { "files:**.hlsl" } flags {"ExcludeFromBuild"} filter{} end project "nvfloweditor" kind "ConsoleApp" --dependson { "nvflowext", "nvflow", "nvflowshadertool" } prebuildShaderTool("source/%{prj.name}/NvFlowEditor.nfproj") postbuildShaderTool() location(workspaceDir .. "/%{prj.name}") language "C++" includedirs { generatedDir .. "/%{prj.name}", "include/nvflowext", "external/glfw/include", "external/imgui" } addSourceDir("source/%{prj.name}") addSourceDir("external/imgui") filter { "system:windows" } copy_to_targetdir("external/glfw/win64/glfw3.dll") filter { "system:linux" } copy_to_targetdir("external/glfw/linux/libglfw.so") copy_to_targetdir("external/glfw/linux/libglfw.so.3") copy_to_targetdir("external/glfw/linux/libglfw.so.3.3") copy_to_targetdir("external/glfw/linux/libglfw_aarch64.so.3.3") filter { }
NVIDIA-Omniverse/PhysX/flow/generate_projects.bat
external\premake\win64\premake5.exe vs2017
NVIDIA-Omniverse/PhysX/flow/external/glfw/COPYING.txt
Copyright (c) 2002-2006 Marcus Geelnard Copyright (c) 2006-2016 Camilla Berglund <[email protected]> This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution.
NVIDIA-Omniverse/PhysX/flow/external/glfw/include/GLFW/glfw3native.h
/************************************************************************* * GLFW 3.2 - www.glfw.org * A library for OpenGL, window and input *------------------------------------------------------------------------ * Copyright (c) 2002-2006 Marcus Geelnard * Copyright (c) 2006-2016 Camilla Berglund <[email protected]> * * This software is provided 'as-is', without any express or implied * warranty. In no event will the authors be held liable for any damages * arising from the use of this software. * * Permission is granted to anyone to use this software for any purpose, * including commercial applications, and to alter it and redistribute it * freely, subject to the following restrictions: * * 1. The origin of this software must not be misrepresented; you must not * claim that you wrote the original software. If you use this software * in a product, an acknowledgment in the product documentation would * be appreciated but is not required. * * 2. Altered source versions must be plainly marked as such, and must not * be misrepresented as being the original software. * * 3. This notice may not be removed or altered from any source * distribution. * *************************************************************************/ #ifndef _glfw3_native_h_ #define _glfw3_native_h_ #ifdef __cplusplus extern "C" { #endif /************************************************************************* * Doxygen documentation *************************************************************************/ /*! @file glfw3native.h * @brief The header of the native access functions. * * This is the header file of the native access functions. See @ref native for * more information. */ /*! @defgroup native Native access * * **By using the native access functions you assert that you know what you're * doing and how to fix problems caused by using them. If you don't, you * shouldn't be using them.** * * Before the inclusion of @ref glfw3native.h, you may define exactly one * window system API macro and zero or more context creation API macros. * * The chosen backends must match those the library was compiled for. Failure * to do this will cause a link-time error. * * The available window API macros are: * * `GLFW_EXPOSE_NATIVE_WIN32` * * `GLFW_EXPOSE_NATIVE_COCOA` * * `GLFW_EXPOSE_NATIVE_X11` * * `GLFW_EXPOSE_NATIVE_WAYLAND` * * `GLFW_EXPOSE_NATIVE_MIR` * * The available context API macros are: * * `GLFW_EXPOSE_NATIVE_WGL` * * `GLFW_EXPOSE_NATIVE_NSGL` * * `GLFW_EXPOSE_NATIVE_GLX` * * `GLFW_EXPOSE_NATIVE_EGL` * * These macros select which of the native access functions that are declared * and which platform-specific headers to include. It is then up your (by * definition platform-specific) code to handle which of these should be * defined. 
*/ /************************************************************************* * System headers and types *************************************************************************/ #if defined(GLFW_EXPOSE_NATIVE_WIN32) // This is a workaround for the fact that glfw3.h needs to export APIENTRY (for // example to allow applications to correctly declare a GL_ARB_debug_output // callback) but windows.h assumes no one will define APIENTRY before it does #undef APIENTRY #include <windows.h> #elif defined(GLFW_EXPOSE_NATIVE_COCOA) #include <ApplicationServices/ApplicationServices.h> #if defined(__OBJC__) #import <Cocoa/Cocoa.h> #else typedef void* id; #endif #elif defined(GLFW_EXPOSE_NATIVE_X11) #include <X11/Xlib.h> #include <X11/extensions/Xrandr.h> #elif defined(GLFW_EXPOSE_NATIVE_WAYLAND) #include <wayland-client.h> #elif defined(GLFW_EXPOSE_NATIVE_MIR) #include <mir_toolkit/mir_client_library.h> #endif #if defined(GLFW_EXPOSE_NATIVE_WGL) /* WGL is declared by windows.h */ #endif #if defined(GLFW_EXPOSE_NATIVE_NSGL) /* NSGL is declared by Cocoa.h */ #endif #if defined(GLFW_EXPOSE_NATIVE_GLX) #include <GL/glx.h> #endif #if defined(GLFW_EXPOSE_NATIVE_EGL) #include <EGL/egl.h> #endif /************************************************************************* * Functions *************************************************************************/ #if defined(GLFW_EXPOSE_NATIVE_WIN32) /*! @brief Returns the adapter device name of the specified monitor. * * @return The UTF-8 encoded adapter device name (for example `\\.\DISPLAY1`) * of the specified monitor, or `NULL` if an [error](@ref error_handling) * occurred. * * @thread_safety This function may be called from any thread. Access is not * synchronized. * * @since Added in version 3.1. * * @ingroup native */ GLFWAPI const char* glfwGetWin32Adapter(GLFWmonitor* monitor); /*! @brief Returns the display device name of the specified monitor. * * @return The UTF-8 encoded display device name (for example * `\\.\DISPLAY1\Monitor0`) of the specified monitor, or `NULL` if an * [error](@ref error_handling) occurred. * * @thread_safety This function may be called from any thread. Access is not * synchronized. * * @since Added in version 3.1. * * @ingroup native */ GLFWAPI const char* glfwGetWin32Monitor(GLFWmonitor* monitor); /*! @brief Returns the `HWND` of the specified window. * * @return The `HWND` of the specified window, or `NULL` if an * [error](@ref error_handling) occurred. * * @thread_safety This function may be called from any thread. Access is not * synchronized. * * @since Added in version 3.0. * * @ingroup native */ GLFWAPI HWND glfwGetWin32Window(GLFWwindow* window); #endif #if defined(GLFW_EXPOSE_NATIVE_WGL) /*! @brief Returns the `HGLRC` of the specified window. * * @return The `HGLRC` of the specified window, or `NULL` if an * [error](@ref error_handling) occurred. * * @thread_safety This function may be called from any thread. Access is not * synchronized. * * @since Added in version 3.0. * * @ingroup native */ GLFWAPI HGLRC glfwGetWGLContext(GLFWwindow* window); #endif #if defined(GLFW_EXPOSE_NATIVE_COCOA) /*! @brief Returns the `CGDirectDisplayID` of the specified monitor. * * @return The `CGDirectDisplayID` of the specified monitor, or * `kCGNullDirectDisplay` if an [error](@ref error_handling) occurred. * * @thread_safety This function may be called from any thread. Access is not * synchronized. * * @since Added in version 3.1. * * @ingroup native */ GLFWAPI CGDirectDisplayID glfwGetCocoaMonitor(GLFWmonitor* monitor); /*! 
@brief Returns the `NSWindow` of the specified window. * * @return The `NSWindow` of the specified window, or `nil` if an * [error](@ref error_handling) occurred. * * @thread_safety This function may be called from any thread. Access is not * synchronized. * * @since Added in version 3.0. * * @ingroup native */ GLFWAPI id glfwGetCocoaWindow(GLFWwindow* window); #endif #if defined(GLFW_EXPOSE_NATIVE_NSGL) /*! @brief Returns the `NSOpenGLContext` of the specified window. * * @return The `NSOpenGLContext` of the specified window, or `nil` if an * [error](@ref error_handling) occurred. * * @thread_safety This function may be called from any thread. Access is not * synchronized. * * @since Added in version 3.0. * * @ingroup native */ GLFWAPI id glfwGetNSGLContext(GLFWwindow* window); #endif #if defined(GLFW_EXPOSE_NATIVE_X11) /*! @brief Returns the `Display` used by GLFW. * * @return The `Display` used by GLFW, or `NULL` if an * [error](@ref error_handling) occurred. * * @thread_safety This function may be called from any thread. Access is not * synchronized. * * @since Added in version 3.0. * * @ingroup native */ GLFWAPI Display* glfwGetX11Display(void); /*! @brief Returns the `RRCrtc` of the specified monitor. * * @return The `RRCrtc` of the specified monitor, or `None` if an * [error](@ref error_handling) occurred. * * @thread_safety This function may be called from any thread. Access is not * synchronized. * * @since Added in version 3.1. * * @ingroup native */ GLFWAPI RRCrtc glfwGetX11Adapter(GLFWmonitor* monitor); /*! @brief Returns the `RROutput` of the specified monitor. * * @return The `RROutput` of the specified monitor, or `None` if an * [error](@ref error_handling) occurred. * * @thread_safety This function may be called from any thread. Access is not * synchronized. * * @since Added in version 3.1. * * @ingroup native */ GLFWAPI RROutput glfwGetX11Monitor(GLFWmonitor* monitor); /*! @brief Returns the `Window` of the specified window. * * @return The `Window` of the specified window, or `None` if an * [error](@ref error_handling) occurred. * * @thread_safety This function may be called from any thread. Access is not * synchronized. * * @since Added in version 3.0. * * @ingroup native */ GLFWAPI Window glfwGetX11Window(GLFWwindow* window); #endif #if defined(GLFW_EXPOSE_NATIVE_GLX) /*! @brief Returns the `GLXContext` of the specified window. * * @return The `GLXContext` of the specified window, or `NULL` if an * [error](@ref error_handling) occurred. * * @thread_safety This function may be called from any thread. Access is not * synchronized. * * @since Added in version 3.0. * * @ingroup native */ GLFWAPI GLXContext glfwGetGLXContext(GLFWwindow* window); /*! @brief Returns the `GLXWindow` of the specified window. * * @return The `GLXWindow` of the specified window, or `None` if an * [error](@ref error_handling) occurred. * * @thread_safety This function may be called from any thread. Access is not * synchronized. * * @since Added in version 3.2. * * @ingroup native */ GLFWAPI GLXWindow glfwGetGLXWindow(GLFWwindow* window); #endif #if defined(GLFW_EXPOSE_NATIVE_WAYLAND) /*! @brief Returns the `struct wl_display*` used by GLFW. * * @return The `struct wl_display*` used by GLFW, or `NULL` if an * [error](@ref error_handling) occurred. * * @thread_safety This function may be called from any thread. Access is not * synchronized. * * @since Added in version 3.2. * * @ingroup native */ GLFWAPI struct wl_display* glfwGetWaylandDisplay(void); /*! 
@brief Returns the `struct wl_output*` of the specified monitor. * * @return The `struct wl_output*` of the specified monitor, or `NULL` if an * [error](@ref error_handling) occurred. * * @thread_safety This function may be called from any thread. Access is not * synchronized. * * @since Added in version 3.2. * * @ingroup native */ GLFWAPI struct wl_output* glfwGetWaylandMonitor(GLFWmonitor* monitor); /*! @brief Returns the main `struct wl_surface*` of the specified window. * * @return The main `struct wl_surface*` of the specified window, or `NULL` if * an [error](@ref error_handling) occurred. * * @thread_safety This function may be called from any thread. Access is not * synchronized. * * @since Added in version 3.2. * * @ingroup native */ GLFWAPI struct wl_surface* glfwGetWaylandWindow(GLFWwindow* window); #endif #if defined(GLFW_EXPOSE_NATIVE_MIR) /*! @brief Returns the `MirConnection*` used by GLFW. * * @return The `MirConnection*` used by GLFW, or `NULL` if an * [error](@ref error_handling) occurred. * * @thread_safety This function may be called from any thread. Access is not * synchronized. * * @since Added in version 3.2. * * @ingroup native */ GLFWAPI MirConnection* glfwGetMirDisplay(void); /*! @brief Returns the Mir output ID of the specified monitor. * * @return The Mir output ID of the specified monitor, or zero if an * [error](@ref error_handling) occurred. * * @thread_safety This function may be called from any thread. Access is not * synchronized. * * @since Added in version 3.2. * * @ingroup native */ GLFWAPI int glfwGetMirMonitor(GLFWmonitor* monitor); /*! @brief Returns the `MirSurface*` of the specified window. * * @return The `MirSurface*` of the specified window, or `NULL` if an * [error](@ref error_handling) occurred. * * @thread_safety This function may be called from any thread. Access is not * synchronized. * * @since Added in version 3.2. * * @ingroup native */ GLFWAPI MirSurface* glfwGetMirWindow(GLFWwindow* window); #endif #if defined(GLFW_EXPOSE_NATIVE_EGL) /*! @brief Returns the `EGLDisplay` used by GLFW. * * @return The `EGLDisplay` used by GLFW, or `EGL_NO_DISPLAY` if an * [error](@ref error_handling) occurred. * * @thread_safety This function may be called from any thread. Access is not * synchronized. * * @since Added in version 3.0. * * @ingroup native */ GLFWAPI EGLDisplay glfwGetEGLDisplay(void); /*! @brief Returns the `EGLContext` of the specified window. * * @return The `EGLContext` of the specified window, or `EGL_NO_CONTEXT` if an * [error](@ref error_handling) occurred. * * @thread_safety This function may be called from any thread. Access is not * synchronized. * * @since Added in version 3.0. * * @ingroup native */ GLFWAPI EGLContext glfwGetEGLContext(GLFWwindow* window); /*! @brief Returns the `EGLSurface` of the specified window. * * @return The `EGLSurface` of the specified window, or `EGL_NO_SURFACE` if an * [error](@ref error_handling) occurred. * * @thread_safety This function may be called from any thread. Access is not * synchronized. * * @since Added in version 3.0. * * @ingroup native */ GLFWAPI EGLSurface glfwGetEGLSurface(GLFWwindow* window); #endif #ifdef __cplusplus } #endif #endif /* _glfw3_native_h_ */
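A small usage sketch for the macro scheme the glfw3native.h comments above describe: define exactly one window-system macro (plus any context-creation macros) before including the native header, then query the native handles. This is illustrative only and not part of the Flow sources; it assumes a Windows build against GLFW 3.2.

```cpp
// Windows-only illustration of the GLFW native access macros.
#define GLFW_INCLUDE_NONE          // don't pull in a client API header
#include <GLFW/glfw3.h>

#define GLFW_EXPOSE_NATIVE_WIN32   // exactly one window-system API macro
#define GLFW_EXPOSE_NATIVE_WGL     // zero or more context-creation API macros
#include <GLFW/glfw3native.h>

#include <cstdio>

int main()
{
    if (!glfwInit())
        return 1;

    glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);  // e.g. when rendering with Vulkan/D3D
    GLFWwindow* window = glfwCreateWindow(640, 480, "native-access sketch", NULL, NULL);
    if (!window)
    {
        glfwTerminate();
        return 1;
    }

    HWND hwnd = glfwGetWin32Window(window);        // NULL if an error occurred
    std::printf("HWND: %p\n", (void*)hwnd);

    glfwDestroyWindow(window);
    glfwTerminate();
    return 0;
}
```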
NVIDIA-Omniverse/PhysX/flow/external/glfw/include/GLFW/glfw3.h
/************************************************************************* * GLFW 3.2 - www.glfw.org * A library for OpenGL, window and input *------------------------------------------------------------------------ * Copyright (c) 2002-2006 Marcus Geelnard * Copyright (c) 2006-2016 Camilla Berglund <[email protected]> * * This software is provided 'as-is', without any express or implied * warranty. In no event will the authors be held liable for any damages * arising from the use of this software. * * Permission is granted to anyone to use this software for any purpose, * including commercial applications, and to alter it and redistribute it * freely, subject to the following restrictions: * * 1. The origin of this software must not be misrepresented; you must not * claim that you wrote the original software. If you use this software * in a product, an acknowledgment in the product documentation would * be appreciated but is not required. * * 2. Altered source versions must be plainly marked as such, and must not * be misrepresented as being the original software. * * 3. This notice may not be removed or altered from any source * distribution. * *************************************************************************/ #ifndef _glfw3_h_ #define _glfw3_h_ #ifdef __cplusplus extern "C" { #endif /************************************************************************* * Doxygen documentation *************************************************************************/ /*! @file glfw3.h * @brief The header of the GLFW 3 API. * * This is the header file of the GLFW 3 API. It defines all its types and * declares all its functions. * * For more information about how to use this file, see @ref build_include. */ /*! @defgroup context Context reference * * This is the reference documentation for OpenGL and OpenGL ES context related * functions. For more task-oriented information, see the @ref context_guide. */ /*! @defgroup vulkan Vulkan reference * * This is the reference documentation for Vulkan related functions and types. * For more task-oriented information, see the @ref vulkan_guide. */ /*! @defgroup init Initialization, version and error reference * * This is the reference documentation for initialization and termination of * the library, version management and error handling. For more task-oriented * information, see the @ref intro_guide. */ /*! @defgroup input Input reference * * This is the reference documentation for input related functions and types. * For more task-oriented information, see the @ref input_guide. */ /*! @defgroup monitor Monitor reference * * This is the reference documentation for monitor related functions and types. * For more task-oriented information, see the @ref monitor_guide. */ /*! @defgroup window Window reference * * This is the reference documentation for window related functions and types, * including creation, deletion and event polling. For more task-oriented * information, see the @ref window_guide. */ /************************************************************************* * Compiler- and platform-specific preprocessor work *************************************************************************/ /* If we are we on Windows, we want a single define for it. */ #if !defined(_WIN32) && (defined(__WIN32__) || defined(WIN32) || defined(__MINGW32__)) #define _WIN32 #endif /* _WIN32 */ /* It is customary to use APIENTRY for OpenGL function pointer declarations on * all platforms. Additionally, the Windows OpenGL header needs APIENTRY. 
*/ #ifndef APIENTRY #ifdef _WIN32 #define APIENTRY __stdcall #else #define APIENTRY #endif #endif /* APIENTRY */ /* Some Windows OpenGL headers need this. */ #if !defined(WINGDIAPI) && defined(_WIN32) #define WINGDIAPI __declspec(dllimport) #define GLFW_WINGDIAPI_DEFINED #endif /* WINGDIAPI */ /* Some Windows GLU headers need this. */ #if !defined(CALLBACK) && defined(_WIN32) #define CALLBACK __stdcall #define GLFW_CALLBACK_DEFINED #endif /* CALLBACK */ /* Include because most Windows GLU headers need wchar_t and * the OS X OpenGL header blocks the definition of ptrdiff_t by glext.h. * Include it unconditionally to avoid surprising side-effects. */ #include <stddef.h> /* Include because it is needed by Vulkan and related functions. */ #include <stdint.h> /* Include the chosen client API headers. */ #if defined(__APPLE__) #if defined(GLFW_INCLUDE_GLCOREARB) #include <OpenGL/gl3.h> #if defined(GLFW_INCLUDE_GLEXT) #include <OpenGL/gl3ext.h> #endif #elif !defined(GLFW_INCLUDE_NONE) #if !defined(GLFW_INCLUDE_GLEXT) #define GL_GLEXT_LEGACY #endif #include <OpenGL/gl.h> #endif #if defined(GLFW_INCLUDE_GLU) #include <OpenGL/glu.h> #endif #else #if defined(GLFW_INCLUDE_GLCOREARB) #include <GL/glcorearb.h> #elif defined(GLFW_INCLUDE_ES1) #include <GLES/gl.h> #if defined(GLFW_INCLUDE_GLEXT) #include <GLES/glext.h> #endif #elif defined(GLFW_INCLUDE_ES2) #include <GLES2/gl2.h> #if defined(GLFW_INCLUDE_GLEXT) #include <GLES2/gl2ext.h> #endif #elif defined(GLFW_INCLUDE_ES3) #include <GLES3/gl3.h> #if defined(GLFW_INCLUDE_GLEXT) #include <GLES2/gl2ext.h> #endif #elif defined(GLFW_INCLUDE_ES31) #include <GLES3/gl31.h> #if defined(GLFW_INCLUDE_GLEXT) #include <GLES2/gl2ext.h> #endif #elif defined(GLFW_INCLUDE_VULKAN) #include <vulkan/vulkan.h> #elif !defined(GLFW_INCLUDE_NONE) #include <GL/gl.h> #if defined(GLFW_INCLUDE_GLEXT) #include <GL/glext.h> #endif #endif #if defined(GLFW_INCLUDE_GLU) #include <GL/glu.h> #endif #endif #if defined(GLFW_DLL) && defined(_GLFW_BUILD_DLL) /* GLFW_DLL must be defined by applications that are linking against the DLL * version of the GLFW library. _GLFW_BUILD_DLL is defined by the GLFW * configuration header when compiling the DLL version of the library. */ #error "You must not have both GLFW_DLL and _GLFW_BUILD_DLL defined" #endif /* GLFWAPI is used to declare public API functions for export * from the DLL / shared library / dynamic library. */ #if defined(_WIN32) && defined(_GLFW_BUILD_DLL) /* We are building GLFW as a Win32 DLL */ #define GLFWAPI __declspec(dllexport) #elif defined(_WIN32) && defined(GLFW_DLL) /* We are calling GLFW as a Win32 DLL */ #define GLFWAPI __declspec(dllimport) #elif defined(__GNUC__) && defined(_GLFW_BUILD_DLL) /* We are building GLFW as a shared / dynamic library */ #define GLFWAPI __attribute__((visibility("default"))) #else /* We are building or calling GLFW as a static library */ #define GLFWAPI #endif /************************************************************************* * GLFW API tokens *************************************************************************/ /*! @name GLFW version macros * @{ */ /*! @brief The major version number of the GLFW library. * * This is incremented when the API is changed in non-compatible ways. * @ingroup init */ #define GLFW_VERSION_MAJOR 3 /*! @brief The minor version number of the GLFW library. * * This is incremented when features are added to the API but it remains * backward-compatible. * @ingroup init */ #define GLFW_VERSION_MINOR 2 /*! @brief The revision number of the GLFW library. 
* * This is incremented when a bug fix release is made that does not contain any * API changes. * @ingroup init */ #define GLFW_VERSION_REVISION 1 /*! @} */ /*! @name Boolean values * @{ */ /*! @brief One. * * One. Seriously. You don't _need_ to use this symbol in your code. It's * just semantic sugar for the number 1. You can use `1` or `true` or `_True` * or `GL_TRUE` or whatever you want. */ #define GLFW_TRUE 1 /*! @brief Zero. * * Zero. Seriously. You don't _need_ to use this symbol in your code. It's * just just semantic sugar for the number 0. You can use `0` or `false` or * `_False` or `GL_FALSE` or whatever you want. */ #define GLFW_FALSE 0 /*! @} */ /*! @name Key and button actions * @{ */ /*! @brief The key or mouse button was released. * * The key or mouse button was released. * * @ingroup input */ #define GLFW_RELEASE 0 /*! @brief The key or mouse button was pressed. * * The key or mouse button was pressed. * * @ingroup input */ #define GLFW_PRESS 1 /*! @brief The key was held down until it repeated. * * The key was held down until it repeated. * * @ingroup input */ #define GLFW_REPEAT 2 /*! @} */ /*! @defgroup keys Keyboard keys * * See [key input](@ref input_key) for how these are used. * * These key codes are inspired by the _USB HID Usage Tables v1.12_ (p. 53-60), * but re-arranged to map to 7-bit ASCII for printable keys (function keys are * put in the 256+ range). * * The naming of the key codes follow these rules: * - The US keyboard layout is used * - Names of printable alpha-numeric characters are used (e.g. "A", "R", * "3", etc.) * - For non-alphanumeric characters, Unicode:ish names are used (e.g. * "COMMA", "LEFT_SQUARE_BRACKET", etc.). Note that some names do not * correspond to the Unicode standard (usually for brevity) * - Keys that lack a clear US mapping are named "WORLD_x" * - For non-printable keys, custom names are used (e.g. "F4", * "BACKSPACE", etc.) * * @ingroup input * @{ */ /* The unknown key */ #define GLFW_KEY_UNKNOWN -1 /* Printable keys */ #define GLFW_KEY_SPACE 32 #define GLFW_KEY_APOSTROPHE 39 /* ' */ #define GLFW_KEY_COMMA 44 /* , */ #define GLFW_KEY_MINUS 45 /* - */ #define GLFW_KEY_PERIOD 46 /* . 
*/ #define GLFW_KEY_SLASH 47 /* / */ #define GLFW_KEY_0 48 #define GLFW_KEY_1 49 #define GLFW_KEY_2 50 #define GLFW_KEY_3 51 #define GLFW_KEY_4 52 #define GLFW_KEY_5 53 #define GLFW_KEY_6 54 #define GLFW_KEY_7 55 #define GLFW_KEY_8 56 #define GLFW_KEY_9 57 #define GLFW_KEY_SEMICOLON 59 /* ; */ #define GLFW_KEY_EQUAL 61 /* = */ #define GLFW_KEY_A 65 #define GLFW_KEY_B 66 #define GLFW_KEY_C 67 #define GLFW_KEY_D 68 #define GLFW_KEY_E 69 #define GLFW_KEY_F 70 #define GLFW_KEY_G 71 #define GLFW_KEY_H 72 #define GLFW_KEY_I 73 #define GLFW_KEY_J 74 #define GLFW_KEY_K 75 #define GLFW_KEY_L 76 #define GLFW_KEY_M 77 #define GLFW_KEY_N 78 #define GLFW_KEY_O 79 #define GLFW_KEY_P 80 #define GLFW_KEY_Q 81 #define GLFW_KEY_R 82 #define GLFW_KEY_S 83 #define GLFW_KEY_T 84 #define GLFW_KEY_U 85 #define GLFW_KEY_V 86 #define GLFW_KEY_W 87 #define GLFW_KEY_X 88 #define GLFW_KEY_Y 89 #define GLFW_KEY_Z 90 #define GLFW_KEY_LEFT_BRACKET 91 /* [ */ #define GLFW_KEY_BACKSLASH 92 /* \ */ #define GLFW_KEY_RIGHT_BRACKET 93 /* ] */ #define GLFW_KEY_GRAVE_ACCENT 96 /* ` */ #define GLFW_KEY_WORLD_1 161 /* non-US #1 */ #define GLFW_KEY_WORLD_2 162 /* non-US #2 */ /* Function keys */ #define GLFW_KEY_ESCAPE 256 #define GLFW_KEY_ENTER 257 #define GLFW_KEY_TAB 258 #define GLFW_KEY_BACKSPACE 259 #define GLFW_KEY_INSERT 260 #define GLFW_KEY_DELETE 261 #define GLFW_KEY_RIGHT 262 #define GLFW_KEY_LEFT 263 #define GLFW_KEY_DOWN 264 #define GLFW_KEY_UP 265 #define GLFW_KEY_PAGE_UP 266 #define GLFW_KEY_PAGE_DOWN 267 #define GLFW_KEY_HOME 268 #define GLFW_KEY_END 269 #define GLFW_KEY_CAPS_LOCK 280 #define GLFW_KEY_SCROLL_LOCK 281 #define GLFW_KEY_NUM_LOCK 282 #define GLFW_KEY_PRINT_SCREEN 283 #define GLFW_KEY_PAUSE 284 #define GLFW_KEY_F1 290 #define GLFW_KEY_F2 291 #define GLFW_KEY_F3 292 #define GLFW_KEY_F4 293 #define GLFW_KEY_F5 294 #define GLFW_KEY_F6 295 #define GLFW_KEY_F7 296 #define GLFW_KEY_F8 297 #define GLFW_KEY_F9 298 #define GLFW_KEY_F10 299 #define GLFW_KEY_F11 300 #define GLFW_KEY_F12 301 #define GLFW_KEY_F13 302 #define GLFW_KEY_F14 303 #define GLFW_KEY_F15 304 #define GLFW_KEY_F16 305 #define GLFW_KEY_F17 306 #define GLFW_KEY_F18 307 #define GLFW_KEY_F19 308 #define GLFW_KEY_F20 309 #define GLFW_KEY_F21 310 #define GLFW_KEY_F22 311 #define GLFW_KEY_F23 312 #define GLFW_KEY_F24 313 #define GLFW_KEY_F25 314 #define GLFW_KEY_KP_0 320 #define GLFW_KEY_KP_1 321 #define GLFW_KEY_KP_2 322 #define GLFW_KEY_KP_3 323 #define GLFW_KEY_KP_4 324 #define GLFW_KEY_KP_5 325 #define GLFW_KEY_KP_6 326 #define GLFW_KEY_KP_7 327 #define GLFW_KEY_KP_8 328 #define GLFW_KEY_KP_9 329 #define GLFW_KEY_KP_DECIMAL 330 #define GLFW_KEY_KP_DIVIDE 331 #define GLFW_KEY_KP_MULTIPLY 332 #define GLFW_KEY_KP_SUBTRACT 333 #define GLFW_KEY_KP_ADD 334 #define GLFW_KEY_KP_ENTER 335 #define GLFW_KEY_KP_EQUAL 336 #define GLFW_KEY_LEFT_SHIFT 340 #define GLFW_KEY_LEFT_CONTROL 341 #define GLFW_KEY_LEFT_ALT 342 #define GLFW_KEY_LEFT_SUPER 343 #define GLFW_KEY_RIGHT_SHIFT 344 #define GLFW_KEY_RIGHT_CONTROL 345 #define GLFW_KEY_RIGHT_ALT 346 #define GLFW_KEY_RIGHT_SUPER 347 #define GLFW_KEY_MENU 348 #define GLFW_KEY_LAST GLFW_KEY_MENU /*! @} */ /*! @defgroup mods Modifier key flags * * See [key input](@ref input_key) for how these are used. * * @ingroup input * @{ */ /*! @brief If this bit is set one or more Shift keys were held down. */ #define GLFW_MOD_SHIFT 0x0001 /*! @brief If this bit is set one or more Control keys were held down. */ #define GLFW_MOD_CONTROL 0x0002 /*! @brief If this bit is set one or more Alt keys were held down. 
*/ #define GLFW_MOD_ALT 0x0004 /*! @brief If this bit is set one or more Super keys were held down. */ #define GLFW_MOD_SUPER 0x0008 /*! @} */ /*! @defgroup buttons Mouse buttons * * See [mouse button input](@ref input_mouse_button) for how these are used. * * @ingroup input * @{ */ #define GLFW_MOUSE_BUTTON_1 0 #define GLFW_MOUSE_BUTTON_2 1 #define GLFW_MOUSE_BUTTON_3 2 #define GLFW_MOUSE_BUTTON_4 3 #define GLFW_MOUSE_BUTTON_5 4 #define GLFW_MOUSE_BUTTON_6 5 #define GLFW_MOUSE_BUTTON_7 6 #define GLFW_MOUSE_BUTTON_8 7 #define GLFW_MOUSE_BUTTON_LAST GLFW_MOUSE_BUTTON_8 #define GLFW_MOUSE_BUTTON_LEFT GLFW_MOUSE_BUTTON_1 #define GLFW_MOUSE_BUTTON_RIGHT GLFW_MOUSE_BUTTON_2 #define GLFW_MOUSE_BUTTON_MIDDLE GLFW_MOUSE_BUTTON_3 /*! @} */ /*! @defgroup joysticks Joysticks * * See [joystick input](@ref joystick) for how these are used. * * @ingroup input * @{ */ #define GLFW_JOYSTICK_1 0 #define GLFW_JOYSTICK_2 1 #define GLFW_JOYSTICK_3 2 #define GLFW_JOYSTICK_4 3 #define GLFW_JOYSTICK_5 4 #define GLFW_JOYSTICK_6 5 #define GLFW_JOYSTICK_7 6 #define GLFW_JOYSTICK_8 7 #define GLFW_JOYSTICK_9 8 #define GLFW_JOYSTICK_10 9 #define GLFW_JOYSTICK_11 10 #define GLFW_JOYSTICK_12 11 #define GLFW_JOYSTICK_13 12 #define GLFW_JOYSTICK_14 13 #define GLFW_JOYSTICK_15 14 #define GLFW_JOYSTICK_16 15 #define GLFW_JOYSTICK_LAST GLFW_JOYSTICK_16 /*! @} */ /*! @defgroup errors Error codes * * See [error handling](@ref error_handling) for how these are used. * * @ingroup init * @{ */ /*! @brief GLFW has not been initialized. * * This occurs if a GLFW function was called that must not be called unless the * library is [initialized](@ref intro_init). * * @analysis Application programmer error. Initialize GLFW before calling any * function that requires initialization. */ #define GLFW_NOT_INITIALIZED 0x00010001 /*! @brief No context is current for this thread. * * This occurs if a GLFW function was called that needs and operates on the * current OpenGL or OpenGL ES context but no context is current on the calling * thread. One such function is @ref glfwSwapInterval. * * @analysis Application programmer error. Ensure a context is current before * calling functions that require a current context. */ #define GLFW_NO_CURRENT_CONTEXT 0x00010002 /*! @brief One of the arguments to the function was an invalid enum value. * * One of the arguments to the function was an invalid enum value, for example * requesting [GLFW_RED_BITS](@ref window_hints_fb) with @ref * glfwGetWindowAttrib. * * @analysis Application programmer error. Fix the offending call. */ #define GLFW_INVALID_ENUM 0x00010003 /*! @brief One of the arguments to the function was an invalid value. * * One of the arguments to the function was an invalid value, for example * requesting a non-existent OpenGL or OpenGL ES version like 2.7. * * Requesting a valid but unavailable OpenGL or OpenGL ES version will instead * result in a @ref GLFW_VERSION_UNAVAILABLE error. * * @analysis Application programmer error. Fix the offending call. */ #define GLFW_INVALID_VALUE 0x00010004 /*! @brief A memory allocation failed. * * A memory allocation failed. * * @analysis A bug in GLFW or the underlying operating system. Report the bug * to our [issue tracker](https://github.com/glfw/glfw/issues). */ #define GLFW_OUT_OF_MEMORY 0x00010005 /*! @brief GLFW could not find support for the requested API on the system. * * GLFW could not find support for the requested API on the system. 
* * @analysis The installed graphics driver does not support the requested * API, or does not support it via the chosen context creation backend. * Below are a few examples. * * @par * Some pre-installed Windows graphics drivers do not support OpenGL. AMD only * supports OpenGL ES via EGL, while Nvidia and Intel only support it via * a WGL or GLX extension. OS X does not provide OpenGL ES at all. The Mesa * EGL, OpenGL and OpenGL ES libraries do not interface with the Nvidia binary * driver. Older graphics drivers do not support Vulkan. */ #define GLFW_API_UNAVAILABLE 0x00010006 /*! @brief The requested OpenGL or OpenGL ES version is not available. * * The requested OpenGL or OpenGL ES version (including any requested context * or framebuffer hints) is not available on this machine. * * @analysis The machine does not support your requirements. If your * application is sufficiently flexible, downgrade your requirements and try * again. Otherwise, inform the user that their machine does not match your * requirements. * * @par * Future invalid OpenGL and OpenGL ES versions, for example OpenGL 4.8 if 5.0 * comes out before the 4.x series gets that far, also fail with this error and * not @ref GLFW_INVALID_VALUE, because GLFW cannot know what future versions * will exist. */ #define GLFW_VERSION_UNAVAILABLE 0x00010007 /*! @brief A platform-specific error occurred that does not match any of the * more specific categories. * * A platform-specific error occurred that does not match any of the more * specific categories. * * @analysis A bug or configuration error in GLFW, the underlying operating * system or its drivers, or a lack of required resources. Report the issue to * our [issue tracker](https://github.com/glfw/glfw/issues). */ #define GLFW_PLATFORM_ERROR 0x00010008 /*! @brief The requested format is not supported or available. * * If emitted during window creation, the requested pixel format is not * supported. * * If emitted when querying the clipboard, the contents of the clipboard could * not be converted to the requested format. * * @analysis If emitted during window creation, one or more * [hard constraints](@ref window_hints_hard) did not match any of the * available pixel formats. If your application is sufficiently flexible, * downgrade your requirements and try again. Otherwise, inform the user that * their machine does not match your requirements. * * @par * If emitted when querying the clipboard, ignore the error or report it to * the user, as appropriate. */ #define GLFW_FORMAT_UNAVAILABLE 0x00010009 /*! @brief The specified window does not have an OpenGL or OpenGL ES context. * * A window that does not have an OpenGL or OpenGL ES context was passed to * a function that requires it to have one. * * @analysis Application programmer error. Fix the offending call. */ #define GLFW_NO_WINDOW_CONTEXT 0x0001000A /*! 
@} */ #define GLFW_FOCUSED 0x00020001 #define GLFW_ICONIFIED 0x00020002 #define GLFW_RESIZABLE 0x00020003 #define GLFW_VISIBLE 0x00020004 #define GLFW_DECORATED 0x00020005 #define GLFW_AUTO_ICONIFY 0x00020006 #define GLFW_FLOATING 0x00020007 #define GLFW_MAXIMIZED 0x00020008 #define GLFW_RED_BITS 0x00021001 #define GLFW_GREEN_BITS 0x00021002 #define GLFW_BLUE_BITS 0x00021003 #define GLFW_ALPHA_BITS 0x00021004 #define GLFW_DEPTH_BITS 0x00021005 #define GLFW_STENCIL_BITS 0x00021006 #define GLFW_ACCUM_RED_BITS 0x00021007 #define GLFW_ACCUM_GREEN_BITS 0x00021008 #define GLFW_ACCUM_BLUE_BITS 0x00021009 #define GLFW_ACCUM_ALPHA_BITS 0x0002100A #define GLFW_AUX_BUFFERS 0x0002100B #define GLFW_STEREO 0x0002100C #define GLFW_SAMPLES 0x0002100D #define GLFW_SRGB_CAPABLE 0x0002100E #define GLFW_REFRESH_RATE 0x0002100F #define GLFW_DOUBLEBUFFER 0x00021010 #define GLFW_CLIENT_API 0x00022001 #define GLFW_CONTEXT_VERSION_MAJOR 0x00022002 #define GLFW_CONTEXT_VERSION_MINOR 0x00022003 #define GLFW_CONTEXT_REVISION 0x00022004 #define GLFW_CONTEXT_ROBUSTNESS 0x00022005 #define GLFW_OPENGL_FORWARD_COMPAT 0x00022006 #define GLFW_OPENGL_DEBUG_CONTEXT 0x00022007 #define GLFW_OPENGL_PROFILE 0x00022008 #define GLFW_CONTEXT_RELEASE_BEHAVIOR 0x00022009 #define GLFW_CONTEXT_NO_ERROR 0x0002200A #define GLFW_CONTEXT_CREATION_API 0x0002200B #define GLFW_NO_API 0 #define GLFW_OPENGL_API 0x00030001 #define GLFW_OPENGL_ES_API 0x00030002 #define GLFW_NO_ROBUSTNESS 0 #define GLFW_NO_RESET_NOTIFICATION 0x00031001 #define GLFW_LOSE_CONTEXT_ON_RESET 0x00031002 #define GLFW_OPENGL_ANY_PROFILE 0 #define GLFW_OPENGL_CORE_PROFILE 0x00032001 #define GLFW_OPENGL_COMPAT_PROFILE 0x00032002 #define GLFW_CURSOR 0x00033001 #define GLFW_STICKY_KEYS 0x00033002 #define GLFW_STICKY_MOUSE_BUTTONS 0x00033003 #define GLFW_CURSOR_NORMAL 0x00034001 #define GLFW_CURSOR_HIDDEN 0x00034002 #define GLFW_CURSOR_DISABLED 0x00034003 #define GLFW_ANY_RELEASE_BEHAVIOR 0 #define GLFW_RELEASE_BEHAVIOR_FLUSH 0x00035001 #define GLFW_RELEASE_BEHAVIOR_NONE 0x00035002 #define GLFW_NATIVE_CONTEXT_API 0x00036001 #define GLFW_EGL_CONTEXT_API 0x00036002 /*! @defgroup shapes Standard cursor shapes * * See [standard cursor creation](@ref cursor_standard) for how these are used. * * @ingroup input * @{ */ /*! @brief The regular arrow cursor shape. * * The regular arrow cursor. */ #define GLFW_ARROW_CURSOR 0x00036001 /*! @brief The text input I-beam cursor shape. * * The text input I-beam cursor shape. */ #define GLFW_IBEAM_CURSOR 0x00036002 /*! @brief The crosshair shape. * * The crosshair shape. */ #define GLFW_CROSSHAIR_CURSOR 0x00036003 /*! @brief The hand shape. * * The hand shape. */ #define GLFW_HAND_CURSOR 0x00036004 /*! @brief The horizontal resize arrow shape. * * The horizontal resize arrow shape. */ #define GLFW_HRESIZE_CURSOR 0x00036005 /*! @brief The vertical resize arrow shape. * * The vertical resize arrow shape. */ #define GLFW_VRESIZE_CURSOR 0x00036006 /*! @} */ #define GLFW_CONNECTED 0x00040001 #define GLFW_DISCONNECTED 0x00040002 #define GLFW_DONT_CARE -1 /************************************************************************* * GLFW API types *************************************************************************/ /*! @brief Client API function pointer type. * * Generic function pointer used for returning client API function pointers * without forcing a cast from a regular pointer. * * @sa @ref context_glext * @sa glfwGetProcAddress * * @since Added in version 3.0. * @ingroup context */ typedef void (*GLFWglproc)(void); /*! 
@brief Vulkan API function pointer type. * * Generic function pointer used for returning Vulkan API function pointers * without forcing a cast from a regular pointer. * * @sa @ref vulkan_proc * @sa glfwGetInstanceProcAddress * * @since Added in version 3.2. * * @ingroup vulkan */ typedef void (*GLFWvkproc)(void); /*! @brief Opaque monitor object. * * Opaque monitor object. * * @see @ref monitor_object * * @since Added in version 3.0. * * @ingroup monitor */ typedef struct GLFWmonitor GLFWmonitor; /*! @brief Opaque window object. * * Opaque window object. * * @see @ref window_object * * @since Added in version 3.0. * * @ingroup window */ typedef struct GLFWwindow GLFWwindow; /*! @brief Opaque cursor object. * * Opaque cursor object. * * @see @ref cursor_object * * @since Added in version 3.1. * * @ingroup cursor */ typedef struct GLFWcursor GLFWcursor; /*! @brief The function signature for error callbacks. * * This is the function signature for error callback functions. * * @param[in] error An [error code](@ref errors). * @param[in] description A UTF-8 encoded string describing the error. * * @sa @ref error_handling * @sa glfwSetErrorCallback * * @since Added in version 3.0. * * @ingroup init */ typedef void (* GLFWerrorfun)(int,const char*); /*! @brief The function signature for window position callbacks. * * This is the function signature for window position callback functions. * * @param[in] window The window that was moved. * @param[in] xpos The new x-coordinate, in screen coordinates, of the * upper-left corner of the client area of the window. * @param[in] ypos The new y-coordinate, in screen coordinates, of the * upper-left corner of the client area of the window. * * @sa @ref window_pos * @sa glfwSetWindowPosCallback * * @since Added in version 3.0. * * @ingroup window */ typedef void (* GLFWwindowposfun)(GLFWwindow*,int,int); /*! @brief The function signature for window resize callbacks. * * This is the function signature for window size callback functions. * * @param[in] window The window that was resized. * @param[in] width The new width, in screen coordinates, of the window. * @param[in] height The new height, in screen coordinates, of the window. * * @sa @ref window_size * @sa glfwSetWindowSizeCallback * * @since Added in version 1.0. * @glfw3 Added window handle parameter. * * @ingroup window */ typedef void (* GLFWwindowsizefun)(GLFWwindow*,int,int); /*! @brief The function signature for window close callbacks. * * This is the function signature for window close callback functions. * * @param[in] window The window that the user attempted to close. * * @sa @ref window_close * @sa glfwSetWindowCloseCallback * * @since Added in version 2.5. * @glfw3 Added window handle parameter. * * @ingroup window */ typedef void (* GLFWwindowclosefun)(GLFWwindow*); /*! @brief The function signature for window content refresh callbacks. * * This is the function signature for window refresh callback functions. * * @param[in] window The window whose content needs to be refreshed. * * @sa @ref window_refresh * @sa glfwSetWindowRefreshCallback * * @since Added in version 2.5. * @glfw3 Added window handle parameter. * * @ingroup window */ typedef void (* GLFWwindowrefreshfun)(GLFWwindow*); /*! @brief The function signature for window focus/defocus callbacks. * * This is the function signature for window focus callback functions. * * @param[in] window The window that gained or lost input focus. * @param[in] focused `GLFW_TRUE` if the window was given input focus, or * `GLFW_FALSE` if it lost it. 
* * @sa @ref window_focus * @sa glfwSetWindowFocusCallback * * @since Added in version 3.0. * * @ingroup window */ typedef void (* GLFWwindowfocusfun)(GLFWwindow*,int); /*! @brief The function signature for window iconify/restore callbacks. * * This is the function signature for window iconify/restore callback * functions. * * @param[in] window The window that was iconified or restored. * @param[in] iconified `GLFW_TRUE` if the window was iconified, or * `GLFW_FALSE` if it was restored. * * @sa @ref window_iconify * @sa glfwSetWindowIconifyCallback * * @since Added in version 3.0. * * @ingroup window */ typedef void (* GLFWwindowiconifyfun)(GLFWwindow*,int); /*! @brief The function signature for framebuffer resize callbacks. * * This is the function signature for framebuffer resize callback * functions. * * @param[in] window The window whose framebuffer was resized. * @param[in] width The new width, in pixels, of the framebuffer. * @param[in] height The new height, in pixels, of the framebuffer. * * @sa @ref window_fbsize * @sa glfwSetFramebufferSizeCallback * * @since Added in version 3.0. * * @ingroup window */ typedef void (* GLFWframebuffersizefun)(GLFWwindow*,int,int); /*! @brief The function signature for mouse button callbacks. * * This is the function signature for mouse button callback functions. * * @param[in] window The window that received the event. * @param[in] button The [mouse button](@ref buttons) that was pressed or * released. * @param[in] action One of `GLFW_PRESS` or `GLFW_RELEASE`. * @param[in] mods Bit field describing which [modifier keys](@ref mods) were * held down. * * @sa @ref input_mouse_button * @sa glfwSetMouseButtonCallback * * @since Added in version 1.0. * @glfw3 Added window handle and modifier mask parameters. * * @ingroup input */ typedef void (* GLFWmousebuttonfun)(GLFWwindow*,int,int,int); /*! @brief The function signature for cursor position callbacks. * * This is the function signature for cursor position callback functions. * * @param[in] window The window that received the event. * @param[in] xpos The new cursor x-coordinate, relative to the left edge of * the client area. * @param[in] ypos The new cursor y-coordinate, relative to the top edge of the * client area. * * @sa @ref cursor_pos * @sa glfwSetCursorPosCallback * * @since Added in version 3.0. Replaces `GLFWmouseposfun`. * * @ingroup input */ typedef void (* GLFWcursorposfun)(GLFWwindow*,double,double); /*! @brief The function signature for cursor enter/leave callbacks. * * This is the function signature for cursor enter/leave callback functions. * * @param[in] window The window that received the event. * @param[in] entered `GLFW_TRUE` if the cursor entered the window's client * area, or `GLFW_FALSE` if it left it. * * @sa @ref cursor_enter * @sa glfwSetCursorEnterCallback * * @since Added in version 3.0. * * @ingroup input */ typedef void (* GLFWcursorenterfun)(GLFWwindow*,int); /*! @brief The function signature for scroll callbacks. * * This is the function signature for scroll callback functions. * * @param[in] window The window that received the event. * @param[in] xoffset The scroll offset along the x-axis. * @param[in] yoffset The scroll offset along the y-axis. * * @sa @ref scrolling * @sa glfwSetScrollCallback * * @since Added in version 3.0. Replaces `GLFWmousewheelfun`. * * @ingroup input */ typedef void (* GLFWscrollfun)(GLFWwindow*,double,double); /*! @brief The function signature for keyboard key callbacks. 
* * This is the function signature for keyboard key callback functions. * * @param[in] window The window that received the event. * @param[in] key The [keyboard key](@ref keys) that was pressed or released. * @param[in] scancode The system-specific scancode of the key. * @param[in] action `GLFW_PRESS`, `GLFW_RELEASE` or `GLFW_REPEAT`. * @param[in] mods Bit field describing which [modifier keys](@ref mods) were * held down. * * @sa @ref input_key * @sa glfwSetKeyCallback * * @since Added in version 1.0. * @glfw3 Added window handle, scancode and modifier mask parameters. * * @ingroup input */ typedef void (* GLFWkeyfun)(GLFWwindow*,int,int,int,int); /*! @brief The function signature for Unicode character callbacks. * * This is the function signature for Unicode character callback functions. * * @param[in] window The window that received the event. * @param[in] codepoint The Unicode code point of the character. * * @sa @ref input_char * @sa glfwSetCharCallback * * @since Added in version 2.4. * @glfw3 Added window handle parameter. * * @ingroup input */ typedef void (* GLFWcharfun)(GLFWwindow*,unsigned int); /*! @brief The function signature for Unicode character with modifiers * callbacks. * * This is the function signature for Unicode character with modifiers callback * functions. It is called for each input character, regardless of what * modifier keys are held down. * * @param[in] window The window that received the event. * @param[in] codepoint The Unicode code point of the character. * @param[in] mods Bit field describing which [modifier keys](@ref mods) were * held down. * * @sa @ref input_char * @sa glfwSetCharModsCallback * * @since Added in version 3.1. * * @ingroup input */ typedef void (* GLFWcharmodsfun)(GLFWwindow*,unsigned int,int); /*! @brief The function signature for file drop callbacks. * * This is the function signature for file drop callbacks. * * @param[in] window The window that received the event. * @param[in] count The number of dropped files. * @param[in] paths The UTF-8 encoded file and/or directory path names. * * @sa @ref path_drop * @sa glfwSetDropCallback * * @since Added in version 3.1. * * @ingroup input */ typedef void (* GLFWdropfun)(GLFWwindow*,int,const char**); /*! @brief The function signature for monitor configuration callbacks. * * This is the function signature for monitor configuration callback functions. * * @param[in] monitor The monitor that was connected or disconnected. * @param[in] event One of `GLFW_CONNECTED` or `GLFW_DISCONNECTED`. * * @sa @ref monitor_event * @sa glfwSetMonitorCallback * * @since Added in version 3.0. * * @ingroup monitor */ typedef void (* GLFWmonitorfun)(GLFWmonitor*,int); /*! @brief The function signature for joystick configuration callbacks. * * This is the function signature for joystick configuration callback * functions. * * @param[in] joy The joystick that was connected or disconnected. * @param[in] event One of `GLFW_CONNECTED` or `GLFW_DISCONNECTED`. * * @sa @ref joystick_event * @sa glfwSetJoystickCallback * * @since Added in version 3.2. * * @ingroup input */ typedef void (* GLFWjoystickfun)(int,int); /*! @brief Video mode type. * * This describes a single video mode. * * @sa @ref monitor_modes * @sa glfwGetVideoMode glfwGetVideoModes * * @since Added in version 1.0. * @glfw3 Added refresh rate member. * * @ingroup monitor */ typedef struct GLFWvidmode { /*! The width, in screen coordinates, of the video mode. */ int width; /*! The height, in screen coordinates, of the video mode. */ int height; /*! 
The bit depth of the red channel of the video mode. */ int redBits; /*! The bit depth of the green channel of the video mode. */ int greenBits; /*! The bit depth of the blue channel of the video mode. */ int blueBits; /*! The refresh rate, in Hz, of the video mode. */ int refreshRate; } GLFWvidmode; /*! @brief Gamma ramp. * * This describes the gamma ramp for a monitor. * * @sa @ref monitor_gamma * @sa glfwGetGammaRamp glfwSetGammaRamp * * @since Added in version 3.0. * * @ingroup monitor */ typedef struct GLFWgammaramp { /*! An array of values describing the response of the red channel. */ unsigned short* red; /*! An array of values describing the response of the green channel. */ unsigned short* green; /*! An array of values describing the response of the blue channel. */ unsigned short* blue; /*! The number of elements in each array. */ unsigned int size; } GLFWgammaramp; /*! @brief Image data. * * @sa @ref cursor_custom * @sa @ref window_icon * * @since Added in version 2.1. * @glfw3 Removed format and bytes-per-pixel members. */ typedef struct GLFWimage { /*! The width, in pixels, of this image. */ int width; /*! The height, in pixels, of this image. */ int height; /*! The pixel data of this image, arranged left-to-right, top-to-bottom. */ unsigned char* pixels; } GLFWimage; /************************************************************************* * GLFW API functions *************************************************************************/ /*! @brief Initializes the GLFW library. * * This function initializes the GLFW library. Before most GLFW functions can * be used, GLFW must be initialized, and before an application terminates GLFW * should be terminated in order to free any resources allocated during or * after initialization. * * If this function fails, it calls @ref glfwTerminate before returning. If it * succeeds, you should call @ref glfwTerminate before the application exits. * * Additional calls to this function after successful initialization but before * termination will return `GLFW_TRUE` immediately. * * @return `GLFW_TRUE` if successful, or `GLFW_FALSE` if an * [error](@ref error_handling) occurred. * * @errors Possible errors include @ref GLFW_PLATFORM_ERROR. * * @remark @osx This function will change the current directory of the * application to the `Contents/Resources` subdirectory of the application's * bundle, if present. This can be disabled with a * [compile-time option](@ref compile_options_osx). * * @thread_safety This function must only be called from the main thread. * * @sa @ref intro_init * @sa glfwTerminate * * @since Added in version 1.0. * * @ingroup init */ GLFWAPI int glfwInit(void); /*! @brief Terminates the GLFW library. * * This function destroys all remaining windows and cursors, restores any * modified gamma ramps and frees any other allocated resources. Once this * function is called, you must again call @ref glfwInit successfully before * you will be able to use most GLFW functions. * * If GLFW has been successfully initialized, this function should be called * before the application exits. If initialization fails, there is no need to * call this function, as it is called by @ref glfwInit before it returns * failure. * * @errors Possible errors include @ref GLFW_PLATFORM_ERROR. * * @remark This function may be called before @ref glfwInit. * * @warning The contexts of any remaining windows must not be current on any * other thread when this function is called. * * @reentrancy This function must not be called from a callback.
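*
* A minimal sketch of the expected pairing with @ref glfwInit; the error
* handling shown is illustrative only:
* @code
* if (!glfwInit())
* {
*     // Initialization failed; glfwTerminate has already been called
*     return -1;
* }
*
* // ... create windows and run the application ...
*
* glfwTerminate();
* @endcode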
* * @thread_safety This function must only be called from the main thread. * * @sa @ref intro_init * @sa glfwInit * * @since Added in version 1.0. * * @ingroup init */ GLFWAPI void glfwTerminate(void); /*! @brief Retrieves the version of the GLFW library. * * This function retrieves the major, minor and revision numbers of the GLFW * library. It is intended for when you are using GLFW as a shared library and * want to ensure that you are using the minimum required version. * * Any or all of the version arguments may be `NULL`. * * @param[out] major Where to store the major version number, or `NULL`. * @param[out] minor Where to store the minor version number, or `NULL`. * @param[out] rev Where to store the revision number, or `NULL`. * * @errors None. * * @remark This function may be called before @ref glfwInit. * * @thread_safety This function may be called from any thread. * * @sa @ref intro_version * @sa glfwGetVersionString * * @since Added in version 1.0. * * @ingroup init */ GLFWAPI void glfwGetVersion(int* major, int* minor, int* rev); /*! @brief Returns a string describing the compile-time configuration. * * This function returns the compile-time generated * [version string](@ref intro_version_string) of the GLFW library binary. It * describes the version, platform, compiler and any platform-specific * compile-time options. It should not be confused with the OpenGL or OpenGL * ES version string, queried with `glGetString`. * * __Do not use the version string__ to parse the GLFW library version. The * @ref glfwGetVersion function provides the version of the running library * binary in numerical format. * * @return The ASCII encoded GLFW version string. * * @errors None. * * @remark This function may be called before @ref glfwInit. * * @pointer_lifetime The returned string is static and compile-time generated. * * @thread_safety This function may be called from any thread. * * @sa @ref intro_version * @sa glfwGetVersion * * @since Added in version 3.0. * * @ingroup init */ GLFWAPI const char* glfwGetVersionString(void); /*! @brief Sets the error callback. * * This function sets the error callback, which is called with an error code * and a human-readable description each time a GLFW error occurs. * * The error callback is called on the thread where the error occurred. If you * are using GLFW from multiple threads, your error callback needs to be * written accordingly. * * Because the description string may have been generated specifically for that * error, it is not guaranteed to be valid after the callback has returned. If * you wish to use it after the callback returns, you need to make a copy. * * Once set, the error callback remains set even after the library has been * terminated. * * @param[in] cbfun The new callback, or `NULL` to remove the currently set * callback. * @return The previously set callback, or `NULL` if no callback was set. * * @errors None. * * @remark This function may be called before @ref glfwInit. * * @thread_safety This function must only be called from the main thread. * * @sa @ref error_handling * * @since Added in version 3.0. * * @ingroup init */ GLFWAPI GLFWerrorfun glfwSetErrorCallback(GLFWerrorfun cbfun); /*! @brief Returns the currently connected monitors. * * This function returns an array of handles for all currently connected * monitors. The primary monitor is always first in the returned array. If no * monitors were found, this function returns `NULL`. * * @param[out] count Where to store the number of monitors in the returned * array. 
This is set to zero if an error occurred. * @return An array of monitor handles, or `NULL` if no monitors were found or * if an [error](@ref error_handling) occurred. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. * * @pointer_lifetime The returned array is allocated and freed by GLFW. You * should not free it yourself. It is guaranteed to be valid only until the * monitor configuration changes or the library is terminated. * * @thread_safety This function must only be called from the main thread. * * @sa @ref monitor_monitors * @sa @ref monitor_event * @sa glfwGetPrimaryMonitor * * @since Added in version 3.0. * * @ingroup monitor */ GLFWAPI GLFWmonitor** glfwGetMonitors(int* count); /*! @brief Returns the primary monitor. * * This function returns the primary monitor. This is usually the monitor * where elements like the task bar or global menu bar are located. * * @return The primary monitor, or `NULL` if no monitors were found or if an * [error](@ref error_handling) occurred. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. * * @thread_safety This function must only be called from the main thread. * * @remark The primary monitor is always first in the array returned by @ref * glfwGetMonitors. * * @sa @ref monitor_monitors * @sa glfwGetMonitors * * @since Added in version 3.0. * * @ingroup monitor */ GLFWAPI GLFWmonitor* glfwGetPrimaryMonitor(void); /*! @brief Returns the position of the monitor's viewport on the virtual screen. * * This function returns the position, in screen coordinates, of the upper-left * corner of the specified monitor. * * Any or all of the position arguments may be `NULL`. If an error occurs, all * non-`NULL` position arguments will be set to zero. * * @param[in] monitor The monitor to query. * @param[out] xpos Where to store the monitor x-coordinate, or `NULL`. * @param[out] ypos Where to store the monitor y-coordinate, or `NULL`. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_PLATFORM_ERROR. * * @thread_safety This function must only be called from the main thread. * * @sa @ref monitor_properties * * @since Added in version 3.0. * * @ingroup monitor */ GLFWAPI void glfwGetMonitorPos(GLFWmonitor* monitor, int* xpos, int* ypos); /*! @brief Returns the physical size of the monitor. * * This function returns the size, in millimetres, of the display area of the * specified monitor. * * Some systems do not provide accurate monitor size information, either * because the monitor * [EDID](https://en.wikipedia.org/wiki/Extended_display_identification_data) * data is incorrect or because the driver does not report it accurately. * * Any or all of the size arguments may be `NULL`. If an error occurs, all * non-`NULL` size arguments will be set to zero. * * @param[in] monitor The monitor to query. * @param[out] widthMM Where to store the width, in millimetres, of the * monitor's display area, or `NULL`. * @param[out] heightMM Where to store the height, in millimetres, of the * monitor's display area, or `NULL`. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. * * @remark @win32 calculates the returned physical size from the * current resolution and system DPI instead of querying the monitor EDID data. * * @thread_safety This function must only be called from the main thread. * * @sa @ref monitor_properties * * @since Added in version 3.0. * * @ingroup monitor */ GLFWAPI void glfwGetMonitorPhysicalSize(GLFWmonitor* monitor, int* widthMM, int* heightMM); /*! 
@brief Returns the name of the specified monitor. * * This function returns a human-readable name, encoded as UTF-8, of the * specified monitor. The name typically reflects the make and model of the * monitor and is not guaranteed to be unique among the connected monitors. * * @param[in] monitor The monitor to query. * @return The UTF-8 encoded name of the monitor, or `NULL` if an * [error](@ref error_handling) occurred. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. * * @pointer_lifetime The returned string is allocated and freed by GLFW. You * should not free it yourself. It is valid until the specified monitor is * disconnected or the library is terminated. * * @thread_safety This function must only be called from the main thread. * * @sa @ref monitor_properties * * @since Added in version 3.0. * * @ingroup monitor */ GLFWAPI const char* glfwGetMonitorName(GLFWmonitor* monitor); /*! @brief Sets the monitor configuration callback. * * This function sets the monitor configuration callback, or removes the * currently set callback. This is called when a monitor is connected to or * disconnected from the system. * * @param[in] cbfun The new callback, or `NULL` to remove the currently set * callback. * @return The previously set callback, or `NULL` if no callback was set or the * library had not been [initialized](@ref intro_init). * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. * * @thread_safety This function must only be called from the main thread. * * @sa @ref monitor_event * * @since Added in version 3.0. * * @ingroup monitor */ GLFWAPI GLFWmonitorfun glfwSetMonitorCallback(GLFWmonitorfun cbfun); /*! @brief Returns the available video modes for the specified monitor. * * This function returns an array of all video modes supported by the specified * monitor. The returned array is sorted in ascending order, first by color * bit depth (the sum of all channel depths) and then by resolution area (the * product of width and height). * * @param[in] monitor The monitor to query. * @param[out] count Where to store the number of video modes in the returned * array. This is set to zero if an error occurred. * @return An array of video modes, or `NULL` if an * [error](@ref error_handling) occurred. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_PLATFORM_ERROR. * * @pointer_lifetime The returned array is allocated and freed by GLFW. You * should not free it yourself. It is valid until the specified monitor is * disconnected, this function is called again for that monitor or the library * is terminated. * * @thread_safety This function must only be called from the main thread. * * @sa @ref monitor_modes * @sa glfwGetVideoMode * * @since Added in version 1.0. * @glfw3 Changed to return an array of modes for a specific monitor. * * @ingroup monitor */ GLFWAPI const GLFWvidmode* glfwGetVideoModes(GLFWmonitor* monitor, int* count); /*! @brief Returns the current mode of the specified monitor. * * This function returns the current video mode of the specified monitor. If * you have created a full screen window for that monitor, the return value * will depend on whether that window is iconified. * * @param[in] monitor The monitor to query. * @return The current mode of the monitor, or `NULL` if an * [error](@ref error_handling) occurred. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_PLATFORM_ERROR. * * @pointer_lifetime The returned array is allocated and freed by GLFW. You * should not free it yourself. 
It is valid until the specified monitor is * disconnected or the library is terminated. * * @thread_safety This function must only be called from the main thread. * * @sa @ref monitor_modes * @sa glfwGetVideoModes * * @since Added in version 3.0. Replaces `glfwGetDesktopMode`. * * @ingroup monitor */ GLFWAPI const GLFWvidmode* glfwGetVideoMode(GLFWmonitor* monitor); /*! @brief Generates a gamma ramp and sets it for the specified monitor. * * This function generates a 256-element gamma ramp from the specified exponent * and then calls @ref glfwSetGammaRamp with it. The value must be a finite * number greater than zero. * * @param[in] monitor The monitor whose gamma ramp to set. * @param[in] gamma The desired exponent. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref * GLFW_INVALID_VALUE and @ref GLFW_PLATFORM_ERROR. * * @thread_safety This function must only be called from the main thread. * * @sa @ref monitor_gamma * * @since Added in version 3.0. * * @ingroup monitor */ GLFWAPI void glfwSetGamma(GLFWmonitor* monitor, float gamma); /*! @brief Returns the current gamma ramp for the specified monitor. * * This function returns the current gamma ramp of the specified monitor. * * @param[in] monitor The monitor to query. * @return The current gamma ramp, or `NULL` if an * [error](@ref error_handling) occurred. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_PLATFORM_ERROR. * * @pointer_lifetime The returned structure and its arrays are allocated and * freed by GLFW. You should not free them yourself. They are valid until the * specified monitor is disconnected, this function is called again for that * monitor or the library is terminated. * * @thread_safety This function must only be called from the main thread. * * @sa @ref monitor_gamma * * @since Added in version 3.0. * * @ingroup monitor */ GLFWAPI const GLFWgammaramp* glfwGetGammaRamp(GLFWmonitor* monitor); /*! @brief Sets the current gamma ramp for the specified monitor. * * This function sets the current gamma ramp for the specified monitor. The * original gamma ramp for that monitor is saved by GLFW the first time this * function is called and is restored by @ref glfwTerminate. * * @param[in] monitor The monitor whose gamma ramp to set. * @param[in] ramp The gamma ramp to use. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_PLATFORM_ERROR. * * @remark Gamma ramp sizes other than 256 are not supported by all platforms * or graphics hardware. * * @remark @win32 The gamma ramp size must be 256. * * @pointer_lifetime The specified gamma ramp is copied before this function * returns. * * @thread_safety This function must only be called from the main thread. * * @sa @ref monitor_gamma * * @since Added in version 3.0. * * @ingroup monitor */ GLFWAPI void glfwSetGammaRamp(GLFWmonitor* monitor, const GLFWgammaramp* ramp); /*! @brief Resets all window hints to their default values. * * This function resets all window hints to their * [default values](@ref window_hints_values). * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. * * @thread_safety This function must only be called from the main thread. * * @sa @ref window_hints * @sa glfwWindowHint * * @since Added in version 3.0. * * @ingroup window */ GLFWAPI void glfwDefaultWindowHints(void); /*! @brief Sets the specified window hint to the desired value. * * This function sets hints for the next call to @ref glfwCreateWindow. 
The * hints, once set, retain their values until changed by a call to @ref * glfwWindowHint or @ref glfwDefaultWindowHints, or until the library is * terminated. * * This function does not check whether the specified hint values are valid. * If you set hints to invalid values this will instead be reported by the next * call to @ref glfwCreateWindow. * * @param[in] hint The [window hint](@ref window_hints) to set. * @param[in] value The new value of the window hint. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_INVALID_ENUM. * * @thread_safety This function must only be called from the main thread. * * @sa @ref window_hints * @sa glfwDefaultWindowHints * * @since Added in version 3.0. Replaces `glfwOpenWindowHint`. * * @ingroup window */ GLFWAPI void glfwWindowHint(int hint, int value); /*! @brief Creates a window and its associated context. * * This function creates a window and its associated OpenGL or OpenGL ES * context. Most of the options controlling how the window and its context * should be created are specified with [window hints](@ref window_hints). * * Successful creation does not change which context is current. Before you * can use the newly created context, you need to * [make it current](@ref context_current). For information about the `share` * parameter, see @ref context_sharing. * * The created window, framebuffer and context may differ from what you * requested, as not all parameters and hints are * [hard constraints](@ref window_hints_hard). This includes the size of the * window, especially for full screen windows. To query the actual attributes * of the created window, framebuffer and context, see @ref * glfwGetWindowAttrib, @ref glfwGetWindowSize and @ref glfwGetFramebufferSize. * * To create a full screen window, you need to specify the monitor the window * will cover. If no monitor is specified, the window will be windowed mode. * Unless you have a way for the user to choose a specific monitor, it is * recommended that you pick the primary monitor. For more information on how * to query connected monitors, see @ref monitor_monitors. * * For full screen windows, the specified size becomes the resolution of the * window's _desired video mode_. As long as a full screen window is not * iconified, the supported video mode most closely matching the desired video * mode is set for the specified monitor. For more information about full * screen windows, including the creation of so called _windowed full screen_ * or _borderless full screen_ windows, see @ref window_windowed_full_screen. * * Once you have created the window, you can switch it between windowed and * full screen mode with @ref glfwSetWindowMonitor. If the window has an * OpenGL or OpenGL ES context, it will be unaffected. * * By default, newly created windows use the placement recommended by the * window system. To create the window at a specific position, make it * initially invisible using the [GLFW_VISIBLE](@ref window_hints_wnd) window * hint, set its [position](@ref window_pos) and then [show](@ref window_hide) * it. * * As long as at least one full screen window is not iconified, the screensaver * is prohibited from starting. * * Window systems put limits on window sizes. Very large or very small window * dimensions may be overridden by the window system on creation. Check the * actual [size](@ref window_size) after creation. 
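*
* A sketch of a typical creation sequence; the hint values and title used
* here are arbitrary examples:
* @code
* glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
* glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 2);
*
* GLFWwindow* window = glfwCreateWindow(640, 480, "My Title", NULL, NULL);
* if (!window)
* {
*     // Window or context creation failed
* }
* @endcode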
* * The [swap interval](@ref buffer_swap) is not set during window creation and * the initial value may vary depending on driver settings and defaults. * * @param[in] width The desired width, in screen coordinates, of the window. * This must be greater than zero. * @param[in] height The desired height, in screen coordinates, of the window. * This must be greater than zero. * @param[in] title The initial, UTF-8 encoded window title. * @param[in] monitor The monitor to use for full screen mode, or `NULL` for * windowed mode. * @param[in] share The window whose context to share resources with, or `NULL` * to not share resources. * @return The handle of the created window, or `NULL` if an * [error](@ref error_handling) occurred. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref * GLFW_INVALID_ENUM, @ref GLFW_INVALID_VALUE, @ref GLFW_API_UNAVAILABLE, @ref * GLFW_VERSION_UNAVAILABLE, @ref GLFW_FORMAT_UNAVAILABLE and @ref * GLFW_PLATFORM_ERROR. * * @remark @win32 Window creation will fail if the Microsoft GDI software * OpenGL implementation is the only one available. * * @remark @win32 If the executable has an icon resource named `GLFW_ICON,` it * will be set as the initial icon for the window. If no such icon is present, * the `IDI_WINLOGO` icon will be used instead. To set a different icon, see * @ref glfwSetWindowIcon. * * @remark @win32 The context to share resources with must not be current on * any other thread. * * @remark @osx The GLFW window has no icon, as it is not a document * window, but the dock icon will be the same as the application bundle's icon. * For more information on bundles, see the * [Bundle Programming Guide](https://developer.apple.com/library/mac/documentation/CoreFoundation/Conceptual/CFBundles/) * in the Mac Developer Library. * * @remark @osx The first time a window is created the menu bar is populated * with common commands like Hide, Quit and About. The About entry opens * a minimal about dialog with information from the application's bundle. The * menu bar can be disabled with a * [compile-time option](@ref compile_options_osx). * * @remark @osx On OS X 10.10 and later the window frame will not be rendered * at full resolution on Retina displays unless the `NSHighResolutionCapable` * key is enabled in the application bundle's `Info.plist`. For more * information, see * [High Resolution Guidelines for OS X](https://developer.apple.com/library/mac/documentation/GraphicsAnimation/Conceptual/HighResolutionOSX/Explained/Explained.html) * in the Mac Developer Library. The GLFW test and example programs use * a custom `Info.plist` template for this, which can be found as * `CMake/MacOSXBundleInfo.plist.in` in the source tree. * * @remark @x11 Some window managers will not respect the placement of * initially hidden windows. * * @remark @x11 Due to the asynchronous nature of X11, it may take a moment for * a window to reach its requested state. This means you may not be able to * query the final size, position or other attributes directly after window * creation. * * @reentrancy This function must not be called from a callback. * * @thread_safety This function must only be called from the main thread. * * @sa @ref window_creation * @sa glfwDestroyWindow * * @since Added in version 3.0. Replaces `glfwOpenWindow`. * * @ingroup window */ GLFWAPI GLFWwindow* glfwCreateWindow(int width, int height, const char* title, GLFWmonitor* monitor, GLFWwindow* share); /*! @brief Destroys the specified window and its context. 
* * This function destroys the specified window and its context. On calling * this function, no further callbacks will be called for that window. * * If the context of the specified window is current on the main thread, it is * detached before being destroyed. * * @param[in] window The window to destroy. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_PLATFORM_ERROR. * * @note The context of the specified window must not be current on any other * thread when this function is called. * * @reentrancy This function must not be called from a callback. * * @thread_safety This function must only be called from the main thread. * * @sa @ref window_creation * @sa glfwCreateWindow * * @since Added in version 3.0. Replaces `glfwCloseWindow`. * * @ingroup window */ GLFWAPI void glfwDestroyWindow(GLFWwindow* window); /*! @brief Checks the close flag of the specified window. * * This function returns the value of the close flag of the specified window. * * @param[in] window The window to query. * @return The value of the close flag. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. * * @thread_safety This function may be called from any thread. Access is not * synchronized. * * @sa @ref window_close * * @since Added in version 3.0. * * @ingroup window */ GLFWAPI int glfwWindowShouldClose(GLFWwindow* window); /*! @brief Sets the close flag of the specified window. * * This function sets the value of the close flag of the specified window. * This can be used to override the user's attempt to close the window, or * to signal that it should be closed. * * @param[in] window The window whose flag to change. * @param[in] value The new value. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. * * @thread_safety This function may be called from any thread. Access is not * synchronized. * * @sa @ref window_close * * @since Added in version 3.0. * * @ingroup window */ GLFWAPI void glfwSetWindowShouldClose(GLFWwindow* window, int value); /*! @brief Sets the title of the specified window. * * This function sets the window title, encoded as UTF-8, of the specified * window. * * @param[in] window The window whose title to change. * @param[in] title The UTF-8 encoded window title. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_PLATFORM_ERROR. * * @remark @osx The window title will not be updated until the next time you * process events. * * @thread_safety This function must only be called from the main thread. * * @sa @ref window_title * * @since Added in version 1.0. * @glfw3 Added window handle parameter. * * @ingroup window */ GLFWAPI void glfwSetWindowTitle(GLFWwindow* window, const char* title); /*! @brief Sets the icon for the specified window. * * This function sets the icon of the specified window. If passed an array of * candidate images, those of or closest to the sizes desired by the system are * selected. If no images are specified, the window reverts to its default * icon. * * The desired image sizes vary depending on platform and system settings. * The selected images will be rescaled as needed. Good sizes include 16x16, * 32x32 and 48x48. * * @param[in] window The window whose icon to set. * @param[in] count The number of images in the specified array, or zero to * revert to the default window icon. * @param[in] images The images to create the icon from. This is ignored if * count is zero. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_PLATFORM_ERROR.
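*
* A minimal sketch of setting a single candidate image; the pixel buffer is
* assumed to contain 16x16 image data filled elsewhere:
* @code
* unsigned char pixels[16 * 16 * 4];  // assumed image data, filled elsewhere
*
* GLFWimage image;
* image.width  = 16;
* image.height = 16;
* image.pixels = pixels;
*
* glfwSetWindowIcon(window, 1, &image);
* @endcode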
* * @pointer_lifetime The specified image data is copied before this function * returns. * * @remark @osx The GLFW window has no icon, as it is not a document * window, so this function does nothing. The dock icon will be the same as * the application bundle's icon. For more information on bundles, see the * [Bundle Programming Guide](https://developer.apple.com/library/mac/documentation/CoreFoundation/Conceptual/CFBundles/) * in the Mac Developer Library. * * @thread_safety This function must only be called from the main thread. * * @sa @ref window_icon * * @since Added in version 3.2. * * @ingroup window */ GLFWAPI void glfwSetWindowIcon(GLFWwindow* window, int count, const GLFWimage* images); /*! @brief Retrieves the position of the client area of the specified window. * * This function retrieves the position, in screen coordinates, of the * upper-left corner of the client area of the specified window. * * Any or all of the position arguments may be `NULL`. If an error occurs, all * non-`NULL` position arguments will be set to zero. * * @param[in] window The window to query. * @param[out] xpos Where to store the x-coordinate of the upper-left corner of * the client area, or `NULL`. * @param[out] ypos Where to store the y-coordinate of the upper-left corner of * the client area, or `NULL`. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_PLATFORM_ERROR. * * @thread_safety This function must only be called from the main thread. * * @sa @ref window_pos * @sa glfwSetWindowPos * * @since Added in version 3.0. * * @ingroup window */ GLFWAPI void glfwGetWindowPos(GLFWwindow* window, int* xpos, int* ypos); /*! @brief Sets the position of the client area of the specified window. * * This function sets the position, in screen coordinates, of the upper-left * corner of the client area of the specified windowed mode window. If the * window is a full screen window, this function does nothing. * * __Do not use this function__ to move an already visible window unless you * have very good reasons for doing so, as it will confuse and annoy the user. * * The window manager may put limits on what positions are allowed. GLFW * cannot and should not override these limits. * * @param[in] window The window to query. * @param[in] xpos The x-coordinate of the upper-left corner of the client area. * @param[in] ypos The y-coordinate of the upper-left corner of the client area. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_PLATFORM_ERROR. * * @thread_safety This function must only be called from the main thread. * * @sa @ref window_pos * @sa glfwGetWindowPos * * @since Added in version 1.0. * @glfw3 Added window handle parameter. * * @ingroup window */ GLFWAPI void glfwSetWindowPos(GLFWwindow* window, int xpos, int ypos); /*! @brief Retrieves the size of the client area of the specified window. * * This function retrieves the size, in screen coordinates, of the client area * of the specified window. If you wish to retrieve the size of the * framebuffer of the window in pixels, see @ref glfwGetFramebufferSize. * * Any or all of the size arguments may be `NULL`. If an error occurs, all * non-`NULL` size arguments will be set to zero. * * @param[in] window The window whose size to retrieve. * @param[out] width Where to store the width, in screen coordinates, of the * client area, or `NULL`. * @param[out] height Where to store the height, in screen coordinates, of the * client area, or `NULL`. 
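*
* For example:
* @code
* int width, height;
* glfwGetWindowSize(window, &width, &height);
* @endcode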
* * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_PLATFORM_ERROR. * * @thread_safety This function must only be called from the main thread. * * @sa @ref window_size * @sa glfwSetWindowSize * * @since Added in version 1.0. * @glfw3 Added window handle parameter. * * @ingroup window */ GLFWAPI void glfwGetWindowSize(GLFWwindow* window, int* width, int* height); /*! @brief Sets the size limits of the specified window. * * This function sets the size limits of the client area of the specified * window. If the window is full screen, the size limits only take effect * once it is made windowed. If the window is not resizable, this function * does nothing. * * The size limits are applied immediately to a windowed mode window and may * cause it to be resized. * * The maximum dimensions must be greater than or equal to the minimum * dimensions and all must be greater than or equal to zero. * * @param[in] window The window to set limits for. * @param[in] minwidth The minimum width, in screen coordinates, of the client * area, or `GLFW_DONT_CARE`. * @param[in] minheight The minimum height, in screen coordinates, of the * client area, or `GLFW_DONT_CARE`. * @param[in] maxwidth The maximum width, in screen coordinates, of the client * area, or `GLFW_DONT_CARE`. * @param[in] maxheight The maximum height, in screen coordinates, of the * client area, or `GLFW_DONT_CARE`. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref * GLFW_INVALID_VALUE and @ref GLFW_PLATFORM_ERROR. * * @remark If you set size limits and an aspect ratio that conflict, the * results are undefined. * * @thread_safety This function must only be called from the main thread. * * @sa @ref window_sizelimits * @sa glfwSetWindowAspectRatio * * @since Added in version 3.2. * * @ingroup window */ GLFWAPI void glfwSetWindowSizeLimits(GLFWwindow* window, int minwidth, int minheight, int maxwidth, int maxheight); /*! @brief Sets the aspect ratio of the specified window. * * This function sets the required aspect ratio of the client area of the * specified window. If the window is full screen, the aspect ratio only takes * effect once it is made windowed. If the window is not resizable, this * function does nothing. * * The aspect ratio is specified as a numerator and a denominator and both * values must be greater than zero. For example, the common 16:9 aspect ratio * is specified as 16 and 9, respectively. * * If the numerator and denominator is set to `GLFW_DONT_CARE` then the aspect * ratio limit is disabled. * * The aspect ratio is applied immediately to a windowed mode window and may * cause it to be resized. * * @param[in] window The window to set limits for. * @param[in] numer The numerator of the desired aspect ratio, or * `GLFW_DONT_CARE`. * @param[in] denom The denominator of the desired aspect ratio, or * `GLFW_DONT_CARE`. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref * GLFW_INVALID_VALUE and @ref GLFW_PLATFORM_ERROR. * * @remark If you set size limits and an aspect ratio that conflict, the * results are undefined. * * @thread_safety This function must only be called from the main thread. * * @sa @ref window_sizelimits * @sa glfwSetWindowSizeLimits * * @since Added in version 3.2. * * @ingroup window */ GLFWAPI void glfwSetWindowAspectRatio(GLFWwindow* window, int numer, int denom); /*! @brief Sets the size of the client area of the specified window. * * This function sets the size, in screen coordinates, of the client area of * the specified window. 
* * For full screen windows, this function updates the resolution of its desired * video mode and switches to the video mode closest to it, without affecting * the window's context. As the context is unaffected, the bit depths of the * framebuffer remain unchanged. * * If you wish to update the refresh rate of the desired video mode in addition * to its resolution, see @ref glfwSetWindowMonitor. * * The window manager may put limits on what sizes are allowed. GLFW cannot * and should not override these limits. * * @param[in] window The window to resize. * @param[in] width The desired width, in screen coordinates, of the window * client area. * @param[in] height The desired height, in screen coordinates, of the window * client area. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_PLATFORM_ERROR. * * @thread_safety This function must only be called from the main thread. * * @sa @ref window_size * @sa glfwGetWindowSize * @sa glfwSetWindowMonitor * * @since Added in version 1.0. * @glfw3 Added window handle parameter. * * @ingroup window */ GLFWAPI void glfwSetWindowSize(GLFWwindow* window, int width, int height); /*! @brief Retrieves the size of the framebuffer of the specified window. * * This function retrieves the size, in pixels, of the framebuffer of the * specified window. If you wish to retrieve the size of the window in screen * coordinates, see @ref glfwGetWindowSize. * * Any or all of the size arguments may be `NULL`. If an error occurs, all * non-`NULL` size arguments will be set to zero. * * @param[in] window The window whose framebuffer to query. * @param[out] width Where to store the width, in pixels, of the framebuffer, * or `NULL`. * @param[out] height Where to store the height, in pixels, of the framebuffer, * or `NULL`. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_PLATFORM_ERROR. * * @thread_safety This function must only be called from the main thread. * * @sa @ref window_fbsize * @sa glfwSetFramebufferSizeCallback * * @since Added in version 3.0. * * @ingroup window */ GLFWAPI void glfwGetFramebufferSize(GLFWwindow* window, int* width, int* height); /*! @brief Retrieves the size of the frame of the window. * * This function retrieves the size, in screen coordinates, of each edge of the * frame of the specified window. This size includes the title bar, if the * window has one. The size of the frame may vary depending on the * [window-related hints](@ref window_hints_wnd) used to create it. * * Because this function retrieves the size of each window frame edge and not * the offset along a particular coordinate axis, the retrieved values will * always be zero or positive. * * Any or all of the size arguments may be `NULL`. If an error occurs, all * non-`NULL` size arguments will be set to zero. * * @param[in] window The window whose frame size to query. * @param[out] left Where to store the size, in screen coordinates, of the left * edge of the window frame, or `NULL`. * @param[out] top Where to store the size, in screen coordinates, of the top * edge of the window frame, or `NULL`. * @param[out] right Where to store the size, in screen coordinates, of the * right edge of the window frame, or `NULL`. * @param[out] bottom Where to store the size, in screen coordinates, of the * bottom edge of the window frame, or `NULL`. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_PLATFORM_ERROR. * * @thread_safety This function must only be called from the main thread. 
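*
* For example, computing the total on-screen extent of a window including
* its frame (a sketch only; the variable names are arbitrary):
* @code
* int width, height, left, top, right, bottom;
* glfwGetWindowSize(window, &width, &height);
* glfwGetWindowFrameSize(window, &left, &top, &right, &bottom);
*
* int total_width  = left + width  + right;
* int total_height = top  + height + bottom;
* @endcode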
* * @sa @ref window_size * * @since Added in version 3.1. * * @ingroup window */ GLFWAPI void glfwGetWindowFrameSize(GLFWwindow* window, int* left, int* top, int* right, int* bottom); /*! @brief Iconifies the specified window. * * This function iconifies (minimizes) the specified window if it was * previously restored. If the window is already iconified, this function does * nothing. * * If the specified window is a full screen window, the original monitor * resolution is restored until the window is restored. * * @param[in] window The window to iconify. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_PLATFORM_ERROR. * * @thread_safety This function must only be called from the main thread. * * @sa @ref window_iconify * @sa glfwRestoreWindow * @sa glfwMaximizeWindow * * @since Added in version 2.1. * @glfw3 Added window handle parameter. * * @ingroup window */ GLFWAPI void glfwIconifyWindow(GLFWwindow* window); /*! @brief Restores the specified window. * * This function restores the specified window if it was previously iconified * (minimized) or maximized. If the window is already restored, this function * does nothing. * * If the specified window is a full screen window, the resolution chosen for * the window is restored on the selected monitor. * * @param[in] window The window to restore. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_PLATFORM_ERROR. * * @thread_safety This function must only be called from the main thread. * * @sa @ref window_iconify * @sa glfwIconifyWindow * @sa glfwMaximizeWindow * * @since Added in version 2.1. * @glfw3 Added window handle parameter. * * @ingroup window */ GLFWAPI void glfwRestoreWindow(GLFWwindow* window); /*! @brief Maximizes the specified window. * * This function maximizes the specified window if it was previously not * maximized. If the window is already maximized, this function does nothing. * * If the specified window is a full screen window, this function does nothing. * * @param[in] window The window to maximize. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_PLATFORM_ERROR. * * @thread_safety This function must only be called from the main thread. * * @sa @ref window_iconify * @sa glfwIconifyWindow * @sa glfwRestoreWindow * * @since Added in version 3.2. * * @ingroup window */ GLFWAPI void glfwMaximizeWindow(GLFWwindow* window); /*! @brief Makes the specified window visible. * * This function makes the specified window visible if it was previously * hidden. If the window is already visible or is in full screen mode, this * function does nothing. * * @param[in] window The window to make visible. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_PLATFORM_ERROR. * * @thread_safety This function must only be called from the main thread. * * @sa @ref window_hide * @sa glfwHideWindow * * @since Added in version 3.0. * * @ingroup window */ GLFWAPI void glfwShowWindow(GLFWwindow* window); /*! @brief Hides the specified window. * * This function hides the specified window if it was previously visible. If * the window is already hidden or is in full screen mode, this function does * nothing. * * @param[in] window The window to hide. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_PLATFORM_ERROR. * * @thread_safety This function must only be called from the main thread. * * @sa @ref window_hide * @sa glfwShowWindow * * @since Added in version 3.0.
* * @ingroup window */ GLFWAPI void glfwHideWindow(GLFWwindow* window); /*! @brief Brings the specified window to front and sets input focus. * * This function brings the specified window to front and sets input focus. * The window should already be visible and not iconified. * * By default, both windowed and full screen mode windows are focused when * initially created. Set the [GLFW_FOCUSED](@ref window_hints_wnd) window * hint to disable this behavior. * * __Do not use this function__ to steal focus from other applications unless * you are certain that is what the user wants. Focus stealing can be * extremely disruptive. * * @param[in] window The window to give input focus. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_PLATFORM_ERROR. * * @thread_safety This function must only be called from the main thread. * * @sa @ref window_focus * * @since Added in version 3.2. * * @ingroup window */ GLFWAPI void glfwFocusWindow(GLFWwindow* window); /*! @brief Returns the monitor that the window uses for full screen mode. * * This function returns the handle of the monitor that the specified window is * in full screen on. * * @param[in] window The window to query. * @return The monitor, or `NULL` if the window is in windowed mode or an * [error](@ref error_handling) occurred. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. * * @thread_safety This function must only be called from the main thread. * * @sa @ref window_monitor * @sa glfwSetWindowMonitor * * @since Added in version 3.0. * * @ingroup window */ GLFWAPI GLFWmonitor* glfwGetWindowMonitor(GLFWwindow* window); /*! @brief Sets the mode, monitor, video mode and placement of a window. * * This function sets the monitor that the window uses for full screen mode or, * if the monitor is `NULL`, makes it windowed mode. * * When setting a monitor, this function updates the width, height and refresh * rate of the desired video mode and switches to the video mode closest to it. * The window position is ignored when setting a monitor. * * When the monitor is `NULL`, the position, width and height are used to * place the window client area. The refresh rate is ignored when no monitor * is specified. * * If you only wish to update the resolution of a full screen window or the * size of a windowed mode window, see @ref glfwSetWindowSize. * * When a window transitions from full screen to windowed mode, this function * restores any previous window settings such as whether it is decorated, * floating, resizable, has size or aspect ratio limits, etc. * * @param[in] window The window whose monitor, size or video mode to set. * @param[in] monitor The desired monitor, or `NULL` to set windowed mode. * @param[in] xpos The desired x-coordinate of the upper-left corner of the * client area. * @param[in] ypos The desired y-coordinate of the upper-left corner of the * client area. * @param[in] width The desired width, in screen coordinates, of the client area * or video mode. * @param[in] height The desired height, in screen coordinates, of the client * area or video mode. * @param[in] refreshRate The desired refresh rate, in Hz, of the video mode, * or `GLFW_DONT_CARE`. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_PLATFORM_ERROR. * * @thread_safety This function must only be called from the main thread. * * @sa @ref window_monitor * @sa @ref window_full_screen * @sa glfwGetWindowMonitor * @sa glfwSetWindowSize * * @since Added in version 3.2.
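*
* For example, a sketch of switching a window to full screen on the primary
* monitor at its current video mode:
* @code
* GLFWmonitor* monitor = glfwGetPrimaryMonitor();
* const GLFWvidmode* mode = glfwGetVideoMode(monitor);
*
* glfwSetWindowMonitor(window, monitor, 0, 0,
*                      mode->width, mode->height, mode->refreshRate);
* @endcode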
* * @ingroup window */ GLFWAPI void glfwSetWindowMonitor(GLFWwindow* window, GLFWmonitor* monitor, int xpos, int ypos, int width, int height, int refreshRate); /*! @brief Returns an attribute of the specified window. * * This function returns the value of an attribute of the specified window or * its OpenGL or OpenGL ES context. * * @param[in] window The window to query. * @param[in] attrib The [window attribute](@ref window_attribs) whose value to * return. * @return The value of the attribute, or zero if an * [error](@ref error_handling) occurred. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref * GLFW_INVALID_ENUM and @ref GLFW_PLATFORM_ERROR. * * @remark Framebuffer related hints are not window attributes. See @ref * window_attribs_fb for more information. * * @remark Zero is a valid value for many window and context related * attributes so you cannot use a return value of zero as an indication of * errors. However, this function should not fail as long as it is passed * valid arguments and the library has been [initialized](@ref intro_init). * * @thread_safety This function must only be called from the main thread. * * @sa @ref window_attribs * * @since Added in version 3.0. Replaces `glfwGetWindowParam` and * `glfwGetGLVersion`. * * @ingroup window */ GLFWAPI int glfwGetWindowAttrib(GLFWwindow* window, int attrib); /*! @brief Sets the user pointer of the specified window. * * This function sets the user-defined pointer of the specified window. The * current value is retained until the window is destroyed. The initial value * is `NULL`. * * @param[in] window The window whose pointer to set. * @param[in] pointer The new value. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. * * @thread_safety This function may be called from any thread. Access is not * synchronized. * * @sa @ref window_userptr * @sa glfwGetWindowUserPointer * * @since Added in version 3.0. * * @ingroup window */ GLFWAPI void glfwSetWindowUserPointer(GLFWwindow* window, void* pointer); /*! @brief Returns the user pointer of the specified window. * * This function returns the current value of the user-defined pointer of the * specified window. The initial value is `NULL`. * * @param[in] window The window whose pointer to return. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. * * @thread_safety This function may be called from any thread. Access is not * synchronized. * * @sa @ref window_userptr * @sa glfwSetWindowUserPointer * * @since Added in version 3.0. * * @ingroup window */ GLFWAPI void* glfwGetWindowUserPointer(GLFWwindow* window); /*! @brief Sets the position callback for the specified window. * * This function sets the position callback of the specified window, which is * called when the window is moved. The callback is provided with the screen * position of the upper-left corner of the client area of the window. * * @param[in] window The window whose callback to set. * @param[in] cbfun The new callback, or `NULL` to remove the currently set * callback. * @return The previously set callback, or `NULL` if no callback was set or the * library had not been [initialized](@ref intro_init). * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. * * @thread_safety This function must only be called from the main thread. * * @sa @ref window_pos * * @since Added in version 3.0. * * @ingroup window */ GLFWAPI GLFWwindowposfun glfwSetWindowPosCallback(GLFWwindow* window, GLFWwindowposfun cbfun); /*! @brief Sets the size callback for the specified window. 
* * This function sets the size callback of the specified window, which is * called when the window is resized. The callback is provided with the size, * in screen coordinates, of the client area of the window. * * @param[in] window The window whose callback to set. * @param[in] cbfun The new callback, or `NULL` to remove the currently set * callback. * @return The previously set callback, or `NULL` if no callback was set or the * library had not been [initialized](@ref intro_init). * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. * * @thread_safety This function must only be called from the main thread. * * @sa @ref window_size * * @since Added in version 1.0. * @glfw3 Added window handle parameter and return value. * * @ingroup window */ GLFWAPI GLFWwindowsizefun glfwSetWindowSizeCallback(GLFWwindow* window, GLFWwindowsizefun cbfun); /*! @brief Sets the close callback for the specified window. * * This function sets the close callback of the specified window, which is * called when the user attempts to close the window, for example by clicking * the close widget in the title bar. * * The close flag is set before this callback is called, but you can modify it * at any time with @ref glfwSetWindowShouldClose. * * The close callback is not triggered by @ref glfwDestroyWindow. * * @param[in] window The window whose callback to set. * @param[in] cbfun The new callback, or `NULL` to remove the currently set * callback. * @return The previously set callback, or `NULL` if no callback was set or the * library had not been [initialized](@ref intro_init). * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. * * @remark @osx Selecting Quit from the application menu will trigger the close * callback for all windows. * * @thread_safety This function must only be called from the main thread. * * @sa @ref window_close * * @since Added in version 2.5. * @glfw3 Added window handle parameter and return value. * * @ingroup window */ GLFWAPI GLFWwindowclosefun glfwSetWindowCloseCallback(GLFWwindow* window, GLFWwindowclosefun cbfun); /*! @brief Sets the refresh callback for the specified window. * * This function sets the refresh callback of the specified window, which is * called when the client area of the window needs to be redrawn, for example * if the window has been exposed after having been covered by another window. * * On compositing window systems such as Aero, Compiz or Aqua, where the window * contents are saved off-screen, this callback may be called only very * infrequently or never at all. * * @param[in] window The window whose callback to set. * @param[in] cbfun The new callback, or `NULL` to remove the currently set * callback. * @return The previously set callback, or `NULL` if no callback was set or the * library had not been [initialized](@ref intro_init). * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. * * @thread_safety This function must only be called from the main thread. * * @sa @ref window_refresh * * @since Added in version 2.5. * @glfw3 Added window handle parameter and return value. * * @ingroup window */ GLFWAPI GLFWwindowrefreshfun glfwSetWindowRefreshCallback(GLFWwindow* window, GLFWwindowrefreshfun cbfun); /*! @brief Sets the focus callback for the specified window. * * This function sets the focus callback of the specified window, which is * called when the window gains or loses input focus. 
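*
* For example, registering a callback such as the one sketched for
* @ref GLFWwindowfocusfun (the callback name is hypothetical):
* @code
* glfwSetWindowFocusCallback(window, window_focus_callback);
* @endcode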
* * After the focus callback is called for a window that lost input focus, * synthetic key and mouse button release events will be generated for all such * that had been pressed. For more information, see @ref glfwSetKeyCallback * and @ref glfwSetMouseButtonCallback. * * @param[in] window The window whose callback to set. * @param[in] cbfun The new callback, or `NULL` to remove the currently set * callback. * @return The previously set callback, or `NULL` if no callback was set or the * library had not been [initialized](@ref intro_init). * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. * * @thread_safety This function must only be called from the main thread. * * @sa @ref window_focus * * @since Added in version 3.0. * * @ingroup window */ GLFWAPI GLFWwindowfocusfun glfwSetWindowFocusCallback(GLFWwindow* window, GLFWwindowfocusfun cbfun); /*! @brief Sets the iconify callback for the specified window. * * This function sets the iconification callback of the specified window, which * is called when the window is iconified or restored. * * @param[in] window The window whose callback to set. * @param[in] cbfun The new callback, or `NULL` to remove the currently set * callback. * @return The previously set callback, or `NULL` if no callback was set or the * library had not been [initialized](@ref intro_init). * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. * * @thread_safety This function must only be called from the main thread. * * @sa @ref window_iconify * * @since Added in version 3.0. * * @ingroup window */ GLFWAPI GLFWwindowiconifyfun glfwSetWindowIconifyCallback(GLFWwindow* window, GLFWwindowiconifyfun cbfun); /*! @brief Sets the framebuffer resize callback for the specified window. * * This function sets the framebuffer resize callback of the specified window, * which is called when the framebuffer of the specified window is resized. * * @param[in] window The window whose callback to set. * @param[in] cbfun The new callback, or `NULL` to remove the currently set * callback. * @return The previously set callback, or `NULL` if no callback was set or the * library had not been [initialized](@ref intro_init). * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. * * @thread_safety This function must only be called from the main thread. * * @sa @ref window_fbsize * * @since Added in version 3.0. * * @ingroup window */ GLFWAPI GLFWframebuffersizefun glfwSetFramebufferSizeCallback(GLFWwindow* window, GLFWframebuffersizefun cbfun); /*! @brief Processes all pending events. * * This function processes only those events that are already in the event * queue and then returns immediately. Processing events will cause the window * and input callbacks associated with those events to be called. * * On some platforms, a window move, resize or menu operation will cause event * processing to block. This is due to how event processing is designed on * those platforms. You can use the * [window refresh callback](@ref window_refresh) to redraw the contents of * your window when necessary during such operations. * * On some platforms, certain events are sent directly to the application * without going through the event queue, causing callbacks to be called * outside of a call to one of the event processing functions. * * Event processing is not required for joystick input to work. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_PLATFORM_ERROR. * * @reentrancy This function must not be called from a callback. 
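 *
 *  A typical use is at the end of each frame of a continuously rendering
 *  application, as in this sketch (the `draw_scene` helper is illustrative):
 *  @code
 *  while (!glfwWindowShouldClose(window))
 *  {
 *      draw_scene();
 *      glfwSwapBuffers(window);
 *      glfwPollEvents();
 *  }
 *  @endcode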
* * @thread_safety This function must only be called from the main thread. * * @sa @ref events * @sa glfwWaitEvents * @sa glfwWaitEventsTimeout * * @since Added in version 1.0. * * @ingroup window */ GLFWAPI void glfwPollEvents(void); /*! @brief Waits until events are queued and processes them. * * This function puts the calling thread to sleep until at least one event is * available in the event queue. Once one or more events are available, * it behaves exactly like @ref glfwPollEvents, i.e. the events in the queue * are processed and the function then returns immediately. Processing events * will cause the window and input callbacks associated with those events to be * called. * * Since not all events are associated with callbacks, this function may return * without a callback having been called even if you are monitoring all * callbacks. * * On some platforms, a window move, resize or menu operation will cause event * processing to block. This is due to how event processing is designed on * those platforms. You can use the * [window refresh callback](@ref window_refresh) to redraw the contents of * your window when necessary during such operations. * * On some platforms, certain callbacks may be called outside of a call to one * of the event processing functions. * * If no windows exist, this function returns immediately. For synchronization * of threads in applications that do not create windows, use your threading * library of choice. * * Event processing is not required for joystick input to work. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_PLATFORM_ERROR. * * @reentrancy This function must not be called from a callback. * * @thread_safety This function must only be called from the main thread. * * @sa @ref events * @sa glfwPollEvents * @sa glfwWaitEventsTimeout * * @since Added in version 2.5. * * @ingroup window */ GLFWAPI void glfwWaitEvents(void); /*! @brief Waits with timeout until events are queued and processes them. * * This function puts the calling thread to sleep until at least one event is * available in the event queue, or until the specified timeout is reached. If * one or more events are available, it behaves exactly like @ref * glfwPollEvents, i.e. the events in the queue are processed and the function * then returns immediately. Processing events will cause the window and input * callbacks associated with those events to be called. * * The timeout value must be a positive finite number. * * Since not all events are associated with callbacks, this function may return * without a callback having been called even if you are monitoring all * callbacks. * * On some platforms, a window move, resize or menu operation will cause event * processing to block. This is due to how event processing is designed on * those platforms. You can use the * [window refresh callback](@ref window_refresh) to redraw the contents of * your window when necessary during such operations. * * On some platforms, certain callbacks may be called outside of a call to one * of the event processing functions. * * If no windows exist, this function returns immediately. For synchronization * of threads in applications that do not create windows, use your threading * library of choice. * * Event processing is not required for joystick input to work. * * @param[in] timeout The maximum amount of time, in seconds, to wait. * * @reentrancy This function must not be called from a callback. * * @thread_safety This function must only be called from the main thread. 
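 *
 *  For example, an application that redraws only in response to input, but
 *  still wants to update at least twice per second, might use this sketch
 *  (the `draw_scene` helper is illustrative):
 *  @code
 *  while (!glfwWindowShouldClose(window))
 *  {
 *      draw_scene();
 *      glfwSwapBuffers(window);
 *      glfwWaitEventsTimeout(0.5);
 *  }
 *  @endcode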
* * @sa @ref events * @sa glfwPollEvents * @sa glfwWaitEvents * * @since Added in version 3.2. * * @ingroup window */ GLFWAPI void glfwWaitEventsTimeout(double timeout); /*! @brief Posts an empty event to the event queue. * * This function posts an empty event from the current thread to the event * queue, causing @ref glfwWaitEvents or @ref glfwWaitEventsTimeout to return. * * If no windows exist, this function returns immediately. For synchronization * of threads in applications that do not create windows, use your threading * library of choice. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_PLATFORM_ERROR. * * @thread_safety This function may be called from any thread. * * @sa @ref events * @sa glfwWaitEvents * @sa glfwWaitEventsTimeout * * @since Added in version 3.1. * * @ingroup window */ GLFWAPI void glfwPostEmptyEvent(void); /*! @brief Returns the value of an input option for the specified window. * * This function returns the value of an input option for the specified window. * The mode must be one of `GLFW_CURSOR`, `GLFW_STICKY_KEYS` or * `GLFW_STICKY_MOUSE_BUTTONS`. * * @param[in] window The window to query. * @param[in] mode One of `GLFW_CURSOR`, `GLFW_STICKY_KEYS` or * `GLFW_STICKY_MOUSE_BUTTONS`. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_INVALID_ENUM. * * @thread_safety This function must only be called from the main thread. * * @sa glfwSetInputMode * * @since Added in version 3.0. * * @ingroup input */ GLFWAPI int glfwGetInputMode(GLFWwindow* window, int mode); /*! @brief Sets an input option for the specified window. * * This function sets an input mode option for the specified window. The mode * must be one of `GLFW_CURSOR`, `GLFW_STICKY_KEYS` or * `GLFW_STICKY_MOUSE_BUTTONS`. * * If the mode is `GLFW_CURSOR`, the value must be one of the following cursor * modes: * - `GLFW_CURSOR_NORMAL` makes the cursor visible and behaving normally. * - `GLFW_CURSOR_HIDDEN` makes the cursor invisible when it is over the client * area of the window but does not restrict the cursor from leaving. * - `GLFW_CURSOR_DISABLED` hides and grabs the cursor, providing virtual * and unlimited cursor movement. This is useful for implementing for * example 3D camera controls. * * If the mode is `GLFW_STICKY_KEYS`, the value must be either `GLFW_TRUE` to * enable sticky keys, or `GLFW_FALSE` to disable it. If sticky keys are * enabled, a key press will ensure that @ref glfwGetKey returns `GLFW_PRESS` * the next time it is called even if the key had been released before the * call. This is useful when you are only interested in whether keys have been * pressed but not when or in which order. * * If the mode is `GLFW_STICKY_MOUSE_BUTTONS`, the value must be either * `GLFW_TRUE` to enable sticky mouse buttons, or `GLFW_FALSE` to disable it. * If sticky mouse buttons are enabled, a mouse button press will ensure that * @ref glfwGetMouseButton returns `GLFW_PRESS` the next time it is called even * if the mouse button had been released before the call. This is useful when * you are only interested in whether mouse buttons have been pressed but not * when or in which order. * * @param[in] window The window whose input mode to set. * @param[in] mode One of `GLFW_CURSOR`, `GLFW_STICKY_KEYS` or * `GLFW_STICKY_MOUSE_BUTTONS`. * @param[in] value The new value of the specified input mode. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref * GLFW_INVALID_ENUM and @ref GLFW_PLATFORM_ERROR. 
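 *
 *  For example, a 3D camera control scheme would typically capture the
 *  cursor while the camera is active and release it again for menus, roughly
 *  as in this sketch:
 *  @code
 *  // Hide and grab the cursor for unconstrained mouse look
 *  glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_DISABLED);
 *
 *  // Later, return the cursor to the user
 *  glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_NORMAL);
 *  @endcode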
* * @thread_safety This function must only be called from the main thread. * * @sa glfwGetInputMode * * @since Added in version 3.0. Replaces `glfwEnable` and `glfwDisable`. * * @ingroup input */ GLFWAPI void glfwSetInputMode(GLFWwindow* window, int mode, int value); /*! @brief Returns the localized name of the specified printable key. * * This function returns the localized name of the specified printable key. * This is intended for displaying key bindings to the user. * * If the key is `GLFW_KEY_UNKNOWN`, the scancode is used instead, otherwise * the scancode is ignored. If a non-printable key or (if the key is * `GLFW_KEY_UNKNOWN`) a scancode that maps to a non-printable key is * specified, this function returns `NULL`. * * This behavior allows you to pass in the arguments passed to the * [key callback](@ref input_key) without modification. * * The printable keys are: * - `GLFW_KEY_APOSTROPHE` * - `GLFW_KEY_COMMA` * - `GLFW_KEY_MINUS` * - `GLFW_KEY_PERIOD` * - `GLFW_KEY_SLASH` * - `GLFW_KEY_SEMICOLON` * - `GLFW_KEY_EQUAL` * - `GLFW_KEY_LEFT_BRACKET` * - `GLFW_KEY_RIGHT_BRACKET` * - `GLFW_KEY_BACKSLASH` * - `GLFW_KEY_WORLD_1` * - `GLFW_KEY_WORLD_2` * - `GLFW_KEY_0` to `GLFW_KEY_9` * - `GLFW_KEY_A` to `GLFW_KEY_Z` * - `GLFW_KEY_KP_0` to `GLFW_KEY_KP_9` * - `GLFW_KEY_KP_DECIMAL` * - `GLFW_KEY_KP_DIVIDE` * - `GLFW_KEY_KP_MULTIPLY` * - `GLFW_KEY_KP_SUBTRACT` * - `GLFW_KEY_KP_ADD` * - `GLFW_KEY_KP_EQUAL` * * @param[in] key The key to query, or `GLFW_KEY_UNKNOWN`. * @param[in] scancode The scancode of the key to query. * @return The localized name of the key, or `NULL`. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_PLATFORM_ERROR. * * @pointer_lifetime The returned string is allocated and freed by GLFW. You * should not free it yourself. It is valid until the next call to @ref * glfwGetKeyName, or until the library is terminated. * * @thread_safety This function must only be called from the main thread. * * @sa @ref input_key_name * * @since Added in version 3.2. * * @ingroup input */ GLFWAPI const char* glfwGetKeyName(int key, int scancode); /*! @brief Returns the last reported state of a keyboard key for the specified * window. * * This function returns the last state reported for the specified key to the * specified window. The returned state is one of `GLFW_PRESS` or * `GLFW_RELEASE`. The higher-level action `GLFW_REPEAT` is only reported to * the key callback. * * If the `GLFW_STICKY_KEYS` input mode is enabled, this function returns * `GLFW_PRESS` the first time you call it for a key that was pressed, even if * that key has already been released. * * The key functions deal with physical keys, with [key tokens](@ref keys) * named after their use on the standard US keyboard layout. If you want to * input text, use the Unicode character callback instead. * * The [modifier key bit masks](@ref mods) are not key tokens and cannot be * used with this function. * * __Do not use this function__ to implement [text input](@ref input_char). * * @param[in] window The desired window. * @param[in] key The desired [keyboard key](@ref keys). `GLFW_KEY_UNKNOWN` is * not a valid key for this function. * @return One of `GLFW_PRESS` or `GLFW_RELEASE`. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_INVALID_ENUM. * * @thread_safety This function must only be called from the main thread. * * @sa @ref input_key * * @since Added in version 1.0. * @glfw3 Added window handle parameter. 
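 *
 *  For example, polling the Escape key once per frame to request window
 *  closure might look like this sketch:
 *  @code
 *  if (glfwGetKey(window, GLFW_KEY_ESCAPE) == GLFW_PRESS)
 *      glfwSetWindowShouldClose(window, GLFW_TRUE);
 *  @endcode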
* * @ingroup input */ GLFWAPI int glfwGetKey(GLFWwindow* window, int key); /*! @brief Returns the last reported state of a mouse button for the specified * window. * * This function returns the last state reported for the specified mouse button * to the specified window. The returned state is one of `GLFW_PRESS` or * `GLFW_RELEASE`. * * If the `GLFW_STICKY_MOUSE_BUTTONS` input mode is enabled, this function * returns `GLFW_PRESS` the first time you call it for a mouse button that was pressed, * even if that mouse button has already been released. * * @param[in] window The desired window. * @param[in] button The desired [mouse button](@ref buttons). * @return One of `GLFW_PRESS` or `GLFW_RELEASE`. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_INVALID_ENUM. * * @thread_safety This function must only be called from the main thread. * * @sa @ref input_mouse_button * * @since Added in version 1.0. * @glfw3 Added window handle parameter. * * @ingroup input */ GLFWAPI int glfwGetMouseButton(GLFWwindow* window, int button); /*! @brief Retrieves the position of the cursor relative to the client area of * the window. * * This function returns the position of the cursor, in screen coordinates, * relative to the upper-left corner of the client area of the specified * window. * * If the cursor is disabled (with `GLFW_CURSOR_DISABLED`) then the cursor * position is unbounded and limited only by the minimum and maximum values of * a `double`. * * The coordinates can be converted to their integer equivalents with the * `floor` function. Casting directly to an integer type works for positive * coordinates, but fails for negative ones. * * Any or all of the position arguments may be `NULL`. If an error occurs, all * non-`NULL` position arguments will be set to zero. * * @param[in] window The desired window. * @param[out] xpos Where to store the cursor x-coordinate, relative to the * left edge of the client area, or `NULL`. * @param[out] ypos Where to store the cursor y-coordinate, relative to the * top edge of the client area, or `NULL`. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_PLATFORM_ERROR. * * @thread_safety This function must only be called from the main thread. * * @sa @ref cursor_pos * @sa glfwSetCursorPos * * @since Added in version 3.0. Replaces `glfwGetMousePos`. * * @ingroup input */ GLFWAPI void glfwGetCursorPos(GLFWwindow* window, double* xpos, double* ypos); /*! @brief Sets the position of the cursor, relative to the client area of the * window. * * This function sets the position, in screen coordinates, of the cursor * relative to the upper-left corner of the client area of the specified * window. The window must have input focus. If the window does not have * input focus when this function is called, it fails silently. * * __Do not use this function__ to implement things like camera controls. GLFW * already provides the `GLFW_CURSOR_DISABLED` cursor mode that hides the * cursor, transparently re-centers it and provides unconstrained cursor * motion. See @ref glfwSetInputMode for more information. * * If the cursor mode is `GLFW_CURSOR_DISABLED` then the cursor position is * unconstrained and limited only by the minimum and maximum values of * a `double`. * * @param[in] window The desired window. * @param[in] xpos The desired x-coordinate, relative to the left edge of the * client area. * @param[in] ypos The desired y-coordinate, relative to the top edge of the * client area.
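 *
 *  For example, warping the cursor to the center of the client area might
 *  look like this sketch:
 *  @code
 *  int width, height;
 *  glfwGetWindowSize(window, &width, &height);
 *  glfwSetCursorPos(window, width / 2.0, height / 2.0);
 *  @endcode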
* * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_PLATFORM_ERROR. * * @thread_safety This function must only be called from the main thread. * * @sa @ref cursor_pos * @sa glfwGetCursorPos * * @since Added in version 3.0. Replaces `glfwSetMousePos`. * * @ingroup input */ GLFWAPI void glfwSetCursorPos(GLFWwindow* window, double xpos, double ypos); /*! @brief Creates a custom cursor. * * Creates a new custom cursor image that can be set for a window with @ref * glfwSetCursor. The cursor can be destroyed with @ref glfwDestroyCursor. * Any remaining cursors are destroyed by @ref glfwTerminate. * * The pixels are 32-bit, little-endian, non-premultiplied RGBA, i.e. eight * bits per channel. They are arranged canonically as packed sequential rows, * starting from the top-left corner. * * The cursor hotspot is specified in pixels, relative to the upper-left corner * of the cursor image. Like all other coordinate systems in GLFW, the X-axis * points to the right and the Y-axis points down. * * @param[in] image The desired cursor image. * @param[in] xhot The desired x-coordinate, in pixels, of the cursor hotspot. * @param[in] yhot The desired y-coordinate, in pixels, of the cursor hotspot. * @return The handle of the created cursor, or `NULL` if an * [error](@ref error_handling) occurred. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_PLATFORM_ERROR. * * @pointer_lifetime The specified image data is copied before this function * returns. * * @reentrancy This function must not be called from a callback. * * @thread_safety This function must only be called from the main thread. * * @sa @ref cursor_object * @sa glfwDestroyCursor * @sa glfwCreateStandardCursor * * @since Added in version 3.1. * * @ingroup input */ GLFWAPI GLFWcursor* glfwCreateCursor(const GLFWimage* image, int xhot, int yhot); /*! @brief Creates a cursor with a standard shape. * * Returns a cursor with a [standard shape](@ref shapes), that can be set for * a window with @ref glfwSetCursor. * * @param[in] shape One of the [standard shapes](@ref shapes). * @return A new cursor ready to use or `NULL` if an * [error](@ref error_handling) occurred. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref * GLFW_INVALID_ENUM and @ref GLFW_PLATFORM_ERROR. * * @reentrancy This function must not be called from a callback. * * @thread_safety This function must only be called from the main thread. * * @sa @ref cursor_object * @sa glfwCreateCursor * * @since Added in version 3.1. * * @ingroup input */ GLFWAPI GLFWcursor* glfwCreateStandardCursor(int shape); /*! @brief Destroys a cursor. * * This function destroys a cursor previously created with @ref * glfwCreateCursor. Any remaining cursors will be destroyed by @ref * glfwTerminate. * * @param[in] cursor The cursor object to destroy. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_PLATFORM_ERROR. * * @reentrancy This function must not be called from a callback. * * @thread_safety This function must only be called from the main thread. * * @sa @ref cursor_object * @sa glfwCreateCursor * * @since Added in version 3.1. * * @ingroup input */ GLFWAPI void glfwDestroyCursor(GLFWcursor* cursor); /*! @brief Sets the cursor for the window. * * This function sets the cursor image to be used when the cursor is over the * client area of the specified window. The set cursor will only be visible * when the [cursor mode](@ref cursor_mode) of the window is * `GLFW_CURSOR_NORMAL`. 
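 *
 *  For example, showing a standard hand cursor while hovering a clickable
 *  element might look like this sketch:
 *  @code
 *  GLFWcursor* hand = glfwCreateStandardCursor(GLFW_HAND_CURSOR);
 *  glfwSetCursor(window, hand);
 *
 *  // Later, restore the default arrow cursor and free the object
 *  glfwSetCursor(window, NULL);
 *  glfwDestroyCursor(hand);
 *  @endcode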
* * On some platforms, the set cursor may not be visible unless the window also * has input focus. * * @param[in] window The window to set the cursor for. * @param[in] cursor The cursor to set, or `NULL` to switch back to the default * arrow cursor. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_PLATFORM_ERROR. * * @thread_safety This function must only be called from the main thread. * * @sa @ref cursor_object * * @since Added in version 3.1. * * @ingroup input */ GLFWAPI void glfwSetCursor(GLFWwindow* window, GLFWcursor* cursor); /*! @brief Sets the key callback. * * This function sets the key callback of the specified window, which is called * when a key is pressed, repeated or released. * * The key functions deal with physical keys, with layout independent * [key tokens](@ref keys) named after their values in the standard US keyboard * layout. If you want to input text, use the * [character callback](@ref glfwSetCharCallback) instead. * * When a window loses input focus, it will generate synthetic key release * events for all pressed keys. You can tell these events from user-generated * events by the fact that the synthetic ones are generated after the focus * loss event has been processed, i.e. after the * [window focus callback](@ref glfwSetWindowFocusCallback) has been called. * * The scancode of a key is specific to that platform or sometimes even to that * machine. Scancodes are intended to allow users to bind keys that don't have * a GLFW key token. Such keys have `key` set to `GLFW_KEY_UNKNOWN`, their * state is not saved and so it cannot be queried with @ref glfwGetKey. * * Sometimes GLFW needs to generate synthetic key events, in which case the * scancode may be zero. * * @param[in] window The window whose callback to set. * @param[in] cbfun The new key callback, or `NULL` to remove the currently * set callback. * @return The previously set callback, or `NULL` if no callback was set or the * library had not been [initialized](@ref intro_init). * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. * * @thread_safety This function must only be called from the main thread. * * @sa @ref input_key * * @since Added in version 1.0. * @glfw3 Added window handle parameter and return value. * * @ingroup input */ GLFWAPI GLFWkeyfun glfwSetKeyCallback(GLFWwindow* window, GLFWkeyfun cbfun); /*! @brief Sets the Unicode character callback. * * This function sets the character callback of the specified window, which is * called when a Unicode character is input. * * The character callback is intended for Unicode text input. As it deals with * characters, it is keyboard layout dependent, whereas the * [key callback](@ref glfwSetKeyCallback) is not. Characters do not map 1:1 * to physical keys, as a key may produce zero, one or more characters. If you * want to know whether a specific physical key was pressed or released, see * the key callback instead. * * The character callback behaves as system text input normally does and will * not be called if modifier keys are held down that would prevent normal text * input on that platform, for example a Super (Command) key on OS X or Alt key * on Windows. There is a * [character with modifiers callback](@ref glfwSetCharModsCallback) that * receives these events. * * @param[in] window The window whose callback to set. * @param[in] cbfun The new callback, or `NULL` to remove the currently set * callback. 
* @return The previously set callback, or `NULL` if no callback was set or the * library had not been [initialized](@ref intro_init). * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. * * @thread_safety This function must only be called from the main thread. * * @sa @ref input_char * * @since Added in version 2.4. * @glfw3 Added window handle parameter and return value. * * @ingroup input */ GLFWAPI GLFWcharfun glfwSetCharCallback(GLFWwindow* window, GLFWcharfun cbfun); /*! @brief Sets the Unicode character with modifiers callback. * * This function sets the character with modifiers callback of the specified * window, which is called when a Unicode character is input regardless of what * modifier keys are used. * * The character with modifiers callback is intended for implementing custom * Unicode character input. For regular Unicode text input, see the * [character callback](@ref glfwSetCharCallback). Like the character * callback, the character with modifiers callback deals with characters and is * keyboard layout dependent. Characters do not map 1:1 to physical keys, as * a key may produce zero, one or more characters. If you want to know whether * a specific physical key was pressed or released, see the * [key callback](@ref glfwSetKeyCallback) instead. * * @param[in] window The window whose callback to set. * @param[in] cbfun The new callback, or `NULL` to remove the currently set * callback. * @return The previously set callback, or `NULL` if no callback was set or an * [error](@ref error_handling) occurred. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. * * @thread_safety This function must only be called from the main thread. * * @sa @ref input_char * * @since Added in version 3.1. * * @ingroup input */ GLFWAPI GLFWcharmodsfun glfwSetCharModsCallback(GLFWwindow* window, GLFWcharmodsfun cbfun); /*! @brief Sets the mouse button callback. * * This function sets the mouse button callback of the specified window, which * is called when a mouse button is pressed or released. * * When a window loses input focus, it will generate synthetic mouse button * release events for all pressed mouse buttons. You can tell these events * from user-generated events by the fact that the synthetic ones are generated * after the focus loss event has been processed, i.e. after the * [window focus callback](@ref glfwSetWindowFocusCallback) has been called. * * @param[in] window The window whose callback to set. * @param[in] cbfun The new callback, or `NULL` to remove the currently set * callback. * @return The previously set callback, or `NULL` if no callback was set or the * library had not been [initialized](@ref intro_init). * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. * * @thread_safety This function must only be called from the main thread. * * @sa @ref input_mouse_button * * @since Added in version 1.0. * @glfw3 Added window handle parameter and return value. * * @ingroup input */ GLFWAPI GLFWmousebuttonfun glfwSetMouseButtonCallback(GLFWwindow* window, GLFWmousebuttonfun cbfun); /*! @brief Sets the cursor position callback. * * This function sets the cursor position callback of the specified window, * which is called when the cursor is moved. The callback is provided with the * position, in screen coordinates, relative to the upper-left corner of the * client area of the window. * * @param[in] window The window whose callback to set. * @param[in] cbfun The new callback, or `NULL` to remove the currently set * callback. 
* @return The previously set callback, or `NULL` if no callback was set or the * library had not been [initialized](@ref intro_init). * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. * * @thread_safety This function must only be called from the main thread. * * @sa @ref cursor_pos * * @since Added in version 3.0. Replaces `glfwSetMousePosCallback`. * * @ingroup input */ GLFWAPI GLFWcursorposfun glfwSetCursorPosCallback(GLFWwindow* window, GLFWcursorposfun cbfun); /*! @brief Sets the cursor enter/exit callback. * * This function sets the cursor boundary crossing callback of the specified * window, which is called when the cursor enters or leaves the client area of * the window. * * @param[in] window The window whose callback to set. * @param[in] cbfun The new callback, or `NULL` to remove the currently set * callback. * @return The previously set callback, or `NULL` if no callback was set or the * library had not been [initialized](@ref intro_init). * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. * * @thread_safety This function must only be called from the main thread. * * @sa @ref cursor_enter * * @since Added in version 3.0. * * @ingroup input */ GLFWAPI GLFWcursorenterfun glfwSetCursorEnterCallback(GLFWwindow* window, GLFWcursorenterfun cbfun); /*! @brief Sets the scroll callback. * * This function sets the scroll callback of the specified window, which is * called when a scrolling device is used, such as a mouse wheel or scrolling * area of a touchpad. * * The scroll callback receives all scrolling input, like that from a mouse * wheel or a touchpad scrolling area. * * @param[in] window The window whose callback to set. * @param[in] cbfun The new scroll callback, or `NULL` to remove the currently * set callback. * @return The previously set callback, or `NULL` if no callback was set or the * library had not been [initialized](@ref intro_init). * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. * * @thread_safety This function must only be called from the main thread. * * @sa @ref scrolling * * @since Added in version 3.0. Replaces `glfwSetMouseWheelCallback`. * * @ingroup input */ GLFWAPI GLFWscrollfun glfwSetScrollCallback(GLFWwindow* window, GLFWscrollfun cbfun); /*! @brief Sets the file drop callback. * * This function sets the file drop callback of the specified window, which is * called when one or more dragged files are dropped on the window. * * Because the path array and its strings may have been generated specifically * for that event, they are not guaranteed to be valid after the callback has * returned. If you wish to use them after the callback returns, you need to * make a deep copy. * * @param[in] window The window whose callback to set. * @param[in] cbfun The new file drop callback, or `NULL` to remove the * currently set callback. * @return The previously set callback, or `NULL` if no callback was set or the * library had not been [initialized](@ref intro_init). * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. * * @thread_safety This function must only be called from the main thread. * * @sa @ref path_drop * * @since Added in version 3.1. * * @ingroup input */ GLFWAPI GLFWdropfun glfwSetDropCallback(GLFWwindow* window, GLFWdropfun cbfun); /*! @brief Returns whether the specified joystick is present. * * This function returns whether the specified joystick is present. * * @param[in] joy The [joystick](@ref joysticks) to query. * @return `GLFW_TRUE` if the joystick is present, or `GLFW_FALSE` otherwise. 
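 *
 *  For example, checking the first joystick slot and reading its axes might
 *  look like this sketch:
 *  @code
 *  if (glfwJoystickPresent(GLFW_JOYSTICK_1))
 *  {
 *      int count;
 *      const float* axes = glfwGetJoystickAxes(GLFW_JOYSTICK_1, &count);
 *      // Each of the count values in axes is in the range -1.0 to 1.0
 *  }
 *  @endcode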
* * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref * GLFW_INVALID_ENUM and @ref GLFW_PLATFORM_ERROR. * * @thread_safety This function must only be called from the main thread. * * @sa @ref joystick * * @since Added in version 3.0. Replaces `glfwGetJoystickParam`. * * @ingroup input */ GLFWAPI int glfwJoystickPresent(int joy); /*! @brief Returns the values of all axes of the specified joystick. * * This function returns the values of all axes of the specified joystick. * Each element in the array is a value between -1.0 and 1.0. * * Querying a joystick slot with no device present is not an error, but will * cause this function to return `NULL`. Call @ref glfwJoystickPresent to * check device presence. * * @param[in] joy The [joystick](@ref joysticks) to query. * @param[out] count Where to store the number of axis values in the returned * array. This is set to zero if the joystick is not present or an error * occurred. * @return An array of axis values, or `NULL` if the joystick is not present or * an [error](@ref error_handling) occurred. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref * GLFW_INVALID_ENUM and @ref GLFW_PLATFORM_ERROR. * * @pointer_lifetime The returned array is allocated and freed by GLFW. You * should not free it yourself. It is valid until the specified joystick is * disconnected, this function is called again for that joystick or the library * is terminated. * * @thread_safety This function must only be called from the main thread. * * @sa @ref joystick_axis * * @since Added in version 3.0. Replaces `glfwGetJoystickPos`. * * @ingroup input */ GLFWAPI const float* glfwGetJoystickAxes(int joy, int* count); /*! @brief Returns the state of all buttons of the specified joystick. * * This function returns the state of all buttons of the specified joystick. * Each element in the array is either `GLFW_PRESS` or `GLFW_RELEASE`. * * Querying a joystick slot with no device present is not an error, but will * cause this function to return `NULL`. Call @ref glfwJoystickPresent to * check device presence. * * @param[in] joy The [joystick](@ref joysticks) to query. * @param[out] count Where to store the number of button states in the returned * array. This is set to zero if the joystick is not present or an error * occurred. * @return An array of button states, or `NULL` if the joystick is not present * or an [error](@ref error_handling) occurred. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref * GLFW_INVALID_ENUM and @ref GLFW_PLATFORM_ERROR. * * @pointer_lifetime The returned array is allocated and freed by GLFW. You * should not free it yourself. It is valid until the specified joystick is * disconnected, this function is called again for that joystick or the library * is terminated. * * @thread_safety This function must only be called from the main thread. * * @sa @ref joystick_button * * @since Added in version 2.2. * @glfw3 Changed to return a dynamic array. * * @ingroup input */ GLFWAPI const unsigned char* glfwGetJoystickButtons(int joy, int* count); /*! @brief Returns the name of the specified joystick. * * This function returns the name, encoded as UTF-8, of the specified joystick. * The returned string is allocated and freed by GLFW. You should not free it * yourself. * * Querying a joystick slot with no device present is not an error, but will * cause this function to return `NULL`. Call @ref glfwJoystickPresent to * check device presence. * * @param[in] joy The [joystick](@ref joysticks) to query. 
* @return The UTF-8 encoded name of the joystick, or `NULL` if the joystick * is not present or an [error](@ref error_handling) occurred. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref * GLFW_INVALID_ENUM and @ref GLFW_PLATFORM_ERROR. * * @pointer_lifetime The returned string is allocated and freed by GLFW. You * should not free it yourself. It is valid until the specified joystick is * disconnected, this function is called again for that joystick or the library * is terminated. * * @thread_safety This function must only be called from the main thread. * * @sa @ref joystick_name * * @since Added in version 3.0. * * @ingroup input */ GLFWAPI const char* glfwGetJoystickName(int joy); /*! @brief Sets the joystick configuration callback. * * This function sets the joystick configuration callback, or removes the * currently set callback. This is called when a joystick is connected to or * disconnected from the system. * * @param[in] cbfun The new callback, or `NULL` to remove the currently set * callback. * @return The previously set callback, or `NULL` if no callback was set or the * library had not been [initialized](@ref intro_init). * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. * * @thread_safety This function must only be called from the main thread. * * @sa @ref joystick_event * * @since Added in version 3.2. * * @ingroup input */ GLFWAPI GLFWjoystickfun glfwSetJoystickCallback(GLFWjoystickfun cbfun); /*! @brief Sets the clipboard to the specified string. * * This function sets the system clipboard to the specified, UTF-8 encoded * string. * * @param[in] window The window that will own the clipboard contents. * @param[in] string A UTF-8 encoded string. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_PLATFORM_ERROR. * * @pointer_lifetime The specified string is copied before this function * returns. * * @thread_safety This function must only be called from the main thread. * * @sa @ref clipboard * @sa glfwGetClipboardString * * @since Added in version 3.0. * * @ingroup input */ GLFWAPI void glfwSetClipboardString(GLFWwindow* window, const char* string); /*! @brief Returns the contents of the clipboard as a string. * * This function returns the contents of the system clipboard, if it contains * or is convertible to a UTF-8 encoded string. If the clipboard is empty or * if its contents cannot be converted, `NULL` is returned and a @ref * GLFW_FORMAT_UNAVAILABLE error is generated. * * @param[in] window The window that will request the clipboard contents. * @return The contents of the clipboard as a UTF-8 encoded string, or `NULL` * if an [error](@ref error_handling) occurred. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_PLATFORM_ERROR. * * @pointer_lifetime The returned string is allocated and freed by GLFW. You * should not free it yourself. It is valid until the next call to @ref * glfwGetClipboardString or @ref glfwSetClipboardString, or until the library * is terminated. * * @thread_safety This function must only be called from the main thread. * * @sa @ref clipboard * @sa glfwSetClipboardString * * @since Added in version 3.0. * * @ingroup input */ GLFWAPI const char* glfwGetClipboardString(GLFWwindow* window); /*! @brief Returns the value of the GLFW timer. * * This function returns the value of the GLFW timer. Unless the timer has * been set using @ref glfwSetTime, the timer measures time elapsed since GLFW * was initialized. 
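 *
 *  For example, measuring the time elapsed between frames might look like
 *  this sketch:
 *  @code
 *  double last_time = glfwGetTime();
 *
 *  // Once per frame:
 *  double now = glfwGetTime();
 *  double delta = now - last_time;  // seconds since the previous frame
 *  last_time = now;
 *  @endcode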
* * The resolution of the timer is system dependent, but is usually on the order * of a few micro- or nanoseconds. It uses the highest-resolution monotonic * time source on each supported platform. * * @return The current value, in seconds, or zero if an * [error](@ref error_handling) occurred. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. * * @thread_safety This function may be called from any thread. Reading and * writing of the internal timer offset is not atomic, so it needs to be * externally synchronized with calls to @ref glfwSetTime. * * @sa @ref time * * @since Added in version 1.0. * * @ingroup input */ GLFWAPI double glfwGetTime(void); /*! @brief Sets the GLFW timer. * * This function sets the value of the GLFW timer. It then continues to count * up from that value. The value must be a positive finite number less than * or equal to 18446744073.0, which is approximately 584.5 years. * * @param[in] time The new value, in seconds. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_INVALID_VALUE. * * @remark The upper limit of the timer is calculated as * floor((2<sup>64</sup> - 1) / 10<sup>9</sup>) and is due to implementations * storing nanoseconds in 64 bits. The limit may be increased in the future. * * @thread_safety This function may be called from any thread. Reading and * writing of the internal timer offset is not atomic, so it needs to be * externally synchronized with calls to @ref glfwGetTime. * * @sa @ref time * * @since Added in version 2.2. * * @ingroup input */ GLFWAPI void glfwSetTime(double time); /*! @brief Returns the current value of the raw timer. * * This function returns the current value of the raw timer, measured in * 1&nbsp;/&nbsp;frequency seconds. To get the frequency, call @ref * glfwGetTimerFrequency. * * @return The value of the timer, or zero if an * [error](@ref error_handling) occurred. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. * * @thread_safety This function may be called from any thread. * * @sa @ref time * @sa glfwGetTimerFrequency * * @since Added in version 3.2. * * @ingroup input */ GLFWAPI uint64_t glfwGetTimerValue(void); /*! @brief Returns the frequency, in Hz, of the raw timer. * * This function returns the frequency, in Hz, of the raw timer. * * @return The frequency of the timer, in Hz, or zero if an * [error](@ref error_handling) occurred. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. * * @thread_safety This function may be called from any thread. * * @sa @ref time * @sa glfwGetTimerValue * * @since Added in version 3.2. * * @ingroup input */ GLFWAPI uint64_t glfwGetTimerFrequency(void); /*! @brief Makes the context of the specified window current for the calling * thread. * * This function makes the OpenGL or OpenGL ES context of the specified window * current on the calling thread. A context can only be made current on * a single thread at a time and each thread can have only a single current * context at a time. * * By default, making a context non-current implicitly forces a pipeline flush. * On machines that support `GL_KHR_context_flush_control`, you can control * whether a context performs this flush by setting the * [GLFW_CONTEXT_RELEASE_BEHAVIOR](@ref window_hints_ctx) window hint. * * The specified window must have an OpenGL or OpenGL ES context. Specifying * a window without a context will generate a @ref GLFW_NO_WINDOW_CONTEXT * error. * * @param[in] window The window whose context to make current, or `NULL` to * detach the current context. 
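 *
 *  For example, the context is typically made current once after window
 *  creation, before any OpenGL or OpenGL ES calls are issued, as in this
 *  sketch:
 *  @code
 *  glfwMakeContextCurrent(window);
 *  glfwSwapInterval(1);  // the swap interval applies to the current context
 *  @endcode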
* * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref * GLFW_NO_WINDOW_CONTEXT and @ref GLFW_PLATFORM_ERROR. * * @thread_safety This function may be called from any thread. * * @sa @ref context_current * @sa glfwGetCurrentContext * * @since Added in version 3.0. * * @ingroup context */ GLFWAPI void glfwMakeContextCurrent(GLFWwindow* window); /*! @brief Returns the window whose context is current on the calling thread. * * This function returns the window whose OpenGL or OpenGL ES context is * current on the calling thread. * * @return The window whose context is current, or `NULL` if no window's * context is current. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. * * @thread_safety This function may be called from any thread. * * @sa @ref context_current * @sa glfwMakeContextCurrent * * @since Added in version 3.0. * * @ingroup context */ GLFWAPI GLFWwindow* glfwGetCurrentContext(void); /*! @brief Swaps the front and back buffers of the specified window. * * This function swaps the front and back buffers of the specified window when * rendering with OpenGL or OpenGL ES. If the swap interval is greater than * zero, the GPU driver waits the specified number of screen updates before * swapping the buffers. * * The specified window must have an OpenGL or OpenGL ES context. Specifying * a window without a context will generate a @ref GLFW_NO_WINDOW_CONTEXT * error. * * This function does not apply to Vulkan. If you are rendering with Vulkan, * see `vkQueuePresentKHR` instead. * * @param[in] window The window whose buffers to swap. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref * GLFW_NO_WINDOW_CONTEXT and @ref GLFW_PLATFORM_ERROR. * * @remark __EGL:__ The context of the specified window must be current on the * calling thread. * * @thread_safety This function may be called from any thread. * * @sa @ref buffer_swap * @sa glfwSwapInterval * * @since Added in version 1.0. * @glfw3 Added window handle parameter. * * @ingroup window */ GLFWAPI void glfwSwapBuffers(GLFWwindow* window); /*! @brief Sets the swap interval for the current context. * * This function sets the swap interval for the current OpenGL or OpenGL ES * context, i.e. the number of screen updates to wait from the time @ref * glfwSwapBuffers was called before swapping the buffers and returning. This * is sometimes called _vertical synchronization_, _vertical retrace * synchronization_ or just _vsync_. * * Contexts that support either of the `WGL_EXT_swap_control_tear` and * `GLX_EXT_swap_control_tear` extensions also accept negative swap intervals, * which allow the driver to swap even if a frame arrives a little bit late. * You can check for the presence of these extensions using @ref * glfwExtensionSupported. For more information about swap tearing, see the * extension specifications. * * A context must be current on the calling thread. Calling this function * without a current context will cause a @ref GLFW_NO_CURRENT_CONTEXT error. * * This function does not apply to Vulkan. If you are rendering with Vulkan, * see the present mode of your swapchain instead. * * @param[in] interval The minimum number of screen updates to wait for * until the buffers are swapped by @ref glfwSwapBuffers. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref * GLFW_NO_CURRENT_CONTEXT and @ref GLFW_PLATFORM_ERROR. * * @remark This function is not called during context creation, leaving the * swap interval set to whatever is the default on that platform. 
This is done * because some swap interval extensions used by GLFW do not allow the swap * interval to be reset to zero once it has been set to a non-zero value. * * @remark Some GPU drivers do not honor the requested swap interval, either * because of a user setting that overrides the application's request or due to * bugs in the driver. * * @thread_safety This function may be called from any thread. * * @sa @ref buffer_swap * @sa glfwSwapBuffers * * @since Added in version 1.0. * * @ingroup context */ GLFWAPI void glfwSwapInterval(int interval); /*! @brief Returns whether the specified extension is available. * * This function returns whether the specified * [API extension](@ref context_glext) is supported by the current OpenGL or * OpenGL ES context. It searches both for client API extensions and context * creation API extensions. * * A context must be current on the calling thread. Calling this function * without a current context will cause a @ref GLFW_NO_CURRENT_CONTEXT error. * * As this function retrieves and searches one or more extension strings each * call, it is recommended that you cache its results if it is going to be used * frequently. The extension strings will not change during the lifetime of * a context, so there is no danger in doing this. * * This function does not apply to Vulkan. If you are using Vulkan, see @ref * glfwGetRequiredInstanceExtensions, `vkEnumerateInstanceExtensionProperties` * and `vkEnumerateDeviceExtensionProperties` instead. * * @param[in] extension The ASCII encoded name of the extension. * @return `GLFW_TRUE` if the extension is available, or `GLFW_FALSE` * otherwise. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref * GLFW_NO_CURRENT_CONTEXT, @ref GLFW_INVALID_VALUE and @ref * GLFW_PLATFORM_ERROR. * * @thread_safety This function may be called from any thread. * * @sa @ref context_glext * @sa glfwGetProcAddress * * @since Added in version 1.0. * * @ingroup context */ GLFWAPI int glfwExtensionSupported(const char* extension); /*! @brief Returns the address of the specified function for the current * context. * * This function returns the address of the specified OpenGL or OpenGL ES * [core or extension function](@ref context_glext), if it is supported * by the current context. * * A context must be current on the calling thread. Calling this function * without a current context will cause a @ref GLFW_NO_CURRENT_CONTEXT error. * * This function does not apply to Vulkan. If you are rendering with Vulkan, * see @ref glfwGetInstanceProcAddress, `vkGetInstanceProcAddr` and * `vkGetDeviceProcAddr` instead. * * @param[in] procname The ASCII encoded name of the function. * @return The address of the function, or `NULL` if an * [error](@ref error_handling) occurred. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref * GLFW_NO_CURRENT_CONTEXT and @ref GLFW_PLATFORM_ERROR. * * @remark The address of a given function is not guaranteed to be the same * between contexts. * * @remark This function may return a non-`NULL` address despite the * associated version or extension not being available. Always check the * context version or extension string first. * * @pointer_lifetime The returned function pointer is valid until the context * is destroyed or the library is terminated. * * @thread_safety This function may be called from any thread. * * @sa @ref context_glext * @sa glfwExtensionSupported * * @since Added in version 1.0. * * @ingroup context */ GLFWAPI GLFWglproc glfwGetProcAddress(const char* procname); /*!
@brief Returns whether the Vulkan loader has been found. * * This function returns whether the Vulkan loader has been found. This check * is performed by @ref glfwInit. * * The availability of a Vulkan loader does not by itself guarantee that window * surface creation or even device creation is possible. Call @ref * glfwGetRequiredInstanceExtensions to check whether the extensions necessary * for Vulkan surface creation are available and @ref * glfwGetPhysicalDevicePresentationSupport to check whether a queue family of * a physical device supports image presentation. * * @return `GLFW_TRUE` if Vulkan is available, or `GLFW_FALSE` otherwise. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. * * @thread_safety This function may be called from any thread. * * @sa @ref vulkan_support * * @since Added in version 3.2. * * @ingroup vulkan */ GLFWAPI int glfwVulkanSupported(void); /*! @brief Returns the Vulkan instance extensions required by GLFW. * * This function returns an array of names of Vulkan instance extensions required * by GLFW for creating Vulkan surfaces for GLFW windows. If successful, the * list will always contain `VK_KHR_surface`, so if you don't require any * additional extensions you can pass this list directly to the * `VkInstanceCreateInfo` struct. * * If Vulkan is not available on the machine, this function returns `NULL` and * generates a @ref GLFW_API_UNAVAILABLE error. Call @ref glfwVulkanSupported * to check whether Vulkan is available. * * If Vulkan is available but no set of extensions allowing window surface * creation was found, this function returns `NULL`. You may still use Vulkan * for off-screen rendering and compute work. * * @param[out] count Where to store the number of extensions in the returned * array. This is set to zero if an error occurred. * @return An array of ASCII encoded extension names, or `NULL` if an * [error](@ref error_handling) occurred. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_API_UNAVAILABLE. * * @remarks Additional extensions may be required by future versions of GLFW. * You should check if any extensions you wish to enable are already in the * returned array, as it is an error to specify an extension more than once in * the `VkInstanceCreateInfo` struct. * * @pointer_lifetime The returned array is allocated and freed by GLFW. You * should not free it yourself. It is guaranteed to be valid only until the * library is terminated. * * @thread_safety This function may be called from any thread. * * @sa @ref vulkan_ext * @sa glfwCreateWindowSurface * * @since Added in version 3.2. * * @ingroup vulkan */ GLFWAPI const char** glfwGetRequiredInstanceExtensions(uint32_t* count); #if defined(VK_VERSION_1_0) /*! @brief Returns the address of the specified Vulkan instance function. * * This function returns the address of the specified Vulkan core or extension * function for the specified instance. If instance is set to `NULL` it can * return any function exported from the Vulkan loader, including at least the * following functions: * * - `vkEnumerateInstanceExtensionProperties` * - `vkEnumerateInstanceLayerProperties` * - `vkCreateInstance` * - `vkGetInstanceProcAddr` * * If Vulkan is not available on the machine, this function returns `NULL` and * generates a @ref GLFW_API_UNAVAILABLE error. Call @ref glfwVulkanSupported * to check whether Vulkan is available. * * This function is equivalent to calling `vkGetInstanceProcAddr` with * a platform-specific query of the Vulkan loader as a fallback.
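 *
 *  For example, retrieving `vkCreateInstance` before a Vulkan instance
 *  exists might look like this sketch:
 *  @code
 *  PFN_vkCreateInstance pfnCreateInstance = (PFN_vkCreateInstance)
 *      glfwGetInstanceProcAddress(NULL, "vkCreateInstance");
 *  @endcode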
* * @param[in] instance The Vulkan instance to query, or `NULL` to retrieve * functions related to instance creation. * @param[in] procname The ASCII encoded name of the function. * @return The address of the function, or `NULL` if an * [error](@ref error_handling) occurred. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref * GLFW_API_UNAVAILABLE. * * @pointer_lifetime The returned function pointer is valid until the library * is terminated. * * @thread_safety This function may be called from any thread. * * @sa @ref vulkan_proc * * @since Added in version 3.2. * * @ingroup vulkan */ GLFWAPI GLFWvkproc glfwGetInstanceProcAddress(VkInstance instance, const char* procname); /*! @brief Returns whether the specified queue family can present images. * * This function returns whether the specified queue family of the specified * physical device supports presentation to the platform GLFW was built for. * * If Vulkan or the required window surface creation instance extensions are * not available on the machine, or if the specified instance was not created * with the required extensions, this function returns `GLFW_FALSE` and * generates a @ref GLFW_API_UNAVAILABLE error. Call @ref glfwVulkanSupported * to check whether Vulkan is available and @ref * glfwGetRequiredInstanceExtensions to check what instance extensions are * required. * * @param[in] instance The instance that the physical device belongs to. * @param[in] device The physical device that the queue family belongs to. * @param[in] queuefamily The index of the queue family to query. * @return `GLFW_TRUE` if the queue family supports presentation, or * `GLFW_FALSE` otherwise. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref * GLFW_API_UNAVAILABLE and @ref GLFW_PLATFORM_ERROR. * * @thread_safety This function may be called from any thread. For * synchronization details of Vulkan objects, see the Vulkan specification. * * @sa @ref vulkan_present * * @since Added in version 3.2. * * @ingroup vulkan */ GLFWAPI int glfwGetPhysicalDevicePresentationSupport(VkInstance instance, VkPhysicalDevice device, uint32_t queuefamily); /*! @brief Creates a Vulkan surface for the specified window. * * This function creates a Vulkan surface for the specified window. * * If the Vulkan loader was not found at initialization, this function returns * `VK_ERROR_INITIALIZATION_FAILED` and generates a @ref GLFW_API_UNAVAILABLE * error. Call @ref glfwVulkanSupported to check whether the Vulkan loader was * found. * * If the required window surface creation instance extensions are not * available or if the specified instance was not created with these extensions * enabled, this function returns `VK_ERROR_EXTENSION_NOT_PRESENT` and * generates a @ref GLFW_API_UNAVAILABLE error. Call @ref * glfwGetRequiredInstanceExtensions to check what instance extensions are * required. * * The window surface must be destroyed before the specified Vulkan instance. * It is the responsibility of the caller to destroy the window surface. GLFW * does not destroy it for you. Call `vkDestroySurfaceKHR` to destroy the * surface. * * @param[in] instance The Vulkan instance to create the surface in. * @param[in] window The window to create the surface for. * @param[in] allocator The allocator to use, or `NULL` to use the default * allocator. * @param[out] surface Where to store the handle of the surface. This is set * to `VK_NULL_HANDLE` if an error occurred. 
* @return `VK_SUCCESS` if successful, or a Vulkan error code if an * [error](@ref error_handling) occurred. * * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref * GLFW_API_UNAVAILABLE and @ref GLFW_PLATFORM_ERROR. * * @remarks If an error occurs before the creation call is made, GLFW returns * the Vulkan error code most appropriate for the error. Appropriate use of * @ref glfwVulkanSupported and @ref glfwGetRequiredInstanceExtensions should * eliminate almost all occurrences of these errors. * * @thread_safety This function may be called from any thread. For * synchronization details of Vulkan objects, see the Vulkan specification. * * @sa @ref vulkan_surface * @sa glfwGetRequiredInstanceExtensions * * @since Added in version 3.2. * * @ingroup vulkan */ GLFWAPI VkResult glfwCreateWindowSurface(VkInstance instance, GLFWwindow* window, const VkAllocationCallbacks* allocator, VkSurfaceKHR* surface); #endif /*VK_VERSION_1_0*/ /************************************************************************* * Global definition cleanup *************************************************************************/ /* ------------------- BEGIN SYSTEM/COMPILER SPECIFIC -------------------- */ #ifdef GLFW_WINGDIAPI_DEFINED #undef WINGDIAPI #undef GLFW_WINGDIAPI_DEFINED #endif #ifdef GLFW_CALLBACK_DEFINED #undef CALLBACK #undef GLFW_CALLBACK_DEFINED #endif /* -------------------- END SYSTEM/COMPILER SPECIFIC --------------------- */ #ifdef __cplusplus } #endif #endif /* _glfw3_h_ */
NVIDIA-Omniverse/PhysX/flow/external/imgui/imgui.h
// dear imgui, v1.72b // (headers) // See imgui.cpp file for documentation. // Call and read ImGui::ShowDemoWindow() in imgui_demo.cpp for demo code. // Newcomers, read 'Programmer guide' in imgui.cpp for notes on how to setup Dear ImGui in your codebase. // Get latest version at https://github.com/ocornut/imgui /* Index of this file: // Header mess // Forward declarations and basic types // ImGui API (Dear ImGui end-user API) // Flags & Enumerations // Memory allocations macros // ImVector<> // ImGuiStyle // ImGuiIO // Misc data structures (ImGuiInputTextCallbackData, ImGuiSizeCallbackData, ImGuiPayload) // Obsolete functions // Helpers (ImGuiOnceUponAFrame, ImGuiTextFilter, ImGuiTextBuffer, ImGuiStorage, ImGuiListClipper, ImColor) // Draw List API (ImDrawCallback, ImDrawCmd, ImDrawIdx, ImDrawVert, ImDrawChannel, ImDrawListSplitter, ImDrawListFlags, ImDrawList, ImDrawData) // Font API (ImFontConfig, ImFontGlyph, ImFontGlyphRangesBuilder, ImFontAtlasFlags, ImFontAtlas, ImFont) */ #pragma once // Configuration file with compile-time options (edit imconfig.h or define IMGUI_USER_CONFIG to your own filename) #ifdef IMGUI_USER_CONFIG #include IMGUI_USER_CONFIG #endif #if !defined(IMGUI_DISABLE_INCLUDE_IMCONFIG_H) || defined(IMGUI_INCLUDE_IMCONFIG_H) #include "imconfig.h" #endif //----------------------------------------------------------------------------- // Header mess //----------------------------------------------------------------------------- #include <float.h> // FLT_MAX #include <stdarg.h> // va_list #include <stddef.h> // ptrdiff_t, NULL #include <string.h> // memset, memmove, memcpy, strlen, strchr, strcpy, strcmp // Version // (Integer encoded as XYYZZ for use in #if preprocessor conditionals. Work in progress versions typically starts at XYY99 then bounce up to XYY00, XYY01 etc. when release tagging happens) #define IMGUI_VERSION "1.72b" #define IMGUI_VERSION_NUM 17202 #define IMGUI_CHECKVERSION() ImGui::DebugCheckVersionAndDataLayout(IMGUI_VERSION, sizeof(ImGuiIO), sizeof(ImGuiStyle), sizeof(ImVec2), sizeof(ImVec4), sizeof(ImDrawVert), sizeof(ImDrawIdx)) // Define attributes of all API symbols declarations (e.g. for DLL under Windows) // IMGUI_API is used for core imgui functions, IMGUI_IMPL_API is used for the default bindings files (imgui_impl_xxx.h) // Using dear imgui via a shared library is not recommended, because of function call overhead and because we don't guarantee backward nor forward ABI compatibility. #ifndef IMGUI_API #define IMGUI_API #endif #ifndef IMGUI_IMPL_API #define IMGUI_IMPL_API IMGUI_API #endif // Helper Macros #ifndef IM_ASSERT #include <assert.h> #define IM_ASSERT(_EXPR) assert(_EXPR) // You can override the default assert handler by editing imconfig.h #endif #if defined(__clang__) || defined(__GNUC__) #define IM_FMTARGS(FMT) __attribute__((format(printf, FMT, FMT+1))) // Apply printf-style warnings to user functions. #define IM_FMTLIST(FMT) __attribute__((format(printf, FMT, 0))) #else #define IM_FMTARGS(FMT) #define IM_FMTLIST(FMT) #endif #define IM_ARRAYSIZE(_ARR) ((int)(sizeof(_ARR)/sizeof(*_ARR))) // Size of a static C-style array. Don't use on pointers! #define IM_OFFSETOF(_TYPE,_MEMBER) ((size_t)&(((_TYPE*)0)->_MEMBER)) // Offset of _MEMBER within _TYPE. Standardized as offsetof() in modern C++. #define IM_UNUSED(_VAR) ((void)_VAR) // Used to silence "unused variable warnings". Often useful as asserts may be stripped out from final builds. 
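// Usage sketch for the helper macros above (illustrative only; 'items' and 'current' are hypothetical application-side variables, and Combo() is declared further down in this header):
//   static const char* items[] = { "AAAA", "BBBB", "CCCC" };
//   static int current = 0;
//   ImGui::Combo("Items", &current, items, IM_ARRAYSIZE(items));   // IM_ARRAYSIZE only works on true arrays, not on pointers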
// Warnings #if defined(__clang__) #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wold-style-cast" #if __has_warning("-Wzero-as-null-pointer-constant") #pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant" #endif #elif defined(__GNUC__) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wpragmas" // warning: unknown option after '#pragma GCC diagnostic' kind #pragma GCC diagnostic ignored "-Wclass-memaccess" // [__GNUC__ >= 8] warning: 'memset/memcpy' clearing/writing an object of type 'xxxx' with no trivial copy-assignment; use assignment or value-initialization instead #endif //----------------------------------------------------------------------------- // Forward declarations and basic types //----------------------------------------------------------------------------- struct ImDrawChannel; // Temporary storage to output draw commands out of order, used by ImDrawListSplitter and ImDrawList::ChannelsSplit() struct ImDrawCmd; // A single draw command within a parent ImDrawList (generally maps to 1 GPU draw call, unless it is a callback) struct ImDrawData; // All draw command lists required to render the frame + pos/size coordinates to use for the projection matrix. struct ImDrawList; // A single draw command list (generally one per window, conceptually you may see this as a dynamic "mesh" builder) struct ImDrawListSharedData; // Data shared among multiple draw lists (typically owned by parent ImGui context, but you may create one yourself) struct ImDrawListSplitter; // Helper to split a draw list into different layers which can be drawn into out of order, then flattened back. struct ImDrawVert; // A single vertex (pos + uv + col = 20 bytes by default. Override layout with IMGUI_OVERRIDE_DRAWVERT_STRUCT_LAYOUT) struct ImFont; // Runtime data for a single font within a parent ImFontAtlas struct ImFontAtlas; // Runtime data for multiple fonts, bake multiple fonts into a single texture, TTF/OTF font loader struct ImFontConfig; // Configuration data when adding a font or merging fonts struct ImFontGlyph; // A single font glyph (code point + coordinates within in ImFontAtlas + offset) struct ImFontGlyphRangesBuilder; // Helper to build glyph ranges from text/string data struct ImColor; // Helper functions to create a color that can be converted to either u32 or float4 (*OBSOLETE* please avoid using) struct ImGuiContext; // Dear ImGui context (opaque structure, unless including imgui_internal.h) struct ImGuiIO; // Main configuration and I/O between your application and ImGui struct ImGuiInputTextCallbackData; // Shared state of InputText() when using custom ImGuiInputTextCallback (rare/advanced use) struct ImGuiListClipper; // Helper to manually clip large list of items struct ImGuiOnceUponAFrame; // Helper for running a block of code not more than once a frame, used by IMGUI_ONCE_UPON_A_FRAME macro struct ImGuiPayload; // User data payload for drag and drop operations struct ImGuiSizeCallbackData; // Callback data when using SetNextWindowSizeConstraints() (rare/advanced use) struct ImGuiStorage; // Helper for key->value storage struct ImGuiStyle; // Runtime data for styling/colors struct ImGuiTextBuffer; // Helper to hold and append into a text buffer (~string builder) struct ImGuiTextFilter; // Helper to parse and apply text filters (e.g. 
"aaaaa[,bbbb][,ccccc]") // Typedefs and Enums/Flags (declared as int for compatibility with old C++, to allow using as flags and to not pollute the top of this file) // Use your programming IDE "Go to definition" facility on the names of the center columns to find the actual flags/enum lists. #ifndef ImTextureID typedef void* ImTextureID; // User data to identify a texture (this is whatever to you want it to be! read the FAQ about ImTextureID in imgui.cpp) #endif typedef unsigned int ImGuiID; // Unique ID used by widgets (typically hashed from a stack of string) typedef unsigned short ImWchar; // A single U16 character for keyboard input/display. We encode them as multi bytes UTF-8 when used in strings. typedef int ImGuiCol; // -> enum ImGuiCol_ // Enum: A color identifier for styling typedef int ImGuiCond; // -> enum ImGuiCond_ // Enum: A condition for many Set*() functions typedef int ImGuiDataType; // -> enum ImGuiDataType_ // Enum: A primary data type typedef int ImGuiDir; // -> enum ImGuiDir_ // Enum: A cardinal direction typedef int ImGuiKey; // -> enum ImGuiKey_ // Enum: A key identifier (ImGui-side enum) typedef int ImGuiNavInput; // -> enum ImGuiNavInput_ // Enum: An input identifier for navigation typedef int ImGuiMouseCursor; // -> enum ImGuiMouseCursor_ // Enum: A mouse cursor identifier typedef int ImGuiStyleVar; // -> enum ImGuiStyleVar_ // Enum: A variable identifier for styling typedef int ImDrawCornerFlags; // -> enum ImDrawCornerFlags_ // Flags: for ImDrawList::AddRect(), AddRectFilled() etc. typedef int ImDrawListFlags; // -> enum ImDrawListFlags_ // Flags: for ImDrawList typedef int ImFontAtlasFlags; // -> enum ImFontAtlasFlags_ // Flags: for ImFontAtlas typedef int ImGuiBackendFlags; // -> enum ImGuiBackendFlags_ // Flags: for io.BackendFlags typedef int ImGuiColorEditFlags; // -> enum ImGuiColorEditFlags_ // Flags: for ColorEdit4(), ColorPicker4() etc. typedef int ImGuiConfigFlags; // -> enum ImGuiConfigFlags_ // Flags: for io.ConfigFlags typedef int ImGuiComboFlags; // -> enum ImGuiComboFlags_ // Flags: for BeginCombo() typedef int ImGuiDragDropFlags; // -> enum ImGuiDragDropFlags_ // Flags: for BeginDragDropSource(), AcceptDragDropPayload() typedef int ImGuiFocusedFlags; // -> enum ImGuiFocusedFlags_ // Flags: for IsWindowFocused() typedef int ImGuiHoveredFlags; // -> enum ImGuiHoveredFlags_ // Flags: for IsItemHovered(), IsWindowHovered() etc. 
typedef int ImGuiInputTextFlags; // -> enum ImGuiInputTextFlags_ // Flags: for InputText(), InputTextMultiline() typedef int ImGuiSelectableFlags; // -> enum ImGuiSelectableFlags_ // Flags: for Selectable() typedef int ImGuiTabBarFlags; // -> enum ImGuiTabBarFlags_ // Flags: for BeginTabBar() typedef int ImGuiTabItemFlags; // -> enum ImGuiTabItemFlags_ // Flags: for BeginTabItem() typedef int ImGuiTreeNodeFlags; // -> enum ImGuiTreeNodeFlags_ // Flags: for TreeNode(), TreeNodeEx(), CollapsingHeader() typedef int ImGuiWindowFlags; // -> enum ImGuiWindowFlags_ // Flags: for Begin(), BeginChild() typedef int (*ImGuiInputTextCallback)(ImGuiInputTextCallbackData *data); typedef void (*ImGuiSizeCallback)(ImGuiSizeCallbackData* data); // Scalar data types typedef signed char ImS8; // 8-bit signed integer == char typedef unsigned char ImU8; // 8-bit unsigned integer typedef signed short ImS16; // 16-bit signed integer typedef unsigned short ImU16; // 16-bit unsigned integer typedef signed int ImS32; // 32-bit signed integer == int typedef unsigned int ImU32; // 32-bit unsigned integer (often used to store packed colors) #if defined(_MSC_VER) && !defined(__clang__) typedef signed __int64 ImS64; // 64-bit signed integer (pre and post C++11 with Visual Studio) typedef unsigned __int64 ImU64; // 64-bit unsigned integer (pre and post C++11 with Visual Studio) #elif (defined(__clang__) || defined(__GNUC__)) && (__cplusplus < 201100) #include <stdint.h> typedef int64_t ImS64; // 64-bit signed integer (pre C++11) typedef uint64_t ImU64; // 64-bit unsigned integer (pre C++11) #else typedef signed long long ImS64; // 64-bit signed integer (post C++11) typedef unsigned long long ImU64; // 64-bit unsigned integer (post C++11) #endif // 2D vector (often used to store positions, sizes, etc.) struct ImVec2 { float x, y; ImVec2() { x = y = 0.0f; } ImVec2(float _x, float _y) { x = _x; y = _y; } float operator[] (size_t idx) const { IM_ASSERT(idx <= 1); return (&x)[idx]; } // We very rarely use this [] operator, the assert overhead is fine. float& operator[] (size_t idx) { IM_ASSERT(idx <= 1); return (&x)[idx]; } // We very rarely use this [] operator, the assert overhead is fine. #ifdef IM_VEC2_CLASS_EXTRA IM_VEC2_CLASS_EXTRA // Define additional constructors and implicit cast operators in imconfig.h to convert back and forth between your math types and ImVec2. #endif }; // 4D vector (often used to store floating-point colors) struct ImVec4 { float x, y, z, w; ImVec4() { x = y = z = w = 0.0f; } ImVec4(float _x, float _y, float _z, float _w) { x = _x; y = _y; z = _z; w = _w; } #ifdef IM_VEC4_CLASS_EXTRA IM_VEC4_CLASS_EXTRA // Define additional constructors and implicit cast operators in imconfig.h to convert back and forth between your math types and ImVec4. #endif }; //----------------------------------------------------------------------------- // ImGui: Dear ImGui end-user API // (Inside a namespace so you can add extra functions in your own separate file. Please don't modify imgui.cpp/.h!) //----------------------------------------------------------------------------- namespace ImGui { // Context creation and access // Each context create its own ImFontAtlas by default. You may instance one yourself and pass it to CreateContext() to share a font atlas between imgui contexts. // All those functions are not reliant on the current context. 
IMGUI_API ImGuiContext* CreateContext(ImFontAtlas* shared_font_atlas = NULL); IMGUI_API void DestroyContext(ImGuiContext* ctx = NULL); // NULL = destroy current context IMGUI_API ImGuiContext* GetCurrentContext(); IMGUI_API void SetCurrentContext(ImGuiContext* ctx); IMGUI_API bool DebugCheckVersionAndDataLayout(const char* version_str, size_t sz_io, size_t sz_style, size_t sz_vec2, size_t sz_vec4, size_t sz_drawvert, size_t sz_drawidx); // Main IMGUI_API ImGuiIO& GetIO(); // access the IO structure (mouse/keyboard/gamepad inputs, time, various configuration options/flags) IMGUI_API ImGuiStyle& GetStyle(); // access the Style structure (colors, sizes). Always use PushStyleCol(), PushStyleVar() to modify style mid-frame. IMGUI_API void NewFrame(); // start a new Dear ImGui frame, you can submit any command from this point until Render()/EndFrame(). IMGUI_API void EndFrame(); // ends the Dear ImGui frame. automatically called by Render(), you likely don't need to call that yourself directly. If you don't need to render data (skipping rendering) you may call EndFrame() but you'll have wasted CPU already! If you don't need to render, better to not create any imgui windows and not call NewFrame() at all! IMGUI_API void Render(); // ends the Dear ImGui frame, finalize the draw data. You can get call GetDrawData() to obtain it and run your rendering function. (Obsolete: this used to call io.RenderDrawListsFn(). Nowadays, we allow and prefer calling your render function yourself.) IMGUI_API ImDrawData* GetDrawData(); // valid after Render() and until the next call to NewFrame(). this is what you have to render. // Demo, Debug, Information IMGUI_API void ShowDemoWindow(bool* p_open = NULL); // create Demo window (previously called ShowTestWindow). demonstrate most ImGui features. call this to learn about the library! try to make it always available in your application! IMGUI_API void ShowAboutWindow(bool* p_open = NULL); // create About window. display Dear ImGui version, credits and build/system information. IMGUI_API void ShowMetricsWindow(bool* p_open = NULL); // create Metrics/Debug window. display Dear ImGui internals: draw commands (with individual draw calls and vertices), window list, basic internal state, etc. IMGUI_API void ShowStyleEditor(ImGuiStyle* ref = NULL); // add style editor block (not a window). you can pass in a reference ImGuiStyle structure to compare to, revert to and save to (else it uses the default style) IMGUI_API bool ShowStyleSelector(const char* label); // add style selector block (not a window), essentially a combo listing the default styles. IMGUI_API void ShowFontSelector(const char* label); // add font selector block (not a window), essentially a combo listing the loaded fonts. IMGUI_API void ShowUserGuide(); // add basic help/info block (not a window): how to manipulate ImGui as a end-user (mouse/keyboard controls). IMGUI_API const char* GetVersion(); // get the compiled version string e.g. "1.23" (essentially the compiled value for IMGUI_VERSION) // Styles IMGUI_API void StyleColorsDark(ImGuiStyle* dst = NULL); // new, recommended style (default) IMGUI_API void StyleColorsClassic(ImGuiStyle* dst = NULL); // classic imgui style IMGUI_API void StyleColorsLight(ImGuiStyle* dst = NULL); // best used with borders and a custom, thicker font // Windows // - Begin() = push window to the stack and start appending to it. End() = pop window from the stack. // - You may append multiple times to the same window during the same frame. 
// - Passing 'bool* p_open != NULL' shows a window-closing widget in the upper-right corner of the window,
//   which, when clicked, will set the boolean to false.
// - Begin() returns false to indicate the window is collapsed or fully clipped, so you may early out and omit submitting
//   anything to the window. Always call a matching End() for each Begin() call, regardless of its return value!
//   [this is due to legacy reasons and is inconsistent with most other functions such as BeginMenu/EndMenu, BeginPopup/EndPopup, etc.
//   where the EndXXX call should only be called if the corresponding BeginXXX function returned true.]
// - Note that the bottom of the window stack always contains a window called "Debug".
IMGUI_API bool Begin(const char* name, bool* p_open = NULL, ImGuiWindowFlags flags = 0);
IMGUI_API void End();
// Child Windows
// - Use child windows to begin into a self-contained independent scrolling/clipping region within a host window. Child windows can embed their own child.
// - For each independent axis of 'size': ==0.0f: use remaining host window size / >0.0f: fixed size / <0.0f: use remaining window size minus abs(size) / Each axis can use a different mode, e.g. ImVec2(0,400).
// - BeginChild() returns false to indicate the window is collapsed or fully clipped, so you may early out and omit submitting anything to the window.
//   Always call a matching EndChild() for each BeginChild() call, regardless of its return value [this is due to legacy reasons and is inconsistent with most other functions such as BeginMenu/EndMenu, BeginPopup/EndPopup, etc. where the EndXXX call should only be called if the corresponding BeginXXX function returned true.]
IMGUI_API bool BeginChild(const char* str_id, const ImVec2& size = ImVec2(0,0), bool border = false, ImGuiWindowFlags flags = 0);
IMGUI_API bool BeginChild(ImGuiID id, const ImVec2& size = ImVec2(0,0), bool border = false, ImGuiWindowFlags flags = 0);
IMGUI_API void EndChild();
// Windows Utilities
// - "current window" = the window we are appending into while inside a Begin()/End() block. "next window" = next window we will Begin() into.
IMGUI_API bool IsWindowAppearing();
IMGUI_API bool IsWindowCollapsed();
IMGUI_API bool IsWindowFocused(ImGuiFocusedFlags flags=0); // is current window focused? or its root/child, depending on flags. see flags for options.
IMGUI_API bool IsWindowHovered(ImGuiHoveredFlags flags=0); // is current window hovered (and typically: not blocked by a popup/modal)? see flags for options. NB: If you are trying to check whether your mouse should be dispatched to imgui or to your app, you should use the 'io.WantCaptureMouse' boolean for that! Please read the FAQ!
IMGUI_API ImDrawList* GetWindowDrawList(); // get draw list associated to the current window, to append your own drawing primitives
IMGUI_API ImVec2 GetWindowPos(); // get current window position in screen space (useful if you want to do your own drawing via the DrawList API)
IMGUI_API ImVec2 GetWindowSize(); // get current window size
IMGUI_API float GetWindowWidth(); // get current window width (shortcut for GetWindowSize().x)
IMGUI_API float GetWindowHeight(); // get current window height (shortcut for GetWindowSize().y)
// Prefer using SetNextXXX functions (before Begin) rather than SetXXX functions (after Begin).
IMGUI_API void SetNextWindowPos(const ImVec2& pos, ImGuiCond cond = 0, const ImVec2& pivot = ImVec2(0,0)); // set next window position. call before Begin(). use pivot=(0.5f,0.5f) to center on given point, etc.
IMGUI_API void SetNextWindowSize(const ImVec2& size, ImGuiCond cond = 0); // set next window size. set axis to 0.0f to force an auto-fit on this axis. call before Begin() IMGUI_API void SetNextWindowSizeConstraints(const ImVec2& size_min, const ImVec2& size_max, ImGuiSizeCallback custom_callback = NULL, void* custom_callback_data = NULL); // set next window size limits. use -1,-1 on either X/Y axis to preserve the current size. Sizes will be rounded down. Use callback to apply non-trivial programmatic constraints. IMGUI_API void SetNextWindowContentSize(const ImVec2& size); // set next window content size (~ scrollable client area, which enforce the range of scrollbars). Not including window decorations (title bar, menu bar, etc.) nor WindowPadding. set an axis to 0.0f to leave it automatic. call before Begin() IMGUI_API void SetNextWindowCollapsed(bool collapsed, ImGuiCond cond = 0); // set next window collapsed state. call before Begin() IMGUI_API void SetNextWindowFocus(); // set next window to be focused / top-most. call before Begin() IMGUI_API void SetNextWindowBgAlpha(float alpha); // set next window background color alpha. helper to easily modify ImGuiCol_WindowBg/ChildBg/PopupBg. you may also use ImGuiWindowFlags_NoBackground. IMGUI_API void SetWindowPos(const ImVec2& pos, ImGuiCond cond = 0); // (not recommended) set current window position - call within Begin()/End(). prefer using SetNextWindowPos(), as this may incur tearing and side-effects. IMGUI_API void SetWindowSize(const ImVec2& size, ImGuiCond cond = 0); // (not recommended) set current window size - call within Begin()/End(). set to ImVec2(0,0) to force an auto-fit. prefer using SetNextWindowSize(), as this may incur tearing and minor side-effects. IMGUI_API void SetWindowCollapsed(bool collapsed, ImGuiCond cond = 0); // (not recommended) set current window collapsed state. prefer using SetNextWindowCollapsed(). IMGUI_API void SetWindowFocus(); // (not recommended) set current window to be focused / top-most. prefer using SetNextWindowFocus(). IMGUI_API void SetWindowFontScale(float scale); // set font scale. Adjust IO.FontGlobalScale if you want to scale all windows. This is an old API! For correct scaling, prefer to reload font + rebuild ImFontAtlas + call style.ScaleAllSizes(). IMGUI_API void SetWindowPos(const char* name, const ImVec2& pos, ImGuiCond cond = 0); // set named window position. IMGUI_API void SetWindowSize(const char* name, const ImVec2& size, ImGuiCond cond = 0); // set named window size. set axis to 0.0f to force an auto-fit on this axis. IMGUI_API void SetWindowCollapsed(const char* name, bool collapsed, ImGuiCond cond = 0); // set named window collapsed state IMGUI_API void SetWindowFocus(const char* name); // set named window to be focused / top-most. use NULL to remove focus. 
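// Usage sketch for the window functions above (illustrative only; "My Tool" and 'my_tool_active' are hypothetical application-side names):
//   ImGui::SetNextWindowPos(ImVec2(20, 20), ImGuiCond_FirstUseEver);
//   ImGui::SetNextWindowSize(ImVec2(400, 300), ImGuiCond_FirstUseEver);
//   if (ImGui::Begin("My Tool", &my_tool_active))
//   {
//       ImGui::Text("Contents are only submitted when Begin() returns true.");
//   }
//   ImGui::End();   // always call End(), even when Begin() returned false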
// Content region // - Those functions are bound to be redesigned soon (they are confusing, incomplete and return values in local window coordinates which increases confusion) IMGUI_API ImVec2 GetContentRegionMax(); // current content boundaries (typically window boundaries including scrolling, or current column boundaries), in windows coordinates IMGUI_API ImVec2 GetContentRegionAvail(); // == GetContentRegionMax() - GetCursorPos() IMGUI_API ImVec2 GetWindowContentRegionMin(); // content boundaries min (roughly (0,0)-Scroll), in window coordinates IMGUI_API ImVec2 GetWindowContentRegionMax(); // content boundaries max (roughly (0,0)+Size-Scroll) where Size can be override with SetNextWindowContentSize(), in window coordinates IMGUI_API float GetWindowContentRegionWidth(); // // Windows Scrolling IMGUI_API float GetScrollX(); // get scrolling amount [0..GetScrollMaxX()] IMGUI_API float GetScrollY(); // get scrolling amount [0..GetScrollMaxY()] IMGUI_API float GetScrollMaxX(); // get maximum scrolling amount ~~ ContentSize.X - WindowSize.X IMGUI_API float GetScrollMaxY(); // get maximum scrolling amount ~~ ContentSize.Y - WindowSize.Y IMGUI_API void SetScrollX(float scroll_x); // set scrolling amount [0..GetScrollMaxX()] IMGUI_API void SetScrollY(float scroll_y); // set scrolling amount [0..GetScrollMaxY()] IMGUI_API void SetScrollHereX(float center_x_ratio = 0.5f); // adjust scrolling amount to make current cursor position visible. center_x_ratio=0.0: left, 0.5: center, 1.0: right. When using to make a "default/current item" visible, consider using SetItemDefaultFocus() instead. IMGUI_API void SetScrollHereY(float center_y_ratio = 0.5f); // adjust scrolling amount to make current cursor position visible. center_y_ratio=0.0: top, 0.5: center, 1.0: bottom. When using to make a "default/current item" visible, consider using SetItemDefaultFocus() instead. IMGUI_API void SetScrollFromPosX(float local_x, float center_x_ratio = 0.5f); // adjust scrolling amount to make given position visible. Generally GetCursorStartPos() + offset to compute a valid position. IMGUI_API void SetScrollFromPosY(float local_y, float center_y_ratio = 0.5f); // adjust scrolling amount to make given position visible. Generally GetCursorStartPos() + offset to compute a valid position. // Parameters stacks (shared) IMGUI_API void PushFont(ImFont* font); // use NULL as a shortcut to push default font IMGUI_API void PopFont(); IMGUI_API void PushStyleColor(ImGuiCol idx, ImU32 col); IMGUI_API void PushStyleColor(ImGuiCol idx, const ImVec4& col); IMGUI_API void PopStyleColor(int count = 1); IMGUI_API void PushStyleVar(ImGuiStyleVar idx, float val); IMGUI_API void PushStyleVar(ImGuiStyleVar idx, const ImVec2& val); IMGUI_API void PopStyleVar(int count = 1); IMGUI_API const ImVec4& GetStyleColorVec4(ImGuiCol idx); // retrieve style color as stored in ImGuiStyle structure. use to feed back into PushStyleColor(), otherwise use GetColorU32() to get style color with style alpha baked in. 
IMGUI_API ImFont* GetFont(); // get current font
IMGUI_API float GetFontSize(); // get current font size (= height in pixels) of current font with current scale applied
IMGUI_API ImVec2 GetFontTexUvWhitePixel(); // get UV coordinate for a white pixel, useful to draw custom shapes via the ImDrawList API
IMGUI_API ImU32 GetColorU32(ImGuiCol idx, float alpha_mul = 1.0f); // retrieve given style color with style alpha applied and optional extra alpha multiplier
IMGUI_API ImU32 GetColorU32(const ImVec4& col); // retrieve given color with style alpha applied
IMGUI_API ImU32 GetColorU32(ImU32 col); // retrieve given color with style alpha applied
// Parameters stacks (current window)
IMGUI_API void PushItemWidth(float item_width); // set width of items for common large "item+label" widgets. >0.0f: width in pixels, <0.0f align xx pixels to the right of window (so -1.0f always aligns width to the right side). 0.0f = default to ~2/3 of window width.
IMGUI_API void PopItemWidth();
IMGUI_API void SetNextItemWidth(float item_width); // set width of the _next_ common large "item+label" widget. >0.0f: width in pixels, <0.0f align xx pixels to the right of window (so -1.0f always aligns width to the right side)
IMGUI_API float CalcItemWidth(); // width of item given pushed settings and current cursor position. NOT necessarily the width of last item unlike most 'Item' functions.
IMGUI_API void PushTextWrapPos(float wrap_local_pos_x = 0.0f); // word-wrapping for Text*() commands. < 0.0f: no wrapping; 0.0f: wrap to end of window (or column); > 0.0f: wrap at 'wrap_pos_x' position in window local space
IMGUI_API void PopTextWrapPos();
IMGUI_API void PushAllowKeyboardFocus(bool allow_keyboard_focus); // allow focusing using TAB/Shift-TAB, enabled by default but you can disable it for certain widgets
IMGUI_API void PopAllowKeyboardFocus();
IMGUI_API void PushButtonRepeat(bool repeat); // in 'repeat' mode, Button*() functions return repeated true in a typematic manner (using io.KeyRepeatDelay/io.KeyRepeatRate settings). Note that you can call IsItemActive() after any Button() to tell if the button is held in the current frame.
IMGUI_API void PopButtonRepeat();
// Cursor / Layout
// - By "cursor" we mean the current output position.
// - The typical widget behavior is to output themselves at the current cursor position, then move the cursor one line down.
IMGUI_API void Separator(); // separator, generally horizontal. inside a menu bar or in horizontal layout mode, this becomes a vertical separator.
IMGUI_API void SameLine(float offset_from_start_x=0.0f, float spacing=-1.0f); // call between widgets or groups to layout them horizontally. X position given in window coordinates.
IMGUI_API void NewLine(); // undo a SameLine() or force a new line when in a horizontal-layout context.
IMGUI_API void Spacing(); // add vertical spacing.
IMGUI_API void Dummy(const ImVec2& size); // add a dummy item of given size. unlike InvisibleButton(), Dummy() won't take the mouse click or be navigable into.
IMGUI_API void Indent(float indent_w = 0.0f); // move content position toward the right, by style.IndentSpacing or indent_w if != 0 IMGUI_API void Unindent(float indent_w = 0.0f); // move content position back to the left, by style.IndentSpacing or indent_w if != 0 IMGUI_API void BeginGroup(); // lock horizontal starting position IMGUI_API void EndGroup(); // unlock horizontal starting position + capture the whole group bounding box into one "item" (so you can use IsItemHovered() or layout primitives such as SameLine() on whole group, etc.) IMGUI_API ImVec2 GetCursorPos(); // cursor position in window coordinates (relative to window position) IMGUI_API float GetCursorPosX(); // (some functions are using window-relative coordinates, such as: GetCursorPos, GetCursorStartPos, GetContentRegionMax, GetWindowContentRegion* etc. IMGUI_API float GetCursorPosY(); // other functions such as GetCursorScreenPos or everything in ImDrawList:: IMGUI_API void SetCursorPos(const ImVec2& local_pos); // are using the main, absolute coordinate system. IMGUI_API void SetCursorPosX(float local_x); // GetWindowPos() + GetCursorPos() == GetCursorScreenPos() etc.) IMGUI_API void SetCursorPosY(float local_y); // IMGUI_API ImVec2 GetCursorStartPos(); // initial cursor position in window coordinates IMGUI_API ImVec2 GetCursorScreenPos(); // cursor position in absolute screen coordinates [0..io.DisplaySize] (useful to work with ImDrawList API) IMGUI_API void SetCursorScreenPos(const ImVec2& pos); // cursor position in absolute screen coordinates [0..io.DisplaySize] IMGUI_API void AlignTextToFramePadding(); // vertically align upcoming text baseline to FramePadding.y so that it will align properly to regularly framed items (call if you have text on a line before a framed item) IMGUI_API float GetTextLineHeight(); // ~ FontSize IMGUI_API float GetTextLineHeightWithSpacing(); // ~ FontSize + style.ItemSpacing.y (distance in pixels between 2 consecutive lines of text) IMGUI_API float GetFrameHeight(); // ~ FontSize + style.FramePadding.y * 2 IMGUI_API float GetFrameHeightWithSpacing(); // ~ FontSize + style.FramePadding.y * 2 + style.ItemSpacing.y (distance in pixels between 2 consecutive lines of framed widgets) // ID stack/scopes // - Read the FAQ for more details about how ID are handled in dear imgui. If you are creating widgets in a loop you most // likely want to push a unique identifier (e.g. object pointer, loop index) to uniquely differentiate them. // - The resulting ID are hashes of the entire stack. // - You can also use the "Label##foobar" syntax within widget label to distinguish them from each others. // - In this header file we use the "label"/"name" terminology to denote a string that will be displayed and used as an ID, // whereas "str_id" denote a string that is only used as an ID and not normally displayed. IMGUI_API void PushID(const char* str_id); // push string into the ID stack (will hash string). IMGUI_API void PushID(const char* str_id_begin, const char* str_id_end); // push string into the ID stack (will hash string). IMGUI_API void PushID(const void* ptr_id); // push pointer into the ID stack (will hash pointer). IMGUI_API void PushID(int int_id); // push integer into the ID stack (will hash integer). IMGUI_API void PopID(); // pop from the ID stack. IMGUI_API ImGuiID GetID(const char* str_id); // calculate unique ID (hash of whole ID stack + given parameter). e.g. 
if you want to query into ImGuiStorage yourself
IMGUI_API ImGuiID GetID(const char* str_id_begin, const char* str_id_end);
IMGUI_API ImGuiID GetID(const void* ptr_id);
// Widgets: Text
IMGUI_API void TextUnformatted(const char* text, const char* text_end = NULL); // raw text without formatting. Roughly equivalent to Text("%s", text) but: A) doesn't require null terminated string if 'text_end' is specified, B) it's faster, no memory copy is done, no buffer size limits, recommended for long chunks of text.
IMGUI_API void Text(const char* fmt, ...) IM_FMTARGS(1); // simple formatted text
IMGUI_API void TextV(const char* fmt, va_list args) IM_FMTLIST(1);
IMGUI_API void TextColored(const ImVec4& col, const char* fmt, ...) IM_FMTARGS(2); // shortcut for PushStyleColor(ImGuiCol_Text, col); Text(fmt, ...); PopStyleColor();
IMGUI_API void TextColoredV(const ImVec4& col, const char* fmt, va_list args) IM_FMTLIST(2);
IMGUI_API void TextDisabled(const char* fmt, ...) IM_FMTARGS(1); // shortcut for PushStyleColor(ImGuiCol_Text, style.Colors[ImGuiCol_TextDisabled]); Text(fmt, ...); PopStyleColor();
IMGUI_API void TextDisabledV(const char* fmt, va_list args) IM_FMTLIST(1);
IMGUI_API void TextWrapped(const char* fmt, ...) IM_FMTARGS(1); // shortcut for PushTextWrapPos(0.0f); Text(fmt, ...); PopTextWrapPos(); Note that this won't work on an auto-resizing window if there are no other widgets to extend the window width, you may need to set a size using SetNextWindowSize().
IMGUI_API void TextWrappedV(const char* fmt, va_list args) IM_FMTLIST(1);
IMGUI_API void LabelText(const char* label, const char* fmt, ...) IM_FMTARGS(2); // display text+label aligned the same way as value+label widgets
IMGUI_API void LabelTextV(const char* label, const char* fmt, va_list args) IM_FMTLIST(2);
IMGUI_API void BulletText(const char* fmt, ...) IM_FMTARGS(1); // shortcut for Bullet()+Text()
IMGUI_API void BulletTextV(const char* fmt, va_list args) IM_FMTLIST(1);
// Widgets: Main
// - Most widgets return true when the value has been changed or when pressed/selected
IMGUI_API bool Button(const char* label, const ImVec2& size = ImVec2(0,0)); // button
IMGUI_API bool SmallButton(const char* label); // button with FramePadding=(0,0) to easily embed within text
IMGUI_API bool InvisibleButton(const char* str_id, const ImVec2& size); // button behavior without the visuals, frequently useful to build custom behaviors using the public api (along with IsItemActive, IsItemHovered, etc.)
IMGUI_API bool ArrowButton(const char* str_id, ImGuiDir dir); // square button with an arrow shape
IMGUI_API void Image(ImTextureID user_texture_id, const ImVec2& size, const ImVec2& uv0 = ImVec2(0,0), const ImVec2& uv1 = ImVec2(1,1), const ImVec4& tint_col = ImVec4(1,1,1,1), const ImVec4& border_col = ImVec4(0,0,0,0));
IMGUI_API bool ImageButton(ImTextureID user_texture_id, const ImVec2& size, const ImVec2& uv0 = ImVec2(0,0), const ImVec2& uv1 = ImVec2(1,1), int frame_padding = -1, const ImVec4& bg_col = ImVec4(0,0,0,0), const ImVec4& tint_col = ImVec4(1,1,1,1)); // <0 frame_padding uses default frame padding settings. 0 for no padding
IMGUI_API bool Checkbox(const char* label, bool* v);
IMGUI_API bool CheckboxFlags(const char* label, unsigned int* flags, unsigned int flags_value);
IMGUI_API bool RadioButton(const char* label, bool active); // use with e.g.
if (RadioButton("one", my_value==1)) { my_value = 1; } IMGUI_API bool RadioButton(const char* label, int* v, int v_button); // shortcut to handle the above pattern when value is an integer IMGUI_API void ProgressBar(float fraction, const ImVec2& size_arg = ImVec2(-1,0), const char* overlay = NULL); IMGUI_API void Bullet(); // draw a small circle and keep the cursor on the same line. advance cursor x position by GetTreeNodeToLabelSpacing(), same distance that TreeNode() uses // Widgets: Combo Box // - The new BeginCombo()/EndCombo() api allows you to manage your contents and selection state however you want it, by creating e.g. Selectable() items. // - The old Combo() api are helpers over BeginCombo()/EndCombo() which are kept available for convenience purpose. IMGUI_API bool BeginCombo(const char* label, const char* preview_value, ImGuiComboFlags flags = 0); IMGUI_API void EndCombo(); // only call EndCombo() if BeginCombo() returns true! IMGUI_API bool Combo(const char* label, int* current_item, const char* const items[], int items_count, int popup_max_height_in_items = -1); IMGUI_API bool Combo(const char* label, int* current_item, const char* items_separated_by_zeros, int popup_max_height_in_items = -1); // Separate items with \0 within a string, end item-list with \0\0. e.g. "One\0Two\0Three\0" IMGUI_API bool Combo(const char* label, int* current_item, bool(*items_getter)(void* data, int idx, const char** out_text), void* data, int items_count, int popup_max_height_in_items = -1); // Widgets: Drags // - CTRL+Click on any drag box to turn them into an input box. Manually input values aren't clamped and can go off-bounds. // - For all the Float2/Float3/Float4/Int2/Int3/Int4 versions of every functions, note that a 'float v[X]' function argument is the same as 'float* v', the array syntax is just a way to document the number of elements that are expected to be accessible. You can pass address of your first element out of a contiguous set, e.g. &myvector.x // - Adjust format string to decorate the value with a prefix, a suffix, or adapt the editing and display precision e.g. "%.3f" -> 1.234; "%5.2f secs" -> 01.23 secs; "Biscuit: %.0f" -> Biscuit: 1; etc. // - Speed are per-pixel of mouse movement (v_speed=0.2f: mouse needs to move by 5 pixels to increase value by 1). For gamepad/keyboard navigation, minimum speed is Max(v_speed, minimum_step_at_given_precision). 
IMGUI_API bool DragFloat(const char* label, float* v, float v_speed = 1.0f, float v_min = 0.0f, float v_max = 0.0f, const char* format = "%.3f", float power = 1.0f); // If v_min >= v_max we have no bound IMGUI_API bool DragFloat2(const char* label, float v[2], float v_speed = 1.0f, float v_min = 0.0f, float v_max = 0.0f, const char* format = "%.3f", float power = 1.0f); IMGUI_API bool DragFloat3(const char* label, float v[3], float v_speed = 1.0f, float v_min = 0.0f, float v_max = 0.0f, const char* format = "%.3f", float power = 1.0f); IMGUI_API bool DragFloat4(const char* label, float v[4], float v_speed = 1.0f, float v_min = 0.0f, float v_max = 0.0f, const char* format = "%.3f", float power = 1.0f); IMGUI_API bool DragFloatRange2(const char* label, float* v_current_min, float* v_current_max, float v_speed = 1.0f, float v_min = 0.0f, float v_max = 0.0f, const char* format = "%.3f", const char* format_max = NULL, float power = 1.0f); IMGUI_API bool DragInt(const char* label, int* v, float v_speed = 1.0f, int v_min = 0, int v_max = 0, const char* format = "%d"); // If v_min >= v_max we have no bound IMGUI_API bool DragInt2(const char* label, int v[2], float v_speed = 1.0f, int v_min = 0, int v_max = 0, const char* format = "%d"); IMGUI_API bool DragInt3(const char* label, int v[3], float v_speed = 1.0f, int v_min = 0, int v_max = 0, const char* format = "%d"); IMGUI_API bool DragInt4(const char* label, int v[4], float v_speed = 1.0f, int v_min = 0, int v_max = 0, const char* format = "%d"); IMGUI_API bool DragIntRange2(const char* label, int* v_current_min, int* v_current_max, float v_speed = 1.0f, int v_min = 0, int v_max = 0, const char* format = "%d", const char* format_max = NULL); IMGUI_API bool DragScalar(const char* label, ImGuiDataType data_type, void* v, float v_speed, const void* v_min = NULL, const void* v_max = NULL, const char* format = NULL, float power = 1.0f); IMGUI_API bool DragScalarN(const char* label, ImGuiDataType data_type, void* v, int components, float v_speed, const void* v_min = NULL, const void* v_max = NULL, const char* format = NULL, float power = 1.0f); // Widgets: Sliders // - CTRL+Click on any slider to turn them into an input box. Manually input values aren't clamped and can go off-bounds. // - Adjust format string to decorate the value with a prefix, a suffix, or adapt the editing and display precision e.g. "%.3f" -> 1.234; "%5.2f secs" -> 01.23 secs; "Biscuit: %.0f" -> Biscuit: 1; etc. IMGUI_API bool SliderFloat(const char* label, float* v, float v_min, float v_max, const char* format = "%.3f", float power = 1.0f); // adjust format to decorate the value with a prefix or a suffix for in-slider labels or unit display. 
Use power!=1.0 for power curve sliders IMGUI_API bool SliderFloat2(const char* label, float v[2], float v_min, float v_max, const char* format = "%.3f", float power = 1.0f); IMGUI_API bool SliderFloat3(const char* label, float v[3], float v_min, float v_max, const char* format = "%.3f", float power = 1.0f); IMGUI_API bool SliderFloat4(const char* label, float v[4], float v_min, float v_max, const char* format = "%.3f", float power = 1.0f); IMGUI_API bool SliderAngle(const char* label, float* v_rad, float v_degrees_min = -360.0f, float v_degrees_max = +360.0f, const char* format = "%.0f deg"); IMGUI_API bool SliderInt(const char* label, int* v, int v_min, int v_max, const char* format = "%d"); IMGUI_API bool SliderInt2(const char* label, int v[2], int v_min, int v_max, const char* format = "%d"); IMGUI_API bool SliderInt3(const char* label, int v[3], int v_min, int v_max, const char* format = "%d"); IMGUI_API bool SliderInt4(const char* label, int v[4], int v_min, int v_max, const char* format = "%d"); IMGUI_API bool SliderScalar(const char* label, ImGuiDataType data_type, void* v, const void* v_min, const void* v_max, const char* format = NULL, float power = 1.0f); IMGUI_API bool SliderScalarN(const char* label, ImGuiDataType data_type, void* v, int components, const void* v_min, const void* v_max, const char* format = NULL, float power = 1.0f); IMGUI_API bool VSliderFloat(const char* label, const ImVec2& size, float* v, float v_min, float v_max, const char* format = "%.3f", float power = 1.0f); IMGUI_API bool VSliderInt(const char* label, const ImVec2& size, int* v, int v_min, int v_max, const char* format = "%d"); IMGUI_API bool VSliderScalar(const char* label, const ImVec2& size, ImGuiDataType data_type, void* v, const void* v_min, const void* v_max, const char* format = NULL, float power = 1.0f); // Widgets: Input with Keyboard // - If you want to use InputText() with a dynamic string type such as std::string or your own, see misc/cpp/imgui_stdlib.h // - Most of the ImGuiInputTextFlags flags are only useful for InputText() and not for InputFloatX, InputIntX, InputDouble etc. 
IMGUI_API bool InputText(const char* label, char* buf, size_t buf_size, ImGuiInputTextFlags flags = 0, ImGuiInputTextCallback callback = NULL, void* user_data = NULL); IMGUI_API bool InputTextMultiline(const char* label, char* buf, size_t buf_size, const ImVec2& size = ImVec2(0,0), ImGuiInputTextFlags flags = 0, ImGuiInputTextCallback callback = NULL, void* user_data = NULL); IMGUI_API bool InputTextWithHint(const char* label, const char* hint, char* buf, size_t buf_size, ImGuiInputTextFlags flags = 0, ImGuiInputTextCallback callback = NULL, void* user_data = NULL); IMGUI_API bool InputFloat(const char* label, float* v, float step = 0.0f, float step_fast = 0.0f, const char* format = "%.3f", ImGuiInputTextFlags flags = 0); IMGUI_API bool InputFloat2(const char* label, float v[2], const char* format = "%.3f", ImGuiInputTextFlags flags = 0); IMGUI_API bool InputFloat3(const char* label, float v[3], const char* format = "%.3f", ImGuiInputTextFlags flags = 0); IMGUI_API bool InputFloat4(const char* label, float v[4], const char* format = "%.3f", ImGuiInputTextFlags flags = 0); IMGUI_API bool InputInt(const char* label, int* v, int step = 1, int step_fast = 100, ImGuiInputTextFlags flags = 0); IMGUI_API bool InputInt2(const char* label, int v[2], ImGuiInputTextFlags flags = 0); IMGUI_API bool InputInt3(const char* label, int v[3], ImGuiInputTextFlags flags = 0); IMGUI_API bool InputInt4(const char* label, int v[4], ImGuiInputTextFlags flags = 0); IMGUI_API bool InputDouble(const char* label, double* v, double step = 0.0, double step_fast = 0.0, const char* format = "%.6f", ImGuiInputTextFlags flags = 0); IMGUI_API bool InputScalar(const char* label, ImGuiDataType data_type, void* v, const void* step = NULL, const void* step_fast = NULL, const char* format = NULL, ImGuiInputTextFlags flags = 0); IMGUI_API bool InputScalarN(const char* label, ImGuiDataType data_type, void* v, int components, const void* step = NULL, const void* step_fast = NULL, const char* format = NULL, ImGuiInputTextFlags flags = 0); // Widgets: Color Editor/Picker (tip: the ColorEdit* functions have a little colored preview square that can be left-clicked to open a picker, and right-clicked to open an option menu.) // - Note that in C++ a 'float v[X]' function argument is the _same_ as 'float* v', the array syntax is just a way to document the number of elements that are expected to be accessible. // - You can pass the address of a first float element out of a contiguous structure, e.g. &myvector.x IMGUI_API bool ColorEdit3(const char* label, float col[3], ImGuiColorEditFlags flags = 0); IMGUI_API bool ColorEdit4(const char* label, float col[4], ImGuiColorEditFlags flags = 0); IMGUI_API bool ColorPicker3(const char* label, float col[3], ImGuiColorEditFlags flags = 0); IMGUI_API bool ColorPicker4(const char* label, float col[4], ImGuiColorEditFlags flags = 0, const float* ref_col = NULL); IMGUI_API bool ColorButton(const char* desc_id, const ImVec4& col, ImGuiColorEditFlags flags = 0, ImVec2 size = ImVec2(0,0)); // display a colored square/button, hover for details, return true when pressed. IMGUI_API void SetColorEditOptions(ImGuiColorEditFlags flags); // initialize current options (generally on application startup) if you want to select a default format, picker type, etc. User will be able to change many settings, unless you pass the _NoOptions flag to your calls. 
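// Usage sketch for the drag/slider/input/color widgets above (illustrative only; 'speed', 'angle' and 'clear_color' are hypothetical application-side variables):
//   static float speed = 1.0f, angle = 0.0f;
//   static float clear_color[3] = { 0.45f, 0.55f, 0.60f };
//   ImGui::DragFloat("Speed", &speed, 0.1f, 0.0f, 10.0f, "%.2f m/s");   // CTRL+Click the widget to type a value directly
//   ImGui::SliderAngle("Angle", &angle);                                // stores radians, displays degrees
//   ImGui::ColorEdit3("Clear color", clear_color);                      // the small preview square opens a picker when clicked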
// Widgets: Trees
// - TreeNode functions return true when the node is open, in which case you need to also call TreePop() when you are finished displaying the tree node contents.
IMGUI_API bool TreeNode(const char* label);
IMGUI_API bool TreeNode(const char* str_id, const char* fmt, ...) IM_FMTARGS(2); // helper variation to easily decorrelate the id from the displayed string. Read the FAQ about why and how to use ID. to align arbitrary text at the same level as a TreeNode() you can use Bullet().
IMGUI_API bool TreeNode(const void* ptr_id, const char* fmt, ...) IM_FMTARGS(2); // "
IMGUI_API bool TreeNodeV(const char* str_id, const char* fmt, va_list args) IM_FMTLIST(2);
IMGUI_API bool TreeNodeV(const void* ptr_id, const char* fmt, va_list args) IM_FMTLIST(2);
IMGUI_API bool TreeNodeEx(const char* label, ImGuiTreeNodeFlags flags = 0);
IMGUI_API bool TreeNodeEx(const char* str_id, ImGuiTreeNodeFlags flags, const char* fmt, ...) IM_FMTARGS(3);
IMGUI_API bool TreeNodeEx(const void* ptr_id, ImGuiTreeNodeFlags flags, const char* fmt, ...) IM_FMTARGS(3);
IMGUI_API bool TreeNodeExV(const char* str_id, ImGuiTreeNodeFlags flags, const char* fmt, va_list args) IM_FMTLIST(3);
IMGUI_API bool TreeNodeExV(const void* ptr_id, ImGuiTreeNodeFlags flags, const char* fmt, va_list args) IM_FMTLIST(3);
IMGUI_API void TreePush(const char* str_id); // ~ Indent()+PushId(). Already called by TreeNode() when returning true, but you can call TreePush/TreePop yourself if desired.
IMGUI_API void TreePush(const void* ptr_id = NULL); // "
IMGUI_API void TreePop(); // ~ Unindent()+PopId()
IMGUI_API float GetTreeNodeToLabelSpacing(); // horizontal distance preceding label when using TreeNode*() or Bullet() == (g.FontSize + style.FramePadding.x*2) for a regular unframed TreeNode
IMGUI_API bool CollapsingHeader(const char* label, ImGuiTreeNodeFlags flags = 0); // if returning 'true' the header is open. doesn't indent nor push on ID stack. user doesn't have to call TreePop().
IMGUI_API bool CollapsingHeader(const char* label, bool* p_open, ImGuiTreeNodeFlags flags = 0); // when 'p_open' isn't NULL, display an additional small close button on upper right of the header
IMGUI_API void SetNextItemOpen(bool is_open, ImGuiCond cond = 0); // set next TreeNode/CollapsingHeader open state.
// Widgets: Selectables
// - A selectable highlights when hovered, and can display another color when selected.
// - Neighboring selectables extend their highlight bounds in order to leave no gap between them.
IMGUI_API bool Selectable(const char* label, bool selected = false, ImGuiSelectableFlags flags = 0, const ImVec2& size = ImVec2(0,0)); // "bool selected" carries the selection state (read-only). When Selectable() is clicked it returns true so you can modify your selection state. size.x==0.0: use remaining width, size.x>0.0: specify width. size.y==0.0: use label height, size.y>0.0: specify height
IMGUI_API bool Selectable(const char* label, bool* p_selected, ImGuiSelectableFlags flags = 0, const ImVec2& size = ImVec2(0,0)); // "bool* p_selected" points to the selection state (read-write), as a convenient helper.
// Widgets: List Boxes
// - FIXME: To be consistent with all the newer API, ListBoxHeader/ListBoxFooter should in reality be called BeginListBox/EndListBox. Will rename them.
IMGUI_API bool ListBox(const char* label, int* current_item, const char* const items[], int items_count, int height_in_items = -1);
IMGUI_API bool ListBox(const char* label, int* current_item, bool (*items_getter)(void* data, int idx, const char** out_text), void* data, int items_count, int height_in_items = -1);
IMGUI_API bool ListBoxHeader(const char* label, const ImVec2& size = ImVec2(0,0)); // use if you want to reimplement ListBox() with custom data or interactions. if the function returns true, you can output elements then call ListBoxFooter() afterwards.
IMGUI_API bool ListBoxHeader(const char* label, int items_count, int height_in_items = -1); // "
IMGUI_API void ListBoxFooter(); // terminate the scrolling region. only call ListBoxFooter() if ListBoxHeader() returned true!
// Widgets: Data Plotting
IMGUI_API void PlotLines(const char* label, const float* values, int values_count, int values_offset = 0, const char* overlay_text = NULL, float scale_min = FLT_MAX, float scale_max = FLT_MAX, ImVec2 graph_size = ImVec2(0, 0), int stride = sizeof(float));
IMGUI_API void PlotLines(const char* label, float(*values_getter)(void* data, int idx), void* data, int values_count, int values_offset = 0, const char* overlay_text = NULL, float scale_min = FLT_MAX, float scale_max = FLT_MAX, ImVec2 graph_size = ImVec2(0, 0));
IMGUI_API void PlotHistogram(const char* label, const float* values, int values_count, int values_offset = 0, const char* overlay_text = NULL, float scale_min = FLT_MAX, float scale_max = FLT_MAX, ImVec2 graph_size = ImVec2(0, 0), int stride = sizeof(float));
IMGUI_API void PlotHistogram(const char* label, float(*values_getter)(void* data, int idx), void* data, int values_count, int values_offset = 0, const char* overlay_text = NULL, float scale_min = FLT_MAX, float scale_max = FLT_MAX, ImVec2 graph_size = ImVec2(0, 0));
// Widgets: Value() Helpers.
// - Those are merely shortcuts to calling Text() with a format string. Output single value in "name: value" format (tip: freely declare more in your code to handle your types. you can add functions to the ImGui namespace)
IMGUI_API void Value(const char* prefix, bool b);
IMGUI_API void Value(const char* prefix, int v);
IMGUI_API void Value(const char* prefix, unsigned int v);
IMGUI_API void Value(const char* prefix, float v, const char* float_format = NULL);
// Widgets: Menus
IMGUI_API bool BeginMainMenuBar(); // create and append to a full screen menu-bar.
IMGUI_API void EndMainMenuBar(); // only call EndMainMenuBar() if BeginMainMenuBar() returns true!
IMGUI_API bool BeginMenuBar(); // append to menu-bar of current window (requires ImGuiWindowFlags_MenuBar flag set on parent window).
IMGUI_API void EndMenuBar(); // only call EndMenuBar() if BeginMenuBar() returns true!
IMGUI_API bool BeginMenu(const char* label, bool enabled = true); // create a sub-menu entry. only call EndMenu() if this returns true!
IMGUI_API void EndMenu(); // only call EndMenu() if BeginMenu() returns true!
IMGUI_API bool MenuItem(const char* label, const char* shortcut = NULL, bool selected = false, bool enabled = true); // return true when activated. shortcuts are displayed for convenience but not processed by ImGui at the moment
IMGUI_API bool MenuItem(const char* label, const char* shortcut, bool* p_selected, bool enabled = true); // return true when activated + toggle (*p_selected) if p_selected != NULL
// Tooltips
IMGUI_API void BeginTooltip(); // begin/append a tooltip window. to create full-featured tooltip (with any kind of items).
IMGUI_API void EndTooltip(); IMGUI_API void SetTooltip(const char* fmt, ...) IM_FMTARGS(1); // set a text-only tooltip, typically use with ImGui::IsItemHovered(). override any previous call to SetTooltip(). IMGUI_API void SetTooltipV(const char* fmt, va_list args) IM_FMTLIST(1); // Popups, Modals // The properties of popups windows are: // - They block normal mouse hovering detection outside them. (*) // - Unless modal, they can be closed by clicking anywhere outside them, or by pressing ESCAPE. // - Their visibility state (~bool) is held internally by imgui instead of being held by the programmer as we are used to with regular Begin() calls. // User can manipulate the visibility state by calling OpenPopup(). // (*) One can use IsItemHovered(ImGuiHoveredFlags_AllowWhenBlockedByPopup) to bypass it and detect hovering even when normally blocked by a popup. // Those three properties are connected. The library needs to hold their visibility state because it can close popups at any time. IMGUI_API void OpenPopup(const char* str_id); // call to mark popup as open (don't call every frame!). popups are closed when user click outside, or if CloseCurrentPopup() is called within a BeginPopup()/EndPopup() block. By default, Selectable()/MenuItem() are calling CloseCurrentPopup(). Popup identifiers are relative to the current ID-stack (so OpenPopup and BeginPopup needs to be at the same level). IMGUI_API bool BeginPopup(const char* str_id, ImGuiWindowFlags flags = 0); // return true if the popup is open, and you can start outputting to it. only call EndPopup() if BeginPopup() returns true! IMGUI_API bool BeginPopupContextItem(const char* str_id = NULL, int mouse_button = 1); // helper to open and begin popup when clicked on last item. if you can pass a NULL str_id only if the previous item had an id. If you want to use that on a non-interactive item such as Text() you need to pass in an explicit ID here. read comments in .cpp! IMGUI_API bool BeginPopupContextWindow(const char* str_id = NULL, int mouse_button = 1, bool also_over_items = true); // helper to open and begin popup when clicked on current window. IMGUI_API bool BeginPopupContextVoid(const char* str_id = NULL, int mouse_button = 1); // helper to open and begin popup when clicked in void (where there are no imgui windows). IMGUI_API bool BeginPopupModal(const char* name, bool* p_open = NULL, ImGuiWindowFlags flags = 0); // modal dialog (regular window with title bar, block interactions behind the modal window, can't close the modal window by clicking outside) IMGUI_API void EndPopup(); // only call EndPopup() if BeginPopupXXX() returns true! IMGUI_API bool OpenPopupOnItemClick(const char* str_id = NULL, int mouse_button = 1); // helper to open popup when clicked on last item (note: actually triggers on the mouse _released_ event to be consistent with popup behaviors). return true when just opened. IMGUI_API bool IsPopupOpen(const char* str_id); // return true if the popup is open at the current begin-ed level of the popup stack. IMGUI_API void CloseCurrentPopup(); // close the popup we have begin-ed into. clicking on a MenuItem or Selectable automatically close the current popup. // Columns // - You can also use SameLine(pos_x) to mimic simplified columns. // - The columns API is work-in-progress and rather lacking (columns are arguably the worst part of dear imgui at the moment!) 
IMGUI_API void Columns(int count = 1, const char* id = NULL, bool border = true); IMGUI_API void NextColumn(); // next column, defaults to current row or next row if the current row is finished IMGUI_API int GetColumnIndex(); // get current column index IMGUI_API float GetColumnWidth(int column_index = -1); // get column width (in pixels). pass -1 to use current column IMGUI_API void SetColumnWidth(int column_index, float width); // set column width (in pixels). pass -1 to use current column IMGUI_API float GetColumnOffset(int column_index = -1); // get position of column line (in pixels, from the left side of the contents region). pass -1 to use current column, otherwise 0..GetColumnsCount() inclusive. column 0 is typically 0.0f IMGUI_API void SetColumnOffset(int column_index, float offset_x); // set position of column line (in pixels, from the left side of the contents region). pass -1 to use current column IMGUI_API int GetColumnsCount(); // Tab Bars, Tabs // [BETA API] API may evolve! IMGUI_API bool BeginTabBar(const char* str_id, ImGuiTabBarFlags flags = 0); // create and append into a TabBar IMGUI_API void EndTabBar(); // only call EndTabBar() if BeginTabBar() returns true! IMGUI_API bool BeginTabItem(const char* label, bool* p_open = NULL, ImGuiTabItemFlags flags = 0);// create a Tab. Returns true if the Tab is selected. IMGUI_API void EndTabItem(); // only call EndTabItem() if BeginTabItem() returns true! IMGUI_API void SetTabItemClosed(const char* tab_or_docked_window_label); // notify TabBar or Docking system of a closed tab/window ahead (useful to reduce visual flicker on reorderable tab bars). For tab-bar: call after BeginTabBar() and before Tab submissions. Otherwise call with a window name. // Logging/Capture // - All text output from the interface can be captured into tty/file/clipboard. By default, tree nodes are automatically opened during logging. IMGUI_API void LogToTTY(int auto_open_depth = -1); // start logging to tty (stdout) IMGUI_API void LogToFile(int auto_open_depth = -1, const char* filename = NULL); // start logging to file IMGUI_API void LogToClipboard(int auto_open_depth = -1); // start logging to OS clipboard IMGUI_API void LogFinish(); // stop logging (close file, etc.) IMGUI_API void LogButtons(); // helper to display buttons for logging to tty/file/clipboard IMGUI_API void LogText(const char* fmt, ...) IM_FMTARGS(1); // pass text data straight to log (without being displayed) // Drag and Drop // [BETA API] API may evolve! IMGUI_API bool BeginDragDropSource(ImGuiDragDropFlags flags = 0); // call when the current item is active. If this return true, you can call SetDragDropPayload() + EndDragDropSource() IMGUI_API bool SetDragDropPayload(const char* type, const void* data, size_t sz, ImGuiCond cond = 0); // type is a user defined string of maximum 32 characters. Strings starting with '_' are reserved for dear imgui internal types. Data is copied and held by imgui. IMGUI_API void EndDragDropSource(); // only call EndDragDropSource() if BeginDragDropSource() returns true! IMGUI_API bool BeginDragDropTarget(); // call after submitting an item that may receive a payload. If this returns true, you can call AcceptDragDropPayload() + EndDragDropTarget() IMGUI_API const ImGuiPayload* AcceptDragDropPayload(const char* type, ImGuiDragDropFlags flags = 0); // accept contents of a given type. If ImGuiDragDropFlags_AcceptBeforeDelivery is set you can peek into the payload before the mouse button is released. 
IMGUI_API void EndDragDropTarget(); // only call EndDragDropTarget() if BeginDragDropTarget() returns true! IMGUI_API const ImGuiPayload* GetDragDropPayload(); // peek directly into the current payload from anywhere. may return NULL. use ImGuiPayload::IsDataType() to test for the payload type. // Clipping IMGUI_API void PushClipRect(const ImVec2& clip_rect_min, const ImVec2& clip_rect_max, bool intersect_with_current_clip_rect); IMGUI_API void PopClipRect(); // Focus, Activation // - Prefer using "SetItemDefaultFocus()" over "if (IsWindowAppearing()) SetScrollHereY()" when applicable to signify "this is the default item" IMGUI_API void SetItemDefaultFocus(); // make last item the default focused item of a window. IMGUI_API void SetKeyboardFocusHere(int offset = 0); // focus keyboard on the next widget. Use positive 'offset' to access sub components of a multiple component widget. Use -1 to access previous widget. // Item/Widgets Utilities // - Most of the functions are referring to the last/previous item we submitted. // - See Demo Window under "Widgets->Querying Status" for an interactive visualization of most of those functions. IMGUI_API bool IsItemHovered(ImGuiHoveredFlags flags = 0); // is the last item hovered? (and usable, aka not blocked by a popup, etc.). See ImGuiHoveredFlags for more options. IMGUI_API bool IsItemActive(); // is the last item active? (e.g. button being held, text field being edited. This will continuously return true while holding mouse button on an item. Items that don't interact will always return false) IMGUI_API bool IsItemFocused(); // is the last item focused for keyboard/gamepad navigation? IMGUI_API bool IsItemClicked(int mouse_button = 0); // is the last item clicked? (e.g. button/node just clicked on) == IsMouseClicked(mouse_button) && IsItemHovered() IMGUI_API bool IsItemVisible(); // is the last item visible? (items may be out of sight because of clipping/scrolling) IMGUI_API bool IsItemEdited(); // did the last item modify its underlying value this frame? or was pressed? This is generally the same as the "bool" return value of many widgets. IMGUI_API bool IsItemActivated(); // was the last item just made active (item was previously inactive). IMGUI_API bool IsItemDeactivated(); // was the last item just made inactive (item was previously active). Useful for Undo/Redo patterns with widgets that requires continuous editing. IMGUI_API bool IsItemDeactivatedAfterEdit(); // was the last item just made inactive and made a value change when it was active? (e.g. Slider/Drag moved). Useful for Undo/Redo patterns with widgets that requires continuous editing. Note that you may get false positives (some widgets such as Combo()/ListBox()/Selectable() will return true even when clicking an already selected item). IMGUI_API bool IsAnyItemHovered(); // is any item hovered? IMGUI_API bool IsAnyItemActive(); // is any item active? IMGUI_API bool IsAnyItemFocused(); // is any item focused? IMGUI_API ImVec2 GetItemRectMin(); // get upper-left bounding rectangle of the last item (screen space) IMGUI_API ImVec2 GetItemRectMax(); // get lower-right bounding rectangle of the last item (screen space) IMGUI_API ImVec2 GetItemRectSize(); // get size of last item IMGUI_API void SetItemAllowOverlap(); // allow last item to be overlapped by a subsequent item. sometimes useful with invisible buttons, selectables, etc. to catch unused area. 
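
// Example usage of the item query functions above (illustrative sketch; "value" is an arbitrary local variable and the
// undo-stack comment is a placeholder for application code):
//      static float value = 0.0f;
//      ImGui::SliderFloat("Value", &value, 0.0f, 1.0f);
//      if (ImGui::IsItemHovered())
//          ImGui::SetTooltip("value = %.3f", value);       // queries refer to the last submitted item (the slider)
//      if (ImGui::IsItemDeactivatedAfterEdit())
//      {
//          /* push 'value' onto your undo stack here */
//      }
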
// Miscellaneous Utilities IMGUI_API bool IsRectVisible(const ImVec2& size); // test if rectangle (of given size, starting from cursor position) is visible / not clipped. IMGUI_API bool IsRectVisible(const ImVec2& rect_min, const ImVec2& rect_max); // test if rectangle (in screen space) is visible / not clipped. to perform coarse clipping on user's side. IMGUI_API double GetTime(); // get global imgui time. incremented by io.DeltaTime every frame. IMGUI_API int GetFrameCount(); // get global imgui frame count. incremented by 1 every frame. IMGUI_API ImDrawList* GetBackgroundDrawList(); // this draw list will be the first rendering one. Useful to quickly draw shapes/text behind dear imgui contents. IMGUI_API ImDrawList* GetForegroundDrawList(); // this draw list will be the last rendered one. Useful to quickly draw shapes/text over dear imgui contents. IMGUI_API ImDrawListSharedData* GetDrawListSharedData(); // you may use this when creating your own ImDrawList instances. IMGUI_API const char* GetStyleColorName(ImGuiCol idx); // get a string corresponding to the enum value (for display, saving, etc.). IMGUI_API void SetStateStorage(ImGuiStorage* storage); // replace current window storage with our own (if you want to manipulate it yourself, typically clear subsection of it) IMGUI_API ImGuiStorage* GetStateStorage(); IMGUI_API ImVec2 CalcTextSize(const char* text, const char* text_end = NULL, bool hide_text_after_double_hash = false, float wrap_width = -1.0f); IMGUI_API void CalcListClipping(int items_count, float items_height, int* out_items_display_start, int* out_items_display_end); // calculate coarse clipping for large list of evenly sized items. Prefer using the ImGuiListClipper higher-level helper if you can. IMGUI_API bool BeginChildFrame(ImGuiID id, const ImVec2& size, ImGuiWindowFlags flags = 0); // helper to create a child window / scrolling region that looks like a normal widget frame IMGUI_API void EndChildFrame(); // always call EndChildFrame() regardless of BeginChildFrame() return values (which indicates a collapsed/clipped window) // Color Utilities IMGUI_API ImVec4 ColorConvertU32ToFloat4(ImU32 in); IMGUI_API ImU32 ColorConvertFloat4ToU32(const ImVec4& in); IMGUI_API void ColorConvertRGBtoHSV(float r, float g, float b, float& out_h, float& out_s, float& out_v); IMGUI_API void ColorConvertHSVtoRGB(float h, float s, float v, float& out_r, float& out_g, float& out_b); // Inputs Utilities IMGUI_API int GetKeyIndex(ImGuiKey imgui_key); // map ImGuiKey_* values into user's key index. == io.KeyMap[key] IMGUI_API bool IsKeyDown(int user_key_index); // is key being held. == io.KeysDown[user_key_index]. note that imgui doesn't know the semantic of each entry of io.KeysDown[]. Use your own indices/enums according to how your backend/engine stored them into io.KeysDown[]! IMGUI_API bool IsKeyPressed(int user_key_index, bool repeat = true); // was key pressed (went from !Down to Down). if repeat=true, uses io.KeyRepeatDelay / KeyRepeatRate IMGUI_API bool IsKeyReleased(int user_key_index); // was key released (went from Down to !Down).. IMGUI_API int GetKeyPressedAmount(int key_index, float repeat_delay, float rate); // uses provided repeat rate/delay. 
    // return a count, most often 0 or 1 but might be >1 if RepeatRate is small enough that DeltaTime > RepeatRate
    IMGUI_API bool IsMouseDown(int button); // is mouse button held (0=left, 1=right, 2=middle)
    IMGUI_API bool IsAnyMouseDown(); // is any mouse button held
    IMGUI_API bool IsMouseClicked(int button, bool repeat = false); // was mouse button clicked (went from !Down to Down) (0=left, 1=right, 2=middle)
    IMGUI_API bool IsMouseDoubleClicked(int button); // was mouse button double-clicked. a double-click returns false in IsMouseClicked(). uses io.MouseDoubleClickTime.
    IMGUI_API bool IsMouseReleased(int button); // was mouse button released (went from Down to !Down)
    IMGUI_API bool IsMouseDragging(int button = 0, float lock_threshold = -1.0f); // is mouse dragging. if lock_threshold < -1.0f uses io.MouseDraggingThreshold
    IMGUI_API bool IsMouseHoveringRect(const ImVec2& r_min, const ImVec2& r_max, bool clip = true); // is mouse hovering given bounding rect (in screen space). clipped by current clipping settings, but disregarding other considerations of focus/window ordering/popup-block.
    IMGUI_API bool IsMousePosValid(const ImVec2* mouse_pos = NULL); // by convention we use (-FLT_MAX,-FLT_MAX) to denote that there is no mouse
    IMGUI_API ImVec2 GetMousePos(); // shortcut to ImGui::GetIO().MousePos provided by user, to be consistent with other calls
    IMGUI_API ImVec2 GetMousePosOnOpeningCurrentPopup(); // retrieve backup of mouse position at the time of opening popup we have BeginPopup() into
    IMGUI_API ImVec2 GetMouseDragDelta(int button = 0, float lock_threshold = -1.0f); // return the delta from the initial clicking position while the mouse button is pressed or was just released. This is locked and returns 0.0f until the mouse moves past a distance threshold at least once. If lock_threshold < -1.0f uses io.MouseDraggingThreshold.
    IMGUI_API void ResetMouseDragDelta(int button = 0); //
    IMGUI_API ImGuiMouseCursor GetMouseCursor(); // get desired cursor type, reset in ImGui::NewFrame(), this is updated during the frame. valid before Render(). If you use software rendering by setting io.MouseDrawCursor ImGui will render those for you
    IMGUI_API void SetMouseCursor(ImGuiMouseCursor type); // set desired cursor type
    IMGUI_API void CaptureKeyboardFromApp(bool want_capture_keyboard_value = true); // attention: misleading name! manually override io.WantCaptureKeyboard flag next frame (said flag is entirely left for your application to handle). e.g. force capture keyboard when your widget is being hovered. This is equivalent to setting "io.WantCaptureKeyboard = want_capture_keyboard_value;" after the next NewFrame() call.
    IMGUI_API void CaptureMouseFromApp(bool want_capture_mouse_value = true); // attention: misleading name! manually override io.WantCaptureMouse flag next frame (said flag is entirely left for your application to handle). This is equivalent to setting "io.WantCaptureMouse = want_capture_mouse_value;" after the next NewFrame() call.

    // Clipboard Utilities (also see the LogToClipboard() function to capture or output text data to the clipboard)
    IMGUI_API const char* GetClipboardText();
    IMGUI_API void SetClipboardText(const char* text);

    // Settings/.Ini Utilities
    // - The disk functions are automatically called if io.IniFilename != NULL (default is "imgui.ini").
    // - Set io.IniFilename to NULL to load/save manually. Read io.WantSaveIniSettings description about handling .ini saving manually.
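    // Example of manual .ini handling (illustrative sketch; MySave(), my_ini_data and my_ini_size are placeholders for your own storage code):
    //      ImGuiIO& io = ImGui::GetIO();
    //      io.IniFilename = NULL;                                      // disable automatic load/save from disk
    //      ImGui::LoadIniSettingsFromMemory(my_ini_data, my_ini_size); // once, after CreateContext() and before NewFrame()
    //      ...
    //      if (io.WantSaveIniSettings)                                 // set when settings should be persisted
    //      {
    //          size_t size = 0;
    //          const char* data = ImGui::SaveIniSettingsToMemory(&size);
    //          MySave(data, size);                                     // persist by your own means
    //          io.WantSaveIniSettings = false;                         // clear the flag once handled
    //      }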
IMGUI_API void LoadIniSettingsFromDisk(const char* ini_filename); // call after CreateContext() and before the first call to NewFrame(). NewFrame() automatically calls LoadIniSettingsFromDisk(io.IniFilename). IMGUI_API void LoadIniSettingsFromMemory(const char* ini_data, size_t ini_size=0); // call after CreateContext() and before the first call to NewFrame() to provide .ini data from your own data source. IMGUI_API void SaveIniSettingsToDisk(const char* ini_filename); // this is automatically called (if io.IniFilename is not empty) a few seconds after any modification that should be reflected in the .ini file (and also by DestroyContext). IMGUI_API const char* SaveIniSettingsToMemory(size_t* out_ini_size = NULL); // return a zero-terminated string with the .ini data which you can save by your own mean. call when io.WantSaveIniSettings is set, then save data by your own mean and clear io.WantSaveIniSettings. // Memory Allocators // - All those functions are not reliant on the current context. // - If you reload the contents of imgui.cpp at runtime, you may need to call SetCurrentContext() + SetAllocatorFunctions() again because we use global storage for those. IMGUI_API void SetAllocatorFunctions(void* (*alloc_func)(size_t sz, void* user_data), void (*free_func)(void* ptr, void* user_data), void* user_data = NULL); IMGUI_API void* MemAlloc(size_t size); IMGUI_API void MemFree(void* ptr); } // namespace ImGui //----------------------------------------------------------------------------- // Flags & Enumerations //----------------------------------------------------------------------------- // Flags for ImGui::Begin() enum ImGuiWindowFlags_ { ImGuiWindowFlags_None = 0, ImGuiWindowFlags_NoTitleBar = 1 << 0, // Disable title-bar ImGuiWindowFlags_NoResize = 1 << 1, // Disable user resizing with the lower-right grip ImGuiWindowFlags_NoMove = 1 << 2, // Disable user moving the window ImGuiWindowFlags_NoScrollbar = 1 << 3, // Disable scrollbars (window can still scroll with mouse or programmatically) ImGuiWindowFlags_NoScrollWithMouse = 1 << 4, // Disable user vertically scrolling with mouse wheel. On child window, mouse wheel will be forwarded to the parent unless NoScrollbar is also set. ImGuiWindowFlags_NoCollapse = 1 << 5, // Disable user collapsing window by double-clicking on it ImGuiWindowFlags_AlwaysAutoResize = 1 << 6, // Resize every window to its content every frame ImGuiWindowFlags_NoBackground = 1 << 7, // Disable drawing background color (WindowBg, etc.) and outside border. Similar as using SetNextWindowBgAlpha(0.0f). ImGuiWindowFlags_NoSavedSettings = 1 << 8, // Never load/save settings in .ini file ImGuiWindowFlags_NoMouseInputs = 1 << 9, // Disable catching mouse, hovering test with pass through. ImGuiWindowFlags_MenuBar = 1 << 10, // Has a menu-bar ImGuiWindowFlags_HorizontalScrollbar = 1 << 11, // Allow horizontal scrollbar to appear (off by default). You may use SetNextWindowContentSize(ImVec2(width,0.0f)); prior to calling Begin() to specify width. Read code in imgui_demo in the "Horizontal Scrolling" section. ImGuiWindowFlags_NoFocusOnAppearing = 1 << 12, // Disable taking focus when transitioning from hidden to visible state ImGuiWindowFlags_NoBringToFrontOnFocus = 1 << 13, // Disable bringing window to front when taking focus (e.g. 
    // clicking on it or programmatically giving it focus)
    ImGuiWindowFlags_AlwaysVerticalScrollbar = 1 << 14, // Always show vertical scrollbar (even if ContentSize.y < Size.y)
    ImGuiWindowFlags_AlwaysHorizontalScrollbar = 1 << 15, // Always show horizontal scrollbar (even if ContentSize.x < Size.x)
    ImGuiWindowFlags_AlwaysUseWindowPadding = 1 << 16, // Ensure child windows without border uses style.WindowPadding (ignored by default for non-bordered child windows, because more convenient)
    ImGuiWindowFlags_NoNavInputs = 1 << 18, // No gamepad/keyboard navigation within the window
    ImGuiWindowFlags_NoNavFocus = 1 << 19, // No focusing toward this window with gamepad/keyboard navigation (e.g. skipped by CTRL+TAB)
    ImGuiWindowFlags_UnsavedDocument = 1 << 20, // Append '*' to title without affecting the ID, as a convenience to avoid using the ### operator. When used in a tab/docking context, tab is selected on closure and closure is deferred by one frame to allow code to cancel the closure (with a confirmation popup, etc.) without flicker.
    ImGuiWindowFlags_NoNav = ImGuiWindowFlags_NoNavInputs | ImGuiWindowFlags_NoNavFocus,
    ImGuiWindowFlags_NoDecoration = ImGuiWindowFlags_NoTitleBar | ImGuiWindowFlags_NoResize | ImGuiWindowFlags_NoScrollbar | ImGuiWindowFlags_NoCollapse,
    ImGuiWindowFlags_NoInputs = ImGuiWindowFlags_NoMouseInputs | ImGuiWindowFlags_NoNavInputs | ImGuiWindowFlags_NoNavFocus,

    // [Internal]
    ImGuiWindowFlags_NavFlattened = 1 << 23, // [BETA] Allow gamepad/keyboard navigation to cross over parent border to this child (only use on child that have no scrolling!)
    ImGuiWindowFlags_ChildWindow = 1 << 24, // Don't use! For internal use by BeginChild()
    ImGuiWindowFlags_Tooltip = 1 << 25, // Don't use! For internal use by BeginTooltip()
    ImGuiWindowFlags_Popup = 1 << 26, // Don't use! For internal use by BeginPopup()
    ImGuiWindowFlags_Modal = 1 << 27, // Don't use! For internal use by BeginPopupModal()
    ImGuiWindowFlags_ChildMenu = 1 << 28 // Don't use! For internal use by BeginMenu()

    // [Obsolete]
    //ImGuiWindowFlags_ShowBorders = 1 << 7, // --> Set style.FrameBorderSize=1.0f / style.WindowBorderSize=1.0f to enable borders around windows and items
    //ImGuiWindowFlags_ResizeFromAnySide = 1 << 17, // --> Set io.ConfigWindowsResizeFromEdges and make sure mouse cursors are supported by back-end (io.BackendFlags & ImGuiBackendFlags_HasMouseCursors)
};

// Flags for ImGui::InputText()
enum ImGuiInputTextFlags_
{
    ImGuiInputTextFlags_None = 0,
    ImGuiInputTextFlags_CharsDecimal = 1 << 0, // Allow 0123456789.+-*/
    ImGuiInputTextFlags_CharsHexadecimal = 1 << 1, // Allow 0123456789ABCDEFabcdef
    ImGuiInputTextFlags_CharsUppercase = 1 << 2, // Turn a..z into A..Z
    ImGuiInputTextFlags_CharsNoBlank = 1 << 3, // Filter out spaces, tabs
    ImGuiInputTextFlags_AutoSelectAll = 1 << 4, // Select entire text when first taking mouse focus
    ImGuiInputTextFlags_EnterReturnsTrue = 1 << 5, // Return 'true' when Enter is pressed (as opposed to every time the value was modified). Consider looking at the IsItemDeactivatedAfterEdit() function.
    ImGuiInputTextFlags_CallbackCompletion = 1 << 6, // Callback on pressing TAB (for completion handling)
    ImGuiInputTextFlags_CallbackHistory = 1 << 7, // Callback on pressing Up/Down arrows (for history handling)
    ImGuiInputTextFlags_CallbackAlways = 1 << 8, // Callback on each iteration. User code may query cursor position, modify text buffer.
    ImGuiInputTextFlags_CallbackCharFilter = 1 << 9, // Callback on character inputs to replace or discard them.
    // Modify 'EventChar' to replace or discard, or return 1 in callback to discard.
    ImGuiInputTextFlags_AllowTabInput = 1 << 10, // Pressing TAB input a '\t' character into the text field
    ImGuiInputTextFlags_CtrlEnterForNewLine = 1 << 11, // In multi-line mode, unfocus with Enter, add new line with Ctrl+Enter (default is opposite: unfocus with Ctrl+Enter, add line with Enter).
    ImGuiInputTextFlags_NoHorizontalScroll = 1 << 12, // Disable following the cursor horizontally
    ImGuiInputTextFlags_AlwaysInsertMode = 1 << 13, // Insert mode
    ImGuiInputTextFlags_ReadOnly = 1 << 14, // Read-only mode
    ImGuiInputTextFlags_Password = 1 << 15, // Password mode, display all characters as '*'
    ImGuiInputTextFlags_NoUndoRedo = 1 << 16, // Disable undo/redo. Note that input text owns the text data while active, if you want to provide your own undo/redo stack you need e.g. to call ClearActiveID().
    ImGuiInputTextFlags_CharsScientific = 1 << 17, // Allow 0123456789.+-*/eE (Scientific notation input)
    ImGuiInputTextFlags_CallbackResize = 1 << 18, // Callback on buffer capacity changes request (beyond 'buf_size' parameter value), allowing the string to grow. Notify when the string wants to be resized (for string types which hold a cache of their Size). You will be provided a new BufSize in the callback and NEED to honor it. (see misc/cpp/imgui_stdlib.h for an example of using this)

    // [Internal]
    ImGuiInputTextFlags_Multiline = 1 << 20, // For internal use by InputTextMultiline()
    ImGuiInputTextFlags_NoMarkEdited = 1 << 21 // For internal use by functions using InputText() before reformatting data
};

// Flags for ImGui::TreeNodeEx(), ImGui::CollapsingHeader*()
enum ImGuiTreeNodeFlags_
{
    ImGuiTreeNodeFlags_None = 0,
    ImGuiTreeNodeFlags_Selected = 1 << 0, // Draw as selected
    ImGuiTreeNodeFlags_Framed = 1 << 1, // Full colored frame (e.g. for CollapsingHeader)
    ImGuiTreeNodeFlags_AllowItemOverlap = 1 << 2, // Hit testing to allow subsequent widgets to overlap this one
    ImGuiTreeNodeFlags_NoTreePushOnOpen = 1 << 3, // Don't do a TreePush() when open (e.g. for CollapsingHeader) = no extra indent nor pushing on ID stack
    ImGuiTreeNodeFlags_NoAutoOpenOnLog = 1 << 4, // Don't automatically and temporarily open node when Logging is active (by default logging will automatically open tree nodes)
    ImGuiTreeNodeFlags_DefaultOpen = 1 << 5, // Default node to be open
    ImGuiTreeNodeFlags_OpenOnDoubleClick = 1 << 6, // Need double-click to open node
    ImGuiTreeNodeFlags_OpenOnArrow = 1 << 7, // Only open when clicking on the arrow part. If ImGuiTreeNodeFlags_OpenOnDoubleClick is also set, single-click arrow or double-click all box to open.
    ImGuiTreeNodeFlags_Leaf = 1 << 8, // No collapsing, no arrow (use as a convenience for leaf nodes).
    ImGuiTreeNodeFlags_Bullet = 1 << 9, // Display a bullet instead of arrow
    ImGuiTreeNodeFlags_FramePadding = 1 << 10, // Use FramePadding (even for an unframed text node) to vertically align text baseline to regular widget height. Equivalent to calling AlignTextToFramePadding().
//ImGuiTreeNodeFlags_SpanAllAvailWidth = 1 << 11, // FIXME: TODO: Extend hit box horizontally even if not framed //ImGuiTreeNodeFlags_NoScrollOnOpen = 1 << 12, // FIXME: TODO: Disable automatic scroll on TreePop() if node got just open and contents is not visible ImGuiTreeNodeFlags_NavLeftJumpsBackHere = 1 << 13, // (WIP) Nav: left direction may move to this TreeNode() from any of its child (items submitted between TreeNode and TreePop) ImGuiTreeNodeFlags_CollapsingHeader = ImGuiTreeNodeFlags_Framed | ImGuiTreeNodeFlags_NoTreePushOnOpen | ImGuiTreeNodeFlags_NoAutoOpenOnLog // Obsolete names (will be removed) #ifndef IMGUI_DISABLE_OBSOLETE_FUNCTIONS , ImGuiTreeNodeFlags_AllowOverlapMode = ImGuiTreeNodeFlags_AllowItemOverlap // [renamed in 1.53] #endif }; // Flags for ImGui::Selectable() enum ImGuiSelectableFlags_ { ImGuiSelectableFlags_None = 0, ImGuiSelectableFlags_DontClosePopups = 1 << 0, // Clicking this don't close parent popup window ImGuiSelectableFlags_SpanAllColumns = 1 << 1, // Selectable frame can span all columns (text will still fit in current column) ImGuiSelectableFlags_AllowDoubleClick = 1 << 2, // Generate press events on double clicks too ImGuiSelectableFlags_Disabled = 1 << 3 // Cannot be selected, display grayed out text }; // Flags for ImGui::BeginCombo() enum ImGuiComboFlags_ { ImGuiComboFlags_None = 0, ImGuiComboFlags_PopupAlignLeft = 1 << 0, // Align the popup toward the left by default ImGuiComboFlags_HeightSmall = 1 << 1, // Max ~4 items visible. Tip: If you want your combo popup to be a specific size you can use SetNextWindowSizeConstraints() prior to calling BeginCombo() ImGuiComboFlags_HeightRegular = 1 << 2, // Max ~8 items visible (default) ImGuiComboFlags_HeightLarge = 1 << 3, // Max ~20 items visible ImGuiComboFlags_HeightLargest = 1 << 4, // As many fitting items as possible ImGuiComboFlags_NoArrowButton = 1 << 5, // Display on the preview box without the square arrow button ImGuiComboFlags_NoPreview = 1 << 6, // Display only a square arrow button ImGuiComboFlags_HeightMask_ = ImGuiComboFlags_HeightSmall | ImGuiComboFlags_HeightRegular | ImGuiComboFlags_HeightLarge | ImGuiComboFlags_HeightLargest }; // Flags for ImGui::BeginTabBar() enum ImGuiTabBarFlags_ { ImGuiTabBarFlags_None = 0, ImGuiTabBarFlags_Reorderable = 1 << 0, // Allow manually dragging tabs to re-order them + New tabs are appended at the end of list ImGuiTabBarFlags_AutoSelectNewTabs = 1 << 1, // Automatically select new tabs when they appear ImGuiTabBarFlags_TabListPopupButton = 1 << 2, // Disable buttons to open the tab list popup ImGuiTabBarFlags_NoCloseWithMiddleMouseButton = 1 << 3, // Disable behavior of closing tabs (that are submitted with p_open != NULL) with middle mouse button. You can still repro this behavior on user's side with if (IsItemHovered() && IsMouseClicked(2)) *p_open = false. 
ImGuiTabBarFlags_NoTabListScrollingButtons = 1 << 4, // Disable scrolling buttons (apply when fitting policy is ImGuiTabBarFlags_FittingPolicyScroll) ImGuiTabBarFlags_NoTooltip = 1 << 5, // Disable tooltips when hovering a tab ImGuiTabBarFlags_FittingPolicyResizeDown = 1 << 6, // Resize tabs when they don't fit ImGuiTabBarFlags_FittingPolicyScroll = 1 << 7, // Add scroll buttons when tabs don't fit ImGuiTabBarFlags_FittingPolicyMask_ = ImGuiTabBarFlags_FittingPolicyResizeDown | ImGuiTabBarFlags_FittingPolicyScroll, ImGuiTabBarFlags_FittingPolicyDefault_ = ImGuiTabBarFlags_FittingPolicyResizeDown }; // Flags for ImGui::BeginTabItem() enum ImGuiTabItemFlags_ { ImGuiTabItemFlags_None = 0, ImGuiTabItemFlags_UnsavedDocument = 1 << 0, // Append '*' to title without affecting the ID, as a convenience to avoid using the ### operator. Also: tab is selected on closure and closure is deferred by one frame to allow code to undo it without flicker. ImGuiTabItemFlags_SetSelected = 1 << 1, // Trigger flag to programmatically make the tab selected when calling BeginTabItem() ImGuiTabItemFlags_NoCloseWithMiddleMouseButton = 1 << 2, // Disable behavior of closing tabs (that are submitted with p_open != NULL) with middle mouse button. You can still repro this behavior on user's side with if (IsItemHovered() && IsMouseClicked(2)) *p_open = false. ImGuiTabItemFlags_NoPushId = 1 << 3 // Don't call PushID(tab->ID)/PopID() on BeginTabItem()/EndTabItem() }; // Flags for ImGui::IsWindowFocused() enum ImGuiFocusedFlags_ { ImGuiFocusedFlags_None = 0, ImGuiFocusedFlags_ChildWindows = 1 << 0, // IsWindowFocused(): Return true if any children of the window is focused ImGuiFocusedFlags_RootWindow = 1 << 1, // IsWindowFocused(): Test from root window (top most parent of the current hierarchy) ImGuiFocusedFlags_AnyWindow = 1 << 2, // IsWindowFocused(): Return true if any window is focused. Important: If you are trying to tell how to dispatch your low-level inputs, do NOT use this. Use ImGui::GetIO().WantCaptureMouse instead. ImGuiFocusedFlags_RootAndChildWindows = ImGuiFocusedFlags_RootWindow | ImGuiFocusedFlags_ChildWindows }; // Flags for ImGui::IsItemHovered(), ImGui::IsWindowHovered() // Note: if you are trying to check whether your mouse should be dispatched to imgui or to your app, you should use the 'io.WantCaptureMouse' boolean for that. Please read the FAQ! // Note: windows with the ImGuiWindowFlags_NoInputs flag are ignored by IsWindowHovered() calls. enum ImGuiHoveredFlags_ { ImGuiHoveredFlags_None = 0, // Return true if directly over the item/window, not obstructed by another window, not obstructed by an active popup or modal blocking inputs under them. ImGuiHoveredFlags_ChildWindows = 1 << 0, // IsWindowHovered() only: Return true if any children of the window is hovered ImGuiHoveredFlags_RootWindow = 1 << 1, // IsWindowHovered() only: Test from root window (top most parent of the current hierarchy) ImGuiHoveredFlags_AnyWindow = 1 << 2, // IsWindowHovered() only: Return true if any window is hovered ImGuiHoveredFlags_AllowWhenBlockedByPopup = 1 << 3, // Return true even if a popup window is normally blocking access to this item/window //ImGuiHoveredFlags_AllowWhenBlockedByModal = 1 << 4, // Return true even if a modal popup window is normally blocking access to this item/window. FIXME-TODO: Unavailable yet. ImGuiHoveredFlags_AllowWhenBlockedByActiveItem = 1 << 5, // Return true even if an active item is blocking access to this item/window. Useful for Drag and Drop patterns. 
ImGuiHoveredFlags_AllowWhenOverlapped = 1 << 6, // Return true even if the position is obstructed or overlapped by another window ImGuiHoveredFlags_AllowWhenDisabled = 1 << 7, // Return true even if the item is disabled ImGuiHoveredFlags_RectOnly = ImGuiHoveredFlags_AllowWhenBlockedByPopup | ImGuiHoveredFlags_AllowWhenBlockedByActiveItem | ImGuiHoveredFlags_AllowWhenOverlapped, ImGuiHoveredFlags_RootAndChildWindows = ImGuiHoveredFlags_RootWindow | ImGuiHoveredFlags_ChildWindows }; // Flags for ImGui::BeginDragDropSource(), ImGui::AcceptDragDropPayload() enum ImGuiDragDropFlags_ { ImGuiDragDropFlags_None = 0, // BeginDragDropSource() flags ImGuiDragDropFlags_SourceNoPreviewTooltip = 1 << 0, // By default, a successful call to BeginDragDropSource opens a tooltip so you can display a preview or description of the source contents. This flag disable this behavior. ImGuiDragDropFlags_SourceNoDisableHover = 1 << 1, // By default, when dragging we clear data so that IsItemHovered() will return false, to avoid subsequent user code submitting tooltips. This flag disable this behavior so you can still call IsItemHovered() on the source item. ImGuiDragDropFlags_SourceNoHoldToOpenOthers = 1 << 2, // Disable the behavior that allows to open tree nodes and collapsing header by holding over them while dragging a source item. ImGuiDragDropFlags_SourceAllowNullID = 1 << 3, // Allow items such as Text(), Image() that have no unique identifier to be used as drag source, by manufacturing a temporary identifier based on their window-relative position. This is extremely unusual within the dear imgui ecosystem and so we made it explicit. ImGuiDragDropFlags_SourceExtern = 1 << 4, // External source (from outside of dear imgui), won't attempt to read current item/window info. Will always return true. Only one Extern source can be active simultaneously. ImGuiDragDropFlags_SourceAutoExpirePayload = 1 << 5, // Automatically expire the payload if the source cease to be submitted (otherwise payloads are persisting while being dragged) // AcceptDragDropPayload() flags ImGuiDragDropFlags_AcceptBeforeDelivery = 1 << 10, // AcceptDragDropPayload() will returns true even before the mouse button is released. You can then call IsDelivery() to test if the payload needs to be delivered. ImGuiDragDropFlags_AcceptNoDrawDefaultRect = 1 << 11, // Do not draw the default highlight rectangle when hovering over target. ImGuiDragDropFlags_AcceptNoPreviewTooltip = 1 << 12, // Request hiding the BeginDragDropSource tooltip from the BeginDragDropTarget site. ImGuiDragDropFlags_AcceptPeekOnly = ImGuiDragDropFlags_AcceptBeforeDelivery | ImGuiDragDropFlags_AcceptNoDrawDefaultRect // For peeking ahead and inspecting the payload before delivery. }; // Standard Drag and Drop payload types. You can define you own payload types using short strings. Types starting with '_' are defined by Dear ImGui. #define IMGUI_PAYLOAD_TYPE_COLOR_3F "_COL3F" // float[3]: Standard type for colors, without alpha. User code may use this type. #define IMGUI_PAYLOAD_TYPE_COLOR_4F "_COL4F" // float[4]: Standard type for colors. User code may use this type. 
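
// Example usage of the drag and drop API (illustrative sketch; 'col' is an arbitrary float[3] owned by the application):
//      ImGui::Button("drag me");                               // any item can act as a drag source once active
//      if (ImGui::BeginDragDropSource())
//      {
//          ImGui::SetDragDropPayload(IMGUI_PAYLOAD_TYPE_COLOR_3F, col, sizeof(float) * 3);
//          ImGui::Text("Dragging a color");                    // contents of the drag preview tooltip
//          ImGui::EndDragDropSource();
//      }
//      ImGui::Button("drop here");                             // any item can act as a drop target
//      if (ImGui::BeginDragDropTarget())
//      {
//          if (const ImGuiPayload* payload = ImGui::AcceptDragDropPayload(IMGUI_PAYLOAD_TYPE_COLOR_3F))
//              memcpy(col, payload->Data, sizeof(float) * 3);
//          ImGui::EndDragDropTarget();
//      }
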
// A primary data type enum ImGuiDataType_ { ImGuiDataType_S8, // signed char / char (with sensible compilers) ImGuiDataType_U8, // unsigned char ImGuiDataType_S16, // short ImGuiDataType_U16, // unsigned short ImGuiDataType_S32, // int ImGuiDataType_U32, // unsigned int ImGuiDataType_S64, // long long / __int64 ImGuiDataType_U64, // unsigned long long / unsigned __int64 ImGuiDataType_Float, // float ImGuiDataType_Double, // double ImGuiDataType_COUNT }; // A cardinal direction enum ImGuiDir_ { ImGuiDir_None = -1, ImGuiDir_Left = 0, ImGuiDir_Right = 1, ImGuiDir_Up = 2, ImGuiDir_Down = 3, ImGuiDir_COUNT }; // User fill ImGuiIO.KeyMap[] array with indices into the ImGuiIO.KeysDown[512] array enum ImGuiKey_ { ImGuiKey_Tab, ImGuiKey_LeftArrow, ImGuiKey_RightArrow, ImGuiKey_UpArrow, ImGuiKey_DownArrow, ImGuiKey_PageUp, ImGuiKey_PageDown, ImGuiKey_Home, ImGuiKey_End, ImGuiKey_Insert, ImGuiKey_Delete, ImGuiKey_Backspace, ImGuiKey_Space, ImGuiKey_Enter, ImGuiKey_Escape, ImGuiKey_KeyPadEnter, ImGuiKey_A, // for text edit CTRL+A: select all ImGuiKey_C, // for text edit CTRL+C: copy ImGuiKey_V, // for text edit CTRL+V: paste ImGuiKey_X, // for text edit CTRL+X: cut ImGuiKey_Y, // for text edit CTRL+Y: redo ImGuiKey_Z, // for text edit CTRL+Z: undo ImGuiKey_COUNT }; // Gamepad/Keyboard directional navigation // Keyboard: Set io.ConfigFlags |= ImGuiConfigFlags_NavEnableKeyboard to enable. NewFrame() will automatically fill io.NavInputs[] based on your io.KeysDown[] + io.KeyMap[] arrays. // Gamepad: Set io.ConfigFlags |= ImGuiConfigFlags_NavEnableGamepad to enable. Back-end: set ImGuiBackendFlags_HasGamepad and fill the io.NavInputs[] fields before calling NewFrame(). Note that io.NavInputs[] is cleared by EndFrame(). // Read instructions in imgui.cpp for more details. Download PNG/PSD at http://goo.gl/9LgVZW. enum ImGuiNavInput_ { // Gamepad Mapping ImGuiNavInput_Activate, // activate / open / toggle / tweak value // e.g. Cross (PS4), A (Xbox), A (Switch), Space (Keyboard) ImGuiNavInput_Cancel, // cancel / close / exit // e.g. Circle (PS4), B (Xbox), B (Switch), Escape (Keyboard) ImGuiNavInput_Input, // text input / on-screen keyboard // e.g. Triang.(PS4), Y (Xbox), X (Switch), Return (Keyboard) ImGuiNavInput_Menu, // tap: toggle menu / hold: focus, move, resize // e.g. Square (PS4), X (Xbox), Y (Switch), Alt (Keyboard) ImGuiNavInput_DpadLeft, // move / tweak / resize window (w/ PadMenu) // e.g. D-pad Left/Right/Up/Down (Gamepads), Arrow keys (Keyboard) ImGuiNavInput_DpadRight, // ImGuiNavInput_DpadUp, // ImGuiNavInput_DpadDown, // ImGuiNavInput_LStickLeft, // scroll / move window (w/ PadMenu) // e.g. Left Analog Stick Left/Right/Up/Down ImGuiNavInput_LStickRight, // ImGuiNavInput_LStickUp, // ImGuiNavInput_LStickDown, // ImGuiNavInput_FocusPrev, // next window (w/ PadMenu) // e.g. L1 or L2 (PS4), LB or LT (Xbox), L or ZL (Switch) ImGuiNavInput_FocusNext, // prev window (w/ PadMenu) // e.g. R1 or R2 (PS4), RB or RT (Xbox), R or ZL (Switch) ImGuiNavInput_TweakSlow, // slower tweaks // e.g. L1 or L2 (PS4), LB or LT (Xbox), L or ZL (Switch) ImGuiNavInput_TweakFast, // faster tweaks // e.g. R1 or R2 (PS4), RB or RT (Xbox), R or ZL (Switch) // [Internal] Don't use directly! This is used internally to differentiate keyboard from gamepad inputs for behaviors that require to differentiate them. // Keyboard behavior that have no corresponding gamepad mapping (e.g. CTRL+TAB) will be directly reading from io.KeysDown[] instead of io.NavInputs[]. 
ImGuiNavInput_KeyMenu_, // toggle menu // = io.KeyAlt ImGuiNavInput_KeyTab_, // tab // = Tab key ImGuiNavInput_KeyLeft_, // move left // = Arrow keys ImGuiNavInput_KeyRight_, // move right ImGuiNavInput_KeyUp_, // move up ImGuiNavInput_KeyDown_, // move down ImGuiNavInput_COUNT, ImGuiNavInput_InternalStart_ = ImGuiNavInput_KeyMenu_ }; // Configuration flags stored in io.ConfigFlags. Set by user/application. enum ImGuiConfigFlags_ { ImGuiConfigFlags_None = 0, ImGuiConfigFlags_NavEnableKeyboard = 1 << 0, // Master keyboard navigation enable flag. NewFrame() will automatically fill io.NavInputs[] based on io.KeysDown[]. ImGuiConfigFlags_NavEnableGamepad = 1 << 1, // Master gamepad navigation enable flag. This is mostly to instruct your imgui back-end to fill io.NavInputs[]. Back-end also needs to set ImGuiBackendFlags_HasGamepad. ImGuiConfigFlags_NavEnableSetMousePos = 1 << 2, // Instruct navigation to move the mouse cursor. May be useful on TV/console systems where moving a virtual mouse is awkward. Will update io.MousePos and set io.WantSetMousePos=true. If enabled you MUST honor io.WantSetMousePos requests in your binding, otherwise ImGui will react as if the mouse is jumping around back and forth. ImGuiConfigFlags_NavNoCaptureKeyboard = 1 << 3, // Instruct navigation to not set the io.WantCaptureKeyboard flag when io.NavActive is set. ImGuiConfigFlags_NoMouse = 1 << 4, // Instruct imgui to clear mouse position/buttons in NewFrame(). This allows ignoring the mouse information set by the back-end. ImGuiConfigFlags_NoMouseCursorChange = 1 << 5, // Instruct back-end to not alter mouse cursor shape and visibility. Use if the back-end cursor changes are interfering with yours and you don't want to use SetMouseCursor() to change mouse cursor. You may want to honor requests from imgui by reading GetMouseCursor() yourself instead. // User storage (to allow your back-end/engine to communicate to code that may be shared between multiple projects. Those flags are not used by core Dear ImGui) ImGuiConfigFlags_IsSRGB = 1 << 20, // Application is SRGB-aware. ImGuiConfigFlags_IsTouchScreen = 1 << 21 // Application is using a touch screen instead of a mouse. }; // Back-end capabilities flags stored in io.BackendFlags. Set by imgui_impl_xxx or custom back-end. enum ImGuiBackendFlags_ { ImGuiBackendFlags_None = 0, ImGuiBackendFlags_HasGamepad = 1 << 0, // Back-end Platform supports gamepad and currently has one connected. ImGuiBackendFlags_HasMouseCursors = 1 << 1, // Back-end Platform supports honoring GetMouseCursor() value to change the OS cursor shape. ImGuiBackendFlags_HasSetMousePos = 1 << 2, // Back-end Platform supports io.WantSetMousePos requests to reposition the OS mouse position (only used if ImGuiConfigFlags_NavEnableSetMousePos is set). ImGuiBackendFlags_RendererHasVtxOffset = 1 << 3 // Back-end Renderer supports ImDrawCmd::VtxOffset. This enables output of large meshes (64K+ vertices) while still using 16-bits indices. 
}; // Enumeration for PushStyleColor() / PopStyleColor() enum ImGuiCol_ { ImGuiCol_Text, ImGuiCol_TextDisabled, ImGuiCol_WindowBg, // Background of normal windows ImGuiCol_ChildBg, // Background of child windows ImGuiCol_PopupBg, // Background of popups, menus, tooltips windows ImGuiCol_Border, ImGuiCol_BorderShadow, ImGuiCol_FrameBg, // Background of checkbox, radio button, plot, slider, text input ImGuiCol_FrameBgHovered, ImGuiCol_FrameBgActive, ImGuiCol_TitleBg, ImGuiCol_TitleBgActive, ImGuiCol_TitleBgCollapsed, ImGuiCol_MenuBarBg, ImGuiCol_ScrollbarBg, ImGuiCol_ScrollbarGrab, ImGuiCol_ScrollbarGrabHovered, ImGuiCol_ScrollbarGrabActive, ImGuiCol_CheckMark, ImGuiCol_SliderGrab, ImGuiCol_SliderGrabActive, ImGuiCol_Button, ImGuiCol_ButtonHovered, ImGuiCol_ButtonActive, ImGuiCol_Header, // Header* colors are used for CollapsingHeader, TreeNode, Selectable, MenuItem ImGuiCol_HeaderHovered, ImGuiCol_HeaderActive, ImGuiCol_Separator, ImGuiCol_SeparatorHovered, ImGuiCol_SeparatorActive, ImGuiCol_ResizeGrip, ImGuiCol_ResizeGripHovered, ImGuiCol_ResizeGripActive, ImGuiCol_Tab, ImGuiCol_TabHovered, ImGuiCol_TabActive, ImGuiCol_TabUnfocused, ImGuiCol_TabUnfocusedActive, ImGuiCol_PlotLines, ImGuiCol_PlotLinesHovered, ImGuiCol_PlotHistogram, ImGuiCol_PlotHistogramHovered, ImGuiCol_TextSelectedBg, ImGuiCol_DragDropTarget, ImGuiCol_NavHighlight, // Gamepad/keyboard: current highlighted item ImGuiCol_NavWindowingHighlight, // Highlight window when using CTRL+TAB ImGuiCol_NavWindowingDimBg, // Darken/colorize entire screen behind the CTRL+TAB window list, when active ImGuiCol_ModalWindowDimBg, // Darken/colorize entire screen behind a modal window, when one is active ImGuiCol_COUNT // Obsolete names (will be removed) #ifndef IMGUI_DISABLE_OBSOLETE_FUNCTIONS , ImGuiCol_ModalWindowDarkening = ImGuiCol_ModalWindowDimBg // [renamed in 1.63] , ImGuiCol_ChildWindowBg = ImGuiCol_ChildBg // [renamed in 1.53] //ImGuiCol_CloseButton, ImGuiCol_CloseButtonActive, ImGuiCol_CloseButtonHovered, // [unused since 1.60+] the close button now uses regular button colors. //ImGuiCol_ComboBg, // [unused since 1.53+] ComboBg has been merged with PopupBg, so a redirect isn't accurate. #endif }; // Enumeration for PushStyleVar() / PopStyleVar() to temporarily modify the ImGuiStyle structure. // NB: the enum only refers to fields of ImGuiStyle which makes sense to be pushed/popped inside UI code. During initialization, feel free to just poke into ImGuiStyle directly. // NB: if changing this enum, you need to update the associated internal table GStyleVarInfo[] accordingly. This is where we link enum values to members offset/type. 
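// Example of temporarily overriding style colors/variables during the frame (illustrative sketch; the specific color and
// rounding values are arbitrary):
//      ImGui::PushStyleColor(ImGuiCol_Button, ImVec4(0.7f, 0.2f, 0.2f, 1.0f));
//      ImGui::PushStyleVar(ImGuiStyleVar_FrameRounding, 4.0f);
//      ImGui::Button("Styled button");
//      ImGui::PopStyleVar();
//      ImGui::PopStyleColor();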
enum ImGuiStyleVar_ { // Enum name --------------------- // Member in ImGuiStyle structure (see ImGuiStyle for descriptions) ImGuiStyleVar_Alpha, // float Alpha ImGuiStyleVar_WindowPadding, // ImVec2 WindowPadding ImGuiStyleVar_WindowRounding, // float WindowRounding ImGuiStyleVar_WindowBorderSize, // float WindowBorderSize ImGuiStyleVar_WindowMinSize, // ImVec2 WindowMinSize ImGuiStyleVar_WindowTitleAlign, // ImVec2 WindowTitleAlign ImGuiStyleVar_ChildRounding, // float ChildRounding ImGuiStyleVar_ChildBorderSize, // float ChildBorderSize ImGuiStyleVar_PopupRounding, // float PopupRounding ImGuiStyleVar_PopupBorderSize, // float PopupBorderSize ImGuiStyleVar_FramePadding, // ImVec2 FramePadding ImGuiStyleVar_FrameRounding, // float FrameRounding ImGuiStyleVar_FrameBorderSize, // float FrameBorderSize ImGuiStyleVar_ItemSpacing, // ImVec2 ItemSpacing ImGuiStyleVar_ItemInnerSpacing, // ImVec2 ItemInnerSpacing ImGuiStyleVar_IndentSpacing, // float IndentSpacing ImGuiStyleVar_ScrollbarSize, // float ScrollbarSize ImGuiStyleVar_ScrollbarRounding, // float ScrollbarRounding ImGuiStyleVar_GrabMinSize, // float GrabMinSize ImGuiStyleVar_GrabRounding, // float GrabRounding ImGuiStyleVar_TabRounding, // float TabRounding ImGuiStyleVar_ButtonTextAlign, // ImVec2 ButtonTextAlign ImGuiStyleVar_SelectableTextAlign, // ImVec2 SelectableTextAlign ImGuiStyleVar_COUNT // Obsolete names (will be removed) #ifndef IMGUI_DISABLE_OBSOLETE_FUNCTIONS , ImGuiStyleVar_Count_ = ImGuiStyleVar_COUNT // [renamed in 1.60] , ImGuiStyleVar_ChildWindowRounding = ImGuiStyleVar_ChildRounding // [renamed in 1.53] #endif }; // Flags for ColorEdit3() / ColorEdit4() / ColorPicker3() / ColorPicker4() / ColorButton() enum ImGuiColorEditFlags_ { ImGuiColorEditFlags_None = 0, ImGuiColorEditFlags_NoAlpha = 1 << 1, // // ColorEdit, ColorPicker, ColorButton: ignore Alpha component (will only read 3 components from the input pointer). ImGuiColorEditFlags_NoPicker = 1 << 2, // // ColorEdit: disable picker when clicking on colored square. ImGuiColorEditFlags_NoOptions = 1 << 3, // // ColorEdit: disable toggling options menu when right-clicking on inputs/small preview. ImGuiColorEditFlags_NoSmallPreview = 1 << 4, // // ColorEdit, ColorPicker: disable colored square preview next to the inputs. (e.g. to show only the inputs) ImGuiColorEditFlags_NoInputs = 1 << 5, // // ColorEdit, ColorPicker: disable inputs sliders/text widgets (e.g. to show only the small preview colored square). ImGuiColorEditFlags_NoTooltip = 1 << 6, // // ColorEdit, ColorPicker, ColorButton: disable tooltip when hovering the preview. ImGuiColorEditFlags_NoLabel = 1 << 7, // // ColorEdit, ColorPicker: disable display of inline text label (the label is still forwarded to the tooltip and picker). ImGuiColorEditFlags_NoSidePreview = 1 << 8, // // ColorPicker: disable bigger color preview on right side of the picker, use small colored square preview instead. ImGuiColorEditFlags_NoDragDrop = 1 << 9, // // ColorEdit: disable drag and drop target. ColorButton: disable drag and drop source. // User Options (right-click on widget to change some of them). ImGuiColorEditFlags_AlphaBar = 1 << 16, // // ColorEdit, ColorPicker: show vertical alpha bar/gradient in picker. ImGuiColorEditFlags_AlphaPreview = 1 << 17, // // ColorEdit, ColorPicker, ColorButton: display preview as a transparent color over a checkerboard, instead of opaque. ImGuiColorEditFlags_AlphaPreviewHalf= 1 << 18, // // ColorEdit, ColorPicker, ColorButton: display half opaque / half checkerboard, instead of opaque. 
ImGuiColorEditFlags_HDR = 1 << 19, // // (WIP) ColorEdit: Currently only disable 0.0f..1.0f limits in RGBA edition (note: you probably want to use ImGuiColorEditFlags_Float flag as well). ImGuiColorEditFlags_DisplayRGB = 1 << 20, // [Display] // ColorEdit: override _display_ type among RGB/HSV/Hex. ColorPicker: select any combination using one or more of RGB/HSV/Hex. ImGuiColorEditFlags_DisplayHSV = 1 << 21, // [Display] // " ImGuiColorEditFlags_DisplayHex = 1 << 22, // [Display] // " ImGuiColorEditFlags_Uint8 = 1 << 23, // [DataType] // ColorEdit, ColorPicker, ColorButton: _display_ values formatted as 0..255. ImGuiColorEditFlags_Float = 1 << 24, // [DataType] // ColorEdit, ColorPicker, ColorButton: _display_ values formatted as 0.0f..1.0f floats instead of 0..255 integers. No round-trip of value via integers. ImGuiColorEditFlags_PickerHueBar = 1 << 25, // [Picker] // ColorPicker: bar for Hue, rectangle for Sat/Value. ImGuiColorEditFlags_PickerHueWheel = 1 << 26, // [Picker] // ColorPicker: wheel for Hue, triangle for Sat/Value. ImGuiColorEditFlags_InputRGB = 1 << 27, // [Input] // ColorEdit, ColorPicker: input and output data in RGB format. ImGuiColorEditFlags_InputHSV = 1 << 28, // [Input] // ColorEdit, ColorPicker: input and output data in HSV format. // Defaults Options. You can set application defaults using SetColorEditOptions(). The intent is that you probably don't want to // override them in most of your calls. Let the user choose via the option menu and/or call SetColorEditOptions() once during startup. ImGuiColorEditFlags__OptionsDefault = ImGuiColorEditFlags_Uint8|ImGuiColorEditFlags_DisplayRGB|ImGuiColorEditFlags_InputRGB|ImGuiColorEditFlags_PickerHueBar, // [Internal] Masks ImGuiColorEditFlags__DisplayMask = ImGuiColorEditFlags_DisplayRGB|ImGuiColorEditFlags_DisplayHSV|ImGuiColorEditFlags_DisplayHex, ImGuiColorEditFlags__DataTypeMask = ImGuiColorEditFlags_Uint8|ImGuiColorEditFlags_Float, ImGuiColorEditFlags__PickerMask = ImGuiColorEditFlags_PickerHueWheel|ImGuiColorEditFlags_PickerHueBar, ImGuiColorEditFlags__InputMask = ImGuiColorEditFlags_InputRGB|ImGuiColorEditFlags_InputHSV // Obsolete names (will be removed) #ifndef IMGUI_DISABLE_OBSOLETE_FUNCTIONS , ImGuiColorEditFlags_RGB = ImGuiColorEditFlags_DisplayRGB, ImGuiColorEditFlags_HSV = ImGuiColorEditFlags_DisplayHSV, ImGuiColorEditFlags_HEX = ImGuiColorEditFlags_DisplayHex // [renamed in 1.69] #endif }; // Enumeration for GetMouseCursor() // User code may request binding to display given cursor by calling SetMouseCursor(), which is why we have some cursors that are marked unused here enum ImGuiMouseCursor_ { ImGuiMouseCursor_None = -1, ImGuiMouseCursor_Arrow = 0, ImGuiMouseCursor_TextInput, // When hovering over InputText, etc. ImGuiMouseCursor_ResizeAll, // (Unused by Dear ImGui functions) ImGuiMouseCursor_ResizeNS, // When hovering over an horizontal border ImGuiMouseCursor_ResizeEW, // When hovering over a vertical border or a column ImGuiMouseCursor_ResizeNESW, // When hovering over the bottom-left corner of a window ImGuiMouseCursor_ResizeNWSE, // When hovering over the bottom-right corner of a window ImGuiMouseCursor_Hand, // (Unused by Dear ImGui functions. Use for e.g. hyperlinks) ImGuiMouseCursor_COUNT // Obsolete names (will be removed) #ifndef IMGUI_DISABLE_OBSOLETE_FUNCTIONS , ImGuiMouseCursor_Count_ = ImGuiMouseCursor_COUNT // [renamed in 1.60] #endif }; // Enumateration for ImGui::SetWindow***(), SetNextWindow***(), SetNextItem***() functions // Represent a condition. // Important: Treat as a regular enum! 
// Do NOT combine multiple values using binary operators! All the functions above treat 0 as a shortcut to ImGuiCond_Always.
enum ImGuiCond_
{
    ImGuiCond_Always = 1 << 0, // Set the variable
    ImGuiCond_Once = 1 << 1, // Set the variable once per runtime session (only the first call will succeed)
    ImGuiCond_FirstUseEver = 1 << 2, // Set the variable if the object/window has no persistently saved data (no entry in .ini file)
    ImGuiCond_Appearing = 1 << 3 // Set the variable if the object/window is appearing after being hidden/inactive (or the first time)
};

//-----------------------------------------------------------------------------
// Helpers: Memory allocations macros
// IM_MALLOC(), IM_FREE(), IM_NEW(), IM_PLACEMENT_NEW(), IM_DELETE()
// We call C++ constructor on own allocated memory via the placement "new(ptr) Type()" syntax.
// Defining a custom placement new() with a dummy parameter allows us to bypass including <new> which on some platforms complains when user has disabled exceptions.
//-----------------------------------------------------------------------------

struct ImNewDummy {};
inline void* operator new(size_t, ImNewDummy, void* ptr) { return ptr; }
inline void operator delete(void*, ImNewDummy, void*) {} // This is only required so we can use the symmetrical new()
#define IM_ALLOC(_SIZE) ImGui::MemAlloc(_SIZE)
#define IM_FREE(_PTR) ImGui::MemFree(_PTR)
#define IM_PLACEMENT_NEW(_PTR) new(ImNewDummy(), _PTR)
#define IM_NEW(_TYPE) new(ImNewDummy(), ImGui::MemAlloc(sizeof(_TYPE))) _TYPE
template<typename T> void IM_DELETE(T* p) { if (p) { p->~T(); ImGui::MemFree(p); } }

//-----------------------------------------------------------------------------
// Helper: ImVector<>
// Lightweight std::vector<>-like class to avoid dragging dependencies (also, some implementations of STL with debug enabled are absurdly slow, we bypass it so our code runs fast in debug).
// You generally do NOT need to care or use this ever. But we need to make it available in imgui.h because some of our data structures are relying on it.
// Important: clear() frees memory, resize(0) keeps the allocated buffer. We use resize(0) a lot to intentionally recycle allocated buffers across frames and amortize our costs.
// Important: our implementation does NOT call C++ constructors/destructors, we treat everything as raw data! This is intentional but be extra mindful of that,
// do NOT use this class as a std::vector replacement in your own code! Many of the structures used by dear imgui can be safely initialized by a zero-memset.
//-----------------------------------------------------------------------------
template<typename T>
struct ImVector
{
    int Size;
    int Capacity;
    T* Data;

    // Provide standard typedefs but we don't use them ourselves.
typedef T value_type; typedef value_type* iterator; typedef const value_type* const_iterator; // Constructors, destructor inline ImVector() { Size = Capacity = 0; Data = NULL; } inline ImVector(const ImVector<T>& src) { Size = Capacity = 0; Data = NULL; operator=(src); } inline ImVector<T>& operator=(const ImVector<T>& src) { clear(); resize(src.Size); memcpy(Data, src.Data, (size_t)Size * sizeof(T)); return *this; } inline ~ImVector() { if (Data) IM_FREE(Data); } inline bool empty() const { return Size == 0; } inline int size() const { return Size; } inline int size_in_bytes() const { return Size * (int)sizeof(T); } inline int capacity() const { return Capacity; } inline T& operator[](int i) { IM_ASSERT(i < Size); return Data[i]; } inline const T& operator[](int i) const { IM_ASSERT(i < Size); return Data[i]; } inline void clear() { if (Data) { Size = Capacity = 0; IM_FREE(Data); Data = NULL; } } inline T* begin() { return Data; } inline const T* begin() const { return Data; } inline T* end() { return Data + Size; } inline const T* end() const { return Data + Size; } inline T& front() { IM_ASSERT(Size > 0); return Data[0]; } inline const T& front() const { IM_ASSERT(Size > 0); return Data[0]; } inline T& back() { IM_ASSERT(Size > 0); return Data[Size - 1]; } inline const T& back() const { IM_ASSERT(Size > 0); return Data[Size - 1]; } inline void swap(ImVector<T>& rhs) { int rhs_size = rhs.Size; rhs.Size = Size; Size = rhs_size; int rhs_cap = rhs.Capacity; rhs.Capacity = Capacity; Capacity = rhs_cap; T* rhs_data = rhs.Data; rhs.Data = Data; Data = rhs_data; } inline int _grow_capacity(int sz) const { int new_capacity = Capacity ? (Capacity + Capacity/2) : 8; return new_capacity > sz ? new_capacity : sz; } inline void resize(int new_size) { if (new_size > Capacity) reserve(_grow_capacity(new_size)); Size = new_size; } inline void resize(int new_size, const T& v) { if (new_size > Capacity) reserve(_grow_capacity(new_size)); if (new_size > Size) for (int n = Size; n < new_size; n++) memcpy(&Data[n], &v, sizeof(v)); Size = new_size; } inline void reserve(int new_capacity) { if (new_capacity <= Capacity) return; T* new_data = (T*)IM_ALLOC((size_t)new_capacity * sizeof(T)); if (Data) { memcpy(new_data, Data, (size_t)Size * sizeof(T)); IM_FREE(Data); } Data = new_data; Capacity = new_capacity; } // NB: It is illegal to call push_back/push_front/insert with a reference pointing inside the ImVector data itself! e.g. v.push_back(v[10]) is forbidden. 
inline void push_back(const T& v) { if (Size == Capacity) reserve(_grow_capacity(Size + 1)); memcpy(&Data[Size], &v, sizeof(v)); Size++; } inline void pop_back() { IM_ASSERT(Size > 0); Size--; } inline void push_front(const T& v) { if (Size == 0) push_back(v); else insert(Data, v); } inline T* erase(const T* it) { IM_ASSERT(it >= Data && it < Data+Size); const ptrdiff_t off = it - Data; memmove(Data + off, Data + off + 1, ((size_t)Size - (size_t)off - 1) * sizeof(T)); Size--; return Data + off; } inline T* erase(const T* it, const T* it_last){ IM_ASSERT(it >= Data && it < Data+Size && it_last > it && it_last <= Data+Size); const ptrdiff_t count = it_last - it; const ptrdiff_t off = it - Data; memmove(Data + off, Data + off + count, ((size_t)Size - (size_t)off - count) * sizeof(T)); Size -= (int)count; return Data + off; } inline T* erase_unsorted(const T* it) { IM_ASSERT(it >= Data && it < Data+Size); const ptrdiff_t off = it - Data; if (it < Data+Size-1) memcpy(Data + off, Data + Size - 1, sizeof(T)); Size--; return Data + off; } inline T* insert(const T* it, const T& v) { IM_ASSERT(it >= Data && it <= Data+Size); const ptrdiff_t off = it - Data; if (Size == Capacity) reserve(_grow_capacity(Size + 1)); if (off < (int)Size) memmove(Data + off + 1, Data + off, ((size_t)Size - (size_t)off) * sizeof(T)); memcpy(&Data[off], &v, sizeof(v)); Size++; return Data + off; } inline bool contains(const T& v) const { const T* data = Data; const T* data_end = Data + Size; while (data < data_end) if (*data++ == v) return true; return false; } inline int index_from_ptr(const T* it) const { IM_ASSERT(it >= Data && it <= Data+Size); const ptrdiff_t off = it - Data; return (int)off; } }; //----------------------------------------------------------------------------- // ImGuiStyle // You may modify the ImGui::GetStyle() main instance during initialization and before NewFrame(). // During the frame, use ImGui::PushStyleVar(ImGuiStyleVar_XXXX)/PopStyleVar() to alter the main style values, // and ImGui::PushStyleColor(ImGuiCol_XXX)/PopStyleColor() for colors. //----------------------------------------------------------------------------- struct ImGuiStyle { float Alpha; // Global alpha applies to everything in Dear ImGui. ImVec2 WindowPadding; // Padding within a window. float WindowRounding; // Radius of window corners rounding. Set to 0.0f to have rectangular windows. float WindowBorderSize; // Thickness of border around windows. Generally set to 0.0f or 1.0f. (Other values are not well tested and more CPU/GPU costly). ImVec2 WindowMinSize; // Minimum window size. This is a global setting. If you want to constraint individual windows, use SetNextWindowSizeConstraints(). ImVec2 WindowTitleAlign; // Alignment for title bar text. Defaults to (0.0f,0.5f) for left-aligned,vertically centered. ImGuiDir WindowMenuButtonPosition; // Side of the collapsing/docking button in the title bar (left/right). Defaults to ImGuiDir_Left. float ChildRounding; // Radius of child window corners rounding. Set to 0.0f to have rectangular windows. float ChildBorderSize; // Thickness of border around child windows. Generally set to 0.0f or 1.0f. (Other values are not well tested and more CPU/GPU costly). float PopupRounding; // Radius of popup window corners rounding. (Note that tooltip windows use WindowRounding) float PopupBorderSize; // Thickness of border around popup/tooltip windows. Generally set to 0.0f or 1.0f. (Other values are not well tested and more CPU/GPU costly). 
ImVec2 FramePadding; // Padding within a framed rectangle (used by most widgets). float FrameRounding; // Radius of frame corners rounding. Set to 0.0f to have rectangular frame (used by most widgets). float FrameBorderSize; // Thickness of border around frames. Generally set to 0.0f or 1.0f. (Other values are not well tested and more CPU/GPU costly). ImVec2 ItemSpacing; // Horizontal and vertical spacing between widgets/lines. ImVec2 ItemInnerSpacing; // Horizontal and vertical spacing between within elements of a composed widget (e.g. a slider and its label). ImVec2 TouchExtraPadding; // Expand reactive bounding box for touch-based system where touch position is not accurate enough. Unfortunately we don't sort widgets so priority on overlap will always be given to the first widget. So don't grow this too much! float IndentSpacing; // Horizontal indentation when e.g. entering a tree node. Generally == (FontSize + FramePadding.x*2). float ColumnsMinSpacing; // Minimum horizontal spacing between two columns. Preferably > (FramePadding.x + 1). float ScrollbarSize; // Width of the vertical scrollbar, Height of the horizontal scrollbar. float ScrollbarRounding; // Radius of grab corners for scrollbar. float GrabMinSize; // Minimum width/height of a grab box for slider/scrollbar. float GrabRounding; // Radius of grabs corners rounding. Set to 0.0f to have rectangular slider grabs. float TabRounding; // Radius of upper corners of a tab. Set to 0.0f to have rectangular tabs. float TabBorderSize; // Thickness of border around tabs. ImGuiDir ColorButtonPosition; // Side of the color button in the ColorEdit4 widget (left/right). Defaults to ImGuiDir_Right. ImVec2 ButtonTextAlign; // Alignment of button text when button is larger than text. Defaults to (0.5f, 0.5f) (centered). ImVec2 SelectableTextAlign; // Alignment of selectable text when selectable is larger than text. Defaults to (0.0f, 0.0f) (top-left aligned). ImVec2 DisplayWindowPadding; // Window position are clamped to be visible within the display area by at least this amount. Only applies to regular windows. ImVec2 DisplaySafeAreaPadding; // If you cannot see the edges of your screen (e.g. on a TV) increase the safe area padding. Apply to popups/tooltips as well regular windows. NB: Prefer configuring your TV sets correctly! float MouseCursorScale; // Scale software rendered mouse cursor (when io.MouseDrawCursor is enabled). May be removed later. bool AntiAliasedLines; // Enable anti-aliasing on lines/borders. Disable if you are really tight on CPU/GPU. bool AntiAliasedFill; // Enable anti-aliasing on filled shapes (rounded rectangles, circles, etc.) float CurveTessellationTol; // Tessellation tolerance when using PathBezierCurveTo() without a specific number of segments. Decrease for highly tessellated curves (higher quality, more polygons), increase to reduce quality. ImVec4 Colors[ImGuiCol_COUNT]; IMGUI_API ImGuiStyle(); IMGUI_API void ScaleAllSizes(float scale_factor); }; //----------------------------------------------------------------------------- // ImGuiIO // Communicate most settings and inputs/outputs to Dear ImGui using this structure. // Access via ImGui::GetIO(). Read 'Programmer guide' section in .cpp file for general usage. 
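// Example of the per-frame input submission performed by a back-end (illustrative sketch; the window size, timing and
// mouse values are placeholders coming from your platform layer):
//      ImGuiIO& io = ImGui::GetIO();
//      io.DisplaySize = ImVec2(1280.0f, 720.0f);   // current window/framebuffer size in pixels
//      io.DeltaTime = 1.0f / 60.0f;                // time elapsed since the previous frame, in seconds
//      io.MousePos = ImVec2(mouse_x, mouse_y);     // or ImVec2(-FLT_MAX,-FLT_MAX) when the mouse is unavailable
//      io.MouseDown[0] = left_button_down;
//      ImGui::NewFrame();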
//----------------------------------------------------------------------------- struct ImGuiIO { //------------------------------------------------------------------ // Configuration (fill once) // Default value //------------------------------------------------------------------ ImGuiConfigFlags ConfigFlags; // = 0 // See ImGuiConfigFlags_ enum. Set by user/application. Gamepad/keyboard navigation options, etc. ImGuiBackendFlags BackendFlags; // = 0 // See ImGuiBackendFlags_ enum. Set by back-end (imgui_impl_xxx files or custom back-end) to communicate features supported by the back-end. ImVec2 DisplaySize; // <unset> // Main display size, in pixels. float DeltaTime; // = 1.0f/60.0f // Time elapsed since last frame, in seconds. float IniSavingRate; // = 5.0f // Minimum time between saving positions/sizes to .ini file, in seconds. const char* IniFilename; // = "imgui.ini" // Path to .ini file. Set NULL to disable automatic .ini loading/saving, if e.g. you want to manually load/save from memory. const char* LogFilename; // = "imgui_log.txt"// Path to .log file (default parameter to ImGui::LogToFile when no file is specified). float MouseDoubleClickTime; // = 0.30f // Time for a double-click, in seconds. float MouseDoubleClickMaxDist; // = 6.0f // Distance threshold to stay in to validate a double-click, in pixels. float MouseDragThreshold; // = 6.0f // Distance threshold before considering we are dragging. int KeyMap[ImGuiKey_COUNT]; // <unset> // Map of indices into the KeysDown[512] entries array which represent your "native" keyboard state. float KeyRepeatDelay; // = 0.250f // When holding a key/button, time before it starts repeating, in seconds (for buttons in Repeat mode, etc.). float KeyRepeatRate; // = 0.050f // When holding a key/button, rate at which it repeats, in seconds. void* UserData; // = NULL // Store your own data for retrieval by callbacks. ImFontAtlas*Fonts; // <auto> // Font atlas: load, rasterize and pack one or more fonts into a single texture. float FontGlobalScale; // = 1.0f // Global scale all fonts bool FontAllowUserScaling; // = false // Allow user scaling text of individual window with CTRL+Wheel. ImFont* FontDefault; // = NULL // Font to use on NewFrame(). Use NULL to uses Fonts->Fonts[0]. ImVec2 DisplayFramebufferScale; // = (1, 1) // For retina display or other situations where window coordinates are different from framebuffer coordinates. This generally ends up in ImDrawData::FramebufferScale. // Miscellaneous options bool MouseDrawCursor; // = false // Request ImGui to draw a mouse cursor for you (if you are on a platform without a mouse cursor). Cannot be easily renamed to 'io.ConfigXXX' because this is frequently used by back-end implementations. bool ConfigMacOSXBehaviors; // = defined(__APPLE__) // OS X style: Text editing cursor movement using Alt instead of Ctrl, Shortcuts using Cmd/Super instead of Ctrl, Line/Text Start and End using Cmd+Arrows instead of Home/End, Double click selects by word instead of selecting whole text, Multi-selection in lists uses Cmd/Super instead of Ctrl (was called io.OptMacOSXBehaviors prior to 1.63) bool ConfigInputTextCursorBlink; // = true // Set to false to disable blinking cursor, for users who consider it distracting. (was called: io.OptCursorBlink prior to 1.63) bool ConfigWindowsResizeFromEdges; // = true // Enable resizing of windows from their edges and from the lower-left corner. This requires (io.BackendFlags & ImGuiBackendFlags_HasMouseCursors) because it needs mouse cursor feedback. 
(This used to be a per-window ImGuiWindowFlags_ResizeFromAnySide flag) bool ConfigWindowsMoveFromTitleBarOnly; // = false // [BETA] Set to true to only allow moving windows when clicked+dragged from the title bar. Windows without a title bar are not affected. //------------------------------------------------------------------ // Platform Functions // (the imgui_impl_xxxx back-end files are setting those up for you) //------------------------------------------------------------------ // Optional: Platform/Renderer back-end name (informational only! will be displayed in About Window) + User data for back-end/wrappers to store their own stuff. const char* BackendPlatformName; // = NULL const char* BackendRendererName; // = NULL void* BackendPlatformUserData; // = NULL void* BackendRendererUserData; // = NULL void* BackendLanguageUserData; // = NULL // Optional: Access OS clipboard // (default to use native Win32 clipboard on Windows, otherwise uses a private clipboard. Override to access OS clipboard on other architectures) const char* (*GetClipboardTextFn)(void* user_data); void (*SetClipboardTextFn)(void* user_data, const char* text); void* ClipboardUserData; // Optional: Notify OS Input Method Editor of the screen position of your cursor for text input position (e.g. when using Japanese/Chinese IME on Windows) // (default to use native imm32 api on Windows) void (*ImeSetInputScreenPosFn)(int x, int y); void* ImeWindowHandle; // = NULL // (Windows) Set this to your HWND to get automatic IME cursor positioning. #ifndef IMGUI_DISABLE_OBSOLETE_FUNCTIONS // [OBSOLETE since 1.60+] Rendering function, will be automatically called in Render(). Please call your rendering function yourself now! // You can obtain the ImDrawData* by calling ImGui::GetDrawData() after Render(). See example applications if you are unsure of how to implement this. void (*RenderDrawListsFn)(ImDrawData* data); #else // This is only here to keep ImGuiIO the same size/layout, so that IMGUI_DISABLE_OBSOLETE_FUNCTIONS can exceptionally be used outside of imconfig.h. void* RenderDrawListsFnUnused; #endif //------------------------------------------------------------------ // Input - Fill before calling NewFrame() //------------------------------------------------------------------ ImVec2 MousePos; // Mouse position, in pixels. Set to ImVec2(-FLT_MAX,-FLT_MAX) if mouse is unavailable (on another screen, etc.) bool MouseDown[5]; // Mouse buttons: 0=left, 1=right, 2=middle + extras. ImGui itself mostly only uses left button (BeginPopupContext** are using right button). Others buttons allows us to track if the mouse is being used by your application + available to user as a convenience via IsMouse** API. float MouseWheel; // Mouse wheel Vertical: 1 unit scrolls about 5 lines text. float MouseWheelH; // Mouse wheel Horizontal. Most users don't have a mouse with an horizontal wheel, may not be filled by all back-ends. bool KeyCtrl; // Keyboard modifier pressed: Control bool KeyShift; // Keyboard modifier pressed: Shift bool KeyAlt; // Keyboard modifier pressed: Alt bool KeySuper; // Keyboard modifier pressed: Cmd/Super/Windows bool KeysDown[512]; // Keyboard keys that are pressed (ideally left in the "native" order your engine has access to keyboard keys, so you can use your own defines/enums for keys). float NavInputs[ImGuiNavInput_COUNT]; // Gamepad inputs. Cleared back to zero by EndFrame(). Keyboard keys will be auto-mapped and be written here by NewFrame(). 
// Functions IMGUI_API void AddInputCharacter(unsigned int c); // Queue new character input IMGUI_API void AddInputCharactersUTF8(const char* str); // Queue new characters input from an UTF-8 string IMGUI_API void ClearInputCharacters(); // Clear the text input buffer manually //------------------------------------------------------------------ // Output - Retrieve after calling NewFrame() //------------------------------------------------------------------ bool WantCaptureMouse; // When io.WantCaptureMouse is true, imgui will use the mouse inputs, do not dispatch them to your main game/application (in both cases, always pass on mouse inputs to imgui). (e.g. unclicked mouse is hovering over an imgui window, widget is active, mouse was clicked over an imgui window, etc.). bool WantCaptureKeyboard; // When io.WantCaptureKeyboard is true, imgui will use the keyboard inputs, do not dispatch them to your main game/application (in both cases, always pass keyboard inputs to imgui). (e.g. InputText active, or an imgui window is focused and navigation is enabled, etc.). bool WantTextInput; // Mobile/console: when io.WantTextInput is true, you may display an on-screen keyboard. This is set by ImGui when it wants textual keyboard input to happen (e.g. when a InputText widget is active). bool WantSetMousePos; // MousePos has been altered, back-end should reposition mouse on next frame. Set only when ImGuiConfigFlags_NavEnableSetMousePos flag is enabled. bool WantSaveIniSettings; // When manual .ini load/save is active (io.IniFilename == NULL), this will be set to notify your application that you can call SaveIniSettingsToMemory() and save yourself. IMPORTANT: You need to clear io.WantSaveIniSettings yourself. bool NavActive; // Directional navigation is currently allowed (will handle ImGuiKey_NavXXX events) = a window is focused and it doesn't use the ImGuiWindowFlags_NoNavInputs flag. bool NavVisible; // Directional navigation is visible and allowed (will handle ImGuiKey_NavXXX events). float Framerate; // Application framerate estimation, in frame per second. Solely for convenience. Rolling average estimation based on IO.DeltaTime over 120 frames int MetricsRenderVertices; // Vertices output during last call to Render() int MetricsRenderIndices; // Indices output during last call to Render() = number of triangles * 3 int MetricsRenderWindows; // Number of visible windows int MetricsActiveWindows; // Number of active windows int MetricsActiveAllocations; // Number of active allocations, updated by MemAlloc/MemFree based on current context. May be off if you have multiple imgui contexts. ImVec2 MouseDelta; // Mouse delta. Note that this is zero if either current or previous position are invalid (-FLT_MAX,-FLT_MAX), so a disappearing/reappearing mouse won't have a huge delta. //------------------------------------------------------------------ // [Internal] ImGui will maintain those fields. Forward compatibility not guaranteed! //------------------------------------------------------------------ ImVec2 MousePosPrev; // Previous mouse position (note that MouseDelta is not necessary == MousePos-MousePosPrev, in case either position is invalid) ImVec2 MouseClickedPos[5]; // Position at time of clicking double MouseClickedTime[5]; // Time of last click (used to figure out double-click) bool MouseClicked[5]; // Mouse button went from !Down to Down bool MouseDoubleClicked[5]; // Has mouse button been double-clicked? 
bool MouseReleased[5]; // Mouse button went from Down to !Down bool MouseDownOwned[5]; // Track if button was clicked inside a dear imgui window. We don't request mouse capture from the application if click started outside ImGui bounds. bool MouseDownWasDoubleClick[5]; // Track if button down was a double-click float MouseDownDuration[5]; // Duration the mouse button has been down (0.0f == just clicked) float MouseDownDurationPrev[5]; // Previous time the mouse button has been down ImVec2 MouseDragMaxDistanceAbs[5]; // Maximum distance, absolute, on each axis, of how much mouse has traveled from the clicking point float MouseDragMaxDistanceSqr[5]; // Squared maximum distance of how much mouse has traveled from the clicking point float KeysDownDuration[512]; // Duration the keyboard key has been down (0.0f == just pressed) float KeysDownDurationPrev[512]; // Previous duration the key has been down float NavInputsDownDuration[ImGuiNavInput_COUNT]; float NavInputsDownDurationPrev[ImGuiNavInput_COUNT]; ImVector<ImWchar> InputQueueCharacters; // Queue of _characters_ input (obtained by platform back-end). Fill using AddInputCharacter() helper. IMGUI_API ImGuiIO(); }; //----------------------------------------------------------------------------- // Misc data structures //----------------------------------------------------------------------------- // Shared state of InputText(), passed as an argument to your callback when a ImGuiInputTextFlags_Callback* flag is used. // The callback function should return 0 by default. // Callbacks (follow a flag name and see comments in ImGuiInputTextFlags_ declarations for more details) // - ImGuiInputTextFlags_CallbackCompletion: Callback on pressing TAB // - ImGuiInputTextFlags_CallbackHistory: Callback on pressing Up/Down arrows // - ImGuiInputTextFlags_CallbackAlways: Callback on each iteration // - ImGuiInputTextFlags_CallbackCharFilter: Callback on character inputs to replace or discard them. Modify 'EventChar' to replace or discard, or return 1 in callback to discard. // - ImGuiInputTextFlags_CallbackResize: Callback on buffer capacity changes request (beyond 'buf_size' parameter value), allowing the string to grow. struct ImGuiInputTextCallbackData { ImGuiInputTextFlags EventFlag; // One ImGuiInputTextFlags_Callback* // Read-only ImGuiInputTextFlags Flags; // What user passed to InputText() // Read-only void* UserData; // What user passed to InputText() // Read-only // Arguments for the different callback events // - To modify the text buffer in a callback, prefer using the InsertChars() / DeleteChars() function. InsertChars() will take care of calling the resize callback if necessary. // - If you know your edits are not going to resize the underlying buffer allocation, you may modify the contents of 'Buf[]' directly. You need to update 'BufTextLen' accordingly (0 <= BufTextLen < BufSize) and set 'BufDirty'' to true so InputText can update its internal state. ImWchar EventChar; // Character input // Read-write // [CharFilter] Replace character with another one, or set to zero to drop. return 1 is equivalent to setting EventChar=0; ImGuiKey EventKey; // Key pressed (Up/Down/TAB) // Read-only // [Completion,History] char* Buf; // Text buffer // Read-write // [Resize] Can replace pointer / [Completion,History,Always] Only write to pointed data, don't replace the actual pointer! int BufTextLen; // Text length (in bytes) // Read-write // [Resize,Completion,History,Always] Exclude zero-terminator storage. 
In C land: == strlen(some_text), in C++ land: string.length() int BufSize; // Buffer size (in bytes) = capacity+1 // Read-only // [Resize,Completion,History,Always] Include zero-terminator storage. In C land == ARRAYSIZE(my_char_array), in C++ land: string.capacity()+1 bool BufDirty; // Set if you modify Buf/BufTextLen! // Write // [Completion,History,Always] int CursorPos; // // Read-write // [Completion,History,Always] int SelectionStart; // // Read-write // [Completion,History,Always] (== SelectionEnd when no selection) int SelectionEnd; // // Read-write // [Completion,History,Always] // Helper functions for text manipulation. // Use those functions to benefit from the CallbackResize behaviors. Calling those functions resets the selection. IMGUI_API ImGuiInputTextCallbackData(); IMGUI_API void DeleteChars(int pos, int bytes_count); IMGUI_API void InsertChars(int pos, const char* text, const char* text_end = NULL); bool HasSelection() const { return SelectionStart != SelectionEnd; } }; // Resizing callback data to apply a custom constraint. As enabled by SetNextWindowSizeConstraints(). Callback is called during the next Begin(). // NB: For basic min/max size constraint on each axis you don't need to use the callback! The SetNextWindowSizeConstraints() parameters are enough. struct ImGuiSizeCallbackData { void* UserData; // Read-only. What user passed to SetNextWindowSizeConstraints() ImVec2 Pos; // Read-only. Window position, for reference. ImVec2 CurrentSize; // Read-only. Current window size. ImVec2 DesiredSize; // Read-write. Desired size, based on user's mouse position. Write to this field to restrain resizing. }; // Data payload for Drag and Drop operations: AcceptDragDropPayload(), GetDragDropPayload() struct ImGuiPayload { // Members void* Data; // Data (copied and owned by dear imgui) int DataSize; // Data size // [Internal] ImGuiID SourceId; // Source item id ImGuiID SourceParentId; // Source parent id (if available) int DataFrameCount; // Data timestamp char DataType[32+1]; // Data type tag (short user-supplied string, 32 characters max) bool Preview; // Set when AcceptDragDropPayload() was called and mouse has been hovering the target item (nb: handle overlapping drag targets) bool Delivery; // Set when AcceptDragDropPayload() was called and mouse button is released over the target item. ImGuiPayload() { Clear(); } void Clear() { SourceId = SourceParentId = 0; Data = NULL; DataSize = 0; memset(DataType, 0, sizeof(DataType)); DataFrameCount = -1; Preview = Delivery = false; } bool IsDataType(const char* type) const { return DataFrameCount != -1 && strcmp(type, DataType) == 0; } bool IsPreview() const { return Preview; } bool IsDelivery() const { return Delivery; } }; //----------------------------------------------------------------------------- // Obsolete functions (Will be removed! Read 'API BREAKING CHANGES' section in imgui.cpp for details) // Please keep your copy of dear imgui up to date! Occasionally set '#define IMGUI_DISABLE_OBSOLETE_FUNCTIONS' in imconfig.h to stay ahead.
//----------------------------------------------------------------------------- #ifndef IMGUI_DISABLE_OBSOLETE_FUNCTIONS namespace ImGui { // OBSOLETED in 1.72 (from July 2019) static inline void TreeAdvanceToLabelPos() { SetCursorPosX(GetCursorPosX() + GetTreeNodeToLabelSpacing()); } // OBSOLETED in 1.71 (from June 2019) static inline void SetNextTreeNodeOpen(bool open, ImGuiCond cond = 0) { SetNextItemOpen(open, cond); } // OBSOLETED in 1.70 (from May 2019) static inline float GetContentRegionAvailWidth() { return GetContentRegionAvail().x; } // OBSOLETED in 1.69 (from Mar 2019) static inline ImDrawList* GetOverlayDrawList() { return GetForegroundDrawList(); } // OBSOLETED in 1.66 (from Sep 2018) static inline void SetScrollHere(float center_ratio=0.5f){ SetScrollHereY(center_ratio); } // OBSOLETED in 1.63 (between Aug 2018 and Sept 2018) static inline bool IsItemDeactivatedAfterChange() { return IsItemDeactivatedAfterEdit(); } // OBSOLETED in 1.61 (between Apr 2018 and Aug 2018) IMGUI_API bool InputFloat(const char* label, float* v, float step, float step_fast, int decimal_precision, ImGuiInputTextFlags flags = 0); // Use the 'const char* format' version instead of 'decimal_precision'! IMGUI_API bool InputFloat2(const char* label, float v[2], int decimal_precision, ImGuiInputTextFlags flags = 0); IMGUI_API bool InputFloat3(const char* label, float v[3], int decimal_precision, ImGuiInputTextFlags flags = 0); IMGUI_API bool InputFloat4(const char* label, float v[4], int decimal_precision, ImGuiInputTextFlags flags = 0); // OBSOLETED in 1.60 (between Dec 2017 and Apr 2018) static inline bool IsAnyWindowFocused() { return IsWindowFocused(ImGuiFocusedFlags_AnyWindow); } static inline bool IsAnyWindowHovered() { return IsWindowHovered(ImGuiHoveredFlags_AnyWindow); } static inline ImVec2 CalcItemRectClosestPoint(const ImVec2& pos, bool on_edge = false, float outward = 0.f) { IM_UNUSED(on_edge); IM_UNUSED(outward); IM_ASSERT(0); return pos; } // OBSOLETED in 1.53 (between Oct 2017 and Dec 2017) static inline void ShowTestWindow() { return ShowDemoWindow(); } static inline bool IsRootWindowFocused() { return IsWindowFocused(ImGuiFocusedFlags_RootWindow); } static inline bool IsRootWindowOrAnyChildFocused() { return IsWindowFocused(ImGuiFocusedFlags_RootAndChildWindows); } static inline void SetNextWindowContentWidth(float w) { SetNextWindowContentSize(ImVec2(w, 0.0f)); } static inline float GetItemsLineHeightWithSpacing() { return GetFrameHeightWithSpacing(); } // OBSOLETED in 1.52 (between Aug 2017 and Oct 2017) IMGUI_API bool Begin(const char* name, bool* p_open, const ImVec2& size_on_first_use, float bg_alpha_override = -1.0f, ImGuiWindowFlags flags = 0); // Use SetNextWindowSize(size, ImGuiCond_FirstUseEver) + SetNextWindowBgAlpha() instead. 
static inline bool IsRootWindowOrAnyChildHovered() { return IsWindowHovered(ImGuiHoveredFlags_RootAndChildWindows); } static inline void AlignFirstTextHeightToWidgets() { AlignTextToFramePadding(); } static inline void SetNextWindowPosCenter(ImGuiCond c=0) { ImGuiIO& io = GetIO(); SetNextWindowPos(ImVec2(io.DisplaySize.x * 0.5f, io.DisplaySize.y * 0.5f), c, ImVec2(0.5f, 0.5f)); } } typedef ImGuiInputTextCallback ImGuiTextEditCallback; // OBSOLETED in 1.63 (from Aug 2018): made the names consistent typedef ImGuiInputTextCallbackData ImGuiTextEditCallbackData; #endif //----------------------------------------------------------------------------- // Helpers //----------------------------------------------------------------------------- // Helper: Execute a block of code at maximum once a frame. Convenient if you want to quickly create an UI within deep-nested code that runs multiple times every frame. // Usage: static ImGuiOnceUponAFrame oaf; if (oaf) ImGui::Text("This will be called only once per frame"); struct ImGuiOnceUponAFrame { ImGuiOnceUponAFrame() { RefFrame = -1; } mutable int RefFrame; operator bool() const { int current_frame = ImGui::GetFrameCount(); if (RefFrame == current_frame) return false; RefFrame = current_frame; return true; } }; // Helper: Parse and apply text filters. In format "aaaaa[,bbbb][,ccccc]" struct ImGuiTextFilter { IMGUI_API ImGuiTextFilter(const char* default_filter = ""); IMGUI_API bool Draw(const char* label = "Filter (inc,-exc)", float width = 0.0f); // Helper calling InputText+Build IMGUI_API bool PassFilter(const char* text, const char* text_end = NULL) const; IMGUI_API void Build(); void Clear() { InputBuf[0] = 0; Build(); } bool IsActive() const { return !Filters.empty(); } // [Internal] struct ImGuiTextRange { const char* b; const char* e; ImGuiTextRange() { b = e = NULL; } ImGuiTextRange(const char* _b, const char* _e) { b = _b; e = _e; } bool empty() const { return b == e; } IMGUI_API void split(char separator, ImVector<ImGuiTextRange>* out) const; }; char InputBuf[256]; ImVector<ImGuiTextRange>Filters; int CountGrep; }; // Helper: Growable text buffer for logging/accumulating text // (this could be called 'ImGuiTextBuilder' / 'ImGuiStringBuilder') struct ImGuiTextBuffer { ImVector<char> Buf; IMGUI_API static char EmptyString[1]; ImGuiTextBuffer() { } inline char operator[](int i) { IM_ASSERT(Buf.Data != NULL); return Buf.Data[i]; } const char* begin() const { return Buf.Data ? &Buf.front() : EmptyString; } const char* end() const { return Buf.Data ? &Buf.back() : EmptyString; } // Buf is zero-terminated, so end() will point on the zero-terminator int size() const { return Buf.Size ? Buf.Size - 1 : 0; } bool empty() { return Buf.Size <= 1; } void clear() { Buf.clear(); } void reserve(int capacity) { Buf.reserve(capacity); } const char* c_str() const { return Buf.Data ? Buf.Data : EmptyString; } IMGUI_API void append(const char* str, const char* str_end = NULL); IMGUI_API void appendf(const char* fmt, ...) IM_FMTARGS(2); IMGUI_API void appendfv(const char* fmt, va_list args) IM_FMTLIST(2); }; // Helper: Key->Value storage // Typically you don't have to worry about this since a storage is held within each Window. // We use it to e.g. store collapse state for a tree (Int 0/1) // This is optimized for efficient lookup (dichotomy into a contiguous buffer) and rare insertion (typically tied to user interactions aka max once a frame) // You can use it as custom user storage for temporary values. 
Declare your own storage if, for example: // - You want to manipulate the open/close state of a particular sub-tree in your interface (tree nodes use Int 0/1 to store their state). // - You want to store custom debug data easily without adding or editing structures in your code (probably not efficient, but convenient) // Types are NOT stored, so it is up to you to make sure your keys don't collide across different types. struct ImGuiStorage { // [Internal] struct ImGuiStoragePair { ImGuiID key; union { int val_i; float val_f; void* val_p; }; ImGuiStoragePair(ImGuiID _key, int _val_i) { key = _key; val_i = _val_i; } ImGuiStoragePair(ImGuiID _key, float _val_f) { key = _key; val_f = _val_f; } ImGuiStoragePair(ImGuiID _key, void* _val_p) { key = _key; val_p = _val_p; } }; ImVector<ImGuiStoragePair> Data; // - Get***() functions find pair, never add/allocate. Pairs are sorted so a query is O(log N) // - Set***() functions find pair, insertion on demand if missing. // - Sorted insertion is costly, paid once. A typical frame shouldn't need to insert any new pair. void Clear() { Data.clear(); } IMGUI_API int GetInt(ImGuiID key, int default_val = 0) const; IMGUI_API void SetInt(ImGuiID key, int val); IMGUI_API bool GetBool(ImGuiID key, bool default_val = false) const; IMGUI_API void SetBool(ImGuiID key, bool val); IMGUI_API float GetFloat(ImGuiID key, float default_val = 0.0f) const; IMGUI_API void SetFloat(ImGuiID key, float val); IMGUI_API void* GetVoidPtr(ImGuiID key) const; // default_val is NULL IMGUI_API void SetVoidPtr(ImGuiID key, void* val); // - Get***Ref() functions find the pair, insert on demand if missing, and return a pointer. Useful if you intend to do Get+Set. // - References are only valid until a new value is added to the storage. Calling a Set***() function or a Get***Ref() function invalidates the pointer. // - A typical use case is quick hacking (e.g. add storage during a live Edit&Continue session if you can't modify existing struct) // float* pvar = ImGui::GetFloatRef(key); ImGui::SliderFloat("var", pvar, 0, 100.0f); some_var += *pvar; IMGUI_API int* GetIntRef(ImGuiID key, int default_val = 0); IMGUI_API bool* GetBoolRef(ImGuiID key, bool default_val = false); IMGUI_API float* GetFloatRef(ImGuiID key, float default_val = 0.0f); IMGUI_API void** GetVoidPtrRef(ImGuiID key, void* default_val = NULL); // Use on your own storage if you know only integers are being stored (open/close all tree nodes) IMGUI_API void SetAllInt(int val); // For quicker full rebuild of a storage (instead of an incremental one), you may add all your contents and then sort once. IMGUI_API void BuildSortByKey(); }; // Helper: Manually clip a large list of items. // If you are submitting lots of evenly spaced items and you have random access to the list, you can perform coarse clipping based on visibility to save yourself from processing those items at all. // The clipper calculates the range of visible items and advances the cursor to compensate for the non-visible items we have skipped. // ImGui already clips items based on their bounds but it needs to measure text size to do so. Coarse clipping before submission removes this cost as well as your own data fetching/submission cost. // Usage: // ImGuiListClipper clipper(1000); // we have 1000 elements, evenly spaced.
// while (clipper.Step()) // for (int i = clipper.DisplayStart; i < clipper.DisplayEnd; i++) // ImGui::Text("line number %d", i); // - Step 0: the clipper lets you process the first element, regardless of it being visible or not, so we can measure the element height (step skipped if we passed a known height as second arg to constructor). // - Step 1: the clipper infers the height from the first element, calculates the actual range of elements to display, and positions the cursor before the first element. // - (Step 2: dummy step only required if an explicit items_height was passed to constructor or Begin() and the user calls Step(). Does nothing and switches to Step 3.) // - Step 3: the clipper validates that we have reached the expected Y position (corresponding to element DisplayEnd), advances the cursor to the end of the list and then returns 'false' to end the loop. struct ImGuiListClipper { float StartPosY; float ItemsHeight; int ItemsCount, StepNo, DisplayStart, DisplayEnd; // items_count: Use -1 to ignore (you can call Begin later). Use INT_MAX if you don't know how many items you have (in which case the cursor won't be advanced in the final step). // items_height: Use -1.0f to be calculated automatically on first step. Otherwise pass in the distance between your items, typically GetTextLineHeightWithSpacing() or GetFrameHeightWithSpacing(). // If you don't specify an items_height, you NEED to call Step(). If you specify items_height you may call the old Begin()/End() api directly, but prefer calling Step(). ImGuiListClipper(int items_count = -1, float items_height = -1.0f) { Begin(items_count, items_height); } // NB: Begin() initializes every field (as we allow the user to call Begin/End multiple times on the same instance if they want). ~ImGuiListClipper() { IM_ASSERT(ItemsCount == -1); } // Assert if user forgot to call End() or Step() until false. IMGUI_API bool Step(); // Call until it returns false. The DisplayStart/DisplayEnd fields will be set and you can process/draw those items. IMGUI_API void Begin(int items_count, float items_height = -1.0f); // Automatically called by constructor if you passed 'items_count' or by Step() in Step 1. IMGUI_API void End(); // Automatically called on the last call of Step() that returns false. }; // Helper macros to generate 32-bit encoded colors #ifdef IMGUI_USE_BGRA_PACKED_COLOR #define IM_COL32_R_SHIFT 16 #define IM_COL32_G_SHIFT 8 #define IM_COL32_B_SHIFT 0 #define IM_COL32_A_SHIFT 24 #define IM_COL32_A_MASK 0xFF000000 #else #define IM_COL32_R_SHIFT 0 #define IM_COL32_G_SHIFT 8 #define IM_COL32_B_SHIFT 16 #define IM_COL32_A_SHIFT 24 #define IM_COL32_A_MASK 0xFF000000 #endif #define IM_COL32(R,G,B,A) (((ImU32)(A)<<IM_COL32_A_SHIFT) | ((ImU32)(B)<<IM_COL32_B_SHIFT) | ((ImU32)(G)<<IM_COL32_G_SHIFT) | ((ImU32)(R)<<IM_COL32_R_SHIFT)) #define IM_COL32_WHITE IM_COL32(255,255,255,255) // Opaque white = 0xFFFFFFFF #define IM_COL32_BLACK IM_COL32(0,0,0,255) // Opaque black #define IM_COL32_BLACK_TRANS IM_COL32(0,0,0,0) // Transparent black = 0x00000000 // Helper: ImColor() implicitly converts colors to either ImU32 (packed 4x1 byte) or ImVec4 (4x1 float) // Prefer using IM_COL32() macros if you want a guaranteed compile-time ImU32 for usage with ImDrawList API. // **Avoid storing ImColor! Store either u32 or ImVec4. This is not a full-featured color class. MAY OBSOLETE. // **None of the ImGui API uses ImColor directly but you can use it as a convenience to pass colors in either ImU32 or ImVec4 formats. Explicitly cast to ImU32 or ImVec4 if needed.
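// Example usage (illustrative sketch; 'draw_list', 'p_min', 'p_max' and 'center' are placeholders from your own code):
//     draw_list->AddRectFilled(p_min, p_max, ImColor(255, 0, 0));                 // int constructor, 0..255 per channel
//     draw_list->AddCircleFilled(center, 10.0f, ImColor(1.0f, 1.0f, 0.4f, 1.0f)); // float constructor, 0.0f..1.0f per channel
//     ImVec4 faded_teal = (ImVec4)ImColor::HSV(0.5f, 0.6f, 0.6f, 0.5f);           // build from HSV, read back as ImVec4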
struct ImColor { ImVec4 Value; ImColor() { Value.x = Value.y = Value.z = Value.w = 0.0f; } ImColor(int r, int g, int b, int a = 255) { float sc = 1.0f/255.0f; Value.x = (float)r * sc; Value.y = (float)g * sc; Value.z = (float)b * sc; Value.w = (float)a * sc; } ImColor(ImU32 rgba) { float sc = 1.0f/255.0f; Value.x = (float)((rgba>>IM_COL32_R_SHIFT)&0xFF) * sc; Value.y = (float)((rgba>>IM_COL32_G_SHIFT)&0xFF) * sc; Value.z = (float)((rgba>>IM_COL32_B_SHIFT)&0xFF) * sc; Value.w = (float)((rgba>>IM_COL32_A_SHIFT)&0xFF) * sc; } ImColor(float r, float g, float b, float a = 1.0f) { Value.x = r; Value.y = g; Value.z = b; Value.w = a; } ImColor(const ImVec4& col) { Value = col; } inline operator ImU32() const { return ImGui::ColorConvertFloat4ToU32(Value); } inline operator ImVec4() const { return Value; } // FIXME-OBSOLETE: May need to obsolete/cleanup those helpers. inline void SetHSV(float h, float s, float v, float a = 1.0f){ ImGui::ColorConvertHSVtoRGB(h, s, v, Value.x, Value.y, Value.z); Value.w = a; } static ImColor HSV(float h, float s, float v, float a = 1.0f) { float r,g,b; ImGui::ColorConvertHSVtoRGB(h, s, v, r, g, b); return ImColor(r,g,b,a); } }; //----------------------------------------------------------------------------- // Draw List API (ImDrawCmd, ImDrawIdx, ImDrawVert, ImDrawChannel, ImDrawListSplitter, ImDrawListFlags, ImDrawList, ImDrawData) // Hold a series of drawing commands. The user provides a renderer for ImDrawData which essentially contains an array of ImDrawList. //----------------------------------------------------------------------------- // Draw callbacks for advanced uses. // NB: You most likely do NOT need to use draw callbacks just to create your own widget or customized UI rendering, // you can poke into the draw list for that! Draw callback may be useful for example to: // A) Change your GPU render state, // B) render a complex 3D scene inside a UI element without an intermediate texture/render target, etc. // The expected behavior from your rendering function is 'if (cmd.UserCallback != NULL) { cmd.UserCallback(parent_list, cmd); } else { RenderTriangles() }' // If you want to override the signature of ImDrawCallback, you can simply use e.g. '#define ImDrawCallback MyDrawCallback' (in imconfig.h) + update rendering back-end accordingly. #ifndef ImDrawCallback typedef void (*ImDrawCallback)(const ImDrawList* parent_list, const ImDrawCmd* cmd); #endif // Special Draw callback value to request renderer back-end to reset the graphics/render state. // The renderer back-end needs to handle this special value, otherwise it will crash trying to call a function at this address. // This is useful for example if you submitted callbacks which you know have altered the render state and you want it to be restored. // It is not done by default because they are many perfectly useful way of altering render state for imgui contents (e.g. changing shader/blending settings before an Image call). #define ImDrawCallback_ResetRenderState (ImDrawCallback)(-1) // Typically, 1 command = 1 GPU draw call (unless command is a callback) // Pre 1.71 back-ends will typically ignore the VtxOffset/IdxOffset fields. When 'io.BackendFlags & ImGuiBackendFlags_RendererHasVtxOffset' // is enabled, those fields allow us to render meshes larger than 64K vertices while keeping 16-bits indices. struct ImDrawCmd { unsigned int ElemCount; // Number of indices (multiple of 3) to be rendered as triangles. Vertices are stored in the callee ImDrawList's vtx_buffer[] array, indices in idx_buffer[]. 
ImVec4 ClipRect; // Clipping rectangle (x1, y1, x2, y2). Subtract ImDrawData->DisplayPos to get clipping rectangle in "viewport" coordinates ImTextureID TextureId; // User-provided texture ID. Set by user in ImfontAtlas::SetTexID() for fonts or passed to Image*() functions. Ignore if never using images or multiple fonts atlas. unsigned int VtxOffset; // Start offset in vertex buffer. Pre-1.71 or without ImGuiBackendFlags_RendererHasVtxOffset: always 0. With ImGuiBackendFlags_RendererHasVtxOffset: may be >0 to support meshes larger than 64K vertices with 16-bits indices. unsigned int IdxOffset; // Start offset in index buffer. Always equal to sum of ElemCount drawn so far. ImDrawCallback UserCallback; // If != NULL, call the function instead of rendering the vertices. clip_rect and texture_id will be set normally. void* UserCallbackData; // The draw callback code can access this. ImDrawCmd() { ElemCount = 0; ClipRect.x = ClipRect.y = ClipRect.z = ClipRect.w = 0.0f; TextureId = (ImTextureID)NULL; VtxOffset = IdxOffset = 0; UserCallback = NULL; UserCallbackData = NULL; } }; // Vertex index // (to allow large meshes with 16-bits indices: set 'io.BackendFlags |= ImGuiBackendFlags_RendererHasVtxOffset' and handle ImDrawCmd::VtxOffset in the renderer back-end) // (to use 32-bits indices: override with '#define ImDrawIdx unsigned int' in imconfig.h) #ifndef ImDrawIdx typedef unsigned short ImDrawIdx; #endif // Vertex layout #ifndef IMGUI_OVERRIDE_DRAWVERT_STRUCT_LAYOUT struct ImDrawVert { ImVec2 pos; ImVec2 uv; ImU32 col; }; #else // You can override the vertex format layout by defining IMGUI_OVERRIDE_DRAWVERT_STRUCT_LAYOUT in imconfig.h // The code expect ImVec2 pos (8 bytes), ImVec2 uv (8 bytes), ImU32 col (4 bytes), but you can re-order them or add other fields as needed to simplify integration in your engine. // The type has to be described within the macro (you can either declare the struct or use a typedef). This is because ImVec2/ImU32 are likely not declared a the time you'd want to set your type up. // NOTE: IMGUI DOESN'T CLEAR THE STRUCTURE AND DOESN'T CALL A CONSTRUCTOR SO ANY CUSTOM FIELD WILL BE UNINITIALIZED. IF YOU ADD EXTRA FIELDS (SUCH AS A 'Z' COORDINATES) YOU WILL NEED TO CLEAR THEM DURING RENDER OR TO IGNORE THEM. IMGUI_OVERRIDE_DRAWVERT_STRUCT_LAYOUT; #endif // For use by ImDrawListSplitter. struct ImDrawChannel { ImVector<ImDrawCmd> _CmdBuffer; ImVector<ImDrawIdx> _IdxBuffer; }; // Split/Merge functions are used to split the draw list into different layers which can be drawn into out of order. // This is used by the Columns api, so items of each column can be batched together in a same draw call. 
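// Example usage via the ImDrawList channel helpers declared further below (illustrative sketch; 'pos', 'p_min' and 'p_max' are placeholders from your own code):
//     ImDrawList* draw_list = ImGui::GetWindowDrawList();
//     draw_list->ChannelsSplit(2);
//     draw_list->ChannelsSetCurrent(1);                                  // submit foreground first...
//     draw_list->AddText(pos, IM_COL32_WHITE, "label");
//     draw_list->ChannelsSetCurrent(0);                                  // ...then fill the background behind it
//     draw_list->AddRectFilled(p_min, p_max, IM_COL32(40, 40, 40, 255));
//     draw_list->ChannelsMerge();                                        // merge back into a single stream of draw commands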
struct ImDrawListSplitter { int _Current; // Current channel number (0) int _Count; // Number of active channels (1+) ImVector<ImDrawChannel> _Channels; // Draw channels (not resized down so _Count might be < Channels.Size) inline ImDrawListSplitter() { Clear(); } inline ~ImDrawListSplitter() { ClearFreeMemory(); } inline void Clear() { _Current = 0; _Count = 1; } // Do not clear Channels[] so our allocations are reused next frame IMGUI_API void ClearFreeMemory(); IMGUI_API void Split(ImDrawList* draw_list, int count); IMGUI_API void Merge(ImDrawList* draw_list); IMGUI_API void SetCurrentChannel(ImDrawList* draw_list, int channel_idx); }; enum ImDrawCornerFlags_ { ImDrawCornerFlags_None = 0, ImDrawCornerFlags_TopLeft = 1 << 0, // 0x1 ImDrawCornerFlags_TopRight = 1 << 1, // 0x2 ImDrawCornerFlags_BotLeft = 1 << 2, // 0x4 ImDrawCornerFlags_BotRight = 1 << 3, // 0x8 ImDrawCornerFlags_Top = ImDrawCornerFlags_TopLeft | ImDrawCornerFlags_TopRight, // 0x3 ImDrawCornerFlags_Bot = ImDrawCornerFlags_BotLeft | ImDrawCornerFlags_BotRight, // 0xC ImDrawCornerFlags_Left = ImDrawCornerFlags_TopLeft | ImDrawCornerFlags_BotLeft, // 0x5 ImDrawCornerFlags_Right = ImDrawCornerFlags_TopRight | ImDrawCornerFlags_BotRight, // 0xA ImDrawCornerFlags_All = 0xF // In your function calls you may use ~0 (= all bits sets) instead of ImDrawCornerFlags_All, as a convenience }; enum ImDrawListFlags_ { ImDrawListFlags_None = 0, ImDrawListFlags_AntiAliasedLines = 1 << 0, // Lines are anti-aliased (*2 the number of triangles for 1.0f wide line, otherwise *3 the number of triangles) ImDrawListFlags_AntiAliasedFill = 1 << 1, // Filled shapes have anti-aliased edges (*2 the number of vertices) ImDrawListFlags_AllowVtxOffset = 1 << 2 // Can emit 'VtxOffset > 0' to allow large meshes. Set when 'ImGuiBackendFlags_RendererHasVtxOffset' is enabled. }; // Draw command list // This is the low-level list of polygons that ImGui:: functions are filling. At the end of the frame, // all command lists are passed to your ImGuiIO::RenderDrawListFn function for rendering. // Each dear imgui window contains its own ImDrawList. You can use ImGui::GetWindowDrawList() to // access the current window draw list and draw custom primitives. // You can interleave normal ImGui:: calls and adding primitives to the current draw list. // All positions are generally in pixel coordinates (top-left at (0,0), bottom-right at io.DisplaySize), but you are totally free to apply whatever transformation matrix to want to the data (if you apply such transformation you'll want to apply it to ClipRect as well) // Important: Primitives are always added to the list and not culled (culling is done at higher-level by ImGui:: functions), if you use this API a lot consider coarse culling your drawn objects. struct ImDrawList { // This is what you have to render ImVector<ImDrawCmd> CmdBuffer; // Draw commands. Typically 1 command = 1 GPU draw call, unless the command is a callback. ImVector<ImDrawIdx> IdxBuffer; // Index buffer. Each command consume ImDrawCmd::ElemCount of those ImVector<ImDrawVert> VtxBuffer; // Vertex buffer. ImDrawListFlags Flags; // Flags, you may poke into these to adjust anti-aliasing settings per-primitive. 
// [Internal, used while building lists] const ImDrawListSharedData* _Data; // Pointer to shared draw data (you can use ImGui::GetDrawListSharedData() to get the one from current ImGui context) const char* _OwnerName; // Pointer to owner window's name for debugging unsigned int _VtxCurrentOffset; // [Internal] Always 0 unless 'Flags & ImDrawListFlags_AllowVtxOffset'. unsigned int _VtxCurrentIdx; // [Internal] Generally == VtxBuffer.Size unless we are past 64K vertices, in which case this gets reset to 0. ImDrawVert* _VtxWritePtr; // [Internal] point within VtxBuffer.Data after each add command (to avoid using the ImVector<> operators too much) ImDrawIdx* _IdxWritePtr; // [Internal] point within IdxBuffer.Data after each add command (to avoid using the ImVector<> operators too much) ImVector<ImVec4> _ClipRectStack; // [Internal] ImVector<ImTextureID> _TextureIdStack; // [Internal] ImVector<ImVec2> _Path; // [Internal] current path building ImDrawListSplitter _Splitter; // [Internal] for channels api // If you want to create ImDrawList instances, pass them ImGui::GetDrawListSharedData() or create and use your own ImDrawListSharedData (so you can use ImDrawList without ImGui) ImDrawList(const ImDrawListSharedData* shared_data) { _Data = shared_data; _OwnerName = NULL; Clear(); } ~ImDrawList() { ClearFreeMemory(); } IMGUI_API void PushClipRect(ImVec2 clip_rect_min, ImVec2 clip_rect_max, bool intersect_with_current_clip_rect = false); // Render-level scissoring. This is passed down to your render function but not used for CPU-side coarse clipping. Prefer using higher-level ImGui::PushClipRect() to affect logic (hit-testing and widget culling) IMGUI_API void PushClipRectFullScreen(); IMGUI_API void PopClipRect(); IMGUI_API void PushTextureID(ImTextureID texture_id); IMGUI_API void PopTextureID(); inline ImVec2 GetClipRectMin() const { const ImVec4& cr = _ClipRectStack.back(); return ImVec2(cr.x, cr.y); } inline ImVec2 GetClipRectMax() const { const ImVec4& cr = _ClipRectStack.back(); return ImVec2(cr.z, cr.w); } // Primitives IMGUI_API void AddLine(const ImVec2& a, const ImVec2& b, ImU32 col, float thickness = 1.0f); IMGUI_API void AddRect(const ImVec2& a, const ImVec2& b, ImU32 col, float rounding = 0.0f, ImDrawCornerFlags rounding_corners = ImDrawCornerFlags_All, float thickness = 1.0f); // a: upper-left, b: lower-right (== upper-left + size), rounding_corners_flags: 4-bits corresponding to which corner to round IMGUI_API void AddRectFilled(const ImVec2& a, const ImVec2& b, ImU32 col, float rounding = 0.0f, ImDrawCornerFlags rounding_corners = ImDrawCornerFlags_All); // a: upper-left, b: lower-right (== upper-left + size) IMGUI_API void AddRectFilledMultiColor(const ImVec2& a, const ImVec2& b, ImU32 col_upr_left, ImU32 col_upr_right, ImU32 col_bot_right, ImU32 col_bot_left); IMGUI_API void AddQuad(const ImVec2& a, const ImVec2& b, const ImVec2& c, const ImVec2& d, ImU32 col, float thickness = 1.0f); IMGUI_API void AddQuadFilled(const ImVec2& a, const ImVec2& b, const ImVec2& c, const ImVec2& d, ImU32 col); IMGUI_API void AddTriangle(const ImVec2& a, const ImVec2& b, const ImVec2& c, ImU32 col, float thickness = 1.0f); IMGUI_API void AddTriangleFilled(const ImVec2& a, const ImVec2& b, const ImVec2& c, ImU32 col); IMGUI_API void AddCircle(const ImVec2& centre, float radius, ImU32 col, int num_segments = 12, float thickness = 1.0f); IMGUI_API void AddCircleFilled(const ImVec2& centre, float radius, ImU32 col, int num_segments = 12); IMGUI_API void AddText(const ImVec2& pos, ImU32 col, const char* 
text_begin, const char* text_end = NULL); IMGUI_API void AddText(const ImFont* font, float font_size, const ImVec2& pos, ImU32 col, const char* text_begin, const char* text_end = NULL, float wrap_width = 0.0f, const ImVec4* cpu_fine_clip_rect = NULL); IMGUI_API void AddImage(ImTextureID user_texture_id, const ImVec2& a, const ImVec2& b, const ImVec2& uv_a = ImVec2(0,0), const ImVec2& uv_b = ImVec2(1,1), ImU32 col = IM_COL32_WHITE); IMGUI_API void AddImageQuad(ImTextureID user_texture_id, const ImVec2& a, const ImVec2& b, const ImVec2& c, const ImVec2& d, const ImVec2& uv_a = ImVec2(0,0), const ImVec2& uv_b = ImVec2(1,0), const ImVec2& uv_c = ImVec2(1,1), const ImVec2& uv_d = ImVec2(0,1), ImU32 col = IM_COL32_WHITE); IMGUI_API void AddImageRounded(ImTextureID user_texture_id, const ImVec2& a, const ImVec2& b, const ImVec2& uv_a, const ImVec2& uv_b, ImU32 col, float rounding, ImDrawCornerFlags rounding_corners = ImDrawCornerFlags_All); IMGUI_API void AddPolyline(const ImVec2* points, int num_points, ImU32 col, bool closed, float thickness); IMGUI_API void AddConvexPolyFilled(const ImVec2* points, int num_points, ImU32 col); // Note: Anti-aliased filling requires points to be in clockwise order. IMGUI_API void AddBezierCurve(const ImVec2& pos0, const ImVec2& cp0, const ImVec2& cp1, const ImVec2& pos1, ImU32 col, float thickness, int num_segments = 0); // Stateful path API, add points then finish with PathFillConvex() or PathStroke() inline void PathClear() { _Path.Size = 0; } inline void PathLineTo(const ImVec2& pos) { _Path.push_back(pos); } inline void PathLineToMergeDuplicate(const ImVec2& pos) { if (_Path.Size == 0 || memcmp(&_Path.Data[_Path.Size-1], &pos, 8) != 0) _Path.push_back(pos); } inline void PathFillConvex(ImU32 col) { AddConvexPolyFilled(_Path.Data, _Path.Size, col); _Path.Size = 0; } // Note: Anti-aliased filling requires points to be in clockwise order. inline void PathStroke(ImU32 col, bool closed, float thickness = 1.0f) { AddPolyline(_Path.Data, _Path.Size, col, closed, thickness); _Path.Size = 0; } IMGUI_API void PathArcTo(const ImVec2& centre, float radius, float a_min, float a_max, int num_segments = 10); IMGUI_API void PathArcToFast(const ImVec2& centre, float radius, int a_min_of_12, int a_max_of_12); // Use precomputed angles for a 12 steps circle IMGUI_API void PathBezierCurveTo(const ImVec2& p1, const ImVec2& p2, const ImVec2& p3, int num_segments = 0); IMGUI_API void PathRect(const ImVec2& rect_min, const ImVec2& rect_max, float rounding = 0.0f, ImDrawCornerFlags rounding_corners = ImDrawCornerFlags_All); // Advanced IMGUI_API void AddCallback(ImDrawCallback callback, void* callback_data); // Your rendering function must check for 'UserCallback' in ImDrawCmd and call the function instead of rendering triangles. IMGUI_API void AddDrawCmd(); // This is useful if you need to forcefully create a new draw call (to allow for dependent rendering / blending). Otherwise primitives are merged into the same draw-call as much as possible IMGUI_API ImDrawList* CloneOutput() const; // Create a clone of the CmdBuffer/IdxBuffer/VtxBuffer. // Advanced: Channels // - Use to split render into layers. By switching channels to can render out-of-order (e.g. submit foreground primitives before background primitives) // - Use to minimize draw calls (e.g. 
if going back-and-forth between multiple non-overlapping clipping rectangles, prefer to append into separate channels then merge at the end) inline void ChannelsSplit(int count) { _Splitter.Split(this, count); } inline void ChannelsMerge() { _Splitter.Merge(this); } inline void ChannelsSetCurrent(int n) { _Splitter.SetCurrentChannel(this, n); } // Internal helpers // NB: all primitives needs to be reserved via PrimReserve() beforehand! IMGUI_API void Clear(); IMGUI_API void ClearFreeMemory(); IMGUI_API void PrimReserve(int idx_count, int vtx_count); IMGUI_API void PrimRect(const ImVec2& a, const ImVec2& b, ImU32 col); // Axis aligned rectangle (composed of two triangles) IMGUI_API void PrimRectUV(const ImVec2& a, const ImVec2& b, const ImVec2& uv_a, const ImVec2& uv_b, ImU32 col); IMGUI_API void PrimQuadUV(const ImVec2& a, const ImVec2& b, const ImVec2& c, const ImVec2& d, const ImVec2& uv_a, const ImVec2& uv_b, const ImVec2& uv_c, const ImVec2& uv_d, ImU32 col); inline void PrimWriteVtx(const ImVec2& pos, const ImVec2& uv, ImU32 col){ _VtxWritePtr->pos = pos; _VtxWritePtr->uv = uv; _VtxWritePtr->col = col; _VtxWritePtr++; _VtxCurrentIdx++; } inline void PrimWriteIdx(ImDrawIdx idx) { *_IdxWritePtr = idx; _IdxWritePtr++; } inline void PrimVtx(const ImVec2& pos, const ImVec2& uv, ImU32 col) { PrimWriteIdx((ImDrawIdx)_VtxCurrentIdx); PrimWriteVtx(pos, uv, col); } IMGUI_API void UpdateClipRect(); IMGUI_API void UpdateTextureID(); }; // All draw data to render a Dear ImGui frame // (NB: the style and the naming convention here is a little inconsistent, we currently preserve them for backward compatibility purpose, // as this is one of the oldest structure exposed by the library! Basically, ImDrawList == CmdList) struct ImDrawData { bool Valid; // Only valid after Render() is called and before the next NewFrame() is called. ImDrawList** CmdLists; // Array of ImDrawList* to render. The ImDrawList are owned by ImGuiContext and only pointed to from here. int CmdListsCount; // Number of ImDrawList* to render int TotalIdxCount; // For convenience, sum of all ImDrawList's IdxBuffer.Size int TotalVtxCount; // For convenience, sum of all ImDrawList's VtxBuffer.Size ImVec2 DisplayPos; // Upper-left position of the viewport to render (== upper-left of the orthogonal projection matrix to use) ImVec2 DisplaySize; // Size of the viewport to render (== io.DisplaySize for the main viewport) (DisplayPos + DisplaySize == lower-right of the orthogonal projection matrix to use) ImVec2 FramebufferScale; // Amount of pixels for each unit of DisplaySize. Based on io.DisplayFramebufferScale. Generally (1,1) on normal display, (2,2) on OSX with Retina display. // Functions ImDrawData() { Valid = false; Clear(); } ~ImDrawData() { Clear(); } void Clear() { Valid = false; CmdLists = NULL; CmdListsCount = TotalVtxCount = TotalIdxCount = 0; DisplayPos = DisplaySize = FramebufferScale = ImVec2(0.f, 0.f); } // The ImDrawList are owned by ImGuiContext! IMGUI_API void DeIndexAllBuffers(); // Helper to convert all buffers from indexed to non-indexed, in case you cannot render indexed. Note: this is slow and most likely a waste of resources. Always prefer indexed rendering! IMGUI_API void ScaleClipRects(const ImVec2& fb_scale); // Helper to scale the ClipRect field of each ImDrawCmd. Use if your final output buffer is at a different scale than Dear ImGui expects, or if there is a difference between your window resolution and framebuffer resolution. 
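    // Typical consumption by a renderer back-end (illustrative sketch only; 'MyRenderTriangles' is a placeholder for your own draw call):
    //     ImDrawData* draw_data = ImGui::GetDrawData();
    //     for (int n = 0; n < draw_data->CmdListsCount; n++)
    //     {
    //         const ImDrawList* cmd_list = draw_data->CmdLists[n];
    //         for (int cmd_i = 0; cmd_i < cmd_list->CmdBuffer.Size; cmd_i++)
    //         {
    //             const ImDrawCmd* pcmd = &cmd_list->CmdBuffer[cmd_i];
    //             if (pcmd->UserCallback)
    //                 pcmd->UserCallback(cmd_list, pcmd);  // honor draw callbacks (see ImDrawCallback / ImDrawCallback_ResetRenderState above)
    //             else
    //                 MyRenderTriangles(cmd_list, pcmd);   // render ElemCount indices starting at IdxOffset/VtxOffset
    //         }
    //     }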
}; //----------------------------------------------------------------------------- // Font API (ImFontConfig, ImFontGlyph, ImFontAtlasFlags, ImFontAtlas, ImFontGlyphRangesBuilder, ImFont) //----------------------------------------------------------------------------- struct ImFontConfig { void* FontData; // // TTF/OTF data int FontDataSize; // // TTF/OTF data size bool FontDataOwnedByAtlas; // true // TTF/OTF data ownership taken by the container ImFontAtlas (will delete memory itself). int FontNo; // 0 // Index of font within TTF/OTF file float SizePixels; // // Size in pixels for rasterizer (more or less maps to the resulting font height). int OversampleH; // 3 // Rasterize at higher quality for sub-pixel positioning. Read https://github.com/nothings/stb/blob/master/tests/oversample/README.md for details. int OversampleV; // 1 // Rasterize at higher quality for sub-pixel positioning. We don't use sub-pixel positions on the Y axis. bool PixelSnapH; // false // Align every glyph to pixel boundary. Useful e.g. if you are merging a non-pixel aligned font with the default font. If enabled, you can set OversampleH/V to 1. ImVec2 GlyphExtraSpacing; // 0, 0 // Extra spacing (in pixels) between glyphs. Only X axis is supported for now. ImVec2 GlyphOffset; // 0, 0 // Offset all glyphs from this font input. const ImWchar* GlyphRanges; // NULL // Pointer to a user-provided list of Unicode ranges (2 values per range, values are inclusive, zero-terminated list). THE ARRAY DATA NEEDS TO PERSIST AS LONG AS THE FONT IS ALIVE. float GlyphMinAdvanceX; // 0 // Minimum AdvanceX for glyphs, set Min to align font icons, set both Min/Max to enforce mono-space font float GlyphMaxAdvanceX; // FLT_MAX // Maximum AdvanceX for glyphs bool MergeMode; // false // Merge into previous ImFont, so you can combine multiple input fonts into one ImFont (e.g. ASCII font + icons + Japanese glyphs). You may want to use GlyphOffset.y when merging fonts of different heights. unsigned int RasterizerFlags; // 0x00 // Settings for custom font rasterizer (e.g. ImGuiFreeType). Leave as zero if you aren't using one. float RasterizerMultiply; // 1.0f // Brighten (>1.0f) or darken (<1.0f) font output. Brightening small fonts may be a good workaround to make them more readable. // [Internal] char Name[40]; // Name (strictly to ease debugging) ImFont* DstFont; IMGUI_API ImFontConfig(); }; struct ImFontGlyph { ImWchar Codepoint; // 0x0000..0xFFFF float AdvanceX; // Distance to next character (= data from font + ImFontConfig::GlyphExtraSpacing.x baked in) float X0, Y0, X1, Y1; // Glyph corners float U0, V0, U1, V1; // Texture coordinates }; // Helper to build glyph ranges from text/string data. Feed your application strings/characters to it, then call BuildRanges(). // This is essentially a tightly packed vector of 64k booleans = 8KB storage.
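// Example usage (illustrative sketch; "MyFont.ttf" is a placeholder path):
//     ImGuiIO& io = ImGui::GetIO();
//     ImVector<ImWchar> ranges;
//     ImFontGlyphRangesBuilder builder;
//     builder.AddText("Hello world");                                    // add the characters of a UTF-8 string
//     builder.AddRanges(io.Fonts->GetGlyphRangesJapanese());             // add a predefined range on top
//     builder.BuildRanges(&ranges);                                      // 'ranges' must persist until the atlas is built
//     io.Fonts->AddFontFromFileTTF("MyFont.ttf", 16.0f, NULL, ranges.Data);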
struct ImFontGlyphRangesBuilder { ImVector<ImU32> UsedChars; // Store 1-bit per Unicode code point (0=unused, 1=used) ImFontGlyphRangesBuilder() { Clear(); } inline void Clear() { int size_in_bytes = 0x10000 / 8; UsedChars.resize(size_in_bytes / (int)sizeof(ImU32)); memset(UsedChars.Data, 0, (size_t)size_in_bytes); } inline bool GetBit(int n) const { int off = (n >> 5); ImU32 mask = 1u << (n & 31); return (UsedChars[off] & mask) != 0; } // Get bit n in the array inline void SetBit(int n) { int off = (n >> 5); ImU32 mask = 1u << (n & 31); UsedChars[off] |= mask; } // Set bit n in the array inline void AddChar(ImWchar c) { SetBit(c); } // Add character IMGUI_API void AddText(const char* text, const char* text_end = NULL); // Add string (each character of the UTF-8 string are added) IMGUI_API void AddRanges(const ImWchar* ranges); // Add ranges, e.g. builder.AddRanges(ImFontAtlas::GetGlyphRangesDefault()) to force add all of ASCII/Latin+Ext IMGUI_API void BuildRanges(ImVector<ImWchar>* out_ranges); // Output new ranges }; // See ImFontAtlas::AddCustomRectXXX functions. struct ImFontAtlasCustomRect { unsigned int ID; // Input // User ID. Use <0x10000 to map into a font glyph, >=0x10000 for other/internal/custom texture data. unsigned short Width, Height; // Input // Desired rectangle dimension unsigned short X, Y; // Output // Packed position in Atlas float GlyphAdvanceX; // Input // For custom font glyphs only (ID<0x10000): glyph xadvance ImVec2 GlyphOffset; // Input // For custom font glyphs only (ID<0x10000): glyph display offset ImFont* Font; // Input // For custom font glyphs only (ID<0x10000): target font ImFontAtlasCustomRect() { ID = 0xFFFFFFFF; Width = Height = 0; X = Y = 0xFFFF; GlyphAdvanceX = 0.0f; GlyphOffset = ImVec2(0,0); Font = NULL; } bool IsPacked() const { return X != 0xFFFF; } }; enum ImFontAtlasFlags_ { ImFontAtlasFlags_None = 0, ImFontAtlasFlags_NoPowerOfTwoHeight = 1 << 0, // Don't round the height to next power of two ImFontAtlasFlags_NoMouseCursors = 1 << 1 // Don't build software mouse cursors into the atlas }; // Load and rasterize multiple TTF/OTF fonts into a same texture. The font atlas will build a single texture holding: // - One or more fonts. // - Custom graphics data needed to render the shapes needed by Dear ImGui. // - Mouse cursor shapes for software cursor rendering (unless setting 'Flags |= ImFontAtlasFlags_NoMouseCursors' in the font atlas). // It is the user-code responsibility to setup/build the atlas, then upload the pixel data into a texture accessible by your graphics api. // - Optionally, call any of the AddFont*** functions. If you don't call any, the default font embedded in the code will be loaded for you. // - Call GetTexDataAsAlpha8() or GetTexDataAsRGBA32() to build and retrieve pixels data. // - Upload the pixels data into a texture within your graphics system (see imgui_impl_xxxx.cpp examples) // - Call SetTexID(my_tex_id); and pass the pointer/identifier to your texture in a format natural to your graphics API. // This value will be passed back to you during rendering to identify the texture. Read FAQ entry about ImTextureID for more details. // Common pitfalls: // - If you pass a 'glyph_ranges' array to AddFont*** functions, you need to make sure that your array persist up until the // atlas is build (when calling GetTexData*** or Build()). We only copy the pointer, not the data. // - Important: By default, AddFontFromMemoryTTF() takes ownership of the data. Even though we are not writing to it, we will free the pointer on destruction. 
// You can set font_cfg->FontDataOwnedByAtlas=false to keep ownership of your data and it won't be freed. // - Even though many functions are suffixed with "TTF", OTF data is supported just as well. // - This is an old API and it is currently awkward for those and various other reasons! We will address them in the future! struct ImFontAtlas { IMGUI_API ImFontAtlas(); IMGUI_API ~ImFontAtlas(); IMGUI_API ImFont* AddFont(const ImFontConfig* font_cfg); IMGUI_API ImFont* AddFontDefault(const ImFontConfig* font_cfg = NULL); IMGUI_API ImFont* AddFontFromFileTTF(const char* filename, float size_pixels, const ImFontConfig* font_cfg = NULL, const ImWchar* glyph_ranges = NULL); IMGUI_API ImFont* AddFontFromMemoryTTF(void* font_data, int font_size, float size_pixels, const ImFontConfig* font_cfg = NULL, const ImWchar* glyph_ranges = NULL); // Note: Transfer ownership of 'ttf_data' to ImFontAtlas! Will be deleted after destruction of the atlas. Set font_cfg->FontDataOwnedByAtlas=false to keep ownership of your data and it won't be freed. IMGUI_API ImFont* AddFontFromMemoryCompressedTTF(const void* compressed_font_data, int compressed_font_size, float size_pixels, const ImFontConfig* font_cfg = NULL, const ImWchar* glyph_ranges = NULL); // 'compressed_font_data' still owned by caller. Compress with binary_to_compressed_c.cpp. IMGUI_API ImFont* AddFontFromMemoryCompressedBase85TTF(const char* compressed_font_data_base85, float size_pixels, const ImFontConfig* font_cfg = NULL, const ImWchar* glyph_ranges = NULL); // 'compressed_font_data_base85' still owned by caller. Compress with binary_to_compressed_c.cpp with -base85 parameter. IMGUI_API void ClearInputData(); // Clear input data (all ImFontConfig structures including sizes, TTF data, glyph ranges, etc.) = all the data used to build the texture and fonts. IMGUI_API void ClearTexData(); // Clear output texture data (CPU side). Saves RAM once the texture has been copied to graphics memory. IMGUI_API void ClearFonts(); // Clear output font data (glyphs storage, UV coordinates). IMGUI_API void Clear(); // Clear all input and output. // Build atlas, retrieve pixel data. // User is in charge of copying the pixels into graphics memory (e.g. create a texture with your engine). Then store your texture handle with SetTexID(). // The pitch is always = Width * BytesPerPixels (1 or 4) // Building in RGBA32 format is provided for convenience and compatibility, but note that unless you manually manipulate or copy color data into // the texture (e.g. when using the AddCustomRect*** api), then the RGB pixels emitted will always be white (~75% of memory/bandwidth waste). IMGUI_API bool Build(); // Build pixels data. This is called automatically for you by the GetTexData*** functions. IMGUI_API void GetTexDataAsAlpha8(unsigned char** out_pixels, int* out_width, int* out_height, int* out_bytes_per_pixel = NULL); // 1 byte per-pixel IMGUI_API void GetTexDataAsRGBA32(unsigned char** out_pixels, int* out_width, int* out_height, int* out_bytes_per_pixel = NULL); // 4 bytes-per-pixel bool IsBuilt() { return Fonts.Size > 0 && (TexPixelsAlpha8 != NULL || TexPixelsRGBA32 != NULL); } void SetTexID(ImTextureID id) { TexID = id; } //------------------------------------------- // Glyph Ranges //------------------------------------------- // Helpers to retrieve list of common Unicode ranges (2 values per range, values are inclusive, zero-terminated list) // NB: Make sure that your strings are UTF-8 and NOT in your local code page.
In C++11, you can create UTF-8 string literal using the u8"Hello world" syntax. See FAQ for details. // NB: Consider using ImFontGlyphRangesBuilder to build glyph ranges from textual data. IMGUI_API const ImWchar* GetGlyphRangesDefault(); // Basic Latin, Extended Latin IMGUI_API const ImWchar* GetGlyphRangesKorean(); // Default + Korean characters IMGUI_API const ImWchar* GetGlyphRangesJapanese(); // Default + Hiragana, Katakana, Half-Width, Selection of 1946 Ideographs IMGUI_API const ImWchar* GetGlyphRangesChineseFull(); // Default + Half-Width + Japanese Hiragana/Katakana + full set of about 21000 CJK Unified Ideographs IMGUI_API const ImWchar* GetGlyphRangesChineseSimplifiedCommon();// Default + Half-Width + Japanese Hiragana/Katakana + set of 2500 CJK Unified Ideographs for common simplified Chinese IMGUI_API const ImWchar* GetGlyphRangesCyrillic(); // Default + about 400 Cyrillic characters IMGUI_API const ImWchar* GetGlyphRangesThai(); // Default + Thai characters IMGUI_API const ImWchar* GetGlyphRangesVietnamese(); // Default + Vietnamese characters //------------------------------------------- // [BETA] Custom Rectangles/Glyphs API //------------------------------------------- // You can request arbitrary rectangles to be packed into the atlas, for your own purposes. // After calling Build(), you can query the rectangle position and render your pixels. // You can also request your rectangles to be mapped as font glyph (given a font + Unicode point), // so you can render e.g. custom colorful icons and use them as regular glyphs. // Read misc/fonts/README.txt for more details about using colorful icons. IMGUI_API int AddCustomRectRegular(unsigned int id, int width, int height); // Id needs to be >= 0x10000. Id >= 0x80000000 are reserved for ImGui and ImDrawList IMGUI_API int AddCustomRectFontGlyph(ImFont* font, ImWchar id, int width, int height, float advance_x, const ImVec2& offset = ImVec2(0,0)); // Id needs to be < 0x10000 to register a rectangle to map into a specific font. const ImFontAtlasCustomRect*GetCustomRectByIndex(int index) const { if (index < 0) return NULL; return &CustomRects[index]; } // [Internal] IMGUI_API void CalcCustomRectUV(const ImFontAtlasCustomRect* rect, ImVec2* out_uv_min, ImVec2* out_uv_max); IMGUI_API bool GetMouseCursorTexData(ImGuiMouseCursor cursor, ImVec2* out_offset, ImVec2* out_size, ImVec2 out_uv_border[2], ImVec2 out_uv_fill[2]); //------------------------------------------- // Members //------------------------------------------- bool Locked; // Marked as Locked by ImGui::NewFrame() so attempt to modify the atlas will assert. ImFontAtlasFlags Flags; // Build flags (see ImFontAtlasFlags_) ImTextureID TexID; // User data to refer to the texture once it has been uploaded to user's graphic systems. It is passed back to you during rendering via the ImDrawCmd structure. int TexDesiredWidth; // Texture width desired by user before Build(). Must be a power-of-two. If have many glyphs your graphics API have texture size restrictions you may want to increase texture width to decrease height. int TexGlyphPadding; // Padding between glyphs within texture in pixels. Defaults to 1. If your rendering method doesn't rely on bilinear filtering you may set this to 0. // [Internal] // NB: Access texture data via GetTexData*() calls! Which will setup a default font for you. unsigned char* TexPixelsAlpha8; // 1 component per pixel, each component is unsigned 8-bit. 
Total size = TexWidth * TexHeight unsigned int* TexPixelsRGBA32; // 4 component per pixel, each component is unsigned 8-bit. Total size = TexWidth * TexHeight * 4 int TexWidth; // Texture width calculated during Build(). int TexHeight; // Texture height calculated during Build(). ImVec2 TexUvScale; // = (1.0f/TexWidth, 1.0f/TexHeight) ImVec2 TexUvWhitePixel; // Texture coordinates to a white pixel ImVector<ImFont*> Fonts; // Hold all the fonts returned by AddFont*. Fonts[0] is the default font upon calling ImGui::NewFrame(), use ImGui::PushFont()/PopFont() to change the current font. ImVector<ImFontAtlasCustomRect> CustomRects; // Rectangles for packing custom texture data into the atlas. ImVector<ImFontConfig> ConfigData; // Internal data int CustomRectIds[1]; // Identifiers of custom texture rectangle used by ImFontAtlas/ImDrawList #ifndef IMGUI_DISABLE_OBSOLETE_FUNCTIONS typedef ImFontAtlasCustomRect CustomRect; // OBSOLETED in 1.72+ typedef ImFontGlyphRangesBuilder GlyphRangesBuilder; // OBSOLETED in 1.67+ #endif }; // Font runtime data and rendering // ImFontAtlas automatically loads a default embedded font for you when you call GetTexDataAsAlpha8() or GetTexDataAsRGBA32(). struct ImFont { // Members: Hot ~20/24 bytes (for CalcTextSize) ImVector<float> IndexAdvanceX; // 12-16 // out // // Sparse. Glyphs->AdvanceX in a directly indexable way (cache-friendly for CalcTextSize functions which only this this info, and are often bottleneck in large UI). float FallbackAdvanceX; // 4 // out // = FallbackGlyph->AdvanceX float FontSize; // 4 // in // // Height of characters/line, set during loading (don't change after loading) // Members: Hot ~36/48 bytes (for CalcTextSize + render loop) ImVector<ImWchar> IndexLookup; // 12-16 // out // // Sparse. Index glyphs by Unicode code-point. ImVector<ImFontGlyph> Glyphs; // 12-16 // out // // All glyphs. const ImFontGlyph* FallbackGlyph; // 4-8 // out // = FindGlyph(FontFallbackChar) ImVec2 DisplayOffset; // 8 // in // = (0,0) // Offset font rendering by xx pixels // Members: Cold ~32/40 bytes ImFontAtlas* ContainerAtlas; // 4-8 // out // // What we has been loaded into const ImFontConfig* ConfigData; // 4-8 // in // // Pointer within ContainerAtlas->ConfigData short ConfigDataCount; // 2 // in // ~ 1 // Number of ImFontConfig involved in creating this font. Bigger than 1 when merging multiple font sources into one ImFont. ImWchar FallbackChar; // 2 // in // = '?' // Replacement glyph if one isn't found. Only set via SetFallbackChar() float Scale; // 4 // in // = 1.f // Base font scale, multiplied by the per-window font scale which you can adjust with SetWindowFontScale() float Ascent, Descent; // 4+4 // out // // Ascent: distance from top to bottom of e.g. 'A' [0..FontSize] int MetricsTotalSurface;// 4 // out // // Total surface in pixels to get an idea of the font rasterization/texture cost (not exact, we approximate the cost of padding between glyphs) bool DirtyLookupTables; // 1 // out // // Methods IMGUI_API ImFont(); IMGUI_API ~ImFont(); IMGUI_API const ImFontGlyph*FindGlyph(ImWchar c) const; IMGUI_API const ImFontGlyph*FindGlyphNoFallback(ImWchar c) const; float GetCharAdvance(ImWchar c) const { return ((int)c < IndexAdvanceX.Size) ? IndexAdvanceX[(int)c] : FallbackAdvanceX; } bool IsLoaded() const { return ContainerAtlas != NULL; } const char* GetDebugName() const { return ConfigData ? ConfigData->Name : "<unknown>"; } // 'max_width' stops rendering after a certain width (could be turned into a 2d size). FLT_MAX to disable. 
    // 'wrap_width' enables automatic word-wrapping across multiple lines to fit into given width. 0.0f to disable.
    IMGUI_API ImVec2            CalcTextSizeA(float size, float max_width, float wrap_width, const char* text_begin, const char* text_end = NULL, const char** remaining = NULL) const; // utf8
    IMGUI_API const char*       CalcWordWrapPositionA(float scale, const char* text, const char* text_end, float wrap_width) const;
    IMGUI_API void              RenderChar(ImDrawList* draw_list, float size, ImVec2 pos, ImU32 col, ImWchar c) const;
    IMGUI_API void              RenderText(ImDrawList* draw_list, float size, ImVec2 pos, ImU32 col, const ImVec4& clip_rect, const char* text_begin, const char* text_end, float wrap_width = 0.0f, bool cpu_fine_clip = false) const;

    // [Internal] Don't use!
    IMGUI_API void              BuildLookupTable();
    IMGUI_API void              ClearOutputData();
    IMGUI_API void              GrowIndex(int new_size);
    IMGUI_API void              AddGlyph(ImWchar c, float x0, float y0, float x1, float y1, float u0, float v0, float u1, float v1, float advance_x);
    IMGUI_API void              AddRemapChar(ImWchar dst, ImWchar src, bool overwrite_dst = true); // Makes 'dst' character/glyph point to 'src' character/glyph. Currently needs to be called AFTER fonts have been built.
    IMGUI_API void              SetFallbackChar(ImWchar c);

#ifndef IMGUI_DISABLE_OBSOLETE_FUNCTIONS
    typedef ImFontGlyph Glyph; // OBSOLETED in 1.52+
#endif
};

#if defined(__clang__)
#pragma clang diagnostic pop
#elif defined(__GNUC__)
#pragma GCC diagnostic pop
#endif

// Include imgui_user.h at the end of imgui.h (convenient for user to only explicitly include vanilla imgui.h)
#ifdef IMGUI_INCLUDE_IMGUI_USER_H
#include "imgui_user.h"
#endif
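// [Illustrative sketch, not part of imgui.h] The ImFontAtlas comments above describe the usual build flow:
// add fonts, build/retrieve the pixel data, upload it to a texture yourself, then store your texture handle
// with SetTexID(). A minimal sketch is shown below, guarded out of compilation. MyEngine_CreateTextureRGBA32()
// and the font path are hypothetical placeholders for your own back-end; everything else is the public API
// declared in this header.
#if 0
static void ExampleSetupFontAtlas()
{
    ImGuiIO& io = ImGui::GetIO();
    ImFont* font = io.Fonts->AddFontFromFileTTF("my_font.ttf", 16.0f, NULL, io.Fonts->GetGlyphRangesDefault()); // example path
    IM_ASSERT(font != NULL);

    unsigned char* pixels = NULL;
    int width = 0, height = 0;
    io.Fonts->GetTexDataAsRGBA32(&pixels, &width, &height);                      // Calls Build() for you if needed

    ImTextureID tex_id = MyEngine_CreateTextureRGBA32(pixels, width, height);    // Hypothetical: upload pixels to your GPU texture
    io.Fonts->SetTexID(tex_id);                                                  // Passed back to you in ImDrawCmd during rendering
    io.Fonts->ClearTexData();                                                    // Optional: free the CPU-side copy once uploaded
}
#endif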
NVIDIA-Omniverse/PhysX/flow/external/imgui/imstb_rectpack.h
// [DEAR IMGUI] // This is a slightly modified version of stb_rect_pack.h 0.99. // Those changes would need to be pushed into nothings/stb: // - Added STBRP__CDECL // Grep for [DEAR IMGUI] to find the changes. // stb_rect_pack.h - v0.99 - public domain - rectangle packing // Sean Barrett 2014 // // Useful for e.g. packing rectangular textures into an atlas. // Does not do rotation. // // Not necessarily the awesomest packing method, but better than // the totally naive one in stb_truetype (which is primarily what // this is meant to replace). // // Has only had a few tests run, may have issues. // // More docs to come. // // No memory allocations; uses qsort() and assert() from stdlib. // Can override those by defining STBRP_SORT and STBRP_ASSERT. // // This library currently uses the Skyline Bottom-Left algorithm. // // Please note: better rectangle packers are welcome! Please // implement them to the same API, but with a different init // function. // // Credits // // Library // Sean Barrett // Minor features // Martins Mozeiko // github:IntellectualKitty // // Bugfixes / warning fixes // Jeremy Jaussaud // // Version history: // // 0.99 (2019-02-07) warning fixes // 0.11 (2017-03-03) return packing success/fail result // 0.10 (2016-10-25) remove cast-away-const to avoid warnings // 0.09 (2016-08-27) fix compiler warnings // 0.08 (2015-09-13) really fix bug with empty rects (w=0 or h=0) // 0.07 (2015-09-13) fix bug with empty rects (w=0 or h=0) // 0.06 (2015-04-15) added STBRP_SORT to allow replacing qsort // 0.05: added STBRP_ASSERT to allow replacing assert // 0.04: fixed minor bug in STBRP_LARGE_RECTS support // 0.01: initial release // // LICENSE // // See end of file for license information. ////////////////////////////////////////////////////////////////////////////// // // INCLUDE SECTION // #ifndef STB_INCLUDE_STB_RECT_PACK_H #define STB_INCLUDE_STB_RECT_PACK_H #define STB_RECT_PACK_VERSION 1 #ifdef STBRP_STATIC #define STBRP_DEF static #else #define STBRP_DEF extern #endif #ifdef __cplusplus extern "C" { #endif typedef struct stbrp_context stbrp_context; typedef struct stbrp_node stbrp_node; typedef struct stbrp_rect stbrp_rect; #ifdef STBRP_LARGE_RECTS typedef int stbrp_coord; #else typedef unsigned short stbrp_coord; #endif STBRP_DEF int stbrp_pack_rects (stbrp_context *context, stbrp_rect *rects, int num_rects); // Assign packed locations to rectangles. The rectangles are of type // 'stbrp_rect' defined below, stored in the array 'rects', and there // are 'num_rects' many of them. // // Rectangles which are successfully packed have the 'was_packed' flag // set to a non-zero value and 'x' and 'y' store the minimum location // on each axis (i.e. bottom-left in cartesian coordinates, top-left // if you imagine y increasing downwards). Rectangles which do not fit // have the 'was_packed' flag set to 0. // // You should not try to access the 'rects' array from another thread // while this function is running, as the function temporarily reorders // the array while it executes. // // To pack into another rectangle, you need to call stbrp_init_target // again. To continue packing into the same rectangle, you can call // this function again. Calling this multiple times with multiple rect // arrays will probably produce worse packing results than calling it // a single time with the full rectangle array, but the option is // available. // // The function returns 1 if all of the rectangles were successfully // packed and 0 otherwise. 
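// [Illustrative sketch, not part of stb_rect_pack.h] A typical call sequence for the API documented above,
// guarded out of compilation. The 512x512 target size and the two rectangle sizes are arbitrary example values;
// per the stbrp_init_target notes below, 'num_nodes' is chosen equal to the target width.
#if 0
static void example_pack_two_rects(void)
{
   stbrp_context context;
   stbrp_node    nodes[512];
   stbrp_rect    rects[2];
   rects[0].id = 0; rects[0].w = 100; rects[0].h = 40;
   rects[1].id = 1; rects[1].w = 60;  rects[1].h = 75;
   stbrp_init_target(&context, 512, 512, nodes, 512);
   int all_packed = stbrp_pack_rects(&context, rects, 2);   // returns 1 if every rect was packed
   // On return, rects[i].x / rects[i].y hold the packed position when rects[i].was_packed is non-zero.
   (void)all_packed;
}
#endif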
struct stbrp_rect { // reserved for your use: int id; // input: stbrp_coord w, h; // output: stbrp_coord x, y; int was_packed; // non-zero if valid packing }; // 16 bytes, nominally STBRP_DEF void stbrp_init_target (stbrp_context *context, int width, int height, stbrp_node *nodes, int num_nodes); // Initialize a rectangle packer to: // pack a rectangle that is 'width' by 'height' in dimensions // using temporary storage provided by the array 'nodes', which is 'num_nodes' long // // You must call this function every time you start packing into a new target. // // There is no "shutdown" function. The 'nodes' memory must stay valid for // the following stbrp_pack_rects() call (or calls), but can be freed after // the call (or calls) finish. // // Note: to guarantee best results, either: // 1. make sure 'num_nodes' >= 'width' // or 2. call stbrp_allow_out_of_mem() defined below with 'allow_out_of_mem = 1' // // If you don't do either of the above things, widths will be quantized to multiples // of small integers to guarantee the algorithm doesn't run out of temporary storage. // // If you do #2, then the non-quantized algorithm will be used, but the algorithm // may run out of temporary storage and be unable to pack some rectangles. STBRP_DEF void stbrp_setup_allow_out_of_mem (stbrp_context *context, int allow_out_of_mem); // Optionally call this function after init but before doing any packing to // change the handling of the out-of-temp-memory scenario, described above. // If you call init again, this will be reset to the default (false). STBRP_DEF void stbrp_setup_heuristic (stbrp_context *context, int heuristic); // Optionally select which packing heuristic the library should use. Different // heuristics will produce better/worse results for different data sets. // If you call init again, this will be reset to the default. 
enum { STBRP_HEURISTIC_Skyline_default=0, STBRP_HEURISTIC_Skyline_BL_sortHeight = STBRP_HEURISTIC_Skyline_default, STBRP_HEURISTIC_Skyline_BF_sortHeight }; ////////////////////////////////////////////////////////////////////////////// // // the details of the following structures don't matter to you, but they must // be visible so you can handle the memory allocations for them struct stbrp_node { stbrp_coord x,y; stbrp_node *next; }; struct stbrp_context { int width; int height; int align; int init_mode; int heuristic; int num_nodes; stbrp_node *active_head; stbrp_node *free_head; stbrp_node extra[2]; // we allocate two extra nodes so optimal user-node-count is 'width' not 'width+2' }; #ifdef __cplusplus } #endif #endif ////////////////////////////////////////////////////////////////////////////// // // IMPLEMENTATION SECTION // #ifdef STB_RECT_PACK_IMPLEMENTATION #ifndef STBRP_SORT #include <stdlib.h> #define STBRP_SORT qsort #endif #ifndef STBRP_ASSERT #include <assert.h> #define STBRP_ASSERT assert #endif // [DEAR IMGUI] Added STBRP__CDECL #ifdef _MSC_VER #define STBRP__NOTUSED(v) (void)(v) #define STBRP__CDECL __cdecl #else #define STBRP__NOTUSED(v) (void)sizeof(v) #define STBRP__CDECL #endif enum { STBRP__INIT_skyline = 1 }; STBRP_DEF void stbrp_setup_heuristic(stbrp_context *context, int heuristic) { switch (context->init_mode) { case STBRP__INIT_skyline: STBRP_ASSERT(heuristic == STBRP_HEURISTIC_Skyline_BL_sortHeight || heuristic == STBRP_HEURISTIC_Skyline_BF_sortHeight); context->heuristic = heuristic; break; default: STBRP_ASSERT(0); } } STBRP_DEF void stbrp_setup_allow_out_of_mem(stbrp_context *context, int allow_out_of_mem) { if (allow_out_of_mem) // if it's ok to run out of memory, then don't bother aligning them; // this gives better packing, but may fail due to OOM (even though // the rectangles easily fit). @TODO a smarter approach would be to only // quantize once we've hit OOM, then we could get rid of this parameter. context->align = 1; else { // if it's not ok to run out of memory, then quantize the widths // so that num_nodes is always enough nodes. // // I.e. 
num_nodes * align >= width // align >= width / num_nodes // align = ceil(width/num_nodes) context->align = (context->width + context->num_nodes-1) / context->num_nodes; } } STBRP_DEF void stbrp_init_target(stbrp_context *context, int width, int height, stbrp_node *nodes, int num_nodes) { int i; #ifndef STBRP_LARGE_RECTS STBRP_ASSERT(width <= 0xffff && height <= 0xffff); #endif for (i=0; i < num_nodes-1; ++i) nodes[i].next = &nodes[i+1]; nodes[i].next = NULL; context->init_mode = STBRP__INIT_skyline; context->heuristic = STBRP_HEURISTIC_Skyline_default; context->free_head = &nodes[0]; context->active_head = &context->extra[0]; context->width = width; context->height = height; context->num_nodes = num_nodes; stbrp_setup_allow_out_of_mem(context, 0); // node 0 is the full width, node 1 is the sentinel (lets us not store width explicitly) context->extra[0].x = 0; context->extra[0].y = 0; context->extra[0].next = &context->extra[1]; context->extra[1].x = (stbrp_coord) width; #ifdef STBRP_LARGE_RECTS context->extra[1].y = (1<<30); #else context->extra[1].y = 65535; #endif context->extra[1].next = NULL; } // find minimum y position if it starts at x1 static int stbrp__skyline_find_min_y(stbrp_context *c, stbrp_node *first, int x0, int width, int *pwaste) { stbrp_node *node = first; int x1 = x0 + width; int min_y, visited_width, waste_area; STBRP__NOTUSED(c); STBRP_ASSERT(first->x <= x0); #if 0 // skip in case we're past the node while (node->next->x <= x0) ++node; #else STBRP_ASSERT(node->next->x > x0); // we ended up handling this in the caller for efficiency #endif STBRP_ASSERT(node->x <= x0); min_y = 0; waste_area = 0; visited_width = 0; while (node->x < x1) { if (node->y > min_y) { // raise min_y higher. // we've accounted for all waste up to min_y, // but we'll now add more waste for everything we've visted waste_area += visited_width * (node->y - min_y); min_y = node->y; // the first time through, visited_width might be reduced if (node->x < x0) visited_width += node->next->x - x0; else visited_width += node->next->x - node->x; } else { // add waste area int under_width = node->next->x - node->x; if (under_width + visited_width > width) under_width = width - visited_width; waste_area += under_width * (min_y - node->y); visited_width += under_width; } node = node->next; } *pwaste = waste_area; return min_y; } typedef struct { int x,y; stbrp_node **prev_link; } stbrp__findresult; static stbrp__findresult stbrp__skyline_find_best_pos(stbrp_context *c, int width, int height) { int best_waste = (1<<30), best_x, best_y = (1 << 30); stbrp__findresult fr; stbrp_node **prev, *node, *tail, **best = NULL; // align to multiple of c->align width = (width + c->align - 1); width -= width % c->align; STBRP_ASSERT(width % c->align == 0); node = c->active_head; prev = &c->active_head; while (node->x + width <= c->width) { int y,waste; y = stbrp__skyline_find_min_y(c, node, node->x, width, &waste); if (c->heuristic == STBRP_HEURISTIC_Skyline_BL_sortHeight) { // actually just want to test BL // bottom left if (y < best_y) { best_y = y; best = prev; } } else { // best-fit if (y + height <= c->height) { // can only use it if it first vertically if (y < best_y || (y == best_y && waste < best_waste)) { best_y = y; best_waste = waste; best = prev; } } } prev = &node->next; node = node->next; } best_x = (best == NULL) ? 
0 : (*best)->x; // if doing best-fit (BF), we also have to try aligning right edge to each node position // // e.g, if fitting // // ____________________ // |____________________| // // into // // | | // | ____________| // |____________| // // then right-aligned reduces waste, but bottom-left BL is always chooses left-aligned // // This makes BF take about 2x the time if (c->heuristic == STBRP_HEURISTIC_Skyline_BF_sortHeight) { tail = c->active_head; node = c->active_head; prev = &c->active_head; // find first node that's admissible while (tail->x < width) tail = tail->next; while (tail) { int xpos = tail->x - width; int y,waste; STBRP_ASSERT(xpos >= 0); // find the left position that matches this while (node->next->x <= xpos) { prev = &node->next; node = node->next; } STBRP_ASSERT(node->next->x > xpos && node->x <= xpos); y = stbrp__skyline_find_min_y(c, node, xpos, width, &waste); if (y + height < c->height) { if (y <= best_y) { if (y < best_y || waste < best_waste || (waste==best_waste && xpos < best_x)) { best_x = xpos; STBRP_ASSERT(y <= best_y); best_y = y; best_waste = waste; best = prev; } } } tail = tail->next; } } fr.prev_link = best; fr.x = best_x; fr.y = best_y; return fr; } static stbrp__findresult stbrp__skyline_pack_rectangle(stbrp_context *context, int width, int height) { // find best position according to heuristic stbrp__findresult res = stbrp__skyline_find_best_pos(context, width, height); stbrp_node *node, *cur; // bail if: // 1. it failed // 2. the best node doesn't fit (we don't always check this) // 3. we're out of memory if (res.prev_link == NULL || res.y + height > context->height || context->free_head == NULL) { res.prev_link = NULL; return res; } // on success, create new node node = context->free_head; node->x = (stbrp_coord) res.x; node->y = (stbrp_coord) (res.y + height); context->free_head = node->next; // insert the new node into the right starting point, and // let 'cur' point to the remaining nodes needing to be // stiched back in cur = *res.prev_link; if (cur->x < res.x) { // preserve the existing one, so start testing with the next one stbrp_node *next = cur->next; cur->next = node; cur = next; } else { *res.prev_link = node; } // from here, traverse cur and free the nodes, until we get to one // that shouldn't be freed while (cur->next && cur->next->x <= res.x + width) { stbrp_node *next = cur->next; // move the current node to the free list cur->next = context->free_head; context->free_head = cur; cur = next; } // stitch the list back in node->next = cur; if (cur->x < res.x + width) cur->x = (stbrp_coord) (res.x + width); #ifdef _DEBUG cur = context->active_head; while (cur->x < context->width) { STBRP_ASSERT(cur->x < cur->next->x); cur = cur->next; } STBRP_ASSERT(cur->next == NULL); { int count=0; cur = context->active_head; while (cur) { cur = cur->next; ++count; } cur = context->free_head; while (cur) { cur = cur->next; ++count; } STBRP_ASSERT(count == context->num_nodes+2); } #endif return res; } // [DEAR IMGUI] Added STBRP__CDECL static int STBRP__CDECL rect_height_compare(const void *a, const void *b) { const stbrp_rect *p = (const stbrp_rect *) a; const stbrp_rect *q = (const stbrp_rect *) b; if (p->h > q->h) return -1; if (p->h < q->h) return 1; return (p->w > q->w) ? -1 : (p->w < q->w); } // [DEAR IMGUI] Added STBRP__CDECL static int STBRP__CDECL rect_original_order(const void *a, const void *b) { const stbrp_rect *p = (const stbrp_rect *) a; const stbrp_rect *q = (const stbrp_rect *) b; return (p->was_packed < q->was_packed) ? 
-1 : (p->was_packed > q->was_packed); } #ifdef STBRP_LARGE_RECTS #define STBRP__MAXVAL 0xffffffff #else #define STBRP__MAXVAL 0xffff #endif STBRP_DEF int stbrp_pack_rects(stbrp_context *context, stbrp_rect *rects, int num_rects) { int i, all_rects_packed = 1; // we use the 'was_packed' field internally to allow sorting/unsorting for (i=0; i < num_rects; ++i) { rects[i].was_packed = i; } // sort according to heuristic STBRP_SORT(rects, num_rects, sizeof(rects[0]), rect_height_compare); for (i=0; i < num_rects; ++i) { if (rects[i].w == 0 || rects[i].h == 0) { rects[i].x = rects[i].y = 0; // empty rect needs no space } else { stbrp__findresult fr = stbrp__skyline_pack_rectangle(context, rects[i].w, rects[i].h); if (fr.prev_link) { rects[i].x = (stbrp_coord) fr.x; rects[i].y = (stbrp_coord) fr.y; } else { rects[i].x = rects[i].y = STBRP__MAXVAL; } } } // unsort STBRP_SORT(rects, num_rects, sizeof(rects[0]), rect_original_order); // set was_packed flags and all_rects_packed status for (i=0; i < num_rects; ++i) { rects[i].was_packed = !(rects[i].x == STBRP__MAXVAL && rects[i].y == STBRP__MAXVAL); if (!rects[i].was_packed) all_rects_packed = 0; } // return the all_rects_packed status return all_rects_packed; } #endif /* ------------------------------------------------------------------------------ This software is available under 2 licenses -- choose whichever you prefer. ------------------------------------------------------------------------------ ALTERNATIVE A - MIT License Copyright (c) 2017 Sean Barrett Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ------------------------------------------------------------------------------ ALTERNATIVE B - Public Domain (www.unlicense.org) This is free and unencumbered software released into the public domain. Anyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means. In jurisdictions that recognize copyright laws, the author or authors of this software dedicate any and all copyright interest in the software to the public domain. We make this dedication for the benefit of the public at large and to the detriment of our heirs and successors. We intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this software under copyright law. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ------------------------------------------------------------------------------ */
NVIDIA-Omniverse/PhysX/flow/external/imgui/imgui_demo.cpp
// dear imgui, v1.72b
// (demo code)

// Message to the person tempted to delete this file when integrating Dear ImGui into their code base:
// Do NOT remove this file from your project! Think again! It is the most useful reference code that you and other coders
// will want to refer to and call. Have the ImGui::ShowDemoWindow() function wired in an always-available debug menu of
// your game/app! Removing this file from your project hinders access to documentation for everyone in your team,
// likely leading you to poorer usage of the library.
// Everything in this file will be stripped out by the linker if you don't call ImGui::ShowDemoWindow().
// If you want to link core Dear ImGui in your shipped builds but want an easy guarantee that the demo will not be linked,
// you can setup your imconfig.h with #define IMGUI_DISABLE_DEMO_WINDOWS and those functions will be empty.
// In other situations, whenever you have Dear ImGui available you probably want this to be available for reference.
// Thank you,
// -Your beloved friend, imgui_demo.cpp (that you won't delete)

// Message to beginner C/C++ programmers about the meaning of the 'static' keyword:
// In this demo code, we frequently use 'static' variables inside functions. A static variable persists across calls, so it is
// essentially like a global variable but declared inside the scope of the function. We do this as a way to gather code and data
// in the same place, to make the demo source code faster to read, faster to write, and smaller in size.
// It also happens to be a convenient way of storing simple UI-related information as long as your function doesn't need to be
// reentrant or used in multiple threads. This might be a pattern you will want to use in your code, but most of the real data
// you would be editing is likely going to be stored outside your functions.

// The demo code in this file is designed to be easy to copy-and-paste into your application!
// Because of this:
// - We never omit the ImGui:: namespace when calling functions, even though most of our code is already in the same namespace.
// - We try to declare static variables in the local scope, as close as possible to the code using them.
// - We never use any of the helpers/facilities used internally by dear imgui, unless they have been exposed in the public API (imgui.h).
// - We never use maths operators on ImVec2/ImVec4. For other imgui source files, they are provided by imgui_internal.h w/ IMGUI_DEFINE_MATH_OPERATORS,
//   for your own source files they are optional and require you to either enable those, or provide your own via IM_VEC2_CLASS_EXTRA in imconfig.h.
//   Because we don't want to assume anything about your support of maths operators, we don't use them in imgui_demo.cpp.
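// A minimal sketch of the 'static' pattern described above (for illustration only; the demo functions below use it throughout):
//      static int counter = 0;                    // persists across frames, but stays local to this function
//      if (ImGui::Button("Add one")) counter++;   // the widget and the state it edits live side by side
//      ImGui::Text("counter = %d", counter);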
/* Index of this file: // [SECTION] Forward Declarations, Helpers // [SECTION] Demo Window / ShowDemoWindow() // [SECTION] About Window / ShowAboutWindow() // [SECTION] Style Editor / ShowStyleEditor() // [SECTION] Example App: Main Menu Bar / ShowExampleAppMainMenuBar() // [SECTION] Example App: Debug Console / ShowExampleAppConsole() // [SECTION] Example App: Debug Log / ShowExampleAppLog() // [SECTION] Example App: Simple Layout / ShowExampleAppLayout() // [SECTION] Example App: Property Editor / ShowExampleAppPropertyEditor() // [SECTION] Example App: Long Text / ShowExampleAppLongText() // [SECTION] Example App: Auto Resize / ShowExampleAppAutoResize() // [SECTION] Example App: Constrained Resize / ShowExampleAppConstrainedResize() // [SECTION] Example App: Simple Overlay / ShowExampleAppSimpleOverlay() // [SECTION] Example App: Manipulating Window Titles / ShowExampleAppWindowTitles() // [SECTION] Example App: Custom Rendering using ImDrawList API / ShowExampleAppCustomRendering() // [SECTION] Example App: Documents Handling / ShowExampleAppDocuments() */ #if defined(_MSC_VER) && !defined(_CRT_SECURE_NO_WARNINGS) #define _CRT_SECURE_NO_WARNINGS #endif #include "imgui.h" #include <ctype.h> // toupper #include <limits.h> // INT_MIN, INT_MAX #include <math.h> // sqrtf, powf, cosf, sinf, floorf, ceilf #include <stdio.h> // vsnprintf, sscanf, printf #include <stdlib.h> // NULL, malloc, free, atoi #if defined(_MSC_VER) && _MSC_VER <= 1500 // MSVC 2008 or earlier #include <stddef.h> // intptr_t #else #include <stdint.h> // intptr_t #endif #ifdef _MSC_VER #pragma warning (disable: 4996) // 'This function or variable may be unsafe': strcpy, strdup, sprintf, vsnprintf, sscanf, fopen #endif #if defined(__clang__) #pragma clang diagnostic ignored "-Wold-style-cast" // warning : use of old-style cast // yes, they are more terse. #pragma clang diagnostic ignored "-Wdeprecated-declarations" // warning : 'xx' is deprecated: The POSIX name for this item.. // for strdup used in demo code (so user can copy & paste the code) #pragma clang diagnostic ignored "-Wint-to-void-pointer-cast" // warning : cast to 'void *' from smaller integer type 'int' #pragma clang diagnostic ignored "-Wformat-security" // warning : warning: format string is not a string literal #pragma clang diagnostic ignored "-Wexit-time-destructors" // warning : declaration requires an exit-time destructor // exit-time destruction order is undefined. if MemFree() leads to users code that has been disabled before exit it might cause problems. ImGui coding style welcomes static/globals. #pragma clang diagnostic ignored "-Wunused-macros" // warning : warning: macro is not used // we define snprintf/vsnprintf on Windows so they are available, but not always used. #if __has_warning("-Wzero-as-null-pointer-constant") #pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant" // warning : zero as null pointer constant // some standard header variations use #define NULL 0 #endif #if __has_warning("-Wdouble-promotion") #pragma clang diagnostic ignored "-Wdouble-promotion" // warning: implicit conversion from 'float' to 'double' when passing argument to function // using printf() is a misery with this as C++ va_arg ellipsis changes float to double. 
#endif #if __has_warning("-Wreserved-id-macro") #pragma clang diagnostic ignored "-Wreserved-id-macro" // warning : macro name is a reserved identifier // #endif #elif defined(__GNUC__) #pragma GCC diagnostic ignored "-Wpragmas" // warning: unknown option after '#pragma GCC diagnostic' kind #pragma GCC diagnostic ignored "-Wint-to-pointer-cast" // warning: cast to pointer from integer of different size #pragma GCC diagnostic ignored "-Wformat-security" // warning : format string is not a string literal (potentially insecure) #pragma GCC diagnostic ignored "-Wdouble-promotion" // warning: implicit conversion from 'float' to 'double' when passing argument to function #pragma GCC diagnostic ignored "-Wconversion" // warning: conversion to 'xxxx' from 'xxxx' may alter its value #pragma GCC diagnostic ignored "-Wmisleading-indentation" // [__GNUC__ >= 6] warning: this 'if' clause does not guard this statement // GCC 6.0+ only. See #883 on GitHub. #endif // Play it nice with Windows users. Notepad in 2017 still doesn't display text data with Unix-style \n. #ifdef _WIN32 #define IM_NEWLINE "\r\n" #define snprintf _snprintf #define vsnprintf _vsnprintf #else #define IM_NEWLINE "\n" #endif #define IM_MAX(_A,_B) (((_A) >= (_B)) ? (_A) : (_B)) //----------------------------------------------------------------------------- // [SECTION] Forward Declarations, Helpers //----------------------------------------------------------------------------- #if !defined(IMGUI_DISABLE_OBSOLETE_FUNCTIONS) && defined(IMGUI_DISABLE_TEST_WINDOWS) && !defined(IMGUI_DISABLE_DEMO_WINDOWS) // Obsolete name since 1.53, TEST->DEMO #define IMGUI_DISABLE_DEMO_WINDOWS #endif #if !defined(IMGUI_DISABLE_DEMO_WINDOWS) // Forward Declarations static void ShowExampleAppDocuments(bool* p_open); static void ShowExampleAppMainMenuBar(); static void ShowExampleAppConsole(bool* p_open); static void ShowExampleAppLog(bool* p_open); static void ShowExampleAppLayout(bool* p_open); static void ShowExampleAppPropertyEditor(bool* p_open); static void ShowExampleAppLongText(bool* p_open); static void ShowExampleAppAutoResize(bool* p_open); static void ShowExampleAppConstrainedResize(bool* p_open); static void ShowExampleAppSimpleOverlay(bool* p_open); static void ShowExampleAppWindowTitles(bool* p_open); static void ShowExampleAppCustomRendering(bool* p_open); static void ShowExampleMenuFile(); // Helper to display a little (?) mark which shows a tooltip when hovered. // In your own code you may want to display an actual icon if you are using a merged icon fonts (see misc/fonts/README.txt) static void HelpMarker(const char* desc) { ImGui::TextDisabled("(?)"); if (ImGui::IsItemHovered()) { ImGui::BeginTooltip(); ImGui::PushTextWrapPos(ImGui::GetFontSize() * 35.0f); ImGui::TextUnformatted(desc); ImGui::PopTextWrapPos(); ImGui::EndTooltip(); } } // Helper to display basic user controls. 
void ImGui::ShowUserGuide() { ImGuiIO& io = ImGui::GetIO(); ImGui::BulletText("Double-click on title bar to collapse window."); ImGui::BulletText("Click and drag on lower right corner to resize window\n(double-click to auto fit window to its contents)."); ImGui::BulletText("Click and drag on any empty space to move window."); ImGui::BulletText("TAB/SHIFT+TAB to cycle through keyboard editable fields."); ImGui::BulletText("CTRL+Click on a slider or drag box to input value as text."); if (io.FontAllowUserScaling) ImGui::BulletText("CTRL+Mouse Wheel to zoom window contents."); ImGui::BulletText("Mouse Wheel to scroll."); ImGui::BulletText("While editing text:\n"); ImGui::Indent(); ImGui::BulletText("Hold SHIFT or use mouse to select text."); ImGui::BulletText("CTRL+Left/Right to word jump."); ImGui::BulletText("CTRL+A or double-click to select all."); ImGui::BulletText("CTRL+X,CTRL+C,CTRL+V to use clipboard."); ImGui::BulletText("CTRL+Z,CTRL+Y to undo/redo."); ImGui::BulletText("ESCAPE to revert."); ImGui::BulletText("You can apply arithmetic operators +,*,/ on numerical values.\nUse +- to subtract."); ImGui::Unindent(); } //----------------------------------------------------------------------------- // [SECTION] Demo Window / ShowDemoWindow() //----------------------------------------------------------------------------- // We split the contents of the big ShowDemoWindow() function into smaller functions (because the link time of very large functions grow non-linearly) static void ShowDemoWindowWidgets(); static void ShowDemoWindowLayout(); static void ShowDemoWindowPopups(); static void ShowDemoWindowColumns(); static void ShowDemoWindowMisc(); // Demonstrate most Dear ImGui features (this is big function!) // You may execute this function to experiment with the UI and understand what it does. You may then search for keywords in the code when you are interested by a specific feature. void ImGui::ShowDemoWindow(bool* p_open) { IM_ASSERT(ImGui::GetCurrentContext() != NULL && "Missing dear imgui context. 
Refer to examples app!"); // Exceptionally add an extra assert here for people confused with initial dear imgui setup // Examples Apps (accessible from the "Examples" menu) static bool show_app_documents = false; static bool show_app_main_menu_bar = false; static bool show_app_console = false; static bool show_app_log = false; static bool show_app_layout = false; static bool show_app_property_editor = false; static bool show_app_long_text = false; static bool show_app_auto_resize = false; static bool show_app_constrained_resize = false; static bool show_app_simple_overlay = false; static bool show_app_window_titles = false; static bool show_app_custom_rendering = false; if (show_app_documents) ShowExampleAppDocuments(&show_app_documents); if (show_app_main_menu_bar) ShowExampleAppMainMenuBar(); if (show_app_console) ShowExampleAppConsole(&show_app_console); if (show_app_log) ShowExampleAppLog(&show_app_log); if (show_app_layout) ShowExampleAppLayout(&show_app_layout); if (show_app_property_editor) ShowExampleAppPropertyEditor(&show_app_property_editor); if (show_app_long_text) ShowExampleAppLongText(&show_app_long_text); if (show_app_auto_resize) ShowExampleAppAutoResize(&show_app_auto_resize); if (show_app_constrained_resize) ShowExampleAppConstrainedResize(&show_app_constrained_resize); if (show_app_simple_overlay) ShowExampleAppSimpleOverlay(&show_app_simple_overlay); if (show_app_window_titles) ShowExampleAppWindowTitles(&show_app_window_titles); if (show_app_custom_rendering) ShowExampleAppCustomRendering(&show_app_custom_rendering); // Dear ImGui Apps (accessible from the "Help" menu) static bool show_app_metrics = false; static bool show_app_style_editor = false; static bool show_app_about = false; if (show_app_metrics) { ImGui::ShowMetricsWindow(&show_app_metrics); } if (show_app_style_editor) { ImGui::Begin("Style Editor", &show_app_style_editor); ImGui::ShowStyleEditor(); ImGui::End(); } if (show_app_about) { ImGui::ShowAboutWindow(&show_app_about); } // Demonstrate the various window flags. Typically you would just use the default! static bool no_titlebar = false; static bool no_scrollbar = false; static bool no_menu = false; static bool no_move = false; static bool no_resize = false; static bool no_collapse = false; static bool no_close = false; static bool no_nav = false; static bool no_background = false; static bool no_bring_to_front = false; ImGuiWindowFlags window_flags = 0; if (no_titlebar) window_flags |= ImGuiWindowFlags_NoTitleBar; if (no_scrollbar) window_flags |= ImGuiWindowFlags_NoScrollbar; if (!no_menu) window_flags |= ImGuiWindowFlags_MenuBar; if (no_move) window_flags |= ImGuiWindowFlags_NoMove; if (no_resize) window_flags |= ImGuiWindowFlags_NoResize; if (no_collapse) window_flags |= ImGuiWindowFlags_NoCollapse; if (no_nav) window_flags |= ImGuiWindowFlags_NoNav; if (no_background) window_flags |= ImGuiWindowFlags_NoBackground; if (no_bring_to_front) window_flags |= ImGuiWindowFlags_NoBringToFrontOnFocus; if (no_close) p_open = NULL; // Don't pass our bool* to Begin // We specify a default position/size in case there's no data in the .ini file. Typically this isn't required! We only do it to make the Demo applications a little more welcoming. ImGui::SetNextWindowPos(ImVec2(650, 20), ImGuiCond_FirstUseEver); ImGui::SetNextWindowSize(ImVec2(550, 680), ImGuiCond_FirstUseEver); // Main body of the Demo window starts here. if (!ImGui::Begin("Dear ImGui Demo", p_open, window_flags)) { // Early out if the window is collapsed, as an optimization. 
ImGui::End(); return; } // Most "big" widgets share a common width settings by default. //ImGui::PushItemWidth(ImGui::GetWindowWidth() * 0.65f); // Use 2/3 of the space for widgets and 1/3 for labels (default) ImGui::PushItemWidth(ImGui::GetFontSize() * -12); // Use fixed width for labels (by passing a negative value), the rest goes to widgets. We choose a width proportional to our font size. // Menu Bar if (ImGui::BeginMenuBar()) { if (ImGui::BeginMenu("Menu")) { ShowExampleMenuFile(); ImGui::EndMenu(); } if (ImGui::BeginMenu("Examples")) { ImGui::MenuItem("Main menu bar", NULL, &show_app_main_menu_bar); ImGui::MenuItem("Console", NULL, &show_app_console); ImGui::MenuItem("Log", NULL, &show_app_log); ImGui::MenuItem("Simple layout", NULL, &show_app_layout); ImGui::MenuItem("Property editor", NULL, &show_app_property_editor); ImGui::MenuItem("Long text display", NULL, &show_app_long_text); ImGui::MenuItem("Auto-resizing window", NULL, &show_app_auto_resize); ImGui::MenuItem("Constrained-resizing window", NULL, &show_app_constrained_resize); ImGui::MenuItem("Simple overlay", NULL, &show_app_simple_overlay); ImGui::MenuItem("Manipulating window titles", NULL, &show_app_window_titles); ImGui::MenuItem("Custom rendering", NULL, &show_app_custom_rendering); ImGui::MenuItem("Documents", NULL, &show_app_documents); ImGui::EndMenu(); } if (ImGui::BeginMenu("Help")) { ImGui::MenuItem("Metrics", NULL, &show_app_metrics); ImGui::MenuItem("Style Editor", NULL, &show_app_style_editor); ImGui::MenuItem("About Dear ImGui", NULL, &show_app_about); ImGui::EndMenu(); } ImGui::EndMenuBar(); } ImGui::Text("dear imgui says hello. (%s)", IMGUI_VERSION); ImGui::Spacing(); if (ImGui::CollapsingHeader("Help")) { ImGui::Text("PROGRAMMER GUIDE:"); ImGui::BulletText("Please see the ShowDemoWindow() code in imgui_demo.cpp. <- you are here!"); ImGui::BulletText("Please see the comments in imgui.cpp."); ImGui::BulletText("Please see the examples/ in application."); ImGui::BulletText("Enable 'io.ConfigFlags |= NavEnableKeyboard' for keyboard controls."); ImGui::BulletText("Enable 'io.ConfigFlags |= NavEnableGamepad' for gamepad controls."); ImGui::Separator(); ImGui::Text("USER GUIDE:"); ImGui::ShowUserGuide(); } if (ImGui::CollapsingHeader("Configuration")) { ImGuiIO& io = ImGui::GetIO(); if (ImGui::TreeNode("Configuration##2")) { ImGui::CheckboxFlags("io.ConfigFlags: NavEnableKeyboard", (unsigned int *)&io.ConfigFlags, ImGuiConfigFlags_NavEnableKeyboard); ImGui::CheckboxFlags("io.ConfigFlags: NavEnableGamepad", (unsigned int *)&io.ConfigFlags, ImGuiConfigFlags_NavEnableGamepad); ImGui::SameLine(); HelpMarker("Required back-end to feed in gamepad inputs in io.NavInputs[] and set io.BackendFlags |= ImGuiBackendFlags_HasGamepad.\n\nRead instructions in imgui.cpp for details."); ImGui::CheckboxFlags("io.ConfigFlags: NavEnableSetMousePos", (unsigned int *)&io.ConfigFlags, ImGuiConfigFlags_NavEnableSetMousePos); ImGui::SameLine(); HelpMarker("Instruct navigation to move the mouse cursor. See comment for ImGuiConfigFlags_NavEnableSetMousePos."); ImGui::CheckboxFlags("io.ConfigFlags: NoMouse", (unsigned int *)&io.ConfigFlags, ImGuiConfigFlags_NoMouse); if (io.ConfigFlags & ImGuiConfigFlags_NoMouse) // Create a way to restore this flag otherwise we could be stuck completely! 
{ if (fmodf((float)ImGui::GetTime(), 0.40f) < 0.20f) { ImGui::SameLine(); ImGui::Text("<<PRESS SPACE TO DISABLE>>"); } if (ImGui::IsKeyPressed(ImGui::GetKeyIndex(ImGuiKey_Space))) io.ConfigFlags &= ~ImGuiConfigFlags_NoMouse; } ImGui::CheckboxFlags("io.ConfigFlags: NoMouseCursorChange", (unsigned int *)&io.ConfigFlags, ImGuiConfigFlags_NoMouseCursorChange); ImGui::SameLine(); HelpMarker("Instruct back-end to not alter mouse cursor shape and visibility."); ImGui::Checkbox("io.ConfigInputTextCursorBlink", &io.ConfigInputTextCursorBlink); ImGui::SameLine(); HelpMarker("Set to false to disable blinking cursor, for users who consider it distracting"); ImGui::Checkbox("io.ConfigWindowsResizeFromEdges", &io.ConfigWindowsResizeFromEdges); ImGui::SameLine(); HelpMarker("Enable resizing of windows from their edges and from the lower-left corner.\nThis requires (io.BackendFlags & ImGuiBackendFlags_HasMouseCursors) because it needs mouse cursor feedback."); ImGui::Checkbox("io.ConfigWindowsMoveFromTitleBarOnly", &io.ConfigWindowsMoveFromTitleBarOnly); ImGui::Checkbox("io.MouseDrawCursor", &io.MouseDrawCursor); ImGui::SameLine(); HelpMarker("Instruct Dear ImGui to render a mouse cursor for you. Note that a mouse cursor rendered via your application GPU rendering path will feel more laggy than hardware cursor, but will be more in sync with your other visuals.\n\nSome desktop applications may use both kinds of cursors (e.g. enable software cursor only when resizing/dragging something)."); ImGui::TreePop(); ImGui::Separator(); } if (ImGui::TreeNode("Backend Flags")) { HelpMarker("Those flags are set by the back-ends (imgui_impl_xxx files) to specify their capabilities."); ImGuiBackendFlags backend_flags = io.BackendFlags; // Make a local copy to avoid modifying actual back-end flags. ImGui::CheckboxFlags("io.BackendFlags: HasGamepad", (unsigned int *)&backend_flags, ImGuiBackendFlags_HasGamepad); ImGui::CheckboxFlags("io.BackendFlags: HasMouseCursors", (unsigned int *)&backend_flags, ImGuiBackendFlags_HasMouseCursors); ImGui::CheckboxFlags("io.BackendFlags: HasSetMousePos", (unsigned int *)&backend_flags, ImGuiBackendFlags_HasSetMousePos); ImGui::CheckboxFlags("io.BackendFlags: RendererHasVtxOffset", (unsigned int *)&backend_flags, ImGuiBackendFlags_RendererHasVtxOffset); ImGui::TreePop(); ImGui::Separator(); } if (ImGui::TreeNode("Style")) { ImGui::ShowStyleEditor(); ImGui::TreePop(); ImGui::Separator(); } if (ImGui::TreeNode("Capture/Logging")) { ImGui::TextWrapped("The logging API redirects all text output so you can easily capture the content of a window or a block. 
Tree nodes can be automatically expanded."); HelpMarker("Try opening any of the contents below in this window and then click one of the \"Log To\" button."); ImGui::LogButtons(); ImGui::TextWrapped("You can also call ImGui::LogText() to output directly to the log without a visual output."); if (ImGui::Button("Copy \"Hello, world!\" to clipboard")) { ImGui::LogToClipboard(); ImGui::LogText("Hello, world!"); ImGui::LogFinish(); } ImGui::TreePop(); } } if (ImGui::CollapsingHeader("Window options")) { ImGui::Checkbox("No titlebar", &no_titlebar); ImGui::SameLine(150); ImGui::Checkbox("No scrollbar", &no_scrollbar); ImGui::SameLine(300); ImGui::Checkbox("No menu", &no_menu); ImGui::Checkbox("No move", &no_move); ImGui::SameLine(150); ImGui::Checkbox("No resize", &no_resize); ImGui::SameLine(300); ImGui::Checkbox("No collapse", &no_collapse); ImGui::Checkbox("No close", &no_close); ImGui::SameLine(150); ImGui::Checkbox("No nav", &no_nav); ImGui::SameLine(300); ImGui::Checkbox("No background", &no_background); ImGui::Checkbox("No bring to front", &no_bring_to_front); } // All demo contents ShowDemoWindowWidgets(); ShowDemoWindowLayout(); ShowDemoWindowPopups(); ShowDemoWindowColumns(); ShowDemoWindowMisc(); // End of ShowDemoWindow() ImGui::End(); } static void ShowDemoWindowWidgets() { if (!ImGui::CollapsingHeader("Widgets")) return; if (ImGui::TreeNode("Basic")) { static int clicked = 0; if (ImGui::Button("Button")) clicked++; if (clicked & 1) { ImGui::SameLine(); ImGui::Text("Thanks for clicking me!"); } static bool check = true; ImGui::Checkbox("checkbox", &check); static int e = 0; ImGui::RadioButton("radio a", &e, 0); ImGui::SameLine(); ImGui::RadioButton("radio b", &e, 1); ImGui::SameLine(); ImGui::RadioButton("radio c", &e, 2); // Color buttons, demonstrate using PushID() to add unique identifier in the ID stack, and changing style. for (int i = 0; i < 7; i++) { if (i > 0) ImGui::SameLine(); ImGui::PushID(i); ImGui::PushStyleColor(ImGuiCol_Button, (ImVec4)ImColor::HSV(i/7.0f, 0.6f, 0.6f)); ImGui::PushStyleColor(ImGuiCol_ButtonHovered, (ImVec4)ImColor::HSV(i/7.0f, 0.7f, 0.7f)); ImGui::PushStyleColor(ImGuiCol_ButtonActive, (ImVec4)ImColor::HSV(i/7.0f, 0.8f, 0.8f)); ImGui::Button("Click"); ImGui::PopStyleColor(3); ImGui::PopID(); } // Use AlignTextToFramePadding() to align text baseline to the baseline of framed elements (otherwise a Text+SameLine+Button sequence will have the text a little too high by default) ImGui::AlignTextToFramePadding(); ImGui::Text("Hold to repeat:"); ImGui::SameLine(); // Arrow buttons with Repeater static int counter = 0; float spacing = ImGui::GetStyle().ItemInnerSpacing.x; ImGui::PushButtonRepeat(true); if (ImGui::ArrowButton("##left", ImGuiDir_Left)) { counter--; } ImGui::SameLine(0.0f, spacing); if (ImGui::ArrowButton("##right", ImGuiDir_Right)) { counter++; } ImGui::PopButtonRepeat(); ImGui::SameLine(); ImGui::Text("%d", counter); ImGui::Text("Hover over me"); if (ImGui::IsItemHovered()) ImGui::SetTooltip("I am a tooltip"); ImGui::SameLine(); ImGui::Text("- or me"); if (ImGui::IsItemHovered()) { ImGui::BeginTooltip(); ImGui::Text("I am a fancy tooltip"); static float arr[] = { 0.6f, 0.1f, 1.0f, 0.5f, 0.92f, 0.1f, 0.2f }; ImGui::PlotLines("Curve", arr, IM_ARRAYSIZE(arr)); ImGui::EndTooltip(); } ImGui::Separator(); ImGui::LabelText("label", "Value"); { // Using the _simplified_ one-liner Combo() api here // See "Combo" section for examples of how to use the more complete BeginCombo()/EndCombo() api. 
const char* items[] = { "AAAA", "BBBB", "CCCC", "DDDD", "EEEE", "FFFF", "GGGG", "HHHH", "IIII", "JJJJ", "KKKK", "LLLLLLL", "MMMM", "OOOOOOO" }; static int item_current = 0; ImGui::Combo("combo", &item_current, items, IM_ARRAYSIZE(items)); ImGui::SameLine(); HelpMarker("Refer to the \"Combo\" section below for an explanation of the full BeginCombo/EndCombo API, and demonstration of various flags.\n"); } { static char str0[128] = "Hello, world!"; ImGui::InputText("input text", str0, IM_ARRAYSIZE(str0)); ImGui::SameLine(); HelpMarker("USER:\nHold SHIFT or use mouse to select text.\n" "CTRL+Left/Right to word jump.\n" "CTRL+A or double-click to select all.\n" "CTRL+X,CTRL+C,CTRL+V clipboard.\n" "CTRL+Z,CTRL+Y undo/redo.\n" "ESCAPE to revert.\n\nPROGRAMMER:\nYou can use the ImGuiInputTextFlags_CallbackResize facility if you need to wire InputText() to a dynamic string type. See misc/cpp/imgui_stdlib.h for an example (this is not demonstrated in imgui_demo.cpp)."); static char str1[128] = ""; ImGui::InputTextWithHint("input text (w/ hint)", "enter text here", str1, IM_ARRAYSIZE(str1)); static int i0 = 123; ImGui::InputInt("input int", &i0); ImGui::SameLine(); HelpMarker("You can apply arithmetic operators +,*,/ on numerical values.\n e.g. [ 100 ], input \'*2\', result becomes [ 200 ]\nUse +- to subtract.\n"); static float f0 = 0.001f; ImGui::InputFloat("input float", &f0, 0.01f, 1.0f, "%.3f"); static double d0 = 999999.00000001; ImGui::InputDouble("input double", &d0, 0.01f, 1.0f, "%.8f"); static float f1 = 1.e10f; ImGui::InputFloat("input scientific", &f1, 0.0f, 0.0f, "%e"); ImGui::SameLine(); HelpMarker("You can input value using the scientific notation,\n e.g. \"1e+8\" becomes \"100000000\".\n"); static float vec4a[4] = { 0.10f, 0.20f, 0.30f, 0.44f }; ImGui::InputFloat3("input float3", vec4a); } { static int i1 = 50, i2 = 42; ImGui::DragInt("drag int", &i1, 1); ImGui::SameLine(); HelpMarker("Click and drag to edit value.\nHold SHIFT/ALT for faster/slower edit.\nDouble-click or CTRL+click to input value."); ImGui::DragInt("drag int 0..100", &i2, 1, 0, 100, "%d%%"); static float f1=1.00f, f2=0.0067f; ImGui::DragFloat("drag float", &f1, 0.005f); ImGui::DragFloat("drag small float", &f2, 0.0001f, 0.0f, 0.0f, "%.06f ns"); } { static int i1=0; ImGui::SliderInt("slider int", &i1, -1, 3); ImGui::SameLine(); HelpMarker("CTRL+click to input value."); static float f1=0.123f, f2=0.0f; ImGui::SliderFloat("slider float", &f1, 0.0f, 1.0f, "ratio = %.3f"); ImGui::SliderFloat("slider float (curve)", &f2, -10.0f, 10.0f, "%.4f", 2.0f); static float angle = 0.0f; ImGui::SliderAngle("slider angle", &angle); // Using the format string to display a name instead of an integer. // Here we completely omit '%d' from the format string, so it'll only display a name. // This technique can also be used with DragInt(). enum Element { Element_Fire, Element_Earth, Element_Air, Element_Water, Element_COUNT }; const char* element_names[Element_COUNT] = { "Fire", "Earth", "Air", "Water" }; static int current_element = Element_Fire; const char* current_element_name = (current_element >= 0 && current_element < Element_COUNT) ? 
element_names[current_element] : "Unknown"; ImGui::SliderInt("slider enum", &current_element, 0, Element_COUNT - 1, current_element_name); ImGui::SameLine(); HelpMarker("Using the format string parameter to display a name instead of the underlying integer."); } { static float col1[3] = { 1.0f,0.0f,0.2f }; static float col2[4] = { 0.4f,0.7f,0.0f,0.5f }; ImGui::ColorEdit3("color 1", col1); ImGui::SameLine(); HelpMarker("Click on the colored square to open a color picker.\nClick and hold to use drag and drop.\nRight-click on the colored square to show options.\nCTRL+click on individual component to input value.\n"); ImGui::ColorEdit4("color 2", col2); } { // List box const char* listbox_items[] = { "Apple", "Banana", "Cherry", "Kiwi", "Mango", "Orange", "Pineapple", "Strawberry", "Watermelon" }; static int listbox_item_current = 1; ImGui::ListBox("listbox\n(single select)", &listbox_item_current, listbox_items, IM_ARRAYSIZE(listbox_items), 4); //static int listbox_item_current2 = 2; //ImGui::SetNextItemWidth(-1); //ImGui::ListBox("##listbox2", &listbox_item_current2, listbox_items, IM_ARRAYSIZE(listbox_items), 4); } ImGui::TreePop(); } // Testing ImGuiOnceUponAFrame helper. //static ImGuiOnceUponAFrame once; //for (int i = 0; i < 5; i++) // if (once) // ImGui::Text("This will be displayed only once."); if (ImGui::TreeNode("Trees")) { if (ImGui::TreeNode("Basic trees")) { for (int i = 0; i < 5; i++) { // Use SetNextItemOpen() so set the default state of a node to be open. // We could also use TreeNodeEx() with the ImGuiTreeNodeFlags_DefaultOpen flag to achieve the same thing! if (i == 0) ImGui::SetNextItemOpen(true, ImGuiCond_Once); if (ImGui::TreeNode((void*)(intptr_t)i, "Child %d", i)) { ImGui::Text("blah blah"); ImGui::SameLine(); if (ImGui::SmallButton("button")) {}; ImGui::TreePop(); } } ImGui::TreePop(); } if (ImGui::TreeNode("Advanced, with Selectable nodes")) { HelpMarker("This is a more typical looking tree with selectable nodes.\nClick to select, CTRL+Click to toggle, click on arrows or double-click to open."); static bool align_label_with_current_x_position = false; ImGui::Checkbox("Align label with current X position)", &align_label_with_current_x_position); ImGui::Text("Hello!"); if (align_label_with_current_x_position) ImGui::Unindent(ImGui::GetTreeNodeToLabelSpacing()); static int selection_mask = (1 << 2); // Dumb representation of what may be user-side selection state. You may carry selection state inside or outside your objects in whatever format you see fit. int node_clicked = -1; // Temporary storage of what node we have clicked to process selection at the end of the loop. May be a pointer to your own node type, etc. ImGui::PushStyleVar(ImGuiStyleVar_IndentSpacing, ImGui::GetFontSize()*3); // Increase spacing to differentiate leaves from expanded contents. for (int i = 0; i < 6; i++) { // Disable the default open on single-click behavior and pass in Selected flag according to our selection state. ImGuiTreeNodeFlags node_flags = ImGuiTreeNodeFlags_OpenOnArrow | ImGuiTreeNodeFlags_OpenOnDoubleClick; if (selection_mask & (1 << i)) node_flags |= ImGuiTreeNodeFlags_Selected; if (i < 3) { // Items 0..2 are Tree Node bool node_open = ImGui::TreeNodeEx((void*)(intptr_t)i, node_flags, "Selectable Node %d", i); if (ImGui::IsItemClicked()) node_clicked = i; if (node_open) { ImGui::Text("Blah blah\nBlah Blah"); ImGui::TreePop(); } } else { // Items 3..5 are Tree Leaves // The only reason we use TreeNode at all is to allow selection of the leaf. 
// Otherwise we can use BulletText() or advance the cursor by GetTreeNodeToLabelSpacing() and call Text(). node_flags |= ImGuiTreeNodeFlags_Leaf | ImGuiTreeNodeFlags_NoTreePushOnOpen; // ImGuiTreeNodeFlags_Bullet ImGui::TreeNodeEx((void*)(intptr_t)i, node_flags, "Selectable Leaf %d", i); if (ImGui::IsItemClicked()) node_clicked = i; } } if (node_clicked != -1) { // Update selection state. Process outside of tree loop to avoid visual inconsistencies during the clicking-frame. if (ImGui::GetIO().KeyCtrl) selection_mask ^= (1 << node_clicked); // CTRL+click to toggle else //if (!(selection_mask & (1 << node_clicked))) // Depending on selection behavior you want, this commented bit preserve selection when clicking on item that is part of the selection selection_mask = (1 << node_clicked); // Click to single-select } ImGui::PopStyleVar(); if (align_label_with_current_x_position) ImGui::Indent(ImGui::GetTreeNodeToLabelSpacing()); ImGui::TreePop(); } ImGui::TreePop(); } if (ImGui::TreeNode("Collapsing Headers")) { static bool closable_group = true; ImGui::Checkbox("Show 2nd header", &closable_group); if (ImGui::CollapsingHeader("Header")) { ImGui::Text("IsItemHovered: %d", ImGui::IsItemHovered()); for (int i = 0; i < 5; i++) ImGui::Text("Some content %d", i); } if (ImGui::CollapsingHeader("Header with a close button", &closable_group)) { ImGui::Text("IsItemHovered: %d", ImGui::IsItemHovered()); for (int i = 0; i < 5; i++) ImGui::Text("More content %d", i); } ImGui::TreePop(); } if (ImGui::TreeNode("Bullets")) { ImGui::BulletText("Bullet point 1"); ImGui::BulletText("Bullet point 2\nOn multiple lines"); ImGui::Bullet(); ImGui::Text("Bullet point 3 (two calls)"); ImGui::Bullet(); ImGui::SmallButton("Button"); ImGui::TreePop(); } if (ImGui::TreeNode("Text")) { if (ImGui::TreeNode("Colored Text")) { // Using shortcut. You can use PushStyleColor()/PopStyleColor() for more flexibility. ImGui::TextColored(ImVec4(1.0f,0.0f,1.0f,1.0f), "Pink"); ImGui::TextColored(ImVec4(1.0f,1.0f,0.0f,1.0f), "Yellow"); ImGui::TextDisabled("Disabled"); ImGui::SameLine(); HelpMarker("The TextDisabled color is stored in ImGuiStyle."); ImGui::TreePop(); } if (ImGui::TreeNode("Word Wrapping")) { // Using shortcut. You can use PushTextWrapPos()/PopTextWrapPos() for more flexibility. ImGui::TextWrapped("This text should automatically wrap on the edge of the window. The current implementation for text wrapping follows simple rules suitable for English and possibly other languages."); ImGui::Spacing(); static float wrap_width = 200.0f; ImGui::SliderFloat("Wrap width", &wrap_width, -20, 600, "%.0f"); ImGui::Text("Test paragraph 1:"); ImVec2 pos = ImGui::GetCursorScreenPos(); ImGui::GetWindowDrawList()->AddRectFilled(ImVec2(pos.x + wrap_width, pos.y), ImVec2(pos.x + wrap_width + 10, pos.y + ImGui::GetTextLineHeight()), IM_COL32(255,0,255,255)); ImGui::PushTextWrapPos(ImGui::GetCursorPos().x + wrap_width); ImGui::Text("The lazy dog is a good dog. This paragraph is made to fit within %.0f pixels. Testing a 1 character word. 
The quick brown fox jumps over the lazy dog.", wrap_width); ImGui::GetWindowDrawList()->AddRect(ImGui::GetItemRectMin(), ImGui::GetItemRectMax(), IM_COL32(255,255,0,255)); ImGui::PopTextWrapPos(); ImGui::Text("Test paragraph 2:"); pos = ImGui::GetCursorScreenPos(); ImGui::GetWindowDrawList()->AddRectFilled(ImVec2(pos.x + wrap_width, pos.y), ImVec2(pos.x + wrap_width + 10, pos.y + ImGui::GetTextLineHeight()), IM_COL32(255,0,255,255)); ImGui::PushTextWrapPos(ImGui::GetCursorPos().x + wrap_width); ImGui::Text("aaaaaaaa bbbbbbbb, c cccccccc,dddddddd. d eeeeeeee ffffffff. gggggggg!hhhhhhhh"); ImGui::GetWindowDrawList()->AddRect(ImGui::GetItemRectMin(), ImGui::GetItemRectMax(), IM_COL32(255,255,0,255)); ImGui::PopTextWrapPos(); ImGui::TreePop(); } if (ImGui::TreeNode("UTF-8 Text")) { // UTF-8 test with Japanese characters // (Needs a suitable font, try Noto, or Arial Unicode, or M+ fonts. Read misc/fonts/README.txt for details.) // - From C++11 you can use the u8"my text" syntax to encode literal strings as UTF-8 // - For earlier compiler, you may be able to encode your sources as UTF-8 (e.g. Visual Studio save your file as 'UTF-8 without signature') // - FOR THIS DEMO FILE ONLY, BECAUSE WE WANT TO SUPPORT OLD COMPILERS, WE ARE *NOT* INCLUDING RAW UTF-8 CHARACTERS IN THIS SOURCE FILE. // Instead we are encoding a few strings with hexadecimal constants. Don't do this in your application! // Please use u8"text in any language" in your application! // Note that characters values are preserved even by InputText() if the font cannot be displayed, so you can safely copy & paste garbled characters into another application. ImGui::TextWrapped("CJK text will only appears if the font was loaded with the appropriate CJK character ranges. Call io.Font->AddFontFromFileTTF() manually to load extra character ranges. Read misc/fonts/README.txt for details."); ImGui::Text("Hiragana: \xe3\x81\x8b\xe3\x81\x8d\xe3\x81\x8f\xe3\x81\x91\xe3\x81\x93 (kakikukeko)"); // Normally we would use u8"blah blah" with the proper characters directly in the string. ImGui::Text("Kanjis: \xe6\x97\xa5\xe6\x9c\xac\xe8\xaa\x9e (nihongo)"); static char buf[32] = "\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa\x9e"; //static char buf[32] = u8"NIHONGO"; // <- this is how you would write it with C++11, using real kanjis ImGui::InputText("UTF-8 input", buf, IM_ARRAYSIZE(buf)); ImGui::TreePop(); } ImGui::TreePop(); } if (ImGui::TreeNode("Images")) { ImGuiIO& io = ImGui::GetIO(); ImGui::TextWrapped("Below we are displaying the font texture (which is the only texture we have access to in this demo). Use the 'ImTextureID' type as storage to pass pointers or identifier to your own texture data. Hover the texture for a zoomed view!"); // Here we are grabbing the font texture because that's the only one we have access to inside the demo code. // Remember that ImTextureID is just storage for whatever you want it to be, it is essentially a value that will be passed to the render function inside the ImDrawCmd structure. // If you use one of the default imgui_impl_XXXX.cpp renderer, they all have comments at the top of their file to specify what they expect to be stored in ImTextureID. // (for example, the imgui_impl_dx11.cpp renderer expect a 'ID3D11ShaderResourceView*' pointer. The imgui_impl_glfw_gl3.cpp renderer expect a GLuint OpenGL texture identifier etc.) // If you decided that ImTextureID = MyEngineTexture*, then you can pass your MyEngineTexture* pointers to ImGui::Image(), and gather width/height through your own functions, etc. 
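// As a rough illustration of that idea (a sketch only: MyEngineTexture and LoadMyTexture are hypothetical engine-side names, not part of dear imgui):
//   MyEngineTexture* my_texture = LoadMyTexture(...);                                   // however your engine creates/loads textures
//   ImGui::Image((ImTextureID)my_texture, ImVec2((float)texture_width, (float)texture_height));
// Your renderer back-end then casts the ImTextureID stored in the ImDrawCmd structure back to MyEngineTexture* when issuing draw calls.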
// Using ShowMetricsWindow() as a "debugger" to inspect the draw data that are being passed to your render will help you debug issues if you are confused about this. // Consider using the lower-level ImDrawList::AddImage() API, via ImGui::GetWindowDrawList()->AddImage(). ImTextureID my_tex_id = io.Fonts->TexID; float my_tex_w = (float)io.Fonts->TexWidth; float my_tex_h = (float)io.Fonts->TexHeight; ImGui::Text("%.0fx%.0f", my_tex_w, my_tex_h); ImVec2 pos = ImGui::GetCursorScreenPos(); ImGui::Image(my_tex_id, ImVec2(my_tex_w, my_tex_h), ImVec2(0,0), ImVec2(1,1), ImVec4(1.0f,1.0f,1.0f,1.0f), ImVec4(1.0f,1.0f,1.0f,0.5f)); if (ImGui::IsItemHovered()) { ImGui::BeginTooltip(); float region_sz = 32.0f; float region_x = io.MousePos.x - pos.x - region_sz * 0.5f; if (region_x < 0.0f) region_x = 0.0f; else if (region_x > my_tex_w - region_sz) region_x = my_tex_w - region_sz; float region_y = io.MousePos.y - pos.y - region_sz * 0.5f; if (region_y < 0.0f) region_y = 0.0f; else if (region_y > my_tex_h - region_sz) region_y = my_tex_h - region_sz; float zoom = 4.0f; ImGui::Text("Min: (%.2f, %.2f)", region_x, region_y); ImGui::Text("Max: (%.2f, %.2f)", region_x + region_sz, region_y + region_sz); ImVec2 uv0 = ImVec2((region_x) / my_tex_w, (region_y) / my_tex_h); ImVec2 uv1 = ImVec2((region_x + region_sz) / my_tex_w, (region_y + region_sz) / my_tex_h); ImGui::Image(my_tex_id, ImVec2(region_sz * zoom, region_sz * zoom), uv0, uv1, ImVec4(1.0f, 1.0f, 1.0f, 1.0f), ImVec4(1.0f, 1.0f, 1.0f, 0.5f)); ImGui::EndTooltip(); } ImGui::TextWrapped("And now some textured buttons.."); static int pressed_count = 0; for (int i = 0; i < 8; i++) { ImGui::PushID(i); int frame_padding = -1 + i; // -1 = uses default padding if (ImGui::ImageButton(my_tex_id, ImVec2(32,32), ImVec2(0,0), ImVec2(32.0f/my_tex_w,32/my_tex_h), frame_padding, ImVec4(0.0f,0.0f,0.0f,1.0f))) pressed_count += 1; ImGui::PopID(); ImGui::SameLine(); } ImGui::NewLine(); ImGui::Text("Pressed %d times.", pressed_count); ImGui::TreePop(); } if (ImGui::TreeNode("Combo")) { // Expose flags as checkbox for the demo static ImGuiComboFlags flags = 0; ImGui::CheckboxFlags("ImGuiComboFlags_PopupAlignLeft", (unsigned int*)&flags, ImGuiComboFlags_PopupAlignLeft); ImGui::SameLine(); HelpMarker("Only makes a difference if the popup is larger than the combo"); if (ImGui::CheckboxFlags("ImGuiComboFlags_NoArrowButton", (unsigned int*)&flags, ImGuiComboFlags_NoArrowButton)) flags &= ~ImGuiComboFlags_NoPreview; // Clear the other flag, as we cannot combine both if (ImGui::CheckboxFlags("ImGuiComboFlags_NoPreview", (unsigned int*)&flags, ImGuiComboFlags_NoPreview)) flags &= ~ImGuiComboFlags_NoArrowButton; // Clear the other flag, as we cannot combine both // General BeginCombo() API, you have full control over your selection data and display type. // (your selection data could be an index, a pointer to the object, an id for the object, a flag stored in the object itself, etc.) const char* items[] = { "AAAA", "BBBB", "CCCC", "DDDD", "EEEE", "FFFF", "GGGG", "HHHH", "IIII", "JJJJ", "KKKK", "LLLLLLL", "MMMM", "OOOOOOO" }; static const char* item_current = items[0]; // Here our selection is a single pointer stored outside the object. if (ImGui::BeginCombo("combo 1", item_current, flags)) // The second parameter is the label previewed before opening the combo. 
{ for (int n = 0; n < IM_ARRAYSIZE(items); n++) { bool is_selected = (item_current == items[n]); if (ImGui::Selectable(items[n], is_selected)) item_current = items[n]; if (is_selected) ImGui::SetItemDefaultFocus(); // Set the initial focus when opening the combo (scrolling + for keyboard navigation support in the upcoming navigation branch) } ImGui::EndCombo(); } // Simplified one-liner Combo() API, using values packed in a single constant string static int item_current_2 = 0; ImGui::Combo("combo 2 (one-liner)", &item_current_2, "aaaa\0bbbb\0cccc\0dddd\0eeee\0\0"); // Simplified one-liner Combo() using an array of const char* static int item_current_3 = -1; // If the selection isn't within 0..count, Combo won't display a preview ImGui::Combo("combo 3 (array)", &item_current_3, items, IM_ARRAYSIZE(items)); // Simplified one-liner Combo() using an accessor function struct FuncHolder { static bool ItemGetter(void* data, int idx, const char** out_str) { *out_str = ((const char**)data)[idx]; return true; } }; static int item_current_4 = 0; ImGui::Combo("combo 4 (function)", &item_current_4, &FuncHolder::ItemGetter, items, IM_ARRAYSIZE(items)); ImGui::TreePop(); } if (ImGui::TreeNode("Selectables")) { // Selectable() has 2 overloads: // - The one taking "bool selected" as a read-only selection information. When Selectable() has been clicked is returns true and you can alter selection state accordingly. // - The one taking "bool* p_selected" as a read-write selection information (convenient in some cases) // The earlier is more flexible, as in real application your selection may be stored in a different manner (in flags within objects, as an external list, etc). if (ImGui::TreeNode("Basic")) { static bool selection[5] = { false, true, false, false, false }; ImGui::Selectable("1. I am selectable", &selection[0]); ImGui::Selectable("2. I am selectable", &selection[1]); ImGui::Text("3. I am not selectable"); ImGui::Selectable("4. I am selectable", &selection[3]); if (ImGui::Selectable("5. I am double clickable", selection[4], ImGuiSelectableFlags_AllowDoubleClick)) if (ImGui::IsMouseDoubleClicked(0)) selection[4] = !selection[4]; ImGui::TreePop(); } if (ImGui::TreeNode("Selection State: Single Selection")) { static int selected = -1; for (int n = 0; n < 5; n++) { char buf[32]; sprintf(buf, "Object %d", n); if (ImGui::Selectable(buf, selected == n)) selected = n; } ImGui::TreePop(); } if (ImGui::TreeNode("Selection State: Multiple Selection")) { HelpMarker("Hold CTRL and click to select multiple items."); static bool selection[5] = { false, false, false, false, false }; for (int n = 0; n < 5; n++) { char buf[32]; sprintf(buf, "Object %d", n); if (ImGui::Selectable(buf, selection[n])) { if (!ImGui::GetIO().KeyCtrl) // Clear selection when CTRL is not held memset(selection, 0, sizeof(selection)); selection[n] ^= 1; } } ImGui::TreePop(); } if (ImGui::TreeNode("Rendering more text into the same line")) { // Using the Selectable() override that takes "bool* p_selected" parameter and toggle your booleans automatically. 
static bool selected[3] = { false, false, false }; ImGui::Selectable("main.c", &selected[0]); ImGui::SameLine(300); ImGui::Text(" 2,345 bytes"); ImGui::Selectable("Hello.cpp", &selected[1]); ImGui::SameLine(300); ImGui::Text("12,345 bytes"); ImGui::Selectable("Hello.h", &selected[2]); ImGui::SameLine(300); ImGui::Text(" 2,345 bytes"); ImGui::TreePop(); } if (ImGui::TreeNode("In columns")) { ImGui::Columns(3, NULL, false); static bool selected[16] = { 0 }; for (int i = 0; i < 16; i++) { char label[32]; sprintf(label, "Item %d", i); if (ImGui::Selectable(label, &selected[i])) {} ImGui::NextColumn(); } ImGui::Columns(1); ImGui::TreePop(); } if (ImGui::TreeNode("Grid")) { static bool selected[4*4] = { true, false, false, false, false, true, false, false, false, false, true, false, false, false, false, true }; for (int i = 0; i < 4*4; i++) { ImGui::PushID(i); if (ImGui::Selectable("Sailor", &selected[i], 0, ImVec2(50,50))) { // Note: We _unnecessarily_ test for both x/y and i here only to silence some static analyzer. The second part of each test is unnecessary. int x = i % 4; int y = i / 4; if (x > 0) { selected[i - 1] ^= 1; } if (x < 3 && i < 15) { selected[i + 1] ^= 1; } if (y > 0 && i > 3) { selected[i - 4] ^= 1; } if (y < 3 && i < 12) { selected[i + 4] ^= 1; } } if ((i % 4) < 3) ImGui::SameLine(); ImGui::PopID(); } ImGui::TreePop(); } if (ImGui::TreeNode("Alignment")) { HelpMarker("Alignment applies when a selectable is larger than its text content.\nBy default, Selectables uses style.SelectableTextAlign but it can be overriden on a per-item basis using PushStyleVar()."); static bool selected[3*3] = { true, false, true, false, true, false, true, false, true }; for (int y = 0; y < 3; y++) { for (int x = 0; x < 3; x++) { ImVec2 alignment = ImVec2((float)x / 2.0f, (float)y / 2.0f); char name[32]; sprintf(name, "(%.1f,%.1f)", alignment.x, alignment.y); if (x > 0) ImGui::SameLine(); ImGui::PushStyleVar(ImGuiStyleVar_SelectableTextAlign, alignment); ImGui::Selectable(name, &selected[3*y+x], ImGuiSelectableFlags_None, ImVec2(80,80)); ImGui::PopStyleVar(); } } ImGui::TreePop(); } ImGui::TreePop(); } if (ImGui::TreeNode("Text Input")) { if (ImGui::TreeNode("Multi-line Text Input")) { // Note: we are using a fixed-sized buffer for simplicity here. See ImGuiInputTextFlags_CallbackResize // and the code in misc/cpp/imgui_stdlib.h for how to setup InputText() for dynamically resizing strings. static char text[1024 * 16] = "/*\n" " The Pentium F00F bug, shorthand for F0 0F C7 C8,\n" " the hexadecimal encoding of one offending instruction,\n" " more formally, the invalid operand with locked CMPXCHG8B\n" " instruction bug, is a design flaw in the majority of\n" " Intel Pentium, Pentium MMX, and Pentium OverDrive\n" " processors (all in the P5 microarchitecture).\n" "*/\n\n" "label:\n" "\tlock cmpxchg8b eax\n"; static ImGuiInputTextFlags flags = ImGuiInputTextFlags_AllowTabInput; HelpMarker("You can use the ImGuiInputTextFlags_CallbackResize facility if you need to wire InputTextMultiline() to a dynamic string type. See misc/cpp/imgui_stdlib.h for an example. 
(This is not demonstrated in imgui_demo.cpp)"); ImGui::CheckboxFlags("ImGuiInputTextFlags_ReadOnly", (unsigned int*)&flags, ImGuiInputTextFlags_ReadOnly); ImGui::CheckboxFlags("ImGuiInputTextFlags_AllowTabInput", (unsigned int*)&flags, ImGuiInputTextFlags_AllowTabInput); ImGui::CheckboxFlags("ImGuiInputTextFlags_CtrlEnterForNewLine", (unsigned int*)&flags, ImGuiInputTextFlags_CtrlEnterForNewLine); ImGui::InputTextMultiline("##source", text, IM_ARRAYSIZE(text), ImVec2(-FLT_MIN, ImGui::GetTextLineHeight() * 16), flags); ImGui::TreePop(); } if (ImGui::TreeNode("Filtered Text Input")) { static char buf1[64] = ""; ImGui::InputText("default", buf1, 64); static char buf2[64] = ""; ImGui::InputText("decimal", buf2, 64, ImGuiInputTextFlags_CharsDecimal); static char buf3[64] = ""; ImGui::InputText("hexadecimal", buf3, 64, ImGuiInputTextFlags_CharsHexadecimal | ImGuiInputTextFlags_CharsUppercase); static char buf4[64] = ""; ImGui::InputText("uppercase", buf4, 64, ImGuiInputTextFlags_CharsUppercase); static char buf5[64] = ""; ImGui::InputText("no blank", buf5, 64, ImGuiInputTextFlags_CharsNoBlank); struct TextFilters { static int FilterImGuiLetters(ImGuiInputTextCallbackData* data) { if (data->EventChar < 256 && strchr("imgui", (char)data->EventChar)) return 0; return 1; } }; static char buf6[64] = ""; ImGui::InputText("\"imgui\" letters", buf6, 64, ImGuiInputTextFlags_CallbackCharFilter, TextFilters::FilterImGuiLetters); ImGui::Text("Password input"); static char bufpass[64] = "password123"; ImGui::InputText("password", bufpass, 64, ImGuiInputTextFlags_Password | ImGuiInputTextFlags_CharsNoBlank); ImGui::SameLine(); HelpMarker("Display all characters as '*'.\nDisable clipboard cut and copy.\nDisable logging.\n"); ImGui::InputTextWithHint("password (w/ hint)", "<password>", bufpass, 64, ImGuiInputTextFlags_Password | ImGuiInputTextFlags_CharsNoBlank); ImGui::InputText("password (clear)", bufpass, 64, ImGuiInputTextFlags_CharsNoBlank); ImGui::TreePop(); } if (ImGui::TreeNode("Resize Callback")) { // If you have a custom string type you would typically create an ImGui::InputText() wrapper that takes your type as input. // See misc/cpp/imgui_stdlib.h and .cpp for an implementation of this using std::string. HelpMarker("Demonstrate using ImGuiInputTextFlags_CallbackResize to wire your resizable string type to InputText().\n\nSee misc/cpp/imgui_stdlib.h for an implementation of this for std::string."); struct Funcs { static int MyResizeCallback(ImGuiInputTextCallbackData* data) { if (data->EventFlag == ImGuiInputTextFlags_CallbackResize) { ImVector<char>* my_str = (ImVector<char>*)data->UserData; IM_ASSERT(my_str->begin() == data->Buf); my_str->resize(data->BufSize); // NB: On resizing calls, generally data->BufSize == data->BufTextLen + 1 data->Buf = my_str->begin(); } return 0; } // Tip: Because ImGui:: is a namespace you would typically add your own function into the namespace in your own source files. // For example, you may add a function called ImGui::InputText(const char* label, MyString* my_str). static bool MyInputTextMultiline(const char* label, ImVector<char>* my_str, const ImVec2& size = ImVec2(0, 0), ImGuiInputTextFlags flags = 0) { IM_ASSERT((flags & ImGuiInputTextFlags_CallbackResize) == 0); return ImGui::InputTextMultiline(label, my_str->begin(), (size_t)my_str->size(), size, flags | ImGuiInputTextFlags_CallbackResize, Funcs::MyResizeCallback, (void*)my_str); } }; // For this demo we are using ImVector as a string container.
// Note that because we need to store a terminating zero character, our size/capacity are 1 more than usually reported by a typical string class. static ImVector<char> my_str; if (my_str.empty()) my_str.push_back(0); Funcs::MyInputTextMultiline("##MyStr", &my_str, ImVec2(-FLT_MIN, ImGui::GetTextLineHeight() * 16)); ImGui::Text("Data: %p\nSize: %d\nCapacity: %d", (void*)my_str.begin(), my_str.size(), my_str.capacity()); ImGui::TreePop(); } ImGui::TreePop(); } if (ImGui::TreeNode("Plots Widgets")) { static bool animate = true; ImGui::Checkbox("Animate", &animate); static float arr[] = { 0.6f, 0.1f, 1.0f, 0.5f, 0.92f, 0.1f, 0.2f }; ImGui::PlotLines("Frame Times", arr, IM_ARRAYSIZE(arr)); // Create a dummy array of contiguous float values to plot // Tip: If your float aren't contiguous but part of a structure, you can pass a pointer to your first float and the sizeof() of your structure in the Stride parameter. static float values[90] = { 0 }; static int values_offset = 0; static double refresh_time = 0.0; if (!animate || refresh_time == 0.0) refresh_time = ImGui::GetTime(); while (refresh_time < ImGui::GetTime()) // Create dummy data at fixed 60 hz rate for the demo { static float phase = 0.0f; values[values_offset] = cosf(phase); values_offset = (values_offset+1) % IM_ARRAYSIZE(values); phase += 0.10f*values_offset; refresh_time += 1.0f/60.0f; } ImGui::PlotLines("Lines", values, IM_ARRAYSIZE(values), values_offset, "avg 0.0", -1.0f, 1.0f, ImVec2(0,80)); ImGui::PlotHistogram("Histogram", arr, IM_ARRAYSIZE(arr), 0, NULL, 0.0f, 1.0f, ImVec2(0,80)); // Use functions to generate output // FIXME: This is rather awkward because current plot API only pass in indices. We probably want an API passing floats and user provide sample rate/count. struct Funcs { static float Sin(void*, int i) { return sinf(i * 0.1f); } static float Saw(void*, int i) { return (i & 1) ? 1.0f : -1.0f; } }; static int func_type = 0, display_count = 70; ImGui::Separator(); ImGui::SetNextItemWidth(100); ImGui::Combo("func", &func_type, "Sin\0Saw\0"); ImGui::SameLine(); ImGui::SliderInt("Sample count", &display_count, 1, 400); float (*func)(void*, int) = (func_type == 0) ? Funcs::Sin : Funcs::Saw; ImGui::PlotLines("Lines", func, NULL, display_count, 0, NULL, -1.0f, 1.0f, ImVec2(0,80)); ImGui::PlotHistogram("Histogram", func, NULL, display_count, 0, NULL, -1.0f, 1.0f, ImVec2(0,80)); ImGui::Separator(); // Animate a simple progress bar static float progress = 0.0f, progress_dir = 1.0f; if (animate) { progress += progress_dir * 0.4f * ImGui::GetIO().DeltaTime; if (progress >= +1.1f) { progress = +1.1f; progress_dir *= -1.0f; } if (progress <= -0.1f) { progress = -0.1f; progress_dir *= -1.0f; } } // Typically we would use ImVec2(-1.0f,0.0f) or ImVec2(-FLT_MIN,0.0f) to use all available width, // or ImVec2(width,0.0f) for a specified width. ImVec2(0.0f,0.0f) uses ItemWidth. ImGui::ProgressBar(progress, ImVec2(0.0f,0.0f)); ImGui::SameLine(0.0f, ImGui::GetStyle().ItemInnerSpacing.x); ImGui::Text("Progress Bar"); float progress_saturated = (progress < 0.0f) ? 0.0f : (progress > 1.0f) ? 
1.0f : progress; char buf[32]; sprintf(buf, "%d/%d", (int)(progress_saturated*1753), 1753); ImGui::ProgressBar(progress, ImVec2(0.f,0.f), buf); ImGui::TreePop(); } if (ImGui::TreeNode("Color/Picker Widgets")) { static ImVec4 color = ImVec4(114.0f/255.0f, 144.0f/255.0f, 154.0f/255.0f, 200.0f/255.0f); static bool alpha_preview = true; static bool alpha_half_preview = false; static bool drag_and_drop = true; static bool options_menu = true; static bool hdr = false; ImGui::Checkbox("With Alpha Preview", &alpha_preview); ImGui::Checkbox("With Half Alpha Preview", &alpha_half_preview); ImGui::Checkbox("With Drag and Drop", &drag_and_drop); ImGui::Checkbox("With Options Menu", &options_menu); ImGui::SameLine(); HelpMarker("Right-click on the individual color widget to show options."); ImGui::Checkbox("With HDR", &hdr); ImGui::SameLine(); HelpMarker("Currently all this does is to lift the 0..1 limits on dragging widgets."); ImGuiColorEditFlags misc_flags = (hdr ? ImGuiColorEditFlags_HDR : 0) | (drag_and_drop ? 0 : ImGuiColorEditFlags_NoDragDrop) | (alpha_half_preview ? ImGuiColorEditFlags_AlphaPreviewHalf : (alpha_preview ? ImGuiColorEditFlags_AlphaPreview : 0)) | (options_menu ? 0 : ImGuiColorEditFlags_NoOptions); ImGui::Text("Color widget:"); ImGui::SameLine(); HelpMarker("Click on the colored square to open a color picker.\nCTRL+click on individual component to input value.\n"); ImGui::ColorEdit3("MyColor##1", (float*)&color, misc_flags); ImGui::Text("Color widget HSV with Alpha:"); ImGui::ColorEdit4("MyColor##2", (float*)&color, ImGuiColorEditFlags_DisplayHSV | misc_flags); ImGui::Text("Color widget with Float Display:"); ImGui::ColorEdit4("MyColor##2f", (float*)&color, ImGuiColorEditFlags_Float | misc_flags); ImGui::Text("Color button with Picker:"); ImGui::SameLine(); HelpMarker("With the ImGuiColorEditFlags_NoInputs flag you can hide all the slider/text inputs.\nWith the ImGuiColorEditFlags_NoLabel flag you can pass a non-empty label which will only be used for the tooltip and picker popup."); ImGui::ColorEdit4("MyColor##3", (float*)&color, ImGuiColorEditFlags_NoInputs | ImGuiColorEditFlags_NoLabel | misc_flags); ImGui::Text("Color button with Custom Picker Popup:"); // Generate a dummy default palette. The palette will persist and can be edited. 
static bool saved_palette_init = true; static ImVec4 saved_palette[32] = { }; if (saved_palette_init) { for (int n = 0; n < IM_ARRAYSIZE(saved_palette); n++) { ImGui::ColorConvertHSVtoRGB(n / 31.0f, 0.8f, 0.8f, saved_palette[n].x, saved_palette[n].y, saved_palette[n].z); saved_palette[n].w = 1.0f; // Alpha } saved_palette_init = false; } static ImVec4 backup_color; bool open_popup = ImGui::ColorButton("MyColor##3b", color, misc_flags); ImGui::SameLine(0, ImGui::GetStyle().ItemInnerSpacing.x); open_popup |= ImGui::Button("Palette"); if (open_popup) { ImGui::OpenPopup("mypicker"); backup_color = color; } if (ImGui::BeginPopup("mypicker")) { ImGui::Text("MY CUSTOM COLOR PICKER WITH AN AMAZING PALETTE!"); ImGui::Separator(); ImGui::ColorPicker4("##picker", (float*)&color, misc_flags | ImGuiColorEditFlags_NoSidePreview | ImGuiColorEditFlags_NoSmallPreview); ImGui::SameLine(); ImGui::BeginGroup(); // Lock X position ImGui::Text("Current"); ImGui::ColorButton("##current", color, ImGuiColorEditFlags_NoPicker | ImGuiColorEditFlags_AlphaPreviewHalf, ImVec2(60,40)); ImGui::Text("Previous"); if (ImGui::ColorButton("##previous", backup_color, ImGuiColorEditFlags_NoPicker | ImGuiColorEditFlags_AlphaPreviewHalf, ImVec2(60,40))) color = backup_color; ImGui::Separator(); ImGui::Text("Palette"); for (int n = 0; n < IM_ARRAYSIZE(saved_palette); n++) { ImGui::PushID(n); if ((n % 8) != 0) ImGui::SameLine(0.0f, ImGui::GetStyle().ItemSpacing.y); if (ImGui::ColorButton("##palette", saved_palette[n], ImGuiColorEditFlags_NoAlpha | ImGuiColorEditFlags_NoPicker | ImGuiColorEditFlags_NoTooltip, ImVec2(20,20))) color = ImVec4(saved_palette[n].x, saved_palette[n].y, saved_palette[n].z, color.w); // Preserve alpha! // Allow user to drop colors into each palette entry // (Note that ColorButton is already a drag source by default, unless using ImGuiColorEditFlags_NoDragDrop) if (ImGui::BeginDragDropTarget()) { if (const ImGuiPayload* payload = ImGui::AcceptDragDropPayload(IMGUI_PAYLOAD_TYPE_COLOR_3F)) memcpy((float*)&saved_palette[n], payload->Data, sizeof(float) * 3); if (const ImGuiPayload* payload = ImGui::AcceptDragDropPayload(IMGUI_PAYLOAD_TYPE_COLOR_4F)) memcpy((float*)&saved_palette[n], payload->Data, sizeof(float) * 4); ImGui::EndDragDropTarget(); } ImGui::PopID(); } ImGui::EndGroup(); ImGui::EndPopup(); } ImGui::Text("Color button only:"); ImGui::ColorButton("MyColor##3c", *(ImVec4*)&color, misc_flags, ImVec2(80,80)); ImGui::Text("Color picker:"); static bool alpha = true; static bool alpha_bar = true; static bool side_preview = true; static bool ref_color = false; static ImVec4 ref_color_v(1.0f,0.0f,1.0f,0.5f); static int display_mode = 0; static int picker_mode = 0; ImGui::Checkbox("With Alpha", &alpha); ImGui::Checkbox("With Alpha Bar", &alpha_bar); ImGui::Checkbox("With Side Preview", &side_preview); if (side_preview) { ImGui::SameLine(); ImGui::Checkbox("With Ref Color", &ref_color); if (ref_color) { ImGui::SameLine(); ImGui::ColorEdit4("##RefColor", &ref_color_v.x, ImGuiColorEditFlags_NoInputs | misc_flags); } } ImGui::Combo("Display Mode", &display_mode, "Auto/Current\0None\0RGB Only\0HSV Only\0Hex Only\0"); ImGui::SameLine(); HelpMarker("ColorEdit defaults to displaying RGB inputs if you don't specify a display mode, but the user can change it with a right-click.\n\nColorPicker defaults to displaying RGB+HSV+Hex if you don't specify a display mode.\n\nYou can change the defaults using SetColorEditOptions()."); ImGui::Combo("Picker Mode", &picker_mode, "Auto/Current\0Hue bar + SV rect\0Hue wheel + SV 
triangle\0"); ImGui::SameLine(); HelpMarker("User can right-click the picker to change mode."); ImGuiColorEditFlags flags = misc_flags; if (!alpha) flags |= ImGuiColorEditFlags_NoAlpha; // This is by default if you call ColorPicker3() instead of ColorPicker4() if (alpha_bar) flags |= ImGuiColorEditFlags_AlphaBar; if (!side_preview) flags |= ImGuiColorEditFlags_NoSidePreview; if (picker_mode == 1) flags |= ImGuiColorEditFlags_PickerHueBar; if (picker_mode == 2) flags |= ImGuiColorEditFlags_PickerHueWheel; if (display_mode == 1) flags |= ImGuiColorEditFlags_NoInputs; // Disable all RGB/HSV/Hex displays if (display_mode == 2) flags |= ImGuiColorEditFlags_DisplayRGB; // Override display mode if (display_mode == 3) flags |= ImGuiColorEditFlags_DisplayHSV; if (display_mode == 4) flags |= ImGuiColorEditFlags_DisplayHex; ImGui::ColorPicker4("MyColor##4", (float*)&color, flags, ref_color ? &ref_color_v.x : NULL); ImGui::Text("Programmatically set defaults:"); ImGui::SameLine(); HelpMarker("SetColorEditOptions() is designed to allow you to set boot-time default.\nWe don't have Push/Pop functions because you can force options on a per-widget basis if needed, and the user can change non-forced ones with the options menu.\nWe don't have a getter to avoid encouraging you to persistently save values that aren't forward-compatible."); if (ImGui::Button("Default: Uint8 + HSV + Hue Bar")) ImGui::SetColorEditOptions(ImGuiColorEditFlags_Uint8 | ImGuiColorEditFlags_DisplayHSV | ImGuiColorEditFlags_PickerHueBar); if (ImGui::Button("Default: Float + HDR + Hue Wheel")) ImGui::SetColorEditOptions(ImGuiColorEditFlags_Float | ImGuiColorEditFlags_HDR | ImGuiColorEditFlags_PickerHueWheel); // HSV encoded support (to avoid RGB<>HSV round trips and singularities when S==0 or V==0) static ImVec4 color_stored_as_hsv(0.23f, 1.0f, 1.0f, 1.0f); ImGui::Spacing(); ImGui::Text("HSV encoded colors"); ImGui::SameLine(); HelpMarker("By default, colors are given to ColorEdit and ColorPicker in RGB, but ImGuiColorEditFlags_InputHSV allows you to store colors as HSV and pass them to ColorEdit and ColorPicker as HSV. This comes with the added benefit that you can manipulate hue values with the picker even when saturation or value are zero."); ImGui::Text("Color widget with InputHSV:"); ImGui::ColorEdit4("HSV shown as RGB##1", (float*)&color_stored_as_hsv, ImGuiColorEditFlags_DisplayRGB | ImGuiColorEditFlags_InputHSV | ImGuiColorEditFlags_Float); ImGui::ColorEdit4("HSV shown as HSV##1", (float*)&color_stored_as_hsv, ImGuiColorEditFlags_DisplayHSV | ImGuiColorEditFlags_InputHSV | ImGuiColorEditFlags_Float); ImGui::DragFloat4("Raw HSV values", (float*)&color_stored_as_hsv, 0.01f, 0.0f, 1.0f); ImGui::TreePop(); } if (ImGui::TreeNode("Range Widgets")) { static float begin = 10, end = 90; static int begin_i = 100, end_i = 1000; ImGui::DragFloatRange2("range", &begin, &end, 0.25f, 0.0f, 100.0f, "Min: %.1f %%", "Max: %.1f %%"); ImGui::DragIntRange2("range int (no bounds)", &begin_i, &end_i, 5, 0, 0, "Min: %d units", "Max: %d units"); ImGui::TreePop(); } if (ImGui::TreeNode("Data Types")) { // The DragScalar/InputScalar/SliderScalar functions allow various data types: signed/unsigned int/long long and float/double // To avoid polluting the public API with all possible combinations, we use the ImGuiDataType enum to pass the type, // and passing all arguments by address. // This is the reason the test code below creates local variables to hold "zero" "one" etc. for each types. 
// In practice, if you frequently use a given type that is not covered by the normal API entry points, you can wrap it // yourself inside a 1 line function which can take typed argument as value instead of void*, and then pass their address // to the generic function. For example: // bool MySliderU64(const char *label, u64* value, u64 min = 0, u64 max = 0, const char* format = "%lld") // { // return SliderScalar(label, ImGuiDataType_U64, value, &min, &max, format); // } // Limits (as helper variables that we can take the address of) // Note that the SliderScalar function has a maximum usable range of half the natural type maximum, hence the /2 below. #ifndef LLONG_MIN ImS64 LLONG_MIN = -9223372036854775807LL - 1; ImS64 LLONG_MAX = 9223372036854775807LL; ImU64 ULLONG_MAX = (2ULL * 9223372036854775807LL + 1); #endif const char s8_zero = 0, s8_one = 1, s8_fifty = 50, s8_min = -128, s8_max = 127; const ImU8 u8_zero = 0, u8_one = 1, u8_fifty = 50, u8_min = 0, u8_max = 255; const short s16_zero = 0, s16_one = 1, s16_fifty = 50, s16_min = -32768, s16_max = 32767; const ImU16 u16_zero = 0, u16_one = 1, u16_fifty = 50, u16_min = 0, u16_max = 65535; const ImS32 s32_zero = 0, s32_one = 1, s32_fifty = 50, s32_min = INT_MIN/2, s32_max = INT_MAX/2, s32_hi_a = INT_MAX/2 - 100, s32_hi_b = INT_MAX/2; const ImU32 u32_zero = 0, u32_one = 1, u32_fifty = 50, u32_min = 0, u32_max = UINT_MAX/2, u32_hi_a = UINT_MAX/2 - 100, u32_hi_b = UINT_MAX/2; const ImS64 s64_zero = 0, s64_one = 1, s64_fifty = 50, s64_min = LLONG_MIN/2, s64_max = LLONG_MAX/2, s64_hi_a = LLONG_MAX/2 - 100, s64_hi_b = LLONG_MAX/2; const ImU64 u64_zero = 0, u64_one = 1, u64_fifty = 50, u64_min = 0, u64_max = ULLONG_MAX/2, u64_hi_a = ULLONG_MAX/2 - 100, u64_hi_b = ULLONG_MAX/2; const float f32_zero = 0.f, f32_one = 1.f, f32_lo_a = -10000000000.0f, f32_hi_a = +10000000000.0f; const double f64_zero = 0., f64_one = 1., f64_lo_a = -1000000000000000.0, f64_hi_a = +1000000000000000.0; // State static char s8_v = 127; static ImU8 u8_v = 255; static short s16_v = 32767; static ImU16 u16_v = 65535; static ImS32 s32_v = -1; static ImU32 u32_v = (ImU32)-1; static ImS64 s64_v = -1; static ImU64 u64_v = (ImU64)-1; static float f32_v = 0.123f; static double f64_v = 90000.01234567890123456789; const float drag_speed = 0.2f; static bool drag_clamp = false; ImGui::Text("Drags:"); ImGui::Checkbox("Clamp integers to 0..50", &drag_clamp); ImGui::SameLine(); HelpMarker("As with every widgets in dear imgui, we never modify values unless there is a user interaction.\nYou can override the clamping limits by using CTRL+Click to input a value."); ImGui::DragScalar("drag s8", ImGuiDataType_S8, &s8_v, drag_speed, drag_clamp ? &s8_zero : NULL, drag_clamp ? &s8_fifty : NULL); ImGui::DragScalar("drag u8", ImGuiDataType_U8, &u8_v, drag_speed, drag_clamp ? &u8_zero : NULL, drag_clamp ? &u8_fifty : NULL, "%u ms"); ImGui::DragScalar("drag s16", ImGuiDataType_S16, &s16_v, drag_speed, drag_clamp ? &s16_zero : NULL, drag_clamp ? &s16_fifty : NULL); ImGui::DragScalar("drag u16", ImGuiDataType_U16, &u16_v, drag_speed, drag_clamp ? &u16_zero : NULL, drag_clamp ? &u16_fifty : NULL, "%u ms"); ImGui::DragScalar("drag s32", ImGuiDataType_S32, &s32_v, drag_speed, drag_clamp ? &s32_zero : NULL, drag_clamp ? &s32_fifty : NULL); ImGui::DragScalar("drag u32", ImGuiDataType_U32, &u32_v, drag_speed, drag_clamp ? &u32_zero : NULL, drag_clamp ? &u32_fifty : NULL, "%u ms"); ImGui::DragScalar("drag s64", ImGuiDataType_S64, &s64_v, drag_speed, drag_clamp ? &s64_zero : NULL, drag_clamp ? 
&s64_fifty : NULL); ImGui::DragScalar("drag u64", ImGuiDataType_U64, &u64_v, drag_speed, drag_clamp ? &u64_zero : NULL, drag_clamp ? &u64_fifty : NULL); ImGui::DragScalar("drag float", ImGuiDataType_Float, &f32_v, 0.005f, &f32_zero, &f32_one, "%f", 1.0f); ImGui::DragScalar("drag float ^2", ImGuiDataType_Float, &f32_v, 0.005f, &f32_zero, &f32_one, "%f", 2.0f); ImGui::SameLine(); HelpMarker("You can use the 'power' parameter to increase tweaking precision on one side of the range."); ImGui::DragScalar("drag double", ImGuiDataType_Double, &f64_v, 0.0005f, &f64_zero, NULL, "%.10f grams", 1.0f); ImGui::DragScalar("drag double ^2", ImGuiDataType_Double, &f64_v, 0.0005f, &f64_zero, &f64_one, "0 < %.10f < 1", 2.0f); ImGui::Text("Sliders"); ImGui::SliderScalar("slider s8 full", ImGuiDataType_S8, &s8_v, &s8_min, &s8_max, "%d"); ImGui::SliderScalar("slider u8 full", ImGuiDataType_U8, &u8_v, &u8_min, &u8_max, "%u"); ImGui::SliderScalar("slider s16 full", ImGuiDataType_S16, &s16_v, &s16_min, &s16_max, "%d"); ImGui::SliderScalar("slider u16 full", ImGuiDataType_U16, &u16_v, &u16_min, &u16_max, "%u"); ImGui::SliderScalar("slider s32 low", ImGuiDataType_S32, &s32_v, &s32_zero, &s32_fifty,"%d"); ImGui::SliderScalar("slider s32 high", ImGuiDataType_S32, &s32_v, &s32_hi_a, &s32_hi_b, "%d"); ImGui::SliderScalar("slider s32 full", ImGuiDataType_S32, &s32_v, &s32_min, &s32_max, "%d"); ImGui::SliderScalar("slider u32 low", ImGuiDataType_U32, &u32_v, &u32_zero, &u32_fifty,"%u"); ImGui::SliderScalar("slider u32 high", ImGuiDataType_U32, &u32_v, &u32_hi_a, &u32_hi_b, "%u"); ImGui::SliderScalar("slider u32 full", ImGuiDataType_U32, &u32_v, &u32_min, &u32_max, "%u"); ImGui::SliderScalar("slider s64 low", ImGuiDataType_S64, &s64_v, &s64_zero, &s64_fifty,"%I64d"); ImGui::SliderScalar("slider s64 high", ImGuiDataType_S64, &s64_v, &s64_hi_a, &s64_hi_b, "%I64d"); ImGui::SliderScalar("slider s64 full", ImGuiDataType_S64, &s64_v, &s64_min, &s64_max, "%I64d"); ImGui::SliderScalar("slider u64 low", ImGuiDataType_U64, &u64_v, &u64_zero, &u64_fifty,"%I64u ms"); ImGui::SliderScalar("slider u64 high", ImGuiDataType_U64, &u64_v, &u64_hi_a, &u64_hi_b, "%I64u ms"); ImGui::SliderScalar("slider u64 full", ImGuiDataType_U64, &u64_v, &u64_min, &u64_max, "%I64u ms"); ImGui::SliderScalar("slider float low", ImGuiDataType_Float, &f32_v, &f32_zero, &f32_one); ImGui::SliderScalar("slider float low^2", ImGuiDataType_Float, &f32_v, &f32_zero, &f32_one, "%.10f", 2.0f); ImGui::SliderScalar("slider float high", ImGuiDataType_Float, &f32_v, &f32_lo_a, &f32_hi_a, "%e"); ImGui::SliderScalar("slider double low", ImGuiDataType_Double, &f64_v, &f64_zero, &f64_one, "%.10f grams", 1.0f); ImGui::SliderScalar("slider double low^2",ImGuiDataType_Double, &f64_v, &f64_zero, &f64_one, "%.10f", 2.0f); ImGui::SliderScalar("slider double high", ImGuiDataType_Double, &f64_v, &f64_lo_a, &f64_hi_a, "%e grams", 1.0f); static bool inputs_step = true; ImGui::Text("Inputs"); ImGui::Checkbox("Show step buttons", &inputs_step); ImGui::InputScalar("input s8", ImGuiDataType_S8, &s8_v, inputs_step ? &s8_one : NULL, NULL, "%d"); ImGui::InputScalar("input u8", ImGuiDataType_U8, &u8_v, inputs_step ? &u8_one : NULL, NULL, "%u"); ImGui::InputScalar("input s16", ImGuiDataType_S16, &s16_v, inputs_step ? &s16_one : NULL, NULL, "%d"); ImGui::InputScalar("input u16", ImGuiDataType_U16, &u16_v, inputs_step ? &u16_one : NULL, NULL, "%u"); ImGui::InputScalar("input s32", ImGuiDataType_S32, &s32_v, inputs_step ? 
&s32_one : NULL, NULL, "%d"); ImGui::InputScalar("input s32 hex", ImGuiDataType_S32, &s32_v, inputs_step ? &s32_one : NULL, NULL, "%08X", ImGuiInputTextFlags_CharsHexadecimal); ImGui::InputScalar("input u32", ImGuiDataType_U32, &u32_v, inputs_step ? &u32_one : NULL, NULL, "%u"); ImGui::InputScalar("input u32 hex", ImGuiDataType_U32, &u32_v, inputs_step ? &u32_one : NULL, NULL, "%08X", ImGuiInputTextFlags_CharsHexadecimal); ImGui::InputScalar("input s64", ImGuiDataType_S64, &s64_v, inputs_step ? &s64_one : NULL); ImGui::InputScalar("input u64", ImGuiDataType_U64, &u64_v, inputs_step ? &u64_one : NULL); ImGui::InputScalar("input float", ImGuiDataType_Float, &f32_v, inputs_step ? &f32_one : NULL); ImGui::InputScalar("input double", ImGuiDataType_Double, &f64_v, inputs_step ? &f64_one : NULL); ImGui::TreePop(); } if (ImGui::TreeNode("Multi-component Widgets")) { static float vec4f[4] = { 0.10f, 0.20f, 0.30f, 0.44f }; static int vec4i[4] = { 1, 5, 100, 255 }; ImGui::InputFloat2("input float2", vec4f); ImGui::DragFloat2("drag float2", vec4f, 0.01f, 0.0f, 1.0f); ImGui::SliderFloat2("slider float2", vec4f, 0.0f, 1.0f); ImGui::InputInt2("input int2", vec4i); ImGui::DragInt2("drag int2", vec4i, 1, 0, 255); ImGui::SliderInt2("slider int2", vec4i, 0, 255); ImGui::Spacing(); ImGui::InputFloat3("input float3", vec4f); ImGui::DragFloat3("drag float3", vec4f, 0.01f, 0.0f, 1.0f); ImGui::SliderFloat3("slider float3", vec4f, 0.0f, 1.0f); ImGui::InputInt3("input int3", vec4i); ImGui::DragInt3("drag int3", vec4i, 1, 0, 255); ImGui::SliderInt3("slider int3", vec4i, 0, 255); ImGui::Spacing(); ImGui::InputFloat4("input float4", vec4f); ImGui::DragFloat4("drag float4", vec4f, 0.01f, 0.0f, 1.0f); ImGui::SliderFloat4("slider float4", vec4f, 0.0f, 1.0f); ImGui::InputInt4("input int4", vec4i); ImGui::DragInt4("drag int4", vec4i, 1, 0, 255); ImGui::SliderInt4("slider int4", vec4i, 0, 255); ImGui::TreePop(); } if (ImGui::TreeNode("Vertical Sliders")) { const float spacing = 4; ImGui::PushStyleVar(ImGuiStyleVar_ItemSpacing, ImVec2(spacing, spacing)); static int int_value = 0; ImGui::VSliderInt("##int", ImVec2(18,160), &int_value, 0, 5); ImGui::SameLine(); static float values[7] = { 0.0f, 0.60f, 0.35f, 0.9f, 0.70f, 0.20f, 0.0f }; ImGui::PushID("set1"); for (int i = 0; i < 7; i++) { if (i > 0) ImGui::SameLine(); ImGui::PushID(i); ImGui::PushStyleColor(ImGuiCol_FrameBg, (ImVec4)ImColor::HSV(i/7.0f, 0.5f, 0.5f)); ImGui::PushStyleColor(ImGuiCol_FrameBgHovered, (ImVec4)ImColor::HSV(i/7.0f, 0.6f, 0.5f)); ImGui::PushStyleColor(ImGuiCol_FrameBgActive, (ImVec4)ImColor::HSV(i/7.0f, 0.7f, 0.5f)); ImGui::PushStyleColor(ImGuiCol_SliderGrab, (ImVec4)ImColor::HSV(i/7.0f, 0.9f, 0.9f)); ImGui::VSliderFloat("##v", ImVec2(18,160), &values[i], 0.0f, 1.0f, ""); if (ImGui::IsItemActive() || ImGui::IsItemHovered()) ImGui::SetTooltip("%.3f", values[i]); ImGui::PopStyleColor(4); ImGui::PopID(); } ImGui::PopID(); ImGui::SameLine(); ImGui::PushID("set2"); static float values2[4] = { 0.20f, 0.80f, 0.40f, 0.25f }; const int rows = 3; const ImVec2 small_slider_size(18, (160.0f-(rows-1)*spacing)/rows); for (int nx = 0; nx < 4; nx++) { if (nx > 0) ImGui::SameLine(); ImGui::BeginGroup(); for (int ny = 0; ny < rows; ny++) { ImGui::PushID(nx*rows+ny); ImGui::VSliderFloat("##v", small_slider_size, &values2[nx], 0.0f, 1.0f, ""); if (ImGui::IsItemActive() || ImGui::IsItemHovered()) ImGui::SetTooltip("%.3f", values2[nx]); ImGui::PopID(); } ImGui::EndGroup(); } ImGui::PopID(); ImGui::SameLine(); ImGui::PushID("set3"); for (int i = 0; i < 4; i++) { if (i > 0) 
ImGui::SameLine(); ImGui::PushID(i); ImGui::PushStyleVar(ImGuiStyleVar_GrabMinSize, 40); ImGui::VSliderFloat("##v", ImVec2(40,160), &values[i], 0.0f, 1.0f, "%.2f\nsec"); ImGui::PopStyleVar(); ImGui::PopID(); } ImGui::PopID(); ImGui::PopStyleVar(); ImGui::TreePop(); } if (ImGui::TreeNode("Drag and Drop")) { { // ColorEdit widgets automatically act as drag source and drag target. // They are using standardized payload strings IMGUI_PAYLOAD_TYPE_COLOR_3F and IMGUI_PAYLOAD_TYPE_COLOR_4F to allow your own widgets // to use colors in their drag and drop interaction. Also see the demo in Color Picker -> Palette demo. ImGui::BulletText("Drag and drop in standard widgets"); ImGui::Indent(); static float col1[3] = { 1.0f,0.0f,0.2f }; static float col2[4] = { 0.4f,0.7f,0.0f,0.5f }; ImGui::ColorEdit3("color 1", col1); ImGui::ColorEdit4("color 2", col2); ImGui::Unindent(); } { ImGui::BulletText("Drag and drop to copy/swap items"); ImGui::Indent(); enum Mode { Mode_Copy, Mode_Move, Mode_Swap }; static int mode = 0; if (ImGui::RadioButton("Copy", mode == Mode_Copy)) { mode = Mode_Copy; } ImGui::SameLine(); if (ImGui::RadioButton("Move", mode == Mode_Move)) { mode = Mode_Move; } ImGui::SameLine(); if (ImGui::RadioButton("Swap", mode == Mode_Swap)) { mode = Mode_Swap; } static const char* names[9] = { "Bobby", "Beatrice", "Betty", "Brianna", "Barry", "Bernard", "Bibi", "Blaine", "Bryn" }; for (int n = 0; n < IM_ARRAYSIZE(names); n++) { ImGui::PushID(n); if ((n % 3) != 0) ImGui::SameLine(); ImGui::Button(names[n], ImVec2(60,60)); // Our buttons are both drag sources and drag targets here! if (ImGui::BeginDragDropSource(ImGuiDragDropFlags_None)) { ImGui::SetDragDropPayload("DND_DEMO_CELL", &n, sizeof(int)); // Set payload to carry the index of our item (could be anything) if (mode == Mode_Copy) { ImGui::Text("Copy %s", names[n]); } // Display preview (could be anything, e.g. when dragging an image we could decide to display the filename and a small preview of the image, etc.) if (mode == Mode_Move) { ImGui::Text("Move %s", names[n]); } if (mode == Mode_Swap) { ImGui::Text("Swap %s", names[n]); } ImGui::EndDragDropSource(); } if (ImGui::BeginDragDropTarget()) { if (const ImGuiPayload* payload = ImGui::AcceptDragDropPayload("DND_DEMO_CELL")) { IM_ASSERT(payload->DataSize == sizeof(int)); int payload_n = *(const int*)payload->Data; if (mode == Mode_Copy) { names[n] = names[payload_n]; } if (mode == Mode_Move) { names[n] = names[payload_n]; names[payload_n] = ""; } if (mode == Mode_Swap) { const char* tmp = names[n]; names[n] = names[payload_n]; names[payload_n] = tmp; } } ImGui::EndDragDropTarget(); } ImGui::PopID(); } ImGui::Unindent(); } ImGui::TreePop(); } if (ImGui::TreeNode("Querying Status (Active/Focused/Hovered etc.)")) { // Display the value of IsItemHovered() and other common item state functions. Note that the flags can be combined. // (because BulletText is an item itself and that would affect the output of IsItemHovered() we pass all state in a single call to simplify the code). 
static int item_type = 1; static bool b = false; static float col4f[4] = { 1.0f, 0.5, 0.0f, 1.0f }; static char str[16] = {}; ImGui::Combo("Item Type", &item_type, "Text\0Button\0Button (w/ repeat)\0Checkbox\0SliderFloat\0InputText\0InputFloat\0InputFloat3\0ColorEdit4\0MenuItem\0TreeNode (w/ double-click)\0ListBox\0"); ImGui::SameLine(); HelpMarker("Testing how various types of items are interacting with the IsItemXXX functions."); bool ret = false; if (item_type == 0) { ImGui::Text("ITEM: Text"); } // Testing text items with no identifier/interaction if (item_type == 1) { ret = ImGui::Button("ITEM: Button"); } // Testing button if (item_type == 2) { ImGui::PushButtonRepeat(true); ret = ImGui::Button("ITEM: Button"); ImGui::PopButtonRepeat(); } // Testing button (with repeater) if (item_type == 3) { ret = ImGui::Checkbox("ITEM: Checkbox", &b); } // Testing checkbox if (item_type == 4) { ret = ImGui::SliderFloat("ITEM: SliderFloat", &col4f[0], 0.0f, 1.0f); } // Testing basic item if (item_type == 5) { ret = ImGui::InputText("ITEM: InputText", &str[0], IM_ARRAYSIZE(str)); } // Testing input text (which handles tabbing) if (item_type == 6) { ret = ImGui::InputFloat("ITEM: InputFloat", col4f, 1.0f); } // Testing +/- buttons on scalar input if (item_type == 7) { ret = ImGui::InputFloat3("ITEM: InputFloat3", col4f); } // Testing multi-component items (IsItemXXX flags are reported merged) if (item_type == 8) { ret = ImGui::ColorEdit4("ITEM: ColorEdit4", col4f); } // Testing multi-component items (IsItemXXX flags are reported merged) if (item_type == 9) { ret = ImGui::MenuItem("ITEM: MenuItem"); } // Testing menu item (they use ImGuiButtonFlags_PressedOnRelease button policy) if (item_type == 10){ ret = ImGui::TreeNodeEx("ITEM: TreeNode w/ ImGuiTreeNodeFlags_OpenOnDoubleClick", ImGuiTreeNodeFlags_OpenOnDoubleClick | ImGuiTreeNodeFlags_NoTreePushOnOpen); } // Testing tree node with ImGuiButtonFlags_PressedOnDoubleClick button policy. 
if (item_type == 11){ const char* items[] = { "Apple", "Banana", "Cherry", "Kiwi" }; static int current = 1; ret = ImGui::ListBox("ITEM: ListBox", &current, items, IM_ARRAYSIZE(items), IM_ARRAYSIZE(items)); } ImGui::BulletText( "Return value = %d\n" "IsItemFocused() = %d\n" "IsItemHovered() = %d\n" "IsItemHovered(_AllowWhenBlockedByPopup) = %d\n" "IsItemHovered(_AllowWhenBlockedByActiveItem) = %d\n" "IsItemHovered(_AllowWhenOverlapped) = %d\n" "IsItemHovered(_RectOnly) = %d\n" "IsItemActive() = %d\n" "IsItemEdited() = %d\n" "IsItemActivated() = %d\n" "IsItemDeactivated() = %d\n" "IsItemDeactivatedAfterEdit() = %d\n" "IsItemVisible() = %d\n" "IsItemClicked() = %d\n" "GetItemRectMin() = (%.1f, %.1f)\n" "GetItemRectMax() = (%.1f, %.1f)\n" "GetItemRectSize() = (%.1f, %.1f)", ret, ImGui::IsItemFocused(), ImGui::IsItemHovered(), ImGui::IsItemHovered(ImGuiHoveredFlags_AllowWhenBlockedByPopup), ImGui::IsItemHovered(ImGuiHoveredFlags_AllowWhenBlockedByActiveItem), ImGui::IsItemHovered(ImGuiHoveredFlags_AllowWhenOverlapped), ImGui::IsItemHovered(ImGuiHoveredFlags_RectOnly), ImGui::IsItemActive(), ImGui::IsItemEdited(), ImGui::IsItemActivated(), ImGui::IsItemDeactivated(), ImGui::IsItemDeactivatedAfterEdit(), ImGui::IsItemVisible(), ImGui::IsItemClicked(), ImGui::GetItemRectMin().x, ImGui::GetItemRectMin().y, ImGui::GetItemRectMax().x, ImGui::GetItemRectMax().y, ImGui::GetItemRectSize().x, ImGui::GetItemRectSize().y ); static bool embed_all_inside_a_child_window = false; ImGui::Checkbox("Embed everything inside a child window (for additional testing)", &embed_all_inside_a_child_window); if (embed_all_inside_a_child_window) ImGui::BeginChild("outer_child", ImVec2(0, ImGui::GetFontSize() * 20), true); // Testing IsWindowFocused() function with its various flags. Note that the flags can be combined. ImGui::BulletText( "IsWindowFocused() = %d\n" "IsWindowFocused(_ChildWindows) = %d\n" "IsWindowFocused(_ChildWindows|_RootWindow) = %d\n" "IsWindowFocused(_RootWindow) = %d\n" "IsWindowFocused(_AnyWindow) = %d\n", ImGui::IsWindowFocused(), ImGui::IsWindowFocused(ImGuiFocusedFlags_ChildWindows), ImGui::IsWindowFocused(ImGuiFocusedFlags_ChildWindows | ImGuiFocusedFlags_RootWindow), ImGui::IsWindowFocused(ImGuiFocusedFlags_RootWindow), ImGui::IsWindowFocused(ImGuiFocusedFlags_AnyWindow)); // Testing IsWindowHovered() function with its various flags. Note that the flags can be combined. 
ImGui::BulletText( "IsWindowHovered() = %d\n" "IsWindowHovered(_AllowWhenBlockedByPopup) = %d\n" "IsWindowHovered(_AllowWhenBlockedByActiveItem) = %d\n" "IsWindowHovered(_ChildWindows) = %d\n" "IsWindowHovered(_ChildWindows|_RootWindow) = %d\n" "IsWindowHovered(_ChildWindows|_AllowWhenBlockedByPopup) = %d\n" "IsWindowHovered(_RootWindow) = %d\n" "IsWindowHovered(_AnyWindow) = %d\n", ImGui::IsWindowHovered(), ImGui::IsWindowHovered(ImGuiHoveredFlags_AllowWhenBlockedByPopup), ImGui::IsWindowHovered(ImGuiHoveredFlags_AllowWhenBlockedByActiveItem), ImGui::IsWindowHovered(ImGuiHoveredFlags_ChildWindows), ImGui::IsWindowHovered(ImGuiHoveredFlags_ChildWindows | ImGuiHoveredFlags_RootWindow), ImGui::IsWindowHovered(ImGuiHoveredFlags_ChildWindows | ImGuiHoveredFlags_AllowWhenBlockedByPopup), ImGui::IsWindowHovered(ImGuiHoveredFlags_RootWindow), ImGui::IsWindowHovered(ImGuiHoveredFlags_AnyWindow)); ImGui::BeginChild("child", ImVec2(0, 50), true); ImGui::Text("This is another child window for testing the _ChildWindows flag."); ImGui::EndChild(); if (embed_all_inside_a_child_window) ImGui::EndChild(); static char dummy_str[] = "This is a dummy field to be able to tab-out of the widgets above."; ImGui::InputText("dummy", dummy_str, IM_ARRAYSIZE(dummy_str), ImGuiInputTextFlags_ReadOnly); // Calling IsItemHovered() after begin returns the hovered status of the title bar. // This is useful in particular if you want to create a context menu (with BeginPopupContextItem) associated to the title bar of a window. static bool test_window = false; ImGui::Checkbox("Hovered/Active tests after Begin() for title bar testing", &test_window); if (test_window) { ImGui::Begin("Title bar Hovered/Active tests", &test_window); if (ImGui::BeginPopupContextItem()) // <-- This is using IsItemHovered() { if (ImGui::MenuItem("Close")) { test_window = false; } ImGui::EndPopup(); } ImGui::Text( "IsItemHovered() after begin = %d (== is title bar hovered)\n" "IsItemActive() after begin = %d (== is window being clicked/moved)\n", ImGui::IsItemHovered(), ImGui::IsItemActive()); ImGui::End(); } ImGui::TreePop(); } } static void ShowDemoWindowLayout() { if (!ImGui::CollapsingHeader("Layout")) return; if (ImGui::TreeNode("Child windows")) { HelpMarker("Use child windows to begin into a self-contained independent scrolling/clipping regions within a host window."); static bool disable_mouse_wheel = false; static bool disable_menu = false; ImGui::Checkbox("Disable Mouse Wheel", &disable_mouse_wheel); ImGui::Checkbox("Disable Menu", &disable_menu); static int line = 50; bool goto_line = ImGui::Button("Goto"); ImGui::SameLine(); ImGui::SetNextItemWidth(100); goto_line |= ImGui::InputInt("##Line", &line, 0, 0, ImGuiInputTextFlags_EnterReturnsTrue); // Child 1: no border, enable horizontal scrollbar { ImGuiWindowFlags window_flags = ImGuiWindowFlags_HorizontalScrollbar | (disable_mouse_wheel ? ImGuiWindowFlags_NoScrollWithMouse : 0); ImGui::BeginChild("Child1", ImVec2(ImGui::GetWindowContentRegionWidth() * 0.5f, 260), false, window_flags); for (int i = 0; i < 100; i++) { ImGui::Text("%04d: scrollable region", i); if (goto_line && line == i) ImGui::SetScrollHereY(); } if (goto_line && line >= 100) ImGui::SetScrollHereY(); ImGui::EndChild(); } ImGui::SameLine(); // Child 2: rounded border { ImGuiWindowFlags window_flags = (disable_mouse_wheel ? ImGuiWindowFlags_NoScrollWithMouse : 0) | (disable_menu ? 
0 : ImGuiWindowFlags_MenuBar); ImGui::PushStyleVar(ImGuiStyleVar_ChildRounding, 5.0f); ImGui::BeginChild("Child2", ImVec2(0, 260), true, window_flags); if (!disable_menu && ImGui::BeginMenuBar()) { if (ImGui::BeginMenu("Menu")) { ShowExampleMenuFile(); ImGui::EndMenu(); } ImGui::EndMenuBar(); } ImGui::Columns(2); for (int i = 0; i < 100; i++) { char buf[32]; sprintf(buf, "%03d", i); ImGui::Button(buf, ImVec2(-FLT_MIN, 0.0f)); ImGui::NextColumn(); } ImGui::EndChild(); ImGui::PopStyleVar(); } ImGui::Separator(); // Demonstrate a few extra things // - Changing ImGuiCol_ChildBg (which is transparent black in default styles) // - Using SetCursorPos() to position the child window (because the child window is an item from the POV of the parent window) // You can also call SetNextWindowPos() to position the child window. The parent window will effectively layout from this position. // - Using ImGui::GetItemRectMin/Max() to query the "item" state (because the child window is an item from the POV of the parent window) // See "Widgets" -> "Querying Status (Active/Focused/Hovered etc.)" section for more details about this. { ImGui::SetCursorPosX(ImGui::GetCursorPosX() + 10); ImGui::PushStyleColor(ImGuiCol_ChildBg, IM_COL32(255, 0, 0, 100)); ImGui::BeginChild("blah", ImVec2(200, 100), true, ImGuiWindowFlags_None); for (int n = 0; n < 50; n++) ImGui::Text("Some test %d", n); ImGui::EndChild(); ImVec2 child_rect_min = ImGui::GetItemRectMin(); ImVec2 child_rect_max = ImGui::GetItemRectMax(); ImGui::PopStyleColor(); ImGui::Text("Rect of child window is: (%.0f,%.0f) (%.0f,%.0f)", child_rect_min.x, child_rect_min.y, child_rect_max.x, child_rect_max.y); } ImGui::TreePop(); } if (ImGui::TreeNode("Widgets Width")) { // Use SetNextItemWidth() to set the width of a single upcoming item. // Use PushItemWidth()/PopItemWidth() to set the width of a group of items. static float f = 0.0f; ImGui::Text("SetNextItemWidth/PushItemWidth(100)"); ImGui::SameLine(); HelpMarker("Fixed width."); ImGui::SetNextItemWidth(100); ImGui::DragFloat("float##1", &f); ImGui::Text("SetNextItemWidth/PushItemWidth(GetWindowWidth() * 0.5f)"); ImGui::SameLine(); HelpMarker("Half of window width."); ImGui::SetNextItemWidth(ImGui::GetWindowWidth() * 0.5f); ImGui::DragFloat("float##2", &f); ImGui::Text("SetNextItemWidth/PushItemWidth(GetContentRegionAvail().x * 0.5f)"); ImGui::SameLine(); HelpMarker("Half of available width.\n(~ right-cursor_pos)\n(works within a column set)"); ImGui::SetNextItemWidth(ImGui::GetContentRegionAvail().x * 0.5f); ImGui::DragFloat("float##3", &f); ImGui::Text("SetNextItemWidth/PushItemWidth(-100)"); ImGui::SameLine(); HelpMarker("Align to right edge minus 100"); ImGui::SetNextItemWidth(-100); ImGui::DragFloat("float##4", &f); // Demonstrate using PushItemWidth to surround three items. Calling SetNextItemWidth() before each of them would have the same effect. 
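// For comparison, the same layout with per-item calls instead of PushItemWidth()/PopItemWidth() would look like this (sketch, not executed by the demo):
//   ImGui::SetNextItemWidth(-1); ImGui::DragFloat("##float5a", &f);
//   ImGui::SetNextItemWidth(-1); ImGui::DragFloat("##float5b", &f);
//   ImGui::SetNextItemWidth(-1); ImGui::DragFloat("##float5c", &f);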
ImGui::Text("SetNextItemWidth/PushItemWidth(-1)"); ImGui::SameLine(); HelpMarker("Align to right edge"); ImGui::PushItemWidth(-1); ImGui::DragFloat("##float5a", &f); ImGui::DragFloat("##float5b", &f); ImGui::DragFloat("##float5c", &f); ImGui::PopItemWidth(); ImGui::TreePop(); } if (ImGui::TreeNode("Basic Horizontal Layout")) { ImGui::TextWrapped("(Use ImGui::SameLine() to keep adding items to the right of the preceding item)"); // Text ImGui::Text("Two items: Hello"); ImGui::SameLine(); ImGui::TextColored(ImVec4(1,1,0,1), "Sailor"); // Adjust spacing ImGui::Text("More spacing: Hello"); ImGui::SameLine(0, 20); ImGui::TextColored(ImVec4(1,1,0,1), "Sailor"); // Button ImGui::AlignTextToFramePadding(); ImGui::Text("Normal buttons"); ImGui::SameLine(); ImGui::Button("Banana"); ImGui::SameLine(); ImGui::Button("Apple"); ImGui::SameLine(); ImGui::Button("Corniflower"); // Button ImGui::Text("Small buttons"); ImGui::SameLine(); ImGui::SmallButton("Like this one"); ImGui::SameLine(); ImGui::Text("can fit within a text block."); // Aligned to arbitrary position. Easy/cheap column. ImGui::Text("Aligned"); ImGui::SameLine(150); ImGui::Text("x=150"); ImGui::SameLine(300); ImGui::Text("x=300"); ImGui::Text("Aligned"); ImGui::SameLine(150); ImGui::SmallButton("x=150"); ImGui::SameLine(300); ImGui::SmallButton("x=300"); // Checkbox static bool c1 = false, c2 = false, c3 = false, c4 = false; ImGui::Checkbox("My", &c1); ImGui::SameLine(); ImGui::Checkbox("Tailor", &c2); ImGui::SameLine(); ImGui::Checkbox("Is", &c3); ImGui::SameLine(); ImGui::Checkbox("Rich", &c4); // Various static float f0 = 1.0f, f1 = 2.0f, f2 = 3.0f; ImGui::PushItemWidth(80); const char* items[] = { "AAAA", "BBBB", "CCCC", "DDDD" }; static int item = -1; ImGui::Combo("Combo", &item, items, IM_ARRAYSIZE(items)); ImGui::SameLine(); ImGui::SliderFloat("X", &f0, 0.0f, 5.0f); ImGui::SameLine(); ImGui::SliderFloat("Y", &f1, 0.0f, 5.0f); ImGui::SameLine(); ImGui::SliderFloat("Z", &f2, 0.0f, 5.0f); ImGui::PopItemWidth(); ImGui::PushItemWidth(80); ImGui::Text("Lists:"); static int selection[4] = { 0, 1, 2, 3 }; for (int i = 0; i < 4; i++) { if (i > 0) ImGui::SameLine(); ImGui::PushID(i); ImGui::ListBox("", &selection[i], items, IM_ARRAYSIZE(items)); ImGui::PopID(); //if (ImGui::IsItemHovered()) ImGui::SetTooltip("ListBox %d hovered", i); } ImGui::PopItemWidth(); // Dummy ImVec2 button_sz(40, 40); ImGui::Button("A", button_sz); ImGui::SameLine(); ImGui::Dummy(button_sz); ImGui::SameLine(); ImGui::Button("B", button_sz); // Manually wrapping (we should eventually provide this as an automatic layout feature, but for now you can do it manually) ImGui::Text("Manually wrapping:"); ImGuiStyle& style = ImGui::GetStyle(); int buttons_count = 20; float window_visible_x2 = ImGui::GetWindowPos().x + ImGui::GetWindowContentRegionMax().x; for (int n = 0; n < buttons_count; n++) { ImGui::PushID(n); ImGui::Button("Box", button_sz); float last_button_x2 = ImGui::GetItemRectMax().x; float next_button_x2 = last_button_x2 + style.ItemSpacing.x + button_sz.x; // Expected position if next button was on same line if (n + 1 < buttons_count && next_button_x2 < window_visible_x2) ImGui::SameLine(); ImGui::PopID(); } ImGui::TreePop(); } if (ImGui::TreeNode("Tabs")) { if (ImGui::TreeNode("Basic")) { ImGuiTabBarFlags tab_bar_flags = ImGuiTabBarFlags_None; if (ImGui::BeginTabBar("MyTabBar", tab_bar_flags)) { if (ImGui::BeginTabItem("Avocado")) { ImGui::Text("This is the Avocado tab!\nblah blah blah blah blah"); ImGui::EndTabItem(); } if (ImGui::BeginTabItem("Broccoli")) { 
ImGui::Text("This is the Broccoli tab!\nblah blah blah blah blah"); ImGui::EndTabItem(); } if (ImGui::BeginTabItem("Cucumber")) { ImGui::Text("This is the Cucumber tab!\nblah blah blah blah blah"); ImGui::EndTabItem(); } ImGui::EndTabBar(); } ImGui::Separator(); ImGui::TreePop(); } if (ImGui::TreeNode("Advanced & Close Button")) { // Expose a couple of the available flags. In most cases you may just call BeginTabBar() with no flags (0). static ImGuiTabBarFlags tab_bar_flags = ImGuiTabBarFlags_Reorderable; ImGui::CheckboxFlags("ImGuiTabBarFlags_Reorderable", (unsigned int*)&tab_bar_flags, ImGuiTabBarFlags_Reorderable); ImGui::CheckboxFlags("ImGuiTabBarFlags_AutoSelectNewTabs", (unsigned int*)&tab_bar_flags, ImGuiTabBarFlags_AutoSelectNewTabs); ImGui::CheckboxFlags("ImGuiTabBarFlags_TabListPopupButton", (unsigned int*)&tab_bar_flags, ImGuiTabBarFlags_TabListPopupButton); ImGui::CheckboxFlags("ImGuiTabBarFlags_NoCloseWithMiddleMouseButton", (unsigned int*)&tab_bar_flags, ImGuiTabBarFlags_NoCloseWithMiddleMouseButton); if ((tab_bar_flags & ImGuiTabBarFlags_FittingPolicyMask_) == 0) tab_bar_flags |= ImGuiTabBarFlags_FittingPolicyDefault_; if (ImGui::CheckboxFlags("ImGuiTabBarFlags_FittingPolicyResizeDown", (unsigned int*)&tab_bar_flags, ImGuiTabBarFlags_FittingPolicyResizeDown)) tab_bar_flags &= ~(ImGuiTabBarFlags_FittingPolicyMask_ ^ ImGuiTabBarFlags_FittingPolicyResizeDown); if (ImGui::CheckboxFlags("ImGuiTabBarFlags_FittingPolicyScroll", (unsigned int*)&tab_bar_flags, ImGuiTabBarFlags_FittingPolicyScroll)) tab_bar_flags &= ~(ImGuiTabBarFlags_FittingPolicyMask_ ^ ImGuiTabBarFlags_FittingPolicyScroll); // Tab Bar const char* names[4] = { "Artichoke", "Beetroot", "Celery", "Daikon" }; static bool opened[4] = { true, true, true, true }; // Persistent user state for (int n = 0; n < IM_ARRAYSIZE(opened); n++) { if (n > 0) { ImGui::SameLine(); } ImGui::Checkbox(names[n], &opened[n]); } // Passing a bool* to BeginTabItem() is similar to passing one to Begin(): the underlying bool will be set to false when the tab is closed. if (ImGui::BeginTabBar("MyTabBar", tab_bar_flags)) { for (int n = 0; n < IM_ARRAYSIZE(opened); n++) if (opened[n] && ImGui::BeginTabItem(names[n], &opened[n])) { ImGui::Text("This is the %s tab!", names[n]); if (n & 1) ImGui::Text("I am an odd tab."); ImGui::EndTabItem(); } ImGui::EndTabBar(); } ImGui::Separator(); ImGui::TreePop(); } ImGui::TreePop(); } if (ImGui::TreeNode("Groups")) { HelpMarker("BeginGroup() basically locks the horizontal position for new line. EndGroup() bundles the whole group so that you can use \"item\" functions such as IsItemHovered()/IsItemActive() or SameLine() etc. 
on the whole group."); ImGui::BeginGroup(); { ImGui::BeginGroup(); ImGui::Button("AAA"); ImGui::SameLine(); ImGui::Button("BBB"); ImGui::SameLine(); ImGui::BeginGroup(); ImGui::Button("CCC"); ImGui::Button("DDD"); ImGui::EndGroup(); ImGui::SameLine(); ImGui::Button("EEE"); ImGui::EndGroup(); if (ImGui::IsItemHovered()) ImGui::SetTooltip("First group hovered"); } // Capture the group size and create widgets using the same size ImVec2 size = ImGui::GetItemRectSize(); const float values[5] = { 0.5f, 0.20f, 0.80f, 0.60f, 0.25f }; ImGui::PlotHistogram("##values", values, IM_ARRAYSIZE(values), 0, NULL, 0.0f, 1.0f, size); ImGui::Button("ACTION", ImVec2((size.x - ImGui::GetStyle().ItemSpacing.x)*0.5f, size.y)); ImGui::SameLine(); ImGui::Button("REACTION", ImVec2((size.x - ImGui::GetStyle().ItemSpacing.x)*0.5f, size.y)); ImGui::EndGroup(); ImGui::SameLine(); ImGui::Button("LEVERAGE\nBUZZWORD", size); ImGui::SameLine(); if (ImGui::ListBoxHeader("List", size)) { ImGui::Selectable("Selected", true); ImGui::Selectable("Not Selected", false); ImGui::ListBoxFooter(); } ImGui::TreePop(); } if (ImGui::TreeNode("Text Baseline Alignment")) { HelpMarker("This is testing the vertical alignment that gets applied on text to keep it aligned with widgets. Lines only composed of text or \"small\" widgets fit in less vertical spaces than lines with normal widgets."); ImGui::Text("One\nTwo\nThree"); ImGui::SameLine(); ImGui::Text("Hello\nWorld"); ImGui::SameLine(); ImGui::Text("Banana"); ImGui::Text("Banana"); ImGui::SameLine(); ImGui::Text("Hello\nWorld"); ImGui::SameLine(); ImGui::Text("One\nTwo\nThree"); ImGui::Button("HOP##1"); ImGui::SameLine(); ImGui::Text("Banana"); ImGui::SameLine(); ImGui::Text("Hello\nWorld"); ImGui::SameLine(); ImGui::Text("Banana"); ImGui::Button("HOP##2"); ImGui::SameLine(); ImGui::Text("Hello\nWorld"); ImGui::SameLine(); ImGui::Text("Banana"); ImGui::Button("TEST##1"); ImGui::SameLine(); ImGui::Text("TEST"); ImGui::SameLine(); ImGui::SmallButton("TEST##2"); ImGui::AlignTextToFramePadding(); // If your line starts with text, call this to align it to upcoming widgets. ImGui::Text("Text aligned to Widget"); ImGui::SameLine(); ImGui::Button("Widget##1"); ImGui::SameLine(); ImGui::Text("Widget"); ImGui::SameLine(); ImGui::SmallButton("Widget##2"); ImGui::SameLine(); ImGui::Button("Widget##3"); // Tree const float spacing = ImGui::GetStyle().ItemInnerSpacing.x; ImGui::Button("Button##1"); ImGui::SameLine(0.0f, spacing); if (ImGui::TreeNode("Node##1")) { for (int i = 0; i < 6; i++) ImGui::BulletText("Item %d..", i); ImGui::TreePop(); } // Dummy tree data ImGui::AlignTextToFramePadding(); // Vertically align text node a bit lower so it'll be vertically centered with upcoming widget. Otherwise you can use SmallButton (smaller fit). bool node_open = ImGui::TreeNode("Node##2"); // Common mistake to avoid: if we want to SameLine after TreeNode we need to do it before we add child content. 
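// (Sketch of the mistake mentioned above, for contrast only: submitting the node's children first and calling
//  SameLine() afterwards would place the button after the child content instead of on the node's own line, e.g.
//      if (ImGui::TreeNode("Node##2")) { /* children */ ImGui::TreePop(); } ImGui::SameLine(); ImGui::Button("Button##2"); // too late
//  which is why the code below calls SameLine() + Button() before testing node_open.)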
ImGui::SameLine(0.0f, spacing); ImGui::Button("Button##2"); if (node_open) { for (int i = 0; i < 6; i++) ImGui::BulletText("Item %d..", i); ImGui::TreePop(); } // Dummy tree data // Bullet ImGui::Button("Button##3"); ImGui::SameLine(0.0f, spacing); ImGui::BulletText("Bullet text"); ImGui::AlignTextToFramePadding(); ImGui::BulletText("Node"); ImGui::SameLine(0.0f, spacing); ImGui::Button("Button##4"); ImGui::TreePop(); } if (ImGui::TreeNode("Scrolling")) { // Vertical scroll functions HelpMarker("Use SetScrollHereY() or SetScrollFromPosY() to scroll to a given vertical position."); static int track_item = 50; static bool enable_track = true; static bool enable_extra_decorations = false; static float scroll_to_off_px = 0.0f; static float scroll_to_pos_px = 200.0f; ImGui::Checkbox("Decoration", &enable_extra_decorations); ImGui::SameLine(); HelpMarker("We expose this for testing because scrolling sometimes had issues with window decoration such as menu-bars."); ImGui::Checkbox("Track", &enable_track); ImGui::PushItemWidth(100); ImGui::SameLine(140); enable_track |= ImGui::DragInt("##item", &track_item, 0.25f, 0, 99, "Item = %d"); bool scroll_to_off = ImGui::Button("Scroll Offset"); ImGui::SameLine(140); scroll_to_off |= ImGui::DragFloat("##off", &scroll_to_off_px, 1.00f, 0, 9999, "+%.0f px"); bool scroll_to_pos = ImGui::Button("Scroll To Pos"); ImGui::SameLine(140); scroll_to_pos |= ImGui::DragFloat("##pos", &scroll_to_pos_px, 1.00f, -10, 9999, "X/Y = %.0f px"); ImGui::PopItemWidth(); if (scroll_to_off || scroll_to_pos) enable_track = false; ImGuiStyle& style = ImGui::GetStyle(); float child_w = (ImGui::GetContentRegionAvail().x - 4 * style.ItemSpacing.x) / 5; if (child_w < 1.0f) child_w = 1.0f; ImGui::PushID("##VerticalScrolling"); for (int i = 0; i < 5; i++) { if (i > 0) ImGui::SameLine(); ImGui::BeginGroup(); const char* names[] = { "Top", "25%", "Center", "75%", "Bottom" }; ImGui::TextUnformatted(names[i]); ImGuiWindowFlags child_flags = enable_extra_decorations ? 
ImGuiWindowFlags_MenuBar : 0; ImGui::BeginChild(ImGui::GetID((void*)(intptr_t)i), ImVec2(child_w, 200.0f), true, child_flags); if (ImGui::BeginMenuBar()) { ImGui::TextUnformatted("abc"); ImGui::EndMenuBar(); } if (scroll_to_off) ImGui::SetScrollY(scroll_to_off_px); if (scroll_to_pos) ImGui::SetScrollFromPosY(ImGui::GetCursorStartPos().y + scroll_to_pos_px, i * 0.25f); for (int item = 0; item < 100; item++) { if (enable_track && item == track_item) { ImGui::TextColored(ImVec4(1,1,0,1), "Item %d", item); ImGui::SetScrollHereY(i * 0.25f); // 0.0f:top, 0.5f:center, 1.0f:bottom } else { ImGui::Text("Item %d", item); } } float scroll_y = ImGui::GetScrollY(); float scroll_max_y = ImGui::GetScrollMaxY(); ImGui::EndChild(); ImGui::Text("%.0f/%.0f", scroll_y, scroll_max_y); ImGui::EndGroup(); } ImGui::PopID(); // Horizontal scroll functions ImGui::Spacing(); HelpMarker("Use SetScrollHereX() or SetScrollFromPosX() to scroll to a given horizontal position.\n\nUsing the \"Scroll To Pos\" button above will make the discontinuity at edges visible: scrolling to the top/bottom/left/right-most item will add an additional WindowPadding to reflect on reaching the edge of the list.\n\nBecause the clipping rectangle of most window hides half worth of WindowPadding on the left/right, using SetScrollFromPosX(+1) will usually result in clipped text whereas the equivalent SetScrollFromPosY(+1) wouldn't."); ImGui::PushID("##HorizontalScrolling"); for (int i = 0; i < 5; i++) { float child_height = ImGui::GetTextLineHeight() + style.ScrollbarSize + style.WindowPadding.y * 2.0f; ImGuiWindowFlags child_flags = ImGuiWindowFlags_HorizontalScrollbar | (enable_extra_decorations ? ImGuiWindowFlags_AlwaysVerticalScrollbar : 0); ImGui::BeginChild(ImGui::GetID((void*)(intptr_t)i), ImVec2(-100, child_height), true, child_flags); if (scroll_to_off) ImGui::SetScrollX(scroll_to_off_px); if (scroll_to_pos) ImGui::SetScrollFromPosX(ImGui::GetCursorStartPos().x + scroll_to_pos_px, i * 0.25f); for (int item = 0; item < 100; item++) { if (enable_track && item == track_item) { ImGui::TextColored(ImVec4(1, 1, 0, 1), "Item %d", item); ImGui::SetScrollHereX(i * 0.25f); // 0.0f:left, 0.5f:center, 1.0f:right } else { ImGui::Text("Item %d", item); } ImGui::SameLine(); } float scroll_x = ImGui::GetScrollX(); float scroll_max_x = ImGui::GetScrollMaxX(); ImGui::EndChild(); ImGui::SameLine(); const char* names[] = { "Left", "25%", "Center", "75%", "Right" }; ImGui::Text("%s\n%.0f/%.0f", names[i], scroll_x, scroll_max_x); ImGui::Spacing(); } ImGui::PopID(); // Miscellaneous Horizontal Scrolling Demo HelpMarker("Horizontal scrolling for a window has to be enabled explicitly via the ImGuiWindowFlags_HorizontalScrollbar flag.\n\nYou may want to explicitly specify content width by calling SetNextWindowContentWidth() before Begin()."); static int lines = 7; ImGui::SliderInt("Lines", &lines, 1, 15); ImGui::PushStyleVar(ImGuiStyleVar_FrameRounding, 3.0f); ImGui::PushStyleVar(ImGuiStyleVar_FramePadding, ImVec2(2.0f, 1.0f)); ImGui::BeginChild("scrolling", ImVec2(0, ImGui::GetFrameHeightWithSpacing() * 7 + 30), true, ImGuiWindowFlags_HorizontalScrollbar); for (int line = 0; line < lines; line++) { // Display random stuff (for the sake of this trivial demo we are using basic Button+SameLine. If you want to create your own time line for a real application you may be better off // manipulating the cursor position yourself, aka using SetCursorPos/SetCursorScreenPos to position the widgets yourself. 
You may also want to use the lower-level ImDrawList API) int num_buttons = 10 + ((line & 1) ? line * 9 : line * 3); for (int n = 0; n < num_buttons; n++) { if (n > 0) ImGui::SameLine(); ImGui::PushID(n + line * 1000); char num_buf[16]; sprintf(num_buf, "%d", n); const char* label = (!(n%15)) ? "FizzBuzz" : (!(n%3)) ? "Fizz" : (!(n%5)) ? "Buzz" : num_buf; float hue = n*0.05f; ImGui::PushStyleColor(ImGuiCol_Button, (ImVec4)ImColor::HSV(hue, 0.6f, 0.6f)); ImGui::PushStyleColor(ImGuiCol_ButtonHovered, (ImVec4)ImColor::HSV(hue, 0.7f, 0.7f)); ImGui::PushStyleColor(ImGuiCol_ButtonActive, (ImVec4)ImColor::HSV(hue, 0.8f, 0.8f)); ImGui::Button(label, ImVec2(40.0f + sinf((float)(line + n)) * 20.0f, 0.0f)); ImGui::PopStyleColor(3); ImGui::PopID(); } } float scroll_x = ImGui::GetScrollX(); float scroll_max_x = ImGui::GetScrollMaxX(); ImGui::EndChild(); ImGui::PopStyleVar(2); float scroll_x_delta = 0.0f; ImGui::SmallButton("<<"); if (ImGui::IsItemActive()) { scroll_x_delta = -ImGui::GetIO().DeltaTime * 1000.0f; } ImGui::SameLine(); ImGui::Text("Scroll from code"); ImGui::SameLine(); ImGui::SmallButton(">>"); if (ImGui::IsItemActive()) { scroll_x_delta = +ImGui::GetIO().DeltaTime * 1000.0f; } ImGui::SameLine(); ImGui::Text("%.0f/%.0f", scroll_x, scroll_max_x); if (scroll_x_delta != 0.0f) { ImGui::BeginChild("scrolling"); // Demonstrate a trick: you can use Begin to set yourself in the context of another window (here we are already out of your child window) ImGui::SetScrollX(ImGui::GetScrollX() + scroll_x_delta); ImGui::EndChild(); } ImGui::Spacing(); static bool show_horizontal_contents_size_demo_window = false; ImGui::Checkbox("Show Horizontal contents size demo window", &show_horizontal_contents_size_demo_window); if (show_horizontal_contents_size_demo_window) { static bool show_h_scrollbar = true; static bool show_button = true; static bool show_tree_nodes = true; static bool show_text_wrapped = false; static bool show_columns = true; static bool show_tab_bar = true; static bool show_child = false; static bool explicit_content_size = false; static float contents_size_x = 300.0f; if (explicit_content_size) ImGui::SetNextWindowContentSize(ImVec2(contents_size_x, 0.0f)); ImGui::Begin("Horizontal contents size demo window", &show_horizontal_contents_size_demo_window, show_h_scrollbar ? 
ImGuiWindowFlags_HorizontalScrollbar : 0); ImGui::PushStyleVar(ImGuiStyleVar_ItemSpacing, ImVec2(2, 0)); ImGui::PushStyleVar(ImGuiStyleVar_FramePadding, ImVec2(2, 0)); HelpMarker("Test of different widgets react and impact the work rectangle growing when horizontal scrolling is enabled.\n\nUse 'Metrics->Tools->Show windows rectangles' to visualize rectangles."); ImGui::Checkbox("H-scrollbar", &show_h_scrollbar); ImGui::Checkbox("Button", &show_button); // Will grow contents size (unless explicitly overwritten) ImGui::Checkbox("Tree nodes", &show_tree_nodes); // Will grow contents size and display highlight over full width ImGui::Checkbox("Text wrapped", &show_text_wrapped);// Will grow and use contents size ImGui::Checkbox("Columns", &show_columns); // Will use contents size ImGui::Checkbox("Tab bar", &show_tab_bar); // Will use contents size ImGui::Checkbox("Child", &show_child); // Will grow and use contents size ImGui::Checkbox("Explicit content size", &explicit_content_size); ImGui::Text("Scroll %.1f/%.1f %.1f/%.1f", ImGui::GetScrollX(), ImGui::GetScrollMaxX(), ImGui::GetScrollY(), ImGui::GetScrollMaxY()); if (explicit_content_size) { ImGui::SameLine(); ImGui::SetNextItemWidth(100); ImGui::DragFloat("##csx", &contents_size_x); ImVec2 p = ImGui::GetCursorScreenPos(); ImGui::GetWindowDrawList()->AddRectFilled(p, ImVec2(p.x + 10, p.y + 10), IM_COL32_WHITE); ImGui::GetWindowDrawList()->AddRectFilled(ImVec2(p.x + contents_size_x - 10, p.y), ImVec2(p.x + contents_size_x, p.y + 10), IM_COL32_WHITE); ImGui::Dummy(ImVec2(0, 10)); } ImGui::PopStyleVar(2); ImGui::Separator(); if (show_button) { ImGui::Button("this is a 300-wide button", ImVec2(300, 0)); } if (show_tree_nodes) { bool open = true; if (ImGui::TreeNode("this is a tree node")) { if (ImGui::TreeNode("another one of those tree node...")) { ImGui::Text("Some tree contents"); ImGui::TreePop(); } ImGui::TreePop(); } ImGui::CollapsingHeader("CollapsingHeader", &open); } if (show_text_wrapped) { ImGui::TextWrapped("This text should automatically wrap on the edge of the work rectangle."); } if (show_columns) { ImGui::Columns(4); for (int n = 0; n < 4; n++) { ImGui::Text("Width %.2f", ImGui::GetColumnWidth()); ImGui::NextColumn(); } ImGui::Columns(1); } if (show_tab_bar && ImGui::BeginTabBar("Hello")) { if (ImGui::BeginTabItem("OneOneOne")) { ImGui::EndTabItem(); } if (ImGui::BeginTabItem("TwoTwoTwo")) { ImGui::EndTabItem(); } if (ImGui::BeginTabItem("ThreeThreeThree")) { ImGui::EndTabItem(); } if (ImGui::BeginTabItem("FourFourFour")) { ImGui::EndTabItem(); } ImGui::EndTabBar(); } if (show_child) { ImGui::BeginChild("child", ImVec2(0,0), true); ImGui::EndChild(); } ImGui::End(); } ImGui::TreePop(); } if (ImGui::TreeNode("Clipping")) { static ImVec2 size(100, 100), offset(50, 20); ImGui::TextWrapped("On a per-widget basis we are occasionally clipping text CPU-side if it won't fit in its frame. Otherwise we are doing coarser clipping + passing a scissor rectangle to the renderer. 
The system is designed to try minimizing both execution and CPU/GPU rendering cost."); ImGui::DragFloat2("size", (float*)&size, 0.5f, 1.0f, 200.0f, "%.0f"); ImGui::TextWrapped("(Click and drag)"); ImVec2 pos = ImGui::GetCursorScreenPos(); ImVec4 clip_rect(pos.x, pos.y, pos.x + size.x, pos.y + size.y); ImGui::InvisibleButton("##dummy", size); if (ImGui::IsItemActive() && ImGui::IsMouseDragging()) { offset.x += ImGui::GetIO().MouseDelta.x; offset.y += ImGui::GetIO().MouseDelta.y; } ImGui::GetWindowDrawList()->AddRectFilled(pos, ImVec2(pos.x + size.x, pos.y + size.y), IM_COL32(90, 90, 120, 255)); ImGui::GetWindowDrawList()->AddText(ImGui::GetFont(), ImGui::GetFontSize()*2.0f, ImVec2(pos.x + offset.x, pos.y + offset.y), IM_COL32(255, 255, 255, 255), "Line 1 hello\nLine 2 clip me!", NULL, 0.0f, &clip_rect); ImGui::TreePop(); } } static void ShowDemoWindowPopups() { if (!ImGui::CollapsingHeader("Popups & Modal windows")) return; // The properties of popup windows are: // - They block normal mouse hovering detection outside them. (*) // - Unless modal, they can be closed by clicking anywhere outside them, or by pressing ESCAPE. // - Their visibility state (~bool) is held internally by Dear ImGui instead of being held by the programmer as we are used to with regular Begin() calls. // User can manipulate the visibility state by calling OpenPopup(). // (*) One can use IsItemHovered(ImGuiHoveredFlags_AllowWhenBlockedByPopup) to bypass it and detect hovering even when normally blocked by a popup. // Those three properties are connected. The library needs to hold their visibility state because it can close popups at any time. // Typical use for regular windows: // bool my_tool_is_active = false; if (ImGui::Button("Open")) my_tool_is_active = true; [...] if (my_tool_is_active) Begin("My Tool", &my_tool_is_active) { [...] } End(); // Typical use for popups: // if (ImGui::Button("Open")) ImGui::OpenPopup("MyPopup"); if (ImGui::BeginPopup("MyPopup")) { [...] EndPopup(); } // With popups we have to go through a library call (here OpenPopup) to manipulate the visibility state. // This may be a bit confusing at first but it should quickly make sense. Follow the examples below. if (ImGui::TreeNode("Popups")) { ImGui::TextWrapped("When a popup is active, it inhibits interacting with windows that are behind the popup. Clicking outside the popup closes it."); static int selected_fish = -1; const char* names[] = { "Bream", "Haddock", "Mackerel", "Pollock", "Tilefish" }; static bool toggles[] = { true, false, false, false, false }; // Simple selection popup // (If you want to show the current selection inside the Button itself, you may want to build a string using the "###" operator to preserve a constant ID with a variable label) if (ImGui::Button("Select..")) ImGui::OpenPopup("my_select_popup"); ImGui::SameLine(); ImGui::TextUnformatted(selected_fish == -1 ? 
"<None>" : names[selected_fish]); if (ImGui::BeginPopup("my_select_popup")) { ImGui::Text("Aquarium"); ImGui::Separator(); for (int i = 0; i < IM_ARRAYSIZE(names); i++) if (ImGui::Selectable(names[i])) selected_fish = i; ImGui::EndPopup(); } // Showing a menu with toggles if (ImGui::Button("Toggle..")) ImGui::OpenPopup("my_toggle_popup"); if (ImGui::BeginPopup("my_toggle_popup")) { for (int i = 0; i < IM_ARRAYSIZE(names); i++) ImGui::MenuItem(names[i], "", &toggles[i]); if (ImGui::BeginMenu("Sub-menu")) { ImGui::MenuItem("Click me"); ImGui::EndMenu(); } ImGui::Separator(); ImGui::Text("Tooltip here"); if (ImGui::IsItemHovered()) ImGui::SetTooltip("I am a tooltip over a popup"); if (ImGui::Button("Stacked Popup")) ImGui::OpenPopup("another popup"); if (ImGui::BeginPopup("another popup")) { for (int i = 0; i < IM_ARRAYSIZE(names); i++) ImGui::MenuItem(names[i], "", &toggles[i]); if (ImGui::BeginMenu("Sub-menu")) { ImGui::MenuItem("Click me"); if (ImGui::Button("Stacked Popup")) ImGui::OpenPopup("another popup"); if (ImGui::BeginPopup("another popup")) { ImGui::Text("I am the last one here."); ImGui::EndPopup(); } ImGui::EndMenu(); } ImGui::EndPopup(); } ImGui::EndPopup(); } // Call the more complete ShowExampleMenuFile which we use in various places of this demo if (ImGui::Button("File Menu..")) ImGui::OpenPopup("my_file_popup"); if (ImGui::BeginPopup("my_file_popup")) { ShowExampleMenuFile(); ImGui::EndPopup(); } ImGui::TreePop(); } if (ImGui::TreeNode("Context menus")) { // BeginPopupContextItem() is a helper to provide common/simple popup behavior of essentially doing: // if (IsItemHovered() && IsMouseReleased(0)) // OpenPopup(id); // return BeginPopup(id); // For more advanced uses you may want to replicate and customize this code. See the comments inside the BeginPopupContextItem() implementation. static float value = 0.5f; ImGui::Text("Value = %.3f (<-- right-click here)", value); if (ImGui::BeginPopupContextItem("item context menu")) { if (ImGui::Selectable("Set to zero")) value = 0.0f; if (ImGui::Selectable("Set to PI")) value = 3.1415f; ImGui::SetNextItemWidth(-1); ImGui::DragFloat("##Value", &value, 0.1f, 0.0f, 0.0f); ImGui::EndPopup(); } // We can also use OpenPopupOnItemClick() which is the same as BeginPopupContextItem() but without the Begin call. // So here we will make it that clicking on the text field with the right mouse button (1) will toggle the visibility of the popup above. ImGui::Text("(You can also right-click me to open the same popup as above.)"); ImGui::OpenPopupOnItemClick("item context menu", 1); // When used after an item that has an ID (here the Button), we can skip providing an ID to BeginPopupContextItem(). // BeginPopupContextItem() will use the last item ID as the popup ID. // In addition here, we want to include our editable label inside the button label. 
We use the ### operator to override the ID (read FAQ about ID for details) static char name[32] = "Label1"; char buf[64]; sprintf(buf, "Button: %s###Button", name); // ### operator override ID ignoring the preceding label ImGui::Button(buf); if (ImGui::BeginPopupContextItem()) { ImGui::Text("Edit name:"); ImGui::InputText("##edit", name, IM_ARRAYSIZE(name)); if (ImGui::Button("Close")) ImGui::CloseCurrentPopup(); ImGui::EndPopup(); } ImGui::SameLine(); ImGui::Text("(<-- right-click here)"); ImGui::TreePop(); } if (ImGui::TreeNode("Modals")) { ImGui::TextWrapped("Modal windows are like popups but the user cannot close them by clicking outside the window."); if (ImGui::Button("Delete..")) ImGui::OpenPopup("Delete?"); if (ImGui::BeginPopupModal("Delete?", NULL, ImGuiWindowFlags_AlwaysAutoResize)) { ImGui::Text("All those beautiful files will be deleted.\nThis operation cannot be undone!\n\n"); ImGui::Separator(); //static int dummy_i = 0; //ImGui::Combo("Combo", &dummy_i, "Delete\0Delete harder\0"); static bool dont_ask_me_next_time = false; ImGui::PushStyleVar(ImGuiStyleVar_FramePadding, ImVec2(0, 0)); ImGui::Checkbox("Don't ask me next time", &dont_ask_me_next_time); ImGui::PopStyleVar(); if (ImGui::Button("OK", ImVec2(120, 0))) { ImGui::CloseCurrentPopup(); } ImGui::SetItemDefaultFocus(); ImGui::SameLine(); if (ImGui::Button("Cancel", ImVec2(120, 0))) { ImGui::CloseCurrentPopup(); } ImGui::EndPopup(); } if (ImGui::Button("Stacked modals..")) ImGui::OpenPopup("Stacked 1"); if (ImGui::BeginPopupModal("Stacked 1", NULL, ImGuiWindowFlags_MenuBar)) { if (ImGui::BeginMenuBar()) { if (ImGui::BeginMenu("File")) { if (ImGui::MenuItem("Dummy menu item")) {} ImGui::EndMenu(); } ImGui::EndMenuBar(); } ImGui::Text("Hello from Stacked The First\nUsing style.Colors[ImGuiCol_ModalWindowDimBg] behind it."); // Testing behavior of widgets stacking their own regular popups over the modal. static int item = 1; static float color[4] = { 0.4f,0.7f,0.0f,0.5f }; ImGui::Combo("Combo", &item, "aaaa\0bbbb\0cccc\0dddd\0eeee\0\0"); ImGui::ColorEdit4("color", color); if (ImGui::Button("Add another modal..")) ImGui::OpenPopup("Stacked 2"); // Also demonstrate passing a bool* to BeginPopupModal(), this will create a regular close button which will close the popup. // Note that the visibility state of popups is owned by imgui, so the input value of the bool actually doesn't matter here. bool dummy_open = true; if (ImGui::BeginPopupModal("Stacked 2", &dummy_open)) { ImGui::Text("Hello from Stacked The Second!"); if (ImGui::Button("Close")) ImGui::CloseCurrentPopup(); ImGui::EndPopup(); } if (ImGui::Button("Close")) ImGui::CloseCurrentPopup(); ImGui::EndPopup(); } ImGui::TreePop(); } if (ImGui::TreeNode("Menus inside a regular window")) { ImGui::TextWrapped("Below we are testing adding menu items to a regular window. It's rather unusual but should work!"); ImGui::Separator(); // NB: As a quirk in this very specific example, we want to differentiate the parent of this menu from the parent of the various popup menus above. // To do so we are encloding the items in a PushID()/PopID() block to make them two different menusets. If we don't, opening any popup above and hovering our menu here // would open it. This is because once a menu is active, we allow to switch to a sibling menu by just hovering on it, which is the desired behavior for regular menus. 
ImGui::PushID("foo"); ImGui::MenuItem("Menu item", "CTRL+M"); if (ImGui::BeginMenu("Menu inside a regular window")) { ShowExampleMenuFile(); ImGui::EndMenu(); } ImGui::PopID(); ImGui::Separator(); ImGui::TreePop(); } } static void ShowDemoWindowColumns() { if (!ImGui::CollapsingHeader("Columns")) return; ImGui::PushID("Columns"); static bool disable_indent = false; ImGui::Checkbox("Disable tree indentation", &disable_indent); ImGui::SameLine(); HelpMarker("Disable the indenting of tree nodes so demo columns can use the full window width."); if (disable_indent) ImGui::PushStyleVar(ImGuiStyleVar_IndentSpacing, 0.0f); // Basic columns if (ImGui::TreeNode("Basic")) { ImGui::Text("Without border:"); ImGui::Columns(3, "mycolumns3", false); // 3-ways, no border ImGui::Separator(); for (int n = 0; n < 14; n++) { char label[32]; sprintf(label, "Item %d", n); if (ImGui::Selectable(label)) {} //if (ImGui::Button(label, ImVec2(-FLT_MIN,0.0f))) {} ImGui::NextColumn(); } ImGui::Columns(1); ImGui::Separator(); ImGui::Text("With border:"); ImGui::Columns(4, "mycolumns"); // 4-ways, with border ImGui::Separator(); ImGui::Text("ID"); ImGui::NextColumn(); ImGui::Text("Name"); ImGui::NextColumn(); ImGui::Text("Path"); ImGui::NextColumn(); ImGui::Text("Hovered"); ImGui::NextColumn(); ImGui::Separator(); const char* names[3] = { "One", "Two", "Three" }; const char* paths[3] = { "/path/one", "/path/two", "/path/three" }; static int selected = -1; for (int i = 0; i < 3; i++) { char label[32]; sprintf(label, "%04d", i); if (ImGui::Selectable(label, selected == i, ImGuiSelectableFlags_SpanAllColumns)) selected = i; bool hovered = ImGui::IsItemHovered(); ImGui::NextColumn(); ImGui::Text(names[i]); ImGui::NextColumn(); ImGui::Text(paths[i]); ImGui::NextColumn(); ImGui::Text("%d", hovered); ImGui::NextColumn(); } ImGui::Columns(1); ImGui::Separator(); ImGui::TreePop(); } if (ImGui::TreeNode("Borders")) { // NB: Future columns API should allow automatic horizontal borders. 
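// (Reminder of the pattern used by the demos below: ImGui::Columns(count) starts the layout, ImGui::NextColumn()
//  advances to the next cell, and ImGui::Columns(1) restores a single column, e.g.
//      ImGui::Columns(3); ImGui::Text("A"); ImGui::NextColumn(); ImGui::Text("B"); ImGui::NextColumn(); ImGui::Text("C"); ImGui::Columns(1);
//  Horizontal borders are emulated by calling ImGui::Separator() whenever GetColumnIndex() == 0, as done below.)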
static bool h_borders = true; static bool v_borders = true; static int columns_count = 4; const int lines_count = 3; ImGui::SetNextItemWidth(ImGui::GetFontSize() * 8); ImGui::DragInt("##columns_count", &columns_count, 0.1f, 2, 10, "%d columns"); if (columns_count < 2) columns_count = 2; ImGui::SameLine(); ImGui::Checkbox("horizontal", &h_borders); ImGui::SameLine(); ImGui::Checkbox("vertical", &v_borders); ImGui::Columns(columns_count, NULL, v_borders); for (int i = 0; i < columns_count * lines_count; i++) { if (h_borders && ImGui::GetColumnIndex() == 0) ImGui::Separator(); ImGui::Text("%c%c%c", 'a' + i, 'a' + i, 'a' + i); ImGui::Text("Width %.2f", ImGui::GetColumnWidth()); ImGui::Text("Avail %.2f", ImGui::GetContentRegionAvail().x); ImGui::Text("Offset %.2f", ImGui::GetColumnOffset()); ImGui::Text("Long text that is likely to clip"); ImGui::Button("Button", ImVec2(-FLT_MIN, 0.0f)); ImGui::NextColumn(); } ImGui::Columns(1); if (h_borders) ImGui::Separator(); ImGui::TreePop(); } // Create multiple items in a same cell before switching to next column if (ImGui::TreeNode("Mixed items")) { ImGui::Columns(3, "mixed"); ImGui::Separator(); ImGui::Text("Hello"); ImGui::Button("Banana"); ImGui::NextColumn(); ImGui::Text("ImGui"); ImGui::Button("Apple"); static float foo = 1.0f; ImGui::InputFloat("red", &foo, 0.05f, 0, "%.3f"); ImGui::Text("An extra line here."); ImGui::NextColumn(); ImGui::Text("Sailor"); ImGui::Button("Corniflower"); static float bar = 1.0f; ImGui::InputFloat("blue", &bar, 0.05f, 0, "%.3f"); ImGui::NextColumn(); if (ImGui::CollapsingHeader("Category A")) { ImGui::Text("Blah blah blah"); } ImGui::NextColumn(); if (ImGui::CollapsingHeader("Category B")) { ImGui::Text("Blah blah blah"); } ImGui::NextColumn(); if (ImGui::CollapsingHeader("Category C")) { ImGui::Text("Blah blah blah"); } ImGui::NextColumn(); ImGui::Columns(1); ImGui::Separator(); ImGui::TreePop(); } // Word wrapping if (ImGui::TreeNode("Word-wrapping")) { ImGui::Columns(2, "word-wrapping"); ImGui::Separator(); ImGui::TextWrapped("The quick brown fox jumps over the lazy dog."); ImGui::TextWrapped("Hello Left"); ImGui::NextColumn(); ImGui::TextWrapped("The quick brown fox jumps over the lazy dog."); ImGui::TextWrapped("Hello Right"); ImGui::Columns(1); ImGui::Separator(); ImGui::TreePop(); } // Scrolling columns /* if (ImGui::TreeNode("Vertical Scrolling")) { ImGui::BeginChild("##header", ImVec2(0, ImGui::GetTextLineHeightWithSpacing()+ImGui::GetStyle().ItemSpacing.y)); ImGui::Columns(3); ImGui::Text("ID"); ImGui::NextColumn(); ImGui::Text("Name"); ImGui::NextColumn(); ImGui::Text("Path"); ImGui::NextColumn(); ImGui::Columns(1); ImGui::Separator(); ImGui::EndChild(); ImGui::BeginChild("##scrollingregion", ImVec2(0, 60)); ImGui::Columns(3); for (int i = 0; i < 10; i++) { ImGui::Text("%04d", i); ImGui::NextColumn(); ImGui::Text("Foobar"); ImGui::NextColumn(); ImGui::Text("/path/foobar/%04d/", i); ImGui::NextColumn(); } ImGui::Columns(1); ImGui::EndChild(); ImGui::TreePop(); } */ if (ImGui::TreeNode("Horizontal Scrolling")) { ImGui::SetNextWindowContentSize(ImVec2(1500.0f, 0.0f)); ImGui::BeginChild("##ScrollingRegion", ImVec2(0, ImGui::GetFontSize() * 20), false, ImGuiWindowFlags_HorizontalScrollbar); ImGui::Columns(10); int ITEMS_COUNT = 2000; ImGuiListClipper clipper(ITEMS_COUNT); // Also demonstrate using the clipper for large list while (clipper.Step()) { for (int i = clipper.DisplayStart; i < clipper.DisplayEnd; i++) for (int j = 0; j < 10; j++) { ImGui::Text("Line %d Column %d...", i, j); ImGui::NextColumn(); } } 
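// (Note on the clipper used above, kept as a comment so the demo is unchanged: ImGuiListClipper only submits the
//  visible range of lines. The general pattern is
//      ImGuiListClipper clipper(items_count);
//      while (clipper.Step())
//          for (int i = clipper.DisplayStart; i < clipper.DisplayEnd; i++)
//              { /* submit item i */ }
//  which is exactly what the loop above does for the 2000 x 10 grid of cells.)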
ImGui::Columns(1); ImGui::EndChild(); ImGui::TreePop(); } if (ImGui::TreeNode("Tree")) { ImGui::Columns(2, "tree", true); for (int x = 0; x < 3; x++) { bool open1 = ImGui::TreeNode((void*)(intptr_t)x, "Node%d", x); ImGui::NextColumn(); ImGui::Text("Node contents"); ImGui::NextColumn(); if (open1) { for (int y = 0; y < 3; y++) { bool open2 = ImGui::TreeNode((void*)(intptr_t)y, "Node%d.%d", x, y); ImGui::NextColumn(); ImGui::Text("Node contents"); if (open2) { ImGui::Text("Even more contents"); if (ImGui::TreeNode("Tree in column")) { ImGui::Text("The quick brown fox jumps over the lazy dog"); ImGui::TreePop(); } } ImGui::NextColumn(); if (open2) ImGui::TreePop(); } ImGui::TreePop(); } } ImGui::Columns(1); ImGui::TreePop(); } if (disable_indent) ImGui::PopStyleVar(); ImGui::PopID(); } static void ShowDemoWindowMisc() { if (ImGui::CollapsingHeader("Filtering")) { static ImGuiTextFilter filter; ImGui::Text("Filter usage:\n" " \"\" display all lines\n" " \"xxx\" display lines containing \"xxx\"\n" " \"xxx,yyy\" display lines containing \"xxx\" or \"yyy\"\n" " \"-xxx\" hide lines containing \"xxx\""); filter.Draw(); const char* lines[] = { "aaa1.c", "bbb1.c", "ccc1.c", "aaa2.cpp", "bbb2.cpp", "ccc2.cpp", "abc.h", "hello, world" }; for (int i = 0; i < IM_ARRAYSIZE(lines); i++) if (filter.PassFilter(lines[i])) ImGui::BulletText("%s", lines[i]); } if (ImGui::CollapsingHeader("Inputs, Navigation & Focus")) { ImGuiIO& io = ImGui::GetIO(); ImGui::Text("WantCaptureMouse: %d", io.WantCaptureMouse); ImGui::Text("WantCaptureKeyboard: %d", io.WantCaptureKeyboard); ImGui::Text("WantTextInput: %d", io.WantTextInput); ImGui::Text("WantSetMousePos: %d", io.WantSetMousePos); ImGui::Text("NavActive: %d, NavVisible: %d", io.NavActive, io.NavVisible); if (ImGui::TreeNode("Keyboard, Mouse & Navigation State")) { if (ImGui::IsMousePosValid()) ImGui::Text("Mouse pos: (%g, %g)", io.MousePos.x, io.MousePos.y); else ImGui::Text("Mouse pos: <INVALID>"); ImGui::Text("Mouse delta: (%g, %g)", io.MouseDelta.x, io.MouseDelta.y); ImGui::Text("Mouse down:"); for (int i = 0; i < IM_ARRAYSIZE(io.MouseDown); i++) if (io.MouseDownDuration[i] >= 0.0f) { ImGui::SameLine(); ImGui::Text("b%d (%.02f secs)", i, io.MouseDownDuration[i]); } ImGui::Text("Mouse clicked:"); for (int i = 0; i < IM_ARRAYSIZE(io.MouseDown); i++) if (ImGui::IsMouseClicked(i)) { ImGui::SameLine(); ImGui::Text("b%d", i); } ImGui::Text("Mouse dbl-clicked:"); for (int i = 0; i < IM_ARRAYSIZE(io.MouseDown); i++) if (ImGui::IsMouseDoubleClicked(i)) { ImGui::SameLine(); ImGui::Text("b%d", i); } ImGui::Text("Mouse released:"); for (int i = 0; i < IM_ARRAYSIZE(io.MouseDown); i++) if (ImGui::IsMouseReleased(i)) { ImGui::SameLine(); ImGui::Text("b%d", i); } ImGui::Text("Mouse wheel: %.1f", io.MouseWheel); ImGui::Text("Keys down:"); for (int i = 0; i < IM_ARRAYSIZE(io.KeysDown); i++) if (io.KeysDownDuration[i] >= 0.0f) { ImGui::SameLine(); ImGui::Text("%d (0x%X) (%.02f secs)", i, i, io.KeysDownDuration[i]); } ImGui::Text("Keys pressed:"); for (int i = 0; i < IM_ARRAYSIZE(io.KeysDown); i++) if (ImGui::IsKeyPressed(i)) { ImGui::SameLine(); ImGui::Text("%d (0x%X)", i, i); } ImGui::Text("Keys release:"); for (int i = 0; i < IM_ARRAYSIZE(io.KeysDown); i++) if (ImGui::IsKeyReleased(i)) { ImGui::SameLine(); ImGui::Text("%d (0x%X)", i, i); } ImGui::Text("Keys mods: %s%s%s%s", io.KeyCtrl ? "CTRL " : "", io.KeyShift ? "SHIFT " : "", io.KeyAlt ? "ALT " : "", io.KeySuper ? 
"SUPER " : ""); ImGui::Text("Chars queue:"); for (int i = 0; i < io.InputQueueCharacters.Size; i++) { ImWchar c = io.InputQueueCharacters[i]; ImGui::SameLine(); ImGui::Text("\'%c\' (0x%04X)", (c > ' ' && c <= 255) ? (char)c : '?', c); } // FIXME: We should convert 'c' to UTF-8 here but the functions are not public. ImGui::Text("NavInputs down:"); for (int i = 0; i < IM_ARRAYSIZE(io.NavInputs); i++) if (io.NavInputs[i] > 0.0f) { ImGui::SameLine(); ImGui::Text("[%d] %.2f", i, io.NavInputs[i]); } ImGui::Text("NavInputs pressed:"); for (int i = 0; i < IM_ARRAYSIZE(io.NavInputs); i++) if (io.NavInputsDownDuration[i] == 0.0f) { ImGui::SameLine(); ImGui::Text("[%d]", i); } ImGui::Text("NavInputs duration:"); for (int i = 0; i < IM_ARRAYSIZE(io.NavInputs); i++) if (io.NavInputsDownDuration[i] >= 0.0f) { ImGui::SameLine(); ImGui::Text("[%d] %.2f", i, io.NavInputsDownDuration[i]); } ImGui::Button("Hovering me sets the\nkeyboard capture flag"); if (ImGui::IsItemHovered()) ImGui::CaptureKeyboardFromApp(true); ImGui::SameLine(); ImGui::Button("Holding me clears the\nthe keyboard capture flag"); if (ImGui::IsItemActive()) ImGui::CaptureKeyboardFromApp(false); ImGui::TreePop(); } if (ImGui::TreeNode("Tabbing")) { ImGui::Text("Use TAB/SHIFT+TAB to cycle through keyboard editable fields."); static char buf[32] = "dummy"; ImGui::InputText("1", buf, IM_ARRAYSIZE(buf)); ImGui::InputText("2", buf, IM_ARRAYSIZE(buf)); ImGui::InputText("3", buf, IM_ARRAYSIZE(buf)); ImGui::PushAllowKeyboardFocus(false); ImGui::InputText("4 (tab skip)", buf, IM_ARRAYSIZE(buf)); //ImGui::SameLine(); HelpMarker("Use ImGui::PushAllowKeyboardFocus(bool)\nto disable tabbing through certain widgets."); ImGui::PopAllowKeyboardFocus(); ImGui::InputText("5", buf, IM_ARRAYSIZE(buf)); ImGui::TreePop(); } if (ImGui::TreeNode("Focus from code")) { bool focus_1 = ImGui::Button("Focus on 1"); ImGui::SameLine(); bool focus_2 = ImGui::Button("Focus on 2"); ImGui::SameLine(); bool focus_3 = ImGui::Button("Focus on 3"); int has_focus = 0; static char buf[128] = "click on a button to set focus"; if (focus_1) ImGui::SetKeyboardFocusHere(); ImGui::InputText("1", buf, IM_ARRAYSIZE(buf)); if (ImGui::IsItemActive()) has_focus = 1; if (focus_2) ImGui::SetKeyboardFocusHere(); ImGui::InputText("2", buf, IM_ARRAYSIZE(buf)); if (ImGui::IsItemActive()) has_focus = 2; ImGui::PushAllowKeyboardFocus(false); if (focus_3) ImGui::SetKeyboardFocusHere(); ImGui::InputText("3 (tab skip)", buf, IM_ARRAYSIZE(buf)); if (ImGui::IsItemActive()) has_focus = 3; ImGui::PopAllowKeyboardFocus(); if (has_focus) ImGui::Text("Item with focus: %d", has_focus); else ImGui::Text("Item with focus: <none>"); // Use >= 0 parameter to SetKeyboardFocusHere() to focus an upcoming item static float f3[3] = { 0.0f, 0.0f, 0.0f }; int focus_ahead = -1; if (ImGui::Button("Focus on X")) { focus_ahead = 0; } ImGui::SameLine(); if (ImGui::Button("Focus on Y")) { focus_ahead = 1; } ImGui::SameLine(); if (ImGui::Button("Focus on Z")) { focus_ahead = 2; } if (focus_ahead != -1) ImGui::SetKeyboardFocusHere(focus_ahead); ImGui::SliderFloat3("Float3", &f3[0], 0.0f, 1.0f); ImGui::TextWrapped("NB: Cursor & selection are preserved when refocusing last used item in code."); ImGui::TreePop(); } if (ImGui::TreeNode("Dragging")) { ImGui::TextWrapped("You can use ImGui::GetMouseDragDelta(0) to query for the dragged amount on any widget."); for (int button = 0; button < 3; button++) ImGui::Text("IsMouseDragging(%d):\n w/ default threshold: %d,\n w/ zero threshold: %d\n w/ large threshold: %d", button, 
ImGui::IsMouseDragging(button), ImGui::IsMouseDragging(button, 0.0f), ImGui::IsMouseDragging(button, 20.0f)); ImGui::Button("Drag Me"); if (ImGui::IsItemActive()) ImGui::GetForegroundDrawList()->AddLine(io.MouseClickedPos[0], io.MousePos, ImGui::GetColorU32(ImGuiCol_Button), 4.0f); // Draw a line between the button and the mouse cursor // Drag operations gets "unlocked" when the mouse has moved past a certain threshold (the default threshold is stored in io.MouseDragThreshold) // You can request a lower or higher threshold using the second parameter of IsMouseDragging() and GetMouseDragDelta() ImVec2 value_raw = ImGui::GetMouseDragDelta(0, 0.0f); ImVec2 value_with_lock_threshold = ImGui::GetMouseDragDelta(0); ImVec2 mouse_delta = io.MouseDelta; ImGui::Text("GetMouseDragDelta(0):\n w/ default threshold: (%.1f, %.1f),\n w/ zero threshold: (%.1f, %.1f)\nMouseDelta: (%.1f, %.1f)", value_with_lock_threshold.x, value_with_lock_threshold.y, value_raw.x, value_raw.y, mouse_delta.x, mouse_delta.y); ImGui::TreePop(); } if (ImGui::TreeNode("Mouse cursors")) { const char* mouse_cursors_names[] = { "Arrow", "TextInput", "Move", "ResizeNS", "ResizeEW", "ResizeNESW", "ResizeNWSE", "Hand" }; IM_ASSERT(IM_ARRAYSIZE(mouse_cursors_names) == ImGuiMouseCursor_COUNT); ImGui::Text("Current mouse cursor = %d: %s", ImGui::GetMouseCursor(), mouse_cursors_names[ImGui::GetMouseCursor()]); ImGui::Text("Hover to see mouse cursors:"); ImGui::SameLine(); HelpMarker("Your application can render a different mouse cursor based on what ImGui::GetMouseCursor() returns. If software cursor rendering (io.MouseDrawCursor) is set ImGui will draw the right cursor for you, otherwise your backend needs to handle it."); for (int i = 0; i < ImGuiMouseCursor_COUNT; i++) { char label[32]; sprintf(label, "Mouse cursor %d: %s", i, mouse_cursors_names[i]); ImGui::Bullet(); ImGui::Selectable(label, false); if (ImGui::IsItemHovered() || ImGui::IsItemFocused()) ImGui::SetMouseCursor(i); } ImGui::TreePop(); } } } //----------------------------------------------------------------------------- // [SECTION] About Window / ShowAboutWindow() // Access from Dear ImGui Demo -> Help -> About //----------------------------------------------------------------------------- void ImGui::ShowAboutWindow(bool* p_open) { if (!ImGui::Begin("About Dear ImGui", p_open, ImGuiWindowFlags_AlwaysAutoResize)) { ImGui::End(); return; } ImGui::Text("Dear ImGui %s", ImGui::GetVersion()); ImGui::Separator(); ImGui::Text("By Omar Cornut and all dear imgui contributors."); ImGui::Text("Dear ImGui is licensed under the MIT License, see LICENSE for more information."); static bool show_config_info = false; ImGui::Checkbox("Config/Build Information", &show_config_info); if (show_config_info) { ImGuiIO& io = ImGui::GetIO(); ImGuiStyle& style = ImGui::GetStyle(); bool copy_to_clipboard = ImGui::Button("Copy to clipboard"); ImGui::BeginChildFrame(ImGui::GetID("cfginfos"), ImVec2(0, ImGui::GetTextLineHeightWithSpacing() * 18), ImGuiWindowFlags_NoMove); if (copy_to_clipboard) ImGui::LogToClipboard(); ImGui::Text("Dear ImGui %s (%d)", IMGUI_VERSION, IMGUI_VERSION_NUM); ImGui::Separator(); ImGui::Text("sizeof(size_t): %d, sizeof(ImDrawIdx): %d, sizeof(ImDrawVert): %d", (int)sizeof(size_t), (int)sizeof(ImDrawIdx), (int)sizeof(ImDrawVert)); ImGui::Text("define: __cplusplus=%d", (int)__cplusplus); #ifdef IMGUI_DISABLE_OBSOLETE_FUNCTIONS ImGui::Text("define: IMGUI_DISABLE_OBSOLETE_FUNCTIONS"); #endif #ifdef IMGUI_DISABLE_WIN32_DEFAULT_CLIPBOARD_FUNCTIONS ImGui::Text("define: 
IMGUI_DISABLE_WIN32_DEFAULT_CLIPBOARD_FUNCTIONS"); #endif #ifdef IMGUI_DISABLE_WIN32_DEFAULT_IME_FUNCTIONS ImGui::Text("define: IMGUI_DISABLE_WIN32_DEFAULT_IME_FUNCTIONS"); #endif #ifdef IMGUI_DISABLE_WIN32_FUNCTIONS ImGui::Text("define: IMGUI_DISABLE_WIN32_FUNCTIONS"); #endif #ifdef IMGUI_DISABLE_FORMAT_STRING_FUNCTIONS ImGui::Text("define: IMGUI_DISABLE_FORMAT_STRING_FUNCTIONS"); #endif #ifdef IMGUI_DISABLE_MATH_FUNCTIONS ImGui::Text("define: IMGUI_DISABLE_MATH_FUNCTIONS"); #endif #ifdef IMGUI_DISABLE_DEFAULT_ALLOCATORS ImGui::Text("define: IMGUI_DISABLE_DEFAULT_ALLOCATORS"); #endif #ifdef IMGUI_USE_BGRA_PACKED_COLOR ImGui::Text("define: IMGUI_USE_BGRA_PACKED_COLOR"); #endif #ifdef _WIN32 ImGui::Text("define: _WIN32"); #endif #ifdef _WIN64 ImGui::Text("define: _WIN64"); #endif #ifdef __linux__ ImGui::Text("define: __linux__"); #endif #ifdef __APPLE__ ImGui::Text("define: __APPLE__"); #endif #ifdef _MSC_VER ImGui::Text("define: _MSC_VER=%d", _MSC_VER); #endif #ifdef __MINGW32__ ImGui::Text("define: __MINGW32__"); #endif #ifdef __MINGW64__ ImGui::Text("define: __MINGW64__"); #endif #ifdef __GNUC__ ImGui::Text("define: __GNUC__=%d", (int)__GNUC__); #endif #ifdef __clang_version__ ImGui::Text("define: __clang_version__=%s", __clang_version__); #endif ImGui::Separator(); ImGui::Text("io.BackendPlatformName: %s", io.BackendPlatformName ? io.BackendPlatformName : "NULL"); ImGui::Text("io.BackendRendererName: %s", io.BackendRendererName ? io.BackendRendererName : "NULL"); ImGui::Text("io.ConfigFlags: 0x%08X", io.ConfigFlags); if (io.ConfigFlags & ImGuiConfigFlags_NavEnableKeyboard) ImGui::Text(" NavEnableKeyboard"); if (io.ConfigFlags & ImGuiConfigFlags_NavEnableGamepad) ImGui::Text(" NavEnableGamepad"); if (io.ConfigFlags & ImGuiConfigFlags_NavEnableSetMousePos) ImGui::Text(" NavEnableSetMousePos"); if (io.ConfigFlags & ImGuiConfigFlags_NavNoCaptureKeyboard) ImGui::Text(" NavNoCaptureKeyboard"); if (io.ConfigFlags & ImGuiConfigFlags_NoMouse) ImGui::Text(" NoMouse"); if (io.ConfigFlags & ImGuiConfigFlags_NoMouseCursorChange) ImGui::Text(" NoMouseCursorChange"); if (io.MouseDrawCursor) ImGui::Text("io.MouseDrawCursor"); if (io.ConfigMacOSXBehaviors) ImGui::Text("io.ConfigMacOSXBehaviors"); if (io.ConfigInputTextCursorBlink) ImGui::Text("io.ConfigInputTextCursorBlink"); if (io.ConfigWindowsResizeFromEdges) ImGui::Text("io.ConfigWindowsResizeFromEdges"); if (io.ConfigWindowsMoveFromTitleBarOnly) ImGui::Text("io.ConfigWindowsMoveFromTitleBarOnly"); ImGui::Text("io.BackendFlags: 0x%08X", io.BackendFlags); if (io.BackendFlags & ImGuiBackendFlags_HasGamepad) ImGui::Text(" HasGamepad"); if (io.BackendFlags & ImGuiBackendFlags_HasMouseCursors) ImGui::Text(" HasMouseCursors"); if (io.BackendFlags & ImGuiBackendFlags_HasSetMousePos) ImGui::Text(" HasSetMousePos"); if (io.BackendFlags & ImGuiBackendFlags_RendererHasVtxOffset) ImGui::Text(" RendererHasVtxOffset"); ImGui::Separator(); ImGui::Text("io.Fonts: %d fonts, Flags: 0x%08X, TexSize: %d,%d", io.Fonts->Fonts.Size, io.Fonts->Flags, io.Fonts->TexWidth, io.Fonts->TexHeight); ImGui::Text("io.DisplaySize: %.2f,%.2f", io.DisplaySize.x, io.DisplaySize.y); ImGui::Text("io.DisplayFramebufferScale: %.2f,%.2f", io.DisplayFramebufferScale.x, io.DisplayFramebufferScale.y); ImGui::Separator(); ImGui::Text("style.WindowPadding: %.2f,%.2f", style.WindowPadding.x, style.WindowPadding.y); ImGui::Text("style.WindowBorderSize: %.2f", style.WindowBorderSize); ImGui::Text("style.FramePadding: %.2f,%.2f", style.FramePadding.x, style.FramePadding.y); 
ImGui::Text("style.FrameRounding: %.2f", style.FrameRounding); ImGui::Text("style.FrameBorderSize: %.2f", style.FrameBorderSize); ImGui::Text("style.ItemSpacing: %.2f,%.2f", style.ItemSpacing.x, style.ItemSpacing.y); ImGui::Text("style.ItemInnerSpacing: %.2f,%.2f", style.ItemInnerSpacing.x, style.ItemInnerSpacing.y); if (copy_to_clipboard) ImGui::LogFinish(); ImGui::EndChildFrame(); } ImGui::End(); } //----------------------------------------------------------------------------- // [SECTION] Style Editor / ShowStyleEditor() //----------------------------------------------------------------------------- // Demo helper function to select among default colors. See ShowStyleEditor() for more advanced options. // Here we use the simplified Combo() api that packs items into a single literal string. Useful for quick combo boxes where the choices are known locally. bool ImGui::ShowStyleSelector(const char* label) { static int style_idx = -1; if (ImGui::Combo(label, &style_idx, "Classic\0Dark\0Light\0")) { switch (style_idx) { case 0: ImGui::StyleColorsClassic(); break; case 1: ImGui::StyleColorsDark(); break; case 2: ImGui::StyleColorsLight(); break; } return true; } return false; } // Demo helper function to select among loaded fonts. // Here we use the regular BeginCombo()/EndCombo() api which is more the more flexible one. void ImGui::ShowFontSelector(const char* label) { ImGuiIO& io = ImGui::GetIO(); ImFont* font_current = ImGui::GetFont(); if (ImGui::BeginCombo(label, font_current->GetDebugName())) { for (int n = 0; n < io.Fonts->Fonts.Size; n++) { ImFont* font = io.Fonts->Fonts[n]; ImGui::PushID((void*)font); if (ImGui::Selectable(font->GetDebugName(), font == font_current)) io.FontDefault = font; ImGui::PopID(); } ImGui::EndCombo(); } ImGui::SameLine(); HelpMarker( "- Load additional fonts with io.Fonts->AddFontFromFileTTF().\n" "- The font atlas is built when calling io.Fonts->GetTexDataAsXXXX() or io.Fonts->Build().\n" "- Read FAQ and documentation in misc/fonts/ for more details.\n" "- If you need to add/remove fonts at runtime (e.g. for DPI change), do it before calling NewFrame()."); } void ImGui::ShowStyleEditor(ImGuiStyle* ref) { // You can pass in a reference ImGuiStyle structure to compare to, revert to and save to (else it compares to an internally stored reference) ImGuiStyle& style = ImGui::GetStyle(); static ImGuiStyle ref_saved_style; // Default to using internal storage as reference static bool init = true; if (init && ref == NULL) ref_saved_style = style; init = false; if (ref == NULL) ref = &ref_saved_style; ImGui::PushItemWidth(ImGui::GetWindowWidth() * 0.50f); if (ImGui::ShowStyleSelector("Colors##Selector")) ref_saved_style = style; ImGui::ShowFontSelector("Fonts##Selector"); // Simplified Settings if (ImGui::SliderFloat("FrameRounding", &style.FrameRounding, 0.0f, 12.0f, "%.0f")) style.GrabRounding = style.FrameRounding; // Make GrabRounding always the same value as FrameRounding { bool window_border = (style.WindowBorderSize > 0.0f); if (ImGui::Checkbox("WindowBorder", &window_border)) style.WindowBorderSize = window_border ? 1.0f : 0.0f; } ImGui::SameLine(); { bool frame_border = (style.FrameBorderSize > 0.0f); if (ImGui::Checkbox("FrameBorder", &frame_border)) style.FrameBorderSize = frame_border ? 1.0f : 0.0f; } ImGui::SameLine(); { bool popup_border = (style.PopupBorderSize > 0.0f); if (ImGui::Checkbox("PopupBorder", &popup_border)) style.PopupBorderSize = popup_border ? 
1.0f : 0.0f; } // Save/Revert button if (ImGui::Button("Save Ref")) *ref = ref_saved_style = style; ImGui::SameLine(); if (ImGui::Button("Revert Ref")) style = *ref; ImGui::SameLine(); HelpMarker("Save/Revert in local non-persistent storage. Default Colors definition are not affected. Use \"Export Colors\" below to save them somewhere."); ImGui::Separator(); if (ImGui::BeginTabBar("##tabs", ImGuiTabBarFlags_None)) { if (ImGui::BeginTabItem("Sizes")) { ImGui::Text("Main"); ImGui::SliderFloat2("WindowPadding", (float*)&style.WindowPadding, 0.0f, 20.0f, "%.0f"); ImGui::SliderFloat2("FramePadding", (float*)&style.FramePadding, 0.0f, 20.0f, "%.0f"); ImGui::SliderFloat2("ItemSpacing", (float*)&style.ItemSpacing, 0.0f, 20.0f, "%.0f"); ImGui::SliderFloat2("ItemInnerSpacing", (float*)&style.ItemInnerSpacing, 0.0f, 20.0f, "%.0f"); ImGui::SliderFloat2("TouchExtraPadding", (float*)&style.TouchExtraPadding, 0.0f, 10.0f, "%.0f"); ImGui::SliderFloat("IndentSpacing", &style.IndentSpacing, 0.0f, 30.0f, "%.0f"); ImGui::SliderFloat("ScrollbarSize", &style.ScrollbarSize, 1.0f, 20.0f, "%.0f"); ImGui::SliderFloat("GrabMinSize", &style.GrabMinSize, 1.0f, 20.0f, "%.0f"); ImGui::Text("Borders"); ImGui::SliderFloat("WindowBorderSize", &style.WindowBorderSize, 0.0f, 1.0f, "%.0f"); ImGui::SliderFloat("ChildBorderSize", &style.ChildBorderSize, 0.0f, 1.0f, "%.0f"); ImGui::SliderFloat("PopupBorderSize", &style.PopupBorderSize, 0.0f, 1.0f, "%.0f"); ImGui::SliderFloat("FrameBorderSize", &style.FrameBorderSize, 0.0f, 1.0f, "%.0f"); ImGui::SliderFloat("TabBorderSize", &style.TabBorderSize, 0.0f, 1.0f, "%.0f"); ImGui::Text("Rounding"); ImGui::SliderFloat("WindowRounding", &style.WindowRounding, 0.0f, 12.0f, "%.0f"); ImGui::SliderFloat("ChildRounding", &style.ChildRounding, 0.0f, 12.0f, "%.0f"); ImGui::SliderFloat("FrameRounding", &style.FrameRounding, 0.0f, 12.0f, "%.0f"); ImGui::SliderFloat("PopupRounding", &style.PopupRounding, 0.0f, 12.0f, "%.0f"); ImGui::SliderFloat("ScrollbarRounding", &style.ScrollbarRounding, 0.0f, 12.0f, "%.0f"); ImGui::SliderFloat("GrabRounding", &style.GrabRounding, 0.0f, 12.0f, "%.0f"); ImGui::SliderFloat("TabRounding", &style.TabRounding, 0.0f, 12.0f, "%.0f"); ImGui::Text("Alignment"); ImGui::SliderFloat2("WindowTitleAlign", (float*)&style.WindowTitleAlign, 0.0f, 1.0f, "%.2f"); ImGui::Combo("WindowMenuButtonPosition", (int*)&style.WindowMenuButtonPosition, "Left\0Right\0"); ImGui::Combo("ColorButtonPosition", (int*)&style.ColorButtonPosition, "Left\0Right\0"); ImGui::SliderFloat2("ButtonTextAlign", (float*)&style.ButtonTextAlign, 0.0f, 1.0f, "%.2f"); ImGui::SameLine(); HelpMarker("Alignment applies when a button is larger than its text content."); ImGui::SliderFloat2("SelectableTextAlign", (float*)&style.SelectableTextAlign, 0.0f, 1.0f, "%.2f"); ImGui::SameLine(); HelpMarker("Alignment applies when a selectable is larger than its text content."); ImGui::Text("Safe Area Padding"); ImGui::SameLine(); HelpMarker("Adjust if you cannot see the edges of your screen (e.g. 
on a TV where scaling has not been configured)."); ImGui::SliderFloat2("DisplaySafeAreaPadding", (float*)&style.DisplaySafeAreaPadding, 0.0f, 30.0f, "%.0f"); ImGui::EndTabItem(); } if (ImGui::BeginTabItem("Colors")) { static int output_dest = 0; static bool output_only_modified = true; if (ImGui::Button("Export Unsaved")) { if (output_dest == 0) ImGui::LogToClipboard(); else ImGui::LogToTTY(); ImGui::LogText("ImVec4* colors = ImGui::GetStyle().Colors;" IM_NEWLINE); for (int i = 0; i < ImGuiCol_COUNT; i++) { const ImVec4& col = style.Colors[i]; const char* name = ImGui::GetStyleColorName(i); if (!output_only_modified || memcmp(&col, &ref->Colors[i], sizeof(ImVec4)) != 0) ImGui::LogText("colors[ImGuiCol_%s]%*s= ImVec4(%.2ff, %.2ff, %.2ff, %.2ff);" IM_NEWLINE, name, 23 - (int)strlen(name), "", col.x, col.y, col.z, col.w); } ImGui::LogFinish(); } ImGui::SameLine(); ImGui::SetNextItemWidth(120); ImGui::Combo("##output_type", &output_dest, "To Clipboard\0To TTY\0"); ImGui::SameLine(); ImGui::Checkbox("Only Modified Colors", &output_only_modified); static ImGuiTextFilter filter; filter.Draw("Filter colors", ImGui::GetFontSize() * 16); static ImGuiColorEditFlags alpha_flags = 0; ImGui::RadioButton("Opaque", &alpha_flags, 0); ImGui::SameLine(); ImGui::RadioButton("Alpha", &alpha_flags, ImGuiColorEditFlags_AlphaPreview); ImGui::SameLine(); ImGui::RadioButton("Both", &alpha_flags, ImGuiColorEditFlags_AlphaPreviewHalf); ImGui::SameLine(); HelpMarker("In the color list:\nLeft-click on colored square to open color picker,\nRight-click to open edit options menu."); ImGui::BeginChild("##colors", ImVec2(0, 0), true, ImGuiWindowFlags_AlwaysVerticalScrollbar | ImGuiWindowFlags_AlwaysHorizontalScrollbar | ImGuiWindowFlags_NavFlattened); ImGui::PushItemWidth(-160); for (int i = 0; i < ImGuiCol_COUNT; i++) { const char* name = ImGui::GetStyleColorName(i); if (!filter.PassFilter(name)) continue; ImGui::PushID(i); ImGui::ColorEdit4("##color", (float*)&style.Colors[i], ImGuiColorEditFlags_AlphaBar | alpha_flags); if (memcmp(&style.Colors[i], &ref->Colors[i], sizeof(ImVec4)) != 0) { // Tips: in a real user application, you may want to merge and use an icon font into the main font, so instead of "Save"/"Revert" you'd use icons. // Read the FAQ and misc/fonts/README.txt about using icon fonts. It's really easy and super convenient! ImGui::SameLine(0.0f, style.ItemInnerSpacing.x); if (ImGui::Button("Save")) ref->Colors[i] = style.Colors[i]; ImGui::SameLine(0.0f, style.ItemInnerSpacing.x); if (ImGui::Button("Revert")) style.Colors[i] = ref->Colors[i]; } ImGui::SameLine(0.0f, style.ItemInnerSpacing.x); ImGui::TextUnformatted(name); ImGui::PopID(); } ImGui::PopItemWidth(); ImGui::EndChild(); ImGui::EndTabItem(); } if (ImGui::BeginTabItem("Fonts")) { ImGuiIO& io = ImGui::GetIO(); ImFontAtlas* atlas = io.Fonts; HelpMarker("Read FAQ and misc/fonts/README.txt for details on font loading."); ImGui::PushItemWidth(120); for (int i = 0; i < atlas->Fonts.Size; i++) { ImFont* font = atlas->Fonts[i]; ImGui::PushID(font); bool font_details_opened = ImGui::TreeNode(font, "Font %d: \"%s\"\n%.2f px, %d glyphs, %d file(s)", i, font->ConfigData ? 
font->ConfigData[0].Name : "", font->FontSize, font->Glyphs.Size, font->ConfigDataCount); ImGui::SameLine(); if (ImGui::SmallButton("Set as default")) { io.FontDefault = font; } if (font_details_opened) { ImGui::PushFont(font); ImGui::Text("The quick brown fox jumps over the lazy dog"); ImGui::PopFont(); ImGui::DragFloat("Font scale", &font->Scale, 0.005f, 0.3f, 2.0f, "%.1f"); // Scale only this font ImGui::SameLine(); HelpMarker("Note than the default embedded font is NOT meant to be scaled.\n\nFont are currently rendered into bitmaps at a given size at the time of building the atlas. You may oversample them to get some flexibility with scaling. You can also render at multiple sizes and select which one to use at runtime.\n\n(Glimmer of hope: the atlas system should hopefully be rewritten in the future to make scaling more natural and automatic.)"); ImGui::InputFloat("Font offset", &font->DisplayOffset.y, 1, 1, "%.0f"); ImGui::Text("Ascent: %f, Descent: %f, Height: %f", font->Ascent, font->Descent, font->Ascent - font->Descent); ImGui::Text("Fallback character: '%c' (%d)", font->FallbackChar, font->FallbackChar); const float surface_sqrt = sqrtf((float)font->MetricsTotalSurface); ImGui::Text("Texture surface: %d pixels (approx) ~ %dx%d", font->MetricsTotalSurface, (int)surface_sqrt, (int)surface_sqrt); for (int config_i = 0; config_i < font->ConfigDataCount; config_i++) if (const ImFontConfig* cfg = &font->ConfigData[config_i]) ImGui::BulletText("Input %d: \'%s\', Oversample: (%d,%d), PixelSnapH: %d", config_i, cfg->Name, cfg->OversampleH, cfg->OversampleV, cfg->PixelSnapH); if (ImGui::TreeNode("Glyphs", "Glyphs (%d)", font->Glyphs.Size)) { // Display all glyphs of the fonts in separate pages of 256 characters for (int base = 0; base < 0x10000; base += 256) { int count = 0; for (int n = 0; n < 256; n++) count += font->FindGlyphNoFallback((ImWchar)(base + n)) ? 1 : 0; if (count > 0 && ImGui::TreeNode((void*)(intptr_t)base, "U+%04X..U+%04X (%d %s)", base, base + 255, count, count > 1 ? "glyphs" : "glyph")) { float cell_size = font->FontSize * 1; float cell_spacing = style.ItemSpacing.y; ImVec2 base_pos = ImGui::GetCursorScreenPos(); ImDrawList* draw_list = ImGui::GetWindowDrawList(); for (int n = 0; n < 256; n++) { ImVec2 cell_p1(base_pos.x + (n % 16) * (cell_size + cell_spacing), base_pos.y + (n / 16) * (cell_size + cell_spacing)); ImVec2 cell_p2(cell_p1.x + cell_size, cell_p1.y + cell_size); const ImFontGlyph* glyph = font->FindGlyphNoFallback((ImWchar)(base + n)); draw_list->AddRect(cell_p1, cell_p2, glyph ? IM_COL32(255, 255, 255, 100) : IM_COL32(255, 255, 255, 50)); if (glyph) font->RenderChar(draw_list, cell_size, cell_p1, ImGui::GetColorU32(ImGuiCol_Text), (ImWchar)(base + n)); // We use ImFont::RenderChar as a shortcut because we don't have UTF-8 conversion functions available to generate a string. 
if (glyph && ImGui::IsMouseHoveringRect(cell_p1, cell_p2)) { ImGui::BeginTooltip(); ImGui::Text("Codepoint: U+%04X", base + n); ImGui::Separator(); ImGui::Text("AdvanceX: %.1f", glyph->AdvanceX); ImGui::Text("Pos: (%.2f,%.2f)->(%.2f,%.2f)", glyph->X0, glyph->Y0, glyph->X1, glyph->Y1); ImGui::Text("UV: (%.3f,%.3f)->(%.3f,%.3f)", glyph->U0, glyph->V0, glyph->U1, glyph->V1); ImGui::EndTooltip(); } } ImGui::Dummy(ImVec2((cell_size + cell_spacing) * 16, (cell_size + cell_spacing) * 16)); ImGui::TreePop(); } } ImGui::TreePop(); } ImGui::TreePop(); } ImGui::PopID(); } if (ImGui::TreeNode("Atlas texture", "Atlas texture (%dx%d pixels)", atlas->TexWidth, atlas->TexHeight)) { ImVec4 tint_col = ImVec4(1.0f, 1.0f, 1.0f, 1.0f); ImVec4 border_col = ImVec4(1.0f, 1.0f, 1.0f, 0.5f); ImGui::Image(atlas->TexID, ImVec2((float)atlas->TexWidth, (float)atlas->TexHeight), ImVec2(0, 0), ImVec2(1, 1), tint_col, border_col); ImGui::TreePop(); } HelpMarker("Those are old settings provided for convenience.\nHowever, the _correct_ way of scaling your UI is currently to reload your font at the designed size, rebuild the font atlas, and call style.ScaleAllSizes() on a reference ImGuiStyle structure."); static float window_scale = 1.0f; if (ImGui::DragFloat("window scale", &window_scale, 0.005f, 0.3f, 2.0f, "%.2f")) // scale only this window ImGui::SetWindowFontScale(window_scale); ImGui::DragFloat("global scale", &io.FontGlobalScale, 0.005f, 0.3f, 2.0f, "%.2f"); // scale everything ImGui::PopItemWidth(); ImGui::EndTabItem(); } if (ImGui::BeginTabItem("Rendering")) { ImGui::Checkbox("Anti-aliased lines", &style.AntiAliasedLines); ImGui::SameLine(); HelpMarker("When disabling anti-aliasing lines, you'll probably want to disable borders in your style as well."); ImGui::Checkbox("Anti-aliased fill", &style.AntiAliasedFill); ImGui::PushItemWidth(100); ImGui::DragFloat("Curve Tessellation Tolerance", &style.CurveTessellationTol, 0.02f, 0.10f, FLT_MAX, "%.2f", 2.0f); if (style.CurveTessellationTol < 0.10f) style.CurveTessellationTol = 0.10f; ImGui::DragFloat("Global Alpha", &style.Alpha, 0.005f, 0.20f, 1.0f, "%.2f"); // Not exposing zero here so user doesn't "lose" the UI (zero alpha clips all widgets). But application code could have a toggle to switch between zero and non-zero. ImGui::PopItemWidth(); ImGui::EndTabItem(); } ImGui::EndTabBar(); } ImGui::PopItemWidth(); } //----------------------------------------------------------------------------- // [SECTION] Example App: Main Menu Bar / ShowExampleAppMainMenuBar() //----------------------------------------------------------------------------- // Demonstrate creating a "main" fullscreen menu bar and populating it. // Note the difference between BeginMainMenuBar() and BeginMenuBar(): // - BeginMenuBar() = menu-bar inside current window we Begin()-ed into (the window needs the ImGuiWindowFlags_MenuBar flag) // - BeginMainMenuBar() = helper to create menu-bar-sized window at the top of the main viewport + call BeginMenuBar() into it. 
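// For contrast with the BeginMainMenuBar() example below, a minimal sketch of the BeginMenuBar() variant
// (menu bar hosted inside a regular window; assumes the window is created with the ImGuiWindowFlags_MenuBar flag,
// window title and menu contents are placeholders):
//   ImGui::Begin("My Window", NULL, ImGuiWindowFlags_MenuBar);
//   if (ImGui::BeginMenuBar())
//   {
//       if (ImGui::BeginMenu("File")) { /* MenuItem() calls */ ImGui::EndMenu(); }
//       ImGui::EndMenuBar();
//   }
//   ImGui::End();
// ShowExampleAppLayout() further below shows this same pattern in context.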
static void ShowExampleAppMainMenuBar() { if (ImGui::BeginMainMenuBar()) { if (ImGui::BeginMenu("File")) { ShowExampleMenuFile(); ImGui::EndMenu(); } if (ImGui::BeginMenu("Edit")) { if (ImGui::MenuItem("Undo", "CTRL+Z")) {} if (ImGui::MenuItem("Redo", "CTRL+Y", false, false)) {} // Disabled item ImGui::Separator(); if (ImGui::MenuItem("Cut", "CTRL+X")) {} if (ImGui::MenuItem("Copy", "CTRL+C")) {} if (ImGui::MenuItem("Paste", "CTRL+V")) {} ImGui::EndMenu(); } ImGui::EndMainMenuBar(); } } // Note that shortcuts are currently provided for display only (future version will add flags to BeginMenu to process shortcuts) static void ShowExampleMenuFile() { ImGui::MenuItem("(dummy menu)", NULL, false, false); if (ImGui::MenuItem("New")) {} if (ImGui::MenuItem("Open", "Ctrl+O")) {} if (ImGui::BeginMenu("Open Recent")) { ImGui::MenuItem("fish_hat.c"); ImGui::MenuItem("fish_hat.inl"); ImGui::MenuItem("fish_hat.h"); if (ImGui::BeginMenu("More..")) { ImGui::MenuItem("Hello"); ImGui::MenuItem("Sailor"); if (ImGui::BeginMenu("Recurse..")) { ShowExampleMenuFile(); ImGui::EndMenu(); } ImGui::EndMenu(); } ImGui::EndMenu(); } if (ImGui::MenuItem("Save", "Ctrl+S")) {} if (ImGui::MenuItem("Save As..")) {} ImGui::Separator(); if (ImGui::BeginMenu("Options")) { static bool enabled = true; ImGui::MenuItem("Enabled", "", &enabled); ImGui::BeginChild("child", ImVec2(0, 60), true); for (int i = 0; i < 10; i++) ImGui::Text("Scrolling Text %d", i); ImGui::EndChild(); static float f = 0.5f; static int n = 0; static bool b = true; ImGui::SliderFloat("Value", &f, 0.0f, 1.0f); ImGui::InputFloat("Input", &f, 0.1f); ImGui::Combo("Combo", &n, "Yes\0No\0Maybe\0\0"); ImGui::Checkbox("Check", &b); ImGui::EndMenu(); } if (ImGui::BeginMenu("Colors")) { float sz = ImGui::GetTextLineHeight(); for (int i = 0; i < ImGuiCol_COUNT; i++) { const char* name = ImGui::GetStyleColorName((ImGuiCol)i); ImVec2 p = ImGui::GetCursorScreenPos(); ImGui::GetWindowDrawList()->AddRectFilled(p, ImVec2(p.x+sz, p.y+sz), ImGui::GetColorU32((ImGuiCol)i)); ImGui::Dummy(ImVec2(sz, sz)); ImGui::SameLine(); ImGui::MenuItem(name); } ImGui::EndMenu(); } if (ImGui::BeginMenu("Disabled", false)) // Disabled { IM_ASSERT(0); } if (ImGui::MenuItem("Checked", NULL, true)) {} if (ImGui::MenuItem("Quit", "Alt+F4")) {} } //----------------------------------------------------------------------------- // [SECTION] Example App: Debug Console / ShowExampleAppConsole() //----------------------------------------------------------------------------- // Demonstrate creating a simple console window, with scrolling, filtering, completion and history. // For the console example, here we are using a more C++ like approach of declaring a class to hold the data and the functions. struct ExampleAppConsole { char InputBuf[256]; ImVector<char*> Items; ImVector<const char*> Commands; ImVector<char*> History; int HistoryPos; // -1: new line, 0..History.Size-1 browsing history. ImGuiTextFilter Filter; bool AutoScroll; bool ScrollToBottom; ExampleAppConsole() { ClearLog(); memset(InputBuf, 0, sizeof(InputBuf)); HistoryPos = -1; Commands.push_back("HELP"); Commands.push_back("HISTORY"); Commands.push_back("CLEAR"); Commands.push_back("CLASSIFY"); // "classify" is only here to provide an example of "C"+[tab] completing to "CL" and displaying matches. 
AutoScroll = true; ScrollToBottom = false; AddLog("Welcome to Dear ImGui!"); } ~ExampleAppConsole() { ClearLog(); for (int i = 0; i < History.Size; i++) free(History[i]); } // Portable helpers static int Stricmp(const char* str1, const char* str2) { int d; while ((d = toupper(*str2) - toupper(*str1)) == 0 && *str1) { str1++; str2++; } return d; } static int Strnicmp(const char* str1, const char* str2, int n) { int d = 0; while (n > 0 && (d = toupper(*str2) - toupper(*str1)) == 0 && *str1) { str1++; str2++; n--; } return d; } static char* Strdup(const char *str) { size_t len = strlen(str) + 1; void* buf = malloc(len); IM_ASSERT(buf); return (char*)memcpy(buf, (const void*)str, len); } static void Strtrim(char* str) { char* str_end = str + strlen(str); while (str_end > str && str_end[-1] == ' ') str_end--; *str_end = 0; } void ClearLog() { for (int i = 0; i < Items.Size; i++) free(Items[i]); Items.clear(); } void AddLog(const char* fmt, ...) IM_FMTARGS(2) { // FIXME-OPT char buf[1024]; va_list args; va_start(args, fmt); vsnprintf(buf, IM_ARRAYSIZE(buf), fmt, args); buf[IM_ARRAYSIZE(buf)-1] = 0; va_end(args); Items.push_back(Strdup(buf)); } void Draw(const char* title, bool* p_open) { ImGui::SetNextWindowSize(ImVec2(520,600), ImGuiCond_FirstUseEver); if (!ImGui::Begin(title, p_open)) { ImGui::End(); return; } // As a specific feature guaranteed by the library, after calling Begin() the last Item represent the title bar. So e.g. IsItemHovered() will return true when hovering the title bar. // Here we create a context menu only available from the title bar. if (ImGui::BeginPopupContextItem()) { if (ImGui::MenuItem("Close Console")) *p_open = false; ImGui::EndPopup(); } ImGui::TextWrapped("This example implements a console with basic coloring, completion and history. A more elaborate implementation may want to store entries along with extra data such as timestamp, emitter, etc."); ImGui::TextWrapped("Enter 'HELP' for help, press TAB to use text completion."); // TODO: display items starting from the bottom if (ImGui::SmallButton("Add Dummy Text")) { AddLog("%d some text", Items.Size); AddLog("some more text"); AddLog("display very important message here!"); } ImGui::SameLine(); if (ImGui::SmallButton("Add Dummy Error")) { AddLog("[error] something went wrong"); } ImGui::SameLine(); if (ImGui::SmallButton("Clear")) { ClearLog(); } ImGui::SameLine(); bool copy_to_clipboard = ImGui::SmallButton("Copy"); //static float t = 0.0f; if (ImGui::GetTime() - t > 0.02f) { t = ImGui::GetTime(); AddLog("Spam %f", t); } ImGui::Separator(); // Options menu if (ImGui::BeginPopup("Options")) { ImGui::Checkbox("Auto-scroll", &AutoScroll); ImGui::EndPopup(); } // Options, Filter if (ImGui::Button("Options")) ImGui::OpenPopup("Options"); ImGui::SameLine(); Filter.Draw("Filter (\"incl,-excl\") (\"error\")", 180); ImGui::Separator(); const float footer_height_to_reserve = ImGui::GetStyle().ItemSpacing.y + ImGui::GetFrameHeightWithSpacing(); // 1 separator, 1 input text ImGui::BeginChild("ScrollingRegion", ImVec2(0, -footer_height_to_reserve), false, ImGuiWindowFlags_HorizontalScrollbar); // Leave room for 1 separator + 1 InputText if (ImGui::BeginPopupContextWindow()) { if (ImGui::Selectable("Clear")) ClearLog(); ImGui::EndPopup(); } // Display every line as a separate entry so we can change their color or add custom widgets. 
If you only want raw text you can use ImGui::TextUnformatted(log.begin(), log.end()); // NB- if you have thousands of entries this approach may be too inefficient and may require user-side clipping to only process visible items. // You can seek and display only the lines that are visible using the ImGuiListClipper helper, if your elements are evenly spaced and you have cheap random access to the elements. // To use the clipper we could replace the 'for (int i = 0; i < Items.Size; i++)' loop with: // ImGuiListClipper clipper(Items.Size); // while (clipper.Step()) // for (int i = clipper.DisplayStart; i < clipper.DisplayEnd; i++) // However, note that you can not use this code as is if a filter is active because it breaks the 'cheap random-access' property. We would need random-access on the post-filtered list. // A typical application wanting coarse clipping and filtering may want to pre-compute an array of indices that passed the filtering test, recomputing this array when user changes the filter, // and appending newly elements as they are inserted. This is left as a task to the user until we can manage to improve this example code! // If your items are of variable size you may want to implement code similar to what ImGuiListClipper does. Or split your data into fixed height items to allow random-seeking into your list. ImGui::PushStyleVar(ImGuiStyleVar_ItemSpacing, ImVec2(4,1)); // Tighten spacing if (copy_to_clipboard) ImGui::LogToClipboard(); for (int i = 0; i < Items.Size; i++) { const char* item = Items[i]; if (!Filter.PassFilter(item)) continue; // Normally you would store more information in your item (e.g. make Items[] an array of structure, store color/type etc.) bool pop_color = false; if (strstr(item, "[error]")) { ImGui::PushStyleColor(ImGuiCol_Text, ImVec4(1.0f, 0.4f, 0.4f, 1.0f)); pop_color = true; } else if (strncmp(item, "# ", 2) == 0) { ImGui::PushStyleColor(ImGuiCol_Text, ImVec4(1.0f, 0.8f, 0.6f, 1.0f)); pop_color = true; } ImGui::TextUnformatted(item); if (pop_color) ImGui::PopStyleColor(); } if (copy_to_clipboard) ImGui::LogFinish(); if (ScrollToBottom || (AutoScroll && ImGui::GetScrollY() >= ImGui::GetScrollMaxY())) ImGui::SetScrollHereY(1.0f); ScrollToBottom = false; ImGui::PopStyleVar(); ImGui::EndChild(); ImGui::Separator(); // Command-line bool reclaim_focus = false; if (ImGui::InputText("Input", InputBuf, IM_ARRAYSIZE(InputBuf), ImGuiInputTextFlags_EnterReturnsTrue|ImGuiInputTextFlags_CallbackCompletion|ImGuiInputTextFlags_CallbackHistory, &TextEditCallbackStub, (void*)this)) { char* s = InputBuf; Strtrim(s); if (s[0]) ExecCommand(s); strcpy(s, ""); reclaim_focus = true; } // Auto-focus on window apparition ImGui::SetItemDefaultFocus(); if (reclaim_focus) ImGui::SetKeyboardFocusHere(-1); // Auto focus previous widget ImGui::End(); } void ExecCommand(const char* command_line) { AddLog("# %s\n", command_line); // Insert into history. First find match and delete it so it can be pushed to the back. This isn't trying to be smart or optimal. HistoryPos = -1; for (int i = History.Size-1; i >= 0; i--) if (Stricmp(History[i], command_line) == 0) { free(History[i]); History.erase(History.begin() + i); break; } History.push_back(Strdup(command_line)); // Process command if (Stricmp(command_line, "CLEAR") == 0) { ClearLog(); } else if (Stricmp(command_line, "HELP") == 0) { AddLog("Commands:"); for (int i = 0; i < Commands.Size; i++) AddLog("- %s", Commands[i]); } else if (Stricmp(command_line, "HISTORY") == 0) { int first = History.Size - 10; for (int i = first > 0 ? 
first : 0; i < History.Size; i++) AddLog("%3d: %s\n", i, History[i]); } else { AddLog("Unknown command: '%s'\n", command_line); } // On command input, we scroll to bottom even if AutoScroll==false ScrollToBottom = true; } static int TextEditCallbackStub(ImGuiInputTextCallbackData* data) // In C++11 you are better off using lambdas for this sort of forwarding callbacks { ExampleAppConsole* console = (ExampleAppConsole*)data->UserData; return console->TextEditCallback(data); } int TextEditCallback(ImGuiInputTextCallbackData* data) { //AddLog("cursor: %d, selection: %d-%d", data->CursorPos, data->SelectionStart, data->SelectionEnd); switch (data->EventFlag) { case ImGuiInputTextFlags_CallbackCompletion: { // Example of TEXT COMPLETION // Locate beginning of current word const char* word_end = data->Buf + data->CursorPos; const char* word_start = word_end; while (word_start > data->Buf) { const char c = word_start[-1]; if (c == ' ' || c == '\t' || c == ',' || c == ';') break; word_start--; } // Build a list of candidates ImVector<const char*> candidates; for (int i = 0; i < Commands.Size; i++) if (Strnicmp(Commands[i], word_start, (int)(word_end-word_start)) == 0) candidates.push_back(Commands[i]); if (candidates.Size == 0) { // No match AddLog("No match for \"%.*s\"!\n", (int)(word_end-word_start), word_start); } else if (candidates.Size == 1) { // Single match. Delete the beginning of the word and replace it entirely so we've got nice casing data->DeleteChars((int)(word_start-data->Buf), (int)(word_end-word_start)); data->InsertChars(data->CursorPos, candidates[0]); data->InsertChars(data->CursorPos, " "); } else { // Multiple matches. Complete as much as we can, so inputting "C" will complete to "CL" and display "CLEAR" and "CLASSIFY" int match_len = (int)(word_end - word_start); for (;;) { int c = 0; bool all_candidates_matches = true; for (int i = 0; i < candidates.Size && all_candidates_matches; i++) if (i == 0) c = toupper(candidates[i][match_len]); else if (c == 0 || c != toupper(candidates[i][match_len])) all_candidates_matches = false; if (!all_candidates_matches) break; match_len++; } if (match_len > 0) { data->DeleteChars((int)(word_start - data->Buf), (int)(word_end-word_start)); data->InsertChars(data->CursorPos, candidates[0], candidates[0] + match_len); } // List matches AddLog("Possible matches:\n"); for (int i = 0; i < candidates.Size; i++) AddLog("- %s\n", candidates[i]); } break; } case ImGuiInputTextFlags_CallbackHistory: { // Example of HISTORY const int prev_history_pos = HistoryPos; if (data->EventKey == ImGuiKey_UpArrow) { if (HistoryPos == -1) HistoryPos = History.Size - 1; else if (HistoryPos > 0) HistoryPos--; } else if (data->EventKey == ImGuiKey_DownArrow) { if (HistoryPos != -1) if (++HistoryPos >= History.Size) HistoryPos = -1; } // A better implementation would preserve the data on the current input line along with cursor position. if (prev_history_pos != HistoryPos) { const char* history_str = (HistoryPos >= 0) ?
History[HistoryPos] : ""; data->DeleteChars(0, data->BufTextLen); data->InsertChars(0, history_str); } } } return 0; } }; static void ShowExampleAppConsole(bool* p_open) { static ExampleAppConsole console; console.Draw("Example: Console", p_open); } //----------------------------------------------------------------------------- // [SECTION] Example App: Debug Log / ShowExampleAppLog() //----------------------------------------------------------------------------- // Usage: // static ExampleAppLog my_log; // my_log.AddLog("Hello %d world\n", 123); // my_log.Draw("title"); struct ExampleAppLog { ImGuiTextBuffer Buf; ImGuiTextFilter Filter; ImVector<int> LineOffsets; // Index to lines offset. We maintain this with AddLog() calls, allowing us to have a random access on lines bool AutoScroll; // Keep scrolling if already at the bottom ExampleAppLog() { AutoScroll = true; Clear(); } void Clear() { Buf.clear(); LineOffsets.clear(); LineOffsets.push_back(0); } void AddLog(const char* fmt, ...) IM_FMTARGS(2) { int old_size = Buf.size(); va_list args; va_start(args, fmt); Buf.appendfv(fmt, args); va_end(args); for (int new_size = Buf.size(); old_size < new_size; old_size++) if (Buf[old_size] == '\n') LineOffsets.push_back(old_size + 1); } void Draw(const char* title, bool* p_open = NULL) { if (!ImGui::Begin(title, p_open)) { ImGui::End(); return; } // Options menu if (ImGui::BeginPopup("Options")) { ImGui::Checkbox("Auto-scroll", &AutoScroll); ImGui::EndPopup(); } // Main window if (ImGui::Button("Options")) ImGui::OpenPopup("Options"); ImGui::SameLine(); bool clear = ImGui::Button("Clear"); ImGui::SameLine(); bool copy = ImGui::Button("Copy"); ImGui::SameLine(); Filter.Draw("Filter", -100.0f); ImGui::Separator(); ImGui::BeginChild("scrolling", ImVec2(0,0), false, ImGuiWindowFlags_HorizontalScrollbar); if (clear) Clear(); if (copy) ImGui::LogToClipboard(); ImGui::PushStyleVar(ImGuiStyleVar_ItemSpacing, ImVec2(0, 0)); const char* buf = Buf.begin(); const char* buf_end = Buf.end(); if (Filter.IsActive()) { // In this example we don't use the clipper when Filter is enabled. // This is because we don't have a random access on the result of our filter. // A real application processing logs with tens of thousands of entries may want to store the result of search/filter, // especially if the filtering function is not trivial (e.g. reg-exp). for (int line_no = 0; line_no < LineOffsets.Size; line_no++) { const char* line_start = buf + LineOffsets[line_no]; const char* line_end = (line_no + 1 < LineOffsets.Size) ? (buf + LineOffsets[line_no + 1] - 1) : buf_end; if (Filter.PassFilter(line_start, line_end)) ImGui::TextUnformatted(line_start, line_end); } } else { // The simplest and easiest way to display the entire buffer: // ImGui::TextUnformatted(buf_begin, buf_end); // And it'll just work. TextUnformatted() has specialization for large blob of text and will fast-forward to skip non-visible lines. // Here we instead demonstrate using the clipper to only process lines that are within the visible area. // If you have tens of thousands of items and their processing cost is non-negligible, coarse clipping them on your side is recommended. // Using ImGuiListClipper requires A) random access into your data, and B) items all being the same height, // both of which we can handle since we have an array pointing to the beginning of each line of text. // When using the filter (in the block of code above) we don't have random access into the data to display anymore, which is why we don't use the clipper.
// Storing or skimming through the search result would make it possible (and would be recommended if you want to search through tens of thousands of entries) ImGuiListClipper clipper; clipper.Begin(LineOffsets.Size); while (clipper.Step()) { for (int line_no = clipper.DisplayStart; line_no < clipper.DisplayEnd; line_no++) { const char* line_start = buf + LineOffsets[line_no]; const char* line_end = (line_no + 1 < LineOffsets.Size) ? (buf + LineOffsets[line_no + 1] - 1) : buf_end; ImGui::TextUnformatted(line_start, line_end); } } clipper.End(); } ImGui::PopStyleVar(); if (AutoScroll && ImGui::GetScrollY() >= ImGui::GetScrollMaxY()) ImGui::SetScrollHereY(1.0f); ImGui::EndChild(); ImGui::End(); } }; // Demonstrate creating a simple log window with basic filtering. static void ShowExampleAppLog(bool* p_open) { static ExampleAppLog log; // For the demo: add a debug button _BEFORE_ the normal log window contents // We take advantage of a rarely used feature: multiple calls to Begin()/End() are appending to the _same_ window. // Most of the contents of the window will be added by the log.Draw() call. ImGui::SetNextWindowSize(ImVec2(500, 400), ImGuiCond_FirstUseEver); ImGui::Begin("Example: Log", p_open); if (ImGui::SmallButton("[Debug] Add 5 entries")) { static int counter = 0; for (int n = 0; n < 5; n++) { const char* categories[3] = { "info", "warn", "error" }; const char* words[] = { "Bumfuzzled", "Cattywampus", "Snickersnee", "Abibliophobia", "Absquatulate", "Nincompoop", "Pauciloquent" }; log.AddLog("[%05d] [%s] Hello, current time is %.1f, here's a word: '%s'\n", ImGui::GetFrameCount(), categories[counter % IM_ARRAYSIZE(categories)], ImGui::GetTime(), words[counter % IM_ARRAYSIZE(words)]); counter++; } } ImGui::End(); // Actually call in the regular Log helper (which will Begin() into the same window as we just did) log.Draw("Example: Log", p_open); } //----------------------------------------------------------------------------- // [SECTION] Example App: Simple Layout / ShowExampleAppLayout() //----------------------------------------------------------------------------- // Demonstrate create a window with multiple child windows. static void ShowExampleAppLayout(bool* p_open) { ImGui::SetNextWindowSize(ImVec2(500, 440), ImGuiCond_FirstUseEver); if (ImGui::Begin("Example: Simple layout", p_open, ImGuiWindowFlags_MenuBar)) { if (ImGui::BeginMenuBar()) { if (ImGui::BeginMenu("File")) { if (ImGui::MenuItem("Close")) *p_open = false; ImGui::EndMenu(); } ImGui::EndMenuBar(); } // left static int selected = 0; ImGui::BeginChild("left pane", ImVec2(150, 0), true); for (int i = 0; i < 100; i++) { char label[128]; sprintf(label, "MyObject %d", i); if (ImGui::Selectable(label, selected == i)) selected = i; } ImGui::EndChild(); ImGui::SameLine(); // right ImGui::BeginGroup(); ImGui::BeginChild("item view", ImVec2(0, -ImGui::GetFrameHeightWithSpacing())); // Leave room for 1 line below us ImGui::Text("MyObject: %d", selected); ImGui::Separator(); if (ImGui::BeginTabBar("##Tabs", ImGuiTabBarFlags_None)) { if (ImGui::BeginTabItem("Description")) { ImGui::TextWrapped("Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. 
"); ImGui::EndTabItem(); } if (ImGui::BeginTabItem("Details")) { ImGui::Text("ID: 0123456789"); ImGui::EndTabItem(); } ImGui::EndTabBar(); } ImGui::EndChild(); if (ImGui::Button("Revert")) {} ImGui::SameLine(); if (ImGui::Button("Save")) {} ImGui::EndGroup(); } ImGui::End(); } //----------------------------------------------------------------------------- // [SECTION] Example App: Property Editor / ShowExampleAppPropertyEditor() //----------------------------------------------------------------------------- // Demonstrate create a simple property editor. static void ShowExampleAppPropertyEditor(bool* p_open) { ImGui::SetNextWindowSize(ImVec2(430,450), ImGuiCond_FirstUseEver); if (!ImGui::Begin("Example: Property editor", p_open)) { ImGui::End(); return; } HelpMarker("This example shows how you may implement a property editor using two columns.\nAll objects/fields data are dummies here.\nRemember that in many simple cases, you can use ImGui::SameLine(xxx) to position\nyour cursor horizontally instead of using the Columns() API."); ImGui::PushStyleVar(ImGuiStyleVar_FramePadding, ImVec2(2,2)); ImGui::Columns(2); ImGui::Separator(); struct funcs { static void ShowDummyObject(const char* prefix, int uid) { ImGui::PushID(uid); // Use object uid as identifier. Most commonly you could also use the object pointer as a base ID. ImGui::AlignTextToFramePadding(); // Text and Tree nodes are less high than regular widgets, here we add vertical spacing to make the tree lines equal high. bool node_open = ImGui::TreeNode("Object", "%s_%u", prefix, uid); ImGui::NextColumn(); ImGui::AlignTextToFramePadding(); ImGui::Text("my sailor is rich"); ImGui::NextColumn(); if (node_open) { static float dummy_members[8] = { 0.0f,0.0f,1.0f,3.1416f,100.0f,999.0f }; for (int i = 0; i < 8; i++) { ImGui::PushID(i); // Use field index as identifier. if (i < 2) { ShowDummyObject("Child", 424242); } else { // Here we use a TreeNode to highlight on hover (we could use e.g. Selectable as well) ImGui::AlignTextToFramePadding(); ImGui::TreeNodeEx("Field", ImGuiTreeNodeFlags_Leaf | ImGuiTreeNodeFlags_NoTreePushOnOpen | ImGuiTreeNodeFlags_Bullet, "Field_%d", i); ImGui::NextColumn(); ImGui::SetNextItemWidth(-1); if (i >= 5) ImGui::InputFloat("##value", &dummy_members[i], 1.0f); else ImGui::DragFloat("##value", &dummy_members[i], 0.01f); ImGui::NextColumn(); } ImGui::PopID(); } ImGui::TreePop(); } ImGui::PopID(); } }; // Iterate dummy objects with dummy members (all the same data) for (int obj_i = 0; obj_i < 3; obj_i++) funcs::ShowDummyObject("Object", obj_i); ImGui::Columns(1); ImGui::Separator(); ImGui::PopStyleVar(); ImGui::End(); } //----------------------------------------------------------------------------- // [SECTION] Example App: Long Text / ShowExampleAppLongText() //----------------------------------------------------------------------------- // Demonstrate/test rendering huge amount of text, and the incidence of clipping. 
static void ShowExampleAppLongText(bool* p_open) { ImGui::SetNextWindowSize(ImVec2(520,600), ImGuiCond_FirstUseEver); if (!ImGui::Begin("Example: Long text display", p_open)) { ImGui::End(); return; } static int test_type = 0; static ImGuiTextBuffer log; static int lines = 0; ImGui::Text("Printing unusually long amount of text."); ImGui::Combo("Test type", &test_type, "Single call to TextUnformatted()\0Multiple calls to Text(), clipped\0Multiple calls to Text(), not clipped (slow)\0"); ImGui::Text("Buffer contents: %d lines, %d bytes", lines, log.size()); if (ImGui::Button("Clear")) { log.clear(); lines = 0; } ImGui::SameLine(); if (ImGui::Button("Add 1000 lines")) { for (int i = 0; i < 1000; i++) log.appendf("%i The quick brown fox jumps over the lazy dog\n", lines+i); lines += 1000; } ImGui::BeginChild("Log"); switch (test_type) { case 0: // Single call to TextUnformatted() with a big buffer ImGui::TextUnformatted(log.begin(), log.end()); break; case 1: { // Multiple calls to Text(), manually coarsely clipped - demonstrate how to use the ImGuiListClipper helper. ImGui::PushStyleVar(ImGuiStyleVar_ItemSpacing, ImVec2(0,0)); ImGuiListClipper clipper(lines); while (clipper.Step()) for (int i = clipper.DisplayStart; i < clipper.DisplayEnd; i++) ImGui::Text("%i The quick brown fox jumps over the lazy dog", i); ImGui::PopStyleVar(); break; } case 2: // Multiple calls to Text(), not clipped (slow) ImGui::PushStyleVar(ImGuiStyleVar_ItemSpacing, ImVec2(0,0)); for (int i = 0; i < lines; i++) ImGui::Text("%i The quick brown fox jumps over the lazy dog", i); ImGui::PopStyleVar(); break; } ImGui::EndChild(); ImGui::End(); } //----------------------------------------------------------------------------- // [SECTION] Example App: Auto Resize / ShowExampleAppAutoResize() //----------------------------------------------------------------------------- // Demonstrate creating a window which gets auto-resized according to its content. static void ShowExampleAppAutoResize(bool* p_open) { if (!ImGui::Begin("Example: Auto-resizing window", p_open, ImGuiWindowFlags_AlwaysAutoResize)) { ImGui::End(); return; } static int lines = 10; ImGui::Text("Window will resize every-frame to the size of its content.\nNote that you probably don't want to query the window size to\noutput your content because that would create a feedback loop."); ImGui::SliderInt("Number of lines", &lines, 1, 20); for (int i = 0; i < lines; i++) ImGui::Text("%*sThis is line %d", i * 4, "", i); // Pad with space to extend size horizontally ImGui::End(); } //----------------------------------------------------------------------------- // [SECTION] Example App: Constrained Resize / ShowExampleAppConstrainedResize() //----------------------------------------------------------------------------- // Demonstrate creating a window with custom resize constraints. 
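// A minimal sketch of the callback contract used by the custom constraints below: SetNextWindowSizeConstraints()
// accepts an optional ImGuiSizeCallback; the callback receives an ImGuiSizeCallbackData (UserData, Pos, CurrentSize,
// DesiredSize) and may overwrite DesiredSize to enforce its own rule. For example (hypothetical callback name):
//   static void ClampWidthTo300(ImGuiSizeCallbackData* data) { if (data->DesiredSize.x > 300.0f) data->DesiredSize.x = 300.0f; }
//   ImGui::SetNextWindowSizeConstraints(ImVec2(0, 0), ImVec2(FLT_MAX, FLT_MAX), ClampWidthTo300);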
static void ShowExampleAppConstrainedResize(bool* p_open) { struct CustomConstraints // Helper functions to demonstrate programmatic constraints { static void Square(ImGuiSizeCallbackData* data) { data->DesiredSize = ImVec2(IM_MAX(data->DesiredSize.x, data->DesiredSize.y), IM_MAX(data->DesiredSize.x, data->DesiredSize.y)); } static void Step(ImGuiSizeCallbackData* data) { float step = (float)(int)(intptr_t)data->UserData; data->DesiredSize = ImVec2((int)(data->DesiredSize.x / step + 0.5f) * step, (int)(data->DesiredSize.y / step + 0.5f) * step); } }; static bool auto_resize = false; static int type = 0; static int display_lines = 10; if (type == 0) ImGui::SetNextWindowSizeConstraints(ImVec2(-1, 0), ImVec2(-1, FLT_MAX)); // Vertical only if (type == 1) ImGui::SetNextWindowSizeConstraints(ImVec2(0, -1), ImVec2(FLT_MAX, -1)); // Horizontal only if (type == 2) ImGui::SetNextWindowSizeConstraints(ImVec2(100, 100), ImVec2(FLT_MAX, FLT_MAX)); // Width > 100, Height > 100 if (type == 3) ImGui::SetNextWindowSizeConstraints(ImVec2(400, -1), ImVec2(500, -1)); // Width 400-500 if (type == 4) ImGui::SetNextWindowSizeConstraints(ImVec2(-1, 400), ImVec2(-1, 500)); // Height 400-500 if (type == 5) ImGui::SetNextWindowSizeConstraints(ImVec2(0, 0), ImVec2(FLT_MAX, FLT_MAX), CustomConstraints::Square); // Always Square if (type == 6) ImGui::SetNextWindowSizeConstraints(ImVec2(0, 0), ImVec2(FLT_MAX, FLT_MAX), CustomConstraints::Step, (void*)(intptr_t)100); // Fixed Step ImGuiWindowFlags flags = auto_resize ? ImGuiWindowFlags_AlwaysAutoResize : 0; if (ImGui::Begin("Example: Constrained Resize", p_open, flags)) { const char* desc[] = { "Resize vertical only", "Resize horizontal only", "Width > 100, Height > 100", "Width 400-500", "Height 400-500", "Custom: Always Square", "Custom: Fixed Steps (100)", }; if (ImGui::Button("200x200")) { ImGui::SetWindowSize(ImVec2(200, 200)); } ImGui::SameLine(); if (ImGui::Button("500x500")) { ImGui::SetWindowSize(ImVec2(500, 500)); } ImGui::SameLine(); if (ImGui::Button("800x200")) { ImGui::SetWindowSize(ImVec2(800, 200)); } ImGui::SetNextItemWidth(200); ImGui::Combo("Constraint", &type, desc, IM_ARRAYSIZE(desc)); ImGui::SetNextItemWidth(200); ImGui::DragInt("Lines", &display_lines, 0.2f, 1, 100); ImGui::Checkbox("Auto-resize", &auto_resize); for (int i = 0; i < display_lines; i++) ImGui::Text("%*sHello, sailor! Making this line long enough for the example.", i * 4, ""); } ImGui::End(); } //----------------------------------------------------------------------------- // [SECTION] Example App: Simple Overlay / ShowExampleAppSimpleOverlay() //----------------------------------------------------------------------------- // Demonstrate creating a simple static window with no decoration + a context-menu to choose which corner of the screen to use. static void ShowExampleAppSimpleOverlay(bool* p_open) { const float DISTANCE = 10.0f; static int corner = 0; ImGuiIO& io = ImGui::GetIO(); if (corner != -1) { ImVec2 window_pos = ImVec2((corner & 1) ? io.DisplaySize.x - DISTANCE : DISTANCE, (corner & 2) ? io.DisplaySize.y - DISTANCE : DISTANCE); ImVec2 window_pos_pivot = ImVec2((corner & 1) ? 1.0f : 0.0f, (corner & 2) ? 1.0f : 0.0f); ImGui::SetNextWindowPos(window_pos, ImGuiCond_Always, window_pos_pivot); } ImGui::SetNextWindowBgAlpha(0.35f); // Transparent background if (ImGui::Begin("Example: Simple overlay", p_open, (corner != -1 ? 
ImGuiWindowFlags_NoMove : 0) | ImGuiWindowFlags_NoDecoration | ImGuiWindowFlags_AlwaysAutoResize | ImGuiWindowFlags_NoSavedSettings | ImGuiWindowFlags_NoFocusOnAppearing | ImGuiWindowFlags_NoNav)) { ImGui::Text("Simple overlay\n" "in the corner of the screen.\n" "(right-click to change position)"); ImGui::Separator(); if (ImGui::IsMousePosValid()) ImGui::Text("Mouse Position: (%.1f,%.1f)", io.MousePos.x, io.MousePos.y); else ImGui::Text("Mouse Position: <invalid>"); if (ImGui::BeginPopupContextWindow()) { if (ImGui::MenuItem("Custom", NULL, corner == -1)) corner = -1; if (ImGui::MenuItem("Top-left", NULL, corner == 0)) corner = 0; if (ImGui::MenuItem("Top-right", NULL, corner == 1)) corner = 1; if (ImGui::MenuItem("Bottom-left", NULL, corner == 2)) corner = 2; if (ImGui::MenuItem("Bottom-right", NULL, corner == 3)) corner = 3; if (p_open && ImGui::MenuItem("Close")) *p_open = false; ImGui::EndPopup(); } } ImGui::End(); } //----------------------------------------------------------------------------- // [SECTION] Example App: Manipulating Window Titles / ShowExampleAppWindowTitles() //----------------------------------------------------------------------------- // Demonstrate using "##" and "###" in identifiers to manipulate ID generation. // This apply to all regular items as well. Read FAQ section "How can I have multiple widgets with the same label? Can I have widget without a label? (Yes). A primer on the purpose of labels/IDs." for details. static void ShowExampleAppWindowTitles(bool*) { // By default, Windows are uniquely identified by their title. // You can use the "##" and "###" markers to manipulate the display/ID. // Using "##" to display same title but have unique identifier. ImGui::SetNextWindowPos(ImVec2(100, 100), ImGuiCond_FirstUseEver); ImGui::Begin("Same title as another window##1"); ImGui::Text("This is window 1.\nMy title is the same as window 2, but my identifier is unique."); ImGui::End(); ImGui::SetNextWindowPos(ImVec2(100, 200), ImGuiCond_FirstUseEver); ImGui::Begin("Same title as another window##2"); ImGui::Text("This is window 2.\nMy title is the same as window 1, but my identifier is unique."); ImGui::End(); // Using "###" to display a changing title but keep a static identifier "AnimatedTitle" char buf[128]; sprintf(buf, "Animated title %c %d###AnimatedTitle", "|/-\\"[(int)(ImGui::GetTime() / 0.25f) & 3], ImGui::GetFrameCount()); ImGui::SetNextWindowPos(ImVec2(100, 300), ImGuiCond_FirstUseEver); ImGui::Begin(buf); ImGui::Text("This window has a changing title."); ImGui::End(); } //----------------------------------------------------------------------------- // [SECTION] Example App: Custom Rendering using ImDrawList API / ShowExampleAppCustomRendering() //----------------------------------------------------------------------------- // Demonstrate using the low-level ImDrawList to draw custom shapes. static void ShowExampleAppCustomRendering(bool* p_open) { ImGui::SetNextWindowSize(ImVec2(350, 560), ImGuiCond_FirstUseEver); if (!ImGui::Begin("Example: Custom rendering", p_open)) { ImGui::End(); return; } // Tip: If you do a lot of custom rendering, you probably want to use your own geometrical types and benefit of overloaded operators, etc. // Define IM_VEC2_CLASS_EXTRA in imconfig.h to create implicit conversions between your types and ImVec2/ImVec4. // ImGui defines overloaded operators but they are internal to imgui.cpp and not exposed outside (to avoid messing with your types) // In this example we are not using the maths operators! 
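// A minimal sketch of what such an IM_VEC2_CLASS_EXTRA block in imconfig.h could look like, assuming a
// user-defined MyVec2 type (hypothetical) with public float x/y members:
//   #define IM_VEC2_CLASS_EXTRA                                        \
//       ImVec2(const MyVec2& f) { x = f.x; y = f.y; }                  \
//       operator MyVec2() const { return MyVec2(x, y); }
// With that in place, MyVec2 values convert implicitly to/from ImVec2 in draw calls like the ones below.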
ImDrawList* draw_list = ImGui::GetWindowDrawList(); if (ImGui::BeginTabBar("##TabBar")) { // Primitives if (ImGui::BeginTabItem("Primitives")) { static float sz = 36.0f; static float thickness = 3.0f; static ImVec4 colf = ImVec4(1.0f, 1.0f, 0.4f, 1.0f); ImGui::DragFloat("Size", &sz, 0.2f, 2.0f, 72.0f, "%.0f"); ImGui::DragFloat("Thickness", &thickness, 0.05f, 1.0f, 8.0f, "%.02f"); ImGui::ColorEdit4("Color", &colf.x); const ImVec2 p = ImGui::GetCursorScreenPos(); const ImU32 col = ImColor(colf); float x = p.x + 4.0f, y = p.y + 4.0f; float spacing = 10.0f; ImDrawCornerFlags corners_none = 0; ImDrawCornerFlags corners_all = ImDrawCornerFlags_All; ImDrawCornerFlags corners_tl_br = ImDrawCornerFlags_TopLeft | ImDrawCornerFlags_BotRight; for (int n = 0; n < 2; n++) { // First line uses a thickness of 1.0f, second line uses the configurable thickness float th = (n == 0) ? 1.0f : thickness; draw_list->AddCircle(ImVec2(x + sz*0.5f, y + sz*0.5f), sz*0.5f, col, 6, th); x += sz + spacing; // Hexagon draw_list->AddCircle(ImVec2(x + sz*0.5f, y + sz*0.5f), sz*0.5f, col, 20, th); x += sz + spacing; // Circle draw_list->AddRect(ImVec2(x, y), ImVec2(x + sz, y + sz), col, 0.0f, corners_none, th); x += sz + spacing; // Square draw_list->AddRect(ImVec2(x, y), ImVec2(x + sz, y + sz), col, 10.0f, corners_all, th); x += sz + spacing; // Square with all rounded corners draw_list->AddRect(ImVec2(x, y), ImVec2(x + sz, y + sz), col, 10.0f, corners_tl_br, th); x += sz + spacing; // Square with two rounded corners draw_list->AddTriangle(ImVec2(x+sz*0.5f,y), ImVec2(x+sz, y+sz-0.5f), ImVec2(x, y+sz-0.5f), col, th); x += sz + spacing; // Triangle draw_list->AddTriangle(ImVec2(x+sz*0.2f,y), ImVec2(x, y+sz-0.5f), ImVec2(x+sz*0.4f, y+sz-0.5f), col, th); x += sz*0.4f + spacing; // Thin triangle draw_list->AddLine(ImVec2(x, y), ImVec2(x + sz, y), col, th); x += sz + spacing; // Horizontal line (note: drawing a filled rectangle will be faster!) draw_list->AddLine(ImVec2(x, y), ImVec2(x, y + sz), col, th); x += spacing; // Vertical line (note: drawing a filled rectangle will be faster!) 
draw_list->AddLine(ImVec2(x, y), ImVec2(x + sz, y + sz), col, th); x += sz + spacing; // Diagonal line draw_list->AddBezierCurve(ImVec2(x, y), ImVec2(x + sz*1.3f, y + sz*0.3f), ImVec2(x + sz - sz*1.3f, y + sz - sz*0.3f), ImVec2(x + sz, y + sz), col, th); x = p.x + 4; y += sz + spacing; } draw_list->AddCircleFilled(ImVec2(x + sz*0.5f, y + sz*0.5f), sz*0.5f, col, 6); x += sz + spacing; // Hexagon draw_list->AddCircleFilled(ImVec2(x + sz*0.5f, y + sz*0.5f), sz*0.5f, col, 32); x += sz + spacing; // Circle draw_list->AddRectFilled(ImVec2(x, y), ImVec2(x + sz, y + sz), col); x += sz + spacing; // Square draw_list->AddRectFilled(ImVec2(x, y), ImVec2(x + sz, y + sz), col, 10.0f); x += sz + spacing; // Square with all rounded corners draw_list->AddRectFilled(ImVec2(x, y), ImVec2(x + sz, y + sz), col, 10.0f, corners_tl_br); x += sz + spacing; // Square with two rounded corners draw_list->AddTriangleFilled(ImVec2(x+sz*0.5f,y), ImVec2(x+sz, y+sz-0.5f), ImVec2(x, y+sz-0.5f), col); x += sz + spacing; // Triangle draw_list->AddTriangleFilled(ImVec2(x+sz*0.2f,y), ImVec2(x, y+sz-0.5f), ImVec2(x+sz*0.4f, y+sz-0.5f), col); x += sz*0.4f + spacing; // Thin triangle draw_list->AddRectFilled(ImVec2(x, y), ImVec2(x + sz, y + thickness), col); x += sz + spacing; // Horizontal line (faster than AddLine, but only handle integer thickness) draw_list->AddRectFilled(ImVec2(x, y), ImVec2(x + thickness, y + sz), col); x += spacing*2.0f; // Vertical line (faster than AddLine, but only handle integer thickness) draw_list->AddRectFilled(ImVec2(x, y), ImVec2(x + 1, y + 1), col); x += sz; // Pixel (faster than AddLine) draw_list->AddRectFilledMultiColor(ImVec2(x, y), ImVec2(x + sz, y + sz), IM_COL32(0, 0, 0, 255), IM_COL32(255, 0, 0, 255), IM_COL32(255, 255, 0, 255), IM_COL32(0, 255, 0, 255)); ImGui::Dummy(ImVec2((sz + spacing) * 9.8f, (sz + spacing) * 3)); ImGui::EndTabItem(); } if (ImGui::BeginTabItem("Canvas")) { static ImVector<ImVec2> points; static bool adding_line = false; if (ImGui::Button("Clear")) points.clear(); if (points.Size >= 2) { ImGui::SameLine(); if (ImGui::Button("Undo")) { points.pop_back(); points.pop_back(); } } ImGui::Text("Left-click and drag to add lines,\nRight-click to undo"); // Here we are using InvisibleButton() as a convenience to 1) advance the cursor and 2) allows us to use IsItemHovered() // But you can also draw directly and poll mouse/keyboard by yourself. You can manipulate the cursor using GetCursorPos() and SetCursorPos(). // If you only use the ImDrawList API, you can notify the owner window of its extends by using SetCursorPos(max). ImVec2 canvas_pos = ImGui::GetCursorScreenPos(); // ImDrawList API uses screen coordinates! 
ImVec2 canvas_size = ImGui::GetContentRegionAvail(); // Resize canvas to what's available if (canvas_size.x < 50.0f) canvas_size.x = 50.0f; if (canvas_size.y < 50.0f) canvas_size.y = 50.0f; draw_list->AddRectFilledMultiColor(canvas_pos, ImVec2(canvas_pos.x + canvas_size.x, canvas_pos.y + canvas_size.y), IM_COL32(50, 50, 50, 255), IM_COL32(50, 50, 60, 255), IM_COL32(60, 60, 70, 255), IM_COL32(50, 50, 60, 255)); draw_list->AddRect(canvas_pos, ImVec2(canvas_pos.x + canvas_size.x, canvas_pos.y + canvas_size.y), IM_COL32(255, 255, 255, 255)); bool adding_preview = false; ImGui::InvisibleButton("canvas", canvas_size); ImVec2 mouse_pos_in_canvas = ImVec2(ImGui::GetIO().MousePos.x - canvas_pos.x, ImGui::GetIO().MousePos.y - canvas_pos.y); if (adding_line) { adding_preview = true; points.push_back(mouse_pos_in_canvas); if (!ImGui::IsMouseDown(0)) adding_line = adding_preview = false; } if (ImGui::IsItemHovered()) { if (!adding_line && ImGui::IsMouseClicked(0)) { points.push_back(mouse_pos_in_canvas); adding_line = true; } if (ImGui::IsMouseClicked(1) && !points.empty()) { adding_line = adding_preview = false; points.pop_back(); points.pop_back(); } } draw_list->PushClipRect(canvas_pos, ImVec2(canvas_pos.x + canvas_size.x, canvas_pos.y + canvas_size.y), true); // clip lines within the canvas (if we resize it, etc.) for (int i = 0; i < points.Size - 1; i += 2) draw_list->AddLine(ImVec2(canvas_pos.x + points[i].x, canvas_pos.y + points[i].y), ImVec2(canvas_pos.x + points[i + 1].x, canvas_pos.y + points[i + 1].y), IM_COL32(255, 255, 0, 255), 2.0f); draw_list->PopClipRect(); if (adding_preview) points.pop_back(); ImGui::EndTabItem(); } if (ImGui::BeginTabItem("BG/FG draw lists")) { static bool draw_bg = true; static bool draw_fg = true; ImGui::Checkbox("Draw in Background draw list", &draw_bg); ImGui::Checkbox("Draw in Foreground draw list", &draw_fg); ImVec2 window_pos = ImGui::GetWindowPos(); ImVec2 window_size = ImGui::GetWindowSize(); ImVec2 window_center = ImVec2(window_pos.x + window_size.x * 0.5f, window_pos.y + window_size.y * 0.5f); if (draw_bg) ImGui::GetBackgroundDrawList()->AddCircle(window_center, window_size.x * 0.6f, IM_COL32(255, 0, 0, 200), 32, 10+4); if (draw_fg) ImGui::GetForegroundDrawList()->AddCircle(window_center, window_size.y * 0.6f, IM_COL32(0, 255, 0, 200), 32, 10); ImGui::EndTabItem(); } ImGui::EndTabBar(); } ImGui::End(); } //----------------------------------------------------------------------------- // [SECTION] Example App: Documents Handling / ShowExampleAppDocuments() //----------------------------------------------------------------------------- // Simplified structure to mimic a Document model struct MyDocument { const char* Name; // Document title bool Open; // Set when the document is open (in this demo, we keep an array of all available documents to simplify the demo) bool OpenPrev; // Copy of Open from last update. 
bool Dirty; // Set when the document has been modified bool WantClose; // Set when the document wants to close ImVec4 Color; // An arbitrary variable associated with the document MyDocument(const char* name, bool open = true, const ImVec4& color = ImVec4(1.0f,1.0f,1.0f,1.0f)) { Name = name; Open = OpenPrev = open; Dirty = false; WantClose = false; Color = color; } void DoOpen() { Open = true; } void DoQueueClose() { WantClose = true; } void DoForceClose() { Open = false; Dirty = false; } void DoSave() { Dirty = false; } // Display dummy contents for the Document static void DisplayContents(MyDocument* doc) { ImGui::PushID(doc); ImGui::Text("Document \"%s\"", doc->Name); ImGui::PushStyleColor(ImGuiCol_Text, doc->Color); ImGui::TextWrapped("Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua."); ImGui::PopStyleColor(); if (ImGui::Button("Modify", ImVec2(100, 0))) doc->Dirty = true; ImGui::SameLine(); if (ImGui::Button("Save", ImVec2(100, 0))) doc->DoSave(); ImGui::ColorEdit3("color", &doc->Color.x); // Useful to test drag and drop and hold-dragged-to-open-tab behavior. ImGui::PopID(); } // Display context menu for the Document static void DisplayContextMenu(MyDocument* doc) { if (!ImGui::BeginPopupContextItem()) return; char buf[256]; sprintf(buf, "Save %s", doc->Name); if (ImGui::MenuItem(buf, "CTRL+S", false, doc->Open)) doc->DoSave(); if (ImGui::MenuItem("Close", "CTRL+W", false, doc->Open)) doc->DoQueueClose(); ImGui::EndPopup(); } }; struct ExampleAppDocuments { ImVector<MyDocument> Documents; ExampleAppDocuments() { Documents.push_back(MyDocument("Lettuce", true, ImVec4(0.4f, 0.8f, 0.4f, 1.0f))); Documents.push_back(MyDocument("Eggplant", true, ImVec4(0.8f, 0.5f, 1.0f, 1.0f))); Documents.push_back(MyDocument("Carrot", true, ImVec4(1.0f, 0.8f, 0.5f, 1.0f))); Documents.push_back(MyDocument("Tomato", false, ImVec4(1.0f, 0.3f, 0.4f, 1.0f))); Documents.push_back(MyDocument("A Rather Long Title", false)); Documents.push_back(MyDocument("Some Document", false)); } }; // [Optional] Notify the system of Tabs/Windows closure that happened outside the regular tab interface. // If a tab has been closed programmatically (aka closed from another source such as the Checkbox() in the demo, as opposed // to clicking on the regular tab closing button) and stops being submitted, it will take a frame for the tab bar to notice its absence. // During this frame there will be a gap in the tab bar, and if the tab that has disappeared was the selected one, the tab bar // will report no selected tab during the frame. This will effectively give the impression of a flicker for one frame. // We call SetTabItemClosed() to manually notify the Tab Bar or Docking system of removed tabs to avoid this glitch. // Note that this is completely optional, and only affects tab bars with the ImGuiTabBarFlags_Reorderable flag.
static void NotifyOfDocumentsClosedElsewhere(ExampleAppDocuments& app) { for (int doc_n = 0; doc_n < app.Documents.Size; doc_n++) { MyDocument* doc = &app.Documents[doc_n]; if (!doc->Open && doc->OpenPrev) ImGui::SetTabItemClosed(doc->Name); doc->OpenPrev = doc->Open; } } void ShowExampleAppDocuments(bool* p_open) { static ExampleAppDocuments app; // Options static bool opt_reorderable = true; static ImGuiTabBarFlags opt_fitting_flags = ImGuiTabBarFlags_FittingPolicyDefault_; if (!ImGui::Begin("Example: Documents", p_open, ImGuiWindowFlags_MenuBar)) { ImGui::End(); return; } // Menu if (ImGui::BeginMenuBar()) { if (ImGui::BeginMenu("File")) { int open_count = 0; for (int doc_n = 0; doc_n < app.Documents.Size; doc_n++) open_count += app.Documents[doc_n].Open ? 1 : 0; if (ImGui::BeginMenu("Open", open_count < app.Documents.Size)) { for (int doc_n = 0; doc_n < app.Documents.Size; doc_n++) { MyDocument* doc = &app.Documents[doc_n]; if (!doc->Open) if (ImGui::MenuItem(doc->Name)) doc->DoOpen(); } ImGui::EndMenu(); } if (ImGui::MenuItem("Close All Documents", NULL, false, open_count > 0)) for (int doc_n = 0; doc_n < app.Documents.Size; doc_n++) app.Documents[doc_n].DoQueueClose(); if (ImGui::MenuItem("Exit", "Alt+F4")) {} ImGui::EndMenu(); } ImGui::EndMenuBar(); } // [Debug] List documents with one checkbox for each for (int doc_n = 0; doc_n < app.Documents.Size; doc_n++) { MyDocument* doc = &app.Documents[doc_n]; if (doc_n > 0) ImGui::SameLine(); ImGui::PushID(doc); if (ImGui::Checkbox(doc->Name, &doc->Open)) if (!doc->Open) doc->DoForceClose(); ImGui::PopID(); } ImGui::Separator(); // Submit Tab Bar and Tabs { ImGuiTabBarFlags tab_bar_flags = (opt_fitting_flags) | (opt_reorderable ? ImGuiTabBarFlags_Reorderable : 0); if (ImGui::BeginTabBar("##tabs", tab_bar_flags)) { if (opt_reorderable) NotifyOfDocumentsClosedElsewhere(app); // [DEBUG] Stress tests //if ((ImGui::GetFrameCount() % 30) == 0) docs[1].Open ^= 1; // [DEBUG] Automatically show/hide a tab. Test various interactions e.g. dragging with this on. //if (ImGui::GetIO().KeyCtrl) ImGui::SetTabItemSelected(docs[1].Name); // [DEBUG] Test SetTabItemSelected(), probably not very useful as-is anyway.. // Submit Tabs for (int doc_n = 0; doc_n < app.Documents.Size; doc_n++) { MyDocument* doc = &app.Documents[doc_n]; if (!doc->Open) continue; ImGuiTabItemFlags tab_flags = (doc->Dirty ? ImGuiTabItemFlags_UnsavedDocument : 0); bool visible = ImGui::BeginTabItem(doc->Name, &doc->Open, tab_flags); // Cancel attempt to close when unsaved add to save queue so we can display a popup. 
if (!doc->Open && doc->Dirty) { doc->Open = true; doc->DoQueueClose(); } MyDocument::DisplayContextMenu(doc); if (visible) { MyDocument::DisplayContents(doc); ImGui::EndTabItem(); } } ImGui::EndTabBar(); } } // Update closing queue static ImVector<MyDocument*> close_queue; if (close_queue.empty()) { // Close queue is locked once we started a popup for (int doc_n = 0; doc_n < app.Documents.Size; doc_n++) { MyDocument* doc = &app.Documents[doc_n]; if (doc->WantClose) { doc->WantClose = false; close_queue.push_back(doc); } } } // Display closing confirmation UI if (!close_queue.empty()) { int close_queue_unsaved_documents = 0; for (int n = 0; n < close_queue.Size; n++) if (close_queue[n]->Dirty) close_queue_unsaved_documents++; if (close_queue_unsaved_documents == 0) { // Close documents when all are unsaved for (int n = 0; n < close_queue.Size; n++) close_queue[n]->DoForceClose(); close_queue.clear(); } else { if (!ImGui::IsPopupOpen("Save?")) ImGui::OpenPopup("Save?"); if (ImGui::BeginPopupModal("Save?")) { ImGui::Text("Save change to the following items?"); ImGui::SetNextItemWidth(-1.0f); if (ImGui::ListBoxHeader("##", close_queue_unsaved_documents, 6)) { for (int n = 0; n < close_queue.Size; n++) if (close_queue[n]->Dirty) ImGui::Text("%s", close_queue[n]->Name); ImGui::ListBoxFooter(); } if (ImGui::Button("Yes", ImVec2(80, 0))) { for (int n = 0; n < close_queue.Size; n++) { if (close_queue[n]->Dirty) close_queue[n]->DoSave(); close_queue[n]->DoForceClose(); } close_queue.clear(); ImGui::CloseCurrentPopup(); } ImGui::SameLine(); if (ImGui::Button("No", ImVec2(80, 0))) { for (int n = 0; n < close_queue.Size; n++) close_queue[n]->DoForceClose(); close_queue.clear(); ImGui::CloseCurrentPopup(); } ImGui::SameLine(); if (ImGui::Button("Cancel", ImVec2(80, 0))) { close_queue.clear(); ImGui::CloseCurrentPopup(); } ImGui::EndPopup(); } } } ImGui::End(); } // End of Demo code #else void ImGui::ShowAboutWindow(bool*) {} void ImGui::ShowDemoWindow(bool*) {} void ImGui::ShowUserGuide() {} void ImGui::ShowStyleEditor(ImGuiStyle*) {} #endif
NVIDIA-Omniverse/PhysX/flow/external/imgui/imgui_draw.cpp
// dear imgui, v1.72b // (drawing and font code) /* Index of this file: // [SECTION] STB libraries implementation // [SECTION] Style functions // [SECTION] ImDrawList // [SECTION] ImDrawListSplitter // [SECTION] ImDrawData // [SECTION] Helpers ShadeVertsXXX functions // [SECTION] ImFontConfig // [SECTION] ImFontAtlas // [SECTION] ImFontAtlas glyph ranges helpers // [SECTION] ImFontGlyphRangesBuilder // [SECTION] ImFont // [SECTION] Internal Render Helpers // [SECTION] Decompression code // [SECTION] Default font data (ProggyClean.ttf) */ #if defined(_MSC_VER) && !defined(_CRT_SECURE_NO_WARNINGS) #define _CRT_SECURE_NO_WARNINGS #endif #include "imgui.h" #ifndef IMGUI_DEFINE_MATH_OPERATORS #define IMGUI_DEFINE_MATH_OPERATORS #endif #include "imgui_internal.h" #include <stdio.h> // vsnprintf, sscanf, printf #if !defined(alloca) #if defined(__GLIBC__) || defined(__sun) || defined(__CYGWIN__) || defined(__APPLE__) || defined(__SWITCH__) #include <alloca.h> // alloca (glibc uses <alloca.h>. Note that Cygwin may have _WIN32 defined, so the order matters here) #elif defined(_WIN32) #include <malloc.h> // alloca #if !defined(alloca) #define alloca _alloca // for clang with MS Codegen #endif #else #include <stdlib.h> // alloca #endif #endif // Visual Studio warnings #ifdef _MSC_VER #pragma warning (disable: 4127) // condition expression is constant #pragma warning (disable: 4505) // unreferenced local function has been removed (stb stuff) #pragma warning (disable: 4996) // 'This function or variable may be unsafe': strcpy, strdup, sprintf, vsnprintf, sscanf, fopen #endif // Clang/GCC warnings with -Weverything #if defined(__clang__) #pragma clang diagnostic ignored "-Wold-style-cast" // warning : use of old-style cast // yes, they are more terse. #pragma clang diagnostic ignored "-Wfloat-equal" // warning : comparing floating point with == or != is unsafe // storing and comparing against same constants ok. #pragma clang diagnostic ignored "-Wglobal-constructors" // warning : declaration requires a global destructor // similar to above, not sure what the exact difference is. #pragma clang diagnostic ignored "-Wsign-conversion" // warning : implicit conversion changes signedness // #if __has_warning("-Wzero-as-null-pointer-constant") #pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant" // warning : zero as null pointer constant // some standard header variations use #define NULL 0 #endif #if __has_warning("-Wcomma") #pragma clang diagnostic ignored "-Wcomma" // warning : possible misuse of comma operator here // #endif #if __has_warning("-Wreserved-id-macro") #pragma clang diagnostic ignored "-Wreserved-id-macro" // warning : macro name is a reserved identifier // #endif #if __has_warning("-Wdouble-promotion") #pragma clang diagnostic ignored "-Wdouble-promotion" // warning: implicit conversion from 'float' to 'double' when passing argument to function // using printf() is a misery with this as C++ va_arg ellipsis changes float to double. 
#endif #elif defined(__GNUC__) #pragma GCC diagnostic ignored "-Wpragmas" // warning: unknown option after '#pragma GCC diagnostic' kind #pragma GCC diagnostic ignored "-Wunused-function" // warning: 'xxxx' defined but not used #pragma GCC diagnostic ignored "-Wdouble-promotion" // warning: implicit conversion from 'float' to 'double' when passing argument to function #pragma GCC diagnostic ignored "-Wconversion" // warning: conversion to 'xxxx' from 'xxxx' may alter its value #pragma GCC diagnostic ignored "-Wstack-protector" // warning: stack protector not protecting local variables: variable length buffer #pragma GCC diagnostic ignored "-Wclass-memaccess" // [__GNUC__ >= 8] warning: 'memset/memcpy' clearing/writing an object of type 'xxxx' with no trivial copy-assignment; use assignment or value-initialization instead #endif //------------------------------------------------------------------------- // [SECTION] STB libraries implementation //------------------------------------------------------------------------- // Compile time options: //#define IMGUI_STB_NAMESPACE ImStb //#define IMGUI_STB_TRUETYPE_FILENAME "my_folder/stb_truetype.h" //#define IMGUI_STB_RECT_PACK_FILENAME "my_folder/stb_rect_pack.h" //#define IMGUI_DISABLE_STB_TRUETYPE_IMPLEMENTATION //#define IMGUI_DISABLE_STB_RECT_PACK_IMPLEMENTATION #ifdef IMGUI_STB_NAMESPACE namespace IMGUI_STB_NAMESPACE { #endif #ifdef _MSC_VER #pragma warning (push) #pragma warning (disable: 4456) // declaration of 'xx' hides previous local declaration #endif #if defined(__clang__) #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wunused-function" #pragma clang diagnostic ignored "-Wmissing-prototypes" #pragma clang diagnostic ignored "-Wimplicit-fallthrough" #pragma clang diagnostic ignored "-Wcast-qual" // warning : cast from 'const xxxx *' to 'xxx *' drops const qualifier // #endif #if defined(__GNUC__) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wtype-limits" // warning: comparison is always true due to limited range of data type [-Wtype-limits] #pragma GCC diagnostic ignored "-Wcast-qual" // warning: cast from type 'const xxxx *' to type 'xxxx *' casts away qualifiers #endif #ifndef STB_RECT_PACK_IMPLEMENTATION // in case the user already have an implementation in the _same_ compilation unit (e.g. unity builds) #ifndef IMGUI_DISABLE_STB_RECT_PACK_IMPLEMENTATION #define STBRP_STATIC #define STBRP_ASSERT(x) IM_ASSERT(x) #define STBRP_SORT ImQsort #define STB_RECT_PACK_IMPLEMENTATION #endif #ifdef IMGUI_STB_RECT_PACK_FILENAME #include IMGUI_STB_RECT_PACK_FILENAME #else #include "imstb_rectpack.h" #endif #endif #ifndef STB_TRUETYPE_IMPLEMENTATION // in case the user already have an implementation in the _same_ compilation unit (e.g. 
unity builds) #ifndef IMGUI_DISABLE_STB_TRUETYPE_IMPLEMENTATION #define STBTT_malloc(x,u) ((void)(u), IM_ALLOC(x)) #define STBTT_free(x,u) ((void)(u), IM_FREE(x)) #define STBTT_assert(x) IM_ASSERT(x) #define STBTT_fmod(x,y) ImFmod(x,y) #define STBTT_sqrt(x) ImSqrt(x) #define STBTT_pow(x,y) ImPow(x,y) #define STBTT_fabs(x) ImFabs(x) #define STBTT_ifloor(x) ((int)ImFloorStd(x)) #define STBTT_iceil(x) ((int)ImCeil(x)) #define STBTT_STATIC #define STB_TRUETYPE_IMPLEMENTATION #else #define STBTT_DEF extern #endif #ifdef IMGUI_STB_TRUETYPE_FILENAME #include IMGUI_STB_TRUETYPE_FILENAME #else #include "imstb_truetype.h" #endif #endif #if defined(__GNUC__) #pragma GCC diagnostic pop #endif #if defined(__clang__) #pragma clang diagnostic pop #endif #if defined(_MSC_VER) #pragma warning (pop) #endif #ifdef IMGUI_STB_NAMESPACE } // namespace ImStb using namespace IMGUI_STB_NAMESPACE; #endif //----------------------------------------------------------------------------- // [SECTION] Style functions //----------------------------------------------------------------------------- void ImGui::StyleColorsDark(ImGuiStyle* dst) { ImGuiStyle* style = dst ? dst : &ImGui::GetStyle(); ImVec4* colors = style->Colors; colors[ImGuiCol_Text] = ImVec4(1.00f, 1.00f, 1.00f, 1.00f); colors[ImGuiCol_TextDisabled] = ImVec4(0.50f, 0.50f, 0.50f, 1.00f); colors[ImGuiCol_WindowBg] = ImVec4(0.06f, 0.06f, 0.06f, 0.94f); colors[ImGuiCol_ChildBg] = ImVec4(0.00f, 0.00f, 0.00f, 0.00f); colors[ImGuiCol_PopupBg] = ImVec4(0.08f, 0.08f, 0.08f, 0.94f); colors[ImGuiCol_Border] = ImVec4(0.43f, 0.43f, 0.50f, 0.50f); colors[ImGuiCol_BorderShadow] = ImVec4(0.00f, 0.00f, 0.00f, 0.00f); colors[ImGuiCol_FrameBg] = ImVec4(0.16f, 0.29f, 0.48f, 0.54f); colors[ImGuiCol_FrameBgHovered] = ImVec4(0.26f, 0.59f, 0.98f, 0.40f); colors[ImGuiCol_FrameBgActive] = ImVec4(0.26f, 0.59f, 0.98f, 0.67f); colors[ImGuiCol_TitleBg] = ImVec4(0.04f, 0.04f, 0.04f, 1.00f); colors[ImGuiCol_TitleBgActive] = ImVec4(0.16f, 0.29f, 0.48f, 1.00f); colors[ImGuiCol_TitleBgCollapsed] = ImVec4(0.00f, 0.00f, 0.00f, 0.51f); colors[ImGuiCol_MenuBarBg] = ImVec4(0.14f, 0.14f, 0.14f, 1.00f); colors[ImGuiCol_ScrollbarBg] = ImVec4(0.02f, 0.02f, 0.02f, 0.53f); colors[ImGuiCol_ScrollbarGrab] = ImVec4(0.31f, 0.31f, 0.31f, 1.00f); colors[ImGuiCol_ScrollbarGrabHovered] = ImVec4(0.41f, 0.41f, 0.41f, 1.00f); colors[ImGuiCol_ScrollbarGrabActive] = ImVec4(0.51f, 0.51f, 0.51f, 1.00f); colors[ImGuiCol_CheckMark] = ImVec4(0.26f, 0.59f, 0.98f, 1.00f); colors[ImGuiCol_SliderGrab] = ImVec4(0.24f, 0.52f, 0.88f, 1.00f); colors[ImGuiCol_SliderGrabActive] = ImVec4(0.26f, 0.59f, 0.98f, 1.00f); colors[ImGuiCol_Button] = ImVec4(0.26f, 0.59f, 0.98f, 0.40f); colors[ImGuiCol_ButtonHovered] = ImVec4(0.26f, 0.59f, 0.98f, 1.00f); colors[ImGuiCol_ButtonActive] = ImVec4(0.06f, 0.53f, 0.98f, 1.00f); colors[ImGuiCol_Header] = ImVec4(0.26f, 0.59f, 0.98f, 0.31f); colors[ImGuiCol_HeaderHovered] = ImVec4(0.26f, 0.59f, 0.98f, 0.80f); colors[ImGuiCol_HeaderActive] = ImVec4(0.26f, 0.59f, 0.98f, 1.00f); colors[ImGuiCol_Separator] = colors[ImGuiCol_Border]; colors[ImGuiCol_SeparatorHovered] = ImVec4(0.10f, 0.40f, 0.75f, 0.78f); colors[ImGuiCol_SeparatorActive] = ImVec4(0.10f, 0.40f, 0.75f, 1.00f); colors[ImGuiCol_ResizeGrip] = ImVec4(0.26f, 0.59f, 0.98f, 0.25f); colors[ImGuiCol_ResizeGripHovered] = ImVec4(0.26f, 0.59f, 0.98f, 0.67f); colors[ImGuiCol_ResizeGripActive] = ImVec4(0.26f, 0.59f, 0.98f, 0.95f); colors[ImGuiCol_Tab] = ImLerp(colors[ImGuiCol_Header], colors[ImGuiCol_TitleBgActive], 0.80f); colors[ImGuiCol_TabHovered] = 
colors[ImGuiCol_HeaderHovered]; colors[ImGuiCol_TabActive] = ImLerp(colors[ImGuiCol_HeaderActive], colors[ImGuiCol_TitleBgActive], 0.60f); colors[ImGuiCol_TabUnfocused] = ImLerp(colors[ImGuiCol_Tab], colors[ImGuiCol_TitleBg], 0.80f); colors[ImGuiCol_TabUnfocusedActive] = ImLerp(colors[ImGuiCol_TabActive], colors[ImGuiCol_TitleBg], 0.40f); colors[ImGuiCol_PlotLines] = ImVec4(0.61f, 0.61f, 0.61f, 1.00f); colors[ImGuiCol_PlotLinesHovered] = ImVec4(1.00f, 0.43f, 0.35f, 1.00f); colors[ImGuiCol_PlotHistogram] = ImVec4(0.90f, 0.70f, 0.00f, 1.00f); colors[ImGuiCol_PlotHistogramHovered] = ImVec4(1.00f, 0.60f, 0.00f, 1.00f); colors[ImGuiCol_TextSelectedBg] = ImVec4(0.26f, 0.59f, 0.98f, 0.35f); colors[ImGuiCol_DragDropTarget] = ImVec4(1.00f, 1.00f, 0.00f, 0.90f); colors[ImGuiCol_NavHighlight] = ImVec4(0.26f, 0.59f, 0.98f, 1.00f); colors[ImGuiCol_NavWindowingHighlight] = ImVec4(1.00f, 1.00f, 1.00f, 0.70f); colors[ImGuiCol_NavWindowingDimBg] = ImVec4(0.80f, 0.80f, 0.80f, 0.20f); colors[ImGuiCol_ModalWindowDimBg] = ImVec4(0.80f, 0.80f, 0.80f, 0.35f); } void ImGui::StyleColorsClassic(ImGuiStyle* dst) { ImGuiStyle* style = dst ? dst : &ImGui::GetStyle(); ImVec4* colors = style->Colors; colors[ImGuiCol_Text] = ImVec4(0.90f, 0.90f, 0.90f, 1.00f); colors[ImGuiCol_TextDisabled] = ImVec4(0.60f, 0.60f, 0.60f, 1.00f); colors[ImGuiCol_WindowBg] = ImVec4(0.00f, 0.00f, 0.00f, 0.70f); colors[ImGuiCol_ChildBg] = ImVec4(0.00f, 0.00f, 0.00f, 0.00f); colors[ImGuiCol_PopupBg] = ImVec4(0.11f, 0.11f, 0.14f, 0.92f); colors[ImGuiCol_Border] = ImVec4(0.50f, 0.50f, 0.50f, 0.50f); colors[ImGuiCol_BorderShadow] = ImVec4(0.00f, 0.00f, 0.00f, 0.00f); colors[ImGuiCol_FrameBg] = ImVec4(0.43f, 0.43f, 0.43f, 0.39f); colors[ImGuiCol_FrameBgHovered] = ImVec4(0.47f, 0.47f, 0.69f, 0.40f); colors[ImGuiCol_FrameBgActive] = ImVec4(0.42f, 0.41f, 0.64f, 0.69f); colors[ImGuiCol_TitleBg] = ImVec4(0.27f, 0.27f, 0.54f, 0.83f); colors[ImGuiCol_TitleBgActive] = ImVec4(0.32f, 0.32f, 0.63f, 0.87f); colors[ImGuiCol_TitleBgCollapsed] = ImVec4(0.40f, 0.40f, 0.80f, 0.20f); colors[ImGuiCol_MenuBarBg] = ImVec4(0.40f, 0.40f, 0.55f, 0.80f); colors[ImGuiCol_ScrollbarBg] = ImVec4(0.20f, 0.25f, 0.30f, 0.60f); colors[ImGuiCol_ScrollbarGrab] = ImVec4(0.40f, 0.40f, 0.80f, 0.30f); colors[ImGuiCol_ScrollbarGrabHovered] = ImVec4(0.40f, 0.40f, 0.80f, 0.40f); colors[ImGuiCol_ScrollbarGrabActive] = ImVec4(0.41f, 0.39f, 0.80f, 0.60f); colors[ImGuiCol_CheckMark] = ImVec4(0.90f, 0.90f, 0.90f, 0.50f); colors[ImGuiCol_SliderGrab] = ImVec4(1.00f, 1.00f, 1.00f, 0.30f); colors[ImGuiCol_SliderGrabActive] = ImVec4(0.41f, 0.39f, 0.80f, 0.60f); colors[ImGuiCol_Button] = ImVec4(0.35f, 0.40f, 0.61f, 0.62f); colors[ImGuiCol_ButtonHovered] = ImVec4(0.40f, 0.48f, 0.71f, 0.79f); colors[ImGuiCol_ButtonActive] = ImVec4(0.46f, 0.54f, 0.80f, 1.00f); colors[ImGuiCol_Header] = ImVec4(0.40f, 0.40f, 0.90f, 0.45f); colors[ImGuiCol_HeaderHovered] = ImVec4(0.45f, 0.45f, 0.90f, 0.80f); colors[ImGuiCol_HeaderActive] = ImVec4(0.53f, 0.53f, 0.87f, 0.80f); colors[ImGuiCol_Separator] = ImVec4(0.50f, 0.50f, 0.50f, 0.60f); colors[ImGuiCol_SeparatorHovered] = ImVec4(0.60f, 0.60f, 0.70f, 1.00f); colors[ImGuiCol_SeparatorActive] = ImVec4(0.70f, 0.70f, 0.90f, 1.00f); colors[ImGuiCol_ResizeGrip] = ImVec4(1.00f, 1.00f, 1.00f, 0.16f); colors[ImGuiCol_ResizeGripHovered] = ImVec4(0.78f, 0.82f, 1.00f, 0.60f); colors[ImGuiCol_ResizeGripActive] = ImVec4(0.78f, 0.82f, 1.00f, 0.90f); colors[ImGuiCol_Tab] = ImLerp(colors[ImGuiCol_Header], colors[ImGuiCol_TitleBgActive], 0.80f); colors[ImGuiCol_TabHovered] = 
colors[ImGuiCol_HeaderHovered]; colors[ImGuiCol_TabActive] = ImLerp(colors[ImGuiCol_HeaderActive], colors[ImGuiCol_TitleBgActive], 0.60f); colors[ImGuiCol_TabUnfocused] = ImLerp(colors[ImGuiCol_Tab], colors[ImGuiCol_TitleBg], 0.80f); colors[ImGuiCol_TabUnfocusedActive] = ImLerp(colors[ImGuiCol_TabActive], colors[ImGuiCol_TitleBg], 0.40f); colors[ImGuiCol_PlotLines] = ImVec4(1.00f, 1.00f, 1.00f, 1.00f); colors[ImGuiCol_PlotLinesHovered] = ImVec4(0.90f, 0.70f, 0.00f, 1.00f); colors[ImGuiCol_PlotHistogram] = ImVec4(0.90f, 0.70f, 0.00f, 1.00f); colors[ImGuiCol_PlotHistogramHovered] = ImVec4(1.00f, 0.60f, 0.00f, 1.00f); colors[ImGuiCol_TextSelectedBg] = ImVec4(0.00f, 0.00f, 1.00f, 0.35f); colors[ImGuiCol_DragDropTarget] = ImVec4(1.00f, 1.00f, 0.00f, 0.90f); colors[ImGuiCol_NavHighlight] = colors[ImGuiCol_HeaderHovered]; colors[ImGuiCol_NavWindowingHighlight] = ImVec4(1.00f, 1.00f, 1.00f, 0.70f); colors[ImGuiCol_NavWindowingDimBg] = ImVec4(0.80f, 0.80f, 0.80f, 0.20f); colors[ImGuiCol_ModalWindowDimBg] = ImVec4(0.20f, 0.20f, 0.20f, 0.35f); } // Those light colors are better suited with a thicker font than the default one + FrameBorder void ImGui::StyleColorsLight(ImGuiStyle* dst) { ImGuiStyle* style = dst ? dst : &ImGui::GetStyle(); ImVec4* colors = style->Colors; colors[ImGuiCol_Text] = ImVec4(0.00f, 0.00f, 0.00f, 1.00f); colors[ImGuiCol_TextDisabled] = ImVec4(0.60f, 0.60f, 0.60f, 1.00f); colors[ImGuiCol_WindowBg] = ImVec4(0.94f, 0.94f, 0.94f, 1.00f); colors[ImGuiCol_ChildBg] = ImVec4(0.00f, 0.00f, 0.00f, 0.00f); colors[ImGuiCol_PopupBg] = ImVec4(1.00f, 1.00f, 1.00f, 0.98f); colors[ImGuiCol_Border] = ImVec4(0.00f, 0.00f, 0.00f, 0.30f); colors[ImGuiCol_BorderShadow] = ImVec4(0.00f, 0.00f, 0.00f, 0.00f); colors[ImGuiCol_FrameBg] = ImVec4(1.00f, 1.00f, 1.00f, 1.00f); colors[ImGuiCol_FrameBgHovered] = ImVec4(0.26f, 0.59f, 0.98f, 0.40f); colors[ImGuiCol_FrameBgActive] = ImVec4(0.26f, 0.59f, 0.98f, 0.67f); colors[ImGuiCol_TitleBg] = ImVec4(0.96f, 0.96f, 0.96f, 1.00f); colors[ImGuiCol_TitleBgActive] = ImVec4(0.82f, 0.82f, 0.82f, 1.00f); colors[ImGuiCol_TitleBgCollapsed] = ImVec4(1.00f, 1.00f, 1.00f, 0.51f); colors[ImGuiCol_MenuBarBg] = ImVec4(0.86f, 0.86f, 0.86f, 1.00f); colors[ImGuiCol_ScrollbarBg] = ImVec4(0.98f, 0.98f, 0.98f, 0.53f); colors[ImGuiCol_ScrollbarGrab] = ImVec4(0.69f, 0.69f, 0.69f, 0.80f); colors[ImGuiCol_ScrollbarGrabHovered] = ImVec4(0.49f, 0.49f, 0.49f, 0.80f); colors[ImGuiCol_ScrollbarGrabActive] = ImVec4(0.49f, 0.49f, 0.49f, 1.00f); colors[ImGuiCol_CheckMark] = ImVec4(0.26f, 0.59f, 0.98f, 1.00f); colors[ImGuiCol_SliderGrab] = ImVec4(0.26f, 0.59f, 0.98f, 0.78f); colors[ImGuiCol_SliderGrabActive] = ImVec4(0.46f, 0.54f, 0.80f, 0.60f); colors[ImGuiCol_Button] = ImVec4(0.26f, 0.59f, 0.98f, 0.40f); colors[ImGuiCol_ButtonHovered] = ImVec4(0.26f, 0.59f, 0.98f, 1.00f); colors[ImGuiCol_ButtonActive] = ImVec4(0.06f, 0.53f, 0.98f, 1.00f); colors[ImGuiCol_Header] = ImVec4(0.26f, 0.59f, 0.98f, 0.31f); colors[ImGuiCol_HeaderHovered] = ImVec4(0.26f, 0.59f, 0.98f, 0.80f); colors[ImGuiCol_HeaderActive] = ImVec4(0.26f, 0.59f, 0.98f, 1.00f); colors[ImGuiCol_Separator] = ImVec4(0.39f, 0.39f, 0.39f, 0.62f); colors[ImGuiCol_SeparatorHovered] = ImVec4(0.14f, 0.44f, 0.80f, 0.78f); colors[ImGuiCol_SeparatorActive] = ImVec4(0.14f, 0.44f, 0.80f, 1.00f); colors[ImGuiCol_ResizeGrip] = ImVec4(0.80f, 0.80f, 0.80f, 0.56f); colors[ImGuiCol_ResizeGripHovered] = ImVec4(0.26f, 0.59f, 0.98f, 0.67f); colors[ImGuiCol_ResizeGripActive] = ImVec4(0.26f, 0.59f, 0.98f, 0.95f); colors[ImGuiCol_Tab] = 
ImLerp(colors[ImGuiCol_Header], colors[ImGuiCol_TitleBgActive], 0.90f); colors[ImGuiCol_TabHovered] = colors[ImGuiCol_HeaderHovered]; colors[ImGuiCol_TabActive] = ImLerp(colors[ImGuiCol_HeaderActive], colors[ImGuiCol_TitleBgActive], 0.60f); colors[ImGuiCol_TabUnfocused] = ImLerp(colors[ImGuiCol_Tab], colors[ImGuiCol_TitleBg], 0.80f); colors[ImGuiCol_TabUnfocusedActive] = ImLerp(colors[ImGuiCol_TabActive], colors[ImGuiCol_TitleBg], 0.40f); colors[ImGuiCol_PlotLines] = ImVec4(0.39f, 0.39f, 0.39f, 1.00f); colors[ImGuiCol_PlotLinesHovered] = ImVec4(1.00f, 0.43f, 0.35f, 1.00f); colors[ImGuiCol_PlotHistogram] = ImVec4(0.90f, 0.70f, 0.00f, 1.00f); colors[ImGuiCol_PlotHistogramHovered] = ImVec4(1.00f, 0.45f, 0.00f, 1.00f); colors[ImGuiCol_TextSelectedBg] = ImVec4(0.26f, 0.59f, 0.98f, 0.35f); colors[ImGuiCol_DragDropTarget] = ImVec4(0.26f, 0.59f, 0.98f, 0.95f); colors[ImGuiCol_NavHighlight] = colors[ImGuiCol_HeaderHovered]; colors[ImGuiCol_NavWindowingHighlight] = ImVec4(0.70f, 0.70f, 0.70f, 0.70f); colors[ImGuiCol_NavWindowingDimBg] = ImVec4(0.20f, 0.20f, 0.20f, 0.20f); colors[ImGuiCol_ModalWindowDimBg] = ImVec4(0.20f, 0.20f, 0.20f, 0.35f); } //----------------------------------------------------------------------------- // ImDrawList //----------------------------------------------------------------------------- ImDrawListSharedData::ImDrawListSharedData() { Font = NULL; FontSize = 0.0f; CurveTessellationTol = 0.0f; ClipRectFullscreen = ImVec4(-8192.0f, -8192.0f, +8192.0f, +8192.0f); InitialFlags = ImDrawListFlags_None; // Const data for (int i = 0; i < IM_ARRAYSIZE(CircleVtx12); i++) { const float a = ((float)i * 2 * IM_PI) / (float)IM_ARRAYSIZE(CircleVtx12); CircleVtx12[i] = ImVec2(ImCos(a), ImSin(a)); } } void ImDrawList::Clear() { CmdBuffer.resize(0); IdxBuffer.resize(0); VtxBuffer.resize(0); Flags = _Data ? _Data->InitialFlags : ImDrawListFlags_None; _VtxCurrentOffset = 0; _VtxCurrentIdx = 0; _VtxWritePtr = NULL; _IdxWritePtr = NULL; _ClipRectStack.resize(0); _TextureIdStack.resize(0); _Path.resize(0); _Splitter.Clear(); } void ImDrawList::ClearFreeMemory() { CmdBuffer.clear(); IdxBuffer.clear(); VtxBuffer.clear(); _VtxCurrentIdx = 0; _VtxWritePtr = NULL; _IdxWritePtr = NULL; _ClipRectStack.clear(); _TextureIdStack.clear(); _Path.clear(); _Splitter.ClearFreeMemory(); } ImDrawList* ImDrawList::CloneOutput() const { ImDrawList* dst = IM_NEW(ImDrawList(_Data)); dst->CmdBuffer = CmdBuffer; dst->IdxBuffer = IdxBuffer; dst->VtxBuffer = VtxBuffer; dst->Flags = Flags; return dst; } // Using macros because C++ is a terrible language, we want guaranteed inline, no code in header, and no overhead in Debug builds #define GetCurrentClipRect() (_ClipRectStack.Size ? _ClipRectStack.Data[_ClipRectStack.Size-1] : _Data->ClipRectFullscreen) #define GetCurrentTextureId() (_TextureIdStack.Size ? _TextureIdStack.Data[_TextureIdStack.Size-1] : (ImTextureID)NULL) void ImDrawList::AddDrawCmd() { ImDrawCmd draw_cmd; draw_cmd.ClipRect = GetCurrentClipRect(); draw_cmd.TextureId = GetCurrentTextureId(); draw_cmd.VtxOffset = _VtxCurrentOffset; draw_cmd.IdxOffset = IdxBuffer.Size; IM_ASSERT(draw_cmd.ClipRect.x <= draw_cmd.ClipRect.z && draw_cmd.ClipRect.y <= draw_cmd.ClipRect.w); CmdBuffer.push_back(draw_cmd); } void ImDrawList::AddCallback(ImDrawCallback callback, void* callback_data) { ImDrawCmd* current_cmd = CmdBuffer.Size ? 
&CmdBuffer.back() : NULL; if (!current_cmd || current_cmd->ElemCount != 0 || current_cmd->UserCallback != NULL) { AddDrawCmd(); current_cmd = &CmdBuffer.back(); } current_cmd->UserCallback = callback; current_cmd->UserCallbackData = callback_data; AddDrawCmd(); // Force a new command after us (see comment below) } // Our scheme may appear a bit unusual: basically we want the most-common calls (AddLine, AddRect, etc.) to not have to perform any check, so we always have a command ready in the stack. // The cost of figuring out if a new command has to be added or if we can merge is paid in those Update** functions only. void ImDrawList::UpdateClipRect() { // If current command is used with different settings we need to add a new command const ImVec4 curr_clip_rect = GetCurrentClipRect(); ImDrawCmd* curr_cmd = CmdBuffer.Size > 0 ? &CmdBuffer.Data[CmdBuffer.Size-1] : NULL; if (!curr_cmd || (curr_cmd->ElemCount != 0 && memcmp(&curr_cmd->ClipRect, &curr_clip_rect, sizeof(ImVec4)) != 0) || curr_cmd->UserCallback != NULL) { AddDrawCmd(); return; } // Try to merge with previous command if it matches, else use current command ImDrawCmd* prev_cmd = CmdBuffer.Size > 1 ? curr_cmd - 1 : NULL; if (curr_cmd->ElemCount == 0 && prev_cmd && memcmp(&prev_cmd->ClipRect, &curr_clip_rect, sizeof(ImVec4)) == 0 && prev_cmd->TextureId == GetCurrentTextureId() && prev_cmd->UserCallback == NULL) CmdBuffer.pop_back(); else curr_cmd->ClipRect = curr_clip_rect; } void ImDrawList::UpdateTextureID() { // If current command is used with different settings we need to add a new command const ImTextureID curr_texture_id = GetCurrentTextureId(); ImDrawCmd* curr_cmd = CmdBuffer.Size ? &CmdBuffer.back() : NULL; if (!curr_cmd || (curr_cmd->ElemCount != 0 && curr_cmd->TextureId != curr_texture_id) || curr_cmd->UserCallback != NULL) { AddDrawCmd(); return; } // Try to merge with previous command if it matches, else use current command ImDrawCmd* prev_cmd = CmdBuffer.Size > 1 ? curr_cmd - 1 : NULL; if (curr_cmd->ElemCount == 0 && prev_cmd && prev_cmd->TextureId == curr_texture_id && memcmp(&prev_cmd->ClipRect, &GetCurrentClipRect(), sizeof(ImVec4)) == 0 && prev_cmd->UserCallback == NULL) CmdBuffer.pop_back(); else curr_cmd->TextureId = curr_texture_id; } #undef GetCurrentClipRect #undef GetCurrentTextureId // Render-level scissoring. This is passed down to your render function but not used for CPU-side coarse clipping.
Prefer using higher-level ImGui::PushClipRect() to affect logic (hit-testing and widget culling) void ImDrawList::PushClipRect(ImVec2 cr_min, ImVec2 cr_max, bool intersect_with_current_clip_rect) { ImVec4 cr(cr_min.x, cr_min.y, cr_max.x, cr_max.y); if (intersect_with_current_clip_rect && _ClipRectStack.Size) { ImVec4 current = _ClipRectStack.Data[_ClipRectStack.Size-1]; if (cr.x < current.x) cr.x = current.x; if (cr.y < current.y) cr.y = current.y; if (cr.z > current.z) cr.z = current.z; if (cr.w > current.w) cr.w = current.w; } cr.z = ImMax(cr.x, cr.z); cr.w = ImMax(cr.y, cr.w); _ClipRectStack.push_back(cr); UpdateClipRect(); } void ImDrawList::PushClipRectFullScreen() { PushClipRect(ImVec2(_Data->ClipRectFullscreen.x, _Data->ClipRectFullscreen.y), ImVec2(_Data->ClipRectFullscreen.z, _Data->ClipRectFullscreen.w)); } void ImDrawList::PopClipRect() { IM_ASSERT(_ClipRectStack.Size > 0); _ClipRectStack.pop_back(); UpdateClipRect(); } void ImDrawList::PushTextureID(ImTextureID texture_id) { _TextureIdStack.push_back(texture_id); UpdateTextureID(); } void ImDrawList::PopTextureID() { IM_ASSERT(_TextureIdStack.Size > 0); _TextureIdStack.pop_back(); UpdateTextureID(); } // NB: this can be called with negative count for removing primitives (as long as the result does not underflow) void ImDrawList::PrimReserve(int idx_count, int vtx_count) { // Large mesh support (when enabled) if (sizeof(ImDrawIdx) == 2 && (_VtxCurrentIdx + vtx_count >= (1 << 16)) && (Flags & ImDrawListFlags_AllowVtxOffset)) { _VtxCurrentOffset = VtxBuffer.Size; _VtxCurrentIdx = 0; AddDrawCmd(); } ImDrawCmd& draw_cmd = CmdBuffer.Data[CmdBuffer.Size-1]; draw_cmd.ElemCount += idx_count; int vtx_buffer_old_size = VtxBuffer.Size; VtxBuffer.resize(vtx_buffer_old_size + vtx_count); _VtxWritePtr = VtxBuffer.Data + vtx_buffer_old_size; int idx_buffer_old_size = IdxBuffer.Size; IdxBuffer.resize(idx_buffer_old_size + idx_count); _IdxWritePtr = IdxBuffer.Data + idx_buffer_old_size; } // Fully unrolled with inline call to keep our debug builds decently fast. 
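// Illustrative usage sketch (caller-side code, not part of this file): the low-level Prim* API
// always requires a matching PrimReserve() first, so ElemCount and the write pointers set up
// above are valid before any vertices/indices are written. Emitting one solid quad by hand
// looks like what AddRectFilled() does internally ('draw_list' is assumed to be a valid
// ImDrawList*, e.g. ImGui::GetWindowDrawList()):
//   draw_list->PrimReserve(6, 4);    // 6 indices + 4 vertices for one quad
//   draw_list->PrimRect(ImVec2(10.0f, 10.0f), ImVec2(50.0f, 30.0f), IM_COL32(255, 0, 0, 255));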
void ImDrawList::PrimRect(const ImVec2& a, const ImVec2& c, ImU32 col) { ImVec2 b(c.x, a.y), d(a.x, c.y), uv(_Data->TexUvWhitePixel); ImDrawIdx idx = (ImDrawIdx)_VtxCurrentIdx; _IdxWritePtr[0] = idx; _IdxWritePtr[1] = (ImDrawIdx)(idx+1); _IdxWritePtr[2] = (ImDrawIdx)(idx+2); _IdxWritePtr[3] = idx; _IdxWritePtr[4] = (ImDrawIdx)(idx+2); _IdxWritePtr[5] = (ImDrawIdx)(idx+3); _VtxWritePtr[0].pos = a; _VtxWritePtr[0].uv = uv; _VtxWritePtr[0].col = col; _VtxWritePtr[1].pos = b; _VtxWritePtr[1].uv = uv; _VtxWritePtr[1].col = col; _VtxWritePtr[2].pos = c; _VtxWritePtr[2].uv = uv; _VtxWritePtr[2].col = col; _VtxWritePtr[3].pos = d; _VtxWritePtr[3].uv = uv; _VtxWritePtr[3].col = col; _VtxWritePtr += 4; _VtxCurrentIdx += 4; _IdxWritePtr += 6; } void ImDrawList::PrimRectUV(const ImVec2& a, const ImVec2& c, const ImVec2& uv_a, const ImVec2& uv_c, ImU32 col) { ImVec2 b(c.x, a.y), d(a.x, c.y), uv_b(uv_c.x, uv_a.y), uv_d(uv_a.x, uv_c.y); ImDrawIdx idx = (ImDrawIdx)_VtxCurrentIdx; _IdxWritePtr[0] = idx; _IdxWritePtr[1] = (ImDrawIdx)(idx+1); _IdxWritePtr[2] = (ImDrawIdx)(idx+2); _IdxWritePtr[3] = idx; _IdxWritePtr[4] = (ImDrawIdx)(idx+2); _IdxWritePtr[5] = (ImDrawIdx)(idx+3); _VtxWritePtr[0].pos = a; _VtxWritePtr[0].uv = uv_a; _VtxWritePtr[0].col = col; _VtxWritePtr[1].pos = b; _VtxWritePtr[1].uv = uv_b; _VtxWritePtr[1].col = col; _VtxWritePtr[2].pos = c; _VtxWritePtr[2].uv = uv_c; _VtxWritePtr[2].col = col; _VtxWritePtr[3].pos = d; _VtxWritePtr[3].uv = uv_d; _VtxWritePtr[3].col = col; _VtxWritePtr += 4; _VtxCurrentIdx += 4; _IdxWritePtr += 6; } void ImDrawList::PrimQuadUV(const ImVec2& a, const ImVec2& b, const ImVec2& c, const ImVec2& d, const ImVec2& uv_a, const ImVec2& uv_b, const ImVec2& uv_c, const ImVec2& uv_d, ImU32 col) { ImDrawIdx idx = (ImDrawIdx)_VtxCurrentIdx; _IdxWritePtr[0] = idx; _IdxWritePtr[1] = (ImDrawIdx)(idx+1); _IdxWritePtr[2] = (ImDrawIdx)(idx+2); _IdxWritePtr[3] = idx; _IdxWritePtr[4] = (ImDrawIdx)(idx+2); _IdxWritePtr[5] = (ImDrawIdx)(idx+3); _VtxWritePtr[0].pos = a; _VtxWritePtr[0].uv = uv_a; _VtxWritePtr[0].col = col; _VtxWritePtr[1].pos = b; _VtxWritePtr[1].uv = uv_b; _VtxWritePtr[1].col = col; _VtxWritePtr[2].pos = c; _VtxWritePtr[2].uv = uv_c; _VtxWritePtr[2].col = col; _VtxWritePtr[3].pos = d; _VtxWritePtr[3].uv = uv_d; _VtxWritePtr[3].col = col; _VtxWritePtr += 4; _VtxCurrentIdx += 4; _IdxWritePtr += 6; } // On AddPolyline() and AddConvexPolyFilled() we intentionally avoid using ImVec2 and superfluous function calls to optimize debug/non-inlined builds. // Those macros expect l-values. #define IM_NORMALIZE2F_OVER_ZERO(VX,VY) { float d2 = VX*VX + VY*VY; if (d2 > 0.0f) { float inv_len = 1.0f / ImSqrt(d2); VX *= inv_len; VY *= inv_len; } } #define IM_FIXNORMAL2F(VX,VY) { float d2 = VX*VX + VY*VY; if (d2 < 0.5f) d2 = 0.5f; float inv_lensq = 1.0f / d2; VX *= inv_lensq; VY *= inv_lensq; } // TODO: Thick anti-aliased line caps are missing their AA fringe. // We avoid using the ImVec2 math operators here to reduce cost to a minimum for debug/non-inlined builds. void ImDrawList::AddPolyline(const ImVec2* points, const int points_count, ImU32 col, bool closed, float thickness) { if (points_count < 2) return; const ImVec2 uv = _Data->TexUvWhitePixel; int count = points_count; if (!closed) count = points_count-1; const bool thick_line = thickness > 1.0f; if (Flags & ImDrawListFlags_AntiAliasedLines) { // Anti-aliased stroke const float AA_SIZE = 1.0f; const ImU32 col_trans = col & ~IM_COL32_A_MASK; const int idx_count = thick_line ?
count*18 : count*12; const int vtx_count = thick_line ? points_count*4 : points_count*3; PrimReserve(idx_count, vtx_count); // Temporary buffer ImVec2* temp_normals = (ImVec2*)alloca(points_count * (thick_line ? 5 : 3) * sizeof(ImVec2)); //-V630 ImVec2* temp_points = temp_normals + points_count; for (int i1 = 0; i1 < count; i1++) { const int i2 = (i1+1) == points_count ? 0 : i1+1; float dx = points[i2].x - points[i1].x; float dy = points[i2].y - points[i1].y; IM_NORMALIZE2F_OVER_ZERO(dx, dy); temp_normals[i1].x = dy; temp_normals[i1].y = -dx; } if (!closed) temp_normals[points_count-1] = temp_normals[points_count-2]; if (!thick_line) { if (!closed) { temp_points[0] = points[0] + temp_normals[0] * AA_SIZE; temp_points[1] = points[0] - temp_normals[0] * AA_SIZE; temp_points[(points_count-1)*2+0] = points[points_count-1] + temp_normals[points_count-1] * AA_SIZE; temp_points[(points_count-1)*2+1] = points[points_count-1] - temp_normals[points_count-1] * AA_SIZE; } // FIXME-OPT: Merge the different loops, possibly remove the temporary buffer. unsigned int idx1 = _VtxCurrentIdx; for (int i1 = 0; i1 < count; i1++) { const int i2 = (i1+1) == points_count ? 0 : i1+1; unsigned int idx2 = (i1+1) == points_count ? _VtxCurrentIdx : idx1+3; // Average normals float dm_x = (temp_normals[i1].x + temp_normals[i2].x) * 0.5f; float dm_y = (temp_normals[i1].y + temp_normals[i2].y) * 0.5f; IM_FIXNORMAL2F(dm_x, dm_y) dm_x *= AA_SIZE; dm_y *= AA_SIZE; // Add temporary vertexes ImVec2* out_vtx = &temp_points[i2*2]; out_vtx[0].x = points[i2].x + dm_x; out_vtx[0].y = points[i2].y + dm_y; out_vtx[1].x = points[i2].x - dm_x; out_vtx[1].y = points[i2].y - dm_y; // Add indexes _IdxWritePtr[0] = (ImDrawIdx)(idx2+0); _IdxWritePtr[1] = (ImDrawIdx)(idx1+0); _IdxWritePtr[2] = (ImDrawIdx)(idx1+2); _IdxWritePtr[3] = (ImDrawIdx)(idx1+2); _IdxWritePtr[4] = (ImDrawIdx)(idx2+2); _IdxWritePtr[5] = (ImDrawIdx)(idx2+0); _IdxWritePtr[6] = (ImDrawIdx)(idx2+1); _IdxWritePtr[7] = (ImDrawIdx)(idx1+1); _IdxWritePtr[8] = (ImDrawIdx)(idx1+0); _IdxWritePtr[9] = (ImDrawIdx)(idx1+0); _IdxWritePtr[10]= (ImDrawIdx)(idx2+0); _IdxWritePtr[11]= (ImDrawIdx)(idx2+1); _IdxWritePtr += 12; idx1 = idx2; } // Add vertexes for (int i = 0; i < points_count; i++) { _VtxWritePtr[0].pos = points[i]; _VtxWritePtr[0].uv = uv; _VtxWritePtr[0].col = col; _VtxWritePtr[1].pos = temp_points[i*2+0]; _VtxWritePtr[1].uv = uv; _VtxWritePtr[1].col = col_trans; _VtxWritePtr[2].pos = temp_points[i*2+1]; _VtxWritePtr[2].uv = uv; _VtxWritePtr[2].col = col_trans; _VtxWritePtr += 3; } } else { const float half_inner_thickness = (thickness - AA_SIZE) * 0.5f; if (!closed) { temp_points[0] = points[0] + temp_normals[0] * (half_inner_thickness + AA_SIZE); temp_points[1] = points[0] + temp_normals[0] * (half_inner_thickness); temp_points[2] = points[0] - temp_normals[0] * (half_inner_thickness); temp_points[3] = points[0] - temp_normals[0] * (half_inner_thickness + AA_SIZE); temp_points[(points_count-1)*4+0] = points[points_count-1] + temp_normals[points_count-1] * (half_inner_thickness + AA_SIZE); temp_points[(points_count-1)*4+1] = points[points_count-1] + temp_normals[points_count-1] * (half_inner_thickness); temp_points[(points_count-1)*4+2] = points[points_count-1] - temp_normals[points_count-1] * (half_inner_thickness); temp_points[(points_count-1)*4+3] = points[points_count-1] - temp_normals[points_count-1] * (half_inner_thickness + AA_SIZE); } // FIXME-OPT: Merge the different loops, possibly remove the temporary buffer. 
unsigned int idx1 = _VtxCurrentIdx; for (int i1 = 0; i1 < count; i1++) { const int i2 = (i1+1) == points_count ? 0 : i1+1; unsigned int idx2 = (i1+1) == points_count ? _VtxCurrentIdx : idx1+4; // Average normals float dm_x = (temp_normals[i1].x + temp_normals[i2].x) * 0.5f; float dm_y = (temp_normals[i1].y + temp_normals[i2].y) * 0.5f; IM_FIXNORMAL2F(dm_x, dm_y); float dm_out_x = dm_x * (half_inner_thickness + AA_SIZE); float dm_out_y = dm_y * (half_inner_thickness + AA_SIZE); float dm_in_x = dm_x * half_inner_thickness; float dm_in_y = dm_y * half_inner_thickness; // Add temporary vertexes ImVec2* out_vtx = &temp_points[i2*4]; out_vtx[0].x = points[i2].x + dm_out_x; out_vtx[0].y = points[i2].y + dm_out_y; out_vtx[1].x = points[i2].x + dm_in_x; out_vtx[1].y = points[i2].y + dm_in_y; out_vtx[2].x = points[i2].x - dm_in_x; out_vtx[2].y = points[i2].y - dm_in_y; out_vtx[3].x = points[i2].x - dm_out_x; out_vtx[3].y = points[i2].y - dm_out_y; // Add indexes _IdxWritePtr[0] = (ImDrawIdx)(idx2+1); _IdxWritePtr[1] = (ImDrawIdx)(idx1+1); _IdxWritePtr[2] = (ImDrawIdx)(idx1+2); _IdxWritePtr[3] = (ImDrawIdx)(idx1+2); _IdxWritePtr[4] = (ImDrawIdx)(idx2+2); _IdxWritePtr[5] = (ImDrawIdx)(idx2+1); _IdxWritePtr[6] = (ImDrawIdx)(idx2+1); _IdxWritePtr[7] = (ImDrawIdx)(idx1+1); _IdxWritePtr[8] = (ImDrawIdx)(idx1+0); _IdxWritePtr[9] = (ImDrawIdx)(idx1+0); _IdxWritePtr[10] = (ImDrawIdx)(idx2+0); _IdxWritePtr[11] = (ImDrawIdx)(idx2+1); _IdxWritePtr[12] = (ImDrawIdx)(idx2+2); _IdxWritePtr[13] = (ImDrawIdx)(idx1+2); _IdxWritePtr[14] = (ImDrawIdx)(idx1+3); _IdxWritePtr[15] = (ImDrawIdx)(idx1+3); _IdxWritePtr[16] = (ImDrawIdx)(idx2+3); _IdxWritePtr[17] = (ImDrawIdx)(idx2+2); _IdxWritePtr += 18; idx1 = idx2; } // Add vertexes for (int i = 0; i < points_count; i++) { _VtxWritePtr[0].pos = temp_points[i*4+0]; _VtxWritePtr[0].uv = uv; _VtxWritePtr[0].col = col_trans; _VtxWritePtr[1].pos = temp_points[i*4+1]; _VtxWritePtr[1].uv = uv; _VtxWritePtr[1].col = col; _VtxWritePtr[2].pos = temp_points[i*4+2]; _VtxWritePtr[2].uv = uv; _VtxWritePtr[2].col = col; _VtxWritePtr[3].pos = temp_points[i*4+3]; _VtxWritePtr[3].uv = uv; _VtxWritePtr[3].col = col_trans; _VtxWritePtr += 4; } } _VtxCurrentIdx += (ImDrawIdx)vtx_count; } else { // Non Anti-aliased Stroke const int idx_count = count*6; const int vtx_count = count*4; // FIXME-OPT: Not sharing edges PrimReserve(idx_count, vtx_count); for (int i1 = 0; i1 < count; i1++) { const int i2 = (i1+1) == points_count ? 
0 : i1+1; const ImVec2& p1 = points[i1]; const ImVec2& p2 = points[i2]; float dx = p2.x - p1.x; float dy = p2.y - p1.y; IM_NORMALIZE2F_OVER_ZERO(dx, dy); dx *= (thickness * 0.5f); dy *= (thickness * 0.5f); _VtxWritePtr[0].pos.x = p1.x + dy; _VtxWritePtr[0].pos.y = p1.y - dx; _VtxWritePtr[0].uv = uv; _VtxWritePtr[0].col = col; _VtxWritePtr[1].pos.x = p2.x + dy; _VtxWritePtr[1].pos.y = p2.y - dx; _VtxWritePtr[1].uv = uv; _VtxWritePtr[1].col = col; _VtxWritePtr[2].pos.x = p2.x - dy; _VtxWritePtr[2].pos.y = p2.y + dx; _VtxWritePtr[2].uv = uv; _VtxWritePtr[2].col = col; _VtxWritePtr[3].pos.x = p1.x - dy; _VtxWritePtr[3].pos.y = p1.y + dx; _VtxWritePtr[3].uv = uv; _VtxWritePtr[3].col = col; _VtxWritePtr += 4; _IdxWritePtr[0] = (ImDrawIdx)(_VtxCurrentIdx); _IdxWritePtr[1] = (ImDrawIdx)(_VtxCurrentIdx+1); _IdxWritePtr[2] = (ImDrawIdx)(_VtxCurrentIdx+2); _IdxWritePtr[3] = (ImDrawIdx)(_VtxCurrentIdx); _IdxWritePtr[4] = (ImDrawIdx)(_VtxCurrentIdx+2); _IdxWritePtr[5] = (ImDrawIdx)(_VtxCurrentIdx+3); _IdxWritePtr += 6; _VtxCurrentIdx += 4; } } } // We intentionally avoid using ImVec2 and its math operators here to reduce cost to a minimum for debug/non-inlined builds. void ImDrawList::AddConvexPolyFilled(const ImVec2* points, const int points_count, ImU32 col) { if (points_count < 3) return; const ImVec2 uv = _Data->TexUvWhitePixel; if (Flags & ImDrawListFlags_AntiAliasedFill) { // Anti-aliased Fill const float AA_SIZE = 1.0f; const ImU32 col_trans = col & ~IM_COL32_A_MASK; const int idx_count = (points_count-2)*3 + points_count*6; const int vtx_count = (points_count*2); PrimReserve(idx_count, vtx_count); // Add indexes for fill unsigned int vtx_inner_idx = _VtxCurrentIdx; unsigned int vtx_outer_idx = _VtxCurrentIdx+1; for (int i = 2; i < points_count; i++) { _IdxWritePtr[0] = (ImDrawIdx)(vtx_inner_idx); _IdxWritePtr[1] = (ImDrawIdx)(vtx_inner_idx+((i-1)<<1)); _IdxWritePtr[2] = (ImDrawIdx)(vtx_inner_idx+(i<<1)); _IdxWritePtr += 3; } // Compute normals ImVec2* temp_normals = (ImVec2*)alloca(points_count * sizeof(ImVec2)); //-V630 for (int i0 = points_count-1, i1 = 0; i1 < points_count; i0 = i1++) { const ImVec2& p0 = points[i0]; const ImVec2& p1 = points[i1]; float dx = p1.x - p0.x; float dy = p1.y - p0.y; IM_NORMALIZE2F_OVER_ZERO(dx, dy); temp_normals[i0].x = dy; temp_normals[i0].y = -dx; } for (int i0 = points_count-1, i1 = 0; i1 < points_count; i0 = i1++) { // Average normals const ImVec2& n0 = temp_normals[i0]; const ImVec2& n1 = temp_normals[i1]; float dm_x = (n0.x + n1.x) * 0.5f; float dm_y = (n0.y + n1.y) * 0.5f; IM_FIXNORMAL2F(dm_x, dm_y); dm_x *= AA_SIZE * 0.5f; dm_y *= AA_SIZE * 0.5f; // Add vertices _VtxWritePtr[0].pos.x = (points[i1].x - dm_x); _VtxWritePtr[0].pos.y = (points[i1].y - dm_y); _VtxWritePtr[0].uv = uv; _VtxWritePtr[0].col = col; // Inner _VtxWritePtr[1].pos.x = (points[i1].x + dm_x); _VtxWritePtr[1].pos.y = (points[i1].y + dm_y); _VtxWritePtr[1].uv = uv; _VtxWritePtr[1].col = col_trans; // Outer _VtxWritePtr += 2; // Add indexes for fringes _IdxWritePtr[0] = (ImDrawIdx)(vtx_inner_idx+(i1<<1)); _IdxWritePtr[1] = (ImDrawIdx)(vtx_inner_idx+(i0<<1)); _IdxWritePtr[2] = (ImDrawIdx)(vtx_outer_idx+(i0<<1)); _IdxWritePtr[3] = (ImDrawIdx)(vtx_outer_idx+(i0<<1)); _IdxWritePtr[4] = (ImDrawIdx)(vtx_outer_idx+(i1<<1)); _IdxWritePtr[5] = (ImDrawIdx)(vtx_inner_idx+(i1<<1)); _IdxWritePtr += 6; } _VtxCurrentIdx += (ImDrawIdx)vtx_count; } else { // Non Anti-aliased Fill const int idx_count = (points_count-2)*3; const int vtx_count = points_count; PrimReserve(idx_count, vtx_count); for (int i = 0; 
i < vtx_count; i++) { _VtxWritePtr[0].pos = points[i]; _VtxWritePtr[0].uv = uv; _VtxWritePtr[0].col = col; _VtxWritePtr++; } for (int i = 2; i < points_count; i++) { _IdxWritePtr[0] = (ImDrawIdx)(_VtxCurrentIdx); _IdxWritePtr[1] = (ImDrawIdx)(_VtxCurrentIdx+i-1); _IdxWritePtr[2] = (ImDrawIdx)(_VtxCurrentIdx+i); _IdxWritePtr += 3; } _VtxCurrentIdx += (ImDrawIdx)vtx_count; } } void ImDrawList::PathArcToFast(const ImVec2& centre, float radius, int a_min_of_12, int a_max_of_12) { if (radius == 0.0f || a_min_of_12 > a_max_of_12) { _Path.push_back(centre); return; } _Path.reserve(_Path.Size + (a_max_of_12 - a_min_of_12 + 1)); for (int a = a_min_of_12; a <= a_max_of_12; a++) { const ImVec2& c = _Data->CircleVtx12[a % IM_ARRAYSIZE(_Data->CircleVtx12)]; _Path.push_back(ImVec2(centre.x + c.x * radius, centre.y + c.y * radius)); } } void ImDrawList::PathArcTo(const ImVec2& centre, float radius, float a_min, float a_max, int num_segments) { if (radius == 0.0f) { _Path.push_back(centre); return; } // Note that we are adding a point at both a_min and a_max. // If you are trying to draw a full closed circle you don't want the overlapping points! _Path.reserve(_Path.Size + (num_segments + 1)); for (int i = 0; i <= num_segments; i++) { const float a = a_min + ((float)i / (float)num_segments) * (a_max - a_min); _Path.push_back(ImVec2(centre.x + ImCos(a) * radius, centre.y + ImSin(a) * radius)); } } static void PathBezierToCasteljau(ImVector<ImVec2>* path, float x1, float y1, float x2, float y2, float x3, float y3, float x4, float y4, float tess_tol, int level) { float dx = x4 - x1; float dy = y4 - y1; float d2 = ((x2 - x4) * dy - (y2 - y4) * dx); float d3 = ((x3 - x4) * dy - (y3 - y4) * dx); d2 = (d2 >= 0) ? d2 : -d2; d3 = (d3 >= 0) ? d3 : -d3; if ((d2+d3) * (d2+d3) < tess_tol * (dx*dx + dy*dy)) { path->push_back(ImVec2(x4, y4)); } else if (level < 10) { float x12 = (x1+x2)*0.5f, y12 = (y1+y2)*0.5f; float x23 = (x2+x3)*0.5f, y23 = (y2+y3)*0.5f; float x34 = (x3+x4)*0.5f, y34 = (y3+y4)*0.5f; float x123 = (x12+x23)*0.5f, y123 = (y12+y23)*0.5f; float x234 = (x23+x34)*0.5f, y234 = (y23+y34)*0.5f; float x1234 = (x123+x234)*0.5f, y1234 = (y123+y234)*0.5f; PathBezierToCasteljau(path, x1,y1, x12,y12, x123,y123, x1234,y1234, tess_tol, level+1); PathBezierToCasteljau(path, x1234,y1234, x234,y234, x34,y34, x4,y4, tess_tol, level+1); } } void ImDrawList::PathBezierCurveTo(const ImVec2& p2, const ImVec2& p3, const ImVec2& p4, int num_segments) { ImVec2 p1 = _Path.back(); if (num_segments == 0) { // Auto-tessellated PathBezierToCasteljau(&_Path, p1.x, p1.y, p2.x, p2.y, p3.x, p3.y, p4.x, p4.y, _Data->CurveTessellationTol, 0); } else { float t_step = 1.0f / (float)num_segments; for (int i_step = 1; i_step <= num_segments; i_step++) { float t = t_step * i_step; float u = 1.0f - t; float w1 = u*u*u; float w2 = 3*u*u*t; float w3 = 3*u*t*t; float w4 = t*t*t; _Path.push_back(ImVec2(w1*p1.x + w2*p2.x + w3*p3.x + w4*p4.x, w1*p1.y + w2*p2.y + w3*p3.y + w4*p4.y)); } } } void ImDrawList::PathRect(const ImVec2& a, const ImVec2& b, float rounding, ImDrawCornerFlags rounding_corners) { rounding = ImMin(rounding, ImFabs(b.x - a.x) * ( ((rounding_corners & ImDrawCornerFlags_Top) == ImDrawCornerFlags_Top) || ((rounding_corners & ImDrawCornerFlags_Bot) == ImDrawCornerFlags_Bot) ? 0.5f : 1.0f ) - 1.0f); rounding = ImMin(rounding, ImFabs(b.y - a.y) * ( ((rounding_corners & ImDrawCornerFlags_Left) == ImDrawCornerFlags_Left) || ((rounding_corners & ImDrawCornerFlags_Right) == ImDrawCornerFlags_Right) ? 
0.5f : 1.0f ) - 1.0f); if (rounding <= 0.0f || rounding_corners == 0) { PathLineTo(a); PathLineTo(ImVec2(b.x, a.y)); PathLineTo(b); PathLineTo(ImVec2(a.x, b.y)); } else { const float rounding_tl = (rounding_corners & ImDrawCornerFlags_TopLeft) ? rounding : 0.0f; const float rounding_tr = (rounding_corners & ImDrawCornerFlags_TopRight) ? rounding : 0.0f; const float rounding_br = (rounding_corners & ImDrawCornerFlags_BotRight) ? rounding : 0.0f; const float rounding_bl = (rounding_corners & ImDrawCornerFlags_BotLeft) ? rounding : 0.0f; PathArcToFast(ImVec2(a.x + rounding_tl, a.y + rounding_tl), rounding_tl, 6, 9); PathArcToFast(ImVec2(b.x - rounding_tr, a.y + rounding_tr), rounding_tr, 9, 12); PathArcToFast(ImVec2(b.x - rounding_br, b.y - rounding_br), rounding_br, 0, 3); PathArcToFast(ImVec2(a.x + rounding_bl, b.y - rounding_bl), rounding_bl, 3, 6); } } void ImDrawList::AddLine(const ImVec2& a, const ImVec2& b, ImU32 col, float thickness) { if ((col & IM_COL32_A_MASK) == 0) return; PathLineTo(a + ImVec2(0.5f,0.5f)); PathLineTo(b + ImVec2(0.5f,0.5f)); PathStroke(col, false, thickness); } // a: upper-left, b: lower-right. we don't render 1 px sized rectangles properly. void ImDrawList::AddRect(const ImVec2& a, const ImVec2& b, ImU32 col, float rounding, ImDrawCornerFlags rounding_corners, float thickness) { if ((col & IM_COL32_A_MASK) == 0) return; if (Flags & ImDrawListFlags_AntiAliasedLines) PathRect(a + ImVec2(0.5f,0.5f), b - ImVec2(0.50f,0.50f), rounding, rounding_corners); else PathRect(a + ImVec2(0.5f,0.5f), b - ImVec2(0.49f,0.49f), rounding, rounding_corners); // Better looking lower-right corner and rounded non-AA shapes. PathStroke(col, true, thickness); } void ImDrawList::AddRectFilled(const ImVec2& a, const ImVec2& b, ImU32 col, float rounding, ImDrawCornerFlags rounding_corners) { if ((col & IM_COL32_A_MASK) == 0) return; if (rounding > 0.0f) { PathRect(a, b, rounding, rounding_corners); PathFillConvex(col); } else { PrimReserve(6, 4); PrimRect(a, b, col); } } void ImDrawList::AddRectFilledMultiColor(const ImVec2& a, const ImVec2& c, ImU32 col_upr_left, ImU32 col_upr_right, ImU32 col_bot_right, ImU32 col_bot_left) { if (((col_upr_left | col_upr_right | col_bot_right | col_bot_left) & IM_COL32_A_MASK) == 0) return; const ImVec2 uv = _Data->TexUvWhitePixel; PrimReserve(6, 4); PrimWriteIdx((ImDrawIdx)(_VtxCurrentIdx)); PrimWriteIdx((ImDrawIdx)(_VtxCurrentIdx+1)); PrimWriteIdx((ImDrawIdx)(_VtxCurrentIdx+2)); PrimWriteIdx((ImDrawIdx)(_VtxCurrentIdx)); PrimWriteIdx((ImDrawIdx)(_VtxCurrentIdx+2)); PrimWriteIdx((ImDrawIdx)(_VtxCurrentIdx+3)); PrimWriteVtx(a, uv, col_upr_left); PrimWriteVtx(ImVec2(c.x, a.y), uv, col_upr_right); PrimWriteVtx(c, uv, col_bot_right); PrimWriteVtx(ImVec2(a.x, c.y), uv, col_bot_left); } void ImDrawList::AddQuad(const ImVec2& a, const ImVec2& b, const ImVec2& c, const ImVec2& d, ImU32 col, float thickness) { if ((col & IM_COL32_A_MASK) == 0) return; PathLineTo(a); PathLineTo(b); PathLineTo(c); PathLineTo(d); PathStroke(col, true, thickness); } void ImDrawList::AddQuadFilled(const ImVec2& a, const ImVec2& b, const ImVec2& c, const ImVec2& d, ImU32 col) { if ((col & IM_COL32_A_MASK) == 0) return; PathLineTo(a); PathLineTo(b); PathLineTo(c); PathLineTo(d); PathFillConvex(col); } void ImDrawList::AddTriangle(const ImVec2& a, const ImVec2& b, const ImVec2& c, ImU32 col, float thickness) { if ((col & IM_COL32_A_MASK) == 0) return; PathLineTo(a); PathLineTo(b); PathLineTo(c); PathStroke(col, true, thickness); } void ImDrawList::AddTriangleFilled(const ImVec2& a, const 
ImVec2& b, const ImVec2& c, ImU32 col) { if ((col & IM_COL32_A_MASK) == 0) return; PathLineTo(a); PathLineTo(b); PathLineTo(c); PathFillConvex(col); } void ImDrawList::AddCircle(const ImVec2& centre, float radius, ImU32 col, int num_segments, float thickness) { if ((col & IM_COL32_A_MASK) == 0 || num_segments <= 2) return; // Because we are filling a closed shape we remove 1 from the count of segments/points const float a_max = IM_PI*2.0f * ((float)num_segments - 1.0f) / (float)num_segments; PathArcTo(centre, radius-0.5f, 0.0f, a_max, num_segments - 1); PathStroke(col, true, thickness); } void ImDrawList::AddCircleFilled(const ImVec2& centre, float radius, ImU32 col, int num_segments) { if ((col & IM_COL32_A_MASK) == 0 || num_segments <= 2) return; // Because we are filling a closed shape we remove 1 from the count of segments/points const float a_max = IM_PI*2.0f * ((float)num_segments - 1.0f) / (float)num_segments; PathArcTo(centre, radius, 0.0f, a_max, num_segments - 1); PathFillConvex(col); } void ImDrawList::AddBezierCurve(const ImVec2& pos0, const ImVec2& cp0, const ImVec2& cp1, const ImVec2& pos1, ImU32 col, float thickness, int num_segments) { if ((col & IM_COL32_A_MASK) == 0) return; PathLineTo(pos0); PathBezierCurveTo(cp0, cp1, pos1, num_segments); PathStroke(col, false, thickness); } void ImDrawList::AddText(const ImFont* font, float font_size, const ImVec2& pos, ImU32 col, const char* text_begin, const char* text_end, float wrap_width, const ImVec4* cpu_fine_clip_rect) { if ((col & IM_COL32_A_MASK) == 0) return; if (text_end == NULL) text_end = text_begin + strlen(text_begin); if (text_begin == text_end) return; // Pull default font/size from the shared ImDrawListSharedData instance if (font == NULL) font = _Data->Font; if (font_size == 0.0f) font_size = _Data->FontSize; IM_ASSERT(font->ContainerAtlas->TexID == _TextureIdStack.back()); // Use high-level ImGui::PushFont() or low-level ImDrawList::PushTextureId() to change font. 
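    // Illustrative caller-side sketch (not part of the library): to render text with a font other
    // than the current one without tripping the assert above, make sure its atlas texture is on the
    // draw list's texture stack first, e.g. via the high-level API. 'my_font' is a hypothetical
    // ImFont* from the atlas, and 'draw_list' is assumed to be the current window's draw list
    // (ImGui::GetWindowDrawList()); 'pos' and 'col' are caller-provided.
    //   ImGui::PushFont(my_font);
    //   draw_list->AddText(my_font, my_font->FontSize, pos, col, "hello");
    //   ImGui::PopFont();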
ImVec4 clip_rect = _ClipRectStack.back(); if (cpu_fine_clip_rect) { clip_rect.x = ImMax(clip_rect.x, cpu_fine_clip_rect->x); clip_rect.y = ImMax(clip_rect.y, cpu_fine_clip_rect->y); clip_rect.z = ImMin(clip_rect.z, cpu_fine_clip_rect->z); clip_rect.w = ImMin(clip_rect.w, cpu_fine_clip_rect->w); } font->RenderText(this, font_size, pos, col, clip_rect, text_begin, text_end, wrap_width, cpu_fine_clip_rect != NULL); } void ImDrawList::AddText(const ImVec2& pos, ImU32 col, const char* text_begin, const char* text_end) { AddText(NULL, 0.0f, pos, col, text_begin, text_end); } void ImDrawList::AddImage(ImTextureID user_texture_id, const ImVec2& a, const ImVec2& b, const ImVec2& uv_a, const ImVec2& uv_b, ImU32 col) { if ((col & IM_COL32_A_MASK) == 0) return; const bool push_texture_id = _TextureIdStack.empty() || user_texture_id != _TextureIdStack.back(); if (push_texture_id) PushTextureID(user_texture_id); PrimReserve(6, 4); PrimRectUV(a, b, uv_a, uv_b, col); if (push_texture_id) PopTextureID(); } void ImDrawList::AddImageQuad(ImTextureID user_texture_id, const ImVec2& a, const ImVec2& b, const ImVec2& c, const ImVec2& d, const ImVec2& uv_a, const ImVec2& uv_b, const ImVec2& uv_c, const ImVec2& uv_d, ImU32 col) { if ((col & IM_COL32_A_MASK) == 0) return; const bool push_texture_id = _TextureIdStack.empty() || user_texture_id != _TextureIdStack.back(); if (push_texture_id) PushTextureID(user_texture_id); PrimReserve(6, 4); PrimQuadUV(a, b, c, d, uv_a, uv_b, uv_c, uv_d, col); if (push_texture_id) PopTextureID(); } void ImDrawList::AddImageRounded(ImTextureID user_texture_id, const ImVec2& a, const ImVec2& b, const ImVec2& uv_a, const ImVec2& uv_b, ImU32 col, float rounding, ImDrawCornerFlags rounding_corners) { if ((col & IM_COL32_A_MASK) == 0) return; if (rounding <= 0.0f || (rounding_corners & ImDrawCornerFlags_All) == 0) { AddImage(user_texture_id, a, b, uv_a, uv_b, col); return; } const bool push_texture_id = _TextureIdStack.empty() || user_texture_id != _TextureIdStack.back(); if (push_texture_id) PushTextureID(user_texture_id); int vert_start_idx = VtxBuffer.Size; PathRect(a, b, rounding, rounding_corners); PathFillConvex(col); int vert_end_idx = VtxBuffer.Size; ImGui::ShadeVertsLinearUV(this, vert_start_idx, vert_end_idx, a, b, uv_a, uv_b, true); if (push_texture_id) PopTextureID(); } //----------------------------------------------------------------------------- // ImDrawListSplitter //----------------------------------------------------------------------------- // FIXME: This may be a little confusing, trying to be a little too low-level/optimal instead of just doing vector swap.. //----------------------------------------------------------------------------- void ImDrawListSplitter::ClearFreeMemory() { for (int i = 0; i < _Channels.Size; i++) { if (i == _Current) memset(&_Channels[i], 0, sizeof(_Channels[i])); // Current channel is a copy of CmdBuffer/IdxBuffer, don't destruct again _Channels[i]._CmdBuffer.clear(); _Channels[i]._IdxBuffer.clear(); } _Current = 0; _Count = 1; _Channels.clear(); } void ImDrawListSplitter::Split(ImDrawList* draw_list, int channels_count) { IM_ASSERT(_Current == 0 && _Count <= 1); int old_channels_count = _Channels.Size; if (old_channels_count < channels_count) _Channels.resize(channels_count); _Count = channels_count; // Channels[] (24/32 bytes each) hold storage that we'll swap with draw_list->_CmdBuffer/_IdxBuffer // The content of Channels[0] at this point doesn't matter. We clear it to make state tidy in a debugger but we don't strictly need to. 
// When we switch to the next channel, we'll copy draw_list->_CmdBuffer/_IdxBuffer into Channels[0] and then Channels[1] into draw_list->CmdBuffer/_IdxBuffer memset(&_Channels[0], 0, sizeof(ImDrawChannel)); for (int i = 1; i < channels_count; i++) { if (i >= old_channels_count) { IM_PLACEMENT_NEW(&_Channels[i]) ImDrawChannel(); } else { _Channels[i]._CmdBuffer.resize(0); _Channels[i]._IdxBuffer.resize(0); } if (_Channels[i]._CmdBuffer.Size == 0) { ImDrawCmd draw_cmd; draw_cmd.ClipRect = draw_list->_ClipRectStack.back(); draw_cmd.TextureId = draw_list->_TextureIdStack.back(); _Channels[i]._CmdBuffer.push_back(draw_cmd); } } } static inline bool CanMergeDrawCommands(ImDrawCmd* a, ImDrawCmd* b) { return memcmp(&a->ClipRect, &b->ClipRect, sizeof(a->ClipRect)) == 0 && a->TextureId == b->TextureId && a->VtxOffset == b->VtxOffset && !a->UserCallback && !b->UserCallback; } void ImDrawListSplitter::Merge(ImDrawList* draw_list) { // Note that we never use or rely on channels.Size because it is merely a buffer that we never shrink back to 0 to keep all sub-buffers ready for use. if (_Count <= 1) return; SetCurrentChannel(draw_list, 0); if (draw_list->CmdBuffer.Size != 0 && draw_list->CmdBuffer.back().ElemCount == 0) draw_list->CmdBuffer.pop_back(); // Calculate our final buffer sizes. Also fix the incorrect IdxOffset values in each command. int new_cmd_buffer_count = 0; int new_idx_buffer_count = 0; ImDrawCmd* last_cmd = (_Count > 0 && draw_list->CmdBuffer.Size > 0) ? &draw_list->CmdBuffer.back() : NULL; int idx_offset = last_cmd ? last_cmd->IdxOffset + last_cmd->ElemCount : 0; for (int i = 1; i < _Count; i++) { ImDrawChannel& ch = _Channels[i]; if (ch._CmdBuffer.Size > 0 && ch._CmdBuffer.back().ElemCount == 0) ch._CmdBuffer.pop_back(); if (ch._CmdBuffer.Size > 0 && last_cmd != NULL && CanMergeDrawCommands(last_cmd, &ch._CmdBuffer[0])) { // Merge previous channel last draw command with current channel first draw command if matching. last_cmd->ElemCount += ch._CmdBuffer[0].ElemCount; idx_offset += ch._CmdBuffer[0].ElemCount; ch._CmdBuffer.erase(ch._CmdBuffer.Data); } if (ch._CmdBuffer.Size > 0) last_cmd = &ch._CmdBuffer.back(); new_cmd_buffer_count += ch._CmdBuffer.Size; new_idx_buffer_count += ch._IdxBuffer.Size; for (int cmd_n = 0; cmd_n < ch._CmdBuffer.Size; cmd_n++) { ch._CmdBuffer.Data[cmd_n].IdxOffset = idx_offset; idx_offset += ch._CmdBuffer.Data[cmd_n].ElemCount; } } draw_list->CmdBuffer.resize(draw_list->CmdBuffer.Size + new_cmd_buffer_count); draw_list->IdxBuffer.resize(draw_list->IdxBuffer.Size + new_idx_buffer_count); // Write commands and indices in order (they are fairly small structures, we don't copy vertices only indices) ImDrawCmd* cmd_write = draw_list->CmdBuffer.Data + draw_list->CmdBuffer.Size - new_cmd_buffer_count; ImDrawIdx* idx_write = draw_list->IdxBuffer.Data + draw_list->IdxBuffer.Size - new_idx_buffer_count; for (int i = 1; i < _Count; i++) { ImDrawChannel& ch = _Channels[i]; if (int sz = ch._CmdBuffer.Size) { memcpy(cmd_write, ch._CmdBuffer.Data, sz * sizeof(ImDrawCmd)); cmd_write += sz; } if (int sz = ch._IdxBuffer.Size) { memcpy(idx_write, ch._IdxBuffer.Data, sz * sizeof(ImDrawIdx)); idx_write += sz; } } draw_list->_IdxWritePtr = idx_write; draw_list->UpdateClipRect(); // We call this instead of AddDrawCmd(), so that empty channels won't produce an extra draw call. _Count = 1; } void ImDrawListSplitter::SetCurrentChannel(ImDrawList* draw_list, int idx) { IM_ASSERT(idx >= 0 && idx < _Count); if (_Current == idx) return; // Overwrite ImVector (12/16 bytes), four times. 
This is merely a silly optimization instead of doing .swap() memcpy(&_Channels.Data[_Current]._CmdBuffer, &draw_list->CmdBuffer, sizeof(draw_list->CmdBuffer)); memcpy(&_Channels.Data[_Current]._IdxBuffer, &draw_list->IdxBuffer, sizeof(draw_list->IdxBuffer)); _Current = idx; memcpy(&draw_list->CmdBuffer, &_Channels.Data[idx]._CmdBuffer, sizeof(draw_list->CmdBuffer)); memcpy(&draw_list->IdxBuffer, &_Channels.Data[idx]._IdxBuffer, sizeof(draw_list->IdxBuffer)); draw_list->_IdxWritePtr = draw_list->IdxBuffer.Data + draw_list->IdxBuffer.Size; } //----------------------------------------------------------------------------- // [SECTION] ImDrawData //----------------------------------------------------------------------------- // For backward compatibility: convert all buffers from indexed to de-indexed, in case you cannot render indexed. Note: this is slow and most likely a waste of resources. Always prefer indexed rendering! void ImDrawData::DeIndexAllBuffers() { ImVector<ImDrawVert> new_vtx_buffer; TotalVtxCount = TotalIdxCount = 0; for (int i = 0; i < CmdListsCount; i++) { ImDrawList* cmd_list = CmdLists[i]; if (cmd_list->IdxBuffer.empty()) continue; new_vtx_buffer.resize(cmd_list->IdxBuffer.Size); for (int j = 0; j < cmd_list->IdxBuffer.Size; j++) new_vtx_buffer[j] = cmd_list->VtxBuffer[cmd_list->IdxBuffer[j]]; cmd_list->VtxBuffer.swap(new_vtx_buffer); cmd_list->IdxBuffer.resize(0); TotalVtxCount += cmd_list->VtxBuffer.Size; } } // Helper to scale the ClipRect field of each ImDrawCmd. // Use if your final output buffer is at a different scale than draw_data->DisplaySize, // or if there is a difference between your window resolution and framebuffer resolution. void ImDrawData::ScaleClipRects(const ImVec2& fb_scale) { for (int i = 0; i < CmdListsCount; i++) { ImDrawList* cmd_list = CmdLists[i]; for (int cmd_i = 0; cmd_i < cmd_list->CmdBuffer.Size; cmd_i++) { ImDrawCmd* cmd = &cmd_list->CmdBuffer[cmd_i]; cmd->ClipRect = ImVec4(cmd->ClipRect.x * fb_scale.x, cmd->ClipRect.y * fb_scale.y, cmd->ClipRect.z * fb_scale.x, cmd->ClipRect.w * fb_scale.y); } } } //----------------------------------------------------------------------------- // [SECTION] Helpers ShadeVertsXXX functions //----------------------------------------------------------------------------- // Generic linear color gradient, write to RGB fields, leave A untouched. 
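// Illustrative usage sketch (caller-side, not part of the library): capture the vertex range
// produced by a shape, then post-process it into a horizontal red->blue gradient. 'p_min'/'p_max'
// are hypothetical caller-provided rectangle corners; alpha is preserved as described above.
//   int vtx_start = draw_list->VtxBuffer.Size;
//   draw_list->AddRectFilled(p_min, p_max, IM_COL32_WHITE);
//   int vtx_end = draw_list->VtxBuffer.Size;
//   ImGui::ShadeVertsLinearColorGradientKeepAlpha(draw_list, vtx_start, vtx_end,
//       p_min, ImVec2(p_max.x, p_min.y), IM_COL32(255, 0, 0, 255), IM_COL32(0, 0, 255, 255));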
void ImGui::ShadeVertsLinearColorGradientKeepAlpha(ImDrawList* draw_list, int vert_start_idx, int vert_end_idx, ImVec2 gradient_p0, ImVec2 gradient_p1, ImU32 col0, ImU32 col1) { ImVec2 gradient_extent = gradient_p1 - gradient_p0; float gradient_inv_length2 = 1.0f / ImLengthSqr(gradient_extent); ImDrawVert* vert_start = draw_list->VtxBuffer.Data + vert_start_idx; ImDrawVert* vert_end = draw_list->VtxBuffer.Data + vert_end_idx; for (ImDrawVert* vert = vert_start; vert < vert_end; vert++) { float d = ImDot(vert->pos - gradient_p0, gradient_extent); float t = ImClamp(d * gradient_inv_length2, 0.0f, 1.0f); int r = ImLerp((int)(col0 >> IM_COL32_R_SHIFT) & 0xFF, (int)(col1 >> IM_COL32_R_SHIFT) & 0xFF, t); int g = ImLerp((int)(col0 >> IM_COL32_G_SHIFT) & 0xFF, (int)(col1 >> IM_COL32_G_SHIFT) & 0xFF, t); int b = ImLerp((int)(col0 >> IM_COL32_B_SHIFT) & 0xFF, (int)(col1 >> IM_COL32_B_SHIFT) & 0xFF, t); vert->col = (r << IM_COL32_R_SHIFT) | (g << IM_COL32_G_SHIFT) | (b << IM_COL32_B_SHIFT) | (vert->col & IM_COL32_A_MASK); } } // Distribute UV over (a, b) rectangle void ImGui::ShadeVertsLinearUV(ImDrawList* draw_list, int vert_start_idx, int vert_end_idx, const ImVec2& a, const ImVec2& b, const ImVec2& uv_a, const ImVec2& uv_b, bool clamp) { const ImVec2 size = b - a; const ImVec2 uv_size = uv_b - uv_a; const ImVec2 scale = ImVec2( size.x != 0.0f ? (uv_size.x / size.x) : 0.0f, size.y != 0.0f ? (uv_size.y / size.y) : 0.0f); ImDrawVert* vert_start = draw_list->VtxBuffer.Data + vert_start_idx; ImDrawVert* vert_end = draw_list->VtxBuffer.Data + vert_end_idx; if (clamp) { const ImVec2 min = ImMin(uv_a, uv_b); const ImVec2 max = ImMax(uv_a, uv_b); for (ImDrawVert* vertex = vert_start; vertex < vert_end; ++vertex) vertex->uv = ImClamp(uv_a + ImMul(ImVec2(vertex->pos.x, vertex->pos.y) - a, scale), min, max); } else { for (ImDrawVert* vertex = vert_start; vertex < vert_end; ++vertex) vertex->uv = uv_a + ImMul(ImVec2(vertex->pos.x, vertex->pos.y) - a, scale); } } //----------------------------------------------------------------------------- // [SECTION] ImFontConfig //----------------------------------------------------------------------------- ImFontConfig::ImFontConfig() { FontData = NULL; FontDataSize = 0; FontDataOwnedByAtlas = true; FontNo = 0; SizePixels = 0.0f; OversampleH = 3; // FIXME: 2 may be a better default? OversampleV = 1; PixelSnapH = false; GlyphExtraSpacing = ImVec2(0.0f, 0.0f); GlyphOffset = ImVec2(0.0f, 0.0f); GlyphRanges = NULL; GlyphMinAdvanceX = 0.0f; GlyphMaxAdvanceX = FLT_MAX; MergeMode = false; RasterizerFlags = 0x00; RasterizerMultiply = 1.0f; memset(Name, 0, sizeof(Name)); DstFont = NULL; } //----------------------------------------------------------------------------- // [SECTION] ImFontAtlas //----------------------------------------------------------------------------- // A work of art lies ahead! (. = white layer, X = black layer, others are blank) // The white texels on the top left are the ones we'll use everywhere in Dear ImGui to render filled shapes. 
const int FONT_ATLAS_DEFAULT_TEX_DATA_W_HALF = 108; const int FONT_ATLAS_DEFAULT_TEX_DATA_H = 27; const unsigned int FONT_ATLAS_DEFAULT_TEX_DATA_ID = 0x80000000; static const char FONT_ATLAS_DEFAULT_TEX_DATA_PIXELS[FONT_ATLAS_DEFAULT_TEX_DATA_W_HALF * FONT_ATLAS_DEFAULT_TEX_DATA_H + 1] = { "..- -XXXXXXX- X - X -XXXXXXX - XXXXXXX- XX " "..- -X.....X- X.X - X.X -X.....X - X.....X- X..X " "--- -XXX.XXX- X...X - X...X -X....X - X....X- X..X " "X - X.X - X.....X - X.....X -X...X - X...X- X..X " "XX - X.X -X.......X- X.......X -X..X.X - X.X..X- X..X " "X.X - X.X -XXXX.XXXX- XXXX.XXXX -X.X X.X - X.X X.X- X..XXX " "X..X - X.X - X.X - X.X -XX X.X - X.X XX- X..X..XXX " "X...X - X.X - X.X - XX X.X XX - X.X - X.X - X..X..X..XX " "X....X - X.X - X.X - X.X X.X X.X - X.X - X.X - X..X..X..X.X " "X.....X - X.X - X.X - X..X X.X X..X - X.X - X.X -XXX X..X..X..X..X" "X......X - X.X - X.X - X...XXXXXX.XXXXXX...X - X.X XX-XX X.X -X..XX........X..X" "X.......X - X.X - X.X -X.....................X- X.X X.X-X.X X.X -X...X...........X" "X........X - X.X - X.X - X...XXXXXX.XXXXXX...X - X.X..X-X..X.X - X..............X" "X.........X -XXX.XXX- X.X - X..X X.X X..X - X...X-X...X - X.............X" "X..........X-X.....X- X.X - X.X X.X X.X - X....X-X....X - X.............X" "X......XXXXX-XXXXXXX- X.X - XX X.X XX - X.....X-X.....X - X............X" "X...X..X --------- X.X - X.X - XXXXXXX-XXXXXXX - X...........X " "X..X X..X - -XXXX.XXXX- XXXX.XXXX ------------------------------------- X..........X " "X.X X..X - -X.......X- X.......X - XX XX - - X..........X " "XX X..X - - X.....X - X.....X - X.X X.X - - X........X " " X..X - X...X - X...X - X..X X..X - - X........X " " XX - X.X - X.X - X...XXXXXXXXXXXXX...X - - XXXXXXXXXX " "------------ - X - X -X.....................X- ------------------" " ----------------------------------- X...XXXXXXXXXXXXX...X - " " - X..X X..X - " " - X.X X.X - " " - XX XX - " }; static const ImVec2 FONT_ATLAS_DEFAULT_TEX_CURSOR_DATA[ImGuiMouseCursor_COUNT][3] = { // Pos ........ Size ......... Offset ...... 
{ ImVec2( 0,3), ImVec2(12,19), ImVec2( 0, 0) }, // ImGuiMouseCursor_Arrow { ImVec2(13,0), ImVec2( 7,16), ImVec2( 1, 8) }, // ImGuiMouseCursor_TextInput { ImVec2(31,0), ImVec2(23,23), ImVec2(11,11) }, // ImGuiMouseCursor_ResizeAll { ImVec2(21,0), ImVec2( 9,23), ImVec2( 4,11) }, // ImGuiMouseCursor_ResizeNS { ImVec2(55,18),ImVec2(23, 9), ImVec2(11, 4) }, // ImGuiMouseCursor_ResizeEW { ImVec2(73,0), ImVec2(17,17), ImVec2( 8, 8) }, // ImGuiMouseCursor_ResizeNESW { ImVec2(55,0), ImVec2(17,17), ImVec2( 8, 8) }, // ImGuiMouseCursor_ResizeNWSE { ImVec2(91,0), ImVec2(17,22), ImVec2( 5, 0) }, // ImGuiMouseCursor_Hand }; ImFontAtlas::ImFontAtlas() { Locked = false; Flags = ImFontAtlasFlags_None; TexID = (ImTextureID)NULL; TexDesiredWidth = 0; TexGlyphPadding = 1; TexPixelsAlpha8 = NULL; TexPixelsRGBA32 = NULL; TexWidth = TexHeight = 0; TexUvScale = ImVec2(0.0f, 0.0f); TexUvWhitePixel = ImVec2(0.0f, 0.0f); for (int n = 0; n < IM_ARRAYSIZE(CustomRectIds); n++) CustomRectIds[n] = -1; } ImFontAtlas::~ImFontAtlas() { IM_ASSERT(!Locked && "Cannot modify a locked ImFontAtlas between NewFrame() and EndFrame/Render()!"); Clear(); } void ImFontAtlas::ClearInputData() { IM_ASSERT(!Locked && "Cannot modify a locked ImFontAtlas between NewFrame() and EndFrame/Render()!"); for (int i = 0; i < ConfigData.Size; i++) if (ConfigData[i].FontData && ConfigData[i].FontDataOwnedByAtlas) { IM_FREE(ConfigData[i].FontData); ConfigData[i].FontData = NULL; } // When clearing this we lose access to the font name and other information used to build the font. for (int i = 0; i < Fonts.Size; i++) if (Fonts[i]->ConfigData >= ConfigData.Data && Fonts[i]->ConfigData < ConfigData.Data + ConfigData.Size) { Fonts[i]->ConfigData = NULL; Fonts[i]->ConfigDataCount = 0; } ConfigData.clear(); CustomRects.clear(); for (int n = 0; n < IM_ARRAYSIZE(CustomRectIds); n++) CustomRectIds[n] = -1; } void ImFontAtlas::ClearTexData() { IM_ASSERT(!Locked && "Cannot modify a locked ImFontAtlas between NewFrame() and EndFrame/Render()!"); if (TexPixelsAlpha8) IM_FREE(TexPixelsAlpha8); if (TexPixelsRGBA32) IM_FREE(TexPixelsRGBA32); TexPixelsAlpha8 = NULL; TexPixelsRGBA32 = NULL; } void ImFontAtlas::ClearFonts() { IM_ASSERT(!Locked && "Cannot modify a locked ImFontAtlas between NewFrame() and EndFrame/Render()!"); for (int i = 0; i < Fonts.Size; i++) IM_DELETE(Fonts[i]); Fonts.clear(); } void ImFontAtlas::Clear() { ClearInputData(); ClearTexData(); ClearFonts(); } void ImFontAtlas::GetTexDataAsAlpha8(unsigned char** out_pixels, int* out_width, int* out_height, int* out_bytes_per_pixel) { // Build atlas on demand if (TexPixelsAlpha8 == NULL) { if (ConfigData.empty()) AddFontDefault(); Build(); } *out_pixels = TexPixelsAlpha8; if (out_width) *out_width = TexWidth; if (out_height) *out_height = TexHeight; if (out_bytes_per_pixel) *out_bytes_per_pixel = 1; } void ImFontAtlas::GetTexDataAsRGBA32(unsigned char** out_pixels, int* out_width, int* out_height, int* out_bytes_per_pixel) { // Convert to RGBA32 format on demand // Although it is likely to be the most commonly used format, our font rendering is 1 channel / 8 bpp if (!TexPixelsRGBA32) { unsigned char* pixels = NULL; GetTexDataAsAlpha8(&pixels, NULL, NULL); if (pixels) { TexPixelsRGBA32 = (unsigned int*)IM_ALLOC((size_t)TexWidth * (size_t)TexHeight * 4); const unsigned char* src = pixels; unsigned int* dst = TexPixelsRGBA32; for (int n = TexWidth * TexHeight; n > 0; n--) *dst++ = IM_COL32(255, 255, 255, (unsigned int)(*src++)); } } *out_pixels = (unsigned char*)TexPixelsRGBA32; if (out_width) *out_width = 
TexWidth; if (out_height) *out_height = TexHeight; if (out_bytes_per_pixel) *out_bytes_per_pixel = 4; } ImFont* ImFontAtlas::AddFont(const ImFontConfig* font_cfg) { IM_ASSERT(!Locked && "Cannot modify a locked ImFontAtlas between NewFrame() and EndFrame/Render()!"); IM_ASSERT(font_cfg->FontData != NULL && font_cfg->FontDataSize > 0); IM_ASSERT(font_cfg->SizePixels > 0.0f); // Create new font if (!font_cfg->MergeMode) Fonts.push_back(IM_NEW(ImFont)); else IM_ASSERT(!Fonts.empty() && "Cannot use MergeMode for the first font"); // When using MergeMode make sure that a font has already been added before. You can use ImGui::GetIO().Fonts->AddFontDefault() to add the default imgui font. ConfigData.push_back(*font_cfg); ImFontConfig& new_font_cfg = ConfigData.back(); if (new_font_cfg.DstFont == NULL) new_font_cfg.DstFont = Fonts.back(); if (!new_font_cfg.FontDataOwnedByAtlas) { new_font_cfg.FontData = IM_ALLOC(new_font_cfg.FontDataSize); new_font_cfg.FontDataOwnedByAtlas = true; memcpy(new_font_cfg.FontData, font_cfg->FontData, (size_t)new_font_cfg.FontDataSize); } // Invalidate texture ClearTexData(); return new_font_cfg.DstFont; } // Default font TTF is compressed with stb_compress then base85 encoded (see misc/fonts/binary_to_compressed_c.cpp for encoder) static unsigned int stb_decompress_length(const unsigned char *input); static unsigned int stb_decompress(unsigned char *output, const unsigned char *input, unsigned int length); static const char* GetDefaultCompressedFontDataTTFBase85(); static unsigned int Decode85Byte(char c) { return c >= '\\' ? c-36 : c-35; } static void Decode85(const unsigned char* src, unsigned char* dst) { while (*src) { unsigned int tmp = Decode85Byte(src[0]) + 85*(Decode85Byte(src[1]) + 85*(Decode85Byte(src[2]) + 85*(Decode85Byte(src[3]) + 85*Decode85Byte(src[4])))); dst[0] = ((tmp >> 0) & 0xFF); dst[1] = ((tmp >> 8) & 0xFF); dst[2] = ((tmp >> 16) & 0xFF); dst[3] = ((tmp >> 24) & 0xFF); // We can't assume little-endianness. src += 5; dst += 4; } } // Load embedded ProggyClean.ttf at size 13, disable oversampling ImFont* ImFontAtlas::AddFontDefault(const ImFontConfig* font_cfg_template) { ImFontConfig font_cfg = font_cfg_template ? *font_cfg_template : ImFontConfig(); if (!font_cfg_template) { font_cfg.OversampleH = font_cfg.OversampleV = 1; font_cfg.PixelSnapH = true; } if (font_cfg.SizePixels <= 0.0f) font_cfg.SizePixels = 13.0f * 1.0f; if (font_cfg.Name[0] == '\0') ImFormatString(font_cfg.Name, IM_ARRAYSIZE(font_cfg.Name), "ProggyClean.ttf, %dpx", (int)font_cfg.SizePixels); const char* ttf_compressed_base85 = GetDefaultCompressedFontDataTTFBase85(); const ImWchar* glyph_ranges = font_cfg.GlyphRanges != NULL ? font_cfg.GlyphRanges : GetGlyphRangesDefault(); ImFont* font = AddFontFromMemoryCompressedBase85TTF(ttf_compressed_base85, font_cfg.SizePixels, &font_cfg, glyph_ranges); font->DisplayOffset.y = 1.0f; return font; } ImFont* ImFontAtlas::AddFontFromFileTTF(const char* filename, float size_pixels, const ImFontConfig* font_cfg_template, const ImWchar* glyph_ranges) { IM_ASSERT(!Locked && "Cannot modify a locked ImFontAtlas between NewFrame() and EndFrame/Render()!"); size_t data_size = 0; void* data = ImFileLoadToMemory(filename, "rb", &data_size, 0); if (!data) { IM_ASSERT(0); // Could not load file. return NULL; } ImFontConfig font_cfg = font_cfg_template ? 
*font_cfg_template : ImFontConfig(); if (font_cfg.Name[0] == '\0') { // Store a short copy of filename into the font name for convenience const char* p; for (p = filename + strlen(filename); p > filename && p[-1] != '/' && p[-1] != '\\'; p--) {} ImFormatString(font_cfg.Name, IM_ARRAYSIZE(font_cfg.Name), "%s, %.0fpx", p, size_pixels); } return AddFontFromMemoryTTF(data, (int)data_size, size_pixels, &font_cfg, glyph_ranges); } // NB: Transfer ownership of 'ttf_data' to ImFontAtlas, unless font_cfg_template->FontDataOwnedByAtlas == false. Owned TTF buffer will be deleted after Build(). ImFont* ImFontAtlas::AddFontFromMemoryTTF(void* ttf_data, int ttf_size, float size_pixels, const ImFontConfig* font_cfg_template, const ImWchar* glyph_ranges) { IM_ASSERT(!Locked && "Cannot modify a locked ImFontAtlas between NewFrame() and EndFrame/Render()!"); ImFontConfig font_cfg = font_cfg_template ? *font_cfg_template : ImFontConfig(); IM_ASSERT(font_cfg.FontData == NULL); font_cfg.FontData = ttf_data; font_cfg.FontDataSize = ttf_size; font_cfg.SizePixels = size_pixels; if (glyph_ranges) font_cfg.GlyphRanges = glyph_ranges; return AddFont(&font_cfg); } ImFont* ImFontAtlas::AddFontFromMemoryCompressedTTF(const void* compressed_ttf_data, int compressed_ttf_size, float size_pixels, const ImFontConfig* font_cfg_template, const ImWchar* glyph_ranges) { const unsigned int buf_decompressed_size = stb_decompress_length((const unsigned char*)compressed_ttf_data); unsigned char* buf_decompressed_data = (unsigned char *)IM_ALLOC(buf_decompressed_size); stb_decompress(buf_decompressed_data, (const unsigned char*)compressed_ttf_data, (unsigned int)compressed_ttf_size); ImFontConfig font_cfg = font_cfg_template ? *font_cfg_template : ImFontConfig(); IM_ASSERT(font_cfg.FontData == NULL); font_cfg.FontDataOwnedByAtlas = true; return AddFontFromMemoryTTF(buf_decompressed_data, (int)buf_decompressed_size, size_pixels, &font_cfg, glyph_ranges); } ImFont* ImFontAtlas::AddFontFromMemoryCompressedBase85TTF(const char* compressed_ttf_data_base85, float size_pixels, const ImFontConfig* font_cfg, const ImWchar* glyph_ranges) { int compressed_ttf_size = (((int)strlen(compressed_ttf_data_base85) + 4) / 5) * 4; void* compressed_ttf = IM_ALLOC((size_t)compressed_ttf_size); Decode85((const unsigned char*)compressed_ttf_data_base85, (unsigned char*)compressed_ttf); ImFont* font = AddFontFromMemoryCompressedTTF(compressed_ttf, compressed_ttf_size, size_pixels, font_cfg, glyph_ranges); IM_FREE(compressed_ttf); return font; } int ImFontAtlas::AddCustomRectRegular(unsigned int id, int width, int height) { IM_ASSERT(id >= 0x10000); IM_ASSERT(width > 0 && width <= 0xFFFF); IM_ASSERT(height > 0 && height <= 0xFFFF); ImFontAtlasCustomRect r; r.ID = id; r.Width = (unsigned short)width; r.Height = (unsigned short)height; CustomRects.push_back(r); return CustomRects.Size - 1; // Return index } int ImFontAtlas::AddCustomRectFontGlyph(ImFont* font, ImWchar id, int width, int height, float advance_x, const ImVec2& offset) { IM_ASSERT(font != NULL); IM_ASSERT(width > 0 && width <= 0xFFFF); IM_ASSERT(height > 0 && height <= 0xFFFF); ImFontAtlasCustomRect r; r.ID = id; r.Width = (unsigned short)width; r.Height = (unsigned short)height; r.GlyphAdvanceX = advance_x; r.GlyphOffset = offset; r.Font = font; CustomRects.push_back(r); return CustomRects.Size - 1; // Return index } void ImFontAtlas::CalcCustomRectUV(const ImFontAtlasCustomRect* rect, ImVec2* out_uv_min, ImVec2* out_uv_max) { IM_ASSERT(TexWidth > 0 && TexHeight > 0); // Font atlas needs to
be built before we can calculate UV coordinates IM_ASSERT(rect->IsPacked()); // Make sure the rectangle has been packed *out_uv_min = ImVec2((float)rect->X * TexUvScale.x, (float)rect->Y * TexUvScale.y); *out_uv_max = ImVec2((float)(rect->X + rect->Width) * TexUvScale.x, (float)(rect->Y + rect->Height) * TexUvScale.y); } bool ImFontAtlas::GetMouseCursorTexData(ImGuiMouseCursor cursor_type, ImVec2* out_offset, ImVec2* out_size, ImVec2 out_uv_border[2], ImVec2 out_uv_fill[2]) { if (cursor_type <= ImGuiMouseCursor_None || cursor_type >= ImGuiMouseCursor_COUNT) return false; if (Flags & ImFontAtlasFlags_NoMouseCursors) return false; IM_ASSERT(CustomRectIds[0] != -1); ImFontAtlasCustomRect& r = CustomRects[CustomRectIds[0]]; IM_ASSERT(r.ID == FONT_ATLAS_DEFAULT_TEX_DATA_ID); ImVec2 pos = FONT_ATLAS_DEFAULT_TEX_CURSOR_DATA[cursor_type][0] + ImVec2((float)r.X, (float)r.Y); ImVec2 size = FONT_ATLAS_DEFAULT_TEX_CURSOR_DATA[cursor_type][1]; *out_size = size; *out_offset = FONT_ATLAS_DEFAULT_TEX_CURSOR_DATA[cursor_type][2]; out_uv_border[0] = (pos) * TexUvScale; out_uv_border[1] = (pos + size) * TexUvScale; pos.x += FONT_ATLAS_DEFAULT_TEX_DATA_W_HALF + 1; out_uv_fill[0] = (pos) * TexUvScale; out_uv_fill[1] = (pos + size) * TexUvScale; return true; } bool ImFontAtlas::Build() { IM_ASSERT(!Locked && "Cannot modify a locked ImFontAtlas between NewFrame() and EndFrame/Render()!"); return ImFontAtlasBuildWithStbTruetype(this); } void ImFontAtlasBuildMultiplyCalcLookupTable(unsigned char out_table[256], float in_brighten_factor) { for (unsigned int i = 0; i < 256; i++) { unsigned int value = (unsigned int)(i * in_brighten_factor); out_table[i] = value > 255 ? 255 : (value & 0xFF); } } void ImFontAtlasBuildMultiplyRectAlpha8(const unsigned char table[256], unsigned char* pixels, int x, int y, int w, int h, int stride) { unsigned char* data = pixels + x + y * stride; for (int j = h; j > 0; j--, data += stride) for (int i = 0; i < w; i++) data[i] = table[data[i]]; } // Temporary data for one source font (multiple source fonts can be merged into one destination ImFont) // (C++03 doesn't allow instancing ImVector<> with function-local types so we declare the type here.) struct ImFontBuildSrcData { stbtt_fontinfo FontInfo; stbtt_pack_range PackRange; // Hold the list of codepoints to pack (essentially points to Codepoints.Data) stbrp_rect* Rects; // Rectangle to pack. We first fill in their size and the packer will give us their position. stbtt_packedchar* PackedChars; // Output glyphs const ImWchar* SrcRanges; // Ranges as requested by user (user is allowed to request too much, e.g. 0x0020..0xFFFF) int DstIndex; // Index into atlas->Fonts[] and dst_tmp_array[] int GlyphsHighest; // Highest requested codepoint int GlyphsCount; // Glyph count (excluding missing glyphs and glyphs already set by an earlier source font) ImBoolVector GlyphsSet; // Glyph bit map (random access, 1-bit per codepoint. This will be a maximum of 8KB) ImVector<int> GlyphsList; // Glyph codepoints list (flattened version of GlyphsMap) }; // Temporary data for one destination ImFont* (multiple source fonts can be merged into one destination ImFont) struct ImFontBuildDstData { int SrcCount; // Number of source fonts targeting this destination font. int GlyphsHighest; int GlyphsCount; ImBoolVector GlyphsSet; // This is used to resolve collision when multiple sources are merged into a same destination font. 
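// Illustrative application-side sketch of the merge path this bookkeeping supports (not part of this file;
// the icon font filename and glyph range below are hypothetical):
//   ImGuiIO& io = ImGui::GetIO();
//   io.Fonts->AddFontDefault();
//   ImFontConfig cfg;
//   cfg.MergeMode = true;                                        // merge glyphs into the previously added font
//   static const ImWchar icon_ranges[] = { 0xF000, 0xF3FF, 0 };  // zero-terminated list of range pairs
//   io.Fonts->AddFontFromFileTTF("icons.ttf", 13.0f, &cfg, icon_ranges);  // two sources -> one destination ImFont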
}; static void UnpackBoolVectorToFlatIndexList(const ImBoolVector* in, ImVector<int>* out) { IM_ASSERT(sizeof(in->Storage.Data[0]) == sizeof(int)); const int* it_begin = in->Storage.begin(); const int* it_end = in->Storage.end(); for (const int* it = it_begin; it < it_end; it++) if (int entries_32 = *it) for (int bit_n = 0; bit_n < 32; bit_n++) if (entries_32 & (1u << bit_n)) out->push_back((int)((it - it_begin) << 5) + bit_n); } bool ImFontAtlasBuildWithStbTruetype(ImFontAtlas* atlas) { IM_ASSERT(atlas->ConfigData.Size > 0); ImFontAtlasBuildRegisterDefaultCustomRects(atlas); // Clear atlas atlas->TexID = (ImTextureID)NULL; atlas->TexWidth = atlas->TexHeight = 0; atlas->TexUvScale = ImVec2(0.0f, 0.0f); atlas->TexUvWhitePixel = ImVec2(0.0f, 0.0f); atlas->ClearTexData(); // Temporary storage for building ImVector<ImFontBuildSrcData> src_tmp_array; ImVector<ImFontBuildDstData> dst_tmp_array; src_tmp_array.resize(atlas->ConfigData.Size); dst_tmp_array.resize(atlas->Fonts.Size); memset(src_tmp_array.Data, 0, (size_t)src_tmp_array.size_in_bytes()); memset(dst_tmp_array.Data, 0, (size_t)dst_tmp_array.size_in_bytes()); // 1. Initialize font loading structure, check font data validity for (int src_i = 0; src_i < atlas->ConfigData.Size; src_i++) { ImFontBuildSrcData& src_tmp = src_tmp_array[src_i]; ImFontConfig& cfg = atlas->ConfigData[src_i]; IM_ASSERT(cfg.DstFont && (!cfg.DstFont->IsLoaded() || cfg.DstFont->ContainerAtlas == atlas)); // Find index from cfg.DstFont (we allow the user to set cfg.DstFont. Also it makes casual debugging nicer than when storing indices) src_tmp.DstIndex = -1; for (int output_i = 0; output_i < atlas->Fonts.Size && src_tmp.DstIndex == -1; output_i++) if (cfg.DstFont == atlas->Fonts[output_i]) src_tmp.DstIndex = output_i; IM_ASSERT(src_tmp.DstIndex != -1); // cfg.DstFont not pointing within atlas->Fonts[] array? if (src_tmp.DstIndex == -1) return false; // Initialize helper structure for font loading and verify that the TTF/OTF data is correct const int font_offset = stbtt_GetFontOffsetForIndex((unsigned char*)cfg.FontData, cfg.FontNo); IM_ASSERT(font_offset >= 0 && "FontData is incorrect, or FontNo cannot be found."); if (!stbtt_InitFont(&src_tmp.FontInfo, (unsigned char*)cfg.FontData, font_offset)) return false; // Measure highest codepoints ImFontBuildDstData& dst_tmp = dst_tmp_array[src_tmp.DstIndex]; src_tmp.SrcRanges = cfg.GlyphRanges ? cfg.GlyphRanges : atlas->GetGlyphRangesDefault(); for (const ImWchar* src_range = src_tmp.SrcRanges; src_range[0] && src_range[1]; src_range += 2) src_tmp.GlyphsHighest = ImMax(src_tmp.GlyphsHighest, (int)src_range[1]); dst_tmp.SrcCount++; dst_tmp.GlyphsHighest = ImMax(dst_tmp.GlyphsHighest, src_tmp.GlyphsHighest); } // 2. For every requested codepoint, check for their presence in the font data, and handle redundancy or overlaps between source fonts to avoid unused glyphs. int total_glyphs_count = 0; for (int src_i = 0; src_i < src_tmp_array.Size; src_i++) { ImFontBuildSrcData& src_tmp = src_tmp_array[src_i]; ImFontBuildDstData& dst_tmp = dst_tmp_array[src_tmp.DstIndex]; src_tmp.GlyphsSet.Resize(src_tmp.GlyphsHighest + 1); if (dst_tmp.GlyphsSet.Storage.empty()) dst_tmp.GlyphsSet.Resize(dst_tmp.GlyphsHighest + 1); for (const ImWchar* src_range = src_tmp.SrcRanges; src_range[0] && src_range[1]; src_range += 2) for (int codepoint = src_range[0]; codepoint <= src_range[1]; codepoint++) { if (dst_tmp.GlyphsSet.GetBit(codepoint)) // Don't overwrite existing glyphs. We could make this an option for MergeMode (e.g. 
MergeOverwrite==true) continue; if (!stbtt_FindGlyphIndex(&src_tmp.FontInfo, codepoint)) // It is actually in the font? continue; // Add to avail set/counters src_tmp.GlyphsCount++; dst_tmp.GlyphsCount++; src_tmp.GlyphsSet.SetBit(codepoint, true); dst_tmp.GlyphsSet.SetBit(codepoint, true); total_glyphs_count++; } } // 3. Unpack our bit map into a flat list (we now have all the Unicode points that we know are requested _and_ available _and_ not overlapping another) for (int src_i = 0; src_i < src_tmp_array.Size; src_i++) { ImFontBuildSrcData& src_tmp = src_tmp_array[src_i]; src_tmp.GlyphsList.reserve(src_tmp.GlyphsCount); UnpackBoolVectorToFlatIndexList(&src_tmp.GlyphsSet, &src_tmp.GlyphsList); src_tmp.GlyphsSet.Clear(); IM_ASSERT(src_tmp.GlyphsList.Size == src_tmp.GlyphsCount); } for (int dst_i = 0; dst_i < dst_tmp_array.Size; dst_i++) dst_tmp_array[dst_i].GlyphsSet.Clear(); dst_tmp_array.clear(); // Allocate packing character data and flag packed characters buffer as non-packed (x0=y0=x1=y1=0) // (We technically don't need to zero-clear buf_rects, but let's do it for the sake of sanity) ImVector<stbrp_rect> buf_rects; ImVector<stbtt_packedchar> buf_packedchars; buf_rects.resize(total_glyphs_count); buf_packedchars.resize(total_glyphs_count); memset(buf_rects.Data, 0, (size_t)buf_rects.size_in_bytes()); memset(buf_packedchars.Data, 0, (size_t)buf_packedchars.size_in_bytes()); // 4. Gather glyphs sizes so we can pack them in our virtual canvas. int total_surface = 0; int buf_rects_out_n = 0; int buf_packedchars_out_n = 0; for (int src_i = 0; src_i < src_tmp_array.Size; src_i++) { ImFontBuildSrcData& src_tmp = src_tmp_array[src_i]; if (src_tmp.GlyphsCount == 0) continue; src_tmp.Rects = &buf_rects[buf_rects_out_n]; src_tmp.PackedChars = &buf_packedchars[buf_packedchars_out_n]; buf_rects_out_n += src_tmp.GlyphsCount; buf_packedchars_out_n += src_tmp.GlyphsCount; // Convert our ranges in the format stb_truetype wants ImFontConfig& cfg = atlas->ConfigData[src_i]; src_tmp.PackRange.font_size = cfg.SizePixels; src_tmp.PackRange.first_unicode_codepoint_in_range = 0; src_tmp.PackRange.array_of_unicode_codepoints = src_tmp.GlyphsList.Data; src_tmp.PackRange.num_chars = src_tmp.GlyphsList.Size; src_tmp.PackRange.chardata_for_range = src_tmp.PackedChars; src_tmp.PackRange.h_oversample = (unsigned char)cfg.OversampleH; src_tmp.PackRange.v_oversample = (unsigned char)cfg.OversampleV; // Gather the sizes of all rectangles we will need to pack (this loop is based on stbtt_PackFontRangesGatherRects) const float scale = (cfg.SizePixels > 0) ? stbtt_ScaleForPixelHeight(&src_tmp.FontInfo, cfg.SizePixels) : stbtt_ScaleForMappingEmToPixels(&src_tmp.FontInfo, -cfg.SizePixels); const int padding = atlas->TexGlyphPadding; for (int glyph_i = 0; glyph_i < src_tmp.GlyphsList.Size; glyph_i++) { int x0, y0, x1, y1; const int glyph_index_in_font = stbtt_FindGlyphIndex(&src_tmp.FontInfo, src_tmp.GlyphsList[glyph_i]); IM_ASSERT(glyph_index_in_font != 0); stbtt_GetGlyphBitmapBoxSubpixel(&src_tmp.FontInfo, glyph_index_in_font, scale * cfg.OversampleH, scale * cfg.OversampleV, 0, 0, &x0, &y0, &x1, &y1); src_tmp.Rects[glyph_i].w = (stbrp_coord)(x1 - x0 + padding + cfg.OversampleH - 1); src_tmp.Rects[glyph_i].h = (stbrp_coord)(y1 - y0 + padding + cfg.OversampleV - 1); total_surface += src_tmp.Rects[glyph_i].w * src_tmp.Rects[glyph_i].h; } } // We need a width for the skyline algorithm, any width! 
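// Worked example for the width heuristic below (illustrative numbers, not taken from the code): with
// total_surface ~= 1,200,000 px^2, surface_sqrt ~= 1096; that is below 2048*0.7 but above 1024*0.7,
// so TexWidth ends up as 1024 unless TexDesiredWidth overrides it.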
// The exact width doesn't really matter much, but some API/GPU have texture size limitations and increasing width can decrease height. // User can override TexDesiredWidth and TexGlyphPadding if they wish, otherwise we use a simple heuristic to select the width based on expected surface. const int surface_sqrt = (int)ImSqrt((float)total_surface) + 1; atlas->TexHeight = 0; if (atlas->TexDesiredWidth > 0) atlas->TexWidth = atlas->TexDesiredWidth; else atlas->TexWidth = (surface_sqrt >= 4096*0.7f) ? 4096 : (surface_sqrt >= 2048*0.7f) ? 2048 : (surface_sqrt >= 1024*0.7f) ? 1024 : 512; // 5. Start packing // Pack our extra data rectangles first, so they will be in the upper-left corner of our texture (UV will have small values). const int TEX_HEIGHT_MAX = 1024 * 32; stbtt_pack_context spc = {}; stbtt_PackBegin(&spc, NULL, atlas->TexWidth, TEX_HEIGHT_MAX, 0, atlas->TexGlyphPadding, NULL); ImFontAtlasBuildPackCustomRects(atlas, spc.pack_info); // 6. Pack each source font. No rendering yet, we are working with rectangles in an infinitely tall texture at this point. for (int src_i = 0; src_i < src_tmp_array.Size; src_i++) { ImFontBuildSrcData& src_tmp = src_tmp_array[src_i]; if (src_tmp.GlyphsCount == 0) continue; stbrp_pack_rects((stbrp_context*)spc.pack_info, src_tmp.Rects, src_tmp.GlyphsCount); // Extend texture height and mark missing glyphs as non-packed so we won't render them. // FIXME: We are not handling packing failure here (would happen if we run past TEX_HEIGHT_MAX or if a single rect is larger than TexWidth?) for (int glyph_i = 0; glyph_i < src_tmp.GlyphsCount; glyph_i++) if (src_tmp.Rects[glyph_i].was_packed) atlas->TexHeight = ImMax(atlas->TexHeight, src_tmp.Rects[glyph_i].y + src_tmp.Rects[glyph_i].h); } // 7. Allocate texture atlas->TexHeight = (atlas->Flags & ImFontAtlasFlags_NoPowerOfTwoHeight) ? (atlas->TexHeight + 1) : ImUpperPowerOfTwo(atlas->TexHeight); atlas->TexUvScale = ImVec2(1.0f / atlas->TexWidth, 1.0f / atlas->TexHeight); atlas->TexPixelsAlpha8 = (unsigned char*)IM_ALLOC(atlas->TexWidth * atlas->TexHeight); memset(atlas->TexPixelsAlpha8, 0, atlas->TexWidth * atlas->TexHeight); spc.pixels = atlas->TexPixelsAlpha8; spc.height = atlas->TexHeight; // 8. Render/rasterize font characters into the texture for (int src_i = 0; src_i < src_tmp_array.Size; src_i++) { ImFontConfig& cfg = atlas->ConfigData[src_i]; ImFontBuildSrcData& src_tmp = src_tmp_array[src_i]; if (src_tmp.GlyphsCount == 0) continue; stbtt_PackFontRangesRenderIntoRects(&spc, &src_tmp.FontInfo, &src_tmp.PackRange, 1, src_tmp.Rects); // Apply multiply operator if (cfg.RasterizerMultiply != 1.0f) { unsigned char multiply_table[256]; ImFontAtlasBuildMultiplyCalcLookupTable(multiply_table, cfg.RasterizerMultiply); stbrp_rect* r = &src_tmp.Rects[0]; for (int glyph_i = 0; glyph_i < src_tmp.GlyphsCount; glyph_i++, r++) if (r->was_packed) ImFontAtlasBuildMultiplyRectAlpha8(multiply_table, atlas->TexPixelsAlpha8, r->x, r->y, r->w, r->h, atlas->TexWidth * 1); } src_tmp.Rects = NULL; } // End packing stbtt_PackEnd(&spc); buf_rects.clear(); // 9.
Setup ImFont and glyphs for runtime for (int src_i = 0; src_i < src_tmp_array.Size; src_i++) { ImFontBuildSrcData& src_tmp = src_tmp_array[src_i]; if (src_tmp.GlyphsCount == 0) continue; ImFontConfig& cfg = atlas->ConfigData[src_i]; ImFont* dst_font = cfg.DstFont; // We can have multiple input fonts writing into a same destination font (when using MergeMode=true) const float font_scale = stbtt_ScaleForPixelHeight(&src_tmp.FontInfo, cfg.SizePixels); int unscaled_ascent, unscaled_descent, unscaled_line_gap; stbtt_GetFontVMetrics(&src_tmp.FontInfo, &unscaled_ascent, &unscaled_descent, &unscaled_line_gap); const float ascent = ImFloor(unscaled_ascent * font_scale + ((unscaled_ascent > 0.0f) ? +1 : -1)); const float descent = ImFloor(unscaled_descent * font_scale + ((unscaled_descent > 0.0f) ? +1 : -1)); ImFontAtlasBuildSetupFont(atlas, dst_font, &cfg, ascent, descent); const float font_off_x = cfg.GlyphOffset.x; const float font_off_y = cfg.GlyphOffset.y + (float)(int)(dst_font->Ascent + 0.5f); for (int glyph_i = 0; glyph_i < src_tmp.GlyphsCount; glyph_i++) { const int codepoint = src_tmp.GlyphsList[glyph_i]; const stbtt_packedchar& pc = src_tmp.PackedChars[glyph_i]; const float char_advance_x_org = pc.xadvance; const float char_advance_x_mod = ImClamp(char_advance_x_org, cfg.GlyphMinAdvanceX, cfg.GlyphMaxAdvanceX); float char_off_x = font_off_x; if (char_advance_x_org != char_advance_x_mod) char_off_x += cfg.PixelSnapH ? (float)(int)((char_advance_x_mod - char_advance_x_org) * 0.5f) : (char_advance_x_mod - char_advance_x_org) * 0.5f; // Register glyph stbtt_aligned_quad q; float dummy_x = 0.0f, dummy_y = 0.0f; stbtt_GetPackedQuad(src_tmp.PackedChars, atlas->TexWidth, atlas->TexHeight, glyph_i, &dummy_x, &dummy_y, &q, 0); dst_font->AddGlyph((ImWchar)codepoint, q.x0 + char_off_x, q.y0 + font_off_y, q.x1 + char_off_x, q.y1 + font_off_y, q.s0, q.t0, q.s1, q.t1, char_advance_x_mod); } } // Cleanup temporary (ImVector doesn't honor destructor) for (int src_i = 0; src_i < src_tmp_array.Size; src_i++) src_tmp_array[src_i].~ImFontBuildSrcData(); ImFontAtlasBuildFinish(atlas); return true; } void ImFontAtlasBuildRegisterDefaultCustomRects(ImFontAtlas* atlas) { if (atlas->CustomRectIds[0] >= 0) return; if (!(atlas->Flags & ImFontAtlasFlags_NoMouseCursors)) atlas->CustomRectIds[0] = atlas->AddCustomRectRegular(FONT_ATLAS_DEFAULT_TEX_DATA_ID, FONT_ATLAS_DEFAULT_TEX_DATA_W_HALF*2+1, FONT_ATLAS_DEFAULT_TEX_DATA_H); else atlas->CustomRectIds[0] = atlas->AddCustomRectRegular(FONT_ATLAS_DEFAULT_TEX_DATA_ID, 2, 2); } void ImFontAtlasBuildSetupFont(ImFontAtlas* atlas, ImFont* font, ImFontConfig* font_config, float ascent, float descent) { if (!font_config->MergeMode) { font->ClearOutputData(); font->FontSize = font_config->SizePixels; font->ConfigData = font_config; font->ContainerAtlas = atlas; font->Ascent = ascent; font->Descent = descent; } font->ConfigDataCount++; } void ImFontAtlasBuildPackCustomRects(ImFontAtlas* atlas, void* stbrp_context_opaque) { stbrp_context* pack_context = (stbrp_context*)stbrp_context_opaque; IM_ASSERT(pack_context != NULL); ImVector<ImFontAtlasCustomRect>& user_rects = atlas->CustomRects; IM_ASSERT(user_rects.Size >= 1); // We expect at least the default custom rects to be registered, else something went wrong. 
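// The code below mirrors each user rectangle into an stbrp_rect, packs them with stb_rect_pack, copies the
// resulting positions back into CustomRects and extends TexHeight to fit them.
// Illustrative application-side usage (hypothetical id and size, not part of this file):
//   int rect_index = ImGui::GetIO().Fonts->AddCustomRectRegular(0x20000, 16, 16);   // id must be >= 0x10000
//   // ...after Build(), CustomRects[rect_index].X/Y hold the packed position and CalcCustomRectUV() yields UVs.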
ImVector<stbrp_rect> pack_rects; pack_rects.resize(user_rects.Size); memset(pack_rects.Data, 0, (size_t)pack_rects.size_in_bytes()); for (int i = 0; i < user_rects.Size; i++) { pack_rects[i].w = user_rects[i].Width; pack_rects[i].h = user_rects[i].Height; } stbrp_pack_rects(pack_context, &pack_rects[0], pack_rects.Size); for (int i = 0; i < pack_rects.Size; i++) if (pack_rects[i].was_packed) { user_rects[i].X = pack_rects[i].x; user_rects[i].Y = pack_rects[i].y; IM_ASSERT(pack_rects[i].w == user_rects[i].Width && pack_rects[i].h == user_rects[i].Height); atlas->TexHeight = ImMax(atlas->TexHeight, pack_rects[i].y + pack_rects[i].h); } } static void ImFontAtlasBuildRenderDefaultTexData(ImFontAtlas* atlas) { IM_ASSERT(atlas->CustomRectIds[0] >= 0); IM_ASSERT(atlas->TexPixelsAlpha8 != NULL); ImFontAtlasCustomRect& r = atlas->CustomRects[atlas->CustomRectIds[0]]; IM_ASSERT(r.ID == FONT_ATLAS_DEFAULT_TEX_DATA_ID); IM_ASSERT(r.IsPacked()); const int w = atlas->TexWidth; if (!(atlas->Flags & ImFontAtlasFlags_NoMouseCursors)) { // Render/copy pixels IM_ASSERT(r.Width == FONT_ATLAS_DEFAULT_TEX_DATA_W_HALF * 2 + 1 && r.Height == FONT_ATLAS_DEFAULT_TEX_DATA_H); for (int y = 0, n = 0; y < FONT_ATLAS_DEFAULT_TEX_DATA_H; y++) for (int x = 0; x < FONT_ATLAS_DEFAULT_TEX_DATA_W_HALF; x++, n++) { const int offset0 = (int)(r.X + x) + (int)(r.Y + y) * w; const int offset1 = offset0 + FONT_ATLAS_DEFAULT_TEX_DATA_W_HALF + 1; atlas->TexPixelsAlpha8[offset0] = FONT_ATLAS_DEFAULT_TEX_DATA_PIXELS[n] == '.' ? 0xFF : 0x00; atlas->TexPixelsAlpha8[offset1] = FONT_ATLAS_DEFAULT_TEX_DATA_PIXELS[n] == 'X' ? 0xFF : 0x00; } } else { IM_ASSERT(r.Width == 2 && r.Height == 2); const int offset = (int)(r.X) + (int)(r.Y) * w; atlas->TexPixelsAlpha8[offset] = atlas->TexPixelsAlpha8[offset + 1] = atlas->TexPixelsAlpha8[offset + w] = atlas->TexPixelsAlpha8[offset + w + 1] = 0xFF; } atlas->TexUvWhitePixel = ImVec2((r.X + 0.5f) * atlas->TexUvScale.x, (r.Y + 0.5f) * atlas->TexUvScale.y); } void ImFontAtlasBuildFinish(ImFontAtlas* atlas) { // Render into our custom data block ImFontAtlasBuildRenderDefaultTexData(atlas); // Register custom rectangle glyphs for (int i = 0; i < atlas->CustomRects.Size; i++) { const ImFontAtlasCustomRect& r = atlas->CustomRects[i]; if (r.Font == NULL || r.ID > 0x10000) continue; IM_ASSERT(r.Font->ContainerAtlas == atlas); ImVec2 uv0, uv1; atlas->CalcCustomRectUV(&r, &uv0, &uv1); r.Font->AddGlyph((ImWchar)r.ID, r.GlyphOffset.x, r.GlyphOffset.y, r.GlyphOffset.x + r.Width, r.GlyphOffset.y + r.Height, uv0.x, uv0.y, uv1.x, uv1.y, r.GlyphAdvanceX); } // Build all fonts lookup tables for (int i = 0; i < atlas->Fonts.Size; i++) if (atlas->Fonts[i]->DirtyLookupTables) atlas->Fonts[i]->BuildLookupTable(); } // Retrieve list of range (2 int per range, values are inclusive) const ImWchar* ImFontAtlas::GetGlyphRangesDefault() { static const ImWchar ranges[] = { 0x0020, 0x00FF, // Basic Latin + Latin Supplement 0, }; return &ranges[0]; } const ImWchar* ImFontAtlas::GetGlyphRangesKorean() { static const ImWchar ranges[] = { 0x0020, 0x00FF, // Basic Latin + Latin Supplement 0x3131, 0x3163, // Korean alphabets 0xAC00, 0xD79D, // Korean characters 0, }; return &ranges[0]; } const ImWchar* ImFontAtlas::GetGlyphRangesChineseFull() { static const ImWchar ranges[] = { 0x0020, 0x00FF, // Basic Latin + Latin Supplement 0x2000, 0x206F, // General Punctuation 0x3000, 0x30FF, // CJK Symbols and Punctuations, Hiragana, Katakana 0x31F0, 0x31FF, // Katakana Phonetic Extensions 0xFF00, 0xFFEF, // Half-width characters 0x4e00, 0x9FAF, // 
CJK Ideograms 0, }; return &ranges[0]; } static void UnpackAccumulativeOffsetsIntoRanges(int base_codepoint, const short* accumulative_offsets, int accumulative_offsets_count, ImWchar* out_ranges) { for (int n = 0; n < accumulative_offsets_count; n++, out_ranges += 2) { out_ranges[0] = out_ranges[1] = (ImWchar)(base_codepoint + accumulative_offsets[n]); base_codepoint += accumulative_offsets[n]; } out_ranges[0] = 0; } //------------------------------------------------------------------------- // [SECTION] ImFontAtlas glyph ranges helpers //------------------------------------------------------------------------- const ImWchar* ImFontAtlas::GetGlyphRangesChineseSimplifiedCommon() { // Store 2500 regularly used characters for Simplified Chinese. // Sourced from https://zh.wiktionary.org/wiki/%E9%99%84%E5%BD%95:%E7%8E%B0%E4%BB%A3%E6%B1%89%E8%AF%AD%E5%B8%B8%E7%94%A8%E5%AD%97%E8%A1%A8 // This table covers 97.97% of all characters used during the month in July, 1987. // You can use ImFontGlyphRangesBuilder to create your own ranges derived from this, by merging existing ranges or adding new characters. // (Stored as accumulative offsets from the initial unicode codepoint 0x4E00. This encoding is designed to helps us compact the source code size.) static const short accumulative_offsets_from_0x4E00[] = { 0,1,2,4,1,1,1,1,2,1,3,2,1,2,2,1,1,1,1,1,5,2,1,2,3,3,3,2,2,4,1,1,1,2,1,5,2,3,1,2,1,2,1,1,2,1,1,2,2,1,4,1,1,1,1,5,10,1,2,19,2,1,2,1,2,1,2,1,2, 1,5,1,6,3,2,1,2,2,1,1,1,4,8,5,1,1,4,1,1,3,1,2,1,5,1,2,1,1,1,10,1,1,5,2,4,6,1,4,2,2,2,12,2,1,1,6,1,1,1,4,1,1,4,6,5,1,4,2,2,4,10,7,1,1,4,2,4, 2,1,4,3,6,10,12,5,7,2,14,2,9,1,1,6,7,10,4,7,13,1,5,4,8,4,1,1,2,28,5,6,1,1,5,2,5,20,2,2,9,8,11,2,9,17,1,8,6,8,27,4,6,9,20,11,27,6,68,2,2,1,1, 1,2,1,2,2,7,6,11,3,3,1,1,3,1,2,1,1,1,1,1,3,1,1,8,3,4,1,5,7,2,1,4,4,8,4,2,1,2,1,1,4,5,6,3,6,2,12,3,1,3,9,2,4,3,4,1,5,3,3,1,3,7,1,5,1,1,1,1,2, 3,4,5,2,3,2,6,1,1,2,1,7,1,7,3,4,5,15,2,2,1,5,3,22,19,2,1,1,1,1,2,5,1,1,1,6,1,1,12,8,2,9,18,22,4,1,1,5,1,16,1,2,7,10,15,1,1,6,2,4,1,2,4,1,6, 1,1,3,2,4,1,6,4,5,1,2,1,1,2,1,10,3,1,3,2,1,9,3,2,5,7,2,19,4,3,6,1,1,1,1,1,4,3,2,1,1,1,2,5,3,1,1,1,2,2,1,1,2,1,1,2,1,3,1,1,1,3,7,1,4,1,1,2,1, 1,2,1,2,4,4,3,8,1,1,1,2,1,3,5,1,3,1,3,4,6,2,2,14,4,6,6,11,9,1,15,3,1,28,5,2,5,5,3,1,3,4,5,4,6,14,3,2,3,5,21,2,7,20,10,1,2,19,2,4,28,28,2,3, 2,1,14,4,1,26,28,42,12,40,3,52,79,5,14,17,3,2,2,11,3,4,6,3,1,8,2,23,4,5,8,10,4,2,7,3,5,1,1,6,3,1,2,2,2,5,28,1,1,7,7,20,5,3,29,3,17,26,1,8,4, 27,3,6,11,23,5,3,4,6,13,24,16,6,5,10,25,35,7,3,2,3,3,14,3,6,2,6,1,4,2,3,8,2,1,1,3,3,3,4,1,1,13,2,2,4,5,2,1,14,14,1,2,2,1,4,5,2,3,1,14,3,12, 3,17,2,16,5,1,2,1,8,9,3,19,4,2,2,4,17,25,21,20,28,75,1,10,29,103,4,1,2,1,1,4,2,4,1,2,3,24,2,2,2,1,1,2,1,3,8,1,1,1,2,1,1,3,1,1,1,6,1,5,3,1,1, 1,3,4,1,1,5,2,1,5,6,13,9,16,1,1,1,1,3,2,3,2,4,5,2,5,2,2,3,7,13,7,2,2,1,1,1,1,2,3,3,2,1,6,4,9,2,1,14,2,14,2,1,18,3,4,14,4,11,41,15,23,15,23, 176,1,3,4,1,1,1,1,5,3,1,2,3,7,3,1,1,2,1,2,4,4,6,2,4,1,9,7,1,10,5,8,16,29,1,1,2,2,3,1,3,5,2,4,5,4,1,1,2,2,3,3,7,1,6,10,1,17,1,44,4,6,2,1,1,6, 5,4,2,10,1,6,9,2,8,1,24,1,2,13,7,8,8,2,1,4,1,3,1,3,3,5,2,5,10,9,4,9,12,2,1,6,1,10,1,1,7,7,4,10,8,3,1,13,4,3,1,6,1,3,5,2,1,2,17,16,5,2,16,6, 1,4,2,1,3,3,6,8,5,11,11,1,3,3,2,4,6,10,9,5,7,4,7,4,7,1,1,4,2,1,3,6,8,7,1,6,11,5,5,3,24,9,4,2,7,13,5,1,8,82,16,61,1,1,1,4,2,2,16,10,3,8,1,1, 6,4,2,1,3,1,1,1,4,3,8,4,2,2,1,1,1,1,1,6,3,5,1,1,4,6,9,2,1,1,1,2,1,7,2,1,6,1,5,4,4,3,1,8,1,3,3,1,3,2,2,2,2,3,1,6,1,2,1,2,1,3,7,1,8,2,1,2,1,5, 
2,5,3,5,10,1,2,1,1,3,2,5,11,3,9,3,5,1,1,5,9,1,2,1,5,7,9,9,8,1,3,3,3,6,8,2,3,2,1,1,32,6,1,2,15,9,3,7,13,1,3,10,13,2,14,1,13,10,2,1,3,10,4,15, 2,15,15,10,1,3,9,6,9,32,25,26,47,7,3,2,3,1,6,3,4,3,2,8,5,4,1,9,4,2,2,19,10,6,2,3,8,1,2,2,4,2,1,9,4,4,4,6,4,8,9,2,3,1,1,1,1,3,5,5,1,3,8,4,6, 2,1,4,12,1,5,3,7,13,2,5,8,1,6,1,2,5,14,6,1,5,2,4,8,15,5,1,23,6,62,2,10,1,1,8,1,2,2,10,4,2,2,9,2,1,1,3,2,3,1,5,3,3,2,1,3,8,1,1,1,11,3,1,1,4, 3,7,1,14,1,2,3,12,5,2,5,1,6,7,5,7,14,11,1,3,1,8,9,12,2,1,11,8,4,4,2,6,10,9,13,1,1,3,1,5,1,3,2,4,4,1,18,2,3,14,11,4,29,4,2,7,1,3,13,9,2,2,5, 3,5,20,7,16,8,5,72,34,6,4,22,12,12,28,45,36,9,7,39,9,191,1,1,1,4,11,8,4,9,2,3,22,1,1,1,1,4,17,1,7,7,1,11,31,10,2,4,8,2,3,2,1,4,2,16,4,32,2, 3,19,13,4,9,1,5,2,14,8,1,1,3,6,19,6,5,1,16,6,2,10,8,5,1,2,3,1,5,5,1,11,6,6,1,3,3,2,6,3,8,1,1,4,10,7,5,7,7,5,8,9,2,1,3,4,1,1,3,1,3,3,2,6,16, 1,4,6,3,1,10,6,1,3,15,2,9,2,10,25,13,9,16,6,2,2,10,11,4,3,9,1,2,6,6,5,4,30,40,1,10,7,12,14,33,6,3,6,7,3,1,3,1,11,14,4,9,5,12,11,49,18,51,31, 140,31,2,2,1,5,1,8,1,10,1,4,4,3,24,1,10,1,3,6,6,16,3,4,5,2,1,4,2,57,10,6,22,2,22,3,7,22,6,10,11,36,18,16,33,36,2,5,5,1,1,1,4,10,1,4,13,2,7, 5,2,9,3,4,1,7,43,3,7,3,9,14,7,9,1,11,1,1,3,7,4,18,13,1,14,1,3,6,10,73,2,2,30,6,1,11,18,19,13,22,3,46,42,37,89,7,3,16,34,2,2,3,9,1,7,1,1,1,2, 2,4,10,7,3,10,3,9,5,28,9,2,6,13,7,3,1,3,10,2,7,2,11,3,6,21,54,85,2,1,4,2,2,1,39,3,21,2,2,5,1,1,1,4,1,1,3,4,15,1,3,2,4,4,2,3,8,2,20,1,8,7,13, 4,1,26,6,2,9,34,4,21,52,10,4,4,1,5,12,2,11,1,7,2,30,12,44,2,30,1,1,3,6,16,9,17,39,82,2,2,24,7,1,7,3,16,9,14,44,2,1,2,1,2,3,5,2,4,1,6,7,5,3, 2,6,1,11,5,11,2,1,18,19,8,1,3,24,29,2,1,3,5,2,2,1,13,6,5,1,46,11,3,5,1,1,5,8,2,10,6,12,6,3,7,11,2,4,16,13,2,5,1,1,2,2,5,2,28,5,2,23,10,8,4, 4,22,39,95,38,8,14,9,5,1,13,5,4,3,13,12,11,1,9,1,27,37,2,5,4,4,63,211,95,2,2,2,1,3,5,2,1,1,2,2,1,1,1,3,2,4,1,2,1,1,5,2,2,1,1,2,3,1,3,1,1,1, 3,1,4,2,1,3,6,1,1,3,7,15,5,3,2,5,3,9,11,4,2,22,1,6,3,8,7,1,4,28,4,16,3,3,25,4,4,27,27,1,4,1,2,2,7,1,3,5,2,28,8,2,14,1,8,6,16,25,3,3,3,14,3, 3,1,1,2,1,4,6,3,8,4,1,1,1,2,3,6,10,6,2,3,18,3,2,5,5,4,3,1,5,2,5,4,23,7,6,12,6,4,17,11,9,5,1,1,10,5,12,1,1,11,26,33,7,3,6,1,17,7,1,5,12,1,11, 2,4,1,8,14,17,23,1,2,1,7,8,16,11,9,6,5,2,6,4,16,2,8,14,1,11,8,9,1,1,1,9,25,4,11,19,7,2,15,2,12,8,52,7,5,19,2,16,4,36,8,1,16,8,24,26,4,6,2,9, 5,4,36,3,28,12,25,15,37,27,17,12,59,38,5,32,127,1,2,9,17,14,4,1,2,1,1,8,11,50,4,14,2,19,16,4,17,5,4,5,26,12,45,2,23,45,104,30,12,8,3,10,2,2, 3,3,1,4,20,7,2,9,6,15,2,20,1,3,16,4,11,15,6,134,2,5,59,1,2,2,2,1,9,17,3,26,137,10,211,59,1,2,4,1,4,1,1,1,2,6,2,3,1,1,2,3,2,3,1,3,4,4,2,3,3, 1,4,3,1,7,2,2,3,1,2,1,3,3,3,2,2,3,2,1,3,14,6,1,3,2,9,6,15,27,9,34,145,1,1,2,1,1,1,1,2,1,1,1,1,2,2,2,3,1,2,1,1,1,2,3,5,8,3,5,2,4,1,3,2,2,2,12, 4,1,1,1,10,4,5,1,20,4,16,1,15,9,5,12,2,9,2,5,4,2,26,19,7,1,26,4,30,12,15,42,1,6,8,172,1,1,4,2,1,1,11,2,2,4,2,1,2,1,10,8,1,2,1,4,5,1,2,5,1,8, 4,1,3,4,2,1,6,2,1,3,4,1,2,1,1,1,1,12,5,7,2,4,3,1,1,1,3,3,6,1,2,2,3,3,3,2,1,2,12,14,11,6,6,4,12,2,8,1,7,10,1,35,7,4,13,15,4,3,23,21,28,52,5, 26,5,6,1,7,10,2,7,53,3,2,1,1,1,2,163,532,1,10,11,1,3,3,4,8,2,8,6,2,2,23,22,4,2,2,4,2,1,3,1,3,3,5,9,8,2,1,2,8,1,10,2,12,21,20,15,105,2,3,1,1, 3,2,3,1,1,2,5,1,4,15,11,19,1,1,1,1,5,4,5,1,1,2,5,3,5,12,1,2,5,1,11,1,1,15,9,1,4,5,3,26,8,2,1,3,1,1,15,19,2,12,1,2,5,2,7,2,19,2,20,6,26,7,5, 2,2,7,34,21,13,70,2,128,1,1,2,1,1,2,1,1,3,2,2,2,15,1,4,1,3,4,42,10,6,1,49,85,8,1,2,1,1,4,4,2,3,6,1,5,7,4,3,211,4,1,2,1,2,5,1,2,4,2,2,6,5,6, 10,3,4,48,100,6,2,16,296,5,27,387,2,2,3,7,16,8,5,38,15,39,21,9,10,3,7,59,13,27,21,47,5,21,6 }; static ImWchar base_ranges[] = // not zero-terminated { 0x0020, 0x00FF, // Basic Latin + 
Latin Supplement 0x2000, 0x206F, // General Punctuation 0x3000, 0x30FF, // CJK Symbols and Punctuations, Hiragana, Katakana 0x31F0, 0x31FF, // Katakana Phonetic Extensions 0xFF00, 0xFFEF // Half-width characters }; static ImWchar full_ranges[IM_ARRAYSIZE(base_ranges) + IM_ARRAYSIZE(accumulative_offsets_from_0x4E00) * 2 + 1] = { 0 }; if (!full_ranges[0]) { memcpy(full_ranges, base_ranges, sizeof(base_ranges)); UnpackAccumulativeOffsetsIntoRanges(0x4E00, accumulative_offsets_from_0x4E00, IM_ARRAYSIZE(accumulative_offsets_from_0x4E00), full_ranges + IM_ARRAYSIZE(base_ranges)); } return &full_ranges[0]; } const ImWchar* ImFontAtlas::GetGlyphRangesJapanese() { // 1946 common ideograms code points for Japanese // Sourced from http://theinstructionlimit.com/common-kanji-character-ranges-for-xna-spritefont-rendering // FIXME: Source a list of the revised 2136 Joyo Kanji list from 2010 and rebuild this. // You can use ImFontGlyphRangesBuilder to create your own ranges derived from this, by merging existing ranges or adding new characters. // (Stored as accumulative offsets from the initial unicode codepoint 0x4E00. This encoding is designed to helps us compact the source code size.) static const short accumulative_offsets_from_0x4E00[] = { 0,1,2,4,1,1,1,1,2,1,6,2,2,1,8,5,7,11,1,2,10,10,8,2,4,20,2,11,8,2,1,2,1,6,2,1,7,5,3,7,1,1,13,7,9,1,4,6,1,2,1,10,1,1,9,2,2,4,5,6,14,1,1,9,3,18, 5,4,2,2,10,7,1,1,1,3,2,4,3,23,2,10,12,2,14,2,4,13,1,6,10,3,1,7,13,6,4,13,5,2,3,17,2,2,5,7,6,4,1,7,14,16,6,13,9,15,1,1,7,16,4,7,1,19,9,2,7,15, 2,6,5,13,25,4,14,13,11,25,1,1,1,2,1,2,2,3,10,11,3,3,1,1,4,4,2,1,4,9,1,4,3,5,5,2,7,12,11,15,7,16,4,5,16,2,1,1,6,3,3,1,1,2,7,6,6,7,1,4,7,6,1,1, 2,1,12,3,3,9,5,8,1,11,1,2,3,18,20,4,1,3,6,1,7,3,5,5,7,2,2,12,3,1,4,2,3,2,3,11,8,7,4,17,1,9,25,1,1,4,2,2,4,1,2,7,1,1,1,3,1,2,6,16,1,2,1,1,3,12, 20,2,5,20,8,7,6,2,1,1,1,1,6,2,1,2,10,1,1,6,1,3,1,2,1,4,1,12,4,1,3,1,1,1,1,1,10,4,7,5,13,1,15,1,1,30,11,9,1,15,38,14,1,32,17,20,1,9,31,2,21,9, 4,49,22,2,1,13,1,11,45,35,43,55,12,19,83,1,3,2,3,13,2,1,7,3,18,3,13,8,1,8,18,5,3,7,25,24,9,24,40,3,17,24,2,1,6,2,3,16,15,6,7,3,12,1,9,7,3,3, 3,15,21,5,16,4,5,12,11,11,3,6,3,2,31,3,2,1,1,23,6,6,1,4,2,6,5,2,1,1,3,3,22,2,6,2,3,17,3,2,4,5,1,9,5,1,1,6,15,12,3,17,2,14,2,8,1,23,16,4,2,23, 8,15,23,20,12,25,19,47,11,21,65,46,4,3,1,5,6,1,2,5,26,2,1,1,3,11,1,1,1,2,1,2,3,1,1,10,2,3,1,1,1,3,6,3,2,2,6,6,9,2,2,2,6,2,5,10,2,4,1,2,1,2,2, 3,1,1,3,1,2,9,23,9,2,1,1,1,1,5,3,2,1,10,9,6,1,10,2,31,25,3,7,5,40,1,15,6,17,7,27,180,1,3,2,2,1,1,1,6,3,10,7,1,3,6,17,8,6,2,2,1,3,5,5,8,16,14, 15,1,1,4,1,2,1,1,1,3,2,7,5,6,2,5,10,1,4,2,9,1,1,11,6,1,44,1,3,7,9,5,1,3,1,1,10,7,1,10,4,2,7,21,15,7,2,5,1,8,3,4,1,3,1,6,1,4,2,1,4,10,8,1,4,5, 1,5,10,2,7,1,10,1,1,3,4,11,10,29,4,7,3,5,2,3,33,5,2,19,3,1,4,2,6,31,11,1,3,3,3,1,8,10,9,12,11,12,8,3,14,8,6,11,1,4,41,3,1,2,7,13,1,5,6,2,6,12, 12,22,5,9,4,8,9,9,34,6,24,1,1,20,9,9,3,4,1,7,2,2,2,6,2,28,5,3,6,1,4,6,7,4,2,1,4,2,13,6,4,4,3,1,8,8,3,2,1,5,1,2,2,3,1,11,11,7,3,6,10,8,6,16,16, 22,7,12,6,21,5,4,6,6,3,6,1,3,2,1,2,8,29,1,10,1,6,13,6,6,19,31,1,13,4,4,22,17,26,33,10,4,15,12,25,6,67,10,2,3,1,6,10,2,6,2,9,1,9,4,4,1,2,16,2, 5,9,2,3,8,1,8,3,9,4,8,6,4,8,11,3,2,1,1,3,26,1,7,5,1,11,1,5,3,5,2,13,6,39,5,1,5,2,11,6,10,5,1,15,5,3,6,19,21,22,2,4,1,6,1,8,1,4,8,2,4,2,2,9,2, 1,1,1,4,3,6,3,12,7,1,14,2,4,10,2,13,1,17,7,3,2,1,3,2,13,7,14,12,3,1,29,2,8,9,15,14,9,14,1,3,1,6,5,9,11,3,38,43,20,7,7,8,5,15,12,19,15,81,8,7, 1,5,73,13,37,28,8,8,1,15,18,20,165,28,1,6,11,8,4,14,7,15,1,3,3,6,4,1,7,14,1,1,11,30,1,5,1,4,14,1,4,2,7,52,2,6,29,3,1,9,1,21,3,5,1,26,3,11,14, 
11,1,17,5,1,2,1,3,2,8,1,2,9,12,1,1,2,3,8,3,24,12,7,7,5,17,3,3,3,1,23,10,4,4,6,3,1,16,17,22,3,10,21,16,16,6,4,10,2,1,1,2,8,8,6,5,3,3,3,39,25, 15,1,1,16,6,7,25,15,6,6,12,1,22,13,1,4,9,5,12,2,9,1,12,28,8,3,5,10,22,60,1,2,40,4,61,63,4,1,13,12,1,4,31,12,1,14,89,5,16,6,29,14,2,5,49,18,18, 5,29,33,47,1,17,1,19,12,2,9,7,39,12,3,7,12,39,3,1,46,4,12,3,8,9,5,31,15,18,3,2,2,66,19,13,17,5,3,46,124,13,57,34,2,5,4,5,8,1,1,1,4,3,1,17,5, 3,5,3,1,8,5,6,3,27,3,26,7,12,7,2,17,3,7,18,78,16,4,36,1,2,1,6,2,1,39,17,7,4,13,4,4,4,1,10,4,2,4,6,3,10,1,19,1,26,2,4,33,2,73,47,7,3,8,2,4,15, 18,1,29,2,41,14,1,21,16,41,7,39,25,13,44,2,2,10,1,13,7,1,7,3,5,20,4,8,2,49,1,10,6,1,6,7,10,7,11,16,3,12,20,4,10,3,1,2,11,2,28,9,2,4,7,2,15,1, 27,1,28,17,4,5,10,7,3,24,10,11,6,26,3,2,7,2,2,49,16,10,16,15,4,5,27,61,30,14,38,22,2,7,5,1,3,12,23,24,17,17,3,3,2,4,1,6,2,7,5,1,1,5,1,1,9,4, 1,3,6,1,8,2,8,4,14,3,5,11,4,1,3,32,1,19,4,1,13,11,5,2,1,8,6,8,1,6,5,13,3,23,11,5,3,16,3,9,10,1,24,3,198,52,4,2,2,5,14,5,4,22,5,20,4,11,6,41, 1,5,2,2,11,5,2,28,35,8,22,3,18,3,10,7,5,3,4,1,5,3,8,9,3,6,2,16,22,4,5,5,3,3,18,23,2,6,23,5,27,8,1,33,2,12,43,16,5,2,3,6,1,20,4,2,9,7,1,11,2, 10,3,14,31,9,3,25,18,20,2,5,5,26,14,1,11,17,12,40,19,9,6,31,83,2,7,9,19,78,12,14,21,76,12,113,79,34,4,1,1,61,18,85,10,2,2,13,31,11,50,6,33,159, 179,6,6,7,4,4,2,4,2,5,8,7,20,32,22,1,3,10,6,7,28,5,10,9,2,77,19,13,2,5,1,4,4,7,4,13,3,9,31,17,3,26,2,6,6,5,4,1,7,11,3,4,2,1,6,2,20,4,1,9,2,6, 3,7,1,1,1,20,2,3,1,6,2,3,6,2,4,8,1,5,13,8,4,11,23,1,10,6,2,1,3,21,2,2,4,24,31,4,10,10,2,5,192,15,4,16,7,9,51,1,2,1,1,5,1,1,2,1,3,5,3,1,3,4,1, 3,1,3,3,9,8,1,2,2,2,4,4,18,12,92,2,10,4,3,14,5,25,16,42,4,14,4,2,21,5,126,30,31,2,1,5,13,3,22,5,6,6,20,12,1,14,12,87,3,19,1,8,2,9,9,3,3,23,2, 3,7,6,3,1,2,3,9,1,3,1,6,3,2,1,3,11,3,1,6,10,3,2,3,1,2,1,5,1,1,11,3,6,4,1,7,2,1,2,5,5,34,4,14,18,4,19,7,5,8,2,6,79,1,5,2,14,8,2,9,2,1,36,28,16, 4,1,1,1,2,12,6,42,39,16,23,7,15,15,3,2,12,7,21,64,6,9,28,8,12,3,3,41,59,24,51,55,57,294,9,9,2,6,2,15,1,2,13,38,90,9,9,9,3,11,7,1,1,1,5,6,3,2, 1,2,2,3,8,1,4,4,1,5,7,1,4,3,20,4,9,1,1,1,5,5,17,1,5,2,6,2,4,1,4,5,7,3,18,11,11,32,7,5,4,7,11,127,8,4,3,3,1,10,1,1,6,21,14,1,16,1,7,1,3,6,9,65, 51,4,3,13,3,10,1,1,12,9,21,110,3,19,24,1,1,10,62,4,1,29,42,78,28,20,18,82,6,3,15,6,84,58,253,15,155,264,15,21,9,14,7,58,40,39, }; static ImWchar base_ranges[] = // not zero-terminated { 0x0020, 0x00FF, // Basic Latin + Latin Supplement 0x3000, 0x30FF, // CJK Symbols and Punctuations, Hiragana, Katakana 0x31F0, 0x31FF, // Katakana Phonetic Extensions 0xFF00, 0xFFEF // Half-width characters }; static ImWchar full_ranges[IM_ARRAYSIZE(base_ranges) + IM_ARRAYSIZE(accumulative_offsets_from_0x4E00)*2 + 1] = { 0 }; if (!full_ranges[0]) { memcpy(full_ranges, base_ranges, sizeof(base_ranges)); UnpackAccumulativeOffsetsIntoRanges(0x4E00, accumulative_offsets_from_0x4E00, IM_ARRAYSIZE(accumulative_offsets_from_0x4E00), full_ranges + IM_ARRAYSIZE(base_ranges)); } return &full_ranges[0]; } const ImWchar* ImFontAtlas::GetGlyphRangesCyrillic() { static const ImWchar ranges[] = { 0x0020, 0x00FF, // Basic Latin + Latin Supplement 0x0400, 0x052F, // Cyrillic + Cyrillic Supplement 0x2DE0, 0x2DFF, // Cyrillic Extended-A 0xA640, 0xA69F, // Cyrillic Extended-B 0, }; return &ranges[0]; } const ImWchar* ImFontAtlas::GetGlyphRangesThai() { static const ImWchar ranges[] = { 0x0020, 0x00FF, // Basic Latin 0x2010, 0x205E, // Punctuations 0x0E00, 0x0E7F, // Thai 0, }; return &ranges[0]; } const ImWchar* ImFontAtlas::GetGlyphRangesVietnamese() { static const ImWchar ranges[] = { 0x0020, 0x00FF, // Basic Latin 0x0102, 0x0103, 
0x0110, 0x0111, 0x0128, 0x0129, 0x0168, 0x0169, 0x01A0, 0x01A1, 0x01AF, 0x01B0, 0x1EA0, 0x1EF9, 0, }; return &ranges[0]; } //----------------------------------------------------------------------------- // [SECTION] ImFontGlyphRangesBuilder //----------------------------------------------------------------------------- void ImFontGlyphRangesBuilder::AddText(const char* text, const char* text_end) { while (text_end ? (text < text_end) : *text) { unsigned int c = 0; int c_len = ImTextCharFromUtf8(&c, text, text_end); text += c_len; if (c_len == 0) break; if (c < 0x10000) AddChar((ImWchar)c); } } void ImFontGlyphRangesBuilder::AddRanges(const ImWchar* ranges) { for (; ranges[0]; ranges += 2) for (ImWchar c = ranges[0]; c <= ranges[1]; c++) AddChar(c); } void ImFontGlyphRangesBuilder::BuildRanges(ImVector<ImWchar>* out_ranges) { int max_codepoint = 0x10000; for (int n = 0; n < max_codepoint; n++) if (GetBit(n)) { out_ranges->push_back((ImWchar)n); while (n < max_codepoint - 1 && GetBit(n + 1)) n++; out_ranges->push_back((ImWchar)n); } out_ranges->push_back(0); } //----------------------------------------------------------------------------- // [SECTION] ImFont //----------------------------------------------------------------------------- ImFont::ImFont() { FontSize = 0.0f; FallbackAdvanceX = 0.0f; FallbackChar = (ImWchar)'?'; DisplayOffset = ImVec2(0.0f, 0.0f); FallbackGlyph = NULL; ContainerAtlas = NULL; ConfigData = NULL; ConfigDataCount = 0; DirtyLookupTables = false; Scale = 1.0f; Ascent = Descent = 0.0f; MetricsTotalSurface = 0; } ImFont::~ImFont() { ClearOutputData(); } void ImFont::ClearOutputData() { FontSize = 0.0f; FallbackAdvanceX = 0.0f; Glyphs.clear(); IndexAdvanceX.clear(); IndexLookup.clear(); FallbackGlyph = NULL; ContainerAtlas = NULL; DirtyLookupTables = true; Ascent = Descent = 0.0f; MetricsTotalSurface = 0; } void ImFont::BuildLookupTable() { int max_codepoint = 0; for (int i = 0; i != Glyphs.Size; i++) max_codepoint = ImMax(max_codepoint, (int)Glyphs[i].Codepoint); IM_ASSERT(Glyphs.Size < 0xFFFF); // -1 is reserved IndexAdvanceX.clear(); IndexLookup.clear(); DirtyLookupTables = false; GrowIndex(max_codepoint + 1); for (int i = 0; i < Glyphs.Size; i++) { int codepoint = (int)Glyphs[i].Codepoint; IndexAdvanceX[codepoint] = Glyphs[i].AdvanceX; IndexLookup[codepoint] = (ImWchar)i; } // Create a glyph to handle TAB // FIXME: Needs proper TAB handling but it needs to be contextualized (or we could arbitrary say that each string starts at "column 0" ?) if (FindGlyph((ImWchar)' ')) { if (Glyphs.back().Codepoint != '\t') // So we can call this function multiple times Glyphs.resize(Glyphs.Size + 1); ImFontGlyph& tab_glyph = Glyphs.back(); tab_glyph = *FindGlyph((ImWchar)' '); tab_glyph.Codepoint = '\t'; tab_glyph.AdvanceX *= IM_TABSIZE; IndexAdvanceX[(int)tab_glyph.Codepoint] = (float)tab_glyph.AdvanceX; IndexLookup[(int)tab_glyph.Codepoint] = (ImWchar)(Glyphs.Size-1); } FallbackGlyph = FindGlyphNoFallback(FallbackChar); FallbackAdvanceX = FallbackGlyph ? FallbackGlyph->AdvanceX : 0.0f; for (int i = 0; i < max_codepoint + 1; i++) if (IndexAdvanceX[i] < 0.0f) IndexAdvanceX[i] = FallbackAdvanceX; } void ImFont::SetFallbackChar(ImWchar c) { FallbackChar = c; BuildLookupTable(); } void ImFont::GrowIndex(int new_size) { IM_ASSERT(IndexAdvanceX.Size == IndexLookup.Size); if (new_size <= IndexLookup.Size) return; IndexAdvanceX.resize(new_size, -1.0f); IndexLookup.resize(new_size, (ImWchar)-1); } // x0/y0/x1/y1 are offset from the character upper-left layout position, in pixels. 
Therefore x0/y0 are often fairly close to zero. // Not to be mistaken with texture coordinates, which are held by u0/v0/u1/v1 in normalized format (0.0..1.0 on each texture axis). void ImFont::AddGlyph(ImWchar codepoint, float x0, float y0, float x1, float y1, float u0, float v0, float u1, float v1, float advance_x) { Glyphs.resize(Glyphs.Size + 1); ImFontGlyph& glyph = Glyphs.back(); glyph.Codepoint = (ImWchar)codepoint; glyph.X0 = x0; glyph.Y0 = y0; glyph.X1 = x1; glyph.Y1 = y1; glyph.U0 = u0; glyph.V0 = v0; glyph.U1 = u1; glyph.V1 = v1; glyph.AdvanceX = advance_x + ConfigData->GlyphExtraSpacing.x; // Bake spacing into AdvanceX if (ConfigData->PixelSnapH) glyph.AdvanceX = (float)(int)(glyph.AdvanceX + 0.5f); // Compute rough surface usage metrics (+1 to account for average padding, +0.99 to round) DirtyLookupTables = true; MetricsTotalSurface += (int)((glyph.U1 - glyph.U0) * ContainerAtlas->TexWidth + 1.99f) * (int)((glyph.V1 - glyph.V0) * ContainerAtlas->TexHeight + 1.99f); } void ImFont::AddRemapChar(ImWchar dst, ImWchar src, bool overwrite_dst) { IM_ASSERT(IndexLookup.Size > 0); // Currently this can only be called AFTER the font has been built, aka after calling ImFontAtlas::GetTexDataAs*() function. int index_size = IndexLookup.Size; if (dst < index_size && IndexLookup.Data[dst] == (ImWchar)-1 && !overwrite_dst) // 'dst' already exists return; if (src >= index_size && dst >= index_size) // both 'dst' and 'src' don't exist -> no-op return; GrowIndex(dst + 1); IndexLookup[dst] = (src < index_size) ? IndexLookup.Data[src] : (ImWchar)-1; IndexAdvanceX[dst] = (src < index_size) ? IndexAdvanceX.Data[src] : 1.0f; } const ImFontGlyph* ImFont::FindGlyph(ImWchar c) const { if (c >= IndexLookup.Size) return FallbackGlyph; const ImWchar i = IndexLookup.Data[c]; if (i == (ImWchar)-1) return FallbackGlyph; return &Glyphs.Data[i]; } const ImFontGlyph* ImFont::FindGlyphNoFallback(ImWchar c) const { if (c >= IndexLookup.Size) return NULL; const ImWchar i = IndexLookup.Data[c]; if (i == (ImWchar)-1) return NULL; return &Glyphs.Data[i]; } const char* ImFont::CalcWordWrapPositionA(float scale, const char* text, const char* text_end, float wrap_width) const { // Simple word-wrapping for English, not full-featured. Please submit failing cases! // FIXME: Much possible improvements (don't cut things like "word !", "word!!!" but cut within "word,,,,", more sensible support for punctuations, support for Unicode punctuations, etc.) // For references, possible wrap point marked with ^ // "aaa bbb, ccc,ddd. eee fff. ggg!" // ^ ^ ^ ^ ^__ ^ ^ // List of hardcoded separators: .,;!?'" // Skip extra blanks after a line returns (that includes not counting them in width computation) // e.g. "Hello world" --> "Hello" "World" // Cut words that cannot possibly fit within one line. 
// e.g.: "The tropical fish" with ~5 characters worth of width --> "The tr" "opical" "fish" float line_width = 0.0f; float word_width = 0.0f; float blank_width = 0.0f; wrap_width /= scale; // We work with unscaled widths to avoid scaling every characters const char* word_end = text; const char* prev_word_end = NULL; bool inside_word = true; const char* s = text; while (s < text_end) { unsigned int c = (unsigned int)*s; const char* next_s; if (c < 0x80) next_s = s + 1; else next_s = s + ImTextCharFromUtf8(&c, s, text_end); if (c == 0) break; if (c < 32) { if (c == '\n') { line_width = word_width = blank_width = 0.0f; inside_word = true; s = next_s; continue; } if (c == '\r') { s = next_s; continue; } } const float char_width = ((int)c < IndexAdvanceX.Size ? IndexAdvanceX.Data[c] : FallbackAdvanceX); if (ImCharIsBlankW(c)) { if (inside_word) { line_width += blank_width; blank_width = 0.0f; word_end = s; } blank_width += char_width; inside_word = false; } else { word_width += char_width; if (inside_word) { word_end = next_s; } else { prev_word_end = word_end; line_width += word_width + blank_width; word_width = blank_width = 0.0f; } // Allow wrapping after punctuation. inside_word = !(c == '.' || c == ',' || c == ';' || c == '!' || c == '?' || c == '\"'); } // We ignore blank width at the end of the line (they can be skipped) if (line_width + word_width > wrap_width) { // Words that cannot possibly fit within an entire line will be cut anywhere. if (word_width < wrap_width) s = prev_word_end ? prev_word_end : word_end; break; } s = next_s; } return s; } ImVec2 ImFont::CalcTextSizeA(float size, float max_width, float wrap_width, const char* text_begin, const char* text_end, const char** remaining) const { if (!text_end) text_end = text_begin + strlen(text_begin); // FIXME-OPT: Need to avoid this. const float line_height = size; const float scale = size / FontSize; ImVec2 text_size = ImVec2(0,0); float line_width = 0.0f; const bool word_wrap_enabled = (wrap_width > 0.0f); const char* word_wrap_eol = NULL; const char* s = text_begin; while (s < text_end) { if (word_wrap_enabled) { // Calculate how far we can render. Requires two passes on the string data but keeps the code simple and not intrusive for what's essentially an uncommon feature. if (!word_wrap_eol) { word_wrap_eol = CalcWordWrapPositionA(scale, s, text_end, wrap_width - line_width); if (word_wrap_eol == s) // Wrap_width is too small to fit anything. Force displaying 1 character to minimize the height discontinuity. word_wrap_eol++; // +1 may not be a character start point in UTF-8 but it's ok because we use s >= word_wrap_eol below } if (s >= word_wrap_eol) { if (text_size.x < line_width) text_size.x = line_width; text_size.y += line_height; line_width = 0.0f; word_wrap_eol = NULL; // Wrapping skips upcoming blanks while (s < text_end) { const char c = *s; if (ImCharIsBlankA(c)) { s++; } else if (c == '\n') { s++; break; } else { break; } } continue; } } // Decode and advance source const char* prev_s = s; unsigned int c = (unsigned int)*s; if (c < 0x80) { s += 1; } else { s += ImTextCharFromUtf8(&c, s, text_end); if (c == 0) // Malformed UTF-8? break; } if (c < 32) { if (c == '\n') { text_size.x = ImMax(text_size.x, line_width); text_size.y += line_height; line_width = 0.0f; continue; } if (c == '\r') continue; } const float char_width = ((int)c < IndexAdvanceX.Size ? 
IndexAdvanceX.Data[c] : FallbackAdvanceX) * scale; if (line_width + char_width >= max_width) { s = prev_s; break; } line_width += char_width; } if (text_size.x < line_width) text_size.x = line_width; if (line_width > 0 || text_size.y == 0.0f) text_size.y += line_height; if (remaining) *remaining = s; return text_size; } void ImFont::RenderChar(ImDrawList* draw_list, float size, ImVec2 pos, ImU32 col, ImWchar c) const { if (c == ' ' || c == '\t' || c == '\n' || c == '\r') // Match behavior of RenderText(), those 4 codepoints are hard-coded. return; if (const ImFontGlyph* glyph = FindGlyph(c)) { float scale = (size >= 0.0f) ? (size / FontSize) : 1.0f; pos.x = (float)(int)pos.x + DisplayOffset.x; pos.y = (float)(int)pos.y + DisplayOffset.y; draw_list->PrimReserve(6, 4); draw_list->PrimRectUV(ImVec2(pos.x + glyph->X0 * scale, pos.y + glyph->Y0 * scale), ImVec2(pos.x + glyph->X1 * scale, pos.y + glyph->Y1 * scale), ImVec2(glyph->U0, glyph->V0), ImVec2(glyph->U1, glyph->V1), col); } } void ImFont::RenderText(ImDrawList* draw_list, float size, ImVec2 pos, ImU32 col, const ImVec4& clip_rect, const char* text_begin, const char* text_end, float wrap_width, bool cpu_fine_clip) const { if (!text_end) text_end = text_begin + strlen(text_begin); // ImGui:: functions generally already provides a valid text_end, so this is merely to handle direct calls. // Align to be pixel perfect pos.x = (float)(int)pos.x + DisplayOffset.x; pos.y = (float)(int)pos.y + DisplayOffset.y; float x = pos.x; float y = pos.y; if (y > clip_rect.w) return; const float scale = size / FontSize; const float line_height = FontSize * scale; const bool word_wrap_enabled = (wrap_width > 0.0f); const char* word_wrap_eol = NULL; // Fast-forward to first visible line const char* s = text_begin; if (y + line_height < clip_rect.y && !word_wrap_enabled) while (y + line_height < clip_rect.y && s < text_end) { s = (const char*)memchr(s, '\n', text_end - s); s = s ? s + 1 : text_end; y += line_height; } // For large text, scan for the last visible line in order to avoid over-reserving in the call to PrimReserve() // Note that very large horizontal line will still be affected by the issue (e.g. a one megabyte string buffer without a newline will likely crash atm) if (text_end - s > 10000 && !word_wrap_enabled) { const char* s_end = s; float y_end = y; while (y_end < clip_rect.w && s_end < text_end) { s_end = (const char*)memchr(s_end, '\n', text_end - s_end); s_end = s_end ? s_end + 1 : text_end; y_end += line_height; } text_end = s_end; } if (s == text_end) return; // Reserve vertices for remaining worse case (over-reserving is useful and easily amortized) const int vtx_count_max = (int)(text_end - s) * 4; const int idx_count_max = (int)(text_end - s) * 6; const int idx_expected_size = draw_list->IdxBuffer.Size + idx_count_max; draw_list->PrimReserve(idx_count_max, vtx_count_max); ImDrawVert* vtx_write = draw_list->_VtxWritePtr; ImDrawIdx* idx_write = draw_list->_IdxWritePtr; unsigned int vtx_current_idx = draw_list->_VtxCurrentIdx; while (s < text_end) { if (word_wrap_enabled) { // Calculate how far we can render. Requires two passes on the string data but keeps the code simple and not intrusive for what's essentially an uncommon feature. if (!word_wrap_eol) { word_wrap_eol = CalcWordWrapPositionA(scale, s, text_end, wrap_width - (x - pos.x)); if (word_wrap_eol == s) // Wrap_width is too small to fit anything. Force displaying 1 character to minimize the height discontinuity. 
word_wrap_eol++; // +1 may not be a character start point in UTF-8 but it's ok because we use s >= word_wrap_eol below } if (s >= word_wrap_eol) { x = pos.x; y += line_height; word_wrap_eol = NULL; // Wrapping skips upcoming blanks while (s < text_end) { const char c = *s; if (ImCharIsBlankA(c)) { s++; } else if (c == '\n') { s++; break; } else { break; } } continue; } } // Decode and advance source unsigned int c = (unsigned int)*s; if (c < 0x80) { s += 1; } else { s += ImTextCharFromUtf8(&c, s, text_end); if (c == 0) // Malformed UTF-8? break; } if (c < 32) { if (c == '\n') { x = pos.x; y += line_height; if (y > clip_rect.w) break; // break out of main loop continue; } if (c == '\r') continue; } float char_width = 0.0f; if (const ImFontGlyph* glyph = FindGlyph((ImWchar)c)) { char_width = glyph->AdvanceX * scale; // Arbitrarily assume that both space and tabs are empty glyphs as an optimization if (c != ' ' && c != '\t') { // We don't do a second finer clipping test on the Y axis as we've already skipped anything before clip_rect.y and exit once we pass clip_rect.w float x1 = x + glyph->X0 * scale; float x2 = x + glyph->X1 * scale; float y1 = y + glyph->Y0 * scale; float y2 = y + glyph->Y1 * scale; if (x1 <= clip_rect.z && x2 >= clip_rect.x) { // Render a character float u1 = glyph->U0; float v1 = glyph->V0; float u2 = glyph->U1; float v2 = glyph->V1; // CPU side clipping used to fit text in their frame when the frame is too small. Only does clipping for axis aligned quads. if (cpu_fine_clip) { if (x1 < clip_rect.x) { u1 = u1 + (1.0f - (x2 - clip_rect.x) / (x2 - x1)) * (u2 - u1); x1 = clip_rect.x; } if (y1 < clip_rect.y) { v1 = v1 + (1.0f - (y2 - clip_rect.y) / (y2 - y1)) * (v2 - v1); y1 = clip_rect.y; } if (x2 > clip_rect.z) { u2 = u1 + ((clip_rect.z - x1) / (x2 - x1)) * (u2 - u1); x2 = clip_rect.z; } if (y2 > clip_rect.w) { v2 = v1 + ((clip_rect.w - y1) / (y2 - y1)) * (v2 - v1); y2 = clip_rect.w; } if (y1 >= y2) { x += char_width; continue; } } // We are NOT calling PrimRectUV() here because non-inlined causes too much overhead in a debug builds. 
Inlined here: { idx_write[0] = (ImDrawIdx)(vtx_current_idx); idx_write[1] = (ImDrawIdx)(vtx_current_idx+1); idx_write[2] = (ImDrawIdx)(vtx_current_idx+2); idx_write[3] = (ImDrawIdx)(vtx_current_idx); idx_write[4] = (ImDrawIdx)(vtx_current_idx+2); idx_write[5] = (ImDrawIdx)(vtx_current_idx+3); vtx_write[0].pos.x = x1; vtx_write[0].pos.y = y1; vtx_write[0].col = col; vtx_write[0].uv.x = u1; vtx_write[0].uv.y = v1; vtx_write[1].pos.x = x2; vtx_write[1].pos.y = y1; vtx_write[1].col = col; vtx_write[1].uv.x = u2; vtx_write[1].uv.y = v1; vtx_write[2].pos.x = x2; vtx_write[2].pos.y = y2; vtx_write[2].col = col; vtx_write[2].uv.x = u2; vtx_write[2].uv.y = v2; vtx_write[3].pos.x = x1; vtx_write[3].pos.y = y2; vtx_write[3].col = col; vtx_write[3].uv.x = u1; vtx_write[3].uv.y = v2; vtx_write += 4; vtx_current_idx += 4; idx_write += 6; } } } } x += char_width; } // Give back unused vertices draw_list->VtxBuffer.resize((int)(vtx_write - draw_list->VtxBuffer.Data)); draw_list->IdxBuffer.resize((int)(idx_write - draw_list->IdxBuffer.Data)); draw_list->CmdBuffer[draw_list->CmdBuffer.Size-1].ElemCount -= (idx_expected_size - draw_list->IdxBuffer.Size); draw_list->_VtxWritePtr = vtx_write; draw_list->_IdxWritePtr = idx_write; draw_list->_VtxCurrentIdx = vtx_current_idx; } //----------------------------------------------------------------------------- // [SECTION] Internal Render Helpers // (progressively moved from imgui.cpp to here when they are redesigned to stop accessing ImGui global state) //----------------------------------------------------------------------------- // - RenderMouseCursor() // - RenderArrowPointingAt() // - RenderRectFilledRangeH() // - RenderPixelEllipsis() //----------------------------------------------------------------------------- void ImGui::RenderMouseCursor(ImDrawList* draw_list, ImVec2 pos, float scale, ImGuiMouseCursor mouse_cursor) { if (mouse_cursor == ImGuiMouseCursor_None) return; IM_ASSERT(mouse_cursor > ImGuiMouseCursor_None && mouse_cursor < ImGuiMouseCursor_COUNT); const ImU32 col_shadow = IM_COL32(0, 0, 0, 48); const ImU32 col_border = IM_COL32(0, 0, 0, 255); // Black const ImU32 col_fill = IM_COL32(255, 255, 255, 255); // White ImFontAtlas* font_atlas = draw_list->_Data->Font->ContainerAtlas; ImVec2 offset, size, uv[4]; if (font_atlas->GetMouseCursorTexData(mouse_cursor, &offset, &size, &uv[0], &uv[2])) { pos -= offset; const ImTextureID tex_id = font_atlas->TexID; draw_list->PushTextureID(tex_id); draw_list->AddImage(tex_id, pos + ImVec2(1,0)*scale, pos + ImVec2(1,0)*scale + size*scale, uv[2], uv[3], col_shadow); draw_list->AddImage(tex_id, pos + ImVec2(2,0)*scale, pos + ImVec2(2,0)*scale + size*scale, uv[2], uv[3], col_shadow); draw_list->AddImage(tex_id, pos, pos + size*scale, uv[2], uv[3], col_border); draw_list->AddImage(tex_id, pos, pos + size*scale, uv[0], uv[1], col_fill); draw_list->PopTextureID(); } } // Render an arrow. 'pos' is position of the arrow tip. half_sz.x is length from base to tip. half_sz.y is length on each side. 
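// Illustrative call (hypothetical values; this is an internal helper normally reached via imgui_internal.h):
// draw a left-pointing arrow with its tip at (100,100), 8 pixels from base to tip and 5 pixels on each side:
//   ImGui::RenderArrowPointingAt(ImGui::GetWindowDrawList(), ImVec2(100.0f, 100.0f), ImVec2(8.0f, 5.0f), ImGuiDir_Left, IM_COL32_WHITE);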
void ImGui::RenderArrowPointingAt(ImDrawList* draw_list, ImVec2 pos, ImVec2 half_sz, ImGuiDir direction, ImU32 col) { switch (direction) { case ImGuiDir_Left: draw_list->AddTriangleFilled(ImVec2(pos.x + half_sz.x, pos.y - half_sz.y), ImVec2(pos.x + half_sz.x, pos.y + half_sz.y), pos, col); return; case ImGuiDir_Right: draw_list->AddTriangleFilled(ImVec2(pos.x - half_sz.x, pos.y + half_sz.y), ImVec2(pos.x - half_sz.x, pos.y - half_sz.y), pos, col); return; case ImGuiDir_Up: draw_list->AddTriangleFilled(ImVec2(pos.x + half_sz.x, pos.y + half_sz.y), ImVec2(pos.x - half_sz.x, pos.y + half_sz.y), pos, col); return; case ImGuiDir_Down: draw_list->AddTriangleFilled(ImVec2(pos.x - half_sz.x, pos.y - half_sz.y), ImVec2(pos.x + half_sz.x, pos.y - half_sz.y), pos, col); return; case ImGuiDir_None: case ImGuiDir_COUNT: break; // Fix warnings } } static inline float ImAcos01(float x) { if (x <= 0.0f) return IM_PI * 0.5f; if (x >= 1.0f) return 0.0f; return ImAcos(x); //return (-0.69813170079773212f * x * x - 0.87266462599716477f) * x + 1.5707963267948966f; // Cheap approximation, may be enough for what we do. } // FIXME: Cleanup and move code to ImDrawList. void ImGui::RenderRectFilledRangeH(ImDrawList* draw_list, const ImRect& rect, ImU32 col, float x_start_norm, float x_end_norm, float rounding) { if (x_end_norm == x_start_norm) return; if (x_start_norm > x_end_norm) ImSwap(x_start_norm, x_end_norm); ImVec2 p0 = ImVec2(ImLerp(rect.Min.x, rect.Max.x, x_start_norm), rect.Min.y); ImVec2 p1 = ImVec2(ImLerp(rect.Min.x, rect.Max.x, x_end_norm), rect.Max.y); if (rounding == 0.0f) { draw_list->AddRectFilled(p0, p1, col, 0.0f); return; } rounding = ImClamp(ImMin((rect.Max.x - rect.Min.x) * 0.5f, (rect.Max.y - rect.Min.y) * 0.5f) - 1.0f, 0.0f, rounding); const float inv_rounding = 1.0f / rounding; const float arc0_b = ImAcos01(1.0f - (p0.x - rect.Min.x) * inv_rounding); const float arc0_e = ImAcos01(1.0f - (p1.x - rect.Min.x) * inv_rounding); const float half_pi = IM_PI * 0.5f; // We will == compare to this because we know this is the exact value ImAcos01 can return. const float x0 = ImMax(p0.x, rect.Min.x + rounding); if (arc0_b == arc0_e) { draw_list->PathLineTo(ImVec2(x0, p1.y)); draw_list->PathLineTo(ImVec2(x0, p0.y)); } else if (arc0_b == 0.0f && arc0_e == half_pi) { draw_list->PathArcToFast(ImVec2(x0, p1.y - rounding), rounding, 3, 6); // BL draw_list->PathArcToFast(ImVec2(x0, p0.y + rounding), rounding, 6, 9); // TR } else { draw_list->PathArcTo(ImVec2(x0, p1.y - rounding), rounding, IM_PI - arc0_e, IM_PI - arc0_b, 3); // BL draw_list->PathArcTo(ImVec2(x0, p0.y + rounding), rounding, IM_PI + arc0_b, IM_PI + arc0_e, 3); // TR } if (p1.x > rect.Min.x + rounding) { const float arc1_b = ImAcos01(1.0f - (rect.Max.x - p1.x) * inv_rounding); const float arc1_e = ImAcos01(1.0f - (rect.Max.x - p0.x) * inv_rounding); const float x1 = ImMin(p1.x, rect.Max.x - rounding); if (arc1_b == arc1_e) { draw_list->PathLineTo(ImVec2(x1, p0.y)); draw_list->PathLineTo(ImVec2(x1, p1.y)); } else if (arc1_b == 0.0f && arc1_e == half_pi) { draw_list->PathArcToFast(ImVec2(x1, p0.y + rounding), rounding, 9, 12); // TR draw_list->PathArcToFast(ImVec2(x1, p1.y - rounding), rounding, 0, 3); // BR } else { draw_list->PathArcTo(ImVec2(x1, p0.y + rounding), rounding, -arc1_e, -arc1_b, 3); // TR draw_list->PathArcTo(ImVec2(x1, p1.y - rounding), rounding, +arc1_b, +arc1_e, 3); // BR } } draw_list->PathFillConvex(col); } // FIXME: Rendering an ellipsis "..." is a surprisingly tricky problem for us... 
// FIXME: Rendering an ellipsis "..." is a surprisingly tricky problem for us... we cannot rely on font glyph having it,
// and regular dot are typically too wide. If we render a dot/shape ourselves it comes with the risk that it wouldn't match
// the boldness or positioning of what the font uses...
void ImGui::RenderPixelEllipsis(ImDrawList* draw_list, ImVec2 pos, ImU32 col, int count)
{
    ImFont* font = draw_list->_Data->Font;
    const float font_scale = draw_list->_Data->FontSize / font->FontSize;
    pos.y += (float)(int)(font->DisplayOffset.y + font->Ascent * font_scale + 0.5f - 1.0f);
    for (int dot_n = 0; dot_n < count; dot_n++)
        draw_list->AddRectFilled(ImVec2(pos.x + dot_n * 2.0f, pos.y), ImVec2(pos.x + dot_n * 2.0f + 1.0f, pos.y + 1.0f), col);
}

//-----------------------------------------------------------------------------
// [SECTION] Decompression code
//-----------------------------------------------------------------------------
// Compressed with stb_compress() then converted to a C array and encoded as base85.
// Use the program in misc/fonts/binary_to_compressed_c.cpp to create the array from a TTF file.
// The purpose of encoding as base85 instead of "0x00,0x01,..." style is only save on _source code_ size.
// Decompression from stb.h (public domain) by Sean Barrett https://github.com/nothings/stb/blob/master/stb.h
//-----------------------------------------------------------------------------

static unsigned int stb_decompress_length(const unsigned char *input)
{
    return (input[8] << 24) + (input[9] << 16) + (input[10] << 8) + input[11];
}

static unsigned char *stb__barrier_out_e, *stb__barrier_out_b;
static const unsigned char *stb__barrier_in_b;
static unsigned char *stb__dout;
static void stb__match(const unsigned char *data, unsigned int length)
{
    // INVERSE of memmove... write each byte before copying the next...
    IM_ASSERT(stb__dout + length <= stb__barrier_out_e);
    if (stb__dout + length > stb__barrier_out_e) { stb__dout += length; return; }
    if (data < stb__barrier_out_b) { stb__dout = stb__barrier_out_e+1; return; }
    while (length--) *stb__dout++ = *data++;
}

static void stb__lit(const unsigned char *data, unsigned int length)
{
    IM_ASSERT(stb__dout + length <= stb__barrier_out_e);
    if (stb__dout + length > stb__barrier_out_e) { stb__dout += length; return; }
    if (data < stb__barrier_in_b) { stb__dout = stb__barrier_out_e+1; return; }
    memcpy(stb__dout, data, length);
    stb__dout += length;
}

#define stb__in2(x)   ((i[x] << 8) + i[(x)+1])
#define stb__in3(x)   ((i[x] << 16) + stb__in2((x)+1))
#define stb__in4(x)   ((i[x] << 24) + stb__in3((x)+1))

static const unsigned char *stb_decompress_token(const unsigned char *i)
{
    if (*i >= 0x20) { // use fewer if's for cases that expand small
        if (*i >= 0x80)       stb__match(stb__dout-i[1]-1, i[0] - 0x80 + 1), i += 2;
        else if (*i >= 0x40)  stb__match(stb__dout-(stb__in2(0) - 0x4000 + 1), i[2]+1), i += 3;
        else /* *i >= 0x20 */ stb__lit(i+1, i[0] - 0x20 + 1), i += 1 + (i[0] - 0x20 + 1);
    } else { // more ifs for cases that expand large, since overhead is amortized
        if (*i >= 0x18)       stb__match(stb__dout-(stb__in3(0) - 0x180000 + 1), i[3]+1), i += 4;
        else if (*i >= 0x10)  stb__match(stb__dout-(stb__in3(0) - 0x100000 + 1), stb__in2(3)+1), i += 5;
        else if (*i >= 0x08)  stb__lit(i+2, stb__in2(0) - 0x0800 + 1), i += 2 + (stb__in2(0) - 0x0800 + 1);
        else if (*i == 0x07)  stb__lit(i+3, stb__in2(1) + 1), i += 3 + (stb__in2(1) + 1);
        else if (*i == 0x06)  stb__match(stb__dout-(stb__in3(1)+1), i[4]+1), i += 5;
        else if (*i == 0x04)  stb__match(stb__dout-(stb__in3(1)+1), stb__in2(4)+1), i += 6;
    }
    return i;
}

static unsigned int stb_adler32(unsigned int
adler32, unsigned char *buffer, unsigned int buflen) { const unsigned long ADLER_MOD = 65521; unsigned long s1 = adler32 & 0xffff, s2 = adler32 >> 16; unsigned long blocklen, i; blocklen = buflen % 5552; while (buflen) { for (i=0; i + 7 < blocklen; i += 8) { s1 += buffer[0], s2 += s1; s1 += buffer[1], s2 += s1; s1 += buffer[2], s2 += s1; s1 += buffer[3], s2 += s1; s1 += buffer[4], s2 += s1; s1 += buffer[5], s2 += s1; s1 += buffer[6], s2 += s1; s1 += buffer[7], s2 += s1; buffer += 8; } for (; i < blocklen; ++i) s1 += *buffer++, s2 += s1; s1 %= ADLER_MOD, s2 %= ADLER_MOD; buflen -= blocklen; blocklen = 5552; } return (unsigned int)(s2 << 16) + (unsigned int)s1; } static unsigned int stb_decompress(unsigned char *output, const unsigned char *i, unsigned int /*length*/) { unsigned int olen; if (stb__in4(0) != 0x57bC0000) return 0; if (stb__in4(4) != 0) return 0; // error! stream is > 4GB olen = stb_decompress_length(i); stb__barrier_in_b = i; stb__barrier_out_e = output + olen; stb__barrier_out_b = output; i += 16; stb__dout = output; for (;;) { const unsigned char *old_i = i; i = stb_decompress_token(i); if (i == old_i) { if (*i == 0x05 && i[1] == 0xfa) { IM_ASSERT(stb__dout == output + olen); if (stb__dout != output + olen) return 0; if (stb_adler32(1, output, olen) != (unsigned int) stb__in4(2)) return 0; return olen; } else { IM_ASSERT(0); /* NOTREACHED */ return 0; } } IM_ASSERT(stb__dout <= output + olen); if (stb__dout > output + olen) return 0; } } //----------------------------------------------------------------------------- // [SECTION] Default font data (ProggyClean.ttf) //----------------------------------------------------------------------------- // ProggyClean.ttf // Copyright (c) 2004, 2005 Tristan Grimmer // MIT license (see License.txt in http://www.upperbounds.net/download/ProggyClean.ttf.zip) // Download and more information at http://upperbounds.net //----------------------------------------------------------------------------- // File: 'ProggyClean.ttf' (41208 bytes) // Exported using misc/fonts/binary_to_compressed_c.cpp (with compression + base85 string encoding). // The purpose of encoding as base85 instead of "0x00,0x01,..." style is only save on _source code_ size. //----------------------------------------------------------------------------- static const char proggy_clean_ttf_compressed_data_base85[11980+1] = "7])#######hV0qs'/###[),##/l:$#Q6>##5[n42>c-TH`->>#/e>11NNV=Bv(*:.F?uu#(gRU.o0XGH`$vhLG1hxt9?W`#,5LsCp#-i>.r$<$6pD>Lb';9Crc6tgXmKVeU2cD4Eo3R/" "2*>]b(MC;$jPfY.;h^`IWM9<Lh2TlS+f-s$o6Q<BWH`YiU.xfLq$N;$0iR/GX:U(jcW2p/W*q?-qmnUCI;jHSAiFWM.R*kU@C=GH?a9wp8f$e.-4^Qg1)Q-GL(lf(r/7GrRgwV%MS=C#" "`8ND>Qo#t'X#(v#Y9w0#1D$CIf;W'#pWUPXOuxXuU(H9M(1<q-UE31#^-V'8IRUo7Qf./L>=Ke$$'5F%)]0^#[email protected]<r:QLtFsLcL6##lOj)#.Y5<-R&KgLwqJfLgN&;Q?gI^#DY2uL" "i@^rMl9t=cWq6##weg>$FBjVQTSDgEKnIS7EM9>ZY9w0#L;>>#Mx&4Mvt//L[MkA#[email protected]'[0#7RL_&#w+F%HtG9M#XL`N&.,GM4Pg;-<nLENhvx>-VsM.M0rJfLH2eTM`*oJMHRC`N" "kfimM2J,W-jXS:)r0wK#@Fge$U>`w'N7G#$#fB#$E^$#:9:hk+eOe--6x)F7*E%?76%^GMHePW-Z5l'&GiF#$956:rS?dA#fiK:)Yr+`&#0j@'DbG&#^$PG.Ll+DNa<XCMKEV*N)LN/N" "*b=%Q6pia-Xg8I$<MR&,VdJe$<(7G;Ckl'&hF;;$<_=X(b.RS%%)###MPBuuE1V:v&cX&#2m#(&cV]`k9OhLMbn%s$G2,B$BfD3X*sp5#l,$R#]x_X1xKX%b5U*[r5iMfUo9U`N99hG)" "tm+/Us9pG)XPu`<0s-)WTt(gCRxIg(%6sfh=ktMKn3j)<6<b5Sk_/0(^]AaN#(p/L>&VZ>1i%h1S9u5o@YaaW$e+b<TWFn/Z:Oh(Cx2$lNEoN^e)#CFY@@I;BOQ*sRwZtZxRcU7uW6CX" "ow0i(?$Q[cjOd[P4d)]>ROPOpxTO7Stwi1::iB1q)C_=dV26J;2,]7op$]uQr@_V7$q^%lQwtuHY]=DX,n3L#0PHDO4f9>dC@O>HBuKPpP*E,N+b3L#lpR/MrTEH.IAQk.a>D[.e;mc." 
"x]Ip.PH^'/aqUO/$1WxLoW0[iLA<QT;5HKD+@qQ'NQ(3_PLhE48R.qAPSwQ0/WK?Z,[x?-J;jQTWA0X@KJ(_Y8N-:/M74:/-ZpKrUss?d#dZq]DAbkU*JqkL+nwX@@47`5>w=4h(9.`G" "CRUxHPeR`5Mjol(dUWxZa(>STrPkrJiWx`5U7F#.g*jrohGg`cg:lSTvEY/EV_7H4Q9[Z%cnv;JQYZ5q.l7Zeas:HOIZOB?G<Nald$qs]@]L<J7bR*>gv:[7MI2k).'2($5FNP&EQ(,)" "U]W]+fh18.vsai00);D3@4ku5P?DP8aJt+;qUM]=+b'8@;mViBKx0DE[-auGl8:PJ&Dj+M6OC]O^((##]`0i)drT;-7X`=-H3[igUnPG-NZlo.#k@h#=Ork$m>a>$-?Tm$UV(?#P6YY#" "'/###xe7q.73rI3*pP/$1>s9)W,JrM7SN]'/4C#v$U`0#V.[0>xQsH$fEmPMgY2u7Kh(G%siIfLSoS+MK2eTM$=5,M8p`A.;_R%#u[K#$x4AG8.kK/HSB==-'Ie/QTtG?-.*^N-4B/ZM" "_3YlQC7(p7q)&](`6_c)$/*JL(L-^(]$wIM`dPtOdGA,U3:w2M-0<q-]L_?^)1vw'.,MRsqVr.L;aN&#/EgJ)PBc[-f>+WomX2u7lqM2iEumMTcsF?-aT=Z-97UEnXglEn1K-bnEO`gu" "Ft(c%=;Am_Qs@jLooI&NX;]0#j4#F14;gl8-GQpgwhrq8'=l_f-b49'UOqkLu7-##oDY2L(te+Mch&gLYtJ,MEtJfLh'x'M=$CS-ZZ%P]8bZ>#S?YY#%Q&q'3^Fw&?D)UDNrocM3A76/" "/oL?#h7gl85[qW/NDOk%16ij;+:1a'iNIdb-ou8.P*w,v5#EI$TWS>Pot-R*H'-SEpA:g)f+O$%%`kA#G=8RMmG1&O`>to8bC]T&$,n.LoO>29sp3dt-52U%VM#q7'DHpg+#Z9%H[K<L" "%a2E-grWVM3@2=-k22tL]4$##6We'8UJCKE[d_=%wI;'6X-GsLX4j^SgJ$##R*w,vP3wK#iiW&#*h^D&R?jp7+/u&#(AP##XU8c$fSYW-J95_-Dp[g9wcO&#M-h1OcJlc-*vpw0xUX&#" "OQFKNX@QI'IoPp7nb,QU//MQ&ZDkKP)X<WSVL(68uVl&#c'[0#(s1X&xm$Y%B7*K:eDA323j998GXbA#pwMs-jgD$9QISB-A_(aN4xoFM^@C58D0+Q+q3n0#3U1InDjF682-SjMXJK)(" "h$hxua_K]ul92%'BOU&#BRRh-slg8KDlr:%L71Ka:.A;%YULjDPmL<LYs8i#XwJOYaKPKc1h:'9Ke,g)b),78=I39B;xiY$bgGw-&.Zi9InXDuYa%G*f2Bq7mn9^#p1vv%#(Wi-;/Z5h" "o;#2:;%d&#x9v68C5g?ntX0X)pT`;%pB3q7mgGN)3%(P8nTd5L7GeA-GL@+%J3u2:(Yf>et`e;)f#Km8&+DC$I46>#Kr]]u-[=99tts1.qb#q72g1WJO81q+eN'03'eM>&1XxY-caEnO" "j%2n8)),?ILR5^.Ibn<-X-Mq7[a82Lq:F&#ce+S9wsCK*x`569E8ew'He]h:sI[2LM$[guka3ZRd6:t%IG:;$%YiJ:Nq=?eAw;/:nnDq0(CYcMpG)qLN4$##&J<j$UpK<Q4a1]MupW^-" "sj_$%[HK%'F####QRZJ::Y3EGl4'@%FkiAOg#p[##O`gukTfBHagL<LHw%q&OV0##F=6/:chIm0@eCP8X]:kFI%hl8hgO@RcBhS-@Qb$%+m=hPDLg*%K8ln(wcf3/'DW-$.lR?n[nCH-" "eXOONTJlh:.RYF%3'p6sq:UIMA945&^HFS87@$EP2iG<-lCO$%c`uKGD3rC$x0BL8aFn--`ke%#HMP'vh1/R&O_J9'um,.<tx[@%wsJk&bUT2`0uMv7gg#qp/ij.L56'hl;.s5CUrxjO" "M7-##.l+Au'A&O:-T72L]P`&=;ctp'XScX*rU.>-XTt,%OVU4)S1+R-#dg0/Nn?Ku1^0f$B*P:Rowwm-`0PKjYDDM'3]d39VZHEl4,.j']Pk-M.h^&:0FACm$maq-&sgw0t7/6(^xtk%" "LuH88Fj-ekm>GA#_>568x6(OFRl-IZp`&b,_P'$M<Jnq79VsJW/mWS*PUiq76;]/NM_>hLbxfc$mj`,O;&%W2m`Zh:/)Uetw:aJ%]K9h:TcF]u_-Sj9,VK3M.*'&0D[Ca]J9gp8,kAW]" "%(?A%R$f<->Zts'^kn=-^@c4%-pY6qI%J%1IGxfLU9CP8cbPlXv);C=b),<2mOvP8up,UVf3839acAWAW-W?#ao/^#%KYo8fRULNd2.>%m]UK:n%r$'sw]J;5pAoO_#2mO3n,'=H5(et" "Hg*`+RLgv>=4U8guD$I%D:W>-r5V*%j*W:Kvej.Lp$<M-SGZ':+Q_k+uvOSLiEo(<aD/K<CCc`'Lx>'?;++O'>()jLR-^u68PHm8ZFWe+ej8h:9r6L*0//c&iH&R8pRbA#Kjm%upV1g:" "a_#Ur7FuA#(tRh#.Y5K+@?3<-8m0$PEn;J:rh6?I6uG<-`wMU'ircp0LaE_OtlMb&1#6T.#FDKu#1Lw%u%+GM+X'e?YLfjM[VO0MbuFp7;>Q&#WIo)0@F%q7c#4XAXN-U&VB<HFF*qL(" "$/V,;(kXZejWO`<[5?\?ewY(*9=%wDc;,u<'9t3W-(H1th3+G]ucQ]kLs7df($/*JL]@*t7Bu_G3_7mp7<[email protected];x3B0lqp7Hf,^Ze7-##@/c58Mo(3;knp0%)A7?-W+eI'o8)b<" "nKnw'Ho8C=Y>pqB>0ie&jhZ[?iLR@@_AvA-iQC(=ksRZRVp7`.=+NpBC%rh&3]R:8XDmE5^V8O(x<<aG/1N$#FX$0V5Y6x'aErI3I$7x%E`v<-BY,)%-?Psf*l?%C3.mM(=/M0:JxG'?" 
"7WhH%o'a<-80g0NBxoO(GH<dM]n.+%q@jH?f.UsJ2Ggs&4<-e47&Kl+f//9@`b+?.TeN_&B8Ss?v;^Trk;f#YvJkl&w$]>-+k?'(<S:68tq*WoDfZu';mM?8X[ma8W%*`-=;D.(nc7/;" ")g:T1=^J$&BRV(-lTmNB6xqB[@0*o.erM*<SWF]u2=st-*(6v>^](H.aREZSi,#1:[IXaZFOm<-ui#qUq2$##Ri;u75OK#(RtaW-K-F`S+cF]uN`-KMQ%rP/Xri.LRcB##=YL3BgM/3M" "D?@f&1'BW-)Ju<L25gl8uhVm1hL$##*8###'A3/LkKW+(^rWX?5W_8g)a(m&K8P>#bmmWCMkk&#TR`C,5d>g)F;t,4:@_l8G/5h4vUd%&%950:VXD'QdWoY-F$BtUwmfe$YqL'8(PWX(" "P?^@Po3$##`MSs?DWBZ/S>+4%>fX,VWv/w'KD`LP5IbH;rTV>n3cEK8U#bX]l-/V+^lj3;vlMb&[5YQ8#pekX9JP3XUC72L,,?+Ni&co7ApnO*5NK,((W-i:$,kp'UDAO(G0Sq7MVjJs" "bIu)'Z,*[>br5fX^:FPAWr-m2KgL<LUN098kTF&#lvo58=/vjDo;.;)Ka*hLR#/k=rKbxuV`>Q_nN6'8uTG&#1T5g)uLv:873UpTLgH+#FgpH'_o1780Ph8KmxQJ8#H72L4@768@Tm&Q" "h4CB/5OvmA&,Q&QbUoi$a_%3M01H)4x7I^&KQVgtFnV+;[Pc>[m4k//,]1?#`VY[Jr*3&&slRfLiVZJ:]?=K3Sw=[$=uRB?3xk48@aeg<Z'<$#4H)6,>e0jT6'N#(q%.O=?2S]u*(m<-" "V8J'(1)G][68hW$5'q[GC&5j`TE?m'esFGNRM)j,ffZ?-qx8;->g4t*:CIP/[Qap7/9'#(1sao7w-.qNUdkJ)tCF&#B^;[email protected]$m%#QvQS8U@)2Z+3K:AKM5i" "sZ88+dKQ)W6>J%CL<KE>`.d*(B`-n8D9oK<Up]c$X$(,)M8Zt7/[rdkqTgl-0cuGMv'?>-XV1q['-5k'cAZ69e;D_?$ZPP&s^+7])$*$#@QYi9,5P&#9r+$%CE=68>K8r0=dSC%%(@p7" ".m7jilQ02'0-VWAg<a/''3u.=4L$Y)6k/K:_[3=&jvL<L0C/2'v:^;-DIBW,B4E68:kZ;%?8(Q8BH=kO65BW?xSG&#@uU,DS*,?.+(o(#1vCS8#CHF>TlGW'b)Tq7VT9q^*^$$.:&N@@" "$&)WHtPm*5_rO0&e%K&#-30j(E4#'Zb.o/(Tpm$>K'f@[PvFl,hfINTNU6u'0pao7%XUp9]5.>%h`8_=VYbxuel.NTSsJfLacFu3B'lQSu/m6-Oqem8T+oE--$0a/k]uj9EwsG>%veR*" "hv^BFpQj:K'#SJ,sB-'#](j.Lg92rTw-*n%@/;39rrJF,l#qV%OrtBeC6/,;qB3ebNW[?,Hqj2L.1NP&GjUR=1D8QaS3Up&@*9wP?+lo7b?@%'k4`p0Z$22%K3+iCZj?XJN4Nm&+YF]u" "@-W$U%VEQ/,,>>#)D<h#`)h0:<Q6909ua+&VU%n2:cG3FJ-%@Bj-DgLr`Hw&HAKjKjseK</xKT*)B,N9X3]krc12t'pgTV(Lv-tL[xg_%=M_q7a^x?7Ubd>#%8cY#YZ?=,`Wdxu/ae&#" "w6)R89tI#6@s'(6Bf7a&?S=^ZI_kS&ai`&=tE72L_D,;^R)7[$s<Eh#c&)q.MXI%#v9ROa5FZO%sF7q7Nwb&#ptUJ:aqJe$Sl68%.D###EC><?-aF&#RNQv>o8lKN%5/$(vdfq7+ebA#" "u1p]ovUKW&Y%q]'>$1@-[xfn$7ZTp7mM,G,Ko7a&Gu%G[RMxJs[0MM%wci.LFDK)(<c`Q8N)jEIF*+?P2a8g%)$q]o2aH8C&<SibC/q,(e:v;-b#6[$NtDZ84Je2KNvB#$P5?tQ3nt(0" "d=j.LQf./Ll33+(;q3L-w=8dX$#WF&uIJ@-bfI>%:_i2B5CsR8&9Z&#=mPEnm0f`<&c)QL5uJ#%u%lJj+D-r;BoF&#4DoS97h5g)E#o:&S4weDF,9^Hoe`h*L+_a*NrLW-1pG_&2UdB8" "6e%B/:=>)N4xeW.*wft-;$'58-ESqr<b?UI(_%@[P46>#U`'6AQ]m&6/`Z>#S?YY#Vc;r7U2&326d=w&H####?TZ`*4?&.MK?LP8Vxg>$[QXc%QJv92.(Db*B)gb*BM9dM*hJMAo*c&#" "b0v=Pjer]$gG&JXDf->'StvU7505l9$AFvgYRI^&<^b68?j#q9QX4SM'RO#&sL1IM.rJfLUAj221]d##DW=m83u5;'bYx,*Sl0hL(W;;$doB&O/TQ:(Z^xBdLjL<Lni;''X.`$#8+1GD" ":k$YUWsbn8ogh6rxZ2Z9]%nd+>V#*8U_72Lh+2Q8Cj0i:6hp&$C/:p(HK>T8Y[gHQ4`4)'$Ab(Nof%V'8hL&#<NEdtg(n'=S1A(Q1/I&4([%dM`,Iu'1:_hL>SfD07&6D<fp8dHM7/g+" "tlPN9J*rKaPct&?'uBCem^jn%9_K)<,C5K3s=5g&GmJb*[SYq7K;TRLGCsM-$$;S%:Y@r7AK0pprpL<Lrh,q7e/%KWK:50I^+m'vi`3?%Zp+<-d+$L-Sv:@.o19n$s0&39;kn;S%BSq*" "$3WoJSCLweV[aZ'MQIjO<7;X-X;&+dMLvu#^UsGEC9WEc[X(wI7#2.(F0jV*eZf<-Qv3J-c+J5AlrB#$p(H68LvEA'q3n0#m,[`*8Ft)FcYgEud]CWfm68,(aLA$@EFTgLXoBq/UPlp7" ":d[/;r_ix=:TF`S5H-b<LI&HY(K=h#)]Lk$K14lVfm:x$H<3^Ql<M`$OhapBnkup'D#L$Pb_`N*g]2e;X/Dtg,bsj&K#2[-:iYr'_wgH)NUIR8a1n#S?Yej'h8^58UbZd+^FKD*T@;6A" "7aQC[K8d-(v6GI$x:T<&'Gp5Uf>@M.*J:;$-rv29'M]8qMv-tLp,'886iaC=Hb*YJoKJ,(j%K=H`K.v9HggqBIiZu'QvBT.#=)0ukruV&.)3=(^1`o*Pj4<-<aN((^7('#Z0wK#5GX@7" "u][`*S^43933A4rl][`*O4CgLEl]v$1Q3AeF37dbXk,.)vj#x'd`;qgbQR%FW,2(?LO=s%Sc68%NP'##Aotl8x=BE#j1UD([3$M(]UI2LX3RpKN@;/#f'f/&_mt&F)XdF<9t4)Qa.*kT" "LwQ'(TTB9.xH'>#MJ+gLq9-##@HuZPN0]u:h7.T..G:;$/Usj(T7`Q8tT72LnYl<-qx8;-HV7Q-&Xdx%1a,hC=0u+HlsV>nuIQL-5<N?)NBS)QN*_I,?&)2'IM%L3I)X((e/dl2&8'<M" 
":^#M*Q+[T.Xri.LYS3v%fF`68h;b-X[/En'CR.q7E)p'/kle2HM,u;^%OKC-N+Ll%F9CF<Nf'^#t2L,;27W:0O@6##U6W7:$rJfLWHj$#)woqBefIZ.PK<b*t7ed;p*_m;4ExK#h@&]>" "_>@kXQtMacfD.m-VAb8;IReM3$wf0''hra*so568'Ip&vRs849'MRYSp%:t:h5qSgwpEr$B>Q,;s(C#$)`svQuF$##-D,##,g68@2[T;.XSdN9Qe)rpt._K-#5wF)sP'##p#C0c%-Gb%" "hd+<-j'Ai*x&&HMkT]C'OSl##5RG[JXaHN;d'uA#x._U;.`PU@(Z3dt4r152@:v,'R.Sj'w#0<-;kPI)FfJ&#AYJ&#//)>-k=m=*XnK$>=)72L]0I%>.G690a:$##<,);?;72#?x9+d;" "^V'9;jY@;)br#q^YQpx:X#Te$Z^'=-=bGhLf:D6&bNwZ9-ZD#n^9HhLMr5G;']d&6'wYmTFmL<LD)F^%[tC'8;+9E#C$g%#5Y>q9wI>P(9mI[>kC-ekLC/R&CH+s'B;K-M6$EB%is00:" "+A4[7xks.LrNk0&E)wILYF@2L'0Nb$+pv<(2.768/FrY&h$^3i&@+G%JT'<-,v`3;_)I9M^AE]CN?Cl2AZg+%4iTpT3<n-&%H%b<FDj2M<hH=&Eh<2Len$b*aTX=-8QxN)k11IM1c^j%" "9s<L<NFSo)B?+<-(GxsF,^-Eh@$4dXhN$+#rxK8'je'D7k`e;)2pYwPA'_p9&@^18ml1^[@g4t*[JOa*[=Qp7(qJ_oOL^('7fB&Hq-:sf,sNj8xq^>$U4O]GKx'm9)b@p7YsvK3w^YR-" "CdQ*:Ir<($u&)#(&?L9Rg3H)4fiEp^iI9O8KnTj,]H?D*r7'M;PwZ9K0E^k&-cpI;.p/6_vwoFMV<->#%Xi.LxVnrU(4&8/P+:hLSKj$#U%]49t'I:rgMi'FL@a:0Y-uA[39',(vbma*" "hU%<-SRF`Tt:542R_VV$p@[p8DV[A,?1839FWdF<TddF<9Ah-6&9tWoDlh]&1SpGMq>Ti1O*H&#(AL8[_P%.M>v^-))qOT*F5Cq0`Ye%+$B6i:7@0IX<N+T+0MlMBPQ*Vj>SsD<U4JHY" "8kD2)2fU/M#$e.)T4,_=8hLim[&);?UkK'-x?'(:siIfL<$pFM`i<?%W(mGDHM%>iWP,##P`%/L<eXi:@Z9C.7o=@(pXdAO/NLQ8lPl+HPOQa8wD8=^GlPa8TKI1CjhsCTSLJM'/Wl>-" "S(qw%sf/@%#B6;/U7K]uZbi^Oc^2n<bhPmUkMw>%t<)'mEVE''n`WnJra$^TKvX5B>;_aSEK',(hwa0:i4G?.Bci.(X[?b*($,=-n<.Q%`(X=?+@Am*Js0&=3bh8K]mL<LoNs'6,'85`" "0?t/'_U59@]ddF<#LdF<eWdF<OuN/45rY<-L@&#+fm>69=Lb,OcZV/);TTm8VI;?%OtJ<(b4mq7M6:u?KRdF<gR@2L=FNU-<b[(9c/ML3m;Z[$oF3g)GAWqpARc=<ROu7cL5l;-[A]%/" "+fsd;l#SafT/f*W]0=O'$(Tb<[)*@e775R-:Yob%g*>l*:xP?Yb.5)%w_I?7uk5JC+FS(m#i'k.'a0i)9<7b'fs'59hq$*5Uhv##pi^8+hIEBF`nvo`;'l0.^S1<-wUK2/Coh58KKhLj" "M=SO*rfO`+qC`W-On.=AJ56>>i2@2LH6A:&5q`?9I3@@'04&p2/LVa*T-4<-i3;M9UvZd+N7>b*eIwg:CC)c<>nO&#<IGe;__.thjZl<%w(Wk2xmp4Q@I#I9,DF]u7-P=.-_:YJ]aS@V" "?6*C()dOp7:WL,b&3Rg/.cmM9&r^>$(>.Z-I&J(Q0Hd5Q%7Co-b`-c<N(6r@ip+AurK<m86QIth*#v;-OBqi+L7wDE-Ir8K['m+DDSLwK&/.?-V%U_%3:qKNu$_b*B-kp7NaD'QdWQPK" "Yq[@>P)hI;*_F]u`Rb[.j8_Q/<&>uu+VsH$sM9TA%?)(vmJ80),P7E>)tjD%2L=-t#fK[%`v=Q8<FfNkgg^oIbah*#8/Qt$F&:K*-(N/'+1vMB,u()-a.VUU*#[e%gAAO(S>WlA2);Sa" ">gXm8YB`1d@K#n]76-a$U,mF<fX]idqd)<3,]J7JmW4`6]uks=4-72L(jEk+:bJ0M^q-8Dm_Z?0olP1C9Sa&H[d&c$ooQUj]Exd*3ZM@-WGW2%s',B-_M%>%Ul:#/'xoFM9QX-$.QN'>" "[%$Z$uF6pA6Ki2O5:8w*vP1<-1`[G,)-m#>0`P&#eb#.3i)rtB61(o'$?X3B</R90;eZ]%Ncq;-Tl]#F>2Qft^ae_5tKL9MUe9b*sLEQ95C&`=G?@Mj=wh*'3E>=-<)Gt*Iw)'QG:`@I" "wOf7&]1i'S01B+Ev/Nac#9S;=;YQpg_6U`*kVY39xK,[/6Aj7:'1Bm-_1EYfa1+o&o4hp7KN_Q(OlIo@S%;jVdn0'1<Vc52=u`3^o-n1'g4v58Hj&6_t7$##?M)c<$bgQ_'SY((-xkA#" "Y(,p'H9rIVY-b,'%bCPF7.J<Up^,(dU1VY*5#WkTU>h19w,WQhLI)3S#f$2(eb,jr*b;3Vw]*7NH%$c4Vs,eD9>XW8?N]o+(*pgC%/72LV-u<Hp,3@e^9UB1J+ak9-TN/mhKPg+AJYd$" "MlvAF_jCK*.O-^(63adMT->W%iewS8W6m2rtCpo'RS1R84=@paTKt)>=%&1[)*vp'u+x,VrwN;&]kuO9JDbg=pO$J*.jVe;u'm0dr9l,<*wMK*Oe=g8lV_KEBFkO'oU]^=[-792#ok,)" "i]lR8qQ2oA8wcRCZ^7w/Njh;?.stX?Q1>S1q4Bn$)K1<-rGdO'$Wr.Lc.CG)$/*JL4tNR/,SVO3,aUw'DJN:)Ss;wGn9A32ijw%FL+Z0Fn.U9;reSq)bmI32U==5ALuG&#Vf1398/pVo" "1*c-(aY168o<`JsSbk-,1N;$>0:OUas(3:8Z972LSfF8eb=c-;>SPw7.6hn3m`9^Xkn(r.qS[0;T%&Qc=+STRxX'q1BNk3&*eu2;&8q$&x>Q#Q7^Tf+6<(d%ZVmj2bDi%.3L2n+4W'$P" "iDDG)g,r%+?,$@?uou5tSe2aN_AQU*<h`e-GI7)?OK2A.d7_c)?wQ5AS@DL3r#7fSkgl6-++D:'A,uq7SvlB$pcpH'q3n0#_%dY#xCpr-l<F0NR@-##FEV6NTF6##$l84N1w?AO>'IAO" "URQ##V^Fv-XFbGM7Fl(N<3DhLGF%q.1rC$#:T__&Pi68%0xi_&[qFJ(77j_&JWoF.V735&T,[R*:xFR*K5>>#`bW-?4Ne_&6Ne_&6Ne_&n`kr-#GJcM6X;uM6X;uM(.a..^2TkL%oR(#" 
";u.T%fAr%4tJ8&><1=GHZ_+m9/#H1F^R#SC#*N=BA9(D?v[UiFY>>^8p,KKF.W]L29uLkLlu/+4T<XoIB&hx=T1PcDaB&;HH+-AFr?(m9HZV)FKS8JCw;SD=6[^/DZUL`EUDf]GGlG&>" "w$)F./^n3+rlo+DB;5sIYGNk+i1t-69Jg--0pao7Sm#K)pdHW&;LuDNH@H>#/X-TI(;P>#,Gc>#0Su>#4`1?#8lC?#<xU?#@.i?#D:%@#HF7@#LRI@#P_[@#Tkn@#Xw*A#]-=A#a9OA#" "d<F&#*;G##.GY##2Sl##6`($#:l:$#>xL$#B.`$#F:r$#JF.%#NR@%#R_R%#Vke%#Zww%#_-4&#3^Rh%Sflr-k'MS.o?.5/sWel/wpEM0%3'/1)K^f1-d>G21&v(35>V`39V7A4=onx4" "A1OY5EI0;6Ibgr6M$HS7Q<)58C5w,;WoA*#[%T*#`1g*#d=#+#hI5+#lUG+#pbY+#tnl+#x$),#&1;,#*=M,#.I`,#2Ur,#6b.-#;w[H#iQtA#m^0B#qjBB#uvTB##-hB#'9$C#+E6C#" "/QHC#3^ZC#7jmC#;v)D#?,<D#C8ND#GDaD#KPsD#O]/E#g1A5#KA*1#gC17#MGd;#8(02#L-d3#rWM4#Hga1#,<w0#T.j<#O#'2#CYN1#qa^:#_4m3#o@/=#eG8=#t8J5#`+78#4uI-#" "m3B2#SB[8#Q0@8#i[*9#iOn8#1Nm;#^sN9#qh<9#:=x-#P;K2#$%X9#bC+.#Rg;<#mN=.#MTF.#RZO.#2?)4#Y#(/#[)1/#b;L/#dAU/#0Sv;#lY$0#n`-0#sf60#(F24#wrH0#%/e0#" "TmD<#%JSMFove:CTBEXI:<eh2g)B,3h2^G3i;#d3jD>)4kMYD4lVu`4m`:&5niUA5@(A5BA1]PBB:xlBCC=2CDLXMCEUtiCf&0g2'tN?PGT4CPGT4CPGT4CPGT4CPGT4CPGT4CPGT4CP" "GT4CPGT4CPGT4CPGT4CPGT4CPGT4CP-qekC`.9kEg^+F$kwViFJTB&5KTB&5KTB&5KTB&5KTB&5KTB&5KTB&5KTB&5KTB&5KTB&5KTB&5KTB&5KTB&5KTB&5KTB&5o,^<-28ZI'O?;xp" "O?;xpO?;xpO?;xpO?;xpO?;xpO?;xpO?;xpO?;xpO?;xpO?;xpO?;xpO?;xpO?;xp;7q-#lLYI:xvD=#"; static const char* GetDefaultCompressedFontDataTTFBase85() { return proggy_clean_ttf_compressed_data_base85; }