file_path (string, 21-207 chars) | content (string, 5-1.02M chars) | size (int64, 5-1.02M) | lang (string, 9 classes) | avg_line_length (float64, 1.33-100) | max_line_length (int64, 4-993) | alphanum_fraction (float64, 0.27-0.93) |
---|---|---|---|---|---|---|
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastTime.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "NvBlastTime.h"
#include "NvBlast.h"
#include <cstring>
namespace Nv
{
namespace Blast
{
const double Time::s_secondsPerTick = Time::getTickDuration();
} // namespace Blast
} // namespace Nv
| 1,767 | C++ | 42.12195 | 74 | 0.763441 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastTime.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTTIME_H
#define NVBLASTTIME_H
#include "NvBlastTypes.h"
namespace Nv
{
namespace Blast
{
class Time
{
public:
Time() : m_lastTickCount(getTimeTicks()) {}
int64_t getElapsedTicks()
{
const int64_t lastTickCount = m_lastTickCount;
m_lastTickCount = getTimeTicks();
return m_lastTickCount - lastTickCount;
}
int64_t peekElapsedTicks() const
{
return getTimeTicks() - m_lastTickCount;
}
int64_t getLastTickCount() const
{
return m_lastTickCount;
}
static double seconds(int64_t ticks)
{
return s_secondsPerTick * ticks;
}
private:
int64_t getTimeTicks() const;
static double getTickDuration();
int64_t m_lastTickCount;
static const double s_secondsPerTick;
};
} // namespace Blast
} // namespace Nv
//////// Time inline functions for various platforms ////////
#if NV_MICROSOFT_FAMILY
#include "NvBlastIncludeWindows.h"
NV_INLINE int64_t Nv::Blast::Time::getTimeTicks() const
{
LARGE_INTEGER a;
QueryPerformanceCounter(&a);
return a.QuadPart;
}
NV_INLINE double Nv::Blast::Time::getTickDuration()
{
LARGE_INTEGER a;
QueryPerformanceFrequency(&a);
return 1.0 / (double)a.QuadPart;
}
#elif NV_UNIX_FAMILY
#include <time.h>
NV_INLINE int64_t Nv::Blast::Time::getTimeTicks() const
{
struct timespec mCurrTimeInt;
clock_gettime(CLOCK_REALTIME, &mCurrTimeInt);
return (static_cast<int64_t>(mCurrTimeInt.tv_sec) * 1000000000) + (static_cast<int64_t>(mCurrTimeInt.tv_nsec));
}
NV_INLINE double Nv::Blast::Time::getTickDuration()
{
return 1.e-9;
}
#endif
#endif // #ifndef NVBLASTTIME_H
| 3,279 | C | 27.034188 | 115 | 0.706618 |
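A minimal usage sketch for the `Nv::Blast::Time` class above, assuming the Blast SDK headers are on the include path; the function name is illustrative only:

```cpp
#include "NvBlastTime.h"
#include <cstdio>

// Hypothetical profiling helper: measure a block of work with Nv::Blast::Time.
void profileWork()
{
    Nv::Blast::Time timer;  // records the current tick count on construction

    // ... work to be measured ...

    const int64_t ticks = timer.getElapsedTicks();  // elapsed ticks; also restarts the timer
    std::printf("elapsed: %f s\n", Nv::Blast::Time::seconds(ticks));
}
```

`peekElapsedTicks()` can be used instead when the timer should keep its original start point.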
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastFixedBoolArray.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTFIXEDBOOLARRAY_H
#define NVBLASTFIXEDBOOLARRAY_H
#include "NvBlastAssert.h"
#include "NvBlastMemory.h"
#include <cstring>
namespace Nv
{
namespace Blast
{
/*!
FixedBoolArray is a fixed-size array of bools, intended to be used with placement new on a chunk of memory.
It stores its data in the memory immediately following the object, laid out as follows:
// some memory
char *buf = new char[64 * 1024];
const uint32_t size = 100;
// placement new on this memory
FixedBoolArray* arr = new (buf) FixedBoolArray(size);
// requiredMemorySize(size) is the total footprint of the array, so the remaining memory starts at
buf = buf + FixedBoolArray::requiredMemorySize(size);
buf:
+------------------------------------------------------------+
| uint32_t | bool0 | bool1 | bool2 | ... |
+------------------------------------------------------------+
*/
class FixedBoolArray
{
public:
explicit FixedBoolArray(uint32_t size)
{
m_size = size;
}
static size_t requiredMemorySize(uint32_t size)
{
return align16(sizeof(FixedBoolArray)) + align16(size);
}
void clear()
{
memset(data(), 0, m_size);
}
void fill()
{
memset(data(), 1, m_size);
}
int test(uint32_t index) const
{
NVBLAST_ASSERT(index < m_size);
return data()[index];
}
void set(uint32_t index)
{
NVBLAST_ASSERT(index < m_size);
data()[index] = 1;
}
void setData(const char* newData, uint32_t newSize)
{
m_size = newSize;
memcpy(data(), newData, m_size);
}
const char* getData() const
{
return data();
}
uint32_t getSize() const
{
return m_size;
}
void reset(uint32_t index)
{
NVBLAST_ASSERT(index < m_size);
data()[index] = 0;
}
private:
uint32_t m_size;
NV_FORCE_INLINE char* data()
{
return ((char*)this + sizeof(FixedBoolArray));
}
NV_FORCE_INLINE const char* data() const
{
return ((char*)this + sizeof(FixedBoolArray));
}
private:
FixedBoolArray(const FixedBoolArray& that);
};
} // namespace Blast
} // namespace Nv
#endif // ifndef NVBLASTFIXEDBOOLARRAY_H
| 3,777 | C | 25.985714 | 114 | 0.65237 |
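A sketch of the placement-new pattern described in the header comment above, assuming the Blast SDK headers are available; the buffer source and element count are arbitrary choices:

```cpp
#include "NvBlastFixedBoolArray.h"
#include <cstdlib>
#include <new>

void exampleFixedBoolArray()
{
    const uint32_t count = 100;
    void* buf = std::malloc(Nv::Blast::FixedBoolArray::requiredMemorySize(count));

    Nv::Blast::FixedBoolArray* flags = new (buf) Nv::Blast::FixedBoolArray(count);
    flags->clear();       // the constructor only stores the size, so clear (or fill) explicitly
    flags->set(3);
    if (flags->test(3))
    {
        flags->reset(3);
    }

    std::free(buf);       // no destructor call needed for this trivially destructible type
}
```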
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastMath.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTMATH_H
#define NVBLASTMATH_H
#include <math.h>
namespace Nv
{
namespace Blast
{
namespace VecMath
{
NV_INLINE void div(float a[3], float divisor)
{
for (int i = 0; i < 3; i++)
a[i] /= divisor;
}
NV_INLINE void mul(float a[3], float multiplier)
{
for (int i = 0; i < 3; i++)
a[i] *= multiplier;
}
NV_INLINE void add(const float a[3], float b[3])
{
for (int i = 0; i < 3; i++)
b[i] = a[i] + b[i];
}
NV_INLINE void add(const float a[3], const float b[3], float r[3])
{
for (int i = 0; i < 3; i++)
r[i] = a[i] + b[i];
}
NV_INLINE void sub(const float a[3], const float b[3], float r[3])
{
for (int i = 0; i < 3; i++)
r[i] = a[i] - b[i];
}
NV_INLINE float dot(const float a[3], const float b[3])
{
float r = 0;
for (int i = 0; i < 3; i++)
r += a[i] * b[i];
return r;
}
NV_INLINE float length(const float a[3])
{
return sqrtf(dot(a, a));
}
NV_INLINE float dist(const float a[3], const float b[3])
{
float v[3];
sub(a, b, v);
return length(v);
}
NV_INLINE float normal(const float a[3], float r[3])
{
float d = length(a);
for (int i = 0; i < 3; i++)
r[i] = a[i] / d;
return d;
}
} // namespace VecMath
} // namespace Blast
} // namespace Nv
#endif // #ifndef NVBLASTMATH_H
| 2,884 | C | 25.46789 | 74 | 0.660194 |
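A short sketch exercising the `VecMath` helpers above; note that NvBlastMath.h does not include NvPreprocessor.h itself, so it is pulled in explicitly here, and the input values are arbitrary:

```cpp
#include "NvPreprocessor.h"  // for NV_INLINE, which NvBlastMath.h relies on
#include "NvBlastMath.h"
#include <cstdio>

void exampleVecMath()
{
    float a[3] = { 1.0f, 2.0f, 2.0f };
    float b[3] = { 0.0f, 0.0f, 0.0f };
    float n[3];

    std::printf("length(a) = %f\n", Nv::Blast::VecMath::length(a));   // 3
    std::printf("dist(a,b) = %f\n", Nv::Blast::VecMath::dist(a, b));  // 3
    const float len = Nv::Blast::VecMath::normal(a, n);               // n = a / |a|, returns |a|
    std::printf("|a| = %f, n = (%f, %f, %f)\n", len, n[0], n[1], n[2]);
}
```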
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastFixedQueue.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTFIXEDQUEUE_H
#define NVBLASTFIXEDQUEUE_H
#include "NvBlastAssert.h"
#include "NvBlastMemory.h"
namespace Nv
{
namespace Blast
{
/*!
FixedQueue is a queue container intended to be used with placement new on a chunk of memory.
It stores its data in the memory immediately following the object, as follows:
// some memory
char *buf = new char[64 * 1024];
// placement new on this memory
FixedQueue<SomeClass>* q = new (buf) FixedQueue<SomeClass>(capacity);
// requiredMemorySize(capacity) is the total footprint for 'capacity' elements, so the remaining memory starts at
buf = buf + FixedQueue<SomeClass>::requiredMemorySize(capacity);
*/
template <class T>
class FixedQueue
{
public:
explicit FixedQueue(uint32_t maxEntries) : m_num(0), m_head(0), m_tail(0), m_maxEntries(maxEntries)
{
}
static size_t requiredMemorySize(uint32_t capacity)
{
return align16(sizeof(FixedQueue<T>)) + align16(capacity * sizeof(T));
}
T popFront()
{
NVBLAST_ASSERT(m_num>0);
m_num--;
T& element = data()[m_tail];
m_tail = (m_tail+1) % (m_maxEntries);
return element;
}
T front()
{
NVBLAST_ASSERT(m_num>0);
return data()[m_tail];
}
T popBack()
{
NVBLAST_ASSERT(m_num>0);
m_num--;
m_head = (m_head-1) % (m_maxEntries);
return data()[m_head];
}
T back()
{
NVBLAST_ASSERT(m_num>0);
uint32_t headAccess = (m_head-1) % (m_maxEntries);
return data()[headAccess];
}
bool pushBack(const T& element)
{
if (m_num == m_maxEntries) return false;
data()[m_head] = element;
m_num++;
m_head = (m_head+1) % (m_maxEntries);
return true;
}
bool empty() const
{
return m_num == 0;
}
uint32_t size() const
{
return m_num;
}
private:
uint32_t m_num;
uint32_t m_head;
uint32_t m_tail;
uint32_t m_maxEntries;
T* data()
{
return (T*)((char*)this + sizeof(FixedQueue<T>));
}
private:
FixedQueue(const FixedQueue& that);
};
} // namespace Blast
} // namespace Nv
#endif // ifndef NVBLASTFIXEDQUEUE_H
| 3,777 | C | 25.794326 | 103 | 0.657665 |
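A sketch of the `FixedQueue` placement-new pattern for a queue of `uint32_t`, assuming the Blast SDK headers are available; the capacity is an arbitrary choice:

```cpp
#include "NvBlastFixedQueue.h"
#include <cstdlib>
#include <new>

void exampleFixedQueue()
{
    const uint32_t capacity = 32;
    void* buf = std::malloc(Nv::Blast::FixedQueue<uint32_t>::requiredMemorySize(capacity));

    Nv::Blast::FixedQueue<uint32_t>* q = new (buf) Nv::Blast::FixedQueue<uint32_t>(capacity);
    q->pushBack(7u);
    q->pushBack(11u);
    while (!q->empty())
    {
        const uint32_t value = q->popFront();   // FIFO order: 7, then 11
        (void)value;
    }

    std::free(buf);
}
```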
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastFixedArray.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTFIXEDARRAY_H
#define NVBLASTFIXEDARRAY_H
#include "NvBlastAssert.h"
#include "NvBlastMemory.h"
namespace Nv
{
namespace Blast
{
/*!
FixedArray is a sequential container intended to be used with placement new on a chunk of memory.
It stores its data in the memory immediately following the object, laid out as follows:
// some memory
char *buf = new char[64 * 1024];
// placement new on this memory
FixedArray<SomeClass>* arr = new (buf) FixedArray<SomeClass>();
// requiredMemorySize(capacity) is the total footprint for 'capacity' elements, so the remaining memory starts at
buf = buf + FixedArray<SomeClass>::requiredMemorySize(capacity);
buf:
+------------------------------------------------------------+
| uint32_t | T[0] | T[1] | T[2] | ... |
+------------------------------------------------------------+
!!!TODO:
- check ctor/dtor of elements calls
*/
template <class T>
class FixedArray
{
public:
explicit FixedArray() : m_size(0)
{
}
static size_t requiredMemorySize(uint32_t capacity)
{
return align16(sizeof(FixedArray<T>)) + align16(capacity * sizeof(T));
}
NV_FORCE_INLINE T& pushBack(T& t)
{
new (data() + m_size) T(t);
return data()[m_size++];
}
T popBack()
{
NVBLAST_ASSERT(m_size);
T t = data()[m_size - 1];
data()[--m_size].~T();
return t;
}
void clear()
{
for(T* first = data(); first < data() + m_size; ++first)
first->~T();
m_size = 0;
}
NV_FORCE_INLINE void forceSize_Unsafe(uint32_t s)
{
m_size = s;
}
NV_FORCE_INLINE T& operator[](uint32_t idx)
{
NVBLAST_ASSERT(idx < m_size);
return data()[idx];
}
NV_FORCE_INLINE const T& operator[](uint32_t idx) const
{
NVBLAST_ASSERT(idx < m_size);
return data()[idx];
}
NV_FORCE_INLINE T& at(uint32_t idx)
{
NVBLAST_ASSERT(idx < m_size);
return data()[idx];
}
NV_FORCE_INLINE const T& at(uint32_t idx) const
{
NVBLAST_ASSERT(idx < m_size);
return data()[idx];
}
NV_FORCE_INLINE uint32_t size() const
{
return m_size;
}
private:
uint32_t m_size;
NV_FORCE_INLINE T* data()
{
return (T*)((char*)this + sizeof(FixedArray<T>));
}
private:
FixedArray(const FixedArray& that);
};
} // namespace Blast
} // namespace Nv
#endif // ifndef NVBLASTFIXEDARRAY_H
| 4,026 | C | 26.582192 | 104 | 0.635122 |
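A sketch of the `FixedArray` placement-new pattern, assuming the Blast SDK headers are available; the capacity is arbitrary:

```cpp
#include "NvBlastFixedArray.h"
#include <cstdlib>
#include <new>

void exampleFixedArray()
{
    const uint32_t capacity = 16;
    void* buf = std::malloc(Nv::Blast::FixedArray<float>::requiredMemorySize(capacity));

    Nv::Blast::FixedArray<float>* arr = new (buf) Nv::Blast::FixedArray<float>();
    float v = 1.5f;
    arr->pushBack(v);                    // note: pushBack takes a non-const reference
    const float back = arr->popBack();   // 1.5f
    (void)back;
    arr->clear();

    std::free(buf);
}
```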
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastHashSet.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTHASHSET_H
#define NVBLASTHASHSET_H
#include "NvBlastAllocator.h"
#include "NsHashSet.h"
namespace Nv
{
namespace Blast
{
/**
Wrapped NvShared HashSet that uses NvBlastGlobals AllocatorCallback.
*/
template <class Key, class HashFn = nvidia::shdfnd::Hash<Key>>
struct HashSet
{
typedef nvidia::shdfnd::HashSet<Key, HashFn, Allocator> type;
};
} // namespace Blast
} // namespace Nv
#endif // #ifndef NVBLASTHASHSET_H
| 2,006 | C | 36.166666 | 74 | 0.762213 |
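The wrapper is used through its nested `::type` alias; a brief sketch, where the `insert()` and `contains()` calls assume the NvShared `HashSet` interface and should be treated as illustrative rather than authoritative:

```cpp
#include "NvBlastHashSet.h"

void exampleHashSet()
{
    Nv::Blast::HashSet<uint32_t>::type visited;   // allocates via NvBlastGlobals
    visited.insert(42u);
    const bool seen = visited.contains(42u);
    (void)seen;
}
```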
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastIteratorBase.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTITERATORBASE_H
#define NVBLASTITERATORBASE_H
#include "NvBlastIndexFns.h"
namespace Nv
{
namespace Blast
{
/**
Common functionality and implementation for iterators over an index, using invalidIndex<T>() to indicate termination.
Derived class needs to implement increment operators.
*/
template<typename T>
class IteratorBase
{
public:
/** Constructor sets m_curr value */
IteratorBase(T curr);
/** Validity of current value. */
operator bool() const;
/** Current value. */
operator T() const;
protected:
T m_curr;
};
//////// IteratorBase<T> inline methods ////////
template<typename T>
NV_INLINE IteratorBase<T>::IteratorBase(T curr) : m_curr(curr)
{
}
template<typename T>
NV_INLINE IteratorBase<T>::operator bool() const
{
return !isInvalidIndex<T>(m_curr);
}
template<typename T>
NV_INLINE IteratorBase<T>::operator T() const
{
return m_curr;
}
/**
Common functionality and implementation for an indexed linked list iterator
*/
template<typename IndexType>
class LListIt : public IteratorBase<IndexType>
{
public:
LListIt(IndexType curr, IndexType* links);
/** Pre-increment. Only use if valid() == true. */
uint32_t operator ++ ();
protected:
IndexType* m_links;
};
//////// LListIt<IndexType> inline methods ////////
template<typename IndexType>
NV_INLINE LListIt<IndexType>::LListIt(IndexType curr, IndexType* links) : IteratorBase<IndexType>(curr), m_links(links)
{
}
template<typename IndexType>
NV_INLINE uint32_t LListIt<IndexType>::operator ++ ()
{
NVBLAST_ASSERT((bool)(*this));
return (this->m_curr = m_links[this->m_curr]);
}
/**
Common functionality and implementation for an IndexDList<IndexType> iterator
*/
template<typename IndexType>
class DListIt : public IteratorBase<IndexType>
{
public:
DListIt(IndexType curr, IndexDLink<IndexType>* links);
/** Pre-increment. Only use if valid() == true. */
uint32_t operator ++ ();
protected:
IndexDLink<IndexType>* m_links;
};
//////// DListIt<IndexType> inline methods ////////
template<typename IndexType>
NV_INLINE DListIt<IndexType>::DListIt(IndexType curr, IndexDLink<IndexType>* links) : IteratorBase<IndexType>(curr), m_links(links)
{
}
template<typename IndexType>
NV_INLINE uint32_t DListIt<IndexType>::operator ++ ()
{
NVBLAST_ASSERT((bool)(*this));
return (this->m_curr = m_links[this->m_curr].m_adj[1]);
}
} // end namespace Blast
} // end namespace Nv
#endif // #ifndef NVBLASTITERATORBASE_H
| 4,084 | C | 25.699346 | 131 | 0.721107 |
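A hedged sketch of walking a singly linked index list with `LListIt`; the layout assumed here (a `links` array where each entry holds the next index, terminated by the invalid-index value from NvBlastIndexFns.h) matches how these iterators are used inside the SDK, but the function below is illustrative only:

```cpp
#include "NvBlastIteratorBase.h"

// Counts the nodes reachable from 'head' by following links[i] until the
// invalid-index terminator is reached (termination is detected via operator bool()).
uint32_t countListNodes(uint32_t head, uint32_t* links)
{
    uint32_t count = 0;
    for (Nv::Blast::LListIt<uint32_t> it(head, links); (bool)it; ++it)
    {
        ++count;
    }
    return count;
}
```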
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastTimers.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "NvBlast.h"
#include "NvBlastTime.h"
#include <cstring>
extern "C"
{
void NvBlastTimersReset(NvBlastTimers* timers)
{
memset(timers, 0, sizeof(NvBlastTimers));
}
double NvBlastTicksToSeconds(int64_t ticks)
{
return Nv::Blast::Time::seconds(ticks);
}
} // extern "C"
| 1,852 | C++ | 38.425531 | 74 | 0.758099 |
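A small sketch of the timer-related C API defined above; the idea that fracture/split entry points accept an optional `NvBlastTimers*` is an assumption for illustration, and `accumulatedTicks` stands in for one of the struct's counters:

```cpp
#include "NvBlast.h"

void exampleTimers()
{
    NvBlastTimers timers;
    NvBlastTimersReset(&timers);            // zero all counters before use

    // ... pass &timers to Blast calls that accept an optional NvBlastTimers* ...

    const int64_t accumulatedTicks = 0;     // placeholder for a field of 'timers'
    const double seconds = NvBlastTicksToSeconds(accumulatedTicks);
    (void)seconds;
}
```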
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastMemory.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTMEMORY_H
#define NVBLASTMEMORY_H
#include <math.h>
namespace Nv
{
namespace Blast
{
/**
Utility function to align the given value to the next 16-byte boundary.
Returns the aligned value.
*/
template<typename T>
NV_INLINE T align16(T value)
{
return (value + 0xF)&~(T)0xF;
}
/** Helper functions to offset a void* pointer by 'offset' bytes */
template <typename T>
NV_INLINE T pointerOffset(void* p, ptrdiff_t offset)
{
return reinterpret_cast<T>(reinterpret_cast<char*>(p)+offset);
}
template <typename T>
NV_INLINE T pointerOffset(const void* p, ptrdiff_t offset)
{
return reinterpret_cast<T>(reinterpret_cast<const char*>(p)+offset);
}
NV_INLINE const void* pointerOffset(const void* p, ptrdiff_t offset)
{
return pointerOffset<const void*>(p, offset);
}
NV_INLINE void* pointerOffset(void* p, ptrdiff_t offset)
{
return pointerOffset<void*>(p, offset);
}
} // namespace Blast
} // namespace Nv
/** Block data offset and accessor macro. */
#define NvBlastBlockData(_dataType, _name, _accessor) \
_dataType* _accessor() const \
{ \
return (_dataType*)((uintptr_t)this + _name); \
} \
uint32_t _name
/** Block data offset and accessor macro for an array (includes an _accessor##ArraySize() function which returns the last expression). */
#define NvBlastBlockArrayData(_dataType, _name, _accessor, _sizeExpr) \
_dataType* _accessor() const \
{ \
return (_dataType*)((uintptr_t)this + _name); \
} \
uint32_t _accessor##ArraySize() const \
{ \
return _sizeExpr; \
} \
uint32_t _name
/** Block data offset generation macros. */
/** Start offset generation with this. */
#define NvBlastCreateOffsetStart(_baseOffset) \
size_t _lastOffset = _baseOffset; \
size_t _lastSize = 0
/** Create the next offset generation with this. The value will be aligned to a 16-byte boundary. */
#define NvBlastCreateOffsetAlign16(_name, _size) \
_name = align16(_lastOffset + _lastSize); \
_lastOffset = _name; \
_lastSize = _size
/** End offset generation with this. It evaluates to the (16-byte aligned) total size of the data block. */
#define NvBlastCreateOffsetEndAlign16() \
align16(_lastOffset + _lastSize)
/** Stack allocation */
#if NV_WINDOWS_FAMILY
#include <malloc.h>
#define NvBlastAlloca(x) _alloca(x)
#elif NV_LINUX || NV_ANDROID
#include <alloca.h>
#define NvBlastAlloca(x) alloca(x)
#elif NV_APPLE_FAMILY
#include <alloca.h>
#define NvBlastAlloca(x) alloca(x)
#endif
#endif // #ifndef NVBLASTMEMORY_H
| 4,027 | C | 29.515151 | 137 | 0.73032 |
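A sketch of the `align16` and `pointerOffset` helpers above (NvBlastMemory.h does not pull in NvPreprocessor.h itself, so it is included explicitly here); the NvBlastCreateOffset* macros are used the same way inside the SDK's block structures:

```cpp
#include "NvPreprocessor.h"   // for NV_INLINE, which NvBlastMemory.h relies on
#include "NvBlastMemory.h"
#include <cstdio>

void exampleMemoryHelpers()
{
    const size_t raw = 100;
    const size_t aligned = Nv::Blast::align16(raw);            // 112: next 16-byte boundary
    std::printf("aligned = %zu\n", aligned);

    char buffer[256];
    void* p  = Nv::Blast::pointerOffset(buffer, 32);           // untyped offset
    float* f = Nv::Blast::pointerOffset<float*>(buffer, 64);   // typed offset
    (void)p; (void)f;
}
```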
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastAssert.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "NvBlastAssert.h"
#include <stdio.h>
#include <stdlib.h>
#if NV_WINDOWS_FAMILY
#include <crtdbg.h>
#endif
extern "C"
{
void NvBlastAssertHandler(const char* expr, const char* file, int line, bool& ignore)
{
NV_UNUSED(ignore); // only used in the debug Windows configuration
char buffer[1024];
#if NV_WINDOWS_FAMILY
sprintf_s(buffer, 1024, "%s(%d) : Assertion failed: %s\n", file, line, expr);
#else
sprintf(buffer, "%s(%d) : Assertion failed: %s\n", file, line, expr);
#endif
puts(buffer);
#if NV_WINDOWS_FAMILY && NV_DEBUG
// _CrtDbgReport returns -1 on error, 1 on 'retry', 0 otherwise including 'ignore'.
// Hitting 'abort' will terminate the process immediately.
int result = _CrtDbgReport(_CRT_ASSERT, file, line, NULL, "%s", buffer);
int mode = _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_REPORT_MODE);
ignore = _CRTDBG_MODE_WNDW == mode && result == 0;
if (ignore)
return;
__debugbreak();
#elif (NV_WINDOWS_FAMILY && NV_CHECKED)
__debugbreak();
#else
abort();
#endif
}
} // extern "C"
| 2,621 | C++ | 38.134328 | 87 | 0.720336 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastVolumeIntegrals.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTVOLUMEINTEGRALS_H
#define NVBLASTVOLUMEINTEGRALS_H
#include "NvBlastNvSharedHelpers.h"
#include "NvCMath.h"
#include "NvBlastAssert.h"
namespace Nv {
namespace Blast{
/**
Calculate the volume and centroid of a closed mesh with outward-pointing normals.
\param[out] centroid the calculated centroid of the given mesh
\param[in] mesh a class of templated type MeshQuery
MeshQuery must support the following functions:
size_t faceCount()
size_t vertexCount(size_t faceIndex)
NvcVec3 vertex(size_t faceIndex, size_t vertexIndex)
\return the volume of the given mesh
*/
template<class MeshQuery>
NV_INLINE float calculateMeshVolumeAndCentroid(NvcVec3& centroid, const MeshQuery& mesh)
{
centroid = { 0.0f, 0.0f, 0.0f };
// First find an approximate centroid for a more accurate calculation
size_t N = 0;
NvcVec3 disp = { 0.0f, 0.0f, 0.0f };
for (size_t i = 0; i < mesh.faceCount(); ++i)
{
const size_t faceVertexCount = mesh.vertexCount(i);
for (size_t j = 0; j < faceVertexCount; ++j)
{
disp = disp + mesh.vertex(i, j);
}
N += faceVertexCount;
}
if (N == 0)
{
return 0.0f;
}
disp = disp / (float)N;
float sixV = 0.0f;
for (size_t i = 0; i < mesh.faceCount(); ++i)
{
const size_t faceVertexCount = mesh.vertexCount(i);
if (faceVertexCount < 3)
{
continue;
}
const NvcVec3 a = mesh.vertex(i, 0) - disp;
NvcVec3 b = mesh.vertex(i, 1) - disp;
for (size_t j = 2; j < faceVertexCount; ++j)
{
const NvcVec3 c = mesh.vertex(i, j) - disp;
const float sixTetV =
a.x * b.y * c.z - a.x * b.z * c.y - a.y * b.x * c.z +
a.y * b.z * c.x + a.z * b.x * c.y - a.z * b.y * c.x;
sixV += sixTetV;
centroid = centroid + sixTetV*(a + b + c);
b = c;
}
}
// Extra factor of four to average tet vertices
centroid = centroid / (4.0f * sixV) + disp;
return std::abs(sixV) / 6.0f;
}
} // namespace Blast
} // namespace Nv
#endif // NVBLASTVOLUMEINTEGRALS_H
| 3,744 | C | 31.284482 | 88 | 0.65812 |
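A hedged sketch of a `MeshQuery` implementation: a unit tetrahedron with outward-wound faces, for which `calculateMeshVolumeAndCentroid` should return a volume of 1/6 and a centroid of (0.25, 0.25, 0.25). The query type and function names are made up for illustration:

```cpp
#include "NvBlastVolumeIntegrals.h"

struct TetrahedronQuery
{
    // Four triangular faces over the vertices (0,0,0), (1,0,0), (0,1,0), (0,0,1),
    // wound so that face normals point outward.
    size_t faceCount() const { return 4; }
    size_t vertexCount(size_t /*faceIndex*/) const { return 3; }
    NvcVec3 vertex(size_t faceIndex, size_t vertexIndex) const
    {
        static const NvcVec3 v[4] = { {0, 0, 0}, {1, 0, 0}, {0, 1, 0}, {0, 0, 1} };
        static const size_t  f[4][3] = { {0, 2, 1}, {0, 1, 3}, {0, 3, 2}, {1, 2, 3} };
        return v[f[faceIndex][vertexIndex]];
    }
};

void exampleVolume()
{
    NvcVec3 centroid;
    const float volume = Nv::Blast::calculateMeshVolumeAndCentroid(centroid, TetrahedronQuery());
    (void)volume;   // ~0.1667
}
```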
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastGeometry.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTGEOMETRY_H
#define NVBLASTGEOMETRY_H
#include "NvBlastTypes.h"
#include "NvBlastMath.h"
#include "NvBlastAssert.h"
#include <limits>
namespace Nv {
namespace Blast{
/**
Find the closest node to point in the graph. Uses primarily distance to chunk centroids.
Bond normals are expected to be directed from the lower to higher node index.
Cannot be used for graph actors with only the external chunk in the graph.
\param[in] point the point to test against
\param[in] firstGraphNodeIndex the entry point for familyGraphNodeIndexLinks
\param[in] familyGraphNodeIndexLinks the list index links of the actor's graph
\param[in] adjacencyPartition the actor's SupportGraph adjacency partition
\param[in] adjacentNodeIndices the actor's SupportGraph adjacent node indices
\param[in] adjacentBondIndices the actor's SupportGraph adjacent bond indices
\param[in] assetBonds the actor's asset bonds
\param[in] bondHealths the actor's bond healths
\param[in] assetChunks the actor's asset chunks
\param[in] supportChunkHealths the actor's graph chunks healths
\param[in] chunkIndices maps node index to chunk index in SupportGraph
\return the index of the node closest to point
*/
NV_FORCE_INLINE uint32_t findClosestNode(const float point[4],
const uint32_t firstGraphNodeIndex, const uint32_t* familyGraphNodeIndexLinks,
const uint32_t* adjacencyPartition, const uint32_t* adjacentNodeIndices, const uint32_t* adjacentBondIndices,
const NvBlastBond* assetBonds, const float* bondHealths,
const NvBlastChunk* assetChunks, const float* supportChunkHealths, const uint32_t* chunkIndices)
{
// firstGraphNodeIndex could still be the external chunk; however, a single-node actor
// consisting only of the external chunk should never exist.
uint32_t nodeIndex = firstGraphNodeIndex;
// Since there should always be a regular chunk in the graph, it is possible to initialize closestNode
// as external chunk index but it would always evaluate to some meaningful node index eventually.
uint32_t closestNode = nodeIndex;
float minDist = std::numeric_limits<float>().max();
// find the closest healthy chunk in the graph by its centroid to point distance
while (!Nv::Blast::isInvalidIndex(nodeIndex))
{
if (supportChunkHealths[nodeIndex] > 0.0f)
{
uint32_t chunkIndex = chunkIndices[nodeIndex];
if (!isInvalidIndex(chunkIndex)) // Invalid if this is the external chunk
{
const NvBlastChunk& chunk = assetChunks[chunkIndex];
const float* centroid = chunk.centroid;
float d[3]; VecMath::sub(point, centroid, d);
float dist = VecMath::dot(d, d);
if (dist < minDist)
{
minDist = dist;
closestNode = nodeIndex;
}
}
}
nodeIndex = familyGraphNodeIndexLinks[nodeIndex];
}
// as long as the external chunk is not input as a single-node graph actor
NVBLAST_ASSERT(!isInvalidIndex(chunkIndices[closestNode]));
bool iterateOnBonds = true;
if (iterateOnBonds)
{
// improve geometric accuracy by looking on which side of the closest bond the point lies
// expects bond normals to point from the smaller to the larger node index
nodeIndex = closestNode;
minDist = std::numeric_limits<float>().max();
const uint32_t startIndex = adjacencyPartition[nodeIndex];
const uint32_t stopIndex = adjacencyPartition[nodeIndex + 1];
for (uint32_t adjacentIndex = startIndex; adjacentIndex < stopIndex; adjacentIndex++)
{
const uint32_t neighbourIndex = adjacentNodeIndices[adjacentIndex];
const uint32_t neighbourChunk = chunkIndices[neighbourIndex];
if (!isInvalidIndex(neighbourChunk)) // Invalid if neighbor is the external chunk
{
const uint32_t bondIndex = adjacentBondIndices[adjacentIndex];
// do not follow broken bonds, since it means that neighbor is not actually connected in the graph
if (bondHealths[bondIndex] > 0.0f && supportChunkHealths[neighbourIndex] > 0.0f)
{
const NvBlastBond& bond = assetBonds[bondIndex];
const float* centroid = bond.centroid;
float d[3]; VecMath::sub(point, centroid, d);
float dist = VecMath::dot(d, d);
if (dist < minDist)
{
minDist = dist;
float s = VecMath::dot(d, bond.normal);
if (nodeIndex < neighbourIndex)
{
closestNode = s < 0.0f ? nodeIndex : neighbourIndex;
}
else
{
closestNode = s < 0.0f ? neighbourIndex : nodeIndex;
}
}
}
}
}
}
return closestNode;
}
/**
Find the closest node to point in the graph. Uses primarily distance to bond centroids.
Slower compared to chunk based lookup but may yield better accuracy in some cases.
Bond normals are expected to be directed from the lower to higher node index.
Cannot be used for graph actors with only the external chunk in the graph.
\param[in] point the point to test against
\param[in] firstGraphNodeIndex the entry point for familyGraphNodeIndexLinks
\param[in] familyGraphNodeIndexLinks the list index links of the actor's graph
\param[in] adjacencyPartition the actor's SupportGraph adjacency partition
\param[in] adjacentNodeIndices the actor's SupportGraph adjacent node indices
\param[in] adjacentBondIndices the actor's SupportGraph adjacent bond indices
\param[in] assetBonds the actor's asset bonds
\param[in] bondHealths the actor's bond healths
\param[in] chunkIndices maps node index to chunk index in SupportGraph
\return the index of the node closest to point
*/
NV_FORCE_INLINE uint32_t findClosestNode(const float point[4],
const uint32_t firstGraphNodeIndex, const uint32_t* familyGraphNodeIndexLinks,
const uint32_t* adjacencyPartition, const uint32_t* adjacentNodeIndices, const uint32_t* adjacentBondIndices,
const NvBlastBond* bonds, const float* bondHealths, const uint32_t* chunkIndices)
{
// firstGraphNodeIndex could still be the external chunk; however, a single-node actor
// consisting only of the external chunk should never exist.
uint32_t nodeIndex = firstGraphNodeIndex;
// Since there should always be a regular chunk in the graph, it is possible to initialize closestNode
// as external chunk index but it would always evaluate to some meaningful node index eventually.
uint32_t closestNode = nodeIndex;
float minDist = std::numeric_limits<float>().max();
while (!Nv::Blast::isInvalidIndex(nodeIndex))
{
const uint32_t startIndex = adjacencyPartition[nodeIndex];
const uint32_t stopIndex = adjacencyPartition[nodeIndex + 1];
for (uint32_t adjacentIndex = startIndex; adjacentIndex < stopIndex; adjacentIndex++)
{
const uint32_t neighbourIndex = adjacentNodeIndices[adjacentIndex];
if (nodeIndex < neighbourIndex)
{
const uint32_t bondIndex = adjacentBondIndices[adjacentIndex];
if (bondHealths[bondIndex] > 0.0f)
{
const NvBlastBond& bond = bonds[bondIndex];
const float* centroid = bond.centroid;
float d[3]; VecMath::sub(point, centroid, d);
float dist = VecMath::dot(d, d);
if (dist < minDist)
{
minDist = dist;
// if any of the nodes is the external chunk, use the valid one instead
if (isInvalidIndex(chunkIndices[neighbourIndex]))
{
closestNode = nodeIndex;
}
else if (isInvalidIndex(chunkIndices[nodeIndex]))
{
closestNode = neighbourIndex;
}
else
{
float s = VecMath::dot(d, bond.normal);
closestNode = s < 0 ? nodeIndex : neighbourIndex;
}
}
}
}
}
nodeIndex = familyGraphNodeIndexLinks[nodeIndex];
}
// as long as the external chunk is not input as a single-node graph actor
NVBLAST_ASSERT(!isInvalidIndex(chunkIndices[closestNode]));
return closestNode;
}
} // namespace Blast
} // namespace Nv
#endif // NVBLASTGEOMETRY_H
| 10,765 | C | 44.42616 | 114 | 0.63948 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastIncludeWindows.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTINCLUDEWINDOWS_H
#define NVBLASTINCLUDEWINDOWS_H
#ifndef _WINDOWS_ // windows already included if this is defined
#include "NvPreprocessor.h"
#ifndef _WIN32
#error "This file should only be included by Windows builds!!"
#endif
// We only support >= Windows XP, and we need this for critical section and
#if !NV_WINRT
#define _WIN32_WINNT 0x0501
#else
#define _WIN32_WINNT 0x0602
#endif
// turn off as much as we can for windows. All we really need is the thread functions(critical sections/Interlocked*
// etc)
#define NOGDICAPMASKS
#define NOVIRTUALKEYCODES
#define NOWINMESSAGES
#define NOWINSTYLES
#define NOSYSMETRICS
#define NOMENUS
#define NOICONS
#define NOKEYSTATES
#define NOSYSCOMMANDS
#define NORASTEROPS
#define NOSHOWWINDOW
#define NOATOM
#define NOCLIPBOARD
#define NOCOLOR
#define NOCTLMGR
#define NODRAWTEXT
#define NOGDI
#define NOMB
#define NOMEMMGR
#define NOMETAFILE
#define NOMINMAX
#define NOOPENFILE
#define NOSCROLL
#define NOSERVICE
#define NOSOUND
#define NOTEXTMETRIC
#define NOWH
#define NOWINOFFSETS
#define NOCOMM
#define NOKANJI
#define NOHELP
#define NOPROFILER
#define NODEFERWINDOWPOS
#define NOMCX
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#if !NV_WINRT
#define NOUSER
#define NONLS
#define NOMSG
#endif
#pragma warning(push)
#pragma warning(disable : 4668) //'symbol' is not defined as a preprocessor macro, replacing with '0' for 'directives'
#include <windows.h>
#pragma warning(pop)
#if NV_SSE2
#include <xmmintrin.h>
#endif
#endif // #ifndef _WINDOWS_
#endif // #ifndef NVBLASTINCLUDEWINDOWS_H
| 3,143 | C | 29.524272 | 118 | 0.776965 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastAtomic.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTATOMIC_H
#define NVBLASTATOMIC_H
#include "NvBlastTypes.h"
namespace Nv
{
namespace Blast
{
/* increment the specified location. Return the incremented value */
int32_t atomicIncrement(volatile int32_t* val);
/* decrement the specified location. Return the decremented value */
int32_t atomicDecrement(volatile int32_t* val);
} // namespace Blast
} // namespace Nv
#endif // #ifndef NVBLASTATOMIC_H
| 1,986 | C | 38.739999 | 74 | 0.765861 |
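A minimal sketch of a reference count built on the two atomics declared above (both return the value after the operation); the struct is hypothetical:

```cpp
#include "NvBlastAtomic.h"
#include <cstdint>

struct RefCounted
{
    volatile int32_t refCount = 1;

    void addReference() { Nv::Blast::atomicIncrement(&refCount); }

    // Returns true when the last reference was just released.
    bool releaseReference() { return Nv::Blast::atomicDecrement(&refCount) == 0; }
};
```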
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastPreprocessorInternal.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTPREPROCESSORINTERNAL_H
#define NVBLASTPREPROCESSORINTERNAL_H
#include "NvPreprocessor.h"
/**
Macros for more convenient logging
*/
#define NVBLASTLL_LOG_ERROR(_logFn, _msg) if (_logFn != nullptr) { _logFn(NvBlastMessage::Error, _msg, __FILE__, __LINE__); } ((void)0)
#define NVBLASTLL_LOG_WARNING(_logFn, _msg) if (_logFn != nullptr) { _logFn(NvBlastMessage::Warning, _msg, __FILE__, __LINE__); } ((void)0)
#define NVBLASTLL_LOG_INFO(_logFn, _msg) if (_logFn != nullptr) { _logFn(NvBlastMessage::Info, _msg, __FILE__, __LINE__); } ((void)0)
#define NVBLASTLL_LOG_DEBUG(_logFn, _msg) if (_logFn != nullptr) { _logFn(NvBlastMessage::Debug, _msg, __FILE__, __LINE__); } ((void)0)
/** Blast will check function parameters for debug and checked builds. */
#define NVBLASTLL_CHECK_PARAMS (NV_DEBUG || NV_CHECKED)
#if NVBLASTLL_CHECK_PARAMS
#define NVBLASTLL_CHECK(_expr, _logFn, _msg, _onFail) \
{ \
if(!(_expr)) \
{ \
if (_logFn) { _logFn(NvBlastMessage::Error, _msg, __FILE__, __LINE__); } \
{ _onFail; }; \
} \
}
#else
#define NVBLASTLL_CHECK(_expr, _logFn, _msg, _onFail) NV_UNUSED(_logFn)
#endif
/**
Convenience macro to replace deprecated UINT32_MAX
*/
#ifndef UINT32_MAX
#include <limits>
#define UINT32_MAX (std::numeric_limits<uint32_t>::max())
#endif
#endif // ifndef NVBLASTPREPROCESSORINTERNAL_H
| 3,721 | C | 50.694444 | 143 | 0.562483 |
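A hedged sketch of the logging and parameter-check macros; the callback signature is assumed to match the `NvBlastLog` typedef from NvBlastTypes.h, and `myLog`/`safeDivide` are made-up names:

```cpp
#include "NvBlastPreprocessorInternal.h"
#include "NvBlastTypes.h"
#include <cstdio>

static void myLog(int type, const char* msg, const char* file, int line)
{
    std::printf("[%d] %s (%s:%d)\n", type, msg, file, line);
}

static float safeDivide(float num, float den, NvBlastLog logFn)
{
    // Note: NVBLASTLL_CHECK compiles away outside debug/checked builds.
    NVBLASTLL_CHECK(den != 0.0f, logFn, "safeDivide: zero denominator", return 0.0f);
    NVBLASTLL_LOG_INFO(logFn, "safeDivide: dividing");
    return num / den;
}
```

In a debug or checked build, `safeDivide(1.0f, 0.0f, myLog)` logs the error through the callback and returns 0.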
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastHashMap.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTHASHMAP_H
#define NVBLASTHASHMAP_H
#include "NvBlastAllocator.h"
#include "NsHashMap.h"
namespace Nv
{
namespace Blast
{
/**
Wrapped NvShared HashMap that uses NvBlastGlobals AllocatorCallback.
*/
template <class Key, class Value, class HashFn = nvidia::shdfnd::Hash<Key>>
struct HashMap
{
typedef nvidia::shdfnd::HashMap<Key, Value, HashFn, Allocator> type;
};
} // namespace Blast
} // namespace Nv
#endif // #ifndef NVBLASTHASHMAP_H
| 2,027 | C | 35.872727 | 75 | 0.761717 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastAssert.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTASSERT_H
#define NVBLASTASSERT_H
#include "NvPreprocessor.h"
#if !NV_ENABLE_ASSERTS
#define NVBLAST_ASSERT(exp) ((void)0)
#define NVBLAST_ALWAYS_ASSERT_MESSAGE(message) ((void)0)
#define NVBLAST_ASSERT_WITH_MESSAGE(condition, message) ((void)0)
#else
#if NV_VC
#define NVBLAST_CODE_ANALYSIS_ASSUME(exp) \
__analysis_assume(!!(exp)) // This macro will be used to get rid of analysis warning messages if a NVBLAST_ASSERT is used
// to "guard" illegal mem access, for example.
#else
#define NVBLAST_CODE_ANALYSIS_ASSUME(exp)
#endif
#define NVBLAST_ASSERT(exp) \
{ \
static bool _ignore = false; \
if (!(exp) && !_ignore) NvBlastAssertHandler(#exp, __FILE__, __LINE__, _ignore); \
NVBLAST_CODE_ANALYSIS_ASSUME(exp); \
} ((void)0)
#define NVBLAST_ALWAYS_ASSERT_MESSAGE(message) \
{ \
static bool _ignore = false; \
if(!_ignore) \
{ \
NvBlastAssertHandler(message, __FILE__, __LINE__, _ignore); \
} \
} ((void)0)
#define NVBLAST_ASSERT_WITH_MESSAGE(exp, message) \
{ \
static bool _ignore = false; \
if (!(exp) && !_ignore) NvBlastAssertHandler(message, __FILE__, __LINE__, _ignore); \
NVBLAST_CODE_ANALYSIS_ASSUME(exp); \
} ((void)0)
#endif
#define NVBLAST_ALWAYS_ASSERT() NVBLAST_ASSERT(0)
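// Example usage (a minimal sketch):
//
//     NVBLAST_ASSERT(chunkIndex < chunkCount);                        // compiled out unless NV_ENABLE_ASSERTS
//     NVBLAST_ASSERT_WITH_MESSAGE(mem != nullptr, "memory block must be provided");
//     NVBLAST_ALWAYS_ASSERT_MESSAGE("unreachable code path");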
extern "C"
{
NV_C_API void NvBlastAssertHandler(const char* expr, const char* file, int line, bool& ignore);
} // extern "C"
#endif // #ifndef NVBLASTASSERT_H
| 4,355 | C | 52.777777 | 125 | 0.494604 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastAtomic.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "NvBlastAtomic.h"
#include <string.h>
#include <stdlib.h>
namespace Nv
{
namespace Blast
{
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Windows Implementation
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#if NV_WINDOWS_FAMILY
#include "NvBlastIncludeWindows.h"
int32_t atomicIncrement(volatile int32_t* val)
{
return (int32_t)InterlockedIncrement((volatile LONG*)val);
}
int32_t atomicDecrement(volatile int32_t* val)
{
return (int32_t)InterlockedDecrement((volatile LONG*)val);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Unix Implementation
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#elif(NV_UNIX_FAMILY)
int32_t atomicIncrement(volatile int32_t* val)
{
return __sync_add_and_fetch(val, 1);
}
int32_t atomicDecrement(volatile int32_t* val)
{
return __sync_sub_and_fetch(val, 1);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Unsupported Platforms
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#else
#error "Platform not supported!"
#endif
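// Example usage (a minimal sketch): a simple thread-safe reference count.
//
//     volatile int32_t refCount = 1;
//     atomicIncrement(&refCount);                    // acquire a reference
//     if (atomicDecrement(&refCount) == 0)
//     {
//         // last reference released, safe to destroy
//     }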
} // namespace Blast
} // namespace Nv
| 3,191 | C++ | 34.466666 | 119 | 0.549044 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastDLink.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTDLINK_H
#define NVBLASTDLINK_H
#include "NvBlastAssert.h"
#include "NvBlastIndexFns.h"
namespace Nv
{
namespace Blast
{
template<typename IndexType>
struct IndexDLink
{
IndexType m_adj[2];
};
template<typename IndexType>
class IndexDList
{
public:
void initLinksSolitary(IndexDLink<IndexType>* links, IndexType linkCount)
{
for (IndexType i = 0; i < linkCount; ++i)
{
links[i].m_adj[0] = invalidIndex<IndexType>();
links[i].m_adj[1] = invalidIndex<IndexType>();
}
}
void initLinksChain(IndexDLink<IndexType>* links, IndexType linkCount)
{
if (linkCount > 0)
{
links[0].m_adj[0] = invalidIndex<IndexType>();
for (IndexType i = 1; i < linkCount; ++i)
{
links[i - 1].m_adj[1] = i;
links[i].m_adj[0] = i - 1;
}
links[linkCount - 1].m_adj[1] = invalidIndex<IndexType>();
}
}
IndexType getAdj(IndexDLink<IndexType>* links, IndexType linkIndex, int which)
{
return links[linkIndex].m_adj[which & 1];
}
void remove(IndexDLink<IndexType>* links, IndexType linkIndex)
{
IndexDLink<IndexType>& link = links[linkIndex];
const IndexType adj0 = link.m_adj[0];
const IndexType adj1 = link.m_adj[1];
if (!isInvalidIndex(adj1))
{
links[adj1].m_adj[0] = adj0;
link.m_adj[1] = invalidIndex<IndexType>();
}
if (!isInvalidIndex(adj0))
{
links[adj0].m_adj[1] = adj1;
link.m_adj[0] = invalidIndex<IndexType>();
}
}
bool isSolitary(IndexDLink<IndexType>* links, IndexType linkIndex)
{
const IndexDLink<IndexType>& link = links[linkIndex];
return isInvalidIndex(link.m_adj[0]) && isInvalidIndex(link.m_adj[1]);
}
void insertListHead(IndexType& listHead, IndexDLink<IndexType>* links, IndexType linkIndex)
{
NVBLAST_ASSERT(!isInvalidIndex(linkIndex));
if (!isInvalidIndex(listHead))
{
links[listHead].m_adj[0] = linkIndex;
}
links[linkIndex].m_adj[1] = listHead;
listHead = linkIndex;
}
IndexType removeListHead(IndexType& listHead, IndexDLink<IndexType>* links)
{
const IndexType linkIndex = listHead;
if (!isInvalidIndex(linkIndex))
{
listHead = links[linkIndex].m_adj[1];
if (!isInvalidIndex(listHead))
{
links[listHead].m_adj[0] = invalidIndex<IndexType>();
}
links[linkIndex].m_adj[1] = invalidIndex<IndexType>();
}
return linkIndex;
}
void removeFromList(IndexType& listHead, IndexDLink<IndexType>* links, IndexType linkIndex)
{
NVBLAST_ASSERT(!isInvalidIndex(linkIndex));
if (listHead == linkIndex)
{
listHead = links[linkIndex].m_adj[1];
}
remove(links, linkIndex);
}
};
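// Example usage (a minimal sketch): maintaining a doubly-linked list of indices
// inside a fixed array of links, anchored by a single list head index.
//
//     IndexDLink<uint32_t> links[8];
//     IndexDList<uint32_t> ops;
//     ops.initLinksSolitary(links, 8);
//     uint32_t listHead = invalidIndex<uint32_t>();
//     ops.insertListHead(listHead, links, 3);        // list: 3
//     ops.insertListHead(listHead, links, 5);        // list: 5 -> 3
//     ops.removeFromList(listHead, links, 3);        // list: 5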
struct DLink
{
DLink() : m_prev(nullptr), m_next(nullptr) {}
DLink* getPrev() const
{
return m_prev;
}
DLink* getNext() const
{
return m_next;
}
private:
DLink* m_prev;
DLink* m_next;
friend class DList;
};
class DList
{
public:
DList() : m_head(nullptr), m_tail(nullptr) {}
bool isEmpty() const
{
NVBLAST_ASSERT((m_head == nullptr) == (m_tail == nullptr));
return m_head == nullptr;
}
bool isSolitary(const DLink& link) const
{
return link.m_prev == nullptr && link.m_next == nullptr && m_head != &link;
}
DLink* getHead() const
{
return m_head;
}
DLink* getTail() const
{
return m_tail;
}
bool insertHead(DLink& link)
{
NVBLAST_ASSERT(isSolitary(link));
if (!isSolitary(link))
{
return false;
}
link.m_next = m_head;
if (m_head != nullptr)
{
m_head->m_prev = &link;
}
m_head = &link;
if (m_tail == nullptr)
{
m_tail = &link;
}
return true;
}
bool insertTail(DLink& link)
{
NVBLAST_ASSERT(isSolitary(link));
if (!isSolitary(link))
{
return false;
}
link.m_prev = m_tail;
if (m_tail != nullptr)
{
m_tail->m_next = &link;
}
m_tail = &link;
if (m_head == nullptr)
{
m_head = &link;
}
return true;
}
void remove(DLink& link)
{
if (link.m_prev != nullptr)
{
link.m_prev->m_next = link.m_next;
}
else
if (m_head == &link)
{
m_head = link.m_next;
}
if (link.m_next != nullptr)
{
link.m_next->m_prev = link.m_prev;
}
else
if (m_tail == &link)
{
m_tail = link.m_prev;
}
link.m_next = link.m_prev = nullptr;
}
class It
{
public:
enum Direction { Reverse, Forward };
It(const DList& list, Direction dir = Forward) : m_curr(dir == Forward ? list.getHead() : list.getTail()) {}
/** Validity of current value. */
operator bool() const
{
return m_curr != nullptr;
}
/** Current value. */
operator const DLink*() const
{
return m_curr;
}
/** Pre-increment. */
const DLink* operator ++ ()
{
return m_curr = m_curr->getNext();
}
/** Pre-decrement. */
const DLink* operator -- ()
{
return m_curr = m_curr->getPrev();
}
private:
const DLink* m_curr;
};
private:
DLink* m_head;
DLink* m_tail;
};
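// Example usage (a minimal sketch): building a list and walking it with the iterator.
//
//     DList list;
//     DLink a, b;
//     list.insertTail(a);
//     list.insertTail(b);                            // list: a -> b
//     for (DList::It it(list); (bool)it; ++it)
//     {
//         const DLink* link = it;                    // implicit conversion to const DLink*
//         // ... use link ...
//     }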
} // end namespace Blast
} // end namespace Nv
#endif // #ifndef NVBLASTDLINK_H
| 7,611 | C | 24.122112 | 116 | 0.559322 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastIndexFns.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTINDEXFNS_H
#define NVBLASTINDEXFNS_H
#include "NvBlastTypes.h"
#include <cstring>
namespace Nv
{
namespace Blast
{
/**
Set to invalid index.
*/
template<typename T>
NV_INLINE T invalidIndex()
{
return ~(T)0;
}
/**
Test for invalid index (max representable integer).
*/
template<typename T>
NV_INLINE bool isInvalidIndex(T index)
{
return index == invalidIndex<T>();
}
/**
Create a lookup table for data sorted by a templated index type.
Note: when using this function with unsigned integer index types, invalidIndex<T>() is treated as a value less than zero.
On input:
The indices must lie in the interval [indexBase, indexBase+indexRange].
indexSource must point to the first index in the data.
indexCount must be set to the number of indices in the data.
indexByteStride must be set to the distance, in bytes, between subsequent indices.
lookup must point to a T array of size indexRange+2.
On return:
lookup will be filled such that:
lookup[i] = the position of first data element with index (i + indexBase)
lookup[indexRange+1] = indexCount
The last (indexRange+1) element is used so that one may always determine the number of data elements with the given index using:
count = lookup[i+1] - lookup[i]
Note: if an index (i + indexBase) is not present in the data, then lookup[i+1] = lookup[i], so the count (above) will correctly be zero.
In this case, the actual value of lookup[i] is irrelevant.
*/
template<typename T>
void createIndexStartLookup(T* lookup, T indexBase, T indexRange, T* indexSource, T indexCount, T indexByteStride)
{
++indexBase; // Ordering invalidIndex<T>() as lowest value
T indexPos = 0;
for (T i = 0; i <= indexRange; ++i)
{
for (; indexPos < indexCount; ++indexPos, indexSource = (T*)((uintptr_t)indexSource + indexByteStride))
{
if (*indexSource + 1 >= i + indexBase) // +1 to order invalidIndex<T>() as lowest value
{
lookup[i] = indexPos;
break;
}
}
if (indexPos == indexCount)
{
lookup[i] = indexPos;
}
}
lookup[indexRange + 1] = indexCount;
}
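/*
Example (a minimal sketch): three data elements with indices {0, 0, 2}, indexBase = 0, indexRange = 2.

    uint32_t indices[3] = { 0, 0, 2 };                 // sorted by index
    uint32_t lookup[4];                                // indexRange + 2 entries
    createIndexStartLookup<uint32_t>(lookup, 0, 2, indices, 3, sizeof(uint32_t));
    // lookup == { 0, 2, 2, 3 }:
    //     count of index 0 = lookup[1] - lookup[0] = 2
    //     count of index 1 = lookup[2] - lookup[1] = 0
    //     count of index 2 = lookup[3] - lookup[2] = 1
*/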
/**
Creates the inverse of a map, such that inverseMap[map[i]] = i.
Unmapped indices are set to invalidIndex<T>.
\param[out] inverseMap inverse map space of given size
\param[in] map original map of given size, unmapped entries must contain invalidIndex<T>
\param[in] size size of the involved maps
*/
template<typename T>
void invertMap(T* inverseMap, const T* map, const T size)
{
memset(inverseMap, invalidIndex<T>(), size*sizeof(T));
for (T i = 0; i < size; i++)
{
if (!isInvalidIndex(map[i]))
{
inverseMap[map[i]] = i;
}
}
}
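/*
Example (a minimal sketch):

    uint32_t map[4] = { 2, 0, invalidIndex<uint32_t>(), 1 };
    uint32_t inverseMap[4];
    invertMap(inverseMap, map, 4u);
    // inverseMap == { 1, 3, 0, invalidIndex<uint32_t>() }
*/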
} // end namespace Blast
} // end namespace Nv
#endif // #ifndef NVBLASTINDEXFNS_H
| 4,587 | C | 30.641379 | 144 | 0.677349 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastFixedBitmap.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTFIXEDBITMAP_H
#define NVBLASTFIXEDBITMAP_H
#include "NvBlastAssert.h"
#include "NvBlastMemory.h"
#include <cstring>
namespace Nv
{
namespace Blast
{
/*!
FixedBitmap is a bitset (bitmap) of fixed size, intended to be used with placement new on a chunk of memory.
It uses the following memory layout:
// some memory
char *buf = new char[64 * 1024];
const uint32_t bitsCount = 100;
// placement new on this memory
FixedBitmap* arr = new (buf) FixedBitmap(bitsCount);
// advance the buffer by the bitmap's required memory size to use the memory left
buf = buf + FixedBitmap::requiredMemorySize(bitsCount);
buf:
+------------------------------------------------------------+
| uint32_t | word0 | word1 | word2 | ... |
+------------------------------------------------------------+
*/
class FixedBitmap
{
public:
explicit FixedBitmap(uint32_t bitsCount)
{
m_bitsCount = bitsCount;
}
static uint32_t getWordsCount(uint32_t bitsCount)
{
return (bitsCount + 31) >> 5;
}
static size_t requiredMemorySize(uint32_t bitsCount)
{
return align16(sizeof(FixedBitmap)) + align16(getWordsCount(bitsCount) * sizeof(uint32_t));
}
void clear()
{
memset(data(), 0, getWordsCount(m_bitsCount) * sizeof(uint32_t));
}
void fill()
{
const uint32_t wordCount = getWordsCount(m_bitsCount);
uint32_t* mem = data();
memset(mem, 0xFF, wordCount * sizeof(uint32_t));
const uint32_t bitsRemainder = m_bitsCount & 31;
if (bitsRemainder > 0)
{
mem[wordCount - 1] &= ~(0xFFFFFFFF << bitsRemainder);
}
}
int test(uint32_t index) const
{
NVBLAST_ASSERT(index < m_bitsCount);
return data()[index >> 5] & (1 << (index & 31));
}
void set(uint32_t index)
{
NVBLAST_ASSERT(index < m_bitsCount);
data()[index >> 5] |= 1 << (index & 31);
}
void reset(uint32_t index)
{
NVBLAST_ASSERT(index < m_bitsCount);
data()[index >> 5] &= ~(1 << (index & 31));
}
private:
uint32_t m_bitsCount;
NV_FORCE_INLINE uint32_t* data()
{
return (uint32_t*)((char*)this + sizeof(FixedBitmap));
}
NV_FORCE_INLINE const uint32_t* data() const
{
return (uint32_t*)((char*)this + sizeof(FixedBitmap));
}
private:
FixedBitmap(const FixedBitmap& that);
};
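// Continuing the placement-new example above (a minimal sketch):
//
//     arr->clear();                                  // all bits off
//     arr->set(17);
//     if (arr->test(17)) { arr->reset(17); }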
} // namespace Blast
} // namespace Nv
#endif // ifndef NVBLASTFIXEDBITMAP_H
| 4,088 | C | 29.066176 | 111 | 0.651663 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastNvSharedHelpers.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTNVSHAREDSHELPERS_H
#define NVBLASTNVSHAREDSHELPERS_H
#include "NvCTypes.h"
#include "NvVec2.h"
#include "NvVec3.h"
#include "NvVec4.h"
#include "NvTransform.h"
#include "NvPlane.h"
#include "NvMat33.h"
#include "NvMat44.h"
#include "NvBounds3.h"
using namespace nvidia;
#define WCast(type, name) reinterpret_cast<type>(name)
#define RCast(type, name) reinterpret_cast<const type>(name)
#define CONVERT(BlastType, NvSharedType) \
static inline NvSharedType& toNvShared(BlastType& v) \
{ \
return WCast(NvSharedType&, v); \
} \
static inline const NvSharedType& toNvShared(const BlastType& v) \
{ \
return RCast(NvSharedType&, v); \
} \
static inline const BlastType& fromNvShared(const NvSharedType& v) \
{ \
return RCast(BlastType&, v); \
} \
static inline BlastType& fromNvShared(NvSharedType& v) \
{ \
return WCast(BlastType&, v); \
} \
static inline NvSharedType* toNvShared(BlastType* v) \
{ \
return WCast(NvSharedType*, v); \
} \
static inline const NvSharedType* toNvShared(const BlastType* v) \
{ \
return RCast(NvSharedType*, v); \
} \
static inline const BlastType* fromNvShared(const NvSharedType* v) \
{ \
return RCast(BlastType*, v); \
} \
static inline BlastType* fromNvShared(NvSharedType* v) \
{ \
return WCast(BlastType*, v); \
}
CONVERT(NvcVec2, nvidia::NvVec2)
CONVERT(NvcVec3, nvidia::NvVec3)
CONVERT(NvcVec4, nvidia::NvVec4)
CONVERT(NvcQuat, nvidia::NvQuat)
CONVERT(NvcTransform, nvidia::NvTransform)
CONVERT(NvcPlane, nvidia::NvPlane)
CONVERT(NvcMat33, nvidia::NvMat33)
CONVERT(NvcMat44, nvidia::NvMat44)
CONVERT(NvcBounds3, nvidia::NvBounds3)
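// Example usage (a minimal sketch): the conversions reinterpret the same memory, no copy is made.
//
//     NvcVec3 c = { 1.0f, 2.0f, 3.0f };
//     nvidia::NvVec3& v = toNvShared(c);             // view the Blast C type as an NvShared vector
//     v.normalize();                                 // c is modified through v
//     const NvcVec3& back = fromNvShared(v);         // same storage viewed as NvcVec3 again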
NV_COMPILE_TIME_ASSERT(sizeof(NvcVec2) == sizeof(nvidia::NvVec2));
NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec2, x) == NV_OFFSET_OF(nvidia::NvVec2, x));
NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec2, y) == NV_OFFSET_OF(nvidia::NvVec2, y));
NV_COMPILE_TIME_ASSERT(sizeof(NvcVec3) == sizeof(nvidia::NvVec3));
NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec3, x) == NV_OFFSET_OF(nvidia::NvVec3, x));
NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec3, y) == NV_OFFSET_OF(nvidia::NvVec3, y));
NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec3, z) == NV_OFFSET_OF(nvidia::NvVec3, z));
NV_COMPILE_TIME_ASSERT(sizeof(NvcVec4) == sizeof(nvidia::NvVec4));
NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec4, x) == NV_OFFSET_OF(nvidia::NvVec4, x));
NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec4, y) == NV_OFFSET_OF(nvidia::NvVec4, y));
NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec4, z) == NV_OFFSET_OF(nvidia::NvVec4, z));
NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec4, w) == NV_OFFSET_OF(nvidia::NvVec4, w));
NV_COMPILE_TIME_ASSERT(sizeof(NvcQuat) == sizeof(nvidia::NvQuat));
NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcQuat, x) == NV_OFFSET_OF(nvidia::NvQuat, x));
NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcQuat, y) == NV_OFFSET_OF(nvidia::NvQuat, y));
NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcQuat, z) == NV_OFFSET_OF(nvidia::NvQuat, z));
NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcQuat, w) == NV_OFFSET_OF(nvidia::NvQuat, w));
NV_COMPILE_TIME_ASSERT(sizeof(NvcTransform) == sizeof(nvidia::NvTransform));
NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcTransform, p) == NV_OFFSET_OF(nvidia::NvTransform, p));
NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcTransform, q) == NV_OFFSET_OF(nvidia::NvTransform, q));
NV_COMPILE_TIME_ASSERT(sizeof(NvcPlane) == sizeof(nvidia::NvPlane));
NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcPlane, n) == NV_OFFSET_OF(nvidia::NvPlane, n));
NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcPlane, d) == NV_OFFSET_OF(nvidia::NvPlane, d));
NV_COMPILE_TIME_ASSERT(sizeof(NvcMat33) == sizeof(nvidia::NvMat33));
NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcMat33, column0) == NV_OFFSET_OF(nvidia::NvMat33, column0));
NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcMat33, column1) == NV_OFFSET_OF(nvidia::NvMat33, column1));
NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcMat33, column2) == NV_OFFSET_OF(nvidia::NvMat33, column2));
NV_COMPILE_TIME_ASSERT(sizeof(NvcBounds3) == sizeof(nvidia::NvBounds3));
NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcBounds3, minimum) == NV_OFFSET_OF(nvidia::NvBounds3, minimum));
NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcBounds3, maximum) == NV_OFFSET_OF(nvidia::NvBounds3, maximum));
#endif // #ifndef NVBLASTNVSHAREDSHELPERS_H
| 8,814 | C | 66.290076 | 126 | 0.482982 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastFamily.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "NvBlastTypes.h"
#include "NvBlastFamily.h"
#include "NvBlastFamilyGraph.h"
#include "NvBlastIndexFns.h"
#include "NvBlastTime.h"
#include <new>
namespace Nv
{
namespace Blast
{
//////// Global functions ////////
struct FamilyDataOffsets
{
size_t m_actors;
size_t m_visibleChunkIndexLinks;
size_t m_chunkActorIndices;
size_t m_graphNodeIndexLinks;
size_t m_lowerSupportChunkHealths;
size_t m_graphBondHealths;
size_t m_graphCachedBondHealths;
size_t m_familyGraph;
};
static size_t createFamilyDataOffsets(FamilyDataOffsets& offsets, const NvBlastAssetMemSizeData& sizeData)
{
NvBlastCreateOffsetStart(sizeof(FamilyHeader));
NvBlastCreateOffsetAlign16(offsets.m_actors, sizeData.lowerSupportChunkCount * sizeof(Actor));
NvBlastCreateOffsetAlign16(offsets.m_visibleChunkIndexLinks, sizeData.chunkCount * sizeof(IndexDLink<uint32_t>));
NvBlastCreateOffsetAlign16(offsets.m_chunkActorIndices, sizeData.upperSupportChunkCount * sizeof(uint32_t));
NvBlastCreateOffsetAlign16(offsets.m_graphNodeIndexLinks, sizeData.nodeCount * sizeof(uint32_t));
NvBlastCreateOffsetAlign16(offsets.m_lowerSupportChunkHealths, sizeData.lowerSupportChunkCount * sizeof(float));
NvBlastCreateOffsetAlign16(offsets.m_graphBondHealths, sizeData.bondCount * sizeof(float));
NvBlastCreateOffsetAlign16(offsets.m_graphCachedBondHealths, sizeData.bondCount * sizeof(float));
NvBlastCreateOffsetAlign16(offsets.m_familyGraph, static_cast<size_t>(FamilyGraph::requiredMemorySize(sizeData.nodeCount, sizeData.bondCount)));
return NvBlastCreateOffsetEndAlign16();
}
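// Resulting family block layout (sketch), each section aligned to 16 bytes:
//
//     [FamilyHeader][actors][visibleChunkIndexLinks][chunkActorIndices][graphNodeIndexLinks]
//     [lowerSupportChunkHealths][graphBondHealths][graphCachedBondHealths][FamilyGraph]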
size_t getFamilyMemorySize(const Asset* asset)
{
#if NVBLASTLL_CHECK_PARAMS
if (asset == nullptr)
{
NVBLAST_ALWAYS_ASSERT();
return 0;
}
#endif
const NvBlastAssetMemSizeData sizeData = NvBlastAssetMemSizeDataFromAsset(asset);
return getFamilyMemorySize(sizeData);
}
size_t getFamilyMemorySize(const NvBlastAssetMemSizeData& sizeData)
{
FamilyDataOffsets offsets;
return createFamilyDataOffsets(offsets, sizeData);
}
// this path is used by the serialization code
// buffers are set up, but some parts (like asset ID) are left to the serialization code to fill in
static NvBlastFamily* createFamily(void* mem, const NvBlastAssetMemSizeData& sizeData, NvBlastLog logFn)
{
NVBLASTLL_CHECK(mem != nullptr, logFn, "createFamily: NULL mem pointer input.", return nullptr);
NVBLASTLL_CHECK((reinterpret_cast<uintptr_t>(mem) & 0xF) == 0, logFn, "createFamily: mem pointer not 16-byte aligned.", return nullptr);
if (sizeData.chunkCount == 0)
{
NVBLASTLL_LOG_ERROR(logFn, "createFamily: Asset has no chunks. Family not created.\n");
return nullptr;
}
const uint32_t bondCount = sizeData.bondCount;
// We need to keep this many actor representations around for our island indexing scheme.
const uint32_t lowerSupportChunkCount = sizeData.lowerSupportChunkCount;
// We need this many chunk actor indices.
const uint32_t upperSupportChunkCount = sizeData.upperSupportChunkCount;
// Family offsets
FamilyDataOffsets offsets;
const size_t dataSize = createFamilyDataOffsets(offsets, sizeData);
// Restricting our data size to < 4GB so that we may use uint32_t offsets
if (dataSize > (size_t)UINT32_MAX)
{
NVBLASTLL_LOG_ERROR(logFn, "Nv::Blast::Actor::instanceAllocate: Instance data block size will exceed 4GB. Instance not created.\n");
return nullptr;
}
// Allocate family
NvBlastFamily* family = reinterpret_cast<NvBlastFamily*>(memset(mem, 0, dataSize));
// Fill in family header
FamilyHeader* header = (FamilyHeader*)family;
header->dataType = NvBlastDataBlock::FamilyDataBlock;
header->formatVersion = 0; // Not currently using this field
header->size = (uint32_t)dataSize;
header->m_actorCount = 0;
header->m_actorsOffset = (uint32_t)offsets.m_actors;
header->m_visibleChunkIndexLinksOffset = (uint32_t)offsets.m_visibleChunkIndexLinks;
header->m_chunkActorIndicesOffset = (uint32_t)offsets.m_chunkActorIndices;
header->m_graphNodeIndexLinksOffset = (uint32_t)offsets.m_graphNodeIndexLinks;
header->m_lowerSupportChunkHealthsOffset = (uint32_t)offsets.m_lowerSupportChunkHealths;
header->m_graphBondHealthsOffset = (uint32_t)offsets.m_graphBondHealths;
header->m_graphCachedBondHealthsOffset = (uint32_t)offsets.m_graphCachedBondHealths;
header->m_familyGraphOffset = (uint32_t)offsets.m_familyGraph;
// Initialize family header data:
// Actors - initialize to defaults, with zero offset value (indicating inactive state)
Actor* actors = header->getActors(); // This will get the subsupport actors too
for (uint32_t i = 0; i < lowerSupportChunkCount; ++i)
{
new (actors + i) Actor();
}
// Visible chunk index links - initialize to solitary links (0xFFFFFFFF fields)
memset(header->getVisibleChunkIndexLinks(), 0xFF, sizeData.chunkCount*sizeof(IndexDLink<uint32_t>));
// Chunk actor IDs - initialize to invalid (0xFFFFFFFF)
memset(header->getChunkActorIndices(), 0xFF, upperSupportChunkCount*sizeof(uint32_t));
// Graph node index links - initialize to solitary links
memset(header->getGraphNodeIndexLinks(), 0xFF, sizeData.nodeCount*sizeof(uint32_t));
// Healths are initialized to 0 - the entire memory block is already set to 0 above
// memset(header->getLowerSupportChunkHealths(), 0, lowerSupportChunkCount*sizeof(float));
// memset(header->getBondHealths(), 0, bondCount*sizeof(float));
// FamilyGraph ctor
new (header->getFamilyGraph()) FamilyGraph(sizeData.nodeCount, sizeData.bondCount);
return family;
}
// this path is taken when an asset already exists and a family is to be created from it directly
static NvBlastFamily* createFamily(void* mem, const NvBlastAsset* asset, NvBlastLog logFn)
{
NVBLASTLL_CHECK(asset != nullptr, logFn, "createFamily: NULL asset pointer input.", return nullptr);
const Asset* solverAsset = static_cast<const Asset*>(asset);
// pull count info from the asset and use that to initialize the family buffers
NvBlastAssetMemSizeData sizeData = NvBlastAssetMemSizeDataFromAsset(solverAsset);
NvBlastFamily* family = createFamily(mem, sizeData, logFn);
if (family != nullptr)
{
// set the asset ID and pointer since we have them available
FamilyHeader* header = reinterpret_cast<FamilyHeader*>(family);
header->m_assetID = solverAsset->m_ID;
header->m_asset = solverAsset;
}
return family;
}
//////// Family member methods ////////
void FamilyHeader::fractureSubSupportNoEvents(uint32_t chunkIndex, uint32_t suboffset, float healthDamage, float* chunkHealths, const NvBlastChunk* chunks)
{
const NvBlastChunk& chunk = chunks[chunkIndex];
uint32_t numChildren = chunk.childIndexStop - chunk.firstChildIndex;
if (numChildren > 0)
{
healthDamage /= numChildren;
for (uint32_t childIndex = chunk.firstChildIndex; childIndex < chunk.childIndexStop; childIndex++)
{
float& health = chunkHealths[childIndex - suboffset];
if (canTakeDamage(health))
{
float remainingDamage = healthDamage - health;
health -= healthDamage;
NVBLAST_ASSERT(chunks[childIndex].parentChunkIndex == chunkIndex);
if (health <= 0.0f && remainingDamage > 0.0f)
{
fractureSubSupportNoEvents(childIndex, suboffset, remainingDamage, chunkHealths, chunks);
}
}
}
}
}
void FamilyHeader::fractureSubSupport(uint32_t chunkIndex, uint32_t suboffset, float healthDamage, float* chunkHealths, const NvBlastChunk* chunks, NvBlastChunkFractureData* outBuffer, uint32_t* currentIndex, const uint32_t maxCount)
{
const NvBlastChunk& chunk = chunks[chunkIndex];
uint32_t numChildren = chunk.childIndexStop - chunk.firstChildIndex;
if (numChildren > 0)
{
healthDamage /= numChildren;
for (uint32_t childIndex = chunk.firstChildIndex; childIndex < chunk.childIndexStop; childIndex++)
{
float& health = chunkHealths[childIndex - suboffset];
if (canTakeDamage(health))
{
float remainingDamage = healthDamage - health;
health -= healthDamage;
NVBLAST_ASSERT(chunks[childIndex].parentChunkIndex == chunkIndex);
if (*currentIndex < maxCount)
{
NvBlastChunkFractureData& event = outBuffer[*currentIndex];
event.userdata = chunks[childIndex].userData;
event.chunkIndex = childIndex;
event.health = health;
}
(*currentIndex)++;
if (health <= 0.0f && remainingDamage > 0.0f)
{
fractureSubSupport(childIndex, suboffset, remainingDamage, chunkHealths, chunks, outBuffer, currentIndex, maxCount);
}
}
}
}
}
void FamilyHeader::fractureNoEvents(uint32_t chunkFractureCount, const NvBlastChunkFractureData* chunkFractures, Actor* filterActor, NvBlastLog logFn)
{
const SupportGraph& graph = m_asset->m_graph;
const uint32_t* graphAdjacencyPartition = graph.getAdjacencyPartition();
const uint32_t* adjacentBondIndices = graph.getAdjacentBondIndices();
float* bondHealths = getBondHealths();
float* chunkHealths = getLowerSupportChunkHealths();
float* subChunkHealths = getSubsupportChunkHealths();
const NvBlastChunk* chunks = m_asset->getChunks();
for (uint32_t i = 0; i < chunkFractureCount; ++i)
{
const NvBlastChunkFractureData& command = chunkFractures[i];
const uint32_t chunkIndex = command.chunkIndex;
const uint32_t chunkHealthIndex = m_asset->getContiguousLowerSupportIndex(chunkIndex);
NVBLAST_ASSERT(!isInvalidIndex(chunkHealthIndex));
if (isInvalidIndex(chunkHealthIndex))
{
continue;
}
float& health = chunkHealths[chunkHealthIndex];
if (canTakeDamage(health) && command.health > 0.0f)
{
Actor* actor = getChunkActor(chunkIndex);
if (filterActor && filterActor != actor)
{
NVBLASTLL_LOG_WARNING(logFn, "NvBlastActorApplyFracture: chunk fracture command corresponds to other actor, command is ignored.");
}
else if (actor)
{
const uint32_t nodeIndex = m_asset->getChunkToGraphNodeMap()[chunkIndex];
if (actor->getGraphNodeCount() > 1 && !isInvalidIndex(nodeIndex))
{
for (uint32_t adjacentIndex = graphAdjacencyPartition[nodeIndex]; adjacentIndex < graphAdjacencyPartition[nodeIndex + 1]; adjacentIndex++)
{
const uint32_t bondIndex = adjacentBondIndices[adjacentIndex];
NVBLAST_ASSERT(!isInvalidIndex(bondIndex));
if (bondHealths[bondIndex] > 0.0f)
{
bondHealths[bondIndex] = 0.0f;
}
}
getFamilyGraph()->notifyNodeRemoved(actor->getIndex(), nodeIndex, &graph);
}
health -= command.health;
const float remainingDamage = -health;
if (remainingDamage > 0.0f) // node chunk has been damaged beyond its health
{
fractureSubSupportNoEvents(chunkIndex, m_asset->m_firstSubsupportChunkIndex, remainingDamage, subChunkHealths, chunks);
}
}
}
}
}
void FamilyHeader::fractureWithEvents(uint32_t chunkFractureCount, const NvBlastChunkFractureData* commands, NvBlastChunkFractureData* events, uint32_t eventsSize, uint32_t* count, Actor* filterActor, NvBlastLog logFn)
{
const SupportGraph& graph = m_asset->m_graph;
const uint32_t* graphAdjacencyPartition = graph.getAdjacencyPartition();
const uint32_t* adjacentBondIndices = graph.getAdjacentBondIndices();
float* bondHealths = getBondHealths();
float* chunkHealths = getLowerSupportChunkHealths();
float* subChunkHealths = getSubsupportChunkHealths();
const NvBlastChunk* chunks = m_asset->getChunks();
for (uint32_t i = 0; i < chunkFractureCount; ++i)
{
const NvBlastChunkFractureData& command = commands[i];
const uint32_t chunkIndex = command.chunkIndex;
const uint32_t chunkHealthIndex = m_asset->getContiguousLowerSupportIndex(chunkIndex);
NVBLAST_ASSERT(!isInvalidIndex(chunkHealthIndex));
if (isInvalidIndex(chunkHealthIndex))
{
continue;
}
float& health = chunkHealths[chunkHealthIndex];
if (canTakeDamage(health) && command.health > 0.0f)
{
Actor* actor = getChunkActor(chunkIndex);
if (filterActor && filterActor != actor)
{
NVBLASTLL_LOG_WARNING(logFn, "NvBlastActorApplyFracture: chunk fracture command corresponds to other actor, command is ignored.");
}
else if (actor)
{
const uint32_t nodeIndex = m_asset->getChunkToGraphNodeMap()[chunkIndex];
if (actor->getGraphNodeCount() > 1 && !isInvalidIndex(nodeIndex))
{
for (uint32_t adjacentIndex = graphAdjacencyPartition[nodeIndex]; adjacentIndex < graphAdjacencyPartition[nodeIndex + 1]; adjacentIndex++)
{
const uint32_t bondIndex = adjacentBondIndices[adjacentIndex];
NVBLAST_ASSERT(!isInvalidIndex(bondIndex));
if (bondHealths[bondIndex] > 0.0f)
{
bondHealths[bondIndex] = 0.0f;
}
}
getFamilyGraph()->notifyNodeRemoved(actor->getIndex(), nodeIndex, &graph);
}
health -= command.health;
if (*count < eventsSize)
{
NvBlastChunkFractureData& outEvent = events[*count];
outEvent.userdata = chunks[chunkIndex].userData;
outEvent.chunkIndex = chunkIndex;
outEvent.health = health;
}
(*count)++;
const float remainingDamage = -health;
if (remainingDamage > 0.0f) // node chunk has been damaged beyond its health
{
fractureSubSupport(chunkIndex, m_asset->m_firstSubsupportChunkIndex, remainingDamage, subChunkHealths, chunks, events, count, eventsSize);
}
}
}
}
}
void FamilyHeader::fractureInPlaceEvents(uint32_t chunkFractureCount, NvBlastChunkFractureData* inoutbuffer, uint32_t eventsSize, uint32_t* count, Actor* filterActor, NvBlastLog logFn)
{
const SupportGraph& graph = m_asset->m_graph;
const uint32_t* graphAdjacencyPartition = graph.getAdjacencyPartition();
const uint32_t* adjacentBondIndices = graph.getAdjacentBondIndices();
float* bondHealths = getBondHealths();
float* chunkHealths = getLowerSupportChunkHealths();
float* subChunkHealths = getSubsupportChunkHealths();
const NvBlastChunk* chunks = m_asset->getChunks();
//
// First level Chunk Fractures
//
for (uint32_t i = 0; i < chunkFractureCount; ++i)
{
const NvBlastChunkFractureData& command = inoutbuffer[i];
const uint32_t chunkIndex = command.chunkIndex;
const uint32_t chunkHealthIndex = m_asset->getContiguousLowerSupportIndex(chunkIndex);
NVBLAST_ASSERT(!isInvalidIndex(chunkHealthIndex));
if (isInvalidIndex(chunkHealthIndex))
{
continue;
}
float& health = chunkHealths[chunkHealthIndex];
if (canTakeDamage(health) && command.health > 0.0f)
{
Actor* actor = getChunkActor(chunkIndex);
if (filterActor && filterActor != actor)
{
NVBLASTLL_LOG_WARNING(logFn, "NvBlastActorApplyFracture: chunk fracture command corresponds to other actor, command is ignored.");
}
else if (actor)
{
const uint32_t nodeIndex = m_asset->getChunkToGraphNodeMap()[chunkIndex];
if (actor->getGraphNodeCount() > 1 && !isInvalidIndex(nodeIndex))
{
for (uint32_t adjacentIndex = graphAdjacencyPartition[nodeIndex]; adjacentIndex < graphAdjacencyPartition[nodeIndex + 1]; adjacentIndex++)
{
const uint32_t bondIndex = adjacentBondIndices[adjacentIndex];
NVBLAST_ASSERT(!isInvalidIndex(bondIndex));
if (bondHealths[bondIndex] > 0.0f)
{
bondHealths[bondIndex] = 0.0f;
}
}
getFamilyGraph()->notifyNodeRemoved(actor->getIndex(), nodeIndex, &graph);
}
health -= command.health;
NvBlastChunkFractureData& outEvent = inoutbuffer[(*count)++];
outEvent.userdata = chunks[chunkIndex].userData;
outEvent.chunkIndex = chunkIndex;
outEvent.health = health;
}
}
}
//
// Hierarchical Chunk Fractures
//
uint32_t commandedChunkFractures = *count;
for (uint32_t i = 0; i < commandedChunkFractures; ++i)
{
NvBlastChunkFractureData& event = inoutbuffer[i];
const uint32_t chunkIndex = event.chunkIndex;
const float remainingDamage = -event.health;
if (remainingDamage > 0.0f) // node chunk has been damaged beyond its health
{
fractureSubSupport(chunkIndex, m_asset->m_firstSubsupportChunkIndex, remainingDamage, subChunkHealths, chunks, inoutbuffer, count, eventsSize);
}
}
}
void FamilyHeader::applyFracture(NvBlastFractureBuffers* eventBuffers, const NvBlastFractureBuffers* commands, Actor* filterActor, NvBlastLog logFn, NvBlastTimers* timers)
{
NVBLASTLL_CHECK(commands != nullptr, logFn, "NvBlastActorApplyFracture: NULL commands pointer input.", return);
NVBLASTLL_CHECK(isValid(commands), logFn, "NvBlastActorApplyFracture: commands memory is NULL but size is > 0.", return);
NVBLASTLL_CHECK(eventBuffers == nullptr || isValid(eventBuffers), logFn, "NvBlastActorApplyFracture: eventBuffers memory is NULL but size is > 0.",
eventBuffers->bondFractureCount = 0; eventBuffers->chunkFractureCount = 0; return);
#if NVBLASTLL_CHECK_PARAMS
if (eventBuffers != nullptr && eventBuffers->bondFractureCount == 0 && eventBuffers->chunkFractureCount == 0)
{
NVBLASTLL_LOG_WARNING(logFn, "NvBlastActorApplyFracture: eventBuffers do not provide any space.");
return;
}
#endif
#if NV_PROFILE
Time time;
#else
NV_UNUSED(timers);
#endif
//
// Chunk Fracture
//
if (eventBuffers == nullptr || eventBuffers->chunkFractures == nullptr)
{
// immediate hierarchical fracture
fractureNoEvents(commands->chunkFractureCount, commands->chunkFractures, filterActor, logFn);
}
else if (eventBuffers->chunkFractures != commands->chunkFractures)
{
// immediate hierarchical fracture
uint32_t count = 0;
fractureWithEvents(commands->chunkFractureCount, commands->chunkFractures, eventBuffers->chunkFractures, eventBuffers->chunkFractureCount, &count, filterActor, logFn);
if (count > eventBuffers->chunkFractureCount)
{
NVBLASTLL_LOG_WARNING(logFn, "NvBlastActorApplyFracture: eventBuffers too small. Chunk events were lost.");
}
else
{
eventBuffers->chunkFractureCount = count;
}
}
else if (eventBuffers->chunkFractures == commands->chunkFractures)
{
// compacting first
uint32_t count = 0;
fractureInPlaceEvents(commands->chunkFractureCount, commands->chunkFractures, eventBuffers->chunkFractureCount, &count, filterActor, logFn);
if (count > eventBuffers->chunkFractureCount)
{
NVBLASTLL_LOG_WARNING(logFn, "NvBlastActorApplyFracture: eventBuffers too small. Chunk events were lost.");
}
else
{
eventBuffers->chunkFractureCount = count;
}
}
//
// Bond Fracture
//
uint32_t outCount = 0;
const uint32_t eventBufferSize = eventBuffers ? eventBuffers->bondFractureCount : 0;
NvBlastBond* bonds = m_asset->getBonds();
float* bondHealths = getBondHealths();
const uint32_t* graphChunkIndices = m_asset->m_graph.getChunkIndices();
for (uint32_t i = 0; i < commands->bondFractureCount; ++i)
{
const NvBlastBondFractureData& frac = commands->bondFractures[i];
NVBLAST_ASSERT(frac.nodeIndex0 < m_asset->m_graph.m_nodeCount);
NVBLAST_ASSERT(frac.nodeIndex1 < m_asset->m_graph.m_nodeCount);
uint32_t chunkIndex0 = graphChunkIndices[frac.nodeIndex0];
uint32_t chunkIndex1 = graphChunkIndices[frac.nodeIndex1];
NVBLAST_ASSERT(!isInvalidIndex(chunkIndex0) || !isInvalidIndex(chunkIndex1));
Actor* actor0 = !isInvalidIndex(chunkIndex0) ? getChunkActor(chunkIndex0) : nullptr;
Actor* actor1 = !isInvalidIndex(chunkIndex1) ? getChunkActor(chunkIndex1) : nullptr;
NVBLAST_ASSERT(actor0 != nullptr || actor1 != nullptr);
// If both actors are non-null and different, then the bond is already broken.
// One of the actors can be nullptr, which probably means it is the 'world' node.
if (actor0 == actor1 || actor0 == nullptr || actor1 == nullptr)
{
Actor* actor = actor0 ? actor0 : actor1;
NVBLAST_ASSERT_WITH_MESSAGE(actor, "NvBlastActorApplyFracture: all actors in bond fracture command are nullptr, command will be safely ignored, but investigation is recommended.");
if (filterActor && filterActor != actor)
{
NVBLASTLL_LOG_WARNING(logFn, "NvBlastActorApplyFracture: bond fracture command corresponds to other actor, command is ignored.");
}
else if (actor)
{
const uint32_t bondIndex = actor->damageBond(frac.nodeIndex0, frac.nodeIndex1, frac.health);
if (!isInvalidIndex(bondIndex))
{
if (eventBuffers && eventBuffers->bondFractures)
{
if (outCount < eventBufferSize)
{
NvBlastBondFractureData& outEvent = eventBuffers->bondFractures[outCount];
outEvent.userdata = bonds[bondIndex].userData;
outEvent.nodeIndex0 = frac.nodeIndex0;
outEvent.nodeIndex1 = frac.nodeIndex1;
outEvent.health = bondHealths[bondIndex];
}
}
outCount++;
}
}
}
}
if (eventBuffers && eventBuffers->bondFractures)
{
if (outCount > eventBufferSize)
{
NVBLASTLL_LOG_WARNING(logFn, "NvBlastActorApplyFracture: eventBuffers too small. Bond events were lost.");
}
else
{
eventBuffers->bondFractureCount = outCount;
}
}
#if NV_PROFILE
if (timers != nullptr)
{
timers->fracture += time.getElapsedTicks();
}
#endif
}
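/*
Example usage through the public API (a minimal sketch; assumes an existing actor and a filled-in
commands buffer; events beyond the caller-chosen buffer sizes are dropped with a warning):

    NvBlastBondFractureData bondEvents[64];
    NvBlastChunkFractureData chunkEvents[64];
    NvBlastFractureBuffers events = { 64, 64, bondEvents, chunkEvents };
    NvBlastActorApplyFracture(&events, actor, &commands, logFn, timers);
    // events.bondFractureCount and events.chunkFractureCount now hold the number of events written
*/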
} // namespace Blast
} // namespace Nv
// API implementation
extern "C"
{
NvBlastAssetMemSizeData NvBlastAssetMemSizeDataFromAsset(const NvBlastAsset* asset)
{
const Nv::Blast::Asset* solverAsset = reinterpret_cast<const Nv::Blast::Asset*>(asset);
NvBlastAssetMemSizeData sizeData;
if (solverAsset)
{
sizeData.bondCount = solverAsset->getBondCount();
sizeData.chunkCount = solverAsset->m_chunkCount;
sizeData.nodeCount = solverAsset->m_graph.m_nodeCount;
sizeData.lowerSupportChunkCount = solverAsset->getLowerSupportChunkCount();
sizeData.upperSupportChunkCount = solverAsset->getUpperSupportChunkCount();
}
else
{
memset(&sizeData, 0, sizeof(NvBlastAssetMemSizeData));
}
return sizeData;
}
NvBlastFamily* NvBlastAssetCreateFamily(void* mem, const NvBlastAsset* asset, NvBlastLog logFn)
{
return Nv::Blast::createFamily(mem, asset, logFn);
}
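/*
Typical creation flow (a minimal sketch; alignedAlloc16 stands for any caller-provided
allocation returning 16-byte-aligned memory):

    const size_t size = NvBlastAssetGetFamilyMemorySize(asset, logFn);
    void* mem = alignedAlloc16(size);
    NvBlastFamily* family = NvBlastAssetCreateFamily(mem, asset, logFn);
*/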
NvBlastFamily* NvBlastAssetCreateFamilyFromSizeData(void* mem, const NvBlastAssetMemSizeData& sizeData, NvBlastLog logFn)
{
return Nv::Blast::createFamily(mem, sizeData, logFn);
}
uint32_t NvBlastFamilyGetFormatVersion(const NvBlastFamily* family, NvBlastLog logFn)
{
NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyGetFormatVersion: NULL family pointer input.", return UINT32_MAX);
return reinterpret_cast<const Nv::Blast::FamilyHeader*>(family)->formatVersion;
}
const NvBlastAsset* NvBlastFamilyGetAsset(const NvBlastFamily* family, NvBlastLog logFn)
{
NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyGetAsset: NULL family pointer input.", return nullptr);
return reinterpret_cast<const Nv::Blast::FamilyHeader*>(family)->m_asset;
}
void NvBlastFamilySetAsset(NvBlastFamily* family, const NvBlastAsset* asset, NvBlastLog logFn)
{
NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilySetAsset: NULL family pointer input.", return);
NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastFamilySetAsset: NULL asset pointer input.", return);
Nv::Blast::FamilyHeader* header = reinterpret_cast<Nv::Blast::FamilyHeader*>(family);
const Nv::Blast::Asset* solverAsset = reinterpret_cast<const Nv::Blast::Asset*>(asset);
if (memcmp(&header->m_assetID, &solverAsset->m_ID, sizeof(NvBlastID)))
{
NVBLASTLL_LOG_ERROR(logFn, "NvBlastFamilySetAsset: wrong asset. Passed asset ID doesn't match family asset ID.");
return;
}
header->m_asset = solverAsset;
}
uint32_t NvBlastFamilyGetSize(const NvBlastFamily* family, NvBlastLog logFn)
{
NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyGetSize: NULL family pointer input.", return 0);
return reinterpret_cast<const Nv::Blast::FamilyHeader*>(family)->size;
}
NvBlastID NvBlastFamilyGetAssetID(const NvBlastFamily* family, NvBlastLog logFn)
{
NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyGetAssetID: NULL family pointer input.", return NvBlastID());
return reinterpret_cast<const Nv::Blast::FamilyHeader*>(family)->m_assetID;
}
uint32_t NvBlastFamilyGetActorCount(const NvBlastFamily* family, NvBlastLog logFn)
{
NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyGetActorCount: NULL family pointer input.", return 0);
const Nv::Blast::FamilyHeader* header = reinterpret_cast<const Nv::Blast::FamilyHeader*>(family);
return header->m_actorCount;
}
uint32_t NvBlastFamilyGetActors(NvBlastActor** actors, uint32_t actorsSize, const NvBlastFamily* family, NvBlastLog logFn)
{
NVBLASTLL_CHECK(actors != nullptr, logFn, "NvBlastFamilyGetActors: NULL actors pointer input.", return 0);
NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyGetActors: NULL family pointer input.", return 0);
const Nv::Blast::FamilyHeader* header = reinterpret_cast<const Nv::Blast::FamilyHeader*>(family);
// Iterate through active actors and write to supplied array
const uint32_t familyActorCount = header->getActorsArraySize();
Nv::Blast::Actor* familyActor = header->getActors();
uint32_t actorCount = 0;
for (uint32_t i = 0; actorCount < actorsSize && i < familyActorCount; ++i, ++familyActor)
{
if (familyActor->isActive())
{
actors[actorCount++] = familyActor;
}
}
return actorCount;
}
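/*
Example (a minimal sketch): enumerating all active actors in a family into a caller-owned buffer.

    const uint32_t maxActors = NvBlastFamilyGetMaxActorCount(family, logFn);
    NvBlastActor** actors = (NvBlastActor**)malloc(maxActors * sizeof(NvBlastActor*));
    const uint32_t actorCount = NvBlastFamilyGetActors(actors, maxActors, family, logFn);
*/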
NvBlastActor* NvBlastFamilyGetActorByIndex(const NvBlastFamily* family, uint32_t actorIndex, NvBlastLog logFn)
{
NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyGetActorByIndex: NULL family pointer input.", return nullptr);
const Nv::Blast::FamilyHeader* header = reinterpret_cast<const Nv::Blast::FamilyHeader*>(family);
return header->getActorByIndex(actorIndex);
}
NvBlastActor* NvBlastFamilyGetChunkActor(const NvBlastFamily* family, uint32_t chunkIndex, NvBlastLog logFn)
{
NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyGetChunkActor: NULL family pointer input.", return nullptr);
const Nv::Blast::FamilyHeader* header = reinterpret_cast<const Nv::Blast::FamilyHeader*>(family);
NVBLASTLL_CHECK(header->m_asset != nullptr, logFn, "NvBlastFamilyGetChunkActor: NvBlastFamily has null asset set.", return nullptr);
NVBLASTLL_CHECK(chunkIndex < header->m_asset->m_chunkCount, logFn, "NvBlastFamilyGetChunkActor: bad value of chunkIndex for the given family's asset.", return nullptr);
return header->getChunkActor(chunkIndex);
}
uint32_t* NvBlastFamilyGetChunkActorIndices(const NvBlastFamily* family, NvBlastLog logFn)
{
NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyGetChunkActorIndices: NULL family pointer input.", return nullptr);
const Nv::Blast::FamilyHeader* header = reinterpret_cast<const Nv::Blast::FamilyHeader*>(family);
NVBLASTLL_CHECK(header->m_asset != nullptr, logFn, "NvBlastFamilyGetChunkActorIndices: NvBlastFamily has null asset set.", return nullptr);
return header->getChunkActorIndices();
}
uint32_t NvBlastFamilyGetMaxActorCount(const NvBlastFamily* family, NvBlastLog logFn)
{
NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyGetMaxActorCount: NULL family pointer input.", return 0);
const Nv::Blast::FamilyHeader* header = reinterpret_cast<const Nv::Blast::FamilyHeader*>(family);
return header->getActorsArraySize();
}
} // extern "C"
| 31,654 | C++ | 40.379085 | 233 | 0.6699 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastActorSerializationBlock.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "NvBlastActor.h"
#include "NvBlastActorSerializationBlock.h"
#include "NvBlastFamilyGraph.h"
#include <algorithm>
namespace Nv
{
namespace Blast
{
//////// Actor static methods for serialization ////////
Actor* Actor::deserialize(NvBlastFamily* family, const void* buffer, NvBlastLog logFn)
{
NVBLASTLL_CHECK(family != nullptr, logFn, "Actor::deserialize: NULL family pointer input.", return nullptr);
const ActorSerializationHeader* serHeader = reinterpret_cast<const ActorSerializationHeader*>(buffer);
if (serHeader->m_formatVersion != ActorSerializationFormat::Current)
{
NVBLASTLL_LOG_ERROR(logFn, "Actor::deserialize: wrong data format. Serialization data must be converted to current version.");
return nullptr;
}
FamilyHeader* header = reinterpret_cast<FamilyHeader*>(family);
const Asset* asset = header->m_asset;
const SupportGraph& graph = asset->m_graph;
const uint32_t* graphChunkIndices = graph.getChunkIndices();
const uint32_t* graphAdjacencyPartition = graph.getAdjacencyPartition();
const uint32_t* graphAdjacentNodeIndices = graph.getAdjacentNodeIndices();
const uint32_t* graphAdjacentBondIndices = graph.getAdjacentBondIndices();
Actor* actor = nullptr;
const uint32_t actorIndex = serHeader->m_index;
if (serHeader->m_index < header->getActorsArraySize())
{
if (!header->getActors()[actorIndex].isActive())
{
actor = header->borrowActor(serHeader->m_index);
}
}
if (actor == nullptr)
{
NVBLASTLL_LOG_ERROR(logFn, "Actor::deserialize: invalid actor index in serialized data. Actor not created.");
return nullptr;
}
// Commonly used data
uint32_t* chunkActorIndices = header->getChunkActorIndices();
FamilyGraph* familyGraph = header->getFamilyGraph();
#if NVBLASTLL_CHECK_PARAMS
{
const uint32_t* serVisibleChunkIndices = serHeader->getVisibleChunkIndices();
for (uint32_t i = 0; i < serHeader->m_visibleChunkCount; ++i)
{
const uint32_t visibleChunkIndex = serVisibleChunkIndices[i];
if (!isInvalidIndex(chunkActorIndices[visibleChunkIndex]))
{
NVBLASTLL_LOG_ERROR(logFn, "Actor::deserialize: visible chunk already has an actor in family. Actor not created.");
header->returnActor(*actor);
return nullptr;
}
}
}
#endif
// Visible chunk indices and chunk actor indices
{
// Add visible chunks, set chunk subtree ownership
const uint32_t* serVisibleChunkIndices = serHeader->getVisibleChunkIndices();
IndexDLink<uint32_t>* visibleChunkIndexLinks = header->getVisibleChunkIndexLinks();
for (uint32_t i = serHeader->m_visibleChunkCount; i--;) // Reverse-order, so the resulting linked list is in the original order
{
const uint32_t visibleChunkIndex = serVisibleChunkIndices[i];
NVBLAST_ASSERT(isInvalidIndex(visibleChunkIndexLinks[visibleChunkIndex].m_adj[0]) && isInvalidIndex(visibleChunkIndexLinks[visibleChunkIndex].m_adj[1]));
IndexDList<uint32_t>().insertListHead(actor->m_firstVisibleChunkIndex, visibleChunkIndexLinks, visibleChunkIndex);
for (Asset::DepthFirstIt j(*asset, visibleChunkIndex, true); (bool)j; ++j)
{
NVBLAST_ASSERT(isInvalidIndex(chunkActorIndices[(uint32_t)j]));
chunkActorIndices[(uint32_t)j] = actorIndex;
}
}
actor->m_visibleChunkCount = serHeader->m_visibleChunkCount;
}
// Graph node indices, leaf chunk count, and island IDs
{
// Add graph nodes
const uint32_t* serGraphNodeIndices = serHeader->getGraphNodeIndices();
uint32_t* graphNodeIndexLinks = header->getGraphNodeIndexLinks();
uint32_t* islandIDs = familyGraph->getIslandIds();
for (uint32_t i = serHeader->m_graphNodeCount; i--;) // Reverse-order, so the resulting linked list is in the original order
{
const uint32_t graphNodeIndex = serGraphNodeIndices[i];
NVBLAST_ASSERT(isInvalidIndex(graphNodeIndexLinks[graphNodeIndex]));
graphNodeIndexLinks[graphNodeIndex] = actor->m_firstGraphNodeIndex;
actor->m_firstGraphNodeIndex = graphNodeIndex;
islandIDs[graphNodeIndex] = actorIndex;
}
actor->m_graphNodeCount = serHeader->m_graphNodeCount;
actor->m_leafChunkCount = serHeader->m_leafChunkCount;
}
// hasExternalBonds() is called here, after the family graph data has been set up, so that it works correctly
const bool hasExternalBonds = actor->hasExternalBonds();
// Lower support chunk healths
{
const float* serLowerSupportChunkHealths = serHeader->getLowerSupportChunkHealths();
float* subsupportHealths = header->getSubsupportChunkHealths();
const uint32_t subsupportChunkCount = asset->getUpperSupportChunkCount();
if (actor->m_graphNodeCount > 0)
{
uint32_t serLowerSupportChunkCount = 0;
float* graphNodeHealths = header->getLowerSupportChunkHealths();
for (Actor::GraphNodeIt i = *actor; (bool)i; ++i)
{
const uint32_t graphNodeIndex = (uint32_t)i;
const uint32_t supportChunkIndex = graphChunkIndices[graphNodeIndex];
if (isInvalidIndex(supportChunkIndex))
{
continue;
}
graphNodeHealths[graphNodeIndex] = serLowerSupportChunkHealths[serLowerSupportChunkCount++];
Asset::DepthFirstIt j(*asset, supportChunkIndex);
NVBLAST_ASSERT((bool)j);
++j; // Skip first (support) chunk, it's already been handled
for (; (bool)j; ++j)
{
subsupportHealths[(uint32_t)j] = serLowerSupportChunkHealths[serLowerSupportChunkCount++];
}
}
}
else // Single subsupport chunk
if (!isInvalidIndex(actor->m_firstVisibleChunkIndex))
{
NVBLAST_ASSERT(actor->m_firstVisibleChunkIndex >= subsupportChunkCount);
subsupportHealths[actor->m_firstVisibleChunkIndex - subsupportChunkCount] = *serLowerSupportChunkHealths;
}
}
// Bond healths
uint32_t serBondCount = 0;
{
const float* serBondHealths = serHeader->getBondHealths();
float* bondHealths = header->getBondHealths();
for (Actor::GraphNodeIt i = *actor; (bool)i; ++i)
{
const uint32_t graphNodeIndex = (uint32_t)i;
for (uint32_t adjacentIndex = graphAdjacencyPartition[graphNodeIndex]; adjacentIndex < graphAdjacencyPartition[graphNodeIndex + 1]; ++adjacentIndex)
{
const uint32_t adjacentNodeIndex = graphAdjacentNodeIndices[adjacentIndex];
if (adjacentNodeIndex > graphNodeIndex) // So as not to double-count
{
// Only count if the adjacent node belongs to this actor
const uint32_t adjacentChunkIndex = graphChunkIndices[adjacentNodeIndex];
if ((hasExternalBonds && isInvalidIndex(adjacentChunkIndex)) || (!isInvalidIndex(adjacentChunkIndex) && chunkActorIndices[adjacentChunkIndex] == actorIndex))
{
const uint32_t adjacentBondIndex = graphAdjacentBondIndices[adjacentIndex];
bondHealths[adjacentBondIndex] = serBondHealths[serBondCount++];
}
}
}
}
}
// Fast routes
{
const uint32_t* serFastRoute = serHeader->getFastRoute();
uint32_t* fastRoute = header->getFamilyGraph()->getFastRoute();
for (Actor::GraphNodeIt i = *actor; (bool)i; ++i)
{
fastRoute[(uint32_t)i] = *serFastRoute++;
}
}
// Hop counts
{
const uint32_t* serHopCounts = serHeader->getHopCounts();
uint32_t* hopCounts = header->getFamilyGraph()->getHopCounts();
for (Actor::GraphNodeIt i = *actor; (bool)i; ++i)
{
hopCounts[(uint32_t)i] = *serHopCounts++;
}
}
// Edge removed array
if (serBondCount > 0)
{
uint32_t serBondIndex = 0;
const FixedBoolArray* serEdgeRemovedArray = serHeader->getEdgeRemovedArray();
FixedBoolArray* edgeRemovedArray = familyGraph->getIsEdgeRemoved();
for (Actor::GraphNodeIt i = *actor; (bool)i; ++i)
{
const uint32_t graphNodeIndex = (uint32_t)i;
for (uint32_t adjacentIndex = graphAdjacencyPartition[graphNodeIndex]; adjacentIndex < graphAdjacencyPartition[graphNodeIndex + 1]; ++adjacentIndex)
{
const uint32_t adjacentNodeIndex = graphAdjacentNodeIndices[adjacentIndex];
if (adjacentNodeIndex > graphNodeIndex) // So as not to double-count
{
// Only count if the adjacent node belongs to this actor
const uint32_t adjacentChunkIndex = graphChunkIndices[adjacentNodeIndex];
if ((hasExternalBonds && isInvalidIndex(adjacentChunkIndex)) || (!isInvalidIndex(adjacentChunkIndex) && chunkActorIndices[adjacentChunkIndex] == actorIndex))
{
if (!serEdgeRemovedArray->test(serBondIndex))
{
const uint32_t adjacentBondIndex = graphAdjacentBondIndices[adjacentIndex];
edgeRemovedArray->reset(adjacentBondIndex);
}
++serBondIndex;
}
}
}
}
}
return actor;
}
//////// Actor member methods for serialization ////////
uint32_t Actor::serialize(void* buffer, uint32_t bufferSize, NvBlastLog logFn) const
{
// Set up pointers and such
const Asset* asset = getAsset();
const SupportGraph& graph = asset->m_graph;
const uint32_t* graphChunkIndices = graph.getChunkIndices();
const uint32_t* graphAdjacencyPartition = graph.getAdjacencyPartition();
const uint32_t* graphAdjacentNodeIndices = graph.getAdjacentNodeIndices();
const uint32_t* graphAdjacentBondIndices = graph.getAdjacentBondIndices();
const FamilyHeader* header = getFamilyHeader();
const uint32_t* chunkActorIndices = header->getChunkActorIndices();
const uint32_t thisActorIndex = getIndex();
const bool hasExternalBonds = this->hasExternalBonds();
// Make sure there are no dirty nodes
if (m_graphNodeCount)
{
const uint32_t* firstDirtyNodeIndices = header->getFamilyGraph()->getFirstDirtyNodeIndices();
if (!isInvalidIndex(firstDirtyNodeIndices[thisActorIndex]))
{
NVBLASTLL_LOG_ERROR(logFn, "Nv::Blast::Actor::serialize: instance graph has dirty nodes. Call Nv::Blast::Actor::findIslands before serializing.");
return 0;
}
}
uint64_t offset = 0;
// Header
ActorSerializationHeader* serHeader = reinterpret_cast<ActorSerializationHeader*>(buffer);
offset = align16(sizeof(ActorSerializationHeader));
if (offset > bufferSize)
{
return 0; // Buffer size insufficient
}
serHeader->m_formatVersion = ActorSerializationFormat::Current;
serHeader->m_size = 0; // Will be updated below
serHeader->m_index = thisActorIndex;
serHeader->m_visibleChunkCount = m_visibleChunkCount;
serHeader->m_graphNodeCount = m_graphNodeCount;
serHeader->m_leafChunkCount = m_leafChunkCount;
// Visible chunk indices
{
serHeader->m_visibleChunkIndicesOffset = (uint32_t)offset;
offset = align16(offset + m_visibleChunkCount*sizeof(uint32_t));
if (offset > bufferSize)
{
NVBLASTLL_LOG_ERROR(logFn, "Nv::Blast::Actor::Actor::serialize: buffer size exceeded.");
return 0; // Buffer size insufficient
}
uint32_t* serVisibleChunkIndices = serHeader->getVisibleChunkIndices();
uint32_t serVisibleChunkCount = 0;
for (Actor::VisibleChunkIt i = *this; (bool)i; ++i)
{
NVBLAST_ASSERT(serVisibleChunkCount < m_visibleChunkCount);
serVisibleChunkIndices[serVisibleChunkCount++] = (uint32_t)i;
}
NVBLAST_ASSERT(serVisibleChunkCount == m_visibleChunkCount);
}
// Graph node indices
{
serHeader->m_graphNodeIndicesOffset = (uint32_t)offset;
offset = align16(offset + m_graphNodeCount*sizeof(uint32_t));
if (offset > bufferSize)
{
NVBLASTLL_LOG_ERROR(logFn, "Nv::Blast::Actor::serialize: buffer size exceeded.");
return 0; // Buffer size insufficient
}
uint32_t* serGraphNodeIndices = serHeader->getGraphNodeIndices();
uint32_t serGraphNodeCount = 0;
for (Actor::GraphNodeIt i = *this; (bool)i; ++i)
{
NVBLAST_ASSERT(serGraphNodeCount < m_graphNodeCount);
serGraphNodeIndices[serGraphNodeCount++] = (uint32_t)i;
}
NVBLAST_ASSERT(serGraphNodeCount == m_graphNodeCount);
}
// Lower support chunk healths
{
serHeader->m_lowerSupportChunkHealthsOffset = (uint32_t)offset;
float* serLowerSupportChunkHealths = serHeader->getLowerSupportChunkHealths();
const float* subsupportHealths = header->getSubsupportChunkHealths();
const uint32_t subsupportChunkCount = asset->getUpperSupportChunkCount();
if (m_graphNodeCount > 0)
{
uint32_t serLowerSupportChunkCount = 0;
const float* graphNodeHealths = header->getLowerSupportChunkHealths();
for (Actor::GraphNodeIt i = *this; (bool)i; ++i)
{
const uint32_t graphNodeIndex = (uint32_t)i;
const uint32_t supportChunkIndex = graphChunkIndices[graphNodeIndex];
if (isInvalidIndex(supportChunkIndex))
{
continue;
}
serLowerSupportChunkHealths[serLowerSupportChunkCount++] = graphNodeHealths[graphNodeIndex];
offset += sizeof(float);
Asset::DepthFirstIt j(*asset, supportChunkIndex);
NVBLAST_ASSERT((bool)j);
++j; // Skip first (support) chunk, it's already been handled
for (; (bool)j; ++j)
{
if (offset >= bufferSize)
{
NVBLASTLL_LOG_ERROR(logFn, "Nv::Blast::Actor::serialize: buffer size exceeded.");
return 0; // Buffer size insufficient
}
serLowerSupportChunkHealths[serLowerSupportChunkCount++] = subsupportHealths[(uint32_t)j - subsupportChunkCount];
offset += sizeof(float);
}
}
}
else // Single subsupport chunk
if (!isInvalidIndex(m_firstVisibleChunkIndex))
{
NVBLAST_ASSERT(m_firstVisibleChunkIndex >= subsupportChunkCount);
if (offset >= bufferSize)
{
NVBLASTLL_LOG_ERROR(logFn, "Nv::Blast::Actor::serialize: buffer size exceeded.");
return 0; // Buffer size insufficient
}
*serLowerSupportChunkHealths = subsupportHealths[m_firstVisibleChunkIndex - subsupportChunkCount];
offset += sizeof(float);
}
}
offset = align16(offset);
// Bond healths
uint32_t serBondCount = 0;
{
serHeader->m_bondHealthsOffset = (uint32_t)offset;
float* serBondHealths = serHeader->getBondHealths();
const float* bondHealths = header->getBondHealths();
for (Actor::GraphNodeIt i = *this; (bool)i; ++i)
{
const uint32_t graphNodeIndex = (uint32_t)i;
for (uint32_t adjacentIndex = graphAdjacencyPartition[graphNodeIndex]; adjacentIndex < graphAdjacencyPartition[graphNodeIndex + 1]; ++adjacentIndex)
{
const uint32_t adjacentNodeIndex = graphAdjacentNodeIndices[adjacentIndex];
if (adjacentNodeIndex > graphNodeIndex) // So as not to double-count
{
// Only count if the adjacent node belongs to this actor
const uint32_t adjacentChunkIndex = graphChunkIndices[adjacentNodeIndex];
if ((hasExternalBonds && isInvalidIndex(adjacentChunkIndex)) || (!isInvalidIndex(adjacentChunkIndex) && chunkActorIndices[adjacentChunkIndex] == thisActorIndex))
{
if (offset >= bufferSize)
{
NVBLASTLL_LOG_ERROR(logFn, "Nv::Blast::Actor::serialize: buffer size exceeded.");
return 0; // Buffer size insufficient
}
const uint32_t adjacentBondIndex = graphAdjacentBondIndices[adjacentIndex];
serBondHealths[serBondCount++] = bondHealths[adjacentBondIndex];
offset += sizeof(float);
}
}
}
}
}
offset = align16(offset);
// Fast routes
{
serHeader->m_fastRouteOffset = (uint32_t)offset;
offset = align16(offset + m_graphNodeCount*sizeof(uint32_t));
if (offset > bufferSize)
{
NVBLASTLL_LOG_ERROR(logFn, "Nv::Blast::Actor::serialize: buffer size exceeded.");
return 0; // Buffer size insufficient
}
uint32_t* serFastRoute = serHeader->getFastRoute();
const uint32_t* fastRoute = header->getFamilyGraph()->getFastRoute();
for (Actor::GraphNodeIt i = *this; (bool)i; ++i)
{
*serFastRoute++ = fastRoute[(uint32_t)i];
}
}
// Hop counts
{
serHeader->m_hopCountsOffset = (uint32_t)offset;
offset = align16(offset + m_graphNodeCount*sizeof(uint32_t));
if (offset > bufferSize)
{
NVBLASTLL_LOG_ERROR(logFn, "Nv::Blast::Actor::serialize: buffer size exceeded.");
return 0; // Buffer size insufficient
}
uint32_t* serHopCounts = serHeader->getHopCounts();
const uint32_t* hopCounts = header->getFamilyGraph()->getHopCounts();
for (Actor::GraphNodeIt i = *this; (bool)i; ++i)
{
*serHopCounts++ = hopCounts[(uint32_t)i];
}
}
// Edge removed array
if (serBondCount > 0)
{
serHeader->m_edgeRemovedArrayOffset = (uint32_t)offset;
offset = align16(offset + FixedBoolArray::requiredMemorySize(serBondCount));
if (offset > bufferSize)
{
NVBLASTLL_LOG_ERROR(logFn, "Nv::Blast::Actor::serialize: buffer size exceeded.");
return 0; // Buffer size insufficient
}
uint32_t serBondIndex = 0;
FixedBoolArray* serEdgeRemovedArray = serHeader->getEdgeRemovedArray();
new (serEdgeRemovedArray)FixedBoolArray(serBondCount);
serEdgeRemovedArray->fill(); // Set all bits; they are reset below as bonds are found to be intact
const FixedBoolArray* edgeRemovedArray = header->getFamilyGraph()->getIsEdgeRemoved();
for (Actor::GraphNodeIt i = *this; (bool)i; ++i)
{
const uint32_t graphNodeIndex = (uint32_t)i;
for (uint32_t adjacentIndex = graphAdjacencyPartition[graphNodeIndex]; adjacentIndex < graphAdjacencyPartition[graphNodeIndex + 1]; ++adjacentIndex)
{
const uint32_t adjacentNodeIndex = graphAdjacentNodeIndices[adjacentIndex];
if (adjacentNodeIndex > graphNodeIndex) // So as not to double-count
{
// Only count if the adjacent node belongs to this actor
const uint32_t adjacentChunkIndex = graphChunkIndices[adjacentNodeIndex];
if ((hasExternalBonds && isInvalidIndex(adjacentChunkIndex)) || (!isInvalidIndex(adjacentChunkIndex) && chunkActorIndices[adjacentChunkIndex] == thisActorIndex))
{
const uint32_t adjacentBondIndex = graphAdjacentBondIndices[adjacentIndex];
if (!edgeRemovedArray->test(adjacentBondIndex))
{
serEdgeRemovedArray->reset(serBondIndex);
}
++serBondIndex;
}
}
}
}
}
// Finally record size
serHeader->m_size = static_cast<uint32_t>(offset);
return serHeader->m_size;
}
uint32_t Actor::serializationRequiredStorage(NvBlastLog logFn) const
{
const Asset* asset = getAsset();
const SupportGraph& graph = asset->m_graph;
const uint32_t* graphChunkIndices = graph.getChunkIndices();
const uint32_t* graphAdjacencyPartition = graph.getAdjacencyPartition();
const uint32_t* graphAdjacentNodeIndices = graph.getAdjacentNodeIndices();
const uint32_t* graphNodeIndexLinks = getFamilyHeader()->getGraphNodeIndexLinks();
const uint32_t* chunkActorIndices = getFamilyHeader()->getChunkActorIndices();
const uint32_t thisActorIndex = getIndex();
const bool hasExternalBonds = this->hasExternalBonds();
// Lower-support chunk count and bond counts for this actor need to be calculated. Iterate over all support chunks to count these.
uint32_t lowerSupportChunkCount = 0;
uint32_t bondCount = 0;
if (m_graphNodeCount > 0)
{
for (uint32_t graphNodeIndex = m_firstGraphNodeIndex; !isInvalidIndex(graphNodeIndex); graphNodeIndex = graphNodeIndexLinks[graphNodeIndex])
{
// Update bond count
for (uint32_t adjacentIndex = graphAdjacencyPartition[graphNodeIndex]; adjacentIndex < graphAdjacencyPartition[graphNodeIndex + 1]; ++adjacentIndex)
{
const uint32_t adjacentNodeIndex = graphAdjacentNodeIndices[adjacentIndex];
if (adjacentNodeIndex > graphNodeIndex) // So as not to double-count
{
// Only count if the adjacent node belongs to this actor or the world
const uint32_t adjacentChunkIndex = graphChunkIndices[adjacentNodeIndex];
if ((hasExternalBonds && isInvalidIndex(adjacentChunkIndex)) || (!isInvalidIndex(adjacentChunkIndex) && chunkActorIndices[adjacentChunkIndex] == thisActorIndex))
{
++bondCount;
}
}
}
// Update lower-support chunk count
const uint32_t supportChunkIndex = graphChunkIndices[graphNodeIndex];
if (isInvalidIndex(supportChunkIndex))
{
continue;
}
for (Asset::DepthFirstIt i(*asset, supportChunkIndex); (bool)i; ++i)
{
++lowerSupportChunkCount;
}
}
}
else // Subsupport chunk
{
++lowerSupportChunkCount;
}
const uint64_t dataSize = getActorSerializationSize(m_visibleChunkCount, lowerSupportChunkCount, m_graphNodeCount, bondCount);
if (dataSize > UINT32_MAX)
{
NVBLASTLL_LOG_WARNING(logFn, "Nv::Blast::Actor::serializationRequiredStorage: Serialization block size exceeds 4GB. Returning 0.\n");
return 0;
}
return static_cast<uint32_t>(dataSize);
}
} // namespace Blast
} // namespace Nv
// API implementation
extern "C"
{
uint32_t NvBlastActorGetSerializationSize(const NvBlastActor* actor, NvBlastLog logFn)
{
NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGetSerializationSize: NULL actor pointer input.", return 0);
const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor);
if (!a.isActive())
{
NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetSerializationSize: inactive actor pointer input.");
return 0;
}
return a.serializationRequiredStorage(logFn);
}
uint32_t NvBlastActorSerialize(void* buffer, uint32_t bufferSize, const NvBlastActor* actor, NvBlastLog logFn)
{
NVBLASTLL_CHECK(buffer != nullptr, logFn, "NvBlastActorSerialize: NULL buffer pointer input.", return 0);
NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorSerialize: NULL actor pointer input.", return 0);
const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor);
if (!a.isActive())
{
NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorSerialize: inactive actor pointer input.");
return 0;
}
return a.serialize(buffer, bufferSize, logFn);
}
NvBlastActor* NvBlastFamilyDeserializeActor(NvBlastFamily* family, const void* buffer, NvBlastLog logFn)
{
NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyDeserializeActor: NULL family input. No actor deserialized.", return nullptr);
NVBLASTLL_CHECK(buffer != nullptr, logFn, "NvBlastFamilyDeserializeActor: NULL buffer pointer input. No actor deserialized.", return nullptr);
return Nv::Blast::Actor::deserialize(family, buffer, logFn);
}
} // extern "C"
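// The following usage sketch is illustrative only and not part of the SDK source. It shows a
// typical round trip through the C API above; 'actor', 'family', and 'logFn' are assumed to be
// supplied by the application, and error handling is kept minimal.
//
//   const uint32_t size = NvBlastActorGetSerializationSize(actor, logFn);
//   if (size > 0)
//   {
//       void* buffer = malloc(size);
//       if (NvBlastActorSerialize(buffer, size, actor, logFn) > 0)
//       {
//           // 'family' must be compatible with the serialized actor (same asset, no chunks
//           // shared with actors already present) for deserialization to succeed.
//           NvBlastActor* restored = NvBlastFamilyDeserializeActor(family, buffer, logFn);
//       }
//       free(buffer);
//   }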
| 26,762 | C++ | 42.945813 | 181 | 0.631418 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastChunkHierarchy.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTCHUNKHIERARCHY_H
#define NVBLASTCHUNKHIERARCHY_H
#include "NvBlastIndexFns.h"
#include "NvBlastDLink.h"
#include "NvBlast.h"
#include "NvBlastAssert.h"
#include "NvBlastIteratorBase.h"
namespace Nv
{
namespace Blast
{
/**
Chunk hierarchy depth-first iterator. Traverses subtree with root given by startChunkIndex.
Will not traverse chunks with index at or beyond chunkIndexLimit.
*/
class ChunkDepthFirstIt : public IteratorBase<uint32_t>
{
public:
/** Constructed from a chunk array. */
ChunkDepthFirstIt(const NvBlastChunk* chunks, uint32_t startChunkIndex, uint32_t chunkIndexLimit) :
IteratorBase<uint32_t>(startChunkIndex), m_chunks(chunks), m_stop(startChunkIndex), m_limit(chunkIndexLimit)
{
if (m_curr >= m_limit)
{
m_curr = invalidIndex<uint32_t>();
}
}
/** Pre-increment. Only use if valid() == true. */
uint32_t operator ++ ()
{
NVBLAST_ASSERT(!isInvalidIndex(m_curr));
const NvBlastChunk* chunk = m_chunks + m_curr;
if (chunk->childIndexStop > chunk->firstChildIndex && chunk->firstChildIndex < m_limit)
{
m_curr = chunk->firstChildIndex;
}
else
{
for (;;)
{
if (m_curr == m_stop)
{
m_curr = invalidIndex<uint32_t>();
break;
}
NVBLAST_ASSERT(!isInvalidIndex(chunk->parentChunkIndex)); // This should not be possible with this search
const NvBlastChunk* parentChunk = m_chunks + chunk->parentChunkIndex;
if (++m_curr < parentChunk->childIndexStop)
{
break; // Sibling chunk is valid, that's the next chunk
}
m_curr = chunk->parentChunkIndex;
chunk = parentChunk;
}
}
return m_curr;
}
private:
const NvBlastChunk* m_chunks;
uint32_t m_stop;
uint32_t m_limit;
};
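// Illustrative usage sketch (not part of the SDK source). 'chunks' is assumed to be the asset's
// NvBlastChunk array and 'chunkCount' the number of chunks in it:
//
//   for (ChunkDepthFirstIt it(chunks, rootChunkIndex, chunkCount); (bool)it; ++it)
//   {
//       const uint32_t chunkIndex = (uint32_t)it;   // visits rootChunkIndex's subtree depth-first
//   }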
/**
Enumerates chunk indices in a subtree with root given by chunkIndex, in breadth-first order.
Will not traverse chunks with index at or beyond chunkIndexLimit.
Returns the number of indices written to the chunkIndices array.
*/
NV_INLINE uint32_t enumerateChunkHierarchyBreadthFirst
(
uint32_t* chunkIndices,
uint32_t chunkIndicesSize,
const NvBlastChunk* chunks,
uint32_t chunkIndex,
bool includeRoot = true,
uint32_t chunkIndexLimit = invalidIndex<uint32_t>()
)
{
if (chunkIndicesSize == 0)
{
return 0;
}
uint32_t chunkIndexCount = 0;
bool rootHandled = false;
if (includeRoot)
{
chunkIndices[chunkIndexCount++] = chunkIndex;
rootHandled = true;
}
for (uint32_t curr = 0; !rootHandled || curr < chunkIndexCount;)
{
const NvBlastChunk& chunk = chunks[rootHandled ? chunkIndices[curr] : chunkIndex];
if (chunk.firstChildIndex < chunkIndexLimit)
{
const uint32_t childIndexStop = chunk.childIndexStop < chunkIndexLimit ? chunk.childIndexStop : chunkIndexLimit;
const uint32_t childIndexBufferStop = chunk.firstChildIndex + (chunkIndicesSize - chunkIndexCount);
const uint32_t stop = childIndexStop < childIndexBufferStop ? childIndexStop : childIndexBufferStop;
for (uint32_t childIndex = chunk.firstChildIndex; childIndex < stop; ++childIndex)
{
chunkIndices[chunkIndexCount++] = childIndex;
}
}
if (rootHandled)
{
++curr;
}
rootHandled = true;
}
return chunkIndexCount;
}
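// Illustrative usage sketch (not part of the SDK source). 'chunks' and 'chunkCount' are assumed
// to describe the asset's chunk array; sizing the output buffer to chunkCount covers the worst case:
//
//   std::vector<uint32_t> chunkIndices(chunkCount);
//   const uint32_t written = enumerateChunkHierarchyBreadthFirst(
//       chunkIndices.data(), static_cast<uint32_t>(chunkIndices.size()), chunks, rootChunkIndex);
//   // chunkIndices[0..written) now holds rootChunkIndex's subtree in breadth-first order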
/**
VisibilityRep must have m_firstVisibleChunkIndex and m_visibleChunkCount fields
*/
template<class VisibilityRep>
void updateVisibleChunksFromSupportChunk
(
VisibilityRep* actors,
IndexDLink<uint32_t>* visibleChunkIndexLinks,
uint32_t* chunkActorIndices,
uint32_t actorIndex,
uint32_t supportChunkIndex,
const NvBlastChunk* chunks,
uint32_t upperSupportChunkCount
)
{
uint32_t chunkIndex = supportChunkIndex;
uint32_t chunkActorIndex = chunkActorIndices[supportChunkIndex];
uint32_t newChunkActorIndex = actorIndex;
VisibilityRep& thisActor = actors[actorIndex];
do
{
if (chunkActorIndex == newChunkActorIndex)
{
break; // Nothing to do
}
const uint32_t parentChunkIndex = chunks[chunkIndex].parentChunkIndex;
const uint32_t parentChunkActorIndex = parentChunkIndex != invalidIndex<uint32_t>() ? chunkActorIndices[parentChunkIndex] : invalidIndex<uint32_t>();
const bool chunkVisible = chunkActorIndex != parentChunkActorIndex;
// If the chunk is visible, it needs to be removed from its old actor's visibility list
if (chunkVisible && !isInvalidIndex(chunkActorIndex))
{
VisibilityRep& chunkActor = actors[chunkActorIndex];
IndexDList<uint32_t>().removeFromList(chunkActor.m_firstVisibleChunkIndex, visibleChunkIndexLinks, chunkIndex);
--chunkActor.m_visibleChunkCount;
}
// Now update the chunk's actor index
const uint32_t oldChunkActorIndex = chunkActorIndices[chunkIndex];
chunkActorIndices[chunkIndex] = newChunkActorIndex;
if (newChunkActorIndex != invalidIndex<uint32_t>() && parentChunkActorIndex != newChunkActorIndex)
{
// The chunk is now visible. Add it to this actor's visibility list
IndexDList<uint32_t>().insertListHead(thisActor.m_firstVisibleChunkIndex, visibleChunkIndexLinks, chunkIndex);
++thisActor.m_visibleChunkCount;
// Remove its children from this actor's visibility list
if (actorIndex != oldChunkActorIndex)
{
const NvBlastChunk& chunk = chunks[chunkIndex];
if (chunk.firstChildIndex < upperSupportChunkCount) // Only need to deal with upper-support children
{
for (uint32_t childChunkIndex = chunk.firstChildIndex; childChunkIndex < chunk.childIndexStop; ++childChunkIndex)
{
if (chunkActorIndices[childChunkIndex] == actorIndex)
{
IndexDList<uint32_t>().removeFromList(thisActor.m_firstVisibleChunkIndex, visibleChunkIndexLinks, childChunkIndex);
--thisActor.m_visibleChunkCount;
}
}
}
}
}
if (parentChunkIndex != invalidIndex<uint32_t>())
{
// If all of its siblings have the same actor index, then the parent will too. Otherwise, the parent will have an invalid actor index and its children will be visible
const NvBlastChunk& parentChunk = chunks[parentChunkIndex];
bool uniform = true;
for (uint32_t childChunkIndex = parentChunk.firstChildIndex; uniform && childChunkIndex < parentChunk.childIndexStop; ++childChunkIndex)
{
uniform = (newChunkActorIndex == chunkActorIndices[childChunkIndex]);
}
if (!uniform)
{
newChunkActorIndex = invalidIndex<uint32_t>();
// No need to search if the parent's actor index is invalid;
// the conditional in the loop below could never be true in that case
if (parentChunkActorIndex != invalidIndex<uint32_t>())
{
for (uint32_t childChunkIndex = parentChunk.firstChildIndex; childChunkIndex < parentChunk.childIndexStop; ++childChunkIndex)
{
const uint32_t childChunkActorIndex = chunkActorIndices[childChunkIndex];
if (childChunkActorIndex != invalidIndex<uint32_t>() && childChunkActorIndex == parentChunkActorIndex)
{
// The child was invisible. Add it to its actor's visibility list
VisibilityRep& childChunkActor = actors[childChunkActorIndex];
IndexDList<uint32_t>().insertListHead(childChunkActor.m_firstVisibleChunkIndex, visibleChunkIndexLinks, childChunkIndex);
++childChunkActor.m_visibleChunkCount;
}
}
}
}
}
// Climb the hierarchy
chunkIndex = parentChunkIndex;
chunkActorIndex = parentChunkActorIndex;
} while (chunkIndex != invalidIndex<uint32_t>());
}
} // namespace Blast
} // namespace Nv
#endif // ifndef NVBLASTCHUNKHIERARCHY_H
| 10,213 | C | 38.898437 | 167 | 0.643592 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastSupportGraph.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTSUPPORTGRAPH_H
#define NVBLASTSUPPORTGRAPH_H
#include "NvBlastIndexFns.h"
#include "NvBlastMemory.h"
namespace Nv
{
namespace Blast
{
/**
Describes the connectivity between support chunks via bonds.
Vertices in the support graph are termed "nodes," and represent particular chunks (NvBlastChunk) in an NvBlastAsset.
The indexing for nodes is not the same as that for chunks. Only some chunks are represented by nodes in the graph,
and these chunks are called "support chunks."
Adjacent node indices and adjacent bond indices are stored for each node, and therefore each bond is represented twice in this graph,
going from node[i] -> node[j] and from node[j] -> node[i]. Therefore the size of the getAdjacentNodeIndices() and getAdjacentBondIndices()
arrays are twice the number of bonds stored in the corresponding NvBlastAsset.
The graph is used as follows. Given a SupportGraph "graph" and node index i, (0 <= i < graph.nodeCount), one may find all
adjacent bonds and nodes using:
const uint32_t* adjacencyPartition = graph.getAdjacencyPartition();
const uint32_t* adjacentNodeIndices = graph.getAdjacentNodeIndices();
const uint32_t* adjacentBondIndices = graph.getAdjacentBondIndices();
// adj is the lookup value in adjacentNodeIndices and adjacentBondIndices
for (uint32_t adj = adjacencyPartition[i]; adj < adjacencyPartition[i+1]; ++adj)
{
// An adjacent node:
uint32_t adjacentNodeIndex = adjacentNodeIndices[adj];
// The corresponding bond (which connects the node with index i to the node with index adjacentNodeIndex):
uint32_t adjacentBondIndex = adjacentBondIndices[adj];
}
For a graph node with index i, the corresponding asset chunk index is found using graph.getChunkIndices()[i]. The reverse mapping
(obtaining a graph node index from an asset chunk index) can be done using the
NvBlastAssetGetChunkToGraphNodeMap(asset, logFn);
function. See the documentation for its use. The returned "node index" for a non-support chunk is the invalid value 0xFFFFFFFF.
*/
struct SupportGraph
{
/**
Total number of nodes in the support graph.
*/
uint32_t m_nodeCount;
/**
Indices of chunks represented by the nodes.
getChunkIndices returns an array of size m_nodeCount.
*/
NvBlastBlockArrayData(uint32_t, m_chunkIndicesOffset, getChunkIndices, m_nodeCount);
/**
Adjacency lookup table, of type uint32_t.
Partitions both the getAdjacentNodeIndices() and the getAdjacentBondIndices() arrays into subsets corresponding to each node.
The size of this array is nodeCount+1.
For 0 <= i < nodeCount, getAdjacencyPartition()[i] is the index of the first element in getAdjacentNodeIndices() (or getAdjacentBondIndices()) for nodes adjacent to the node with index i.
getAdjacencyPartition()[nodeCount] is the size of the getAdjacentNodeIndices() and getAdjacentBondIndices() arrays.
This allows one to easily count the number of nodes adjacent to a node with index i, using getAdjacencyPartition()[i+1] - getAdjacencyPartition()[i].
getAdjacencyPartition returns an array of size m_nodeCount + 1.
*/
NvBlastBlockArrayData(uint32_t, m_adjacencyPartitionOffset, getAdjacencyPartition, m_nodeCount + 1);
/**
Array of uint32_t composed of subarrays holding the indices of nodes adjacent to a given node. The subarrays may be accessed through the getAdjacencyPartition() array.
getAdjacentNodeIndices returns an array of size getAdjacencyPartition()[m_nodeCount].
*/
NvBlastBlockArrayData(uint32_t, m_adjacentNodeIndicesOffset, getAdjacentNodeIndices, getAdjacencyPartition()[m_nodeCount]);
/**
Array of uint32_t composed of subarrays holding the indices of bonds (NvBlastBond) for a given node. The subarrays may be accessed through the getAdjacencyPartition() array.
getAdjacentBondIndices returns an array of size getAdjacencyPartition()[m_nodeCount].
*/
NvBlastBlockArrayData(uint32_t, m_adjacentBondIndicesOffset, getAdjacentBondIndices, getAdjacencyPartition()[m_nodeCount]);
/**
Finds the bond between two given graph nodes (if it exists) and returns the bond index.
If no bond exists, returns invalidIndex<uint32_t>().
\return the index of the bond between the given nodes.
*/
uint32_t findBond(uint32_t nodeIndex0, uint32_t nodeIndex1) const;
};
//////// SupportGraph inline member functions ////////
NV_INLINE uint32_t SupportGraph::findBond(uint32_t nodeIndex0, uint32_t nodeIndex1) const
{
const uint32_t* adjacencyPartition = getAdjacencyPartition();
const uint32_t* adjacentNodeIndices = getAdjacentNodeIndices();
const uint32_t* adjacentBondIndices = getAdjacentBondIndices();
// Iterate through all neighbors of the node with index nodeIndex0
for (uint32_t i = adjacencyPartition[nodeIndex0]; i < adjacencyPartition[nodeIndex0 + 1]; i++)
{
if (adjacentNodeIndices[i] == nodeIndex1)
{
return adjacentBondIndices[i];
}
}
return invalidIndex<uint32_t>();
}
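// Illustrative usage sketch (not part of the SDK source). 'graph' is assumed to be a valid
// SupportGraph; the returned index refers to the asset's bond array:
//
//   const uint32_t bondIndex = graph.findBond(nodeIndex0, nodeIndex1);
//   if (!isInvalidIndex(bondIndex))
//   {
//       // The nodes are connected; bondIndex selects the NvBlastBond joining them.
//   }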
} // namespace Blast
} // namespace Nv
#endif // ifndef NVBLASTSUPPORTGRAPH_H
| 6,716 | C | 43.483443 | 191 | 0.749702 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastActor.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTACTOR_H
#define NVBLASTACTOR_H
#include "NvBlastAsset.h"
#include "NvBlastDLink.h"
#include "NvBlastIteratorBase.h"
#include "NvBlastSupportGraph.h"
#include "NvBlastFamilyGraph.h"
#include "NvBlastPreprocessorInternal.h"
#include <cstring>
namespace Nv
{
namespace Blast
{
// Forward declarations
class FamilyGraph;
struct FamilyHeader;
/**
Internal implementation of solver actor.
These objects are stored within the family in a single array. A pointer to an Actor object will be given
to the user through the NvBlastActor opaque type.
*/
class Actor : public NvBlastActor
{
friend struct FamilyHeader;
friend void updateVisibleChunksFromSupportChunk<>(Actor*, IndexDLink<uint32_t>*, uint32_t*, uint32_t, uint32_t, const NvBlastChunk*, uint32_t);
public:
Actor() : m_familyOffset(0), m_firstVisibleChunkIndex(UINT32_MAX), m_visibleChunkCount(0), m_firstGraphNodeIndex(UINT32_MAX), m_graphNodeCount(0), m_leafChunkCount(0) {}
//////// Accessors ////////
/**
Find the family (see FamilyHeader) that this actor belongs to.
\return a pointer to the FamilyHeader for this actor.
*/
FamilyHeader* getFamilyHeader() const;
/**
Utility to get the asset this actor is associated with, through its family.
\return the asset associated with this actor.
*/
const Asset* getAsset() const;
/**
Since this object is not deleted (unless the family is deleted), we use m_familyOffset
to determine if the actor is valid, or "active." When no actor in the family returns true from isActive(),
it should be safe to delete the family.
\return true iff this actor is valid for use (active).
*/
bool isActive() const;
/**
Whether or not this actor represents a subsupport chunk. If the actor contains a subsupport chunk, then it can have only that chunk.
\return true iff this actor contains a chunk which is a descendant of a support chunk.
*/
bool isSubSupportChunk() const;
/**
Whether or not this actor represents a single support chunk. If the actor contains a single support chunk, it can have no other
chunks associated with it.
\return true iff this actor contains exactly one support chunk.
*/
bool isSingleSupportChunk() const;
/**
Utility to calculate actor index.
\return the index of this actor in the FamilyHeader's getActors() array.
*/
uint32_t getIndex() const;
/**
Offset to block of memory which holds the data associated with all actors in this actor's lineage
\return the family offset.
*/
uint32_t getFamilyOffset() const;
void setFamilyOffset(uint32_t familyOffset);
/**
The number of visible chunks. This is calculated from updateVisibleChunksFromGraphNodes().
See also getFirstVisibleChunkIndex.
\return the number of chunks in the actor's visible chunk index list.
*/
uint32_t getVisibleChunkCount() const;
void setVisibleChunkCount(uint32_t visibleChunkCount);
/**
Access to visible chunk linked list for this actor. The index returned is that of a link in the FamilyHeader's getVisibleChunkIndexLinks().
\return the index of the head of the visible chunk linked list.
*/
uint32_t getFirstVisibleChunkIndex() const;
void setFirstVisibleChunkIndex(uint32_t firstVisibleChunkIndex);
/**
The number of graph nodes, corresponding to support chunks, for this actor.
See also getFirstGraphNodeIndex.
\return the number of graph nodes in the actor's graph node index list.
*/
uint32_t getGraphNodeCount() const;
void setGraphNodeCount(uint32_t graphNodeCount);
/**
The number of leaf chunks for this actor.
\return number of leaf chunks for this actor.
*/
uint32_t getLeafChunkCount() const;
void setLeafChunkCount(uint32_t leafChunkCount);
/**
Access to graph node linked list for this actor. The index returned is that of a link in the FamilyHeader's getGraphNodeIndexLinks().
\return the index of the head of the graph node linked list.
*/
uint32_t getFirstGraphNodeIndex() const;
void setFirstGraphNodeIndex(uint32_t firstGraphNodeIndex);
/**
Access to the index of the first subsupport chunk.
\return the index of the first subsupport chunk.
*/
uint32_t getFirstSubsupportChunkIndex() const;
/**
Access to the support graph.
\return the support graph associated with this actor.
*/
const SupportGraph* getGraph() const;
/**
Access the instance graph for islands searching.
Return the dynamic data generated for the support graph. (See FamilyGraph.)
This is used to store current connectivity information based upon bond and chunk healths, as well as cached intermediate data for faster incremental updates.
*/
FamilyGraph* getFamilyGraph() const;
/**
Access to the chunks, of type NvBlastChunk.
\return an array of size m_chunkCount.
*/
NvBlastChunk* getChunks() const;
/**
Access to the bonds, of type NvBlastBond.
\return an array of size m_bondCount.
*/
NvBlastBond* getBonds() const;
/**
Access to the health for each support chunk and subsupport chunk, of type float.
Use getAsset()->getContiguousLowerSupportIndex() to map lower-support chunk indices into the range of indices valid for this array.
\return a float array of chunk healths.
*/
float* getLowerSupportChunkHealths() const;
/**
Access to the start of the subsupport chunk health array.
\return the array of health values associated with all descendants of support chunks.
*/
float* getSubsupportChunkHealths() const;
/**
Bond health for the interface between two chunks, of type float. Since the bond is shared by two chunks, the same bond health is used for chunk[i] -> chunk[j] as for chunk[j] -> chunk[i].
\return the array of healths associated with all bonds in the support graph.
*/
float* getBondHealths() const;
/**
Graph node index links, of type uint32_t. The successor to node index i is getGraphNodeIndexLinks()[i]. A value of invalidIndex<uint32_t>() indicates no successor.
getGraphNodeIndexLinks returns an array of size equal to the asset's graph node count.
*/
const uint32_t* getGraphNodeIndexLinks() const;
//////// Iterators ////////
/**
Visible chunk iterator. Usage:
Given a solver actor a,
for (Actor::VisibleChunkIt i = a; (bool)i; ++i)
{
uint32_t visibleChunkIndex = (uint32_t)i;
// visibleChunkIndex references the asset index list
}
*/
class VisibleChunkIt : public DListIt<uint32_t>
{
public:
/** Constructed from an actor. */
VisibleChunkIt(const Actor& actor);
};
/**
Graph node iterator. Usage:
Given a solver actor a,
for (Actor::GraphNodeIt i = a; (bool)i; ++i)
{
uint32_t graphNodeIndex = (uint32_t)i;
// graphNodeIndex references the asset's graph node index list
}
*/
class GraphNodeIt : public LListIt<uint32_t>
{
public:
/** Constructed from an actor. */
GraphNodeIt(const Actor& actor);
};
//////// Operations ////////
/**
Create an actor from a descriptor (creates a family). This actor will represent an unfractured instance of the asset.
The asset must be in a valid state, for example each chunk hierarchy in it must contain at least one support chunk (a single
support chunk in a hierarchy corresponds to the root chunk). This will always be the case for assets created by NvBlastCreateAsset.
\param[in] family Family in which to create a new actor. The family must be valid and have no other actors in it. (See createFamily.)
\param[in] desc Actor initialization data, must be a valid pointer.
\param[in] scratch User-supplied scratch memory of size createRequiredScratch(desc) bytes.
\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
\return the new actor if the input is valid (by the conditions described above), NULL otherwise.
*/
static Actor* create(NvBlastFamily* family, const NvBlastActorDesc* desc, void* scratch, NvBlastLog logFn);
/**
Returns the size of the scratch space (in bytes) required to be passed into the create function, based upon
the family that will be passed to the create function.
\param[in] family The family being instanced.
\return the number of bytes required.
*/
static size_t createRequiredScratch(const NvBlastFamily* family, NvBlastLog logFn);
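// Illustrative usage sketch (not part of the SDK source). 'family', 'desc', and 'logFn' are
// assumed to be supplied by the caller; using std::vector for the scratch buffer is an
// application-level choice:
//
//   std::vector<char> scratch(Actor::createRequiredScratch(family, logFn));
//   Actor* actor = Actor::create(family, desc, scratch.data(), logFn);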
/**
Deserialize a single Actor from a buffer. An actor family must be given, into which
the actor will be inserted if it is compatible. That is, it must not share any chunks or internal
IDs with the actors already present in the block.
\param[in] family Family in which to deserialize the actor.
\param[in] buffer Buffer containing the serialized actor data.
\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
\return the deserialized actor if successful, NULL otherwise.
*/
static Actor* deserialize(NvBlastFamily* family, const void* buffer, NvBlastLog logFn);
/**
Serialize actor into single-actor buffer.
\param[out] buffer User-supplied buffer, must be at least of size given by NvBlastActorGetSerializationSize(actor).
\param[in] bufferSize The size of the user-supplied buffer. The buffer size must be less than 4GB. If NvBlastActorGetSerializationSize(actor) >= 4GB, this actor cannot be serialized with this method.
\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
\return the number of bytes written to the buffer, or 0 if there is an error (such as an under-sized buffer).
*/
uint32_t serialize(void* buffer, uint32_t bufferSize, NvBlastLog logFn) const;
/**
Calculate the space required to serialize this actor.
\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
\return the required buffer size in bytes.
*/
uint32_t serializationRequiredStorage(NvBlastLog logFn) const;
/**
Release this actor's association with a family, if any. This actor should be considered deleted
after this function is called.
\return true if release was successful (actor was active).
*/
bool release();
//////// Damage and fracturing methods ////////
/**
See NvBlastActorGenerateFracture
*/
void generateFracture(NvBlastFractureBuffers* commandBuffers, const NvBlastDamageProgram& program, const void* programParams, NvBlastLog logFn, NvBlastTimers* timers) const;
/**
Damage the bond between two chunks by the given health amount (the instance graph will also be notified if the bond is broken as a result).
*/
uint32_t damageBond(uint32_t nodeIndex0, uint32_t nodeIndex1, float healthDamage);
/**
TODO: document
*/
void damageBond(uint32_t nodeIndex0, uint32_t nodeIndex1, uint32_t bondIndex, float healthDamage);
/**
TODO: document
*/
uint32_t damageBond(const NvBlastBondFractureData& cmd);
/**
See NvBlastActorApplyFracture
*/
void applyFracture(NvBlastFractureBuffers* eventBuffers, const NvBlastFractureBuffers* commands, NvBlastLog logFn, NvBlastTimers* timers);
/**
The scratch space required to call the findIslands function, or the split function, in bytes.
\return the number of bytes required.
*/
size_t splitRequiredScratch() const;
/**
See NvBlastActorSplit
*/
uint32_t split(NvBlastActorSplitEvent* result, uint32_t newActorsMaxCount, void* scratch, NvBlastLog logFn, NvBlastTimers* timers);
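// Illustrative usage sketch (not part of the SDK source). 'actor', 'logFn', and 'timers' are
// assumed to come from the caller, and 'maxNewActors' is assumed to be the asset's leaf chunk
// count. The NvBlastActorSplitEvent field names are assumed from its declaration in NvBlast.h.
//
//   std::vector<char> scratch(actor->splitRequiredScratch());
//   std::vector<NvBlastActor*> children(maxNewActors);
//   NvBlastActorSplitEvent event;
//   event.deletedActor = nullptr;
//   event.newActors = children.data();
//   const uint32_t newActorCount = actor->split(&event, maxNewActors, scratch.data(), logFn, timers);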
/**
Perform islands search. Bonds which are broken when their health values drop to zero (or below) may lead
to new islands of chunks which need to be split into new actors. This function labels all nodes in the instance
graph (see FamilyGraph) with a unique index per island that may be used as actor indices for new islands.
\param[in] scratch User-supplied scratch memory of size splitRequiredScratch().
\return the number of new islands found.
*/
uint32_t findIslands(void* scratch);
/**
Partition this actor into smaller pieces.
If this actor represents a single support or subsupport chunk, then after this operation
this actor will be released if child chunks are created (see Return value), and its pointer will no longer be valid for use (unless it appears in the newActors list).
This function will not split a leaf chunk actor. In that case, the actor is not destroyed and this function returns 0.
\param[in] newActors user-supplied array of actor pointers to hold the actors generated from this partitioning.
This array must be of size equal to the number of leaf chunks in the asset, to guarantee
that all actors are reported. (See AssetDataHeader::m_leafChunkCount.)
\param[in] newActorsSize The size of the user-supplied newActors array.
\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
\return the number of new actors created. If greater than newActorsSize, some actors are not reported in the newActors array.
*/
uint32_t partition(Actor** newActors, uint32_t newActorsSize, NvBlastLog logFn);
/**
Recalculate the visible chunk list for this actor based upon its graph node list (does not modify subsupport chunk actors)
*/
void updateVisibleChunksFromGraphNodes();
/**
Partition this actor into smaller pieces if it is a single lower-support chunk actor. Use this function on single support or sub-support chunks.
After this operation, if successful (child chunks created, see Return value), this actor will be released, and its pointer will no longer be valid for use.
This function will not split a leaf chunk actor. In that case, the actor is not destroyed and this function returns 0.
\param[in] newActors User-supplied array of actor pointers to hold the actors generated from this partitioning. Note: this actor will be released.
This array must be of size equal to the lower-support chunk's child count, to guarantee that all actors are reported.
\param[in] newActorsSize The size of the user-supplied newActors array.
\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
\return the number of new actors created.
*/
uint32_t partitionSingleLowerSupportChunk(Actor** newActors, uint32_t newActorsSize, NvBlastLog logFn);
/**
Partition this actor into smaller pieces. Use this function if this actor contains more than one support chunk.
After this operation, if successful, this actor will be released, and its pointer will no longer be valid for use (unless it appears in the newActors list).
\param[in] newActors User-supplied array of actor pointers to hold the actors generated from this partitioning. Note: this actor will not be released,
but will hold a subset of the graph nodes that it had before the function was called.
This array must be of size equal to the number of graph nodes in the asset, to guarantee
that all actors are reported.
\param[in] newActorsSize The size of the user-supplied newActors array.
\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
\return the number of new actors created.
*/
uint32_t partitionMultipleGraphNodes(Actor** newActors, uint32_t newActorsSize, NvBlastLog logFn);
/**
\return true iff this actor contains the "external" support graph node, created when a bond contains the invalidIndex<uint32_t>() value for one of its chunkIndices.
*/
bool hasExternalBonds() const;
/**
\return true iff this actor was damaged and split() call is required.
*/
bool isSplitRequired() const;
private:
//////// Data ////////
/**
Offset to block of memory which holds the data associated with all actors in this actor's lineage.
This offset is positive. The block address is this object's pointer _minus_ the m_familyOffset.
This value is initialized to 0, which denotes an invalid actor. Actors should be obtained through
the FamilyHeader::borrowActor API, which will create a valid offset, and
the FamilyHeader::returnActor API, which will zero the offset.
*/
uint32_t m_familyOffset;
/**
The index of the head of a doubly-linked list of visible chunk indices. If m_firstVisibleChunkIndex == invalidIndex<uint32_t>(),
then there are no visible chunks.
*/
uint32_t m_firstVisibleChunkIndex;
/**
The number of elements in the visible chunk list.
*/
uint32_t m_visibleChunkCount;
/**
The index of the head of a singly-linked list of graph node indices. If m_firstGraphNodeIndex == invalidIndex<uint32_t>(),
then there are no graph nodes.
*/
uint32_t m_firstGraphNodeIndex;
/**
The number of elements in the graph node list.
*/
uint32_t m_graphNodeCount;
/**
The number of leaf chunks in this actor.
*/
uint32_t m_leafChunkCount;
};
} // namespace Blast
} // namespace Nv
#include "NvBlastFamily.h"
namespace Nv
{
namespace Blast
{
//////// Actor inline methods ////////
NV_INLINE FamilyHeader* Actor::getFamilyHeader() const
{
NVBLAST_ASSERT(isActive());
return isActive() ? (FamilyHeader*)((uintptr_t)this - (uintptr_t)m_familyOffset) : nullptr;
}
NV_INLINE const Asset* Actor::getAsset() const
{
return getFamilyHeader()->m_asset;
}
NV_INLINE bool Actor::isActive() const
{
return m_familyOffset != 0;
}
NV_INLINE bool Actor::isSubSupportChunk() const
{
return m_graphNodeCount == 0;
}
NV_INLINE bool Actor::isSingleSupportChunk() const
{
return m_graphNodeCount == 1;
}
NV_INLINE uint32_t Actor::getIndex() const
{
NVBLAST_ASSERT(isActive());
const FamilyHeader* header = getFamilyHeader();
NVBLAST_ASSERT(header != nullptr);
const size_t index = this - header->getActors();
NVBLAST_ASSERT(index <= UINT32_MAX);
return (uint32_t)index;
}
NV_INLINE uint32_t Actor::getFamilyOffset() const
{
return m_familyOffset;
}
NV_INLINE void Actor::setFamilyOffset(uint32_t familyOffset)
{
m_familyOffset = familyOffset;
}
NV_INLINE uint32_t Actor::getVisibleChunkCount() const
{
return m_visibleChunkCount;
}
NV_INLINE void Actor::setVisibleChunkCount(uint32_t visibleChunkCount)
{
m_visibleChunkCount = visibleChunkCount;
}
NV_INLINE uint32_t Actor::getFirstVisibleChunkIndex() const
{
return m_firstVisibleChunkIndex;
}
NV_INLINE void Actor::setFirstVisibleChunkIndex(uint32_t firstVisibleChunkIndex)
{
m_firstVisibleChunkIndex = firstVisibleChunkIndex;
}
NV_INLINE uint32_t Actor::getGraphNodeCount() const
{
return m_graphNodeCount;
}
NV_INLINE void Actor::setGraphNodeCount(uint32_t graphNodeCount)
{
m_graphNodeCount = graphNodeCount;
}
NV_INLINE uint32_t Actor::getLeafChunkCount() const
{
return m_leafChunkCount;
}
NV_INLINE void Actor::setLeafChunkCount(uint32_t leafChunkCount)
{
m_leafChunkCount = leafChunkCount;
}
NV_INLINE uint32_t Actor::getFirstGraphNodeIndex() const
{
return m_firstGraphNodeIndex;
}
NV_INLINE void Actor::setFirstGraphNodeIndex(uint32_t firstGraphNodeIndex)
{
m_firstGraphNodeIndex = firstGraphNodeIndex;
}
NV_INLINE uint32_t Actor::getFirstSubsupportChunkIndex() const
{
return getAsset()->m_firstSubsupportChunkIndex;
}
NV_INLINE const SupportGraph* Actor::getGraph() const
{
return &getAsset()->m_graph;
}
NV_INLINE FamilyGraph* Actor::getFamilyGraph() const
{
return getFamilyHeader()->getFamilyGraph();
}
NV_INLINE NvBlastChunk* Actor::getChunks() const
{
return getAsset()->getChunks();
}
NV_INLINE NvBlastBond* Actor::getBonds() const
{
return getAsset()->getBonds();
}
NV_INLINE float* Actor::getLowerSupportChunkHealths() const
{
return getFamilyHeader()->getLowerSupportChunkHealths();
}
NV_INLINE float* Actor::getSubsupportChunkHealths() const
{
return getFamilyHeader()->getSubsupportChunkHealths();
}
NV_INLINE float* Actor::getBondHealths() const
{
return getFamilyHeader()->getBondHealths();
}
NV_INLINE const uint32_t* Actor::getGraphNodeIndexLinks() const
{
return getFamilyHeader()->getGraphNodeIndexLinks();
}
NV_INLINE bool Actor::release()
{
// Do nothing if this actor is not currently active.
if (!isActive())
{
return false;
}
FamilyHeader* header = getFamilyHeader();
// Clear the graph node list
uint32_t* graphNodeIndexLinks = getFamilyHeader()->getGraphNodeIndexLinks();
while (!isInvalidIndex(m_firstGraphNodeIndex))
{
const uint32_t graphNodeIndex = m_firstGraphNodeIndex;
m_firstGraphNodeIndex = graphNodeIndexLinks[m_firstGraphNodeIndex];
graphNodeIndexLinks[graphNodeIndex] = invalidIndex<uint32_t>();
--m_graphNodeCount;
}
NVBLAST_ASSERT(m_graphNodeCount == 0);
const Asset* asset = getAsset();
// Clear the visible chunk list
IndexDLink<uint32_t>* visibleChunkIndexLinks = header->getVisibleChunkIndexLinks();
uint32_t* chunkActorIndices = header->getChunkActorIndices();
while (!isInvalidIndex(m_firstVisibleChunkIndex))
{
// Descendants of the visible chunk may be accessed again if the actor is deserialized. Clear the subtree.
for (Asset::DepthFirstIt i(*asset, m_firstVisibleChunkIndex, true); (bool)i; ++i)
{
chunkActorIndices[(uint32_t)i] = invalidIndex<uint32_t>();
}
IndexDList<uint32_t>().removeListHead(m_firstVisibleChunkIndex, visibleChunkIndexLinks);
--m_visibleChunkCount;
}
NVBLAST_ASSERT(m_visibleChunkCount == 0);
// Clear the leaf chunk count
m_leafChunkCount = 0;
// This invalidates the actor and decrements the reference count
header->returnActor(*this);
return true;
}
NV_INLINE uint32_t Actor::partition(Actor** newActors, uint32_t newActorsSize, NvBlastLog logFn)
{
NVBLASTLL_CHECK(newActorsSize == 0 || newActors != nullptr, logFn, "Nv::Blast::Actor::partition: NULL newActors pointer array input with non-zero newActorCount.", return 0);
// Call one of two partition functions depending on the actor's support status
return m_graphNodeCount <= 1 ?
partitionSingleLowerSupportChunk(newActors, newActorsSize, logFn) : // This actor will partition into subsupport chunks
partitionMultipleGraphNodes(newActors, newActorsSize, logFn); // This actor will partition into support chunks
}
NV_INLINE bool Actor::hasExternalBonds() const
{
const SupportGraph& graph = *getGraph();
if (graph.m_nodeCount == 0)
{
return false; // This shouldn't happen
}
const uint32_t lastGraphChunkIndex = graph.getChunkIndices()[graph.m_nodeCount - 1];
if (!isInvalidIndex(lastGraphChunkIndex))
{
return false; // There is no external node
}
return getFamilyGraph()->getIslandIds()[graph.m_nodeCount - 1] == getIndex();
}
NV_INLINE bool Actor::isSplitRequired() const
{
NVBLAST_ASSERT(isActive());
if (getGraphNodeCount() <= 1)
{
uint32_t chunkHealthIndex = isSingleSupportChunk() ? getIndex() : getFirstVisibleChunkIndex() - getFirstSubsupportChunkIndex() + getGraph()->m_nodeCount;
float* chunkHealths = getLowerSupportChunkHealths();
if (chunkHealths[chunkHealthIndex] <= 0.0f)
{
const uint32_t chunkIndex = m_graphNodeCount == 0 ? m_firstVisibleChunkIndex : getGraph()->getChunkIndices()[m_firstGraphNodeIndex];
if (!isInvalidIndex(chunkIndex))
{
const NvBlastChunk& chunk = getChunks()[chunkIndex];
uint32_t childCount = chunk.childIndexStop - chunk.firstChildIndex;
return childCount > 0;
}
}
}
else
{
uint32_t* firstDirtyNodeIndices = getFamilyGraph()->getFirstDirtyNodeIndices();
if (!isInvalidIndex(firstDirtyNodeIndices[getIndex()]))
{
return true;
}
}
return false;
}
//////// Actor::VisibleChunkIt inline methods ////////
NV_INLINE Actor::VisibleChunkIt::VisibleChunkIt(const Actor& actor) : DListIt<uint32_t>(actor.m_firstVisibleChunkIndex, actor.getFamilyHeader()->getVisibleChunkIndexLinks())
{
}
//////// Actor::GraphNodeIt inline methods ////////
NV_INLINE Actor::GraphNodeIt::GraphNodeIt(const Actor& actor) : LListIt<uint32_t>(actor.m_firstGraphNodeIndex, actor.getFamilyHeader()->getGraphNodeIndexLinks())
{
}
//////// Helper functions ////////
#if NVBLASTLL_CHECK_PARAMS
/**
Helper function to validate that fracture buffer values are meaningful.
*/
static inline bool isValid(const NvBlastFractureBuffers* buffers)
{
if (buffers->chunkFractureCount != 0 && buffers->chunkFractures == nullptr)
return false;
if (buffers->bondFractureCount != 0 && buffers->bondFractures == nullptr)
return false;
return true;
}
#endif
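// Illustrative sketch (not part of the SDK): a caller-side helper showing how an NvBlastFractureBuffers
// can be filled so that it satisfies the rule enforced by isValid() above - any non-zero count must be
// paired with a non-NULL array. The capacities and the helper name are placeholders chosen for this
// example; the element types are those declared for NvBlastFractureBuffers in NvBlast.h.
#if 0
static inline NvBlastFractureBuffers makeFractureBuffers(NvBlastBondFractureData* bondData, uint32_t maxBondFractures,
                                                         NvBlastChunkFractureData* chunkData, uint32_t maxChunkFractures)
{
    NvBlastFractureBuffers buffers;
    buffers.bondFractureCount = bondData != nullptr ? maxBondFractures : 0;     // count must be zero when the array is NULL
    buffers.bondFractures = bondData;
    buffers.chunkFractureCount = chunkData != nullptr ? maxChunkFractures : 0;  // likewise for chunk fractures
    buffers.chunkFractures = chunkData;
    return buffers;
}
#endif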
} // namespace Blast
} // namespace Nv
#endif // ifndef NVBLASTACTOR_H
| 27,675 | C | 33.294919 | 208 | 0.689106 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastActor.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "NvBlastActor.h"
#include "NvBlastFamilyGraph.h"
#include "NvBlastChunkHierarchy.h"
#include "NvBlastIndexFns.h"
#include "NvBlastDLink.h"
#include "NvBlastGeometry.h"
#include "NvBlastTime.h"
#include <float.h>
#include <algorithm>
namespace Nv
{
namespace Blast
{
//////// Actor static methods ////////
size_t Actor::createRequiredScratch(const NvBlastFamily* family, NvBlastLog logFn)
{
NVBLASTLL_CHECK(family != nullptr && reinterpret_cast<const FamilyHeader*>(family)->m_asset != nullptr, logFn, "Actor::createRequiredScratch: NULL family input or asset.", return 0);
const Asset& solverAsset = *reinterpret_cast<const FamilyHeader*>(family)->m_asset;
return FamilyGraph::findIslandsRequiredScratch(solverAsset.m_graph.m_nodeCount);
}
Actor* Actor::create(NvBlastFamily* family, const NvBlastActorDesc* desc, void* scratch, NvBlastLog logFn)
{
NVBLASTLL_CHECK(family != nullptr, logFn, "Actor::create: NULL family pointer input.", return nullptr);
NVBLASTLL_CHECK(reinterpret_cast<FamilyHeader*>(family)->m_asset != nullptr, logFn, "Actor::create: family has NULL asset.", return nullptr);
NVBLASTLL_CHECK(reinterpret_cast<FamilyHeader*>(family)->m_asset->m_graph.m_nodeCount != 0, logFn, "Actor::create: family's asset has no support chunks.", return nullptr);
NVBLASTLL_CHECK(desc != nullptr, logFn, "Actor::create: NULL desc pointer input.", return nullptr);
NVBLASTLL_CHECK(scratch != nullptr, logFn, "Actor::create: NULL scratch input.", return nullptr);
FamilyHeader* header = reinterpret_cast<FamilyHeader*>(family);
if (header->m_actorCount > 0)
{
NVBLASTLL_LOG_ERROR(logFn, "Actor::create: input family is not empty.");
return nullptr;
}
const Asset& solverAsset = *static_cast<const Asset*>(header->m_asset);
const SupportGraph& graph = solverAsset.m_graph;
// Lower support chunk healths - initialize
float* lowerSupportChunkHealths = header->getLowerSupportChunkHealths();
if (desc->initialSupportChunkHealths != nullptr) // Health array given
{
const uint32_t* supportChunkIndices = graph.getChunkIndices();
for (uint32_t supportChunkNum = 0; supportChunkNum < graph.m_nodeCount; ++supportChunkNum)
{
const float initialHealth = desc->initialSupportChunkHealths[supportChunkNum];
for (Asset::DepthFirstIt i(solverAsset, supportChunkIndices[supportChunkNum]); (bool)i; ++i)
{
lowerSupportChunkHealths[solverAsset.getContiguousLowerSupportIndex((uint32_t)i)] = initialHealth;
}
}
}
else // Use uniform initialization
{
const uint32_t lowerSupportChunkCount = solverAsset.getLowerSupportChunkCount();
for (uint32_t i = 0; i < lowerSupportChunkCount; ++i)
{
lowerSupportChunkHealths[i] = desc->uniformInitialLowerSupportChunkHealth;
}
}
// Bond healths - initialize
const uint32_t bondCount = solverAsset.getBondCount();
float* bondHealths = header->getBondHealths();
if (desc->initialBondHealths != nullptr) // Health array given
{
memcpy(bondHealths, desc->initialBondHealths, bondCount * sizeof(float));
}
else // Use uniform initialization
{
for (uint32_t bondNum = 0; bondNum < bondCount; ++bondNum)
{
bondHealths[bondNum] = desc->uniformInitialBondHealth;
}
}
// Get first actor - NOTE: we don't send an event for this! May need to do so for consistency.
Actor* actor = header->borrowActor(0); // Using actor[0]
// Fill in actor fields
actor->m_firstGraphNodeIndex = 0;
actor->m_graphNodeCount = graph.m_nodeCount;
actor->m_leafChunkCount = solverAsset.m_leafChunkCount;
// Graph node index links - initialize to chain
uint32_t* graphNodeLinks = header->getGraphNodeIndexLinks();
for (uint32_t i = 0; i < graph.m_nodeCount - 1; ++i)
{
graphNodeLinks[i] = i + 1;
}
graphNodeLinks[graph.m_nodeCount - 1] = invalidIndex<uint32_t>();
// Update visible chunks (we assume that all chunks belong to one actor at the beginning)
actor->updateVisibleChunksFromGraphNodes();
// Initialize instance graph with this actor
header->getFamilyGraph()->initialize(actor->getIndex(), &graph);
// Call findIslands to set up the internal instance graph data
header->getFamilyGraph()->findIslands(actor->getIndex(), scratch, &graph);
return actor;
}
//////// Actor member methods ////////
uint32_t Actor::damageBond(uint32_t nodeIndex0, uint32_t nodeIndex1, float healthDamage)
{
const uint32_t bondIndex = getGraph()->findBond(nodeIndex0, nodeIndex1);
damageBond(nodeIndex0, nodeIndex1, bondIndex, healthDamage);
return bondIndex;
}
void Actor::damageBond(uint32_t nodeIndex0, uint32_t nodeIndex1, uint32_t bondIndex, float healthDamage)
{
if (bondIndex == invalidIndex<uint32_t>())
{
NVBLAST_ALWAYS_ASSERT();
return;
}
float* bondHealths = getBondHealths();
if (canTakeDamage(bondHealths[bondIndex]) && healthDamage > 0.0f)
{
// Subtract health
bondHealths[bondIndex] -= healthDamage;
// Was removed?
if (bondHealths[bondIndex] <= 0.0f)
{
// Notify graph that bond was removed
getFamilyGraph()->notifyEdgeRemoved(getIndex(), nodeIndex0, nodeIndex1, bondIndex, getGraph());
bondHealths[bondIndex] = 0.0f; // Doing this for single-actor serialization consistency; should not actually be necessary
}
}
}
uint32_t Actor::damageBond(const NvBlastBondFractureData& cmd)
{
NVBLAST_ASSERT(!isInvalidIndex(cmd.nodeIndex1));
return damageBond(cmd.nodeIndex0, cmd.nodeIndex1, cmd.health);
}
void Actor::generateFracture(NvBlastFractureBuffers* commandBuffers, const NvBlastDamageProgram& program, const void* programParams,
NvBlastLog logFn, NvBlastTimers* timers) const
{
NVBLASTLL_CHECK(commandBuffers != nullptr, logFn, "Actor::generateFracture: NULL commandBuffers pointer input.", return);
NVBLASTLL_CHECK(isValid(commandBuffers), logFn, "NvBlastActorGenerateFracture: commandBuffers memory is NULL but size is > 0.",
commandBuffers->bondFractureCount = 0; commandBuffers->chunkFractureCount = 0; return);
#if NVBLASTLL_CHECK_PARAMS
if (commandBuffers->bondFractureCount == 0 && commandBuffers->chunkFractureCount == 0)
{
NVBLASTLL_LOG_WARNING(logFn, "NvBlastActorGenerateFracture: commandBuffers do not provide any space.");
return;
}
#endif
#if NV_PROFILE
Time time;
#else
NV_UNUSED(timers);
#endif
const SupportGraph* graph = getGraph();
const uint32_t graphNodeCount = getGraphNodeCount();
if (graphNodeCount > 1 && program.graphShaderFunction != nullptr)
{
const NvBlastGraphShaderActor shaderActor = {
getIndex(),
getGraphNodeCount(),
graph->m_nodeCount,
getFirstGraphNodeIndex(),
getGraphNodeIndexLinks(),
graph->getChunkIndices(),
graph->getAdjacencyPartition(),
graph->getAdjacentNodeIndices(),
graph->getAdjacentBondIndices(),
getBonds(),
getChunks(),
getBondHealths(),
getLowerSupportChunkHealths(),
getFamilyHeader()->getFamilyGraph()->getIslandIds()
};
program.graphShaderFunction(commandBuffers, &shaderActor, programParams);
}
else if (graphNodeCount <= 1 && program.subgraphShaderFunction != nullptr)
{
const NvBlastSubgraphShaderActor shaderActor = {
// The conditional (visible vs. support chunk) is needed because we allow single-child chunk chains
// This makes it possible that an actor with a single support chunk will have a different visible chunk (ancestor of the support chunk)
graphNodeCount == 1 ? graph->getChunkIndices()[getFirstGraphNodeIndex()] : getFirstVisibleChunkIndex(),
getChunks()
};
program.subgraphShaderFunction(commandBuffers, &shaderActor, programParams);
}
else
{
commandBuffers->bondFractureCount = 0;
commandBuffers->chunkFractureCount = 0;
}
#if NV_PROFILE
if (timers != nullptr)
{
timers->material += time.getElapsedTicks();
}
#endif
}
size_t Actor::splitRequiredScratch() const
{
// Scratch is reused, just need the max of these two values
return std::max(m_graphNodeCount * sizeof(uint32_t), static_cast<size_t>(FamilyGraph::findIslandsRequiredScratch(getGraph()->m_nodeCount)));
}
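// Illustrative caller-side sketch (not part of the SDK): the scratch size computed by splitRequiredScratch()
// is exposed through NvBlastActorGetRequiredScratchForSplit(), and the output array is typically sized with
// NvBlastActorGetMaxActorCountForSplit(); both C functions are implemented at the end of this file.
// The malloc/free memory management below is a placeholder chosen for the example.
#if 0
static uint32_t splitActorExample(NvBlastActor* actor, NvBlastLog logFn)
{
    const uint32_t maxNewActors = NvBlastActorGetMaxActorCountForSplit(actor, logFn);
    const size_t scratchSize = NvBlastActorGetRequiredScratchForSplit(actor, logFn);
    void* scratch = malloc(scratchSize);
    NvBlastActor** newActors = (NvBlastActor**)malloc(maxNewActors * sizeof(NvBlastActor*));
    NvBlastActorSplitEvent splitEvent;
    splitEvent.deletedActor = nullptr;
    splitEvent.newActors = newActors;
    const uint32_t newActorCount = NvBlastActorSplit(&splitEvent, actor, maxNewActors, scratch, logFn, nullptr);
    // When newActorCount > 0, splitEvent.deletedActor == actor and the new actors replace it.
    free(newActors);
    free(scratch);
    return newActorCount;
}
#endif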
uint32_t Actor::split(NvBlastActorSplitEvent* result, uint32_t newActorsMaxCount, void* scratch, NvBlastLog logFn, NvBlastTimers* timers)
{
NVBLASTLL_CHECK(result != nullptr, logFn, "Actor::split: NULL result pointer input.", return 0);
NVBLASTLL_CHECK(newActorsMaxCount > 0 && result->newActors != nullptr, logFn, "NvBlastActorSplit: no space for results provided.", return 0);
NVBLASTLL_CHECK(scratch != nullptr, logFn, "Actor::split: NULL scratch pointer input.", return 0);
#if NV_PROFILE
Time time;
#else
NV_UNUSED(timers);
#endif
Actor** newActors = reinterpret_cast<Actor**>(result->newActors);
uint32_t actorsCount = 0;
if (getGraphNodeCount() <= 1)
{
uint32_t chunkHealthIndex = isSingleSupportChunk() ? getIndex() : getFirstVisibleChunkIndex() - getFirstSubsupportChunkIndex() + getGraph()->m_nodeCount;
float* chunkHealths = getLowerSupportChunkHealths();
if (chunkHealths[chunkHealthIndex] <= 0.0f)
{
actorsCount = partitionSingleLowerSupportChunk(newActors, newActorsMaxCount, logFn);
for (uint32_t i = 0; i < actorsCount; ++i)
{
Actor* newActor = newActors[i];
uint32_t firstVisible = newActor->getFirstVisibleChunkIndex();
uint32_t firstSub = newActor->getFirstSubsupportChunkIndex();
uint32_t nodeCount = newActor->getGraph()->m_nodeCount;
uint32_t newActorIndex = newActor->getIndex();
uint32_t healthIndex = newActor->isSubSupportChunk() ? firstVisible - firstSub + nodeCount : newActorIndex;
if (chunkHealths[healthIndex] <= 0.0f)
{
uint32_t brittleActors = newActors[i]->partitionSingleLowerSupportChunk(&newActors[actorsCount], newActorsMaxCount - actorsCount, logFn);
actorsCount += brittleActors;
if (brittleActors > 0)
{
actorsCount--;
newActors[i] = newActors[actorsCount];
i--;
}
}
}
}
#if NV_PROFILE
if (timers != nullptr)
{
timers->partition += time.getElapsedTicks();
}
#endif
}
else
{
findIslands(scratch);
#if NV_PROFILE
if (timers != nullptr)
{
timers->island += time.getElapsedTicks();
}
#endif
// Reuse scratch for node list
uint32_t* graphNodeIndexList = reinterpret_cast<uint32_t*>(scratch);
// Get the family header
FamilyHeader* header = getFamilyHeader();
NVBLAST_ASSERT(header != nullptr); // If m_actorEntryDataIndex is valid, this should be too
// Record nodes in this actor before splitting
const uint32_t* graphNodeIndexLinks = header->getGraphNodeIndexLinks(); // Get the links for the graph nodes
uint32_t graphNodeIndexCount = 0;
for (uint32_t graphNodeIndex = m_firstGraphNodeIndex; !isInvalidIndex(graphNodeIndex); graphNodeIndex = graphNodeIndexLinks[graphNodeIndex])
{
if (graphNodeIndexCount >= m_graphNodeCount)
{
// Safety, splitRequiredScratch() only guarantees m_graphNodeCount elements. In any case, this condition shouldn't happen.
NVBLAST_ASSERT(graphNodeIndexCount < m_graphNodeCount);
break;
}
graphNodeIndexList[graphNodeIndexCount++] = graphNodeIndex;
}
actorsCount = partitionMultipleGraphNodes(newActors, newActorsMaxCount, logFn);
if (actorsCount > 1)
{
#if NV_PROFILE
if (timers != nullptr)
{
timers->partition += time.getElapsedTicks();
}
#endif
// Get various pointers and values to iterate
const Asset* asset = getAsset();
Actor* actors = header->getActors();
IndexDLink<uint32_t>* visibleChunkIndexLinks = header->getVisibleChunkIndexLinks();
uint32_t* chunkActorIndices = header->getChunkActorIndices();
const SupportGraph& graph = asset->m_graph;
const uint32_t* graphChunkIndices = graph.getChunkIndices();
const NvBlastChunk* chunks = asset->getChunks();
const uint32_t upperSupportChunkCount = asset->getUpperSupportChunkCount();
const uint32_t* familyGraphIslandIDs = header->getFamilyGraph()->getIslandIds();
// Iterate over all graph nodes and update visible chunk lists
for (uint32_t graphNodeNum = 0; graphNodeNum < graphNodeIndexCount; ++graphNodeNum)
{
const uint32_t graphNodeIndex = graphNodeIndexList[graphNodeNum];
const uint32_t supportChunkIndex = graphChunkIndices[graphNodeIndex];
if (!isInvalidIndex(supportChunkIndex)) // Invalid if this is the world chunk
{
updateVisibleChunksFromSupportChunk<Actor>(actors, visibleChunkIndexLinks, chunkActorIndices, familyGraphIslandIDs[graphNodeIndex], graphChunkIndices[graphNodeIndex], chunks, upperSupportChunkCount);
}
}
// Remove actors with no visible chunks - this can happen if we've split such that the world node is by itself
uint32_t actualActorsCount = 0;
for (uint32_t i = 0; i < actorsCount; ++i)
{
newActors[actualActorsCount] = newActors[i];
if (newActors[actualActorsCount]->getVisibleChunkCount() > 0)
{
++actualActorsCount;
}
else
{
header->returnActor(*newActors[actualActorsCount]);
}
}
actorsCount = actualActorsCount;
#if NV_PROFILE
if (timers != nullptr)
{
timers->visibility += time.getElapsedTicks();
}
#endif
// NOTE: we MUST use header->getLowerSupportChunkHealths() instead of just getLowerSupportChunkHealths() here,
// since this actor has been made inactive at this point. Therefore Actor::getLowerSupportChunkHealths() will return
// garbage since it calls getFamilyHeader() which does not return a valid header if the actor is not active.
const float* chunkHealths = header->getLowerSupportChunkHealths();
for (uint32_t i = 0; i < actorsCount; ++i)
{
Actor* newActor = newActors[i];
if (newActor->getGraphNodeCount() <= 1)
{
const uint32_t firstVisible = newActor->getFirstVisibleChunkIndex();
const uint32_t firstSub = newActor->getFirstSubsupportChunkIndex();
const uint32_t assetNodeCount = newActor->getGraph()->m_nodeCount;
const uint32_t newActorIndex = newActor->getIndex();
const uint32_t healthIndex = newActor->isSubSupportChunk() ? firstVisible - firstSub + assetNodeCount : newActorIndex;
                // This relies on visibility having been updated; subsupport actors only have m_firstVisibleChunkIndex to identify the chunk
if (chunkHealths[healthIndex] <= 0.0f)
{
const uint32_t brittleActors = newActor->partitionSingleLowerSupportChunk(&newActors[actorsCount], newActorsMaxCount - actorsCount, logFn);
actorsCount += brittleActors;
if (brittleActors > 0)
{
actorsCount--;
newActors[i] = newActors[actorsCount];
i--;
}
}
}
}
#if NV_PROFILE
if (timers != nullptr)
{
timers->partition += time.getElapsedTicks();
}
#endif
}
else
{
actorsCount = 0;
}
}
result->deletedActor = actorsCount == 0 ? nullptr : this;
return actorsCount;
}
uint32_t Actor::findIslands(void* scratch)
{
return getFamilyHeader()->getFamilyGraph()->findIslands(getIndex(), scratch, &getAsset()->m_graph);
}
uint32_t Actor::partitionMultipleGraphNodes(Actor** newActors, uint32_t newActorsSize, NvBlastLog logFn)
{
NVBLAST_ASSERT(newActorsSize == 0 || newActors != nullptr);
    // An actor with at most one graph node is a single lower-support chunk and cannot be partitioned by this function
if (m_graphNodeCount <= 1)
{
NVBLASTLL_LOG_WARNING(logFn, "Nv::Blast::Actor::partitionMultipleGraphNodes: actor is a single lower-support chunk, and cannot be partitioned by this function.");
return 0;
}
FamilyHeader* header = getFamilyHeader();
NVBLAST_ASSERT(header != nullptr); // If m_actorEntryDataIndex is valid, this should be too
// Get the links for the graph nodes
uint32_t* graphNodeIndexLinks = header->getGraphNodeIndexLinks();
// Get the graph chunk indices and leaf chunk counts
const Asset* asset = getAsset();
const uint32_t* graphChunkIndices = asset->m_graph.getChunkIndices();
const uint32_t* subtreeLeafChunkCounts = asset->getSubtreeLeafChunkCounts();
// Distribute graph nodes to new actors
uint32_t newActorCount = 0;
const uint32_t thisActorIndex = getIndex();
m_leafChunkCount = 0;
const uint32_t* islandIDs = header->getFamilyGraph()->getIslandIds();
uint32_t lastGraphNodeIndex = invalidIndex<uint32_t>();
uint32_t nextGraphNodeIndex = invalidIndex<uint32_t>();
bool overflow = false;
for (uint32_t graphNodeIndex = m_firstGraphNodeIndex; !isInvalidIndex(graphNodeIndex); graphNodeIndex = nextGraphNodeIndex)
{
nextGraphNodeIndex = graphNodeIndexLinks[graphNodeIndex];
const uint32_t islandID = islandIDs[graphNodeIndex];
if (islandID == thisActorIndex)
{
const uint32_t graphChunkIndex = graphChunkIndices[graphNodeIndex];
if (!isInvalidIndex(graphChunkIndex)) // Invalid if this is the world chunk
{
m_leafChunkCount += subtreeLeafChunkCounts[graphChunkIndex];
}
lastGraphNodeIndex = graphNodeIndex;
continue; // Leave the chunk in this actor
}
// Remove link from this actor
if (isInvalidIndex(lastGraphNodeIndex))
{
m_firstGraphNodeIndex = nextGraphNodeIndex;
}
else
{
graphNodeIndexLinks[lastGraphNodeIndex] = nextGraphNodeIndex;
}
graphNodeIndexLinks[graphNodeIndex] = invalidIndex<uint32_t>();
--m_graphNodeCount;
// See if the chunk had been removed
if (islandID == invalidIndex<uint32_t>())
{
continue;
}
// Get new actor if the islandID is valid
Actor* newActor = header->borrowActor(islandID);
// Check new actor to see if we're adding the first chunk
if (isInvalidIndex(newActor->m_firstGraphNodeIndex))
{
// See if we can fit it in the output list
if (newActorCount < newActorsSize)
{
newActors[newActorCount++] = newActor;
}
else
{
overflow = true;
}
}
// Put link in new actor
graphNodeIndexLinks[graphNodeIndex] = newActor->m_firstGraphNodeIndex;
newActor->m_firstGraphNodeIndex = graphNodeIndex;
++newActor->m_graphNodeCount;
// Add to the actor's leaf chunk count
const uint32_t graphChunkIndex = graphChunkIndices[graphNodeIndex];
if (!isInvalidIndex(graphChunkIndex)) // Invalid if this is the world chunk
{
newActor->m_leafChunkCount += subtreeLeafChunkCounts[graphChunkIndex];
}
}
if (m_graphNodeCount > 0)
{
// There are still chunks in this actor. See if we can fit this in the output list.
if (newActorCount < newActorsSize)
{
newActors[newActorCount++] = this;
}
else
{
overflow = true;
}
}
else
{
// No more chunks; release this actor.
release();
}
if (overflow)
{
NVBLASTLL_LOG_WARNING(logFn, "Nv::Blast::Actor::partitionMultipleGraphNodes: input newActors array could not hold all actors generated.");
}
return newActorCount;
}
uint32_t Actor::partitionSingleLowerSupportChunk(Actor** newActors, uint32_t newActorsSize, NvBlastLog logFn)
{
NVBLAST_ASSERT(newActorsSize == 0 || newActors != nullptr);
    // Ensure this actor is a single lower-support chunk (at most one graph node); otherwise this function cannot partition it
if (m_graphNodeCount > 1)
{
NVBLASTLL_LOG_WARNING(logFn, "Nv::Blast::Actor::partitionSingleLowerSupportChunk: actor is not a single lower-support chunk, and cannot be partitioned by this function.");
return 0;
}
FamilyHeader* header = getFamilyHeader();
// The conditional (visible vs. support chunk) is needed because we allow single-child chunk chains
// This makes it possible that an actor with a single support chunk will have a different visible chunk (ancestor of the support chunk)
const uint32_t chunkIndex = m_graphNodeCount == 0 ? m_firstVisibleChunkIndex : getGraph()->getChunkIndices()[m_firstGraphNodeIndex];
if (isInvalidIndex(chunkIndex))
{
return 0; // This actor has no chunks; only a graph node representing the world
}
NVBLAST_ASSERT(isInvalidIndex(header->getVisibleChunkIndexLinks()[chunkIndex].m_adj[1]));
const NvBlastChunk& chunk = header->m_asset->getChunks()[chunkIndex];
uint32_t childCount = chunk.childIndexStop - chunk.firstChildIndex;
// Warn if we cannot fit all child chunks in the output list
if (childCount > newActorsSize)
{
NVBLASTLL_LOG_WARNING(logFn, "Nv::Blast::Actor::partitionSingleLowerSupportChunk: input newActors array will not hold all actors generated.");
childCount = newActorsSize;
}
// Return if no chunks will be created.
if (childCount == 0)
{
return 0;
}
// Activate a new actor for every child chunk
const Asset* asset = getAsset();
const NvBlastChunk* chunks = asset->getChunks();
const uint32_t firstChildIndex = chunks[chunkIndex].firstChildIndex;
for (uint32_t i = 0; i < childCount; ++i)
{
const uint32_t childIndex = firstChildIndex + i;
NVBLAST_ASSERT(childIndex >= asset->m_firstSubsupportChunkIndex);
const uint32_t actorIndex = asset->m_graph.m_nodeCount + (childIndex - asset->m_firstSubsupportChunkIndex);
NVBLAST_ASSERT(!header->isActorActive(actorIndex));
newActors[i] = header->borrowActor(actorIndex);
newActors[i]->m_firstVisibleChunkIndex = childIndex;
newActors[i]->m_visibleChunkCount = 1;
newActors[i]->m_leafChunkCount = asset->getSubtreeLeafChunkCounts()[childIndex];
}
// Release this actor
release();
return childCount;
}
void Actor::updateVisibleChunksFromGraphNodes()
{
// Only apply this to upper-support chunk actors
if (m_graphNodeCount == 0)
{
return;
}
const Asset* asset = getAsset();
const uint32_t thisActorIndex = getIndex();
// Get various arrays
FamilyHeader* header = getFamilyHeader();
Actor* actors = header->getActors();
IndexDLink<uint32_t>* visibleChunkIndexLinks = header->getVisibleChunkIndexLinks();
uint32_t* chunkActorIndices = header->getChunkActorIndices();
const SupportGraph& graph = asset->m_graph;
const uint32_t* graphChunkIndices = graph.getChunkIndices();
const NvBlastChunk* chunks = asset->getChunks();
const uint32_t upperSupportChunkCount = asset->getUpperSupportChunkCount();
// Iterate over all graph nodes and update visible chunk list
const uint32_t* graphNodeIndexLinks = header->getGraphNodeIndexLinks();
for (uint32_t graphNodeIndex = m_firstGraphNodeIndex; !isInvalidIndex(graphNodeIndex); graphNodeIndex = graphNodeIndexLinks[graphNodeIndex])
{
const uint32_t supportChunkIndex = graphChunkIndices[graphNodeIndex];
if (!isInvalidIndex(supportChunkIndex)) // Invalid if this is the world chunk
{
updateVisibleChunksFromSupportChunk<Actor>(actors, visibleChunkIndexLinks, chunkActorIndices, thisActorIndex, graphChunkIndices[graphNodeIndex], chunks, upperSupportChunkCount);
}
}
}
} // namespace Blast
} // namespace Nv
// API implementation
extern "C"
{
NvBlastActor* NvBlastFamilyCreateFirstActor(NvBlastFamily* family, const NvBlastActorDesc* desc, void* scratch, NvBlastLog logFn)
{
NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyCreateFirstActor: NULL family input.", return nullptr);
NVBLASTLL_CHECK(desc != nullptr, logFn, "NvBlastFamilyCreateFirstActor: NULL desc input.", return nullptr);
NVBLASTLL_CHECK(scratch != nullptr, logFn, "NvBlastFamilyCreateFirstActor: NULL scratch input.", return nullptr);
return Nv::Blast::Actor::create(family, desc, scratch, logFn);
}
size_t NvBlastFamilyGetRequiredScratchForCreateFirstActor(const NvBlastFamily* family, NvBlastLog logFn)
{
NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyGetRequiredScratchForCreateFirstActor: NULL family input.", return 0);
NVBLASTLL_CHECK(reinterpret_cast<const Nv::Blast::FamilyHeader*>(family)->m_asset != nullptr,
logFn, "NvBlastFamilyGetRequiredScratchForCreateFirstActor: family has NULL asset.", return 0);
return Nv::Blast::Actor::createRequiredScratch(family, logFn);
}
bool NvBlastActorDeactivate(NvBlastActor* actor, NvBlastLog logFn)
{
NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorDeactivate: NULL actor input.", return false);
Nv::Blast::Actor& a = *static_cast<Nv::Blast::Actor*>(actor);
if (!a.isActive())
{
NVBLASTLL_LOG_WARNING(logFn, "NvBlastActorDeactivate: inactive actor input.");
}
return a.release();
}
uint32_t NvBlastActorGetVisibleChunkCount(const NvBlastActor* actor, NvBlastLog logFn)
{
NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGetVisibleChunkCount: NULL actor input.", return 0);
const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor);
if (!a.isActive())
{
NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetVisibleChunkCount: inactive actor input.");
return 0;
}
return a.getVisibleChunkCount();
}
uint32_t NvBlastActorGetVisibleChunkIndices(uint32_t* visibleChunkIndices, uint32_t visibleChunkIndicesSize, const NvBlastActor* actor, NvBlastLog logFn)
{
NVBLASTLL_CHECK(visibleChunkIndices != nullptr, logFn, "NvBlastActorGetVisibleChunkIndices: NULL visibleChunkIndices pointer input.", return 0);
NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGetVisibleChunkIndices: NULL actor pointer input.", return 0);
const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor);
if (!a.isActive())
{
NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetVisibleChunkIndices: inactive actor pointer input.");
return 0;
}
// Iterate through visible chunk list and write to supplied array
uint32_t indexCount = 0;
for (Nv::Blast::Actor::VisibleChunkIt i = a; indexCount < visibleChunkIndicesSize && (bool)i; ++i)
{
visibleChunkIndices[indexCount++] = (uint32_t)i;
}
return indexCount;
}
uint32_t NvBlastActorGetGraphNodeCount(const NvBlastActor* actor, NvBlastLog logFn)
{
NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGetGraphNodeCount: NULL actor pointer input.", return 0);
const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor);
if (!a.isActive())
{
NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetGraphNodeCount: inactive actor pointer input.");
return 0;
}
return a.getGraphNodeCount();
}
uint32_t NvBlastActorGetGraphNodeIndices(uint32_t* graphNodeIndices, uint32_t graphNodeIndicesSize, const NvBlastActor* actor, NvBlastLog logFn)
{
NVBLASTLL_CHECK(graphNodeIndices != nullptr, logFn, "NvBlastActorGetGraphNodeIndices: NULL graphNodeIndices pointer input.", return 0);
NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGetGraphNodeIndices: NULL actor pointer input.", return 0);
const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor);
if (!a.isActive())
{
NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetGraphNodeIndices: inactive actor pointer input.");
return 0;
}
// Iterate through graph node list and write to supplied array
const uint32_t* graphChunkIndices = a.getAsset()->m_graph.getChunkIndices();
uint32_t indexCount = 0;
for (Nv::Blast::Actor::GraphNodeIt i = a; indexCount < graphNodeIndicesSize && (bool)i; ++i)
{
const uint32_t graphNodeIndex = (uint32_t)i;
if (!Nv::Blast::isInvalidIndex(graphChunkIndices[graphNodeIndex]))
{
graphNodeIndices[indexCount++] = graphNodeIndex;
}
}
return indexCount;
}
const float* NvBlastActorGetBondHealths(const NvBlastActor* actor, NvBlastLog logFn)
{
NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGetBondHealths: NULL actor pointer input.", return nullptr);
const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor);
if (!a.isActive())
{
NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetBondHealths: inactive actor pointer input.");
return nullptr;
}
return a.getFamilyHeader()->getBondHealths();
}
const float* NvBlastActorGetCachedBondHeaths(const NvBlastActor* actor, NvBlastLog logFn)
{
NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGetCachedBondHeaths: NULL actor pointer input.", return nullptr);
const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor);
if (!a.isActive())
{
NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetCachedBondHeaths: inactive actor pointer input.");
return nullptr;
}
return a.getFamilyHeader()->getCachedBondHealths();
}
bool NvBlastActorCacheBondHeath(const NvBlastActor* actor, uint32_t bondIndex, NvBlastLog logFn)
{
    NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorCacheBondHeath: NULL actor pointer input.", return false);
const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor);
if (!a.isActive())
{
NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorCacheBondHeath: inactive actor pointer input.");
return false;
}
// copy the value over from the current bond health
Nv::Blast::FamilyHeader* familyHeader = a.getFamilyHeader();
const float curHealth = familyHeader->getBondHealths()[bondIndex];
familyHeader->getCachedBondHealths()[bondIndex] = curHealth;
return true;
}
NvBlastFamily* NvBlastActorGetFamily(const NvBlastActor* actor, NvBlastLog logFn)
{
NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGetFamily: NULL actor pointer input.", return nullptr);
const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor);
if (!a.isActive())
{
NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetFamily: inactive actor pointer input.");
return nullptr;
}
return reinterpret_cast<NvBlastFamily*>(a.getFamilyHeader());
}
uint32_t NvBlastActorGetIndex(const NvBlastActor* actor, NvBlastLog logFn)
{
NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGetIndex: NULL actor pointer input.", return Nv::Blast::invalidIndex<uint32_t>());
const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor);
if (!a.isActive())
{
NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetIndex: actor is not active.");
return Nv::Blast::invalidIndex<uint32_t>();
}
return a.getIndex();
}
void NvBlastActorGenerateFracture
(
NvBlastFractureBuffers* commandBuffers,
const NvBlastActor* actor,
const NvBlastDamageProgram program,
const void* programParams,
NvBlastLog logFn,
NvBlastTimers* timers
)
{
NVBLASTLL_CHECK(commandBuffers != nullptr, logFn, "NvBlastActorGenerateFracture: NULL commandBuffers pointer input.", return);
NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGenerateFracture: NULL actor pointer input.", return);
const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor);
if (!a.isActive())
{
NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGenerateFracture: actor is not active.");
commandBuffers->bondFractureCount = 0;
commandBuffers->chunkFractureCount = 0;
return;
}
a.generateFracture(commandBuffers, program, programParams, logFn, timers);
}
void NvBlastActorApplyFracture
(
NvBlastFractureBuffers* eventBuffers,
NvBlastActor* actor,
const NvBlastFractureBuffers* commands,
NvBlastLog logFn,
NvBlastTimers* timers
)
{
NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorApplyFracture: NULL actor pointer input.", return);
NVBLASTLL_CHECK(commands != nullptr, logFn, "NvBlastActorApplyFracture: NULL commands pointer input.", return);
NVBLASTLL_CHECK(Nv::Blast::isValid(commands), logFn, "NvBlastActorApplyFracture: commands memory is NULL but size is > 0.", return);
Nv::Blast::Actor& a = *static_cast<Nv::Blast::Actor*>(actor);
if (!a.isActive())
{
NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorApplyFracture: actor is not active.");
if (eventBuffers != nullptr)
{
eventBuffers->bondFractureCount = 0;
eventBuffers->chunkFractureCount = 0;
}
return;
}
a.getFamilyHeader()->applyFracture(eventBuffers, commands, &a, logFn, timers);
}
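// Illustrative sketch (not part of the SDK): a typical damage step pairing the two calls above.
// The damage program and its parameters ("program" and "params") are assumptions here - in practice they
// come from application- or extension-supplied shaders (e.g. NvBlastExtDamageShaders); the fracture
// buffer sizing is likewise the caller's choice. Event reporting is skipped by passing NULL eventBuffers,
// which the implementation above explicitly allows.
#if 0
static void damageActorExample(NvBlastActor* actor, NvBlastDamageProgram program, const void* params,
                               NvBlastFractureBuffers buffers, NvBlastLog logFn)
{
    // Fill the buffers with fracture commands produced by the damage program...
    NvBlastActorGenerateFracture(&buffers, actor, program, params, logFn, nullptr);
    // ...then apply the commands to the actor (no event buffers requested).
    NvBlastActorApplyFracture(nullptr, actor, &buffers, logFn, nullptr);
    // NvBlastActorIsSplitRequired()/NvBlastActorSplit() (below) can then break the actor apart.
}
#endif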
size_t NvBlastActorGetRequiredScratchForSplit(const NvBlastActor* actor, NvBlastLog logFn)
{
NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGetRequiredScratchForSplit: NULL actor input.", return 0);
const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor);
if (!a.isActive())
{
NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetRequiredScratchForSplit: actor is not active.");
return 0;
}
return a.splitRequiredScratch();
}
uint32_t NvBlastActorGetMaxActorCountForSplit(const NvBlastActor* actor, NvBlastLog logFn)
{
NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGetMaxActorCountForSplit: NULL actor input.", return 0);
const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor);
if (!a.isActive())
{
NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetMaxActorCountForSplit: actor is not active.");
return 0;
}
return a.getLeafChunkCount() + 1; // GWD-167 workaround (+1)
}
uint32_t NvBlastActorSplit
(
NvBlastActorSplitEvent* result,
NvBlastActor* actor,
uint32_t newActorsMaxCount,
void* scratch,
NvBlastLog logFn,
NvBlastTimers* timers
)
{
NVBLASTLL_CHECK(result != nullptr, logFn, "NvBlastActorSplit: NULL result pointer input.", return 0);
NVBLASTLL_CHECK(newActorsMaxCount > 0 && result->newActors != nullptr, logFn, "NvBlastActorSplit: no space for results provided.", return 0);
NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorSplit: NULL actor pointer input.", return 0);
NVBLASTLL_CHECK(scratch != nullptr, logFn, "NvBlastActorSplit: NULL scratch pointer input.", return 0);
Nv::Blast::Actor& a = *static_cast<Nv::Blast::Actor*>(actor);
if (!a.isActive())
{
        NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorSplit: actor is not active.");
return 0;
}
return a.split(result, newActorsMaxCount, scratch, logFn, timers);
}
bool NvBlastActorCanFracture(const NvBlastActor* actor, NvBlastLog logFn)
{
NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorCanFracture: NULL actor input.", return false);
const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor);
if (!a.isActive())
{
NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorCanFracture: actor is not active.");
return false;
}
bool canFracture = true;
uint32_t graphNodeCount = a.getGraphNodeCount();
if (graphNodeCount < 2)
{
uint32_t chunkHealthIndex = graphNodeCount == 0 ?
a.getFirstVisibleChunkIndex() - a.getFirstSubsupportChunkIndex() + a.getGraph()->m_nodeCount :
a.getFirstGraphNodeIndex();
canFracture = (a.getLowerSupportChunkHealths()[chunkHealthIndex] > 0.0f);
}
return canFracture;
}
bool NvBlastActorHasExternalBonds(const NvBlastActor* actor, NvBlastLog logFn)
{
NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorHasExternalBonds: NULL actor input.", return false);
return static_cast<const Nv::Blast::Actor*>(actor)->hasExternalBonds();
}
bool NvBlastActorIsSplitRequired(const NvBlastActor* actor, NvBlastLog logFn)
{
NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorIsSplitRequired: NULL actor input.", return false);
const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor);
if (!a.isActive())
{
NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorIsSplitRequired: actor is not active.");
return false;
}
return a.isSplitRequired();
}
} // extern "C"
| 39,574 | C++ | 36.37016 | 219 | 0.667307 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastAssetHelper.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "NvBlastAsset.h"
#include "NvBlastIndexFns.h"
#include "NvBlastAssert.h"
#include "NvBlastMemory.h"
#include "NvBlastMath.h"
#include "NvBlastPreprocessorInternal.h"
#include <algorithm>
namespace Nv
{
namespace Blast
{
/**
Class to hold chunk descriptor and annotation context for sorting a list of indices
*/
class ChunksOrdered
{
public:
ChunksOrdered(const NvBlastChunkDesc* descs, const char* annotation)
: m_descs(descs), m_annotation(annotation), m_chunkMap(nullptr), m_chunkInvMap(nullptr) {}
// Map and inverse to apply to chunk descs
bool setMap(const uint32_t* map, const uint32_t* inv)
{
if ((map == nullptr) != (inv == nullptr))
{
return false;
}
m_chunkMap = map;
m_chunkInvMap = inv;
return true;
}
bool operator () (uint32_t ii0, uint32_t ii1) const
{
const uint32_t i0 = m_chunkMap ? m_chunkMap[ii0] : ii0;
const uint32_t i1 = m_chunkMap ? m_chunkMap[ii1] : ii1;
const bool upperSupport0 = (m_annotation[i0] & Asset::ChunkAnnotation::UpperSupport) != 0;
const bool upperSupport1 = (m_annotation[i1] & Asset::ChunkAnnotation::UpperSupport) != 0;
if (upperSupport0 != upperSupport1)
{
return upperSupport0; // If one is uppersupport and one is subsupport, uppersupport should come first
}
const uint32_t p0 = m_descs[i0].parentChunkDescIndex;
const uint32_t p1 = m_descs[i1].parentChunkDescIndex;
// Parent chunk index (+1 so that UINT32_MAX becomes the lowest value)
const uint32_t pp0 = 1 + (m_chunkInvMap && !isInvalidIndex(p0) ? m_chunkInvMap[p0] : p0);
const uint32_t pp1 = 1 + (m_chunkInvMap && !isInvalidIndex(p1) ? m_chunkInvMap[p1] : p1);
return pp0 < pp1; // With the same support relationship, order by parent index
}
private:
const NvBlastChunkDesc* m_descs;
const char* m_annotation;
const uint32_t* m_chunkMap;
const uint32_t* m_chunkInvMap;
};
} // namespace Blast
} // namespace Nv
using namespace Nv::Blast;
extern "C"
{
bool NvBlastBuildAssetDescChunkReorderMap(uint32_t* chunkReorderMap, const NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, void* scratch, NvBlastLog logFn)
{
NVBLASTLL_CHECK(chunkCount == 0 || chunkDescs != nullptr, logFn, "NvBlastBuildAssetDescChunkReorderMap: NULL chunkDescs input with non-zero chunkCount", return false);
NVBLASTLL_CHECK(chunkReorderMap == nullptr || chunkCount != 0, logFn, "NvBlastBuildAssetDescChunkReorderMap: NULL chunkReorderMap input with non-zero chunkCount", return false);
NVBLASTLL_CHECK(chunkCount == 0 || scratch != nullptr, logFn, "NvBlastBuildAssetDescChunkReorderMap: NULL scratch input with non-zero chunkCount", return false);
uint32_t* composedMap = static_cast<uint32_t*>(scratch); scratch = pointerOffset(scratch, chunkCount * sizeof(uint32_t));
uint32_t* chunkMap = static_cast<uint32_t*>(scratch); scratch = pointerOffset(scratch, chunkCount * sizeof(uint32_t));
char* chunkAnnotation = static_cast<char*>(scratch); scratch = pointerOffset(scratch, chunkCount * sizeof(char));
uint32_t supportChunkCount;
uint32_t leafChunkCount;
if (!Asset::ensureExactSupportCoverage(supportChunkCount, leafChunkCount, chunkAnnotation, chunkCount, const_cast<NvBlastChunkDesc*>(chunkDescs), true, logFn))
{
NVBLASTLL_LOG_ERROR(logFn, "NvBlastBuildAssetDescChunkReorderMap: chunk descriptors did not have exact coverage, map could not be built. Use NvBlastEnsureAssetExactSupportCoverage to fix descriptors.");
return false;
}
// Initialize composedMap and its inverse to identity
for (uint32_t i = 0; i < chunkCount; ++i)
{
composedMap[i] = i;
chunkReorderMap[i] = i;
}
// Create a chunk ordering operator using the composedMap
ChunksOrdered chunksOrdered(chunkDescs, chunkAnnotation);
chunksOrdered.setMap(composedMap, chunkReorderMap);
// Check initial order
bool ordered = true;
if (chunkCount > 1)
{
for (uint32_t i = chunkCount - 1; ordered && i--;)
{
ordered = !chunksOrdered(i + 1, i);
}
}
if (ordered)
{
return true; // Initially ordered, return true
}
NVBLAST_ASSERT(chunkCount > 1);
    // Max depth is bounded by chunkCount, so that is the bound on the number of iterations
uint32_t iter = chunkCount;
do
{
// Reorder based on current composed map
for (uint32_t i = 0; i < chunkCount; ++i)
{
chunkMap[i] = i;
}
std::stable_sort(chunkMap, chunkMap + chunkCount, chunksOrdered);
// Fold chunkMap into composedMap
for (uint32_t i = 0; i < chunkCount; ++i)
{
chunkMap[i] = composedMap[chunkMap[i]];
}
for (uint32_t i = 0; i < chunkCount; ++i)
{
composedMap[i] = chunkMap[i];
chunkMap[i] = i;
}
invertMap(chunkReorderMap, composedMap, chunkCount);
// Check order
ordered = true;
for (uint32_t i = chunkCount - 1; ordered && i--;)
{
ordered = !chunksOrdered(i + 1, i);
}
} while (!ordered && iter--);
NVBLAST_ASSERT(ordered);
return false;
}
void NvBlastApplyAssetDescChunkReorderMap
(
NvBlastChunkDesc* reorderedChunkDescs,
const NvBlastChunkDesc* chunkDescs,
uint32_t chunkCount,
NvBlastBondDesc* bondDescs,
uint32_t bondCount,
const uint32_t* chunkReorderMap,
bool keepBondNormalChunkOrder,
NvBlastLog logFn
)
{
NVBLASTLL_CHECK(chunkCount == 0 || chunkDescs != nullptr, logFn, "NvBlastApplyAssetDescChunkReorderMap: NULL chunkDescs input with non-zero chunkCount", return);
NVBLASTLL_CHECK(reorderedChunkDescs == nullptr || chunkCount != 0, logFn, "NvBlastApplyAssetDescChunkReorderMap: NULL reorderedChunkDescs input with non-zero chunkCount", return);
NVBLASTLL_CHECK(chunkReorderMap == nullptr || chunkCount != 0, logFn, "NvBlastApplyAssetDescChunkReorderMap: NULL chunkReorderMap input with non-zero chunkCount", return);
NVBLASTLL_CHECK(bondCount == 0 || bondDescs != nullptr, logFn, "NvBlastApplyAssetDescChunkReorderMap: NULL bondDescs input with non-zero bondCount", return);
NVBLASTLL_CHECK(bondDescs == nullptr || chunkReorderMap != nullptr, logFn, "NvBlastApplyAssetDescChunkReorderMap: NULL bondDescs input with NULL chunkReorderMap", return);
// Copy chunk descs
if (reorderedChunkDescs)
{
for (uint32_t i = 0; i < chunkCount; ++i)
{
reorderedChunkDescs[chunkReorderMap[i]] = chunkDescs[i];
uint32_t& parentIndex = reorderedChunkDescs[chunkReorderMap[i]].parentChunkDescIndex;
if (parentIndex < chunkCount)
{
parentIndex = chunkReorderMap[parentIndex]; // If the parent index is valid, remap it too to reflect the new order
}
}
}
if (bondDescs)
{
for (uint32_t i = 0; i < bondCount; ++i)
{
NvBlastBondDesc& bondDesc = bondDescs[i];
uint32_t& index0 = bondDesc.chunkIndices[0];
uint32_t& index1 = bondDesc.chunkIndices[1];
const uint32_t newIndex0 = index0 < chunkCount ? chunkReorderMap[index0] : index0;
const uint32_t newIndex1 = index1 < chunkCount ? chunkReorderMap[index1] : index1;
if (keepBondNormalChunkOrder && (index0 < index1) != (newIndex0 < newIndex1))
{
VecMath::mul(bondDesc.bond.normal, -1);
}
index0 = newIndex0;
index1 = newIndex1;
}
}
}
void NvBlastApplyAssetDescChunkReorderMapInPlace
(
NvBlastChunkDesc* chunkDescs,
uint32_t chunkCount,
NvBlastBondDesc* bondDescs,
uint32_t bondCount,
const uint32_t* chunkReorderMap,
bool keepBondNormalChunkOrder,
void* scratch,
NvBlastLog logFn
)
{
NVBLASTLL_CHECK(chunkCount == 0 || chunkDescs != nullptr, logFn, "NvBlastApplyAssetDescChunkReorderMapInPlace: NULL chunkDescs input with non-zero chunkCount", return);
NVBLASTLL_CHECK(chunkCount == 0 || scratch != nullptr, logFn, "NvBlastApplyAssetDescChunkReorderMapInPlace: NULL scratch input with non-zero chunkCount", return);
NvBlastChunkDesc* chunksTemp = static_cast<NvBlastChunkDesc*>(scratch);
memcpy(chunksTemp, chunkDescs, sizeof(NvBlastChunkDesc) * chunkCount);
NvBlastApplyAssetDescChunkReorderMap(chunkDescs, chunksTemp, chunkCount, bondDescs, bondCount, chunkReorderMap, keepBondNormalChunkOrder, logFn);
}
bool NvBlastReorderAssetDescChunks
(
NvBlastChunkDesc* chunkDescs,
uint32_t chunkCount,
NvBlastBondDesc* bondDescs,
uint32_t bondCount,
uint32_t* chunkReorderMap,
bool keepBondNormalChunkOrder,
void* scratch,
NvBlastLog logFn
)
{
if (!NvBlastBuildAssetDescChunkReorderMap(chunkReorderMap, chunkDescs, chunkCount, scratch, logFn))
{
NvBlastApplyAssetDescChunkReorderMapInPlace(chunkDescs, chunkCount, bondDescs, bondCount, chunkReorderMap, keepBondNormalChunkOrder, scratch, logFn);
return false;
}
return true;
}
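// Illustrative tool-side sketch (not part of the SDK): preparing an authored chunk/bond description for
// asset creation. The scratch size below is an assumption based on the code in this file (the in-place
// apply copies chunkCount chunk descriptors into scratch, which also covers the map-building pass);
// consult NvBlast.h for the documented requirement. std::vector is used only for brevity.
#if 0
static void prepareChunkDescsExample(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount,
                                     NvBlastBondDesc* bondDescs, uint32_t bondCount, NvBlastLog logFn)
{
    std::vector<char> scratch(chunkCount * sizeof(NvBlastChunkDesc));
    std::vector<uint32_t> chunkReorderMap(chunkCount);
    NvBlastEnsureAssetExactSupportCoverage(chunkDescs, chunkCount, scratch.data(), logFn);
    const bool alreadyOrdered = NvBlastReorderAssetDescChunks(chunkDescs, chunkCount, bondDescs, bondCount,
                                                              chunkReorderMap.data(), true, scratch.data(), logFn);
    // If alreadyOrdered is false, chunkDescs/bondDescs were permuted in place; chunkReorderMap maps each
    // original chunk index to its new position, for fixing up any external data that references chunks.
    (void)alreadyOrdered;
}
#endif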
bool NvBlastEnsureAssetExactSupportCoverage(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, void* scratch, NvBlastLog logFn)
{
NVBLASTLL_CHECK(chunkCount == 0 || chunkDescs != nullptr, logFn, "NvBlastEnsureAssetExactSupportCoverage: NULL chunkDescs input with non-zero chunkCount", return false);
NVBLASTLL_CHECK(chunkCount == 0 || scratch != nullptr, logFn, "NvBlastEnsureAssetExactSupportCoverage: NULL scratch input with non-zero chunkCount", return false);
uint32_t supportChunkCount;
uint32_t leafChunkCount;
return Asset::ensureExactSupportCoverage(supportChunkCount, leafChunkCount, static_cast<char*>(scratch), chunkCount, chunkDescs, false, logFn);
}
} // extern "C"
| 11,513 | C++ | 38.703448 | 211 | 0.689916 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastAsset.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "NvBlastAssert.h"
#include "NvBlastAsset.h"
#include "NvBlastActor.h"
#include "NvBlastMath.h"
#include "NvBlastPreprocessorInternal.h"
#include "NvBlastIndexFns.h"
#include "NvBlastActorSerializationBlock.h"
#include "NvBlastMemory.h"
#include <algorithm>
//#include <random>
namespace Nv
{
namespace Blast
{
//////// Local helper functions ////////
/**
Helper function to validate the input parameters for NvBlastCreateAsset. See NvBlastCreateAsset for parameter definitions.
*/
static bool solverAssetBuildValidateInput(void* mem, const NvBlastAssetDesc* desc, void* scratch, NvBlastLog logFn)
{
if (mem == nullptr)
{
NVBLASTLL_LOG_ERROR(logFn, "AssetBuildValidateInput: NULL mem pointer input.");
return false;
}
if (desc == nullptr)
{
NVBLASTLL_LOG_ERROR(logFn, "AssetBuildValidateInput: NULL desc pointer input.");
return false;
}
if (desc->chunkCount == 0)
{
NVBLASTLL_LOG_ERROR(logFn, "AssetBuildValidateInput: Zero chunk count not allowed.");
return false;
}
if (desc->chunkDescs == nullptr)
{
NVBLASTLL_LOG_ERROR(logFn, "AssetBuildValidateInput: NULL chunkDescs pointer input.");
return false;
}
if (desc->bondCount != 0 && desc->bondDescs == nullptr)
{
NVBLASTLL_LOG_ERROR(logFn, "AssetBuildValidateInput: bondCount non-zero but NULL bondDescs pointer input.");
return false;
}
if (scratch == nullptr)
{
NVBLASTLL_LOG_ERROR(logFn, "AssetBuildValidateInput: NULL scratch pointer input.");
return false;
}
return true;
}
struct AssetDataOffsets
{
size_t m_chunks;
size_t m_bonds;
size_t m_subtreeLeafChunkCounts;
size_t m_supportChunkIndices;
size_t m_chunkToGraphNodeMap;
size_t m_graphAdjacencyPartition;
size_t m_graphAdjacentNodeIndices;
size_t m_graphAdjacentBondIndices;
};
static size_t createAssetDataOffsets(AssetDataOffsets& offsets, uint32_t chunkCount, uint32_t graphNodeCount, uint32_t bondCount)
{
NvBlastCreateOffsetStart(sizeof(Asset));
NvBlastCreateOffsetAlign16(offsets.m_chunks, chunkCount * sizeof(NvBlastChunk));
NvBlastCreateOffsetAlign16(offsets.m_bonds, bondCount * sizeof(NvBlastBond));
NvBlastCreateOffsetAlign16(offsets.m_subtreeLeafChunkCounts, chunkCount * sizeof(uint32_t));
NvBlastCreateOffsetAlign16(offsets.m_supportChunkIndices, graphNodeCount * sizeof(uint32_t));
NvBlastCreateOffsetAlign16(offsets.m_chunkToGraphNodeMap, chunkCount * sizeof(uint32_t));
NvBlastCreateOffsetAlign16(offsets.m_graphAdjacencyPartition, (graphNodeCount + 1) * sizeof(uint32_t));
NvBlastCreateOffsetAlign16(offsets.m_graphAdjacentNodeIndices, (2 * bondCount) * sizeof(uint32_t));
NvBlastCreateOffsetAlign16(offsets.m_graphAdjacentBondIndices, (2 * bondCount) * sizeof(uint32_t));
return NvBlastCreateOffsetEndAlign16();
}
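// Note on how these offsets are consumed (a sketch of the convention, not a quote of the accessors in
// NvBlastAsset.h): each value is a byte offset from the start of the Asset block, so an accessor such as
// Asset::getChunks() is expected to resolve it in the pattern
//     reinterpret_cast<NvBlastChunk*>(reinterpret_cast<char*>(this) + m_chunksOffset)
// which is why the block as a whole must stay 16-byte aligned for the NvBlastBond array to be aligned.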
Asset* initializeAsset(void* mem, uint32_t chunkCount, uint32_t graphNodeCount, uint32_t leafChunkCount, uint32_t firstSubsupportChunkIndex, uint32_t bondCount, NvBlastLog logFn)
{
// Data offsets
AssetDataOffsets offsets;
const size_t dataSize = createAssetDataOffsets(offsets, chunkCount, graphNodeCount, bondCount);
// Restricting our data size to < 4GB so that we may use uint32_t offsets
if (dataSize > (size_t)UINT32_MAX)
{
        NVBLASTLL_LOG_ERROR(logFn, "Nv::Blast::initializeAsset: Asset data size will exceed 4GB. Instance not created.\n");
return nullptr;
}
// Zero memory and cast to Asset
Asset* asset = reinterpret_cast<Asset*>(memset(mem, 0, dataSize));
// Fill in fields
const size_t graphOffset = NV_OFFSET_OF(Asset, m_graph);
asset->m_header.dataType = NvBlastDataBlock::AssetDataBlock;
asset->m_header.formatVersion = 0; // Not currently using this field
asset->m_header.size = (uint32_t)dataSize;
asset->m_header.reserved = 0;
memset(&asset->m_ID, 0, sizeof(NvBlastID));
asset->m_chunkCount = chunkCount;
asset->m_graph.m_nodeCount = graphNodeCount;
asset->m_graph.m_chunkIndicesOffset = (uint32_t)(offsets.m_supportChunkIndices - graphOffset);
asset->m_graph.m_adjacencyPartitionOffset = (uint32_t)(offsets.m_graphAdjacencyPartition - graphOffset);
asset->m_graph.m_adjacentNodeIndicesOffset = (uint32_t)(offsets.m_graphAdjacentNodeIndices - graphOffset);
asset->m_graph.m_adjacentBondIndicesOffset = (uint32_t)(offsets.m_graphAdjacentBondIndices - graphOffset);
asset->m_leafChunkCount = leafChunkCount;
asset->m_firstSubsupportChunkIndex = firstSubsupportChunkIndex;
asset->m_bondCount = bondCount;
asset->m_chunksOffset = (uint32_t)offsets.m_chunks;
asset->m_bondsOffset = (uint32_t)offsets.m_bonds;
asset->m_subtreeLeafChunkCountsOffset = (uint32_t)offsets.m_subtreeLeafChunkCounts;
asset->m_chunkToGraphNodeMapOffset = (uint32_t)offsets.m_chunkToGraphNodeMap;
// Ensure Bonds remain aligned
NV_COMPILE_TIME_ASSERT((sizeof(NvBlastBond) & 0xf) == 0);
// Ensure Bonds are aligned - note, this requires that the block be aligned
NVBLAST_ASSERT((uintptr_t(asset->getBonds()) & 0xf) == 0);
return asset;
}
/**
Tests for a loop in a digraph starting at a given graph vertex.
Using the implied digraph given by the chunkDescs' parentChunkDescIndex fields, the graph is walked from the chunk descriptor chunkDescs[chunkDescIndex],
to determine if that walk leads to a loop.
Input:
chunkDescs - the chunk descriptors
chunkDescIndex - the index of the starting chunk descriptor
Return:
true if a loop is found, false otherwise.
*/
NV_INLINE bool testForLoop(const NvBlastChunkDesc* chunkDescs, uint32_t chunkDescIndex)
{
NVBLAST_ASSERT(!isInvalidIndex(chunkDescIndex));
uint32_t chunkDescIndex1 = chunkDescs[chunkDescIndex].parentChunkDescIndex;
if (isInvalidIndex(chunkDescIndex1))
{
return false;
}
uint32_t chunkDescIndex2 = chunkDescs[chunkDescIndex1].parentChunkDescIndex;
if (isInvalidIndex(chunkDescIndex2))
{
return false;
}
do
{
// advance index 1
chunkDescIndex1 = chunkDescs[chunkDescIndex1].parentChunkDescIndex; // No need to check for termination here. index 2 would find it first.
// advance index 2 twice and check for incidence with index 1 as well as termination
if ((chunkDescIndex2 = chunkDescs[chunkDescIndex2].parentChunkDescIndex) == chunkDescIndex1)
{
return true;
}
if (isInvalidIndex(chunkDescIndex2))
{
return false;
}
if ((chunkDescIndex2 = chunkDescs[chunkDescIndex2].parentChunkDescIndex) == chunkDescIndex1)
{
return true;
}
} while (!isInvalidIndex(chunkDescIndex2));
return false;
}
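// Worked example (comment only): with parentChunkDescIndex values { invalid, 0, 1, 2 } for chunks 0..3,
// starting the walk at chunk 3 the fast index (chunkDescIndex2) reaches the invalid root parent and the
// function returns false. If chunk 1's parent were erroneously set to 3, the parent chain 3 -> 2 -> 1 -> 3
// closes on itself, the fast index eventually meets the slow index, and the function returns true.
// This is the classic two-pointer ("tortoise and hare") cycle test applied to the parent links.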
/**
Tests a set of chunk descriptors to see if the implied hierarchy describes valid trees.
A single tree implies that only one of the chunkDescs has an invalid (invalidIndex<uint32_t>()) parentChunkIndex, and all other
chunks are descendants of that chunk. The passed set of chunks is checked to contain one or more such trees.
Input:
chunkCount - the number of chunk descriptors
chunkDescs - an array of chunk descriptors of length chunkCount
logFn - message function (see NvBlastLog definition).
Return:
    true if the descriptors imply valid trees, false otherwise.
*/
static bool testForValidTrees(uint32_t chunkCount, const NvBlastChunkDesc* chunkDescs, NvBlastLog logFn)
{
for (uint32_t i = 0; i < chunkCount; ++i)
{
// Ensure there are no loops
if (testForLoop(chunkDescs, i))
{
NVBLASTLL_LOG_WARNING(logFn, "testForValidTrees: loop found. Asset will not be created.");
return false;
}
}
return true;
}
#if 0
/**
* Helper to generate random GUID
*/
static NvBlastID NvBlastExtCreateRandomID()
{
NvBlastID id;
static std::default_random_engine re;
*reinterpret_cast<uint32_t*>(&id.data[0]) = re();
*reinterpret_cast<uint32_t*>(&id.data[4]) = re();
*reinterpret_cast<uint32_t*>(&id.data[8]) = re();
*reinterpret_cast<uint32_t*>(&id.data[12]) = re();
return id;
}
#endif
// CRC-32C (iSCSI) polynomial in reversed bit order.
inline uint32_t crc32c(uint32_t crc, const char* buf, size_t len)
{
crc = ~crc;
while (len--)
{
crc ^= *buf++;
for (int k = 0; k < 8; k++)
crc = (crc >> 1) ^ (-(int)(crc & 1) & 0x82f63b78);
}
return ~crc;
}
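// Illustrative self-test (not part of the SDK): 0x82f63b78 is the bit-reversed form of the Castagnoli
// polynomial 0x1EDC6F41, so this routine computes standard CRC-32C and should reproduce the published
// check value for the conventional test input "123456789".
#if 0
static bool crc32cSelfTest()
{
    return crc32c(0, "123456789", 9) == 0xE3069283;
}
#endif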
/**
* Helper to generate GUID from NvBlastAsset memory
*/
static NvBlastID createIDFromAsset(const NvBlastAsset* asset, NvBlastLog logFn)
{
// Divide memory into quarters
const char* m0 = reinterpret_cast<const char*>(asset);
const char* m4 = m0 + NvBlastAssetGetSize(asset, logFn);
const char* m2 = m0 + (m4 - m0) / 2;
const char* m1 = m0 + (m2 - m0) / 2;
const char* m3 = m2 + (m4 - m2) / 2;
// CRC hash quarters
const uint32_t a = crc32c(0, m0, m1 - m0);
const uint32_t b = crc32c(a, m1, m2 - m1);
const uint32_t c = crc32c(b, m2, m3 - m2);
const uint32_t d = crc32c(c, m3, m4 - m3);
// Build ID out of hashes
NvBlastID id;
*reinterpret_cast<uint32_t*>(&id.data[0x0]) = a;
*reinterpret_cast<uint32_t*>(&id.data[0x4]) = b;
*reinterpret_cast<uint32_t*>(&id.data[0x8]) = c;
*reinterpret_cast<uint32_t*>(&id.data[0xc]) = d;
return id;
}
/**
Struct to hold chunk indices and bond index for sorting
Utility struct used by NvBlastCreateAsset in order to arrange bond data in a lookup table, and also to easily identify redundant input.
*/
struct BondSortData
{
BondSortData(uint32_t c0, uint32_t c1, uint32_t b) : m_c0(c0), m_c1(c1), m_b(b) {}
uint32_t m_c0;
uint32_t m_c1;
uint32_t m_b;
};
/**
Functional class for sorting a list of BondSortData
*/
class BondsOrdered
{
public:
bool operator () (const BondSortData& bond0, const BondSortData& bond1) const
{
return (bond0.m_c0 != bond1.m_c0) ? (bond0.m_c0 < bond1.m_c0) : (bond0.m_c1 != bond1.m_c1 ? bond0.m_c1 < bond1.m_c1 : bond0.m_b < bond1.m_b);
}
};
//////// Asset static functions ////////
size_t Asset::getMemorySize(const NvBlastAssetDesc* desc)
{
NVBLAST_ASSERT(desc != nullptr);
// Count graph nodes
uint32_t graphNodeCount = 0;
for (uint32_t i = 0; i < desc->chunkCount; ++i)
{
graphNodeCount += (uint32_t)((desc->chunkDescs[i].flags & NvBlastChunkDesc::SupportFlag) != 0);
}
for (uint32_t i = 0; i < desc->bondCount; ++i)
{
const NvBlastBondDesc& bondDesc = desc->bondDescs[i];
const uint32_t chunkIndex0 = bondDesc.chunkIndices[0];
const uint32_t chunkIndex1 = bondDesc.chunkIndices[1];
if ((isInvalidIndex(chunkIndex0) && chunkIndex1 < desc->chunkCount) ||
(isInvalidIndex(chunkIndex1) && chunkIndex0 < desc->chunkCount))
{
++graphNodeCount; // world node
break;
}
}
AssetDataOffsets offsets;
return createAssetDataOffsets(offsets, desc->chunkCount, graphNodeCount, desc->bondCount);
}
size_t Asset::createRequiredScratch(const NvBlastAssetDesc* desc, NvBlastLog logFn)
{
NVBLASTLL_CHECK(desc != nullptr, logFn, "Asset::createRequiredScratch: NULL desc.", return 0);
// Aligned and padded
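    // The leading 16 bytes let create() bump an unaligned scratch pointer up to 16-byte alignment,
    // and the four aligned regions correspond to the buffers carved out of scratch in Asset::create():
    // chunk annotations, the chunk-to-graph-node index map, the symmetrized bond sort array, and the
    // bond remap array.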
return 16 +
align16(desc->chunkCount*sizeof(char)) +
align16(desc->chunkCount*sizeof(uint32_t)) +
align16(2 * desc->bondCount*sizeof(BondSortData)) +
align16(desc->bondCount*sizeof(uint32_t));
}
Asset* Asset::create(void* mem, const NvBlastAssetDesc* desc, void* scratch, NvBlastLog logFn)
{
#if NVBLASTLL_CHECK_PARAMS
if (!solverAssetBuildValidateInput(mem, desc, scratch, logFn))
{
return nullptr;
}
#else
NV_UNUSED(solverAssetBuildValidateInput);
#endif
NVBLASTLL_CHECK((reinterpret_cast<uintptr_t>(mem) & 0xF) == 0, logFn, "NvBlastCreateAsset: mem pointer not 16-byte aligned.", return nullptr);
// Make sure we have valid trees before proceeding
if (!testForValidTrees(desc->chunkCount, desc->chunkDescs, logFn))
{
return nullptr;
}
scratch = (void*)align16((size_t)scratch); // Bump to 16-byte alignment (see padding in NvBlastGetRequiredScratchForCreateAsset)
// reserve chunkAnnotation on scratch
char* chunkAnnotation = reinterpret_cast<char*>(scratch); scratch = pointerOffset(scratch, align16(desc->chunkCount));
// test for coverage, chunkAnnotation will be filled there.
uint32_t leafChunkCount;
uint32_t supportChunkCount;
if (!ensureExactSupportCoverage(supportChunkCount, leafChunkCount, chunkAnnotation, desc->chunkCount, const_cast<NvBlastChunkDesc*>(desc->chunkDescs), true, logFn))
{
NVBLASTLL_LOG_ERROR(logFn, "NvBlastCreateAsset: support coverage is not exact. Asset will not be created. The Asset helper function NvBlastEnsureAssetExactSupportCoverage may be used to create exact coverage.");
return nullptr;
}
// test for valid chunk order
if (!testForValidChunkOrder(desc->chunkCount, desc->chunkDescs, chunkAnnotation, scratch))
{
        NVBLASTLL_LOG_ERROR(logFn, "NvBlastCreateAsset: chunk order is invalid. Asset will not be created. Use Asset helper functions such as NvBlastBuildAssetDescChunkReorderMap to fix the descriptor order.");
return nullptr;
}
// Find first subsupport chunk
uint32_t firstSubsupportChunkIndex = desc->chunkCount; // Set value to chunk count if no subsupport chunks are found
for (uint32_t i = 0; i < desc->chunkCount; ++i)
{
if ((chunkAnnotation[i] & ChunkAnnotation::UpperSupport) == 0)
{
firstSubsupportChunkIndex = i;
break;
}
}
// Create map from global indices to graph node indices and initialize to invalid values
uint32_t* graphNodeIndexMap = (uint32_t*)scratch; scratch = pointerOffset(scratch, align16(desc->chunkCount * sizeof(uint32_t)));
memset(graphNodeIndexMap, 0xFF, desc->chunkCount*sizeof(uint32_t));
// Fill graphNodeIndexMap
uint32_t graphNodeCount = 0;
for (uint32_t i = 0; i < desc->chunkCount; ++i)
{
if ((chunkAnnotation[i] & ChunkAnnotation::Support) != 0)
{
graphNodeIndexMap[i] = graphNodeCount++;
}
}
NVBLAST_ASSERT(graphNodeCount == supportChunkCount);
// Scratch array for bond sorting, of size 2*desc->bondCount
BondSortData* bondSortArray = (BondSortData*)scratch; scratch = pointerOffset(scratch, align16(2 * desc->bondCount*sizeof(BondSortData)));
// Bond remapping array of size desc->bondCount
uint32_t* bondMap = (uint32_t*)scratch;
memset(bondMap, 0xFF, desc->bondCount*sizeof(uint32_t));
// Eliminate bad or redundant bonds, finding actual bond count
uint32_t bondCount = 0;
if (desc->bondCount > 0)
{
// Check for duplicates from input data as well as non-support chunk indices. All such bonds must be removed.
bool invalidFound = false;
bool duplicateFound = false;
bool nonSupportFound = false;
// Construct temp array of chunk index pairs and bond indices. This array is symmetrized to hold the reversed chunk indices as well.
uint32_t bondSortArraySize = 0;
BondSortData* t = bondSortArray;
bool addWorldNode = false;
for (uint32_t i = 0; i < desc->bondCount; ++i)
{
const NvBlastBondDesc& bondDesc = desc->bondDescs[i];
const uint32_t chunkIndex0 = bondDesc.chunkIndices[0];
const uint32_t chunkIndex1 = bondDesc.chunkIndices[1];
if ((chunkIndex0 >= desc->chunkCount && !isInvalidIndex(chunkIndex0)) ||
(chunkIndex1 >= desc->chunkCount && !isInvalidIndex(chunkIndex1)) ||
chunkIndex0 == chunkIndex1)
{
invalidFound = true;
continue;
}
uint32_t graphIndex0;
if (!isInvalidIndex(chunkIndex0))
{
graphIndex0 = graphNodeIndexMap[chunkIndex0];
}
else
{
addWorldNode = true;
graphIndex0 = graphNodeCount; // Will set graphNodeCount = supportChunkCount + 1
}
uint32_t graphIndex1;
if (!isInvalidIndex(chunkIndex1))
{
graphIndex1 = graphNodeIndexMap[chunkIndex1];
}
else
{
addWorldNode = true;
graphIndex1 = graphNodeCount; // Will set graphNodeCount = supportChunkCount + 1
}
if (isInvalidIndex(graphIndex0) || isInvalidIndex(graphIndex1))
{
nonSupportFound = true;
continue;
}
t[bondSortArraySize++] = BondSortData(graphIndex0, graphIndex1, i);
t[bondSortArraySize++] = BondSortData(graphIndex1, graphIndex0, i);
}
// Sort the temp array
std::sort(bondSortArray, bondSortArray + bondSortArraySize, BondsOrdered());
uint32_t symmetrizedBondCount = 0;
for (uint32_t i = 0; i < bondSortArraySize; ++i)
{
const bool duplicate = i > 0 && bondSortArray[i].m_c0 == bondSortArray[i - 1].m_c0 && bondSortArray[i].m_c1 == bondSortArray[i - 1].m_c1; // Since the array is sorted, uniqueness may be tested by only considering the previous element
duplicateFound = duplicateFound || duplicate;
if (!duplicate)
{ // Keep this bond
if (symmetrizedBondCount != i)
{
bondSortArray[symmetrizedBondCount] = bondSortArray[i]; // Compact array if we've dropped bonds
}
++symmetrizedBondCount;
}
}
NVBLAST_ASSERT((symmetrizedBondCount & 1) == 0); // Because we symmetrized, there should be an even number
bondCount = symmetrizedBondCount / 2;
// World node references found in bonds; add a world node
if (addWorldNode)
{
++graphNodeCount;
}
// Report warnings
if (invalidFound)
{
NVBLASTLL_LOG_WARNING(logFn, "NvBlastCreateAsset: Invalid bonds found (non-existent or same chunks referenced) and removed from asset.");
}
if (duplicateFound)
{
NVBLASTLL_LOG_WARNING(logFn, "NvBlastCreateAsset: Duplicate bonds found and removed from asset.");
}
if (nonSupportFound)
{
NVBLASTLL_LOG_WARNING(logFn, "NvBlastCreateAsset: Bonds referencing non-support chunks found and removed from asset.");
}
}
// Allocate memory for asset
Asset* asset = initializeAsset(mem, desc->chunkCount, graphNodeCount, leafChunkCount, firstSubsupportChunkIndex, bondCount, logFn);
// Asset data pointers
SupportGraph& graph = asset->m_graph;
NvBlastChunk* chunks = asset->getChunks();
NvBlastBond* bonds = asset->getBonds();
uint32_t* subtreeLeafChunkCounts = asset->getSubtreeLeafChunkCounts();
// Create chunks
uint32_t* graphChunkIndices = graph.getChunkIndices();
memset(graphChunkIndices, 0xFF, graphNodeCount * sizeof(uint32_t)); // Ensures unmapped node indices go to invalidIndex - this is important for the world node, if added
for (uint32_t i = 0; i < desc->chunkCount; ++i)
{
const NvBlastChunkDesc& chunkDesc = desc->chunkDescs[i];
NvBlastChunk& assetChunk = chunks[i];
memcpy(assetChunk.centroid, chunkDesc.centroid, 3 * sizeof(float));
assetChunk.volume = chunkDesc.volume;
assetChunk.parentChunkIndex = chunkDesc.parentChunkDescIndex;
assetChunk.firstChildIndex = invalidIndex<uint32_t>(); // Will be filled in below
assetChunk.childIndexStop = assetChunk.firstChildIndex;
assetChunk.userData = chunkDesc.userData;
const uint32_t graphNodeIndex = graphNodeIndexMap[i];
if (!isInvalidIndex(graphNodeIndex))
{
graphChunkIndices[graphNodeIndex] = i;
}
}
// Copy chunkToGraphNodeMap
memcpy(asset->getChunkToGraphNodeMap(), graphNodeIndexMap, desc->chunkCount * sizeof(uint32_t));
// Count chunk children
for (uint32_t i = 0; i < desc->chunkCount; ++i)
{
const uint32_t parentChunkIndex = chunks[i].parentChunkIndex;
if (!isInvalidIndex(parentChunkIndex))
{
if (chunks[parentChunkIndex].childIndexStop == chunks[parentChunkIndex].firstChildIndex)
{
chunks[parentChunkIndex].childIndexStop = chunks[parentChunkIndex].firstChildIndex = i;
}
++chunks[parentChunkIndex].childIndexStop;
}
}
// Create bonds
uint32_t* graphAdjacencyPartition = graph.getAdjacencyPartition();
uint32_t* graphAdjacentNodeIndices = graph.getAdjacentNodeIndices();
uint32_t* graphAdjacentBondIndices = graph.getAdjacentBondIndices();
if (bondCount > 0)
{
// Create the lookup table from the sorted array
createIndexStartLookup<uint32_t>(graphAdjacencyPartition, 0, graphNodeCount - 1, &bondSortArray->m_c0, 2 * bondCount, sizeof(BondSortData));
// Write the adjacent chunk and bond index data
uint32_t bondIndex = 0;
for (uint32_t i = 0; i < 2 * bondCount; ++i)
{
const BondSortData& bondSortData = bondSortArray[i];
graphAdjacentNodeIndices[i] = bondSortData.m_c1;
const uint32_t oldBondIndex = bondSortData.m_b;
const NvBlastBondDesc& bondDesc = desc->bondDescs[oldBondIndex];
if (isInvalidIndex(bondMap[oldBondIndex]))
{
bonds[bondIndex] = bondDesc.bond;
bondMap[oldBondIndex] = bondIndex++;
}
NVBLAST_ASSERT(bondMap[oldBondIndex] < bondCount);
graphAdjacentBondIndices[i] = bondMap[oldBondIndex];
}
}
else
{
// No bonds - zero out all partition elements (including last one, to give zero size for adjacent data arrays)
memset(graphAdjacencyPartition, 0, (graphNodeCount + 1)*sizeof(uint32_t));
}
// Count subtree leaf chunks
memset(subtreeLeafChunkCounts, 0, desc->chunkCount*sizeof(uint32_t));
uint32_t* breadthFirstChunkIndices = graphNodeIndexMap; // Reusing graphNodeIndexMap ... graphNodeIndexMap may no longer be used
for (uint32_t startChunkIndex = 0; startChunkIndex < desc->chunkCount; ++startChunkIndex)
{
if (!isInvalidIndex(chunks[startChunkIndex].parentChunkIndex))
{
break; // Only iterate through root chunks at this level
}
const uint32_t enumeratedChunkCount = enumerateChunkHierarchyBreadthFirst(breadthFirstChunkIndices, desc->chunkCount, chunks, startChunkIndex);
for (uint32_t chunkNum = enumeratedChunkCount; chunkNum--;)
{
const uint32_t chunkIndex = breadthFirstChunkIndices[chunkNum];
const NvBlastChunk& chunk = chunks[chunkIndex];
if (chunk.childIndexStop <= chunk.firstChildIndex)
{
subtreeLeafChunkCounts[chunkIndex] = 1;
}
if (!isInvalidIndex(chunk.parentChunkIndex))
{
subtreeLeafChunkCounts[chunk.parentChunkIndex] += subtreeLeafChunkCounts[chunkIndex];
}
}
}
// Assign ID after data has been created
asset->m_ID = createIDFromAsset(asset, logFn);
return asset;
}
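#if 0
/**
 * Illustrative usage sketch (not part of the SDK): creating an asset from a filled-in descriptor
 * through the public C API implemented at the end of this file. malloc/free (which would require
 * <cstdlib> in a real build) stand in for whatever allocator the caller prefers; the asset memory
 * is assumed to come back 16-byte aligned, as Asset::create() requires. Error handling is omitted.
 */
static NvBlastAsset* createAssetSketch(const NvBlastAssetDesc& desc, NvBlastLog logFn)
{
    void* scratch = malloc(NvBlastGetRequiredScratchForCreateAsset(&desc, logFn));  // temporary working memory
    void* mem = malloc(NvBlastGetAssetMemorySize(&desc, logFn));                    // holds the asset itself
    NvBlastAsset* asset = NvBlastCreateAsset(mem, &desc, scratch, logFn);
    free(scratch);  // scratch is only needed during creation
    return asset;   // the caller owns 'mem' and releases it when done with the asset
}
#endif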
bool Asset::ensureExactSupportCoverage(uint32_t& supportChunkCount, uint32_t& leafChunkCount, char* chunkAnnotation, uint32_t chunkCount, NvBlastChunkDesc* chunkDescs, bool testOnly, NvBlastLog logFn)
{
// Clear leafChunkCount
leafChunkCount = 0;
memset(chunkAnnotation, 0, chunkCount);
// Walk up the hierarchy from all chunks and mark all parents
for (uint32_t i = 0; i < chunkCount; ++i)
{
if (chunkAnnotation[i] & Asset::ChunkAnnotation::Parent)
{
continue;
}
uint32_t chunkDescIndex = i;
while (!isInvalidIndex(chunkDescIndex = chunkDescs[chunkDescIndex].parentChunkDescIndex))
{
chunkAnnotation[chunkDescIndex] = Asset::ChunkAnnotation::Parent; // Note as non-leaf
}
}
// Walk up the hierarchy from all leaves (counting them with leafChunkCount) and keep track of the support chunks found on each chain
// Exactly one support chunk should be found on each walk. Remove all but the highest support markings if more than one are found.
bool redundantCoverage = false;
bool insufficientCoverage = false;
for (uint32_t i = 0; i < chunkCount; ++i)
{
if (chunkAnnotation[i] & Asset::ChunkAnnotation::Parent)
{
continue;
}
++leafChunkCount;
uint32_t supportChunkDescIndex;
supportChunkDescIndex = invalidIndex<uint32_t>();
uint32_t chunkDescIndex = i;
bool doneWithChain = false;
do
{
if (chunkDescs[chunkDescIndex].flags & NvBlastChunkDesc::SupportFlag)
{
if (chunkAnnotation[chunkDescIndex] & Asset::ChunkAnnotation::Support)
{
// We've already been up this chain and marked this as support, so we have unique coverage already
doneWithChain = true;
}
chunkAnnotation[chunkDescIndex] |= Asset::ChunkAnnotation::Support; // Note as support
if (!isInvalidIndex(supportChunkDescIndex))
{
if (testOnly)
{
return false;
}
redundantCoverage = true;
chunkAnnotation[supportChunkDescIndex] &= ~Asset::ChunkAnnotation::Support; // Remove support marking
do // Run up the hierarchy from supportChunkDescIndex to chunkDescIndex and remove the supersupport markings
{
supportChunkDescIndex = chunkDescs[supportChunkDescIndex].parentChunkDescIndex;
chunkAnnotation[supportChunkDescIndex] &= ~Asset::ChunkAnnotation::SuperSupport; // Remove supersupport marking
} while (supportChunkDescIndex != chunkDescIndex);
}
supportChunkDescIndex = chunkDescIndex;
}
else
if (!isInvalidIndex(supportChunkDescIndex))
{
chunkAnnotation[chunkDescIndex] |= Asset::ChunkAnnotation::SuperSupport; // Not a support chunk and we've already found a support chunk, so this is super-support
}
} while (!doneWithChain && !isInvalidIndex(chunkDescIndex = chunkDescs[chunkDescIndex].parentChunkDescIndex));
if (isInvalidIndex(supportChunkDescIndex))
{
if (testOnly)
{
return false;
}
insufficientCoverage = true;
}
}
if (redundantCoverage)
{
NVBLASTLL_LOG_INFO(logFn, "NvBlastCreateAsset: some leaf-to-root chains had more than one support chunk. Some support chunks removed.");
}
if (insufficientCoverage)
{
// If coverage was insufficient, then walk up the hierarchy again and mark all chunks that have a support descendant.
// This will allow us to place support chunks at the highest possible level to obtain coverage.
for (uint32_t i = 0; i < chunkCount; ++i)
{
if (chunkAnnotation[i] & Asset::ChunkAnnotation::Parent)
{
continue;
}
bool supportFound = false;
uint32_t chunkDescIndex = i;
do
{
if (chunkAnnotation[chunkDescIndex] & Asset::ChunkAnnotation::Support)
{
supportFound = true;
}
else
if (supportFound)
{
chunkAnnotation[chunkDescIndex] |= Asset::ChunkAnnotation::SuperSupport; // Note that a descendant has support
}
} while (!isInvalidIndex(chunkDescIndex = chunkDescs[chunkDescIndex].parentChunkDescIndex));
}
// Now walk up the hierarchy from each leaf one more time, and make sure there is coverage
for (uint32_t i = 0; i < chunkCount; ++i)
{
if (chunkAnnotation[i] & Asset::ChunkAnnotation::Parent)
{
continue;
}
uint32_t previousChunkDescIndex;
previousChunkDescIndex = invalidIndex<uint32_t>();
uint32_t chunkDescIndex = i;
for (;;)
{
if (chunkAnnotation[chunkDescIndex] & Asset::ChunkAnnotation::Support)
{
break; // There is support along this chain
}
if (chunkAnnotation[chunkDescIndex] & Asset::ChunkAnnotation::SuperSupport)
{
NVBLAST_ASSERT(!isInvalidIndex(previousChunkDescIndex)); // This should be impossible
chunkAnnotation[previousChunkDescIndex] |= Asset::ChunkAnnotation::Support; // There is no support along this chain, and this is the highest place where we can put support
break;
}
previousChunkDescIndex = chunkDescIndex;
chunkDescIndex = chunkDescs[chunkDescIndex].parentChunkDescIndex;
if (isInvalidIndex(chunkDescIndex))
{
chunkAnnotation[previousChunkDescIndex] |= Asset::ChunkAnnotation::Support; // There was no support found anywhere in the hierarchy, so we add it at the root
break;
}
}
}
NVBLASTLL_LOG_INFO(logFn, "NvBlastCreateAsset: some leaf-to-root chains had no support chunks. Support chunks added.");
}
// Apply changes and count the number of support chunks
supportChunkCount = 0;
for (uint32_t i = 0; i < chunkCount; ++i)
{
const bool wasSupport = (chunkDescs[i].flags & NvBlastChunkDesc::SupportFlag) != 0;
const bool nowSupport = (chunkAnnotation[i] & Asset::ChunkAnnotation::Support) != 0;
if (wasSupport != nowSupport)
{
chunkDescs[i].flags ^= NvBlastChunkDesc::SupportFlag;
}
if ((chunkDescs[i].flags & NvBlastChunkDesc::SupportFlag) != 0)
{
++supportChunkCount;
}
}
return !redundantCoverage && !insufficientCoverage;
}
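// Illustrative note (not part of the SDK): for a chain leaf -> A -> B -> root in which both A and B
// carry the support flag, the walk above keeps only B, the higher of the two; for a chain with no
// support flag anywhere, support is added at the root, the highest position that still covers the leaf.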
bool Asset::testForValidChunkOrder(uint32_t chunkCount, const NvBlastChunkDesc* chunkDescs, const char* chunkAnnotation, void* scratch)
{
char* chunkMarks = static_cast<char*>(memset(scratch, 0, chunkCount));
uint32_t currentParentChunkDescIndex = invalidIndex<uint32_t>();
for (uint32_t i = 0; i < chunkCount; ++i)
{
const uint32_t parentChunkDescIndex = chunkDescs[i].parentChunkDescIndex;
if (!isInvalidIndex(parentChunkDescIndex) && parentChunkDescIndex >= i) // 'chunks should come after their parents'
{
return false;
}
if (parentChunkDescIndex != currentParentChunkDescIndex)
{
if (!isInvalidIndex(currentParentChunkDescIndex))
{
chunkMarks[currentParentChunkDescIndex] = 1;
}
currentParentChunkDescIndex = parentChunkDescIndex;
if (isInvalidIndex(currentParentChunkDescIndex)) // 'root chunks should go first'
{
return false;
}
else if (chunkMarks[currentParentChunkDescIndex] != 0) // 'all chunks with same parent index should go in a row'
{
return false;
}
}
if (i < chunkCount - 1)
{
const bool upperSupport0 = (chunkAnnotation[i] & ChunkAnnotation::UpperSupport) != 0;
const bool upperSupport1 = (chunkAnnotation[i + 1] & ChunkAnnotation::UpperSupport) != 0;
if (!upperSupport0 && upperSupport1) // 'upper-support chunks should come before subsupport chunks'
{
return false;
}
}
}
return true;
}
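// Illustrative note (not part of the SDK): a descriptor order satisfying the rules above, for a root
// chunk 0 with support children 1 and 2 and subsupport grandchildren 3-6, could be:
//
//     chunk index:         0  1  2  3  4  5  6
//     parent desc index:   -  0  0  1  1  2  2
//
// The root comes first, children appear after their parent and are grouped by parent, and all
// upper-support chunks precede the subsupport chunks.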
} // namespace Blast
} // namespace Nv
// API implementation
extern "C"
{
size_t NvBlastGetRequiredScratchForCreateAsset(const NvBlastAssetDesc* desc, NvBlastLog logFn)
{
NVBLASTLL_CHECK(desc != nullptr, logFn, "NvBlastGetRequiredScratchForCreateAsset: NULL desc pointer input.", return 0);
return Nv::Blast::Asset::createRequiredScratch(desc, logFn);
}
size_t NvBlastGetAssetMemorySize(const NvBlastAssetDesc* desc, NvBlastLog logFn)
{
NVBLASTLL_CHECK(desc != nullptr, logFn, "NvBlastGetAssetMemorySize: NULL desc input.", return 0);
return Nv::Blast::Asset::getMemorySize(desc);
}
size_t NvBlastGetAssetMemorySizeFromSizeData(const NvBlastAssetMemSizeData& sizeData, NvBlastLog logFn)
{
NV_UNUSED(logFn);
Nv::Blast::AssetDataOffsets offsets;
return Nv::Blast::createAssetDataOffsets(offsets, sizeData.chunkCount, sizeData.nodeCount, sizeData.bondCount);
}
NvBlastAsset* NvBlastCreateAsset(void* mem, const NvBlastAssetDesc* desc, void* scratch, NvBlastLog logFn)
{
return Nv::Blast::Asset::create(mem, desc, scratch, logFn);
}
size_t NvBlastAssetGetFamilyMemorySize(const NvBlastAsset* asset, NvBlastLog logFn)
{
NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetFamilyMemorySize: NULL asset pointer input.", return 0);
return Nv::Blast::getFamilyMemorySize(reinterpret_cast<const Nv::Blast::Asset*>(asset));
}
size_t NvBlastAssetGetFamilyMemorySizeFromSizeData(const NvBlastAssetMemSizeData& sizeData, NvBlastLog logFn)
{
NV_UNUSED(logFn);
return Nv::Blast::getFamilyMemorySize(sizeData);
}
NvBlastID NvBlastAssetGetID(const NvBlastAsset* asset, NvBlastLog logFn)
{
NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetID: NULL asset pointer input.", NvBlastID zero; memset(&zero, 0, sizeof(NvBlastID)); return zero);
return ((Nv::Blast::Asset*)asset)->m_ID;
}
bool NvBlastAssetSetID(NvBlastAsset* asset, const NvBlastID* id, NvBlastLog logFn)
{
NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetSetID: NULL asset pointer input.", return false);
NVBLASTLL_CHECK(id != nullptr, logFn, "NvBlastAssetSetID: NULL id pointer input.", return false);
((Nv::Blast::Asset*)asset)->m_ID = *id;
return true;
}
uint32_t NvBlastAssetGetFormatVersion(const NvBlastAsset* asset, NvBlastLog logFn)
{
NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetFormatVersion: NULL asset input.", return UINT32_MAX);
return ((Nv::Blast::Asset*)asset)->m_header.formatVersion;
}
uint32_t NvBlastAssetGetSize(const NvBlastAsset* asset, NvBlastLog logFn)
{
NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetSize: NULL asset input.", return 0);
return ((Nv::Blast::Asset*)asset)->m_header.size;
}
uint32_t NvBlastAssetGetChunkCount(const NvBlastAsset* asset, NvBlastLog logFn)
{
NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetChunkCount: NULL asset input.", return 0);
return ((Nv::Blast::Asset*)asset)->m_chunkCount;
}
uint32_t NvBlastAssetGetSupportChunkCount(const NvBlastAsset* asset, NvBlastLog logFn)
{
NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetSupportChunkCount: NULL asset input.", return 0);
const Nv::Blast::Asset* a = reinterpret_cast<const Nv::Blast::Asset*>(asset);
const Nv::Blast::SupportGraph& graph = a->m_graph;
if (graph.m_nodeCount == 0)
{
return 0; // This shouldn't happen
}
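    // If the last graph node maps to an invalid chunk index, it is the world node added during asset
    // creation; it does not correspond to a support chunk, so it is excluded from the count.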
return Nv::Blast::isInvalidIndex(graph.getChunkIndices()[graph.m_nodeCount - 1]) ? graph.m_nodeCount - 1 : graph.m_nodeCount;
}
uint32_t NvBlastAssetGetLeafChunkCount(const NvBlastAsset* asset, NvBlastLog logFn)
{
NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetLeafChunkCount: NULL asset input.", return 0);
return ((Nv::Blast::Asset*)asset)->m_leafChunkCount;
}
uint32_t NvBlastAssetGetFirstSubsupportChunkIndex(const NvBlastAsset* asset, NvBlastLog logFn)
{
NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetFirstSubsupportChunkIndex: NULL asset input.", return 0);
return ((Nv::Blast::Asset*)asset)->m_firstSubsupportChunkIndex;
}
uint32_t NvBlastAssetGetBondCount(const NvBlastAsset* asset, NvBlastLog logFn)
{
NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetBondCount: NULL asset input.", return 0);
return ((Nv::Blast::Asset*)asset)->m_bondCount;
}
const NvBlastSupportGraph NvBlastAssetGetSupportGraph(const NvBlastAsset* asset, NvBlastLog logFn)
{
NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetSupportGraph: NULL asset input.",
NvBlastSupportGraph blank; blank.nodeCount = 0; blank.chunkIndices = blank.adjacencyPartition = blank.adjacentNodeIndices = blank.adjacentBondIndices = nullptr; return blank);
const Nv::Blast::SupportGraph& supportGraph = static_cast<const Nv::Blast::Asset*>(asset)->m_graph;
NvBlastSupportGraph graph;
graph.nodeCount = supportGraph.m_nodeCount;
graph.chunkIndices = supportGraph.getChunkIndices();
graph.adjacencyPartition = supportGraph.getAdjacencyPartition();
graph.adjacentNodeIndices = supportGraph.getAdjacentNodeIndices();
graph.adjacentBondIndices = supportGraph.getAdjacentBondIndices();
return graph;
}
const uint32_t* NvBlastAssetGetChunkToGraphNodeMap(const NvBlastAsset* asset, NvBlastLog logFn)
{
NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetChunkToGraphNodeMap: NULL asset input.", return nullptr);
return static_cast<const Nv::Blast::Asset*>(asset)->getChunkToGraphNodeMap();
}
const NvBlastChunk* NvBlastAssetGetChunks(const NvBlastAsset* asset, NvBlastLog logFn)
{
NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetChunks: NULL asset input.", return 0);
return ((Nv::Blast::Asset*)asset)->getChunks();
}
const NvBlastBond* NvBlastAssetGetBonds(const NvBlastAsset* asset, NvBlastLog logFn)
{
NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetBonds: NULL asset input.", return 0);
return ((Nv::Blast::Asset*)asset)->getBonds();
}
uint32_t NvBlastAssetGetActorSerializationSizeUpperBound(const NvBlastAsset* asset, NvBlastLog logFn)
{
NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetActorSerializationSizeUpperBound: NULL asset input.", return 0);
const Nv::Blast::Asset& solverAsset = *(const Nv::Blast::Asset*)asset;
const uint32_t graphNodeCount = solverAsset.m_graph.m_nodeCount;
// Calculate serialization size for an actor with all graph nodes (and therefore all bonds), and somehow with all graph nodes visible (after all, this is an upper bound).
const uint64_t upperBound = Nv::Blast::getActorSerializationSize(graphNodeCount, solverAsset.getLowerSupportChunkCount(), graphNodeCount, solverAsset.getBondCount());
if (upperBound > UINT32_MAX)
{
NVBLASTLL_LOG_WARNING(logFn, "NvBlastAssetGetActorSerializationSizeUpperBound: Serialization block size exceeds 4GB. Returning 0.\n");
return 0;
}
return static_cast<uint32_t>(upperBound);
}
} // extern "C"
| 40,951 | C++ | 37.094884 | 248 | 0.662817 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastFamily.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTFAMILY_H
#define NVBLASTFAMILY_H
#include "NvBlastAsset.h"
#include "NvPreprocessor.h"
#include "NvBlastDLink.h"
#include "NvBlastAtomic.h"
#include "NvBlastMemory.h"
#include <cstring>
struct NvBlastAsset;
namespace Nv
{
namespace Blast
{
// Forward declarations
class FamilyGraph;
class Actor;
class Asset;
/**
Data header at the beginning of every NvBlastActor family
The block address may be cast to a valid FamilyHeader pointer.
*/
struct FamilyHeader : public NvBlastDataBlock
{
/**
The ID for the asset. This will be resolved into a pointer in the runtime data.
*/
NvBlastID m_assetID;
/**
Actors, of type Actor.
Actors with support chunks will use this array in the range [0, m_asset->m_graphNodeCount),
while subsupport actors will be placed in the range [m_asset->m_graphNodeCount, m_asset->getLowerSupportChunkCount()).
*/
NvBlastBlockArrayData(Actor, m_actorsOffset, getActors, m_asset->getLowerSupportChunkCount());
/**
Visible chunk index links, of type IndexDLink<uint32_t>.
getVisibleChunkIndexLinks returns an array of size m_asset->m_chunkCount of IndexDLink<uint32_t> (see IndexDLink).
*/
NvBlastBlockArrayData(IndexDLink<uint32_t>, m_visibleChunkIndexLinksOffset, getVisibleChunkIndexLinks, m_asset->m_chunkCount);
/**
Chunk actor IDs, of type uint32_t. These correspond to the ID of the actor which owns each chunk. A value of invalidIndex<uint32_t>() indicates no owner.
getChunkActorIndices returns an array of size m_asset->m_firstSubsupportChunkIndex.
*/
NvBlastBlockArrayData(uint32_t, m_chunkActorIndicesOffset, getChunkActorIndices, m_asset->m_firstSubsupportChunkIndex);
/**
    Graph node index links, of type uint32_t. The successor to node index i is getGraphNodeIndexLinks()[i]. A value of invalidIndex<uint32_t>() indicates no successor.
getGraphNodeIndexLinks returns an array of size m_asset->m_graphNodeCount.
*/
NvBlastBlockArrayData(uint32_t, m_graphNodeIndexLinksOffset, getGraphNodeIndexLinks, m_asset->m_graph.m_nodeCount);
/**
Health for each support chunk and subsupport chunk, of type float.
To access support chunks, use the corresponding graph node index in the array returned by getLowerSupportChunkHealths.
To access subsupport chunk healths, use getSubsupportChunkHealths (see documentation for details).
*/
NvBlastBlockArrayData(float, m_lowerSupportChunkHealthsOffset, getLowerSupportChunkHealths, m_asset->getLowerSupportChunkCount());
/**
Utility function to get the start of the subsupport chunk health array.
To access a subsupport chunk health indexed by i, use getSubsupportChunkHealths()[i - m_asset->m_firstSubsupportChunkIndex]
\return the array of health values associated with all descendants of support chunks.
*/
float* getSubsupportChunkHealths() const
{
NVBLAST_ASSERT(m_asset != nullptr);
return (float*)((uintptr_t)this + m_lowerSupportChunkHealthsOffset) + m_asset->m_graph.m_nodeCount;
}
/**
Bond health for the interfaces between two chunks, of type float. Since the bond is shared by two chunks, the same bond health is used for chunk[i] -> chunk[j] as for chunk[j] -> chunk[i].
getBondHealths returns the array of healths associated with all bonds in the support graph.
*/
NvBlastBlockArrayData(float, m_graphBondHealthsOffset, getBondHealths, m_asset->getBondCount());
/**
Bond health for the interfaces between two chunks, of type float. Since the bond is shared by two chunks, the same bond health is used for chunk[i] -> chunk[j] as for chunk[j] -> chunk[i].
getCachedBondHealths returns the array of manually cached healths associated with all bonds in the support graph.
*/
NvBlastBlockArrayData(float, m_graphCachedBondHealthsOffset, getCachedBondHealths, m_asset->getBondCount());
/**
The instance graph for islands searching, of type FamilyGraph.
Return the dynamic data generated for the support graph. (See FamilyGraph.)
This is used to store current connectivity information based upon bond and chunk healths, as well as cached intermediate data for faster incremental updates.
*/
NvBlastBlockData(FamilyGraph, m_familyGraphOffset, getFamilyGraph);
//////// Runtime data ////////
/**
The number of actors using this block.
*/
volatile uint32_t m_actorCount;
/**
The asset corresponding to all actors in this family.
This is runtime data and will be resolved from m_assetID.
*/
union
{
const Asset* m_asset;
uint64_t m_runtimePlaceholder; // Make sure we reserve enough room for an 8-byte pointer
};
//////// Functions ////////
/**
Gets an actor from the actor array and validates it if it is not already valid. This increments the actor reference count.
\param[in] index The index of the actor to borrow. Must be in the range [0, getActorsArraySize()).
\return A pointer to the indexed Actor.
*/
Actor* borrowActor(uint32_t index);
/**
Invalidates the actor if it is not already invalid. This decrements the actor reference count, but does not free this block when the count goes to zero.
\param[in] actor The actor to invalidate.
*/
void returnActor(Actor& actor);
/**
Returns a value to indicate whether or not the Actor with the given index is valid for use (active).
\return true iff the indexed actor is active.
*/
bool isActorActive(uint32_t index) const;
/**
Retrieve the actor from an index. If actor is inactive nullptr is returned.
\param[in] index The index of an actor.
\return A pointer to the indexed actor if the actor is active, nullptr otherwise.
*/
Actor* getActorByIndex(uint32_t index) const;
/**
Retrieve the index of an actor associated with the given chunk.
\param[in] chunkIndex The index of chunk.
\return the index of associated actor in the FamilyHeader's getActors() array.
*/
uint32_t getChunkActorIndex(uint32_t chunkIndex) const;
/**
Retrieve the index of an actor associated with the given node.
\param[in] nodeIndex The index of node.
\return the index of associated actor in the FamilyHeader's getActors() array.
*/
uint32_t getNodeActorIndex(uint32_t nodeIndex) const;
/**
Retrieve an actor associated with the given chunk.
\param[in] chunkIndex The index of chunk.
\return A pointer to the actor if the actor is active, nullptr otherwise.
*/
Actor* getChunkActor(uint32_t chunkIndex) const;
/**
Retrieve an actor associated with the given node.
\param[in] nodeIndex The index of node.
\return A pointer to the actor if the actor is active, nullptr otherwise.
*/
Actor* getNodeActor(uint32_t nodeIndex) const;
//////// Fracturing methods ////////
/**
Hierarchically distribute damage to child chunks.
\param chunkIndex asset chunk index to hierarchically damage
\param suboffset index of the first sub-support health
\param healthDamage damage strength to apply
\param chunkHealths instance chunk healths
\param chunks asset chunk collection
*/
void fractureSubSupportNoEvents(uint32_t chunkIndex, uint32_t suboffset, float healthDamage, float* chunkHealths, const NvBlastChunk* chunks);
/**
Hierarchically distribute damage to child chunks, recording a fracture event for each health damage applied.
If outBuffer is too small, events are dropped but the chunks are still damaged.
\param chunkIndex asset chunk index to hierarchically damage
\param suboffset index of the first sub-support health
\param healthDamage damage strength to apply
\param chunkHealths instance chunk healths
\param chunks asset chunk collection
\param outBuffer target buffer for fracture events
\param currentIndex current position in outBuffer - returns the number of damaged chunks
\param maxCount capacity of outBuffer
*/
void fractureSubSupport(uint32_t chunkIndex, uint32_t suboffset, float healthDamage, float* chunkHealths, const NvBlastChunk* chunks, NvBlastChunkFractureData* outBuffer, uint32_t* currentIndex, const uint32_t maxCount);
/**
Apply chunk fracture commands hierarchically.
\param chunkFractureCount number of chunk fracture commands to apply
\param chunkFractures array of chunk fracture commands
\param filterActor pointer to the actor to filter commands corresponding to other actors. May be NULL to apply all commands
\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
*/
void fractureNoEvents(uint32_t chunkFractureCount, const NvBlastChunkFractureData* chunkFractures, Actor* filterActor, NvBlastLog logFn);
/**
Apply chunk fracture commands hierarchically, recording a fracture event for each health damage applied.
If events array is too small, events are dropped but the chunks are still damaged.
\param chunkFractureCount number of chunk fracture commands to apply
\param commands array of chunk fracture commands
\param events target buffer for fracture events
\param eventsSize number of available entries in 'events'
\param count returns the number of damaged chunks
\param[in] filterActor pointer to the actor to filter commands that target other actors. May be NULL to apply all commands
\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
*/
void fractureWithEvents(uint32_t chunkFractureCount, const NvBlastChunkFractureData* commands, NvBlastChunkFractureData* events, uint32_t eventsSize, uint32_t* count, Actor* filterActor, NvBlastLog logFn);
/**
Apply chunk fracture commands hierarchically, recording a fracture event for each health damage applied.
In-Place version: fracture commands are replaced by fracture events.
If inoutbuffer array is too small, events are dropped but the chunks are still damaged.
\param chunkFractureCount number of chunk fracture commands to apply
\param inoutbuffer array of chunk fracture commands to be replaced by events
\param eventsSize number of available entries in inoutbuffer
\param count returns the number of damaged chunks
\param[in] filterActor pointer to the actor to filter commands that target other actors. May be NULL to apply all commands
\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
*/
void fractureInPlaceEvents(uint32_t chunkFractureCount, NvBlastChunkFractureData* inoutbuffer, uint32_t eventsSize, uint32_t* count, Actor* filterActor, NvBlastLog logFn);
/**
See NvBlastActorApplyFracture
\param[in,out] eventBuffers Target buffers to hold applied fracture events. May be NULL, in which case events are not reported.
To avoid data loss, provide an entry for every lower-support chunk and every bond in the original actor.
\param[in] commands The fracture commands to process.
\param[in] filterActor pointer to the actor to filter commands that target other actors. May be NULL to apply all commands
\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
\param[in,out] timers If non-NULL this struct will be filled out with profiling information for the step, in profile build configurations.
*/
void applyFracture(NvBlastFractureBuffers* eventBuffers, const NvBlastFractureBuffers* commands, Actor* filterActor, NvBlastLog logFn, NvBlastTimers* timers);
};
} // namespace Blast
} // namespace Nv
#include "NvBlastActor.h"
namespace Nv
{
namespace Blast
{
//////// FamilyHeader inline methods ////////
NV_INLINE Actor* FamilyHeader::borrowActor(uint32_t index)
{
NVBLAST_ASSERT(index < getActorsArraySize());
Actor& actor = getActors()[index];
if (actor.m_familyOffset == 0)
{
const uintptr_t offset = (uintptr_t)&actor - (uintptr_t)this;
NVBLAST_ASSERT(offset <= UINT32_MAX);
actor.m_familyOffset = (uint32_t)offset;
atomicIncrement(reinterpret_cast<volatile int32_t*>(&m_actorCount));
}
return &actor;
}
NV_INLINE void FamilyHeader::returnActor(Actor& actor)
{
if (actor.m_familyOffset != 0)
{
actor.m_familyOffset = 0;
// The actor count should be positive since this actor was valid. Check to be safe.
NVBLAST_ASSERT(m_actorCount > 0);
atomicDecrement(reinterpret_cast<volatile int32_t*>(&m_actorCount));
}
}
NV_INLINE bool FamilyHeader::isActorActive(uint32_t index) const
{
NVBLAST_ASSERT(index < getActorsArraySize());
return getActors()[index].m_familyOffset != 0;
}
NV_INLINE Actor* FamilyHeader::getActorByIndex(uint32_t index) const
{
NVBLAST_ASSERT(index < getActorsArraySize());
Actor& actor = getActors()[index];
return actor.isActive() ? &actor : nullptr;
}
NV_INLINE uint32_t FamilyHeader::getChunkActorIndex(uint32_t chunkIndex) const
{
NVBLAST_ASSERT(m_asset);
NVBLAST_ASSERT(chunkIndex < m_asset->m_chunkCount);
if (chunkIndex < m_asset->getUpperSupportChunkCount())
{
return getChunkActorIndices()[chunkIndex];
}
else
{
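        // Subsupport chunks are not listed in the chunk-actor index table; their actors occupy the
        // range [m_graph.m_nodeCount, lowerSupportChunkCount) of the actor array (see getActors()),
        // so the actor index is obtained by offsetting the chunk index.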
return chunkIndex - (m_asset->getUpperSupportChunkCount() - m_asset->m_graph.m_nodeCount);
}
}
NV_INLINE uint32_t FamilyHeader::getNodeActorIndex(uint32_t nodeIndex) const
{
NVBLAST_ASSERT(m_asset);
NVBLAST_ASSERT(nodeIndex < m_asset->m_graph.m_nodeCount);
const uint32_t chunkIndex = m_asset->m_graph.getChunkIndices()[nodeIndex];
return isInvalidIndex(chunkIndex) ? chunkIndex : getChunkActorIndices()[chunkIndex];
}
NV_INLINE Actor* FamilyHeader::getChunkActor(uint32_t chunkIndex) const
{
uint32_t actorIndex = getChunkActorIndex(chunkIndex);
return !isInvalidIndex(actorIndex) ? getActorByIndex(actorIndex) : nullptr;
}
NV_INLINE Actor* FamilyHeader::getNodeActor(uint32_t nodeIndex) const
{
uint32_t actorIndex = getNodeActorIndex(nodeIndex);
return !isInvalidIndex(actorIndex) ? getActorByIndex(actorIndex) : nullptr;
}
//////// Global functions ////////
/**
Returns the number of bytes of memory that a family created using the given asset will require. A pointer
to a block of memory of at least this size must be passed in as the mem argument of createFamily.
\param[in] asset The asset that will be passed into NvBlastAssetCreateFamily.
\param[in] sizeData Alternate version where the counts are known but there is not an existing asset.
*/
size_t getFamilyMemorySize(const Asset* asset);
size_t getFamilyMemorySize(const NvBlastAssetMemSizeData& sizeData);
} // namespace Blast
} // namespace Nv
#endif // ifndef NVBLASTFAMILY_H
| 17,257 | C | 39.228438 | 239 | 0.710726 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastActorSerializationBlock.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTACTORSERIALIZATIONBLOCK_H
#define NVBLASTACTORSERIALIZATIONBLOCK_H
#include "NvBlastFixedBoolArray.h"
namespace Nv
{
namespace Blast
{
/**
Struct-enum which keeps track of the actor serialization format.
*/
struct ActorSerializationFormat
{
enum Version
{
/** Initial version */
Initial,
// New formats must come before Count. They should be given descriptive names with more information in comments.
/** The number of serialization formats. */
Count,
/** The current version. This should always be Count-1 */
Current = Count - 1
};
};
/**
Data header at the beginning of a NvBlastActor serialization block
The block address may be cast to a valid ActorSerializationHeader pointer.
Serialization state is only valid if partition has been called since the last call to findIslands().
*/
struct ActorSerializationHeader
{
/**
A number which is incremented every time the data layout changes.
*/
uint32_t m_formatVersion;
/**
The size of the serialization block, including this header.
Memory sizes are restricted to 32-bit representable values.
*/
uint32_t m_size;
/**
The index of the actor within its family.
*/
uint32_t m_index;
/**
The number of elements in the visible chunk indices list.
*/
uint32_t m_visibleChunkCount;
/**
The number of elements in the graph node indices list.
*/
uint32_t m_graphNodeCount;
/**
The number of leaf chunks in this actor.
*/
uint32_t m_leafChunkCount;
/**
Visible chunk indices, of type uint32_t.
*/
NvBlastBlockArrayData(uint32_t, m_visibleChunkIndicesOffset, getVisibleChunkIndices, m_visibleChunkCount);
/**
Graph node indices, of type uint32_t.
*/
NvBlastBlockArrayData(uint32_t, m_graphNodeIndicesOffset, getGraphNodeIndices, m_graphNodeCount);
/**
Healths for lower support chunks in this actor, in breadth-first order from the support chunks associated with the graph nodes. Type float.
*/
NvBlastBlockData(float, m_lowerSupportChunkHealthsOffset, getLowerSupportChunkHealths);
/**
Healths for bonds associated with support chunks in this actor, in order of graph adjacency from associated graph nodes, i < j only. Type float.
*/
NvBlastBlockData(float, m_bondHealthsOffset, getBondHealths);
/**
Fast route in instance graph calculated for each graph node in this actor, of type uint32_t.
*/
NvBlastBlockArrayData(uint32_t, m_fastRouteOffset, getFastRoute, m_graphNodeCount);
/**
Hop counts in instance graph calculated for each graph node in this actor, of type uint32_t.
*/
NvBlastBlockArrayData(uint32_t, m_hopCountsOffset, getHopCounts, m_graphNodeCount);
/**
"Edge removed" bits for bonds associated with support chunks in this actor, in order of graph adjacency from associated graph nodes, i < j only. Type FixedBoolArray.
*/
NvBlastBlockData(FixedBoolArray, m_edgeRemovedArrayOffset, getEdgeRemovedArray);
};
//////// Global functions ////////
/**
A buffer size sufficient to serialize an actor with a given visible chunk count, lower support chunk count, graph node count, and bond count.
\param[in] visibleChunkCount The number of visible chunks
\param[in] lowerSupportChunkCount The number of lower-support chunks in the asset.
\param[in] graphNodeCount The number of graph nodes in the asset.
\param[in] bondCount The number of graph bonds in the asset.
\return the required buffer size in bytes.
*/
NV_INLINE size_t getActorSerializationSize(uint32_t visibleChunkCount, uint32_t lowerSupportChunkCount, uint32_t graphNodeCount, uint32_t bondCount)
{
    // Serialization block offsets
const size_t visibleChunkIndicesOffset = align16(sizeof(ActorSerializationHeader)); // size = visibleChunkCount*sizeof(uint32_t)
const size_t graphNodeIndicesOffset = align16(visibleChunkIndicesOffset + visibleChunkCount*sizeof(uint32_t)); // size = graphNodeCount*sizeof(uint32_t)
const size_t lowerSupportHealthsOffset = align16(graphNodeIndicesOffset + graphNodeCount*sizeof(uint32_t)); // size = lowerSupportChunkCount*sizeof(float)
const size_t bondHealthsOffset = align16(lowerSupportHealthsOffset + lowerSupportChunkCount*sizeof(float)); // size = bondCount*sizeof(float)
const size_t fastRouteOffset = align16(bondHealthsOffset + bondCount*sizeof(float)); // size = graphNodeCount*sizeof(uint32_t)
const size_t hopCountsOffset = align16(fastRouteOffset + graphNodeCount*sizeof(uint32_t)); // size = graphNodeCount*sizeof(uint32_t)
const size_t edgeRemovedArrayOffset = align16(hopCountsOffset + graphNodeCount*sizeof(uint32_t)); // size = 0 or FixedBoolArray::requiredMemorySize(bondCount)
return align16(edgeRemovedArrayOffset + (bondCount == 0 ? 0 : FixedBoolArray::requiredMemorySize(bondCount)));
}
} // namespace Blast
} // namespace Nv
#endif // ifndef NVBLASTACTORSERIALIZATIONBLOCK_H
| 6,710 | C | 38.710059 | 176 | 0.724292 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastFamilyGraph.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTFAMILYGRAPH_H
#define NVBLASTFAMILYGRAPH_H
#include "NvBlastSupportGraph.h"
#include "NvBlastFixedArray.h"
#include "NvBlastFixedBitmap.h"
#include "NvBlastFixedBoolArray.h"
#include "NvBlastMath.h"
#include "NvBlastFixedPriorityQueue.h"
#include "NvBlastMemory.h"
namespace Nv
{
namespace Blast
{
typedef uint32_t NodeIndex;
typedef NodeIndex IslandId;
typedef uint32_t ActorIndex;
/**
Internal implementation of family graph stored on the family.
It processes full NvBlastSupportGraph graph, stores additional information used for faster islands finding,
keeps and provides access to current islandId for every node.
*/
class FamilyGraph
{
public:
//////// ctor ////////
/**
    Constructor. The family graph is meant to be placed (with placement new) in family memory.
\param[in] nodeCount The number of nodes in the support graph (see SupportGraph)
\param[in] bondCount The number of bonds in the support graph (see SupportGraph)
*/
FamilyGraph(uint32_t nodeCount, const uint32_t bondCount);
/**
Returns memory needed for this class (see fillMemory).
\param[in] nodeCount The number of nodes in the graph.
\param[in] bondCount The number of bonds in the graph.
\return the number of bytes required.
*/
static size_t requiredMemorySize(uint32_t nodeCount, uint32_t bondCount)
{
return fillMemory(nullptr, nodeCount, bondCount);
}
//////// API ////////
/**
Function to initialize graph (all nodes added to dirty list for this actor)
\param[in] actorIndex The index of the actor to initialize graph with. Must be in the range [0, m_nodeCount).
\param[in] graph The static graph data for this family.
*/
void initialize(ActorIndex actorIndex, const SupportGraph* graph);
/**
    Function to notify the graph about a removed edge. The edge's nodes will be added to the dirty list for this actor. Returns true if the bond was removed.
\param[in] actorIndex The index of the actor from which the edge is removed. Must be in the range [0, m_nodeCount).
\param[in] node0 The index of the first node of removed edge. Must be in the range [0, m_nodeCount).
\param[in] node1 The index of the second node of removed edge. Must be in the range [0, m_nodeCount).
\param[in] graph The static graph data for this family.
*/
bool notifyEdgeRemoved(ActorIndex actorIndex, NodeIndex node0, NodeIndex node1, const SupportGraph* graph);
bool notifyEdgeRemoved(ActorIndex actorIndex, NodeIndex node0, NodeIndex node1, uint32_t bondIndex, const SupportGraph* graph);
bool notifyNodeRemoved(ActorIndex actorIndex, NodeIndex nodeIndex, const SupportGraph* graph);
/**
    Function to find new islands by examining dirty nodes associated with this actor (nodes become associated
    with an actor when notifyEdgeRemoved() is called for it).
    \param[in] actorIndex The index of the actor whose portion of the graph (edges and nodes) findIslands will examine. Must be in the range [0, m_nodeCount).
\param[in] scratch User-supplied scratch memory of size findIslandsRequiredScratch(graphNodeCount) bytes.
\param[in] graph The static graph data for this family.
\return the number of new islands found.
*/
uint32_t findIslands(ActorIndex actorIndex, void* scratch, const SupportGraph* graph);
/**
The scratch space required to call the findIslands function, in bytes.
\param[in] graphNodeCount The number of nodes in the graph.
\return the number of bytes required.
*/
static size_t findIslandsRequiredScratch(uint32_t graphNodeCount);
//////// data getters ////////
/**
Utility function to get the start of the island ids array. This is an array of size nodeCount.
    An island id is set for every node; each islandId equals the NodeIndex of the root node of its island.
\return the array of island ids.
*/
NvBlastBlockData(IslandId, m_islandIdsOffset, getIslandIds);
/**
Utility function to get the start of the dirty node links array. This is an array of size nodeCount.
*/
NvBlastBlockData(NodeIndex, m_dirtyNodeLinksOffset, getDirtyNodeLinks);
/**
Utility function to get the start of the first dirty node indices array. This is an array of size nodeCount.
*/
NvBlastBlockData(uint32_t, m_firstDirtyNodeIndicesOffset, getFirstDirtyNodeIndices);
/**
Utility function to get the start of the fast route array. This is an array of size nodeCount.
*/
NvBlastBlockData(NodeIndex, m_fastRouteOffset, getFastRoute);
/**
Utility function to get the start of the hop counts array. This is an array of size nodeCount.
*/
NvBlastBlockData(uint32_t, m_hopCountsOffset, getHopCounts);
/**
    Utility function to get a pointer to the "is edge removed" bitmap. This is a bitmap of size bondCount.
*/
NvBlastBlockData(FixedBoolArray, m_isEdgeRemovedOffset, getIsEdgeRemoved);
/**
    Utility function to get a pointer to the "is node in dirty list" bitmap. This is a bitmap of size nodeCount.
*/
NvBlastBlockData(FixedBoolArray, m_isNodeInDirtyListOffset, getIsNodeInDirtyList);
//////// Debug/Test ////////
uint32_t getEdgesCount(const SupportGraph* graph) const;
bool hasEdge(NodeIndex node0, NodeIndex node1, const SupportGraph* graph) const;
bool canFindRoot(NodeIndex startNode, NodeIndex targetNode, FixedArray<NodeIndex>* visitedNodes, const SupportGraph* graph);
private:
FamilyGraph& operator = (const FamilyGraph&);
//////// internal types ////////
/**
    Used to represent the current graph traversal state.
*/
struct TraversalState
{
NodeIndex mNodeIndex;
uint32_t mCurrentIndex;
uint32_t mPrevIndex;
uint32_t mDepth;
TraversalState()
{
}
TraversalState(NodeIndex nodeIndex, uint32_t currentIndex, uint32_t prevIndex, uint32_t depth) :
mNodeIndex(nodeIndex), mCurrentIndex(currentIndex), mPrevIndex(prevIndex), mDepth(depth)
{
}
};
/**
Queue element for graph traversal with priority queue.
*/
struct QueueElement
{
TraversalState* mState;
uint32_t mHopCount;
QueueElement()
{
}
QueueElement(TraversalState* state, uint32_t hopCount) : mState(state), mHopCount(hopCount)
{
}
};
/**
Queue comparator for graph traversal with priority queue.
*/
struct NodeComparator
{
NodeComparator()
{
}
bool operator() (const QueueElement& node0, const QueueElement& node1) const
{
return node0.mHopCount < node1.mHopCount;
}
private:
NodeComparator& operator = (const NodeComparator&);
};
/**
    Priority queue for graph traversal. The queue element with the smallest hop count will always be on top.
*/
typedef FixedPriorityQueue<QueueElement, NodeComparator> NodePriorityQueue;
//////// internal operations ////////
/**
    Function that calculates the needed memory and fills it if familyGraph is passed. FamilyGraph is designed to use
    the memory placed right after itself, so it should be initialized with placement new on a block of requiredMemorySize() bytes.
    \param[in] familyGraph Pointer to the FamilyGraph instance to fill. May be nullptr, in which case the function only returns the required byte count and does nothing.
\param[in] nodeCount The number of nodes in the graph.
\param[in] bondCount The number of bonds in the graph.
\return the number of bytes required or filled
*/
static size_t fillMemory(FamilyGraph* familyGraph, uint32_t nodeCount, uint32_t bondCount);
/**
    Function to find a route from one node to another. It tries the fast path first as an optimization and, if that fails, performs a brute-force traversal (with a hop-count heuristic).
*/
bool findRoute(NodeIndex startNode, NodeIndex targetNode, IslandId islandId, FixedArray<TraversalState>* visitedNodes, FixedBitmap* isNodeWitness, NodePriorityQueue* priorityQueue, const SupportGraph* graph);
/**
Function to try finding targetNode (from startNode) with getFastRoute().
*/
bool tryFastPath(NodeIndex startNode, NodeIndex targetNode, IslandId islandId, FixedArray<TraversalState>* visitedNodes, FixedBitmap* isNodeWitness, const SupportGraph* graph);
/**
Function to unwind route upon successful finding of root node or witness.
We have found either a witness *or* the root node with this traversal. In the event of finding the root node, hopCount will be 0. In the event of finding
a witness, hopCount will be the hopCount that witness reported as being the distance to the root.
*/
void unwindRoute(uint32_t traversalIndex, NodeIndex lastNode, uint32_t hopCount, IslandId id, FixedArray<TraversalState>* visitedNodes);
/**
Function to add node to dirty node list associated with actor.
*/
void addToDirtyNodeList(ActorIndex actorIndex, NodeIndex node);
/**
    Function used to get the adjacent node for an index into the adjacency partition, returning invalidIndex if the corresponding bond has already been removed.
*/
NodeIndex getAdjacentNode(uint32_t adjacencyIndex, const SupportGraph* graph) const
{
const uint32_t bondIndex = graph->getAdjacentBondIndices()[adjacencyIndex];
return getIsEdgeRemoved()->test(bondIndex) ? invalidIndex<uint32_t>() : graph->getAdjacentNodeIndices()[adjacencyIndex];
}
};
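#if 0
/**
Illustrative usage sketch (not part of the SDK): the typical island-update flow for a family graph.
'graphMem' and 'scratchMem' are assumed to be caller-owned buffers of at least requiredMemorySize()
and findIslandsRequiredScratch() bytes respectively, and 'bondCount' is the bond count of the support
graph 'graph'. A real build would also need <new> for the placement new.
*/
inline uint32_t updateIslandsSketch(void* graphMem, void* scratchMem, const SupportGraph* graph,
                                    uint32_t bondCount, ActorIndex actor, NodeIndex node0, NodeIndex node1)
{
    FamilyGraph* familyGraph = new (graphMem) FamilyGraph(graph->m_nodeCount, bondCount);   // placement-construct on family memory
    familyGraph->initialize(actor, graph);                                                  // all nodes start dirty for this actor
    familyGraph->notifyEdgeRemoved(actor, node0, node1, graph);                             // report a broken bond
    return familyGraph->findIslands(actor, scratchMem, graph);                              // number of new islands found
}
#endif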
} // namespace Blast
} // namespace Nv
#endif // ifndef NVBLASTFAMILYGRAPH_H
| 11,342 | C | 37.063758 | 223 | 0.705519 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastFamilyGraph.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "NvBlastFamilyGraph.h"
#include "NvBlastAssert.h"
#include <vector>
#include <stack>
#define SANITY_CHECKS 0
namespace Nv
{
namespace Blast
{
size_t FamilyGraph::fillMemory(FamilyGraph* familyGraph, uint32_t nodeCount, uint32_t bondCount)
{
// calculate all offsets, and dataSize as a result
NvBlastCreateOffsetStart(sizeof(FamilyGraph));
const size_t NvBlastCreateOffsetAlign16(dirtyNodeLinksOffset, sizeof(NodeIndex) * nodeCount);
const size_t NvBlastCreateOffsetAlign16(firstDirtyNodeIndicesOffset, sizeof(uint32_t) * nodeCount);
const size_t NvBlastCreateOffsetAlign16(islandIdsOffset, sizeof(IslandId) * nodeCount);
const size_t NvBlastCreateOffsetAlign16(fastRouteOffset, sizeof(NodeIndex) * nodeCount);
const size_t NvBlastCreateOffsetAlign16(hopCountsOffset, sizeof(uint32_t) * nodeCount);
const size_t NvBlastCreateOffsetAlign16(isEdgeRemovedOffset, FixedBoolArray::requiredMemorySize(bondCount));
const size_t NvBlastCreateOffsetAlign16(isNodeInDirtyListOffset, FixedBoolArray::requiredMemorySize(nodeCount));
const size_t dataSize = NvBlastCreateOffsetEndAlign16();
// fill only if familyGraph was passed (otherwise we just used this function to get dataSize)
if (familyGraph)
{
familyGraph->m_dirtyNodeLinksOffset = static_cast<uint32_t>(dirtyNodeLinksOffset);
familyGraph->m_firstDirtyNodeIndicesOffset = static_cast<uint32_t>(firstDirtyNodeIndicesOffset);
familyGraph->m_islandIdsOffset = static_cast<uint32_t>(islandIdsOffset);
familyGraph->m_fastRouteOffset = static_cast<uint32_t>(fastRouteOffset);
familyGraph->m_hopCountsOffset = static_cast<uint32_t>(hopCountsOffset);
familyGraph->m_isEdgeRemovedOffset = static_cast<uint32_t>(isEdgeRemovedOffset);
familyGraph->m_isNodeInDirtyListOffset = static_cast<uint32_t>(isNodeInDirtyListOffset);
new (familyGraph->getIsEdgeRemoved()) FixedBoolArray(bondCount);
new (familyGraph->getIsNodeInDirtyList()) FixedBoolArray(nodeCount);
}
return dataSize;
}
FamilyGraph::FamilyGraph(uint32_t nodeCount, const uint32_t bondCount)
{
// fill memory with all internal data
// the node and bond counts are needed for the size calculation
fillMemory(this, nodeCount, bondCount);
// fill arrays with invalid indices / max value (0xFFFFFFFF)
memset(getIslandIds(), 0xFF, nodeCount*sizeof(uint32_t));
memset(getFastRoute(), 0xFF, nodeCount*sizeof(uint32_t));
memset(getHopCounts(), 0xFF, nodeCount*sizeof(uint32_t)); // Initializing to large value
memset(getDirtyNodeLinks(), 0xFF, nodeCount*sizeof(uint32_t)); // No dirty list initially
memset(getFirstDirtyNodeIndices(), 0xFF, nodeCount*sizeof(uint32_t));
getIsNodeInDirtyList()->clear();
getIsEdgeRemoved()->fill();
}
/**
Graph initialization: resets all internal data to its initial state and marks all nodes dirty for this actor.
The first island search will probably be the longest one, as it has to traverse the whole graph and set up the optimization data (fastRoute and hopCounts) for every node.
*/
void FamilyGraph::initialize(ActorIndex actorIndex, const SupportGraph* graph)
{
// used internal data pointers
NodeIndex* dirtyNodeLinks = getDirtyNodeLinks();
uint32_t* firstDirtyNodeIndices = getFirstDirtyNodeIndices();
// link dirty nodes
for (NodeIndex node = 1; node < graph->m_nodeCount; node++)
{
dirtyNodeLinks[node-1] = node;
}
firstDirtyNodeIndices[actorIndex] = 0;
getIsNodeInDirtyList()->fill();
getIsEdgeRemoved()->clear();
}
void FamilyGraph::addToDirtyNodeList(ActorIndex actorIndex, NodeIndex node)
{
// used internal data pointers
FixedBoolArray* isNodeInDirtyList = getIsNodeInDirtyList();
NodeIndex* dirtyNodeLinks = getDirtyNodeLinks();
uint32_t* firstDirtyNodeIndices = getFirstDirtyNodeIndices();
// check the bitmap first to avoid an O(n) list search
if (isNodeInDirtyList->test(node))
return;
// add node to dirty node list head
dirtyNodeLinks[node] = firstDirtyNodeIndices[actorIndex];
firstDirtyNodeIndices[actorIndex] = node;
isNodeInDirtyList->set(node);
}
/**
Removes fast routes and marks involved nodes as dirty
*/
bool FamilyGraph::notifyEdgeRemoved(ActorIndex actorIndex, NodeIndex node0, NodeIndex node1, const SupportGraph* graph)
{
NVBLAST_ASSERT(node0 < graph->m_nodeCount);
NVBLAST_ASSERT(node1 < graph->m_nodeCount);
// used internal data pointers
NodeIndex* fastRoute = getFastRoute();
const uint32_t* adjacencyPartition = graph->getAdjacencyPartition();
const uint32_t* adjacentBondIndices = graph->getAdjacentBondIndices();
// search for bond
for (uint32_t adjacencyIndex = adjacencyPartition[node0]; adjacencyIndex < adjacencyPartition[node0 + 1]; adjacencyIndex++)
{
if (getAdjacentNode(adjacencyIndex, graph) == node1)
{
// found bond
const uint32_t bondIndex = adjacentBondIndices[adjacencyIndex];
// remove bond
getIsEdgeRemoved()->set(bondIndex);
// break the fast route if it goes through this edge:
if (fastRoute[node0] == node1)
fastRoute[node0] = invalidIndex<uint32_t>();
if (fastRoute[node1] == node0)
fastRoute[node1] = invalidIndex<uint32_t>();
// mark nodes dirty (add to list if doesn't exist)
addToDirtyNodeList(actorIndex, node0);
addToDirtyNodeList(actorIndex, node1);
// we don't expect more than one bond between two nodes
return true;
}
}
return false;
}
bool FamilyGraph::notifyEdgeRemoved(ActorIndex actorIndex, NodeIndex node0, NodeIndex node1, uint32_t bondIndex, const SupportGraph* graph)
{
NV_UNUSED(graph);
NVBLAST_ASSERT(node0 < graph->m_nodeCount);
NVBLAST_ASSERT(node1 < graph->m_nodeCount);
getIsEdgeRemoved()->set(bondIndex);
NodeIndex* fastRoute = getFastRoute();
// break the fast route if it goes through this edge:
if (fastRoute[node0] == node1)
fastRoute[node0] = invalidIndex<uint32_t>();
if (fastRoute[node1] == node0)
fastRoute[node1] = invalidIndex<uint32_t>();
// mark nodes dirty (add to list if doesn't exist)
addToDirtyNodeList(actorIndex, node0);
addToDirtyNodeList(actorIndex, node1);
return true;
}
bool FamilyGraph::notifyNodeRemoved(ActorIndex actorIndex, NodeIndex nodeIndex, const SupportGraph* graph)
{
NVBLAST_ASSERT(nodeIndex < graph->m_nodeCount);
// used internal data pointers
NodeIndex* fastRoute = getFastRoute();
const uint32_t* adjacencyPartition = graph->getAdjacencyPartition();
const uint32_t* adjacentBondIndices = graph->getAdjacentBondIndices();
// remove all edges leaving this node
for (uint32_t adjacencyIndex = adjacencyPartition[nodeIndex]; adjacencyIndex < adjacencyPartition[nodeIndex + 1]; adjacencyIndex++)
{
const uint32_t adjacentNodeIndex = getAdjacentNode(adjacencyIndex, graph);
if (!isInvalidIndex(adjacentNodeIndex))
{
const uint32_t bondIndex = adjacentBondIndices[adjacencyIndex];
getIsEdgeRemoved()->set(bondIndex);
if (fastRoute[adjacentNodeIndex] == nodeIndex)
fastRoute[adjacentNodeIndex] = invalidIndex<uint32_t>();
if (fastRoute[nodeIndex] == adjacentNodeIndex)
fastRoute[nodeIndex] = invalidIndex<uint32_t>();
addToDirtyNodeList(actorIndex, adjacentNodeIndex);
}
}
addToDirtyNodeList(actorIndex, nodeIndex);
// ignore this node in partition (only needed for "chunk deleted from graph")
// getIslandIds()[nodeIndex] = invalidIndex<uint32_t>();
return true;
}
void FamilyGraph::unwindRoute(uint32_t traversalIndex, NodeIndex lastNode, uint32_t hopCount, IslandId id, FixedArray<TraversalState>* visitedNodes)
{
// used internal data pointers
IslandId* islandIds = getIslandIds();
NodeIndex* fastRoute = getFastRoute();
uint32_t* hopCounts = getHopCounts();
uint32_t currIndex = traversalIndex;
uint32_t hc = hopCount + 1; //Add on 1 for the hop to the witness/root node.
do
{
TraversalState& state = visitedNodes->at(currIndex);
hopCounts[state.mNodeIndex] = hc++;
islandIds[state.mNodeIndex] = id;
fastRoute[state.mNodeIndex] = lastNode;
currIndex = state.mPrevIndex;
lastNode = state.mNodeIndex;
}
while(currIndex != invalidIndex<uint32_t>());
}
bool FamilyGraph::tryFastPath(NodeIndex startNode, NodeIndex targetNode, IslandId islandId, FixedArray<TraversalState>* visitedNodes, FixedBitmap* isNodeWitness, const SupportGraph* graph)
{
NV_UNUSED(graph);
// used internal data pointers
IslandId* islandIds = getIslandIds();
NodeIndex* fastRoute = getFastRoute();
// prepare for iterating path
NodeIndex currentNode = startNode;
uint32_t visitedNodesInitialSize = visitedNodes->size();
uint32_t depth = 0;
bool found = false;
do
{
// witness ?
if (isNodeWitness->test(currentNode))
{
// Already visited and not tagged with invalid island == a witness!
found = islandIds[currentNode] != invalidIndex<uint32_t>();
break;
}
// reached targetNode ?
if (currentNode == targetNode)
{
found = true;
break;
}
TraversalState state(currentNode, visitedNodes->size(), visitedNodes->size() - 1, depth++);
visitedNodes->pushBack(state);
NVBLAST_ASSERT(isInvalidIndex(fastRoute[currentNode]) || hasEdge(currentNode, fastRoute[currentNode], graph));
islandIds[currentNode] = invalidIndex<uint32_t>();
isNodeWitness->set(currentNode);
currentNode = fastRoute[currentNode];
} while (currentNode != invalidIndex<uint32_t>());
for (uint32_t a = visitedNodesInitialSize; a < visitedNodes->size(); ++a)
{
TraversalState& state = visitedNodes->at(a);
islandIds[state.mNodeIndex] = islandId;
}
// if the fast path failed we have to clear the witness marks on the visited nodes and remove them from the visited list
if (!found)
{
for (uint32_t a = visitedNodesInitialSize; a < visitedNodes->size(); ++a)
{
TraversalState& state = visitedNodes->at(a);
isNodeWitness->reset(state.mNodeIndex);
}
visitedNodes->forceSize_Unsafe(visitedNodesInitialSize);
}
return found;
}
bool FamilyGraph::findRoute(NodeIndex startNode, NodeIndex targetNode, IslandId islandId, FixedArray<TraversalState>* visitedNodes, FixedBitmap* isNodeWitness, NodePriorityQueue* priorityQueue, const SupportGraph* graph)
{
// used internal data pointers
IslandId* islandIds = getIslandIds();
NodeIndex* fastRoute = getFastRoute();
uint32_t* hopCounts = getHopCounts();
const uint32_t* adjacencyPartition = graph->getAdjacencyPartition();
// Firstly, traverse the fast path and tag up witnesses. TryFastPath can fail. In that case, no witnesses are left but this node is permitted to report
// that it is still part of the island. Whichever node lost its fast path will be tagged as dirty and will be responsible for recovering the fast path
// and tagging up the visited nodes
if (fastRoute[startNode] != invalidIndex<uint32_t>())
{
if (tryFastPath(startNode, targetNode, islandId, visitedNodes, isNodeWitness, graph))
return true;
}
// If we got here, there was no fast path. Therefore, we need to fall back on searching for the root node. This is optimized by using "hop counts".
// These are per-node counts that indicate the expected number of hops from this node to the root node. These are lazily evaluated and updated
// as new edges are formed or when traversals occur to re-establish islands. As a result, they may be inaccurate but they still serve the purpose
// of guiding our search to minimize the chances of us doing an exhaustive search to find the root node.
islandIds[startNode] = invalidIndex<uint32_t>();
TraversalState startTraversal(startNode, visitedNodes->size(), invalidIndex<uint32_t>(), 0);
isNodeWitness->set(startNode);
QueueElement element(&visitedNodes->pushBack(startTraversal), hopCounts[startNode]);
priorityQueue->push(element);
do
{
QueueElement currentQE = priorityQueue->pop();
TraversalState& currentState = *currentQE.mState;
NodeIndex& currentNode = currentState.mNodeIndex;
// iterate all edges of currentNode
for (uint32_t adjacencyIndex = adjacencyPartition[currentNode]; adjacencyIndex < adjacencyPartition[currentNode + 1]; adjacencyIndex++)
{
NodeIndex nextIndex = getAdjacentNode(adjacencyIndex, graph);
if (nextIndex != invalidIndex<uint32_t>())
{
if (nextIndex == targetNode)
{
// targetNode found!
unwindRoute(currentState.mCurrentIndex, nextIndex, 0, islandId, visitedNodes);
return true;
}
if (isNodeWitness->test(nextIndex))
{
// We already visited this node. This means that it's either in the priority queue already or we
// visited it on a previous pass. If it was visited on a previous pass, then it already knows what island it's in.
// We now need to test the island id to find out if this node knows the root.
// If it has a valid root id, that id *is* our new root. We can guesstimate our hop count based on the node's properties
IslandId visitedIslandId = islandIds[nextIndex];
if (visitedIslandId != invalidIndex<uint32_t>())
{
// If we get here, we must have found a node that knows a route to our root node. It cannot be in a different island,
// because that would have caused this node to be visited already: totally separate islands trigger a full traversal on
// the orphaned side.
NVBLAST_ASSERT(visitedIslandId == islandId);
unwindRoute(currentState.mCurrentIndex, nextIndex, hopCounts[nextIndex], islandId, visitedNodes);
return true;
}
}
else
{
// This node has not been visited yet, so we need to push it into the stack and continue traversing
TraversalState state(nextIndex, visitedNodes->size(), currentState.mCurrentIndex, currentState.mDepth + 1);
QueueElement qe(&visitedNodes->pushBack(state), hopCounts[nextIndex]);
priorityQueue->push(qe);
isNodeWitness->set(nextIndex);
NVBLAST_ASSERT(islandIds[nextIndex] == islandId);
islandIds[nextIndex] = invalidIndex<uint32_t>(); //Flag as invalid island until we know whether we can find root or an island id.
}
}
}
} while (priorityQueue->size());
return false;
}
size_t FamilyGraph::findIslandsRequiredScratch(uint32_t graphNodeCount)
{
const size_t visitedNodesSize = align16(FixedArray<TraversalState>::requiredMemorySize(graphNodeCount));
const size_t isNodeWitnessSize = align16(FixedBitmap::requiredMemorySize(graphNodeCount));
const size_t priorityQueueSize = align16(NodePriorityQueue::requiredMemorySize(graphNodeCount));
// Aligned and padded
return 16 + visitedNodesSize
+ isNodeWitnessSize
+ priorityQueueSize;
}
uint32_t FamilyGraph::findIslands(ActorIndex actorIndex, void* scratch, const SupportGraph* graph)
{
// check if we have at least 1 dirty node for this actor before proceeding
uint32_t* firstDirtyNodeIndices = getFirstDirtyNodeIndices();
if (isInvalidIndex(firstDirtyNodeIndices[actorIndex]))
return 0;
// used internal data pointers
IslandId* islandIds = getIslandIds();
NodeIndex* fastRoute = getFastRoute();
uint32_t* hopCounts = getHopCounts();
NodeIndex* dirtyNodeLinks = getDirtyNodeLinks();
FixedBoolArray* isNodeInDirtyList = getIsNodeInDirtyList();
// prepare intermediate data on scratch
scratch = (void*)align16((size_t)scratch); // Bump to 16-byte alignment (see padding in findIslandsRequiredScratch)
const uint32_t nodeCount = graph->m_nodeCount;
FixedArray<TraversalState>* visitedNodes = new (scratch)FixedArray<TraversalState>();
scratch = pointerOffset(scratch, align16(FixedArray<TraversalState>::requiredMemorySize(nodeCount)));
FixedBitmap* isNodeWitness = new (scratch)FixedBitmap(nodeCount);
scratch = pointerOffset(scratch, align16(FixedBitmap::requiredMemorySize(nodeCount)));
NodePriorityQueue* priorityQueue = new (scratch)NodePriorityQueue();
scratch = pointerOffset(scratch, align16(NodePriorityQueue::requiredMemorySize(nodeCount)));
// reset nodes visited bitmap
isNodeWitness->clear();
uint32_t newIslandsCount = 0;
while (!isInvalidIndex(firstDirtyNodeIndices[actorIndex]))
{
// Pop head off of dirty node's list
const NodeIndex dirtyNode = firstDirtyNodeIndices[actorIndex];
firstDirtyNodeIndices[actorIndex] = dirtyNodeLinks[dirtyNode];
dirtyNodeLinks[dirtyNode] = invalidIndex<uint32_t>();
NVBLAST_ASSERT(isNodeInDirtyList->test(dirtyNode));
isNodeInDirtyList->reset(dirtyNode);
// clear PriorityQueue
priorityQueue->clear();
// if we already visited this node before in this loop it's not dirty anymore
if (isNodeWitness->test(dirtyNode))
continue;
const IslandId& islandRootNode = islandIds[dirtyNode];
IslandId islandId = islandRootNode; // the same in this implementation
// if this node is island root node we don't need to do anything
if (islandRootNode == dirtyNode)
continue;
// clear the visited nodes list (to be filled during traversal)
visitedNodes->clear();
// try finding island root node from this dirtyNode
if (findRoute(dirtyNode, islandRootNode, islandId, visitedNodes, isNodeWitness, priorityQueue, graph))
{
// We found the root node so let's let every visited node know that we found its root
// and we can also update our hop counts because we recorded how many hops it took to reach this
// node
// We already filled in the path to the root/witness with accurate hop counts. Now we just need to fill in the estimates
// for the remaining nodes and re-define their islandIds. We approximate their path to the root by just routing them through
// the route we already found.
// This loop works because visitedNodes are recorded in the order they were visited and we already filled in the critical path
// so the remainder of the paths will just fork from that path.
for (uint32_t b = 0; b < visitedNodes->size(); ++b)
{
TraversalState& state = visitedNodes->at(b);
if (isInvalidIndex(islandIds[state.mNodeIndex]))
{
hopCounts[state.mNodeIndex] = hopCounts[visitedNodes->at(state.mPrevIndex).mNodeIndex] + 1;
fastRoute[state.mNodeIndex] = visitedNodes->at(state.mPrevIndex).mNodeIndex;
islandIds[state.mNodeIndex] = islandId;
}
}
}
else
{
// NEW ISLAND BORN!
// If I traversed and could not find the root node, then I have established a new island. In this island, I am the root node
// and I will point all my nodes towards me. Furthermore, I have established how many steps it took to reach all nodes in my island
// OK. We need to separate the islands. We have a list of nodes that are part of the new island (visitedNodes) and we know that the
// first node in that list is the root node.
#if SANITY_CHECKS
NVBLAST_ASSERT(!canFindRoot(dirtyNode, islandRootNode, NULL, graph));
#endif
IslandId newIsland = dirtyNode;
newIslandsCount++;
hopCounts[dirtyNode] = 0;
fastRoute[dirtyNode] = invalidIndex<uint32_t>();
islandIds[dirtyNode] = newIsland;
for (uint32_t a = 1; a < visitedNodes->size(); ++a)
{
NodeIndex visitedNode = visitedNodes->at(a).mNodeIndex;
hopCounts[visitedNode] = visitedNodes->at(a).mDepth; //How many hops to root
fastRoute[visitedNode] = visitedNodes->at(visitedNodes->at(a).mPrevIndex).mNodeIndex;
islandIds[visitedNode] = newIsland;
}
}
}
// all dirty nodes processed
return newIslandsCount;
}
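// Typical calling sequence for the functions above (illustrative sketch only; the actor index,
// graph pointer, scratch allocation and alloc16 helper are assumptions of the example, not SDK code):
//
//     familyGraph->notifyEdgeRemoved(actorIndex, node0, node1, bondIndex, graph);           // a bond was broken
//     void* scratch = alloc16(FamilyGraph::findIslandsRequiredScratch(graph->m_nodeCount)); // temporary workspace
//     const uint32_t newIslandCount = familyGraph->findIslands(actorIndex, scratch, graph); // > 0 means new actors split off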
/**
!!! Debug/Test function.
Function to check that a route between the two nodes exists.
*/
bool FamilyGraph::canFindRoot(NodeIndex startNode, NodeIndex targetNode, FixedArray<NodeIndex>* visitedNodes, const SupportGraph* graph)
{
if (visitedNodes)
visitedNodes->pushBack(startNode);
if (startNode == targetNode)
return true;
std::vector<bool> visitedState;
visitedState.resize(graph->m_nodeCount);
for (uint32_t i = 0; i < graph->m_nodeCount; i++)
visitedState[i] = false;
std::stack<NodeIndex> stack;
stack.push(startNode);
visitedState[startNode] = true;
const uint32_t* adjacencyPartition = graph->getAdjacencyPartition();
do
{
NodeIndex currentNode = stack.top();
stack.pop();
for (uint32_t adjacencyIndex = adjacencyPartition[currentNode]; adjacencyIndex < adjacencyPartition[currentNode + 1]; adjacencyIndex++)
{
NodeIndex nextNode = getAdjacentNode(adjacencyIndex, graph);
if (isInvalidIndex(nextNode))
continue;
if (!visitedState[nextNode])
{
if (nextNode == targetNode)
{
return true;
}
visitedState[nextNode] = true;
stack.push(nextNode);
if (visitedNodes)
visitedNodes->pushBack(nextNode);
}
}
} while (!stack.empty());
return false;
}
/**
!!! Debug/Test function.
Function to check if edge exists.
*/
bool FamilyGraph::hasEdge(NodeIndex node0, NodeIndex node1, const SupportGraph* graph) const
{
const uint32_t* adjacencyPartition = graph->getAdjacencyPartition();
uint32_t edges = 0;
for (uint32_t adjacencyIndex = adjacencyPartition[node0]; adjacencyIndex < adjacencyPartition[node0 + 1]; adjacencyIndex++)
{
if (getAdjacentNode(adjacencyIndex, graph) == node1)
{
edges++;
break;
}
}
for (uint32_t adjacencyIndex = adjacencyPartition[node1]; adjacencyIndex < adjacencyPartition[node1 + 1]; adjacencyIndex++)
{
if (getAdjacentNode(adjacencyIndex, graph) == node0)
{
edges++;
break;
}
}
return edges > 0;
}
/**
!!! Debug/Test function.
Function to calculate and return edges count
*/
uint32_t FamilyGraph::getEdgesCount(const SupportGraph* graph) const
{
const uint32_t* adjacencyPartition = graph->getAdjacencyPartition();
uint32_t edges = 0;
for (NodeIndex n = 0; n < graph->m_nodeCount; n++)
{
for (uint32_t adjacencyIndex = adjacencyPartition[n]; adjacencyIndex < adjacencyPartition[n + 1]; adjacencyIndex++)
{
if (getAdjacentNode(adjacencyIndex, graph) != invalidIndex<uint32_t>())
edges++;
}
}
NVBLAST_ASSERT(edges % 2 == 0);
return edges / 2;
}
} // namespace Nv
} // namespace Blast
| 25,750 | C++ | 38.986025 | 220 | 0.668738 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastAsset.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTASSET_H
#define NVBLASTASSET_H
#include "NvBlastSupportGraph.h"
#include "NvBlast.h"
#include "NvBlastAssert.h"
#include "NvBlastIndexFns.h"
#include "NvBlastChunkHierarchy.h"
namespace Nv
{
namespace Blast
{
class Asset : public NvBlastAsset
{
public:
/**
Struct-enum which is used to mark chunk descriptors when building an asset.
*/
struct ChunkAnnotation
{
enum Enum
{
Parent = (1 << 0),
Support = (1 << 1),
SuperSupport = (1 << 2),
// Combinations
UpperSupport = Support | SuperSupport
};
};
/**
Create an asset from a descriptor.
\param[in] mem Pointer to block of memory of at least the size given by getMemorySize(desc). Must be 16-byte aligned.
\param[in] desc Asset descriptor (see NvBlastAssetDesc).
\param[in] scratch User-supplied scratch memory of size createRequiredScratch(desc) bytes.
\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
\return the pointer to the new asset, or nullptr if unsuccessful.
*/
static Asset* create(void* mem, const NvBlastAssetDesc* desc, void* scratch, NvBlastLog logFn);
/**
Returns the number of bytes of memory that an asset created using the given descriptor will require. A pointer
to a block of memory of at least this size must be passed in as the mem argument of create.
\param[in] desc The asset descriptor that will be passed into NvBlastCreateAsset.
*/
static size_t getMemorySize(const NvBlastAssetDesc* desc);
/**
Returns the size of the scratch space (in bytes) required to be passed into the create function, based upon
the input descriptor that will be passed to the create function.
\param[in] desc The descriptor that will be passed to the create function.
\return the number of bytes required.
*/
static size_t createRequiredScratch(const NvBlastAssetDesc* desc, NvBlastLog logFn);
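/*
Creation sketch (illustrative only; the allocation helpers alloc16 and alloc are assumptions
of the example, not part of the SDK):

    const size_t memSize = Asset::getMemorySize(&desc);
    const size_t scratchSize = Asset::createRequiredScratch(&desc, logFn);
    void* mem = alloc16(memSize);        // must be 16-byte aligned, owned by the asset
    void* scratch = alloc(scratchSize);  // only needed for the duration of create()
    Asset* asset = Asset::create(mem, &desc, scratch, logFn);
*/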
/**
Returns the number of upper-support chunks in this asset.
\return the number of upper-support chunks.
*/
uint32_t getUpperSupportChunkCount() const;
/**
Returns the number of lower-support chunks in this asset. This is the required actor buffer size for an Actor family.
\return the number of lower-support chunks.
*/
uint32_t getLowerSupportChunkCount() const;
/**
Returns the number of bonds in this asset's support graph.
\return the number of bonds in this asset's support graph.
*/
uint32_t getBondCount() const;
/**
Returns the number of separate chunk hierarchies in the asset. This will be the initial number of visible chunks in an actor instanced from this asset.
\return the number of separate chunk hierarchies in the asset.
*/
uint32_t getHierarchyCount() const;
/**
Maps all lower-support chunk indices to a contiguous range [0, getLowerSupportChunkCount()).
\param[in] chunkIndex Asset chunk index.
\return an index in the range [0, getLowerSupportChunkCount()) if it is a lower-support chunk, invalidIndex<uint32_t>() otherwise.
*/
uint32_t getContiguousLowerSupportIndex(uint32_t chunkIndex) const;
// Static functions
/**
Function to ensure support coverage of chunks.
Support chunks (marked in the NvBlastChunkDesc struct) must provide full coverage over the asset.
This means that from any leaf chunk to the root node, exactly one chunk must be support. If this condition
is not met, the actual support chunks will be adjusted accordingly.
Chunk order depends on support coverage, so this function should be called before chunk reordering.
\param[out] supportChunkCount The number of support chunks. NOTE - this value is not meaningful if testOnly = true and the return value is false.
\param[out] leafChunkCount The number of leaf chunks. NOTE - this value is not meaningful if testOnly = true and the return value is false.
\param[out] chunkAnnotation User-supplied char array of size chunkCount. NOTE - these values are not meaningful if testOnly = true and the return value is false.
\param[in] chunkCount The number of chunk descriptors.
\param[in] chunkDescs Array of chunk descriptors of size chunkCount. It will be updated accordingly.
\param[in] testOnly If true, this function early-outs if support coverage is not exact. If false, exact coverage is ensured by possibly modifying chunkDescs' flags.
\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
\return true iff coverage was already exact.
*/
static bool ensureExactSupportCoverage(uint32_t& supportChunkCount, uint32_t& leafChunkCount, char* chunkAnnotation, uint32_t chunkCount, NvBlastChunkDesc* chunkDescs, bool testOnly, NvBlastLog logFn);
/**
Tests a set of chunk descriptors to see if chunks are in valid chunk order.
Chunk order conditions checked:
1. 'all chunks with same parent index should go in a row'.
2. 'chunks should come after their parents'.
3. 'root chunks should go first'.
4. 'upper-support chunks should come before subsupport chunks'.
\param[in] chunkCount The number of chunk descriptors.
\param[in] chunkDescs An array of chunk descriptors of length chunkCount.
\param[in] chunkAnnotation Annotation generated from ensureExactSupportCoverage (see ensureExactSupportCoverage).
\param[in] scratch User-supplied scratch memory of chunkCount bytes.
\return true if the descriptors meet the ordering conditions, false otherwise.
*/
static bool testForValidChunkOrder(uint32_t chunkCount, const NvBlastChunkDesc* chunkDescs, const char* chunkAnnotation, void* scratch);
//////// Data ////////
/**
Asset data block header.
*/
NvBlastDataBlock m_header;
/**
ID for this asset.
*/
NvBlastID m_ID;
/**
The total number of chunks in the asset, support and non-support.
*/
uint32_t m_chunkCount;
/**
The support graph.
*/
SupportGraph m_graph;
/**
The number of leaf chunks in the asset.
*/
uint32_t m_leafChunkCount;
/**
Chunks are sorted such that subsupport chunks come last. This is the first subsupport chunk index. Equals m_chunkCount if there are no subsupport chunks.
*/
uint32_t m_firstSubsupportChunkIndex;
/**
The number of bonds in the asset.
*/
uint32_t m_bondCount;
/**
Chunks, of type NvBlastChunk.
getChunks returns an array of size m_chunkCount.
*/
NvBlastBlockArrayData(NvBlastChunk, m_chunksOffset, getChunks, m_chunkCount);
/**
Array of bond data for the interfaces between two chunks. Since the bond is shared by two chunks, the same
bond data is used for chunk[i] -> chunk[j] as for chunk[j] -> chunk[i].
The size of the array is m_graph.adjacencyPartition[m_graph.m_nodeCount]/2.
See NvBlastBond.
getBonds returns an array of size m_bondCount.
*/
NvBlastBlockArrayData(NvBlastBond, m_bondsOffset, getBonds, m_bondCount);
/**
Caching the number of leaf chunks descended from each chunk (including the chunk itself).
This data parallels the Chunks array, and is an array of the same size.
getSubtreeLeafChunkCount returns a uint32_t array of size m_chunkCount.
*/
NvBlastBlockArrayData(uint32_t, m_subtreeLeafChunkCountsOffset, getSubtreeLeafChunkCounts, m_chunkCount);
/**
Mapping from chunk index to graph node index (inverse of m_graph.getChunkIndices()).
getChunkToGraphNodeMap returns a uint32_t array of size m_chunkCount.
*/
NvBlastBlockArrayData(uint32_t, m_chunkToGraphNodeMapOffset, getChunkToGraphNodeMap, m_chunkCount);
//////// Iterators ////////
/**
Chunk hierarchy depth-first iterator. Traverses subtree with root given by startChunkIndex.
If upperSupportOnly == true, then the iterator will not traverse subsupport chunks.
*/
class DepthFirstIt : public ChunkDepthFirstIt
{
public:
/** Constructed from an asset. */
DepthFirstIt(const Asset& asset, uint32_t startChunkIndex, bool upperSupportOnly = false) :
ChunkDepthFirstIt(asset.getChunks(), startChunkIndex, upperSupportOnly ? asset.getUpperSupportChunkCount() : asset.m_chunkCount) {}
};
};
//////// Asset inline member functions ////////
NV_INLINE uint32_t Asset::getUpperSupportChunkCount() const
{
return m_firstSubsupportChunkIndex;
}
NV_INLINE uint32_t Asset::getLowerSupportChunkCount() const
{
return m_graph.m_nodeCount + (m_chunkCount - m_firstSubsupportChunkIndex);
}
NV_INLINE uint32_t Asset::getBondCount() const
{
NVBLAST_ASSERT((m_graph.getAdjacencyPartition()[m_graph.m_nodeCount] & 1) == 0); // The bidirectional graph data should have an even number of edges
return m_graph.getAdjacencyPartition()[m_graph.m_nodeCount] / 2; // Directional bonds, divide by two
}
NV_INLINE uint32_t Asset::getHierarchyCount() const
{
const NvBlastChunk* chunks = getChunks();
for (uint32_t i = 0; i < m_chunkCount; ++i)
{
if (!isInvalidIndex(chunks[i].parentChunkIndex))
{
return i;
}
}
return m_chunkCount;
}
NV_INLINE uint32_t Asset::getContiguousLowerSupportIndex(uint32_t chunkIndex) const
{
NVBLAST_ASSERT(chunkIndex < m_chunkCount);
return chunkIndex < m_firstSubsupportChunkIndex ? getChunkToGraphNodeMap()[chunkIndex] : (chunkIndex - m_firstSubsupportChunkIndex + m_graph.m_nodeCount);
}
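// Worked example for getContiguousLowerSupportIndex (illustrative numbers only): with
// m_graph.m_nodeCount == 6 and m_firstSubsupportChunkIndex == 10, an upper-support chunk maps
// through getChunkToGraphNodeMap(), while subsupport chunk 12 maps to 12 - 10 + 6 == 8, so
// subsupport chunks are appended after the graph nodes in the range [0, getLowerSupportChunkCount()).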
//JDM: Expose this so serialization layer can use it.
NV_C_API Asset* initializeAsset(void* mem, uint32_t chunkCount, uint32_t graphNodeCount, uint32_t leafChunkCount, uint32_t firstSubsupportChunkIndex, uint32_t bondCount, NvBlastLog logFn);
} // namespace Blast
} // namespace Nv
#endif // ifndef NVBLASTASSET_H
| 11,691 | C | 36.354632 | 209 | 0.70473 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NvTask/include/NvGpuDispatcher.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef NV_GPU_DISPATCHER_H
#define NV_GPU_DISPATCHER_H
#include "NvTaskDefine.h"
#include "NvTask.h"
/* forward decl to avoid including <cuda.h> */
typedef struct CUstream_st* CUstream;
namespace nvidia
{
namespace cudamanager
{
struct NvGpuCopyDesc;
class NvCudaContextManager;
}
namespace task
{
NV_PUSH_PACK_DEFAULT
class NvTaskManager;
/** \brief A GpuTask dispatcher
*
* A NvGpuDispatcher executes GpuTasks submitted by one or more TaskManagers (one
* or more scenes). It maintains a CPU worker thread which waits on GpuTask
* "groups" to be submitted. The submission API is explicitly sessioned so that
* GpuTasks are dispatched together as a group whenever possible to improve
* parallelism on the GPU.
*
* A NvGpuDispatcher cannot be allocated ad-hoc, they are created as a result of
* creating a NvCudaContextManager. Every NvCudaContextManager has a NvGpuDispatcher
* instance that can be queried. In this way, each NvGpuDispatcher is tied to
* exactly one CUDA context.
*
* A scene will use CPU fallback Tasks for GpuTasks if the NvTaskManager provided
* to it does not have a NvGpuDispatcher. For this reason, the NvGpuDispatcher must
* be assigned to the NvTaskManager before the NvTaskManager is given to a scene.
*
* Multiple TaskManagers may safely share a single NvGpuDispatcher instance, thus
* enabling scenes to share a CUDA context.
*
* Only failureDetected() is intended for use by the user. The rest of the
* NvGpuDispatcher public methods are reserved for internal use by
* TaskManagers and GpuTasks.
*/
class NvGpuDispatcher
{
public:
/** \brief Record the start of a simulation step
*
* A NvTaskManager calls this function to record the beginning of a simulation
* step. The NvGpuDispatcher uses this notification to initialize the
* profiler state.
*/
virtual void startSimulation() = 0;
/** \brief Record the start of a GpuTask batch submission
*
* A NvTaskManager calls this function to notify the NvGpuDispatcher that one or
* more GpuTasks are about to be submitted for execution. The NvGpuDispatcher
* will not read the incoming task queue until it receives one finishGroup()
* call for each startGroup() call. This is to ensure as many GpuTasks as
* possible are executed together as a group, generating optimal parallelism
* on the GPU.
*/
virtual void startGroup() = 0;
/** \brief Submit a GpuTask for execution
*
* Submitted tasks are pushed onto an incoming queue. The NvGpuDispatcher
* will take the contents of this queue every time the pending group count
* reaches 0 and run the group of submitted GpuTasks as an interleaved
* group.
*/
virtual void submitTask(NvTask& task) = 0;
/** \brief Record the end of a GpuTask batch submission
*
* A NvTaskManager calls this function to notify the NvGpuDispatcher that it is
* done submitting a group of GpuTasks (GpuTasks which were all made ready
* to run by the same prerequisite dependency becoming resolved). If no
* other group submissions are in progress, the NvGpuDispatcher will execute
* the set of ready tasks.
*/
virtual void finishGroup() = 0;
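/* Sketch of how a NvTaskManager is expected to drive the group submission API above
 * (illustrative only, not user-facing code; the ready-task array and its count are assumptions
 * of the example):
 *
 *     gpuDispatcher->startGroup();
 *     for (uint32_t i = 0; i < readyGpuTaskCount; ++i)  // GpuTasks made ready by the same dependency
 *         gpuDispatcher->submitTask(*readyGpuTasks[i]);
 *     gpuDispatcher->finishGroup();                     // the whole batch can now run as one group
 */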
/** \brief Add a CUDA completion prerequisite dependency to a task
*
* A GpuTask calls this function to add a prerequisite dependency on another
* task (usually a CpuTask) preventing that task from starting until all of
* the CUDA kernels and copies already launched have been completed. The
* NvGpuDispatcher will increment that task's reference count, blocking its
* execution, until the CUDA work is complete.
*
* This is generally only required when a CPU task is expecting the results
* of the CUDA kernels to have been copied into host memory.
*
* This mechanism is not at all required to ensure CUDA kernels and
* copies are issued in the correct order. Kernel issue order is determined
* by normal task dependencies. The rule of thumb is to only use a blocking
* completion prerequisite if the task in question depends on a completed
* GPU->Host DMA.
*
* The NvGpuDispatcher issues a blocking event record to CUDA for the purposes
* of tracking the already submitted CUDA work. When this event is
* resolved, the NvGpuDispatcher manually decrements the reference count of
* the specified task, allowing it to execute (assuming it does not have
* other pending prerequisites).
*/
virtual void addCompletionPrereq(NvBaseTask& task) = 0;
/** \brief Retrieve the NvCudaContextManager associated with this
* NvGpuDispatcher
*
* Every NvCudaContextManager has one NvGpuDispatcher, and every NvGpuDispatcher
* has one NvCudaContextManager.
*/
virtual cudamanager::NvCudaContextManager* getCudaContextManager() = 0;
/** \brief Record the end of a simulation frame
*
* A NvTaskManager calls this function to record the completion of its
* dependency graph. If profiling is enabled, the NvGpuDispatcher will
* trigger the retrieval of profiling data from the GPU at this point.
*/
virtual void stopSimulation() = 0;
/** \brief Returns true if a CUDA call has returned a non-recoverable error
*
* A return value of true indicates a fatal error has occurred. To protect
* itself, the NvGpuDispatcher enters a fall through mode that allows GpuTasks
* to complete without being executed. This allows simulations to continue
* but leaves GPU content static or corrupted.
*
* The user may try to recover from these failures by deleting GPU content
* so the visual artifacts are minimized. But there is no way to recover
* the state of the GPU actors before the failure. Once a CUDA context is
* in this state, the only recourse is to create a new CUDA context, a new
* scene, and start over.
*
* This is our "Best Effort" attempt to not turn a soft failure into a hard
* failure because continued use of a CUDA context after it has returned an
* error will usually result in a driver reset. However if the initial
* failure was serious enough, a reset may have already occurred by the time
* we learn of it.
*/
virtual bool failureDetected() const = 0;
/** \brief Force the NvGpuDispatcher into failure mode
*
* This API should be used if user code detects a non-recoverable CUDA
* error. This ensures the NvGpuDispatcher does not launch any further
* CUDA work. Subsequent calls to failureDetected() will return true.
*/
virtual void forceFailureMode() = 0;
/** \brief Returns a pointer to the current in-use profile buffer
*
* The returned pointer should be passed to all kernel launches to enable
* CTA/Warp level profiling. If a data collector is not attached, or CTA
* profiling is not enabled, the pointer will be zero.
*/
virtual void* getCurrentProfileBuffer() const = 0;
/** \brief Register kernel names with PlatformAnalyzer
*
* The returned uint16_t must be stored and used as a base offset for the ID
* passed to the KERNEL_START|STOP_EVENT macros.
*/
virtual uint16_t registerKernelNames(const char**, uint16_t count) = 0;
/** \brief Launch a copy kernel with arbitrary number of copy commands
*
* This method is intended to be called from Kernel GpuTasks, but it can
* function outside of that context as well.
*
* If count is 1, the descriptor is passed to the kernel as arguments, so it
* may be declared on the stack.
*
* If count is greater than 1, the kernel will read the descriptors out of
* host memory. Because of this, the descriptor array must be located in
* page locked (pinned) memory. The provided descriptors may be modified by
* this method (converting host pointers to their GPU mapped equivalents)
* and should be considered *owned* by CUDA until the current batch of work
* has completed, so descriptor arrays should not be freed or modified until
* you have received a completion notification.
*
* If your GPU does not support mapping of page locked memory (SM>=1.1),
* this function degrades to calling CUDA copy methods.
*/
virtual void launchCopyKernel(cudamanager::NvGpuCopyDesc* desc, uint32_t count, CUstream stream) = 0;
/** \brief Query pre launch task that runs before launching gpu kernels.
*
* This is part of an optional feature to schedule multiple gpu features
* at the same time to get kernels to run in parallel.
* \note Do *not* set the continuation on the returned task, but use addPreLaunchDependent().
*/
virtual NvBaseTask& getPreLaunchTask() = 0;
/** \brief Adds a gpu launch task that gets executed after the pre launch task.
*
* This is part of an optional feature to schedule multiple gpu features
* at the same time to get kernels to run in parallel.
* \note Each call adds a reference to the pre-launch task.
*/
virtual void addPreLaunchDependent(NvBaseTask& dependent) = 0;
/** \brief Query post launch task that runs after the gpu is done.
*
* This is part of an optional feature to schedule multiple gpu features
* at the same time to get kernels to run in parallel.
* \note Do *not* set the continuation on the returned task, but use addPostLaunchDependent().
*/
virtual NvBaseTask& getPostLaunchTask() = 0;
/** \brief Adds a task that gets executed after the post launch task.
*
* This is part of an optional feature to schedule multiple gpu features
* at the same time to get kernels to run in parallel.
* \note Each call adds a reference to the post-launch task.
*/
virtual void addPostLaunchDependent(NvBaseTask& dependent) = 0;
protected:
/** \brief protected destructor
*
* GpuDispatchers are allocated and freed by their NvCudaContextManager.
*/
virtual ~NvGpuDispatcher() {}
};
NV_POP_PACK
} } // end nvidia namespace
#endif
| 12,162 | C | 44.048148 | 120 | 0.703009 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NvTask/include/NvTaskDefine.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef NV_TASK_DEFINE_H
#define NV_TASK_DEFINE_H
#include "NvPreprocessor.h"
#define NV_SUPPORT_GPU ((NV_WINDOWS_FAMILY && !NV_WINRT) || NV_LINUX)
namespace nvidia
{
namespace task
{
#ifndef NV_SUPPORT_NVTASK_PROFILING
#define NV_SUPPORT_NVTASK_PROFILING 1
#endif
} } // end nvidia namespace
#endif
| 2,011 | C | 40.061224 | 74 | 0.758329 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NvTask/include/NvTaskManager.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef NV_TASK_MANAGER_H
#define NV_TASK_MANAGER_H
#include "NvTaskDefine.h"
#include "NvSimpleTypes.h"
#include "NvErrorCallback.h"
namespace nvidia
{
namespace task
{
NV_PUSH_PACK_DEFAULT
class NvBaseTask;
class NvTask;
class NvLightCpuTask;
typedef unsigned int NvTaskID;
/**
\brief Identifies the type of each heavyweight NvTask object
\note This enum type is only used by NvTask and GpuTask objects, LightCpuTasks do not use this enum.
@see NvTask
@see NvLightCpuTask
*/
struct NvTaskType
{
/**
* \brief Identifies the type of each heavyweight NvTask object
*/
enum Enum
{
TT_CPU, //!< NvTask will be run on the CPU
TT_GPU, //!< NvTask will be run on the GPU
TT_NOT_PRESENT, //!< Return code when attempting to find a task that does not exist
TT_COMPLETED //!< NvTask execution has been completed
};
};
class NvCpuDispatcher;
class NvGpuDispatcher;
/**
\brief The NvTaskManager interface
A NvTaskManager instance holds references to user-provided dispatcher objects; when tasks are
submitted, the NvTaskManager routes them to the appropriate dispatcher and handles task profiling if enabled.
@see CpuDispatcher
@see NvGpuDispatcher
*/
class NvTaskManager
{
public:
/**
\brief Set the user-provided dispatcher object for CPU tasks
\param[in] ref The dispatcher object.
@see CpuDispatcher
*/
virtual void setCpuDispatcher(NvCpuDispatcher& ref) = 0;
/**
\brief Set the user-provided dispatcher object for GPU tasks
\param[in] ref The dispatcher object.
@see NvGpuDispatcher
*/
virtual void setGpuDispatcher(NvGpuDispatcher& ref) = 0;
/**
\brief Get the user-provided dispatcher object for CPU tasks
\return The CPU dispatcher object.
@see CpuDispatcher
*/
virtual NvCpuDispatcher* getCpuDispatcher() const = 0;
/**
\brief Get the user-provided dispatcher object for GPU tasks
\return The GPU dispatcher object.
@see NvGpuDispatcher
*/
virtual NvGpuDispatcher* getGpuDispatcher() const = 0;
/**
\brief Reset any dependencies between Tasks
\note Will be called at the start of every frame before tasks are submitted.
@see NvTask
*/
virtual void resetDependencies() = 0;
/**
\brief Called by the owning scene to start the task graph.
\note All tasks with a ref count of 1 will be dispatched.
@see NvTask
*/
virtual void startSimulation() = 0;
/**
\brief Called by the owning scene at the end of a simulation step to synchronize the NvGpuDispatcher
@see NvGpuDispatcher
*/
virtual void stopSimulation() = 0;
/**
\brief Called by the worker threads to inform the NvTaskManager that a task has completed processing
\param[in] task The task which has been completed
*/
virtual void taskCompleted(NvTask& task) = 0;
/**
\brief Retrieve a task by name
\param[in] name The unique name of a task
\return The ID of the task with that name, or TT_NOT_PRESENT if not found
*/
virtual NvTaskID getNamedTask(const char* name) = 0;
/**
\brief Submit a task with a unique name.
\param[in] task The task to be executed
\param[in] name The unique name of a task
\param[in] type The type of the task (default TT_CPU)
\return The ID of the task with that name, or TT_NOT_PRESENT if not found
*/
virtual NvTaskID submitNamedTask(NvTask* task, const char* name, NvTaskType::Enum type = NvTaskType::TT_CPU) = 0;
/**
\brief Submit an unnamed task.
\param[in] task The task to be executed
\param[in] type The type of the task (default TT_CPU)
\return The ID of the submitted task
*/
virtual NvTaskID submitUnnamedTask(NvTask& task, NvTaskType::Enum type = NvTaskType::TT_CPU) = 0;
/**
\brief Retrieve a task given a task ID
\param[in] id The ID of the task to return, a valid ID must be passed or results are undefined
\return The task associated with the ID
*/
virtual NvTask* getTaskFromID(NvTaskID id) = 0;
/**
\brief Release the NvTaskManager object, referenced dispatchers will not be released
*/
virtual void release() = 0;
/**
\brief Construct a new NvTaskManager instance with the given [optional] dispatchers
*/
static NvTaskManager* createTaskManager(NvErrorCallback& errorCallback, NvCpuDispatcher* = 0, NvGpuDispatcher* = 0);
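/*
Setup sketch (illustrative only; errorCallback, myDispatcher and myTask are assumed to be
user-provided objects implementing NvErrorCallback, NvCpuDispatcher and NvTask):

    NvTaskManager* tm = NvTaskManager::createTaskManager(errorCallback, &myDispatcher);
    NvTaskID id = tm->submitNamedTask(&myTask, "MyTask");  // defaults to NvTaskType::TT_CPU
    tm->startSimulation();                                 // dispatches all tasks with ref count 1
    tm->stopSimulation();                                  // end of the simulation step
    tm->release();                                         // referenced dispatchers are not released
*/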
protected:
virtual ~NvTaskManager() {}
/*! \cond PRIVATE */
virtual void finishBefore(NvTask& task, NvTaskID taskID) = 0;
virtual void startAfter(NvTask& task, NvTaskID taskID) = 0;
virtual void addReference(NvTaskID taskID) = 0;
virtual void decrReference(NvTaskID taskID) = 0;
virtual int32_t getReference(NvTaskID taskID) const = 0;
virtual void decrReference(NvLightCpuTask&) = 0;
virtual void addReference(NvLightCpuTask&) = 0;
virtual void emitStartEvent(NvBaseTask&, uint32_t threadId=0) = 0;
virtual void emitStopEvent(NvBaseTask&, uint32_t threadId=0) = 0;
/*! \endcond */
friend class NvBaseTask;
friend class NvTask;
friend class NvLightCpuTask;
friend class NvGpuWorkerThread;
};
NV_POP_PACK
} } // end nvidia namespace
#endif
| 7,112 | C | 29.13983 | 120 | 0.6991 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NvTask/include/NvCpuDispatcher.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef NV_CPU_DISPATCHER_H
#define NV_CPU_DISPATCHER_H
#include "NvTaskDefine.h"
#include "NvSimpleTypes.h"
namespace nvidia
{
namespace task
{
class NvBaseTask;
/**
\brief A CpuDispatcher is responsible for scheduling the execution of tasks passed to it by the SDK.
A typical implementation would for example use a thread pool with the dispatcher
pushing tasks onto worker thread queues or a global queue.
@see NvBaseTask
@see NvTask
@see NvTaskManager
*/
class NvCpuDispatcher
{
public:
/**
\brief Called by the TaskManager when a task is to be queued for execution.
Upon receiving a task, the dispatcher should schedule the task
to run when resources are available. After the task has been run,
it should call the release() method and discard its pointer.
\param[in] task The task to be run.
@see NvBaseTask
*/
virtual void submitTask( NvBaseTask& task ) = 0;
/**
\brief Returns the number of available worker threads for this dispatcher.
The SDK will use this count to control how many tasks are submitted. By
matching the number of tasks with the number of execution units task
overhead can be reduced.
*/
virtual uint32_t getWorkerCount() const = 0;
virtual ~NvCpuDispatcher() {}
};
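/*
Minimal dispatcher sketch (illustrative assumption, not part of the SDK): a single-threaded
implementation that runs each task inline on the submitting thread and reports zero worker
threads. Requires NvTask.h for the NvBaseTask definition.

    class InlineCpuDispatcher : public NvCpuDispatcher
    {
    public:
        virtual void submitTask(NvBaseTask& task) { task.run(); task.release(); }
        virtual uint32_t getWorkerCount() const { return 0; }
    };
*/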
} } // end nvidia namespace
#endif
| 3,026 | C | 35.469879 | 101 | 0.743556 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NvTask/include/NvTask.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef NV_TASK_H
#define NV_TASK_H
#include "NvTaskDefine.h"
#include "NvTaskManager.h"
#include "NvAssert.h"
namespace nvidia
{
namespace task
{
/**
* \brief Base class of all task types
*
* NvBaseTask defines a runnable reference counted task with built-in profiling.
*/
class NvBaseTask
{
public:
NvBaseTask() : mEventID(0xFFFF), mProfileStat(0), mTm(0) {}
virtual ~NvBaseTask() {}
/**
* \brief The user-implemented run method where the task's work should be performed
*
* run() methods must be thread safe, stack friendly (no alloca, etc), and
* must never block.
*/
virtual void run() = 0;
/**
* \brief Return a user-provided task name for profiling purposes.
*
* It does not have to be unique, but unique names are helpful.
*
* \return The name of this task
*/
virtual const char* getName() const = 0;
//! \brief Implemented by derived implementation classes
virtual void addReference() = 0;
//! \brief Implemented by derived implementation classes
virtual void removeReference() = 0;
//! \brief Implemented by derived implementation classes
virtual int32_t getReference() const = 0;
/** \brief Implemented by derived implementation classes
*
* A task may assume in its release() method that the task system no longer holds
* references to it - so it may safely run its destructor, recycle itself, etc.
* provided no additional user references to the task exist
*/
virtual void release() = 0;
/**
* \brief Execute user run method with wrapping profiling events.
*
* Optional entry point for use by CpuDispatchers.
*
* \param[in] threadId The threadId of the thread that executed the task.
*/
NV_INLINE void runProfiled(uint32_t threadId=0)
{
mTm->emitStartEvent(*this, threadId);
run();
mTm->emitStopEvent(*this, threadId);
}
/**
* \brief Specify stop event statistic
*
* If called before or while the task is executing, the given value
* will appear in the task's event bar in the profile viewer
*
* \param[in] stat The stat to signal when the task is finished
*/
NV_INLINE void setProfileStat( uint16_t stat )
{
mProfileStat = stat;
}
/**
* \brief Return NvTaskManager to which this task was submitted
*
* Note, can return NULL if task was not submitted, or has been
* completed.
*/
NV_INLINE NvTaskManager* getTaskManager() const
{
return mTm;
}
protected:
uint16_t mEventID; //!< Registered profile event ID
uint16_t mProfileStat; //!< Profiling statistic
NvTaskManager* mTm; //!< Owning NvTaskManager instance
friend class NvTaskMgr;
};
/**
* \brief A NvBaseTask implementation with deferred execution and full dependencies
*
 * A NvTask must be submitted to a NvTaskManager to be executed. Tasks may
 * optionally be named when they are submitted.
*/
class NvTask : public NvBaseTask
{
public:
NvTask() : mTaskID(0) {}
virtual ~NvTask() {}
//! \brief Release method implementation
virtual void release()
{
NV_ASSERT(mTm);
// clear mTm before calling taskCompleted() for safety
NvTaskManager* save = mTm;
mTm = NULL;
save->taskCompleted( *this );
}
//! \brief Inform the NvTaskManager this task must finish before the given
// task is allowed to start.
NV_INLINE void finishBefore( NvTaskID taskID )
{
NV_ASSERT(mTm);
mTm->finishBefore( *this, taskID);
}
//! \brief Inform the NvTaskManager this task cannot start until the given
// task has completed.
NV_INLINE void startAfter( NvTaskID taskID )
{
NV_ASSERT(mTm);
mTm->startAfter( *this, taskID );
}
/**
* \brief Manually increment this task's reference count. The task will
* not be allowed to run until removeReference() is called.
*/
NV_INLINE void addReference()
{
NV_ASSERT(mTm);
mTm->addReference( mTaskID );
}
/**
* \brief Manually decrement this task's reference count. If the reference
* count reaches zero, the task will be dispatched.
*/
NV_INLINE void removeReference()
{
NV_ASSERT(mTm);
mTm->decrReference( mTaskID );
}
/**
* \brief Return the ref-count for this task
*/
NV_INLINE int32_t getReference() const
{
return mTm->getReference( mTaskID );
}
/**
* \brief Return the unique ID for this task
*/
NV_INLINE NvTaskID getTaskID() const
{
return mTaskID;
}
/**
* \brief Called by NvTaskManager at submission time for initialization
*
* Perform simulation step initialization here.
*/
virtual void submitted()
{
mStreamIndex = 0;
mPreSyncRequired = false;
mProfileStat = 0;
}
/**
* \brief Specify that the GpuTask sync flag be set
*/
NV_INLINE void requestSyncPoint()
{
mPreSyncRequired = true;
}
protected:
NvTaskID mTaskID; //!< ID assigned at submission
uint32_t mStreamIndex; //!< GpuTask CUDA stream index
bool mPreSyncRequired; //!< GpuTask sync flag
friend class NvTaskMgr;
friend class NvGpuWorkerThread;
};
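
/**
 * Illustrative sketch (not part of the SDK): a minimal concrete NvTask.  The class name
 * ExampleStageTask is hypothetical.  After submission to an NvTaskManager, dependencies
 * can be declared with startAfter()/finishBefore() using other tasks' IDs.
 */
class ExampleStageTask : public NvTask
{
public:
    virtual void        run()           { /* perform this stage's work; thread safe, non-blocking */ }
    virtual const char* getName() const { return "ExampleStageTask"; }
};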
/**
* \brief A NvBaseTask implementation with immediate execution and simple dependencies
*
* A NvLightCpuTask bypasses the NvTaskManager launch dependencies and will be
* submitted directly to your scene's CpuDispatcher. When the run() function
* completes, it will decrement the reference count of the specified
* continuation task.
*
 * You must use a full-blown NvTask if you want your task to be resolved
 * by another NvTask, if you need more than a single dependency to be
 * resolved when your task completes, or if your task will not be run on the
 * CpuDispatcher.
*/
class NvLightCpuTask : public NvBaseTask
{
public:
NvLightCpuTask()
: mCont( NULL )
, mRefCount( 0 )
{
}
virtual ~NvLightCpuTask()
{
mTm = NULL;
}
/**
* \brief Initialize this task and specify the task that will have its ref count decremented on completion.
*
* Submission is deferred until the task's mRefCount is decremented to zero.
* Note that we only use the NvTaskManager to query the appropriate dispatcher.
*
* \param[in] tm The NvTaskManager this task is managed by
* \param[in] c The task to be executed when this task has finished running
*/
NV_INLINE void setContinuation(NvTaskManager& tm, NvBaseTask* c)
{
NV_ASSERT( mRefCount == 0 );
mRefCount = 1;
mCont = c;
mTm = &tm;
if( mCont )
{
mCont->addReference();
}
}
/**
* \brief Initialize this task and specify the task that will have its ref count decremented on completion.
*
* This overload of setContinuation() queries the NvTaskManager from the continuation
* task, which cannot be NULL.
* \param[in] c The task to be executed after this task has finished running
*/
NV_INLINE void setContinuation( NvBaseTask* c )
{
NV_ASSERT( c );
NV_ASSERT( mRefCount == 0 );
mRefCount = 1;
mCont = c;
if( mCont )
{
mCont->addReference();
mTm = mCont->getTaskManager();
NV_ASSERT( mTm );
}
}
/**
* \brief Retrieves continuation task
*/
NV_INLINE NvBaseTask* getContinuation() const
{
return mCont;
}
/**
* \brief Manually decrement this task's reference count. If the reference
* count reaches zero, the task will be dispatched.
*/
NV_INLINE void removeReference()
{
mTm->decrReference(*this);
}
/** \brief Return the ref-count for this task */
NV_INLINE int32_t getReference() const
{
return mRefCount;
}
/**
* \brief Manually increment this task's reference count. The task will
* not be allowed to run until removeReference() is called.
*/
NV_INLINE void addReference()
{
mTm->addReference(*this);
}
/**
* \brief called by CpuDispatcher after run method has completed
*
* Decrements the continuation task's reference count, if specified.
*/
NV_INLINE void release()
{
if( mCont )
{
mCont->removeReference();
}
}
protected:
NvBaseTask* mCont; //!< Continuation task, can be NULL
volatile int32_t mRefCount; //!< NvTask is dispatched when reaches 0
friend class NvTaskMgr;
};
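
/**
 * Illustrative sketch (not part of the SDK): the typical submission pattern for an
 * NvLightCpuTask subclass.  The names ExampleLightTask and submitExampleLightTask are
 * hypothetical; tm must be a valid NvTaskManager with a CpuDispatcher set.
 */
class ExampleLightTask : public NvLightCpuTask
{
public:
    virtual void        run()           { /* do the work; must not block */ }
    virtual const char* getName() const { return "ExampleLightTask"; }
};

NV_INLINE void submitExampleLightTask(NvTaskManager& tm, ExampleLightTask& task)
{
    task.setContinuation(tm, NULL); // no continuation; tm supplies the dispatcher
    task.removeReference();         // ref count reaches zero, so the task is dispatched
}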
} } // end nvidia namespace
#endif
| 10,684 | C | 28.354396 | 111 | 0.635904 |
NVIDIA-Omniverse/PhysX/blast/source/shared/stress_solver/buffer.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved.
#pragma once
#include "NvPreprocessor.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>     // malloc/free used by the non-Windows POD_Buffer
#include <string.h>     // memcpy used by the non-Windows POD_Buffer
#include <vector>
#if NV_WINDOWS_FAMILY
#define POD_Buffer std::vector
#else
template<typename T, int Alignment = sizeof(T)>
class POD_Buffer
{
public:
POD_Buffer() : _size(0), _capacity(0), _data(nullptr) {}
~POD_Buffer() { deallocate(); }
size_t size() const { return _size; }
void
resize(size_t new_size)
{
if (new_size > _capacity)
{
reserve(new_size);
}
_size = new_size;
}
void
reserve(size_t min_capacity)
{
if (min_capacity > _capacity)
{
void* new_data = allocate(min_capacity);
if (!!_size)
{
memcpy(new_data, _data, _size*sizeof(T));
}
deallocate();
_capacity = min_capacity;
_data = reinterpret_cast<T*>(new_data);
}
}
void
push_back(const T& e)
{
if (_size >= _capacity)
{
reserve(!!_size ? 2*_size : (size_t)16);
}
_data[_size++] = e;
}
void
pop_back()
{
if (!!_size) --_size;
}
T* data() { return _data; }
const T* data() const { return _data; }
T& operator [] (size_t index) { assert(_size > index); return _data[index]; }
const T& operator [] (size_t index) const { assert(_size > index); return _data[index]; }
T& back() { return (*this)[_size-1]; }
const T& back() const { return (*this)[_size-1]; }
private:
void*
allocate(size_t buffer_size)
{
const size_t mem_size = sizeof(T)*buffer_size;
unsigned char* mem = (unsigned char*)malloc(mem_size + Alignment);
const unsigned char offset = (unsigned char)((uintptr_t)Alignment - (uintptr_t)mem % Alignment - 1);
mem += offset;
*mem++ = offset;
return mem;
}
void
deallocate()
{
if (!!_data)
{
unsigned char* cmem = (unsigned char*)_data;
const unsigned char offset = *--cmem;
::free(cmem - offset);
}
_size = 0;
_capacity = 0;
_data = nullptr;
}
size_t _size;
size_t _capacity;
T* _data;
};
#endif
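
/*
 * Illustrative usage sketch (not part of the original file).  POD_Buffer behaves like a
 * minimal std::vector for POD types; on the non-Windows path the storage is aligned to
 * the Alignment template argument.  The function name pod_buffer_usage_example is
 * hypothetical.
 */
inline float pod_buffer_usage_example()
{
    POD_Buffer<float> values;
    values.reserve(64);         // pre-allocate capacity
    values.push_back(1.0f);
    values.push_back(2.0f);
    return values.back();       // returns 2.0f
}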
| 3,827 | C | 28.674418 | 108 | 0.610138 |
NVIDIA-Omniverse/PhysX/blast/source/shared/stress_solver/coupling.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved.
#pragma once
#include "solver_types.h"
#include "anglin6.h"
#include "NvCMath.h"
/**
* Bond coupling data used as a representation of a block column of a "coupling matrix" C,
* which has exactly two non-zero blocks. The non-zero blocks are of the form
*
* / 1 ~r_ij \
* C_ij = s_ij | |.
* \ 0 1 /
*
* This represents the coupling of node i by bond j. The scalar s_ij is +/-1, and for each
* bond (column j of C) s_ij must take on both signs. The matrix factor is again composed
* of blocks, each element a 3x3 matrix. The 0 and 1's are just multiples of the unit matrix,
* and ~r_ij is the 3x3 antisymmetric matrix representing "crossing with the vector r_ij on the
* left" (i.e. (~u)*v = (u) x (v)). The vector r_ij represents the displacement from node i's
* CoM to bond j's centroid.
*/
SIMD_ALIGN_32
(
struct Coupling
{
NvcVec3 offset0;
uint32_t node0;
NvcVec3 offset1;
uint32_t node1;
}
);
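
/*
 * Illustrative sketch (not part of the original file): filling a Coupling entry for a bond
 * joining nodes i and j, given their centers of mass and the bond centroid.  The helper
 * name makeCoupling is hypothetical; StressProcessor::prepare() performs the equivalent
 * setup, with optional bond centering and length scaling.
 */
inline Coupling
makeCoupling(uint32_t node_i, const NvcVec3& com_i, uint32_t node_j, const NvcVec3& com_j, const NvcVec3& bondCentroid)
{
    Coupling c;
    c.node0   = node_i;
    c.offset0 = bondCentroid - com_i;   // r_i: from node i's CoM to the bond centroid
    c.node1   = node_j;
    c.offset1 = bondCentroid - com_j;   // r_j: from node j's CoM to the bond centroid
    return c;
}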
template <typename Elem, typename Scalar = Float_Scalar>
struct CouplingMatrixOps
{
/**
* Sparse matrix-vector multiply y = C*x, where C is a "coupling matrix" represented by columns
* of type Coupling (see the comments for Coupling).
*
* \param[out] y Resulting column Elem vector of length M.
* \param[in] C Input M x N coupling matrix.
* \param[in] x Input column Elem vector of length N.
* \param[in] M The number of rows in y and C.
* \param[in] N The number of rows in x and columns in C.
*/
inline void
rmul(Elem* y, const Coupling* C, const Elem* x, uint32_t M, uint32_t N)
{
memset(y, 0, sizeof(AngLin6)*M);
for (uint32_t j = 0 ; j < N; ++j)
{
const Coupling& c = C[j];
const AngLin6& x_j = x[j];
AngLin6& y0 = y[c.node0];
AngLin6& y1 = y[c.node1];
y0.ang += x_j.ang - (c.offset0^x_j.lin);
y0.lin += x_j.lin;
y1.ang -= x_j.ang - (c.offset1^x_j.lin);
y1.lin -= x_j.lin;
}
}
/**
* Sparse matrix-vector multiply y = x*C, where C is a "coupling matrix" represented by columns
* of type Coupling (see the comments for Coupling).
*
* \param[out] y Resulting row Elem vector of length N.
     * \param[in]  x    Input row Elem vector of length M, indexed by the node indices stored in C.
     * \param[in]  C    Input M x N coupling matrix.
* \param[in] M The number of columns in x and rows in C.
* \param[in] N The number of columns in y and C.
*/
inline void
lmul(Elem* y, const Elem* x, const Coupling* C, uint32_t M, uint32_t N)
{
NV_UNUSED(M);
for (uint32_t j = 0; j < N; ++j)
{
const Coupling& c = C[j];
const AngLin6& x0 = x[c.node0];
const AngLin6& x1 = x[c.node1];
AngLin6& y_j = y[j];
y_j.ang = x0.ang - x1.ang;
y_j.lin = x0.lin - x1.lin + (c.offset0^x0.ang) - (c.offset1^x1.ang);
}
}
};
template <typename Elem>
struct CouplingMatrixOps<Elem, SIMD_Scalar>
{
/**
* Sparse matrix-vector multiply y = C*x, where C is a "coupling matrix" represented by columns
* of type Coupling (see the comments for Coupling).
*
* \param[out] y Resulting column Elem vector of length M.
* \param[in] C Input M x N coupling matrix.
* \param[in] x Input column Elem vector of length N.
* \param[in] M The number of rows in y and C.
* \param[in] N The number of rows in x and columns in C.
*/
inline void
rmul(Elem* y, const Coupling* C, const Elem* x, uint32_t M, uint32_t N)
{
memset(y, 0, sizeof(AngLin6)*M);
for (uint32_t j = 0 ; j < N; ++j)
{
const Coupling& c = C[j];
const AngLin6& x_j = x[j];
AngLin6& y0 = y[c.node0];
AngLin6& y1 = y[c.node1];
__m256 _x = _mm256_load_ps(&x_j.ang.x);
__m256 _y0 = _mm256_load_ps(&y0.ang.x);
__m256 _y1 = _mm256_load_ps(&y1.ang.x);
__m256 _c = _mm256_load_ps(&c.offset0.x);
_y0 = _mm256_add_ps(_y0, _x);
_y1 = _mm256_sub_ps(_y1, _x);
__m128 _xl = _mm256_extractf128_ps(_x, 1);
__m256 _a = pair_cross3(_mm256_set_m128(_xl, _xl), _c);
_y0 = _mm256_add_ps(_y0, _mm256_set_m128(_mm_setzero_ps(), _mm256_castps256_ps128(_a)));
_y1 = _mm256_sub_ps(_y1, _mm256_set_m128(_mm_setzero_ps(), _mm256_extractf128_ps(_a, 1)));
_mm256_store_ps(&y0.ang.x, _y0);
_mm256_store_ps(&y1.ang.x, _y1);
}
}
/**
* Sparse matrix-vector multiply y = x*C, where C is a "coupling matrix" represented by columns
* of type Coupling (see the comments for Coupling).
*
* \param[out] y Resulting row Elem vector of length N.
     * \param[in]  x    Input row Elem vector of length M, indexed by the node indices stored in C.
     * \param[in]  C    Input M x N coupling matrix.
* \param[in] M The number of columns in x and rows in C.
* \param[in] N The number of columns in y and C.
*/
inline void
lmul(Elem* y, const Elem* x, const Coupling* C, uint32_t M, uint32_t N)
{
NV_UNUSED(M);
for (uint32_t j = 0; j < N; ++j)
{
const Coupling& c = C[j];
const AngLin6& x0 = x[c.node0];
const AngLin6& x1 = x[c.node1];
AngLin6& y_j = y[j];
__m256 _x0 = _mm256_load_ps(&x0.ang.x);
__m256 _x1 = _mm256_load_ps(&x1.ang.x);
__m256 _c = _mm256_load_ps(&c.offset0.x);
__m256 _y = _mm256_sub_ps(_x0, _x1);
__m256 _a = pair_cross3(_c, _mm256_set_m128(_mm256_castps256_ps128(_x1), _mm256_castps256_ps128(_x0)));
_y = _mm256_add_ps(_y, _mm256_set_m128(_mm_sub_ps(_mm256_castps256_ps128(_a), _mm256_extractf128_ps(_a, 1)), _mm_setzero_ps()));
_mm256_store_ps(&y_j.ang.x, _y);
}
}
private:
inline __m256
pair_cross3(const __m256& v0, const __m256& v1)
{
__m256 prep0 = _mm256_shuffle_ps(v0, v0, 0xc9);
__m256 prep1 = _mm256_shuffle_ps(v1, v1, 0xc9);
__m256 res_shuffled = _mm256_fmsub_ps(v0, prep1, _mm256_mul_ps(prep0, v1));
return _mm256_shuffle_ps(res_shuffled, res_shuffled, 0xc9);
}
};
| 8,039 | C | 38.219512 | 140 | 0.591243 |
NVIDIA-Omniverse/PhysX/blast/source/shared/stress_solver/bond.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved.
#pragma once
#include "coupling.h"
#include "inertia.h"
#include "anglin6.h"
/**
* BondMatrix
*
* Given a BondMatrix B, when (B^T)*B is applied to a vector of bond impulses, the result
* is a vector of the differences between the the resulting accelerations of the nodes
* joined by each bond.
*
* This is done in block form, so a vector is composed of vector elements. Each element
 * is a 6-dimensional vector, composed of an angular part followed by a linear part.
* Matrix blocks are likewise 6x6.
*
* This matrix is composed of two sparse matrices:
* An M x M block diagonal matrix I, where the i^th diagonal block is the 6x6 matrix:
*
* / I_i 0 \
* I_ii = | |
* \ 0 m_i /
*
* Except for possibly I_i, each "element" in I_ii is a multiple of the 3x3 unit matrix. I_i is a
* 3x3 symmetric inertia tensor. See the definition of Inertia<TensorType> for its representation.
*
* The second component is the coupling matrix C, see documentation for Coupling.
*
* The matrix represented by this object is (I^-1/2)*C, an M x N matrix.
*
* NOTE: I and C are _not_ stored as described above, for efficiency.
*/
template <typename TensorType>
struct BondMatrix
{
/** Constructor clears member data. */
BondMatrix() : C(nullptr), sqrt_I_inv(nullptr), scratch(nullptr), M(0), N(0) {}
/**
* Set fields (shallow pointer copy).
*
* \param[in] _C Coupling matrix, see the documentation for Coupling.
* \param[in] _sqrt_I_inv The inverse of the square root of the diagonal mass and inertia tensor, represented by a
* vector of _M Inertia structs for the diagonal values. The i^th element is the reciprocal
* of the square root of the mass and inertia tensor of node i.
* \param[in] _scratch Scratch memory required to carry out a multiply. Must be at least _M*sizeof(AngLin6) bytes.
* \param[in] _M The number of nodes.
* \param[in] _N The number of bonds.
*/
void
set(const Coupling* _C, const Inertia<TensorType>* _sqrt_I_inv, void* _scratch, uint32_t _M, uint32_t _N)
{
C = _C;
sqrt_I_inv = _sqrt_I_inv;
scratch = _scratch;
M = _M;
N = _N;
}
const Coupling* C;
const Inertia<TensorType>* sqrt_I_inv;
void* scratch;
uint32_t M, N;
};
typedef BondMatrix<float> BondMatrixS;
typedef BondMatrix<NvcVec3> BondMatrixD;
typedef BondMatrix<NvcMat33> BondMatrixG;
template<typename TensorType, typename Scalar>
struct BondMatrixOps
{
/**
* Matrix-vector multiply y = B*x.
*
     * \param[out] y    Resulting column vector of length M.
     * \param[in]  B    Input MxN matrix representation.
     * \param[in]  x    Input column vector of length N.
* \param[in] M Number of rows in B.
* \param[in] N Number of columns in B.
*/
inline void
rmul(AngLin6* y, const BondMatrix<TensorType>& B, const AngLin6* x, uint32_t M, uint32_t N) const
{
NV_UNUSED(M); // BondMatrix stores these
NV_UNUSED(N);
// Calculate y = C*x (apply C)
CouplingMatrixOps<AngLin6, Scalar>().rmul(y, B.C, x, B.M, B.N);
// Calculate y = (I^-1/2)*C*x (apply I^-1/2)
InertiaMatrixOps<Scalar>().mul(y, B.sqrt_I_inv, y, B.M);
}
/**
* Matrix-vector multiply y = x*B.
*
* \param[out] y Resulting row vector of length B.N.
     * \param[in]  x    Input row vector of length B.M.
* \param[in] B Input matrix representation.
* \param[in] M Number of rows in B.
* \param[in] N Number of columns in B.
*/
inline void
lmul(AngLin6* y, const AngLin6* x, const BondMatrix<TensorType>& B, uint32_t M, uint32_t N) const
{
NV_UNUSED(M); // BondMatrix stores these
NV_UNUSED(N);
AngLin6* s = (AngLin6*)B.scratch; // M-sized scratch s
// Calculate s = (I^-1/2)*x (apply I^-1/2)
InertiaMatrixOps<Scalar>().mul(s, B.sqrt_I_inv, x, B.M);
// Calculate y = (C^T)*(I^-1/2)*x (apply C^T)
CouplingMatrixOps<AngLin6, Scalar>().lmul(y, s, B.C, B.M, B.N);
}
};
template<typename Scalar>
using BondMatrixOpsS = BondMatrixOps<float, Scalar>;
template<typename Scalar>
using BondMatrixOpsD = BondMatrixOps<NvcVec3, Scalar>;
template<typename Scalar>
using BondMatrixOpsG = BondMatrixOps<NvcMat33, Scalar>;
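
/*
 * Illustrative sketch (not part of the original file): applying B = (I^-1/2)*C to a vector
 * of per-bond values, producing a per-node result.  The function name applyBondMatrix is
 * hypothetical; B must have been set() with valid couplings, inertias and scratch memory,
 * y must have length B.M, and x must have length B.N.
 */
inline void
applyBondMatrix(AngLin6* y, const BondMatrixS& B, const AngLin6* x)
{
    BondMatrixOpsS<Float_Scalar>().rmul(y, B, x, B.M, B.N);
}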
| 6,063 | C | 37.624204 | 124 | 0.652812 |
NVIDIA-Omniverse/PhysX/blast/source/shared/stress_solver/solver_types.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved.
#pragma once
#include "NvCTypes.h"
#include "simd/simd.h"
/**
* Scalar types for SIMD and non-SIMD calculations.
* Currently also used as a template argument to distinguish code paths. May need a different
* scheme if two codepaths use the same scalar type.
*/
typedef __m128 SIMD_Scalar;
typedef float Float_Scalar;
/**
* Holds the components of a rigid body description that are necessary for the stress solver.
*/
template<typename InertiaType>
struct SolverNode
{
NvcVec3 CoM;
float mass;
InertiaType inertia;
};
typedef SolverNode<float> SolverNodeS;
typedef SolverNode<NvcVec3> SolverNodeD;
typedef SolverNode<NvcMat33> SolverNodeG;
/**
* Holds the components of a rigid body bond description that are necessary for the stress solver.
*/
struct SolverBond
{
NvcVec3 centroid;
uint32_t nodes[2]; // Index into accompanying SolverNode<InertiaType> array.
};
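
/*
 * Illustrative sketch (not part of the original file): a two-node system joined by a single
 * bond, in the form expected by the stress solver.  The values and the function name
 * makeTwoNodeExample are hypothetical.
 */
inline void makeTwoNodeExample(SolverNodeS nodes[2], SolverBond& bond)
{
    nodes[0].CoM = {0.0f, 0.0f, 0.0f};  nodes[0].mass = 1.0f;  nodes[0].inertia = 1.0f;
    nodes[1].CoM = {1.0f, 0.0f, 0.0f};  nodes[1].mass = 2.0f;  nodes[1].inertia = 2.0f;

    bond.centroid = {0.5f, 0.0f, 0.0f}; // halfway between the two CoMs
    bond.nodes[0] = 0;
    bond.nodes[1] = 1;
}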
| 2,498 | C | 36.298507 | 98 | 0.7494 |
NVIDIA-Omniverse/PhysX/blast/source/shared/stress_solver/stress.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved.
#pragma once
#include "bond.h"
#include "buffer.h"
class StressProcessor
{
public:
/** Constructor clears member data. */
StressProcessor() : m_mass_scale(0.0f), m_length_scale(0.0f), m_can_resume(false) {}
/** Parameters controlling the data preparation. */
struct DataParams
{
bool equalizeMasses = false; // Use the geometric mean of the nodes' masses instead of the individual masses.
bool centerBonds = false; // Place the bond position halfway between adjoining nodes' CoMs.
};
/** Parameters controlling the solver behavior. */
struct SolverParams
{
        uint32_t maxIter = 0;           // The maximum number of iterations.  If 0, the solver chooses a default based on the problem size.
float tolerance = 1.e-6f; // The relative tolerance threshold for convergence. Iteration will stop when this is reached.
bool warmStart = false; // Whether or not to use the solve function's 'impulses' parameter as a starting input vector.
};
/**
* Build the internal representation of the stress network from nodes and bonds.
* This only needs to be called initially, and any time the nodes or bonds change.
*
* \param[in] nodes Array of SolverNodeS (scalar inertia).
* \param[in] N_nodes Number of elements in the nodes array.
* \param[in] bonds Array of SolverBond. The node indices in each bond entry correspond to the ordering of the nodes array.
* \param[in] N_bonds Number of elements in the bonds array.
* \param[in] params Parameters affecting the processing of the input data (see DataParams).
*/
void prepare(const SolverNodeS* nodes, uint32_t N_nodes, const SolverBond* bonds, uint32_t N_bonds, const DataParams& params);
/**
* Solve for the bond impulses given the velocities of each node. The function prepare(...) must be called
* before this can be used, but then solve(...) may be called multiple times.
*
* The vector elements (impulses and velocities) hold linear and angular parts.
*
* \param[out] impulses Output array of impulses exerted by each bond. For a warm or hot start, this is also used as an input.
* Must be of length N_bonds passed into the prepare(...) function.
* \param[in] velocities Input array of external velocities on each node. Must be of length N_nodes passed into the prepare(...) function.
* \param[in] params Parameters affecting the solver characteristics (see SolverParams).
* \param[out] error_sq (Optional) If not NULL, *error_sq will be filled with the angular and linear square errors (solver residuals). Default = NULL.
* \param[in] resume (Optional) Set to true if impulses and velocities have not changed since last call, to resume solving. Default = false.
*
* \return the number of iterations taken to converge, if it converges. Otherwise, returns minus the number of iterations before exiting.
*/
int solve(AngLin6* impulses, const AngLin6* velocities, const SolverParams& params, AngLin6ErrorSq* error_sq = nullptr, bool resume = false);
/**
* Removes the indexed bond from the solver.
*
* \param[in] bondIndex The index of the bond to remove. Must be less than getBondCount().
*
* \return true iff successful.
*/
bool removeBond(uint32_t bondIndex);
/**
* \return the number of nodes in the stress network. (Set by prepare(...).)
*/
uint32_t getNodeCount() const { return (uint32_t)m_recip_sqrt_I.size(); }
/**
* \return the number of bonds in the stress network. (Set by prepare(...), possibly reduced by removeBond(...).)
*/
uint32_t getBondCount() const { return (uint32_t)m_couplings.size(); }
/**
* \return whether or not the solver uses SIMD. If the device and OS support SSE, AVX, and FMA instruction sets, SIMD is used.
*/
static bool usingSIMD() { return s_use_simd; }
protected:
float m_mass_scale;
float m_length_scale;
POD_Buffer<InertiaS> m_recip_sqrt_I;
POD_Buffer<Coupling> m_couplings;
BondMatrixS m_B;
POD_Buffer<AngLin6> m_rhs;
POD_Buffer<AngLin6> m_B_scratch;
POD_Buffer<AngLin6> m_solver_cache;
bool m_can_resume;
static const bool s_use_simd;
};
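
/*
 * Illustrative usage sketch (not part of the original header): the typical prepare/solve
 * flow.  The function name exampleStressSolve is hypothetical; the caller supplies the
 * node, bond and velocity data and receives one impulse per bond.
 */
inline int
exampleStressSolve(const SolverNodeS* nodes, uint32_t N_nodes,
                   const SolverBond* bonds, uint32_t N_bonds,
                   const AngLin6* velocities, AngLin6* impulses)
{
    StressProcessor processor;

    StressProcessor::DataParams dataParams;     // defaults: no mass equalization or bond centering
    processor.prepare(nodes, N_nodes, bonds, N_bonds, dataParams);

    StressProcessor::SolverParams solverParams; // defaults: cold start, tolerance = 1e-6
    return processor.solve(impulses, velocities, solverParams);
}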
| 6,085 | C | 49.716666 | 159 | 0.672309 |
NVIDIA-Omniverse/PhysX/blast/source/shared/stress_solver/inertia.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved.
#pragma once
#include "solver_types.h"
#include "NvCMath.h"
/**
* Holds an inertia component and a mass component.
* The inertial component is represented by a TensorType, which may be a float (representing a multiple of
* the unit matrix), an NvcVec3 (representing the non-zero components of a diagonal inertia tensor), or a
* 3x3 symmetric matrix representing a general inertia tensor.
*
* This structure might also be used to store reciprocals, or powers (e.g. square roots) of these quantities.
*/
template <typename TensorType>
struct Inertia
{
TensorType I;
float m;
};
typedef Inertia<float> InertiaS;
typedef Inertia<NvcVec3> InertiaD;
typedef Inertia<NvcMat33> InertiaG;
template<typename Scalar = Float_Scalar>
struct InertiaMatrixOps
{
/**
* Matrix-vector multiply y = I*x.
*
* Apply a block-diagonal inertia matrix I to a vector of AngLin6 elements.
* x and y may be the same vector.
*
* \param[out] y Resulting column vector of length N.
* \param[in] I Input inertia matrix representation.
* \param[in] x Input column vector of length N.
* \param[in] N Number of columns in x and y, and the square size of I.
*
* x and y may be the same vector.
*/
inline void
mul(AngLin6* y, const InertiaS* I, const AngLin6* x, uint32_t N)
{
for (uint32_t i = 0; i < N; ++i)
{
const InertiaS& I_i = I[i];
const AngLin6& x_i = x[i];
AngLin6& y_i = y[i];
y_i.ang = I_i.I*x_i.ang;
y_i.lin = I_i.m*x_i.lin;
}
}
};
template<>
struct InertiaMatrixOps<SIMD_Scalar>
{
/**
* Matrix-vector multiply y = I*x.
*
* Apply a block-diagonal inertia matrix I to a vector of AngLin6 elements.
*
* \param[out] y Resulting column vector of length N.
* \param[in] I Input inertia matrix representation.
* \param[in] x Input column vector of length N.
* \param[in] N Number of columns in x and y, and the square size of I.
*
* x and y may be the same vector.
*/
inline void
mul(AngLin6* y, const InertiaS* I, const AngLin6* x, uint32_t N)
{
for (uint32_t i = 0; i < N; ++i)
{
const InertiaS& I_i = I[i];
const AngLin6& x_i = x[i];
AngLin6& y_i = y[i];
__m256 _x = _mm256_load_ps(&x_i.ang.x);
__m128 _Il = _mm_load1_ps(&I_i.I);
__m128 _Ih = _mm_load1_ps(&I_i.m);
__m256 _I = _mm256_set_m128(_Ih,_Il);
__m256 _y = _mm256_mul_ps(_I, _x);
_mm256_store_ps(&y_i.ang.x, _y);
}
}
};
| 4,263 | C | 35.444444 | 109 | 0.645555 |
NVIDIA-Omniverse/PhysX/blast/source/shared/stress_solver/anglin6.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved.
#pragma once
#include "NvCMath.h"
#include "simd/simd.h"
/**
* Holds an angular and linear component, for angular and linear velocities, accelerations, impulses, torques and forces, etc.
*/
SIMD_ALIGN_32(
struct AngLin6
{
SIMD_ALIGN_16(NvcVec3 ang);
SIMD_ALIGN_16(NvcVec3 lin);
}
);
/**
* Holds the angular and linear components of the calculated error.
*/
struct AngLin6ErrorSq
{
float ang, lin;
};
/**
* SISD AngLin6 operations.
*/
template<typename Scalar = float>
struct AngLin6Ops
{
/** r = x + y */
inline void add(AngLin6& r, const AngLin6& x, const AngLin6& y) { r.ang = x.ang + y.ang; r.lin = x.lin + y.lin; }
/** r = x - y */
inline void sub(AngLin6& r, const AngLin6& x, const AngLin6& y) { r.ang = x.ang - y.ang; r.lin = x.lin - y.lin; }
/** r = c*x + y */
inline void madd(AngLin6& r, float c, const AngLin6& x, const AngLin6& y) { r.ang = c*x.ang + y.ang; r.lin = c*x.lin + y.lin; }
/** r = -c*x + y */
inline void nmadd(AngLin6& r, float c, const AngLin6& x, const AngLin6& y) { r.ang = y.ang - c*x.ang; r.lin = y.lin - c*x.lin; }
/** Vector add */
inline void vadd(AngLin6* r, const AngLin6* x, const AngLin6* y, uint32_t N) { while (N--) add(*r++, *x++, *y++); }
/** Vector sub */
inline void vsub(AngLin6* r, const AngLin6* x, const AngLin6* y, uint32_t N) { while (N--) sub(*r++, *x++, *y++); }
/** Vector madd */
inline void vmadd(AngLin6* r, float c, const AngLin6* x, const AngLin6* y, uint32_t N) { while (N--) madd(*r++, c, *x++, *y++); }
/** Vector nmadd */
inline void vnmadd(AngLin6* r, float c, const AngLin6* x, const AngLin6* y, uint32_t N) { while (N--) nmadd(*r++, c, *x++, *y++); }
/**
* Vector-of-vectors dot product.
*
* \param[in] v Vector of AngLin6, of length N.
* \param[in] w Vector of AngLin6, of length N.
* \param[in] N Number of elements in v and w.
*
* return (v|w).
*/
inline float
dot(const AngLin6* v, const AngLin6* w, uint32_t N)
{
float result = 0.0f;
for (uint32_t i = 0; i < N; ++i)
{
const AngLin6& v_i = v[i];
const AngLin6& w_i = w[i];
result += (v_i.ang|w_i.ang) + (v_i.lin|w_i.lin);
}
return result;
}
/**
* Vector-of-vectors length squared.
*
     * Equivalent to dot(v, v, N), but could be faster in some cases.
*
* \param[in] v Vector of AngLin6, of length N.
* \param[in] N Number of elements in v.
*
* return |v|^2.
*/
inline float
length_sq(const AngLin6* v, uint32_t N)
{
float result = 0.0f;
for (uint32_t i = 0; i < N; ++i)
{
const AngLin6& v_i = v[i];
result += (v_i.ang|v_i.ang) + (v_i.lin|v_i.lin);
}
return result;
}
/**
* Vector-of-vectors length squared, split into angular and linear contributions.
*
* \param[out] error_sq Sum of the squared angular and linear parts of v.
* \param[in] v Vector of AngLin6, of length N.
* \param[in] N Number of elements in v.
*
* \return the sum of the squared angular and linear errors, error.ang + error.lin.
*/
inline float
calculate_error(AngLin6ErrorSq& error_sq, const AngLin6* v, uint32_t N)
{
error_sq.ang = error_sq.lin = 0.0f;
for (uint32_t i = 0; i < N; ++i)
{
const AngLin6& v_i = v[i];
error_sq.ang += v_i.ang|v_i.ang;
error_sq.lin += v_i.lin|v_i.lin;
}
return error_sq.ang + error_sq.lin;
}
};
/**
* SIMD AngLin6 operations.
*/
template<>
struct AngLin6Ops<__m128>
{
/** r = x + y */
inline void
add(AngLin6& r, const AngLin6& x, const AngLin6& y)
{
__m256 _x = _mm256_load_ps(&x.ang.x);
__m256 _y = _mm256_load_ps(&y.ang.x);
__m256 _r = _mm256_add_ps(_x, _y);
_mm256_store_ps(&r.ang.x, _r);
}
/** r = x - y */
inline void
sub(AngLin6& r, const AngLin6& x, const AngLin6& y)
{
__m256 _x = _mm256_load_ps(&x.ang.x);
__m256 _y = _mm256_load_ps(&y.ang.x);
__m256 _r = _mm256_sub_ps(_x, _y);
_mm256_store_ps(&r.ang.x, _r);
}
/** r = c*x + y */
inline void
madd(AngLin6& r, __m128 c, const AngLin6& x, const AngLin6& y)
{
__m256 _c = _mm256_set_m128(c, c);
__m256 _x = _mm256_load_ps(&x.ang.x);
__m256 _y = _mm256_load_ps(&y.ang.x);
__m256 _r = _mm256_fmadd_ps(_c, _x, _y);
_mm256_store_ps(&r.ang.x, _r);
}
/** r = -c*x + y */
inline void
nmadd(AngLin6& r, __m128 c, const AngLin6& x, const AngLin6& y)
{
__m256 _c = _mm256_set_m128(c, c);
__m256 _x = _mm256_load_ps(&x.ang.x);
__m256 _y = _mm256_load_ps(&y.ang.x);
__m256 _r = _mm256_fnmadd_ps(_c, _x, _y);
_mm256_store_ps(&r.ang.x, _r);
}
/** Vector add */
inline void vadd(AngLin6* r, const AngLin6* x, const AngLin6* y, uint32_t N) { while (N--) add(*r++, *x++, *y++); }
/** Vector sub */
inline void vsub(AngLin6* r, const AngLin6* x, const AngLin6* y, uint32_t N) { while (N--) sub(*r++, *x++, *y++); }
/** Vector madd */
inline void vmadd(AngLin6* r, __m128 c, const AngLin6* x, const AngLin6* y, uint32_t N) { while (N--) madd(*r++, c, *x++, *y++); }
/** Vector nmadd */
inline void vnmadd(AngLin6* r, __m128 c, const AngLin6* x, const AngLin6* y, uint32_t N) { while (N--) nmadd(*r++, c, *x++, *y++); }
/**
* Vector-of-vectors dot product.
*
* \param[in] v Vector of AngLin6, of length N.
* \param[in] w Vector of AngLin6, of length N.
* \param[in] N Number of elements in v and w.
*
* return (v|w).
*/
inline __m128
dot(const AngLin6* v, const AngLin6* w, uint32_t N)
{
__m256 _res = _mm256_setzero_ps();
for (uint32_t i = 0; i < N; ++i)
{
__m256 _v = _mm256_load_ps((const float*)(v+i));
__m256 _w = _mm256_load_ps((const float*)(w+i));
_res = _mm256_add_ps(_res, _mm256_dp_ps(_v, _w, 0x7f));
}
return _mm_add_ps(_mm256_castps256_ps128(_res), _mm256_extractf128_ps(_res, 1));
}
/**
* Vector-of-vectors length squared.
*
     * Equivalent to dot(v, v, N), but could be faster in some cases.
*
* \param[in] v Vector of AngLin6, of length N.
* \param[in] N Number of elements in v.
*
* return |v|^2.
*/
inline __m128
length_sq(const AngLin6* v, uint32_t N)
{
__m256 _res = _mm256_setzero_ps();
for (uint32_t i = 0; i < N; ++i)
{
__m256 _v = _mm256_load_ps((const float*)(v+i));
_res = _mm256_add_ps(_res, _mm256_dp_ps(_v, _v, 0x7f));
}
return _mm_add_ps(_mm256_castps256_ps128(_res), _mm256_extractf128_ps(_res, 1));
}
/**
* Vector-of-vectors length squared, split into angular and linear contributions.
*
* \param[out] error_sq Sum of the squared angular and linear parts of v.
* \param[in] v Vector of AngLin6, of length N.
* \param[in] N Number of elements in v.
*
* \return the sum of the squared angular and linear errors, error.ang + error.lin.
*/
inline __m128
calculate_error(AngLin6ErrorSq& error_sq, const AngLin6* v, uint32_t N)
{
__m256 _res = _mm256_setzero_ps();
for (uint32_t i = 0; i < N; ++i)
{
__m256 _v = _mm256_load_ps((const float*)(v+i));
_res = _mm256_add_ps(_res, _mm256_dp_ps(_v, _v, 0x7f));
}
__m128 _ang_sq = _mm256_castps256_ps128(_res);
__m128 _lin_sq = _mm256_extractf128_ps(_res, 1);
_mm_store_ss(&error_sq.ang, _ang_sq);
_mm_store_ss(&error_sq.lin, _lin_sq);
return _mm_add_ps(_ang_sq, _lin_sq);
}
};
| 9,664 | C | 33.151943 | 139 | 0.552566 |
NVIDIA-Omniverse/PhysX/blast/source/shared/stress_solver/stress.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved.
#include "stress.h"
#include "math/cgnr.h"
#include "simd/simd_device_query.h"
#include <algorithm>
#include <cmath>
#define MASS_AND_LENGTH_SCALING 1
typedef CGNR<AngLin6, AngLin6Ops<Float_Scalar>, BondMatrixS, BondMatrixOpsS<Float_Scalar>, Float_Scalar, AngLin6ErrorSq> CGNR_SISD;
typedef CGNR<AngLin6, AngLin6Ops<SIMD_Scalar>, BondMatrixS, BondMatrixOpsS<SIMD_Scalar>, SIMD_Scalar, AngLin6ErrorSq> CGNR_SIMD;
/**
* StressProcessor static members
*/
// Check for SSE, FMA3, and AVX support
const bool
StressProcessor::s_use_simd =
device_supports_instruction_set(InstructionSet::SSE) && // Basic SSE
device_supports_instruction_set(InstructionSet::FMA3) && // Fused Multiply-Add instructions
device_supports_instruction_set(InstructionSet::OSXSAVE) && // OS uses XSAVE and XRSTORE instructions allowing saving YMM registers on context switch
device_supports_instruction_set(InstructionSet::AVX) && // Advanced Vector Extensions (256 bit operations)
os_supports_avx_restore(); // OS has enabled the required extended state for AVX
/**
* StressProcessor methods
*/
void
StressProcessor::prepare(const SolverNodeS* nodes, uint32_t N_nodes, const SolverBond* bonds, uint32_t N_bonds, const DataParams& params)
{
m_recip_sqrt_I.resize(N_nodes);
m_couplings.resize(N_bonds);
m_rhs.resize(N_nodes);
m_B_scratch.resize(N_nodes);
m_solver_cache.resize(s_use_simd ? CGNR_SIMD().required_cache_size(N_nodes, N_bonds) : CGNR_SISD().required_cache_size(N_nodes, N_bonds));
m_can_resume = false;
// Calculate bond offsets and length scale
uint32_t offsets_to_scale = 0;
m_length_scale = 0.0f;
for (uint32_t i = 0; i < N_bonds; ++i)
{
const SolverBond& bond = bonds[i];
const uint32_t b0 = bond.nodes[0];
const uint32_t b1 = bond.nodes[1];
Coupling& c = m_couplings[i];
NvcVec3 offset0, offset1;
if (!params.centerBonds)
{
offset0 = nodes[b0].mass > 0 ? bond.centroid - nodes[b0].CoM : nodes[b1].CoM - bond.centroid;
offset1 = nodes[b1].mass > 0 ? bond.centroid - nodes[b1].CoM : nodes[b0].CoM - bond.centroid;
}
else
{
if (nodes[b0].mass <= 0)
{
offset1 = bond.centroid - nodes[b1].CoM;
offset0 = -offset1;
}
else
if (nodes[b1].mass <= 0)
{
offset0 = bond.centroid - nodes[b0].CoM;
offset1 = -offset0;
}
else
{
offset0 = 0.5f*(nodes[b1].CoM - nodes[b0].CoM);
offset1 = -offset0;
}
}
if (nodes[b0].mass > 0.0f)
{
++offsets_to_scale;
m_length_scale += std::sqrt(offset0|offset0);
}
if (nodes[b1].mass > 0.0f)
{
++offsets_to_scale;
m_length_scale += std::sqrt(offset1|offset1);
}
c.offset0 = offset0;
c.node0 = bond.nodes[0];
c.offset1 = offset1;
c.node1 = bond.nodes[1];
}
#if MASS_AND_LENGTH_SCALING
m_length_scale = offsets_to_scale ? m_length_scale / offsets_to_scale : 1.0f;
#else
m_length_scale = 1.0f;
#endif
// Scale offsets by length scale
const float recip_length_scale = 1.0f/m_length_scale;
for (uint32_t j = 0; j < N_bonds; ++j)
{
Coupling& coupling = m_couplings[j];
coupling.offset0 *= recip_length_scale;
coupling.offset1 *= recip_length_scale;
}
// Set mass scale to geometric mean of the masses
m_mass_scale = 0.0f;
uint32_t nonzero_mass_count = 0;
for (uint32_t i = 0; i < N_nodes; ++i)
{
if (nodes[i].mass > 0.0f)
{
m_mass_scale += std::log(nodes[i].mass);
++nonzero_mass_count;
}
}
#if MASS_AND_LENGTH_SCALING
m_mass_scale = nonzero_mass_count ? std::exp(m_mass_scale / nonzero_mass_count) : 1.0f;
#else
m_mass_scale = 1.0f;
#endif
// Generate I^-1/2
std::vector<InertiaS> invI(N_nodes);
const float inertia_scale = m_mass_scale*m_length_scale*m_length_scale;
if (!params.equalizeMasses)
{
for (uint32_t i = 0; i < N_nodes; ++i)
{
invI[i] =
{
nodes[i].inertia > 0.0f ? inertia_scale/nodes[i].inertia : 0.0f,
nodes[i].mass > 0.0f ? m_mass_scale/nodes[i].mass : 0.0f
};
m_recip_sqrt_I[i] = { std::sqrt(invI[i].I), std::sqrt(invI[i].m) };
}
}
else
{
for (uint32_t i = 0; i < N_nodes; ++i)
{
invI[i] =
{
nodes[i].inertia > 0.0f ? 1.0f : 0.0f,
nodes[i].mass > 0.0f ? 1.0f : 0.0f
};
m_recip_sqrt_I[i] = { std::sqrt(invI[i].I), std::sqrt(invI[i].m) };
}
}
// Create sparse matrix representation for B = (I^-1/2)*C
m_B.set(m_couplings.data(), m_recip_sqrt_I.data(), m_B_scratch.data(), N_nodes, N_bonds);
}
int
StressProcessor::solve(AngLin6* impulses, const AngLin6* velocities, const SolverParams& params, AngLin6ErrorSq* error_sq /* = nullptr */, bool resume /* = false */)
{
const InertiaS* sqrt_I_inv = m_recip_sqrt_I.data();
const uint32_t N_nodes = getNodeCount();
const uint32_t N_bonds = getBondCount();
void* cache = m_solver_cache.data();
const float recip_length_scale = 1.0f/m_length_scale;
// Apply length and mass scaling to impulses if warm-starting
if (params.warmStart)
{
const float recip_mass_scale = 1.0f/m_mass_scale;
const float recip_linear_impulse_scale = recip_length_scale*recip_mass_scale;
const float recip_angular_impulse_scale = recip_length_scale*recip_linear_impulse_scale;
for (uint32_t j = 0; j < N_bonds; ++j)
{
impulses[j].ang *= recip_angular_impulse_scale;
impulses[j].lin *= recip_linear_impulse_scale;
}
}
// Calculate r.h.s. vector b = -(I^1/2)*velocities
AngLin6* b = m_rhs.data();
for (uint32_t i = 0; i < N_nodes; ++i)
{
const InertiaS& I_i = sqrt_I_inv[i];
const AngLin6& v_i = velocities[i];
AngLin6& b_i = b[i];
b_i.ang = v_i.ang/(-(I_i.I > 0 ? I_i.I : 1.0f));
b_i.lin = (-recip_length_scale/(I_i.m > 0 ? I_i.m : 1.0f))*v_i.lin;
}
// Solve B*J = b for J, where B = (I^-1/2)*C and b = -(I^1/2)*v.
// Since CGNR does this by solving (B^T)*B*J = (B^T)*b, this actually solves
// (C^T)*(I^-1)*C*J = -(C^T)*v for J, which is the equation we really wanted to solve.
const uint32_t maxIter = params.maxIter ? params.maxIter : 6*std::max(N_nodes, N_bonds);
// Set solver warmth
const unsigned warmth = params.warmStart ? (m_can_resume && resume ? 2 : 1) : 0;
// Choose solver based on parameters
const int result = s_use_simd ?
CGNR_SIMD().solve(impulses, m_B, b, N_nodes, N_bonds, cache, error_sq, params.tolerance, maxIter, warmth) :
CGNR_SISD().solve(impulses, m_B, b, N_nodes, N_bonds, cache, error_sq, params.tolerance, maxIter, warmth);
// Undo length and mass scaling
const float linear_impulse_scale = m_length_scale*m_mass_scale;
const float angular_impulse_scale = m_length_scale*linear_impulse_scale;
for (uint32_t j = 0; j < N_bonds; ++j)
{
impulses[j].ang *= angular_impulse_scale;
impulses[j].lin *= linear_impulse_scale;
}
m_can_resume = true;
return result;
}
bool
StressProcessor::removeBond(uint32_t bondIndex)
{
if (bondIndex >= getBondCount()) return false;
m_couplings[bondIndex] = m_couplings.back();
m_couplings.pop_back();
--m_B.N;
m_can_resume = false;
return true;
}
| 9,381 | C++ | 34.94636 | 165 | 0.609636 |
NVIDIA-Omniverse/PhysX/blast/source/shared/stress_solver/math/cgnr.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved.
#pragma once
#include <stdint.h>
#include <cstring> // for memcpy, memset
#include "simd/simd.h"
template<typename Elem, typename ElemOps, typename Mat, typename MatOps, typename Scalar = float, typename Error = float>
struct CGNR
{
/**
* Conjugate Gradient Normal Equation Residual (CGNR) solver for systems of M equations and N unknowns.
*
* Based on Matrix Computations (4th ed.) by Golub and Van Loan, section 11.3.9.
*
* Solves A*x = b.
*
* Template arguments:
* Elem: the type of element used in the vectors x and b, and (implicitly) in the matrix A.
*
* ElemOps: a class which defines various functions on Elem type and vectors of Elem type.
*
* Mat: the explicit type used to represent the matrix, allowing e.g. for sparse representations.
*
* MatOps: a class which defines the functions rmul and lmul, which multiply a matrix of type Mat
* by an Elem-typed vector on the right and left, respectively. The function signatures must be:
*
* void rmul(Elem* y, const Mat& A, const Elem* x, uint32_t M, uint32_t N); // y = A*x
* void lmul(Elem* y, const Elem* x, const Mat& A, uint32_t M, uint32_t N); // y = x*A
*
* Scalar: set to float by default. May be used to keep all operations in a particular representation, e.g. SIMD registers.
*
* \param[out] x User-supplied Elem vector of length N, filled with the solution upon exit (if successful).
* \param[in] A System M x N matrix of type Mat.
* \param[in] b Right hand side of equation to be solved, an Elem vector of length M.
* \param[in] M The number of rows in A and elements in b.
* \param[in] N The number of columns in A and elements in x.
* \param[in] cache Cache memory provided by the user, must be at least required_cache_size(M, N) bytes, and sizeof(Elem)-byte aligned.
* \param[out] error_ptr If not null, returns the square magnitude error calculated from residual.
* \param[in] tol (Optional) relative convergence threshold for |(A^T)*(A*x-b)|/|b|. Default value is 10^-6.
* \param[in] max_it (Optional) the maximum number of internal iterations. If set to 0, the maximum is N. Default value is 0.
* \param[in] warmth (Optional) valid values are 0, 1, and 2. 0 => cold, clears the x vector and ignores the cache.
* 1 => warm, uses the x vector as a starting solution, but still ignores the cache. 2 => hot, uses the x
* vector as a starting solution, and the cache must be valid. Default value is 0.
* N.B. if warmth == 2, then this function must have been called previously, and the equation values
* (x, A, b, M, and N) as well as the cache must not have been changed since the last call.
*
* return the number of iterations taken to converge, if it converges. Otherwise, returns minus the number of iterations before exiting.
*/
int
solve
(
Elem* x,
const Mat& A,
const Elem* b,
uint32_t M,
uint32_t N,
void* cache,
Error* error_ptr = nullptr,
float tol = 1.e-6f,
uint32_t max_it = 0,
unsigned warmth = 0
)
{
// Cache and temporary storage
static_assert(sizeof(Elem) >= sizeof(Scalar), "sizeof(Elem) must be at least as great as sizeof(Scalar).");
float* z_last_sq_mem = (float*)cache; cache = (Elem*)z_last_sq_mem + 1; // Elem-sized storage
float* delta_sq_mem = (float*)cache; cache = (Elem*)delta_sq_mem + 1; // Elem-sized storage
Elem* z = (Elem*)cache; cache = z + N; // Array of length N
Elem* p = (Elem*)cache; cache = p + N; // Array of length N
Elem* r = (Elem*)cache; cache = r + M; // Array of length M
Elem* s = (Elem*)cache; // Array of length M
Scalar z_last_sq, delta_sq;
load_float(z_last_sq, z_last_sq_mem);
load_float(delta_sq, delta_sq_mem);
if (warmth < 2) // Not hot
{
delta_sq = mul(tol*tol, ElemOps().length_sq(b, M)); // Calculate allowed residual length squared and cache it
store_float(delta_sq_mem, delta_sq);
memcpy(r, b, sizeof(Elem)*M); // Initialize residual r = b
if (warmth) // Warm start, r = b - A*x
{
MatOps().rmul(s, A, x, M, N);
ElemOps().vsub(r, r, s, M);
}
else memset(x, 0, sizeof(Elem)*N); // Cold start, x = 0 so r = b
warmth = 0; // This lets p be initialized in the loop below
}
Error error;
// Iterate
if (!max_it) max_it = N; // Default to a maximum of N iterations
uint32_t it = 0;
do
{
MatOps().lmul(z, r, A, M, N); // Set z = (A^T)*r
const Scalar z_sq = ElemOps().calculate_error(error, z, N); // Calculate residual (of modified equation) length squared
if (le(z_sq, delta_sq)) break; // Terminate (convergence) if within tolerance
if (warmth || warmth++) ElemOps().vmadd(p, div(z_sq, z_last_sq), p, z, N); // If not cold set p = z + (|z|^2/|z_last|^2)*p, and make warm hereafter
else memcpy(p, z, sizeof(Elem)*N); // If cold set p = z
z_last_sq = z_sq;
MatOps().rmul(s, A, p, M, N); // Calculate s = A*p
const Scalar mu = div(z_sq, ElemOps().length_sq(s, M)); // mu = |z|^2 / |A*p|^2
ElemOps().vmadd(x, mu, p, x, N); // x += mu*p
ElemOps().vnmadd(r, mu, s, r, M); // r -= mu*s
} while (++it < max_it);
// Store off remainder of state (the rest was maintained in memory with array operations)
store_float(z_last_sq_mem, z_last_sq);
// Store off the error if requested
if (error_ptr) *error_ptr = error;
// Return the number of iterations used if successful. Otherwise return minus the number of iterations performed
return it < max_it ? (int)it : -(int)it;
}
/**
* \param[in] M See solve(...) for a description.
* \param[in] N See solve(...) for a description.
*
* \return the required cache size (in bytes) for the given values of M and N.
*/
size_t required_cache_size(uint32_t M, uint32_t N) { return 2*(M+N+1)*sizeof(Elem); }
};
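// Illustrative sketch (not part of the original header): a minimal MatOps
// implementation satisfying the rmul/lmul contract documented above, for a
// dense row-major matrix stored as a plain float array.  The names DenseMat,
// DenseMatOps and the call pattern in the trailing comment are hypothetical;
// only the two required signatures are taken from the documentation.
struct DenseMat
{
    const float* elems;     // M*N floats, row-major
};

struct DenseMatOps
{
    // y = A*x  (y has length M, x has length N)
    void rmul(float* y, const DenseMat& A, const float* x, uint32_t M, uint32_t N) const
    {
        for (uint32_t i = 0; i < M; ++i)
        {
            float sum = 0.0f;
            for (uint32_t j = 0; j < N; ++j)
                sum += A.elems[i*N + j] * x[j];
            y[i] = sum;
        }
    }

    // y = x*A, which is the same as y = (A^T)*x  (y has length N, x has length M)
    void lmul(float* y, const float* x, const DenseMat& A, uint32_t M, uint32_t N) const
    {
        for (uint32_t j = 0; j < N; ++j)
            y[j] = 0.0f;
        for (uint32_t i = 0; i < M; ++i)
            for (uint32_t j = 0; j < N; ++j)
                y[j] += x[i] * A.elems[i*N + j];
    }
};

// Hypothetical call pattern (cold start), with the cache sized by the member above:
//     std::vector<char> cache(solver.required_cache_size(M, N));
//     const int iters = solver.solve(x, A, b, M, N, cache.data());   // >= 0 on convergence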
| 8,613 | C | 54.217948 | 160 | 0.574132 |
NVIDIA-Omniverse/PhysX/blast/source/shared/stress_solver/simd/simd.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved.
#pragma once
#include <xmmintrin.h>
#include <emmintrin.h>
#include <immintrin.h>
#if defined(__GNUC__) // missing with gcc
#define _mm256_set_m128(vh, vl) _mm256_insertf128_ps(_mm256_castps128_ps256(vl), (vh), 1)
#endif
#define SIMD_ALIGN_16(code) NV_ALIGN_PREFIX(16) code NV_ALIGN_SUFFIX(16)
#define SIMD_ALIGN_32(code) NV_ALIGN_PREFIX(32) code NV_ALIGN_SUFFIX(32)
inline __m128 add(const __m128& a, const __m128& b) { return _mm_add_ps(a, b); }
inline __m128 add(float a, const __m128& b) { return _mm_add_ps(_mm_load1_ps(&a), b); }
inline __m128 add(const __m128& a, float b) { return _mm_add_ps(a, _mm_load1_ps(&b)); }
inline float add(float a, float b) { return a + b; }
inline __m128 sub(const __m128& a, const __m128& b) { return _mm_sub_ps(a, b); }
inline __m128 sub(float a, const __m128& b) { return _mm_sub_ps(_mm_load1_ps(&a), b); }
inline __m128 sub(const __m128& a, float b) { return _mm_sub_ps(a, _mm_load1_ps(&b)); }
inline float sub(float a, float b) { return a - b; }
inline __m128 mul(const __m128& a, const __m128& b) { return _mm_mul_ps(a, b); }
inline __m128 mul(float a, const __m128& b) { return _mm_mul_ps(_mm_load1_ps(&a), b); }
inline __m128 mul(const __m128& a, float b) { return _mm_mul_ps(a, _mm_load1_ps(&b)); }
inline float mul(float a, float b) { return a * b; }
inline __m128 div(const __m128& a, const __m128& b) { return _mm_div_ps(a, b); }
inline __m128 div(float a, const __m128& b) { return _mm_div_ps(_mm_load1_ps(&a), b); }
inline __m128 div(const __m128& a, float b) { return _mm_div_ps(a, _mm_load1_ps(&b)); }
inline float div(float a, float b) { return a / b; }
inline bool lt(const __m128& a, const __m128& b) { return !!_mm_comilt_ss(a, b); }
inline bool gt(const __m128& a, const __m128& b) { return !!_mm_comigt_ss(a, b); }
inline bool le(const __m128& a, const __m128& b) { return !!_mm_comile_ss(a, b); }
inline bool ge(const __m128& a, const __m128& b) { return !!_mm_comige_ss(a, b); }
inline bool eq(const __m128& a, const __m128& b) { return !!_mm_comieq_ss(a, b); }
inline bool ne(const __m128& a, const __m128& b) { return !!_mm_comineq_ss(a, b); }
inline bool lt(const float a, const float b) { return a < b; }
inline bool gt(const float a, const float b) { return a > b; }
inline bool le(const float a, const float b) { return a <= b; }
inline bool ge(const float a, const float b) { return a >= b; }
inline bool eq(const float a, const float b) { return a == b; }
inline bool ne(const float a, const float b) { return a != b; }
inline float to_float(const __m128& x) { float f; _mm_store_ss(&f, x); return f; }
inline float to_float(float x) { return x; }
inline void from_float(__m128& x, float y) { x = _mm_load1_ps(&y); }
inline void from_float(float& x, float y) { x = y; }
inline void set_zero(__m128& x) { x = _mm_setzero_ps(); }
inline void set_zero(float& x) { x = 0.0f; }
inline void store_float(float* mem, const __m128& f) { _mm_store_ps(mem, f); }
inline void store_float(float* mem, float f) { *mem = f; }
inline void load_float(__m128& f, const float* mem) { f = _mm_load_ps(mem); }
inline void load_float(float& f, const float* mem) { f = *mem; }
inline __m128 prep_cross3(const __m128& v) { return _mm_shuffle_ps(v, v, 0xc9); } // w z y x -> w x z y
inline __m128
cross3(const __m128& v0, const __m128& v1)
{
__m128 prep0 = prep_cross3(v0);
__m128 prep1 = prep_cross3(v1);
__m128 res_shuffled = _mm_sub_ps(_mm_mul_ps(v0, prep1), _mm_mul_ps(prep0, v1));
return _mm_shuffle_ps(res_shuffled, res_shuffled, 0xc9);
}
inline __m128
cross3_prep0(const __m128& v0, const __m128& prep0, const __m128& v1)
{
__m128 prep1 = prep_cross3(v1);
__m128 res_shuffled = _mm_sub_ps(_mm_mul_ps(v0, prep1), _mm_mul_ps(prep0, v1));
return _mm_shuffle_ps(res_shuffled, res_shuffled, 0xc9);
}
inline __m128
cross3_prep1(const __m128& v0, const __m128& v1, const __m128& prep1)
{
__m128 prep0 = prep_cross3(v0);
__m128 res_shuffled = _mm_sub_ps(_mm_mul_ps(v0, prep1), _mm_mul_ps(prep0, v1));
return _mm_shuffle_ps(res_shuffled, res_shuffled, 0xc9);
}
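// Illustrative sketch (not part of the original header): exercising the helpers
// above.  cross3 treats the low three lanes as (x, y, z) and ignores w, so
// (1,0,0) x (0,1,0) should come back as (0,0,1).  The function name is
// hypothetical; everything else is defined in this file or <xmmintrin.h>.
inline bool simd_cross3_sanity_check()
{
    const __m128 ex = _mm_set_ps(0.0f, 0.0f, 0.0f, 1.0f);  // lanes (w,z,y,x) = (0,0,0,1)
    const __m128 ey = _mm_set_ps(0.0f, 0.0f, 1.0f, 0.0f);  // lanes (w,z,y,x) = (0,0,1,0)
    SIMD_ALIGN_16(float ez[4]);
    store_float(ez, cross3(ex, ey));                        // expect (x,y,z) = (0,0,1)
    return eq(ez[0], 0.0f) && eq(ez[1], 0.0f) && eq(ez[2], 1.0f);
}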
| 5,930 | C | 50.12931 | 103 | 0.631872 |
NVIDIA-Omniverse/PhysX/blast/source/shared/stress_solver/simd/simd_device_query.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved.
#pragma once
#include <utility>
#include <stdio.h>
inline static constexpr uint32_t
instSetCode(uint8_t fn, uint8_t bitset, uint8_t bit)
{
return (uint32_t)fn << 16 | (uint32_t)bitset << 8 | (uint32_t)bit;
}
inline static void
extractInstSetBitsetAndBit(int& fn, int& bitset, int& bit, uint32_t code)
{
fn = (int)(code >> 16);
bitset = (int)(code >> 8)&0xff;
bit = (int)(code & 0xff);
}
struct InstructionSet
{
enum Enum
{
MMX = instSetCode(1, 3, 23),
SSE = instSetCode(1, 3, 25),
SSE2 = instSetCode(1, 3, 26),
SSE3 = instSetCode(1, 2, 0),
SSSE3 = instSetCode(1, 2, 9),
SSE4_1 = instSetCode(1, 2, 19),
SSE4_2 = instSetCode(1, 2, 20),
OSXSAVE = instSetCode(1, 2, 27),
AVX = instSetCode(1, 2, 28),
AVX2 = instSetCode(7, 1, 5),
FMA3 = instSetCode(1, 2, 12),
AVX512F = instSetCode(7, 1, 16),
AVX512PF = instSetCode(7, 1, 26),
AVX512ER = instSetCode(7, 1, 27),
AVX512CD = instSetCode(7, 1, 28)
};
};
#define InstructionSetEntry(_name) { #_name, InstructionSet::_name }
constexpr std::pair<const char*, uint32_t> sInstructionSetLookup[] =
{
InstructionSetEntry(MMX),
InstructionSetEntry(SSE),
InstructionSetEntry(SSE2),
InstructionSetEntry(SSE3),
InstructionSetEntry(SSSE3),
InstructionSetEntry(SSE4_1),
InstructionSetEntry(SSE4_2),
InstructionSetEntry(OSXSAVE),
InstructionSetEntry(AVX),
InstructionSetEntry(AVX2),
InstructionSetEntry(FMA3),
InstructionSetEntry(AVX512F),
InstructionSetEntry(AVX512PF),
InstructionSetEntry(AVX512ER),
InstructionSetEntry(AVX512CD),
};
#if NV_WINDOWS_FAMILY
#include <intrin.h> // for __cpuidex
inline void cpuid(int cpui[4], int fn) { __cpuidex(cpui, fn, 0); }
inline bool os_supports_avx_restore() { return ((uint32_t)_xgetbv(0) & 6) == 6; }
#else
#include <cpuid.h> // for __cpuid_count
inline void cpuid(int cpui[4], int fn) { __cpuid_count(fn, 0, cpui[0], cpui[1], cpui[2], cpui[3]); }
inline bool os_supports_avx_restore()
{
uint32_t xcr0;
__asm__("xgetbv" : "=a" (xcr0) : "c" (0) : "%edx");
return (xcr0 & 6) == 6;
}
#endif
static bool
device_supports_instruction_set(uint32_t inst_set)
{
int fn, bitset, bit;
extractInstSetBitsetAndBit(fn, bitset, bit, inst_set);
int cpui[4];
cpuid(cpui, 0);
if (cpui[0] < fn) return false;
cpuid(cpui, fn);
return !!((cpui[bitset] >> bit) & 1);
}
static void
print_supported_instruction_sets()
{
printf("Supported instruction sets:\n");
for (std::pair<const char*, uint32_t> entry : sInstructionSetLookup)
{
printf("%s: %s\n", entry.first, device_supports_instruction_set(entry.second) ? "yes" : "no");
}
}
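// Illustrative sketch (not part of the original header): how these helpers are
// typically combined to gate an AVX code path at run time.  Using AVX safely
// requires the CPU's AVX and OSXSAVE bits plus OS support for saving the YMM
// registers (the xgetbv check above).  The function name is hypothetical.
static bool device_can_run_avx()
{
    return device_supports_instruction_set(InstructionSet::AVX) &&
           device_supports_instruction_set(InstructionSet::OSXSAVE) &&
           os_supports_avx_restore();
}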
| 4,326 | C | 32.284615 | 102 | 0.676375 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFileBuffer/include/NsMemoryBuffer.h | /*
* Copyright 2009-2011 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
#ifndef NS_MEMORY_BUFFER_H
#define NS_MEMORY_BUFFER_H
#include "Ns.h"
#include "NsUserAllocated.h"
#include "NsAlignedMalloc.h"
#include "NvFileBuf.h"
#include "NvAssert.h"
namespace nvidia
{
namespace general_NvIOStream2
{
using namespace shdfnd;
const uint32_t BUFFER_SIZE_DEFAULT = 4096;
//Use this class if you want to use your own allocator
template<class Allocator>
class NvMemoryBufferBase : public NvFileBuf, public Allocator
{
NV_NOCOPY(NvMemoryBufferBase)
void init(const void *readMem, uint32_t readLen)
{
mAllocator = this;
mReadBuffer = mReadLoc = static_cast<const uint8_t *>(readMem);
mReadStop = &mReadLoc[readLen];
mWriteBuffer = mWriteLoc = mWriteStop = NULL;
mWriteBufferSize = 0;
mDefaultWriteBufferSize = BUFFER_SIZE_DEFAULT;
mOpenMode = OPEN_READ_ONLY;
mSeekType = SEEKABLE_READ;
}
void init(uint32_t defaultWriteBufferSize)
{
mAllocator = this;
mReadBuffer = mReadLoc = mReadStop = NULL;
mWriteBuffer = mWriteLoc = mWriteStop = NULL;
mWriteBufferSize = 0;
mDefaultWriteBufferSize = defaultWriteBufferSize;
mOpenMode = OPEN_READ_WRITE_NEW;
mSeekType = SEEKABLE_READWRITE;
}
public:
NvMemoryBufferBase(const void *readMem,uint32_t readLen)
{
init(readMem, readLen);
}
NvMemoryBufferBase(const void *readMem,uint32_t readLen, const Allocator &alloc): Allocator(alloc)
{
init(readMem, readLen);
}
NvMemoryBufferBase(uint32_t defaultWriteBufferSize = BUFFER_SIZE_DEFAULT)
{
init(defaultWriteBufferSize);
}
NvMemoryBufferBase(uint32_t defaultWriteBufferSize, const Allocator &alloc): Allocator(alloc)
{
init(defaultWriteBufferSize);
}
virtual ~NvMemoryBufferBase(void)
{
reset();
}
void setAllocator(Allocator *allocator)
{
mAllocator = allocator;
}
void initWriteBuffer(uint32_t size)
{
if ( mWriteBuffer == NULL )
{
if ( size < mDefaultWriteBufferSize ) size = mDefaultWriteBufferSize;
mWriteBuffer = static_cast<uint8_t *>(mAllocator->allocate(size));
NV_ASSERT( mWriteBuffer );
mWriteLoc = mWriteBuffer;
mWriteStop = &mWriteBuffer[size];
mWriteBufferSize = size;
mReadBuffer = mWriteBuffer;
mReadStop = &mWriteBuffer[size];
mReadLoc = mWriteBuffer;
}
}
void reset(void)
{
mAllocator->deallocate(mWriteBuffer);
mWriteBuffer = NULL;
mWriteBufferSize = 0;
mWriteLoc = NULL;
mWriteStop = NULL;
mReadBuffer = NULL;
mReadStop = NULL;
mReadLoc = NULL;
}
virtual OpenMode getOpenMode(void) const
{
return mOpenMode;
}
SeekType isSeekable(void) const
{
return mSeekType;
}
virtual uint32_t read(void* buffer, uint32_t size)
{
if ( (mReadLoc+size) > mReadStop )
{
size = uint32_t(mReadStop - mReadLoc);
}
if ( size != 0 )
{
memmove(buffer,mReadLoc,size);
mReadLoc+=size;
}
return size;
}
virtual uint32_t peek(void* buffer, uint32_t size)
{
if ( (mReadLoc+size) > mReadStop )
{
size = uint32_t(mReadStop - mReadLoc);
}
if ( size != 0 )
{
memmove(buffer,mReadLoc,size);
}
return size;
}
virtual uint32_t write(const void* buffer, uint32_t size)
{
NV_ASSERT( mOpenMode == OPEN_READ_WRITE_NEW );
if ( mOpenMode == OPEN_READ_WRITE_NEW )
{
if ( (mWriteLoc+size) > mWriteStop )
growWriteBuffer(size);
memmove(mWriteLoc,buffer,size);
mWriteLoc+=size;
mReadStop = mWriteLoc;
}
else
{
size = 0;
}
return size;
}
NV_INLINE const uint8_t * getReadLoc(void) const { return mReadLoc; }
NV_INLINE void advanceReadLoc(uint32_t len)
{
NV_ASSERT(mReadBuffer);
if ( mReadBuffer )
{
mReadLoc+=len;
if ( mReadLoc >= mReadStop )
{
mReadLoc = mReadStop;
}
}
}
virtual uint32_t tellRead(void) const
{
uint32_t ret=0;
if ( mReadBuffer )
{
ret = uint32_t(mReadLoc-mReadBuffer);
}
return ret;
}
virtual uint32_t tellWrite(void) const
{
return uint32_t(mWriteLoc-mWriteBuffer);
}
virtual uint32_t seekRead(uint32_t loc)
{
uint32_t ret = 0;
NV_ASSERT(mReadBuffer);
if ( mReadBuffer )
{
mReadLoc = &mReadBuffer[loc];
if ( mReadLoc >= mReadStop )
{
mReadLoc = mReadStop;
}
ret = uint32_t(mReadLoc-mReadBuffer);
}
return ret;
}
virtual uint32_t seekWrite(uint32_t loc)
{
uint32_t ret = 0;
NV_ASSERT( mOpenMode == OPEN_READ_WRITE_NEW );
if ( mWriteBuffer )
{
if ( loc > mWriteBufferSize )
{
mWriteLoc = mWriteStop;
growWriteBuffer(loc - mWriteBufferSize);
}
mWriteLoc = &mWriteBuffer[loc];
ret = uint32_t(mWriteLoc-mWriteBuffer);
}
return ret;
}
virtual void flush(void)
{
}
virtual uint32_t getFileLength(void) const
{
uint32_t ret = 0;
if ( mReadBuffer )
{
ret = uint32_t(mReadStop-mReadBuffer);
}
else if ( mWriteBuffer )
{
ret = uint32_t(mWriteLoc-mWriteBuffer);
}
return ret;
}
uint32_t getWriteBufferSize(void) const
{
return uint32_t(mWriteLoc-mWriteBuffer);
}
void setWriteLoc(uint8_t *writeLoc)
{
NV_ASSERT(writeLoc >= mWriteBuffer && writeLoc < mWriteStop );
mWriteLoc = writeLoc;
mReadStop = mWriteLoc;
}
const uint8_t * getWriteBuffer(void) const
{
return mWriteBuffer;
}
/**
* Attention: if you use an aligned allocator you cannot free the memory with the NV_FREE macros; use the deallocate method from the base class instead
*/
uint8_t * getWriteBufferOwnership(uint32_t &dataLen) // return the write buffer, and zero it out, the caller is taking ownership of the memory
{
uint8_t *ret = mWriteBuffer;
dataLen = uint32_t(mWriteLoc-mWriteBuffer);
mWriteBuffer = NULL;
mWriteLoc = NULL;
mWriteStop = NULL;
mWriteBufferSize = 0;
return ret;
}
void alignRead(uint32_t a)
{
uint32_t loc = tellRead();
uint32_t aloc = ((loc+(a-1))/a)*a;
if ( aloc != loc )
{
seekRead(aloc);
}
}
void alignWrite(uint32_t a)
{
uint32_t loc = tellWrite();
uint32_t aloc = ((loc+(a-1))/a)*a;
if ( aloc != loc )
{
seekWrite(aloc);
}
}
private:
// double the size of the write buffer or at least as large as the 'size' value passed in.
void growWriteBuffer(uint32_t size)
{
if ( mWriteBuffer == NULL )
{
if ( size < mDefaultWriteBufferSize ) size = mDefaultWriteBufferSize;
initWriteBuffer(size);
}
else
{
uint32_t oldWriteIndex = uint32_t(mWriteLoc - mWriteBuffer);
uint32_t newSize = mWriteBufferSize*2;
uint32_t avail = newSize-oldWriteIndex;
if ( size >= avail ) newSize = newSize+size;
uint8_t *writeBuffer = static_cast<uint8_t *>(mAllocator->allocate(newSize));
NV_ASSERT( writeBuffer );
memmove(writeBuffer,mWriteBuffer,mWriteBufferSize);
mAllocator->deallocate(mWriteBuffer);
mWriteBuffer = writeBuffer;
mWriteBufferSize = newSize;
mWriteLoc = &mWriteBuffer[oldWriteIndex];
mWriteStop = &mWriteBuffer[mWriteBufferSize];
uint32_t oldReadLoc = uint32_t(mReadLoc-mReadBuffer);
mReadBuffer = mWriteBuffer;
mReadStop = mWriteLoc;
mReadLoc = &mReadBuffer[oldReadLoc];
}
}
const uint8_t *mReadBuffer;
const uint8_t *mReadLoc;
const uint8_t *mReadStop;
uint8_t *mWriteBuffer;
uint8_t *mWriteLoc;
uint8_t *mWriteStop;
uint32_t mWriteBufferSize;
uint32_t mDefaultWriteBufferSize;
Allocator *mAllocator;
OpenMode mOpenMode;
SeekType mSeekType;
};
class NvMemoryBufferAllocator
{
public:
NvMemoryBufferAllocator(uint32_t a = 0) : alignment(a) {}
virtual void * allocate(uint32_t size)
{
switch(alignment)
{
case 0:
return NV_ALLOC(size, NV_DEBUG_EXP("NvMemoryBufferAllocator"));
case 16 :
return nvidia::AlignedAllocator<16>().allocate(size, __FILE__, __LINE__);
case 32 :
return nvidia::AlignedAllocator<32>().allocate(size, __FILE__, __LINE__);
case 64 :
return nvidia::AlignedAllocator<64>().allocate(size, __FILE__, __LINE__);
case 128 :
return nvidia::AlignedAllocator<128>().allocate(size, __FILE__, __LINE__);
default :
NV_ASSERT(0);
}
return NULL;
}
virtual void deallocate(void *mem)
{
switch(alignment)
{
case 0:
NV_FREE(mem);
break;
case 16 :
nvidia::AlignedAllocator<16>().deallocate(mem);
break;
case 32 :
nvidia::AlignedAllocator<32>().deallocate(mem);
break;
case 64 :
nvidia::AlignedAllocator<64>().deallocate(mem);
break;
case 128 :
nvidia::AlignedAllocator<128>().deallocate(mem);
break;
default :
NV_ASSERT(0);
}
}
virtual ~NvMemoryBufferAllocator(void) {}
private:
NvMemoryBufferAllocator& operator=(const NvMemoryBufferAllocator&);
const uint32_t alignment;
};
//Use this class if you want to use PhysX memory allocator
class NsMemoryBuffer: public NvMemoryBufferBase<NvMemoryBufferAllocator>, public UserAllocated
{
NV_NOCOPY(NsMemoryBuffer)
typedef NvMemoryBufferBase<NvMemoryBufferAllocator> BaseClass;
public:
NsMemoryBuffer(const void *readMem,uint32_t readLen): BaseClass(readMem, readLen) {}
NsMemoryBuffer(const void *readMem,uint32_t readLen, uint32_t alignment): BaseClass(readMem, readLen, NvMemoryBufferAllocator(alignment)) {}
NsMemoryBuffer(uint32_t defaultWriteBufferSize=BUFFER_SIZE_DEFAULT): BaseClass(defaultWriteBufferSize) {}
NsMemoryBuffer(uint32_t defaultWriteBufferSize,uint32_t alignment): BaseClass(defaultWriteBufferSize, NvMemoryBufferAllocator(alignment)) {}
};
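// Illustrative sketch (not part of the original header): a write-then-read round
// trip.  The write buffer grows on demand and is shared with the read cursor, so
// data can be read back after rewinding with seekRead(0).  Passing an alignment
// (e.g. NsMemoryBuffer(BUFFER_SIZE_DEFAULT, 16)) selects the aligned allocator
// path above.  The function name is hypothetical.
inline bool memoryBufferRoundTripExample()
{
    NsMemoryBuffer buffer;              // default write buffer size, default allocator
    buffer.storeDword(42);              // endian-aware helpers inherited from NvFileBuf
    buffer.storeFloat(1.5f);
    buffer.seekRead(0);                 // rewind the read cursor to the start
    const uint32_t i = buffer.readDword();
    const float f = buffer.readFloat();
    return i == 42 && f == 1.5f;
}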
}
using namespace general_NvIOStream2;
}
#endif // NV_MEMORY_BUFFER_H
| 13,123 | C | 27.655022 | 146 | 0.592776 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFileBuffer/include/NvFileBuf.h | /*
* Copyright 2009-2011 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
#ifndef NV_FILE_BUF_H
#define NV_FILE_BUF_H
#include "NvCTypes.h"
/** \addtogroup foundation
@{
*/
namespace nvidia
{
namespace general_NvIOStream2
{
NV_PUSH_PACK_DEFAULT
/**
\brief Callback class for data serialization.
The user needs to supply an NvFileBuf implementation to a number of methods to allow the SDK to read or write
chunks of binary data. This allows flexibility for the source/destination of the data. For example the NvFileBuf
could store data in a file, memory buffer or custom file format.
\note It is the user's responsibility to ensure that the data is written to the appropriate offset.
*/
class NvFileBuf
{
public:
enum EndianMode
{
ENDIAN_NONE = 0, // do no conversion for endian mode
ENDIAN_BIG = 1, // always read/write data as natively big endian (Power PC, etc.)
ENDIAN_LITTLE = 2 // always read/write data as natively little endian (Intel, etc.) Default Behavior!
};
NvFileBuf(EndianMode mode=ENDIAN_LITTLE)
{
setEndianMode(mode);
}
virtual ~NvFileBuf(void)
{
}
/**
\brief Declares a constant to seek to the end of the stream.
*
* Does not support streams longer than 32 bits
*/
static const uint32_t STREAM_SEEK_END=0xFFFFFFFF;
enum OpenMode
{
OPEN_FILE_NOT_FOUND,
OPEN_READ_ONLY, // open file buffer stream for read only access
OPEN_WRITE_ONLY, // open file buffer stream for write only access
OPEN_READ_WRITE_NEW, // open a new file for both read/write access
OPEN_READ_WRITE_EXISTING // open an existing file for both read/write access
};
virtual OpenMode getOpenMode(void) const = 0;
bool isOpen(void) const
{
return getOpenMode()!=OPEN_FILE_NOT_FOUND;
}
enum SeekType
{
SEEKABLE_NO = 0,
SEEKABLE_READ = 0x1,
SEEKABLE_WRITE = 0x2,
SEEKABLE_READWRITE = 0x3
};
virtual SeekType isSeekable(void) const = 0;
void setEndianMode(EndianMode e)
{
mEndianMode = e;
if ( (e==ENDIAN_BIG && !isBigEndian() ) ||
(e==ENDIAN_LITTLE && isBigEndian() ) )
{
mEndianSwap = true;
}
else
{
mEndianSwap = false;
}
}
EndianMode getEndianMode(void) const
{
return mEndianMode;
}
virtual uint32_t getFileLength(void) const = 0;
/**
\brief Seeks the stream to a particular location for reading
*
* If the location passed exceeds the length of the stream, then it will seek to the end.
* Returns the location it ended up at (useful if you seek to the end) to get the file position
*/
virtual uint32_t seekRead(uint32_t loc) = 0;
/**
\brief Seeks the stream to a particular location for writing
*
* If the location passed exceeds the length of the stream, then it will seek to the end.
* Returns the location it ended up at (useful if you seek to the end) to get the file position
*/
virtual uint32_t seekWrite(uint32_t loc) = 0;
/**
\brief Reads from the stream into a buffer.
\param[out] mem The buffer to read the stream into.
\param[in] len The number of bytes to stream into the buffer
\return Returns the actual number of bytes read. If not equal to the length requested, then reached end of stream.
*/
virtual uint32_t read(void *mem,uint32_t len) = 0;
/**
\brief Reads from the stream into a buffer but does not advance the read location.
\param[out] mem The buffer to read the stream into.
\param[in] len The number of bytes to stream into the buffer
\return Returns the actual number of bytes read. If not equal to the length requested, then reached end of stream.
*/
virtual uint32_t peek(void *mem,uint32_t len) = 0;
/**
\brief Writes a buffer of memory to the stream
\param[in] mem The address of a buffer of memory to send to the stream.
\param[in] len The number of bytes to send to the stream.
\return Returns the actual number of bytes sent to the stream. If not equal to the length specified, then the stream is full or unable to write for some reason.
*/
virtual uint32_t write(const void *mem,uint32_t len) = 0;
/**
\brief Reports the current stream location for read access.
\return Returns the current stream read location.
*/
virtual uint32_t tellRead(void) const = 0;
/**
\brief Reports the current stream location for write access.
\return Returns the current stream write location.
*/
virtual uint32_t tellWrite(void) const = 0;
/**
\brief Causes any temporarily cached data to be flushed to the stream.
*/
virtual void flush(void) = 0;
/**
\brief Close the stream.
*/
virtual void close(void) {}
void release(void)
{
delete this;
}
static NV_INLINE bool isBigEndian()
{
int32_t i = 1;
return *(reinterpret_cast<char*>(&i))==0;
}
NV_INLINE void swap2Bytes(void* _data) const
{
char *data = static_cast<char *>(_data);
char one_byte;
one_byte = data[0]; data[0] = data[1]; data[1] = one_byte;
}
NV_INLINE void swap4Bytes(void* _data) const
{
char *data = static_cast<char *>(_data);
char one_byte;
one_byte = data[0]; data[0] = data[3]; data[3] = one_byte;
one_byte = data[1]; data[1] = data[2]; data[2] = one_byte;
}
NV_INLINE void swap8Bytes(void *_data) const
{
char *data = static_cast<char *>(_data);
char one_byte;
one_byte = data[0]; data[0] = data[7]; data[7] = one_byte;
one_byte = data[1]; data[1] = data[6]; data[6] = one_byte;
one_byte = data[2]; data[2] = data[5]; data[5] = one_byte;
one_byte = data[3]; data[3] = data[4]; data[4] = one_byte;
}
NV_INLINE void storeDword(uint32_t v)
{
if ( mEndianSwap )
swap4Bytes(&v);
write(&v,sizeof(v));
}
NV_INLINE void storeFloat(float v)
{
if ( mEndianSwap )
swap4Bytes(&v);
write(&v,sizeof(v));
}
NV_INLINE void storeDouble(double v)
{
if ( mEndianSwap )
swap8Bytes(&v);
write(&v,sizeof(v));
}
NV_INLINE void storeByte(uint8_t b)
{
write(&b,sizeof(b));
}
NV_INLINE void storeWord(uint16_t w)
{
if ( mEndianSwap )
swap2Bytes(&w);
write(&w,sizeof(w));
}
uint8_t readByte(void)
{
uint8_t v=0;
read(&v,sizeof(v));
return v;
}
uint16_t readWord(void)
{
uint16_t v=0;
read(&v,sizeof(v));
if ( mEndianSwap )
swap2Bytes(&v);
return v;
}
uint32_t readDword(void)
{
uint32_t v=0;
read(&v,sizeof(v));
if ( mEndianSwap )
swap4Bytes(&v);
return v;
}
float readFloat(void)
{
float v=0;
read(&v,sizeof(v));
if ( mEndianSwap )
swap4Bytes(&v);
return v;
}
double readDouble(void)
{
double v=0;
read(&v,sizeof(v));
if ( mEndianSwap )
swap8Bytes(&v);
return v;
}
private:
bool mEndianSwap; // whether or not the endian should be swapped on the current platform
EndianMode mEndianMode; // the current endian mode behavior for the stream
};
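// Illustrative sketch (not part of the original header): writing a small
// fixed-layout header through any NvFileBuf implementation.  Forcing ENDIAN_BIG
// makes the stored bytes identical on every platform; a reader must select the
// same mode before calling readDword/readWord.  The function name and the
// magic/version values are hypothetical.
inline void writeExampleHeader(NvFileBuf& stream)
{
    stream.setEndianMode(NvFileBuf::ENDIAN_BIG);
    stream.storeDword(0x424C5354);      // hypothetical magic number
    stream.storeWord(1);                // hypothetical major version
    stream.storeWord(0);                // hypothetical minor version
    stream.flush();
}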
NV_POP_PACK
} // end of namespace
using namespace general_NvIOStream2;
namespace general_NvIOStream = general_NvIOStream2;
} // end of namespace
#endif // NV_FILE_BUF_H
| 9,603 | C | 27.330383 | 164 | 0.622514 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsArray.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef NV_NSFOUNDATION_NSARRAY_H
#define NV_NSFOUNDATION_NSARRAY_H
#include "NvAssert.h"
#include "NsAllocator.h"
#include "NsBasicTemplates.h"
#include "NvIntrinsics.h"
#if NV_LINUX || NV_ANDROID || (NV_IOS && !NV_A64) || NV_OSX || NV_PS3 || NV_PSP2 || NV_WIIU
#include <tr1/type_traits>
#elif NV_WINRT || NV_XBOXONE || (NV_IOS && NV_A64) || NV_WIN64 || NV_X360 || NV_WIN32 || NV_PS4
#include <type_traits>
#if NV_IOS && NV_A64
namespace std { namespace tr1 { using std::is_pod; } }
#endif
#else
#error "OS with no defined path to type_traits.h"
#endif
#if NV_VC == 9 || NV_VC == 10
#pragma warning(push)
#pragma warning(disable : 4347) // behavior change: 'function template' is called instead of 'function'
#endif
namespace nvidia
{
namespace shdfnd
{
template <class Serializer>
void exportArray(Serializer& stream, const void* data, uint32_t size, uint32_t sizeOfElement, uint32_t capacity);
char* importArray(char* address, void** data, uint32_t size, uint32_t sizeOfElement, uint32_t capacity);
/*!
An array is a sequential container.
Implementation note
* entries between 0 and size are valid objects
* we use inheritance to build this because the array is included inline in a lot
of objects and we want the allocator to take no space if it's not stateful, which
aggregation doesn't allow. Also, we want the metadata at the front for the inline
case where the allocator contains some inline storage space
*/
template <class T, class Alloc = typename AllocatorTraits<T>::Type>
class Array : protected Alloc
{
public:
typedef T* Iterator;
typedef const T* ConstIterator;
explicit Array(const NvEMPTY v) : Alloc(v)
{
if(mData)
mCapacity |= NV_SIGN_BITMASK;
}
/*!
Default array constructor. Initialize an empty array
*/
NV_INLINE explicit Array(const Alloc& alloc = Alloc()) : Alloc(alloc), mData(0), mSize(0), mCapacity(0)
{
}
/*!
Initialize array with the given size, filling it with copies of the given value
*/
NV_INLINE explicit Array(uint32_t size, const T& a = T(), const Alloc& alloc = Alloc())
: Alloc(alloc), mData(0), mSize(0), mCapacity(0)
{
resize(size, a);
}
/*!
Copy-constructor. Copy all entries from other array
*/
template <class A>
NV_INLINE explicit Array(const Array<T, A>& other, const Alloc& alloc = Alloc())
: Alloc(alloc)
{
copy(other);
}
// This is necessary else the basic default copy constructor is used in the case of both arrays being of the same
// template instance
// The C++ standard clearly states that a template constructor is never a copy constructor [2]. In other words,
// the presence of a template constructor does not suppress the implicit declaration of the copy constructor.
// Also never make a copy constructor explicit, or copy-initialization* will no longer work. This is because
// 'binding an rvalue to a const reference requires an accessible copy constructor' (http://gcc.gnu.org/bugs/)
// *http://stackoverflow.com/questions/1051379/is-there-a-difference-in-c-between-copy-initialization-and-assignment-initializ
NV_INLINE Array(const Array& other, const Alloc& alloc = Alloc()) : Alloc(alloc)
{
copy(other);
}
/*!
Initialize array from the given range [first, last)
*/
NV_INLINE explicit Array(const T* first, const T* last, const Alloc& alloc = Alloc())
: Alloc(alloc), mSize(last < first ? 0 : uint32_t(last - first)), mCapacity(mSize)
{
mData = allocate(mSize);
copy(mData, mData + mSize, first);
}
/*!
Destructor
*/
NV_INLINE ~Array()
{
destroy(mData, mData + mSize);
if(capacity() && !isInUserMemory())
deallocate(mData);
}
/*!
Assignment operator. Copy content (deep-copy)
*/
template <class A>
NV_INLINE Array& operator=(const Array<T, A>& rhs)
{
if(&rhs == this)
return *this;
clear();
reserve(rhs.mSize);
copy(mData, mData + rhs.mSize, rhs.mData);
mSize = rhs.mSize;
return *this;
}
NV_INLINE Array& operator=(const Array& t) // Needs to be declared, see comment at copy-constructor
{
return operator=<Alloc>(t);
}
NV_FORCE_INLINE static bool isArrayOfPOD()
{
#if NV_VC>=14
return std::is_trivially_copyable<T>::value;
#else
return std::tr1::is_pod<T>::value;
#endif
}
/*!
Array indexing operator.
\param i
The index of the element that will be returned.
\return
The element i in the array.
*/
NV_FORCE_INLINE const T& operator[](uint32_t i) const
{
NV_ASSERT(i < mSize);
return mData[i];
}
/*!
Array indexing operator.
\param i
The index of the element that will be returned.
\return
The element i in the array.
*/
NV_FORCE_INLINE T& operator[](uint32_t i)
{
NV_ASSERT(i < mSize);
return mData[i];
}
/*!
Returns a pointer to the initial element of the array.
\return
a pointer to the initial element of the array.
*/
NV_FORCE_INLINE ConstIterator begin() const
{
return mData;
}
NV_FORCE_INLINE Iterator begin()
{
return mData;
}
/*!
Returns an iterator beyond the last element of the array. Do not dereference.
\return
a pointer to the element beyond the last element of the array.
*/
NV_FORCE_INLINE ConstIterator end() const
{
return mData + mSize;
}
NV_FORCE_INLINE Iterator end()
{
return mData + mSize;
}
/*!
Returns a reference to the first element of the array. Undefined if the array is empty.
\return a reference to the first element of the array
*/
NV_FORCE_INLINE const T& front() const
{
NV_ASSERT(mSize);
return mData[0];
}
NV_FORCE_INLINE T& front()
{
NV_ASSERT(mSize);
return mData[0];
}
/*!
Returns a reference to the last element of the array. Undefined if the array is empty.
\return a reference to the last element of the array
*/
NV_FORCE_INLINE const T& back() const
{
NV_ASSERT(mSize);
return mData[mSize - 1];
}
NV_FORCE_INLINE T& back()
{
NV_ASSERT(mSize);
return mData[mSize - 1];
}
/*!
Returns the number of entries in the array. This can, and probably will,
differ from the array capacity.
\return
The number of entries in the array.
*/
NV_FORCE_INLINE uint32_t size() const
{
return mSize;
}
/*!
Clears the array.
*/
NV_INLINE void clear()
{
destroy(mData, mData + mSize);
mSize = 0;
}
/*!
Returns whether the array is empty (i.e. whether its size is 0).
\return
true if the array is empty
*/
NV_FORCE_INLINE bool empty() const
{
return mSize == 0;
}
/*!
Finds the first occurrence of an element in the array.
\param a
The element to find.
*/
NV_INLINE Iterator find(const T& a)
{
uint32_t index;
for(index = 0; index < mSize && mData[index] != a; index++)
;
return mData + index;
}
NV_INLINE ConstIterator find(const T& a) const
{
uint32_t index;
for(index = 0; index < mSize && mData[index] != a; index++)
;
return mData + index;
}
/////////////////////////////////////////////////////////////////////////
/*!
Adds one element to the end of the array. Operation is O(1).
\param a
The element that will be added to this array.
*/
/////////////////////////////////////////////////////////////////////////
NV_FORCE_INLINE T& pushBack(const T& a)
{
if(capacity() <= mSize)
return growAndPushBack(a);
NV_PLACEMENT_NEW(reinterpret_cast<void*>(mData + mSize), T)(a);
return mData[mSize++];
}
/////////////////////////////////////////////////////////////////////////
/*!
Removes and returns the last element of the array. Only legal if the array is non-empty.
*/
/////////////////////////////////////////////////////////////////////////
NV_INLINE T popBack()
{
NV_ASSERT(mSize);
T t = mData[mSize - 1];
if (!isArrayOfPOD())
{
mData[--mSize].~T();
}
else
{
--mSize;
}
return t;
}
/////////////////////////////////////////////////////////////////////////
/*!
Construct one element at the end of the array. Operation is O(1).
*/
/////////////////////////////////////////////////////////////////////////
NV_INLINE T& insert()
{
if(capacity() <= mSize)
grow(capacityIncrement());
T* ptr = mData + mSize++;
new (ptr) T; // not 'T()' because PODs should not get default-initialized.
return *ptr;
}
/////////////////////////////////////////////////////////////////////////
/*!
Subtracts the element on position i from the array and replaces it with
the last element.
Operation is O(1)
\param i
The position of the element that will be subtracted from this array.
*/
/////////////////////////////////////////////////////////////////////////
NV_INLINE void replaceWithLast(uint32_t i)
{
NV_ASSERT(i < mSize);
mData[i] = mData[--mSize];
if (!isArrayOfPOD())
{
mData[mSize].~T();
}
}
NV_INLINE void replaceWithLast(Iterator i)
{
replaceWithLast(static_cast<uint32_t>(i - mData));
}
/////////////////////////////////////////////////////////////////////////
/*!
Replaces the first occurrence of the element a with the last element
Operation is O(n)
\param a
The element whose first occurrence will be subtracted from this array.
\return true if the element has been removed.
*/
/////////////////////////////////////////////////////////////////////////
NV_INLINE bool findAndReplaceWithLast(const T& a)
{
uint32_t index = 0;
while(index < mSize && mData[index] != a)
++index;
if(index == mSize)
return false;
replaceWithLast(index);
return true;
}
/////////////////////////////////////////////////////////////////////////
/*!
Subtracts the element on position i from the array. Shifts the entire
array one step.
Operation is O(n)
\param i
The position of the element that will be subtracted from this array.
*/
/////////////////////////////////////////////////////////////////////////
NV_INLINE void remove(uint32_t i)
{
NV_ASSERT(i < mSize);
if (isArrayOfPOD())
{
if (i + 1 != mSize)
{
nvidia::intrinsics::memMove(mData + i, mData + i + 1, (mSize - i - 1) * sizeof(T));
}
}
else
{
for(T* it = mData + i; it->~T(), ++i < mSize; ++it)
new (it) T(mData[i]);
}
--mSize;
}
/////////////////////////////////////////////////////////////////////////
/*!
Removes a range from the array. Shifts the array so order is maintained.
Operation is O(n)
\param begin
The starting position of the element that will be subtracted from this array.
\param count
The number of elements that will be subtracted from this array.
*/
/////////////////////////////////////////////////////////////////////////
NV_INLINE void removeRange(uint32_t begin, uint32_t count)
{
NV_ASSERT(begin < mSize);
NV_ASSERT((begin + count) <= mSize);
if (!isArrayOfPOD())
{
for(uint32_t i = 0; i < count; i++)
{
mData[begin + i].~T(); // call the destructor on the ones being removed first.
}
}
T* dest = &mData[begin]; // location we are copying the tail end objects to
T* src = &mData[begin + count]; // start of tail objects
uint32_t move_count = mSize - (begin + count); // compute remainder that needs to be copied down
if (isArrayOfPOD())
{
nvidia::intrinsics::memMove(dest, src, move_count * sizeof(T));
}
else
{
for(uint32_t i = 0; i < move_count; i++)
{
new (dest) T(*src); // copy the old one to the new location
src->~T(); // call the destructor on the old location
dest++;
src++;
}
}
mSize -= count;
}
//////////////////////////////////////////////////////////////////////////
/*!
Resize array
*/
//////////////////////////////////////////////////////////////////////////
NV_NOINLINE void resize(const uint32_t size, const T& a = T());
NV_NOINLINE void resizeUninitialized(const uint32_t size);
//////////////////////////////////////////////////////////////////////////
/*!
Resize array such that only as much memory is allocated to hold the
existing elements
*/
//////////////////////////////////////////////////////////////////////////
NV_INLINE void shrink()
{
recreate(mSize);
}
//////////////////////////////////////////////////////////////////////////
/*!
Deletes all array elements and frees memory.
*/
//////////////////////////////////////////////////////////////////////////
NV_INLINE void reset()
{
resize(0);
shrink();
}
//////////////////////////////////////////////////////////////////////////
/*!
Ensure that the array has at least size capacity.
*/
//////////////////////////////////////////////////////////////////////////
NV_INLINE void reserve(const uint32_t capacity)
{
if(capacity > this->capacity())
grow(capacity);
}
//////////////////////////////////////////////////////////////////////////
/*!
Query the capacity(allocated mem) for the array.
*/
//////////////////////////////////////////////////////////////////////////
NV_FORCE_INLINE uint32_t capacity() const
{
return mCapacity & ~NV_SIGN_BITMASK;
}
//////////////////////////////////////////////////////////////////////////
/*!
Unsafe function to force the size of the array
*/
//////////////////////////////////////////////////////////////////////////
NV_FORCE_INLINE void forceSize_Unsafe(uint32_t size)
{
NV_ASSERT(size <= mCapacity);
mSize = size;
}
//////////////////////////////////////////////////////////////////////////
/*!
Swap contents of an array without allocating temporary storage
*/
//////////////////////////////////////////////////////////////////////////
NV_INLINE void swap(Array<T, Alloc>& other)
{
shdfnd::swap(mData, other.mData);
shdfnd::swap(mSize, other.mSize);
shdfnd::swap(mCapacity, other.mCapacity);
}
//////////////////////////////////////////////////////////////////////////
/*!
Assign a range of values to this vector (resizes to length of range)
*/
//////////////////////////////////////////////////////////////////////////
NV_INLINE void assign(const T* first, const T* last)
{
resizeUninitialized(uint32_t(last - first));
copy(begin(), end(), first);
}
// We need one bit to mark arrays that have been deserialized from a user-provided memory block.
// For alignment & memory saving purpose we store that bit in the rarely used capacity member.
NV_FORCE_INLINE uint32_t isInUserMemory() const
{
return mCapacity & NV_SIGN_BITMASK;
}
/// return reference to allocator
NV_INLINE Alloc& getAllocator()
{
return *this;
}
protected:
// constructor for where we don't own the memory
Array(T* memory, uint32_t size, uint32_t capacity, const Alloc& alloc = Alloc())
: Alloc(alloc), mData(memory), mSize(size), mCapacity(capacity | NV_SIGN_BITMASK)
{
}
template <class A>
NV_NOINLINE void copy(const Array<T, A>& other);
NV_INLINE T* allocate(uint32_t size)
{
if(size > 0)
{
T* p = reinterpret_cast<T*>(Alloc::allocate(sizeof(T) * size, __FILE__, __LINE__));
/**
Mark a specified amount of memory with 0xcd pattern. This is used to check that the meta data
definition for serialized classes is complete in checked builds.
*/
#if NV_CHECKED
if(p)
{
for(uint32_t i = 0; i < (sizeof(T) * size); ++i)
reinterpret_cast<uint8_t*>(p)[i] = 0xcd;
}
#endif
return p;
}
return 0;
}
NV_INLINE void deallocate(void* mem)
{
Alloc::deallocate(mem);
}
static NV_INLINE bool isZeroInit(const T& object)
{
char ZeroBuffOnStack[sizeof(object)] = {};
return memcmp(&object, ZeroBuffOnStack, sizeof(object)) == 0;
}
static NV_INLINE void create(T* first, T* last, const T& a)
{
if (isArrayOfPOD() && isZeroInit(a))
{
if(last>first)
nvidia::intrinsics::memZero(first, uint32_t((last-first) * sizeof(T)));
}
else
{
for(; first<last; ++first)
::new(first)T(a);
}
}
static NV_INLINE void copy(T* first, T* last, const T* src)
{
if (last <= first)
return;
if (isArrayOfPOD())
{
nvidia::intrinsics::memCopy(first, src, uint32_t((last-first) * sizeof(T)));
}
else
{
for(; first<last; ++first, ++src)
::new (first)T(*src);
}
}
static NV_INLINE void destroy(T* first, T* last)
{
if (!isArrayOfPOD())
{
for(; first < last; ++first)
first->~T();
}
}
/*!
Called when pushBack() needs to grow the array.
\param a The element that will be added to this array.
*/
NV_NOINLINE T& growAndPushBack(const T& a);
/*!
Resizes the available memory for the array.
\param capacity
The number of entries that the set should be able to hold.
*/
NV_INLINE void grow(uint32_t capacity)
{
NV_ASSERT(this->capacity() < capacity);
recreate(capacity);
}
/*!
Creates a new memory block, copies all entries to the new block and destroys old entries.
\param capacity
The number of entries that the set should be able to hold.
*/
NV_NOINLINE void recreate(uint32_t capacity);
// The idea here is to prevent accidental bugs with pushBack or insert. Unfortunately
// it interacts badly with InlineArrays with smaller inline allocations.
// TODO(dsequeira): policy template arg, this is exactly what they're for.
NV_INLINE uint32_t capacityIncrement() const
{
const uint32_t capacity = this->capacity();
return capacity == 0 ? 1 : capacity * 2;
}
T* mData;
uint32_t mSize;
uint32_t mCapacity;
};
template <class T, class Alloc>
NV_NOINLINE void Array<T, Alloc>::resize(const uint32_t size, const T& a)
{
reserve(size);
create(mData + mSize, mData + size, a);
destroy(mData + size, mData + mSize);
mSize = size;
}
template <class T, class Alloc>
template <class A>
NV_NOINLINE void Array<T, Alloc>::copy(const Array<T, A>& other)
{
if(!other.empty())
{
mData = allocate(mSize = mCapacity = other.size());
copy(mData, mData + mSize, other.begin());
}
else
{
mData = NULL;
mSize = 0;
mCapacity = 0;
}
// mData = allocate(other.mSize);
// mSize = other.mSize;
// mCapacity = other.mSize;
// copy(mData, mData + mSize, other.mData);
}
template <class T, class Alloc>
NV_NOINLINE void Array<T, Alloc>::resizeUninitialized(const uint32_t size)
{
reserve(size);
mSize = size;
}
template <class T, class Alloc>
NV_NOINLINE T& Array<T, Alloc>::growAndPushBack(const T& a)
{
uint32_t capacity = capacityIncrement();
T* newData = allocate(capacity);
NV_ASSERT((!capacity) || (newData && (newData != mData)));
copy(newData, newData + mSize, mData);
// inserting element before destroying old array
// avoids referencing destroyed object when duplicating array element.
NV_PLACEMENT_NEW(reinterpret_cast<void*>(newData + mSize), T)(a);
destroy(mData, mData + mSize);
if(!isInUserMemory())
deallocate(mData);
mData = newData;
mCapacity = capacity;
return mData[mSize++];
}
template <class T, class Alloc>
NV_NOINLINE void Array<T, Alloc>::recreate(uint32_t capacity)
{
T* newData = allocate(capacity);
NV_ASSERT((!capacity) || (newData && (newData != mData)));
copy(newData, newData + mSize, mData);
destroy(mData, mData + mSize);
if(!isInUserMemory())
deallocate(mData);
mData = newData;
mCapacity = capacity;
}
template <class T, class Alloc>
NV_INLINE void swap(Array<T, Alloc>& x, Array<T, Alloc>& y)
{
x.swap(y);
}
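// Illustrative sketch (not part of the original header): typical use of the
// container above with its default allocator.  replaceWithLast is the O(1)
// unordered removal documented above, while remove preserves order at O(n) cost.
// The function name is hypothetical.
inline uint32_t arrayUsageExample()
{
    Array<uint32_t> values;
    values.reserve(4);                  // a single allocation up front
    for (uint32_t i = 0; i < 4; ++i)
        values.pushBack(i);             // values = { 0, 1, 2, 3 }
    values.replaceWithLast(0);          // values = { 3, 1, 2 }, order not preserved
    values.remove(1);                   // values = { 3, 2 }, order preserved
    return values.size();               // 2
}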
} // namespace shdfnd
} // namespace nvidia
#if NV_VC == 9 || NV_VC == 10
#pragma warning(pop)
#endif
#endif // #ifndef NV_NSFOUNDATION_NSARRAY_H
| 23,231 | C | 28.003745 | 130 | 0.536697 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsBasicTemplates.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef NV_NSFOUNDATION_NSBASICTEMPLATES_H
#define NV_NSFOUNDATION_NSBASICTEMPLATES_H
#include "Ns.h"
namespace nvidia
{
namespace shdfnd
{
template <typename A>
struct Equal
{
bool operator()(const A& a, const A& b) const
{
return a == b;
}
};
template <typename A>
struct Less
{
bool operator()(const A& a, const A& b) const
{
return a < b;
}
};
template <typename A>
struct Greater
{
bool operator()(const A& a, const A& b) const
{
return a > b;
}
};
template <class F, class S>
class Pair
{
public:
F first;
S second;
Pair() : first(F()), second(S())
{
}
Pair(const F& f, const S& s) : first(f), second(s)
{
}
Pair(const Pair& p) : first(p.first), second(p.second)
{
}
// CN - fix for /.../NsBasicTemplates.h(61) : warning C4512: 'nvidia::shdfnd::Pair<F,S>' : assignment operator could
// not be generated
Pair& operator=(const Pair& p)
{
first = p.first;
second = p.second;
return *this;
}
bool operator==(const Pair& p) const
{
return first == p.first && second == p.second;
}
bool operator<(const Pair& p) const
{
if(first < p.first)
return true;
else
return !(p.first < first) && (second < p.second);
}
};
template <unsigned int A>
struct LogTwo
{
static const unsigned int value = LogTwo<(A >> 1)>::value + 1;
};
template <>
struct LogTwo<1>
{
static const unsigned int value = 0;
};
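// Illustrative sketch (not part of the original header): LogTwo<A> evaluates
// floor(log2(A)) at compile time by counting right shifts, e.g.
// LogTwo<8>::value == 3 and LogTwo<12>::value == 3.  The typedef below is a
// hypothetical C++03-style compile-time check: the array type would have an
// illegal negative size if the value were wrong.
typedef char LogTwoOfEightIsThree[LogTwo<8>::value == 3 ? 1 : -1];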
template <typename T>
struct UnConst
{
typedef T Type;
};
template <typename T>
struct UnConst<const T>
{
typedef T Type;
};
template <typename T>
T pointerOffset(void* p, ptrdiff_t offset)
{
return reinterpret_cast<T>(reinterpret_cast<char*>(p) + offset);
}
template <typename T>
T pointerOffset(const void* p, ptrdiff_t offset)
{
return reinterpret_cast<T>(reinterpret_cast<const char*>(p) + offset);
}
template <class T>
NV_CUDA_CALLABLE NV_INLINE void swap(T& x, T& y)
{
const T tmp = x;
x = y;
y = tmp;
}
} // namespace shdfnd
} // namespace nvidia
#endif // #ifndef NV_NSFOUNDATION_NSBASICTEMPLATES_H
| 3,865 | C | 25.479452 | 120 | 0.671669 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsUserAllocated.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef NV_NSFOUNDATION_NSUSERALLOCATED_H
#define NV_NSFOUNDATION_NSUSERALLOCATED_H
#include "NsAllocator.h"
namespace nvidia
{
namespace shdfnd
{
/**
Provides new and delete using a UserAllocator.
Guarantees that 'delete x;' uses the UserAllocator too.
*/
class UserAllocated
{
public:
// NV_SERIALIZATION
NV_INLINE void* operator new(size_t, void* address)
{
return address;
}
//~NV_SERIALIZATION
// Matching operator delete to the above operator new. Don't ask me
// how this makes any sense - Nuernberger.
NV_INLINE void operator delete(void*, void*)
{
}
template <typename Alloc>
NV_INLINE void* operator new(size_t size, Alloc alloc, const char* fileName, int line)
{
return alloc.allocate(size, fileName, line);
}
template <typename Alloc>
NV_INLINE void* operator new [](size_t size, Alloc alloc, const char* fileName, int line)
{ return alloc.allocate(size, fileName, line); }
// placement delete
template <typename Alloc>
NV_INLINE void operator delete(void* ptr, Alloc alloc, const char* fileName, int line)
{
NV_UNUSED(fileName);
NV_UNUSED(line);
alloc.deallocate(ptr);
}
template <typename Alloc>
NV_INLINE void operator delete [](void* ptr, Alloc alloc, const char* fileName, int line)
{
NV_UNUSED(fileName);
NV_UNUSED(line);
alloc.deallocate(ptr);
    }
    NV_INLINE void operator delete(void* ptr)
{
NonTrackingAllocator().deallocate(ptr);
}
NV_INLINE void operator delete [](void* ptr)
{ NonTrackingAllocator().deallocate(ptr); }
};
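// Illustrative sketch (not part of the original header): a type derived from
// UserAllocated is created through the placement form above and destroyed with a
// plain 'delete', which routes through the class operator delete and therefore
// back to the allocator.  ExampleAllocatedObject and the two helper functions are
// hypothetical, and this assumes NonTrackingAllocator (from NsAllocator.h)
// provides the allocate(size, file, line) signature the placement operator new
// above expects.
class ExampleAllocatedObject : public UserAllocated
{
public:
    ExampleAllocatedObject() : mValue(0)
    {
    }
    int mValue;
};

inline ExampleAllocatedObject* createExampleObject()
{
    // The placement form forwards the size, file and line to the allocator.
    return new (NonTrackingAllocator(), __FILE__, __LINE__) ExampleAllocatedObject();
}

inline void destroyExampleObject(ExampleAllocatedObject* object)
{
    // Plain delete ends up in UserAllocated::operator delete, which releases the
    // memory through NonTrackingAllocator.
    delete object;
}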
} // namespace shdfnd
} // namespace nvidia
#endif // #ifndef NV_NSFOUNDATION_NSUSERALLOCATED_H
| 3,439 | C | 36.391304 | 93 | 0.714743 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsIntrinsics.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef NV_NSFOUNDATION_NSINTRINSICS_H
#define NV_NSFOUNDATION_NSINTRINSICS_H
#include "NvPreprocessor.h"
#if(NV_WINDOWS_FAMILY || NV_WINRT)
#include "platform/windows/NsWindowsIntrinsics.h"
#elif NV_X360
#include "xbox360/NsXbox360Intrinsics.h"
#elif(NV_LINUX || NV_ANDROID || NV_APPLE_FAMILY || NV_PS4)
#include "platform/unix/NsUnixIntrinsics.h"
#elif NV_PS3
#include "ps3/NsPS3Intrinsics.h"
#elif NV_PSP2
#include "psp2/NsPSP2Intrinsics.h"
#elif NV_WIIU
#include "wiiu/NsWiiUIntrinsics.h"
#elif NV_XBOXONE
#include "XboxOne/NsXboxOneIntrinsics.h"
#else
#error "Platform not supported!"
#endif
#endif // #ifndef NV_NSFOUNDATION_NSINTRINSICS_H
| 2,345 | C | 43.26415 | 74 | 0.76887 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsHashInternals.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef NV_NSFOUNDATION_NSHASHINTERNALS_H
#define NV_NSFOUNDATION_NSHASHINTERNALS_H
#include "NsBasicTemplates.h"
#include "NsArray.h"
#include "NsBitUtils.h"
#include "NsHash.h"
#include "NvIntrinsics.h"
#if NV_VC
#pragma warning(push)
#pragma warning(disable : 4127) // conditional expression is constant
#endif
namespace nvidia
{
namespace shdfnd
{
namespace internal
{
template <class Entry, class Key, class HashFn, class GetKey, class Allocator, bool compacting>
class HashBase : private Allocator
{
void init(uint32_t initialTableSize, float loadFactor)
{
mBuffer = NULL;
mEntries = NULL;
mEntriesNext = NULL;
mHash = NULL;
mEntriesCapacity = 0;
mHashSize = 0;
mLoadFactor = loadFactor;
mFreeList = uint32_t(EOL);
mTimestamp = 0;
mEntriesCount = 0;
if(initialTableSize)
reserveInternal(initialTableSize);
}
public:
typedef Entry EntryType;
HashBase(uint32_t initialTableSize = 64, float loadFactor = 0.75f) : Allocator(NV_DEBUG_EXP("hashBase"))
{
init(initialTableSize, loadFactor);
}
HashBase(uint32_t initialTableSize, float loadFactor, const Allocator& alloc) : Allocator(alloc)
{
init(initialTableSize, loadFactor);
}
HashBase(const Allocator& alloc) : Allocator(alloc)
{
init(64, 0.75f);
}
~HashBase()
{
destroy(); // No need to clear()
if(mBuffer)
Allocator::deallocate(mBuffer);
}
static const uint32_t EOL = 0xffffffff;
NV_INLINE Entry* create(const Key& k, bool& exists)
{
uint32_t h = 0;
if(mHashSize)
{
h = hash(k);
uint32_t index = mHash[h];
while(index != EOL && !HashFn().equal(GetKey()(mEntries[index]), k))
index = mEntriesNext[index];
exists = index != EOL;
if(exists)
return mEntries + index;
}
else
exists = false;
if(freeListEmpty())
{
grow();
h = hash(k);
}
uint32_t entryIndex = freeListGetNext();
mEntriesNext[entryIndex] = mHash[h];
mHash[h] = entryIndex;
mEntriesCount++;
mTimestamp++;
return mEntries + entryIndex;
}
NV_INLINE const Entry* find(const Key& k) const
{
if(!mHashSize)
return NULL;
const uint32_t h = hash(k);
uint32_t index = mHash[h];
while(index != EOL && !HashFn().equal(GetKey()(mEntries[index]), k))
index = mEntriesNext[index];
return index != EOL ? mEntries + index : NULL;
}
NV_INLINE bool erase(const Key& k)
{
if(!mHashSize)
return false;
const uint32_t h = hash(k);
uint32_t* ptr = mHash + h;
while(*ptr != EOL && !HashFn().equal(GetKey()(mEntries[*ptr]), k))
ptr = mEntriesNext + *ptr;
if(*ptr == EOL)
return false;
const uint32_t index = *ptr;
*ptr = mEntriesNext[index];
mEntries[index].~Entry();
mEntriesCount--;
mTimestamp++;
if(compacting && index != mEntriesCount)
replaceWithLast(index);
freeListAdd(index);
return true;
}
NV_INLINE uint32_t size() const
{
return mEntriesCount;
}
NV_INLINE uint32_t capacity() const
{
return mHashSize;
}
void clear()
{
if(!mHashSize || mEntriesCount == 0)
return;
destroy();
intrinsics::memSet(mHash, EOL, mHashSize * sizeof(uint32_t));
const uint32_t sizeMinus1 = mEntriesCapacity - 1;
for(uint32_t i = 0; i < sizeMinus1; i++)
{
prefetchLine(mEntriesNext + i, 128);
mEntriesNext[i] = i + 1;
}
mEntriesNext[mEntriesCapacity - 1] = uint32_t(EOL);
mFreeList = 0;
mEntriesCount = 0;
}
void reserve(uint32_t size)
{
if(size > mHashSize)
reserveInternal(size);
}
NV_INLINE const Entry* getEntries() const
{
return mEntries;
}
NV_INLINE Entry* insertUnique(const Key& k)
{
NV_ASSERT(find(k) == NULL);
uint32_t h = hash(k);
uint32_t entryIndex = freeListGetNext();
mEntriesNext[entryIndex] = mHash[h];
mHash[h] = entryIndex;
mEntriesCount++;
mTimestamp++;
return mEntries + entryIndex;
}
private:
void destroy()
{
for(uint32_t i = 0; i < mHashSize; i++)
{
for(uint32_t j = mHash[i]; j != EOL; j = mEntriesNext[j])
mEntries[j].~Entry();
}
}
template <typename HK, typename GK, class A, bool comp>
NV_NOINLINE void copy(const HashBase<Entry, Key, HK, GK, A, comp>& other);
// free list management - if we're coalescing, then we use mFreeList to hold
// the top of the free list and it should always be equal to size(). Otherwise,
// we build a free list in the next() pointers.
NV_INLINE void freeListAdd(uint32_t index)
{
if(compacting)
{
mFreeList--;
NV_ASSERT(mFreeList == mEntriesCount);
}
else
{
mEntriesNext[index] = mFreeList;
mFreeList = index;
}
}
NV_INLINE void freeListAdd(uint32_t start, uint32_t end)
{
if(!compacting)
{
for(uint32_t i = start; i < end - 1; i++) // add the new entries to the free list
mEntriesNext[i] = i + 1;
// link in old free list
mEntriesNext[end - 1] = mFreeList;
NV_ASSERT(mFreeList != end - 1);
mFreeList = start;
}
else if(mFreeList == EOL) // don't reset the free ptr for the compacting hash unless it's empty
mFreeList = start;
}
NV_INLINE uint32_t freeListGetNext()
{
NV_ASSERT(!freeListEmpty());
if(compacting)
{
NV_ASSERT(mFreeList == mEntriesCount);
return mFreeList++;
}
else
{
uint32_t entryIndex = mFreeList;
mFreeList = mEntriesNext[mFreeList];
return entryIndex;
}
}
NV_INLINE bool freeListEmpty() const
{
if(compacting)
return mEntriesCount == mEntriesCapacity;
else
return mFreeList == EOL;
}
NV_INLINE void replaceWithLast(uint32_t index)
{
NV_PLACEMENT_NEW(mEntries + index, Entry)(mEntries[mEntriesCount]);
mEntries[mEntriesCount].~Entry();
mEntriesNext[index] = mEntriesNext[mEntriesCount];
uint32_t h = hash(GetKey()(mEntries[index]));
uint32_t* ptr;
for(ptr = mHash + h; *ptr != mEntriesCount; ptr = mEntriesNext + *ptr)
NV_ASSERT(*ptr != EOL);
*ptr = index;
}
NV_INLINE uint32_t hash(const Key& k, uint32_t hashSize) const
{
return HashFn()(k) & (hashSize - 1);
}
NV_INLINE uint32_t hash(const Key& k) const
{
return hash(k, mHashSize);
}
void reserveInternal(uint32_t size)
{
if(!isPowerOfTwo(size))
size = nextPowerOfTwo(size);
NV_ASSERT(!(size & (size - 1)));
// decide whether iteration can be done on the entries directly
bool resizeCompact = compacting || freeListEmpty();
// define new table sizes
uint32_t oldEntriesCapacity = mEntriesCapacity;
uint32_t newEntriesCapacity = uint32_t(float(size) * mLoadFactor);
uint32_t newHashSize = size;
// allocate new common buffer and setup pointers to new tables
uint8_t* newBuffer;
uint32_t* newHash;
uint32_t* newEntriesNext;
Entry* newEntries;
{
uint32_t newHashByteOffset = 0;
uint32_t newEntriesNextBytesOffset = newHashByteOffset + newHashSize * sizeof(uint32_t);
uint32_t newEntriesByteOffset = newEntriesNextBytesOffset + newEntriesCapacity * sizeof(uint32_t);
newEntriesByteOffset += (16 - (newEntriesByteOffset & 15)) & 15;
uint32_t newBufferByteSize = newEntriesByteOffset + newEntriesCapacity * sizeof(Entry);
newBuffer = reinterpret_cast<uint8_t*>(Allocator::allocate(newBufferByteSize, __FILE__, __LINE__));
NV_ASSERT(newBuffer);
newHash = reinterpret_cast<uint32_t*>(newBuffer + newHashByteOffset);
newEntriesNext = reinterpret_cast<uint32_t*>(newBuffer + newEntriesNextBytesOffset);
newEntries = reinterpret_cast<Entry*>(newBuffer + newEntriesByteOffset);
}
// initialize new hash table
intrinsics::memSet(newHash, uint32_t(EOL), newHashSize * sizeof(uint32_t));
// iterate over old entries, re-hash and create new entries
if(resizeCompact)
{
// check that old free list is empty - we don't need to copy the next entries
NV_ASSERT(compacting || mFreeList == EOL);
for(uint32_t index = 0; index < mEntriesCount; ++index)
{
uint32_t h = hash(GetKey()(mEntries[index]), newHashSize);
newEntriesNext[index] = newHash[h];
newHash[h] = index;
NV_PLACEMENT_NEW(newEntries + index, Entry)(mEntries[index]);
mEntries[index].~Entry();
}
}
else
{
// copy old free list, only required for non compact resizing
intrinsics::memCopy(newEntriesNext, mEntriesNext, mEntriesCapacity * sizeof(uint32_t));
for(uint32_t bucket = 0; bucket < mHashSize; bucket++)
{
uint32_t index = mHash[bucket];
while(index != EOL)
{
uint32_t h = hash(GetKey()(mEntries[index]), newHashSize);
newEntriesNext[index] = newHash[h];
NV_ASSERT(index != newHash[h]);
newHash[h] = index;
NV_PLACEMENT_NEW(newEntries + index, Entry)(mEntries[index]);
mEntries[index].~Entry();
index = mEntriesNext[index];
}
}
}
// swap buffer and pointers
Allocator::deallocate(mBuffer);
mBuffer = newBuffer;
mHash = newHash;
mHashSize = newHashSize;
mEntriesNext = newEntriesNext;
mEntries = newEntries;
mEntriesCapacity = newEntriesCapacity;
freeListAdd(oldEntriesCapacity, newEntriesCapacity);
}
void grow()
{
NV_ASSERT((mFreeList == EOL) || (compacting && (mEntriesCount == mEntriesCapacity)));
uint32_t size = mHashSize == 0 ? 16 : mHashSize * 2;
reserve(size);
}
uint8_t* mBuffer;
Entry* mEntries;
uint32_t* mEntriesNext; // same size as mEntries
uint32_t* mHash;
uint32_t mEntriesCapacity;
uint32_t mHashSize;
float mLoadFactor;
uint32_t mFreeList;
uint32_t mTimestamp;
uint32_t mEntriesCount; // number of entries
public:
class Iter
{
public:
NV_INLINE Iter(HashBase& b) : mBucket(0), mEntry(uint32_t(b.EOL)), mTimestamp(b.mTimestamp), mBase(b)
{
if(mBase.mEntriesCapacity > 0)
{
mEntry = mBase.mHash[0];
skip();
}
}
NV_INLINE void check() const
{
NV_ASSERT(mTimestamp == mBase.mTimestamp);
}
NV_INLINE Entry operator*() const
{
check();
return mBase.mEntries[mEntry];
}
NV_INLINE Entry* operator->() const
{
check();
return mBase.mEntries + mEntry;
}
NV_INLINE Iter operator++()
{
check();
advance();
return *this;
}
NV_INLINE Iter operator++(int)
{
check();
Iter i = *this;
advance();
return i;
}
NV_INLINE bool done() const
{
check();
return mEntry == mBase.EOL;
}
private:
NV_INLINE void advance()
{
mEntry = mBase.mEntriesNext[mEntry];
skip();
}
NV_INLINE void skip()
{
while(mEntry == mBase.EOL)
{
if(++mBucket == mBase.mHashSize)
break;
mEntry = mBase.mHash[mBucket];
}
}
Iter& operator=(const Iter&);
uint32_t mBucket;
uint32_t mEntry;
uint32_t mTimestamp;
HashBase& mBase;
};
};
template <class Entry, class Key, class HashFn, class GetKey, class Allocator, bool compacting>
template <typename HK, typename GK, class A, bool comp>
NV_NOINLINE void
HashBase<Entry, Key, HashFn, GetKey, Allocator, compacting>::copy(const HashBase<Entry, Key, HK, GK, A, comp>& other)
{
reserve(other.mEntriesCount);
for(uint32_t i = 0; i < other.mEntriesCount; i++)
{
for(uint32_t j = other.mHash[i]; j != EOL; j = other.mEntriesNext[j])
{
const Entry& otherEntry = other.mEntries[j];
bool exists;
Entry* newEntry = create(GK()(otherEntry), exists);
NV_ASSERT(!exists);
NV_PLACEMENT_NEW(newEntry, Entry)(otherEntry);
}
}
}
template <class Key, class HashFn, class Allocator = typename AllocatorTraits<Key>::Type, bool Coalesced = false>
class HashSetBase
{
NV_NOCOPY(HashSetBase)
public:
struct GetKey
{
NV_INLINE const Key& operator()(const Key& e)
{
return e;
}
};
typedef HashBase<Key, Key, HashFn, GetKey, Allocator, Coalesced> BaseMap;
typedef typename BaseMap::Iter Iterator;
HashSetBase(uint32_t initialTableSize, float loadFactor, const Allocator& alloc)
: mBase(initialTableSize, loadFactor, alloc)
{
}
HashSetBase(const Allocator& alloc) : mBase(64, 0.75f, alloc)
{
}
HashSetBase(uint32_t initialTableSize = 64, float loadFactor = 0.75f) : mBase(initialTableSize, loadFactor)
{
}
bool insert(const Key& k)
{
bool exists;
Key* e = mBase.create(k, exists);
if(!exists)
NV_PLACEMENT_NEW(e, Key)(k);
return !exists;
}
NV_INLINE bool contains(const Key& k) const
{
return mBase.find(k) != 0;
}
NV_INLINE bool erase(const Key& k)
{
return mBase.erase(k);
}
NV_INLINE uint32_t size() const
{
return mBase.size();
}
NV_INLINE uint32_t capacity() const
{
return mBase.capacity();
}
NV_INLINE void reserve(uint32_t size)
{
mBase.reserve(size);
}
NV_INLINE void clear()
{
mBase.clear();
}
protected:
BaseMap mBase;
};
template <class Key, class Value, class HashFn, class Allocator = typename AllocatorTraits<Pair<const Key, Value> >::Type>
class HashMapBase
{
NV_NOCOPY(HashMapBase)
public:
typedef Pair<const Key, Value> Entry;
struct GetKey
{
NV_INLINE const Key& operator()(const Entry& e)
{
return e.first;
}
};
typedef HashBase<Entry, Key, HashFn, GetKey, Allocator, true> BaseMap;
typedef typename BaseMap::Iter Iterator;
HashMapBase(uint32_t initialTableSize, float loadFactor, const Allocator& alloc)
: mBase(initialTableSize, loadFactor, alloc)
{
}
HashMapBase(const Allocator& alloc) : mBase(64, 0.75f, alloc)
{
}
HashMapBase(uint32_t initialTableSize = 64, float loadFactor = 0.75f) : mBase(initialTableSize, loadFactor)
{
}
bool insert(const Key /*&*/ k, const Value /*&*/ v)
{
bool exists;
Entry* e = mBase.create(k, exists);
if(!exists)
NV_PLACEMENT_NEW(e, Entry)(k, v);
return !exists;
}
Value& operator[](const Key& k)
{
bool exists;
Entry* e = mBase.create(k, exists);
if(!exists)
NV_PLACEMENT_NEW(e, Entry)(k, Value());
return e->second;
}
NV_INLINE const Entry* find(const Key& k) const
{
return mBase.find(k);
}
NV_INLINE bool erase(const Key& k)
{
return mBase.erase(k);
}
NV_INLINE uint32_t size() const
{
return mBase.size();
}
NV_INLINE uint32_t capacity() const
{
return mBase.capacity();
}
NV_INLINE Iterator getIterator()
{
return Iterator(mBase);
}
NV_INLINE void reserve(uint32_t size)
{
mBase.reserve(size);
}
NV_INLINE void clear()
{
mBase.clear();
}
protected:
BaseMap mBase;
};
}
} // namespace shdfnd
} // namespace nvidia
#if NV_VC
#pragma warning(pop)
#endif
#endif // #ifndef NV_NSFOUNDATION_NSHASHINTERNALS_H
| 18,702 | C | 26.464023 | 122 | 0.574537 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/Ns.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef NV_NSFOUNDATION_NS_H
#define NV_NSFOUNDATION_NS_H
/*! \file top level include file for shared foundation */
#include "Nv.h"
/**
Platform specific defines
*/
#if NV_WINDOWS_FAMILY || NV_XBOXONE
#pragma intrinsic(memcmp)
#pragma intrinsic(memcpy)
#pragma intrinsic(memset)
#pragma intrinsic(abs)
#pragma intrinsic(labs)
#endif
// An expression that should expand to nothing in non NV_CHECKED builds.
// We currently use this only for tagging the purpose of containers for memory use tracking.
#if NV_CHECKED
#define NV_DEBUG_EXP(x) (x)
#else
#define NV_DEBUG_EXP(x)
#endif
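// Usage sketch (editorial addition): containers pass an allocation name to
// their allocator only in checked builds via NV_DEBUG_EXP; the container and
// member names below are illustrative only.
//
//     MyContainer() : mAllocator(NV_DEBUG_EXP("myContainer")) {}  // name compiles away otherwise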
#define NV_SIGN_BITMASK 0x80000000
namespace nvidia
{
namespace shdfnd
{
// Int-as-bool type - has some uses for efficiency and with SIMD
typedef int IntBool;
static const IntBool IntFalse = 0;
static const IntBool IntTrue = 1;
}
} // namespace nvidia
#endif // #ifndef NV_NSFOUNDATION_NS_H
| 2,574 | C | 35.785714 | 92 | 0.761072 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NvUnionCast.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef NV_FOUNDATION_NV_UNION_CAST_H
#define NV_FOUNDATION_NV_UNION_CAST_H
#include "NvPreprocessor.h"
/** \addtogroup foundation
@{
*/
#if !NV_DOXYGEN
namespace nvidia
{
#endif
template<class A, class B> NV_FORCE_INLINE A NvUnionCast(B b)
{
union AB
{
AB(B bb)
: _b(bb)
{
}
B _b;
A _a;
} u(b);
return u._a;
}
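// Usage sketch (editorial addition): NvUnionCast reinterprets the bits of a
// value as another type of the same size, e.g. to inspect a float's IEEE-754
// representation without a pointer cast.
//
//     float f = 1.0f;
//     uint32_t bits = NvUnionCast<uint32_t, float>(f);  // 0x3f800000
//     float back = NvUnionCast<float, uint32_t>(bits);  // 1.0f again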
#if !NV_DOXYGEN
} // namespace nvidia
#endif
/** @} */
#endif
| 2,094 | C | 31.734375 | 74 | 0.7383 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsInlineAllocator.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef NV_NSFOUNDATION_NSINLINEALLOCATOR_H
#define NV_NSFOUNDATION_NSINLINEALLOCATOR_H
#include "NsUserAllocated.h"
namespace nvidia
{
namespace shdfnd
{
// this is used by the array class to allocate some space for a small number
// of objects along with the metadata
template <uint32_t N, typename BaseAllocator>
class InlineAllocator : private BaseAllocator
{
public:
InlineAllocator(const NvEMPTY v) : BaseAllocator(v)
{
}
InlineAllocator(const BaseAllocator& alloc = BaseAllocator()) : BaseAllocator(alloc), mBufferUsed(false)
{
}
InlineAllocator(const InlineAllocator& aloc) : BaseAllocator(aloc), mBufferUsed(false)
{
}
void* allocate(uint32_t size, const char* filename, int line)
{
if(!mBufferUsed && size <= N)
{
mBufferUsed = true;
return mBuffer;
}
return BaseAllocator::allocate(size, filename, line);
}
void deallocate(void* ptr)
{
if(ptr == mBuffer)
mBufferUsed = false;
else
BaseAllocator::deallocate(ptr);
}
NV_FORCE_INLINE uint8_t* getInlineBuffer()
{
return mBuffer;
}
NV_FORCE_INLINE bool isBufferUsed() const
{
return mBufferUsed;
}
protected:
uint8_t mBuffer[N];
bool mBufferUsed;
};
} // namespace shdfnd
} // namespace nvidia
#endif // #ifndef NV_NSFOUNDATION_NSINLINEALLOCATOR_H
| 3,118 | C | 33.274725 | 108 | 0.713278 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsHashMap.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef NV_NSFOUNDATION_NSHASHMAP_H
#define NV_NSFOUNDATION_NSHASHMAP_H
#include "NsHashInternals.h"
// TODO: make this doxy-format
//
// This header defines two hash maps. Hash maps
// * support custom initial table sizes (rounded up internally to power-of-2)
// * support custom static allocator objects
// * auto-resize, based on a load factor (i.e. a 64-entry .75 load factor hash will resize
// when the 49th element is inserted)
// * are based on open hashing
// * have O(1) contains, erase
//
// Maps have STL-like copying semantics, and properly initialize and destruct copies of objects
//
// There are two forms of map: coalesced and uncoalesced. Coalesced maps keep the entries in the
// initial segment of an array, so are fast to iterate over; however deletion is approximately
// twice as expensive.
//
// HashMap<T>:
// bool insert(const Key& k, const Value& v) O(1) amortized (exponential resize policy)
// Value & operator[](const Key& k) O(1) for existing objects, else O(1) amortized
// const Entry * find(const Key& k); O(1)
// bool erase(const T& k); O(1)
// uint32_t size(); constant
// void reserve(uint32_t size); O(MAX(currentOccupancy,size))
// void clear(); O(currentOccupancy) (with zero constant for objects without destructors)
// Iterator getIterator();
//
// operator[] creates an entry if one does not exist, initializing with the default constructor.
// CoalescedHashMap<T> does not support getIterator, but instead supports
// const Key *getEntries();
//
// Use of iterators:
//
// for(HashMap::Iterator iter = test.getIterator(); !iter.done(); ++iter)
// myFunction(iter->first, iter->second);
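//
// Usage sketch (editorial addition, not part of the original header); the key,
// value and function names below are illustrative only:
//
//     nvidia::shdfnd::HashMap<uint32_t, float> map;
//     map.insert(1, 2.5f);                // returns false if the key already exists
//     map[7] = 3.0f;                      // creates a default-constructed entry, then assigns
//     const nvidia::shdfnd::HashMap<uint32_t, float>::Entry* e = map.find(1);
//     if(e)
//         useValue(e->second);            // useValue is hypothetical
//     for(nvidia::shdfnd::HashMap<uint32_t, float>::Iterator it = map.getIterator(); !it.done(); ++it)
//         useValue(it->second);
//     map.erase(1);                       // O(1), returns true if the key was present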
namespace nvidia
{
namespace shdfnd
{
template <class Key, class Value, class HashFn = Hash<Key>, class Allocator = NonTrackingAllocator>
class HashMap : public internal::HashMapBase<Key, Value, HashFn, Allocator>
{
public:
typedef internal::HashMapBase<Key, Value, HashFn, Allocator> HashMapBase;
typedef typename HashMapBase::Iterator Iterator;
HashMap(uint32_t initialTableSize = 64, float loadFactor = 0.75f) : HashMapBase(initialTableSize, loadFactor)
{
}
HashMap(uint32_t initialTableSize, float loadFactor, const Allocator& alloc)
: HashMapBase(initialTableSize, loadFactor, alloc)
{
}
HashMap(const Allocator& alloc) : HashMapBase(64, 0.75f, alloc)
{
}
Iterator getIterator()
{
return Iterator(HashMapBase::mBase);
}
};
template <class Key, class Value, class HashFn = Hash<Key>, class Allocator = NonTrackingAllocator>
class CoalescedHashMap : public internal::HashMapBase<Key, Value, HashFn, Allocator>
{
public:
typedef internal::HashMapBase<Key, Value, HashFn, Allocator> HashMapBase;
CoalescedHashMap(uint32_t initialTableSize = 64, float loadFactor = 0.75f)
: HashMapBase(initialTableSize, loadFactor)
{
}
const Pair<const Key, Value>* getEntries() const
{
return HashMapBase::mBase.getEntries();
}
};
} // namespace shdfnd
} // namespace nvidia
#endif // #ifndef NV_NSFOUNDATION_NSHASHMAP_H
| 5,061 | C | 41.898305 | 115 | 0.690773 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsHash.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef NV_NSFOUNDATION_NSHASH_H
#define NV_NSFOUNDATION_NSHASH_H
#include "Ns.h"
#include "NsBasicTemplates.h"
#if NV_VC
#pragma warning(push)
#pragma warning(disable : 4302)
#endif
#if NV_LINUX
#include "NvSimpleTypes.h"
#endif
/*!
Central definition of hash functions
*/
namespace nvidia
{
namespace shdfnd
{
// Hash functions
// Thomas Wang's 32 bit mix
// http://www.cris.com/~Ttwang/tech/inthash.htm
NV_FORCE_INLINE uint32_t hash(const uint32_t key)
{
uint32_t k = key;
k += ~(k << 15);
k ^= (k >> 10);
k += (k << 3);
k ^= (k >> 6);
k += ~(k << 11);
k ^= (k >> 16);
return uint32_t(k);
}
NV_FORCE_INLINE uint32_t hash(const int32_t key)
{
return hash(uint32_t(key));
}
// Thomas Wang's 64 bit mix
// http://www.cris.com/~Ttwang/tech/inthash.htm
NV_FORCE_INLINE uint32_t hash(const uint64_t key)
{
uint64_t k = key;
k += ~(k << 32);
k ^= (k >> 22);
k += ~(k << 13);
k ^= (k >> 8);
k += (k << 3);
k ^= (k >> 15);
k += ~(k << 27);
k ^= (k >> 31);
return uint32_t(UINT32_MAX & k);
}
#if NV_APPLE_FAMILY
// hash for size_t, to make gcc happy
NV_INLINE uint32_t hash(const size_t key)
{
#if NV_P64_FAMILY
return hash(uint64_t(key));
#else
return hash(uint32_t(key));
#endif
}
#endif
// Hash function for pointers
NV_INLINE uint32_t hash(const void* ptr)
{
#if NV_P64_FAMILY
return hash(uint64_t(ptr));
#else
return hash(uint32_t(UINT32_MAX & size_t(ptr)));
#endif
}
// Hash function for pairs
template <typename F, typename S>
NV_INLINE uint32_t hash(const Pair<F, S>& p)
{
uint32_t seed = 0x876543;
uint32_t m = 1000007;
return hash(p.second) ^ (m * (hash(p.first) ^ (m * seed)));
}
// hash object for hash map template parameter
template <class Key>
struct Hash
{
uint32_t operator()(const Key& k) const
{
return hash(k);
}
bool equal(const Key& k0, const Key& k1) const
{
return k0 == k1;
}
};
// specialization for strings
template <>
struct Hash<const char*>
{
public:
uint32_t operator()(const char* _string) const
{
// "DJB" string hash
const uint8_t* string = reinterpret_cast<const uint8_t*>(_string);
uint32_t h = 5381;
for(const uint8_t* ptr = string; *ptr; ptr++)
h = ((h << 5) + h) ^ uint32_t(*ptr);
return h;
}
bool equal(const char* string0, const char* string1) const
{
return !strcmp(string0, string1);
}
};
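// Usage sketch (editorial addition): the free hash() overloads and the Hash<>
// functor can be used directly; hash containers pick them up through their
// HashFn template parameter.
//
//     uint32_t h1 = nvidia::shdfnd::hash(uint32_t(1234));
//     uint32_t h2 = nvidia::shdfnd::hash(nvidia::shdfnd::Pair<uint32_t, uint32_t>(1, 2));
//     nvidia::shdfnd::Hash<const char*> stringHash;
//     uint32_t h3 = stringHash("hello");            // DJB hash of the characters
//     bool same = stringHash.equal("abc", "abc");   // strcmp-based comparison, true here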
} // namespace shdfnd
} // namespace nvidia
#if NV_VC
#pragma warning(pop)
#endif
#endif // #ifndef NV_NSFOUNDATION_NSHASH_H
| 4,292 | C | 25.5 | 74 | 0.662162 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsAoS.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef NS_AOS_H
#define NS_AOS_H
#include "NvPreprocessor.h"
#if NV_WINDOWS_FAMILY && !NV_NEON
#include "platform/windows/NsWindowsAoS.h"
#elif NV_X360
#include "xbox360/NsXbox360AoS.h"
#elif (NV_LINUX || NV_ANDROID || NV_APPLE || NV_PS4 || (NV_WINRT && NV_NEON))
#include "platform/unix/NsUnixAoS.h"
#elif NV_PS3
#include "ps3/NsPS3AoS.h"
#elif NV_PSP2
#include "psp2/NsPSP2AoS.h"
#elif NV_XBOXONE
#include "XboxOne/NsXboxOneAoS.h"
#else
#error "Platform not supported!"
#endif
#endif
| 2,212 | C | 40.754716 | 77 | 0.74774 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsFPU.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef NV_NSFOUNDATION_NSFPU_H
#define NV_NSFOUNDATION_NSFPU_H
#include "Ns.h"
#include "NsIntrinsics.h"
// unsigned integer representation of a floating-point value.
#if NV_PS3
NV_FORCE_INLINE unsigned int NV_IR(const float x)
{
union
{
int i;
float f;
} u;
u.f = x;
return u.i;
}
NV_FORCE_INLINE int NV_SIR(const float x)
{
union
{
int i;
float f;
} u;
u.f = x;
return u.i;
}
NV_FORCE_INLINE float NV_FR(const unsigned int x)
{
union
{
unsigned int i;
float f;
} u;
u.i = x;
return u.f;
}
#else
#define NV_IR(x) ((uint32_t&)(x))
#define NV_SIR(x) ((int32_t&)(x))
#define NV_FR(x) ((float&)(x))
#endif
// signed integer representation of a floating-point value.
// Floating-point representation of an integer value.
#define NV_SIGN_BITMASK 0x80000000
#define NV_FPU_GUARD shdfnd::FPUGuard scopedFpGuard;
#define NV_SIMD_GUARD shdfnd::SIMDGuard scopedFpGuard;
#define NV_SUPPORT_GUARDS (NV_WINDOWS_FAMILY || NV_XBOXONE || NV_LINUX || NV_PS4 || NV_OSX)
namespace nvidia
{
namespace shdfnd
{
// sets the default SDK state for scalar and SIMD units
class NV_FOUNDATION_API FPUGuard
{
public:
FPUGuard(); // set fpu control word for PhysX
~FPUGuard(); // restore fpu control word
private:
uint32_t mControlWords[8];
};
// sets default SDK state for simd unit only, lighter weight than FPUGuard
class SIMDGuard
{
public:
NV_INLINE SIMDGuard(); // set simd control word for PhysX
NV_INLINE ~SIMDGuard(); // restore simd control word
private:
#if NV_SUPPORT_GUARDS
uint32_t mControlWord;
#endif
};
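// Usage sketch (editorial addition): a guard object is placed on the stack for
// the scope of floating-point-sensitive code (the NV_FPU_GUARD / NV_SIMD_GUARD
// macros above assume the nvidia namespace is visible); "simulateStep" is an
// illustrative name only.
//
//     void simulateStep()
//     {
//         NV_SIMD_GUARD;  // expands to shdfnd::SIMDGuard scopedFpGuard; sets the SIMD control word
//         // ... SIMD math runs with the expected rounding/denormal settings ...
//     }                   // control word restored when scopedFpGuard is destroyed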
/**
\brief Enables floating point exceptions for the scalar and SIMD unit
*/
NV_FOUNDATION_API void enableFPExceptions();
/**
\brief Disables floating point exceptions for the scalar and SIMD unit
*/
NV_FOUNDATION_API void disableFPExceptions();
} // namespace shdfnd
} // namespace nvidia
#if NV_WINDOWS_FAMILY || NV_XBOXONE
#include "platform/windows/NsWindowsFPU.h"
#elif NV_LINUX || NV_PS4 || NV_OSX
#include "platform/unix/NsUnixFPU.h"
#else
NV_INLINE nvidia::shdfnd::SIMDGuard::SIMDGuard()
{
}
NV_INLINE nvidia::shdfnd::SIMDGuard::~SIMDGuard()
{
}
#endif
#endif // #ifndef NV_NSFOUNDATION_NSFPU_H
| 3,951 | C | 26.830986 | 91 | 0.715515 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsInlineArray.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef NV_NSFOUNDATION_NSINLINEARRAY_H
#define NV_NSFOUNDATION_NSINLINEARRAY_H
#include "NsArray.h"
#include "NsInlineAllocator.h"
namespace nvidia
{
namespace shdfnd
{
// array that pre-allocates for N elements
template <typename T, uint32_t N, typename Alloc = typename AllocatorTraits<T>::Type>
class InlineArray : public Array<T, InlineAllocator<N * sizeof(T), Alloc> >
{
typedef InlineAllocator<N * sizeof(T), Alloc> Allocator;
public:
InlineArray(const NvEMPTY v) : Array<T, Allocator>(v)
{
if(isInlined())
this->mData = reinterpret_cast<T*>(Array<T, Allocator>::getInlineBuffer());
}
NV_INLINE bool isInlined() const
{
return Allocator::isBufferUsed();
}
NV_INLINE explicit InlineArray(const Alloc& alloc = Alloc()) : Array<T, Allocator>(alloc)
{
this->mData = this->allocate(N);
this->mCapacity = N;
}
};
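// Usage sketch (editorial addition): the first N elements live in the object's
// inline buffer; growing past N falls back to the base allocator. The element
// type and count below are illustrative only.
//
//     nvidia::shdfnd::InlineArray<uint32_t, 8> indices;
//     for(uint32_t i = 0; i < 8; ++i)
//         indices.pushBack(i);            // served from the inline buffer
//     bool inlined = indices.isInlined(); // still true here
//     indices.pushBack(8);                // triggers a heap allocation via Alloc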
} // namespace shdfnd
} // namespace nvidia
#endif // #ifndef NV_NSFOUNDATION_NSINLINEARRAY_H
| 2,697 | C | 38.67647 | 93 | 0.732295 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsAllocator.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef NV_NSFOUNDATION_NSALLOCATOR_H
#define NV_NSFOUNDATION_NSALLOCATOR_H
#include "NvAllocatorCallback.h"
#include "Ns.h"
#include "NsGlobals.h"
#if(NV_WINDOWS_FAMILY || NV_WINRT || NV_X360 || NV_XBOXONE)
#include <exception>
#include <typeinfo.h>
#endif
#if(NV_APPLE_FAMILY)
#include <typeinfo>
#endif
#if NV_WIIU
#pragma ghs nowarning 193 // warning #193-D: zero used for undefined preprocessing identifier
#endif
#include <new>
#if NV_WIIU
#pragma ghs endnowarning
#endif
// Allocation macros going through user allocator
#if NV_CHECKED
#define NV_ALLOC(n, name) nvidia::shdfnd::NamedAllocator(name).allocate(n, __FILE__, __LINE__)
#else
#define NV_ALLOC(n, name) nvidia::shdfnd::NonTrackingAllocator().allocate(n, __FILE__, __LINE__)
#endif
#define NV_ALLOC_TEMP(n, name) NV_ALLOC(n, name)
#define NV_FREE(x) nvidia::shdfnd::NonTrackingAllocator().deallocate(x)
#define NV_FREE_AND_RESET(x) \
{ \
NV_FREE(x); \
x = 0; \
}
// The following macros support plain-old-types and classes derived from UserAllocated.
#define NV_NEW(T) new (nvidia::shdfnd::ReflectionAllocator<T>(), __FILE__, __LINE__) T
#define NV_NEW_TEMP(T) NV_NEW(T)
#define NV_DELETE(x) delete x
#define NV_DELETE_AND_RESET(x) \
{ \
NV_DELETE(x); \
x = 0; \
}
#define NV_DELETE_POD(x) \
{ \
NV_FREE(x); \
x = 0; \
}
#define NV_DELETE_ARRAY(x) \
{ \
NV_DELETE([] x); \
x = 0; \
}
// aligned allocation
#define NV_ALIGNED16_ALLOC(n) nvidia::shdfnd::AlignedAllocator<16>().allocate(n, __FILE__, __LINE__)
#define NV_ALIGNED16_FREE(x) nvidia::shdfnd::AlignedAllocator<16>().deallocate(x)
//! placement new macro to make it easy to spot bad use of 'new'
#define NV_PLACEMENT_NEW(p, T) new (p) T
#if NV_DEBUG || NV_CHECKED
#define NV_USE_NAMED_ALLOCATOR 1
#else
#define NV_USE_NAMED_ALLOCATOR 0
#endif
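// Usage sketch (editorial addition): typical use of the macros above; the
// buffer size, allocation name and the POD struct below are illustrative only.
//
//     void* scratch = NV_ALLOC(1024, "scratch buffer");  // routed through the user allocator
//     NV_FREE(scratch);
//
//     struct Particle { float x, y, z; };    // plain-old-data type
//     Particle* p = NV_NEW(Particle);        // ReflectionAllocator<Particle> + placement new
//     NV_DELETE_POD(p);                      // PODs are freed with NV_DELETE_POD / NV_FREE, not NV_DELETE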
// Don't use inline for alloca !!!
#if NV_WINDOWS_FAMILY || NV_WINRT
#include <malloc.h>
#define NvAlloca(x) _alloca(x)
#elif NV_LINUX || NV_ANDROID
#include <malloc.h>
#define NvAlloca(x) alloca(x)
#elif NV_PSP2
#include <alloca.h>
#define NvAlloca(x) alloca(x)
#elif NV_APPLE_FAMILY
#include <alloca.h>
#define NvAlloca(x) alloca(x)
#elif NV_PS3
#include <alloca.h>
#define NvAlloca(x) alloca(x)
#elif NV_X360
#include <malloc.h>
#define NvAlloca(x) _alloca(x)
#elif NV_WIIU
#include <alloca.h>
#define NvAlloca(x) alloca(x)
#elif NV_PS4
#include <memory.h>
#define NvAlloca(x) alloca(x)
#elif NV_XBOXONE
#include <malloc.h>
#define NvAlloca(x) alloca(x)
#endif
#define NvAllocaAligned(x, alignment) ((size_t(NvAlloca(x + alignment)) + (alignment - 1)) & ~size_t(alignment - 1))
namespace nvidia
{
namespace shdfnd
{
/*
* Bootstrap allocator using malloc/free.
* Don't use unless your objects get allocated before foundation is initialized.
*/
class RawAllocator
{
public:
RawAllocator(const char* = 0)
{
}
void* allocate(size_t size, const char*, int)
{
// malloc returns valid pointer for size==0, no need to check
return ::malloc(size);
}
void deallocate(void* ptr)
{
// free(0) is guaranteed to have no side effect, no need to check
::free(ptr);
}
};
/*
* Allocator that simply calls straight back to the application without tracking.
* This is used by the heap (Foundation::mNamedAllocMap) that tracks allocations
* because it needs to be able to grow as a result of an allocation.
* Making the hash table re-entrant to deal with this may not make sense.
*/
class NonTrackingAllocator
{
public:
NV_FORCE_INLINE NonTrackingAllocator(const char* = 0)
{
}
NV_FORCE_INLINE void* allocate(size_t size, const char* file, int line)
{
return !size ? 0 : getAllocator().allocate(size, "NonTrackedAlloc", file, line);
}
NV_FORCE_INLINE void deallocate(void* ptr)
{
if(ptr)
getAllocator().deallocate(ptr);
}
};
/**
Allocator used to access the global NvAllocatorCallback instance using a dynamic name.
*/
void initializeNamedAllocatorGlobals();
void terminateNamedAllocatorGlobals();
#if NV_USE_NAMED_ALLOCATOR // can be slow, so only use in debug/checked
class NV_FOUNDATION_API NamedAllocator
{
public:
NamedAllocator(const NvEMPTY);
NamedAllocator(const char* name = 0); // todo: should not have default argument!
NamedAllocator(const NamedAllocator&);
~NamedAllocator();
NamedAllocator& operator=(const NamedAllocator&);
void* allocate(size_t size, const char* filename, int line);
void deallocate(void* ptr);
};
#else
class NamedAllocator;
#endif // NV_DEBUG
/**
Allocator used to access the global NvAllocatorCallback instance using a static name derived from T.
*/
template <typename T>
class ReflectionAllocator
{
static const char* getName()
{
if(!getReflectionAllocatorReportsNames())
return "<allocation names disabled>";
#if NV_GCC_FAMILY
return __PRETTY_FUNCTION__;
#else
// name() calls malloc(), raw_name() wouldn't
return typeid(T).name();
#endif
}
public:
ReflectionAllocator(const NvEMPTY)
{
}
ReflectionAllocator(const char* = 0)
{
}
inline ReflectionAllocator(const ReflectionAllocator&)
{
}
void* allocate(size_t size, const char* filename, int line)
{
return size ? getAllocator().allocate(size, getName(), filename, line) : 0;
}
void deallocate(void* ptr)
{
if(ptr)
getAllocator().deallocate(ptr);
}
};
template <typename T>
struct AllocatorTraits
{
#if NV_USE_NAMED_ALLOCATOR
typedef NamedAllocator Type;
#else
typedef ReflectionAllocator<T> Type;
#endif
};
// if you get a build error here, you are trying to NV_NEW a class
// that is neither plain-old-type nor derived from UserAllocated
template <typename T, typename X>
union EnableIfPod
{
int i;
T t;
typedef X Type;
};
} // namespace shdfnd
} // namespace nvidia
// Global placement new for ReflectionAllocator templated by
// plain-old-type. Allows using NV_NEW for pointers and built-in-types.
//
// ATTENTION: You need to use NV_DELETE_POD or NV_FREE to deallocate
// memory, not NV_DELETE. NV_DELETE_POD redirects to NV_FREE.
//
// Rationale: NV_DELETE uses global operator delete(void*), which we don't want to overload.
// Any other definition of NV_DELETE couldn't support array syntax 'NV_DELETE([]a);'.
// NV_DELETE_POD was preferred over NV_DELETE_ARRAY because it is used
// less often and applies to both single instances and arrays.
template <typename T>
NV_INLINE void* operator new(size_t size, nvidia::shdfnd::ReflectionAllocator<T> alloc, const char* fileName,
typename nvidia::shdfnd::EnableIfPod<T, int>::Type line)
{
return alloc.allocate(size, fileName, line);
}
template <typename T>
NV_INLINE void* operator new [](size_t size, nvidia::shdfnd::ReflectionAllocator<T> alloc, const char* fileName,
typename nvidia::shdfnd::EnableIfPod<T, int>::Type line)
{ return alloc.allocate(size, fileName, line); }
// If construction after placement new throws, this placement delete is being called.
template <typename T>
NV_INLINE void operator delete(void* ptr, nvidia::shdfnd::ReflectionAllocator<T> alloc, const char* fileName,
typename nvidia::shdfnd::EnableIfPod<T, int>::Type line)
{
NV_UNUSED(fileName);
NV_UNUSED(line);
alloc.deallocate(ptr);
}
// If construction after placement new throws, this placement delete is being called.
template <typename T>
NV_INLINE void operator delete [](void* ptr, nvidia::shdfnd::ReflectionAllocator<T> alloc, const char* fileName,
typename nvidia::shdfnd::EnableIfPod<T, int>::Type line)
{
NV_UNUSED(fileName);
NV_UNUSED(line);
alloc.deallocate(ptr);
}
#endif // #ifndef NV_NSFOUNDATION_NSALLOCATOR_H
| 11,235 | C | 35.01282 | 120 | 0.594304 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsVecMathAoSScalar.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef NV_PHYSICS_COMMON_VECMATH_INLINE_SCALAR
#define NV_PHYSICS_COMMON_VECMATH_INLINE_SCALAR
#if COMPILE_VECTOR_INTRINSICS
#error Scalar version should not be included when using vector intrinsics.
#endif
//Remove this define when all platforms use simd solver.
#define NV_SUPPORT_SIMD
struct VecU8V;
struct VecI16V;
struct VecU16V;
struct VecI32V;
struct VecU32V;
struct Vec4V;
typedef Vec4V QuatV;
NV_ALIGN_PREFIX(16)
struct FloatV
{
float x;
float pad[3];
FloatV(){}
FloatV(const float _x)
: x(_x)
{
}
}
NV_ALIGN_SUFFIX(16);
NV_ALIGN_PREFIX(16)
struct Vec4V
{
float x, y, z, w;
Vec4V(){}
Vec4V(const float _x, const float _y, const float _z, const float _w)
: x(_x),
y(_y),
z(_z),
w(_w)
{
}
}
NV_ALIGN_SUFFIX(16);
NV_ALIGN_PREFIX(16)
struct Vec3V
{
float x,y,z;
float pad;
Vec3V(){}
Vec3V(const float _x, const float _y, const float _z)
: x(_x),
y(_y),
z(_z),
pad(0.0f)
{
}
}
NV_ALIGN_SUFFIX(16);
NV_ALIGN_PREFIX(16)
struct BoolV
{
uint32_t ux, uy, uz, uw;
BoolV(){}
BoolV(const uint32_t _x, const uint32_t _y, const uint32_t _z, const uint32_t _w)
: ux(_x),
uy(_y),
uz(_z),
uw(_w)
{
}
}
NV_ALIGN_SUFFIX(16);
struct Mat33V
{
Mat33V(){}
Mat33V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2)
: col0(c0),
col1(c1),
col2(c2)
{
}
Vec3V col0;
Vec3V col1;
Vec3V col2;
};
struct Mat34V
{
Mat34V(){}
Mat34V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2, const Vec3V& c3)
: col0(c0),
col1(c1),
col2(c2),
col3(c3)
{
}
Vec3V col0;
Vec3V col1;
Vec3V col2;
Vec3V col3;
};
struct Mat43V
{
Mat43V(){}
Mat43V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2)
: col0(c0),
col1(c1),
col2(c2)
{
}
Vec4V col0;
Vec4V col1;
Vec4V col2;
};
struct Mat44V
{
Mat44V(){}
Mat44V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2, const Vec4V& c3)
: col0(c0),
col1(c1),
col2(c2),
col3(c3)
{
}
Vec4V col0;
Vec4V col1;
Vec4V col2;
Vec4V col3;
};
NV_ALIGN_PREFIX(16)
struct VecU32V
{
uint32_t u32[4];
NV_FORCE_INLINE VecU32V() {}
NV_FORCE_INLINE VecU32V(uint32_t a, uint32_t b, uint32_t c, uint32_t d) { u32[0] = a; u32[1] = b; u32[2] = c; u32[3] = d; }
}
NV_ALIGN_SUFFIX(16);
NV_ALIGN_PREFIX(16)
struct VecI32V
{
int32_t i32[4];
NV_FORCE_INLINE VecI32V() {}
NV_FORCE_INLINE VecI32V(int32_t a, int32_t b, int32_t c, int32_t d) { i32[0] = a; i32[1] = b; i32[2] = c; i32[3] = d; }
}
NV_ALIGN_SUFFIX(16);
NV_ALIGN_PREFIX(16)
struct VecI16V
{
int16_t i16[8];
NV_FORCE_INLINE VecI16V() {}
NV_FORCE_INLINE VecI16V(int16_t a, int16_t b, int16_t c, int16_t d, int16_t e, int16_t f, int16_t g, int16_t h)
{ i16[0] = a; i16[1] = b; i16[2] = c; i16[3] = d; i16[4] = e; i16[5] = f; i16[6] = g; i16[7] = h; }
}
NV_ALIGN_SUFFIX(16);
NV_ALIGN_PREFIX(16)
struct VecU16V
{
union { uint16_t u16[8]; int16_t i16[8]; };
NV_FORCE_INLINE VecU16V() {}
NV_FORCE_INLINE VecU16V(uint16_t a, uint16_t b, uint16_t c, uint16_t d, uint16_t e, uint16_t f, uint16_t g, uint16_t h)
{ u16[0] = a; u16[1] = b; u16[2] = c; u16[3] = d; u16[4] = e; u16[5] = f; u16[6] = g; u16[7] = h; }
}
NV_ALIGN_SUFFIX(16);
NV_ALIGN_PREFIX(16)
struct VecU8V
{
uint8_t u8[8];
NV_FORCE_INLINE VecU8V() {}
NV_FORCE_INLINE VecU8V(uint8_t a, uint8_t b, uint8_t c, uint8_t d) { u8[0] = a; u8[1] = b; u8[2] = c; u8[3] = d; }
}
NV_ALIGN_SUFFIX(16);
#define FloatVArg FloatV&
#define Vec3VArg Vec3V&
#define Vec4VArg Vec4V&
#define BoolVArg BoolV&
#define VecU32VArg VecU32V&
#define VecI32VArg VecI32V&
#define VecU16VArg VecU16V&
#define VecI16VArg VecI16V&
#define VecU8VArg VecU8V&
#define QuatVArg QuatV&
#define VecCrossV Vec3V
typedef VecI32V VecShiftV;
#define VecShiftVArg VecShiftV&
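// Illustrative usage sketch (not part of the original header; the helpers used here
// are defined in the companion scalar inline header):
//   const Vec3V a = V3LoadU(NvVec3(1.0f, 2.0f, 3.0f));
//   const Vec3V b = V3LoadU(NvVec3(4.0f, 5.0f, 6.0f));
//   const FloatV d = V3Dot(a, b); // 1*4 + 2*5 + 3*6 = 32
//   float out;
//   FStore(d, &out);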
#endif //NV_PHYSICS_COMMON_VECMATH_INLINE_SCALAR
| 5,842 | C | 23.86383 | 127 | 0.629921 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsInlineAoS.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef PS_INLINE_AOS_H
#define PS_INLINE_AOS_H
#include "NvPreprocessor.h"
#if NV_WINDOWS_FAMILY
#include "platform/windows/NsWindowsTrigConstants.h"
#include "platform/windows/NsWindowsInlineAoS.h"
#elif NV_X360
#include "xbox360/NsXbox360InlineAoS.h"
#elif (NV_LINUX || NV_ANDROID || NV_APPLE || NV_PS4 || (NV_WINRT && NV_NEON))
#include "platform/unix/NsUnixTrigConstants.h"
#include "platform/unix/NsUnixInlineAoS.h"
#elif NV_PS3
#include "ps3/NsPS3InlineAoS.h"
#elif NV_PSP2
#include "psp2/NsPSP2InlineAoS.h"
#elif NV_XBOXONE
#include "XboxOne/NsXboxOneTrigConstants.h"
#include "XboxOne/NsXboxOneInlineAoS.h"
#else
#error "Platform not supported!"
#endif
#endif
| 2,409 | C | 42.818181 | 77 | 0.754255 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsVecMathAoSScalarInline.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef NV_PHYSICS_COMMON_VECMATH_SCALAR_INLINE
#define NV_PHYSICS_COMMON_VECMATH_SCALAR_INLINE
#if COMPILE_VECTOR_INTRINSICS
#error Scalar version should not be included when using vector intrinsics.
#endif
/////////////////////////////////////////////////////////////////////
////INTERNAL USE ONLY AND TESTS
/////////////////////////////////////////////////////////////////////
namespace internalScalarSimd
{
NV_FORCE_INLINE bool hasZeroElementInFloatV(const FloatV a)
{
return (0==a.x);
}
NV_FORCE_INLINE bool hasZeroElementInVec3V(const Vec3V a)
{
return (0==a.x || 0==a.y || 0==a.z);
}
NV_FORCE_INLINE bool hasZeroElementInVec4V(const Vec4V a)
{
return (0==a.x || 0==a.y || 0==a.z || 0==a.w);
}
}
namespace _VecMathTests
{
NV_FORCE_INLINE bool allElementsEqualFloatV(const FloatV a, const FloatV b)
{
return (a.x==b.x);
}
NV_FORCE_INLINE bool allElementsEqualVec3V(const Vec3V a, const Vec3V b)
{
return (a.x==b.x && a.y==b.y && a.z==b.z);
}
NV_FORCE_INLINE bool allElementsEqualVec4V(const Vec4V a, const Vec4V b)
{
return (a.x==b.x && a.y==b.y && a.z==b.z && a.w==b.w);
}
NV_FORCE_INLINE bool allElementsEqualBoolV(const BoolV a, const BoolV b)
{
return (a.ux==b.ux && a.uy==b.uy && a.uz==b.uz && a.uw==b.uw);
}
NV_FORCE_INLINE bool allElementsEqualVecU32V(const VecU32V a, const VecU32V b)
{
return (a.u32[0]==b.u32[0] && a.u32[1]==b.u32[1] && a.u32[2]==b.u32[2] && a.u32[3]==b.u32[3]);
}
NV_FORCE_INLINE bool allElementsEqualVecI32V(const VecI32V a, const VecI32V b)
{
return (a.i32[0]==b.i32[0] && a.i32[1]==b.i32[1] && a.i32[2]==b.i32[2] && a.i32[3]==b.i32[3]);
}
#define VECMATH_AOS_EPSILON (1e-3f)
NV_FORCE_INLINE bool allElementsNearEqualFloatV(const FloatV a, const FloatV b)
{
const float cx=a.x-b.x;
return (cx>-VECMATH_AOS_EPSILON && cx<VECMATH_AOS_EPSILON);
}
NV_FORCE_INLINE bool allElementsNearEqualVec3V(const Vec3V a, const Vec3V b)
{
const float cx=a.x-b.x;
const float cy=a.y-b.y;
const float cz=a.z-b.z;
return
(
cx>-VECMATH_AOS_EPSILON && cx<VECMATH_AOS_EPSILON &&
cy>-VECMATH_AOS_EPSILON && cy<VECMATH_AOS_EPSILON &&
cz>-VECMATH_AOS_EPSILON && cz<VECMATH_AOS_EPSILON
);
}
NV_FORCE_INLINE bool allElementsNearEqualVec4V(const Vec4V a, const Vec4V b)
{
const float cx=a.x-b.x;
const float cy=a.y-b.y;
const float cz=a.z-b.z;
const float cw=a.w-b.w;
return
(
cx>-VECMATH_AOS_EPSILON && cx<VECMATH_AOS_EPSILON &&
cy>-VECMATH_AOS_EPSILON && cy<VECMATH_AOS_EPSILON &&
cz>-VECMATH_AOS_EPSILON && cz<VECMATH_AOS_EPSILON &&
cw>-VECMATH_AOS_EPSILON && cw<VECMATH_AOS_EPSILON
);
}
}
///////////////////////////////////////////////////////
NV_FORCE_INLINE bool isValidVec3V(const Vec3V a)
{
return a.pad == 0.f;
}
NV_FORCE_INLINE bool isFiniteFloatV(const FloatV a)
{
return NvIsFinite(a.x);
}
NV_FORCE_INLINE bool isFiniteVec3V(const Vec3V a)
{
return NvIsFinite(a.x) && NvIsFinite(a.y) && NvIsFinite(a.z);
}
NV_FORCE_INLINE bool isFiniteVec4V(const Vec4V a)
{
return NvIsFinite(a.x) && NvIsFinite(a.y) && NvIsFinite(a.z) && NvIsFinite(a.w);
}
/////////////////////////////////////////////////////////////////////
////VECTORISED FUNCTION IMPLEMENTATIONS
/////////////////////////////////////////////////////////////////////
NV_FORCE_INLINE FloatV FLoad(const float f)
{
return FloatV(f);
}
NV_FORCE_INLINE Vec3V V3Load(const float f)
{
return Vec3V(f,f,f);
}
NV_FORCE_INLINE Vec4V V4Load(const float f)
{
return Vec4V(f,f,f,f);
}
NV_FORCE_INLINE BoolV BLoad(const bool f)
{
#if NV_ARM
// SD: Android ARM builds fail if this is done with a cast.
// Might also fail because of something else but the select
// operator here seems to fix everything that failed in release builds.
return f ? BTTTT() : BFFFF();
#else
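    // -(int32_t)f yields 0 for false and 0xFFFFFFFF (all bits set) for true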
uint32_t i=-(int32_t)f;
return BoolV(i,i,i,i);
#endif
}
NV_FORCE_INLINE Vec3V V3LoadA(const NvVec3& f)
{
VECMATHAOS_ASSERT(0 == (reinterpret_cast<uint64_t>(&f) & 0x0f));
return Vec3V(f.x,f.y,f.z);
}
NV_FORCE_INLINE Vec3V V3LoadU(const NvVec3& f)
{
return Vec3V(f.x,f.y,f.z);
}
NV_FORCE_INLINE Vec3V V3LoadUnsafeA(const NvVec3& f)
{
return Vec3V(f.x,f.y,f.z);
}
NV_FORCE_INLINE Vec3V V3LoadA(const float* const f)
{
return Vec3V(f[0], f[1], f[2]);
}
NV_FORCE_INLINE Vec3V V3LoadU(const float* const f)
{
return Vec3V(f[0], f[1], f[2]);
}
NV_FORCE_INLINE Vec3V Vec3V_From_Vec4V(Vec4V f)
{
return Vec3V(f.x,f.y,f.z);
}
NV_FORCE_INLINE Vec3V Vec3V_From_Vec4V_WUndefined(const Vec4V v)
{
return Vec3V(v.x, v.y, v.z);
}
NV_FORCE_INLINE Vec4V Vec4V_From_Vec3V(Vec3V f)
{
return Vec4V(f.x,f.y,f.z, 0.0f);
}
NV_FORCE_INLINE Vec4V Vec4V_From_FloatV(FloatV f)
{
return Vec4V(f.x,f.x,f.x,f.x);
}
NV_FORCE_INLINE Vec3V Vec3V_From_FloatV(FloatV f)
{
return Vec3V(f.x,f.x,f.x);
}
NV_FORCE_INLINE Vec3V Vec3V_From_FloatV_WUndefined(FloatV f)
{
return Vec3V(f.x,f.x,f.x);
}
NV_FORCE_INLINE Vec4V V4LoadA(const float* const f)
{
VECMATHAOS_ASSERT(0 == ((uint64_t)f & 0x0f));
return Vec4V(f[0],f[1],f[2],f[3]);
}
NV_FORCE_INLINE void V4StoreA(const Vec4V a, float* f)
{
VECMATHAOS_ASSERT(0 == ((uint64_t)f & 0x0f));
*reinterpret_cast<Vec4V*>(f) = a;
}
NV_FORCE_INLINE void V4StoreU(const Vec4V a, float* f)
{
*reinterpret_cast<Vec4V*>(f) = a;
}
NV_FORCE_INLINE void BStoreA(const BoolV a, uint32_t* f)
{
VECMATHAOS_ASSERT(0 == ((uint64_t)f & 0x0f));
*reinterpret_cast<BoolV*>(f) = a;
}
NV_FORCE_INLINE void U4StoreA(const VecU32V uv, uint32_t* u)
{
VECMATHAOS_ASSERT(0 == ((uint64_t)u & 0x0f));
*reinterpret_cast<VecU32V*>(u) = uv;
}
NV_FORCE_INLINE void I4StoreA(const VecI32V iv, int32_t* i)
{
VECMATHAOS_ASSERT(0 == ((uint64_t)i & 0x0f));
*reinterpret_cast<VecI32V*>(i) = iv;
}
NV_FORCE_INLINE Vec4V V4LoadU(const float* const f)
{
return Vec4V(f[0],f[1],f[2],f[3]);
}
NV_FORCE_INLINE Vec4V Vec4V_From_NvVec3_WUndefined(const NvVec3& f)
{
return Vec4V(f[0],f[1],f[2],0.f);
}
NV_FORCE_INLINE BoolV BLoad(const bool* const f)
{
return BoolV(-(int32_t)f[0],-(int32_t)f[1],-(int32_t)f[2],-(int32_t)f[3]);
}
NV_FORCE_INLINE float FStore(const FloatV a)
{
return a.x;
}
NV_FORCE_INLINE void FStore(const FloatV a, float* NV_RESTRICT f)
{
*f = a.x;
}
NV_FORCE_INLINE void V3StoreA(const Vec3V a, NvVec3& f)
{
f=NvVec3(a.x,a.y,a.z);
}
NV_FORCE_INLINE void V3StoreU(const Vec3V a, NvVec3& f)
{
f=NvVec3(a.x,a.y,a.z);
}
//////////////////////////
//FLOATV
//////////////////////////
NV_FORCE_INLINE FloatV FZero()
{
return FLoad(0.0f);
}
NV_FORCE_INLINE FloatV FOne()
{
return FLoad(1.0f);
}
NV_FORCE_INLINE FloatV FHalf()
{
return FLoad(0.5f);
}
NV_FORCE_INLINE FloatV FEps()
{
return FLoad(NV_EPS_REAL);
}
NV_FORCE_INLINE FloatV FEps6()
{
return FLoad(1e-6f);
}
NV_FORCE_INLINE FloatV FMax()
{
return FLoad(NV_MAX_REAL);
}
NV_FORCE_INLINE FloatV FNegMax()
{
return FLoad(-NV_MAX_REAL);
}
NV_FORCE_INLINE FloatV FNeg(const FloatV f)
{
return FloatV(-f.x);
}
NV_FORCE_INLINE FloatV FAdd(const FloatV a, const FloatV b)
{
return FloatV(a.x+b.x);
}
NV_FORCE_INLINE FloatV FSub(const FloatV a, const FloatV b)
{
return FloatV(a.x-b.x);
}
NV_FORCE_INLINE FloatV FMul(const FloatV a, const FloatV b)
{
return FloatV(a.x*b.x);
}
NV_FORCE_INLINE FloatV FDiv(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(!internalScalarSimd::hasZeroElementInFloatV(b));
return FloatV(a.x/b.x);
}
NV_FORCE_INLINE FloatV FDivFast(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(!internalScalarSimd::hasZeroElementInFloatV(b));
return FloatV(a.x/b.x);
}
NV_FORCE_INLINE FloatV FRecip(const FloatV a)
{
VECMATHAOS_ASSERT(!internalScalarSimd::hasZeroElementInFloatV(a));
return (1.0f/a.x);
}
NV_FORCE_INLINE FloatV FRecipFast(const FloatV a)
{
VECMATHAOS_ASSERT(!internalScalarSimd::hasZeroElementInFloatV(a));
return (1.0f/a.x);
}
NV_FORCE_INLINE FloatV FRsqrt(const FloatV a)
{
VECMATHAOS_ASSERT(!internalScalarSimd::hasZeroElementInFloatV(a));
return NvRecipSqrt(a.x);
}
NV_FORCE_INLINE FloatV FSqrt(const FloatV a)
{
VECMATHAOS_ASSERT(!internalScalarSimd::hasZeroElementInFloatV(a));
return NvSqrt(a.x);
}
NV_FORCE_INLINE FloatV FRsqrtFast(const FloatV a)
{
VECMATHAOS_ASSERT(!internalScalarSimd::hasZeroElementInFloatV(a));
return NvRecipSqrt(a.x);
}
NV_FORCE_INLINE FloatV FScaleAdd(const FloatV a, const FloatV b, const FloatV c)
{
return FAdd(FMul(a,b),c);
}
NV_FORCE_INLINE FloatV FNegScaleSub(const FloatV a, const FloatV b, const FloatV c)
{
return FSub(c,FMul(a,b));
}
NV_FORCE_INLINE FloatV FAbs(const FloatV a)
{
return FloatV(NvAbs(a.x));
}
NV_FORCE_INLINE FloatV FSel(const BoolV c, const FloatV a, const FloatV b)
{
return FloatV(c.ux ? a.x : b.x);
}
NV_FORCE_INLINE BoolV FIsGrtr(const FloatV a, const FloatV b)
{
return BLoad(a.x>b.x);
}
NV_FORCE_INLINE BoolV FIsGrtrOrEq(const FloatV a, const FloatV b)
{
return BLoad(a.x>=b.x);
}
NV_FORCE_INLINE BoolV FIsEq(const FloatV a, const FloatV b)
{
return BLoad(a.x==b.x);
}
NV_FORCE_INLINE FloatV FMax(const FloatV a, const FloatV b)
{
return (a.x>b.x ? FloatV(a.x) : FloatV(b.x));
}
NV_FORCE_INLINE FloatV FMin(const FloatV a, const FloatV b)
{
return (a.x>b.x ? FloatV(b.x) : FloatV(a.x));
}
NV_FORCE_INLINE FloatV FClamp(const FloatV a, const FloatV minV, const FloatV maxV)
{
return FMax(FMin(a,maxV),minV);
}
NV_FORCE_INLINE uint32_t FAllGrtr(const FloatV a, const FloatV b)
{
return (a.x > b.x);
}
NV_FORCE_INLINE uint32_t FAllGrtrOrEq(const FloatV a, const FloatV b)
{
return (a.x >= b.x);
}
NV_FORCE_INLINE uint32_t FAllEq(const FloatV a, const FloatV b)
{
return(a.x == b.x);
}
NV_FORCE_INLINE FloatV FRound(const FloatV a)
{
return floor(a.x + 0.5f);
}
NV_FORCE_INLINE FloatV FSin(const FloatV a)
{
return sinf(a.x);
}
NV_FORCE_INLINE FloatV FCos(const FloatV a)
{
return cosf(a.x);
}
NV_FORCE_INLINE uint32_t FOutOfBounds(const FloatV a, const FloatV min, const FloatV max)
{
return (a.x>max.x || a.x<min.x);
}
NV_FORCE_INLINE uint32_t FInBounds(const FloatV a, const FloatV min, const FloatV max)
{
return (a.x>=min.x && a.x<=max.x);
}
NV_FORCE_INLINE uint32_t FOutOfBounds(const FloatV a, const FloatV bounds)
{
return FOutOfBounds(a, FNeg(bounds), bounds);
}
NV_FORCE_INLINE uint32_t FInBounds(const FloatV a, const FloatV bounds)
{
return FInBounds(a, FNeg(bounds), bounds);
}
/////////////////////
//VEC3V
/////////////////////
NV_FORCE_INLINE Vec3V V3Splat(const FloatV f)
{
return Vec3V(f.x,f.x,f.x);
}
NV_FORCE_INLINE Vec3V V3Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z)
{
return Vec3V(x.x,y.x,z.x);
}
NV_FORCE_INLINE Vec3V V3UnitX()
{
return Vec3V(1.0f,0.0f,0.0f);
}
NV_FORCE_INLINE Vec3V V3UnitY()
{
return Vec3V(0.0f,1.0f,0.0f);
}
NV_FORCE_INLINE Vec3V V3UnitZ()
{
return Vec3V(0.0f,0.0f,1.0f);
}
NV_FORCE_INLINE FloatV V3GetX(const Vec3V f)
{
return FloatV(f.x);
}
NV_FORCE_INLINE FloatV V3GetY(const Vec3V f)
{
return FloatV(f.y);
}
NV_FORCE_INLINE FloatV V3GetZ(const Vec3V f)
{
return FloatV(f.z);
}
NV_FORCE_INLINE Vec3V V3SetX(const Vec3V v, const FloatV f)
{
return Vec3V(f.x,v.y,v.z);
}
NV_FORCE_INLINE Vec3V V3SetY(const Vec3V v, const FloatV f)
{
return Vec3V(v.x,f.x,v.z);
}
NV_FORCE_INLINE Vec3V V3SetZ(const Vec3V v, const FloatV f)
{
return Vec3V(v.x,v.y,f.x);
}
NV_FORCE_INLINE Vec3V V3ColX(const Vec3V a, const Vec3V b, const Vec3V c)
{
return Vec3V(a.x,b.x,c.x);
}
NV_FORCE_INLINE Vec3V V3ColY(const Vec3V a, const Vec3V b, const Vec3V c)
{
return Vec3V(a.y,b.y,c.y);
}
NV_FORCE_INLINE Vec3V V3ColZ(const Vec3V a, const Vec3V b, const Vec3V c)
{
return Vec3V(a.z,b.z,c.z);
}
NV_FORCE_INLINE Vec3V V3Zero()
{
return V3Load(0.0f);
}
NV_FORCE_INLINE Vec3V V3One()
{
return V3Load(1.0f);
}
NV_FORCE_INLINE Vec3V V3Eps()
{
return V3Load(NV_EPS_REAL);
}
NV_FORCE_INLINE Vec3V V3Neg(const Vec3V c)
{
return Vec3V(-c.x,-c.y,-c.z);
}
NV_FORCE_INLINE Vec3V V3Add(const Vec3V a, const Vec3V b)
{
return Vec3V(a.x+b.x,a.y+b.y,a.z+b.z);
}
NV_FORCE_INLINE Vec3V V3Sub(const Vec3V a, const Vec3V b)
{
return Vec3V(a.x-b.x,a.y-b.y,a.z-b.z);
}
NV_FORCE_INLINE Vec3V V3Scale(const Vec3V a, const FloatV b)
{
return Vec3V(a.x*b.x,a.y*b.x,a.z*b.x);
}
NV_FORCE_INLINE Vec3V V3Mul(const Vec3V a, const Vec3V b)
{
return Vec3V(a.x*b.x,a.y*b.y,a.z*b.z);
}
NV_FORCE_INLINE Vec3V V3ScaleInv(const Vec3V a, const FloatV b)
{
const float bInv=1.0f/b.x;
return Vec3V(a.x*bInv,a.y*bInv,a.z*bInv);
}
NV_FORCE_INLINE Vec3V V3Div(const Vec3V a, const Vec3V b)
{
return Vec3V(a.x/b.x,a.y/b.y,a.z/b.z);
}
NV_FORCE_INLINE Vec3V V3ScaleInvFast(const Vec3V a, const FloatV b)
{
const float bInv=1.0f/b.x;
return Vec3V(a.x*bInv,a.y*bInv,a.z*bInv);
}
NV_FORCE_INLINE Vec3V V3DivFast(const Vec3V a, const Vec3V b)
{
return Vec3V(a.x/b.x,a.y/b.y,a.z/b.z);
}
NV_FORCE_INLINE Vec3V V3Recip(const Vec3V a)
{
return Vec3V(1.0f/a.x,1.0f/a.y,1.0f/a.z);
}
NV_FORCE_INLINE Vec3V V3RecipFast(const Vec3V a)
{
return Vec3V(1.0f/a.x,1.0f/a.y,1.0f/a.z);
}
NV_FORCE_INLINE Vec3V V3Rsqrt(const Vec3V a)
{
return Vec3V(NvRecipSqrt(a.x),NvRecipSqrt(a.y),NvRecipSqrt(a.z));
}
NV_FORCE_INLINE Vec3V V3RsqrtFast(const Vec3V a)
{
return Vec3V(NvRecipSqrt(a.x),NvRecipSqrt(a.y),NvRecipSqrt(a.z));
}
NV_FORCE_INLINE Vec3V V3ScaleAdd(const Vec3V a, const FloatV b, const Vec3V c)
{
return V3Add(V3Scale(a,b),c);
}
NV_FORCE_INLINE Vec3V V3NegScaleSub(const Vec3V a, const FloatV b, const Vec3V c)
{
return V3Sub(c,V3Scale(a,b));
}
NV_FORCE_INLINE Vec3V V3MulAdd(const Vec3V a, const Vec3V b, const Vec3V c)
{
return V3Add(V3Mul(a,b),c);
}
NV_FORCE_INLINE Vec3V V3NegMulSub(const Vec3V a, const Vec3V b, const Vec3V c)
{
return V3Sub(c,V3Mul(a,b));
}
NV_FORCE_INLINE FloatV V3Dot(const Vec3V a, const Vec3V b)
{
return FloatV(a.x*b.x+a.y*b.y+a.z*b.z);
}
NV_FORCE_INLINE VecCrossV V3PrepareCross(const Vec3VArg normal)
{
return normal;
}
NV_FORCE_INLINE Vec3V V3Cross(const Vec3V a, const Vec3V b)
{
return Vec3V
(
a.y*b.z-a.z*b.y,
a.z*b.x-a.x*b.z,
a.x*b.y-a.y*b.x
);
}
NV_FORCE_INLINE FloatV V3Length(const Vec3V a)
{
return FloatV(NvSqrt(a.x*a.x + a.y*a.y + a.z*a.z));
}
NV_FORCE_INLINE FloatV V3LengthSq(const Vec3V a)
{
return FloatV(a.x*a.x + a.y*a.y + a.z*a.z);
}
NV_FORCE_INLINE Vec3V V3Normalize(const Vec3V a)
{
VECMATHAOS_ASSERT(a.x!=0 || a.y!=0 || a.z!=0);
const float lengthInv=1.0f/(NvSqrt(a.x*a.x + a.y*a.y + a.z*a.z));
return Vec3V(a.x*lengthInv,a.y*lengthInv,a.z*lengthInv);
}
NV_FORCE_INLINE Vec3V V3NormalizeSafe(const Vec3V a)
{
const float length=NvSqrt(a.x*a.x + a.y*a.y + a.z*a.z);
if(NV_EPS_REAL >= length)
{
return Vec3V(0.0f,0.0f,0.0f);
}
else
{
const float lengthInv=1.0f/length;
return Vec3V(a.x*lengthInv,a.y*lengthInv,a.z*lengthInv);
}
}
NV_FORCE_INLINE Vec3V V3NormalizeFast(const Vec3V a)
{
VECMATHAOS_ASSERT(a.x!=0 || a.y!=0 || a.z!=0);
const float lengthInv=1.0f/(NvSqrt(a.x*a.x + a.y*a.y + a.z*a.z));
return Vec3V(a.x*lengthInv,a.y*lengthInv,a.z*lengthInv);
}
NV_FORCE_INLINE Vec3V V3Sel(const BoolV c, const Vec3V a, const Vec3V b)
{
return Vec3V(c.ux ? a.x : b.x, c.uy ? a.y : b.y, c.uz ? a.z : b.z);
}
NV_FORCE_INLINE BoolV V3IsGrtr(const Vec3V a, const Vec3V b)
{
return BoolV(a.x>b.x ? -1 : 0, a.y>b.y ? -1 : 0, a.z>b.z ? -1 : 0, 0);
}
NV_FORCE_INLINE BoolV V3IsGrtrOrEq(const Vec3V a, const Vec3V b)
{
return BoolV(a.x>=b.x ? (uint32_t)-1 : 0, a.y>=b.y ? (uint32_t)-1 : 0, a.z>=b.z ? (uint32_t)-1 : 0, (uint32_t)-1);
}
NV_FORCE_INLINE BoolV V3IsEq(const Vec3V a, const Vec3V b)
{
return BoolV(a.x==b.x ? (uint32_t)-1 : 0, a.y==b.y ? (uint32_t)-1 : 0, a.z==b.z ? (uint32_t)-1 : 0, (uint32_t)-1);
}
NV_FORCE_INLINE Vec3V V3Max(const Vec3V a, const Vec3V b)
{
return Vec3V(a.x>b.x ? a.x : b.x, a.y>b.y ? a.y : b.y, a.z>b.z ? a.z : b.z);
}
NV_FORCE_INLINE Vec3V V3Min(const Vec3V a, const Vec3V b)
{
return Vec3V(a.x<b.x ? a.x : b.x, a.y<b.y ? a.y : b.y, a.z<b.z ? a.z : b.z);
}
//Extract the maximum value from a
NV_FORCE_INLINE FloatV V3ExtractMax(const Vec3V a)
{
const float t0 = (a.x >= a.y) ? a.x : a.y;
return t0 >= a.z ? t0 : a.z;
}
//Extract the minimum value from a
NV_FORCE_INLINE FloatV V3ExtractMin(const Vec3V a)
{
const float t0 = (a.x <= a.y) ? a.x : a.y;
return t0 <= a.z ? t0 : a.z;
}
//return (a >= 0.0f) ? 1.0f : -1.0f;
NV_FORCE_INLINE Vec3V V3Sign(const Vec3V a)
{
return Vec3V((a.x >= 0.f ? 1.f : -1.f), (a.y >= 0.f ? 1.f : -1.f), (a.z >= 0.f ? 1.f : -1.f));
}
NV_FORCE_INLINE Vec3V V3Clamp(const Vec3V a, const Vec3V minV, const Vec3V maxV)
{
return V3Max(V3Min(a,maxV),minV);
}
NV_FORCE_INLINE Vec3V V3Abs(const Vec3V a)
{
return V3Max(a,V3Neg(a));
}
NV_FORCE_INLINE uint32_t V3AllGrtr(const Vec3V a, const Vec3V b)
{
return ((a.x > b.x) & (a.y > b.y) & (a.z > b.z)) ? 1 : 0;
}
NV_FORCE_INLINE uint32_t V3AllGrtrOrEq(const Vec3V a, const Vec3V b)
{
return ((a.x >= b.x) & (a.y >= b.y) & (a.z >= b.z)) ? 1 : 0;
}
NV_FORCE_INLINE uint32_t V3AllEq(const Vec3V a, const Vec3V b)
{
return ((a.x == b.x) & (a.y == b.y) & (a.z == b.z)) ? 1 : 0;
}
NV_FORCE_INLINE Vec3V V3Round(const Vec3V a)
{
return Vec3V(floor(a.x + 0.5f), floor(a.y + 0.5f), floor(a.z + 0.5f));
}
NV_FORCE_INLINE Vec3V V3Sin(const Vec3V a)
{
return Vec3V(sinf(a.x), sinf(a.y), sinf(a.z));
}
NV_FORCE_INLINE Vec3V V3Cos(const Vec3V a)
{
return Vec3V(cosf(a.x), cosf(a.y), cosf(a.z));
}
NV_FORCE_INLINE Vec3V V3PermYZZ(const Vec3V a)
{
return Vec3V(a.y,a.z,a.z);
}
NV_FORCE_INLINE Vec3V V3PermXYX(const Vec3V a)
{
return Vec3V(a.x,a.y,a.x);
}
NV_FORCE_INLINE Vec3V V3PermYZX(const Vec3V a)
{
return Vec3V(a.y,a.z,a.x);
}
NV_FORCE_INLINE Vec3V V3PermZXY(const Vec3V a)
{
return Vec3V(a.z,a.x,a.y);
}
NV_FORCE_INLINE Vec3V V3PermZZY(const Vec3V a)
{
return Vec3V(a.z,a.z,a.y);
}
NV_FORCE_INLINE Vec3V V3PermYXX(const Vec3V a)
{
return Vec3V(a.y,a.x,a.x);
}
NV_FORCE_INLINE Vec3V V3Perm_Zero_1Z_0Y(const Vec3V v0, const Vec3V v1)
{
return Vec3V(0.0f, v1.z, v0.y);
}
NV_FORCE_INLINE Vec3V V3Perm_0Z_Zero_1X(const Vec3V v0, const Vec3V v1)
{
return Vec3V(v0.z, 0.0f, v1.x);
}
NV_FORCE_INLINE Vec3V V3Perm_1Y_0X_Zero(const Vec3V v0, const Vec3V v1)
{
return Vec3V(v1.y, v0.x, 0.0f);
}
NV_FORCE_INLINE FloatV V3SumElems(const Vec3V a)
{
return FloatV(a.x + a.y + a.z);
}
NV_FORCE_INLINE uint32_t V3OutOfBounds(const Vec3V a, const Vec3V min, const Vec3V max)
{
return (a.x>max.x || a.y>max.y || a.z>max.z ||
a.x<min.x || a.y<min.y || a.z<min.z);
}
NV_FORCE_INLINE uint32_t V3InBounds(const Vec3V a, const Vec3V min, const Vec3V max)
{
return (a.x<=max.x && a.y<=max.y && a.z<=max.z &&
a.x>=min.x && a.y>=min.y && a.z>=min.z);
}
NV_FORCE_INLINE uint32_t V3OutOfBounds(const Vec3V a, const Vec3V bounds)
{
return V3OutOfBounds(a, V3Neg(bounds), bounds);
}
NV_FORCE_INLINE uint32_t V3InBounds(const Vec3V a, const Vec3V bounds)
{
return V3InBounds(a, V3Neg(bounds), bounds);
}
/////////////////////////
//VEC4V
/////////////////////////
NV_FORCE_INLINE Vec4V V4Splat(const FloatV f)
{
return Vec4V(f.x,f.x,f.x,f.x);
}
NV_FORCE_INLINE Vec4V V4Merge(const FloatV* const floatVArray)
{
return Vec4V(floatVArray[0].x,floatVArray[1].x,floatVArray[2].x,floatVArray[3].x);
}
NV_FORCE_INLINE Vec4V V4Merge(const FloatVArg x,const FloatVArg y, const FloatVArg z, const FloatVArg w)
{
return Vec4V(x.x,y.x,z.x,w.x);
}
NV_FORCE_INLINE Vec4V V4MergeW(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w)
{
return Vec4V(x.w, y.w, z.w, w.w);
}
NV_FORCE_INLINE Vec4V V4MergeZ(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w)
{
return Vec4V(x.z, y.z, z.z, w.z);
}
NV_FORCE_INLINE Vec4V V4MergeY(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w)
{
return Vec4V(x.y, y.y, z.y, w.y);
}
NV_FORCE_INLINE Vec4V V4MergeX(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w)
{
return Vec4V(x.x, y.x, z.x, w.x);
}
NV_FORCE_INLINE Vec4V V4UnpackXY(const Vec4VArg a, const Vec4VArg b)
{
return Vec4V(a.x, b.x, a.y, b.y);
}
NV_FORCE_INLINE Vec4V V4UnpackZW(const Vec4VArg a, const Vec4VArg b)
{
return Vec4V(a.z, b.z, a.w, b.w);
}
NV_FORCE_INLINE Vec4V V4UnitX()
{
return Vec4V(1.0f,0.0f,0.0f,0.0f);
}
NV_FORCE_INLINE Vec4V V4UnitY()
{
return Vec4V(0.0f,1.0f,0.0f,0.0f);
}
NV_FORCE_INLINE Vec4V V4UnitZ()
{
return Vec4V(0.0f,0.0f,1.0f,0.0f);
}
NV_FORCE_INLINE Vec4V V4UnitW()
{
return Vec4V(0.0f,0.0f,0.0f,1.0f);
}
NV_FORCE_INLINE FloatV V4GetX(const Vec4V f)
{
return FloatV(f.x);
}
NV_FORCE_INLINE FloatV V4GetY(const Vec4V f)
{
return FloatV(f.y);
}
NV_FORCE_INLINE FloatV V4GetZ(const Vec4V f)
{
return FloatV(f.z);
}
NV_FORCE_INLINE FloatV V4GetW(const Vec4V f)
{
return FloatV(f.w);
}
NV_FORCE_INLINE Vec4V V4SetX(const Vec4V v, const FloatV f)
{
return Vec4V(f.x,v.y,v.z,v.w);
}
NV_FORCE_INLINE Vec4V V4SetY(const Vec4V v, const FloatV f)
{
return Vec4V(v.x,f.x,v.z,v.w);
}
NV_FORCE_INLINE Vec4V V4SetZ(const Vec4V v, const FloatV f)
{
return Vec4V(v.x,v.y,f.x,v.w);
}
NV_FORCE_INLINE Vec4V V4SetW(const Vec4V v, const FloatV f)
{
return Vec4V(v.x,v.y,v.z,f.x);
}
NV_FORCE_INLINE Vec4V V4SetW(const Vec3V v, const FloatV f)
{
return Vec4V(v.x,v.y,v.z,f.x);
}
NV_FORCE_INLINE Vec4V V4ClearW(const Vec4V v)
{
return Vec4V(v.x,v.y,v.z,0);
}
NV_FORCE_INLINE Vec4V V4Perm_YXWZ(const Vec4V v)
{
return Vec4V(v.y, v.x, v.w, v.z);
}
NV_FORCE_INLINE Vec4V V4Perm_XZXZ(const Vec4V v)
{
return Vec4V(v.x, v.z, v.x, v.z);
}
NV_FORCE_INLINE Vec4V V4Perm_YWYW(const Vec4V v)
{
return Vec4V(v.y, v.w, v.y, v.w);
}
template<uint8_t _x, uint8_t _y, uint8_t _z, uint8_t _w> NV_FORCE_INLINE Vec4V V4Perm(const Vec4V v)
{
const float f[4] = {v.x,v.y,v.z,v.w};
return Vec4V(f[_x], f[_y], f[_z], f[_w]);
}
NV_FORCE_INLINE Vec4V V4Zero()
{
return V4Load(0.0f);
}
NV_FORCE_INLINE Vec4V V4One()
{
return V4Load(1.0f);
}
NV_FORCE_INLINE Vec4V V4Eps()
{
return V4Load(NV_EPS_REAL);
}
NV_FORCE_INLINE Vec4V V4Neg(const Vec4V c)
{
return Vec4V(-c.x,-c.y,-c.z,-c.w);
}
NV_FORCE_INLINE Vec4V V4Add(const Vec4V a, const Vec4V b)
{
return Vec4V(a.x+b.x,a.y+b.y,a.z+b.z,a.w+b.w);
}
NV_FORCE_INLINE Vec4V V4Sub(const Vec4V a, const Vec4V b)
{
return Vec4V(a.x-b.x,a.y-b.y,a.z-b.z,a.w-b.w);
}
NV_FORCE_INLINE Vec4V V4Scale(const Vec4V a, const FloatV b)
{
return Vec4V(a.x*b.x,a.y*b.x,a.z*b.x,a.w*b.x);
}
NV_FORCE_INLINE Vec4V V4Mul(const Vec4V a, const Vec4V b)
{
return Vec4V(a.x*b.x,a.y*b.y,a.z*b.z,a.w*b.w);
}
NV_FORCE_INLINE Vec4V V4ScaleInv(const Vec4V a, const FloatV b)
{
const float bInv=1.0f/b.x;
return Vec4V(a.x*bInv,a.y*bInv,a.z*bInv,a.w*bInv);
}
NV_FORCE_INLINE Vec4V V4Div(const Vec4V a, const Vec4V b)
{
VECMATHAOS_ASSERT(b.x!=0 && b.y!=0 && b.z!=0 && b.w!=0);
return Vec4V(a.x/b.x,a.y/b.y,a.z/b.z,a.w/b.w);
}
NV_FORCE_INLINE Vec4V V4ScaleInvFast(const Vec4V a, const FloatV b)
{
const float bInv=1.0f/b.x;
return Vec4V(a.x*bInv,a.y*bInv,a.z*bInv,a.w*bInv);
}
NV_FORCE_INLINE Vec4V V4DivFast(const Vec4V a, const Vec4V b)
{
return Vec4V(a.x/b.x,a.y/b.y,a.z/b.z,a.w/b.w);
}
NV_FORCE_INLINE Vec4V V4Recip(const Vec4V a)
{
return Vec4V(1.0f/a.x,1.0f/a.y,1.0f/a.z,1.0f/a.w);
}
NV_FORCE_INLINE Vec4V V4RecipFast(const Vec4V a)
{
return Vec4V(1.0f/a.x,1.0f/a.y,1.0f/a.z,1.0f/a.w);
}
NV_FORCE_INLINE Vec4V V4Rsqrt(const Vec4V a)
{
return Vec4V(NvRecipSqrt(a.x),NvRecipSqrt(a.y),NvRecipSqrt(a.z),NvRecipSqrt(a.w));
}
NV_FORCE_INLINE Vec4V V4RsqrtFast(const Vec4V a)
{
return Vec4V(NvRecipSqrt(a.x),NvRecipSqrt(a.y),NvRecipSqrt(a.z),NvRecipSqrt(a.w));
}
NV_FORCE_INLINE Vec4V V4Sqrt(const Vec4V a)
{
return Vec4V(NvSqrt(a.x),NvSqrt(a.y),NvSqrt(a.z),NvSqrt(a.w));
}
NV_FORCE_INLINE Vec4V V4ScaleAdd(const Vec4V a, const FloatV b, const Vec4V c)
{
return V4Add(V4Scale(a,b),c);
}
NV_FORCE_INLINE Vec4V V4NegScaleSub(const Vec4V a, const FloatV b, const Vec4V c)
{
return V4Sub(c,V4Scale(a,b));
}
NV_FORCE_INLINE Vec4V V4MulAdd(const Vec4V a, const Vec4V b, const Vec4V c)
{
return V4Add(V4Mul(a,b),c);
}
NV_FORCE_INLINE Vec4V V4NegMulSub(const Vec4V a, const Vec4V b, const Vec4V c)
{
return V4Sub(c,V4Mul(a,b));
}
NV_FORCE_INLINE FloatV V4SumElements(const Vec4V a)
{
return FloatV(a.x + a.y + a.z + a.w);
}
NV_FORCE_INLINE FloatV V4Dot(const Vec4V a, const Vec4V b)
{
return FloatV(a.x*b.x+a.y*b.y+a.z*b.z+a.w*b.w);
}
NV_FORCE_INLINE FloatV V4Length(const Vec4V a)
{
return FloatV(NvSqrt(a.x*a.x + a.y*a.y +a.z*a.z + a.w*a.w));
}
NV_FORCE_INLINE FloatV V4LengthSq(const Vec4V a)
{
return V4Dot(a,a);
}
NV_FORCE_INLINE Vec4V V4Normalize(const Vec4V a)
{
VECMATHAOS_ASSERT(0!=a.x || 0!=a.y || 0!=a.z || 0!=a.w);
const FloatV length=FloatV(V4Length(a));
return V4ScaleInv(a,length);
}
NV_FORCE_INLINE Vec4V V4NormalizeSafe(const Vec4V a)
{
const FloatV length=FloatV(V4Length(a));
if(NV_EPS_REAL>=length.x)
{
return Vec4V(0.0f,0.0f,0.0f,0.0f);
}
else
{
return V4ScaleInv(a,length);
}
}
NV_FORCE_INLINE Vec4V V4NormalizeFast(const Vec4V a)
{
VECMATHAOS_ASSERT(0!=a.x || 0!=a.y || 0!=a.z || 0!=a.w);
const FloatV length=FloatV(V4Length(a));
return V4ScaleInv(a,length);
}
NV_FORCE_INLINE Vec4V V4Sel(const BoolV c, const Vec4V a, const Vec4V b)
{
return Vec4V(c.ux ? a.x : b.x, c.uy ? a.y : b.y, c.uz ? a.z : b.z, c.uw ? a.w : b.w);
}
NV_FORCE_INLINE BoolV V4IsGrtr(const Vec4V a, const Vec4V b)
{
return BoolV(a.x>b.x ? -1 : 0, a.y>b.y ? -1 : 0, a.z>b.z ? -1 : 0, a.w>b.w ? -1 : 0);
}
NV_FORCE_INLINE BoolV V4IsGrtrOrEq(const Vec4V a, const Vec4V b)
{
return BoolV(a.x>=b.x ? -1 : 0, a.y>=b.y ? -1 : 0, a.z>=b.z ? -1 : 0, a.w>=b.w ? -1 : 0);
}
NV_FORCE_INLINE BoolV V4IsEq(const Vec4V a, const Vec4V b)
{
return BoolV(a.x==b.x ? -1 : 0, a.y==b.y ? -1 : 0, a.z==b.z ? -1 : 0, a.w==b.w ? -1 : 0);
}
NV_FORCE_INLINE Vec4V V4Max(const Vec4V a, const Vec4V b)
{
return Vec4V(a.x>b.x ? a.x : b.x, a.y>b.y ? a.y : b.y, a.z>b.z ? a.z : b.z, a.w>b.w ? a.w : b.w);
}
NV_FORCE_INLINE Vec4V V4Min(const Vec4V a, const Vec4V b)
{
return Vec4V(a.x<b.x ? a.x : b.x, a.y<b.y ? a.y : b.y, a.z<b.z ? a.z : b.z, a.w<b.w ? a.w : b.w);
}
//Extract the maximum value from a
NV_FORCE_INLINE FloatV V4ExtractMax(const Vec4V a)
{
const float t0 = (a.x >= a.y) ? a.x : a.y;
    const float t1 = (a.z >= a.w) ? a.z : a.w;
return t0 >= t1 ? t0 : t1;
}
//Extract the minimum value from a
NV_FORCE_INLINE FloatV V4ExtractMin(const Vec4V a)
{
const float t0 = (a.x <= a.y) ? a.x : a.y;
    const float t1 = (a.z <= a.w) ? a.z : a.w;
return t0 <= t1 ? t0 : t1;
}
NV_FORCE_INLINE Vec4V V4Clamp(const Vec4V a, const Vec4V minV, const Vec4V maxV)
{
return V4Max(V4Min(a,maxV),minV);
}
NV_FORCE_INLINE Vec4V V4Round(const Vec4V a)
{
return Vec4V(floor(a.x + 0.5f), floor(a.y + 0.5f), floor(a.z + 0.5f), floor(a.w + 0.5f));
}
NV_FORCE_INLINE Vec4V V4Sin(const Vec4V a)
{
return Vec4V(sinf(a.x), sinf(a.y), sinf(a.z), sinf(a.w));
}
NV_FORCE_INLINE Vec4V V4Cos(const Vec4V a)
{
return Vec4V(cosf(a.x), cosf(a.y), cosf(a.z), cosf(a.w));
}
NV_FORCE_INLINE uint32_t V4AllGrtr(const Vec4V a, const Vec4V b)
{
return ((a.x > b.x) & (a.y > b.y) & (a.z > b.z) & (a.w > b.w)) ? 1 : 0;
}
NV_FORCE_INLINE uint32_t V4AllGrtrOrEq(const Vec4V a, const Vec4V b)
{
return ((a.x >= b.x) & (a.y >= b.y) & (a.z >= b.z) & (a.w >= b.w)) ? 1 : 0;
}
NV_FORCE_INLINE uint32_t V4AllEq(const Vec4V a, const Vec4V b)
{
return ((a.x == b.x) & (a.y == b.y) & (a.z == b.z) & (a.w == b.w)) ? 1 : 0;
}
NV_FORCE_INLINE void V4Transpose(Vec4V& col0, Vec4V& col1, Vec4V& col2, Vec4V& col3)
{
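    // in-place 4x4 transpose: stash the upper-triangle elements, then mirror values across the diagonal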
const float t01 = col0.y, t02 = col0.z, t03 = col0.w;
const float t12 = col1.z, t13 = col1.w;
const float t23 = col2.w;
col0.y = col1.x;
col0.z = col2.x;
col0.w = col3.x;
col1.z = col2.y;
col1.w = col3.y;
col2.w = col3.z;
col1.x = t01;
col2.x = t02;
col3.x = t03;
col2.y = t12;
col3.y = t13;
col3.z = t23;
}
NV_FORCE_INLINE BoolV BFFFF()
{
return BoolV(0, 0, 0, 0);
}
NV_FORCE_INLINE BoolV BFFFT()
{
return BoolV(0, 0, 0, (uint32_t)-1);
}
NV_FORCE_INLINE BoolV BFFTF()
{
return BoolV(0, 0, (uint32_t)-1, 0);
}
NV_FORCE_INLINE BoolV BFFTT()
{
return BoolV(0, 0, (uint32_t)-1, (uint32_t)-1);
}
NV_FORCE_INLINE BoolV BFTFF()
{
return BoolV(0, (uint32_t)-1, 0, 0);
}
NV_FORCE_INLINE BoolV BFTFT()
{
return BoolV(0, (uint32_t)-1, 0, (uint32_t)-1);
}
NV_FORCE_INLINE BoolV BFTTF()
{
return BoolV(0, (uint32_t)-1, (uint32_t)-1, 0);
}
NV_FORCE_INLINE BoolV BFTTT()
{
return BoolV(0, (uint32_t)-1, (uint32_t)-1, (uint32_t)-1);
}
NV_FORCE_INLINE BoolV BTFFF()
{
return BoolV((uint32_t)-1, 0, 0, 0);
}
NV_FORCE_INLINE BoolV BTFFT()
{
return BoolV((uint32_t)-1, 0, 0, (uint32_t)-1);
}
NV_FORCE_INLINE BoolV BTFTF()
{
return BoolV ((uint32_t)-1, 0, (uint32_t)-1, 0);
}
NV_FORCE_INLINE BoolV BTFTT()
{
return BoolV((uint32_t)-1, 0, (uint32_t)-1, (uint32_t)-1);
}
NV_FORCE_INLINE BoolV BTTFF()
{
return BoolV((uint32_t)-1, (uint32_t)-1, 0, 0);
}
NV_FORCE_INLINE BoolV BTTFT()
{
return BoolV((uint32_t)-1, (uint32_t)-1, 0, (uint32_t)-1);
}
NV_FORCE_INLINE BoolV BTTTF()
{
return BoolV((uint32_t)-1, (uint32_t)-1, (uint32_t)-1, 0);
}
NV_FORCE_INLINE BoolV BTTTT()
{
return BoolV((uint32_t)-1, (uint32_t)-1, (uint32_t)-1, (uint32_t)-1);
}
NV_FORCE_INLINE BoolV BXMask() {return BTFFF();}
NV_FORCE_INLINE BoolV BYMask() {return BFTFF();}
NV_FORCE_INLINE BoolV BZMask() {return BFFTF();}
NV_FORCE_INLINE BoolV BWMask() {return BFFFT();}
NV_FORCE_INLINE BoolV BGetX(const BoolV a)
{
return BoolV(a.ux, a.ux, a.ux, a.ux);
}
NV_FORCE_INLINE BoolV BGetY(const BoolV a)
{
return BoolV(a.uy, a.uy, a.uy, a.uy);
}
NV_FORCE_INLINE BoolV BGetZ(const BoolV a)
{
return BoolV(a.uz, a.uz, a.uz, a.uz);
}
NV_FORCE_INLINE BoolV BGetW(const BoolV a)
{
return BoolV(a.uw, a.uw, a.uw, a.uw);
}
NV_FORCE_INLINE BoolV BSetX(const BoolV v, const BoolV f)
{
return BoolV(f.ux,v.uy,v.uz,v.uw);
}
NV_FORCE_INLINE BoolV BSetY(const BoolV v, const BoolV f)
{
return BoolV(v.ux, f.uy, v.uz, v.uw);
}
NV_FORCE_INLINE BoolV BSetZ(const BoolV v, const BoolV f)
{
return BoolV(v.ux, v.uy, f.uz, v.uw);
}
NV_FORCE_INLINE BoolV BSetW(const BoolV v, const BoolV f)
{
return BoolV(v.ux, v.uy, v.uz, f.uw);
}
template<int index> BoolV BSplatElement(BoolV a)
{
uint32_t* b=(uint32_t*)&a;
return BoolV(b[index], b[index], b[index], b[index]);
}
NV_FORCE_INLINE BoolV BAnd(const BoolV a, const BoolV b)
{
return BoolV(a.ux && b.ux ? (uint32_t)-1 : 0, a.uy && b.uy ? (uint32_t)-1 : 0, a.uz && b.uz ? (uint32_t)-1 : 0, a.uw && b.uw ? (uint32_t)-1 : 0);
}
NV_FORCE_INLINE BoolV BAndNot(const BoolV a, const BoolV b)
{
return BoolV(a.ux & ~b.ux, a.uy & ~b.uy, a.uz & ~b.uz, a.uw & ~b.uw);
}
NV_FORCE_INLINE BoolV BNot(const BoolV a)
{
return BoolV(~a.ux, ~a.uy, ~a.uz, ~a.uw);
}
NV_FORCE_INLINE BoolV BOr(const BoolV a, const BoolV b)
{
return BoolV(a.ux || b.ux ? (uint32_t)-1 : 0, a.uy || b.uy ? (uint32_t)-1 : 0, a.uz || b.uz ? (uint32_t)-1 : 0, a.uw || b.uw ? (uint32_t)-1 : 0);
}
NV_FORCE_INLINE uint32_t BAllEq(const BoolV a, const BoolV b)
{
return (a.ux==b.ux && a.uy==b.uy && a.uz==b.uz && a.uw==b.uw ? 1 : 0);
}
NV_FORCE_INLINE BoolV BAllTrue4(const BoolV a)
{
return (a.ux & a.uy & a.uz & a.uw) ? BTTTT() : BFFFF();
}
NV_FORCE_INLINE BoolV BAnyTrue4(const BoolV a)
{
return (a.ux | a.uy | a.uz | a.uw) ? BTTTT() : BFFFF();
}
NV_FORCE_INLINE BoolV BAllTrue3(const BoolV a)
{
return (a.ux & a.uy & a.uz) ? BTTTT() : BFFFF();
}
NV_FORCE_INLINE BoolV BAnyTrue3(const BoolV a)
{
return (a.ux | a.uy | a.uz) ? BTTTT() : BFFFF();
}
NV_FORCE_INLINE uint32_t BAllEqTTTT(const BoolV a)
{
return BAllEq(a, BTTTT());
}
NV_FORCE_INLINE uint32_t BAllEqFFFF(const BoolV a)
{
return BAllEq(a, BFFFF());
}
NV_FORCE_INLINE uint32_t BGetBitMask(const BoolV a)
{
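    // relies on each lane being an all-bits mask, so ANDing with 1/2/4/8 keeps exactly one bit per lane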
return (a.ux & 1) | (a.uy & 2) | (a.uz & 4) | (a.uw & 8);
}
//////////////////////////////////
//MAT33V
//////////////////////////////////
NV_FORCE_INLINE Vec3V M33MulV3(const Mat33V& a, const Vec3V b)
{
return Vec3V
(
a.col0.x*b.x + a.col1.x*b.y + a.col2.x*b.z,
a.col0.y*b.x + a.col1.y*b.y + a.col2.y*b.z,
a.col0.z*b.x + a.col1.z*b.y + a.col2.z*b.z
);
}
NV_FORCE_INLINE Vec3V M33TrnspsMulV3(const Mat33V& a, const Vec3V b)
{
return Vec3V
(
a.col0.x*b.x + a.col0.y*b.y + a.col0.z*b.z,
a.col1.x*b.x + a.col1.y*b.y + a.col1.z*b.z,
a.col2.x*b.x + a.col2.y*b.y + a.col2.z*b.z
);
}
NV_FORCE_INLINE Vec3V M33MulV3AddV3(const Mat33V& A, const Vec3V b, const Vec3V c)
{
const FloatV x=V3GetX(b);
const FloatV y=V3GetY(b);
const FloatV z=V3GetZ(b);
Vec3V result = V3ScaleAdd(A.col0, x, c);
result = V3ScaleAdd(A.col1, y, result);
return V3ScaleAdd(A.col2, z, result);
}
NV_FORCE_INLINE Mat33V M33MulM33(const Mat33V& a, const Mat33V& b)
{
return Mat33V(M33MulV3(a,b.col0),M33MulV3(a,b.col1),M33MulV3(a,b.col2));
}
NV_FORCE_INLINE Mat33V M33Add(const Mat33V& a, const Mat33V& b)
{
return Mat33V(V3Add(a.col0,b.col0),V3Add(a.col1,b.col1),V3Add(a.col2,b.col2));
}
NV_FORCE_INLINE Mat33V M33Scale(const Mat33V& a, const FloatV& b)
{
return Mat33V(V3Scale(a.col0,b),V3Scale(a.col1,b),V3Scale(a.col2,b));
}
NV_FORCE_INLINE Mat33V M33Sub(const Mat33V& a, const Mat33V& b)
{
return Mat33V(V3Sub(a.col0,b.col0),V3Sub(a.col1,b.col1),V3Sub(a.col2,b.col2));
}
NV_FORCE_INLINE Mat33V M33Neg(const Mat33V& a)
{
return Mat33V(V3Neg(a.col0),V3Neg(a.col1),V3Neg(a.col2));
}
NV_FORCE_INLINE Mat33V M33Abs(const Mat33V& a)
{
return Mat33V(V3Abs(a.col0),V3Abs(a.col1),V3Abs(a.col2));
}
NV_FORCE_INLINE Mat33V M33Diagonal(const Vec3VArg d)
{
const Vec3V x = V3Mul(V3UnitX(), d);
const Vec3V y = V3Mul(V3UnitY(), d);
const Vec3V z = V3Mul(V3UnitZ(), d);
return Mat33V(x, y, z);
}
NV_FORCE_INLINE Mat33V M33Inverse(const Mat33V& a)
{
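    // classic cofactor inverse: adjugate (transposed cofactor matrix) scaled by the reciprocal of the determinant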
const float det = a.col0.x*(a.col1.y*a.col2.z - a.col1.z*a.col2.y)
-a.col1.x*(a.col0.y*a.col2.z - a.col2.y*a.col0.z)
+a.col2.x*(a.col0.y*a.col1.z - a.col1.y*a.col0.z);
const float invDet = 1.0f/det;
Mat33V ret;
ret.col0.x = invDet*(a.col1.y*a.col2.z - a.col2.y*a.col1.z);
ret.col0.y = invDet*(a.col2.y*a.col0.z - a.col0.y*a.col2.z);
ret.col0.z = invDet*(a.col0.y*a.col1.z - a.col1.y*a.col0.z);
ret.col1.x = invDet*(a.col2.x*a.col1.z - a.col1.x*a.col2.z);
ret.col1.y = invDet*(a.col0.x*a.col2.z - a.col2.x*a.col0.z);
ret.col1.z = invDet*(a.col1.x*a.col0.z - a.col0.x*a.col1.z);
ret.col2.x = invDet*(a.col1.x*a.col2.y - a.col2.x*a.col1.y);
ret.col2.y = invDet*(a.col2.x*a.col0.y - a.col0.x*a.col2.y);
ret.col2.z = invDet*(a.col0.x*a.col1.y - a.col1.x*a.col0.y);
return ret;
}
NV_FORCE_INLINE Mat33V Mat33V_From_NvMat33(const NvMat33 &m)
{
return Mat33V(V3LoadU(m.column0),
V3LoadU(m.column1),
V3LoadU(m.column2));
}
NV_FORCE_INLINE void NvMat33_From_Mat33V(const Mat33V &m, NvMat33 &out)
{
NV_ASSERT((size_t(&out)&15)==0);
V3StoreU(m.col0, out.column0);
V3StoreU(m.col1, out.column1);
V3StoreU(m.col2, out.column2);
}
NV_FORCE_INLINE Mat33V M33Trnsps(const Mat33V& a)
{
return Mat33V(Vec3V(a.col0.x,a.col1.x,a.col2.x),Vec3V(a.col0.y,a.col1.y,a.col2.y),Vec3V(a.col0.z,a.col1.z,a.col2.z));
}
NV_FORCE_INLINE Mat33V M33Identity()
{
return Mat33V
(
V3UnitX(),
V3UnitY(),
V3UnitZ()
);
}
//////////////////////////////////
//MAT34V
//////////////////////////////////
NV_FORCE_INLINE Vec3V M34MulV3(const Mat34V& a, const Vec3V b)
{
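    // full affine transform: rotates b by the 3x3 part and adds the translation column col3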
return Vec3V
(
a.col0.x*b.x + a.col1.x*b.y + a.col2.x*b.z + a.col3.x,
a.col0.y*b.x + a.col1.y*b.y + a.col2.y*b.z + a.col3.y,
a.col0.z*b.x + a.col1.z*b.y + a.col2.z*b.z + a.col3.z
);
}
NV_FORCE_INLINE Vec3V M34Mul33V3(const Mat34V& a, const Vec3V b)
{
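    // rotation-only transform: applies the 3x3 part of the 3x4 matrix and ignores the translation column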
return Vec3V
(
a.col0.x*b.x + a.col1.x*b.y + a.col2.x*b.z,
a.col0.y*b.x + a.col1.y*b.y + a.col2.y*b.z,
a.col0.z*b.x + a.col1.z*b.y + a.col2.z*b.z
);
}
NV_FORCE_INLINE Vec3V M34TrnspsMul33V3(const Mat34V& a, const Vec3V b)
{
return Vec3V
(
a.col0.x*b.x + a.col0.y*b.y + a.col0.z*b.z,
a.col1.x*b.x + a.col1.y*b.y + a.col1.z*b.z,
a.col2.x*b.x + a.col2.y*b.y + a.col2.z*b.z
);
}
NV_FORCE_INLINE Mat34V M34MulM34(const Mat34V& a, const Mat34V& b)
{
return Mat34V(M34Mul33V3(a,b.col0),M34Mul33V3(a,b.col1),M34Mul33V3(a,b.col2),M34MulV3(a,b.col3));
}
NV_FORCE_INLINE Mat33V M34MulM33(const Mat34V& a, const Mat33V& b)
{
return Mat33V(M34Mul33V3(a,b.col0),M34Mul33V3(a,b.col1),M34Mul33V3(a,b.col2));
}
NV_FORCE_INLINE Mat33V M34Mul33V3(const Mat34V& a, const Mat33V& b)
{
return Mat33V(M34Mul33V3(a,b.col0),M34Mul33V3(a,b.col1),M34Mul33V3(a,b.col2));
}
NV_FORCE_INLINE Mat33V M34Mul33MM34(const Mat34V& a, const Mat34V& b)
{
return Mat33V(M34Mul33V3(a,b.col0),M34Mul33V3(a,b.col1),M34Mul33V3(a,b.col2));
}
NV_FORCE_INLINE Mat34V M34Add(const Mat34V& a, const Mat34V& b)
{
return Mat34V(V3Add(a.col0,b.col0),V3Add(a.col1,b.col1),V3Add(a.col2,b.col2),V3Add(a.col3,b.col3));
}
NV_FORCE_INLINE Mat33V M34Trnsps33(const Mat34V& a)
{
return Mat33V(Vec3V(a.col0.x,a.col1.x,a.col2.x),Vec3V(a.col0.y,a.col1.y,a.col2.y),Vec3V(a.col0.z,a.col1.z,a.col2.z));
}
//////////////////////////////////
//MAT44V
//////////////////////////////////
NV_FORCE_INLINE Vec4V M44MulV4(const Mat44V& a, const Vec4V b)
{
return Vec4V
(
a.col0.x*b.x + a.col1.x*b.y + a.col2.x*b.z + a.col3.x*b.w,
a.col0.y*b.x + a.col1.y*b.y + a.col2.y*b.z + a.col3.y*b.w,
a.col0.z*b.x + a.col1.z*b.y + a.col2.z*b.z + a.col3.z*b.w,
a.col0.w*b.x + a.col1.w*b.y + a.col2.w*b.z + a.col3.w*b.w
);
}
NV_FORCE_INLINE Vec4V M44TrnspsMulV4(const Mat44V& a, const Vec4V b)
{
return Vec4V
(
a.col0.x*b.x + a.col0.y*b.y + a.col0.z*b.z + a.col0.w*b.w,
a.col1.x*b.x + a.col1.y*b.y + a.col1.z*b.z + a.col1.w*b.w,
a.col2.x*b.x + a.col2.y*b.y + a.col2.z*b.z + a.col2.w*b.w,
a.col3.x*b.x + a.col3.y*b.y + a.col3.z*b.z + a.col3.w*b.w
);
}
NV_FORCE_INLINE Mat44V M44MulM44(const Mat44V& a, const Mat44V& b)
{
return Mat44V(M44MulV4(a,b.col0),M44MulV4(a,b.col1),M44MulV4(a,b.col2),M44MulV4(a,b.col3));
}
NV_FORCE_INLINE Mat44V M44Add(const Mat44V& a, const Mat44V& b)
{
return Mat44V(V4Add(a.col0,b.col0),V4Add(a.col1,b.col1),V4Add(a.col2,b.col2),V4Add(a.col3,b.col3));
}
NV_FORCE_INLINE Mat44V M44Inverse(const Mat44V& a)
{
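    // Cramer's-rule style inverse: tmp[] caches 2x2 sub-determinant products used to build the cofactors in dst[]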
float tmp[12];
float dst[16];
float det;
const float src[16] =
{
a.col0.x, a.col0.y, a.col0.z, a.col0.w,
a.col1.x, a.col1.y, a.col1.z, a.col1.w,
a.col2.x, a.col2.y, a.col2.z, a.col2.w,
a.col3.x, a.col3.y, a.col3.z, a.col3.w
};
tmp[0] = src[10] * src[15];
tmp[1] = src[11] * src[14];
tmp[2] = src[9] * src[15];
tmp[3] = src[11] * src[13];
tmp[4] = src[9] * src[14];
tmp[5] = src[10] * src[13];
tmp[6] = src[8] * src[15];
tmp[7] = src[11] * src[12];
tmp[8] = src[8] * src[14];
tmp[9] = src[10] * src[12];
tmp[10] = src[8] * src[13];
tmp[11] = src[9] * src[12];
dst[0] = tmp[0]*src[5] + tmp[3]*src[6] + tmp[4]*src[7];
dst[0] -= tmp[1]*src[5] + tmp[2]*src[6] + tmp[5]*src[7];
dst[1] = tmp[1]*src[4] + tmp[6]*src[6] + tmp[9]*src[7];
dst[1] -= tmp[0]*src[4] + tmp[7]*src[6] + tmp[8]*src[7];
dst[2] = tmp[2]*src[4] + tmp[7]*src[5] + tmp[10]*src[7];
dst[2] -= tmp[3]*src[4] + tmp[6]*src[5] + tmp[11]*src[7];
dst[3] = tmp[5]*src[4] + tmp[8]*src[5] + tmp[11]*src[6];
dst[3] -= tmp[4]*src[4] + tmp[9]*src[5] + tmp[10]*src[6];
dst[4] = tmp[1]*src[1] + tmp[2]*src[2] + tmp[5]*src[3];
dst[4] -= tmp[0]*src[1] + tmp[3]*src[2] + tmp[4]*src[3];
dst[5] = tmp[0]*src[0] + tmp[7]*src[2] + tmp[8]*src[3];
dst[5] -= tmp[1]*src[0] + tmp[6]*src[2] + tmp[9]*src[3];
dst[6] = tmp[3]*src[0] + tmp[6]*src[1] + tmp[11]*src[3];
dst[6] -= tmp[2]*src[0] + tmp[7]*src[1] + tmp[10]*src[3];
dst[7] = tmp[4]*src[0] + tmp[9]*src[1] + tmp[10]*src[2];
dst[7] -= tmp[5]*src[0] + tmp[8]*src[1] + tmp[11]*src[2];
tmp[0] = src[2]*src[7];
tmp[1] = src[3]*src[6];
tmp[2] = src[1]*src[7];
tmp[3] = src[3]*src[5];
tmp[4] = src[1]*src[6];
tmp[5] = src[2]*src[5];
tmp[6] = src[0]*src[7];
tmp[7] = src[3]*src[4];
tmp[8] = src[0]*src[6];
tmp[9] = src[2]*src[4];
tmp[10] = src[0]*src[5];
tmp[11] = src[1]*src[4];
dst[8] = tmp[0]*src[13] + tmp[3]*src[14] + tmp[4]*src[15];
dst[8] -= tmp[1]*src[13] + tmp[2]*src[14] + tmp[5]*src[15];
dst[9] = tmp[1]*src[12] + tmp[6]*src[14] + tmp[9]*src[15];
dst[9] -= tmp[0]*src[12] + tmp[7]*src[14] + tmp[8]*src[15];
dst[10] = tmp[2]*src[12] + tmp[7]*src[13] + tmp[10]*src[15];
dst[10]-= tmp[3]*src[12] + tmp[6]*src[13] + tmp[11]*src[15];
dst[11] = tmp[5]*src[12] + tmp[8]*src[13] + tmp[11]*src[14];
dst[11]-= tmp[4]*src[12] + tmp[9]*src[13] + tmp[10]*src[14];
dst[12] = tmp[2]*src[10] + tmp[5]*src[11] + tmp[1]*src[9];
dst[12]-= tmp[4]*src[11] + tmp[0]*src[9] + tmp[3]*src[10];
dst[13] = tmp[8]*src[11] + tmp[0]*src[8] + tmp[7]*src[10];
dst[13]-= tmp[6]*src[10] + tmp[9]*src[11] + tmp[1]*src[8];
dst[14] = tmp[6]*src[9] + tmp[11]*src[11] + tmp[3]*src[8];
dst[14]-= tmp[10]*src[11] + tmp[2]*src[8] + tmp[7]*src[9];
dst[15] = tmp[10]*src[10] + tmp[4]*src[8] + tmp[9]*src[9];
dst[15]-= tmp[8]*src[9] + tmp[11]*src[10] + tmp[5]*src[8];
det=src[0]*dst[0]+src[1]*dst[1]+src[2]*dst[2]+src[3]*dst[3];
det = 1.0f/det;
for(uint32_t j=0;j<16;j++)
{
dst[j] *= det;
}
return Mat44V
(
Vec4V(dst[0],dst[4],dst[8],dst[12]),
Vec4V(dst[1],dst[5],dst[9],dst[13]),
Vec4V(dst[2],dst[6],dst[10],dst[14]),
Vec4V(dst[3],dst[7],dst[11],dst[15])
);
}
NV_FORCE_INLINE Mat44V M44Trnsps(const Mat44V& a)
{
return Mat44V
(
Vec4V(a.col0.x,a.col1.x,a.col2.x,a.col3.x),
Vec4V(a.col0.y,a.col1.y,a.col2.y,a.col3.y),
Vec4V(a.col0.z,a.col1.z,a.col2.z,a.col3.z),
Vec4V(a.col0.w,a.col1.w,a.col2.w,a.col3.w)
);
}
NV_FORCE_INLINE Vec4V V4LoadXYZW(const float& x, const float& y, const float& z, const float& w)
{
return Vec4V(x, y, z, w);
}
/*
NV_FORCE_INLINE VecU16V V4U32PK(VecU32V a, VecU32V b)
{
return VecU16V(
uint16_t(NvClamp<uint32_t>((a).u32[0], 0, 0xFFFF)),
uint16_t(NvClamp<uint32_t>((a).u32[1], 0, 0xFFFF)),
uint16_t(NvClamp<uint32_t>((a).u32[2], 0, 0xFFFF)),
uint16_t(NvClamp<uint32_t>((a).u32[3], 0, 0xFFFF)),
uint16_t(NvClamp<uint32_t>((b).u32[0], 0, 0xFFFF)),
uint16_t(NvClamp<uint32_t>((b).u32[1], 0, 0xFFFF)),
uint16_t(NvClamp<uint32_t>((b).u32[2], 0, 0xFFFF)),
uint16_t(NvClamp<uint32_t>((b).u32[3], 0, 0xFFFF)));
}
*/
NV_FORCE_INLINE VecU32V V4U32Sel(const BoolV c, const VecU32V a, const VecU32V b)
{
return VecU32V(
c.ux ? a.u32[0] : b.u32[0],
c.uy ? a.u32[1] : b.u32[1],
c.uz ? a.u32[2] : b.u32[2],
c.uw ? a.u32[3] : b.u32[3]
);
}
NV_FORCE_INLINE VecU32V V4U32or(VecU32V a, VecU32V b)
{
return VecU32V((a).u32[0]|(b).u32[0], (a).u32[1]|(b).u32[1], (a).u32[2]|(b).u32[2], (a).u32[3]|(b).u32[3]);
}
NV_FORCE_INLINE VecU32V V4U32and(VecU32V a, VecU32V b)
{
return VecU32V((a).u32[0]&(b).u32[0], (a).u32[1]&(b).u32[1], (a).u32[2]&(b).u32[2], (a).u32[3]&(b).u32[3]);
}
NV_FORCE_INLINE VecU32V V4U32Andc(VecU32V a, VecU32V b)
{
return VecU32V((a).u32[0]&~(b).u32[0], (a).u32[1]&~(b).u32[1], (a).u32[2]&~(b).u32[2], (a).u32[3]&~(b).u32[3]);
}
/*
NV_FORCE_INLINE VecU16V V4U16Or(VecU16V a, VecU16V b)
{
return VecU16V(
(a).u16[0]|(b).u16[0], (a).u16[1]|(b).u16[1], (a).u16[2]|(b).u16[2], (a).u16[3]|(b).u16[3],
(a).u16[4]|(b).u16[4], (a).u16[5]|(b).u16[5], (a).u16[6]|(b).u16[6], (a).u16[7]|(b).u16[7]);
}
*/
/*
NV_FORCE_INLINE VecU16V V4U16And(VecU16V a, VecU16V b)
{
return VecU16V(
(a).u16[0]&(b).u16[0], (a).u16[1]&(b).u16[1], (a).u16[2]&(b).u16[2], (a).u16[3]&(b).u16[3],
(a).u16[4]&(b).u16[4], (a).u16[5]&(b).u16[5], (a).u16[6]&(b).u16[6], (a).u16[7]&(b).u16[7]);
}
*/
/*
NV_FORCE_INLINE VecU16V V4U16Andc(VecU16V a, VecU16V b)
{
return VecU16V(
(a).u16[0]&~(b).u16[0], (a).u16[1]&~(b).u16[1], (a).u16[2]&~(b).u16[2], (a).u16[3]&~(b).u16[3],
(a).u16[4]&~(b).u16[4], (a).u16[5]&~(b).u16[5], (a).u16[6]&~(b).u16[6], (a).u16[7]&~(b).u16[7]);
}
*/
/*
template<int a> NV_FORCE_INLINE VecI32V V4ISplat()
{
return VecI32V(a, a, a, a);
}
template<uint32_t a> NV_FORCE_INLINE VecU32V V4USplat()
{
return VecU32V(a, a, a, a);
}
*/
/*
NV_FORCE_INLINE void V4U16StoreAligned(VecU16V val, VecU16V* address)
{
*address = val;
}
*/
NV_FORCE_INLINE void V4U32StoreAligned(VecU32V val, VecU32V* address)
{
*address = val;
}
NV_FORCE_INLINE Vec4V V4Andc(const Vec4V a, const VecU32V b)
{
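    // bitwise and-not on the raw float bits, performed through a VecU32V reinterpretation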
VecU32V r = V4U32Andc(*reinterpret_cast<const VecU32V*>(&a),b);
return (*reinterpret_cast<const Vec4V*>(&r));
}
NV_FORCE_INLINE VecU32V V4IsGrtrV32u(const Vec4V a, const Vec4V b)
{
return VecU32V(
a.x > b.x ? 0xFFFFffff : 0,
a.y > b.y ? 0xFFFFffff : 0,
a.z > b.z ? 0xFFFFffff : 0,
a.w > b.w ? 0xFFFFffff : 0);
}
NV_FORCE_INLINE VecU16V V4U16LoadAligned(VecU16V* addr)
{
return *addr;
}
NV_FORCE_INLINE VecU16V V4U16LoadUnaligned(VecU16V* addr)
{
return *addr;
}
NV_FORCE_INLINE VecU16V V4U16CompareGt(VecU16V a, VecU16V b)
{
return VecU16V(
(a).u16[0]>(b).u16[0], (a).u16[1]>(b).u16[1], (a).u16[2]>(b).u16[2], (a).u16[3]>(b).u16[3],
(a).u16[4]>(b).u16[4], (a).u16[5]>(b).u16[5], (a).u16[6]>(b).u16[6], (a).u16[7]>(b).u16[7]);
}
NV_FORCE_INLINE VecU16V V4I16CompareGt(VecU16V a, VecU16V b)
{
return VecU16V(
(a).i16[0]>(b).i16[0], (a).i16[1]>(b).i16[1], (a).i16[2]>(b).i16[2], (a).i16[3]>(b).i16[3],
(a).i16[4]>(b).i16[4], (a).i16[5]>(b).i16[5], (a).i16[6]>(b).i16[6], (a).i16[7]>(b).i16[7]);
}
NV_FORCE_INLINE Vec4V Vec4V_From_VecU32V(VecU32V a)
{
return Vec4V(float((a).u32[0]), float((a).u32[1]), float((a).u32[2]), float((a).u32[3]));
}
NV_FORCE_INLINE Vec4V Vec4V_From_VecI32V(VecI32V a)
{
return Vec4V(float((a).i32[0]), float((a).i32[1]), float((a).i32[2]), float((a).i32[3]));
}
NV_FORCE_INLINE VecI32V VecI32V_From_Vec4V(Vec4V a)
{
float* data = (float*)&a;
return VecI32V(int32_t(data[0]), int32_t(data[1]), int32_t(data[2]), int32_t(data[3]));
}
NV_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecU32V(VecU32V a)
{
Vec4V b = *reinterpret_cast<Vec4V*>(&a);
return b;
}
NV_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecI32V(VecI32V a)
{
Vec4V b = *reinterpret_cast<Vec4V*>(&a);
return b;
}
NV_FORCE_INLINE VecU32V VecU32V_ReinterpretFrom_Vec4V(Vec4V a)
{
VecU32V b = *reinterpret_cast<VecU32V*>(&a);
return b;
}
NV_FORCE_INLINE VecI32V VecI32V_ReinterpretFrom_Vec4V(Vec4V a)
{
VecI32V b= *reinterpret_cast<VecI32V*>(&a);
return b;
}
template<int index> NV_FORCE_INLINE VecU32V V4U32SplatElement(VecU32V a)
{
return VecU32V((a).u32[index], (a).u32[index], (a).u32[index], (a).u32[index]);
}
template<int index> NV_FORCE_INLINE VecU32V V4U32SplatElement(BoolV a)
{
const uint32_t u = (&a.ux)[index];
return VecU32V(u, u, u, u);
}
template<int index> NV_FORCE_INLINE Vec4V V4SplatElement(Vec4V a)
{
float* data = (float*)&a;
return Vec4V(data[index], data[index], data[index], data[index]);
}
template<int index> NV_FORCE_INLINE VecU16V V4U16SplatElement(VecU16V a)
{
return VecU16V(
(a).u16[index], (a).u16[index], (a).u16[index], (a).u16[index],
(a).u16[index], (a).u16[index], (a).u16[index], (a).u16[index]);
}
template<int imm> NV_FORCE_INLINE VecI16V V4I16SplatImmediate()
{
return VecI16V(imm, imm, imm, imm, imm, imm, imm, imm);
}
template<uint16_t imm> NV_FORCE_INLINE VecU16V V4U16SplatImmediate()
{
return VecU16V(imm, imm, imm, imm, imm, imm, imm, imm);
}
NV_FORCE_INLINE VecU16V V4U16SubtractModulo(VecU16V a, VecU16V b)
{
return VecU16V(
(a).u16[0] - (b).u16[0], (a).u16[1] - (b).u16[1], (a).u16[2] - (b).u16[2], (a).u16[3] - (b).u16[3],
(a).u16[4] - (b).u16[4], (a).u16[5] - (b).u16[5], (a).u16[6] - (b).u16[6], (a).u16[7] - (b).u16[7]);
}
NV_FORCE_INLINE VecU16V V4U16AddModulo(VecU16V a, VecU16V b)
{
return VecU16V(
(a).u16[0] + (b).u16[0], (a).u16[1] + (b).u16[1], (a).u16[2] + (b).u16[2], (a).u16[3] + (b).u16[3],
(a).u16[4] + (b).u16[4], (a).u16[5] + (b).u16[5], (a).u16[6] + (b).u16[6], (a).u16[7] + (b).u16[7]);
}
NV_FORCE_INLINE VecU32V V4U16GetLo16(VecU16V a)
{
return VecU32V((a).u16[0], (a).u16[2], (a).u16[4], (a).u16[6]);
}
NV_FORCE_INLINE VecU32V V4U16GetHi16(VecU16V a)
{
return VecU32V((a).u16[1], (a).u16[3], (a).u16[5], (a).u16[7]);
}
NV_FORCE_INLINE VecU32V VecU32VLoadXYZW(uint32_t x, uint32_t y, uint32_t z, uint32_t w)
{
return VecU32V(x, y, z, w);
}
NV_FORCE_INLINE Vec4V V4Abs(const Vec4V a)
{
return V4Max(a,V4Neg(a));
}
NV_FORCE_INLINE BoolV V4IsEqU32(const VecU32V a, const VecU32V b)
{
return BoolV(a.u32[0]==b.u32[0] ? -1 : 0, a.u32[1]==b.u32[1] ? -1 : 0, a.u32[2]==b.u32[2] ? -1 : 0, a.u32[3]==b.u32[3] ? -1 : 0);
}
NV_FORCE_INLINE VecU32V U4Load(const uint32_t i)
{
return VecU32V(i, i, i, i);
}
NV_FORCE_INLINE VecU32V U4LoadU(const uint32_t* i)
{
return VecU32V(i[0], i[1], i[2], i[3]);
}
NV_FORCE_INLINE VecU32V U4LoadA(const uint32_t* i)
{
return VecU32V(i[0], i[1], i[2], i[3]);
}
NV_FORCE_INLINE VecI32V I4Load(const int32_t i)
{
return VecI32V(i, i, i, i);
}
NV_FORCE_INLINE VecI32V I4LoadU(const int32_t* i)
{
return VecI32V(i[0], i[1], i[2], i[3]);
}
NV_FORCE_INLINE VecI32V I4LoadA(const int32_t* i)
{
return VecI32V(i[0], i[1], i[2], i[3]);
}
NV_FORCE_INLINE VecI32V VecI32V_Add(const VecI32VArg a, const VecI32VArg b)
{
return VecI32V(a.i32[0] + b.i32[0], a.i32[1] + b.i32[1], a.i32[2] + b.i32[2], a.i32[3] + b.i32[3]);
}
NV_FORCE_INLINE VecI32V VecI32V_Sub(const VecI32VArg a, const VecI32VArg b)
{
return VecI32V(a.i32[0] - b.i32[0], a.i32[1] - b.i32[1], a.i32[2] - b.i32[2], a.i32[3] - b.i32[3]);
}
NV_FORCE_INLINE BoolV VecI32V_IsGrtr(const VecI32VArg a, const VecI32VArg b)
{
return BoolV(a.i32[0] > b.i32[0] ? -1 : 0, a.i32[1] > b.i32[1] ? -1 : 0, a.i32[2] > b.i32[2] ? -1 : 0, a.i32[3] > b.i32[3] ? -1 : 0);
}
NV_FORCE_INLINE BoolV VecI32V_IsEq(const VecI32VArg a, const VecI32VArg b)
{
return BoolV(a.i32[0] == b.i32[0] ? -1 : 0, a.i32[1] == b.i32[1] ? -1 : 0, a.i32[2] == b.i32[2] ? -1 : 0, a.i32[3] == b.i32[3] ? -1 : 0);
}
NV_FORCE_INLINE VecI32V V4I32Sel(const BoolV c, const VecI32V a, const VecI32V b)
{
return VecI32V(
c.ux ? a.i32[0] : b.i32[0],
c.uy ? a.i32[1] : b.i32[1],
c.uz ? a.i32[2] : b.i32[2],
c.uw ? a.i32[3] : b.i32[3]
);
}
NV_FORCE_INLINE VecI32V VecI32V_Zero()
{
return VecI32V(0,0,0,0);
}
NV_FORCE_INLINE VecI32V VecI32V_One()
{
return VecI32V(1,1,1,1);
}
NV_FORCE_INLINE VecI32V VecI32V_Two()
{
return VecI32V(2,2,2,2);
}
NV_FORCE_INLINE VecI32V VecI32V_MinusOne()
{
return VecI32V(-1,-1,-1,-1);
}
NV_FORCE_INLINE VecU32V U4Zero()
{
return VecU32V(0,0,0,0);
}
NV_FORCE_INLINE VecU32V U4One()
{
return VecU32V(1,1,1,1);
}
NV_FORCE_INLINE VecU32V U4Two()
{
return VecU32V(2,2,2,2);
}
NV_FORCE_INLINE VecShiftV VecI32V_PrepareShift(const VecI32VArg shift)
{
return shift;
}
NV_FORCE_INLINE VecI32V VecI32V_LeftShift(const VecI32VArg a, const VecShiftVArg count)
{
return VecI32V(a.i32[0] << count.i32[0], a.i32[1] << count.i32[1], a.i32[2] << count.i32[2], a.i32[3] << count.i32[3]);
}
NV_FORCE_INLINE VecI32V VecI32V_RightShift(const VecI32VArg a, const VecShiftVArg count)
{
return VecI32V(a.i32[0] >> count.i32[0], a.i32[1] >> count.i32[1], a.i32[2] >> count.i32[2], a.i32[3] >> count.i32[3]);
}
NV_FORCE_INLINE VecI32V VecI32V_And(const VecI32VArg a, const VecI32VArg b)
{
return VecI32V(a.i32[0]&b.i32[0], a.i32[1]&b.i32[1], a.i32[2]&b.i32[2], a.i32[3]&b.i32[3]);
}
NV_FORCE_INLINE VecI32V VecI32V_Or(const VecI32VArg a, const VecI32VArg b)
{
return VecI32V(a.i32[0]|b.i32[0], a.i32[1]|b.i32[1], a.i32[2]|b.i32[2], a.i32[3]|b.i32[3]);
}
NV_FORCE_INLINE VecI32V VecI32V_GetX(const VecI32VArg a)
{
return VecI32V(a.i32[0], a.i32[0], a.i32[0], a.i32[0]);
}
NV_FORCE_INLINE VecI32V VecI32V_GetY(const VecI32VArg a)
{
return VecI32V(a.i32[1], a.i32[1], a.i32[1], a.i32[1]);
}
NV_FORCE_INLINE VecI32V VecI32V_GetZ(const VecI32VArg a)
{
return VecI32V(a.i32[2], a.i32[2], a.i32[2], a.i32[2]);
}
NV_FORCE_INLINE VecI32V VecI32V_GetW(const VecI32VArg a)
{
return VecI32V(a.i32[3], a.i32[3], a.i32[3], a.i32[3]);
}
NV_FORCE_INLINE VecI32V VecI32V_Sel(const BoolV c, const VecI32VArg a, const VecI32VArg b)
{
return VecI32V(c.ux ? a.i32[0] : b.i32[0], c.uy ? a.i32[1] : b.i32[1], c.uz ? a.i32[2] : b.i32[2], c.uw ? a.i32[3] : b.i32[3]);
}
NV_FORCE_INLINE VecI32V VecI32V_Merge(const VecI32VArg a, const VecI32VArg b, const VecI32VArg c, const VecI32VArg d)
{
return VecI32V(a.i32[0], b.i32[0], c.i32[0], d.i32[0]);
}
NV_FORCE_INLINE void NvI32_From_VecI32V(const VecI32VArg a, int32_t* i)
{
*i = a.i32[0];
}
NV_FORCE_INLINE VecI32V VecI32V_From_BoolV(const BoolVArg b)
{
return VecI32V(b.ux, b.uy, b.uz, b.uw);
}
NV_FORCE_INLINE VecU32V VecU32V_From_BoolV(const BoolVArg b)
{
return VecU32V(b.ux, b.uy, b.uz, b.uw);
}
//not used
/*
NV_FORCE_INLINE Vec4V V4LoadAligned(Vec4V* addr)
{
return *addr;
}
*/
/*
NV_FORCE_INLINE Vec4V V4LoadUnaligned(Vec4V* addr)
{
return *addr;
}
*/
/*
NV_FORCE_INLINE Vec4V V4Ceil(const Vec4V a)
{
return Vec4V(NvCeil(a.x), NvCeil(a.y), NvCeil(a.z), NvCeil(a.w));
}
NV_FORCE_INLINE Vec4V V4Floor(const Vec4V a)
{
return Vec4V(NvFloor(a.x), NvFloor(a.y), NvFloor(a.z), NvFloor(a.w));
}
*/
/*
NV_FORCE_INLINE VecU32V V4ConvertToU32VSaturate(const Vec4V a, uint32_t power)
{
NV_ASSERT(power == 0 && "Non-zero power not supported in convertToU32VSaturate");
NV_UNUSED(power); // prevent warning in release builds
float ffffFFFFasFloat = float(0xFFFF0000);
return VecU32V(
uint32_t(NvClamp<float>((a).x, 0.0f, ffffFFFFasFloat)),
uint32_t(NvClamp<float>((a).y, 0.0f, ffffFFFFasFloat)),
uint32_t(NvClamp<float>((a).z, 0.0f, ffffFFFFasFloat)),
uint32_t(NvClamp<float>((a).w, 0.0f, ffffFFFFasFloat)));
}
*/
#endif //NV_PHYSICS_COMMON_VECMATH_SCALAR_INLINE
| 57,653 | C | 24.309043 | 150 | 0.606317 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsAlignedMalloc.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef NV_NSFOUNDATION_NSALIGNEDMALLOC_H
#define NV_NSFOUNDATION_NSALIGNEDMALLOC_H
#include "NsUserAllocated.h"
/*!
Allocate aligned memory.
Alignment must be a power of 2!
-- should be templated by a base allocator
*/
namespace nvidia
{
namespace shdfnd
{
/**
Allocator, which is used to access the global NvAllocatorCallback instance
(used for dynamic data types template instantiation), which can align memory
*/
// SCS: AlignedMalloc with 3 params not found, seems not used on PC either
// disabled for now to avoid GCC error
template <uint32_t N, typename BaseAllocator = NonTrackingAllocator>
class AlignedAllocator : public BaseAllocator
{
public:
AlignedAllocator(const BaseAllocator& base = BaseAllocator()) : BaseAllocator(base)
{
}
void* allocate(size_t size, const char* file, int line)
{
size_t pad = N - 1 + sizeof(size_t); // store offset for delete.
uint8_t* base = reinterpret_cast<uint8_t*>(BaseAllocator::allocate(size + pad, file, line));
if(!base)
return NULL;
uint8_t* ptr = reinterpret_cast<uint8_t*>(size_t(base + pad) & ~(size_t(N) - 1)); // aligned pointer, ensuring N is a size_t
// wide mask
reinterpret_cast<size_t*>(ptr)[-1] = size_t(ptr - base); // store offset
return ptr;
}
void deallocate(void* ptr)
{
if(ptr == NULL)
return;
uint8_t* base = reinterpret_cast<uint8_t*>(ptr) - reinterpret_cast<size_t*>(ptr)[-1];
BaseAllocator::deallocate(base);
}
};
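// Illustrative usage sketch: requesting a 16-byte-aligned block through AlignedAllocator<16>
// (assumes the default NonTrackingAllocator base, which routes to the installed allocator callback).
/*
AlignedAllocator<16> alloc;
void* p = alloc.allocate(128, __FILE__, __LINE__);  // pointer is aligned to 16 bytes; the offset to the
                                                    // real base is stored just before it
alloc.deallocate(p);                                // recovers the base pointer from the stored offset
*/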
} // namespace shdfnd
} // namespace nvidia
#endif // #ifndef NV_NSFOUNDATION_NSALIGNEDMALLOC_H
| 3,330 | C | 37.287356 | 132 | 0.717417 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsVecQuat.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef NV_PHYSICS_COMMON_VECQUAT
#define NV_PHYSICS_COMMON_VECQUAT
//#include "NsInlineAoS.h"
#ifndef NV_PIDIV2
#define NV_PIDIV2 1.570796327f
#endif
//////////////////////////////////
//QuatV
//////////////////////////////////
NV_FORCE_INLINE QuatV QuatVLoadXYZW(const float x, const float y, const float z, const float w)
{
return V4LoadXYZW(x, y, z, w);
}
NV_FORCE_INLINE QuatV QuatVLoadU(const float* v)
{
return V4LoadU(v);
}
NV_FORCE_INLINE QuatV QuatVLoadA(const float* v)
{
return V4LoadA(v);
}
NV_FORCE_INLINE QuatV QuatV_From_RotationAxisAngle(const Vec3V u, const FloatV a)
{
//q = cos(a/2) + u*sin(a/2)
const FloatV half = FLoad(0.5f);
const FloatV hangle = FMul(a, half);
const FloatV piByTwo(FLoad(NV_PIDIV2));
const FloatV PiByTwoMinHangle(FSub(piByTwo, hangle));
const Vec4V hangle2(Vec4V_From_Vec3V(V3Merge(hangle, PiByTwoMinHangle, hangle)));
/*const FloatV sina = FSin(hangle);
const FloatV cosa = FCos(hangle);*/
const Vec4V _sina = V4Sin(hangle2);
const FloatV sina = V4GetX(_sina);
const FloatV cosa = V4GetY(_sina);
const Vec3V v = V3Scale(u, sina);
//return V4Sel(BTTTF(), Vec4V_From_Vec3V(v), V4Splat(cosa));
return V4SetW(Vec4V_From_Vec3V(v) , cosa);
}
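// Illustrative example: a rotation of NV_PIDIV2 about the z-axis gives
// q = (0, 0, sin(pi/4), cos(pi/4)) ~= (0, 0, 0.7071, 0.7071).
/*
const QuatV qz90 = QuatV_From_RotationAxisAngle(V3UnitZ(), FLoad(NV_PIDIV2));
*/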
//Normalize
NV_FORCE_INLINE QuatV QuatNormalize(const QuatV q)
{
return V4Normalize(q);
}
NV_FORCE_INLINE FloatV QuatLength(const QuatV q)
{
return V4Length(q);
}
NV_FORCE_INLINE FloatV QuatLengthSq(const QuatV q)
{
return V4LengthSq(q);
}
NV_FORCE_INLINE FloatV QuatDot(const QuatV a, const QuatV b) // a.b (4D dot product)
{
return V4Dot(a, b);
}
NV_FORCE_INLINE QuatV QuatConjugate(const QuatV q)
{
return V4SetW(V4Neg(q), V4GetW(q));
}
NV_FORCE_INLINE Vec3V QuatGetImaginaryPart(const QuatV q)
{
return Vec3V_From_Vec4V(q);
}
/** \brief computes rotation of x-axis */
NV_FORCE_INLINE Vec3V QuatGetBasisVector0(const QuatV q)
{
/*const float x2 = x*2.0f;
const float w2 = w*2.0f;
return NvVec3( (w * w2) - 1.0f + x*x2,
(z * w2) + y*x2,
(-y * w2) + z*x2);*/
const FloatV two = FLoad(2.f);
const FloatV w = V4GetW(q);
const Vec3V u = Vec3V_From_Vec4V(q);
const FloatV x2 = FMul(V3GetX(u), two);
const FloatV w2 = FMul(w, two);
const Vec3V a = V3Scale(u, x2);
const Vec3V tmp = V3Merge(w, V3GetZ(u), FNeg(V3GetY(u)));
//const Vec3V b = V3Scale(tmp, w2);
//const Vec3V ab = V3Add(a, b);
const Vec3V ab = V3ScaleAdd(tmp, w2, a);
return V3SetX(ab, FSub(V3GetX(ab), FOne()));
}
/** \brief computes rotation of y-axis */
NV_FORCE_INLINE Vec3V QuatGetBasisVector1(const QuatV q)
{
/*const float y2 = y*2.0f;
const float w2 = w*2.0f;
return NvVec3( (-z * w2) + x*y2,
(w * w2) - 1.0f + y*y2,
(x * w2) + z*y2);*/
const FloatV two = FLoad(2.f);
const FloatV w = V4GetW(q);
const Vec3V u = Vec3V_From_Vec4V(q);
const FloatV y2 = FMul(V3GetY(u), two);
const FloatV w2 = FMul(w, two);
const Vec3V a = V3Scale(u, y2);
const Vec3V tmp = V3Merge(FNeg(V3GetZ(u)),w, V3GetX(u));
//const Vec3V b = V3Scale(tmp, w2);
//const Vec3V ab = V3Add(a, b);
const Vec3V ab = V3ScaleAdd(tmp, w2, a);
return V3SetY(ab, FSub(V3GetY(ab), FOne()));
}
/** \brief computes rotation of z-axis */
NV_FORCE_INLINE Vec3V QuatGetBasisVector2(const QuatV q)
{
/*const float z2 = z*2.0f;
const float w2 = w*2.0f;
return NvVec3( (y * w2) + x*z2,
(-x * w2) + y*z2,
(w * w2) - 1.0f + z*z2);*/
const FloatV two = FLoad(2.f);
const FloatV w = V4GetW(q);
const Vec3V u = Vec3V_From_Vec4V(q);
const FloatV z2 = FMul(V3GetZ(u), two);
const FloatV w2 = FMul(w, two);
const Vec3V a = V3Scale(u, z2);
const Vec3V tmp = V3Merge(V3GetY(u), FNeg(V3GetX(u)), w);
/*const Vec3V b = V3Scale(tmp, w2);
const Vec3V ab = V3Add(a, b);*/
const Vec3V ab = V3ScaleAdd(tmp, w2, a);
return V3SetZ(ab, FSub(V3GetZ(ab), FOne()));
}
NV_FORCE_INLINE Vec3V QuatRotate(const QuatV q, const Vec3V v)
{
/*
const NvVec3 qv(x,y,z);
return (v*(w*w-0.5f) + (qv.cross(v))*w + qv*(qv.dot(v)))*2;
*/
const FloatV two = FLoad(2.f);
//const FloatV half = FloatV_From_F32(0.5f);
const FloatV nhalf = FLoad(-0.5f);
const Vec3V u = Vec3V_From_Vec4V(q);
const FloatV w = V4GetW(q);
//const FloatV w2 = FSub(FMul(w, w), half);
const FloatV w2 = FScaleAdd(w, w, nhalf);
const Vec3V a = V3Scale(v, w2);
//const Vec3V b = V3Scale(V3Cross(u, v), w);
//const Vec3V c = V3Scale(u, V3Dot(u, v));
//return V3Scale(V3Add(V3Add(a, b), c), two);
const Vec3V temp = V3ScaleAdd(V3Cross(u, v), w, a);
return V3Scale(V3ScaleAdd(u, V3Dot(u, v), temp), two);
}
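// The expression above is the expanded form of v' = q * v * conj(q) for a unit quaternion q = (u, w):
// v' = 2*(w*w - 0.5)*v + 2*w*(u x v) + 2*(u.v)*u.
// Illustrative example: rotating the x-axis by a 90-degree z rotation maps it (approximately) onto the y-axis.
/*
const QuatV qz90 = QuatV_From_RotationAxisAngle(V3UnitZ(), FLoad(NV_PIDIV2));
const Vec3V rotated = QuatRotate(qz90, V3UnitX());  // ~(0, 1, 0)
*/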
NV_FORCE_INLINE Vec3V QuatTransform(const QuatV q, const Vec3V p, const Vec3V v)
{
//p + q.rotate(v)
const FloatV two = FLoad(2.f);
//const FloatV half = FloatV_From_F32(0.5f);
const FloatV nhalf = FLoad(-0.5f);
const Vec3V u = Vec3V_From_Vec4V(q);
const FloatV w = V4GetW(q);
//const FloatV w2 = FSub(FMul(w, w), half);
const FloatV w2 = FScaleAdd(w, w, nhalf);
const Vec3V a = V3Scale(v, w2);
/*const Vec3V b = V3Scale(V3Cross(u, v), w);
const Vec3V c = V3Scale(u, V3Dot(u, v));
return V3ScaleAdd(V3Add(V3Add(a, b), c), two, p);*/
const Vec3V temp = V3ScaleAdd(V3Cross(u, v), w, a);
const Vec3V z = V3ScaleAdd(u, V3Dot(u, v), temp);
return V3ScaleAdd(z, two, p);
}
NV_FORCE_INLINE Vec3V QuatRotateInv(const QuatV q, const Vec3V v)
{
// const NvVec3 qv(x,y,z);
// return (v*(w*w-0.5f) - (qv.cross(v))*w + qv*(qv.dot(v)))*2;
const FloatV two = FLoad(2.f);
const FloatV nhalf = FLoad(-0.5f);
const Vec3V u = Vec3V_From_Vec4V(q);
const FloatV w = V4GetW(q);
const FloatV w2 = FScaleAdd(w, w, nhalf);
const Vec3V a = V3Scale(v, w2);
/*const Vec3V b = V3Scale(V3Cross(u, v), w);
const Vec3V c = V3Scale(u, V3Dot(u, v));
return V3Scale(V3Add(V3Sub(a, b), c), two);*/
const Vec3V temp = V3NegScaleSub(V3Cross(u, v), w, a);
return V3Scale(V3ScaleAdd(u, V3Dot(u, v), temp), two);
}
NV_FORCE_INLINE QuatV QuatMul(const QuatV a, const QuatV b)
{
const Vec3V imagA = Vec3V_From_Vec4V(a);
const Vec3V imagB = Vec3V_From_Vec4V(b);
const FloatV rA = V4GetW(a);
const FloatV rB = V4GetW(b);
const FloatV real = FSub(FMul(rA, rB), V3Dot(imagA, imagB));
const Vec3V v0 = V3Scale(imagA, rB);
const Vec3V v1 = V3Scale(imagB, rA);
const Vec3V v2 = V3Cross(imagA, imagB);
const Vec3V imag = V3Add(V3Add(v0, v1), v2);
return V4SetW(Vec4V_From_Vec3V(imag), real);
}
NV_FORCE_INLINE QuatV QuatAdd(const QuatV a, const QuatV b)
{
return V4Add(a, b);
}
NV_FORCE_INLINE QuatV QuatNeg(const QuatV q)
{
return V4Neg(q);
}
NV_FORCE_INLINE QuatV QuatSub(const QuatV a, const QuatV b)
{
return V4Sub(a, b);
}
NV_FORCE_INLINE QuatV QuatScale(const QuatV a, const FloatV b)
{
return V4Scale(a, b);
}
NV_FORCE_INLINE QuatV QuatMerge(const FloatV* const floatVArray)
{
return V4Merge(floatVArray);
}
NV_FORCE_INLINE QuatV QuatMerge(const FloatVArg x, const FloatVArg y, const FloatVArg z, const FloatVArg w)
{
return V4Merge(x, y, z, w);
}
NV_FORCE_INLINE QuatV QuatIdentity()
{
return V4SetW(V4Zero(), FOne());
}
NV_FORCE_INLINE bool isFiniteQuatV(const QuatV q)
{
return isFiniteVec4V(q);
}
NV_FORCE_INLINE bool isValidQuatV(const QuatV q)
{
const FloatV unitTolerance = FLoad((float)1e-4);
const FloatV tmp = FAbs(FSub(QuatLength(q), FOne()));
const BoolV con = FIsGrtr(unitTolerance, tmp);
return isFiniteVec4V(q) & (BAllEq(con, BTTTT())==1);
}
NV_FORCE_INLINE bool isSaneQuatV(const QuatV q)
{
const FloatV unitTolerance = FLoad((float)1e-2);
const FloatV tmp = FAbs(FSub(QuatLength(q), FOne()));
const BoolV con = FIsGrtr(unitTolerance, tmp);
return isFiniteVec4V(q) & (BAllEq(con, BTTTT())==1);
}
NV_FORCE_INLINE void QuatGetMat33V(const QuatVArg q, Vec3V& column0, Vec3V& column1, Vec3V& column2)
{
const FloatV one = FOne();
const FloatV x = V4GetX(q);
const FloatV y = V4GetY(q);
const FloatV z = V4GetZ(q);
const FloatV w = V4GetW(q);
const FloatV x2 = FAdd(x, x);
const FloatV y2 = FAdd(y, y);
const FloatV z2 = FAdd(z, z);
const FloatV xx = FMul(x2,x);
const FloatV yy = FMul(y2,y);
const FloatV zz = FMul(z2,z);
const FloatV xy = FMul(x2,y);
const FloatV xz = FMul(x2,z);
const FloatV xw = FMul(x2,w);
const FloatV yz = FMul(y2,z);
const FloatV yw = FMul(y2,w);
const FloatV zw = FMul(z2,w);
const FloatV v = FSub(one, xx);
column0 = V3Merge(FSub(FSub(one, yy), zz), FAdd(xy, zw), FSub(xz, yw));
column1 = V3Merge(FSub(xy, zw), FSub(v ,zz), FAdd(yz, xw));
column2 = V3Merge(FAdd(xz, yw), FSub(yz, xw), FSub(v, yy));
}
NV_FORCE_INLINE Mat33V QuatGetMat33V(const QuatVArg q)
{
//const FloatV two = FloatV_From_F32(2.f);
//const FloatV one = FOne();
//const FloatV x = V4GetX(q);
//const FloatV y = V4GetY(q);
//const FloatV z = V4GetZ(q);
//const Vec4V _q = V4Mul(q, two);
//
////const FloatV w = V4GetW(q);
//const Vec4V t0 = V4Mul(_q, x); // 2xx, 2xy, 2xz, 2xw
//const Vec4V t1 = V4Mul(_q, y); // 2xy, 2yy, 2yz, 2yw
//const Vec4V t2 = V4Mul(_q, z); // 2xz, 2yz, 2zz, 2zw
////const Vec4V t3 = V4Mul(_q, w); // 2xw, 2yw, 2zw, 2ww
//const FloatV xx2 = V4GetX(t0);
//const FloatV xy2 = V4GetY(t0);
//const FloatV xz2 = V4GetZ(t0);
//const FloatV xw2 = V4GetW(t0);
//const FloatV yy2 = V4GetY(t1);
//const FloatV yz2 = V4GetZ(t1);
//const FloatV yw2 = V4GetW(t1);
//const FloatV zz2 = V4GetZ(t2);
//const FloatV zw2 = V4GetW(t2);
////const FloatV ww2 = V4GetW(t3);
//const FloatV c00 = FSub(one, FAdd(yy2, zz2));
//const FloatV c01 = FSub(xy2, zw2);
//const FloatV c02 = FAdd(xz2, yw2);
//const FloatV c10 = FAdd(xy2, zw2);
//const FloatV c11 = FSub(one, FAdd(xx2, zz2));
//const FloatV c12 = FSub(yz2, xw2);
//const FloatV c20 = FSub(xz2, yw2);
//const FloatV c21 = FAdd(yz2, xw2);
//const FloatV c22 = FSub(one, FAdd(xx2, yy2));
//const Vec3V c0 = V3Merge(c00, c10, c20);
//const Vec3V c1 = V3Merge(c01, c11, c21);
//const Vec3V c2 = V3Merge(c02, c12, c22);
//return Mat33V(c0, c1, c2);
const FloatV one = FOne();
const FloatV x = V4GetX(q);
const FloatV y = V4GetY(q);
const FloatV z = V4GetZ(q);
const FloatV w = V4GetW(q);
const FloatV x2 = FAdd(x, x);
const FloatV y2 = FAdd(y, y);
const FloatV z2 = FAdd(z, z);
const FloatV xx = FMul(x2,x);
const FloatV yy = FMul(y2,y);
const FloatV zz = FMul(z2,z);
const FloatV xy = FMul(x2,y);
const FloatV xz = FMul(x2,z);
const FloatV xw = FMul(x2,w);
const FloatV yz = FMul(y2,z);
const FloatV yw = FMul(y2,w);
const FloatV zw = FMul(z2,w);
const FloatV v = FSub(one, xx);
const Vec3V column0 = V3Merge(FSub(FSub(one, yy), zz), FAdd(xy, zw), FSub(xz, yw));
const Vec3V column1 = V3Merge(FSub(xy, zw), FSub(v ,zz), FAdd(yz, xw));
const Vec3V column2 = V3Merge(FAdd(xz, yw), FSub(yz, xw), FSub(v, yy));
return Mat33V(column0, column1, column2);
}
NV_FORCE_INLINE QuatV Mat33GetQuatV(const Mat33V& a)
{
const FloatV one = FOne();
const FloatV zero = FZero();
const FloatV half = FLoad(0.5f);
const FloatV two = FLoad(2.f);
const FloatV scale = FLoad(0.25f);
const FloatV a00 = V3GetX(a.col0);
const FloatV a11 = V3GetY(a.col1);
const FloatV a22 = V3GetZ(a.col2);
const FloatV a21 = V3GetZ(a.col1);//row=2, col=1;
const FloatV a12 = V3GetY(a.col2);//row=1, col=2;
const FloatV a02 = V3GetX(a.col2);//row=0, col=2;
const FloatV a20 = V3GetZ(a.col0);//row=2, col=0;
const FloatV a10 = V3GetY(a.col0);//row=1, col=0;
const FloatV a01 = V3GetX(a.col1);//row=0, col=1;
const Vec3V vec0 = V3Merge(a21, a02, a10);
const Vec3V vec1 = V3Merge(a12, a20, a01);
const Vec3V v = V3Sub(vec0, vec1);
const Vec3V g = V3Add(vec0, vec1);
const FloatV trace = FAdd(a00, FAdd(a11, a22));
if(FAllGrtrOrEq(trace, zero))
{
const FloatV h = FSqrt(FAdd(trace, one));
const FloatV w = FMul(half, h);
const FloatV s = FMul(half, FRecip(h));
const Vec3V u = V3Scale(v, s);
return V4SetW(Vec4V_From_Vec3V(u), w);
}
else
{
const FloatV ntrace = FNeg(trace);
const Vec3V d= V3Merge(a00, a11, a22);
const BoolV con0 = BAllTrue3(V3IsGrtrOrEq(V3Splat(a00), d));
const BoolV con1 = BAllTrue3(V3IsGrtrOrEq(V3Splat(a11), d));
const FloatV t0 = FAdd(one, FScaleAdd(a00, two, ntrace));
const FloatV t1 = FAdd(one, FScaleAdd(a11, two, ntrace));
const FloatV t2 = FAdd(one, FScaleAdd(a22, two, ntrace));
const FloatV t = FSel(con0, t0, FSel(con1, t1, t2));
const FloatV h = FMul(two, FSqrt(t));
const FloatV s = FRecip(h);
const FloatV g0 = FMul(scale, h);
const Vec3V vs = V3Scale(v, s);
const Vec3V gs = V3Scale(g, s);
const FloatV gsx = V3GetX(gs);
const FloatV gsy = V3GetY(gs);
const FloatV gsz = V3GetZ(gs);
//vs.x= (a21 - a12)*s; vs.y=(a02 - a20)*s; vs.z=(a10 - a01)*s;
//gs.x= (a21 + a12)*s; gs.y=(a02 + a20)*s; gs.z=(a10 + a01)*s;
const Vec4V v0 = V4Merge(g0, gsz, gsy, V3GetX(vs));
const Vec4V v1 = V4Merge(gsz, g0, gsx, V3GetY(vs));
const Vec4V v2 = V4Merge(gsy, gsx, g0, V3GetZ(vs));
return V4Sel(con0, v0, V4Sel(con1, v1, v2));
}
}
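// Illustrative round-trip check: converting a unit quaternion to a Mat33V and back recovers the same
// rotation (the result may be q or -q, since both represent the same rotation).
/*
const QuatV q0 = QuatNormalize(QuatVLoadXYZW(0.1f, 0.2f, 0.3f, 0.9f));
const Mat33V m = QuatGetMat33V(q0);
const QuatV q1 = Mat33GetQuatV(m);  // equals q0 or QuatNeg(q0) within tolerance
*/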
#endif | 15,562 | C | 30.313883 | 107 | 0.620293 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsBitUtils.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef NV_NSFOUNDATION_NSBITUTILS_H
#define NV_NSFOUNDATION_NSBITUTILS_H
#include "NvIntrinsics.h"
#include "NsIntrinsics.h"
#include "NvAssert.h"
#include "Ns.h"
namespace nvidia
{
namespace shdfnd
{
NV_INLINE uint32_t bitCount(uint32_t v)
{
// from http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
uint32_t const w = v - ((v >> 1) & 0x55555555);
uint32_t const x = (w & 0x33333333) + ((w >> 2) & 0x33333333);
return (((x + (x >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24;
}
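// Worked example: the parallel folding above counts set bits without a loop.
/*
NV_ASSERT(bitCount(0u) == 0);
NV_ASSERT(bitCount(0xF0u) == 4);
NV_ASSERT(bitCount(0xFFFFFFFFu) == 32);
*/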
NV_INLINE bool isPowerOfTwo(uint32_t x)
{
return x != 0 && (x & (x - 1)) == 0;
}
// "Next Largest Power of 2
// Given a binary integer value x, the next largest power of 2 can be computed by a SWAR algorithm
// that recursively "folds" the upper bits into the lower bits. This process yields a bit vector with
// the same most significant 1 as x, but all 1's below it. Adding 1 to that value yields the next
// largest power of 2. For a 32-bit value:"
NV_INLINE uint32_t nextPowerOfTwo(uint32_t x)
{
x |= (x >> 1);
x |= (x >> 2);
x |= (x >> 4);
x |= (x >> 8);
x |= (x >> 16);
return x + 1;
}
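// Worked example: the folding sets every bit below the most significant set bit, so adding 1 yields the
// next power of two; note that a power-of-two input is advanced to the following power of two.
/*
NV_ASSERT(nextPowerOfTwo(0u) == 1);
NV_ASSERT(nextPowerOfTwo(3u) == 4);
NV_ASSERT(nextPowerOfTwo(16u) == 32);
NV_ASSERT(nextPowerOfTwo(17u) == 32);
*/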
/*!
Return the index of the lowest set bit. Not valid for zero arg.
*/
NV_INLINE uint32_t lowestSetBit(uint32_t x)
{
NV_ASSERT(x);
return lowestSetBitUnsafe(x);
}
/*!
Return the index of the highest set bit. Not valid for zero arg.
*/
NV_INLINE uint32_t highestSetBit(uint32_t x)
{
NV_ASSERT(x);
return highestSetBitUnsafe(x);
}
// Helper function to approximate log2 of an integer value
// assumes that the input is actually a power of two.
// todo: replace 2 usages with 'highestSetBit'
NV_INLINE uint32_t ilog2(uint32_t num)
{
for(uint32_t i = 0; i < 32; i++)
{
num >>= 1;
if(num == 0)
return i;
}
NV_ASSERT(0);
return uint32_t(-1);
}
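// Worked example: for power-of-two inputs ilog2 returns the exponent (highestSetBit would give the same
// result without the loop).
/*
NV_ASSERT(ilog2(1u) == 0);
NV_ASSERT(ilog2(8u) == 3);
NV_ASSERT(ilog2(1u << 20) == 20);
*/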
} // namespace shdfnd
} // namespace nvidia
#endif // #ifndef NV_NSFOUNDATION_NSBITUTILS_H
| 3,631 | C | 32.321101 | 101 | 0.699532 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsVecMath.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef NV_PHYSICS_COMMON_VECMATH
#define NV_PHYSICS_COMMON_VECMATH
#include "Ns.h"
#include "NsIntrinsics.h"
#include "NvPreprocessor.h"
#include "NvVec3.h"
#include "NvVec4.h"
#include "NvMat33.h"
#include "NvUnionCast.h"
//We can activate asserts in vectorised functions for testing.
//NEVER submit with asserts activated.
//Only activate asserts for local testing.
#define AOS_ASSERTS_ON 0
//We can opt to use the scalar version of vectorised functions.
//This can catch type safety issues and might even work out more optimal on pc.
//It will also be useful for benchmarking and testing.
//NEVER submit with vector intrinsics deactivated without good reason.
//AM: deactivating SIMD for debug win64 just so autobuild will also exercise
//non-SIMD path, until a dedicated non-SIMD platform such as ARM comes online.
//TODO: dima: reference all platforms with SIMD support here,
//all unknown/experimental cases should better default to NO SIMD.
#if NV_X86 || NV_X64 || NV_WINRT || NV_PS3 || NV_X360 || (NV_LINUX && (NV_X86 || NV_X64)) || (NV_ANDROID && NV_NEON) || NV_XBOXONE
#define COMPILE_VECTOR_INTRINSICS 1 // use SIMD
#else
#define COMPILE_VECTOR_INTRINSICS 0 // do not use SIMD
#endif
#if AOS_ASSERTS_ON
#define VECMATHAOS_ASSERT NV_ASSERT
#else
#define VECMATHAOS_ASSERT(x) {}
#endif
#if COMPILE_VECTOR_INTRINSICS && (NV_X86 || NV_X64) && (NV_LINUX || NV_ANDROID || NV_APPLE || NV_PS4 || (NV_WINRT && NV_NEON))
// only SSE2 compatible platforms should reach this
#include <xmmintrin.h>
#endif
namespace nvidia
{
namespace shdfnd
{
namespace aos
{
//Basic AoS types are
//FloatV - 16-byte aligned representation of float.
//Vec3V - 16-byte aligned representation of NvVec3 stored as (x y z 0).
//Vec4V - 16-byte aligned representation of vector of 4 floats stored as (x y z w).
//BoolV - 16-byte aligned representation of vector of 4 bools stored as (x y z w).
//VecU32V - 16-byte aligned representation of 4 unsigned ints stored as (x y z w).
//VecI32V - 16-byte aligned representation of 4 signed ints stored as (x y z w).
//Mat33V - 16-byte aligned representation of any 3x3 matrix.
//Mat34V - 16-byte aligned representation of transformation matrix (rotation in col1,col2,col3 and translation in col4).
//Mat44V - 16-byte aligned representation of any 4x4 matrix.
#if COMPILE_VECTOR_INTRINSICS
#include "NsAoS.h"
#else
#include "NsVecMathAoSScalar.h"
#endif
//////////////////////////////////////////
//Construct a simd type from a scalar type
//////////////////////////////////////////
//FloatV
//(f,f,f,f)
NV_FORCE_INLINE FloatV FLoad(const float f);
//Vec3V
//(f,f,f,0)
NV_FORCE_INLINE Vec3V V3Load(const float f);
//(f.x,f.y,f.z,0)
NV_FORCE_INLINE Vec3V V3LoadU(const NvVec3& f);
//(f.x,f.y,f.z,0), f must be 16-byte aligned
NV_FORCE_INLINE Vec3V V3LoadA(const NvVec3& f);
//(f.x,f.y,f.z,w_undefined), f must be 16-byte aligned
NV_FORCE_INLINE Vec3V V3LoadUnsafeA(const NvVec3& f);
//(f.x,f.y,f.z,0)
NV_FORCE_INLINE Vec3V V3LoadU(const float* f);
//(f.x,f.y,f.z,0), f must be 16-byte aligned
NV_FORCE_INLINE Vec3V V3LoadA(const float* f);
//Vec4V
//(f,f,f,f)
NV_FORCE_INLINE Vec4V V4Load(const float f);
//(f[0],f[1],f[2],f[3])
NV_FORCE_INLINE Vec4V V4LoadU(const float* const f);
//(f[0],f[1],f[2],f[3]), f must be 16-byte aligned
NV_FORCE_INLINE Vec4V V4LoadA(const float* const f);
//(x,y,z,w)
NV_FORCE_INLINE Vec4V V4LoadXYZW(const float& x, const float& y, const float& z, const float& w);
//BoolV
//(f,f,f,f)
NV_FORCE_INLINE BoolV BLoad(const bool f);
//(f[0],f[1],f[2],f[3])
NV_FORCE_INLINE BoolV BLoad(const bool* const f);
//VecU32V
//(f,f,f,f)
NV_FORCE_INLINE VecU32V U4Load(const uint32_t f);
//(f[0],f[1],f[2],f[3])
NV_FORCE_INLINE VecU32V U4LoadU(const uint32_t* f);
//(f[0],f[1],f[2],f[3]), f must be 16-byte aligned
NV_FORCE_INLINE VecU32V U4LoadA(const uint32_t* f);
//((U32)x, (U32)y, (U32)z, (U32)w)
NV_DEPRECATED NV_FORCE_INLINE VecU32V VecU32VLoadXYZW(uint32_t x, uint32_t y, uint32_t z, uint32_t w);
//VecI32V
//(i,i,i,i)
NV_FORCE_INLINE VecI32V I4Load(const int32_t i);
//(i,i,i,i)
NV_FORCE_INLINE VecI32V I4LoadU(const int32_t* i);
//(i,i,i,i)
NV_FORCE_INLINE VecI32V I4LoadA(const int32_t* i);
//QuatV
//(x = v[0], y = v[1], z = v[2], w = v[3]); the array does not need to be aligned
NV_FORCE_INLINE QuatV QuatVLoadU(const float* v);
//(x = v[0], y = v[1], z = v[2], w = v[3]); the array must be 16-byte aligned, fast load
NV_FORCE_INLINE QuatV QuatVLoadA(const float* v);
//(x, y, z, w)
NV_DEPRECATED NV_FORCE_INLINE QuatV QuatVLoadXYZW(const float x, const float y, const float z, const float w);
//not added to public api
Vec4V Vec4V_From_NvVec3_WUndefined(const NvVec3& v);
///////////////////////////////////////////////////
//Construct a simd type from a different simd type
///////////////////////////////////////////////////
//Vec3V
//(v.x,v.y,v.z,0)
NV_FORCE_INLINE Vec3V Vec3V_From_Vec4V(Vec4V v) ;
//(v.x,v.y,v.z,undefined)
NV_FORCE_INLINE Vec3V Vec3V_From_Vec4V_WUndefined(const Vec4V v);
//Vec4V
//(f.x,f.y,f.z,f.w)
NV_FORCE_INLINE Vec4V Vec4V_From_Vec3V(Vec3V f);
//((float)f.x, (float)f.y, (float)f.z, (float)f.w)
NV_FORCE_INLINE Vec4V Vec4V_From_VecU32V(VecU32V a);
//((float)f.x, (float)f.y, (float)f.z, (float)f.w)
NV_FORCE_INLINE Vec4V Vec4V_From_VecI32V(VecI32V a);
//(*(reinterpret_cast<float*>(&f.x), (reinterpret_cast<float*>(&f.y), (reinterpret_cast<float*>(&f.z), (reinterpret_cast<float*>(&f.w))
NV_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecU32V(VecU32V a);
//(*(reinterpret_cast<float*>(&f.x), (reinterpret_cast<float*>(&f.y), (reinterpret_cast<float*>(&f.z), (reinterpret_cast<float*>(&f.w))
NV_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecI32V(VecI32V a);
//VecU32V
//(*(reinterpret_cast<uint32_t*>(&f.x), (reinterpret_cast<uint32_t*>(&f.y), (reinterpret_cast<uint32_t*>(&f.z), (reinterpret_cast<uint32_t*>(&f.w))
NV_FORCE_INLINE VecU32V VecU32V_ReinterpretFrom_Vec4V(Vec4V a);
//(b[0], b[1], b[2], b[3])
NV_DEPRECATED NV_FORCE_INLINE VecU32V VecU32V_From_BoolV(const BoolVArg b);
//VecI32V
//(*(reinterpret_cast<int32_t*>(&f.x), (reinterpret_cast<int32_t*>(&f.y), (reinterpret_cast<int32_t*>(&f.z), (reinterpret_cast<int32_t*>(&f.w))
NV_FORCE_INLINE VecI32V VecI32V_ReinterpretFrom_Vec4V(Vec4V a);
//((I32)a.x, (I32)a.y, (I32)a.z, (I32)a.w)
NV_DEPRECATED NV_FORCE_INLINE VecI32V VecI32V_From_Vec4V(Vec4V a);
//((I32)b.x, (I32)b.y, (I32)b.z, (I32)b.w)
NV_DEPRECATED NV_FORCE_INLINE VecI32V VecI32V_From_BoolV(const BoolVArg b);
///////////////////////////////////////////////////
//Convert from a simd type back to a scalar type
///////////////////////////////////////////////////
//FloatV
//a.x
NV_DEPRECATED NV_FORCE_INLINE float FStore(const FloatV a);
//a.x
NV_FORCE_INLINE void FStore(const FloatV a, float* NV_RESTRICT f);
//Vec3V
//(a.x,a.y,a.z)
NV_FORCE_INLINE void V3StoreA(const Vec3V a, NvVec3& f);
//(a.x,a.y,a.z)
NV_FORCE_INLINE void V3StoreU(const Vec3V a, NvVec3& f);
//Vec4V
NV_FORCE_INLINE void V4StoreA(const Vec4V a, float* f);
NV_FORCE_INLINE void V4StoreU(const Vec4V a, float* f);
//BoolV
NV_FORCE_INLINE void BStoreA(const BoolV b, uint32_t* f);
//VecU32V
NV_FORCE_INLINE void U4StoreA(const VecU32V uv, uint32_t* u);
//VecI32V
NV_FORCE_INLINE void I4StoreA(const VecI32V iv, int32_t* i);
//////////////////////////////////////////////////////////////////
//Test that simd types have elements in the floating point range
//////////////////////////////////////////////////////////////////
//check that each component is valid, i.e. in the floating point range
NV_FORCE_INLINE bool isFiniteFloatV(const FloatV a);
//check that each component is valid, i.e. in the floating point range
NV_FORCE_INLINE bool isFiniteVec3V(const Vec3V a);
//check that each component is valid, i.e. in the floating point range
NV_FORCE_INLINE bool isFiniteVec4V(const Vec4V a);
//Check that w-component is zero.
NV_FORCE_INLINE bool isValidVec3V(const Vec3V a);
//////////////////////////////////////////////////////////////////
//Tests that all elements of two 16-byte types are completely equivalent.
//Use these tests for unit testing and asserts only.
//////////////////////////////////////////////////////////////////
namespace _VecMathTests
{
NV_FORCE_INLINE bool allElementsEqualFloatV(const FloatV a, const FloatV b);
NV_FORCE_INLINE bool allElementsEqualVec3V(const Vec3V a, const Vec3V b);
NV_FORCE_INLINE bool allElementsEqualVec4V(const Vec4V a, const Vec4V b);
NV_FORCE_INLINE bool allElementsEqualBoolV(const BoolV a, const BoolV b);
NV_FORCE_INLINE bool allElementsEqualVecU32V(const VecU32V a, const VecU32V b);
NV_FORCE_INLINE bool allElementsEqualVecI32V(const VecI32V a, const VecI32V b);
NV_FORCE_INLINE bool allElementsEqualMat33V(const Mat33V& a, const Mat33V& b)
{
return
(
allElementsEqualVec3V(a.col0,b.col0) &&
allElementsEqualVec3V(a.col1,b.col1) &&
allElementsEqualVec3V(a.col2,b.col2)
);
}
NV_FORCE_INLINE bool allElementsEqualMat34V(const Mat34V& a, const Mat34V& b)
{
return
(
allElementsEqualVec3V(a.col0,b.col0) &&
allElementsEqualVec3V(a.col1,b.col1) &&
allElementsEqualVec3V(a.col2,b.col2) &&
allElementsEqualVec3V(a.col3,b.col3)
);
}
NV_FORCE_INLINE bool allElementsEqualMat44V(const Mat44V& a, const Mat44V& b)
{
return
(
allElementsEqualVec4V(a.col0,b.col0) &&
allElementsEqualVec4V(a.col1,b.col1) &&
allElementsEqualVec4V(a.col2,b.col2) &&
allElementsEqualVec4V(a.col3,b.col3)
);
}
NV_FORCE_INLINE bool allElementsNearEqualFloatV(const FloatV a, const FloatV b);
NV_FORCE_INLINE bool allElementsNearEqualVec3V(const Vec3V a, const Vec3V b);
NV_FORCE_INLINE bool allElementsNearEqualVec4V(const Vec4V a, const Vec4V b);
NV_FORCE_INLINE bool allElementsNearEqualMat33V(const Mat33V& a, const Mat33V& b)
{
return
(
allElementsNearEqualVec3V(a.col0,b.col0) &&
allElementsNearEqualVec3V(a.col1,b.col1) &&
allElementsNearEqualVec3V(a.col2,b.col2)
);
}
NV_FORCE_INLINE bool allElementsNearEqualMat34V(const Mat34V& a, const Mat34V& b)
{
return
(
allElementsNearEqualVec3V(a.col0,b.col0) &&
allElementsNearEqualVec3V(a.col1,b.col1) &&
allElementsNearEqualVec3V(a.col2,b.col2) &&
allElementsNearEqualVec3V(a.col3,b.col3)
);
}
NV_FORCE_INLINE bool allElementsNearEqualMat44V(const Mat44V& a, const Mat44V& b)
{
return
(
allElementsNearEqualVec4V(a.col0,b.col0) &&
allElementsNearEqualVec4V(a.col1,b.col1) &&
allElementsNearEqualVec4V(a.col2,b.col2) &&
allElementsNearEqualVec4V(a.col3,b.col3)
);
}
};
//////////////////////////////////////////////////////////////////
//Math operations on FloatV
//////////////////////////////////////////////////////////////////
//(0,0,0,0)
NV_FORCE_INLINE FloatV FZero();
//(1,1,1,1)
NV_FORCE_INLINE FloatV FOne();
//(0.5,0.5,0.5,0.5)
NV_FORCE_INLINE FloatV FHalf();
//(NV_EPS_REAL,NV_EPS_REAL,NV_EPS_REAL,NV_EPS_REAL)
NV_FORCE_INLINE FloatV FEps();
//(NV_MAX_REAL, NV_MAX_REAL, NV_MAX_REAL, NV_MAX_REAL)
NV_FORCE_INLINE FloatV FMax();
//(-NV_MAX_REAL, -NV_MAX_REAL, -NV_MAX_REAL, -NV_MAX_REAL)
NV_FORCE_INLINE FloatV FNegMax();
//(1e-6f, 1e-6f, 1e-6f, 1e-6f)
NV_FORCE_INLINE FloatV FEps6();
//-f (per component)
NV_FORCE_INLINE FloatV FNeg(const FloatV f) ;
//a+b (per component)
NV_FORCE_INLINE FloatV FAdd(const FloatV a, const FloatV b);
//a-b (per component)
NV_FORCE_INLINE FloatV FSub(const FloatV a, const FloatV b) ;
//a*b (per component)
NV_FORCE_INLINE FloatV FMul(const FloatV a, const FloatV b) ;
//a/b (per component)
NV_FORCE_INLINE FloatV FDiv(const FloatV a, const FloatV b);
//a/b (per component)
NV_FORCE_INLINE FloatV FDivFast(const FloatV a, const FloatV b);
//1.0f/a
NV_FORCE_INLINE FloatV FRecip(const FloatV a);
//1.0f/a
NV_FORCE_INLINE FloatV FRecipFast(const FloatV a);
//1.0f/sqrt(a)
NV_FORCE_INLINE FloatV FRsqrt(const FloatV a);
//1.0f/sqrt(a)
NV_FORCE_INLINE FloatV FRsqrtFast(const FloatV a);
//sqrt(a)
NV_FORCE_INLINE FloatV FSqrt(const FloatV a);
//a*b+c
NV_FORCE_INLINE FloatV FScaleAdd(const FloatV a, const FloatV b, const FloatV c);
//c-a*b
NV_FORCE_INLINE FloatV FNegScaleSub(const FloatV a, const FloatV b, const FloatV c);
//fabs(a)
NV_FORCE_INLINE FloatV FAbs(const FloatV a);
//c ? a : b (per component)
NV_FORCE_INLINE FloatV FSel(const BoolV c, const FloatV a, const FloatV b);
//a>b (per component)
NV_FORCE_INLINE BoolV FIsGrtr(const FloatV a, const FloatV b);
//a>=b (per component)
NV_FORCE_INLINE BoolV FIsGrtrOrEq(const FloatV a, const FloatV b);
//a==b (per component)
NV_FORCE_INLINE BoolV FIsEq(const FloatV a, const FloatV b);
//Max(a,b) (per component)
NV_FORCE_INLINE FloatV FMax(const FloatV a, const FloatV b);
//Min(a,b) (per component)
NV_FORCE_INLINE FloatV FMin(const FloatV a, const FloatV b);
//Clamp(a,b) (per component)
NV_FORCE_INLINE FloatV FClamp(const FloatV a, const FloatV minV, const FloatV maxV);
//a*b+c
NV_DEPRECATED NV_FORCE_INLINE FloatV FMulAdd(const FloatV a, const FloatV b, const FloatV c) { return FScaleAdd(a,b,c); }
//c-a*b
NV_DEPRECATED NV_FORCE_INLINE FloatV FNegMulSub(const FloatV a, const FloatV b, const FloatV c) { return FNegScaleSub(a,b,c); }
//a.x>b.x
NV_FORCE_INLINE uint32_t FAllGrtr(const FloatV a, const FloatV b);
//a.x>=b.x
NV_FORCE_INLINE uint32_t FAllGrtrOrEq(const FloatV a, const FloatV b);
//a.x==b.x
NV_FORCE_INLINE uint32_t FAllEq(const FloatV a, const FloatV b);
//a<min || a>max
NV_FORCE_INLINE uint32_t FOutOfBounds(const FloatV a, const FloatV min, const FloatV max);
//a>=min && a<=max
NV_FORCE_INLINE uint32_t FInBounds(const FloatV a, const FloatV min, const FloatV max);
//a<-bounds || a>bounds
NV_FORCE_INLINE uint32_t FOutOfBounds(const FloatV a, const FloatV bounds);
//a>=-bounds && a<=bounds
NV_FORCE_INLINE uint32_t FInBounds(const FloatV a, const FloatV bounds);
//round float a to the near int
NV_FORCE_INLINE FloatV FRound(const FloatV a);
//calculate the sin of float a
NV_FORCE_INLINE FloatV FSin(const FloatV a);
//calculate the cos of float b
NV_FORCE_INLINE FloatV FCos(const FloatV a);
//////////////////////////////////////////////////////////////////
//Math operations on Vec3V
//////////////////////////////////////////////////////////////////
//(f,f,f,f)
NV_FORCE_INLINE Vec3V V3Splat(const FloatV f);
//(x,y,z)
NV_FORCE_INLINE Vec3V V3Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z);
//(1,0,0,0)
NV_FORCE_INLINE Vec3V V3UnitX();
//(0,1,0,0)
NV_FORCE_INLINE Vec3V V3UnitY();
//(0,0,1,0)
NV_FORCE_INLINE Vec3V V3UnitZ();
//(f.x,f.x,f.x,f.x)
NV_FORCE_INLINE FloatV V3GetX(const Vec3V f);
//(f.y,f.y,f.y,f.y)
NV_FORCE_INLINE FloatV V3GetY(const Vec3V f);
//(f.z,f.z,f.z,f.z)
NV_FORCE_INLINE FloatV V3GetZ(const Vec3V f);
//(f,v.y,v.z,v.w)
NV_FORCE_INLINE Vec3V V3SetX(const Vec3V v, const FloatV f);
//(v.x,f,v.z,v.w)
NV_FORCE_INLINE Vec3V V3SetY(const Vec3V v, const FloatV f);
//(v.x,v.y,f,v.w)
NV_FORCE_INLINE Vec3V V3SetZ(const Vec3V v, const FloatV f);
//v.x=f
NV_FORCE_INLINE void V3WriteX(Vec3V& v, const float f);
//v.y=f
NV_FORCE_INLINE void V3WriteY(Vec3V& v, const float f);
//v.z=f
NV_FORCE_INLINE void V3WriteZ(Vec3V& v, const float f);
//v.x=f.x, v.y=f.y, v.z=f.z
NV_FORCE_INLINE void V3WriteXYZ(Vec3V& v, const NvVec3& f);
//return v.x
NV_FORCE_INLINE float V3ReadX(const Vec3V& v);
//return v.y
NV_FORCE_INLINE float V3ReadY(const Vec3V& v);
//return v.y
NV_FORCE_INLINE float V3ReadZ(const Vec3V& v);
//return (v.x,v.y,v.z)
NV_FORCE_INLINE const NvVec3& V3ReadXYZ(const Vec3V& v);
//(a.x, b.x, c.x)
NV_FORCE_INLINE Vec3V V3ColX(const Vec3V a, const Vec3V b, const Vec3V c);
//(a.y, b.y, c.y)
NV_FORCE_INLINE Vec3V V3ColY(const Vec3V a, const Vec3V b, const Vec3V c);
//(a.z, b.z, c.z)
NV_FORCE_INLINE Vec3V V3ColZ(const Vec3V a, const Vec3V b, const Vec3V c);
//(0,0,0,0)
NV_FORCE_INLINE Vec3V V3Zero();
//(1,1,1,1)
NV_FORCE_INLINE Vec3V V3One();
//(NV_EPS_REAL,NV_EPS_REAL,NV_EPS_REAL,NV_EPS_REAL)
NV_FORCE_INLINE Vec3V V3Eps();
//-c (per component)
NV_FORCE_INLINE Vec3V V3Neg(const Vec3V c);
//a+b (per component)
NV_FORCE_INLINE Vec3V V3Add(const Vec3V a, const Vec3V b);
//a-b (per component)
NV_FORCE_INLINE Vec3V V3Sub(const Vec3V a, const Vec3V b);
//a*b (per component)
NV_FORCE_INLINE Vec3V V3Scale(const Vec3V a, const FloatV b);
//a*b (per component)
NV_FORCE_INLINE Vec3V V3Mul(const Vec3V a, const Vec3V b);
//a/b (per component)
NV_FORCE_INLINE Vec3V V3ScaleInv(const Vec3V a, const FloatV b);
//a/b (per component)
NV_FORCE_INLINE Vec3V V3Div(const Vec3V a, const Vec3V b);
//a/b (per component)
NV_FORCE_INLINE Vec3V V3ScaleInvFast(const Vec3V a, const FloatV b);
//a/b (per component)
NV_FORCE_INLINE Vec3V V3DivFast(const Vec3V a, const Vec3V b);
//1.0f/a
NV_FORCE_INLINE Vec3V V3Recip(const Vec3V a);
//1.0f/a
NV_FORCE_INLINE Vec3V V3RecipFast(const Vec3V a);
//1.0f/sqrt(a)
NV_FORCE_INLINE Vec3V V3Rsqrt(const Vec3V a);
//1.0f/sqrt(a)
NV_FORCE_INLINE Vec3V V3RsqrtFast(const Vec3V a);
//a*b+c
NV_FORCE_INLINE Vec3V V3ScaleAdd(const Vec3V a, const FloatV b, const Vec3V c);
//c-a*b
NV_FORCE_INLINE Vec3V V3NegScaleSub(const Vec3V a, const FloatV b, const Vec3V c);
//a*b+c
NV_FORCE_INLINE Vec3V V3MulAdd(const Vec3V a, const Vec3V b, const Vec3V c);
//c-a*b
NV_FORCE_INLINE Vec3V V3NegMulSub(const Vec3V a, const Vec3V b, const Vec3V c);
//fabs(a)
NV_FORCE_INLINE Vec3V V3Abs(const Vec3V a);
//a.b
NV_FORCE_INLINE FloatV V3Dot(const Vec3V a, const Vec3V b);
//aXb
NV_FORCE_INLINE Vec3V V3Cross(const Vec3V a, const Vec3V b);
//|a.a|^1/2
NV_FORCE_INLINE FloatV V3Length(const Vec3V a);
//a.a
NV_FORCE_INLINE FloatV V3LengthSq(const Vec3V a);
//a*|a.a|^-1/2
NV_FORCE_INLINE Vec3V V3Normalize(const Vec3V a);
//a.a>0 ? a*|a.a|^-1/2 : (0,0,0,0)
NV_FORCE_INLINE Vec3V V3NormalizeSafe(const Vec3V a);
//a.x + a.y +a.z
NV_FORCE_INLINE FloatV V3SumElems(const Vec3V a);
//c ? a : b (per component)
NV_FORCE_INLINE Vec3V V3Sel(const BoolV c, const Vec3V a, const Vec3V b);
//a>b (per component)
NV_FORCE_INLINE BoolV V3IsGrtr(const Vec3V a, const Vec3V b);
//a>=b (per component)
NV_FORCE_INLINE BoolV V3IsGrtrOrEq(const Vec3V a, const Vec3V b);
//a==b (per component)
NV_FORCE_INLINE BoolV V3IsEq(const Vec3V a, const Vec3V b);
//Max(a,b) (per component)
NV_FORCE_INLINE Vec3V V3Max(const Vec3V a, const Vec3V b);
//Min(a,b) (per component)
NV_FORCE_INLINE Vec3V V3Min(const Vec3V a, const Vec3V b);
//Extract the maximum value from a
NV_FORCE_INLINE FloatV V3ExtractMax(const Vec3V a);
//Extract the minimum value from a
NV_FORCE_INLINE FloatV V3ExtractMin(const Vec3V a);
//Clamp(a,b) (per component)
NV_FORCE_INLINE Vec3V V3Clamp(const Vec3V a, const Vec3V minV, const Vec3V maxV);
//Extract the sign for each component
NV_FORCE_INLINE Vec3V V3Sign(const Vec3V a);
//Test all components.
//(a.x>b.x && a.y>b.y && a.z>b.z)
NV_FORCE_INLINE uint32_t V3AllGrtr(const Vec3V a, const Vec3V b);
//(a.x>=b.x && a.y>=b.y && a.z>=b.z)
NV_FORCE_INLINE uint32_t V3AllGrtrOrEq(const Vec3V a, const Vec3V b);
//(a.x==b.x && a.y==b.y && a.z==b.z)
NV_FORCE_INLINE uint32_t V3AllEq(const Vec3V a, const Vec3V b);
//a.x<min.x || a.y<min.y || a.z<min.z || a.x>max.x || a.y>max.y || a.z>max.z
NV_FORCE_INLINE uint32_t V3OutOfBounds(const Vec3V a, const Vec3V min, const Vec3V max);
//a.x>=min.x && a.y>=min.y && a.z>=min.z && a.x<=max.x && a.y<=max.y && a.z<=max.z
NV_FORCE_INLINE uint32_t V3InBounds(const Vec3V a, const Vec3V min, const Vec3V max);
//a.x<-bounds.x || a.y<-bounds.y || a.z<-bounds.z || a.x>bounds.x || a.y>bounds.y || a.z>bounds.z
NV_FORCE_INLINE uint32_t V3OutOfBounds(const Vec3V a, const Vec3V bounds);
//a.x>=-bounds.x && a.y>=-bounds.y && a.z>=-bounds.z && a.x<=bounds.x && a.y<=bounds.y && a.z<=bounds.z
NV_FORCE_INLINE uint32_t V3InBounds(const Vec3V a, const Vec3V bounds);
//(floor(a.x + 0.5f), floor(a.y + 0.5f), floor(a.z + 0.5f))
NV_FORCE_INLINE Vec3V V3Round(const Vec3V a);
//(sinf(a.x), sinf(a.y), sinf(a.z))
NV_FORCE_INLINE Vec3V V3Sin(const Vec3V a);
//(cosf(a.x), cosf(a.y), cosf(a.z))
NV_FORCE_INLINE Vec3V V3Cos(const Vec3V a);
//(a.y,a.z,a.z)
NV_FORCE_INLINE Vec3V V3PermYZZ(const Vec3V a);
//(a.x,a.y,a.x)
NV_FORCE_INLINE Vec3V V3PermXYX(const Vec3V a);
//(a.y,a.z,a.x)
NV_FORCE_INLINE Vec3V V3PermYZX(const Vec3V a);
//(a.z, a.x, a.y)
NV_FORCE_INLINE Vec3V V3PermZXY(const Vec3V a);
//(a.z,a.z,a.y)
NV_FORCE_INLINE Vec3V V3PermZZY(const Vec3V a);
//(a.y,a.x,a.x)
NV_FORCE_INLINE Vec3V V3PermYXX(const Vec3V a);
//(0, v1.z, v0.y)
NV_FORCE_INLINE Vec3V V3Perm_Zero_1Z_0Y(const Vec3V v0, const Vec3V v1);
//(v0.z, 0, v1.x)
NV_FORCE_INLINE Vec3V V3Perm_0Z_Zero_1X(const Vec3V v0, const Vec3V v1);
//(v1.y, v0.x, 0)
NV_FORCE_INLINE Vec3V V3Perm_1Y_0X_Zero(const Vec3V v0, const Vec3V v1);
//////////////////////////////////////////////////////////////////
//Math operations on Vec4V
//////////////////////////////////////////////////////////////////
//(f,f,f,f)
NV_FORCE_INLINE Vec4V V4Splat(const FloatV f);
//(f[0],f[1],f[2],f[3])
NV_FORCE_INLINE Vec4V V4Merge(const FloatV* const f);
//(x,y,z,w)
NV_FORCE_INLINE Vec4V V4Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z, const FloatVArg w);
//(x.w, y.w, z.w, w.w)
NV_FORCE_INLINE Vec4V V4MergeW(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w);
//(x.z, y.z, z.z, w.z)
NV_FORCE_INLINE Vec4V V4MergeZ(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w);
//(x.y, y.y, z.y, w.y)
NV_FORCE_INLINE Vec4V V4MergeY(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w);
//(x.x, y.x, z.x, w.x)
NV_FORCE_INLINE Vec4V V4MergeX(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w);
//(a.x, b.x, a.y, b.y)
NV_FORCE_INLINE Vec4V V4UnpackXY(const Vec4VArg a, const Vec4VArg b);
//(a.z, b.z, a.w, b.w)
NV_FORCE_INLINE Vec4V V4UnpackZW(const Vec4VArg a, const Vec4VArg b);
//(1,0,0,0)
NV_FORCE_INLINE Vec4V V4UnitX();
//(0,1,0,0)
NV_FORCE_INLINE Vec4V V4UnitY();
//(0,0,1,0)
NV_FORCE_INLINE Vec4V V4UnitZ();
//(0,0,0,1)
NV_FORCE_INLINE Vec4V V4UnitW();
//(f.x,f.x,f.x,f.x)
NV_FORCE_INLINE FloatV V4GetX(const Vec4V f);
//(f.y,f.y,f.y,f.y)
NV_FORCE_INLINE FloatV V4GetY(const Vec4V f);
//(f.z,f.z,f.z,f.z)
NV_FORCE_INLINE FloatV V4GetZ(const Vec4V f);
//(f.w,f.w,f.w,f.w)
NV_FORCE_INLINE FloatV V4GetW(const Vec4V f);
//(f,v.y,v.z,v.w)
NV_FORCE_INLINE Vec4V V4SetX(const Vec4V v, const FloatV f);
//(v.x,f,v.z,v.w)
NV_FORCE_INLINE Vec4V V4SetY(const Vec4V v, const FloatV f);
//(v.x,v.y,f,v.w)
NV_FORCE_INLINE Vec4V V4SetZ(const Vec4V v, const FloatV f);
//(v.x,v.y,v.z,f)
NV_FORCE_INLINE Vec4V V4SetW(const Vec4V v, const FloatV f);
//(v.x,v.y,v.z,0)
NV_FORCE_INLINE Vec4V V4ClearW(const Vec4V v);
//(a[elementIndex], a[elementIndex], a[elementIndex], a[elementIndex])
template<int elementIndex> NV_FORCE_INLINE Vec4V V4SplatElement(Vec4V a);
//v.x=f
NV_FORCE_INLINE void V4WriteX(Vec4V& v, const float f);
//v.y=f
NV_FORCE_INLINE void V4WriteY(Vec4V& v, const float f);
//v.z=f
NV_FORCE_INLINE void V4WriteZ(Vec4V& v, const float f);
//v.w=f
NV_FORCE_INLINE void V4WriteW(Vec4V& v, const float f);
//v.x=f.x, v.y=f.y, v.z=f.z
NV_FORCE_INLINE void V4WriteXYZ(Vec4V& v, const NvVec3& f);
//return v.x
NV_FORCE_INLINE float V4ReadX(const Vec4V& v);
//return v.y
NV_FORCE_INLINE float V4ReadY(const Vec4V& v);
//return v.z
NV_FORCE_INLINE float V4ReadZ(const Vec4V& v);
//return v.w
NV_FORCE_INLINE float V4ReadW(const Vec4V& v);
//return (v.x,v.y,v.z)
NV_FORCE_INLINE const NvVec3& V4ReadXYZ(const Vec4V& v);
//(0,0,0,0)
NV_FORCE_INLINE Vec4V V4Zero();
//(1,1,1,1)
NV_FORCE_INLINE Vec4V V4One();
//(NV_EPS_REAL,NV_EPS_REAL,NV_EPS_REAL,NV_EPS_REAL)
NV_FORCE_INLINE Vec4V V4Eps();
//-c (per component)
NV_FORCE_INLINE Vec4V V4Neg(const Vec4V c);
//a+b (per component)
NV_FORCE_INLINE Vec4V V4Add(const Vec4V a, const Vec4V b);
//a-b (per component)
NV_FORCE_INLINE Vec4V V4Sub(const Vec4V a, const Vec4V b);
//a*b (per component)
NV_FORCE_INLINE Vec4V V4Scale(const Vec4V a, const FloatV b);
//a*b (per component)
NV_FORCE_INLINE Vec4V V4Mul(const Vec4V a, const Vec4V b);
//a/b (per component)
NV_FORCE_INLINE Vec4V V4ScaleInv(const Vec4V a, const FloatV b);
//a/b (per component)
NV_FORCE_INLINE Vec4V V4Div(const Vec4V a, const Vec4V b);
//a/b (per component)
NV_FORCE_INLINE Vec4V V4ScaleInvFast(const Vec4V a, const FloatV b);
//a/b (per component)
NV_FORCE_INLINE Vec4V V4DivFast(const Vec4V a, const Vec4V b);
//1.0f/a
NV_FORCE_INLINE Vec4V V4Recip(const Vec4V a);
//1.0f/a
NV_FORCE_INLINE Vec4V V4RecipFast(const Vec4V a);
//1.0f/sqrt(a)
NV_FORCE_INLINE Vec4V V4Rsqrt(const Vec4V a);
//1.0f/sqrt(a)
NV_FORCE_INLINE Vec4V V4RsqrtFast(const Vec4V a);
//a*b+c
NV_FORCE_INLINE Vec4V V4ScaleAdd(const Vec4V a, const FloatV b, const Vec4V c);
//c-a*b
NV_FORCE_INLINE Vec4V V4NegScaleSub(const Vec4V a, const FloatV b, const Vec4V c);
//a*b+c
NV_FORCE_INLINE Vec4V V4MulAdd(const Vec4V a, const Vec4V b, const Vec4V c);
//c-a*b
NV_FORCE_INLINE Vec4V V4NegMulSub(const Vec4V a, const Vec4V b, const Vec4V c);
//fabs(a)
NV_FORCE_INLINE Vec4V V4Abs(const Vec4V a);
//bitwise a & ~b
NV_FORCE_INLINE Vec4V V4Andc(const Vec4V a, const VecU32V b);
//a.b
NV_FORCE_INLINE FloatV V4Dot(const Vec4V a, const Vec4V b);
//|a.a|^1/2
NV_FORCE_INLINE FloatV V4Length(const Vec4V a);
//a.a
NV_FORCE_INLINE FloatV V4LengthSq(const Vec4V a);
//a*|a.a|^-1/2
NV_FORCE_INLINE Vec4V V4Normalize(const Vec4V a);
//a.a>0 ? a*|a.a|^-1/2 : (0,0,0,0)
NV_FORCE_INLINE Vec4V V4NormalizeSafe(const Vec4V a);
//a*|a.a|^-1/2
NV_FORCE_INLINE Vec4V V4NormalizeFast(const Vec4V a);
//c ? a : b (per component)
NV_FORCE_INLINE Vec4V V4Sel(const BoolV c, const Vec4V a, const Vec4V b);
//a>b (per component)
NV_FORCE_INLINE BoolV V4IsGrtr(const Vec4V a, const Vec4V b);
//a>=b (per component)
NV_FORCE_INLINE BoolV V4IsGrtrOrEq(const Vec4V a, const Vec4V b);
//a==b (per component)
NV_FORCE_INLINE BoolV V4IsEq(const Vec4V a, const Vec4V b);
//Max(a,b) (per component)
NV_FORCE_INLINE Vec4V V4Max(const Vec4V a, const Vec4V b);
//Min(a,b) (per component)
NV_FORCE_INLINE Vec4V V4Min(const Vec4V a, const Vec4V b);
//Get the maximum component from a
NV_FORCE_INLINE FloatV V4ExtractMax(const Vec4V a);
//Get the minimum component from a
NV_FORCE_INLINE FloatV V4ExtractMin(const Vec4V a);
//Clamp(a,b) (per component)
NV_FORCE_INLINE Vec4V V4Clamp(const Vec4V a, const Vec4V minV, const Vec4V maxV);
//return 1 if each component of a is greater than the corresponding component of b
NV_FORCE_INLINE uint32_t V4AllGrtr(const Vec4V a, const Vec4V b);
//return 1 if each component of a is greater than or equal to the corresponding component of b
NV_FORCE_INLINE uint32_t V4AllGrtrOrEq(const Vec4V a, const Vec4V b);
//return 1 if each component of a is equal to the corresponding component of b
NV_FORCE_INLINE uint32_t V4AllEq(const Vec4V a, const Vec4V b);
// round(a)(per component)
NV_FORCE_INLINE Vec4V V4Round(const Vec4V a);
// sin(a) (per component)
NV_FORCE_INLINE Vec4V V4Sin(const Vec4V a);
//cos(a) (per component)
NV_FORCE_INLINE Vec4V V4Cos(const Vec4V a);
//Permute v into a new vec4v with YXWZ format
NV_FORCE_INLINE Vec4V V4Perm_YXWZ(const Vec4V v);
//Permute v into a new vec4v with XZXZ format
NV_FORCE_INLINE Vec4V V4Perm_XZXZ(const Vec4V v);
//Permute v into a new vec4v with YWYW format
NV_FORCE_INLINE Vec4V V4Perm_YWYW(const Vec4V v);
//Permute v into a new vec4v with format {a[x], a[y], a[z], a[w]}
//V4Perm<1,3,1,3> is equal to V4Perm_YWYW
//V4Perm<0,2,0,2> is equal to V4Perm_XZXZ
//V4Perm<1,0,3,2> is equal to V4Perm_YXWZ
template<uint8_t x, uint8_t y, uint8_t z, uint8_t w> NV_FORCE_INLINE Vec4V V4Perm(const Vec4V a);
//q = cos(a/2) + u*sin(a/2)
NV_FORCE_INLINE QuatV QuatV_From_RotationAxisAngle(const Vec3V u, const FloatV a);
// convert q to a unit quaternion
NV_FORCE_INLINE QuatV QuatNormalize(const QuatV q);
//|q.q|^1/2
NV_FORCE_INLINE FloatV QuatLength(const QuatV q);
//q.q
NV_FORCE_INLINE FloatV QuatLengthSq(const QuatV q);
//a.b
NV_FORCE_INLINE FloatV QuatDot(const QuatV a, const QuatV b);
//(-q.x, -q.y, -q.z, q.w)
NV_FORCE_INLINE QuatV QuatConjugate(const QuatV q);
//(q.x, q.y, q.z)
NV_FORCE_INLINE Vec3V QuatGetImaginaryPart(const QuatV q);
//convert quaternion to a 3x3 matrix
NV_FORCE_INLINE Mat33V QuatGetMat33V(const QuatVArg q);
//convert a 3x3 matrix to a quaternion
NV_FORCE_INLINE QuatV Mat33GetQuatV(const Mat33V& a);
// brief computes rotation of x-axis
NV_FORCE_INLINE Vec3V QuatGetBasisVector0(const QuatV q);
// brief computes rotation of y-axis
NV_FORCE_INLINE Vec3V QuatGetBasisVector1(const QuatV q);
// brief computes rotation of z-axis
NV_FORCE_INLINE Vec3V QuatGetBasisVector2(const QuatV q);
// rotate v by the quaternion q
NV_FORCE_INLINE Vec3V QuatRotate(const QuatV q, const Vec3V v);
// rotate v by the conjugate (inverse) of the unit quaternion q
NV_FORCE_INLINE Vec3V QuatRotateInv(const QuatV q, const Vec3V v);
// quaternion multiplication
NV_FORCE_INLINE QuatV QuatMul(const QuatV a, const QuatV b);
// quaternion add
NV_FORCE_INLINE QuatV QuatAdd(const QuatV a, const QuatV b);
// (-q.x, -q.y, -q.z, -q.w)
NV_FORCE_INLINE QuatV QuatNeg(const QuatV q);
// (a.x - b.x, a.y-b.y, a.z-b.z, a.w-b.w )
NV_FORCE_INLINE QuatV QuatSub(const QuatV a, const QuatV b);
// (a.x*b, a.y*b, a.z*b, a.w*b)
NV_FORCE_INLINE QuatV QuatScale(const QuatV a, const FloatV b);
// (x = v[0], y = v[1], z = v[2], w =v[3])
NV_FORCE_INLINE QuatV QuatMerge(const FloatV* const v);
// (x = v[0], y = v[1], z = v[2], w =v[3])
NV_FORCE_INLINE QuatV QuatMerge(const FloatVArg x, const FloatVArg y, const FloatVArg z, const FloatVArg w);
// (x = 0.f, y = 0.f, z = 0.f, w = 1.f)
NV_FORCE_INLINE QuatV QuatIdentity();
//check that each component is finite
NV_FORCE_INLINE bool isFiniteQuatV(const QuatV q);
//check that the quaternion is finite and unit length (tight tolerance)
NV_FORCE_INLINE bool isValidQuatV(const QuatV q);
//check that the quaternion is finite and approximately unit length (looser tolerance)
NV_FORCE_INLINE bool isSaneQuatV(const QuatV q);
//transpose 4x4 matrix represented by its columns
NV_FORCE_INLINE void V4Transpose(Vec4V& col0, Vec4V& col1, Vec4V& col2, Vec4V& col3);
//Math operations on 16-byte aligned booleans.
//x=false y=false z=false w=false
NV_FORCE_INLINE BoolV BFFFF();
//x=false y=false z=false w=true
NV_FORCE_INLINE BoolV BFFFT();
//x=false y=false z=true w=false
NV_FORCE_INLINE BoolV BFFTF();
//x=false y=false z=true w=true
NV_FORCE_INLINE BoolV BFFTT();
//x=false y=true z=false w=false
NV_FORCE_INLINE BoolV BFTFF();
//x=false y=true z=false w=true
NV_FORCE_INLINE BoolV BFTFT();
//x=false y=true z=true w=false
NV_FORCE_INLINE BoolV BFTTF();
//x=false y=true z=true w=true
NV_FORCE_INLINE BoolV BFTTT();
//x=true y=false z=false w=false
NV_FORCE_INLINE BoolV BTFFF();
//x=true y=false z=false w=true
NV_FORCE_INLINE BoolV BTFFT();
//x=true y=false z=true w=false
NV_FORCE_INLINE BoolV BTFTF();
//x=true y=false z=true w=true
NV_FORCE_INLINE BoolV BTFTT();
//x=true y=true z=false w=false
NV_FORCE_INLINE BoolV BTTFF();
//x=true y=true z=false w=true
NV_FORCE_INLINE BoolV BTTFT();
//x=true y=true z=true w=false
NV_FORCE_INLINE BoolV BTTTF();
//x=true y=true z=true w=true
NV_FORCE_INLINE BoolV BTTTT();
//x=false y=false z=false w=true
NV_FORCE_INLINE BoolV BWMask();
//x=true y=false z=false w=false
NV_FORCE_INLINE BoolV BXMask();
//x=false y=true z=false w=false
NV_FORCE_INLINE BoolV BYMask();
//x=false y=false z=true w=false
NV_FORCE_INLINE BoolV BZMask();
//get x component
NV_FORCE_INLINE BoolV BGetX(const BoolV f);
//get y component
NV_FORCE_INLINE BoolV BGetY(const BoolV f);
//get z component
NV_FORCE_INLINE BoolV BGetZ(const BoolV f);
//get w component
NV_FORCE_INLINE BoolV BGetW(const BoolV f);
//Use elementIndex to splat xxxx or yyyy or zzzz or wwww
template<int elementIndex> NV_FORCE_INLINE BoolV BSplatElement(Vec4V a);
//component-wise && (AND)
NV_FORCE_INLINE BoolV BAnd(const BoolV a, const BoolV b);
//component-wise || (OR)
NV_FORCE_INLINE BoolV BOr(const BoolV a, const BoolV b);
//component-wise not
NV_FORCE_INLINE BoolV BNot(const BoolV a);
//if all four components are true, return true, otherwise return false
NV_FORCE_INLINE BoolV BAllTrue4(const BoolV a);
//if any of the four components is true, return true, otherwise return false
NV_FORCE_INLINE BoolV BAnyTrue4(const BoolV a);
//if all three (0, 1, 2) components are true, return true, otherwise return false
NV_FORCE_INLINE BoolV BAllTrue3(const BoolV a);
//if any of the three (0, 1, 2) components is true, return true, otherwise return false
NV_FORCE_INLINE BoolV BAnyTrue3(const BoolV a);
//Return 1 if all components equal, zero otherwise.
NV_FORCE_INLINE uint32_t BAllEq(const BoolV a, const BoolV b);
// Specialized/faster BAllEq function for b==TTTT
NV_FORCE_INLINE uint32_t BAllEqTTTT(const BoolV a);
// Specialized/faster BAllEq function for b==FFFF
NV_FORCE_INLINE uint32_t BAllEqFFFF(const BoolV a);
/// Get BoolV as bits set in a uint32_t. A bit in the output is set if the element is 'true' in the input.
/// There is a bit for each element in a, with element 0's value held in bit 0, element 1's in bit 1, and so forth.
/// If nothing is true in the input it will return 0, and if all are true it will return 0xf.
/// NOTE! The performance of this function varies considerably by platform, thus it is recommended to use it only
/// where your algorithm really needs a BoolV in an integer variable.
NV_FORCE_INLINE uint32_t BGetBitMask(const BoolV a);
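// Illustrative example: for a BoolV with (x=true, y=false, z=true, w=true) the mask is 0xD
// (bit 0 = x, bit 1 = y, bit 2 = z, bit 3 = w).
/*
const uint32_t mask = BGetBitMask(BTFTT()); // 0x0000000D
*/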
//VecI32V stuff
NV_FORCE_INLINE VecI32V VecI32V_Zero();
NV_FORCE_INLINE VecI32V VecI32V_One();
NV_FORCE_INLINE VecI32V VecI32V_Two();
NV_FORCE_INLINE VecI32V VecI32V_MinusOne();
//Compute a shift parameter for VecI32V_LeftShift and VecI32V_RightShift
//Each element of shift must be identical, i.e. the vector must have the form {count, count, count, count} with count>=0
NV_FORCE_INLINE VecShiftV VecI32V_PrepareShift(const VecI32VArg shift);
//Shift each element of a leftwards by the same amount
//Compute shift with VecI32V_PrepareShift
//{a.x<<shift[0], a.y<<shift[0], a.z<<shift[0], a.w<<shift[0]}
NV_FORCE_INLINE VecI32V VecI32V_LeftShift(const VecI32VArg a, const VecShiftVArg shift);
//Shift each element of a rightwards by the same amount
//Compute shift with VecI32V_PrepareShift
//{a.x>>shift[0], a.y>>shift[0], a.z>>shift[0], a.w>>shift[0]}
NV_FORCE_INLINE VecI32V VecI32V_RightShift(const VecI32VArg a, const VecShiftVArg shift);
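// Illustrative usage sketch: every lane is shifted by the same amount, so the count is prepared once
// and reused for both shift directions.
/*
const VecI32V v = I4Load(8);
const VecShiftV shift = VecI32V_PrepareShift(I4Load(2));
const VecI32V left = VecI32V_LeftShift(v, shift);   // (32, 32, 32, 32)
const VecI32V right = VecI32V_RightShift(v, shift); // (2, 2, 2, 2)
*/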
NV_FORCE_INLINE VecI32V VecI32V_Add(const VecI32VArg a, const VecI32VArg b);
NV_FORCE_INLINE VecI32V VecI32V_Or(const VecI32VArg a, const VecI32VArg b);
NV_FORCE_INLINE VecI32V VecI32V_GetX(const VecI32VArg a);
NV_FORCE_INLINE VecI32V VecI32V_GetY(const VecI32VArg a);
NV_FORCE_INLINE VecI32V VecI32V_GetZ(const VecI32VArg a);
NV_FORCE_INLINE VecI32V VecI32V_GetW(const VecI32VArg a);
NV_FORCE_INLINE VecI32V VecI32V_Sub(const VecI32VArg a, const VecI32VArg b);
NV_FORCE_INLINE BoolV VecI32V_IsGrtr(const VecI32VArg a, const VecI32VArg b);
NV_FORCE_INLINE BoolV VecI32V_IsEq(const VecI32VArg a, const VecI32VArg b);
NV_FORCE_INLINE VecI32V V4I32Sel(const BoolV c, const VecI32V a, const VecI32V b);
//VecU32V stuff
NV_FORCE_INLINE VecU32V U4Zero();
NV_FORCE_INLINE VecU32V U4One();
NV_FORCE_INLINE VecU32V U4Two();
NV_FORCE_INLINE BoolV V4IsEqU32(const VecU32V a, const VecU32V b);
NV_FORCE_INLINE VecU32V V4U32Sel(const BoolV c, const VecU32V a, const VecU32V b);
NV_FORCE_INLINE VecU32V V4U32or(VecU32V a, VecU32V b);
NV_FORCE_INLINE VecU32V V4U32and(VecU32V a, VecU32V b);
NV_FORCE_INLINE VecU32V V4U32Andc(VecU32V a, VecU32V b);
//VecU32 - why does this not return a bool?
NV_FORCE_INLINE VecU32V V4IsGrtrV32u(const Vec4V a, const Vec4V b);
//Math operations on 16-byte aligned Mat33s (represents any 3x3 matrix)
//a*b
NV_FORCE_INLINE Vec3V M33MulV3(const Mat33V& a, const Vec3V b);
//A*x + b
NV_FORCE_INLINE Vec3V M33MulV3AddV3(const Mat33V& A, const Vec3V b, const Vec3V c);
//transpose(a) * b
NV_FORCE_INLINE Vec3V M33TrnspsMulV3(const Mat33V& a, const Vec3V b);
//a*b
NV_FORCE_INLINE Mat33V M33MulM33(const Mat33V& a, const Mat33V& b);
//a+b
NV_FORCE_INLINE Mat33V M33Add(const Mat33V& a, const Mat33V& b);
//a-b
NV_FORCE_INLINE Mat33V M33Sub(const Mat33V& a, const Mat33V& b);
//-a
NV_FORCE_INLINE Mat33V M33Neg(const Mat33V& a);
//absolute value of the matrix
NV_FORCE_INLINE Mat33V M33Abs(const Mat33V& a);
//inverse mat
NV_FORCE_INLINE Mat33V M33Inverse(const Mat33V& a);
//transpose(a)
NV_FORCE_INLINE Mat33V M33Trnsps(const Mat33V& a);
//create an identity matrix
NV_FORCE_INLINE Mat33V M33Identity();
//create a 3x3 matrix whose diagonal elements are taken from the given Vec3
NV_FORCE_INLINE Mat33V M33Diagonal(const Vec3VArg);
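//Illustrative usage sketch (not part of the API): composing the Mat33V helpers, e.g.
//transforming a normal by the inverse transpose; m and n are hypothetical inputs.
//  const Mat33V invTrnsps = M33Trnsps(M33Inverse(m));
//  const Vec3V transformedNormal = M33MulV3(invTrnsps, n);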
//Not implemented
//return 1 if all components of a are equal to all components of b
//NV_FORCE_INLINE uint32_t V4U32AllEq(const VecU32V a, const VecU32V b);
//v.w=f
//NV_FORCE_INLINE void V3WriteW(Vec3V& v, const float f);
//NV_FORCE_INLINE float V3ReadW(const Vec3V& v);
//Not used
//NV_FORCE_INLINE Vec4V V4LoadAligned(Vec4V* addr);
//NV_FORCE_INLINE Vec4V V4LoadUnaligned(Vec4V* addr);
//floor(a)(per component)
//NV_FORCE_INLINE Vec4V V4Floor(Vec4V a);
//ceil(a) (per component)
//NV_FORCE_INLINE Vec4V V4Ceil(Vec4V a);
//NV_FORCE_INLINE VecU32V V4ConvertToU32VSaturate(const Vec4V a, uint32_t power);
//Math operations on 16-byte aligned Mat34s (represents transformation matrix - rotation and translation).
//namespace _Mat34V
//{
// //a*b
// NV_FORCE_INLINE Vec3V multiplyV(const Mat34V& a, const Vec3V b);
// //a_rotation * b
// NV_FORCE_INLINE Vec3V multiply3X3V(const Mat34V& a, const Vec3V b);
// //transpose(a_rotation)*b
// NV_FORCE_INLINE Vec3V multiplyTranspose3X3V(const Mat34V& a, const Vec3V b);
// //a*b
// NV_FORCE_INLINE Mat34V multiplyV(const Mat34V& a, const Mat34V& b);
// //a_rotation*b
// NV_FORCE_INLINE Mat33V multiply3X3V(const Mat34V& a, const Mat33V& b);
// //a_rotation*b_rotation
// NV_FORCE_INLINE Mat33V multiply3X3V(const Mat34V& a, const Mat34V& b);
// //a+b
// NV_FORCE_INLINE Mat34V addV(const Mat34V& a, const Mat34V& b);
// //a^-1
// NV_FORCE_INLINE Mat34V getInverseV(const Mat34V& a);
// //transpose(a_rotation)
// NV_FORCE_INLINE Mat33V getTranspose3X3(const Mat34V& a);
//}; //namespace _Mat34V
//a*b
//#define M34MulV3(a,b) (M34MulV3(a,b))
////a_rotation * b
//#define M34Mul33V3(a,b) (M34Mul33V3(a,b))
////transpose(a_rotation)*b
//#define M34TrnspsMul33V3(a,b) (M34TrnspsMul33V3(a,b))
////a*b
//#define M34MulM34(a,b) (_Mat34V::multiplyV(a,b))
//a_rotation*b
//#define M34MulM33(a,b) (M34MulM33(a,b))
//a_rotation*b_rotation
//#define M34Mul33MM34(a,b) (M34MulM33(a,b))
//a+b
//#define M34Add(a,b) (M34Add(a,b))
////a^-1
//#define M34Inverse(a,b) (M34Inverse(a))
//transpose(a_rotation)
//#define M34Trnsps33(a) (M33Trnsps3X3(a))
//Math operations on 16-byte aligned Mat44s (represents any 4x4 matrix)
//namespace _Mat44V
//{
// //a*b
// NV_FORCE_INLINE Vec4V multiplyV(const Mat44V& a, const Vec4V b);
// //transpose(a)*b
// NV_FORCE_INLINE Vec4V multiplyTransposeV(const Mat44V& a, const Vec4V b);
// //a*b
// NV_FORCE_INLINE Mat44V multiplyV(const Mat44V& a, const Mat44V& b);
// //a+b
// NV_FORCE_INLINE Mat44V addV(const Mat44V& a, const Mat44V& b);
// //a^-1
// NV_FORCE_INLINE Mat44V getInverseV(const Mat44V& a);
// //transpose(a)
// NV_FORCE_INLINE Mat44V getTransposeV(const Mat44V& a);
//}; //namespace _Mat44V
//namespace _VecU32V
//{
// // pack 8 U32s to 8 U16s with saturation
// NV_FORCE_INLINE VecU16V pack2U32VToU16VSaturate(VecU32V a, VecU32V b);
// NV_FORCE_INLINE VecU32V orV(VecU32V a, VecU32V b);
// NV_FORCE_INLINE VecU32V andV(VecU32V a, VecU32V b);
// NV_FORCE_INLINE VecU32V andcV(VecU32V a, VecU32V b);
// // conversion from integer to float
// NV_FORCE_INLINE Vec4V convertToVec4V(VecU32V a);
// // splat a[elementIndex] into all fields of a
// template<int elementIndex>
// NV_FORCE_INLINE VecU32V splatElement(VecU32V a);
// NV_FORCE_INLINE void storeAligned(VecU32V a, VecU32V* address);
//};
//namespace _VecI32V
//{
// template<int a> NV_FORCE_INLINE VecI32V splatI32();
//};
//
//namespace _VecU16V
//{
// NV_FORCE_INLINE VecU16V orV(VecU16V a, VecU16V b);
// NV_FORCE_INLINE VecU16V andV(VecU16V a, VecU16V b);
// NV_FORCE_INLINE VecU16V andcV(VecU16V a, VecU16V b);
// NV_FORCE_INLINE void storeAligned(VecU16V val, VecU16V *address);
// NV_FORCE_INLINE VecU16V loadAligned(VecU16V* addr);
// NV_FORCE_INLINE VecU16V loadUnaligned(VecU16V* addr);
// NV_FORCE_INLINE VecU16V compareGt(VecU16V a, VecU16V b);
// template<int elementIndex>
// NV_FORCE_INLINE VecU16V splatElement(VecU16V a);
// NV_FORCE_INLINE VecU16V subtractModulo(VecU16V a, VecU16V b);
// NV_FORCE_INLINE VecU16V addModulo(VecU16V a, VecU16V b);
// NV_FORCE_INLINE VecU32V getLo16(VecU16V a); // [0,2,4,6] 16-bit values to [0,1,2,3] 32-bit vector
// NV_FORCE_INLINE VecU32V getHi16(VecU16V a); // [1,3,5,7] 16-bit values to [0,1,2,3] 32-bit vector
//};
//
//namespace _VecI16V
//{
// template <int val> NV_FORCE_INLINE VecI16V splatImmediate();
//};
//
//namespace _VecU8V
//{
//};
//a*b
//#define M44MulV4(a,b) (M44MulV4(a,b))
////transpose(a)*b
//#define M44TrnspsMulV4(a,b) (M44TrnspsMulV4(a,b))
////a*b
//#define M44MulM44(a,b) (M44MulM44(a,b))
////a+b
//#define M44Add(a,b) (M44Add(a,b))
////a^-1
//#define M44Inverse(a) (M44Inverse(a))
////transpose(a)
//#define M44Trnsps(a) (M44Trnsps(a))
// dsequeira: these used to be assert'd out in SIMD builds, but they're necessary if
// we want to be able to write some scalar functions which run using SIMD data structures
NV_FORCE_INLINE void V3WriteX(Vec3V& v, const float f)
{
((NvVec3 &)v).x=f;
}
NV_FORCE_INLINE void V3WriteY(Vec3V& v, const float f)
{
((NvVec3 &)v).y=f;
}
NV_FORCE_INLINE void V3WriteZ(Vec3V& v, const float f)
{
((NvVec3 &)v).z=f;
}
NV_FORCE_INLINE void V3WriteXYZ(Vec3V& v, const NvVec3& f)
{
(NvVec3 &)v = f;
}
NV_FORCE_INLINE float V3ReadX(const Vec3V& v)
{
return ((NvVec3 &)v).x;
}
NV_FORCE_INLINE float V3ReadY(const Vec3V& v)
{
return ((NvVec3 &)v).y;
}
NV_FORCE_INLINE float V3ReadZ(const Vec3V& v)
{
return ((NvVec3 &)v).z;
}
NV_FORCE_INLINE const NvVec3& V3ReadXYZ(const Vec3V& v)
{
return (NvVec3&)v;
}
NV_FORCE_INLINE void V4WriteX(Vec4V& v, const float f)
{
((NvVec4&)v).x=f;
}
NV_FORCE_INLINE void V4WriteY(Vec4V& v, const float f)
{
((NvVec4&)v).y=f;
}
NV_FORCE_INLINE void V4WriteZ(Vec4V& v, const float f)
{
((NvVec4&)v).z=f;
}
NV_FORCE_INLINE void V4WriteW(Vec4V& v, const float f)
{
((NvVec4&)v).w=f;
}
NV_FORCE_INLINE void V4WriteXYZ(Vec4V& v, const NvVec3& f)
{
((NvVec3&)v)=f;
}
NV_FORCE_INLINE float V4ReadX(const Vec4V& v)
{
return ((NvVec4&)v).x;
}
NV_FORCE_INLINE float V4ReadY(const Vec4V& v)
{
return ((NvVec4&)v).y;
}
NV_FORCE_INLINE float V4ReadZ(const Vec4V& v)
{
return ((NvVec4&)v).z;
}
NV_FORCE_INLINE float V4ReadW(const Vec4V& v)
{
return ((NvVec4&)v).w;
}
NV_FORCE_INLINE const NvVec3& V4ReadXYZ(const Vec4V& v)
{
return (NvVec3&)v;
}
//this macro transposes 4 Vec4V into 3 Vec4V (assuming that the W component can be ignored)
#define NV_TRANSPOSE_44_34(inA, inB, inC, inD, outA, outB, outC) \
outA = V4UnpackXY(inA, inC); \
inA = V4UnpackZW(inA, inC); \
inC = V4UnpackXY(inB, inD); \
inB = V4UnpackZW(inB, inD); \
outB = V4UnpackZW(outA, inC); \
outA = V4UnpackXY(outA, inC); \
outC = V4UnpackXY(inA, inB);
//this macro transposes 3 Vec4V into 4 Vec4V (with W components as garbage!)
#define NV_TRANSPOSE_34_44(inA, inB, inC, outA, outB, outC, outD) \
outA = V4UnpackXY(inA, inC); \
inA = V4UnpackZW(inA, inC); \
outC = V4UnpackXY(inB, inB); \
inC = V4UnpackZW(inB, inB); \
outB = V4UnpackZW(outA, outC); \
outA = V4UnpackXY(outA, outC); \
outC = V4UnpackXY(inA, inC); \
outD = V4UnpackZW(inA, inC);
#define NV_TRANSPOSE_44(inA, inB, inC, inD, outA, outB, outC, outD) \
outA = V4UnpackXY(inA, inC); \
inA = V4UnpackZW(inA, inC); \
inC = V4UnpackXY(inB, inD); \
inB = V4UnpackZW(inB, inD); \
outB = V4UnpackZW(outA, inC); \
outA = V4UnpackXY(outA, inC); \
outC = V4UnpackXY(inA, inB); \
outD = V4UnpackZW(inA, inB);
//On all platforms except 360, this is a fast way of calculating 4 dot products at once. On 360, it may be faster to call V3Dot 4 times because there is an
//instruction to perform a dot product that completes in 14 cycles.
//It returns a Vec4V, where each element is the dot product of one pair of Vec3Vs.
NV_FORCE_INLINE Vec4V V3Dot4(const Vec3VArg a0, const Vec3VArg b0, const Vec3VArg a1, const Vec3VArg b1, const Vec3VArg a2,
const Vec3VArg b2, const Vec3VArg a3, const Vec3VArg b3)
{
Vec4V a0b0 = Vec4V_From_Vec3V(V3Mul(a0, b0));
Vec4V a1b1 = Vec4V_From_Vec3V(V3Mul(a1, b1));
Vec4V a2b2 = Vec4V_From_Vec3V(V3Mul(a2, b2));
Vec4V a3b3 = Vec4V_From_Vec3V(V3Mul(a3, b3));
Vec4V aTrnsps, bTrnsps, cTrnsps;
NV_TRANSPOSE_44_34(a0b0, a1b1, a2b2, a3b3, aTrnsps, bTrnsps, cTrnsps);
return V4Add(V4Add(aTrnsps, bTrnsps), cTrnsps);
}
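//Illustrative usage sketch (not part of the API): batching four dot products at once,
//e.g. testing four points against four plane normals; variable names are hypothetical.
//  const Vec4V dots = V3Dot4(n0, p0, n1, p1, n2, p2, n3, p3);
//  // dots = {n0.p0, n1.p1, n2.p2, n3.p3}; one Vec4V comparison can now replace four scalar ones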
//Now for the cross-platform implementations of the 16-byte aligned maths functions (win32/360/ppu/spu etc).
#if COMPILE_VECTOR_INTRINSICS
#include "NsInlineAoS.h"
#else // #if COMPILE_VECTOR_INTRINSICS
#include "NsVecMathAoSScalarInline.h"
#endif // #if !COMPILE_VECTOR_INTRINSICS
#include "NsVecQuat.h"
} // namespace aos
} // namespace shdfnd
} // namespace nvidia
#endif //PS_VECMATH_H
| 48,663 | C | 35.561983 | 154 | 0.67351 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsHashSet.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef NV_NSFOUNDATION_NSHASHSET_H
#define NV_NSFOUNDATION_NSHASHSET_H
#include "NsHashInternals.h"
// TODO: make this doxy-format
// This header defines two hash sets. Hash sets
// * support custom initial table sizes (rounded up internally to power-of-2)
// * support custom static allocator objects
// * auto-resize, based on a load factor (i.e. a 64-entry .75 load factor hash will resize
// when the 49th element is inserted)
// * are based on open hashing
//
// Sets have STL-like copying semantics, and properly initialize and destruct copies of objects
//
// There are two forms of set: coalesced and uncoalesced. Coalesced sets keep the entries in the
// initial segment of an array, so are fast to iterate over; however deletion is approximately
// twice as expensive.
//
// HashSet<T>:
// bool insert(const T& k) amortized O(1) (exponential resize policy)
// bool contains(const T& k) const; O(1)
// bool erase(const T& k); O(1)
// uint32_t size() const; constant
// void reserve(uint32_t size); O(MAX(size, currentOccupancy))
// void clear(); O(currentOccupancy) (with zero constant for objects without
// destructors)
// Iterator getIterator();
//
// Use of iterators:
//
// for(HashSet::Iterator iter = test.getIterator(); !iter.done(); ++iter)
// myFunction(*iter);
//
// CoalescedHashSet<T> does not support getIterator, but instead supports
// const Key *getEntries();
//
// insertion into a set already containing the element fails, returning false, as does
// erasure of an element not in the set
//
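// Illustrative usage sketch (not part of this header), assuming int keys and the default
// hash function and allocator; process() is a placeholder:
//
// HashSet<int> set;
// set.insert(42); // returns true (newly inserted)
// set.insert(42); // returns false (already present)
// for(HashSet<int>::Iterator iter = set.getIterator(); !iter.done(); ++iter)
// process(*iter);
//
// CoalescedHashSet<int> cset;
// cset.insert(7);
// const int* entries = cset.getEntries(); // contiguous storage, fast to iterate
// for(uint32_t i = 0; i < cset.size(); ++i)
// process(entries[i]);
//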
namespace nvidia
{
namespace shdfnd
{
template <class Key, class HashFn = Hash<Key>, class Allocator = NonTrackingAllocator>
class HashSet : public internal::HashSetBase<Key, HashFn, Allocator, false>
{
public:
typedef internal::HashSetBase<Key, HashFn, Allocator, false> HashSetBase;
typedef typename HashSetBase::Iterator Iterator;
HashSet(uint32_t initialTableSize = 64, float loadFactor = 0.75f) : HashSetBase(initialTableSize, loadFactor)
{
}
HashSet(uint32_t initialTableSize, float loadFactor, const Allocator& alloc)
: HashSetBase(initialTableSize, loadFactor, alloc)
{
}
HashSet(const Allocator& alloc) : HashSetBase(64, 0.75f, alloc)
{
}
Iterator getIterator()
{
return Iterator(HashSetBase::mBase);
}
};
template <class Key, class HashFn = Hash<Key>, class Allocator = NonTrackingAllocator>
class CoalescedHashSet : public internal::HashSetBase<Key, HashFn, Allocator, true>
{
public:
typedef typename internal::HashSetBase<Key, HashFn, Allocator, true> HashSetBase;
CoalescedHashSet(uint32_t initialTableSize = 64, float loadFactor = 0.75f)
: HashSetBase(initialTableSize, loadFactor)
{
}
CoalescedHashSet(uint32_t initialTableSize, float loadFactor, const Allocator& alloc)
: HashSetBase(initialTableSize, loadFactor, alloc)
{
}
CoalescedHashSet(const Allocator& alloc) : HashSetBase(64, 0.75f, alloc)
{
}
const Key* getEntries() const
{
return HashSetBase::mBase.getEntries();
}
};
} // namespace shdfnd
} // namespace nvidia
#endif // #ifndef NV_NSFOUNDATION_NSHASHSET_H
| 5,112 | C | 39.259842 | 119 | 0.697183 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsGlobals.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef NV_NSFOUNDATION_NSGLOBALS_H
#define NV_NSFOUNDATION_NSGLOBALS_H
#include "NvErrors.h"
namespace nvidia
{
class NvAssertHandler;
class NvErrorCallback;
class NvAllocatorCallback;
class NvProfilerCallback;
namespace shdfnd
{
// note: it's illegal to initialize the shared foundation twice without terminating in between
NV_FOUNDATION_API void initializeSharedFoundation(uint32_t version, NvAllocatorCallback&, NvErrorCallback&);
NV_FOUNDATION_API bool sharedFoundationIsInitialized();
NV_FOUNDATION_API void terminateSharedFoundation();
// number of times foundation has been init'd. 0 means never initialized, so if we wrap we go from UINT32_MAX to 1. Used
// for things that happen at most once (e.g. some warnings)
NV_FOUNDATION_API uint32_t getInitializationCount();
NV_FOUNDATION_API NvAllocatorCallback& getAllocator();
NV_FOUNDATION_API NvErrorCallback& getErrorCallback();
// on some platforms (notably 360) the CRT does non-recoverable allocations when asked for type names. Hence
// we provide a mechanism to disable this capability
NV_FOUNDATION_API void setReflectionAllocatorReportsNames(bool val);
NV_FOUNDATION_API bool getReflectionAllocatorReportsNames();
NV_FOUNDATION_API NvProfilerCallback *getProfilerCallback();
NV_FOUNDATION_API void setProfilerCallback(NvProfilerCallback *profiler);
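// Illustrative usage sketch (not part of this header): the intended pairing of init and
// terminate. MyAllocator/MyErrorCallback and kFoundationVersion are hypothetical stand-ins
// for user implementations of the callback interfaces and the library's version constant.
//
// MyAllocator allocator; // derives from NvAllocatorCallback
// MyErrorCallback errorCallback; // derives from NvErrorCallback
// shdfnd::initializeSharedFoundation(kFoundationVersion, allocator, errorCallback);
// NV_ASSERT(shdfnd::sharedFoundationIsInitialized());
// // ... use shdfnd::getAllocator() / shdfnd::getErrorCallback() ...
// shdfnd::terminateSharedFoundation(); // required before any re-initialization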
}
}
#endif // #ifndef NV_NSFOUNDATION_NSGLOBALS_H
| 3,074 | C | 41.708333 | 120 | 0.786597 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/platform/unix/NsUnixTrigConstants.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef PS_UNIX_TRIG_CONSTANTS_H
#define PS_UNIX_TRIG_CONSTANTS_H
//#define NV_GLOBALCONST extern const __declspec(selectany)
#if NV_WINRT
#define NV_GLOBALCONST extern const __declspec(selectany)
#else
#define NV_GLOBALCONST extern const __attribute__((weak))
#endif
NV_ALIGN_PREFIX(16)
struct NV_VECTORF32
{
float f[4];
} NV_ALIGN_SUFFIX(16);
NV_GLOBALCONST NV_VECTORF32 g_NVSinCoefficients0 = {{1.0f, -0.166666667f, 8.333333333e-3f, -1.984126984e-4f}};
NV_GLOBALCONST NV_VECTORF32 g_NVSinCoefficients1 = {{2.755731922e-6f, -2.505210839e-8f, 1.605904384e-10f, -7.647163732e-13f}};
NV_GLOBALCONST NV_VECTORF32 g_NVSinCoefficients2 = {{2.811457254e-15f, -8.220635247e-18f, 1.957294106e-20f, -3.868170171e-23f}};
NV_GLOBALCONST NV_VECTORF32 g_NVCosCoefficients0 = {{1.0f, -0.5f, 4.166666667e-2f, -1.388888889e-3f}};
NV_GLOBALCONST NV_VECTORF32 g_NVCosCoefficients1 = {{2.480158730e-5f, -2.755731922e-7f, 2.087675699e-9f, -1.147074560e-11f}};
NV_GLOBALCONST NV_VECTORF32 g_NVCosCoefficients2 = {{4.779477332e-14f, -1.561920697e-16f, 4.110317623e-19f, -8.896791392e-22f}};
NV_GLOBALCONST NV_VECTORF32 g_NVTanCoefficients0 = {{1.0f, 0.333333333f, 0.133333333f, 5.396825397e-2f}};
NV_GLOBALCONST NV_VECTORF32 g_NVTanCoefficients1 = {{2.186948854e-2f, 8.863235530e-3f, 3.592128167e-3f, 1.455834485e-3f}};
NV_GLOBALCONST NV_VECTORF32 g_NVTanCoefficients2 = {{5.900274264e-4f, 2.391290764e-4f, 9.691537707e-5f, 3.927832950e-5f}};
NV_GLOBALCONST NV_VECTORF32 g_NVASinCoefficients0 = {{-0.05806367563904f, -0.41861972469416f, 0.22480114791621f, 2.17337241360606f}};
NV_GLOBALCONST NV_VECTORF32 g_NVASinCoefficients1 = {{0.61657275907170f, 4.29696498283455f, -1.18942822255452f, -6.53784832094831f}};
NV_GLOBALCONST NV_VECTORF32 g_NVASinCoefficients2 = {{-1.36926553863413f, -4.48179294237210f, 1.41810672941833f, 5.48179257935713f}};
NV_GLOBALCONST NV_VECTORF32 g_NVATanCoefficients0 = {{1.0f, 0.333333334f, 0.2f, 0.142857143f}};
NV_GLOBALCONST NV_VECTORF32 g_NVATanCoefficients1 = {{1.111111111e-1f, 9.090909091e-2f, 7.692307692e-2f, 6.666666667e-2f}};
NV_GLOBALCONST NV_VECTORF32 g_NVATanCoefficients2 = {{5.882352941e-2f, 5.263157895e-2f, 4.761904762e-2f, 4.347826087e-2f}};
NV_GLOBALCONST NV_VECTORF32 g_NVSinEstCoefficients = {{1.0f, -1.66521856991541e-1f, 8.199913018755e-3f, -1.61475937228e-4f}};
NV_GLOBALCONST NV_VECTORF32 g_NVCosEstCoefficients = {{1.0f, -4.95348008918096e-1f, 3.878259962881e-2f, -9.24587976263e-4f}};
NV_GLOBALCONST NV_VECTORF32 g_NVTanEstCoefficients = {{2.484f, -1.954923183e-1f, 2.467401101f, NvInvPi}};
NV_GLOBALCONST NV_VECTORF32 g_NVATanEstCoefficients = {{7.689891418951e-1f, 1.104742493348f, 8.661844266006e-1f, NvPiDivTwo}};
NV_GLOBALCONST NV_VECTORF32 g_NVASinEstCoefficients = {{-1.36178272886711f, 2.37949493464538f, -8.08228565650486e-1f, 2.78440142746736e-1f}};
NV_GLOBALCONST NV_VECTORF32 g_NVASinEstConstants = {{1.00000011921f, NvPiDivTwo, 0.0f, 0.0f}};
NV_GLOBALCONST NV_VECTORF32 g_NVPiConstants0 = {{NvPi, NvTwoPi, NvInvPi, NvInvTwoPi}};
NV_GLOBALCONST NV_VECTORF32 g_NVReciprocalTwoPi = {{NvInvTwoPi, NvInvTwoPi, NvInvTwoPi, NvInvTwoPi}};
NV_GLOBALCONST NV_VECTORF32 g_NVTwoPi = {{NvTwoPi, NvTwoPi, NvTwoPi, NvTwoPi}};
#endif
| 4,945 | C | 67.694444 | 141 | 0.757735 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/platform/unix/NsUnixAoS.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef PS_UNIX_AOS_H
#define PS_UNIX_AOS_H
// no includes here! this file should be included from NvcVecMath.h only!!!
#if !COMPILE_VECTOR_INTRINSICS
#error Vector intrinsics should not be included when using scalar implementation.
#endif
#if NV_X86 || NV_X64
# include "sse2/NsUnixSse2AoS.h"
#elif NV_NEON
# include "neon/NsUnixNeonAoS.h"
#else
# error No SIMD implementation for this unix platform.
#endif
#endif //PS_UNIX_AOS_H
| 2,139 | C | 43.583332 | 81 | 0.761571 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/platform/unix/NsUnixFPU.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef NV_UNIX_NSUNIXFPU_H
#define NV_UNIX_NSUNIXFPU_H
#include "NvPreprocessor.h"
#if NV_LINUX || NV_PS4 || NV_OSX
#if NV_X86 || NV_X64
#include <xmmintrin.h>
#elif NV_NEON
#include <arm_neon.h>
#endif
NV_INLINE nvidia::shdfnd::SIMDGuard::SIMDGuard()
{
mControlWord = _mm_getcsr();
// set default (disable exceptions: _MM_MASK_MASK) and FTZ (_MM_FLUSH_ZERO_ON), DAZ (_MM_DENORMALS_ZERO_ON: (1<<6))
_mm_setcsr(_MM_MASK_MASK | _MM_FLUSH_ZERO_ON | (1 << 6));
}
NV_INLINE nvidia::shdfnd::SIMDGuard::~SIMDGuard()
{
// restore control word and clear exception flags
// (setting exception state flags causes exceptions on the first following fp operation)
_mm_setcsr(mControlWord & ~_MM_EXCEPT_MASK);
}
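// Illustrative usage sketch (not part of this header): the guard is intended as a scoped
// RAII object around SIMD-heavy code; runSimdKernel() is a hypothetical placeholder.
//
// {
// nvidia::shdfnd::SIMDGuard simdGuard; // masks FP exceptions, enables FTZ/DAZ
// runSimdKernel();
// } // destructor restores the previous MXCSR control word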
#else
#error No SIMD implementation for this unix platform.
#endif // NV_LINUX || NV_PS4 || NV_OSX
#endif // #ifndef NV_UNIX_NSUNIXFPU_H
| 2,561 | C | 40.999999 | 119 | 0.738774 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/platform/unix/NsUnixInlineAoS.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef PS_UNIX_INLINE_AOS_H
#define PS_UNIX_INLINE_AOS_H
#if !COMPILE_VECTOR_INTRINSICS
#error Vector intrinsics should not be included when using scalar implementation.
#endif
//Remove this define when all platforms use simd solver.
#define NV_SUPPORT_SIMD
#if NV_X86 || NV_X64
# include "sse2/NsUnixSse2InlineAoS.h"
#elif NV_NEON
# include "neon/NsUnixNeonInlineAoS.h"
#else
# error No SIMD implementation for this unix platform.
#endif
#endif //PS_UNIX_INLINE_AOS_H
| 2,177 | C | 43.448979 | 81 | 0.765273 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/platform/unix/NsUnixIntrinsics.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef NV_UNIX_NSUNIXINTRINSICS_H
#define NV_UNIX_NSUNIXINTRINSICS_H
#include "Ns.h"
#include "NvAssert.h"
#include <math.h>
#if NV_ANDROID
#include <signal.h> // for Ns::debugBreak() { raise(SIGTRAP); }
#endif
#if 0
#include <libkern/OSAtomic.h>
#endif
// this file is for internal intrinsics - that is, intrinsics that are used in
// cross platform code but do not appear in the API
#if !(NV_LINUX || NV_ANDROID || NV_PS4 || NV_APPLE_FAMILY)
#error "This file should only be included by unix builds!!"
#endif
namespace nvidia
{
namespace shdfnd
{
NV_FORCE_INLINE void memoryBarrier()
{
__sync_synchronize();
}
/*!
Return the index of the highest set bit. Undefined for zero arg.
*/
NV_INLINE uint32_t highestSetBitUnsafe(uint32_t v)
{
return 31 - __builtin_clz(v);
}
/*!
Return the index of the lowest set bit. Undefined for zero arg.
*/
NV_INLINE int32_t lowestSetBitUnsafe(uint32_t v)
{
return __builtin_ctz(v);
}
/*!
Returns the number of leading zeros in v. Undefined for zero arg.
*/
NV_INLINE uint32_t countLeadingZeros(uint32_t v)
{
return __builtin_clz(v);
}
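/*!
Illustrative examples (not part of the header) of the bit-scan helpers above, for the
sample value 0x90 (binary 1001 0000):
highestSetBitUnsafe(0x90u) == 7 // bit 7 is the highest set bit
lowestSetBitUnsafe(0x90u) == 4 // bit 4 is the lowest set bit
countLeadingZeros(0x90u) == 24 // 24 zero bits above bit 7 in a 32-bit value
*/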
/*!
Prefetch the aligned cache line (64B on x86, 32B on ARM) containing \c ptr+offset.
*/
NV_FORCE_INLINE void prefetchLine(const void* ptr, uint32_t offset = 0)
{
__builtin_prefetch(reinterpret_cast<const char * NV_RESTRICT>(ptr) + offset, 0, 3);
}
/*!
Prefetch \c count bytes starting at \c ptr.
*/
#if NV_ANDROID || NV_IOS
NV_FORCE_INLINE void prefetch(const void* ptr, uint32_t count = 1)
{
const char* cp = static_cast<const char*>(ptr);
size_t p = reinterpret_cast<size_t>(ptr);
uint32_t startLine = uint32_t(p >> 5), endLine = uint32_t((p + count - 1) >> 5);
uint32_t lines = endLine - startLine + 1;
do
{
prefetchLine(cp);
cp += 32;
} while(--lines);
}
#else
NV_FORCE_INLINE void prefetch(const void* ptr, uint32_t count = 1)
{
const char* cp = reinterpret_cast<const char*>(ptr);
uint64_t p = size_t(ptr);
uint64_t startLine = p >> 6, endLine = (p + count - 1) >> 6;
uint64_t lines = endLine - startLine + 1;
do
{
prefetchLine(cp);
cp += 64;
} while(--lines);
}
#endif
//! \brief platform-specific reciprocal
NV_CUDA_CALLABLE NV_FORCE_INLINE float recipFast(float a)
{
return 1.0f / a;
}
//! \brief platform-specific fast reciprocal square root
NV_CUDA_CALLABLE NV_FORCE_INLINE float recipSqrtFast(float a)
{
return 1.0f / ::sqrtf(a);
}
//! \brief platform-specific floor
NV_CUDA_CALLABLE NV_FORCE_INLINE float floatFloor(float x)
{
return ::floorf(x);
}
#define NS_EXPECT_TRUE(x) x
#define NS_EXPECT_FALSE(x) x
} // namespace shdfnd
} // namespace nvidia
#endif // #ifndef NV_UNIX_NSUNIXINTRINSICS_H
| 4,388 | C | 28.26 | 87 | 0.705333 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/platform/unix/sse2/NsUnixSse2InlineAoS.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef PS_UNIX_SSE2_INLINE_AOS_H
#define PS_UNIX_SSE2_INLINE_AOS_H
#if !COMPILE_VECTOR_INTRINSICS
#error Vector intrinsics should not be included when using scalar implementation.
#endif
//Remove this define when all platforms use simd solver.
#define NV_SUPPORT_SIMD
#ifdef __SSE4_2__
#include "smmintrin.h"
#endif
#define _NV_FPCLASS_SNAN 0x0001 /* signaling NaN */
#define _NV_FPCLASS_QNAN 0x0002 /* quiet NaN */
#define _NV_FPCLASS_NINF 0x0004 /* negative infinity */
#define _NV_FPCLASS_PINF 0x0200 /* positive infinity */
NV_FORCE_INLINE __m128 m128_I2F(__m128i n) { return _mm_castsi128_ps(n); }
NV_FORCE_INLINE __m128i m128_F2I(__m128 n) { return _mm_castps_si128(n); }
namespace internalUnitSSE2Simd
{
NV_FORCE_INLINE uint32_t BAllTrue4_R(const BoolV a)
{
const int32_t moveMask = _mm_movemask_ps(a);
return moveMask == (0xf);
}
NV_FORCE_INLINE uint32_t BAnyTrue4_R(const BoolV a)
{
const int32_t moveMask = _mm_movemask_ps(a);
return moveMask != (0x0);
}
NV_FORCE_INLINE uint32_t BAllTrue3_R(const BoolV a)
{
const int32_t moveMask = _mm_movemask_ps(a);
return (moveMask & 0x7) == (0x7);
}
NV_FORCE_INLINE uint32_t BAnyTrue3_R(const BoolV a)
{
const int32_t moveMask = _mm_movemask_ps(a);
return (moveMask & 0x7) != (0x0);
}
NV_FORCE_INLINE uint32_t FiniteTestEq(const Vec4V a, const Vec4V b)
{
//This is a bit of a bodge.
//_mm_comieq_ss returns 1 if either value is nan so we need to re-cast a and b with true encoded as a non-nan number.
//There must be a better way of doing this in sse.
const BoolV one = FOne();
const BoolV zero = FZero();
const BoolV a1 =V4Sel(a,one,zero);
const BoolV b1 =V4Sel(b,one,zero);
return
(
_mm_comieq_ss(a1, b1) &&
_mm_comieq_ss(_mm_shuffle_ps(a1, a1, _MM_SHUFFLE(1,1,1,1)),_mm_shuffle_ps(b1, b1, _MM_SHUFFLE(1,1,1,1))) &&
_mm_comieq_ss(_mm_shuffle_ps(a1, a1, _MM_SHUFFLE(2,2,2,2)),_mm_shuffle_ps(b1, b1, _MM_SHUFFLE(2,2,2,2))) &&
_mm_comieq_ss(_mm_shuffle_ps(a1, a1, _MM_SHUFFLE(3,3,3,3)),_mm_shuffle_ps(b1, b1, _MM_SHUFFLE(3,3,3,3)))
);
}
const NV_ALIGN(16, uint32_t gMaskXYZ[4])={0xffffffff, 0xffffffff, 0xffffffff, 0};
}
namespace _VecMathTests
{
NV_FORCE_INLINE bool allElementsEqualFloatV(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return(_mm_comieq_ss(a,b)!=0);
}
NV_FORCE_INLINE bool allElementsEqualVec3V(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
return V3AllEq(a, b) != 0;
}
NV_FORCE_INLINE bool allElementsEqualVec4V(const Vec4V a, const Vec4V b)
{
return V4AllEq(a, b) != 0;
}
NV_FORCE_INLINE bool allElementsEqualBoolV(const BoolV a, const BoolV b)
{
return internalUnitSSE2Simd::BAllTrue4_R(VecI32V_IsEq(a, b)) != 0;
}
NV_FORCE_INLINE bool allElementsEqualVecU32V(const VecU32V a, const VecU32V b)
{
return internalUnitSSE2Simd::BAllTrue4_R(V4IsEqU32(a, b)) != 0;
}
NV_FORCE_INLINE bool allElementsEqualVecI32V(const VecI32V a, const VecI32V b)
{
BoolV c = m128_I2F(_mm_cmpeq_epi32(m128_F2I(a), m128_F2I(b)));
return internalUnitSSE2Simd::BAllTrue4_R(c) != 0;
}
#define VECMATH_AOS_EPSILON (1e-3f)
NV_FORCE_INLINE bool allElementsNearEqualFloatV(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
const FloatV c=FSub(a,b);
const FloatV minError=FLoad(-VECMATH_AOS_EPSILON);
const FloatV maxError=FLoad(VECMATH_AOS_EPSILON);
return (_mm_comigt_ss(c,minError) && _mm_comilt_ss(c,maxError));
}
NV_FORCE_INLINE bool allElementsNearEqualVec3V(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
const Vec3V c=V3Sub(a,b);
const Vec3V minError=V3Load(-VECMATH_AOS_EPSILON);
const Vec3V maxError=V3Load(VECMATH_AOS_EPSILON);
return
(
_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0,0,0,0)),minError) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0,0,0,0)),maxError) &&
_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1,1,1,1)),minError) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1,1,1,1)),maxError) &&
_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2,2,2,2)),minError) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2,2,2,2)),maxError)
);
}
NV_FORCE_INLINE bool allElementsNearEqualVec4V(const Vec4V a, const Vec4V b)
{
const Vec4V c=V4Sub(a,b);
const Vec4V minError=V4Load(-VECMATH_AOS_EPSILON);
const Vec4V maxError=V4Load(VECMATH_AOS_EPSILON);
return
(
_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0,0,0,0)),minError) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0,0,0,0)),maxError) &&
_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1,1,1,1)),minError) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1,1,1,1)),maxError) &&
_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2,2,2,2)),minError) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2,2,2,2)),maxError) &&
_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(3,3,3,3)),minError) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(3,3,3,3)),maxError)
);
}
}
/////////////////////////////////////////////////////////////////////
////FUNCTIONS USED ONLY FOR ASSERTS IN VECTORISED IMPLEMENTATIONS
/////////////////////////////////////////////////////////////////////
NV_FORCE_INLINE bool isValidFloatV(const FloatV a)
{
return
(
_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0)),_mm_shuffle_ps(a, a, _MM_SHUFFLE(1,1,1,1))) &&
_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0)),_mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2))) &&
_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0)),_mm_shuffle_ps(a, a, _MM_SHUFFLE(3,3,3,3)))
);
}
NV_FORCE_INLINE bool isValidVec3V(const Vec3V a)
{
return (_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(3,3,3,3)),FZero()) ? true : false);
}
NV_FORCE_INLINE bool isFiniteFloatV(const FloatV a)
{
float badNumber =
nvidia::NvUnionCast<float, uint32_t>(_NV_FPCLASS_SNAN | _NV_FPCLASS_QNAN | _NV_FPCLASS_NINF | _NV_FPCLASS_PINF);
const FloatV vBadNum = FLoad((float&)badNumber);
const BoolV vMask = BAnd(vBadNum, a);
return internalUnitSSE2Simd::FiniteTestEq(vMask, BFFFF()) == 1;
}
NV_FORCE_INLINE bool isFiniteVec3V(const Vec3V a)
{
float badNumber =
nvidia::NvUnionCast<float, uint32_t>(_NV_FPCLASS_SNAN | _NV_FPCLASS_QNAN | _NV_FPCLASS_NINF | _NV_FPCLASS_PINF);
const Vec3V vBadNum = V3Load((float&)badNumber);
const BoolV vMask = BAnd(BAnd(vBadNum, a), BTTTF());
return internalUnitSSE2Simd::FiniteTestEq(vMask, BFFFF()) == 1;
}
NV_FORCE_INLINE bool isFiniteVec4V(const Vec4V a)
{
/*Vec4V a;
NV_ALIGN(16, float f[4]);
F32Array_Aligned_From_Vec4V(a, f);
return NvIsFinite(f[0])
&& NvIsFinite(f[1])
&& NvIsFinite(f[2])
&& NvIsFinite(f[3]);*/
float badNumber =
nvidia::NvUnionCast<float, uint32_t>(_NV_FPCLASS_SNAN | _NV_FPCLASS_QNAN | _NV_FPCLASS_NINF | _NV_FPCLASS_PINF);
const Vec4V vBadNum = V4Load((float&)badNumber);
const BoolV vMask = BAnd(vBadNum, a);
return internalUnitSSE2Simd::FiniteTestEq(vMask, BFFFF()) == 1;
}
NV_FORCE_INLINE bool hasZeroElementinFloatV(const FloatV a)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
return (_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0)),FZero()) ? true : false);
}
NV_FORCE_INLINE bool hasZeroElementInVec3V(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
return
(
_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0)),FZero()) ||
_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(1,1,1,1)),FZero()) ||
_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2)),FZero())
);
}
NV_FORCE_INLINE bool hasZeroElementInVec4V(const Vec4V a)
{
return
(
_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0)),FZero()) ||
_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(1,1,1,1)),FZero()) ||
_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2)),FZero()) ||
_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(3,3,3,3)),FZero())
);
}
/////////////////////////////////////////////////////////////////////
////VECTORISED FUNCTION IMPLEMENTATIONS
/////////////////////////////////////////////////////////////////////
NV_FORCE_INLINE FloatV FLoad(const float f)
{
return (_mm_load1_ps(&f));
}
NV_FORCE_INLINE Vec3V V3Load(const float f)
{
return _mm_set_ps(0.0f,f,f,f);
}
NV_FORCE_INLINE Vec4V V4Load(const float f)
{
return (_mm_load1_ps(&f));
}
NV_FORCE_INLINE BoolV BLoad(const bool f)
{
const uint32_t i=-(int32_t)f;
return _mm_load1_ps((float*)&i);
}
NV_FORCE_INLINE Vec3V V3LoadA(const NvVec3& f)
{
VECMATHAOS_ASSERT(0 == ((size_t)&f & 0x0f));
return _mm_and_ps((Vec3V&)f, (VecI32V&)internalUnitSSE2Simd::gMaskXYZ);
}
NV_FORCE_INLINE Vec3V V3LoadU(const NvVec3& f)
{
return (_mm_set_ps(0.0f,f.z,f.y,f.x));
}
NV_FORCE_INLINE Vec3V V3LoadUnsafeA(const NvVec3& f)
{
return (_mm_set_ps(0.0f,f.z,f.y,f.x));
}
NV_FORCE_INLINE Vec3V V3LoadA(const float* const f)
{
VECMATHAOS_ASSERT(0 == ((uint64_t)f & 0x0f));
return _mm_and_ps((Vec3V&)*f, (VecI32V&)internalUnitSSE2Simd::gMaskXYZ);
}
NV_FORCE_INLINE Vec3V V3LoadU(const float* const i)
{
return (_mm_set_ps(0.0f,i[2],i[1],i[0]));
}
NV_FORCE_INLINE Vec3V Vec3V_From_Vec4V(Vec4V v)
{
return V4ClearW(v);
}
NV_FORCE_INLINE Vec3V Vec3V_From_Vec4V_WUndefined(const Vec4V v)
{
return v;
}
NV_FORCE_INLINE Vec4V Vec4V_From_Vec3V(Vec3V f)
{
return f; //ok if it is implemented as the same type.
}
NV_FORCE_INLINE Vec4V Vec4V_From_NvVec3_WUndefined(const NvVec3& f)
{
return (_mm_set_ps(0.0f,f.z,f.y,f.x));
}
NV_FORCE_INLINE Vec4V Vec4V_From_FloatV(FloatV f)
{
return f;
}
NV_FORCE_INLINE Vec3V Vec3V_From_FloatV(FloatV f)
{
return Vec3V_From_Vec4V(Vec4V_From_FloatV(f));
}
NV_FORCE_INLINE Vec3V Vec3V_From_FloatV_WUndefined(FloatV f)
{
return Vec3V_From_Vec4V_WUndefined(Vec4V_From_FloatV(f));
}
NV_FORCE_INLINE Mat33V Mat33V_From_NvMat33(const NvMat33 &m)
{
return Mat33V(V3LoadU(m.column0),
V3LoadU(m.column1),
V3LoadU(m.column2));
}
NV_FORCE_INLINE void NvMat33_From_Mat33V(const Mat33V &m, NvMat33 &out)
{
NV_ASSERT((size_t(&out)&15)==0);
V3StoreU(m.col0, out.column0);
V3StoreU(m.col1, out.column1);
V3StoreU(m.col2, out.column2);
}
NV_FORCE_INLINE Vec4V V4LoadA(const float* const f)
{
VECMATHAOS_ASSERT(0 == ((uint64_t)f & 0x0f));
return (_mm_load_ps(f));
}
NV_FORCE_INLINE void V4StoreA(Vec4V a, float* f)
{
VECMATHAOS_ASSERT(0 == ((uint64_t)f & 0x0f));
_mm_store_ps(f,a);
}
NV_FORCE_INLINE void V4StoreU(const Vec4V a, float* f)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(0 == ((int)&a & 0x0F));
_mm_storeu_ps(f,a);
}
NV_FORCE_INLINE void BStoreA(const BoolV a, uint32_t* f)
{
VECMATHAOS_ASSERT(0 == ((uint64_t)f & 0x0f));
_mm_store_ps((float*)f,a);
}
NV_FORCE_INLINE void U4StoreA(const VecU32V uv, uint32_t* u)
{
VECMATHAOS_ASSERT(0 == ((uint64_t)u & 0x0f));
_mm_store_ps((float*)u,uv);
}
NV_FORCE_INLINE void I4StoreA(const VecI32V iv, int32_t* i)
{
VECMATHAOS_ASSERT(0 == ((uint64_t)i & 0x0f));
_mm_store_ps((float*)i,iv);
}
NV_FORCE_INLINE Vec4V V4LoadU(const float* const f)
{
return (_mm_loadu_ps(f));
}
NV_FORCE_INLINE BoolV BLoad(const bool* const f)
{
const NV_ALIGN(16, int32_t) b[4]={-(int32_t)f[0],-(int32_t)f[1],-(int32_t)f[2],-(int32_t)f[3]};
return _mm_load_ps((float*)&b);
}
NV_FORCE_INLINE float FStore(const FloatV a)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
float f;
_mm_store_ss(&f,a);
return f;
}
NV_FORCE_INLINE void FStore(const FloatV a, float* NV_RESTRICT f)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
_mm_store_ss(f,a);
}
NV_FORCE_INLINE void V3StoreA(const Vec3V a, NvVec3& f)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(0 == ((int)&a & 0x0F));
VECMATHAOS_ASSERT(0 == ((int)&f & 0x0F));
NV_ALIGN(16,float) f2[4];
_mm_store_ps(f2,a);
f=NvVec3(f2[0],f2[1],f2[2]);
}
NV_FORCE_INLINE void V3StoreU(const Vec3V a, NvVec3& f)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(0 == ((int)&a & 0x0F));
NV_ALIGN(16,float) f2[4];
_mm_store_ps(f2,a);
f=NvVec3(f2[0],f2[1],f2[2]);
}
NV_FORCE_INLINE VecI32V U4Load(const uint32_t i)
{
return (_mm_load1_ps((float*)&i));
}
NV_FORCE_INLINE VecU32V U4LoadU(const uint32_t* i)
{
return _mm_loadu_ps((float*)i);
}
NV_FORCE_INLINE VecU32V U4LoadA(const uint32_t* i)
{
VECMATHAOS_ASSERT(0==((size_t)i & 0x0f));
return _mm_load_ps((float*)i);
}
//////////////////////////////////
//FLOATV
//////////////////////////////////
NV_FORCE_INLINE FloatV FZero()
{
return FLoad(0.0f);
}
NV_FORCE_INLINE FloatV FOne()
{
return FLoad(1.0f);
}
NV_FORCE_INLINE FloatV FHalf()
{
return FLoad(0.5f);
}
NV_FORCE_INLINE FloatV FEps()
{
return FLoad(NV_EPS_REAL);
}
NV_FORCE_INLINE FloatV FEps6()
{
return FLoad(1e-6f);
}
NV_FORCE_INLINE FloatV FMax()
{
return FLoad(NV_MAX_REAL);
}
NV_FORCE_INLINE FloatV FNegMax()
{
return FLoad(-NV_MAX_REAL);
}
NV_FORCE_INLINE FloatV IZero()
{
const uint32_t zero = 0;
return _mm_load1_ps((float*)&zero);
}
NV_FORCE_INLINE FloatV IOne()
{
const uint32_t one = 1;
return _mm_load1_ps((float*)&one);
}
NV_FORCE_INLINE FloatV ITwo()
{
const uint32_t two = 2;
return _mm_load1_ps((float*)&two);
}
NV_FORCE_INLINE FloatV IThree()
{
const uint32_t three = 3;
return _mm_load1_ps((float*)&three);
}
NV_FORCE_INLINE FloatV IFour()
{
uint32_t four = 4;
return _mm_load1_ps((float*)&four);
}
NV_FORCE_INLINE FloatV FNeg(const FloatV f)
{
VECMATHAOS_ASSERT(isValidFloatV(f));
return _mm_sub_ps( _mm_setzero_ps(), f);
}
NV_FORCE_INLINE FloatV FAdd(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return _mm_add_ps(a,b);
}
NV_FORCE_INLINE FloatV FSub(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return _mm_sub_ps(a,b);
}
NV_FORCE_INLINE FloatV FMul(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return _mm_mul_ps(a,b);
}
NV_FORCE_INLINE FloatV FDiv(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return _mm_div_ps(a,b);
}
NV_FORCE_INLINE FloatV FDivFast(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return _mm_mul_ps(a,_mm_rcp_ps(b));
}
NV_FORCE_INLINE FloatV FRecip(const FloatV a)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
return _mm_div_ps(FOne(),a);
}
NV_FORCE_INLINE FloatV FRecipFast(const FloatV a)
{
return _mm_rcp_ps(a);
}
NV_FORCE_INLINE FloatV FRsqrt(const FloatV a)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
return _mm_div_ps(FOne(),_mm_sqrt_ps(a));
}
NV_FORCE_INLINE FloatV FSqrt(const FloatV a)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
return _mm_sqrt_ps(a);
}
NV_FORCE_INLINE FloatV FRsqrtFast(const FloatV a)
{
return _mm_rsqrt_ps(a);
}
NV_FORCE_INLINE FloatV FScaleAdd(const FloatV a, const FloatV b, const FloatV c)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
VECMATHAOS_ASSERT(isValidFloatV(c));
return FAdd(FMul(a,b),c);
}
NV_FORCE_INLINE FloatV FNegScaleSub(const FloatV a, const FloatV b, const FloatV c)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
VECMATHAOS_ASSERT(isValidFloatV(c));
return FSub(c,FMul(a,b));
}
NV_FORCE_INLINE FloatV FAbs(const FloatV a)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
NV_ALIGN(16,const uint32_t) absMask[4] = {0x7fFFffFF, 0x7fFFffFF, 0x7fFFffFF, 0x7fFFffFF};
return _mm_and_ps(a, _mm_load_ps((float*)absMask));
}
NV_FORCE_INLINE FloatV FSel(const BoolV c, const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(_VecMathTests::allElementsEqualBoolV(c,BTTTT()) || _VecMathTests::allElementsEqualBoolV(c,BFFFF()));
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return _mm_or_ps(_mm_andnot_ps(c, b), _mm_and_ps(c, a));
}
NV_FORCE_INLINE BoolV FIsGrtr(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return _mm_cmpgt_ps(a,b);
}
NV_FORCE_INLINE BoolV FIsGrtrOrEq(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return _mm_cmpge_ps(a,b);
}
NV_FORCE_INLINE BoolV FIsEq(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return _mm_cmpeq_ps(a,b);
}
NV_FORCE_INLINE FloatV FMax(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return _mm_max_ps(a, b);
}
NV_FORCE_INLINE FloatV FMin(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return _mm_min_ps(a, b);
}
NV_FORCE_INLINE FloatV FClamp(const FloatV a, const FloatV minV, const FloatV maxV)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(minV));
VECMATHAOS_ASSERT(isValidFloatV(maxV));
return FMax(FMin(a,maxV),minV);
}
NV_FORCE_INLINE uint32_t FAllGrtr(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return(_mm_comigt_ss(a,b));
}
NV_FORCE_INLINE uint32_t FAllGrtrOrEq(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return(_mm_comige_ss(a,b));
}
NV_FORCE_INLINE uint32_t FAllEq(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return(_mm_comieq_ss(a,b));
}
NV_FORCE_INLINE FloatV FRound(const FloatV a)
{
#ifdef __SSE4_2__
return _mm_round_ps( a, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC );
#else
//return _mm_round_ps(a, 0x0);
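// SSE2 fallback (descriptive note): add 0.5, subtract 1 when a converts to a negative
// integer (its sign bit), then truncate toward zero to approximate round-to-nearest.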
const FloatV half = FLoad(0.5f);
const __m128 signBit = _mm_cvtepi32_ps(_mm_srli_epi32(_mm_cvtps_epi32(a), 31));
const FloatV aRound = FSub(FAdd(a, half), signBit);
__m128i tmp = _mm_cvttps_epi32(aRound);
return _mm_cvtepi32_ps(tmp);
#endif
}
NV_FORCE_INLINE FloatV FSin(const FloatV a)
{
//Vec4V V1, V2, V3, V5, V7, V9, V11, V13, V15, V17, V19, V21, V23;
//Vec4V S1, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11;
FloatV Result;
// Reduce the given angle into the range -2*Pi <= a < 2*Pi
const FloatV recipTwoPi = V4LoadA(g_NVReciprocalTwoPi.f);
const FloatV twoPi = V4LoadA(g_NVTwoPi.f);
const FloatV tmp = FMul(a, recipTwoPi);
const FloatV b = FRound(tmp);
const FloatV V1 = FNegMulSub(twoPi, b, a);
// sin(V) ~= V - V^3 / 3! + V^5 / 5! - V^7 / 7! + V^9 / 9! - V^11 / 11! + V^13 / 13! -
// V^15 / 15! + V^17 / 17! - V^19 / 19! + V^21 / 21! - V^23 / 23! (for -PI <= V < PI)
const FloatV V2 = FMul(V1, V1);
const FloatV V3 = FMul(V2, V1);
const FloatV V5 = FMul(V3, V2);
const FloatV V7 = FMul(V5, V2);
const FloatV V9 = FMul(V7, V2);
const FloatV V11 = FMul(V9, V2);
const FloatV V13 = FMul(V11, V2);
const FloatV V15 = FMul(V13, V2);
const FloatV V17 = FMul(V15, V2);
const FloatV V19 = FMul(V17, V2);
const FloatV V21 = FMul(V19, V2);
const FloatV V23 = FMul(V21, V2);
const Vec4V sinCoefficients0 = V4LoadA(g_NVSinCoefficients0.f);
const Vec4V sinCoefficients1 = V4LoadA(g_NVSinCoefficients1.f);
const Vec4V sinCoefficients2 = V4LoadA(g_NVSinCoefficients2.f);
const FloatV S1 = V4GetY(sinCoefficients0);
const FloatV S2 = V4GetZ(sinCoefficients0);
const FloatV S3 = V4GetW(sinCoefficients0);
const FloatV S4 = V4GetX(sinCoefficients1);
const FloatV S5 = V4GetY(sinCoefficients1);
const FloatV S6 = V4GetZ(sinCoefficients1);
const FloatV S7 = V4GetW(sinCoefficients1);
const FloatV S8 = V4GetX(sinCoefficients2);
const FloatV S9 = V4GetY(sinCoefficients2);
const FloatV S10 = V4GetZ(sinCoefficients2);
const FloatV S11 = V4GetW(sinCoefficients2);
Result = FMulAdd(S1, V3, V1);
Result = FMulAdd(S2, V5, Result);
Result = FMulAdd(S3, V7, Result);
Result = FMulAdd(S4, V9, Result);
Result = FMulAdd(S5, V11, Result);
Result = FMulAdd(S6, V13, Result);
Result = FMulAdd(S7, V15, Result);
Result = FMulAdd(S8, V17, Result);
Result = FMulAdd(S9, V19, Result);
Result = FMulAdd(S10, V21, Result);
Result = FMulAdd(S11, V23, Result);
return Result;
}
NV_FORCE_INLINE FloatV FCos(const FloatV a)
{
//XMVECTOR V1, V2, V4, V6, V8, V10, V12, V14, V16, V18, V20, V22;
//XMVECTOR C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11;
FloatV Result;
// Reduce the given angle into the range -2*Pi <= a < 2*Pi
const FloatV recipTwoPi = V4LoadA(g_NVReciprocalTwoPi.f);
const FloatV twoPi = V4LoadA(g_NVTwoPi.f);
const FloatV tmp = FMul(a, recipTwoPi);
const FloatV b = FRound(tmp);
const FloatV V1 = FNegMulSub(twoPi, b, a);
// cos(V) ~= 1 - V^2 / 2! + V^4 / 4! - V^6 / 6! + V^8 / 8! - V^10 / 10! + V^12 / 12! -
// V^14 / 14! + V^16 / 16! - V^18 / 18! + V^20 / 20! - V^22 / 22! (for -PI <= V < PI)
const FloatV V2 = FMul(V1, V1);
const FloatV V4 = FMul(V2, V2);
const FloatV V6 = FMul(V4, V2);
const FloatV V8 = FMul(V4, V4);
const FloatV V10 = FMul(V6, V4);
const FloatV V12 = FMul(V6, V6);
const FloatV V14 = FMul(V8, V6);
const FloatV V16 = FMul(V8, V8);
const FloatV V18 = FMul(V10, V8);
const FloatV V20 = FMul(V10, V10);
const FloatV V22 = FMul(V12, V10);
const Vec4V cosCoefficients0 = V4LoadA(g_NVCosCoefficients0.f);
const Vec4V cosCoefficients1 = V4LoadA(g_NVCosCoefficients1.f);
const Vec4V cosCoefficients2 = V4LoadA(g_NVCosCoefficients2.f);
const FloatV C1 = V4GetY(cosCoefficients0);
const FloatV C2 = V4GetZ(cosCoefficients0);
const FloatV C3 = V4GetW(cosCoefficients0);
const FloatV C4 = V4GetX(cosCoefficients1);
const FloatV C5 = V4GetY(cosCoefficients1);
const FloatV C6 = V4GetZ(cosCoefficients1);
const FloatV C7 = V4GetW(cosCoefficients1);
const FloatV C8 = V4GetX(cosCoefficients2);
const FloatV C9 = V4GetY(cosCoefficients2);
const FloatV C10 = V4GetZ(cosCoefficients2);
const FloatV C11 = V4GetW(cosCoefficients2);
Result = FMulAdd(C1, V2, V4One());
Result = FMulAdd(C2, V4, Result);
Result = FMulAdd(C3, V6, Result);
Result = FMulAdd(C4, V8, Result);
Result = FMulAdd(C5, V10, Result);
Result = FMulAdd(C6, V12, Result);
Result = FMulAdd(C7, V14, Result);
Result = FMulAdd(C8, V16, Result);
Result = FMulAdd(C9, V18, Result);
Result = FMulAdd(C10, V20, Result);
Result = FMulAdd(C11, V22, Result);
return Result;
}
NV_FORCE_INLINE uint32_t FOutOfBounds(const FloatV a, const FloatV min, const FloatV max)
{
const BoolV ffff = BFFFF();
const BoolV c = BOr(FIsGrtr(a, max), FIsGrtr(min, a));
return !BAllEq(c, ffff);
}
NV_FORCE_INLINE uint32_t FInBounds(const FloatV a, const FloatV min, const FloatV max)
{
const BoolV tttt = BTTTT();
const BoolV c = BAnd(FIsGrtrOrEq(a, min), FIsGrtrOrEq(max, a));
return BAllEq(c, tttt);
}
NV_FORCE_INLINE uint32_t FOutOfBounds(const FloatV a, const FloatV bounds)
{
return FOutOfBounds(a, FNeg(bounds), bounds);
}
NV_FORCE_INLINE uint32_t FInBounds(const FloatV a, const FloatV bounds)
{
return FInBounds(a, FNeg(bounds), bounds);
}
//////////////////////////////////
//VEC3V
//////////////////////////////////
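// A Vec3V is a full __m128 whose w lane is kept at zero (presumably what the isValidVec3V asserts
// guard). Several operations below are written around that invariant: V3Div/V3DivFast select the
// divisor's w lane from V3One() before dividing, and V3Recip/V3Rsqrt and their fast variants force
// the w lane of the result back to zero so the invariant is preserved.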
NV_FORCE_INLINE Vec3V V3Splat(const FloatV f)
{
VECMATHAOS_ASSERT(isValidFloatV(f));
const __m128 zero=FZero();
const __m128 fff0 = _mm_move_ss(f, zero);
return _mm_shuffle_ps(fff0, fff0, _MM_SHUFFLE(0,1,2,3));
}
NV_FORCE_INLINE Vec3V V3Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z)
{
VECMATHAOS_ASSERT(isValidFloatV(x));
VECMATHAOS_ASSERT(isValidFloatV(y));
VECMATHAOS_ASSERT(isValidFloatV(z));
// static on zero causes compiler crash on x64 debug_opt
const __m128 zero=FZero();
const __m128 xy = _mm_move_ss(x, y);
const __m128 z0 = _mm_move_ss(zero, z);
return _mm_shuffle_ps(xy, z0, _MM_SHUFFLE(1,0,0,1));
}
NV_FORCE_INLINE Vec3V V3UnitX()
{
const NV_ALIGN(16,float) x[4]={1.0f,0.0f,0.0f,0.0f};
const __m128 x128=_mm_load_ps(x);
return x128;
}
NV_FORCE_INLINE Vec3V V3UnitY()
{
const NV_ALIGN(16,float) y[4]={0.0f,1.0f,0.0f,0.0f};
const __m128 y128=_mm_load_ps(y);
return y128;
}
NV_FORCE_INLINE Vec3V V3UnitZ()
{
const NV_ALIGN(16,float) z[4]={0.0f,0.0f,1.0f,0.0f};
const __m128 z128=_mm_load_ps(z);
return z128;
}
NV_FORCE_INLINE FloatV V3GetX(const Vec3V f)
{
VECMATHAOS_ASSERT(isValidVec3V(f));
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(0,0,0,0));
}
NV_FORCE_INLINE FloatV V3GetY(const Vec3V f)
{
VECMATHAOS_ASSERT(isValidVec3V(f));
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(1,1,1,1));
}
NV_FORCE_INLINE FloatV V3GetZ(const Vec3V f)
{
VECMATHAOS_ASSERT(isValidVec3V(f));
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(2,2,2,2));
}
NV_FORCE_INLINE Vec3V V3SetX(const Vec3V v, const FloatV f)
{
VECMATHAOS_ASSERT(isValidVec3V(v));
VECMATHAOS_ASSERT(isValidFloatV(f));
return V3Sel(BFTTT(),v,f);
}
NV_FORCE_INLINE Vec3V V3SetY(const Vec3V v, const FloatV f)
{
VECMATHAOS_ASSERT(isValidVec3V(v));
VECMATHAOS_ASSERT(isValidFloatV(f));
return V3Sel(BTFTT(),v,f);
}
NV_FORCE_INLINE Vec3V V3SetZ(const Vec3V v, const FloatV f)
{
VECMATHAOS_ASSERT(isValidVec3V(v));
VECMATHAOS_ASSERT(isValidFloatV(f));
return V3Sel(BTTFT(),v,f);
}
NV_FORCE_INLINE Vec3V V3ColX(const Vec3V a, const Vec3V b, const Vec3V c)
{
Vec3V r = _mm_shuffle_ps(a,c,_MM_SHUFFLE(3,0,3,0));
return V3SetY(r, V3GetX(b));
}
NV_FORCE_INLINE Vec3V V3ColY(const Vec3V a, const Vec3V b, const Vec3V c)
{
Vec3V r = _mm_shuffle_ps(a,c,_MM_SHUFFLE(3,1,3,1));
return V3SetY(r, V3GetY(b));
}
NV_FORCE_INLINE Vec3V V3ColZ(const Vec3V a, const Vec3V b, const Vec3V c)
{
Vec3V r = _mm_shuffle_ps(a,c,_MM_SHUFFLE(3,2,3,2));
return V3SetY(r, V3GetZ(b));
}
NV_FORCE_INLINE Vec3V V3Zero()
{
return V3Load(0.0f);
}
NV_FORCE_INLINE Vec3V V3Eps()
{
return V3Load(NV_EPS_REAL);
}
NV_FORCE_INLINE Vec3V V3One()
{
return V3Load(1.0f);
}
NV_FORCE_INLINE Vec3V V3Neg(const Vec3V f)
{
VECMATHAOS_ASSERT(isValidVec3V(f));
return _mm_sub_ps( _mm_setzero_ps(), f);
}
NV_FORCE_INLINE Vec3V V3Add(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
return _mm_add_ps(a,b);
}
NV_FORCE_INLINE Vec3V V3Sub(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
return _mm_sub_ps(a,b);
}
NV_FORCE_INLINE Vec3V V3Scale(const Vec3V a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return _mm_mul_ps(a,b);
}
NV_FORCE_INLINE Vec3V V3Mul(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
return _mm_mul_ps(a,b);
}
NV_FORCE_INLINE Vec3V V3ScaleInv(const Vec3V a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return _mm_div_ps(a,b);
}
NV_FORCE_INLINE Vec3V V3Div(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
const __m128 one=V3One();
const __m128 tttf=BTTTF();
const __m128 b1=V3Sel(tttf,b,one);
return _mm_div_ps(a,b1);
}
NV_FORCE_INLINE Vec3V V3ScaleInvFast(const Vec3V a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return _mm_mul_ps(a,_mm_rcp_ps(b));
}
NV_FORCE_INLINE Vec3V V3DivFast(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
const __m128 one=V3One();
const __m128 tttf=BTTTF();
const __m128 b1=V3Sel(tttf,b,one);
return _mm_mul_ps(a,_mm_rcp_ps(b1));
}
NV_FORCE_INLINE Vec3V V3Recip(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
const __m128 zero=V3Zero();
const __m128 tttf=BTTTF();
const __m128 recipA=_mm_div_ps(V3One(),a);
return V3Sel(tttf,recipA,zero);
}
NV_FORCE_INLINE Vec3V V3RecipFast(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
const __m128 zero=V3Zero();
const __m128 tttf=BTTTF();
const __m128 recipA=_mm_rcp_ps(a);
return V3Sel(tttf,recipA,zero);
}
NV_FORCE_INLINE Vec3V V3Rsqrt(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
const __m128 zero=V3Zero();
const __m128 tttf=BTTTF();
const __m128 recipA=_mm_div_ps(V3One(),_mm_sqrt_ps(a));
return V3Sel(tttf,recipA,zero);
}
NV_FORCE_INLINE Vec3V V3RsqrtFast(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
const __m128 zero=V3Zero();
const __m128 tttf=BTTTF();
const __m128 recipA=_mm_rsqrt_ps(a);
return V3Sel(tttf,recipA,zero);
}
NV_FORCE_INLINE Vec3V V3ScaleAdd(const Vec3V a, const FloatV b, const Vec3V c)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
VECMATHAOS_ASSERT(isValidVec3V(c));
return V3Add(V3Scale(a,b),c);
}
NV_FORCE_INLINE Vec3V V3NegScaleSub(const Vec3V a, const FloatV b, const Vec3V c)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
VECMATHAOS_ASSERT(isValidVec3V(c));
return V3Sub(c,V3Scale(a,b));
}
NV_FORCE_INLINE Vec3V V3MulAdd(const Vec3V a, const Vec3V b, const Vec3V c)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
VECMATHAOS_ASSERT(isValidVec3V(c));
return V3Add(V3Mul(a,b),c);
}
NV_FORCE_INLINE Vec3V V3NegMulSub(const Vec3V a, const Vec3V b, const Vec3V c)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
VECMATHAOS_ASSERT(isValidVec3V(c));
return V3Sub(c, V3Mul(a,b));
}
NV_FORCE_INLINE Vec3V V3Abs(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
return V3Max(a,V3Neg(a));
}
NV_FORCE_INLINE FloatV V3Dot(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
#ifdef __SSE4_2__
return _mm_dp_ps(a, b, 0x7f);
#else
__m128 dot1 = _mm_mul_ps(a, b);                                  //ax*bx, ay*by, az*bz, aw*bw
__m128 shuf1 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(0,0,0,0)); //ax*bx splatted to all lanes
__m128 shuf2 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(1,1,1,1)); //ay*by splatted to all lanes
__m128 shuf3 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(2,2,2,2)); //az*bz splatted to all lanes
return _mm_add_ps(_mm_add_ps(shuf1, shuf2), shuf3);
#endif
}
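// With SSE4 the dot product uses _mm_dp_ps with mask 0x7f: only the x, y and z lanes are
// multiplied and the sum is broadcast to all four lanes, matching the FloatV convention used in
// this file. The SSE2 fallback splats each of the three products and adds them, ignoring lane w.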
NV_FORCE_INLINE Vec3V V3Cross(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
__m128 r1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 0, 2)); //z,x,y,w
__m128 r2 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1)); //y,z,x,w
__m128 l1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1)); //y,z,x,w
__m128 l2 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 1, 0, 2)); //z,x,y,w
return _mm_sub_ps(_mm_mul_ps(l1, l2), _mm_mul_ps(r1,r2));
}
NV_FORCE_INLINE VecCrossV V3PrepareCross(const Vec3V a)
{
VecCrossV v;
v.mR1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 0, 2)); //z,x,y,w
v.mL1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1)); //y,z,x,w
return v;
}
NV_FORCE_INLINE Vec3V V3Cross(const VecCrossV& a, const Vec3V b)
{
__m128 r2 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1)); //y,z,x,w
__m128 l2 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 1, 0, 2)); //z,x,y,w
return _mm_sub_ps(_mm_mul_ps(a.mL1, l2), _mm_mul_ps(a.mR1, r2));
}
NV_FORCE_INLINE Vec3V V3Cross(const Vec3V a, const VecCrossV& b)
{
__m128 r2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1)); //y,z,x,w
__m128 l2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 0, 2)); //z,x,y,w
return _mm_sub_ps(_mm_mul_ps(b.mR1, r2), _mm_mul_ps(b.mL1, l2));
}
NV_FORCE_INLINE Vec3V V3Cross(const VecCrossV& a, const VecCrossV& b)
{
return _mm_sub_ps(_mm_mul_ps(a.mL1, b.mR1), _mm_mul_ps(a.mR1, b.mL1));
}
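// V3Cross uses the standard two-shuffle formulation: (a.yzx * b.zxy) - (a.zxy * b.yzx).
// VecCrossV caches the two shuffles of one operand, so V3PrepareCross pays that shuffle cost once
// when the same vector is crossed against several others.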
NV_FORCE_INLINE FloatV V3Length(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
return _mm_sqrt_ps(V3Dot(a,a));
}
NV_FORCE_INLINE FloatV V3LengthSq(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
return V3Dot(a,a);
}
NV_FORCE_INLINE Vec3V V3Normalize(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(V3Dot(a,a)!=FZero());
return V3ScaleInv(a, _mm_sqrt_ps(V3Dot(a,a)));
}
NV_FORCE_INLINE Vec3V V3NormalizeFast(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
return V3Mul(a, _mm_rsqrt_ps(V3Dot(a,a)));
}
NV_FORCE_INLINE Vec3V V3NormalizeSafe(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
const __m128 zero=V3Zero();
const __m128 eps=V3Eps();
const __m128 length=V3Length(a);
const __m128 isGreaterThanZero=FIsGrtr(length,eps);
return V3Sel(isGreaterThanZero,V3ScaleInv(a,length),zero);
}
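// Three normalization flavours: V3Normalize divides by the exact square root of the squared
// length, V3NormalizeFast multiplies by the _mm_rsqrt_ps estimate (roughly 12 bits of precision,
// no Newton-Raphson refinement here), and V3NormalizeSafe returns the zero vector whenever the
// length is not greater than NV_EPS_REAL instead of dividing by a near-zero value.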
NV_FORCE_INLINE Vec3V V3Sel(const BoolV c, const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
return _mm_or_ps(_mm_andnot_ps(c, b), _mm_and_ps(c, a));
}
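// The select idiom used by V3Sel/V4Sel (and the integer variants further down) is the classic
// (c AND a) OR (NOT c AND b); it assumes every lane of the BoolV mask is either all ones or all
// zeros, which is what the comparison functions and the BXXXX constants produce.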
NV_FORCE_INLINE BoolV V3IsGrtr(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
return _mm_cmpgt_ps(a,b);
}
NV_FORCE_INLINE BoolV V3IsGrtrOrEq(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
return _mm_cmpge_ps(a,b);
}
NV_FORCE_INLINE BoolV V3IsEq(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
return _mm_cmpeq_ps(a,b);
}
NV_FORCE_INLINE Vec3V V3Max(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
return _mm_max_ps(a, b);
}
NV_FORCE_INLINE Vec3V V3Min(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
return _mm_min_ps(a, b);
}
//Extract the maximum value from a
NV_FORCE_INLINE FloatV V3ExtractMax(const Vec3V a)
{
const __m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0));
const __m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1,1,1,1));
const __m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2));
return _mm_max_ps(_mm_max_ps(shuf1, shuf2), shuf3);
}
//Extract the minimum value from a
NV_FORCE_INLINE FloatV V3ExtractMin(const Vec3V a)
{
const __m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0));
const __m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1,1,1,1));
const __m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2));
return _mm_min_ps(_mm_min_ps(shuf1, shuf2), shuf3);
}
//return (a >= 0.0f) ? 1.0f : -1.0f;
NV_FORCE_INLINE Vec3V V3Sign(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
const __m128 zero = V3Zero();
const __m128 one = V3One();
const __m128 none = V3Neg(one);
return V3Sel(V3IsGrtrOrEq(a, zero), one, none);
}
NV_FORCE_INLINE Vec3V V3Clamp(const Vec3V a, const Vec3V minV, const Vec3V maxV)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(minV));
VECMATHAOS_ASSERT(isValidVec3V(maxV));
return V3Max(V3Min(a,maxV),minV);
}
NV_FORCE_INLINE uint32_t V3AllGrtr(const Vec3V a, const Vec3V b)
{
return internalUnitSSE2Simd::BAllTrue3_R(V4IsGrtr(a, b));
}
NV_FORCE_INLINE uint32_t V3AllGrtrOrEq(const Vec3V a, const Vec3V b)
{
return internalUnitSSE2Simd::BAllTrue3_R(V4IsGrtrOrEq(a, b));
}
NV_FORCE_INLINE uint32_t V3AllEq(const Vec3V a, const Vec3V b)
{
return internalUnitSSE2Simd::BAllTrue3_R(V4IsEq(a, b));
}
NV_FORCE_INLINE Vec3V V3Round(const Vec3V a)
{
#ifdef __SSE4_2__
return _mm_round_ps( a, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC );
#else
//return _mm_round_ps(a, 0x0);
const Vec3V half = V3Load(0.5f);
const __m128 signBit = _mm_cvtepi32_ps(_mm_srli_epi32(_mm_cvtps_epi32(a), 31));
const Vec3V aRound = V3Sub(V3Add(a, half), signBit);
__m128i tmp = _mm_cvttps_epi32(aRound);
return _mm_cvtepi32_ps(tmp);
#endif
}
NV_FORCE_INLINE Vec3V V3Sin(const Vec3V a)
{
//Vec4V V1, V2, V3, V5, V7, V9, V11, V13, V15, V17, V19, V21, V23;
//Vec4V S1, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11;
Vec3V Result;
// Reduce the given angles into the [-PI, PI) range expected by the polynomial below
const Vec3V recipTwoPi = V4LoadA(g_NVReciprocalTwoPi.f);
const Vec3V twoPi = V4LoadA(g_NVTwoPi.f);
const Vec3V tmp = V3Mul(a, recipTwoPi);
const Vec3V b = V3Round(tmp);
const Vec3V V1 = V3NegMulSub(twoPi, b, a);
// sin(V) ~= V - V^3 / 3! + V^5 / 5! - V^7 / 7! + V^9 / 9! - V^11 / 11! + V^13 / 13! -
// V^15 / 15! + V^17 / 17! - V^19 / 19! + V^21 / 21! - V^23 / 23! (for -PI <= V < PI)
const Vec3V V2 = V3Mul(V1, V1);
const Vec3V V3 = V3Mul(V2, V1);
const Vec3V V5 = V3Mul(V3, V2);
const Vec3V V7 = V3Mul(V5, V2);
const Vec3V V9 = V3Mul(V7, V2);
const Vec3V V11 = V3Mul(V9, V2);
const Vec3V V13 = V3Mul(V11, V2);
const Vec3V V15 = V3Mul(V13, V2);
const Vec3V V17 = V3Mul(V15, V2);
const Vec3V V19 = V3Mul(V17, V2);
const Vec3V V21 = V3Mul(V19, V2);
const Vec3V V23 = V3Mul(V21, V2);
const Vec4V sinCoefficients0 = V4LoadA(g_NVSinCoefficients0.f);
const Vec4V sinCoefficients1 = V4LoadA(g_NVSinCoefficients1.f);
const Vec4V sinCoefficients2 = V4LoadA(g_NVSinCoefficients2.f);
const FloatV S1 = V4GetY(sinCoefficients0);
const FloatV S2 = V4GetZ(sinCoefficients0);
const FloatV S3 = V4GetW(sinCoefficients0);
const FloatV S4 = V4GetX(sinCoefficients1);
const FloatV S5 = V4GetY(sinCoefficients1);
const FloatV S6 = V4GetZ(sinCoefficients1);
const FloatV S7 = V4GetW(sinCoefficients1);
const FloatV S8 = V4GetX(sinCoefficients2);
const FloatV S9 = V4GetY(sinCoefficients2);
const FloatV S10 = V4GetZ(sinCoefficients2);
const FloatV S11 = V4GetW(sinCoefficients2);
Result = V3MulAdd(S1, V3, V1);
Result = V3MulAdd(S2, V5, Result);
Result = V3MulAdd(S3, V7, Result);
Result = V3MulAdd(S4, V9, Result);
Result = V3MulAdd(S5, V11, Result);
Result = V3MulAdd(S6, V13, Result);
Result = V3MulAdd(S7, V15, Result);
Result = V3MulAdd(S8, V17, Result);
Result = V3MulAdd(S9, V19, Result);
Result = V3MulAdd(S10, V21, Result);
Result = V3MulAdd(S11, V23, Result);
return Result;
}
NV_FORCE_INLINE Vec3V V3Cos(const Vec3V a)
{
//XMVECTOR V1, V2, V4, V6, V8, V10, V12, V14, V16, V18, V20, V22;
//XMVECTOR C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11;
Vec3V Result;
// Reduce the given angles into the [-PI, PI) range expected by the polynomial below
const Vec3V recipTwoPi = V4LoadA(g_NVReciprocalTwoPi.f);
const Vec3V twoPi = V4LoadA(g_NVTwoPi.f);
const Vec3V tmp = V3Mul(a, recipTwoPi);
const Vec3V b = V3Round(tmp);
const Vec3V V1 = V3NegMulSub(twoPi, b, a);
// cos(V) ~= 1 - V^2 / 2! + V^4 / 4! - V^6 / 6! + V^8 / 8! - V^10 / 10! + V^12 / 12! -
// V^14 / 14! + V^16 / 16! - V^18 / 18! + V^20 / 20! - V^22 / 22! (for -PI <= V < PI)
const Vec3V V2 = V3Mul(V1, V1);
const Vec3V V4 = V3Mul(V2, V2);
const Vec3V V6 = V3Mul(V4, V2);
const Vec3V V8 = V3Mul(V4, V4);
const Vec3V V10 = V3Mul(V6, V4);
const Vec3V V12 = V3Mul(V6, V6);
const Vec3V V14 = V3Mul(V8, V6);
const Vec3V V16 = V3Mul(V8, V8);
const Vec3V V18 = V3Mul(V10, V8);
const Vec3V V20 = V3Mul(V10, V10);
const Vec3V V22 = V3Mul(V12, V10);
const Vec4V cosCoefficients0 = V4LoadA(g_NVCosCoefficients0.f);
const Vec4V cosCoefficients1 = V4LoadA(g_NVCosCoefficients1.f);
const Vec4V cosCoefficients2 = V4LoadA(g_NVCosCoefficients2.f);
const FloatV C1 = V4GetY(cosCoefficients0);
const FloatV C2 = V4GetZ(cosCoefficients0);
const FloatV C3 = V4GetW(cosCoefficients0);
const FloatV C4 = V4GetX(cosCoefficients1);
const FloatV C5 = V4GetY(cosCoefficients1);
const FloatV C6 = V4GetZ(cosCoefficients1);
const FloatV C7 = V4GetW(cosCoefficients1);
const FloatV C8 = V4GetX(cosCoefficients2);
const FloatV C9 = V4GetY(cosCoefficients2);
const FloatV C10 = V4GetZ(cosCoefficients2);
const FloatV C11 = V4GetW(cosCoefficients2);
Result = V3MulAdd(C1, V2, V4One());
Result = V3MulAdd(C2, V4, Result);
Result = V3MulAdd(C3, V6, Result);
Result = V3MulAdd(C4, V8, Result);
Result = V3MulAdd(C5, V10, Result);
Result = V3MulAdd(C6, V12, Result);
Result = V3MulAdd(C7, V14, Result);
Result = V3MulAdd(C8, V16, Result);
Result = V3MulAdd(C9, V18, Result);
Result = V3MulAdd(C10, V20, Result);
Result = V3MulAdd(C11, V22, Result);
return Result;
}
NV_FORCE_INLINE Vec3V V3PermYZZ(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
return _mm_shuffle_ps(a,a,_MM_SHUFFLE(3,2,2,1));
}
NV_FORCE_INLINE Vec3V V3PermXYX(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
return _mm_shuffle_ps(a,a,_MM_SHUFFLE(3,0,1,0));
}
NV_FORCE_INLINE Vec3V V3PermYZX(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
return _mm_shuffle_ps(a,a,_MM_SHUFFLE(3,0,2,1));
}
NV_FORCE_INLINE Vec3V V3PermZXY(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3,1,0,2));
}
NV_FORCE_INLINE Vec3V V3PermZZY(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3,1,2,2));
}
NV_FORCE_INLINE Vec3V V3PermYXX(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3,0,0,1));
}
NV_FORCE_INLINE Vec3V V3Perm_Zero_1Z_0Y(const Vec3V v0, const Vec3V v1)
{
VECMATHAOS_ASSERT(isValidVec3V(v0));
VECMATHAOS_ASSERT(isValidVec3V(v1));
return _mm_shuffle_ps(v1, v0, _MM_SHUFFLE(3,1,2,3));
}
NV_FORCE_INLINE Vec3V V3Perm_0Z_Zero_1X(const Vec3V v0, const Vec3V v1)
{
VECMATHAOS_ASSERT(isValidVec3V(v0));
VECMATHAOS_ASSERT(isValidVec3V(v1));
return _mm_shuffle_ps(v0, v1, _MM_SHUFFLE(3,0,3,2));
}
NV_FORCE_INLINE Vec3V V3Perm_1Y_0X_Zero(const Vec3V v0, const Vec3V v1)
{
VECMATHAOS_ASSERT(isValidVec3V(v0));
VECMATHAOS_ASSERT(isValidVec3V(v1));
//There must be a better way to do this.
Vec3V v2=V3Zero();
FloatV y1=V3GetY(v1);
FloatV x0=V3GetX(v0);
v2=V3SetX(v2,y1);
return V3SetY(v2,x0);
}
NV_FORCE_INLINE FloatV V3SumElems(const Vec3V a)
{
#ifdef __SSE4_2__
Vec3V r = _mm_hadd_ps(a,a);
r = _mm_hadd_ps(r,r);
return r;
#else
VECMATHAOS_ASSERT(isValidVec3V(a));
__m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0)); //a.x splatted to all lanes
__m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1,1,1,1)); //a.y splatted to all lanes
__m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2)); //a.z splatted to all lanes
return _mm_add_ps(_mm_add_ps(shuf1, shuf2), shuf3);
#endif
}
NV_FORCE_INLINE uint32_t V3OutOfBounds(const Vec3V a, const Vec3V min, const Vec3V max)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(min));
VECMATHAOS_ASSERT(isValidVec3V(max));
const BoolV ffff = BFFFF();
const BoolV c = BOr(V3IsGrtr(a, max), V3IsGrtr(min, a));
return !BAllEq(c, ffff);
}
NV_FORCE_INLINE uint32_t V3InBounds(const Vec3V a, const Vec3V min, const Vec3V max)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(min));
VECMATHAOS_ASSERT(isValidVec3V(max));
const BoolV tttt = BTTTT();
const BoolV c = BAnd(V3IsGrtrOrEq(a, min), V3IsGrtrOrEq(max, a));
return BAllEq(c, tttt);
}
NV_FORCE_INLINE uint32_t V3OutOfBounds(const Vec3V a, const Vec3V bounds)
{
return V3OutOfBounds(a, V3Neg(bounds), bounds);
}
NV_FORCE_INLINE uint32_t V3InBounds(const Vec3V a, const Vec3V bounds)
{
return V3InBounds(a, V3Neg(bounds), bounds);
}
//////////////////////////////////
//VEC4V
//////////////////////////////////
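// For Vec4V all four lanes are meaningful, so there is no w-lane bookkeeping in this section;
// V4Splat can simply return its FloatV argument because a FloatV already holds the same value in
// every lane.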
NV_FORCE_INLINE Vec4V V4Splat(const FloatV f)
{
VECMATHAOS_ASSERT(isValidFloatV(f));
//return _mm_shuffle_ps(f, f, _MM_SHUFFLE(0,0,0,0));
return f;
}
NV_FORCE_INLINE Vec4V V4Merge(const FloatV* const floatVArray)
{
VECMATHAOS_ASSERT(isValidFloatV(floatVArray[0]));
VECMATHAOS_ASSERT(isValidFloatV(floatVArray[1]));
VECMATHAOS_ASSERT(isValidFloatV(floatVArray[2]));
VECMATHAOS_ASSERT(isValidFloatV(floatVArray[3]));
__m128 xw = _mm_move_ss(floatVArray[1], floatVArray[0]); //y, y, y, x
__m128 yz = _mm_move_ss(floatVArray[2], floatVArray[3]); //z, z, z, w
return (_mm_shuffle_ps(xw,yz,_MM_SHUFFLE(0,2,1,0)));
}
NV_FORCE_INLINE Vec4V V4Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z, const FloatVArg w)
{
VECMATHAOS_ASSERT(isValidFloatV(x));
VECMATHAOS_ASSERT(isValidFloatV(y));
VECMATHAOS_ASSERT(isValidFloatV(z));
VECMATHAOS_ASSERT(isValidFloatV(w));
__m128 xw = _mm_move_ss(y, x); //y, y, y, x
__m128 yz = _mm_move_ss(z, w); //z, z, z, w
return (_mm_shuffle_ps(xw,yz,_MM_SHUFFLE(0,2,1,0)));
}
NV_FORCE_INLINE Vec4V V4MergeW(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w)
{
const Vec4V xz = _mm_unpackhi_ps(x, z);
const Vec4V yw = _mm_unpackhi_ps(y, w);
return _mm_unpackhi_ps(xz, yw);
}
NV_FORCE_INLINE Vec4V V4MergeZ(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w)
{
const Vec4V xz = _mm_unpackhi_ps(x, z);
const Vec4V yw = _mm_unpackhi_ps(y, w);
return _mm_unpacklo_ps(xz, yw);
}
NV_FORCE_INLINE Vec4V V4MergeY(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w)
{
const Vec4V xz = _mm_unpacklo_ps(x, z);
const Vec4V yw = _mm_unpacklo_ps(y, w);
return _mm_unpackhi_ps(xz, yw);
}
NV_FORCE_INLINE Vec4V V4MergeX(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w)
{
const Vec4V xz = _mm_unpacklo_ps(x, z);
const Vec4V yw = _mm_unpacklo_ps(y, w);
return _mm_unpacklo_ps(xz, yw);
}
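// The V4MergeX/Y/Z/W helpers gather one named component from each of the four inputs, e.g.
// V4MergeX(x, y, z, w) = (x.x, y.x, z.x, w.x), using the usual unpacklo/unpackhi ladder; each one
// is effectively a single row of a 4x4 transpose.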
NV_FORCE_INLINE Vec4V V4UnpackXY(const Vec4VArg a, const Vec4VArg b)
{
return _mm_unpacklo_ps(a, b);
}
NV_FORCE_INLINE Vec4V V4UnpackZW(const Vec4VArg a, const Vec4VArg b)
{
return _mm_unpackhi_ps(a, b);
}
NV_FORCE_INLINE Vec4V V4UnitW()
{
const NV_ALIGN(16,float) w[4]={0.0f,0.0f,0.0f,1.0f};
const __m128 w128=_mm_load_ps(w);
return w128;
}
NV_FORCE_INLINE Vec4V V4UnitX()
{
const NV_ALIGN(16,float) x[4]={1.0f,0.0f,0.0f,0.0f};
const __m128 x128=_mm_load_ps(x);
return x128;
}
NV_FORCE_INLINE Vec4V V4UnitY()
{
const NV_ALIGN(16,float) y[4]={0.0f,1.0f,0.0f,0.0f};
const __m128 y128=_mm_load_ps(y);
return y128;
}
NV_FORCE_INLINE Vec4V V4UnitZ()
{
const NV_ALIGN(16,float) z[4]={0.0f,0.0f,1.0f,0.0f};
const __m128 z128=_mm_load_ps(z);
return z128;
}
NV_FORCE_INLINE FloatV V4GetW(const Vec4V f)
{
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(3,3,3,3));
}
NV_FORCE_INLINE FloatV V4GetX(const Vec4V f)
{
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(0,0,0,0));
}
NV_FORCE_INLINE FloatV V4GetY(const Vec4V f)
{
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(1,1,1,1));
}
NV_FORCE_INLINE FloatV V4GetZ(const Vec4V f)
{
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(2,2,2,2));
}
NV_FORCE_INLINE Vec4V V4SetW(const Vec4V v, const FloatV f)
{
VECMATHAOS_ASSERT(isValidFloatV(f));
return V4Sel(BTTTF(),v,f);
}
NV_FORCE_INLINE Vec4V V4SetX(const Vec4V v, const FloatV f)
{
VECMATHAOS_ASSERT(isValidFloatV(f));
return V4Sel(BFTTT(),v,f);
}
NV_FORCE_INLINE Vec4V V4SetY(const Vec4V v, const FloatV f)
{
VECMATHAOS_ASSERT(isValidFloatV(f));
return V4Sel(BTFTT(),v,f);
}
NV_FORCE_INLINE Vec4V V4SetZ(const Vec4V v, const FloatV f)
{
VECMATHAOS_ASSERT(isValidFloatV(f));
return V4Sel(BTTFT(),v,f);
}
NV_FORCE_INLINE Vec4V V4ClearW(const Vec4V v)
{
return _mm_and_ps(v, (VecI32V&)internalUnitSSE2Simd::gMaskXYZ);
}
NV_FORCE_INLINE Vec4V V4Perm_YXWZ(const Vec4V a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,3,0,1));
}
NV_FORCE_INLINE Vec4V V4Perm_XZXZ(const Vec4V a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,0,2,0));
}
NV_FORCE_INLINE Vec4V V4Perm_YWYW(const Vec4V a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3,1,3,1));
}
template<uint8_t x, uint8_t y, uint8_t z, uint8_t w> NV_FORCE_INLINE Vec4V V4Perm(const Vec4V a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(w, z, y, x));
}
NV_FORCE_INLINE Vec4V V4Zero()
{
return V4Load(0.0f);
}
NV_FORCE_INLINE Vec4V V4One()
{
return V4Load(1.0f);
}
NV_FORCE_INLINE Vec4V V4Eps()
{
return V4Load(NV_EPS_REAL);
}
NV_FORCE_INLINE Vec4V V4Neg(const Vec4V f)
{
return _mm_sub_ps( _mm_setzero_ps(), f);
}
NV_FORCE_INLINE Vec4V V4Add(const Vec4V a, const Vec4V b)
{
return _mm_add_ps(a,b);
}
NV_FORCE_INLINE Vec4V V4Sub(const Vec4V a, const Vec4V b)
{
return _mm_sub_ps(a,b);
}
NV_FORCE_INLINE Vec4V V4Scale(const Vec4V a, const FloatV b)
{
return _mm_mul_ps(a,b);
}
NV_FORCE_INLINE Vec4V V4Mul(const Vec4V a, const Vec4V b)
{
return _mm_mul_ps(a,b);
}
NV_FORCE_INLINE Vec4V V4ScaleInv(const Vec4V a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(b));
return _mm_div_ps(a,b);
}
NV_FORCE_INLINE Vec4V V4Div(const Vec4V a, const Vec4V b)
{
return _mm_div_ps(a,b);
}
NV_FORCE_INLINE Vec4V V4ScaleInvFast(const Vec4V a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(b));
return _mm_mul_ps(a,_mm_rcp_ps(b));
}
NV_FORCE_INLINE Vec4V V4DivFast(const Vec4V a, const Vec4V b)
{
return _mm_mul_ps(a,_mm_rcp_ps(b));
}
NV_FORCE_INLINE Vec4V V4Recip(const Vec4V a)
{
return _mm_div_ps(V4One(),a);
}
NV_FORCE_INLINE Vec4V V4RecipFast(const Vec4V a)
{
return _mm_rcp_ps(a);
}
NV_FORCE_INLINE Vec4V V4Rsqrt(const Vec4V a)
{
return _mm_div_ps(V4One(),_mm_sqrt_ps(a));
}
NV_FORCE_INLINE Vec4V V4RsqrtFast(const Vec4V a)
{
return _mm_rsqrt_ps(a);
}
NV_FORCE_INLINE Vec4V V4Sqrt(const Vec4V a)
{
return _mm_sqrt_ps(a);
}
NV_FORCE_INLINE Vec4V V4ScaleAdd(const Vec4V a, const FloatV b, const Vec4V c)
{
VECMATHAOS_ASSERT(isValidFloatV(b));
return V4Add(V4Scale(a,b),c);
}
NV_FORCE_INLINE Vec4V V4NegScaleSub(const Vec4V a, const FloatV b, const Vec4V c)
{
VECMATHAOS_ASSERT(isValidFloatV(b));
return V4Sub(c,V4Scale(a,b));
}
NV_FORCE_INLINE Vec4V V4MulAdd(const Vec4V a, const Vec4V b, const Vec4V c)
{
return V4Add(V4Mul(a,b),c);
}
NV_FORCE_INLINE Vec4V V4NegMulSub(const Vec4V a, const Vec4V b, const Vec4V c)
{
return V4Sub(c,V4Mul(a,b));
}
NV_FORCE_INLINE Vec4V V4Abs(const Vec4V a)
{
return V4Max(a,V4Neg(a));
}
NV_FORCE_INLINE FloatV V4SumElements(const Vec4V a)
{
#ifdef __SSE4_2__
Vec4V r = _mm_hadd_ps(a,a);
r = _mm_hadd_ps(r,r);
return r;
#else
const Vec4V xy = V4UnpackXY(a, a); //x,x,y,y
const Vec4V zw = V4UnpackZW(a, a); //z,z,w,w
const Vec4V xz_yw = V4Add(xy, zw); //x+z,x+z,y+w,y+w
const FloatV xz = V4GetX(xz_yw); //x+z
const FloatV yw = V4GetZ(xz_yw); //y+w
return FAdd(xz, yw); //sum
#endif
}
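// Note that _mm_hadd_ps is an SSE3 instruction; gating it on __SSE4_2__ (here and in V3SumElems
// above) is simply conservative, since SSE4.2 implies SSE3. The fallback forms the partial sums
// x+z and y+w with unpacks and then adds them.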
NV_FORCE_INLINE FloatV V4Dot(const Vec4V a, const Vec4V b)
{
#ifdef __SSE4_2__
return _mm_dp_ps(a, b, 0xff);
#else
__m128 dot1 = _mm_mul_ps(a, b); //x,y,z,w
__m128 shuf1 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(2,1,0,3)); //w,x,y,z
__m128 shuf2 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(1,0,3,2)); //z,w,x,y
__m128 shuf3 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(0,3,2,1)); //y,z,w,x
return _mm_add_ps(_mm_add_ps(shuf2, shuf3), _mm_add_ps(dot1,shuf1));
#endif
}
NV_FORCE_INLINE FloatV V4Length(const Vec4V a)
{
return _mm_sqrt_ps(V4Dot(a,a));
}
NV_FORCE_INLINE FloatV V4LengthSq(const Vec4V a)
{
return V4Dot(a,a);
}
NV_FORCE_INLINE Vec4V V4Normalize(const Vec4V a)
{
VECMATHAOS_ASSERT(V4Dot(a,a)!=FZero());
return V4ScaleInv(a,_mm_sqrt_ps(V4Dot(a,a)));
}
NV_FORCE_INLINE Vec4V V4NormalizeFast(const Vec4V a)
{
return V4ScaleInvFast(a,_mm_sqrt_ps(V4Dot(a,a)));
}
NV_FORCE_INLINE Vec4V V4NormalizeSafe(const Vec4V a)
{
const __m128 zero=FZero();
const __m128 eps=V4Eps();
const __m128 length=V4Length(a);
const __m128 isGreaterThanZero=V4IsGrtr(length,eps);
return V4Sel(isGreaterThanZero,V4ScaleInv(a,length),zero);
}
NV_FORCE_INLINE BoolV V4IsEqU32(const VecU32V a, const VecU32V b)
{
return m128_I2F(_mm_cmpeq_epi32(m128_F2I(a), m128_F2I(b)));
}
NV_FORCE_INLINE Vec4V V4Sel(const BoolV c, const Vec4V a, const Vec4V b)
{
return _mm_or_ps(_mm_andnot_ps(c, b), _mm_and_ps(c, a));
}
NV_FORCE_INLINE BoolV V4IsGrtr(const Vec4V a, const Vec4V b)
{
return _mm_cmpgt_ps(a,b);
}
NV_FORCE_INLINE BoolV V4IsGrtrOrEq(const Vec4V a, const Vec4V b)
{
return _mm_cmpge_ps(a,b);
}
NV_FORCE_INLINE BoolV V4IsEq(const Vec4V a, const Vec4V b)
{
return _mm_cmpeq_ps(a,b);
}
NV_FORCE_INLINE Vec4V V4Max(const Vec4V a, const Vec4V b)
{
return _mm_max_ps(a, b);
}
NV_FORCE_INLINE Vec4V V4Min(const Vec4V a, const Vec4V b)
{
return _mm_min_ps(a, b);
}
NV_FORCE_INLINE FloatV V4ExtractMax(const Vec4V a)
{
__m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,1,0,3));
__m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1,0,3,2));
__m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0,3,2,1));
return _mm_max_ps(_mm_max_ps(a, shuf1), _mm_max_ps(shuf2, shuf3));
}
NV_FORCE_INLINE FloatV V4ExtractMin(const Vec4V a)
{
__m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,1,0,3));
__m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1,0,3,2));
__m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0,3,2,1));
return _mm_min_ps(_mm_min_ps(a, shuf1), _mm_min_ps(shuf2, shuf3));
}
NV_FORCE_INLINE Vec4V V4Clamp(const Vec4V a, const Vec4V minV, const Vec4V maxV)
{
return V4Max(V4Min(a,maxV),minV);
}
NV_FORCE_INLINE uint32_t V4AllGrtr(const Vec4V a, const Vec4V b)
{
return internalUnitSSE2Simd::BAllTrue4_R(V4IsGrtr(a, b));
}
NV_FORCE_INLINE uint32_t V4AllGrtrOrEq(const Vec4V a, const Vec4V b)
{
return internalUnitSSE2Simd::BAllTrue4_R(V4IsGrtrOrEq(a, b));
}
NV_FORCE_INLINE uint32_t V4AllEq(const Vec4V a, const Vec4V b)
{
return internalUnitSSE2Simd::BAllTrue4_R(V4IsEq(a, b));
}
NV_FORCE_INLINE Vec4V V4Round(const Vec4V a)
{
#ifdef __SSE4_2__
return _mm_round_ps( a, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC );
#else
//return _mm_round_ps(a, 0x0);
const Vec4V half = V4Load(0.5f);
const __m128 signBit = _mm_cvtepi32_ps(_mm_srli_epi32(_mm_cvtps_epi32(a), 31));
const Vec4V aRound = V4Sub(V4Add(a, half), signBit);
__m128i tmp = _mm_cvttps_epi32(aRound);
return _mm_cvtepi32_ps(tmp);
#endif
}
NV_FORCE_INLINE Vec4V V4Sin(const Vec4V a)
{
//Vec4V V1, V2, V3, V5, V7, V9, V11, V13, V15, V17, V19, V21, V23;
//Vec4V S1, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11;
Vec4V Result;
const Vec4V recipTwoPi = V4LoadA(g_NVReciprocalTwoPi.f);
const Vec4V twoPi = V4LoadA(g_NVTwoPi.f);
const Vec4V tmp = V4Mul(a, recipTwoPi);
const Vec4V b = V4Round(tmp);
const Vec4V V1 = V4NegMulSub(twoPi, b, a);
// sin(V) ~= V - V^3 / 3! + V^5 / 5! - V^7 / 7! + V^9 / 9! - V^11 / 11! + V^13 / 13! -
// V^15 / 15! + V^17 / 17! - V^19 / 19! + V^21 / 21! - V^23 / 23! (for -PI <= V < PI)
const Vec4V V2 = V4Mul(V1, V1);
const Vec4V V3 = V4Mul(V2, V1);
const Vec4V V5 = V4Mul(V3, V2);
const Vec4V V7 = V4Mul(V5, V2);
const Vec4V V9 = V4Mul(V7, V2);
const Vec4V V11 = V4Mul(V9, V2);
const Vec4V V13 = V4Mul(V11, V2);
const Vec4V V15 = V4Mul(V13, V2);
const Vec4V V17 = V4Mul(V15, V2);
const Vec4V V19 = V4Mul(V17, V2);
const Vec4V V21 = V4Mul(V19, V2);
const Vec4V V23 = V4Mul(V21, V2);
const Vec4V sinCoefficients0 = V4LoadA(g_NVSinCoefficients0.f);
const Vec4V sinCoefficients1 = V4LoadA(g_NVSinCoefficients1.f);
const Vec4V sinCoefficients2 = V4LoadA(g_NVSinCoefficients2.f);
const FloatV S1 = V4GetY(sinCoefficients0);
const FloatV S2 = V4GetZ(sinCoefficients0);
const FloatV S3 = V4GetW(sinCoefficients0);
const FloatV S4 = V4GetX(sinCoefficients1);
const FloatV S5 = V4GetY(sinCoefficients1);
const FloatV S6 = V4GetZ(sinCoefficients1);
const FloatV S7 = V4GetW(sinCoefficients1);
const FloatV S8 = V4GetX(sinCoefficients2);
const FloatV S9 = V4GetY(sinCoefficients2);
const FloatV S10 = V4GetZ(sinCoefficients2);
const FloatV S11 = V4GetW(sinCoefficients2);
Result = V4MulAdd(S1, V3, V1);
Result = V4MulAdd(S2, V5, Result);
Result = V4MulAdd(S3, V7, Result);
Result = V4MulAdd(S4, V9, Result);
Result = V4MulAdd(S5, V11, Result);
Result = V4MulAdd(S6, V13, Result);
Result = V4MulAdd(S7, V15, Result);
Result = V4MulAdd(S8, V17, Result);
Result = V4MulAdd(S9, V19, Result);
Result = V4MulAdd(S10, V21, Result);
Result = V4MulAdd(S11, V23, Result);
return Result;
}
NV_FORCE_INLINE Vec4V V4Cos(const Vec4V a)
{
//XMVECTOR V1, V2, V4, V6, V8, V10, V12, V14, V16, V18, V20, V22;
//XMVECTOR C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11;
Vec4V Result;
const Vec4V recipTwoPi = V4LoadA(g_NVReciprocalTwoPi.f);
const Vec4V twoPi = V4LoadA(g_NVTwoPi.f);
const Vec4V tmp = V4Mul(a, recipTwoPi);
const Vec4V b = V4Round(tmp);
const Vec4V V1 = V4NegMulSub(twoPi, b, a);
// cos(V) ~= 1 - V^2 / 2! + V^4 / 4! - V^6 / 6! + V^8 / 8! - V^10 / 10! + V^12 / 12! -
// V^14 / 14! + V^16 / 16! - V^18 / 18! + V^20 / 20! - V^22 / 22! (for -PI <= V < PI)
const Vec4V V2 = V4Mul(V1, V1);
const Vec4V V4 = V4Mul(V2, V2);
const Vec4V V6 = V4Mul(V4, V2);
const Vec4V V8 = V4Mul(V4, V4);
const Vec4V V10 = V4Mul(V6, V4);
const Vec4V V12 = V4Mul(V6, V6);
const Vec4V V14 = V4Mul(V8, V6);
const Vec4V V16 = V4Mul(V8, V8);
const Vec4V V18 = V4Mul(V10, V8);
const Vec4V V20 = V4Mul(V10, V10);
const Vec4V V22 = V4Mul(V12, V10);
const Vec4V cosCoefficients0 = V4LoadA(g_NVCosCoefficients0.f);
const Vec4V cosCoefficients1 = V4LoadA(g_NVCosCoefficients1.f);
const Vec4V cosCoefficients2 = V4LoadA(g_NVCosCoefficients2.f);
const FloatV C1 = V4GetY(cosCoefficients0);
const FloatV C2 = V4GetZ(cosCoefficients0);
const FloatV C3 = V4GetW(cosCoefficients0);
const FloatV C4 = V4GetX(cosCoefficients1);
const FloatV C5 = V4GetY(cosCoefficients1);
const FloatV C6 = V4GetZ(cosCoefficients1);
const FloatV C7 = V4GetW(cosCoefficients1);
const FloatV C8 = V4GetX(cosCoefficients2);
const FloatV C9 = V4GetY(cosCoefficients2);
const FloatV C10 = V4GetZ(cosCoefficients2);
const FloatV C11 = V4GetW(cosCoefficients2);
Result = V4MulAdd(C1, V2, V4One());
Result = V4MulAdd(C2, V4, Result);
Result = V4MulAdd(C3, V6, Result);
Result = V4MulAdd(C4, V8, Result);
Result = V4MulAdd(C5, V10, Result);
Result = V4MulAdd(C6, V12, Result);
Result = V4MulAdd(C7, V14, Result);
Result = V4MulAdd(C8, V16, Result);
Result = V4MulAdd(C9, V18, Result);
Result = V4MulAdd(C10, V20, Result);
Result = V4MulAdd(C11, V22, Result);
return Result;
}
NV_FORCE_INLINE void V4Transpose(Vec4V& col0, Vec4V& col1, Vec4V& col2, Vec4V& col3)
{
Vec4V tmp0 = _mm_unpacklo_ps(col0, col1);
Vec4V tmp2 = _mm_unpacklo_ps(col2, col3);
Vec4V tmp1 = _mm_unpackhi_ps(col0, col1);
Vec4V tmp3 = _mm_unpackhi_ps(col2, col3);
col0 = _mm_movelh_ps(tmp0, tmp2);
col1 = _mm_movehl_ps(tmp2, tmp0);
col2 = _mm_movelh_ps(tmp1, tmp3);
col3 = _mm_movehl_ps(tmp3, tmp1);
}
//////////////////////////////////
//BoolV
//////////////////////////////////
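// A BoolV is a per-lane mask: each lane is either all ones (true) or all zeros (false).
// The BTFFT-style constructors name the lanes in x,y,z,w order; _mm_set_epi32 takes its arguments
// from the w lane down to the x lane, which is why the argument order below looks reversed.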
NV_FORCE_INLINE BoolV BFFFF()
{
return _mm_setzero_ps();
}
NV_FORCE_INLINE BoolV BFFFT()
{
/*const NV_ALIGN(16, uint32_t f[4])={0,0,0,0xFFFFFFFF};
const __m128 ffft=_mm_load_ps((float*)&f);
return ffft;*/
return m128_I2F(_mm_set_epi32(-1, 0, 0, 0));
}
NV_FORCE_INLINE BoolV BFFTF()
{
/*const NV_ALIGN(16, uint32_t f[4])={0,0,0xFFFFFFFF,0};
const __m128 fftf=_mm_load_ps((float*)&f);
return fftf;*/
return m128_I2F(_mm_set_epi32(0, -1, 0, 0));
}
NV_FORCE_INLINE BoolV BFFTT()
{
/*const NV_ALIGN(16, uint32_t f[4])={0,0,0xFFFFFFFF,0xFFFFFFFF};
const __m128 fftt=_mm_load_ps((float*)&f);
return fftt;*/
return m128_I2F(_mm_set_epi32(-1, -1, 0, 0));
}
NV_FORCE_INLINE BoolV BFTFF()
{
/*const NV_ALIGN(16, uint32_t f[4])={0,0xFFFFFFFF,0,0};
const __m128 ftff=_mm_load_ps((float*)&f);
return ftff;*/
return m128_I2F(_mm_set_epi32(0, 0, -1, 0));
}
NV_FORCE_INLINE BoolV BFTFT()
{
/*const NV_ALIGN(16, uint32_t f[4])={0,0xFFFFFFFF,0,0xFFFFFFFF};
const __m128 ftft=_mm_load_ps((float*)&f);
return ftft;*/
return m128_I2F(_mm_set_epi32(-1, 0, -1, 0));
}
NV_FORCE_INLINE BoolV BFTTF()
{
/*const NV_ALIGN(16, uint32_t f[4])={0,0xFFFFFFFF,0xFFFFFFFF,0};
const __m128 fttf=_mm_load_ps((float*)&f);
return fttf;*/
return m128_I2F(_mm_set_epi32(0, -1, -1, 0));
}
NV_FORCE_INLINE BoolV BFTTT()
{
/*const NV_ALIGN(16, uint32_t f[4])={0,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF};
const __m128 fttt=_mm_load_ps((float*)&f);
return fttt;*/
return m128_I2F(_mm_set_epi32(-1, -1, -1, 0));
}
NV_FORCE_INLINE BoolV BTFFF()
{
//const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0,0,0};
//const __m128 tfff=_mm_load_ps((float*)&f);
//return tfff;
return m128_I2F(_mm_set_epi32(0, 0, 0, -1));
}
NV_FORCE_INLINE BoolV BTFFT()
{
/*const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0,0,0xFFFFFFFF};
const __m128 tfft=_mm_load_ps((float*)&f);
return tfft;*/
return m128_I2F(_mm_set_epi32(-1, 0, 0, -1));
}
NV_FORCE_INLINE BoolV BTFTF()
{
/*const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0,0xFFFFFFFF,0};
const __m128 tftf=_mm_load_ps((float*)&f);
return tftf;*/
return m128_I2F(_mm_set_epi32(0, -1, 0, -1));
}
NV_FORCE_INLINE BoolV BTFTT()
{
/*const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0,0xFFFFFFFF,0xFFFFFFFF};
const __m128 tftt=_mm_load_ps((float*)&f);
return tftt;*/
return m128_I2F(_mm_set_epi32(-1, -1, 0, -1));
}
NV_FORCE_INLINE BoolV BTTFF()
{
/*const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0xFFFFFFFF,0,0};
const __m128 ttff=_mm_load_ps((float*)&f);
return ttff;*/
return m128_I2F(_mm_set_epi32(0, 0, -1, -1));
}
NV_FORCE_INLINE BoolV BTTFT()
{
/*const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0xFFFFFFFF,0,0xFFFFFFFF};
const __m128 ttft=_mm_load_ps((float*)&f);
return ttft;*/
return m128_I2F(_mm_set_epi32(-1, 0, -1, -1));
}
NV_FORCE_INLINE BoolV BTTTF()
{
/*const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0};
const __m128 tttf=_mm_load_ps((float*)&f);
return tttf;*/
return m128_I2F(_mm_set_epi32(0, -1, -1, -1));
}
NV_FORCE_INLINE BoolV BTTTT()
{
/*const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF};
const __m128 tttt=_mm_load_ps((float*)&f);
return tttt;*/
return m128_I2F(_mm_set_epi32(-1, -1, -1, -1));
}
NV_FORCE_INLINE BoolV BXMask()
{
/*const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0,0,0};
const __m128 tfff=_mm_load_ps((float*)&f);
return tfff;*/
return m128_I2F(_mm_set_epi32(0, 0, 0, -1));
}
NV_FORCE_INLINE BoolV BYMask()
{
/*const NV_ALIGN(16, uint32_t f[4])={0,0xFFFFFFFF,0,0};
const __m128 ftff=_mm_load_ps((float*)&f);
return ftff;*/
return m128_I2F(_mm_set_epi32(0, 0, -1, 0));
}
NV_FORCE_INLINE BoolV BZMask()
{
/*const NV_ALIGN(16, uint32_t f[4])={0,0,0xFFFFFFFF,0};
const __m128 fftf=_mm_load_ps((float*)&f);
return fftf;*/
return m128_I2F(_mm_set_epi32(0, -1, 0, 0));
}
NV_FORCE_INLINE BoolV BWMask()
{
/*const NV_ALIGN(16, uint32_t f[4])={0,0,0,0xFFFFFFFF};
const __m128 ffft=_mm_load_ps((float*)&f);
return ffft;*/
return m128_I2F(_mm_set_epi32(-1, 0, 0, 0));
}
NV_FORCE_INLINE BoolV BGetX(const BoolV f)
{
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(0,0,0,0));
}
NV_FORCE_INLINE BoolV BGetY(const BoolV f)
{
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(1,1,1,1));
}
NV_FORCE_INLINE BoolV BGetZ(const BoolV f)
{
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(2,2,2,2));
}
NV_FORCE_INLINE BoolV BGetW(const BoolV f)
{
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(3,3,3,3));
}
NV_FORCE_INLINE BoolV BSetX(const BoolV v, const BoolV f)
{
return V4Sel(BFTTT(),v,f);
}
NV_FORCE_INLINE BoolV BSetY(const BoolV v, const BoolV f)
{
return V4Sel(BTFTT(),v,f);
}
NV_FORCE_INLINE BoolV BSetZ(const BoolV v, const BoolV f)
{
return V4Sel(BTTFT(),v,f);
}
NV_FORCE_INLINE BoolV BSetW(const BoolV v, const BoolV f)
{
return V4Sel(BTTTF(),v,f);
}
NV_FORCE_INLINE BoolV BAnd(const BoolV a, const BoolV b)
{
return (_mm_and_ps(a,b));
}
NV_FORCE_INLINE BoolV BNot(const BoolV a)
{
const BoolV bAllTrue(BTTTT());
return _mm_xor_ps(a, bAllTrue);
}
NV_FORCE_INLINE BoolV BAndNot(const BoolV a, const BoolV b)
{
return (_mm_andnot_ps(b,a));
}
NV_FORCE_INLINE BoolV BOr(const BoolV a, const BoolV b)
{
return (_mm_or_ps(a,b));
}
NV_FORCE_INLINE BoolV BAllTrue4(const BoolV a)
{
const BoolV bTmp = _mm_and_ps(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,1,0,1)), _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,3,2,3)));
return _mm_and_ps(_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(0,0,0,0)), _mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(1,1,1,1)));
}
NV_FORCE_INLINE BoolV BAnyTrue4(const BoolV a)
{
const BoolV bTmp = _mm_or_ps(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,1,0,1)), _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,3,2,3)));
return _mm_or_ps(_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(0,0,0,0)), _mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(1,1,1,1)));
}
NV_FORCE_INLINE BoolV BAllTrue3(const BoolV a)
{
const BoolV bTmp = _mm_and_ps(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,1,0,1)), _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2)));
return _mm_and_ps(_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(0,0,0,0)), _mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(1,1,1,1)));
}
NV_FORCE_INLINE BoolV BAnyTrue3(const BoolV a)
{
const BoolV bTmp = _mm_or_ps(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,1,0,1)), _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2)));
return _mm_or_ps(_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(0,0,0,0)), _mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(1,1,1,1)));
}
NV_FORCE_INLINE uint32_t BAllEq(const BoolV a, const BoolV b)
{
const BoolV bTest = m128_I2F(_mm_cmpeq_epi32(m128_F2I(a), m128_F2I(b)));
return internalUnitSSE2Simd::BAllTrue4_R(bTest);
}
NV_FORCE_INLINE uint32_t BAllEqTTTT(const BoolV a)
{
return uint32_t(_mm_movemask_ps(a)==15);
}
NV_FORCE_INLINE uint32_t BAllEqFFFF(const BoolV a)
{
return uint32_t(_mm_movemask_ps(a)==0);
}
NV_FORCE_INLINE uint32_t BGetBitMask(const BoolV a)
{
return uint32_t(_mm_movemask_ps(a));
}
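// _mm_movemask_ps packs the sign bit of each lane into a 4-bit integer (x in bit 0 ... w in
// bit 3), so BAllEqTTTT tests for 15 and BAllEqFFFF tests for 0.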
//////////////////////////////////
//MAT33V
//////////////////////////////////
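// Matrices are stored column-major: a Mat33V holds three Vec3V columns. M33MulV3 forms the linear
// combination col0*b.x + col1*b.y + col2*b.z, while M33TrnspsMulV3 takes dot products against the
// columns, which multiplies by the transpose without materialising it.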
NV_FORCE_INLINE Vec3V M33MulV3(const Mat33V& a, const Vec3V b)
{
const FloatV x=V3GetX(b);
const FloatV y=V3GetY(b);
const FloatV z=V3GetZ(b);
const Vec3V v0=V3Scale(a.col0,x);
const Vec3V v1=V3Scale(a.col1,y);
const Vec3V v2=V3Scale(a.col2,z);
const Vec3V v0PlusV1=V3Add(v0,v1);
return V3Add(v0PlusV1,v2);
}
NV_FORCE_INLINE Vec3V M33TrnspsMulV3(const Mat33V& a, const Vec3V b)
{
const FloatV x=V3Dot(a.col0,b);
const FloatV y=V3Dot(a.col1,b);
const FloatV z=V3Dot(a.col2,b);
return V3Merge(x,y,z);
}
NV_FORCE_INLINE Vec3V M33MulV3AddV3(const Mat33V& A, const Vec3V b, const Vec3V c)
{
const FloatV x=V3GetX(b);
const FloatV y=V3GetY(b);
const FloatV z=V3GetZ(b);
Vec3V result = V3MulAdd(A.col0, x, c);
result = V3MulAdd(A.col1, y, result);
return V3MulAdd(A.col2, z, result);
}
NV_FORCE_INLINE Mat33V M33MulM33(const Mat33V& a, const Mat33V& b)
{
return Mat33V(M33MulV3(a,b.col0),M33MulV3(a,b.col1),M33MulV3(a,b.col2));
}
NV_FORCE_INLINE Mat33V M33Add(const Mat33V& a, const Mat33V& b)
{
return Mat33V(V3Add(a.col0,b.col0),V3Add(a.col1,b.col1),V3Add(a.col2,b.col2));
}
NV_FORCE_INLINE Mat33V M33Scale(const Mat33V& a, const FloatV& b)
{
return Mat33V(V3Scale(a.col0,b),V3Scale(a.col1,b),V3Scale(a.col2,b));
}
NV_FORCE_INLINE Mat33V M33Inverse(const Mat33V& a)
{
const BoolV tfft=BTFFT();
const BoolV tttf=BTTTF();
const FloatV zero=FZero();
const Vec3V cross01 = V3Cross(a.col0,a.col1);
const Vec3V cross12 = V3Cross(a.col1,a.col2);
const Vec3V cross20 = V3Cross(a.col2,a.col0);
const FloatV dot = V3Dot(cross01,a.col2);
const FloatV invDet = _mm_rcp_ps(dot);
const Vec3V mergeh = _mm_unpacklo_ps(cross12,cross01);
const Vec3V mergel = _mm_unpackhi_ps(cross12,cross01);
Vec3V colInv0 = _mm_unpacklo_ps(mergeh,cross20);
colInv0 = _mm_or_ps(_mm_andnot_ps(tttf, zero), _mm_and_ps(tttf, colInv0));
const Vec3V zppd=_mm_shuffle_ps(mergeh,cross20,_MM_SHUFFLE(3,0,0,2));
const Vec3V pbwp=_mm_shuffle_ps(cross20,mergeh,_MM_SHUFFLE(3,3,1,0));
const Vec3V colInv1=_mm_or_ps(_mm_andnot_ps(BTFFT(), pbwp), _mm_and_ps(BTFFT(), zppd));
const Vec3V xppd=_mm_shuffle_ps(mergel,cross20,_MM_SHUFFLE(3,0,0,0));
const Vec3V pcyp=_mm_shuffle_ps(cross20,mergel,_MM_SHUFFLE(3,1,2,0));
const Vec3V colInv2=_mm_or_ps(_mm_andnot_ps(tfft, pcyp), _mm_and_ps(tfft, xppd));
return Mat33V
(
_mm_mul_ps(colInv0,invDet),
_mm_mul_ps(colInv1,invDet),
_mm_mul_ps(colInv2,invDet)
);
}
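// M33Inverse builds the adjugate from the three column cross products (col1 x col2, col2 x col0,
// col0 x col1), scales by the reciprocal of the determinant dot(col0 x col1, col2), and shuffles
// those rows back into output columns. The reciprocal comes from _mm_rcp_ps, so the result only
// carries the precision of that approximation rather than of a full division.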
NV_FORCE_INLINE Mat33V M33Trnsps(const Mat33V& a)
{
return Mat33V
(
V3Merge(V3GetX(a.col0),V3GetX(a.col1),V3GetX(a.col2)),
V3Merge(V3GetY(a.col0),V3GetY(a.col1),V3GetY(a.col2)),
V3Merge(V3GetZ(a.col0),V3GetZ(a.col1),V3GetZ(a.col2))
);
}
NV_FORCE_INLINE Mat33V M33Identity()
{
return Mat33V
(
V3UnitX(),
V3UnitY(),
V3UnitZ()
);
}
NV_FORCE_INLINE Mat33V M33Sub(const Mat33V& a, const Mat33V& b)
{
return Mat33V(V3Sub(a.col0,b.col0),V3Sub(a.col1,b.col1),V3Sub(a.col2,b.col2));
}
NV_FORCE_INLINE Mat33V M33Neg(const Mat33V& a)
{
return Mat33V(V3Neg(a.col0),V3Neg(a.col1),V3Neg(a.col2));
}
NV_FORCE_INLINE Mat33V M33Abs(const Mat33V& a)
{
return Mat33V(V3Abs(a.col0),V3Abs(a.col1),V3Abs(a.col2));
}
NV_FORCE_INLINE Mat33V PromoteVec3V(const Vec3V v)
{
const BoolV bTFFF = BTFFF();
const BoolV bFTFF = BFTFF();
const BoolV bFFTF = BFFTF();
const Vec3V zero = V3Zero();
return Mat33V( V3Sel(bTFFF, v, zero),
V3Sel(bFTFF, v, zero),
V3Sel(bFFTF, v, zero));
}
NV_FORCE_INLINE Mat33V M33Diagonal(const Vec3VArg d)
{
const Vec3V x = V3Mul(V3UnitX(), d);
const Vec3V y = V3Mul(V3UnitY(), d);
const Vec3V z = V3Mul(V3UnitZ(), d);
return Mat33V(x, y, z);
}
//////////////////////////////////
//MAT34V
//////////////////////////////////
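// A Mat34V is an affine transform: col0..col2 hold the linear (rotation/scale) part and col3 the
// translation. M34MulV3 applies the full affine transform, M34Mul33V3 applies only the 3x3 part,
// and M34TrnspsMul33V3 multiplies by the transpose of the 3x3 part.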
NV_FORCE_INLINE Vec3V M34MulV3(const Mat34V& a, const Vec3V b)
{
const FloatV x=V3GetX(b);
const FloatV y=V3GetY(b);
const FloatV z=V3GetZ(b);
const Vec3V v0=V3Scale(a.col0,x);
const Vec3V v1=V3Scale(a.col1,y);
const Vec3V v2=V3Scale(a.col2,z);
const Vec3V v0PlusV1=V3Add(v0,v1);
const Vec3V v0PlusV1Plusv2=V3Add(v0PlusV1,v2);
return (V3Add(v0PlusV1Plusv2,a.col3));
}
NV_FORCE_INLINE Vec3V M34Mul33V3(const Mat34V& a, const Vec3V b)
{
const FloatV x=V3GetX(b);
const FloatV y=V3GetY(b);
const FloatV z=V3GetZ(b);
const Vec3V v0=V3Scale(a.col0,x);
const Vec3V v1=V3Scale(a.col1,y);
const Vec3V v2=V3Scale(a.col2,z);
const Vec3V v0PlusV1=V3Add(v0,v1);
return V3Add(v0PlusV1,v2);
}
NV_FORCE_INLINE Vec3V M34TrnspsMul33V3(const Mat34V& a, const Vec3V b)
{
const FloatV x=V3Dot(a.col0,b);
const FloatV y=V3Dot(a.col1,b);
const FloatV z=V3Dot(a.col2,b);
return V3Merge(x,y,z);
}
NV_FORCE_INLINE Mat34V M34MulM34(const Mat34V& a, const Mat34V& b)
{
return Mat34V(M34Mul33V3(a,b.col0), M34Mul33V3(a,b.col1),M34Mul33V3(a,b.col2),M34MulV3(a,b.col3));
}
NV_FORCE_INLINE Mat33V M34MulM33(const Mat34V& a, const Mat33V& b)
{
return Mat33V(M34Mul33V3(a,b.col0),M34Mul33V3(a,b.col1),M34Mul33V3(a,b.col2));
}
NV_FORCE_INLINE Mat33V M34Mul33MM34(const Mat34V& a, const Mat34V& b)
{
return Mat33V(M34Mul33V3(a,b.col0),M34Mul33V3(a,b.col1),M34Mul33V3(a,b.col2));
}
NV_FORCE_INLINE Mat34V M34Add(const Mat34V& a, const Mat34V& b)
{
return Mat34V(V3Add(a.col0,b.col0),V3Add(a.col1,b.col1),V3Add(a.col2,b.col2),V3Add(a.col3,b.col3));
}
NV_FORCE_INLINE Mat33V M34Trnsps33(const Mat34V& a)
{
return Mat33V
(
V3Merge(V3GetX(a.col0),V3GetX(a.col1),V3GetX(a.col2)),
V3Merge(V3GetY(a.col0),V3GetY(a.col1),V3GetY(a.col2)),
V3Merge(V3GetZ(a.col0),V3GetZ(a.col1),V3GetZ(a.col2))
);
}
//////////////////////////////////
//MAT44V
//////////////////////////////////
NV_FORCE_INLINE Vec4V M44MulV4(const Mat44V& a, const Vec4V b)
{
const FloatV x=V4GetX(b);
const FloatV y=V4GetY(b);
const FloatV z=V4GetZ(b);
const FloatV w=V4GetW(b);
const Vec4V v0=V4Scale(a.col0,x);
const Vec4V v1=V4Scale(a.col1,y);
const Vec4V v2=V4Scale(a.col2,z);
const Vec4V v3=V4Scale(a.col3,w);
const Vec4V v0PlusV1=V4Add(v0,v1);
const Vec4V v0PlusV1Plusv2=V4Add(v0PlusV1,v2);
return (V4Add(v0PlusV1Plusv2,v3));
}
NV_FORCE_INLINE Vec4V M44TrnspsMulV4(const Mat44V& a, const Vec4V b)
{
NV_ALIGN(16,FloatV) dotProdArray[4]=
{
V4Dot(a.col0,b),
V4Dot(a.col1,b),
V4Dot(a.col2,b),
V4Dot(a.col3,b)
};
return V4Merge(dotProdArray);
}
NV_FORCE_INLINE Mat44V M44MulM44(const Mat44V& a, const Mat44V& b)
{
return Mat44V(M44MulV4(a,b.col0),M44MulV4(a,b.col1),M44MulV4(a,b.col2),M44MulV4(a,b.col3));
}
NV_FORCE_INLINE Mat44V M44Add(const Mat44V& a, const Mat44V& b)
{
return Mat44V(V4Add(a.col0,b.col0),V4Add(a.col1,b.col1),V4Add(a.col2,b.col2),V4Add(a.col3,b.col3));
}
NV_FORCE_INLINE Mat44V M44Trnsps(const Mat44V& a)
{
const Vec4V v0 = _mm_unpacklo_ps(a.col0, a.col2);
const Vec4V v1 = _mm_unpackhi_ps(a.col0, a.col2);
const Vec4V v2 = _mm_unpacklo_ps(a.col1, a.col3);
const Vec4V v3 = _mm_unpackhi_ps(a.col1, a.col3);
return Mat44V( _mm_unpacklo_ps(v0, v2),_mm_unpackhi_ps(v0, v2),_mm_unpacklo_ps(v1, v3),_mm_unpackhi_ps(v1, v3));
}
NV_FORCE_INLINE Mat44V M44Inverse(const Mat44V& a)
{
__m128 minor0, minor1, minor2, minor3;
__m128 row0, row1, row2, row3;
__m128 det, tmp1;
tmp1=V4Zero();
row1=V4Zero();
row3=V4Zero();
row0=a.col0;
row1=_mm_shuffle_ps(a.col1,a.col1,_MM_SHUFFLE(1,0,3,2));
row2=a.col2;
row3=_mm_shuffle_ps(a.col3,a.col3,_MM_SHUFFLE(1,0,3,2));
tmp1 = _mm_mul_ps(row2, row3);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1);
minor0 = _mm_mul_ps(row1, tmp1);
minor1 = _mm_mul_ps(row0, tmp1);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E);
minor0 = _mm_sub_ps(_mm_mul_ps(row1, tmp1), minor0);
minor1 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor1);
minor1 = _mm_shuffle_ps(minor1, minor1, 0x4E);
tmp1 = _mm_mul_ps(row1, row2);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1);
minor0 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor0);
minor3 = _mm_mul_ps(row0, tmp1);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E);
minor0 = _mm_sub_ps(minor0, _mm_mul_ps(row3, tmp1));
minor3 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor3);
minor3 = _mm_shuffle_ps(minor3, minor3, 0x4E);
tmp1 = _mm_mul_ps(_mm_shuffle_ps(row1, row1, 0x4E), row3);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1);
row2 = _mm_shuffle_ps(row2, row2, 0x4E);
minor0 = _mm_add_ps(_mm_mul_ps(row2, tmp1), minor0);
minor2 = _mm_mul_ps(row0, tmp1);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E);
minor0 = _mm_sub_ps(minor0, _mm_mul_ps(row2, tmp1));
minor2 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor2);
minor2 = _mm_shuffle_ps(minor2, minor2, 0x4E);
tmp1 = _mm_mul_ps(row0, row1);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1);
minor2 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor2);
minor3 = _mm_sub_ps(_mm_mul_ps(row2, tmp1), minor3);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E);
minor2 = _mm_sub_ps(_mm_mul_ps(row3, tmp1), minor2);
minor3 = _mm_sub_ps(minor3, _mm_mul_ps(row2, tmp1));
tmp1 = _mm_mul_ps(row0, row3);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1);
minor1 = _mm_sub_ps(minor1, _mm_mul_ps(row2, tmp1));
minor2 = _mm_add_ps(_mm_mul_ps(row1, tmp1), minor2);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E);
minor1 = _mm_add_ps(_mm_mul_ps(row2, tmp1), minor1);
minor2 = _mm_sub_ps(minor2, _mm_mul_ps(row1, tmp1));
tmp1 = _mm_mul_ps(row0, row2);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1);
minor1 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor1);
minor3 = _mm_sub_ps(minor3, _mm_mul_ps(row1, tmp1));
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E);
minor1 = _mm_sub_ps(minor1, _mm_mul_ps(row3, tmp1));
minor3 = _mm_add_ps(_mm_mul_ps(row1, tmp1), minor3);
det = _mm_mul_ps(row0, minor0);
det = _mm_add_ps(_mm_shuffle_ps(det, det, 0x4E), det);
det = _mm_add_ss(_mm_shuffle_ps(det, det, 0xB1), det);
tmp1 = _mm_rcp_ss(det);
#if 0
det = _mm_sub_ss(_mm_add_ss(tmp1, tmp1), _mm_mul_ss(det, _mm_mul_ss(tmp1, tmp1)));
det = _mm_shuffle_ps(det, det, 0x00);
#else
det= _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(0,0,0,0));
#endif
minor0 = _mm_mul_ps(det, minor0);
minor1 = _mm_mul_ps(det, minor1);
minor2 = _mm_mul_ps(det, minor2);
minor3 = _mm_mul_ps(det, minor3);
Mat44V invTrans(minor0,minor1,minor2,minor3);
return M44Trnsps(invTrans);
}
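// M44Inverse appears to follow the Cramer's-rule formulation familiar from Intel's well-known
// 4x4 matrix inverse sample: cofactors are accumulated in the four "minor" registers, the
// determinant is reduced into one lane, and its reciprocal is taken with _mm_rcp_ss. The #if 0
// branch is a single Newton-Raphson refinement of that reciprocal and is currently compiled out,
// so only the raw approximation scales the cofactors.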
NV_FORCE_INLINE Vec4V V4LoadXYZW(const float& x, const float& y, const float& z, const float& w)
{
return _mm_set_ps(w, z, y, x);
}
/*
// AP: work in progress - use proper SSE intrinsics where possible
NV_FORCE_INLINE VecU16V V4U32PK(VecU32V a, VecU32V b)
{
VecU16V result;
result.m128_u16[0] = uint16_t(NvClamp<uint32_t>((a).m128_u32[0], 0, 0xFFFF));
result.m128_u16[1] = uint16_t(NvClamp<uint32_t>((a).m128_u32[1], 0, 0xFFFF));
result.m128_u16[2] = uint16_t(NvClamp<uint32_t>((a).m128_u32[2], 0, 0xFFFF));
result.m128_u16[3] = uint16_t(NvClamp<uint32_t>((a).m128_u32[3], 0, 0xFFFF));
result.m128_u16[4] = uint16_t(NvClamp<uint32_t>((b).m128_u32[0], 0, 0xFFFF));
result.m128_u16[5] = uint16_t(NvClamp<uint32_t>((b).m128_u32[1], 0, 0xFFFF));
result.m128_u16[6] = uint16_t(NvClamp<uint32_t>((b).m128_u32[2], 0, 0xFFFF));
result.m128_u16[7] = uint16_t(NvClamp<uint32_t>((b).m128_u32[3], 0, 0xFFFF));
return result;
}
*/
NV_FORCE_INLINE VecU32V V4U32Sel(const BoolV c, const VecU32V a, const VecU32V b)
{
return m128_I2F(_mm_or_si128(
_mm_andnot_si128(m128_F2I(c), m128_F2I(b)),
_mm_and_si128(m128_F2I(c), m128_F2I(a))
));
}
NV_FORCE_INLINE VecU32V V4U32or(VecU32V a, VecU32V b)
{
return m128_I2F(_mm_or_si128(m128_F2I(a), m128_F2I(b)));
}
NV_FORCE_INLINE VecU32V V4U32and(VecU32V a, VecU32V b)
{
return m128_I2F(_mm_and_si128(m128_F2I(a), m128_F2I(b)));
}
NV_FORCE_INLINE VecU32V V4U32Andc(VecU32V a, VecU32V b)
{
return m128_I2F(_mm_andnot_si128(m128_F2I(b), m128_F2I(a)));
}
/*
NV_FORCE_INLINE VecU16V V4U16Or(VecU16V a, VecU16V b)
{
return m128_I2F(_mm_or_si128(m128_F2I(a), m128_F2I(b)));
}
*/
/*
NV_FORCE_INLINE VecU16V V4U16And(VecU16V a, VecU16V b)
{
return m128_I2F(_mm_and_si128(m128_F2I(a), m128_F2I(b)));
}
*/
/*
NV_FORCE_INLINE VecU16V V4U16Andc(VecU16V a, VecU16V b)
{
return m128_I2F(_mm_andnot_si128(m128_F2I(b), m128_F2I(a)));
}
*/
NV_FORCE_INLINE VecI32V I4Load(const int32_t i)
{
return (_mm_load1_ps((float*)&i));
}
NV_FORCE_INLINE VecI32V I4LoadU(const int32_t* i)
{
return _mm_loadu_ps((float*)i);
}
NV_FORCE_INLINE VecI32V I4LoadA(const int32_t* i)
{
return _mm_load_ps((float*)i);
}
NV_FORCE_INLINE VecI32V VecI32V_Add(const VecI32VArg a, const VecI32VArg b)
{
return m128_I2F(_mm_add_epi32(m128_F2I(a), m128_F2I(b)));
}
NV_FORCE_INLINE VecI32V VecI32V_Sub(const VecI32VArg a, const VecI32VArg b)
{
return m128_I2F(_mm_sub_epi32(m128_F2I(a), m128_F2I(b)));
}
NV_FORCE_INLINE BoolV VecI32V_IsGrtr(const VecI32VArg a, const VecI32VArg b)
{
return m128_I2F(_mm_cmpgt_epi32(m128_F2I(a), m128_F2I(b)));
}
NV_FORCE_INLINE BoolV VecI32V_IsEq(const VecI32VArg a, const VecI32VArg b)
{
return m128_I2F(_mm_cmpeq_epi32(m128_F2I(a), m128_F2I(b)));
}
NV_FORCE_INLINE VecI32V V4I32Sel(const BoolV c, const VecI32V a, const VecI32V b)
{
return V4U32Sel(c, a, b);
}
NV_FORCE_INLINE VecI32V VecI32V_Zero()
{
return V4Zero();
}
NV_FORCE_INLINE VecI32V VecI32V_One()
{
return I4Load(1);
}
NV_FORCE_INLINE VecI32V VecI32V_Two()
{
return I4Load(2);
}
NV_FORCE_INLINE VecI32V VecI32V_MinusOne()
{
return I4Load(-1);
}
NV_FORCE_INLINE VecU32V U4Zero()
{
return U4Load(0);
}
NV_FORCE_INLINE VecU32V U4One()
{
return U4Load(1);
}
NV_FORCE_INLINE VecU32V U4Two()
{
return U4Load(2);
}
NV_FORCE_INLINE VecI32V VecI32V_Sel(const BoolV c, const VecI32VArg a, const VecI32VArg b)
{
VECMATHAOS_ASSERT(_VecMathTests::allElementsEqualBoolV(c,BTTTT()) || _VecMathTests::allElementsEqualBoolV(c,BFFFF()));
return _mm_or_ps(_mm_andnot_ps(c, b), _mm_and_ps(c, a));
}
NV_FORCE_INLINE VecShiftV VecI32V_PrepareShift(const VecI32VArg shift)
{
VecShiftV s;
s.shift = VecI32V_Sel(BTFFF(), shift, VecI32V_Zero());
return s;
}
NV_FORCE_INLINE VecI32V VecI32V_LeftShift(const VecI32VArg a, const VecShiftVArg count)
{
return m128_I2F(_mm_sll_epi32(m128_F2I(a), m128_F2I(count.shift)));
}
NV_FORCE_INLINE VecI32V VecI32V_RightShift(const VecI32VArg a, const VecShiftVArg count)
{
return m128_I2F(_mm_srl_epi32(m128_F2I(a), m128_F2I(count.shift)));
}
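// _mm_sll_epi32/_mm_srl_epi32 shift every lane by the count held in the low 64 bits of the second
// operand, which is why VecI32V_PrepareShift clears lanes y/z/w of the requested shift so that
// only the x-lane count is picked up.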
NV_FORCE_INLINE VecI32V VecI32V_And(const VecI32VArg a, const VecI32VArg b)
{
return _mm_and_ps(a, b);
}
NV_FORCE_INLINE VecI32V VecI32V_Or(const VecI32VArg a, const VecI32VArg b)
{
return _mm_or_ps(a, b);
}
NV_FORCE_INLINE VecI32V VecI32V_GetX(const VecI32VArg a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0));
}
NV_FORCE_INLINE VecI32V VecI32V_GetY(const VecI32VArg a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(1,1,1,1));
}
NV_FORCE_INLINE VecI32V VecI32V_GetZ(const VecI32VArg a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2));
}
NV_FORCE_INLINE VecI32V VecI32V_GetW(const VecI32VArg a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3,3,3,3));
}
NV_FORCE_INLINE void NvI32_From_VecI32V(const VecI32VArg a, int32_t* i)
{
_mm_store_ss((float*)i,a);
}
NV_FORCE_INLINE VecI32V VecI32V_Merge(const VecI32VArg a, const VecI32VArg b, const VecI32VArg c, const VecI32VArg d)
{
return V4Merge(a, b, c, d);
}
NV_FORCE_INLINE VecI32V VecI32V_From_BoolV(const BoolVArg a)
{
return a;
}
NV_FORCE_INLINE VecU32V VecU32V_From_BoolV(const BoolVArg a)
{
return a;
}
/*
template<int a> NV_FORCE_INLINE VecI32V V4ISplat()
{
VecI32V result;
result.m128_i32[0] = a;
result.m128_i32[1] = a;
result.m128_i32[2] = a;
result.m128_i32[3] = a;
return result;
}
template<uint32_t a> NV_FORCE_INLINE VecU32V V4USplat()
{
VecU32V result;
result.m128_u32[0] = a;
result.m128_u32[1] = a;
result.m128_u32[2] = a;
result.m128_u32[3] = a;
return result;
}
*/
/*
NV_FORCE_INLINE void V4U16StoreAligned(VecU16V val, VecU16V* address)
{
*address = val;
}
*/
NV_FORCE_INLINE void V4U32StoreAligned(VecU32V val, VecU32V* address)
{
*address = val;
}
NV_FORCE_INLINE Vec4V V4LoadAligned(Vec4V* addr)
{
return *addr;
}
NV_FORCE_INLINE Vec4V V4LoadUnaligned(Vec4V* addr)
{
return V4LoadU((float*)addr);
}
NV_FORCE_INLINE Vec4V V4Andc(const Vec4V a, const VecU32V b)
{
VecU32V result32(a);
result32 = V4U32Andc(result32, b);
return Vec4V(result32);
}
NV_FORCE_INLINE VecU32V V4IsGrtrV32u(const Vec4V a, const Vec4V b)
{
return V4IsGrtr(a, b);
}
NV_FORCE_INLINE VecU16V V4U16LoadAligned(VecU16V* addr)
{
return *addr;
}
NV_FORCE_INLINE VecU16V V4U16LoadUnaligned(VecU16V* addr)
{
return *addr;
}
NV_FORCE_INLINE VecU16V V4U16CompareGt(VecU16V a, VecU16V b)
{
// _mm_cmpgt_epi16 doesn't work for unsigned values unfortunately
// return m128_I2F(_mm_cmpgt_epi16(m128_F2I(a), m128_F2I(b)));
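// This scalar fallback yields 0/1 per lane rather than the all-ones mask produced by the
// signed intrinsic above. A common branch-free alternative (not used here) is to XOR both
// operands with 0x8000 and then use the signed 16-bit compare.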
VecU16V result;
result.m128_u16[0] = (a).m128_u16[0]>(b).m128_u16[0];
result.m128_u16[1] = (a).m128_u16[1]>(b).m128_u16[1];
result.m128_u16[2] = (a).m128_u16[2]>(b).m128_u16[2];
result.m128_u16[3] = (a).m128_u16[3]>(b).m128_u16[3];
result.m128_u16[4] = (a).m128_u16[4]>(b).m128_u16[4];
result.m128_u16[5] = (a).m128_u16[5]>(b).m128_u16[5];
result.m128_u16[6] = (a).m128_u16[6]>(b).m128_u16[6];
result.m128_u16[7] = (a).m128_u16[7]>(b).m128_u16[7];
return result;
}
NV_FORCE_INLINE VecU16V V4I16CompareGt(VecU16V a, VecU16V b)
{
return m128_I2F(_mm_cmpgt_epi16(m128_F2I(a), m128_F2I(b)));
}
NV_FORCE_INLINE Vec4V Vec4V_From_VecU32V(VecU32V a)
{
Vec4V result = V4LoadXYZW(float(a.m128_u32[0]), float(a.m128_u32[1]), float(a.m128_u32[2]), float(a.m128_u32[3]));
return result;
}
NV_FORCE_INLINE Vec4V Vec4V_From_VecI32V(VecI32V in)
{
return _mm_cvtepi32_ps(m128_F2I(in));
}
NV_FORCE_INLINE VecI32V VecI32V_From_Vec4V(Vec4V a)
{
return _mm_cvttps_epi32(a);
}
NV_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecU32V(VecU32V a)
{
return Vec4V(a);
}
NV_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecI32V(VecI32V a)
{
return Vec4V(a);
}
NV_FORCE_INLINE VecU32V VecU32V_ReinterpretFrom_Vec4V(Vec4V a)
{
return VecU32V(a);
}
NV_FORCE_INLINE VecI32V VecI32V_ReinterpretFrom_Vec4V(Vec4V a)
{
return VecI32V(a);
}
/*
template<int index> NV_FORCE_INLINE BoolV BSplatElement(BoolV a)
{
BoolV result;
result[0] = result[1] = result[2] = result[3] = a[index];
return result;
}
*/
template<int index> NV_FORCE_INLINE BoolV BSplatElement(BoolV a)
{
float* data = (float*)&a;
return V4Load(data[index]);
}
template<int index> NV_FORCE_INLINE VecU32V V4U32SplatElement(VecU32V a)
{
VecU32V result;
result.m128_u32[0] = result.m128_u32[1] = result.m128_u32[2] = result.m128_u32[3] = a.m128_u32[index];
return result;
}
template<int index> NV_FORCE_INLINE Vec4V V4SplatElement(Vec4V a)
{
float* data = (float*)&a;
return V4Load(data[index]);
}
template<int index> NV_FORCE_INLINE VecU16V V4U16SplatElement(VecU16V a)
{
VecU16V result;
for (int i = 0; i < 8; i ++)
result.m128_u16[i] = a.m128_u16[index];
return result;
}
template<int imm> NV_FORCE_INLINE VecI16V V4I16SplatImmediate()
{
VecI16V result;
result.m128_i16[0] = imm;
result.m128_i16[1] = imm;
result.m128_i16[2] = imm;
result.m128_i16[3] = imm;
result.m128_i16[4] = imm;
result.m128_i16[5] = imm;
result.m128_i16[6] = imm;
result.m128_i16[7] = imm;
return result;
}
template<uint16_t imm> NV_FORCE_INLINE VecU16V V4U16SplatImmediate()
{
VecU16V result;
result.m128_u16[0] = imm;
result.m128_u16[1] = imm;
result.m128_u16[2] = imm;
result.m128_u16[3] = imm;
result.m128_u16[4] = imm;
result.m128_u16[5] = imm;
result.m128_u16[6] = imm;
result.m128_u16[7] = imm;
return result;
}
NV_FORCE_INLINE VecU16V V4U16SubtractModulo(VecU16V a, VecU16V b)
{
return m128_I2F(_mm_sub_epi16(m128_F2I(a), m128_F2I(b)));
}
NV_FORCE_INLINE VecU16V V4U16AddModulo(VecU16V a, VecU16V b)
{
return m128_I2F(_mm_add_epi16(m128_F2I(a), m128_F2I(b)));
}
NV_FORCE_INLINE VecU32V V4U16GetLo16(VecU16V a)
{
VecU32V result;
result.m128_u32[0] = a.m128_u16[0];
result.m128_u32[1] = a.m128_u16[2];
result.m128_u32[2] = a.m128_u16[4];
result.m128_u32[3] = a.m128_u16[6];
return result;
}
NV_FORCE_INLINE VecU32V V4U16GetHi16(VecU16V a)
{
VecU32V result;
result.m128_u32[0] = a.m128_u16[1];
result.m128_u32[1] = a.m128_u16[3];
result.m128_u32[2] = a.m128_u16[5];
result.m128_u32[3] = a.m128_u16[7];
return result;
}
NV_FORCE_INLINE VecU32V VecU32VLoadXYZW(uint32_t x, uint32_t y, uint32_t z, uint32_t w)
{
VecU32V result;
result.m128_u32[0] = x;
result.m128_u32[1] = y;
result.m128_u32[2] = z;
result.m128_u32[3] = w;
return result;
}
NV_FORCE_INLINE Vec4V V4Ceil(const Vec4V in)
{
UnionM128 a(in);
return V4LoadXYZW(NvCeil(a.m128_f32[0]), NvCeil(a.m128_f32[1]), NvCeil(a.m128_f32[2]), NvCeil(a.m128_f32[3]));
}
NV_FORCE_INLINE Vec4V V4Floor(const Vec4V in)
{
UnionM128 a(in);
return V4LoadXYZW(NvFloor(a.m128_f32[0]), NvFloor(a.m128_f32[1]), NvFloor(a.m128_f32[2]), NvFloor(a.m128_f32[3]));
}
NV_FORCE_INLINE VecU32V V4ConvertToU32VSaturate(const Vec4V in, uint32_t power)
{
NV_ASSERT(power == 0 && "Non-zero power not supported in convertToU32VSaturate");
NV_UNUSED(power); // prevent warning in release builds
float ffffFFFFasFloat = float(0xFFFF0000);
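// 0xFFFFFFFF is not exactly representable as a float (it would round up to 2^32), so
// 0xFFFF0000 - which is exactly representable - is used as the saturation ceiling.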
UnionM128 a(in);
VecU32V result;
result.m128_u32[0] = uint32_t(NvClamp<float>((a).m128_f32[0], 0.0f, ffffFFFFasFloat));
result.m128_u32[1] = uint32_t(NvClamp<float>((a).m128_f32[1], 0.0f, ffffFFFFasFloat));
result.m128_u32[2] = uint32_t(NvClamp<float>((a).m128_f32[2], 0.0f, ffffFFFFasFloat));
result.m128_u32[3] = uint32_t(NvClamp<float>((a).m128_f32[3], 0.0f, ffffFFFFasFloat));
return result;
}
#endif //PS_UNIX_SSE2_INLINE_AOS_H
| 91,920 | C | 27.770266 | 125 | 0.64332 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/platform/unix/sse2/NsUnixSse2AoS.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef PS_UNIX_SSE2_AOS_H
#define PS_UNIX_SSE2_AOS_H
// no includes here! this file should be included from NvcVecMath.h only!!!
#if !COMPILE_VECTOR_INTRINSICS
#error Vector intrinsics should not be included when using scalar implementation.
#endif
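// Union giving per-lane scalar access to a 128-bit SSE register. It backs the integer
// vector typedefs below and the few operations in the inline header that have no direct
// SSE2 intrinsic and fall back to element-wise code.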
typedef union UnionM128
{
UnionM128(){}
UnionM128(__m128 in)
{
m128 = in;
}
UnionM128(__m128i in)
{
m128i = in;
}
operator __m128()
{
return m128;
}
operator const __m128() const
{
return m128;
}
float m128_f32[4];
__int8_t m128_i8[16];
__int16_t m128_i16[8];
__int32_t m128_i32[4];
__int64_t m128_i64[2];
__uint16_t m128_u16[8];
__uint32_t m128_u32[4];
__uint64_t m128_u64[2];
__m128 m128;
__m128i m128i;
} UnionM128;
typedef __m128 FloatV;
typedef __m128 Vec3V;
typedef __m128 Vec4V;
typedef __m128 BoolV;
typedef __m128 QuatV;
//typedef __m128 VecU32V;
//typedef __m128 VecI32V;
//typedef __m128 VecU16V;
//typedef __m128 VecI16V;
//typedef __m128 VecU8V;
typedef UnionM128 VecU32V;
typedef UnionM128 VecI32V;
typedef UnionM128 VecU16V;
typedef UnionM128 VecI16V;
typedef UnionM128 VecU8V;
#define FloatVArg FloatV&
#define Vec3VArg Vec3V&
#define Vec4VArg Vec4V&
#define BoolVArg BoolV&
#define VecU32VArg VecU32V&
#define VecI32VArg VecI32V&
#define VecU16VArg VecU16V&
#define VecI16VArg VecI16V&
#define VecU8VArg VecU8V&
#define QuatVArg QuatV&
//Optimization for situations in which you cross product multiple vectors with the same vector.
//Avoids 2X shuffles per product
struct VecCrossV
{
Vec3V mL1;
Vec3V mR1;
};
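// Usage sketch (illustrative only - the V3PrepareCross / V3Cross overloads that consume
// this struct are implemented in the corresponding inline header picked up through
// NvcVecMath.h):
//
//   const VecCrossV vc = V3PrepareCross(v);   // shuffle v once
//   const Vec3V c0 = V3Cross(vc, a);          // reuse the pre-shuffled operands
//   const Vec3V c1 = V3Cross(vc, b);          // no per-call shuffles of v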
struct VecShiftV
{
VecI32V shift;
};
#define VecShiftVArg VecShiftV&
NV_ALIGN_PREFIX(16)
struct Mat33V
{
Mat33V(){}
Mat33V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2)
: col0(c0),
col1(c1),
col2(c2)
{
}
Vec3V NV_ALIGN(16,col0);
Vec3V NV_ALIGN(16,col1);
Vec3V NV_ALIGN(16,col2);
}NV_ALIGN_SUFFIX(16);
NV_ALIGN_PREFIX(16)
struct Mat34V
{
Mat34V(){}
Mat34V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2, const Vec3V& c3)
: col0(c0),
col1(c1),
col2(c2),
col3(c3)
{
}
Vec3V NV_ALIGN(16,col0);
Vec3V NV_ALIGN(16,col1);
Vec3V NV_ALIGN(16,col2);
Vec3V NV_ALIGN(16,col3);
}NV_ALIGN_SUFFIX(16);
NV_ALIGN_PREFIX(16)
struct Mat43V
{
Mat43V(){}
Mat43V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2)
: col0(c0),
col1(c1),
col2(c2)
{
}
Vec4V NV_ALIGN(16,col0);
Vec4V NV_ALIGN(16,col1);
Vec4V NV_ALIGN(16,col2);
}NV_ALIGN_SUFFIX(16);
NV_ALIGN_PREFIX(16)
struct Mat44V
{
Mat44V(){}
Mat44V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2, const Vec4V& c3)
: col0(c0),
col1(c1),
col2(c2),
col3(c3)
{
}
Vec4V NV_ALIGN(16,col0);
Vec4V NV_ALIGN(16,col1);
Vec4V NV_ALIGN(16,col2);
Vec4V NV_ALIGN(16,col3);
}NV_ALIGN_SUFFIX(16);
#endif //PS_UNIX_SSE2_AOS_H
| 4,797 | C | 25.076087 | 95 | 0.664582 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/platform/unix/neon/NsUnixNeonAoS.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef PS_UNIX_NEON_AOS_H
#define PS_UNIX_NEON_AOS_H
// no includes here! this file should be included from NvcVecMath.h only!!!
#if !COMPILE_VECTOR_INTRINSICS
#error Vector intrinsics should not be included when using scalar implementation.
#endif
// only ARM NEON compatible platforms should reach this
#include <arm_neon.h>
typedef float32x2_t FloatV;
typedef float32x4_t Vec3V;
typedef float32x4_t Vec4V;
typedef uint32x4_t BoolV;
typedef float32x4_t QuatV;
typedef uint32x4_t VecU32V;
typedef int32x4_t VecI32V;
typedef uint16x8_t VecU16V;
typedef int16x8_t VecI16V;
typedef uint8x16_t VecU8V;
#define FloatVArg FloatV&
#define Vec3VArg Vec3V&
#define Vec4VArg Vec4V&
#define BoolVArg BoolV&
#define VecU32VArg VecU32V&
#define VecI32VArg VecI32V&
#define VecU16VArg VecU16V&
#define VecI16VArg VecI16V&
#define VecU8VArg VecU8V&
#define QuatVArg QuatV&
//KS - TODO - make an actual VecCrossV type for NEON
#define VecCrossV Vec3V
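//NEON's VSHL takes a per-lane (signed) shift count directly, so - unlike the SSE2 variant -
//presumably no masking/preparation struct is needed; a plain integer vector serves as the shift type.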
typedef VecI32V VecShiftV;
#define VecShiftVArg VecShiftV&
NV_ALIGN_PREFIX(16)
struct Mat33V
{
Mat33V(){}
Mat33V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2)
: col0(c0),
col1(c1),
col2(c2)
{
}
Vec3V NV_ALIGN(16,col0);
Vec3V NV_ALIGN(16,col1);
Vec3V NV_ALIGN(16,col2);
}NV_ALIGN_SUFFIX(16);
NV_ALIGN_PREFIX(16)
struct Mat34V
{
Mat34V(){}
Mat34V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2, const Vec3V& c3)
: col0(c0),
col1(c1),
col2(c2),
col3(c3)
{
}
Vec3V NV_ALIGN(16,col0);
Vec3V NV_ALIGN(16,col1);
Vec3V NV_ALIGN(16,col2);
Vec3V NV_ALIGN(16,col3);
}NV_ALIGN_SUFFIX(16);
NV_ALIGN_PREFIX(16)
struct Mat43V
{
Mat43V(){}
Mat43V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2)
: col0(c0),
col1(c1),
col2(c2)
{
}
Vec4V NV_ALIGN(16,col0);
Vec4V NV_ALIGN(16,col1);
Vec4V NV_ALIGN(16,col2);
}NV_ALIGN_SUFFIX(16);
NV_ALIGN_PREFIX(16)
struct Mat44V
{
Mat44V(){}
Mat44V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2, const Vec4V& c3)
: col0(c0),
col1(c1),
col2(c2),
col3(c3)
{
}
Vec4V NV_ALIGN(16,col0);
Vec4V NV_ALIGN(16,col1);
Vec4V NV_ALIGN(16,col2);
Vec4V NV_ALIGN(16,col3);
}NV_ALIGN_SUFFIX(16);
#endif //PS_UNIX_NEON_AOS_H
| 4,061 | C | 28.223021 | 81 | 0.69835 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/platform/unix/neon/NsUnixNeonInlineAoS.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef PS_UNIX_NEON_INLINE_AOS_H
#define PS_UNIX_NEON_INLINE_AOS_H
#if !COMPILE_VECTOR_INTRINSICS
#error Vector intrinsics should not be included when using scalar implementation.
#endif
// improved estimates
#define VRECIPEQ recipq_newton<1>
#define VRECIPE recip_newton<1>
#define VRECIPSQRTEQ rsqrtq_newton<1>
#define VRECIPSQRTE rsqrt_newton<1>
// "exact"
#define VRECIPQ recipq_newton<4>
#define VRECIP recip_newton<4>
#define VRECIPSQRTQ rsqrtq_newton<4>
#define VRECIPSQRT rsqrt_newton<4>
#define VECMATH_AOS_EPSILON (1e-3f)
//Remove this define when all platforms use simd solver.
#define NV_SUPPORT_SIMD
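// Helpers that reduce a 4 x 32-bit NEON mask to a single scalar: the lanes are narrowed
// twice (32 -> 16 -> 8 bits) so that one byte per original lane ends up packed into a single
// 32-bit value, which can then be tested for all-ones / non-zero with one scalar compare.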
namespace internalUnitNeonSimd
{
NV_FORCE_INLINE uint32_t BAllTrue4_R(const BoolV a)
{
const uint16x4_t dHigh = vget_high_u16(vreinterpretq_u16_u32(a));
const uint16x4_t dLow = vmovn_u32(a);
uint16x8_t combined = vcombine_u16(dLow, dHigh);
const uint32x2_t finalReduce = vreinterpret_u32_u8(vmovn_u16(combined));
return uint32_t(vget_lane_u32(finalReduce, 0) == 0xffffFFFF);
}
NV_FORCE_INLINE uint32_t BAnyTrue4_R(const BoolV a)
{
const uint16x4_t dHigh = vget_high_u16(vreinterpretq_u16_u32(a));
const uint16x4_t dLow = vmovn_u32(a);
uint16x8_t combined = vcombine_u16(dLow, dHigh);
const uint32x2_t finalReduce = vreinterpret_u32_u8(vmovn_u16(combined));
return uint32_t(vget_lane_u32(finalReduce, 0) != 0x0);
}
NV_FORCE_INLINE uint32_t BAllTrue3_R(const BoolV a)
{
const uint16x4_t dHigh = vget_high_u16(vreinterpretq_u16_u32(a));
const uint16x4_t dLow = vmovn_u32(a);
uint16x8_t combined = vcombine_u16(dLow, dHigh);
const uint32x2_t finalReduce = vreinterpret_u32_u8(vmovn_u16(combined));
return uint32_t((vget_lane_u32(finalReduce, 0) & 0xffFFff) == 0xffFFff);
}
NV_FORCE_INLINE uint32_t BAnyTrue3_R(const BoolV a)
{
const uint16x4_t dHigh = vget_high_u16(vreinterpretq_u16_u32(a));
const uint16x4_t dLow = vmovn_u32(a);
uint16x8_t combined = vcombine_u16(dLow, dHigh);
const uint32x2_t finalReduce = vreinterpret_u32_u8(vmovn_u16(combined));
return uint32_t((vget_lane_u32(finalReduce, 0) & 0xffFFff) != 0);
}
}
namespace _VecMathTests
{
NV_FORCE_INLINE bool allElementsEqualFloatV(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return vget_lane_u32(vceq_f32(a, b), 0) != 0;
}
NV_FORCE_INLINE bool allElementsEqualVec3V(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
return V3AllEq(a, b) != 0;
}
NV_FORCE_INLINE bool allElementsEqualVec4V(const Vec4V a, const Vec4V b)
{
return V4AllEq(a, b) != 0;
}
NV_FORCE_INLINE bool allElementsEqualBoolV(const BoolV a, const BoolV b)
{
return internalUnitNeonSimd::BAllTrue4_R(vceqq_u32(a, b)) != 0;
}
NV_FORCE_INLINE uint32_t V4U32AllEq(const VecU32V a, const VecU32V b)
{
return internalUnitNeonSimd::BAllTrue4_R(V4IsEqU32(a, b));
}
NV_FORCE_INLINE bool allElementsEqualVecU32V(const VecU32V a, const VecU32V b)
{
return V4U32AllEq(a, b) != 0;
}
NV_FORCE_INLINE BoolV V4IsEqI32(const VecI32V a, const VecI32V b)
{
return vceqq_s32(a, b);
}
NV_FORCE_INLINE uint32_t V4I32AllEq(const VecI32V a, const VecI32V b)
{
return internalUnitNeonSimd::BAllTrue4_R(V4IsEqI32(a, b));
}
NV_FORCE_INLINE bool allElementsEqualVecI32V(const VecI32V a, const VecI32V b)
{
return V4I32AllEq(a, b) != 0;
}
NV_FORCE_INLINE bool allElementsNearEqualFloatV(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
const float32x2_t c = vsub_f32(a, b);
const float32x2_t error = vdup_n_f32(VECMATH_AOS_EPSILON);
// absolute compare abs(error) > abs(c)
#if NV_WINRT
const uint32x2_t greater = vacgt_f32(error, c);
#else
const uint32x2_t greater = vcagt_f32(error, c);
#endif
const uint32x2_t min = vpmin_u32(greater, greater);
return vget_lane_u32(min, 0) != 0x0;
}
NV_FORCE_INLINE bool allElementsNearEqualVec3V(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
const float32x4_t c = vsubq_f32(a, b);
const float32x4_t error = vdupq_n_f32(VECMATH_AOS_EPSILON);
// absolute compare abs(error) > abs(c)
#if NV_WINRT
const uint32x4_t greater = vacgtq_f32(error, c);
#else
const uint32x4_t greater = vcagtq_f32(error, c);
#endif
return internalUnitNeonSimd::BAllTrue3_R(greater) != 0;
}
NV_FORCE_INLINE bool allElementsNearEqualVec4V(const Vec4V a, const Vec4V b)
{
const float32x4_t c = vsubq_f32(a, b);
const float32x4_t error = vdupq_n_f32(VECMATH_AOS_EPSILON);
// absolute compare abs(error) > abs(c)
#if NV_WINRT
const uint32x4_t greater = vacgtq_f32(error, c);
#else
const uint32x4_t greater = vcagtq_f32(error, c);
#endif
return internalUnitNeonSimd::BAllTrue4_R(greater) != 0x0;
}
}
#if 0 // debugging printfs
#include <stdio.h>
NV_FORCE_INLINE void printVec(const float32x4_t& v, const char* name)
{
NV_ALIGN(16, float32_t) data[4];
vst1q_f32(data, v);
printf("%s: (%f, %f, %f, %f)\n", name, data[0], data[1], data[2], data[3]);
}
NV_FORCE_INLINE void printVec(const float32x2_t& v, const char* name)
{
NV_ALIGN(16, float32_t) data[2];
vst1_f32(data, v);
printf("%s: (%f, %f)\n", name, data[0], data[1]);
}
NV_FORCE_INLINE void printVec(const uint32x4_t& v, const char* name)
{
NV_ALIGN(16, uint32_t) data[4];
vst1q_u32(data, v);
printf("%s: (0x%x, 0x%x, 0x%x, 0x%x)\n", name, data[0], data[1], data[2], data[3]);
}
NV_FORCE_INLINE void printVec(const uint16x8_t& v, const char* name)
{
NV_ALIGN(16, uint16_t) data[8];
vst1q_u16(data, v);
printf("%s: (0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n", name, data[0], data[1], data[2], data[3],
data[4], data[5], data[6], data[7]);
}
NV_FORCE_INLINE void printVec(const int32x4_t& v, const char* name)
{
NV_ALIGN(16, int32_t) data[4];
vst1q_s32(data, v);
printf("%s: (0x%x, 0x%x, 0x%x, 0x%x)\n", name, data[0], data[1], data[2], data[3]);
}
NV_FORCE_INLINE void printVec(const int16x8_t& v, const char* name)
{
NV_ALIGN(16, int16_t) data[8];
vst1q_s16(data, v);
printf("%s: (0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n", name, data[0], data[1], data[2], data[3],
data[4], data[5], data[6], data[7]);
}
NV_FORCE_INLINE void printVec(const uint16x4_t& v, const char* name)
{
NV_ALIGN(16, uint16_t) data[4];
vst1_u16(data, v);
printf("%s: (0x%x, 0x%x, 0x%x, 0x%x)\n", name, data[0], data[1], data[2], data[3]);
}
NV_FORCE_INLINE void printVec(const uint32x2_t& v, const char* name)
{
NV_ALIGN(16, uint32_t) data[2];
vst1_u32(data, v);
printf("%s: (0x%x, 0x%x)\n", name, data[0], data[1]);
}
NV_FORCE_INLINE void printVar(const uint32_t v, const char* name)
{
printf("%s: 0x%x\n", name, v);
}
NV_FORCE_INLINE void printVar(const float v, const char* name)
{
printf("%s: %f\n", name, v);
}
#define PRINT_VAR(X) printVar((X), #X)
#define PRINT_VEC(X) printVec((X), #X)
#define PRINT_VEC_TITLE(TITLE, X) printVec((X), TITLE #X)
#endif // debugging printf
/////////////////////////////////////////////////////////////////////
////FUNCTIONS USED ONLY FOR ASSERTS IN VECTORISED IMPLEMENTATIONS
/////////////////////////////////////////////////////////////////////
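// On this NEON implementation a FloatV is a 2-lane register holding the same scalar in both
// lanes; "valid" therefore means both lanes match (compared bitwise when the value is not
// finite, so that NaN payloads still compare consistently).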
NV_FORCE_INLINE bool isValidFloatV(const FloatV a)
{
NV_ALIGN(16,float) data[4];
vst1_f32((float32_t*)data, a);
if(isFiniteFloatV(a))
return data[0] == data[1];
else
{
uint32_t* intData = (uint32_t*)data;
return intData[0] == intData[1];
}
}
NV_FORCE_INLINE bool isValidVec3V(const Vec3V a)
{
const float32_t w = vgetq_lane_f32(a, 3);
if(isFiniteVec3V(a))
return w == 0.0f;
else
{
NV_ALIGN(16,float) data[4];
vst1q_f32((float32_t*)data, a);
uint32_t* intData = (uint32_t*)data;
return !intData[3] || ((intData[0] == intData[1]) && (intData[0] == intData[2]) && (intData[0] == intData[3]));
}
}
NV_FORCE_INLINE bool isFiniteFloatV(const FloatV a)
{
NV_ALIGN(16,float) data[4];
vst1_f32((float32_t*)data, a);
return NvIsFinite(data[0]) && NvIsFinite(data[1]);
}
NV_FORCE_INLINE bool isFiniteVec3V(const Vec3V a)
{
NV_ALIGN(16,float) data[4];
vst1q_f32((float32_t*)data, a);
return NvIsFinite(data[0]) && NvIsFinite(data[1]) && NvIsFinite(data[2]);
}
NV_FORCE_INLINE bool isFiniteVec4V(const Vec4V a)
{
NV_ALIGN(16,float) data[4];
vst1q_f32((float32_t*)data, a);
return NvIsFinite(data[0]) && NvIsFinite(data[1]) && NvIsFinite(data[2]) && NvIsFinite(data[3]);
}
NV_FORCE_INLINE bool hasZeroElementinFloatV(const FloatV a)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
return vget_lane_u32(vreinterpret_u32_f32(a), 0) == 0;
}
NV_FORCE_INLINE bool hasZeroElementInVec3V(const Vec3V a)
{
const uint32x2_t dLow = vget_low_u32(vreinterpretq_u32_f32(a));
const uint32x2_t dMin = vpmin_u32(dLow, dLow);
return vget_lane_u32(dMin, 0) == 0 || vgetq_lane_u32(vreinterpretq_u32_f32(a), 2) == 0;
}
NV_FORCE_INLINE bool hasZeroElementInVec4V(const Vec4V a)
{
const uint32x2_t dHigh = vget_high_u32(vreinterpretq_u32_f32(a));
const uint32x2_t dLow = vget_low_u32(vreinterpretq_u32_f32(a));
const uint32x2_t dMin = vmin_u32(dHigh, dLow);
const uint32x2_t pairMin = vpmin_u32(dMin, dMin);
return vget_lane_u32(pairMin, 0) == 0;
}
/////////////////////////////////////////////////////////////////////
////VECTORISED FUNCTION IMPLEMENTATIONS
/////////////////////////////////////////////////////////////////////
NV_FORCE_INLINE FloatV FLoad(const float f)
{
return vdup_n_f32(reinterpret_cast<const float32_t&>(f));
}
NV_FORCE_INLINE FloatV FLoadA(const float* const f)
{
VECMATHAOS_ASSERT(0 == ((uint64_t)f & 0x0f));
return vld1_f32((const float32_t*)f);
}
NV_FORCE_INLINE Vec3V V3Load(const float f)
{
NV_ALIGN(16, float) data[4] = {f, f, f, 0.0f};
return V4LoadA(data);
}
NV_FORCE_INLINE Vec4V V4Load(const float f)
{
return vdupq_n_f32(reinterpret_cast<const float32_t&>(f));
}
NV_FORCE_INLINE BoolV BLoad(const bool f)
{
const uint32_t i=uint32_t(-(int32_t)f);
return vdupq_n_u32(i);
}
NV_FORCE_INLINE Vec3V V3LoadA(const NvVec3& f)
{
VECMATHAOS_ASSERT(0 == ((size_t)&f & 0x0f));
NV_ALIGN(16, float) data[4] = {f.x, f.y, f.z, 0.0f};
return V4LoadA(data);
}
NV_FORCE_INLINE Vec3V V3LoadU(const NvVec3& f)
{
NV_ALIGN(16, float) data[4] = {f.x, f.y, f.z, 0.0f};
return V4LoadA(data);
}
NV_FORCE_INLINE Vec3V V3LoadUnsafeA(const NvVec3& f)
{
NV_ALIGN(16, float) data[4] = {f.x, f.y, f.z, 0.0f};
return V4LoadA(data);
}
NV_FORCE_INLINE Vec3V V3LoadA(const float* f)
{
VECMATHAOS_ASSERT(0 == ((size_t)&f & 0x0f));
NV_ALIGN(16, float) data[4] = {f[0], f[1], f[2], 0.0f};
return V4LoadA(data);
}
NV_FORCE_INLINE Vec3V V3LoadU(const float* f)
{
NV_ALIGN(16, float) data[4] = {f[0], f[1], f[2], 0.0f};
return V4LoadA(data);
}
NV_FORCE_INLINE Vec3V Vec3V_From_Vec4V(Vec4V v)
{
return vsetq_lane_f32(0.0f, v, 3);
}
NV_FORCE_INLINE Vec3V Vec3V_From_Vec4V_WUndefined(Vec4V v)
{
return v;
}
NV_FORCE_INLINE Vec4V Vec4V_From_Vec3V(Vec3V f)
{
return f; //ok if it is implemented as the same type.
}
NV_FORCE_INLINE Vec4V Vec4V_From_FloatV(FloatV f)
{
return vcombine_f32(f, f);
}
NV_FORCE_INLINE Vec3V Vec3V_From_FloatV(FloatV f)
{
return Vec3V_From_Vec4V(Vec4V_From_FloatV(f));
}
NV_FORCE_INLINE Vec3V Vec3V_From_FloatV_WUndefined(FloatV f)
{
return Vec3V_From_Vec4V_WUndefined(Vec4V_From_FloatV(f));
}
NV_FORCE_INLINE Vec4V Vec4V_From_NvVec3_WUndefined(const NvVec3& f)
{
NV_ALIGN(16, float) data[4] = {f.x, f.y, f.z, 0.0f};
return V4LoadA(data);
}
NV_FORCE_INLINE Mat33V Mat33V_From_NvMat33(const NvMat33 &m)
{
return Mat33V(V3LoadU(m.column0),
V3LoadU(m.column1),
V3LoadU(m.column2));
}
NV_FORCE_INLINE void NvMat33_From_Mat33V(const Mat33V &m, NvMat33 &out)
{
NV_ASSERT((size_t(&out)&15)==0);
V3StoreU(m.col0, out.column0);
V3StoreU(m.col1, out.column1);
V3StoreU(m.col2, out.column2);
}
NV_FORCE_INLINE Vec4V V4LoadA(const float* const f)
{
VECMATHAOS_ASSERT(0 == ((uint64_t)f & 0x0f));
return vld1q_f32((const float32_t*)f);
}
NV_FORCE_INLINE void V4StoreA(Vec4V a, float* f)
{
VECMATHAOS_ASSERT(0 == ((uint64_t)f & 0x0f));
vst1q_f32((float32_t*)f,a);
}
NV_FORCE_INLINE void V4StoreU(const Vec4V a, float* f)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(0 == ((int)&a & 0x0F));
NV_ALIGN(16,float) f2[4];
vst1q_f32((float32_t*)f2, a);
f[0] = f2[0];
f[1] = f2[1];
f[2] = f2[2];
f[3] = f2[3];
}
NV_FORCE_INLINE void BStoreA(const BoolV a, uint32_t* u)
{
VECMATHAOS_ASSERT(0 == ((uint64_t)u & 0x0f));
vst1q_u32((uint32_t*)u,a);
}
NV_FORCE_INLINE void U4StoreA(const VecU32V uv, uint32_t* u)
{
VECMATHAOS_ASSERT(0 == ((uint64_t)u & 0x0f));
vst1q_u32((uint32_t*)u,uv);
}
NV_FORCE_INLINE void I4StoreA(const VecI32V iv, int32_t* i)
{
VECMATHAOS_ASSERT(0 == ((uint64_t)i & 0x0f));
vst1q_s32((int32_t*)i,iv);
}
NV_FORCE_INLINE Vec4V V4LoadU(const float* const f)
{
return vld1q_f32((const float32_t*)f);
}
NV_FORCE_INLINE BoolV BLoad(const bool* const f)
{
const NV_ALIGN(16, uint32_t) b[4]={(uint32_t)(-(int32_t)f[0]), (uint32_t)(-(int32_t)f[1]), (uint32_t)(-(int32_t)f[2]), (uint32_t)(-(int32_t)f[3])};
return vld1q_u32(b);
}
NV_FORCE_INLINE float FStore(const FloatV a)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
float f = vget_lane_f32(a, 0);
return f;
}
NV_FORCE_INLINE void FStore(const FloatV a, float* NV_RESTRICT f)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
//vst1q_lane_f32(f, a, 0); // causes vst1 alignment bug
*f = vget_lane_f32(a, 0);
}
NV_FORCE_INLINE void V3StoreA(const Vec3V a, NvVec3& f)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(0 == ((int)&a & 0x0F));
VECMATHAOS_ASSERT(0 == ((int)&f & 0x0F));
NV_ALIGN(16,float) f2[4];
vst1q_f32((float32_t*)f2, a);
f = NvVec3(f2[0], f2[1], f2[2]);
}
NV_FORCE_INLINE void V3StoreU(const Vec3V a, NvVec3& f)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(0 == ((int)&a & 0x0F));
NV_ALIGN(16,float) f2[4];
vst1q_f32((float32_t*)f2, a);
f = NvVec3(f2[0], f2[1], f2[2]);
}
//////////////////////////////////
//FLOATV
//////////////////////////////////
NV_FORCE_INLINE FloatV FZero()
{
return FLoad(0.0f);
}
NV_FORCE_INLINE FloatV FOne()
{
return FLoad(1.0f);
}
NV_FORCE_INLINE FloatV FHalf()
{
return FLoad(0.5f);
}
NV_FORCE_INLINE FloatV FEps()
{
return FLoad(NV_EPS_REAL);
}
NV_FORCE_INLINE FloatV FEps6()
{
return FLoad(1e-6f);
}
NV_FORCE_INLINE FloatV FMax()
{
return FLoad(NV_MAX_REAL);
}
NV_FORCE_INLINE FloatV FNegMax()
{
return FLoad(-NV_MAX_REAL);
}
NV_FORCE_INLINE FloatV IZero()
{
return vreinterpret_f32_u32(vdup_n_u32(0));
}
NV_FORCE_INLINE FloatV IOne()
{
return vreinterpret_f32_u32(vdup_n_u32(1));
}
NV_FORCE_INLINE FloatV ITwo()
{
return vreinterpret_f32_u32(vdup_n_u32(2));
}
NV_FORCE_INLINE FloatV IThree()
{
return vreinterpret_f32_u32(vdup_n_u32(3));
}
NV_FORCE_INLINE FloatV IFour()
{
return vreinterpret_f32_u32(vdup_n_u32(4));
}
NV_FORCE_INLINE FloatV FNeg(const FloatV f)
{
VECMATHAOS_ASSERT(isValidFloatV(f));
return vneg_f32(f);
}
NV_FORCE_INLINE FloatV FAdd(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return vadd_f32(a, b);
}
NV_FORCE_INLINE FloatV FSub(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return vsub_f32(a, b);
}
NV_FORCE_INLINE FloatV FMul(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return vmul_f32(a, b);
}
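// Newton-Raphson refinement of the hardware estimates. For the reciprocal, each step
// computes x' = x * (2 - a*x), where VRECPS supplies the (2 - a*x) term; for the reciprocal
// square root, x' = x * (3 - a*x*x) / 2, where VRSQRTS supplies (3 - a*x*x) / 2. The
// template parameter n is the number of refinement iterations.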
template <int n>
NV_FORCE_INLINE float32x2_t recip_newton(const float32x2_t& in)
{
float32x2_t recip = vrecpe_f32(in);
for(int i=0; i<n; ++i)
recip = vmul_f32(recip, vrecps_f32(in, recip));
return recip;
}
template <int n>
NV_FORCE_INLINE float32x4_t recipq_newton(const float32x4_t& in)
{
float32x4_t recip = vrecpeq_f32(in);
for(int i=0; i<n; ++i)
recip = vmulq_f32(recip, vrecpsq_f32(recip, in));
return recip;
}
template <int n>
NV_FORCE_INLINE float32x2_t rsqrt_newton(const float32x2_t& in)
{
float32x2_t rsqrt = vrsqrte_f32(in);
for(int i=0; i<n; ++i)
rsqrt = vmul_f32(rsqrt, vrsqrts_f32(vmul_f32(rsqrt, rsqrt), in));
return rsqrt;
}
template <int n>
NV_FORCE_INLINE float32x4_t rsqrtq_newton(const float32x4_t& in)
{
float32x4_t rsqrt = vrsqrteq_f32(in);
for(int i=0; i<n; ++i)
rsqrt = vmulq_f32(rsqrt, vrsqrtsq_f32(vmulq_f32(rsqrt, rsqrt), in));
return rsqrt;
}
NV_FORCE_INLINE FloatV FDiv(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return vmul_f32(a, VRECIP(b));
}
NV_FORCE_INLINE FloatV FDivFast(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return vmul_f32(a, VRECIPE(b));
}
NV_FORCE_INLINE FloatV FRecip(const FloatV a)
{
return VRECIP(a);
}
NV_FORCE_INLINE FloatV FRecipFast(const FloatV a)
{
return VRECIPE(a);
}
NV_FORCE_INLINE FloatV FRsqrt(const FloatV a)
{
return VRECIPSQRT(a);
}
NV_FORCE_INLINE FloatV FSqrt(const FloatV a)
{
return vmul_f32(a, VRECIPSQRT(a));
}
NV_FORCE_INLINE FloatV FRsqrtFast(const FloatV a)
{
return VRECIPSQRTE(a);
}
NV_FORCE_INLINE FloatV FScaleAdd(const FloatV a, const FloatV b, const FloatV c)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
VECMATHAOS_ASSERT(isValidFloatV(c));
return vmla_f32(c, a, b);
}
NV_FORCE_INLINE FloatV FNegScaleSub(const FloatV a, const FloatV b, const FloatV c)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
VECMATHAOS_ASSERT(isValidFloatV(c));
return vmls_f32(c, a, b);
}
NV_FORCE_INLINE FloatV FAbs(const FloatV a)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
return vabs_f32(a);
}
NV_FORCE_INLINE FloatV FSel(const BoolV c, const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(_VecMathTests::allElementsEqualBoolV(c,BTTTT()) || _VecMathTests::allElementsEqualBoolV(c,BFFFF()));
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return vbsl_f32(vget_low_u32(c), a, b);
}
NV_FORCE_INLINE BoolV FIsGrtr(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return vdupq_lane_u32(vcgt_f32(a, b), 0);
}
NV_FORCE_INLINE BoolV FIsGrtrOrEq(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return vdupq_lane_u32(vcge_f32(a, b), 0);
}
NV_FORCE_INLINE BoolV FIsEq(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return vdupq_lane_u32(vceq_f32(a, b), 0);
}
NV_FORCE_INLINE FloatV FMax(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return vmax_f32(a, b);
}
NV_FORCE_INLINE FloatV FMin(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return vmin_f32(a, b);
}
NV_FORCE_INLINE FloatV FClamp(const FloatV a, const FloatV minV, const FloatV maxV)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(minV));
VECMATHAOS_ASSERT(isValidFloatV(maxV));
return FMax(FMin(a,maxV),minV);
}
NV_FORCE_INLINE uint32_t FAllGrtr(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return vget_lane_u32(vcgt_f32(a, b), 0);
}
NV_FORCE_INLINE uint32_t FAllGrtrOrEq(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return vget_lane_u32(vcge_f32(a, b), 0);
}
NV_FORCE_INLINE uint32_t FAllEq(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return vget_lane_u32(vceq_f32(a, b), 0);
}
NV_FORCE_INLINE FloatV FRound(const FloatV a)
{
//truncate(a + (0.5f - sign(a)))
const float32x2_t half = vdup_n_f32(0.5f);
const float32x2_t sign = vcvt_f32_u32((vshr_n_u32(vreinterpret_u32_f32(a), 31)));
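// sign is 1.0f for negative inputs (sign bit shifted down to bit 0) and 0.0f otherwise, so
// the net bias applied before truncation is +0.5 for non-negative and -0.5 for negative
// values, giving round-half-away-from-zero behaviour.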
const float32x2_t aPlusHalf = vadd_f32(a, half);
const float32x2_t aRound = vsub_f32(aPlusHalf, sign);
int32x2_t tmp = vcvt_s32_f32(aRound);
return vcvt_f32_s32(tmp);
}
NV_FORCE_INLINE FloatV FSin(const FloatV a)
{
//Vec4V V1, V2, V3, V5, V7, V9, V11, V13, V15, V17, V19, V21, V23;
//Vec4V S1, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11;
FloatV Result;
// Wrap the given angle into the range -2*Pi <= angle < 2*Pi
const FloatV recipTwoPi = FLoadA(g_NVReciprocalTwoPi.f);
const FloatV twoPi = FLoadA(g_NVTwoPi.f);
const FloatV tmp = FMul(a, recipTwoPi);
const FloatV b = FRound(tmp);
const FloatV V1 = FNegMulSub(twoPi, b, a);
// sin(V) ~= V - V^3 / 3! + V^5 / 5! - V^7 / 7! + V^9 / 9! - V^11 / 11! + V^13 / 13! -
// V^15 / 15! + V^17 / 17! - V^19 / 19! + V^21 / 21! - V^23 / 23! (for -PI <= V < PI)
const FloatV V2 = FMul(V1, V1);
const FloatV V3 = FMul(V2, V1);
const FloatV V5 = FMul(V3, V2);
const FloatV V7 = FMul(V5, V2);
const FloatV V9 = FMul(V7, V2);
const FloatV V11 = FMul(V9, V2);
const FloatV V13 = FMul(V11, V2);
const FloatV V15 = FMul(V13, V2);
const FloatV V17 = FMul(V15, V2);
const FloatV V19 = FMul(V17, V2);
const FloatV V21 = FMul(V19, V2);
const FloatV V23 = FMul(V21, V2);
const Vec4V sinCoefficients0 = V4LoadA(g_NVSinCoefficients0.f);
const Vec4V sinCoefficients1 = V4LoadA(g_NVSinCoefficients1.f);
const Vec4V sinCoefficients2 = V4LoadA(g_NVSinCoefficients2.f);
const FloatV S1 = V4GetY(sinCoefficients0);
const FloatV S2 = V4GetZ(sinCoefficients0);
const FloatV S3 = V4GetW(sinCoefficients0);
const FloatV S4 = V4GetX(sinCoefficients1);
const FloatV S5 = V4GetY(sinCoefficients1);
const FloatV S6 = V4GetZ(sinCoefficients1);
const FloatV S7 = V4GetW(sinCoefficients1);
const FloatV S8 = V4GetX(sinCoefficients2);
const FloatV S9 = V4GetY(sinCoefficients2);
const FloatV S10 = V4GetZ(sinCoefficients2);
const FloatV S11 = V4GetW(sinCoefficients2);
Result = FMulAdd(S1, V3, V1);
Result = FMulAdd(S2, V5, Result);
Result = FMulAdd(S3, V7, Result);
Result = FMulAdd(S4, V9, Result);
Result = FMulAdd(S5, V11, Result);
Result = FMulAdd(S6, V13, Result);
Result = FMulAdd(S7, V15, Result);
Result = FMulAdd(S8, V17, Result);
Result = FMulAdd(S9, V19, Result);
Result = FMulAdd(S10, V21, Result);
Result = FMulAdd(S11, V23, Result);
return Result;
}
NV_FORCE_INLINE FloatV FCos(const FloatV a)
{
//XMVECTOR V1, V2, V4, V6, V8, V10, V12, V14, V16, V18, V20, V22;
//XMVECTOR C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11;
FloatV Result;
// Wrap the given angle into the range -2*Pi <= angle < 2*Pi
const FloatV recipTwoPi = FLoadA(g_NVReciprocalTwoPi.f);
const FloatV twoPi = FLoadA(g_NVTwoPi.f);
const FloatV tmp = FMul(a, recipTwoPi);
const FloatV b = FRound(tmp);
const FloatV V1 = FNegMulSub(twoPi, b, a);
// cos(V) ~= 1 - V^2 / 2! + V^4 / 4! - V^6 / 6! + V^8 / 8! - V^10 / 10! + V^12 / 12! -
// V^14 / 14! + V^16 / 16! - V^18 / 18! + V^20 / 20! - V^22 / 22! (for -PI <= V < PI)
const FloatV V2 = FMul(V1, V1);
const FloatV V4 = FMul(V2, V2);
const FloatV V6 = FMul(V4, V2);
const FloatV V8 = FMul(V4, V4);
const FloatV V10 = FMul(V6, V4);
const FloatV V12 = FMul(V6, V6);
const FloatV V14 = FMul(V8, V6);
const FloatV V16 = FMul(V8, V8);
const FloatV V18 = FMul(V10, V8);
const FloatV V20 = FMul(V10, V10);
const FloatV V22 = FMul(V12, V10);
const Vec4V cosCoefficients0 = V4LoadA(g_NVCosCoefficients0.f);
const Vec4V cosCoefficients1 = V4LoadA(g_NVCosCoefficients1.f);
const Vec4V cosCoefficients2 = V4LoadA(g_NVCosCoefficients2.f);
const FloatV C1 = V4GetY(cosCoefficients0);
const FloatV C2 = V4GetZ(cosCoefficients0);
const FloatV C3 = V4GetW(cosCoefficients0);
const FloatV C4 = V4GetX(cosCoefficients1);
const FloatV C5 = V4GetY(cosCoefficients1);
const FloatV C6 = V4GetZ(cosCoefficients1);
const FloatV C7 = V4GetW(cosCoefficients1);
const FloatV C8 = V4GetX(cosCoefficients2);
const FloatV C9 = V4GetY(cosCoefficients2);
const FloatV C10 = V4GetZ(cosCoefficients2);
const FloatV C11 = V4GetW(cosCoefficients2);
Result = FMulAdd(C1, V2, FOne());
Result = FMulAdd(C2, V4, Result);
Result = FMulAdd(C3, V6, Result);
Result = FMulAdd(C4, V8, Result);
Result = FMulAdd(C5, V10, Result);
Result = FMulAdd(C6, V12, Result);
Result = FMulAdd(C7, V14, Result);
Result = FMulAdd(C8, V16, Result);
Result = FMulAdd(C9, V18, Result);
Result = FMulAdd(C10, V20, Result);
Result = FMulAdd(C11, V22, Result);
return Result;
}
NV_FORCE_INLINE uint32_t FOutOfBounds(const FloatV a, const FloatV min, const FloatV max)
{
const BoolV ffff = BFFFF();
const BoolV c = BOr(FIsGrtr(a, max), FIsGrtr(min, a));
return uint32_t(!BAllEq(c, ffff));
}
NV_FORCE_INLINE uint32_t FInBounds(const FloatV a, const FloatV min, const FloatV max)
{
const BoolV tttt = BTTTT();
const BoolV c = BAnd(FIsGrtrOrEq(a, min), FIsGrtrOrEq(max, a));
return uint32_t(BAllEq(c, tttt));
}
NV_FORCE_INLINE uint32_t FOutOfBounds(const FloatV a, const FloatV bounds)
{
#if NV_WINRT
const uint32x2_t greater = vacgt_f32(a, bounds);
#else
const uint32x2_t greater = vcagt_f32(a, bounds);
#endif
return vget_lane_u32(greater, 0);
}
NV_FORCE_INLINE uint32_t FInBounds(const FloatV a, const FloatV bounds)
{
#if NV_WINRT
const uint32x2_t geq = vacge_f32(bounds, a);
#else
const uint32x2_t geq = vcage_f32(bounds, a);
#endif
return vget_lane_u32(geq, 0);
}
//////////////////////////////////
//VEC3V
//////////////////////////////////
NV_FORCE_INLINE Vec3V V3Splat(const FloatV f)
{
VECMATHAOS_ASSERT(isValidFloatV(f));
#if NV_WINRT
const uint32x2_t mask = { 0x00000000ffffFFFFULL };
#else
const uint32x2_t mask = {0xffffFFFF, 0x0};
#endif
const uint32x2_t uHigh = vreinterpret_u32_f32(f);
const float32x2_t dHigh = vreinterpret_f32_u32(vand_u32(uHigh, mask));
return vcombine_f32(f, dHigh);
}
NV_FORCE_INLINE Vec3V V3Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z)
{
VECMATHAOS_ASSERT(isValidFloatV(x));
VECMATHAOS_ASSERT(isValidFloatV(y));
VECMATHAOS_ASSERT(isValidFloatV(z));
#if NV_WINRT
const uint32x2_t mask = { 0x00000000ffffFFFFULL };
#else
const uint32x2_t mask = {0xffffFFFF, 0x0};
#endif
const uint32x2_t dHigh = vand_u32(vreinterpret_u32_f32(z), mask);
const uint32x2_t dLow = vext_u32(vreinterpret_u32_f32(x), vreinterpret_u32_f32(y), 1);
return vreinterpretq_f32_u32(vcombine_u32(dLow, dHigh));
}
NV_FORCE_INLINE Vec3V V3UnitX()
{
#if NV_WINRT
const float32x4_t x = { 0x000000003f800000ULL, 0x0ULL};
#else
const float32x4_t x = { 1.0f, 0.0f, 0.0f, 0.0f};
#endif // NV_WINRT
return x;
}
NV_FORCE_INLINE Vec3V V3UnitY()
{
#if NV_WINRT
const float32x4_t y = { 0x3f80000000000000ULL, 0x0ULL};
#else
const float32x4_t y = {0, 1.0f, 0, 0};
#endif
return y;
}
NV_FORCE_INLINE Vec3V V3UnitZ()
{
#if NV_WINRT
const float32x4_t z = { 0x0ULL, 0x000000003f800000ULL };
#else
const float32x4_t z = {0, 0, 1.0f, 0};
#endif
return z;
}
NV_FORCE_INLINE FloatV V3GetX(const Vec3V f)
{
const float32x2_t fLow = vget_low_f32(f);
return vdup_lane_f32(fLow, 0);
}
NV_FORCE_INLINE FloatV V3GetY(const Vec3V f)
{
const float32x2_t fLow = vget_low_f32(f);
return vdup_lane_f32(fLow, 1);
}
NV_FORCE_INLINE FloatV V3GetZ(const Vec3V f)
{
const float32x2_t fhigh = vget_high_f32(f);
return vdup_lane_f32(fhigh, 0);
}
NV_FORCE_INLINE Vec3V V3SetX(const Vec3V v, const FloatV f)
{
VECMATHAOS_ASSERT(isValidVec3V(v));
VECMATHAOS_ASSERT(isValidFloatV(f));
return V3Sel(BFTTT(),v, vcombine_f32(f, f));
}
NV_FORCE_INLINE Vec3V V3SetY(const Vec3V v, const FloatV f)
{
VECMATHAOS_ASSERT(isValidVec3V(v));
VECMATHAOS_ASSERT(isValidFloatV(f));
return V3Sel(BTFTT(),v,vcombine_f32(f, f));
}
NV_FORCE_INLINE Vec3V V3SetZ(const Vec3V v, const FloatV f)
{
VECMATHAOS_ASSERT(isValidVec3V(v));
VECMATHAOS_ASSERT(isValidFloatV(f));
return V3Sel(BTTFT(),v,vcombine_f32(f, f));
}
NV_FORCE_INLINE Vec3V V3ColX(const Vec3V a, const Vec3V b, const Vec3V c)
{
const float32x2_t aLow = vget_low_f32(a);
const float32x2_t bLow = vget_low_f32(b);
const float32x2_t cLow = vget_low_f32(c);
const float32x2_t zero = vdup_n_f32(0.0f);
const float32x2x2_t zipL = vzip_f32(aLow, bLow);
const float32x2x2_t zipH = vzip_f32(cLow, zero);
return vcombine_f32(zipL.val[0], zipH.val[0]);
}
NV_FORCE_INLINE Vec3V V3ColY(const Vec3V a, const Vec3V b, const Vec3V c)
{
const float32x2_t aLow = vget_low_f32(a);
const float32x2_t bLow = vget_low_f32(b);
const float32x2_t cLow = vget_low_f32(c);
const float32x2_t zero = vdup_n_f32(0.0f);
const float32x2x2_t zipL = vzip_f32(aLow, bLow);
const float32x2x2_t zipH = vzip_f32(cLow, zero);
return vcombine_f32(zipL.val[1], zipH.val[1]);
}
NV_FORCE_INLINE Vec3V V3ColZ(const Vec3V a, const Vec3V b, const Vec3V c)
{
const float32x2_t aHi = vget_high_f32(a);
const float32x2_t bHi = vget_high_f32(b);
const float32x2_t cHi = vget_high_f32(c);
const float32x2x2_t zipL = vzip_f32(aHi, bHi);
return vcombine_f32(zipL.val[0], cHi);
}
NV_FORCE_INLINE Vec3V V3Zero()
{
return vdupq_n_f32(0.0f);
}
NV_FORCE_INLINE Vec3V V3Eps()
{
return V3Load(NV_EPS_REAL);
}
NV_FORCE_INLINE Vec3V V3One()
{
return V3Load(1.0f);
}
NV_FORCE_INLINE Vec3V V3Neg(const Vec3V f)
{
VECMATHAOS_ASSERT(isValidVec3V(f));
return vnegq_f32(f);
}
NV_FORCE_INLINE Vec3V V3Add(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
return vaddq_f32(a, b);
}
NV_FORCE_INLINE Vec3V V3Add(const Vec3V a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return vaddq_f32(a, Vec3V_From_FloatV(b));
}
NV_FORCE_INLINE Vec3V V3Sub(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
return vsubq_f32(a, b);
}
NV_FORCE_INLINE Vec3V V3Sub(const Vec3V a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return vsubq_f32(a, Vec3V_From_FloatV(b));
}
NV_FORCE_INLINE Vec3V V3Scale(const Vec3V a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return vmulq_lane_f32(a, b, 0);
}
NV_FORCE_INLINE Vec3V V3Mul(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
return vmulq_f32(a, b);
}
NV_FORCE_INLINE Vec3V V3ScaleInv(const Vec3V a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
float32x2_t invB = VRECIP(b);
return vsetq_lane_f32(0.0f, vmulq_lane_f32(a, invB, 0), 3);
}
NV_FORCE_INLINE Vec3V V3Div(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
float32x4_t invB = VRECIPQ(b);
invB = vsetq_lane_f32(0.0f, invB, 3);
return vmulq_f32(a, invB);
}
NV_FORCE_INLINE Vec3V V3ScaleInvFast(const Vec3V a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
const float32x2_t invB = VRECIPE(b);
return vmulq_lane_f32(a, invB, 0);
}
NV_FORCE_INLINE Vec3V V3DivFast(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
float32x4_t invB = VRECIPEQ(b);
invB = vsetq_lane_f32(0.0f, invB, 3);
return vmulq_f32(a, invB);
}
NV_FORCE_INLINE Vec3V V3Recip(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
const float32x4_t recipA = VRECIPQ(a);
return vsetq_lane_f32(0.0f, recipA, 3);
}
NV_FORCE_INLINE Vec3V V3RecipFast(const Vec3V a)
{
const float32x4_t recipA = VRECIPEQ(a);
return vsetq_lane_f32(0.0f, recipA, 3);
}
NV_FORCE_INLINE Vec3V V3Rsqrt(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
const float32x4_t rSqrA = VRECIPSQRTQ(a);
return vsetq_lane_f32(0.0f, rSqrA, 3);
}
NV_FORCE_INLINE Vec3V V3RsqrtFast(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
const float32x4_t rSqrA = VRECIPSQRTEQ(a);
return vsetq_lane_f32(0.0f, rSqrA, 3);
}
NV_FORCE_INLINE Vec3V V3ScaleAdd(const Vec3V a, const FloatV b, const Vec3V c)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
VECMATHAOS_ASSERT(isValidVec3V(c));
return vmlaq_lane_f32(c, a, b, 0);
}
NV_FORCE_INLINE Vec3V V3NegScaleSub(const Vec3V a, const FloatV b, const Vec3V c)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
VECMATHAOS_ASSERT(isValidVec3V(c));
return vmlsq_lane_f32(c, a, b, 0);
}
NV_FORCE_INLINE Vec3V V3MulAdd(const Vec3V a, const Vec3V b, const Vec3V c)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
VECMATHAOS_ASSERT(isValidVec3V(c));
return vmlaq_f32(c, a, b);
}
NV_FORCE_INLINE Vec3V V3NegMulSub(const Vec3V a, const Vec3V b, const Vec3V c)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
VECMATHAOS_ASSERT(isValidVec3V(c));
return vmlsq_f32(c, a, b);
}
NV_FORCE_INLINE Vec3V V3Abs(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
return vabsq_f32(a);
}
NV_FORCE_INLINE FloatV V3Dot(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
NV_ASSERT(isValidVec3V(a));
NV_ASSERT(isValidVec3V(b));
// const uint32x2_t mask = {0xffffFFFF, 0x0};
const float32x4_t tmp = vmulq_f32(a, b);
const float32x2_t low = vget_low_f32(tmp);
const float32x2_t high = vget_high_f32(tmp);
// const float32x2_t high = vreinterpret_f32_u32(vand_u32(vreinterpret_u32_f32(high_), mask));
const float32x2_t sumTmp = vpadd_f32(low, high); // = {0+z, x+y}
const float32x2_t sum0ZYX = vpadd_f32(sumTmp, sumTmp); // = {x+y+z, x+y+z}
return sum0ZYX;
}
NV_FORCE_INLINE Vec3V V3Cross(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
#if NV_WINRT
const uint32x2_t TF = { 0x00000000ffffFFFFULL };
#else
const uint32x2_t TF = {0xffffFFFF, 0x0};
#endif
const float32x2_t ay_ax = vget_low_f32(a); // d2
const float32x2_t aw_az = vget_high_f32(a); // d3
const float32x2_t by_bx = vget_low_f32(b); // d4
const float32x2_t bw_bz = vget_high_f32(b); // d5
// Hi, Lo
const float32x2_t bz_by = vext_f32(by_bx, bw_bz, 1); // bz, by
const float32x2_t az_ay = vext_f32(ay_ax, aw_az, 1); // az, ay
const float32x2_t azbx = vmul_f32(aw_az, by_bx); // 0, az*bx
const float32x2_t aybz_axby = vmul_f32(ay_ax, bz_by); // ay*bz, ax*by
const float32x2_t azbxSUBaxbz = vmls_f32(azbx, bw_bz, ay_ax); // 0, az*bx-ax*bz
const float32x2_t aybzSUBazby_axbySUBaybx = vmls_f32(aybz_axby, by_bx, az_ay); // ay*bz-az*by, ax*by-ay*bx
const float32x2_t retLow = vext_f32(aybzSUBazby_axbySUBaybx, azbxSUBaxbz, 1); // az*bx-ax*bz, ay*bz-az*by
const uint32x2_t retHigh = vand_u32(TF, vreinterpret_u32_f32(aybzSUBazby_axbySUBaybx)); // 0, ax*by-ay*bx
return vcombine_f32(retLow, vreinterpret_f32_u32(retHigh));
}
NV_FORCE_INLINE VecCrossV V3PrepareCross(const Vec3V a)
{
return a;
}
NV_FORCE_INLINE FloatV V3Length(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
NV_ASSERT(isValidVec3V(a));
// const uint32x2_t mask = {0xffffFFFF, 0x0};
const float32x4_t tmp = vmulq_f32(a, a);
const float32x2_t low = vget_low_f32(tmp);
const float32x2_t high = vget_high_f32(tmp);
// const float32x2_t high = vreinterpret_f32_u32(vand_u32(vreinterpret_u32_f32(high_), mask));
const float32x2_t sumTmp = vpadd_f32(low, high); // = {0+z, x+y}
const float32x2_t sum0ZYX = vpadd_f32(sumTmp, sumTmp); // = {x+y+z, x+y+z}
const float32x2_t len = vmul_f32(VRECIPSQRTE(sum0ZYX), sum0ZYX);
return len;
}
NV_FORCE_INLINE FloatV V3LengthSq(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
return V3Dot(a,a);
}
NV_FORCE_INLINE Vec3V V3Normalize(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
return V3ScaleInv(a, V3Length(a));
}
NV_FORCE_INLINE Vec3V V3NormalizeFast(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
return V3Scale(a, VRECIPSQRTE(V3Dot(a,a)));
}
NV_FORCE_INLINE Vec3V V3NormalizeSafe(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
const FloatV zero = vdup_n_f32(0.0f);
const FloatV length = V3Length(a);
const uint32x4_t isGreaterThanZero = FIsGrtr(length, zero);
return V3Sel(isGreaterThanZero, V3ScaleInv(a, length), vdupq_lane_f32(zero, 0));
}
NV_FORCE_INLINE Vec3V V3Sel(const BoolV c, const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
return vbslq_f32(c, a, b);
}
NV_FORCE_INLINE BoolV V3IsGrtr(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
return vcgtq_f32(a, b);
}
NV_FORCE_INLINE BoolV V3IsGrtrOrEq(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
return vcgeq_f32(a, b);
}
NV_FORCE_INLINE BoolV V3IsEq(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
return vceqq_f32(a, b);
}
NV_FORCE_INLINE Vec3V V3Max(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
return vmaxq_f32(a, b);
}
NV_FORCE_INLINE Vec3V V3Min(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
return vminq_f32(a, b);
}
//Extract the maximum value from a
NV_FORCE_INLINE FloatV V3ExtractMax(const Vec3V a)
{
const float32x2_t low = vget_low_f32(a);
const float32x2_t high = vget_high_f32(a);
const float32x2_t zz = vdup_lane_f32(high, 0);
const float32x2_t max0 = vpmax_f32(zz, low);
const float32x2_t max1 = vpmax_f32(max0, max0);
return max1;
}
//Extract the minimum value from a
NV_FORCE_INLINE FloatV V3ExtractMin(const Vec3V a)
{
const float32x2_t low = vget_low_f32(a);
const float32x2_t high = vget_high_f32(a);
const float32x2_t zz = vdup_lane_f32(high, 0);
const float32x2_t min0 = vpmin_f32(zz, low);
const float32x2_t min1 = vpmin_f32(min0, min0);
return min1;
}
//return (a >= 0.0f) ? 1.0f : -1.0f;
NV_FORCE_INLINE Vec3V V3Sign(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
const Vec3V zero = V3Zero();
const Vec3V one = V3One();
const Vec3V none = V3Neg(one);
return V3Sel(V3IsGrtrOrEq(a, zero), one, none);
}
NV_FORCE_INLINE Vec3V V3Clamp(const Vec3V a, const Vec3V minV, const Vec3V maxV)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(minV));
VECMATHAOS_ASSERT(isValidVec3V(maxV));
return V3Max(V3Min(a,maxV),minV);
}
NV_FORCE_INLINE uint32_t V3AllGrtr(const Vec3V a, const Vec3V b)
{
return internalUnitNeonSimd::BAllTrue3_R(V4IsGrtr(a, b));
}
NV_FORCE_INLINE uint32_t V3AllGrtrOrEq(const Vec3V a, const Vec3V b)
{
return internalUnitNeonSimd::BAllTrue3_R(V4IsGrtrOrEq(a, b));
}
NV_FORCE_INLINE uint32_t V3AllEq(const Vec3V a, const Vec3V b)
{
return internalUnitNeonSimd::BAllTrue3_R(V4IsEq(a, b));
}
NV_FORCE_INLINE Vec3V V3Round(const Vec3V a)
{
//truncate(a + (0.5f - sign(a)))
const Vec3V half = V3Load(0.5f);
const float32x4_t sign = vcvtq_f32_u32((vshrq_n_u32(vreinterpretq_u32_f32(a), 31)));
const Vec3V aPlusHalf = V3Add(a, half);
const Vec3V aRound = V3Sub(aPlusHalf, sign);
return vcvtq_f32_s32(vcvtq_s32_f32(aRound));
}
NV_FORCE_INLINE Vec3V V3Sin(const Vec3V a)
{
//Vec4V V1, V2, V3, V5, V7, V9, V11, V13, V15, V17, V19, V21, V23;
//Vec4V S1, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11;
Vec3V Result;
// Wrap the given angles into the range -2*Pi <= angle < 2*Pi
const Vec3V recipTwoPi = V4LoadA(g_NVReciprocalTwoPi.f);
const Vec3V twoPi = V4LoadA(g_NVTwoPi.f);
const Vec3V tmp = V3Mul(a, recipTwoPi);
const Vec3V b = V3Round(tmp);
const Vec3V V1 = V3NegMulSub(twoPi, b, a);
// sin(V) ~= V - V^3 / 3! + V^5 / 5! - V^7 / 7! + V^9 / 9! - V^11 / 11! + V^13 / 13! -
// V^15 / 15! + V^17 / 17! - V^19 / 19! + V^21 / 21! - V^23 / 23! (for -PI <= V < PI)
const Vec3V V2 = V3Mul(V1, V1);
const Vec3V V3 = V3Mul(V2, V1);
const Vec3V V5 = V3Mul(V3, V2);
const Vec3V V7 = V3Mul(V5, V2);
const Vec3V V9 = V3Mul(V7, V2);
const Vec3V V11 = V3Mul(V9, V2);
const Vec3V V13 = V3Mul(V11, V2);
const Vec3V V15 = V3Mul(V13, V2);
const Vec3V V17 = V3Mul(V15, V2);
const Vec3V V19 = V3Mul(V17, V2);
const Vec3V V21 = V3Mul(V19, V2);
const Vec3V V23 = V3Mul(V21, V2);
const Vec4V sinCoefficients0 = V4LoadA(g_NVSinCoefficients0.f);
const Vec4V sinCoefficients1 = V4LoadA(g_NVSinCoefficients1.f);
const Vec4V sinCoefficients2 = V4LoadA(g_NVSinCoefficients2.f);
const FloatV S1 = V4GetY(sinCoefficients0);
const FloatV S2 = V4GetZ(sinCoefficients0);
const FloatV S3 = V4GetW(sinCoefficients0);
const FloatV S4 = V4GetX(sinCoefficients1);
const FloatV S5 = V4GetY(sinCoefficients1);
const FloatV S6 = V4GetZ(sinCoefficients1);
const FloatV S7 = V4GetW(sinCoefficients1);
const FloatV S8 = V4GetX(sinCoefficients2);
const FloatV S9 = V4GetY(sinCoefficients2);
const FloatV S10 = V4GetZ(sinCoefficients2);
const FloatV S11 = V4GetW(sinCoefficients2);
Result = V3ScaleAdd(V3, S1, V1);
Result = V3ScaleAdd(V5, S2, Result);
Result = V3ScaleAdd(V7, S3, Result);
Result = V3ScaleAdd(V9, S4, Result);
Result = V3ScaleAdd(V11, S5, Result);
Result = V3ScaleAdd(V13, S6, Result);
Result = V3ScaleAdd(V15, S7, Result);
Result = V3ScaleAdd(V17, S8, Result);
Result = V3ScaleAdd(V19, S9, Result);
Result = V3ScaleAdd(V21, S10,Result);
Result = V3ScaleAdd(V23, S11,Result);
return Result;
}
NV_FORCE_INLINE Vec3V V3Cos(const Vec3V a)
{
//XMVECTOR V1, V2, V4, V6, V8, V10, V12, V14, V16, V18, V20, V22;
//XMVECTOR C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11;
Vec3V Result;
// Wrap the given angles into the range -2*Pi <= angle < 2*Pi
const Vec3V recipTwoPi = V4LoadA(g_NVReciprocalTwoPi.f);
const Vec3V twoPi = V4LoadA(g_NVTwoPi.f);
const Vec3V tmp = V3Mul(a, recipTwoPi);
const Vec3V b = V3Round(tmp);
const Vec3V V1 = V3NegMulSub(twoPi, b, a);
// cos(V) ~= 1 - V^2 / 2! + V^4 / 4! - V^6 / 6! + V^8 / 8! - V^10 / 10! + V^12 / 12! -
// V^14 / 14! + V^16 / 16! - V^18 / 18! + V^20 / 20! - V^22 / 22! (for -PI <= V < PI)
const Vec3V V2 = V3Mul(V1, V1);
const Vec3V V4 = V3Mul(V2, V2);
const Vec3V V6 = V3Mul(V4, V2);
const Vec3V V8 = V3Mul(V4, V4);
const Vec3V V10 = V3Mul(V6, V4);
const Vec3V V12 = V3Mul(V6, V6);
const Vec3V V14 = V3Mul(V8, V6);
const Vec3V V16 = V3Mul(V8, V8);
const Vec3V V18 = V3Mul(V10, V8);
const Vec3V V20 = V3Mul(V10, V10);
const Vec3V V22 = V3Mul(V12, V10);
const Vec4V cosCoefficients0 = V4LoadA(g_NVCosCoefficients0.f);
const Vec4V cosCoefficients1 = V4LoadA(g_NVCosCoefficients1.f);
const Vec4V cosCoefficients2 = V4LoadA(g_NVCosCoefficients2.f);
const FloatV C1 = V4GetY(cosCoefficients0);
const FloatV C2 = V4GetZ(cosCoefficients0);
const FloatV C3 = V4GetW(cosCoefficients0);
const FloatV C4 = V4GetX(cosCoefficients1);
const FloatV C5 = V4GetY(cosCoefficients1);
const FloatV C6 = V4GetZ(cosCoefficients1);
const FloatV C7 = V4GetW(cosCoefficients1);
const FloatV C8 = V4GetX(cosCoefficients2);
const FloatV C9 = V4GetY(cosCoefficients2);
const FloatV C10 = V4GetZ(cosCoefficients2);
const FloatV C11 = V4GetW(cosCoefficients2);
Result = V3ScaleAdd(V2, C1, V4One());
Result = V3ScaleAdd(V4, C2, Result);
Result = V3ScaleAdd(V6, C3, Result);
Result = V3ScaleAdd(V8, C4, Result);
Result = V3ScaleAdd(V10, C5, Result);
Result = V3ScaleAdd(V12, C6, Result);
Result = V3ScaleAdd(V14, C7, Result);
Result = V3ScaleAdd(V16, C8, Result);
Result = V3ScaleAdd(V18, C9, Result);
Result = V3ScaleAdd(V20, C10,Result);
Result = V3ScaleAdd(V22, C11,Result);
return Result;
}
NV_FORCE_INLINE Vec3V V3PermYZZ(const Vec3V a)
{
const float32x2_t xy = vget_low_f32(a);
const float32x2_t zw = vget_high_f32(a);
const float32x2_t yz = vext_f32(xy, zw, 1);
return vcombine_f32(yz, zw);
}
NV_FORCE_INLINE Vec3V V3PermXYX(const Vec3V a)
{
#if NV_WINRT
const uint32x2_t mask = { 0x00000000ffffFFFFULL };
#else
const uint32x2_t mask = {0xffffFFFF, 0x0};
#endif
const uint32x2_t xy = vget_low_u32(vreinterpretq_u32_f32(a));
const uint32x2_t xw = vand_u32(xy, mask);
return vreinterpretq_f32_u32(vcombine_u32(xy, xw));
}
NV_FORCE_INLINE Vec3V V3PermYZX(const Vec3V a)
{
#if NV_WINRT
const uint32x2_t mask = { 0x00000000ffffFFFFULL };
#else
const uint32x2_t mask = {0xffffFFFF, 0x0};
#endif
const uint32x2_t xy = vget_low_u32(vreinterpretq_u32_f32(a));
const uint32x2_t zw = vget_high_u32(vreinterpretq_u32_f32(a));
const uint32x2_t yz = vext_u32(xy, zw, 1);
const uint32x2_t xw = vand_u32(xy, mask);
return vreinterpretq_f32_u32(vcombine_u32(yz, xw));
}
NV_FORCE_INLINE Vec3V V3PermZXY(const Vec3V a)
{
const uint32x2_t xy = vget_low_u32(vreinterpretq_u32_f32(a));
const uint32x2_t zw = vget_high_u32(vreinterpretq_u32_f32(a));
const uint32x2_t wz = vrev64_u32(zw);
const uint32x2_t zx = vext_u32(wz, xy, 1);
const uint32x2_t yw = vext_u32(xy, wz, 1);
return vreinterpretq_f32_u32(vcombine_u32(zx, yw));
}
NV_FORCE_INLINE Vec3V V3PermZZY(const Vec3V a)
{
const uint32x2_t xy = vget_low_u32(vreinterpretq_u32_f32(a));
const uint32x2_t zw = vget_high_u32(vreinterpretq_u32_f32(a));
const uint32x2_t wz = vrev64_u32(zw);
const uint32x2_t yw = vext_u32(xy, wz, 1);
const uint32x2_t zz = vdup_lane_u32(wz, 1);
return vreinterpretq_f32_u32(vcombine_u32(zz, yw));
}
NV_FORCE_INLINE Vec3V V3PermYXX(const Vec3V a)
{
#if NV_WINRT
const uint32x2_t mask = { 0x00000000ffffFFFFULL };
#else
const uint32x2_t mask = {0xffffFFFF, 0x0};
#endif
const uint32x2_t xy = vget_low_u32(vreinterpretq_u32_f32(a));
const uint32x2_t yx = vrev64_u32(xy);
const uint32x2_t xw = vand_u32(xy, mask);
return vreinterpretq_f32_u32(vcombine_u32(yx, xw));
}
NV_FORCE_INLINE Vec3V V3Perm_Zero_1Z_0Y(const Vec3V v0, const Vec3V v1)
{
const uint32x2_t xy = vget_low_u32(vreinterpretq_u32_f32(v0));
const uint32x2_t zw = vget_high_u32(vreinterpretq_u32_f32(v1));
const uint32x2_t wz = vrev64_u32(zw);
const uint32x2_t yw = vext_u32(xy, wz, 1);
return vreinterpretq_f32_u32(vcombine_u32(wz, yw));
}
NV_FORCE_INLINE Vec3V V3Perm_0Z_Zero_1X(const Vec3V v0, const Vec3V v1)
{
#if NV_WINRT
const uint32x2_t mask = { 0x00000000ffffFFFFULL };
#else
const uint32x2_t mask = {0xffffFFFF, 0x0};
#endif
const uint32x2_t zw = vget_high_u32(vreinterpretq_u32_f32(v0));
const uint32x2_t xy = vget_low_u32(vreinterpretq_u32_f32(v1));
const uint32x2_t xw = vand_u32(xy, mask);
return vreinterpretq_f32_u32(vcombine_u32(zw, xw));
}
NV_FORCE_INLINE Vec3V V3Perm_1Y_0X_Zero(const Vec3V v0, const Vec3V v1)
{
const uint32x2_t axy = vget_low_u32(vreinterpretq_u32_f32(v0));
const uint32x2_t bxy = vget_low_u32(vreinterpretq_u32_f32(v1));
const uint32x2_t byax = vext_u32(bxy, axy, 1);
const uint32x2_t ww = vdup_n_u32(0);
return vreinterpretq_f32_u32(vcombine_u32(byax, ww));
}
NV_FORCE_INLINE FloatV V3SumElems(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
NV_ASSERT(isValidVec3V(a));
//const uint32x2_t mask = {0xffffFFFF, 0x0};
const float32x2_t low = vget_low_f32(a);
const float32x2_t high = vget_high_f32(a);
//const float32x2_t high = vreinterpret_f32_u32(vand_u32(vreinterpret_u32_f32(high_), mask));
const float32x2_t sumTmp = vpadd_f32(low, high); // = {0+z, x+y}
const float32x2_t sum0ZYX = vpadd_f32(sumTmp, sumTmp); // = {x+y+z, x+y+z}
return sum0ZYX;
}
NV_FORCE_INLINE uint32_t V3OutOfBounds(const Vec3V a, const Vec3V min, const Vec3V max)
{
const BoolV c = BOr(V3IsGrtr(a, max), V3IsGrtr(min, a));
return internalUnitNeonSimd::BAnyTrue3_R(c);
}
NV_FORCE_INLINE uint32_t V3InBounds(const Vec3V a, const Vec3V min, const Vec3V max)
{
const BoolV c = BAnd(V3IsGrtrOrEq(a, min), V3IsGrtrOrEq(max, a));
return internalUnitNeonSimd::BAllTrue4_R(c);
}
NV_FORCE_INLINE uint32_t V3OutOfBounds(const Vec3V a, const Vec3V bounds)
{
#if NV_WINRT
const uint32x4_t greater = vacgtq_f32(a, bounds);
#else
const uint32x4_t greater = vcagtq_f32(a, bounds);
#endif
return internalUnitNeonSimd::BAnyTrue3_R(greater);
}
NV_FORCE_INLINE uint32_t V3InBounds(const Vec3V a, const Vec3V bounds)
{
#if NV_WINRT
const uint32x4_t geq = vacgeq_f32(bounds, a);
#else
const uint32x4_t geq = vcageq_f32(bounds, a);
#endif
return internalUnitNeonSimd::BAllTrue4_R(geq);
}
//////////////////////////////////
//VEC4V
//////////////////////////////////
NV_FORCE_INLINE Vec4V V4Splat(const FloatV f)
{
VECMATHAOS_ASSERT(isValidFloatV(f));
return vcombine_f32(f, f);
}
NV_FORCE_INLINE Vec4V V4Merge(const FloatV* const floatVArray)
{
VECMATHAOS_ASSERT(isValidFloatV(floatVArray[0]));
VECMATHAOS_ASSERT(isValidFloatV(floatVArray[1]));
VECMATHAOS_ASSERT(isValidFloatV(floatVArray[2]));
VECMATHAOS_ASSERT(isValidFloatV(floatVArray[3]));
const uint32x2_t xLow = vreinterpret_u32_f32(floatVArray[0]);
const uint32x2_t yLow = vreinterpret_u32_f32(floatVArray[1]);
const uint32x2_t zLow = vreinterpret_u32_f32(floatVArray[2]);
const uint32x2_t wLow = vreinterpret_u32_f32(floatVArray[3]);
const uint32x2_t dLow = vext_u32(xLow, yLow, 1);
const uint32x2_t dHigh = vext_u32(zLow, wLow, 1);
return vreinterpretq_f32_u32(vcombine_u32(dLow, dHigh));
}
NV_FORCE_INLINE Vec4V V4Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z, const FloatVArg w)
{
VECMATHAOS_ASSERT(isValidFloatV(x));
VECMATHAOS_ASSERT(isValidFloatV(y));
VECMATHAOS_ASSERT(isValidFloatV(z));
VECMATHAOS_ASSERT(isValidFloatV(w));
const uint32x2_t xLow = vreinterpret_u32_f32(x);
const uint32x2_t yLow = vreinterpret_u32_f32(y);
const uint32x2_t zLow = vreinterpret_u32_f32(z);
const uint32x2_t wLow = vreinterpret_u32_f32(w);
const uint32x2_t dLow = vext_u32(xLow, yLow, 1);
const uint32x2_t dHigh = vext_u32(zLow, wLow, 1);
return vreinterpretq_f32_u32(vcombine_u32(dLow, dHigh));
}
NV_FORCE_INLINE Vec4V V4MergeW(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w)
{
const float32x2_t xx = vget_high_f32(x);
const float32x2_t yy = vget_high_f32(y);
const float32x2_t zz = vget_high_f32(z);
const float32x2_t ww = vget_high_f32(w);
const float32x2x2_t zipL = vzip_f32(xx, yy);
const float32x2x2_t zipH = vzip_f32(zz, ww);
return vcombine_f32(zipL.val[1], zipH.val[1]);
}
NV_FORCE_INLINE Vec4V V4MergeZ(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w)
{
const float32x2_t xx = vget_high_f32(x);
const float32x2_t yy = vget_high_f32(y);
const float32x2_t zz = vget_high_f32(z);
const float32x2_t ww = vget_high_f32(w);
const float32x2x2_t zipL = vzip_f32(xx, yy);
const float32x2x2_t zipH = vzip_f32(zz, ww);
return vcombine_f32(zipL.val[0], zipH.val[0]);
}
NV_FORCE_INLINE Vec4V V4MergeY(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w)
{
const float32x2_t xx = vget_low_f32(x);
const float32x2_t yy = vget_low_f32(y);
const float32x2_t zz = vget_low_f32(z);
const float32x2_t ww = vget_low_f32(w);
const float32x2x2_t zipL = vzip_f32(xx, yy);
const float32x2x2_t zipH = vzip_f32(zz, ww);
return vcombine_f32(zipL.val[1], zipH.val[1]);
}
NV_FORCE_INLINE Vec4V V4MergeX(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w)
{
const float32x2_t xx = vget_low_f32(x);
const float32x2_t yy = vget_low_f32(y);
const float32x2_t zz = vget_low_f32(z);
const float32x2_t ww = vget_low_f32(w);
const float32x2x2_t zipL = vzip_f32(xx, yy);
const float32x2x2_t zipH = vzip_f32(zz, ww);
return vcombine_f32(zipL.val[0], zipH.val[0]);
}
NV_FORCE_INLINE Vec4V V4UnpackXY(const Vec4VArg a, const Vec4VArg b)
{
return vzipq_f32(a, b).val[0];
}
NV_FORCE_INLINE Vec4V V4UnpackZW(const Vec4VArg a, const Vec4VArg b)
{
return vzipq_f32(a, b).val[1];
}
NV_FORCE_INLINE Vec4V V4UnitW()
{
const float32x2_t zeros = vreinterpret_f32_u32(vmov_n_u32(0));
const float32x2_t ones = vmov_n_f32(1.0f);
const float32x2_t zo = vext_f32(zeros, ones, 1);
return vcombine_f32(zeros, zo);
}
NV_FORCE_INLINE Vec4V V4UnitX()
{
const float32x2_t zeros = vreinterpret_f32_u32(vmov_n_u32(0));
const float32x2_t ones = vmov_n_f32(1.0f);
const float32x2_t oz = vext_f32(ones, zeros, 1);
return vcombine_f32(oz, zeros);
}
NV_FORCE_INLINE Vec4V V4UnitY()
{
const float32x2_t zeros = vreinterpret_f32_u32(vmov_n_u32(0));
const float32x2_t ones = vmov_n_f32(1.0f);
const float32x2_t zo = vext_f32(zeros, ones, 1);
return vcombine_f32(zo, zeros);
}
NV_FORCE_INLINE Vec4V V4UnitZ()
{
const float32x2_t zeros = vreinterpret_f32_u32(vmov_n_u32(0));
const float32x2_t ones = vmov_n_f32(1.0f);
const float32x2_t oz = vext_f32(ones, zeros, 1);
return vcombine_f32(zeros, oz);
}
NV_FORCE_INLINE FloatV V4GetW(const Vec4V f)
{
const float32x2_t fhigh = vget_high_f32(f);
return vdup_lane_f32(fhigh, 1);
}
NV_FORCE_INLINE FloatV V4GetX(const Vec4V f)
{
const float32x2_t fLow = vget_low_f32(f);
return vdup_lane_f32(fLow, 0);
}
NV_FORCE_INLINE FloatV V4GetY(const Vec4V f)
{
const float32x2_t fLow = vget_low_f32(f);
return vdup_lane_f32(fLow, 1);
}
NV_FORCE_INLINE FloatV V4GetZ(const Vec4V f)
{
const float32x2_t fhigh = vget_high_f32(f);
return vdup_lane_f32(fhigh, 0);
}
NV_FORCE_INLINE Vec4V V4SetW(const Vec4V v, const FloatV f)
{
VECMATHAOS_ASSERT(isValidFloatV(f));
return V4Sel(BTTTF(), v, vcombine_f32(f, f));
}
NV_FORCE_INLINE Vec4V V4SetX(const Vec4V v, const FloatV f)
{
VECMATHAOS_ASSERT(isValidFloatV(f));
return V4Sel(BFTTT(), v, vcombine_f32(f, f));
}
NV_FORCE_INLINE Vec4V V4SetY(const Vec4V v, const FloatV f)
{
VECMATHAOS_ASSERT(isValidFloatV(f));
return V4Sel(BTFTT(), v, vcombine_f32(f, f));
}
NV_FORCE_INLINE Vec4V V4SetZ(const Vec4V v, const FloatV f)
{
VECMATHAOS_ASSERT(isValidVec3V(v));
VECMATHAOS_ASSERT(isValidFloatV(f));
return V4Sel(BTTFT(), v, vcombine_f32(f, f));
}
NV_FORCE_INLINE Vec4V V4ClearW(const Vec4V v)
{
VECMATHAOS_ASSERT(isValidVec3V(v));
return V4Sel(BTTTF(), v, V4Zero());
}
NV_FORCE_INLINE Vec4V V4Perm_YXWZ(const Vec4V a)
{
const float32x2_t xy = vget_low_f32(a);
const float32x2_t zw = vget_high_f32(a);
const float32x2_t yx = vext_f32(xy, xy, 1);
const float32x2_t wz = vext_f32(zw, zw, 1);
return vcombine_f32(yx, wz);
}
NV_FORCE_INLINE Vec4V V4Perm_XZXZ(const Vec4V a)
{
const float32x2_t xy = vget_low_f32(a);
const float32x2_t zw = vget_high_f32(a);
const float32x2x2_t xzyw = vzip_f32(xy, zw);
return vcombine_f32(xzyw.val[0], xzyw.val[0]);
}
NV_FORCE_INLINE Vec4V V4Perm_YWYW(const Vec4V a)
{
const float32x2_t xy = vget_low_f32(a);
const float32x2_t zw = vget_high_f32(a);
const float32x2x2_t xzyw = vzip_f32(xy, zw);
return vcombine_f32(xzyw.val[1], xzyw.val[1]);
}
template<uint8_t E0, uint8_t E1, uint8_t E2, uint8_t E3> NV_FORCE_INLINE Vec4V V4Perm(const Vec4V V)
{
static const uint32_t ControlElement[ 4 ] =
{
#if 1
0x03020100, // XM_SWIZZLE_X
0x07060504, // XM_SWIZZLE_Y
0x0B0A0908, // XM_SWIZZLE_Z
0x0F0E0D0C, // XM_SWIZZLE_W
#else
0x00010203, // XM_SWIZZLE_X
0x04050607, // XM_SWIZZLE_Y
0x08090A0B, // XM_SWIZZLE_Z
0x0C0D0E0F, // XM_SWIZZLE_W
#endif
};
uint8x8x2_t tbl;
tbl.val[0] = vreinterpret_u8_f32(vget_low_f32(V));
tbl.val[1] = vreinterpret_u8_f32(vget_high_f32(V));
uint8x8_t idx = vcreate_u8( ((uint64_t)ControlElement[E0]) | (((uint64_t)ControlElement[E1]) << 32) );
const uint8x8_t rL = vtbl2_u8( tbl, idx );
idx = vcreate_u8( ((uint64_t)ControlElement[E2]) | (((uint64_t)ControlElement[E3]) << 32) );
const uint8x8_t rH = vtbl2_u8( tbl, idx );
return vreinterpretq_f32_u8(vcombine_u8( rL, rH ));
}
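/*
Usage sketch (illustrative): the template arguments pick source lanes (0 = X ... 3 = W),
so V4Perm is a compile-time swizzle implemented with the vtbl2_u8 byte lookup above.

    const Vec4V v    = V4LoadXYZW(1.0f, 2.0f, 3.0f, 4.0f);
    const Vec4V yxwz = V4Perm<1, 0, 3, 2>(v);   // (2, 1, 4, 3), same result as V4Perm_YXWZ(v)
*/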
NV_FORCE_INLINE Vec4V V4Zero()
{
return vreinterpretq_f32_u32(vmovq_n_u32(0));
}
NV_FORCE_INLINE Vec4V V4One()
{
return vmovq_n_f32(1.0f);
}
NV_FORCE_INLINE Vec4V V4Eps()
{
return V4Load(NV_EPS_REAL);
}
NV_FORCE_INLINE Vec4V V4Neg(const Vec4V f)
{
return vnegq_f32(f);
}
NV_FORCE_INLINE Vec4V V4Add(const Vec4V a, const Vec4V b)
{
return vaddq_f32(a, b);
}
NV_FORCE_INLINE Vec4V V4Sub(const Vec4V a, const Vec4V b)
{
return vsubq_f32(a, b);
}
NV_FORCE_INLINE Vec4V V4Scale(const Vec4V a, const FloatV b)
{
return vmulq_lane_f32(a, b, 0);
}
NV_FORCE_INLINE Vec4V V4Mul(const Vec4V a, const Vec4V b)
{
return vmulq_f32(a, b);
}
NV_FORCE_INLINE Vec4V V4ScaleInv(const Vec4V a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(b));
const float32x2_t invB = VRECIP(b);
return vmulq_lane_f32(a, invB, 0);
}
NV_FORCE_INLINE Vec4V V4Div(const Vec4V a, const Vec4V b)
{
const float32x4_t invB = VRECIPQ(b);
return vmulq_f32(a, invB);
}
NV_FORCE_INLINE Vec4V V4ScaleInvFast(const Vec4V a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(b));
const float32x2_t invB = VRECIPE(b);
return vmulq_lane_f32(a, invB, 0);
}
NV_FORCE_INLINE Vec4V V4DivFast(const Vec4V a, const Vec4V b)
{
const float32x4_t invB = VRECIPEQ(b);
return vmulq_f32(a, invB);
}
NV_FORCE_INLINE Vec4V V4Recip(const Vec4V a)
{
return VRECIPQ(a);
}
NV_FORCE_INLINE Vec4V V4RecipFast(const Vec4V a)
{
return VRECIPEQ(a);
}
NV_FORCE_INLINE Vec4V V4Rsqrt(const Vec4V a)
{
return VRECIPSQRTQ(a);
}
NV_FORCE_INLINE Vec4V V4RsqrtFast(const Vec4V a)
{
return VRECIPSQRTEQ(a);
}
NV_FORCE_INLINE Vec4V V4Sqrt(const Vec4V a)
{
return V4Mul(a, VRECIPSQRTQ(a));
}
NV_FORCE_INLINE Vec4V V4ScaleAdd(const Vec4V a, const FloatV b, const Vec4V c)
{
VECMATHAOS_ASSERT(isValidFloatV(b));
return vmlaq_lane_f32(c, a, b, 0);
}
NV_FORCE_INLINE Vec4V V4NegScaleSub(const Vec4V a, const FloatV b, const Vec4V c)
{
VECMATHAOS_ASSERT(isValidFloatV(b));
return vmlsq_lane_f32(c, a, b, 0);
}
NV_FORCE_INLINE Vec4V V4MulAdd(const Vec4V a, const Vec4V b, const Vec4V c)
{
return vmlaq_f32(c, a, b);
}
NV_FORCE_INLINE Vec4V V4NegMulSub(const Vec4V a, const Vec4V b, const Vec4V c)
{
return vmlsq_f32(c, a, b);
}
NV_FORCE_INLINE Vec4V V4Abs(const Vec4V a)
{
return vabsq_f32(a);
}
NV_FORCE_INLINE FloatV V4SumElements(const Vec4V a)
{
const Vec4V xy = V4UnpackXY(a, a); //x,x,y,y
const Vec4V zw = V4UnpackZW(a, a); //z,z,w,w
const Vec4V xz_yw = V4Add(xy, zw); //x+z,x+z,y+w,y+w
const FloatV xz = V4GetX(xz_yw); //x+z
const FloatV yw = V4GetZ(xz_yw); //y+w
return FAdd(xz, yw); //sum
}
NV_FORCE_INLINE FloatV V4Dot(const Vec4V a, const Vec4V b)
{
const float32x4_t tmp = vmulq_f32(a, b);
const float32x2_t low = vget_low_f32(tmp);
const float32x2_t high = vget_high_f32(tmp);
const float32x2_t sumTmp = vpadd_f32(low, high); // = {z+w, x+y}
const float32x2_t sumWZYX = vpadd_f32(sumTmp, sumTmp); // = {x+y+z+w, x+y+z+w}
return sumWZYX;
}
NV_FORCE_INLINE FloatV V4Length(const Vec4V a)
{
const float32x4_t tmp = vmulq_f32(a, a);
const float32x2_t low = vget_low_f32(tmp);
const float32x2_t high = vget_high_f32(tmp);
const float32x2_t sumTmp = vpadd_f32(low, high); // = {z+w, x+y}
const float32x2_t sumWZYX = vpadd_f32(sumTmp, sumTmp); // = {x+y+z+w, x+y+z+w}
const float32x2_t len = vmul_f32(VRECIPSQRTE(sumWZYX), sumWZYX);
return len;
}
NV_FORCE_INLINE FloatV V4LengthSq(const Vec4V a)
{
return V4Dot(a,a);
}
NV_FORCE_INLINE Vec4V V4Normalize(const Vec4V a)
{
return V4ScaleInv(a, V4Length(a));
}
NV_FORCE_INLINE Vec4V V4NormalizeFast(const Vec4V a)
{
return V4Scale(a, FRsqrtFast(V4Dot(a,a)));
}
NV_FORCE_INLINE Vec4V V4NormalizeSafe(const Vec4V a)
{
const FloatV zero = FZero();
const FloatV length = V4Length(a);
const uint32x4_t isGreaterThanZero = FIsGrtr(length, zero);
return V4Sel(isGreaterThanZero, V4ScaleInv(a, length), vcombine_f32(zero, zero));
}
NV_FORCE_INLINE BoolV V4IsEqU32(const VecU32V a, const VecU32V b)
{
return vceqq_u32(a, b);
}
NV_FORCE_INLINE Vec4V V4Sel(const BoolV c, const Vec4V a, const Vec4V b)
{
return vbslq_f32(c, a, b);
}
NV_FORCE_INLINE BoolV V4IsGrtr(const Vec4V a, const Vec4V b)
{
return vcgtq_f32(a, b);
}
NV_FORCE_INLINE BoolV V4IsGrtrOrEq(const Vec4V a, const Vec4V b)
{
return vcgeq_f32(a, b);
}
NV_FORCE_INLINE BoolV V4IsEq(const Vec4V a, const Vec4V b)
{
return vceqq_f32(a, b);
}
NV_FORCE_INLINE Vec4V V4Max(const Vec4V a, const Vec4V b)
{
return vmaxq_f32(a, b);
}
NV_FORCE_INLINE Vec4V V4Min(const Vec4V a, const Vec4V b)
{
return vminq_f32(a, b);
}
NV_FORCE_INLINE FloatV V4ExtractMax(const Vec4V a)
{
const float32x2_t low = vget_low_f32(a);
const float32x2_t high = vget_high_f32(a);
const float32x2_t max0 = vpmax_f32(high, low);
const float32x2_t max1 = vpmax_f32(max0, max0);
return max1;
}
NV_FORCE_INLINE FloatV V4ExtractMin(const Vec4V a)
{
const float32x2_t low = vget_low_f32(a);
const float32x2_t high = vget_high_f32(a);
const float32x2_t min0 = vpmin_f32(high, low);
const float32x2_t min1 = vpmin_f32(min0, min0);
return min1;
}
NV_FORCE_INLINE Vec4V V4Clamp(const Vec4V a, const Vec4V minV, const Vec4V maxV)
{
return V4Max(V4Min(a,maxV),minV);
}
NV_FORCE_INLINE uint32_t V4AllGrtr(const Vec4V a, const Vec4V b)
{
return internalUnitNeonSimd::BAllTrue4_R(V4IsGrtr(a, b));
}
NV_FORCE_INLINE uint32_t V4AllGrtrOrEq(const Vec4V a, const Vec4V b)
{
return internalUnitNeonSimd::BAllTrue4_R(V4IsGrtrOrEq(a, b));
}
NV_FORCE_INLINE uint32_t V4AllEq(const Vec4V a, const Vec4V b)
{
return internalUnitNeonSimd::BAllTrue4_R(V4IsEq(a, b));
}
NV_FORCE_INLINE Vec4V V4Round(const Vec4V a)
{
//truncate(a + 0.5f - (a < 0 ? 1.0f : 0.0f)), i.e. round half away from zero
const Vec4V half = V4Load(0.5f);
const float32x4_t sign = vcvtq_f32_u32((vshrq_n_u32(vreinterpretq_u32_f32(a), 31)));
const Vec4V aPlusHalf = V4Add(a, half);
const Vec4V aRound = V4Sub(aPlusHalf, sign);
return vcvtq_f32_s32(vcvtq_s32_f32(aRound));
}
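/*
Worked example of the rounding trick above (illustrative): the unsigned shift by 31
extracts the sign bit, so negative inputs subtract an extra 1.0f before truncation,
giving round-half-away-from-zero.

    a =  2.5f: truncate( 2.5f + 0.5f - 0.0f) = truncate( 3.0f) =  3.0f
    a = -2.5f: truncate(-2.5f + 0.5f - 1.0f) = truncate(-3.0f) = -3.0f
    a = -1.2f: truncate(-1.2f + 0.5f - 1.0f) = truncate(-1.7f) = -1.0f
*/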
NV_FORCE_INLINE Vec4V V4Sin(const Vec4V a)
{
//Vec4V V1, V2, V3, V5, V7, V9, V11, V13, V15, V17, V19, V21, V23;
//Vec4V S1, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11;
Vec4V Result;
const Vec4V recipTwoPi = V4LoadA(g_NVReciprocalTwoPi.f);
const Vec4V twoPi = V4LoadA(g_NVTwoPi.f);
const Vec4V tmp = V4Mul(a, recipTwoPi);
const Vec4V b = V4Round(tmp);
const Vec4V V1 = V4NegMulSub(twoPi, b, a);
// sin(V) ~= V - V^3 / 3! + V^5 / 5! - V^7 / 7! + V^9 / 9! - V^11 / 11! + V^13 / 13! -
// V^15 / 15! + V^17 / 17! - V^19 / 19! + V^21 / 21! - V^23 / 23! (for -PI <= V < PI)
const Vec4V V2 = V4Mul(V1, V1);
const Vec4V V3 = V4Mul(V2, V1);
const Vec4V V5 = V4Mul(V3, V2);
const Vec4V V7 = V4Mul(V5, V2);
const Vec4V V9 = V4Mul(V7, V2);
const Vec4V V11 = V4Mul(V9, V2);
const Vec4V V13 = V4Mul(V11, V2);
const Vec4V V15 = V4Mul(V13, V2);
const Vec4V V17 = V4Mul(V15, V2);
const Vec4V V19 = V4Mul(V17, V2);
const Vec4V V21 = V4Mul(V19, V2);
const Vec4V V23 = V4Mul(V21, V2);
const Vec4V sinCoefficients0 = V4LoadA(g_NVSinCoefficients0.f);
const Vec4V sinCoefficients1 = V4LoadA(g_NVSinCoefficients1.f);
const Vec4V sinCoefficients2 = V4LoadA(g_NVSinCoefficients2.f);
const FloatV S1 = V4GetY(sinCoefficients0);
const FloatV S2 = V4GetZ(sinCoefficients0);
const FloatV S3 = V4GetW(sinCoefficients0);
const FloatV S4 = V4GetX(sinCoefficients1);
const FloatV S5 = V4GetY(sinCoefficients1);
const FloatV S6 = V4GetZ(sinCoefficients1);
const FloatV S7 = V4GetW(sinCoefficients1);
const FloatV S8 = V4GetX(sinCoefficients2);
const FloatV S9 = V4GetY(sinCoefficients2);
const FloatV S10 = V4GetZ(sinCoefficients2);
const FloatV S11 = V4GetW(sinCoefficients2);
Result = V4ScaleAdd(V3, S1, V1);
Result = V4ScaleAdd(V5, S2, Result);
Result = V4ScaleAdd(V7, S3, Result);
Result = V4ScaleAdd(V9, S4, Result);
Result = V4ScaleAdd(V11, S5, Result);
Result = V4ScaleAdd(V13, S6, Result);
Result = V4ScaleAdd(V15, S7, Result);
Result = V4ScaleAdd(V17, S8, Result);
Result = V4ScaleAdd(V19, S9, Result);
Result = V4ScaleAdd(V21, S10,Result);
Result = V4ScaleAdd(V23, S11,Result);
return Result;
}
NV_FORCE_INLINE Vec4V V4Cos(const Vec4V a)
{
//XMVECTOR V1, V2, V4, V6, V8, V10, V12, V14, V16, V18, V20, V22;
//XMVECTOR C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11;
Vec4V Result;
const Vec4V recipTwoPi = V4LoadA(g_NVReciprocalTwoPi.f);
const Vec4V twoPi = V4LoadA(g_NVTwoPi.f);
const Vec4V tmp = V4Mul(a, recipTwoPi);
const Vec4V b = V4Round(tmp);
const Vec4V V1 = V4NegMulSub(twoPi, b, a);
// cos(V) ~= 1 - V^2 / 2! + V^4 / 4! - V^6 / 6! + V^8 / 8! - V^10 / 10! + V^12 / 12! -
// V^14 / 14! + V^16 / 16! - V^18 / 18! + V^20 / 20! - V^22 / 22! (for -PI <= V < PI)
const Vec4V V2 = V4Mul(V1, V1);
const Vec4V V4 = V4Mul(V2, V2);
const Vec4V V6 = V4Mul(V4, V2);
const Vec4V V8 = V4Mul(V4, V4);
const Vec4V V10 = V4Mul(V6, V4);
const Vec4V V12 = V4Mul(V6, V6);
const Vec4V V14 = V4Mul(V8, V6);
const Vec4V V16 = V4Mul(V8, V8);
const Vec4V V18 = V4Mul(V10, V8);
const Vec4V V20 = V4Mul(V10, V10);
const Vec4V V22 = V4Mul(V12, V10);
const Vec4V cosCoefficients0 = V4LoadA(g_NVCosCoefficients0.f);
const Vec4V cosCoefficients1 = V4LoadA(g_NVCosCoefficients1.f);
const Vec4V cosCoefficients2 = V4LoadA(g_NVCosCoefficients2.f);
const FloatV C1 = V4GetY(cosCoefficients0);
const FloatV C2 = V4GetZ(cosCoefficients0);
const FloatV C3 = V4GetW(cosCoefficients0);
const FloatV C4 = V4GetX(cosCoefficients1);
const FloatV C5 = V4GetY(cosCoefficients1);
const FloatV C6 = V4GetZ(cosCoefficients1);
const FloatV C7 = V4GetW(cosCoefficients1);
const FloatV C8 = V4GetX(cosCoefficients2);
const FloatV C9 = V4GetY(cosCoefficients2);
const FloatV C10 = V4GetZ(cosCoefficients2);
const FloatV C11 = V4GetW(cosCoefficients2);
Result = V4ScaleAdd(V2, C1, V4One());
Result = V4ScaleAdd(V4, C2, Result);
Result = V4ScaleAdd(V6, C3, Result);
Result = V4ScaleAdd(V8, C4, Result);
Result = V4ScaleAdd(V10, C5, Result);
Result = V4ScaleAdd(V12, C6, Result);
Result = V4ScaleAdd(V14, C7, Result);
Result = V4ScaleAdd(V16, C8, Result);
Result = V4ScaleAdd(V18, C9, Result);
Result = V4ScaleAdd(V20, C10,Result);
Result = V4ScaleAdd(V22, C11,Result);
return Result;
}
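/*
Usage sketch (illustrative): evaluating four angles at once with the polynomial
approximations above. NvPi/NvPiDivTwo are assumed to be the foundation math constants
also used by the trig coefficient tables; accuracy is game-math grade, not ulp-exact.

    NV_ALIGN(16, const float) angles[4] = { 0.0f, NvPiDivTwo, NvPi, -NvPiDivTwo };
    const Vec4V a = V4LoadA(angles);
    const Vec4V s = V4Sin(a);   // ~(0, 1, 0, -1)
    const Vec4V c = V4Cos(a);   // ~(1, 0, -1, 0)
*/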
NV_FORCE_INLINE void V4Transpose(Vec4V& col0, Vec4V& col1, Vec4V& col2, Vec4V& col3)
{
const float32x4x2_t v0v1 = vzipq_f32(col0, col2);
const float32x4x2_t v2v3 = vzipq_f32(col1, col3);
const float32x4x2_t zip0 = vzipq_f32(v0v1.val[0], v2v3.val[0]);
const float32x4x2_t zip1 = vzipq_f32(v0v1.val[1], v2v3.val[1]);
col0 = zip0.val[0];
col1 = zip0.val[1];
col2 = zip1.val[0];
col3 = zip1.val[1];
}
//////////////////////////////////
//BOOLV
//////////////////////////////////
NV_FORCE_INLINE BoolV BFFFF()
{
return vmovq_n_u32(0);
}
NV_FORCE_INLINE BoolV BFFFT()
{
const uint32x2_t zeros = vmov_n_u32(0);
const uint32x2_t ones = vmov_n_u32(0xffffFFFF);
const uint32x2_t zo = vext_u32(zeros, ones, 1);
return vcombine_u32(zeros, zo);
}
NV_FORCE_INLINE BoolV BFFTF()
{
const uint32x2_t zeros = vmov_n_u32(0);
const uint32x2_t ones = vmov_n_u32(0xffffFFFF);
const uint32x2_t oz = vext_u32(ones, zeros, 1);
return vcombine_u32(zeros, oz);
}
NV_FORCE_INLINE BoolV BFFTT()
{
const uint32x2_t zeros = vmov_n_u32(0);
const uint32x2_t ones = vmov_n_u32(0xffffFFFF);
return vcombine_u32(zeros, ones);
}
NV_FORCE_INLINE BoolV BFTFF()
{
const uint32x2_t zeros = vmov_n_u32(0);
const uint32x2_t ones = vmov_n_u32(0xffffFFFF);
const uint32x2_t zo = vext_u32(zeros, ones, 1);
return vcombine_u32(zo, zeros);
}
NV_FORCE_INLINE BoolV BFTFT()
{
const uint32x2_t zeros = vmov_n_u32(0);
const uint32x2_t ones = vmov_n_u32(0xffffFFFF);
const uint32x2_t zo = vext_u32(zeros, ones, 1);
return vcombine_u32(zo, zo);
}
NV_FORCE_INLINE BoolV BFTTF()
{
const uint32x2_t zeros = vmov_n_u32(0);
const uint32x2_t ones = vmov_n_u32(0xffffFFFF);
const uint32x2_t zo = vext_u32(zeros, ones, 1);
const uint32x2_t oz = vext_u32(ones, zeros, 1);
return vcombine_u32(zo, oz);
}
NV_FORCE_INLINE BoolV BFTTT()
{
const uint32x2_t zeros = vmov_n_u32(0);
const uint32x2_t ones = vmov_n_u32(0xffffFFFF);
const uint32x2_t zo = vext_u32(zeros, ones, 1);
return vcombine_u32(zo, ones);
}
NV_FORCE_INLINE BoolV BTFFF()
{
const uint32x2_t zeros = vmov_n_u32(0);
const uint32x2_t ones = vmov_n_u32(0xffffFFFF);
//const uint32x2_t zo = vext_u32(zeros, ones, 1);
const uint32x2_t oz = vext_u32(ones, zeros, 1);
return vcombine_u32(oz, zeros);
}
NV_FORCE_INLINE BoolV BTFFT()
{
const uint32x2_t zeros = vmov_n_u32(0);
const uint32x2_t ones = vmov_n_u32(0xffffFFFF);
const uint32x2_t zo = vext_u32(zeros, ones, 1);
const uint32x2_t oz = vext_u32(ones, zeros, 1);
return vcombine_u32(oz, zo);
}
NV_FORCE_INLINE BoolV BTFTF()
{
const uint32x2_t zeros = vmov_n_u32(0);
const uint32x2_t ones = vmov_n_u32(0xffffFFFF);
const uint32x2_t oz = vext_u32(ones, zeros, 1);
return vcombine_u32(oz, oz);
}
NV_FORCE_INLINE BoolV BTFTT()
{
const uint32x2_t zeros = vmov_n_u32(0);
const uint32x2_t ones = vmov_n_u32(0xffffFFFF);
const uint32x2_t oz = vext_u32(ones, zeros, 1);
return vcombine_u32(oz, ones);
}
NV_FORCE_INLINE BoolV BTTFF()
{
const uint32x2_t zeros = vmov_n_u32(0);
const uint32x2_t ones = vmov_n_u32(0xffffFFFF);
return vcombine_u32(ones, zeros);
}
NV_FORCE_INLINE BoolV BTTFT()
{
const uint32x2_t zeros = vmov_n_u32(0);
const uint32x2_t ones = vmov_n_u32(0xffffFFFF);
const uint32x2_t zo = vext_u32(zeros, ones, 1);
return vcombine_u32(ones, zo);
}
NV_FORCE_INLINE BoolV BTTTF()
{
const uint32x2_t zeros = vmov_n_u32(0);
const uint32x2_t ones = vmov_n_u32(0xffffFFFF);
const uint32x2_t oz = vext_u32(ones, zeros, 1);
return vcombine_u32(ones, oz);
}
NV_FORCE_INLINE BoolV BTTTT()
{
return vmovq_n_u32(0xffffFFFF);
}
NV_FORCE_INLINE BoolV BXMask()
{
return BTFFF();
}
NV_FORCE_INLINE BoolV BYMask()
{
return BFTFF();
}
NV_FORCE_INLINE BoolV BZMask()
{
return BFFTF();
}
NV_FORCE_INLINE BoolV BWMask()
{
return BFFFT();
}
NV_FORCE_INLINE BoolV BGetX(const BoolV f)
{
const uint32x2_t fLow = vget_low_u32(f);
return vdupq_lane_u32(fLow, 0);
}
NV_FORCE_INLINE BoolV BGetY(const BoolV f)
{
const uint32x2_t fLow = vget_low_u32(f);
return vdupq_lane_u32(fLow, 1);
}
NV_FORCE_INLINE BoolV BGetZ(const BoolV f)
{
const uint32x2_t fHigh = vget_high_u32(f);
return vdupq_lane_u32(fHigh, 0);
}
NV_FORCE_INLINE BoolV BGetW(const BoolV f)
{
const uint32x2_t fHigh = vget_high_u32(f);
return vdupq_lane_u32(fHigh, 1);
}
NV_FORCE_INLINE BoolV BSetX(const BoolV v, const BoolV f)
{
return vbslq_u32(BFTTT(), v, f);
}
NV_FORCE_INLINE BoolV BSetY(const BoolV v, const BoolV f)
{
return vbslq_u32(BTFTT(), v, f);
}
NV_FORCE_INLINE BoolV BSetZ(const BoolV v, const BoolV f)
{
return vbslq_u32(BTTFT(), v, f);
}
NV_FORCE_INLINE BoolV BSetW(const BoolV v, const BoolV f)
{
return vbslq_u32(BTTTF(), v, f);
}
NV_FORCE_INLINE BoolV BAnd(const BoolV a, const BoolV b)
{
return vandq_u32(a, b);
}
NV_FORCE_INLINE BoolV BNot(const BoolV a)
{
return vmvnq_u32(a);
}
NV_FORCE_INLINE BoolV BAndNot(const BoolV a, const BoolV b)
{
//return vbicq_u32(a, b);
return vandq_u32(a, vmvnq_u32(b));
}
NV_FORCE_INLINE BoolV BOr(const BoolV a, const BoolV b)
{
return vorrq_u32(a, b);
}
NV_FORCE_INLINE BoolV BAllTrue4(const BoolV a)
{
const uint32x2_t allTrue = vmov_n_u32(0xffffFFFF);
const uint16x4_t dHigh = vget_high_u16(vreinterpretq_u16_u32(a));
const uint16x4_t dLow = vmovn_u32(a);
uint16x8_t combined = vcombine_u16(dLow, dHigh);
const uint32x2_t finalReduce = vreinterpret_u32_u8(vmovn_u16(combined));
const uint32x2_t result = vceq_u32(finalReduce, allTrue);
return vdupq_lane_u32(result, 0);
}
NV_FORCE_INLINE BoolV BAnyTrue4(const BoolV a)
{
const uint32x2_t allTrue = vmov_n_u32(0xffffFFFF);
const uint16x4_t dHigh = vget_high_u16(vreinterpretq_u16_u32(a));
const uint16x4_t dLow = vmovn_u32(a);
uint16x8_t combined = vcombine_u16(dLow, dHigh);
const uint32x2_t finalReduce = vreinterpret_u32_u8(vmovn_u16(combined));
const uint32x2_t result = vtst_u32(finalReduce, allTrue);
return vdupq_lane_u32(result, 0);
}
NV_FORCE_INLINE BoolV BAllTrue3(const BoolV a)
{
const uint32x2_t allTrue3 = vmov_n_u32(0x00ffFFFF);
const uint16x4_t dHigh = vget_high_u16(vreinterpretq_u16_u32(a));
const uint16x4_t dLow = vmovn_u32(a);
uint16x8_t combined = vcombine_u16(dLow, dHigh);
const uint32x2_t finalReduce = vreinterpret_u32_u8(vmovn_u16(combined));
const uint32x2_t result = vceq_u32(vand_u32(finalReduce,allTrue3), allTrue3);
return vdupq_lane_u32(result, 0);
}
NV_FORCE_INLINE BoolV BAnyTrue3(const BoolV a)
{
const uint32x2_t allTrue3 = vmov_n_u32(0x00ffFFFF);
const uint16x4_t dHigh = vget_high_u16(vreinterpretq_u16_u32(a));
const uint16x4_t dLow = vmovn_u32(a);
uint16x8_t combined = vcombine_u16(dLow, dHigh);
const uint32x2_t finalReduce = vreinterpret_u32_u8(vmovn_u16(combined));
const uint32x2_t result = vtst_u32(vand_u32(finalReduce,allTrue3), allTrue3);
return vdupq_lane_u32(result, 0);
}
NV_FORCE_INLINE uint32_t BAllEq(const BoolV a, const BoolV b)
{
const BoolV bTest = vceqq_u32(a, b);
return internalUnitNeonSimd::BAllTrue4_R(bTest);
}
NV_FORCE_INLINE uint32_t BAllEqTTTT(const BoolV a)
{
return BAllEq(a, BTTTT());
}
NV_FORCE_INLINE uint32_t BAllEqFFFF(const BoolV a)
{
return BAllEq(a, BFFFF());
}
NV_FORCE_INLINE uint32_t BGetBitMask(const BoolV a)
{
static NV_ALIGN(16, const uint32_t) bitMaskData[4] = { 1, 2, 4, 8 };
const uint32x4_t bitMask = *(reinterpret_cast<const uint32x4_t*>(bitMaskData));
const uint32x4_t t0 = vandq_u32(a, bitMask);
const uint32x2_t t1 = vpadd_u32(vget_low_u32(t0), vget_high_u32(t0)); // Pairwise add (0 + 1), (2 + 3)
return uint32_t(vget_lane_u32(vpadd_u32(t1, t1), 0));
}
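/*
Illustrative mapping (not additional API): each all-ones lane contributes its bit
(x -> 1, y -> 2, z -> 4, w -> 8), so the result is a 4-bit mask in [0, 15].

    BGetBitMask(BFFFF()) == 0x0
    BGetBitMask(BTFTF()) == 0x5   // x and z lanes set
    BGetBitMask(BTTTT()) == 0xF
*/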
//////////////////////////////////
//MAT33V
//////////////////////////////////
NV_FORCE_INLINE Vec3V M33MulV3(const Mat33V& a, const Vec3V b)
{
const FloatV x=V3GetX(b);
const FloatV y=V3GetY(b);
const FloatV z=V3GetZ(b);
const Vec3V v0=V3Scale(a.col0,x);
const Vec3V v1=V3Scale(a.col1,y);
const Vec3V v2=V3Scale(a.col2,z);
const Vec3V v0PlusV1=V3Add(v0,v1);
return V3Add(v0PlusV1,v2);
}
NV_FORCE_INLINE Vec3V M33TrnspsMulV3(const Mat33V& a, const Vec3V b)
{
const FloatV x=V3Dot(a.col0,b);
const FloatV y=V3Dot(a.col1,b);
const FloatV z=V3Dot(a.col2,b);
return V3Merge(x,y,z);
}
NV_FORCE_INLINE Vec3V M33MulV3AddV3(const Mat33V& A, const Vec3V b, const Vec3V c)
{
const FloatV x=V3GetX(b);
const FloatV y=V3GetY(b);
const FloatV z=V3GetZ(b);
Vec3V result = V3ScaleAdd(A.col0, x, c);
result = V3ScaleAdd(A.col1, y, result);
return V3ScaleAdd(A.col2, z, result);
}
NV_FORCE_INLINE Mat33V M33MulM33(const Mat33V& a, const Mat33V& b)
{
return Mat33V(M33MulV3(a,b.col0),M33MulV3(a,b.col1),M33MulV3(a,b.col2));
}
NV_FORCE_INLINE Mat33V M33Add(const Mat33V& a, const Mat33V& b)
{
return Mat33V(V3Add(a.col0,b.col0),V3Add(a.col1,b.col1),V3Add(a.col2,b.col2));
}
NV_FORCE_INLINE Mat33V M33Scale(const Mat33V& a, const FloatV& b)
{
return Mat33V(V3Scale(a.col0,b),V3Scale(a.col1,b),V3Scale(a.col2,b));
}
NV_FORCE_INLINE Mat33V M33Inverse(const Mat33V& a)
{
const float32x2_t zeros = vreinterpret_f32_u32(vmov_n_u32(0));
const BoolV btttf = BTTTF();
const Vec3V cross01 = V3Cross(a.col0,a.col1);
const Vec3V cross12 = V3Cross(a.col1,a.col2);
const Vec3V cross20 = V3Cross(a.col2,a.col0);
const FloatV dot = V3Dot(cross01,a.col2);
const FloatV invDet = FRecipFast(dot);
const float32x4x2_t merge = vzipq_f32(cross12, cross01);
const float32x4_t mergeh = merge.val[0];
const float32x4_t mergel = merge.val[1];
//const Vec3V colInv0 = XMVectorPermute(mergeh,cross20,NvPermuteControl(0,4,1,7));
const float32x4_t colInv0_xxyy = vzipq_f32(mergeh, cross20).val[0];
const float32x4_t colInv0 = vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(colInv0_xxyy), btttf));
//const Vec3V colInv1 = XMVectorPermute(mergeh,cross20,NvPermuteControl(2,5,3,7));
const float32x2_t zw0 = vget_high_f32(mergeh);
const float32x2_t xy1 = vget_low_f32(cross20);
const float32x2_t yzero1 = vext_f32(xy1, zeros, 1);
const float32x2x2_t merge1 = vzip_f32(zw0, yzero1);
const float32x4_t colInv1 = vcombine_f32(merge1.val[0], merge1.val[1]);
//const Vec3V colInv2 = XMVectorPermute(mergel,cross20,NvPermuteControl(0,6,1,7));
const float32x2_t x0y0 = vget_low_f32(mergel);
const float32x2_t z1w1 = vget_high_f32(cross20);
const float32x2x2_t merge2 = vzip_f32(x0y0, z1w1);
const float32x4_t colInv2 = vcombine_f32(merge2.val[0], merge2.val[1]);
return Mat33V
(
vmulq_lane_f32(colInv0, invDet, 0),
vmulq_lane_f32(colInv1, invDet, 0),
vmulq_lane_f32(colInv2, invDet, 0)
);
}
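/*
For clarity (restating the math, not additional API): with columns c0, c1, c2,

    det(A)  = dot(c0 x c1, c2)
    inv(A)  = [ c1 x c2 | c2 x c0 | c0 x c1 ]^T / det(A)

so cross12, cross20 and cross01 above are the rows of the adjugate, assembled by the
lane shuffles. FRecipFast is the fast reciprocal variant, so the result is only an
approximate inverse, and a singular matrix (det == 0) is not handled.
*/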
NV_FORCE_INLINE Mat33V M33Trnsps(const Mat33V& a)
{
return Mat33V
(
V3Merge(V3GetX(a.col0),V3GetX(a.col1),V3GetX(a.col2)),
V3Merge(V3GetY(a.col0),V3GetY(a.col1),V3GetY(a.col2)),
V3Merge(V3GetZ(a.col0),V3GetZ(a.col1),V3GetZ(a.col2))
);
}
NV_FORCE_INLINE Mat33V M33Identity()
{
return Mat33V
(
V3UnitX(),
V3UnitY(),
V3UnitZ()
);
}
NV_FORCE_INLINE Mat33V M33Sub(const Mat33V& a, const Mat33V& b)
{
return Mat33V(V3Sub(a.col0,b.col0),V3Sub(a.col1,b.col1),V3Sub(a.col2,b.col2));
}
NV_FORCE_INLINE Mat33V M33Neg(const Mat33V& a)
{
return Mat33V(V3Neg(a.col0),V3Neg(a.col1),V3Neg(a.col2));
}
NV_FORCE_INLINE Mat33V M33Abs(const Mat33V& a)
{
return Mat33V(V3Abs(a.col0),V3Abs(a.col1),V3Abs(a.col2));
}
NV_FORCE_INLINE Mat33V PromoteVec3V(const Vec3V v)
{
const BoolV bTFFF = BTFFF();
const BoolV bFTFF = BFTFF();
const BoolV bFFTF = BFFTF();
const Vec3V zero = V3Zero();
return Mat33V( V3Sel(bTFFF, v, zero),
V3Sel(bFTFF, v, zero),
V3Sel(bFFTF, v, zero));
}
NV_FORCE_INLINE Mat33V M33Diagonal(const Vec3VArg d)
{
const Vec3V x = V3Mul(V3UnitX(), d);
const Vec3V y = V3Mul(V3UnitY(), d);
const Vec3V z = V3Mul(V3UnitZ(), d);
return Mat33V(x, y, z);
}
//////////////////////////////////
//MAT34V
//////////////////////////////////
NV_FORCE_INLINE Vec3V M34MulV3(const Mat34V& a, const Vec3V b)
{
const FloatV x=V3GetX(b);
const FloatV y=V3GetY(b);
const FloatV z=V3GetZ(b);
const Vec3V v0=V3Scale(a.col0,x);
const Vec3V v1=V3Scale(a.col1,y);
const Vec3V v2=V3Scale(a.col2,z);
const Vec3V v0PlusV1=V3Add(v0,v1);
const Vec3V v0PlusV1Plusv2=V3Add(v0PlusV1,v2);
return (V3Add(v0PlusV1Plusv2,a.col3));
}
NV_FORCE_INLINE Vec3V M34Mul33V3(const Mat34V& a, const Vec3V b)
{
const FloatV x=V3GetX(b);
const FloatV y=V3GetY(b);
const FloatV z=V3GetZ(b);
const Vec3V v0=V3Scale(a.col0,x);
const Vec3V v1=V3Scale(a.col1,y);
const Vec3V v2=V3Scale(a.col2,z);
const Vec3V v0PlusV1=V3Add(v0,v1);
return V3Add(v0PlusV1,v2);
}
NV_FORCE_INLINE Vec3V M34TrnspsMul33V3(const Mat34V& a, const Vec3V b)
{
const FloatV x=V3Dot(a.col0,b);
const FloatV y=V3Dot(a.col1,b);
const FloatV z=V3Dot(a.col2,b);
return V3Merge(x,y,z);
}
NV_FORCE_INLINE Mat34V M34MulM34(const Mat34V& a, const Mat34V& b)
{
return Mat34V(M34Mul33V3(a,b.col0), M34Mul33V3(a,b.col1),M34Mul33V3(a,b.col2),M34MulV3(a,b.col3));
}
NV_FORCE_INLINE Mat33V M34MulM33(const Mat34V& a, const Mat33V& b)
{
return Mat33V(M34Mul33V3(a,b.col0),M34Mul33V3(a,b.col1),M34Mul33V3(a,b.col2));
}
NV_FORCE_INLINE Mat33V M34Mul33MM34(const Mat34V& a, const Mat34V& b)
{
return Mat33V(M34Mul33V3(a,b.col0),M34Mul33V3(a,b.col1),M34Mul33V3(a,b.col2));
}
NV_FORCE_INLINE Mat34V M34Add(const Mat34V& a, const Mat34V& b)
{
return Mat34V(V3Add(a.col0,b.col0),V3Add(a.col1,b.col1),V3Add(a.col2,b.col2),V3Add(a.col3,b.col3));
}
NV_FORCE_INLINE Mat33V M34Trnsps33(const Mat34V& a)
{
return Mat33V
(
V3Merge(V3GetX(a.col0),V3GetX(a.col1),V3GetX(a.col2)),
V3Merge(V3GetY(a.col0),V3GetY(a.col1),V3GetY(a.col2)),
V3Merge(V3GetZ(a.col0),V3GetZ(a.col1),V3GetZ(a.col2))
);
}
//////////////////////////////////
//MAT44V
//////////////////////////////////
NV_FORCE_INLINE Vec4V M44MulV4(const Mat44V& a, const Vec4V b)
{
const FloatV x=V4GetX(b);
const FloatV y=V4GetY(b);
const FloatV z=V4GetZ(b);
const FloatV w=V4GetW(b);
const Vec4V v0=V4Scale(a.col0,x);
const Vec4V v1=V4Scale(a.col1,y);
const Vec4V v2=V4Scale(a.col2,z);
const Vec4V v3=V4Scale(a.col3,w);
const Vec4V v0PlusV1=V4Add(v0,v1);
const Vec4V v0PlusV1Plusv2=V4Add(v0PlusV1,v2);
return (V4Add(v0PlusV1Plusv2,v3));
}
NV_FORCE_INLINE Vec4V M44TrnspsMulV4(const Mat44V& a, const Vec4V b)
{
return V4Merge(V4Dot(a.col0,b), V4Dot(a.col1,b), V4Dot(a.col2,b), V4Dot(a.col3,b));
}
NV_FORCE_INLINE Mat44V M44MulM44(const Mat44V& a, const Mat44V& b)
{
return Mat44V(M44MulV4(a,b.col0),M44MulV4(a,b.col1),M44MulV4(a,b.col2),M44MulV4(a,b.col3));
}
NV_FORCE_INLINE Mat44V M44Add(const Mat44V& a, const Mat44V& b)
{
return Mat44V(V4Add(a.col0,b.col0),V4Add(a.col1,b.col1),V4Add(a.col2,b.col2),V4Add(a.col3,b.col3));
}
NV_FORCE_INLINE Mat44V M44Trnsps(const Mat44V& a)
{
// asm volatile(
// "vzip.f32 %q0, %q2 \n\t"
// "vzip.f32 %q1, %q3 \n\t"
// "vzip.f32 %q0, %q1 \n\t"
// "vzip.f32 %q2, %q3 \n\t"
// : "+w" (a.col0), "+w" (a.col1), "+w" (a.col2), "+w" a.col3));
const float32x4x2_t v0v1 = vzipq_f32(a.col0, a.col2);
const float32x4x2_t v2v3 = vzipq_f32(a.col1, a.col3);
const float32x4x2_t zip0 = vzipq_f32(v0v1.val[0], v2v3.val[0]);
const float32x4x2_t zip1 = vzipq_f32(v0v1.val[1], v2v3.val[1]);
return Mat44V(zip0.val[0], zip0.val[1], zip1.val[0], zip1.val[1]);
}
NV_FORCE_INLINE Mat44V M44Inverse(const Mat44V& a)
{
float32x4_t minor0, minor1, minor2, minor3;
float32x4_t row0, row1, row2, row3;
float32x4_t det, tmp1;
tmp1 = vmovq_n_f32(0.0f);
row1 = vmovq_n_f32(0.0f);
row3 = vmovq_n_f32(0.0f);
row0 = a.col0;
row1 = vextq_f32(a.col1, a.col1, 2);
row2 = a.col2;
row3 = vextq_f32(a.col3, a.col3, 2);
tmp1 = vmulq_f32(row2, row3);
tmp1 = vrev64q_f32(tmp1);
minor0 = vmulq_f32(row1, tmp1);
minor1 = vmulq_f32(row0, tmp1);
tmp1 = vextq_f32(tmp1, tmp1, 2);
minor0 = vsubq_f32(vmulq_f32(row1, tmp1), minor0);
minor1 = vsubq_f32(vmulq_f32(row0, tmp1), minor1);
minor1 = vextq_f32(minor1, minor1, 2);
tmp1 = vmulq_f32(row1, row2);
tmp1 = vrev64q_f32(tmp1);
minor0 = vaddq_f32(vmulq_f32(row3, tmp1), minor0);
minor3 = vmulq_f32(row0, tmp1);
tmp1 = vextq_f32(tmp1, tmp1, 2);
minor0 = vsubq_f32(minor0, vmulq_f32(row3, tmp1));
minor3 = vsubq_f32(vmulq_f32(row0, tmp1), minor3);
minor3 = vextq_f32(minor3, minor3, 2);
tmp1 = vmulq_f32(vextq_f32(row1, row1, 2), row3);
tmp1 = vrev64q_f32(tmp1);
row2 = vextq_f32(row2, row2, 2);
minor0 = vaddq_f32(vmulq_f32(row2, tmp1), minor0);
minor2 = vmulq_f32(row0, tmp1);
tmp1 = vextq_f32(tmp1, tmp1, 2);
minor0 = vsubq_f32(minor0, vmulq_f32(row2, tmp1));
minor2 = vsubq_f32(vmulq_f32(row0, tmp1), minor2);
minor2 = vextq_f32(minor2, minor2, 2);
tmp1 = vmulq_f32(row0, row1);
tmp1 = vrev64q_f32(tmp1);
minor2 = vaddq_f32(vmulq_f32(row3, tmp1), minor2);
minor3 = vsubq_f32(vmulq_f32(row2, tmp1), minor3);
tmp1 = vextq_f32(tmp1, tmp1, 2);
minor2 = vsubq_f32(vmulq_f32(row3, tmp1), minor2);
minor3 = vsubq_f32(minor3, vmulq_f32(row2, tmp1));
tmp1 = vmulq_f32(row0, row3);
tmp1 = vrev64q_f32(tmp1);
minor1 = vsubq_f32(minor1, vmulq_f32(row2, tmp1));
minor2 = vaddq_f32(vmulq_f32(row1, tmp1), minor2);
tmp1 = vextq_f32(tmp1, tmp1, 2);
minor1 = vaddq_f32(vmulq_f32(row2, tmp1), minor1);
minor2 = vsubq_f32(minor2, vmulq_f32(row1, tmp1));
tmp1 = vmulq_f32(row0, row2);
tmp1 = vrev64q_f32(tmp1);
minor1 = vaddq_f32(vmulq_f32(row3, tmp1), minor1);
minor3 = vsubq_f32(minor3, vmulq_f32(row1, tmp1));
tmp1 = vextq_f32(tmp1, tmp1, 2);
minor1 = vsubq_f32(minor1, vmulq_f32(row3, tmp1));
minor3 = vaddq_f32(vmulq_f32(row1, tmp1), minor3);
det = vmulq_f32(row0, minor0);
det = vaddq_f32(vextq_f32(det, det, 2), det);
det = vaddq_f32(vrev64q_f32(det), det);
det = vdupq_lane_f32(VRECIPE(vget_low_f32(det)), 0);
minor0 = vmulq_f32(det, minor0);
minor1 = vmulq_f32(det, minor1);
minor2 = vmulq_f32(det, minor2);
minor3 = vmulq_f32(det, minor3);
Mat44V invTrans(minor0,minor1,minor2,minor3);
return M44Trnsps(invTrans);
}
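/*
Note: this is the classic Cramer's-rule 4x4 inverse (cofactors built from 2x2
sub-products, in the style of the well-known SSE routine) ported to NEON with
vextq/vrev64q shuffles. The determinant reciprocal comes from VRECIPE, a raw
estimate with no Newton-Raphson refinement, so the result is approximate and a
singular matrix is not handled. A sanity check one might run (illustrative):

    const Mat44V inv = M44Inverse(m);       // m: any well-conditioned Mat44V
    const Mat44V id  = M44MulM44(m, inv);   // columns should be close to V4UnitX/Y/Z/W
*/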
NV_FORCE_INLINE Vec4V V4LoadXYZW(const float& x, const float& y, const float& z, const float& w)
{
#if NV_WINRT
NV_ALIGN(16,float) r[4] = {x, y, z ,w};
return vld1q_f32((const float32_t*)r);
#else
const float32x4_t ret = {x, y, z, w};
return ret;
#endif // NV_WINRT
}
/*
NV_FORCE_INLINE VecU16V V4U32PK(VecU32V a, VecU32V b)
{
return vcombine_u16(vqmovn_u32(a), vqmovn_u32(b));
}
*/
NV_FORCE_INLINE VecU32V V4U32Sel(const BoolV c, const VecU32V a, const VecU32V b)
{
return vbslq_u32(c, a, b);
}
NV_FORCE_INLINE VecU32V V4U32or(VecU32V a, VecU32V b)
{
return vorrq_u32(a, b);
}
NV_FORCE_INLINE VecU32V V4U32and(VecU32V a, VecU32V b)
{
return vandq_u32(a, b);
}
NV_FORCE_INLINE VecU32V V4U32Andc(VecU32V a, VecU32V b)
{
//return vbicq_u32(a, b); // creates gcc compiler bug in RTreeQueries.cpp
return vandq_u32(a, vmvnq_u32(b));
}
/*
NV_FORCE_INLINE VecU16V V4U16Or(VecU16V a, VecU16V b)
{
return vorrq_u16(a, b);
}
*/
/*
NV_FORCE_INLINE VecU16V V4U16And(VecU16V a, VecU16V b)
{
return vandq_u16(a, b);
}
*/
/*
NV_FORCE_INLINE VecU16V V4U16Andc(VecU16V a, VecU16V b)
{
return vbicq_u16(a, b);
}
*/
NV_FORCE_INLINE VecI32V I4Load(const int32_t i)
{
return vdupq_n_s32(i);
}
NV_FORCE_INLINE VecI32V I4LoadU(const int32_t* i)
{
return vld1q_s32(i);
}
NV_FORCE_INLINE VecI32V I4LoadA(const int32_t* i)
{
return vld1q_s32(i);
}
NV_FORCE_INLINE VecI32V VecI32V_Add(const VecI32VArg a, const VecI32VArg b)
{
return vaddq_s32(a, b);
}
NV_FORCE_INLINE VecI32V VecI32V_Sub(const VecI32VArg a, const VecI32VArg b)
{
return vsubq_s32(a, b);
}
NV_FORCE_INLINE BoolV VecI32V_IsGrtr(const VecI32VArg a, const VecI32VArg b)
{
return vcgtq_s32(a, b);
}
NV_FORCE_INLINE BoolV VecI32V_IsEq(const VecI32VArg a, const VecI32VArg b)
{
return vceqq_s32(a, b);
}
NV_FORCE_INLINE VecI32V V4I32Sel(const BoolV c, const VecI32V a, const VecI32V b)
{
return vbslq_s32(c, a, b);
}
NV_FORCE_INLINE VecI32V VecI32V_Zero()
{
return vdupq_n_s32(0);
}
NV_FORCE_INLINE VecI32V VecI32V_One()
{
return vdupq_n_s32(1);
}
NV_FORCE_INLINE VecI32V VecI32V_Two()
{
return vdupq_n_s32(2);
}
NV_FORCE_INLINE VecI32V VecI32V_MinusOne()
{
return vdupq_n_s32(-1);
}
NV_FORCE_INLINE VecU32V U4Zero()
{
return U4Load(0);
}
NV_FORCE_INLINE VecU32V U4One()
{
return U4Load(1);
}
NV_FORCE_INLINE VecU32V U4Two()
{
return U4Load(2);
}
NV_FORCE_INLINE VecShiftV VecI32V_PrepareShift(const VecI32VArg shift)
{
return shift;
}
NV_FORCE_INLINE VecI32V VecI32V_LeftShift(const VecI32VArg a, const VecShiftVArg count)
{
return vshlq_s32(a, count);
}
NV_FORCE_INLINE VecI32V VecI32V_RightShift(const VecI32VArg a, const VecShiftVArg count)
{
return vshlq_s32(a, VecI32V_Sub(I4Load(0), count));
}
NV_FORCE_INLINE VecI32V VecI32V_And(const VecI32VArg a, const VecI32VArg b)
{
return vandq_s32(a, b);
}
NV_FORCE_INLINE VecI32V VecI32V_Or(const VecI32VArg a, const VecI32VArg b)
{
return vorrq_s32(a, b);
}
NV_FORCE_INLINE VecI32V VecI32V_GetX(const VecI32VArg f)
{
const int32x2_t fLow = vget_low_s32(f);
return vdupq_lane_s32(fLow, 0);
}
NV_FORCE_INLINE VecI32V VecI32V_GetY(const VecI32VArg f)
{
const int32x2_t fLow = vget_low_s32(f);
return vdupq_lane_s32(fLow, 1);
}
NV_FORCE_INLINE VecI32V VecI32V_GetZ(const VecI32VArg f)
{
const int32x2_t fHigh = vget_high_s32(f);
return vdupq_lane_s32(fHigh, 0);
}
NV_FORCE_INLINE VecI32V VecI32V_GetW(const VecI32VArg f)
{
const int32x2_t fHigh = vget_high_s32(f);
return vdupq_lane_s32(fHigh, 1);
}
NV_FORCE_INLINE VecI32V VecI32V_Sel(const BoolV c, const VecI32VArg a, const VecI32VArg b)
{
VECMATHAOS_ASSERT(_VecMathTests::allElementsEqualBoolV(c,BTTTT()) || _VecMathTests::allElementsEqualBoolV(c,BFFFF()));
return vbslq_s32(c, a, b);
}
NV_FORCE_INLINE void NvI32_From_VecI32V(const VecI32VArg a, int32_t* i)
{
*i = vgetq_lane_s32(a, 0);
}
NV_FORCE_INLINE VecI32V VecI32V_Merge(const VecI32VArg a, const VecI32VArg b, const VecI32VArg c, const VecI32VArg d)
{
const int32x2_t aLow = vget_low_s32(a);
const int32x2_t bLow = vget_low_s32(b);
const int32x2_t cLow = vget_low_s32(c);
const int32x2_t dLow = vget_low_s32(d);
const int32x2_t low = vext_s32(aLow, bLow, 1);
const int32x2_t high = vext_s32(cLow, dLow, 1);
return vcombine_s32(low, high);
}
NV_FORCE_INLINE VecI32V VecI32V_From_BoolV(const BoolVArg a)
{
return reinterpret_cast<const int32x4_t&>(a);
}
NV_FORCE_INLINE VecU32V VecU32V_From_BoolV(const BoolVArg a)
{
return reinterpret_cast<const uint32x4_t&>(a);
}
/*
template<int a> NV_FORCE_INLINE VecI32V V4ISplat()
{
return vdupq_n_s32(a);
}
template<uint32_t a> NV_FORCE_INLINE VecU32V V4USplat()
{
return vdupq_n_u32(a);
}
*/
/*
NV_FORCE_INLINE void V4U16StoreAligned(VecU16V val, VecU16V* address)
{
vst1q_u16((uint16_t*)address, val);
}
*/
NV_FORCE_INLINE void V4U32StoreAligned(VecU32V val, VecU32V* address)
{
vst1q_u32((uint32_t*)address, val);
}
NV_FORCE_INLINE Vec4V V4LoadAligned(Vec4V* addr)
{
return vld1q_f32((float32_t*)addr);
}
NV_FORCE_INLINE Vec4V V4LoadUnaligned(Vec4V* addr)
{
return vld1q_f32((float32_t*)addr);
}
NV_FORCE_INLINE Vec4V V4Andc(const Vec4V a, const VecU32V b)
{
return vreinterpretq_f32_u32(V4U32Andc(vreinterpretq_u32_f32(a), b));
}
NV_FORCE_INLINE VecU32V V4IsGrtrV32u(const Vec4V a, const Vec4V b)
{
return V4IsGrtr(a, b);
}
NV_FORCE_INLINE VecU16V V4U16LoadAligned(VecU16V* addr)
{
return vld1q_u16((uint16_t*)addr);
}
NV_FORCE_INLINE VecU16V V4U16LoadUnaligned(VecU16V* addr)
{
return vld1q_u16((uint16_t*)addr);
}
NV_FORCE_INLINE VecU16V V4U16CompareGt(VecU16V a, VecU16V b)
{
return vcgtq_u16(a, b);
}
NV_FORCE_INLINE VecU16V V4I16CompareGt(VecU16V a, VecU16V b)
{
return vcgtq_s16(vreinterpretq_s16_u16(a), vreinterpretq_s16_u16(b));
}
NV_FORCE_INLINE Vec4V Vec4V_From_VecU32V(VecU32V a)
{
return vcvtq_f32_u32(a);
}
NV_FORCE_INLINE Vec4V Vec4V_From_VecI32V(VecI32V a)
{
return vcvtq_f32_s32(a);
}
NV_FORCE_INLINE VecI32V VecI32V_From_Vec4V(Vec4V a)
{
return vcvtq_s32_f32(a);
}
NV_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecU32V(VecU32V a)
{
return vreinterpretq_f32_u32(a);
}
NV_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecI32V(VecI32V a)
{
return vreinterpretq_f32_s32(a);
}
NV_FORCE_INLINE VecU32V VecU32V_ReinterpretFrom_Vec4V(Vec4V a)
{
return vreinterpretq_u32_f32(a);
}
NV_FORCE_INLINE VecI32V VecI32V_ReinterpretFrom_Vec4V(Vec4V a)
{
return vreinterpretq_s32_f32(a);
}
template<int index> NV_FORCE_INLINE BoolV BSplatElement(BoolV a)
{
#if NV_WINRT
if(index == 0)
{
return vdupq_lane_u32(vget_low_u32(a), 0);
}
else if (index == 1)
{
return vdupq_lane_u32(vget_low_u32(a), 1);
}
#else
if(index < 2)
{
return vdupq_lane_u32(vget_low_u32(a), index);
}
#endif
else if(index == 2)
{
return vdupq_lane_u32(vget_high_u32(a), 0);
}
else if(index == 3)
{
return vdupq_lane_u32(vget_high_u32(a), 1);
}
}
template<int index> NV_FORCE_INLINE VecU32V V4U32SplatElement(VecU32V a)
{
#if NV_WINRT
if(index == 0)
{
return vdupq_lane_u32(vget_low_u32(a), 0);
}
else if (index == 1)
{
return vdupq_lane_u32(vget_low_u32(a), 1);
}
#else
if(index < 2)
{
return vdupq_lane_u32(vget_low_u32(a), index);
}
#endif
else if(index == 2)
{
return vdupq_lane_u32(vget_high_u32(a), 0);
}
else if(index == 3)
{
return vdupq_lane_u32(vget_high_u32(a), 1);
}
}
template<int index> NV_FORCE_INLINE Vec4V V4SplatElement(Vec4V a)
{
#if NV_WINRT
if(index == 0)
{
return vdupq_lane_f32(vget_low_f32(a), 0);
}
else if (index == 1)
{
return vdupq_lane_f32(vget_low_f32(a), 1);
}
#else
if(index < 2)
{
return vdupq_lane_f32(vget_low_f32(a), index);
}
#endif
else if(index == 2)
{
return vdupq_lane_f32(vget_high_f32(a), 0);
}
else if(index == 3)
{
return vdupq_lane_f32(vget_high_f32(a), 1);
}
}
template<int index> NV_FORCE_INLINE VecU16V V4U16SplatElement(VecU16V a)
{
#if NV_WINRT
if(index == 0)
{
return vdupq_lane_u16(vget_low_u16(a), 0);
}
else if(index == 1)
{
return vdupq_lane_u16(vget_low_u16(a), 1);
}
else if(index == 2)
{
return vdupq_lane_u16(vget_low_u16(a), 2);
}
else if(index == 3)
{
return vdupq_lane_u16(vget_low_u16(a), 3);
}
#else
if(index < 4)
{
return vdupq_lane_u16(vget_low_u16(a),index);
}
#endif
else if(index == 4)
{
return vdupq_lane_u16(vget_high_u16(a), 0);
}
else if(index == 5)
{
return vdupq_lane_u16(vget_high_u16(a), 1);
}
else if(index == 6)
{
return vdupq_lane_u16(vget_high_u16(a), 2);
}
else if(index == 7)
{
return vdupq_lane_u16(vget_high_u16(a), 3);
}
}
template<int imm> NV_FORCE_INLINE VecI16V V4I16SplatImmediate()
{
return vdupq_n_s16(imm);
}
template<uint16_t imm> NV_FORCE_INLINE VecU16V V4U16SplatImmediate()
{
return vdupq_n_u16(imm);
}
NV_FORCE_INLINE VecU16V V4U16SubtractModulo(VecU16V a, VecU16V b)
{
return vsubq_u16(a, b);
}
NV_FORCE_INLINE VecU16V V4U16AddModulo(VecU16V a, VecU16V b)
{
return vaddq_u16(a, b);
}
NV_FORCE_INLINE VecU32V V4U16GetLo16(VecU16V a)
{
const uint16x4x2_t ret = vuzp_u16(vget_low_u16(a), vget_high_u16(a));
return vmovl_u16(ret.val[0]);
}
NV_FORCE_INLINE VecU32V V4U16GetHi16(VecU16V a)
{
const uint16x4x2_t ret = vuzp_u16(vget_low_u16(a), vget_high_u16(a));
return vmovl_u16(ret.val[1]);
}
NV_FORCE_INLINE VecU32V VecU32VLoadXYZW(uint32_t x, uint32_t y, uint32_t z, uint32_t w)
{
#if NV_WINRT
NV_ALIGN(16,uint32_t) r[4] = {x, y, z ,w};
return vld1q_u32((const uint32_t*)r);
#else
const uint32x4_t ret = {x, y, z, w};
return ret;
#endif
}
NV_FORCE_INLINE VecU32V U4Load(const uint32_t i)
{
return vdupq_n_u32(i);
}
NV_FORCE_INLINE VecU32V U4LoadU(const uint32_t* i)
{
return vld1q_u32(i);
}
NV_FORCE_INLINE VecU32V U4LoadA(const uint32_t* i)
{
return vld1q_u32(i);
}
NV_FORCE_INLINE Vec4V V4Ceil(const Vec4V in)
{
const float32x4_t ones = vdupq_n_f32(1.0f);
const float32x4_t rdToZero = vcvtq_f32_s32(vcvtq_s32_f32(in));
const float32x4_t rdToZeroPlusOne = vaddq_f32(rdToZero, ones);
const uint32x4_t gt = vcgtq_f32(in, rdToZero);
return vbslq_f32(gt, rdToZeroPlusOne, rdToZero);
}
NV_FORCE_INLINE Vec4V V4Floor(const Vec4V in)
{
const float32x4_t ones = vdupq_n_f32(1.0f);
const float32x4_t rdToZero = vcvtq_f32_s32(vcvtq_s32_f32(in));
const float32x4_t rdToZeroMinusOne = vsubq_f32(rdToZero, ones);
const uint32x4_t lt = vcltq_f32(in, rdToZero);
return vbslq_f32(lt, rdToZeroMinusOne, rdToZero);
}
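/*
Illustrative behaviour of V4Ceil/V4Floor above: both truncate toward zero first and
then correct by +/-1.0f where truncation moved the value the wrong way. Inputs are
assumed to fit in int32 range, since the intermediate uses vcvtq_s32_f32.

    V4Floor({ 1.7f, -1.7f, 2.0f, -0.0f }) -> { 1.0f, -2.0f, 2.0f, 0.0f }
    V4Ceil ({ 1.2f, -1.2f, 3.0f,  0.5f }) -> { 2.0f, -1.0f, 3.0f, 1.0f }
*/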
NV_FORCE_INLINE VecU32V V4ConvertToU32VSaturate(const Vec4V in, uint32_t power)
{
NV_ASSERT(power == 0 && "Non-zero power not supported in convertToU32VSaturate");
NV_UNUSED(power); // prevent warning in release builds
return vcvtq_u32_f32(in);
}
#endif //PS_UNIX_NEON_INLINE_AOS_H
| 100,910 | C | 27.077629 | 151 | 0.655554 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/platform/windows/NsWindowsAoS.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef PS_WINDOWS_AOS_H
#define PS_WINDOWS_AOS_H
// no includes here! this file should be included from NvcVecMath.h only!!!
#if !COMPILE_VECTOR_INTRINSICS
#error Vector intrinsics should not be included when using scalar implementation.
#endif
typedef __m128 FloatV;
typedef __m128 Vec3V;
typedef __m128 Vec4V;
typedef __m128 BoolV;
typedef __m128 VecU32V;
typedef __m128 VecI32V;
typedef __m128 VecU16V;
typedef __m128 VecI16V;
typedef __m128 VecU8V;
typedef __m128 QuatV;
#define FloatVArg FloatV&
#define Vec3VArg Vec3V&
#define Vec4VArg Vec4V&
#define BoolVArg BoolV&
#define VecU32VArg VecU32V&
#define VecI32VArg VecI32V&
#define VecU16VArg VecU16V&
#define VecI16VArg VecI16V&
#define VecU8VArg VecU8V&
#define QuatVArg QuatV&
//Optimization for situations in which you cross product multiple vectors with the same vector.
//Avoids 2X shuffles per product
struct VecCrossV
{
Vec3V mL1;
Vec3V mR1;
};
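// Usage sketch (illustrative; V3PrepareCross and the V3Cross overload taking a VecCrossV
// are assumed to live in the matching inline AoS implementation headers, not here):
// pre-shuffle the operand that stays fixed, then reuse it for many cross products.
//
// VecCrossV n = V3PrepareCross(normal); // shuffles done once
// Vec3V t0 = V3Cross(n, edge0); // no per-product shuffles for n
// Vec3V t1 = V3Cross(n, edge1);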
struct VecShiftV
{
VecI32V shift;
};
#define VecShiftVArg VecShiftV&
NV_ALIGN_PREFIX(16)
struct Mat33V
{
Mat33V(){}
Mat33V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2)
: col0(c0),
col1(c1),
col2(c2)
{
}
Vec3V NV_ALIGN(16,col0);
Vec3V NV_ALIGN(16,col1);
Vec3V NV_ALIGN(16,col2);
}NV_ALIGN_SUFFIX(16);
NV_ALIGN_PREFIX(16)
struct Mat34V
{
Mat34V(){}
Mat34V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2, const Vec3V& c3)
: col0(c0),
col1(c1),
col2(c2),
col3(c3)
{
}
Vec3V NV_ALIGN(16,col0);
Vec3V NV_ALIGN(16,col1);
Vec3V NV_ALIGN(16,col2);
Vec3V NV_ALIGN(16,col3);
}NV_ALIGN_SUFFIX(16);
NV_ALIGN_PREFIX(16)
struct Mat43V
{
Mat43V(){}
Mat43V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2)
: col0(c0),
col1(c1),
col2(c2)
{
}
Vec4V NV_ALIGN(16,col0);
Vec4V NV_ALIGN(16,col1);
Vec4V NV_ALIGN(16,col2);
}NV_ALIGN_SUFFIX(16);
NV_ALIGN_PREFIX(16)
struct Mat44V
{
Mat44V(){}
Mat44V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2, const Vec4V& c3)
: col0(c0),
col1(c1),
col2(c2),
col3(c3)
{
}
Vec4V NV_ALIGN(16,col0);
Vec4V NV_ALIGN(16,col1);
Vec4V NV_ALIGN(16,col2);
Vec4V NV_ALIGN(16,col3);
}NV_ALIGN_SUFFIX(16);
#endif //PS_WINDOWS_AOS_H
| 4,049 | C | 27.928571 | 95 | 0.69227 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/platform/windows/NsWindowsTrigConstants.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef PS_WINDOWS_TRIG_CONSTANTS_H
#define PS_WINDOWS_TRIG_CONSTANTS_H
//#include "NvMath.h"
#define NV_GLOBALCONST extern const __declspec(selectany)
__declspec(align(16)) struct NV_VECTORF32
{
float f[4];
};
//#define NV_PI 3.141592654f
//#define NV_2PI 6.283185307f
//#define NV_1DIVPI 0.318309886f
//#define NV_1DIV2PI 0.159154943f
//#define NV_PIDIV2 1.570796327f
//#define NV_PIDIV4 0.785398163f
NV_GLOBALCONST NV_VECTORF32 g_NVSinCoefficients0 = {{1.0f, -0.166666667f, 8.333333333e-3f, -1.984126984e-4f}};
NV_GLOBALCONST NV_VECTORF32 g_NVSinCoefficients1 = {{2.755731922e-6f, -2.505210839e-8f, 1.605904384e-10f, -7.647163732e-13f}};
NV_GLOBALCONST NV_VECTORF32 g_NVSinCoefficients2 = {{2.811457254e-15f, -8.220635247e-18f, 1.957294106e-20f, -3.868170171e-23f}};
NV_GLOBALCONST NV_VECTORF32 g_NVCosCoefficients0 = {{1.0f, -0.5f, 4.166666667e-2f, -1.388888889e-3f}};
NV_GLOBALCONST NV_VECTORF32 g_NVCosCoefficients1 = {{2.480158730e-5f, -2.755731922e-7f, 2.087675699e-9f, -1.147074560e-11f}};
NV_GLOBALCONST NV_VECTORF32 g_NVCosCoefficients2 = {{4.779477332e-14f, -1.561920697e-16f, 4.110317623e-19f, -8.896791392e-22f}};
NV_GLOBALCONST NV_VECTORF32 g_NVTanCoefficients0 = {{1.0f, 0.333333333f, 0.133333333f, 5.396825397e-2f}};
NV_GLOBALCONST NV_VECTORF32 g_NVTanCoefficients1 = {{2.186948854e-2f, 8.863235530e-3f, 3.592128167e-3f, 1.455834485e-3f}};
NV_GLOBALCONST NV_VECTORF32 g_NVTanCoefficients2 = {{5.900274264e-4f, 2.391290764e-4f, 9.691537707e-5f, 3.927832950e-5f}};
NV_GLOBALCONST NV_VECTORF32 g_NVASinCoefficients0 = {{-0.05806367563904f, -0.41861972469416f, 0.22480114791621f, 2.17337241360606f}};
NV_GLOBALCONST NV_VECTORF32 g_NVASinCoefficients1 = {{0.61657275907170f, 4.29696498283455f, -1.18942822255452f, -6.53784832094831f}};
NV_GLOBALCONST NV_VECTORF32 g_NVASinCoefficients2 = {{-1.36926553863413f, -4.48179294237210f, 1.41810672941833f, 5.48179257935713f}};
NV_GLOBALCONST NV_VECTORF32 g_NVATanCoefficients0 = {{1.0f, 0.333333334f, 0.2f, 0.142857143f}};
NV_GLOBALCONST NV_VECTORF32 g_NVATanCoefficients1 = {{1.111111111e-1f, 9.090909091e-2f, 7.692307692e-2f, 6.666666667e-2f}};
NV_GLOBALCONST NV_VECTORF32 g_NVATanCoefficients2 = {{5.882352941e-2f, 5.263157895e-2f, 4.761904762e-2f, 4.347826087e-2f}};
NV_GLOBALCONST NV_VECTORF32 g_NVSinEstCoefficients = {{1.0f, -1.66521856991541e-1f, 8.199913018755e-3f, -1.61475937228e-4f}};
NV_GLOBALCONST NV_VECTORF32 g_NVCosEstCoefficients = {{1.0f, -4.95348008918096e-1f, 3.878259962881e-2f, -9.24587976263e-4f}};
NV_GLOBALCONST NV_VECTORF32 g_NVTanEstCoefficients = {{2.484f, -1.954923183e-1f, 2.467401101f, NvInvPi}};
NV_GLOBALCONST NV_VECTORF32 g_NVATanEstCoefficients = {{7.689891418951e-1f, 1.104742493348f, 8.661844266006e-1f, NvPiDivTwo}};
NV_GLOBALCONST NV_VECTORF32 g_NVASinEstCoefficients = {{-1.36178272886711f, 2.37949493464538f, -8.08228565650486e-1f, 2.78440142746736e-1f}};
NV_GLOBALCONST NV_VECTORF32 g_NVASinEstConstants = {{1.00000011921f, NvPiDivTwo, 0.0f, 0.0f}};
NV_GLOBALCONST NV_VECTORF32 g_NVPiConstants0 = {{NvPi, NvTwoPi, NvInvPi, NvInvTwoPi}};
NV_GLOBALCONST NV_VECTORF32 g_NVReciprocalTwoPi = {{NvInvTwoPi, NvInvTwoPi, NvInvTwoPi, NvInvTwoPi}};
NV_GLOBALCONST NV_VECTORF32 g_NVTwoPi = {{NvTwoPi, NvTwoPi, NvTwoPi, NvTwoPi}};
#endif
| 5,068 | C | 66.586666 | 141 | 0.747435 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/platform/windows/NsWindowsFPU.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef NV_WINDOWS_NSWINDOWSFPU_H
#define NV_WINDOWS_NSWINDOWSFPU_H
NV_INLINE nvidia::shdfnd::SIMDGuard::SIMDGuard()
{
#if !NV_ARM
mControlWord = _mm_getcsr();
// set default (disable exceptions: _MM_MASK_MASK) and FTZ (_MM_FLUSH_ZERO_ON), DAZ (_MM_DENORMALS_ZERO_ON: (1<<6))
_mm_setcsr(_MM_MASK_MASK | _MM_FLUSH_ZERO_ON | (1 << 6));
#endif
}
NV_INLINE nvidia::shdfnd::SIMDGuard::~SIMDGuard()
{
#if !NV_ARM
// restore control word and clear any exception flags
// (setting exception state flags cause exceptions on the first following fp operation)
_mm_setcsr(mControlWord & ~_MM_EXCEPT_MASK);
#endif
}
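// Usage note (sketch): SIMDGuard is a scoped RAII helper - construct one on the stack at
// the top of a SIMD-heavy scope. The constructor masks all SSE exceptions and enables
// flush-to-zero / denormals-are-zero in MXCSR; the destructor restores the caller's
// control word with any sticky exception flags cleared.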
#endif // #ifndef NV_WINDOWS_NSWINDOWSFPU_H
| 2,370 | C | 45.490195 | 119 | 0.74557 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/platform/windows/NsWindowsInlineAoS.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef PS_WINDOWS_INLINE_AOS_H
#define PS_WINDOWS_INLINE_AOS_H
#if !COMPILE_VECTOR_INTRINSICS
#error Vector intrinsics should not be included when using scalar implementation.
#endif
//Remove this define when all platforms use simd solver.
#define NV_SUPPORT_SIMD
/////////////////////////////////////////////////////////////////////
////FUNCTIONS USED ONLY FOR ASSERTS IN VECTORISED IMPLEMENTATIONS
/////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////
//USED ONLY INTERNALLY
//////////////////////////////////////////////////////////////////////
namespace internalWindowsSimd
{
NV_FORCE_INLINE __m128 m128_I2F(__m128i n)
{
return _mm_castsi128_ps(n);
}
NV_FORCE_INLINE __m128i m128_F2I(__m128 n)
{
return _mm_castps_si128(n);
}
NV_FORCE_INLINE uint32_t BAllTrue4_R(const BoolV a)
{
const int32_t moveMask = _mm_movemask_ps(a);
return uint32_t(moveMask == (0xf));
}
NV_FORCE_INLINE uint32_t BAnyTrue4_R(const BoolV a)
{
const int32_t moveMask = _mm_movemask_ps(a);
return uint32_t(moveMask != (0x0));
}
NV_FORCE_INLINE uint32_t BAllTrue3_R(const BoolV a)
{
const int32_t moveMask = _mm_movemask_ps(a);
return uint32_t(((moveMask & 0x7) == (0x7)));
}
NV_FORCE_INLINE uint32_t BAnyTrue3_R(const BoolV a)
{
const int32_t moveMask = _mm_movemask_ps(a);
return uint32_t(((moveMask & 0x7) != (0x0)));
}
NV_FORCE_INLINE uint32_t FiniteTestEq(const Vec4V a, const Vec4V b)
{
//This is a bit of a bodge.
//_mm_comieq_ss returns 1 if either value is nan so we need to re-cast a and b with true encoded as a non-nan number.
//There must be a better way of doing this in sse.
const BoolV one = FOne();
const BoolV zero = FZero();
const BoolV a1 =V4Sel(a,one,zero);
const BoolV b1 =V4Sel(b,one,zero);
return
(
uint32_t(_mm_comieq_ss(a1, b1) &&
_mm_comieq_ss(_mm_shuffle_ps(a1, a1, _MM_SHUFFLE(1,1,1,1)),_mm_shuffle_ps(b1, b1, _MM_SHUFFLE(1,1,1,1))) &&
_mm_comieq_ss(_mm_shuffle_ps(a1, a1, _MM_SHUFFLE(2,2,2,2)),_mm_shuffle_ps(b1, b1, _MM_SHUFFLE(2,2,2,2))) &&
_mm_comieq_ss(_mm_shuffle_ps(a1, a1, _MM_SHUFFLE(3,3,3,3)),_mm_shuffle_ps(b1, b1, _MM_SHUFFLE(3,3,3,3))))
);
}
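// Note: for well-formed lane masks (every lane all-ones or all-zeros) the same test could
// be written as a sign-bit comparison without the NaN workaround, e.g. (sketch only, not
// used here):
// return uint32_t(_mm_movemask_ps(a) == _mm_movemask_ps(b));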
NV_FORCE_INLINE bool hasZeroElementinFloatV(const FloatV a)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
return (_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0)),FZero()) ? true : false);
}
NV_FORCE_INLINE bool hasZeroElementInVec3V(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
return
(
_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0)),FZero()) ||
_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(1,1,1,1)),FZero()) ||
_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2)),FZero())
);
}
NV_FORCE_INLINE bool hasZeroElementInVec4V(const Vec4V a)
{
return
(
_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0)),FZero()) ||
_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(1,1,1,1)),FZero()) ||
_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2)),FZero()) ||
_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(3,3,3,3)),FZero())
);
}
const NV_ALIGN(16, uint32_t gMaskXYZ[4])={0xffffffff, 0xffffffff, 0xffffffff, 0};
}
namespace _VecMathTests
{
NV_FORCE_INLINE bool allElementsEqualFloatV(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return(_mm_comieq_ss(a,b)!=0);
}
NV_FORCE_INLINE bool allElementsEqualVec3V(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
return V3AllEq(a, b) != 0;
}
NV_FORCE_INLINE bool allElementsEqualVec4V(const Vec4V a, const Vec4V b)
{
return V4AllEq(a, b) != 0;
}
NV_FORCE_INLINE bool allElementsEqualBoolV(const BoolV a, const BoolV b)
{
return internalWindowsSimd::BAllTrue4_R(VecI32V_IsEq(a, b)) != 0;
}
NV_FORCE_INLINE bool allElementsEqualVecU32V(const VecU32V a, const VecU32V b)
{
return internalWindowsSimd::BAllTrue4_R(V4IsEqU32(a, b)) != 0;
}
NV_FORCE_INLINE bool allElementsEqualVecI32V(const VecI32V a, const VecI32V b)
{
BoolV c = internalWindowsSimd::m128_I2F(_mm_cmpeq_epi32(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b)));
return internalWindowsSimd::BAllTrue4_R(c) != 0;
}
#define VECMATH_AOS_EPSILON (1e-3f)
static const FloatV minFError=FLoad(-VECMATH_AOS_EPSILON);
static const FloatV maxFError=FLoad(VECMATH_AOS_EPSILON);
static const Vec3V minV3Error=V3Load(-VECMATH_AOS_EPSILON);
static const Vec3V maxV3Error=V3Load(VECMATH_AOS_EPSILON);
static const Vec4V minV4Error=V4Load(-VECMATH_AOS_EPSILON);
static const Vec4V maxV4Error=V4Load(VECMATH_AOS_EPSILON);
NV_FORCE_INLINE bool allElementsNearEqualFloatV(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
const FloatV c=FSub(a,b);
return (_mm_comigt_ss(c,minFError) && _mm_comilt_ss(c,maxFError));
}
NV_FORCE_INLINE bool allElementsNearEqualVec3V(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
const Vec3V c=V3Sub(a,b);
return
(
_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0,0,0,0)),minV3Error) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0,0,0,0)),maxV3Error) &&
_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1,1,1,1)),minV3Error) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1,1,1,1)),maxV3Error) &&
_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2,2,2,2)),minV3Error) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2,2,2,2)),maxV3Error)
);
}
NV_FORCE_INLINE bool allElementsNearEqualVec4V(const Vec4V a, const Vec4V b)
{
const Vec4V c=V4Sub(a,b);
return
(
_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0,0,0,0)),minV4Error) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0,0,0,0)),maxV4Error) &&
_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1,1,1,1)),minV4Error) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1,1,1,1)),maxV4Error) &&
_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2,2,2,2)),minV4Error) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2,2,2,2)),maxV4Error) &&
_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(3,3,3,3)),minV4Error) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(3,3,3,3)),maxV4Error)
);
}
}
NV_FORCE_INLINE bool isValidFloatV(const FloatV a)
{
return
(
_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0)),_mm_shuffle_ps(a, a, _MM_SHUFFLE(1,1,1,1))) &&
_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0)),_mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2))) &&
_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0)),_mm_shuffle_ps(a, a, _MM_SHUFFLE(3,3,3,3)))
);
}
NV_FORCE_INLINE bool isValidVec3V(const Vec3V a)
{
return (_mm_comieq_ss(_mm_shuffle_ps(a, a, _MM_SHUFFLE(3,3,3,3)),FZero()) ? true : false);
}
NV_FORCE_INLINE bool isFiniteFloatV(const FloatV a)
{
return NvIsFinite(FStore(a));
/*
const uint32_t badNumber = (_FPCLASS_SNAN | _FPCLASS_QNAN | _FPCLASS_NINF | _FPCLASS_PINF);
const FloatV vBadNum = FloatV_From_F32((float&)badNumber);
const BoolV vMask = BAnd(vBadNum, a);
return FiniteTestEq(vMask, BFFFF()) == 1;
*/
}
NV_FORCE_INLINE bool isFiniteVec3V(const Vec3V a)
{
NV_ALIGN(16, float f[4]);
V4StoreA((Vec4V&)a, f);
return NvIsFinite(f[0])
&& NvIsFinite(f[1])
&& NvIsFinite(f[2]);
/*
const uint32_t badNumber = (_FPCLASS_SNAN | _FPCLASS_QNAN | _FPCLASS_NINF | _FPCLASS_PINF);
const Vec3V vBadNum = Vec3V_From_F32((float&)badNumber);
const BoolV vMask = BAnd(BAnd(vBadNum, a), BTTTF());
return FiniteTestEq(vMask, BFFFF()) == 1;
*/
}
NV_FORCE_INLINE bool isFiniteVec4V(const Vec4V a)
{
NV_ALIGN(16, float f[4]);
V4StoreA(a, f);
return NvIsFinite(f[0])
&& NvIsFinite(f[1])
&& NvIsFinite(f[2])
&& NvIsFinite(f[3]);
/*
const uint32_t badNumber = (_FPCLASS_SNAN | _FPCLASS_QNAN | _FPCLASS_NINF | _FPCLASS_PINF);
const Vec4V vBadNum = Vec4V_From_U32((float&)badNumber);
const BoolV vMask = BAnd(vBadNum, a);
return FiniteTestEq(vMask, BFFFF()) == 1;
*/
}
/////////////////////////////////////////////////////////////////////
////VECTORISED FUNCTION IMPLEMENTATIONS
/////////////////////////////////////////////////////////////////////
NV_FORCE_INLINE FloatV FLoad(const float f)
{
return (_mm_load1_ps(&f));
}
NV_FORCE_INLINE Vec3V V3Load(const float f)
{
return _mm_set_ps(0.0f,f,f,f);
}
NV_FORCE_INLINE Vec4V V4Load(const float f)
{
return (_mm_load1_ps(&f));
}
NV_FORCE_INLINE BoolV BLoad(const bool f)
{
const uint32_t i=uint32_t(-(int32_t)f);
return _mm_load1_ps((float*)&i);
}
NV_FORCE_INLINE Vec3V V3LoadA(const NvVec3& f)
{
VECMATHAOS_ASSERT(0 == ((size_t)&f & 0x0f));
return _mm_and_ps(_mm_load_ps(&f.x), reinterpret_cast<const Vec4V&>(internalWindowsSimd::gMaskXYZ));
}
NV_FORCE_INLINE Vec3V V3LoadU(const NvVec3& f)
{
return (_mm_set_ps(0.0f,f.z,f.y,f.x));
}
// w component of result is undefined
NV_FORCE_INLINE Vec3V V3LoadUnsafeA(const NvVec3& f)
{
VECMATHAOS_ASSERT(0 == ((uint64_t)&f & 0x0f));
return _mm_load_ps(&f.x);
}
NV_FORCE_INLINE Vec3V V3LoadA(const float* const f)
{
VECMATHAOS_ASSERT(0 == ((uint64_t)f & 0x0f));
return V4ClearW(_mm_load_ps(f));
}
NV_FORCE_INLINE Vec3V V3LoadU(const float* const i)
{
return (_mm_set_ps(0.0f,i[2],i[1],i[0]));
}
NV_FORCE_INLINE Vec3V Vec3V_From_Vec4V(Vec4V v)
{
return V4ClearW(v);
}
NV_FORCE_INLINE Vec3V Vec3V_From_Vec4V_WUndefined(const Vec4V v)
{
return v;
}
NV_FORCE_INLINE Vec4V Vec4V_From_Vec3V(Vec3V f)
{
return f; // OK because Vec3V and Vec4V are implemented as the same underlying type here.
}
NV_FORCE_INLINE Vec4V Vec4V_From_FloatV(FloatV f)
{
return f;
}
NV_FORCE_INLINE Vec3V Vec3V_From_FloatV(FloatV f)
{
return Vec3V_From_Vec4V(Vec4V_From_FloatV(f));
}
NV_FORCE_INLINE Vec3V Vec3V_From_FloatV_WUndefined(FloatV f)
{
return Vec3V_From_Vec4V_WUndefined(Vec4V_From_FloatV(f));
}
NV_FORCE_INLINE Vec4V Vec4V_From_NvVec3_WUndefined(const NvVec3& f)
{
return (_mm_set_ps(0.0f,f.z,f.y,f.x));
}
NV_FORCE_INLINE Vec4V V4LoadA(const float* const f)
{
VECMATHAOS_ASSERT(0 == ((uint64_t)f & 0x0f));
return (_mm_load_ps(f));
}
NV_FORCE_INLINE void V4StoreA(const Vec4V a, float* f)
{
VECMATHAOS_ASSERT(0 == ((uint64_t)f & 0x0f));
_mm_store_ps(f,a);
}
NV_FORCE_INLINE void V4StoreU(const Vec4V a, float* f)
{
_mm_storeu_ps(f,a);
}
NV_FORCE_INLINE void BStoreA(const BoolV a, uint32_t* f)
{
VECMATHAOS_ASSERT(0 == ((uint64_t)f & 0x0f));
_mm_store_ps((float*)f,a);
}
NV_FORCE_INLINE void U4StoreA(const VecU32V uv, uint32_t* u)
{
VECMATHAOS_ASSERT(0 == ((uint64_t)u & 0x0f));
_mm_store_ps((float*)u,uv);
}
NV_FORCE_INLINE void I4StoreA(const VecI32V iv, int32_t* i)
{
VECMATHAOS_ASSERT(0 == ((uint64_t)i & 0x0f));
_mm_store_ps((float*)i,iv);
}
NV_FORCE_INLINE Vec4V V4LoadU(const float* const f)
{
return (_mm_loadu_ps(f));
}
NV_FORCE_INLINE BoolV BLoad(const bool* const f)
{
const NV_ALIGN(16, uint32_t b[4])={uint32_t(-(int32_t)f[0]), uint32_t(-(int32_t)f[1]), uint32_t(-(int32_t)f[2]), uint32_t(-(int32_t)f[3])};
return _mm_load_ps((float*)&b);
}
NV_FORCE_INLINE float FStore(const FloatV a)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
float f;
_mm_store_ss(&f,a);
return f;
}
NV_FORCE_INLINE void FStore(const FloatV a, float* NV_RESTRICT f)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
_mm_store_ss(f,a);
}
NV_FORCE_INLINE void V3StoreA(const Vec3V a, NvVec3& f)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(0 == ((int)&a & 0x0F));
VECMATHAOS_ASSERT(0 == ((int)&f & 0x0F));
NV_ALIGN(16, float f2[4]);
_mm_store_ps(f2,a);
f=NvVec3(f2[0],f2[1],f2[2]);
}
NV_FORCE_INLINE void Store_From_BoolV(const BoolV b, uint32_t* b2)
{
_mm_store_ss((float*)b2,b);
}
NV_FORCE_INLINE void V3StoreU(const Vec3V a, NvVec3& f)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(0 == ((int)&a & 0x0F));
NV_ALIGN(16, float f2[4]);
_mm_store_ps(f2,a);
f=NvVec3(f2[0],f2[1],f2[2]);
}
NV_FORCE_INLINE Mat33V Mat33V_From_NvMat33(const NvMat33 &m)
{
return Mat33V(V3LoadU(m.column0),
V3LoadU(m.column1),
V3LoadU(m.column2));
}
NV_FORCE_INLINE void NvMat33_From_Mat33V(const Mat33V &m, NvMat33 &out)
{
NV_ASSERT((size_t(&out)&15)==0);
V3StoreU(m.col0, out.column0);
V3StoreU(m.col1, out.column1);
V3StoreU(m.col2, out.column2);
}
//////////////////////////////////
//FLOATV
//////////////////////////////////
NV_FORCE_INLINE FloatV FZero()
{
//return FloatV_From_F32(0.0f);
return _mm_setzero_ps();
}
NV_FORCE_INLINE FloatV FOne()
{
return FLoad(1.0f);
}
NV_FORCE_INLINE FloatV FHalf()
{
return FLoad(0.5f);
}
NV_FORCE_INLINE FloatV FEps()
{
return FLoad(NV_EPS_REAL);
}
NV_FORCE_INLINE FloatV FEps6()
{
return FLoad(1e-6f);
}
NV_FORCE_INLINE FloatV FMax()
{
return FLoad(NV_MAX_REAL);
}
NV_FORCE_INLINE FloatV FNegMax()
{
return FLoad(-NV_MAX_REAL);
}
NV_FORCE_INLINE FloatV IZero()
{
const uint32_t zero = 0;
return _mm_load1_ps((float*)&zero);
}
NV_FORCE_INLINE FloatV IOne()
{
const uint32_t one = 1;
return _mm_load1_ps((float*)&one);
}
NV_FORCE_INLINE FloatV ITwo()
{
const uint32_t two = 2;
return _mm_load1_ps((float*)&two);
}
NV_FORCE_INLINE FloatV IThree()
{
const uint32_t three = 3;
return _mm_load1_ps((float*)&three);
}
NV_FORCE_INLINE FloatV IFour()
{
uint32_t four = 4;
return _mm_load1_ps((float*)&four);
}
NV_FORCE_INLINE FloatV FNeg(const FloatV f)
{
VECMATHAOS_ASSERT(isValidFloatV(f));
return _mm_sub_ps( _mm_setzero_ps(), f);
}
NV_FORCE_INLINE FloatV FAdd(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return _mm_add_ps(a,b);
}
NV_FORCE_INLINE FloatV FSub(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return _mm_sub_ps(a,b);
}
NV_FORCE_INLINE FloatV FMul(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return _mm_mul_ps(a,b);
}
NV_FORCE_INLINE FloatV FDiv(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return _mm_div_ps(a,b);
}
NV_FORCE_INLINE FloatV FDivFast(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return _mm_mul_ps(a,_mm_rcp_ps(b));
}
NV_FORCE_INLINE FloatV FRecip(const FloatV a)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
return _mm_div_ps(FOne(),a);
}
NV_FORCE_INLINE FloatV FRecipFast(const FloatV a)
{
return _mm_rcp_ps(a);
}
NV_FORCE_INLINE FloatV FRsqrt(const FloatV a)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
return _mm_div_ps(FOne(),_mm_sqrt_ps(a));
}
NV_FORCE_INLINE FloatV FSqrt(const FloatV a)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
return _mm_sqrt_ps(a);
}
NV_FORCE_INLINE FloatV FRsqrtFast(const FloatV a)
{
return _mm_rsqrt_ps(a);
}
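// The *Fast variants above use _mm_rcp_ps / _mm_rsqrt_ps, which are hardware
// approximations accurate to roughly 12 bits (relative error <= 1.5 * 2^-12); prefer
// FRecip/FRsqrt/FDiv where full float precision is required.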
NV_FORCE_INLINE FloatV FScaleAdd(const FloatV a, const FloatV b, const FloatV c)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
VECMATHAOS_ASSERT(isValidFloatV(c));
return FAdd(FMul(a,b),c);
}
NV_FORCE_INLINE FloatV FNegScaleSub(const FloatV a, const FloatV b, const FloatV c)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
VECMATHAOS_ASSERT(isValidFloatV(c));
return FSub(c,FMul(a,b));
}
NV_FORCE_INLINE FloatV FAbs(const FloatV a)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
NV_ALIGN(16, const static uint32_t absMask[4]) = {0x7fFFffFF, 0x7fFFffFF, 0x7fFFffFF, 0x7fFFffFF};
return _mm_and_ps(a, _mm_load_ps((float*)absMask));
}
NV_FORCE_INLINE FloatV FSel(const BoolV c, const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(_VecMathTests::allElementsEqualBoolV(c,BTTTT()) || _VecMathTests::allElementsEqualBoolV(c,BFFFF()));
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return _mm_or_ps(_mm_andnot_ps(c, b), _mm_and_ps(c, a));
}
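// The andnot/and/or sequence in FSel is the classic SSE2 select idiom; for full-lane
// masks _mm_blendv_ps would do the same in one instruction, but it requires SSE4.1,
// which this header avoids (see the commented-out _mm_round_ps in FRound below).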
NV_FORCE_INLINE BoolV FIsGrtr(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return _mm_cmpgt_ps(a,b);
}
NV_FORCE_INLINE BoolV FIsGrtrOrEq(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return _mm_cmpge_ps(a,b);
}
NV_FORCE_INLINE BoolV FIsEq(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return _mm_cmpeq_ps(a,b);
}
NV_FORCE_INLINE FloatV FMax(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return _mm_max_ps(a, b);
}
NV_FORCE_INLINE FloatV FMin(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return _mm_min_ps(a, b);
}
NV_FORCE_INLINE FloatV FClamp(const FloatV a, const FloatV minV, const FloatV maxV)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(minV));
VECMATHAOS_ASSERT(isValidFloatV(maxV));
return FMax(FMin(a,maxV),minV);
}
NV_FORCE_INLINE uint32_t FAllGrtr(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return uint32_t(_mm_comigt_ss(a,b));
}
NV_FORCE_INLINE uint32_t FAllGrtrOrEq(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return uint32_t(_mm_comige_ss(a,b));
}
NV_FORCE_INLINE uint32_t FAllEq(const FloatV a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return uint32_t(_mm_comieq_ss(a,b));
}
NV_FORCE_INLINE FloatV FRound(const FloatV a)
{
//return _mm_round_ps(a, 0x0);
const FloatV half = FLoad(0.5f);
const __m128 signBit = _mm_cvtepi32_ps(_mm_srli_epi32(_mm_cvtps_epi32(a), 31));
const FloatV aRound = FSub(FAdd(a, half), signBit);
__m128i tmp = _mm_cvttps_epi32(aRound);
return _mm_cvtepi32_ps(tmp);
}
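// FRound sketch: the signBit term subtracts 1 for negative inputs, so (a + 0.5 - signBit)
// followed by the truncating _mm_cvttps_epi32 gives round-to-nearest (with ties handled
// slightly differently from the SSE4.1 _mm_round_ps commented out above).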
NV_FORCE_INLINE FloatV FSin(const FloatV a)
{
//Vec4V V1, V2, V3, V5, V7, V9, V11, V13, V15, V17, V19, V21, V23;
//Vec4V S1, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11;
FloatV Result;
// Reduce the angle to [-PI, PI] by subtracting the nearest multiple of 2*PI
const FloatV recipTwoPi = V4LoadA(g_NVReciprocalTwoPi.f);
const FloatV twoPi = V4LoadA(g_NVTwoPi.f);
const FloatV tmp = FMul(a, recipTwoPi);
const FloatV b = FRound(tmp);
const FloatV V1 = FNegMulSub(twoPi, b, a);
// sin(V) ~= V - V^3 / 3! + V^5 / 5! - V^7 / 7! + V^9 / 9! - V^11 / 11! + V^13 / 13! -
// V^15 / 15! + V^17 / 17! - V^19 / 19! + V^21 / 21! - V^23 / 23! (for -PI <= V < PI)
const FloatV V2 = FMul(V1, V1);
const FloatV V3 = FMul(V2, V1);
const FloatV V5 = FMul(V3, V2);
const FloatV V7 = FMul(V5, V2);
const FloatV V9 = FMul(V7, V2);
const FloatV V11 = FMul(V9, V2);
const FloatV V13 = FMul(V11, V2);
const FloatV V15 = FMul(V13, V2);
const FloatV V17 = FMul(V15, V2);
const FloatV V19 = FMul(V17, V2);
const FloatV V21 = FMul(V19, V2);
const FloatV V23 = FMul(V21, V2);
const Vec4V sinCoefficients0 = V4LoadA(g_NVSinCoefficients0.f);
const Vec4V sinCoefficients1 = V4LoadA(g_NVSinCoefficients1.f);
const Vec4V sinCoefficients2 = V4LoadA(g_NVSinCoefficients2.f);
const FloatV S1 = V4GetY(sinCoefficients0);
const FloatV S2 = V4GetZ(sinCoefficients0);
const FloatV S3 = V4GetW(sinCoefficients0);
const FloatV S4 = V4GetX(sinCoefficients1);
const FloatV S5 = V4GetY(sinCoefficients1);
const FloatV S6 = V4GetZ(sinCoefficients1);
const FloatV S7 = V4GetW(sinCoefficients1);
const FloatV S8 = V4GetX(sinCoefficients2);
const FloatV S9 = V4GetY(sinCoefficients2);
const FloatV S10 = V4GetZ(sinCoefficients2);
const FloatV S11 = V4GetW(sinCoefficients2);
Result = FMulAdd(S1, V3, V1);
Result = FMulAdd(S2, V5, Result);
Result = FMulAdd(S3, V7, Result);
Result = FMulAdd(S4, V9, Result);
Result = FMulAdd(S5, V11, Result);
Result = FMulAdd(S6, V13, Result);
Result = FMulAdd(S7, V15, Result);
Result = FMulAdd(S8, V17, Result);
Result = FMulAdd(S9, V19, Result);
Result = FMulAdd(S10, V21, Result);
Result = FMulAdd(S11, V23, Result);
return Result;
}
NV_FORCE_INLINE FloatV FCos(const FloatV a)
{
//XMVECTOR V1, V2, V4, V6, V8, V10, V12, V14, V16, V18, V20, V22;
//XMVECTOR C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11;
FloatV Result;
// Reduce the angle to [-PI, PI] by subtracting the nearest multiple of 2*PI
const FloatV recipTwoPi = V4LoadA(g_NVReciprocalTwoPi.f);
const FloatV twoPi = V4LoadA(g_NVTwoPi.f);
const FloatV tmp = FMul(a, recipTwoPi);
const FloatV b = FRound(tmp);
const FloatV V1 = FNegMulSub(twoPi, b, a);
// cos(V) ~= 1 - V^2 / 2! + V^4 / 4! - V^6 / 6! + V^8 / 8! - V^10 / 10! + V^12 / 12! -
// V^14 / 14! + V^16 / 16! - V^18 / 18! + V^20 / 20! - V^22 / 22! (for -PI <= V < PI)
const FloatV V2 = FMul(V1, V1);
const FloatV V4 = FMul(V2, V2);
const FloatV V6 = FMul(V4, V2);
const FloatV V8 = FMul(V4, V4);
const FloatV V10 = FMul(V6, V4);
const FloatV V12 = FMul(V6, V6);
const FloatV V14 = FMul(V8, V6);
const FloatV V16 = FMul(V8, V8);
const FloatV V18 = FMul(V10, V8);
const FloatV V20 = FMul(V10, V10);
const FloatV V22 = FMul(V12, V10);
const Vec4V cosCoefficients0 = V4LoadA(g_NVCosCoefficients0.f);
const Vec4V cosCoefficients1 = V4LoadA(g_NVCosCoefficients1.f);
const Vec4V cosCoefficients2 = V4LoadA(g_NVCosCoefficients2.f);
const FloatV C1 = V4GetY(cosCoefficients0);
const FloatV C2 = V4GetZ(cosCoefficients0);
const FloatV C3 = V4GetW(cosCoefficients0);
const FloatV C4 = V4GetX(cosCoefficients1);
const FloatV C5 = V4GetY(cosCoefficients1);
const FloatV C6 = V4GetZ(cosCoefficients1);
const FloatV C7 = V4GetW(cosCoefficients1);
const FloatV C8 = V4GetX(cosCoefficients2);
const FloatV C9 = V4GetY(cosCoefficients2);
const FloatV C10 = V4GetZ(cosCoefficients2);
const FloatV C11 = V4GetW(cosCoefficients2);
Result = FMulAdd(C1, V2, V4One());
Result = FMulAdd(C2, V4, Result);
Result = FMulAdd(C3, V6, Result);
Result = FMulAdd(C4, V8, Result);
Result = FMulAdd(C5, V10, Result);
Result = FMulAdd(C6, V12, Result);
Result = FMulAdd(C7, V14, Result);
Result = FMulAdd(C8, V16, Result);
Result = FMulAdd(C9, V18, Result);
Result = FMulAdd(C10, V20, Result);
Result = FMulAdd(C11, V22, Result);
return Result;
}
NV_FORCE_INLINE uint32_t FOutOfBounds(const FloatV a, const FloatV min, const FloatV max)
{
const BoolV ffff = BFFFF();
const BoolV c = BOr(FIsGrtr(a, max), FIsGrtr(min, a));
return uint32_t(!BAllEq(c, ffff));
}
NV_FORCE_INLINE uint32_t FInBounds(const FloatV a, const FloatV min, const FloatV max)
{
const BoolV tttt = BTTTT();
const BoolV c = BAnd(FIsGrtrOrEq(a, min), FIsGrtrOrEq(max, a));
return BAllEq(c, tttt);
}
NV_FORCE_INLINE uint32_t FOutOfBounds(const FloatV a, const FloatV bounds)
{
return FOutOfBounds(a, FNeg(bounds), bounds);
}
NV_FORCE_INLINE uint32_t FInBounds(const FloatV a, const FloatV bounds)
{
return FInBounds(a, FNeg(bounds), bounds);
}
//////////////////////////////////
//VEC3V
//////////////////////////////////
NV_FORCE_INLINE Vec3V V3Splat(const FloatV f)
{
VECMATHAOS_ASSERT(isValidFloatV(f));
const __m128 zero=V3Zero();
const __m128 fff0 = _mm_move_ss(f, zero);
return _mm_shuffle_ps(fff0, fff0, _MM_SHUFFLE(0,1,2,3));
}
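// V3Splat lane flow: _mm_move_ss writes 0 into lane x of f, giving (0, f, f, f); the
// reversing shuffle then yields (f, f, f, 0), i.e. the splat with w cleared.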
NV_FORCE_INLINE Vec3V V3Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z)
{
VECMATHAOS_ASSERT(isValidFloatV(x));
VECMATHAOS_ASSERT(isValidFloatV(y));
VECMATHAOS_ASSERT(isValidFloatV(z));
// note: making 'zero' static caused a compiler crash in the x64 debug_opt build, so a local is used
const __m128 zero=V3Zero();
const __m128 xy = _mm_move_ss(x, y);
const __m128 z0 = _mm_move_ss(zero, z);
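// lane flow: xy = (y, x, x, x) after move_ss, z0 = (z, 0, 0, 0); the shuffle below picks
// (xy.y, xy.x, z0.x, z0.y) = (x, y, z, 0)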
return _mm_shuffle_ps(xy, z0, _MM_SHUFFLE(1,0,0,1));
}
NV_FORCE_INLINE Vec3V V3UnitX()
{
const NV_ALIGN(16, float x[4])={1.0f,0.0f,0.0f,0.0f};
const __m128 x128=_mm_load_ps(x);
return x128;
}
NV_FORCE_INLINE Vec3V V3UnitY()
{
const NV_ALIGN(16, float y[4])={0.0f,1.0f,0.0f,0.0f};
const __m128 y128=_mm_load_ps(y);
return y128;
}
NV_FORCE_INLINE Vec3V V3UnitZ()
{
const NV_ALIGN(16, float z[4])={0.0f,0.0f,1.0f,0.0f};
const __m128 z128=_mm_load_ps(z);
return z128;
}
NV_FORCE_INLINE FloatV V3GetX(const Vec3V f)
{
VECMATHAOS_ASSERT(isValidVec3V(f));
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(0,0,0,0));
}
NV_FORCE_INLINE FloatV V3GetY(const Vec3V f)
{
VECMATHAOS_ASSERT(isValidVec3V(f));
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(1,1,1,1));
}
NV_FORCE_INLINE FloatV V3GetZ(const Vec3V f)
{
VECMATHAOS_ASSERT(isValidVec3V(f));
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(2,2,2,2));
}
NV_FORCE_INLINE Vec3V V3SetX(const Vec3V v, const FloatV f)
{
VECMATHAOS_ASSERT(isValidVec3V(v));
VECMATHAOS_ASSERT(isValidFloatV(f));
return V3Sel(BFTTT(),v,f);
}
NV_FORCE_INLINE Vec3V V3SetY(const Vec3V v, const FloatV f)
{
VECMATHAOS_ASSERT(isValidVec3V(v));
VECMATHAOS_ASSERT(isValidFloatV(f));
return V3Sel(BTFTT(),v,f);
}
NV_FORCE_INLINE Vec3V V3SetZ(const Vec3V v, const FloatV f)
{
VECMATHAOS_ASSERT(isValidVec3V(v));
VECMATHAOS_ASSERT(isValidFloatV(f));
return V3Sel(BTTFT(),v,f);
}
NV_FORCE_INLINE Vec3V V3ColX(const Vec3V a, const Vec3V b, const Vec3V c)
{
Vec3V r = _mm_shuffle_ps(a,c,_MM_SHUFFLE(3,0,3,0));
return V3SetY(r, V3GetX(b));
}
NV_FORCE_INLINE Vec3V V3ColY(const Vec3V a, const Vec3V b, const Vec3V c)
{
Vec3V r = _mm_shuffle_ps(a,c,_MM_SHUFFLE(3,1,3,1));
return V3SetY(r, V3GetY(b));
}
NV_FORCE_INLINE Vec3V V3ColZ(const Vec3V a, const Vec3V b, const Vec3V c)
{
Vec3V r = _mm_shuffle_ps(a,c,_MM_SHUFFLE(3,2,3,2));
return V3SetY(r, V3GetZ(b));
}
NV_FORCE_INLINE Vec3V V3Zero()
{
return _mm_setzero_ps();
}
NV_FORCE_INLINE Vec3V V3One()
{
return V3Load(1.0f);
}
NV_FORCE_INLINE Vec3V V3Eps()
{
return V3Load(NV_EPS_REAL);
}
NV_FORCE_INLINE Vec3V V3Neg(const Vec3V f)
{
VECMATHAOS_ASSERT(isValidVec3V(f));
return _mm_sub_ps( _mm_setzero_ps(), f);
}
NV_FORCE_INLINE Vec3V V3Add(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
return _mm_add_ps(a,b);
}
NV_FORCE_INLINE Vec3V V3Sub(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
return _mm_sub_ps(a,b);
}
NV_FORCE_INLINE Vec3V V3Scale(const Vec3V a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return _mm_mul_ps(a,b);
}
NV_FORCE_INLINE Vec3V V3Mul(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
return _mm_mul_ps(a,b);
}
NV_FORCE_INLINE Vec3V V3ScaleInv(const Vec3V a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return _mm_div_ps(a,b);
}
NV_FORCE_INLINE Vec3V V3Div(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
// why are these here?
//static const __m128 one=V3One();
//static const __m128 tttf=BTTTF();
//const __m128 b1=V3Sel(tttf,b,one);
return _mm_div_ps(a,b);
}
NV_FORCE_INLINE Vec3V V3ScaleInvFast(const Vec3V a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
return _mm_mul_ps(a,_mm_rcp_ps(b));
}
NV_FORCE_INLINE Vec3V V3DivFast(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
const __m128 one=V3One();
const __m128 tttf=BTTTF();
const __m128 b1=V3Sel(tttf,b,one);
return _mm_mul_ps(a,_mm_rcp_ps(b1));
}
NV_FORCE_INLINE Vec3V V3Recip(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
const __m128 zero=V3Zero();
const __m128 tttf=BTTTF();
const __m128 recipA=_mm_div_ps(V3One(),a);
return V3Sel(tttf,recipA,zero);
}
NV_FORCE_INLINE Vec3V V3RecipFast(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
const __m128 zero=V3Zero();
const __m128 tttf=BTTTF();
const __m128 recipA=_mm_rcp_ps(a);
return V3Sel(tttf,recipA,zero);
}
NV_FORCE_INLINE Vec3V V3Rsqrt(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
const __m128 zero=V3Zero();
const __m128 tttf=BTTTF();
const __m128 recipA=_mm_div_ps(V3One(),_mm_sqrt_ps(a));
return V3Sel(tttf,recipA,zero);
}
NV_FORCE_INLINE Vec3V V3RsqrtFast(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
const __m128 zero=V3Zero();
const __m128 tttf=BTTTF();
const __m128 recipA=_mm_rsqrt_ps(a);
return V3Sel(tttf,recipA,zero);
}
NV_FORCE_INLINE Vec3V V3ScaleAdd(const Vec3V a, const FloatV b, const Vec3V c)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
VECMATHAOS_ASSERT(isValidVec3V(c));
return V3Add(V3Scale(a,b),c);
}
NV_FORCE_INLINE Vec3V V3NegScaleSub(const Vec3V a, const FloatV b, const Vec3V c)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidFloatV(b));
VECMATHAOS_ASSERT(isValidVec3V(c));
return V3Sub(c,V3Scale(a,b));
}
NV_FORCE_INLINE Vec3V V3MulAdd(const Vec3V a, const Vec3V b, const Vec3V c)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
VECMATHAOS_ASSERT(isValidVec3V(c));
return V3Add(V3Mul(a,b),c);
}
NV_FORCE_INLINE Vec3V V3NegMulSub(const Vec3V a, const Vec3V b, const Vec3V c)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
VECMATHAOS_ASSERT(isValidVec3V(c));
return V3Sub(c,V3Mul(a,b));
}
NV_FORCE_INLINE Vec3V V3Abs(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
return V3Max(a,V3Neg(a));
}
NV_FORCE_INLINE FloatV V3Dot(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
__m128 dot1 = _mm_mul_ps(a, b); //w,z,y,x
//__m128 shuf1 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(2,1,0,3)); //z,y,x,w
//__m128 shuf2 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(1,0,3,2)); //y,x,w,z
//__m128 shuf3 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(0,3,2,1)); //x,w,z,y
//return _mm_add_ps(_mm_add_ps(shuf2, shuf3), _mm_add_ps(dot1,shuf1));
__m128 shuf1 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(0,0,0,0)); //splat a.x*b.x
__m128 shuf2 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(1,1,1,1)); //splat a.y*b.y
__m128 shuf3 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(2,2,2,2)); //splat a.z*b.z
return _mm_add_ps(_mm_add_ps(shuf1, shuf2), shuf3);
}
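// With SSE4.1 available this whole dot product collapses to _mm_dp_ps(a, b, 0x7f), but
// the header sticks to the SSE2-level shuffle/add sequence above.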
NV_FORCE_INLINE Vec3V V3Cross(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
__m128 r1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 0, 2)); //z,x,y,w
__m128 r2 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1)); //y,z,x,w
__m128 l1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1)); //y,z,x,w
__m128 l2 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 1, 0, 2)); //z,x,y,w
return _mm_sub_ps(_mm_mul_ps(l1, l2), _mm_mul_ps(r1,r2));
}
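// Standard shuffle formulation of the cross product: result = a.yzx * b.zxy - a.zxy * b.yzx
// (the w lane evaluates to a.w*b.w - a.w*b.w = 0, keeping the result a valid Vec3V).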
NV_FORCE_INLINE VecCrossV V3PrepareCross(const Vec3V a)
{
VecCrossV v;
v.mR1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 0, 2)); //z,x,y,w
v.mL1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1)); //y,z,x,w
return v;
}
NV_FORCE_INLINE Vec3V V3Cross(const VecCrossV& a, const Vec3V b)
{
__m128 r2 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1)); //y,z,x,w
__m128 l2 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 1, 0, 2)); //z,x,y,w
return _mm_sub_ps(_mm_mul_ps(a.mL1, l2), _mm_mul_ps(a.mR1, r2));
}
NV_FORCE_INLINE Vec3V V3Cross(const Vec3V a, const VecCrossV& b)
{
__m128 r2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1)); //y,z,x,w
__m128 l2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 0, 2)); //z,x,y,w
return _mm_sub_ps(_mm_mul_ps(b.mR1, r2), _mm_mul_ps(b.mL1, l2));
}
NV_FORCE_INLINE Vec3V V3Cross(const VecCrossV& a, const VecCrossV& b)
{
return _mm_sub_ps(_mm_mul_ps(a.mL1, b.mR1), _mm_mul_ps(a.mR1, b.mL1));
}
NV_FORCE_INLINE FloatV V3Length(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
return _mm_sqrt_ps(V3Dot(a,a));
}
NV_FORCE_INLINE FloatV V3LengthSq(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
return V3Dot(a,a);
}
NV_FORCE_INLINE Vec3V V3Normalize(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(!FAllEq(V3Dot(a,a), FZero()));
return V3ScaleInv(a, _mm_sqrt_ps(V3Dot(a,a)));
}
NV_FORCE_INLINE Vec3V V3NormalizeFast(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
return V3Mul(a, _mm_rsqrt_ps(V3Dot(a,a)));
}
NV_FORCE_INLINE Vec3V V3NormalizeSafe(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
const __m128 zero=V3Zero();
const __m128 eps=FEps();
const __m128 length=V3Length(a);
const __m128 isGreaterThanZero=FIsGrtr(length,eps);
return V3Sel(isGreaterThanZero,V3ScaleInv(a,length),zero);
}
NV_FORCE_INLINE Vec3V V3Sel(const BoolV c, const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
return _mm_or_ps(_mm_andnot_ps(c, b), _mm_and_ps(c, a));
}
NV_FORCE_INLINE BoolV V3IsGrtr(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
return _mm_cmpgt_ps(a,b);
}
NV_FORCE_INLINE BoolV V3IsGrtrOrEq(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
return _mm_cmpge_ps(a,b);
}
NV_FORCE_INLINE BoolV V3IsEq(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
return _mm_cmpeq_ps(a,b);
}
NV_FORCE_INLINE Vec3V V3Max(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
return _mm_max_ps(a, b);
}
NV_FORCE_INLINE Vec3V V3Min(const Vec3V a, const Vec3V b)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(b));
return _mm_min_ps(a, b);
}
//Extract the maximum value from a
NV_FORCE_INLINE FloatV V3ExtractMax(const Vec3V a)
{
const __m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0));
const __m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1,1,1,1));
const __m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2));
return _mm_max_ps(_mm_max_ps(shuf1, shuf2), shuf3);
}
//Extract the minimum value from a
NV_FORCE_INLINE FloatV V3ExtractMin(const Vec3V a)
{
const __m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0));
const __m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1,1,1,1));
const __m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2));
return _mm_min_ps(_mm_min_ps(shuf1, shuf2), shuf3);
}
//// if(a > 0.0f) return 1.0f; else if a == 0.f return 0.f, else return -1.f;
//NV_FORCE_INLINE Vec3V V3MathSign(const Vec3V a)
//{
// VECMATHAOS_ASSERT(isValidVec3V(a));
//
// const __m128i ai = _mm_cvtps_epi32(a);
// const __m128i bi = _mm_cvtps_epi32(V3Neg(a));
// const __m128 aa = _mm_cvtepi32_ps(_mm_srai_epi32(ai, 31));
// const __m128 bb = _mm_cvtepi32_ps(_mm_srai_epi32(bi, 31));
// return _mm_or_ps(aa, bb);
//}
//return (a >= 0.0f) ? 1.0f : -1.0f;
NV_FORCE_INLINE Vec3V V3Sign(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
const __m128 zero = V3Zero();
const __m128 one = V3One();
const __m128 none = V3Neg(one);
return V3Sel(V3IsGrtrOrEq(a, zero), one, none);
}
NV_FORCE_INLINE Vec3V V3Clamp(const Vec3V a, const Vec3V minV, const Vec3V maxV)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(minV));
VECMATHAOS_ASSERT(isValidVec3V(maxV));
return V3Max(V3Min(a,maxV),minV);
}
NV_FORCE_INLINE uint32_t V3AllGrtr(const Vec3V a, const Vec3V b)
{
return internalWindowsSimd::BAllTrue3_R(V4IsGrtr(a, b));
}
NV_FORCE_INLINE uint32_t V3AllGrtrOrEq(const Vec3V a, const Vec3V b)
{
return internalWindowsSimd::BAllTrue3_R(V4IsGrtrOrEq(a, b));
}
NV_FORCE_INLINE uint32_t V3AllEq(const Vec3V a, const Vec3V b)
{
return internalWindowsSimd::BAllTrue3_R(V4IsEq(a, b));
}
NV_FORCE_INLINE Vec3V V3Round(const Vec3V a)
{
//return _mm_round_ps(a, 0x0);
const Vec3V half = V3Load(0.5f);
const __m128 signBit = _mm_cvtepi32_ps(_mm_srli_epi32(_mm_cvtps_epi32(a), 31));
const Vec3V aRound = V3Sub(V3Add(a, half), signBit);
__m128i tmp = _mm_cvttps_epi32(aRound);
return _mm_cvtepi32_ps(tmp);
}
NV_FORCE_INLINE Vec3V V3Sin(const Vec3V a)
{
//Vec4V V1, V2, V3, V5, V7, V9, V11, V13, V15, V17, V19, V21, V23;
//Vec4V S1, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11;
Vec3V Result;
// Reduce the angle to [-PI, PI] by subtracting the nearest multiple of 2*PI
const Vec3V recipTwoPi = V4LoadA(g_NVReciprocalTwoPi.f);
const Vec3V twoPi = V4LoadA(g_NVTwoPi.f);
const Vec3V tmp = V3Mul(a, recipTwoPi);
const Vec3V b = V3Round(tmp);
const Vec3V V1 = V3NegMulSub(twoPi, b, a);
// sin(V) ~= V - V^3 / 3! + V^5 / 5! - V^7 / 7! + V^9 / 9! - V^11 / 11! + V^13 / 13! -
// V^15 / 15! + V^17 / 17! - V^19 / 19! + V^21 / 21! - V^23 / 23! (for -PI <= V < PI)
const Vec3V V2 = V3Mul(V1, V1);
const Vec3V V3 = V3Mul(V2, V1);
const Vec3V V5 = V3Mul(V3, V2);
const Vec3V V7 = V3Mul(V5, V2);
const Vec3V V9 = V3Mul(V7, V2);
const Vec3V V11 = V3Mul(V9, V2);
const Vec3V V13 = V3Mul(V11, V2);
const Vec3V V15 = V3Mul(V13, V2);
const Vec3V V17 = V3Mul(V15, V2);
const Vec3V V19 = V3Mul(V17, V2);
const Vec3V V21 = V3Mul(V19, V2);
const Vec3V V23 = V3Mul(V21, V2);
const Vec4V sinCoefficients0 = V4LoadA(g_NVSinCoefficients0.f);
const Vec4V sinCoefficients1 = V4LoadA(g_NVSinCoefficients1.f);
const Vec4V sinCoefficients2 = V4LoadA(g_NVSinCoefficients2.f);
const FloatV S1 = V4GetY(sinCoefficients0);
const FloatV S2 = V4GetZ(sinCoefficients0);
const FloatV S3 = V4GetW(sinCoefficients0);
const FloatV S4 = V4GetX(sinCoefficients1);
const FloatV S5 = V4GetY(sinCoefficients1);
const FloatV S6 = V4GetZ(sinCoefficients1);
const FloatV S7 = V4GetW(sinCoefficients1);
const FloatV S8 = V4GetX(sinCoefficients2);
const FloatV S9 = V4GetY(sinCoefficients2);
const FloatV S10 = V4GetZ(sinCoefficients2);
const FloatV S11 = V4GetW(sinCoefficients2);
Result = V3MulAdd(S1, V3, V1);
Result = V3MulAdd(S2, V5, Result);
Result = V3MulAdd(S3, V7, Result);
Result = V3MulAdd(S4, V9, Result);
Result = V3MulAdd(S5, V11, Result);
Result = V3MulAdd(S6, V13, Result);
Result = V3MulAdd(S7, V15, Result);
Result = V3MulAdd(S8, V17, Result);
Result = V3MulAdd(S9, V19, Result);
Result = V3MulAdd(S10, V21, Result);
Result = V3MulAdd(S11, V23, Result);
return Result;
}
NV_FORCE_INLINE Vec3V V3Cos(const Vec3V a)
{
//XMVECTOR V1, V2, V4, V6, V8, V10, V12, V14, V16, V18, V20, V22;
//XMVECTOR C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11;
Vec3V Result;
// Reduce the angle to [-PI, PI] by subtracting the nearest multiple of 2*PI
const Vec3V recipTwoPi = V4LoadA(g_NVReciprocalTwoPi.f);
const Vec3V twoPi = V4LoadA(g_NVTwoPi.f);
const Vec3V tmp = V3Mul(a, recipTwoPi);
const Vec3V b = V3Round(tmp);
const Vec3V V1 = V3NegMulSub(twoPi, b, a);
// cos(V) ~= 1 - V^2 / 2! + V^4 / 4! - V^6 / 6! + V^8 / 8! - V^10 / 10! + V^12 / 12! -
// V^14 / 14! + V^16 / 16! - V^18 / 18! + V^20 / 20! - V^22 / 22! (for -PI <= V < PI)
const Vec3V V2 = V3Mul(V1, V1);
const Vec3V V4 = V3Mul(V2, V2);
const Vec3V V6 = V3Mul(V4, V2);
const Vec3V V8 = V3Mul(V4, V4);
const Vec3V V10 = V3Mul(V6, V4);
const Vec3V V12 = V3Mul(V6, V6);
const Vec3V V14 = V3Mul(V8, V6);
const Vec3V V16 = V3Mul(V8, V8);
const Vec3V V18 = V3Mul(V10, V8);
const Vec3V V20 = V3Mul(V10, V10);
const Vec3V V22 = V3Mul(V12, V10);
const Vec4V cosCoefficients0 = V4LoadA(g_NVCosCoefficients0.f);
const Vec4V cosCoefficients1 = V4LoadA(g_NVCosCoefficients1.f);
const Vec4V cosCoefficients2 = V4LoadA(g_NVCosCoefficients2.f);
const FloatV C1 = V4GetY(cosCoefficients0);
const FloatV C2 = V4GetZ(cosCoefficients0);
const FloatV C3 = V4GetW(cosCoefficients0);
const FloatV C4 = V4GetX(cosCoefficients1);
const FloatV C5 = V4GetY(cosCoefficients1);
const FloatV C6 = V4GetZ(cosCoefficients1);
const FloatV C7 = V4GetW(cosCoefficients1);
const FloatV C8 = V4GetX(cosCoefficients2);
const FloatV C9 = V4GetY(cosCoefficients2);
const FloatV C10 = V4GetZ(cosCoefficients2);
const FloatV C11 = V4GetW(cosCoefficients2);
Result = V3MulAdd(C1, V2, V4One());
Result = V3MulAdd(C2, V4, Result);
Result = V3MulAdd(C3, V6, Result);
Result = V3MulAdd(C4, V8, Result);
Result = V3MulAdd(C5, V10, Result);
Result = V3MulAdd(C6, V12, Result);
Result = V3MulAdd(C7, V14, Result);
Result = V3MulAdd(C8, V16, Result);
Result = V3MulAdd(C9, V18, Result);
Result = V3MulAdd(C10, V20, Result);
Result = V3MulAdd(C11, V22, Result);
return Result;
}
NV_FORCE_INLINE Vec3V V3PermYZZ(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
return _mm_shuffle_ps(a,a,_MM_SHUFFLE(3,2,2,1));
}
NV_FORCE_INLINE Vec3V V3PermXYX(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
return _mm_shuffle_ps(a,a,_MM_SHUFFLE(3,0,1,0));
}
NV_FORCE_INLINE Vec3V V3PermYZX(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
return _mm_shuffle_ps(a,a,_MM_SHUFFLE(3,0,2,1));
}
NV_FORCE_INLINE Vec3V V3PermZXY(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3,1,0,2));
}
NV_FORCE_INLINE Vec3V V3PermZZY(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3,1,2,2));
}
NV_FORCE_INLINE Vec3V V3PermYXX(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3,0,0,1));
}
NV_FORCE_INLINE Vec3V V3Perm_Zero_1Z_0Y(const Vec3V v0, const Vec3V v1)
{
VECMATHAOS_ASSERT(isValidVec3V(v0));
VECMATHAOS_ASSERT(isValidVec3V(v1));
return _mm_shuffle_ps(v1, v0, _MM_SHUFFLE(3,1,2,3));
}
NV_FORCE_INLINE Vec3V V3Perm_0Z_Zero_1X(const Vec3V v0, const Vec3V v1)
{
VECMATHAOS_ASSERT(isValidVec3V(v0));
VECMATHAOS_ASSERT(isValidVec3V(v1));
return _mm_shuffle_ps(v0, v1, _MM_SHUFFLE(3,0,3,2));
}
NV_FORCE_INLINE Vec3V V3Perm_1Y_0X_Zero(const Vec3V v0, const Vec3V v1)
{
VECMATHAOS_ASSERT(isValidVec3V(v0));
VECMATHAOS_ASSERT(isValidVec3V(v1));
//There must be a better way to do this.
Vec3V v2=V3Zero();
FloatV y1=V3GetY(v1);
FloatV x0=V3GetX(v0);
v2=V3SetX(v2,y1);
return V3SetY(v2,x0);
}
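// A possible two-shuffle alternative (sketch only, untested here):
// const __m128 t = _mm_unpacklo_ps(v1, v0); // (v1.x, v0.x, v1.y, v0.y)
// return _mm_shuffle_ps(t, V3Zero(), _MM_SHUFFLE(0, 0, 1, 2)); // (v1.y, v0.x, 0, 0)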
NV_FORCE_INLINE FloatV V3SumElems(const Vec3V a)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
__m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0)); //splat x
__m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1,1,1,1)); //splat y
__m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2)); //splat z
return _mm_add_ps(_mm_add_ps(shuf1, shuf2), shuf3);
}
NV_FORCE_INLINE uint32_t V3OutOfBounds(const Vec3V a, const Vec3V min, const Vec3V max)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(min));
VECMATHAOS_ASSERT(isValidVec3V(max));
const BoolV ffff = BFFFF();
const BoolV c = BOr(V3IsGrtr(a, max), V3IsGrtr(min, a));
return uint32_t(!BAllEq(c, ffff));
}
NV_FORCE_INLINE uint32_t V3InBounds(const Vec3V a, const Vec3V min, const Vec3V max)
{
VECMATHAOS_ASSERT(isValidVec3V(a));
VECMATHAOS_ASSERT(isValidVec3V(min));
VECMATHAOS_ASSERT(isValidVec3V(max));
const BoolV tttt = BTTTT();
const BoolV c = BAnd(V3IsGrtrOrEq(a, min), V3IsGrtrOrEq(max, a));
return BAllEq(c, tttt);
}
NV_FORCE_INLINE uint32_t V3OutOfBounds(const Vec3V a, const Vec3V bounds)
{
return V3OutOfBounds(a, V3Neg(bounds), bounds);
}
NV_FORCE_INLINE uint32_t V3InBounds(const Vec3V a, const Vec3V bounds)
{
return V3InBounds(a, V3Neg(bounds), bounds);
}
//////////////////////////////////
//VEC4V
//////////////////////////////////
NV_FORCE_INLINE Vec4V V4Splat(const FloatV f)
{
VECMATHAOS_ASSERT(isValidFloatV(f));
//return _mm_shuffle_ps(f, f, _MM_SHUFFLE(0,0,0,0));
return f;
}
NV_FORCE_INLINE Vec4V V4Merge(const FloatV* const floatVArray)
{
VECMATHAOS_ASSERT(isValidFloatV(floatVArray[0]));
VECMATHAOS_ASSERT(isValidFloatV(floatVArray[1]));
VECMATHAOS_ASSERT(isValidFloatV(floatVArray[2]));
VECMATHAOS_ASSERT(isValidFloatV(floatVArray[3]));
__m128 xw = _mm_move_ss(floatVArray[1], floatVArray[0]); //y, y, y, x
__m128 yz = _mm_move_ss(floatVArray[2], floatVArray[3]); //z, z, z, w
return (_mm_shuffle_ps(xw,yz,_MM_SHUFFLE(0,2,1,0)));
}
NV_FORCE_INLINE Vec4V V4Merge(const FloatVArg x, const FloatVArg y, const FloatVArg z, const FloatVArg w)
{
VECMATHAOS_ASSERT(isValidFloatV(x));
VECMATHAOS_ASSERT(isValidFloatV(y));
VECMATHAOS_ASSERT(isValidFloatV(z));
VECMATHAOS_ASSERT(isValidFloatV(w));
__m128 xw = _mm_move_ss(y, x); //y, y, y, x
__m128 yz = _mm_move_ss(z, w); //z, z, z, w
return (_mm_shuffle_ps(xw,yz,_MM_SHUFFLE(0,2,1,0)));
}
NV_FORCE_INLINE Vec4V V4MergeW(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w)
{
const Vec4V xz = _mm_unpackhi_ps(x, z);
const Vec4V yw = _mm_unpackhi_ps(y, w);
return _mm_unpackhi_ps(xz, yw);
}
NV_FORCE_INLINE Vec4V V4MergeZ(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w)
{
const Vec4V xz = _mm_unpackhi_ps(x, z);
const Vec4V yw = _mm_unpackhi_ps(y, w);
return _mm_unpacklo_ps(xz, yw);
}
NV_FORCE_INLINE Vec4V V4MergeY(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w)
{
const Vec4V xz = _mm_unpacklo_ps(x, z);
const Vec4V yw = _mm_unpacklo_ps(y, w);
return _mm_unpackhi_ps(xz, yw);
}
NV_FORCE_INLINE Vec4V V4MergeX(const Vec4VArg x, const Vec4VArg y, const Vec4VArg z, const Vec4VArg w)
{
const Vec4V xz = _mm_unpacklo_ps(x, z);
const Vec4V yw = _mm_unpacklo_ps(y, w);
return _mm_unpacklo_ps(xz, yw);
}
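// Taken together, V4MergeX/Y/Z/W gather the same-named lane from each of the four inputs,
// so applying all four to the columns of a 4x4 matrix produces its transpose.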
NV_FORCE_INLINE Vec4V V4UnpackXY(const Vec4VArg a, const Vec4VArg b)
{
return _mm_unpacklo_ps(a, b);
}
NV_FORCE_INLINE Vec4V V4UnpackZW(const Vec4VArg a, const Vec4VArg b)
{
return _mm_unpackhi_ps(a, b);
}
NV_FORCE_INLINE Vec4V V4Perm_YXWZ(const Vec4V a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,3,0,1));
}
NV_FORCE_INLINE Vec4V V4Perm_XZXZ(const Vec4V a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,0,2,0));
}
NV_FORCE_INLINE Vec4V V4Perm_YWYW(const Vec4V a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3,1,3,1));
}
template<uint8_t x, uint8_t y, uint8_t z, uint8_t w> NV_FORCE_INLINE Vec4V V4Perm(const Vec4V a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(w, z, y, x));
}
NV_FORCE_INLINE Vec4V V4UnitW()
{
const NV_ALIGN(16, float w[4])={0.0f,0.0f,0.0f,1.0f};
const __m128 w128=_mm_load_ps(w);
return w128;
}
NV_FORCE_INLINE Vec4V V4UnitX()
{
const NV_ALIGN(16, float x[4])={1.0f,0.0f,0.0f,0.0f};
const __m128 x128=_mm_load_ps(x);
return x128;
}
NV_FORCE_INLINE Vec4V V4UnitY()
{
const NV_ALIGN(16, float y[4])={0.0f,1.0f,0.0f,0.0f};
const __m128 y128=_mm_load_ps(y);
return y128;
}
NV_FORCE_INLINE Vec4V V4UnitZ()
{
const NV_ALIGN(16, float z[4])={0.0f,0.0f,1.0f,0.0f};
const __m128 z128=_mm_load_ps(z);
return z128;
}
NV_FORCE_INLINE FloatV V4GetW(const Vec4V f)
{
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(3,3,3,3));
}
NV_FORCE_INLINE FloatV V4GetX(const Vec4V f)
{
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(0,0,0,0));
}
NV_FORCE_INLINE FloatV V4GetY(const Vec4V f)
{
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(1,1,1,1));
}
NV_FORCE_INLINE FloatV V4GetZ(const Vec4V f)
{
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(2,2,2,2));
}
NV_FORCE_INLINE Vec4V V4SetW(const Vec4V v, const FloatV f)
{
VECMATHAOS_ASSERT(isValidFloatV(f));
return V4Sel(BTTTF(),v,f);
}
NV_FORCE_INLINE Vec4V V4ClearW(const Vec4V v)
{
return _mm_and_ps(v, (VecI32V&)internalWindowsSimd::gMaskXYZ);
}
NV_FORCE_INLINE Vec4V V4SetX(const Vec4V v, const FloatV f)
{
VECMATHAOS_ASSERT(isValidFloatV(f));
return V4Sel(BFTTT(),v,f);
}
NV_FORCE_INLINE Vec4V V4SetY(const Vec4V v, const FloatV f)
{
VECMATHAOS_ASSERT(isValidFloatV(f));
return V4Sel(BTFTT(),v,f);
}
NV_FORCE_INLINE Vec4V V4SetZ(const Vec4V v, const FloatV f)
{
VECMATHAOS_ASSERT(isValidFloatV(f));
return V4Sel(BTTFT(),v,f);
}
NV_FORCE_INLINE Vec4V V4Zero()
{
return _mm_setzero_ps();
}
NV_FORCE_INLINE Vec4V V4One()
{
return V4Load(1.0f);
}
NV_FORCE_INLINE Vec4V V4Eps()
{
return V4Load(NV_EPS_REAL);
}
NV_FORCE_INLINE Vec4V V4Neg(const Vec4V f)
{
return _mm_sub_ps( _mm_setzero_ps(), f);
}
NV_FORCE_INLINE Vec4V V4Add(const Vec4V a, const Vec4V b)
{
return _mm_add_ps(a,b);
}
NV_FORCE_INLINE Vec4V V4Sub(const Vec4V a, const Vec4V b)
{
return _mm_sub_ps(a,b);
}
NV_FORCE_INLINE Vec4V V4Scale(const Vec4V a, const FloatV b)
{
return _mm_mul_ps(a,b);
}
NV_FORCE_INLINE Vec4V V4Mul(const Vec4V a, const Vec4V b)
{
return _mm_mul_ps(a,b);
}
NV_FORCE_INLINE Vec4V V4ScaleInv(const Vec4V a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(b));
return _mm_div_ps(a,b);
}
NV_FORCE_INLINE Vec4V V4Div(const Vec4V a, const Vec4V b)
{
return _mm_div_ps(a,b);
}
NV_FORCE_INLINE Vec4V V4ScaleInvFast(const Vec4V a, const FloatV b)
{
VECMATHAOS_ASSERT(isValidFloatV(b));
return _mm_mul_ps(a,_mm_rcp_ps(b));
}
NV_FORCE_INLINE Vec4V V4DivFast(const Vec4V a, const Vec4V b)
{
return _mm_mul_ps(a,_mm_rcp_ps(b));
}
NV_FORCE_INLINE Vec4V V4Recip(const Vec4V a)
{
return _mm_div_ps(V4One(),a);
}
NV_FORCE_INLINE Vec4V V4RecipFast(const Vec4V a)
{
return _mm_rcp_ps(a);
}
NV_FORCE_INLINE Vec4V V4Rsqrt(const Vec4V a)
{
return _mm_div_ps(V4One(),_mm_sqrt_ps(a));
}
NV_FORCE_INLINE Vec4V V4RsqrtFast(const Vec4V a)
{
return _mm_rsqrt_ps(a);
}
NV_FORCE_INLINE Vec4V V4Sqrt(const Vec4V a)
{
return _mm_sqrt_ps(a);
}
NV_FORCE_INLINE Vec4V V4ScaleAdd(const Vec4V a, const FloatV b, const Vec4V c)
{
VECMATHAOS_ASSERT(isValidFloatV(b));
return V4Add(V4Scale(a,b),c);
}
NV_FORCE_INLINE Vec4V V4NegScaleSub(const Vec4V a, const FloatV b, const Vec4V c)
{
VECMATHAOS_ASSERT(isValidFloatV(b));
return V4Sub(c,V4Scale(a,b));
}
NV_FORCE_INLINE Vec4V V4MulAdd(const Vec4V a, const Vec4V b, const Vec4V c)
{
return V4Add(V4Mul(a,b),c);
}
NV_FORCE_INLINE Vec4V V4NegMulSub(const Vec4V a, const Vec4V b, const Vec4V c)
{
return V4Sub(c,V4Mul(a,b));
}
NV_FORCE_INLINE Vec4V V4Abs(const Vec4V a)
{
return V4Max(a,V4Neg(a));
}
NV_FORCE_INLINE FloatV V4SumElements(const Vec4V a)
{
const Vec4V xy = V4UnpackXY(a, a); //x,x,y,y
const Vec4V zw = V4UnpackZW(a, a); //z,z,w,w
const Vec4V xz_yw = V4Add(xy, zw); //x+z,x+z,y+w,y+w
const FloatV xz = V4GetX(xz_yw); //x+z
const FloatV yw = V4GetZ(xz_yw); //y+w
return FAdd(xz, yw); //sum
}
NV_FORCE_INLINE FloatV V4Dot(const Vec4V a, const Vec4V b)
{
__m128 dot1 = _mm_mul_ps(a, b); //x,y,z,w
__m128 shuf1 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(2,1,0,3)); //w,x,y,z
__m128 shuf2 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(1,0,3,2)); //z,w,x,y
__m128 shuf3 = _mm_shuffle_ps(dot1, dot1, _MM_SHUFFLE(0,3,2,1)); //y,z,w,x
return _mm_add_ps(_mm_add_ps(shuf2, shuf3), _mm_add_ps(dot1,shuf1));
}
NV_FORCE_INLINE FloatV V4Length(const Vec4V a)
{
return _mm_sqrt_ps(V4Dot(a,a));
}
NV_FORCE_INLINE FloatV V4LengthSq(const Vec4V a)
{
return V4Dot(a,a);
}
NV_FORCE_INLINE Vec4V V4Normalize(const Vec4V a)
{
VECMATHAOS_ASSERT(!FAllEq(V4Dot(a,a), FZero()));
return V4ScaleInv(a,_mm_sqrt_ps(V4Dot(a,a)));
}
NV_FORCE_INLINE Vec4V V4NormalizeFast(const Vec4V a)
{
return V4ScaleInvFast(a,_mm_sqrt_ps(V4Dot(a,a)));
}
NV_FORCE_INLINE Vec4V V4NormalizeSafe(const Vec4V a)
{
const __m128 zero=FZero();
const __m128 eps=V3Eps();
const __m128 length=V4Length(a);
const __m128 isGreaterThanZero=V4IsGrtr(length,eps);
return V4Sel(isGreaterThanZero,V4ScaleInv(a,length),zero);
}
NV_FORCE_INLINE Vec4V V4Sel(const BoolV c, const Vec4V a, const Vec4V b)
{
return _mm_or_ps(_mm_andnot_ps(c, b), _mm_and_ps(c, a));
}
NV_FORCE_INLINE BoolV V4IsGrtr(const Vec4V a, const Vec4V b)
{
return _mm_cmpgt_ps(a,b);
}
NV_FORCE_INLINE BoolV V4IsGrtrOrEq(const Vec4V a, const Vec4V b)
{
return _mm_cmpge_ps(a,b);
}
NV_FORCE_INLINE BoolV V4IsEq(const Vec4V a, const Vec4V b)
{
return _mm_cmpeq_ps(a,b);
}
NV_FORCE_INLINE BoolV V4IsEqU32(const VecU32V a, const VecU32V b)
{
return internalWindowsSimd::m128_I2F(_mm_cmpeq_epi32(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b)));
}
NV_FORCE_INLINE Vec4V V4Max(const Vec4V a, const Vec4V b)
{
return _mm_max_ps(a, b);
}
NV_FORCE_INLINE Vec4V V4Min(const Vec4V a, const Vec4V b)
{
return _mm_min_ps(a, b);
}
//Extract the maximum value from a
NV_FORCE_INLINE FloatV V4ExtractMax(const Vec4V a)
{
__m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,1,0,3));
__m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1,0,3,2));
__m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0,3,2,1));
return _mm_max_ps(_mm_max_ps(a, shuf1), _mm_max_ps(shuf2, shuf3));
}
//Extract the minimum value from a
NV_FORCE_INLINE FloatV V4ExtractMin(const Vec4V a)
{
__m128 shuf1 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,1,0,3));
__m128 shuf2 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1,0,3,2));
__m128 shuf3 = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0,3,2,1));
return _mm_min_ps(_mm_min_ps(a, shuf1), _mm_min_ps(shuf2, shuf3));
}
NV_FORCE_INLINE Vec4V V4Clamp(const Vec4V a, const Vec4V minV, const Vec4V maxV)
{
return V4Max(V4Min(a,maxV),minV);
}
NV_FORCE_INLINE uint32_t V4AllGrtr(const Vec4V a, const Vec4V b)
{
return internalWindowsSimd::BAllTrue4_R(V4IsGrtr(a, b));
}
NV_FORCE_INLINE uint32_t V4AllGrtrOrEq(const Vec4V a, const Vec4V b)
{
return internalWindowsSimd::BAllTrue4_R(V4IsGrtrOrEq(a, b));
}
NV_FORCE_INLINE uint32_t V4AllEq(const Vec4V a, const Vec4V b)
{
return internalWindowsSimd::BAllTrue4_R(V4IsEq(a, b));
}
NV_FORCE_INLINE Vec4V V4Round(const Vec4V a)
{
//return _mm_round_ps(a, 0x0);
const Vec4V half = V4Load(0.5f);
const __m128 signBit = _mm_cvtepi32_ps(_mm_srli_epi32(_mm_cvtps_epi32(a), 31));
const Vec4V aRound = V4Sub(V4Add(a, half), signBit);
__m128i tmp = _mm_cvttps_epi32(aRound);
return _mm_cvtepi32_ps(tmp);
}
NV_FORCE_INLINE Vec4V V4Sin(const Vec4V a)
{
//Vec4V V1, V2, V3, V5, V7, V9, V11, V13, V15, V17, V19, V21, V23;
//Vec4V S1, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11;
Vec4V Result;
const Vec4V recipTwoPi = V4LoadA(g_NVReciprocalTwoPi.f);
const Vec4V twoPi = V4LoadA(g_NVTwoPi.f);
const Vec4V tmp = V4Mul(a, recipTwoPi);
const Vec4V b = V4Round(tmp);
const Vec4V V1 = V4NegMulSub(twoPi, b, a);
// sin(V) ~= V - V^3 / 3! + V^5 / 5! - V^7 / 7! + V^9 / 9! - V^11 / 11! + V^13 / 13! -
// V^15 / 15! + V^17 / 17! - V^19 / 19! + V^21 / 21! - V^23 / 23! (for -PI <= V < PI)
const Vec4V V2 = V4Mul(V1, V1);
const Vec4V V3 = V4Mul(V2, V1);
const Vec4V V5 = V4Mul(V3, V2);
const Vec4V V7 = V4Mul(V5, V2);
const Vec4V V9 = V4Mul(V7, V2);
const Vec4V V11 = V4Mul(V9, V2);
const Vec4V V13 = V4Mul(V11, V2);
const Vec4V V15 = V4Mul(V13, V2);
const Vec4V V17 = V4Mul(V15, V2);
const Vec4V V19 = V4Mul(V17, V2);
const Vec4V V21 = V4Mul(V19, V2);
const Vec4V V23 = V4Mul(V21, V2);
const Vec4V sinCoefficients0 = V4LoadA(g_NVSinCoefficients0.f);
const Vec4V sinCoefficients1 = V4LoadA(g_NVSinCoefficients1.f);
const Vec4V sinCoefficients2 = V4LoadA(g_NVSinCoefficients2.f);
const FloatV S1 = V4GetY(sinCoefficients0);
const FloatV S2 = V4GetZ(sinCoefficients0);
const FloatV S3 = V4GetW(sinCoefficients0);
const FloatV S4 = V4GetX(sinCoefficients1);
const FloatV S5 = V4GetY(sinCoefficients1);
const FloatV S6 = V4GetZ(sinCoefficients1);
const FloatV S7 = V4GetW(sinCoefficients1);
const FloatV S8 = V4GetX(sinCoefficients2);
const FloatV S9 = V4GetY(sinCoefficients2);
const FloatV S10 = V4GetZ(sinCoefficients2);
const FloatV S11 = V4GetW(sinCoefficients2);
Result = V4MulAdd(S1, V3, V1);
Result = V4MulAdd(S2, V5, Result);
Result = V4MulAdd(S3, V7, Result);
Result = V4MulAdd(S4, V9, Result);
Result = V4MulAdd(S5, V11, Result);
Result = V4MulAdd(S6, V13, Result);
Result = V4MulAdd(S7, V15, Result);
Result = V4MulAdd(S8, V17, Result);
Result = V4MulAdd(S9, V19, Result);
Result = V4MulAdd(S10, V21, Result);
Result = V4MulAdd(S11, V23, Result);
return Result;
}
NV_FORCE_INLINE Vec4V V4Cos(const Vec4V a)
{
//XMVECTOR V1, V2, V4, V6, V8, V10, V12, V14, V16, V18, V20, V22;
//XMVECTOR C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11;
Vec4V Result;
const Vec4V recipTwoPi = V4LoadA(g_NVReciprocalTwoPi.f);
const Vec4V twoPi = V4LoadA(g_NVTwoPi.f);
const Vec4V tmp = V4Mul(a, recipTwoPi);
const Vec4V b = V4Round(tmp);
const Vec4V V1 = V4NegMulSub(twoPi, b, a);
// cos(V) ~= 1 - V^2 / 2! + V^4 / 4! - V^6 / 6! + V^8 / 8! - V^10 / 10! + V^12 / 12! -
// V^14 / 14! + V^16 / 16! - V^18 / 18! + V^20 / 20! - V^22 / 22! (for -PI <= V < PI)
const Vec4V V2 = V4Mul(V1, V1);
const Vec4V V4 = V4Mul(V2, V2);
const Vec4V V6 = V4Mul(V4, V2);
const Vec4V V8 = V4Mul(V4, V4);
const Vec4V V10 = V4Mul(V6, V4);
const Vec4V V12 = V4Mul(V6, V6);
const Vec4V V14 = V4Mul(V8, V6);
const Vec4V V16 = V4Mul(V8, V8);
const Vec4V V18 = V4Mul(V10, V8);
const Vec4V V20 = V4Mul(V10, V10);
const Vec4V V22 = V4Mul(V12, V10);
const Vec4V cosCoefficients0 = V4LoadA(g_NVCosCoefficients0.f);
const Vec4V cosCoefficients1 = V4LoadA(g_NVCosCoefficients1.f);
const Vec4V cosCoefficients2 = V4LoadA(g_NVCosCoefficients2.f);
const FloatV C1 = V4GetY(cosCoefficients0);
const FloatV C2 = V4GetZ(cosCoefficients0);
const FloatV C3 = V4GetW(cosCoefficients0);
const FloatV C4 = V4GetX(cosCoefficients1);
const FloatV C5 = V4GetY(cosCoefficients1);
const FloatV C6 = V4GetZ(cosCoefficients1);
const FloatV C7 = V4GetW(cosCoefficients1);
const FloatV C8 = V4GetX(cosCoefficients2);
const FloatV C9 = V4GetY(cosCoefficients2);
const FloatV C10 = V4GetZ(cosCoefficients2);
const FloatV C11 = V4GetW(cosCoefficients2);
Result = V4MulAdd(C1, V2, V4One());
Result = V4MulAdd(C2, V4, Result);
Result = V4MulAdd(C3, V6, Result);
Result = V4MulAdd(C4, V8, Result);
Result = V4MulAdd(C5, V10, Result);
Result = V4MulAdd(C6, V12, Result);
Result = V4MulAdd(C7, V14, Result);
Result = V4MulAdd(C8, V16, Result);
Result = V4MulAdd(C9, V18, Result);
Result = V4MulAdd(C10, V20, Result);
Result = V4MulAdd(C11, V22, Result);
return Result;
}
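// Added reference sketch (not part of the original header): scalar equivalent of the range
// reduction + odd-series evaluation used by V4Sin above. The argument is wrapped to [-PI, PI)
// by subtracting round(x / 2PI) * 2PI, then the Taylor series is summed up to the x^23 term,
// mirroring the vector code lane-for-lane (coefficient tables aside).
/*
static float refSin(float x)
{
    const float twoPi = 6.28318530718f;
    x -= twoPi * floorf(x / twoPi + 0.5f);  // round-to-nearest wrap into [-PI, PI)
    const float x2 = x * x;
    float term = x, sum = x;
    for (int n = 1; n <= 11; ++n)           // 11 odd terms after x -> truncation at x^23
    {
        term *= -x2 / float((2 * n) * (2 * n + 1));
        sum += term;
    }
    return sum;
}
*/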
NV_FORCE_INLINE void V4Transpose(Vec4V& col0, Vec4V& col1, Vec4V& col2, Vec4V& col3)
{
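// (Added comment) Standard SSE 4x4 transpose: the unpacklo/unpackhi pairs interleave the
// xy/zw halves of adjacent columns, and movelh/movehl recombine them so that colN ends up
// holding the original row N.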
Vec4V tmp0 = _mm_unpacklo_ps(col0, col1);
Vec4V tmp2 = _mm_unpacklo_ps(col2, col3);
Vec4V tmp1 = _mm_unpackhi_ps(col0, col1);
Vec4V tmp3 = _mm_unpackhi_ps(col2, col3);
col0 = _mm_movelh_ps(tmp0, tmp2);
col1 = _mm_movehl_ps(tmp2, tmp0);
col2 = _mm_movelh_ps(tmp1, tmp3);
col3 = _mm_movehl_ps(tmp3, tmp1);
}
//////////////////////////////////
//BoolV
//////////////////////////////////
NV_FORCE_INLINE BoolV BFFFF()
{
return _mm_setzero_ps();
}
NV_FORCE_INLINE BoolV BFFFT()
{
/*const NV_ALIGN(16, uint32_t f[4])={0,0,0,0xFFFFFFFF};
const __m128 ffft=_mm_load_ps((float*)&f);
return ffft;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(-1, 0, 0, 0));
}
NV_FORCE_INLINE BoolV BFFTF()
{
/*const NV_ALIGN(16, uint32_t f[4])={0,0,0xFFFFFFFF,0};
const __m128 fftf=_mm_load_ps((float*)&f);
return fftf;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(0, -1, 0, 0));
}
NV_FORCE_INLINE BoolV BFFTT()
{
/*const NV_ALIGN(16, uint32_t f[4])={0,0,0xFFFFFFFF,0xFFFFFFFF};
const __m128 fftt=_mm_load_ps((float*)&f);
return fftt;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(-1, -1, 0, 0));
}
NV_FORCE_INLINE BoolV BFTFF()
{
/*const NV_ALIGN(16, uint32_t f[4])={0,0xFFFFFFFF,0,0};
const __m128 ftff=_mm_load_ps((float*)&f);
return ftff;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(0, 0, -1, 0));
}
NV_FORCE_INLINE BoolV BFTFT()
{
/*const NV_ALIGN(16, uint32_t f[4])={0,0xFFFFFFFF,0,0xFFFFFFFF};
const __m128 ftft=_mm_load_ps((float*)&f);
return ftft;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(-1, 0, -1, 0));
}
NV_FORCE_INLINE BoolV BFTTF()
{
/*const NV_ALIGN(16, uint32_t f[4])={0,0xFFFFFFFF,0xFFFFFFFF,0};
const __m128 fttf=_mm_load_ps((float*)&f);
return fttf;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(0, -1, -1, 0));
}
NV_FORCE_INLINE BoolV BFTTT()
{
/*const NV_ALIGN(16, uint32_t f[4])={0,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF};
const __m128 fttt=_mm_load_ps((float*)&f);
return fttt;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(-1, -1, -1, 0));
}
NV_FORCE_INLINE BoolV BTFFF()
{
//const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0,0,0};
//const __m128 tfff=_mm_load_ps((float*)&f);
//return tfff;
return internalWindowsSimd::m128_I2F(_mm_set_epi32(0, 0, 0, -1));
}
NV_FORCE_INLINE BoolV BTFFT()
{
/*const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0,0,0xFFFFFFFF};
const __m128 tfft=_mm_load_ps((float*)&f);
return tfft;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(-1, 0, 0, -1));
}
NV_FORCE_INLINE BoolV BTFTF()
{
/*const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0,0xFFFFFFFF,0};
const __m128 tftf=_mm_load_ps((float*)&f);
return tftf;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(0, -1, 0, -1));
}
NV_FORCE_INLINE BoolV BTFTT()
{
/*const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0,0xFFFFFFFF,0xFFFFFFFF};
const __m128 tftt=_mm_load_ps((float*)&f);
return tftt;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(-1, -1, 0, -1));
}
NV_FORCE_INLINE BoolV BTTFF()
{
/*const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0xFFFFFFFF,0,0};
const __m128 ttff=_mm_load_ps((float*)&f);
return ttff;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(0, 0, -1, -1));
}
NV_FORCE_INLINE BoolV BTTFT()
{
/*const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0xFFFFFFFF,0,0xFFFFFFFF};
const __m128 ttft=_mm_load_ps((float*)&f);
return ttft;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(-1, 0, -1, -1));
}
NV_FORCE_INLINE BoolV BTTTF()
{
/*const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0};
const __m128 tttf=_mm_load_ps((float*)&f);
return tttf;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(0, -1, -1, -1));
}
NV_FORCE_INLINE BoolV BTTTT()
{
/*const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF};
const __m128 tttt=_mm_load_ps((float*)&f);
return tttt;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(-1, -1, -1, -1));
}
NV_FORCE_INLINE BoolV BXMask()
{
/*const NV_ALIGN(16, uint32_t f[4])={0xFFFFFFFF,0,0,0};
const __m128 tfff=_mm_load_ps((float*)&f);
return tfff;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(0, 0, 0, -1));
}
NV_FORCE_INLINE BoolV BYMask()
{
/*const NV_ALIGN(16, uint32_t f[4])={0,0xFFFFFFFF,0,0};
const __m128 ftff=_mm_load_ps((float*)&f);
return ftff;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(0, 0, -1, 0));
}
NV_FORCE_INLINE BoolV BZMask()
{
/*const NV_ALIGN(16, uint32_t f[4])={0,0,0xFFFFFFFF,0};
const __m128 fftf=_mm_load_ps((float*)&f);
return fftf;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(0, -1, 0, 0));
}
NV_FORCE_INLINE BoolV BWMask()
{
/*const NV_ALIGN(16, uint32_t f[4])={0,0,0,0xFFFFFFFF};
const __m128 ffft=_mm_load_ps((float*)&f);
return ffft;*/
return internalWindowsSimd::m128_I2F(_mm_set_epi32(-1, 0, 0, 0));
}
NV_FORCE_INLINE BoolV BGetX(const BoolV f)
{
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(0,0,0,0));
}
NV_FORCE_INLINE BoolV BGetY(const BoolV f)
{
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(1,1,1,1));
}
NV_FORCE_INLINE BoolV BGetZ(const BoolV f)
{
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(2,2,2,2));
}
NV_FORCE_INLINE BoolV BGetW(const BoolV f)
{
return _mm_shuffle_ps(f, f, _MM_SHUFFLE(3,3,3,3));
}
NV_FORCE_INLINE BoolV BSetX(const BoolV v, const BoolV f)
{
return V4Sel(BFTTT(),v,f);
}
NV_FORCE_INLINE BoolV BSetY(const BoolV v, const BoolV f)
{
return V4Sel(BTFTT(),v,f);
}
NV_FORCE_INLINE BoolV BSetZ(const BoolV v, const BoolV f)
{
return V4Sel(BTTFT(),v,f);
}
NV_FORCE_INLINE BoolV BSetW(const BoolV v, const BoolV f)
{
return V4Sel(BTTTF(),v,f);
}
template<int index> BoolV BSplatElement(BoolV a)
{
return internalWindowsSimd::m128_I2F(_mm_shuffle_epi32(internalWindowsSimd::m128_F2I(a), _MM_SHUFFLE(index, index, index, index)));
}
NV_FORCE_INLINE BoolV BAnd(const BoolV a, const BoolV b)
{
return (_mm_and_ps(a,b));
}
NV_FORCE_INLINE BoolV BNot(const BoolV a)
{
const BoolV bAllTrue(BTTTT());
return _mm_xor_ps(a, bAllTrue);
}
NV_FORCE_INLINE BoolV BAndNot(const BoolV a, const BoolV b)
{
return (_mm_andnot_ps(b, a));
}
NV_FORCE_INLINE BoolV BOr(const BoolV a, const BoolV b)
{
return (_mm_or_ps(a,b));
}
NV_FORCE_INLINE BoolV BAllTrue4(const BoolV a)
{
const BoolV bTmp = _mm_and_ps(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,1,0,1)), _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,3,2,3)));
return _mm_and_ps(_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(0,0,0,0)), _mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(1,1,1,1)));
}
NV_FORCE_INLINE BoolV BAnyTrue4(const BoolV a)
{
const BoolV bTmp = _mm_or_ps(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,1,0,1)), _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,3,2,3)));
return _mm_or_ps(_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(0,0,0,0)), _mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(1,1,1,1)));
}
NV_FORCE_INLINE BoolV BAllTrue3(const BoolV a)
{
const BoolV bTmp = _mm_and_ps(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,1,0,1)), _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2)));
return _mm_and_ps(_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(0,0,0,0)), _mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(1,1,1,1)));
}
NV_FORCE_INLINE BoolV BAnyTrue3(const BoolV a)
{
const BoolV bTmp = _mm_or_ps(_mm_shuffle_ps(a, a, _MM_SHUFFLE(0,1,0,1)), _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2)));
return _mm_or_ps(_mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(0,0,0,0)), _mm_shuffle_ps(bTmp, bTmp, _MM_SHUFFLE(1,1,1,1)));
}
NV_FORCE_INLINE uint32_t BAllEq(const BoolV a, const BoolV b)
{
const BoolV bTest = internalWindowsSimd::m128_I2F(_mm_cmpeq_epi32(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b)));
return internalWindowsSimd::BAllTrue4_R(bTest);
}
NV_FORCE_INLINE uint32_t BAllEqTTTT(const BoolV a)
{
return uint32_t(_mm_movemask_ps(a)==15);
}
NV_FORCE_INLINE uint32_t BAllEqFFFF(const BoolV a)
{
return uint32_t(_mm_movemask_ps(a)==0);
}
NV_FORCE_INLINE uint32_t BGetBitMask(const BoolV a)
{
return uint32_t(_mm_movemask_ps(a));
}
//////////////////////////////////
//MAT33V
//////////////////////////////////
NV_FORCE_INLINE Vec3V M33MulV3(const Mat33V& a, const Vec3V b)
{
const FloatV x=V3GetX(b);
const FloatV y=V3GetY(b);
const FloatV z=V3GetZ(b);
const Vec3V v0=V3Scale(a.col0,x);
const Vec3V v1=V3Scale(a.col1,y);
const Vec3V v2=V3Scale(a.col2,z);
const Vec3V v0PlusV1=V3Add(v0,v1);
return V3Add(v0PlusV1,v2);
}
NV_FORCE_INLINE Vec3V M33TrnspsMulV3(const Mat33V& a, const Vec3V b)
{
const FloatV x=V3Dot(a.col0,b);
const FloatV y=V3Dot(a.col1,b);
const FloatV z=V3Dot(a.col2,b);
return V3Merge(x,y,z);
}
NV_FORCE_INLINE Vec3V M33MulV3AddV3(const Mat33V& A, const Vec3V b, const Vec3V c)
{
const FloatV x=V3GetX(b);
const FloatV y=V3GetY(b);
const FloatV z=V3GetZ(b);
Vec3V result = V3MulAdd(A.col0, x, c);
result = V3MulAdd(A.col1, y, result);
return V3MulAdd(A.col2, z, result);
}
NV_FORCE_INLINE Mat33V M33MulM33(const Mat33V& a, const Mat33V& b)
{
return Mat33V(M33MulV3(a,b.col0),M33MulV3(a,b.col1),M33MulV3(a,b.col2));
}
NV_FORCE_INLINE Mat33V M33Add(const Mat33V& a, const Mat33V& b)
{
return Mat33V(V3Add(a.col0,b.col0),V3Add(a.col1,b.col1),V3Add(a.col2,b.col2));
}
NV_FORCE_INLINE Mat33V M33Scale(const Mat33V& a, const FloatV& b)
{
return Mat33V(V3Scale(a.col0,b),V3Scale(a.col1,b),V3Scale(a.col2,b));
}
NV_FORCE_INLINE Mat33V M33Sub(const Mat33V& a, const Mat33V& b)
{
return Mat33V(V3Sub(a.col0,b.col0),V3Sub(a.col1,b.col1),V3Sub(a.col2,b.col2));
}
NV_FORCE_INLINE Mat33V M33Neg(const Mat33V& a)
{
return Mat33V(V3Neg(a.col0),V3Neg(a.col1),V3Neg(a.col2));
}
NV_FORCE_INLINE Mat33V M33Abs(const Mat33V& a)
{
return Mat33V(V3Abs(a.col0),V3Abs(a.col1),V3Abs(a.col2));
}
NV_FORCE_INLINE Mat33V M33Inverse(const Mat33V& a)
{
const BoolV tfft=BTFFT();
const BoolV tttf=BTTTF();
const FloatV zero=V3Zero();
const Vec3V cross01 = V3Cross(a.col0,a.col1);
const Vec3V cross12 = V3Cross(a.col1,a.col2);
const Vec3V cross20 = V3Cross(a.col2,a.col0);
const FloatV dot = V3Dot(cross01,a.col2);
const FloatV invDet = _mm_rcp_ps(dot);
const Vec3V mergeh = _mm_unpacklo_ps(cross12,cross01);
const Vec3V mergel = _mm_unpackhi_ps(cross12,cross01);
Vec3V colInv0 = _mm_unpacklo_ps(mergeh,cross20);
colInv0 = _mm_or_ps(_mm_andnot_ps(tttf, zero), _mm_and_ps(tttf, colInv0));
const Vec3V zppd=_mm_shuffle_ps(mergeh,cross20,_MM_SHUFFLE(3,0,0,2));
const Vec3V pbwp=_mm_shuffle_ps(cross20,mergeh,_MM_SHUFFLE(3,3,1,0));
const Vec3V colInv1=_mm_or_ps(_mm_andnot_ps(BTFFT(), pbwp), _mm_and_ps(BTFFT(), zppd));
const Vec3V xppd=_mm_shuffle_ps(mergel,cross20,_MM_SHUFFLE(3,0,0,0));
const Vec3V pcyp=_mm_shuffle_ps(cross20,mergel,_MM_SHUFFLE(3,1,2,0));
const Vec3V colInv2=_mm_or_ps(_mm_andnot_ps(tfft, pcyp), _mm_and_ps(tfft, xppd));
return Mat33V
(
_mm_mul_ps(colInv0,invDet),
_mm_mul_ps(colInv1,invDet),
_mm_mul_ps(colInv2,invDet)
);
}
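// (Added note) M33Inverse above uses the adjugate/cross-product identity: for M = [c0 c1 c2],
// the rows of M^-1 are (c1 x c2)/det, (c2 x c0)/det and (c0 x c1)/det, with
// det = (c0 x c1) . c2; the unpack/shuffle/select sequence just transposes those rows into
// columns. _mm_rcp_ps is an approximate (~12-bit) reciprocal, so the result is approximate.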
NV_FORCE_INLINE Mat33V M33Trnsps(const Mat33V& a)
{
return Mat33V
(
V3Merge(V3GetX(a.col0),V3GetX(a.col1),V3GetX(a.col2)),
V3Merge(V3GetY(a.col0),V3GetY(a.col1),V3GetY(a.col2)),
V3Merge(V3GetZ(a.col0),V3GetZ(a.col1),V3GetZ(a.col2))
);
}
NV_FORCE_INLINE Mat33V M33Identity()
{
return Mat33V
(
V3UnitX(),
V3UnitY(),
V3UnitZ()
);
}
NV_FORCE_INLINE Mat33V M33Diagonal(const Vec3VArg d)
{
const FloatV x = V3Mul(V3UnitX(), d);
const FloatV y = V3Mul(V3UnitY(), d);
const FloatV z = V3Mul(V3UnitZ(), d);
return Mat33V(x, y, z);
}
//////////////////////////////////
//MAT34V
//////////////////////////////////
NV_FORCE_INLINE Vec3V M34MulV3(const Mat34V& a, const Vec3V b)
{
const FloatV x=V3GetX(b);
const FloatV y=V3GetY(b);
const FloatV z=V3GetZ(b);
const Vec3V v0=V3Scale(a.col0,x);
const Vec3V v1=V3Scale(a.col1,y);
const Vec3V v2=V3Scale(a.col2,z);
const Vec3V v0PlusV1=V3Add(v0,v1);
const Vec3V v0PlusV1Plusv2=V3Add(v0PlusV1,v2);
return (V3Add(v0PlusV1Plusv2,a.col3));
}
NV_FORCE_INLINE Vec3V M34Mul33V3(const Mat34V& a, const Vec3V b)
{
const FloatV x=V3GetX(b);
const FloatV y=V3GetY(b);
const FloatV z=V3GetZ(b);
const Vec3V v0=V3Scale(a.col0,x);
const Vec3V v1=V3Scale(a.col1,y);
const Vec3V v2=V3Scale(a.col2,z);
const Vec3V v0PlusV1=V3Add(v0,v1);
return V3Add(v0PlusV1,v2);
}
NV_FORCE_INLINE Vec3V M34TrnspsMul33V3(const Mat34V& a, const Vec3V b)
{
const FloatV x=V3Dot(a.col0,b);
const FloatV y=V3Dot(a.col1,b);
const FloatV z=V3Dot(a.col2,b);
return V3Merge(x,y,z);
}
NV_FORCE_INLINE Mat34V M34MulM34(const Mat34V& a, const Mat34V& b)
{
return Mat34V(M34Mul33V3(a,b.col0), M34Mul33V3(a,b.col1),M34Mul33V3(a,b.col2),M34MulV3(a,b.col3));
}
NV_FORCE_INLINE Mat33V M34MulM33(const Mat34V& a, const Mat33V& b)
{
return Mat33V(M34Mul33V3(a,b.col0),M34Mul33V3(a,b.col1),M34Mul33V3(a,b.col2));
}
NV_FORCE_INLINE Mat33V M34Mul33MM34(const Mat34V& a, const Mat34V& b)
{
return Mat33V(M34Mul33V3(a,b.col0),M34Mul33V3(a,b.col1),M34Mul33V3(a,b.col2));
}
NV_FORCE_INLINE Mat34V M34Add(const Mat34V& a, const Mat34V& b)
{
return Mat34V(V3Add(a.col0,b.col0),V3Add(a.col1,b.col1),V3Add(a.col2,b.col2),V3Add(a.col3,b.col3));
}
NV_FORCE_INLINE Mat34V M34Inverse(const Mat34V& a)
{
Mat34V aInv;
const BoolV tfft=BTFFT();
const BoolV tttf=BTTTF();
const FloatV zero=V3Zero();
const Vec3V cross01 = V3Cross(a.col0,a.col1);
const Vec3V cross12 = V3Cross(a.col1,a.col2);
const Vec3V cross20 = V3Cross(a.col2,a.col0);
const FloatV dot = V3Dot(cross01,a.col2);
const FloatV invDet = _mm_rcp_ps(dot);
const Vec3V mergeh = _mm_unpacklo_ps(cross12,cross01);
const Vec3V mergel = _mm_unpackhi_ps(cross12,cross01);
Vec3V colInv0 = _mm_unpacklo_ps(mergeh,cross20);
colInv0 = _mm_or_ps(_mm_andnot_ps(tttf, zero), _mm_and_ps(tttf, colInv0));
const Vec3V zppd=_mm_shuffle_ps(mergeh,cross20,_MM_SHUFFLE(3,0,0,2));
const Vec3V pbwp=_mm_shuffle_ps(cross20,mergeh,_MM_SHUFFLE(3,3,1,0));
const Vec3V colInv1=_mm_or_ps(_mm_andnot_ps(BTFFT(), pbwp), _mm_and_ps(BTFFT(), zppd));
const Vec3V xppd=_mm_shuffle_ps(mergel,cross20,_MM_SHUFFLE(3,0,0,0));
const Vec3V pcyp=_mm_shuffle_ps(cross20,mergel,_MM_SHUFFLE(3,1,2,0));
const Vec3V colInv2=_mm_or_ps(_mm_andnot_ps(tfft, pcyp), _mm_and_ps(tfft, xppd));
aInv.col0=_mm_mul_ps(colInv0,invDet);
aInv.col1=_mm_mul_ps(colInv1,invDet);
aInv.col2=_mm_mul_ps(colInv2,invDet);
aInv.col3=M34Mul33V3(aInv,V3Neg(a.col3));
return aInv;
}
NV_FORCE_INLINE Mat33V M34Trnsps33(const Mat34V& a)
{
return Mat33V
(
V3Merge(V3GetX(a.col0),V3GetX(a.col1),V3GetX(a.col2)),
V3Merge(V3GetY(a.col0),V3GetY(a.col1),V3GetY(a.col2)),
V3Merge(V3GetZ(a.col0),V3GetZ(a.col1),V3GetZ(a.col2))
);
}
//////////////////////////////////
//MAT44V
//////////////////////////////////
NV_FORCE_INLINE Vec4V M44MulV4(const Mat44V& a, const Vec4V b)
{
const FloatV x=V4GetX(b);
const FloatV y=V4GetY(b);
const FloatV z=V4GetZ(b);
const FloatV w=V4GetW(b);
const Vec4V v0=V4Scale(a.col0,x);
const Vec4V v1=V4Scale(a.col1,y);
const Vec4V v2=V4Scale(a.col2,z);
const Vec4V v3=V4Scale(a.col3,w);
const Vec4V v0PlusV1=V4Add(v0,v1);
const Vec4V v0PlusV1Plusv2=V4Add(v0PlusV1,v2);
return (V4Add(v0PlusV1Plusv2,v3));
}
NV_FORCE_INLINE Vec4V M44TrnspsMulV4(const Mat44V& a, const Vec4V b)
{
NV_ALIGN(16, FloatV dotProdArray[4])=
{
V4Dot(a.col0,b),
V4Dot(a.col1,b),
V4Dot(a.col2,b),
V4Dot(a.col3,b)
};
return V4Merge(dotProdArray);
}
NV_FORCE_INLINE Mat44V M44MulM44(const Mat44V& a, const Mat44V& b)
{
return Mat44V(M44MulV4(a,b.col0),M44MulV4(a,b.col1),M44MulV4(a,b.col2),M44MulV4(a,b.col3));
}
NV_FORCE_INLINE Mat44V M44Add(const Mat44V& a, const Mat44V& b)
{
return Mat44V(V4Add(a.col0,b.col0),V4Add(a.col1,b.col1),V4Add(a.col2,b.col2),V4Add(a.col3,b.col3));
}
NV_FORCE_INLINE Mat44V M44Trnsps(const Mat44V& a)
{
const Vec4V v0 = _mm_unpacklo_ps(a.col0, a.col2);
const Vec4V v1 = _mm_unpackhi_ps(a.col0, a.col2);
const Vec4V v2 = _mm_unpacklo_ps(a.col1, a.col3);
const Vec4V v3 = _mm_unpackhi_ps(a.col1, a.col3);
return Mat44V( _mm_unpacklo_ps(v0, v2),_mm_unpackhi_ps(v0, v2),_mm_unpacklo_ps(v1, v3),_mm_unpackhi_ps(v1, v3));
}
NV_FORCE_INLINE Mat44V M44Inverse(const Mat44V& a)
{
__m128 minor0, minor1, minor2, minor3;
__m128 row0, row1, row2, row3;
__m128 det, tmp1;
tmp1=V4Zero();
row1=V4Zero();
row3=V4Zero();
row0=a.col0;
row1=_mm_shuffle_ps(a.col1,a.col1,_MM_SHUFFLE(1,0,3,2));
row2=a.col2;
row3=_mm_shuffle_ps(a.col3,a.col3,_MM_SHUFFLE(1,0,3,2));
tmp1 = _mm_mul_ps(row2, row3);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1);
minor0 = _mm_mul_ps(row1, tmp1);
minor1 = _mm_mul_ps(row0, tmp1);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E);
minor0 = _mm_sub_ps(_mm_mul_ps(row1, tmp1), minor0);
minor1 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor1);
minor1 = _mm_shuffle_ps(minor1, minor1, 0x4E);
tmp1 = _mm_mul_ps(row1, row2);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1);
minor0 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor0);
minor3 = _mm_mul_ps(row0, tmp1);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E);
minor0 = _mm_sub_ps(minor0, _mm_mul_ps(row3, tmp1));
minor3 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor3);
minor3 = _mm_shuffle_ps(minor3, minor3, 0x4E);
tmp1 = _mm_mul_ps(_mm_shuffle_ps(row1, row1, 0x4E), row3);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1);
row2 = _mm_shuffle_ps(row2, row2, 0x4E);
minor0 = _mm_add_ps(_mm_mul_ps(row2, tmp1), minor0);
minor2 = _mm_mul_ps(row0, tmp1);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E);
minor0 = _mm_sub_ps(minor0, _mm_mul_ps(row2, tmp1));
minor2 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor2);
minor2 = _mm_shuffle_ps(minor2, minor2, 0x4E);
tmp1 = _mm_mul_ps(row0, row1);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1);
minor2 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor2);
minor3 = _mm_sub_ps(_mm_mul_ps(row2, tmp1), minor3);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E);
minor2 = _mm_sub_ps(_mm_mul_ps(row3, tmp1), minor2);
minor3 = _mm_sub_ps(minor3, _mm_mul_ps(row2, tmp1));
tmp1 = _mm_mul_ps(row0, row3);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1);
minor1 = _mm_sub_ps(minor1, _mm_mul_ps(row2, tmp1));
minor2 = _mm_add_ps(_mm_mul_ps(row1, tmp1), minor2);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E);
minor1 = _mm_add_ps(_mm_mul_ps(row2, tmp1), minor1);
minor2 = _mm_sub_ps(minor2, _mm_mul_ps(row1, tmp1));
tmp1 = _mm_mul_ps(row0, row2);
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0xB1);
minor1 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor1);
minor3 = _mm_sub_ps(minor3, _mm_mul_ps(row1, tmp1));
tmp1 = _mm_shuffle_ps(tmp1, tmp1, 0x4E);
minor1 = _mm_sub_ps(minor1, _mm_mul_ps(row3, tmp1));
minor3 = _mm_add_ps(_mm_mul_ps(row1, tmp1), minor3);
det = _mm_mul_ps(row0, minor0);
det = _mm_add_ps(_mm_shuffle_ps(det, det, 0x4E), det);
det = _mm_add_ss(_mm_shuffle_ps(det, det, 0xB1), det);
tmp1 = _mm_rcp_ss(det);
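// (Added note) The disabled #if 0 branch below is one Newton-Raphson step (r' = 2r - d*r*r)
// that would refine the approximate _mm_rcp_ss reciprocal; as written, the raw ~12-bit
// estimate is simply splatted across all four lanes.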
#if 0
det = _mm_sub_ss(_mm_add_ss(tmp1, tmp1), _mm_mul_ss(det, _mm_mul_ss(tmp1, tmp1)));
det = _mm_shuffle_ps(det, det, 0x00);
#else
det= _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(0,0,0,0));
#endif
minor0 = _mm_mul_ps(det, minor0);
minor1 = _mm_mul_ps(det, minor1);
minor2 = _mm_mul_ps(det, minor2);
minor3 = _mm_mul_ps(det, minor3);
Mat44V invTrans(minor0,minor1,minor2,minor3);
return M44Trnsps(invTrans);
}
NV_FORCE_INLINE Vec4V V4LoadXYZW(const float& x, const float& y, const float& z, const float& w)
{
return _mm_set_ps(w, z, y, x);
}
/*
// AP: work in progress - use proper SSE intrinsics where possible
NV_FORCE_INLINE VecU16V V4U32PK(VecU32V a, VecU32V b)
{
VecU16V result;
result.m128_u16[0] = uint16_t(NvClamp<uint32_t>((a).m128_u32[0], 0, 0xFFFF));
result.m128_u16[1] = uint16_t(NvClamp<uint32_t>((a).m128_u32[1], 0, 0xFFFF));
result.m128_u16[2] = uint16_t(NvClamp<uint32_t>((a).m128_u32[2], 0, 0xFFFF));
result.m128_u16[3] = uint16_t(NvClamp<uint32_t>((a).m128_u32[3], 0, 0xFFFF));
result.m128_u16[4] = uint16_t(NvClamp<uint32_t>((b).m128_u32[0], 0, 0xFFFF));
result.m128_u16[5] = uint16_t(NvClamp<uint32_t>((b).m128_u32[1], 0, 0xFFFF));
result.m128_u16[6] = uint16_t(NvClamp<uint32_t>((b).m128_u32[2], 0, 0xFFFF));
result.m128_u16[7] = uint16_t(NvClamp<uint32_t>((b).m128_u32[3], 0, 0xFFFF));
return result;
}
*/
NV_FORCE_INLINE VecU32V V4U32Sel(const BoolV c, const VecU32V a, const VecU32V b)
{
return internalWindowsSimd::m128_I2F(_mm_or_si128(
_mm_andnot_si128(internalWindowsSimd::m128_F2I(c), internalWindowsSimd::m128_F2I(b)),
_mm_and_si128(internalWindowsSimd::m128_F2I(c), internalWindowsSimd::m128_F2I(a))
));
}
NV_FORCE_INLINE VecU32V V4U32or(VecU32V a, VecU32V b)
{
return internalWindowsSimd::m128_I2F(_mm_or_si128(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b)));
}
NV_FORCE_INLINE VecU32V V4U32and(VecU32V a, VecU32V b)
{
return internalWindowsSimd::m128_I2F(_mm_and_si128(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b)));
}
NV_FORCE_INLINE VecU32V V4U32Andc(VecU32V a, VecU32V b)
{
return internalWindowsSimd::m128_I2F(_mm_andnot_si128(internalWindowsSimd::m128_F2I(b), internalWindowsSimd::m128_F2I(a)));
}
/*
NV_FORCE_INLINE VecU16V V4U16Or(VecU16V a, VecU16V b)
{
return internalWindowsSimd::m128_I2F(_mm_or_si128(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b)));
}
*/
/*
NV_FORCE_INLINE VecU16V V4U16And(VecU16V a, VecU16V b)
{
return internalWindowsSimd::m128_I2F(_mm_and_si128(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b)));
}
*/
/*
NV_FORCE_INLINE VecU16V V4U16Andc(VecU16V a, VecU16V b)
{
return internalWindowsSimd::m128_I2F(_mm_andnot_si128(internalWindowsSimd::m128_F2I(b), internalWindowsSimd::m128_F2I(a)));
}
*/
NV_FORCE_INLINE VecU32V U4Load(const uint32_t i)
{
return (_mm_load1_ps((float*)&i));
}
NV_FORCE_INLINE VecU32V U4LoadU(const uint32_t* i)
{
return _mm_loadu_ps((float*)i);
}
NV_FORCE_INLINE VecU32V U4LoadA(const uint32_t* i)
{
VECMATHAOS_ASSERT(0==((size_t)i & 0x0f));
return _mm_load_ps((float*)i);
}
NV_FORCE_INLINE VecI32V I4Load(const int32_t i)
{
return (_mm_load1_ps((float*)&i));
}
NV_FORCE_INLINE VecI32V I4LoadU(const int32_t* i)
{
return _mm_loadu_ps((float*)i);
}
NV_FORCE_INLINE VecI32V I4LoadA(const int32_t* i)
{
return _mm_load_ps((float*)i);
}
NV_FORCE_INLINE VecI32V VecI32V_Add(const VecI32VArg a, const VecI32VArg b)
{
return internalWindowsSimd::m128_I2F(_mm_add_epi32(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b)));
}
NV_FORCE_INLINE VecI32V VecI32V_Sub(const VecI32VArg a, const VecI32VArg b)
{
return internalWindowsSimd::m128_I2F(_mm_sub_epi32(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b)));
}
NV_FORCE_INLINE BoolV VecI32V_IsGrtr(const VecI32VArg a, const VecI32VArg b)
{
return internalWindowsSimd::m128_I2F(_mm_cmpgt_epi32(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b)));
}
NV_FORCE_INLINE BoolV VecI32V_IsEq(const VecI32VArg a, const VecI32VArg b)
{
return internalWindowsSimd::m128_I2F(_mm_cmpeq_epi32(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b)));
}
NV_FORCE_INLINE VecI32V V4I32Sel(const BoolV c, const VecI32V a, const VecI32V b)
{
return V4U32Sel(c, a, b);
}
NV_FORCE_INLINE VecI32V VecI32V_Zero()
{
return V4Zero();
}
NV_FORCE_INLINE VecI32V VecI32V_One()
{
return I4Load(1);
}
NV_FORCE_INLINE VecI32V VecI32V_Two()
{
return I4Load(2);
}
NV_FORCE_INLINE VecI32V VecI32V_MinusOne()
{
return I4Load(-1);
}
NV_FORCE_INLINE VecU32V U4Zero()
{
return U4Load(0);
}
NV_FORCE_INLINE VecU32V U4One()
{
return U4Load(1);
}
NV_FORCE_INLINE VecU32V U4Two()
{
return U4Load(2);
}
NV_FORCE_INLINE VecI32V VecI32V_Sel(const BoolV c, const VecI32VArg a, const VecI32VArg b)
{
VECMATHAOS_ASSERT(_VecMathTests::allElementsEqualBoolV(c,BTTTT()) || _VecMathTests::allElementsEqualBoolV(c,BFFFF()));
return _mm_or_ps(_mm_andnot_ps(c, b), _mm_and_ps(c, a));
}
NV_FORCE_INLINE VecShiftV VecI32V_PrepareShift(const VecI32VArg shift)
{
VecShiftV preparedShift;
preparedShift.shift = VecI32V_Sel(BTFFF(), shift, VecI32V_Zero());
return preparedShift;
}
NV_FORCE_INLINE VecI32V VecI32V_LeftShift(const VecI32VArg a, const VecShiftVArg count)
{
return internalWindowsSimd::m128_I2F(_mm_sll_epi32(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(count.shift)));
}
NV_FORCE_INLINE VecI32V VecI32V_RightShift(const VecI32VArg a, const VecShiftVArg count)
{
return internalWindowsSimd::m128_I2F(_mm_srl_epi32(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(count.shift)));
}
NV_FORCE_INLINE VecI32V VecI32V_And(const VecI32VArg a, const VecI32VArg b)
{
return _mm_and_ps(a, b);
}
NV_FORCE_INLINE VecI32V VecI32V_Or(const VecI32VArg a, const VecI32VArg b)
{
return _mm_or_ps(a, b);
}
NV_FORCE_INLINE VecI32V VecI32V_GetX(const VecI32VArg a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(0,0,0,0));
}
NV_FORCE_INLINE VecI32V VecI32V_GetY(const VecI32VArg a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(1,1,1,1));
}
NV_FORCE_INLINE VecI32V VecI32V_GetZ(const VecI32VArg a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,2,2,2));
}
NV_FORCE_INLINE VecI32V VecI32V_GetW(const VecI32VArg a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3,3,3,3));
}
NV_FORCE_INLINE void NvI32_From_VecI32V(const VecI32VArg a, int32_t* i)
{
_mm_store_ss((float*)i,a);
}
NV_FORCE_INLINE VecI32V VecI32V_From_BoolV(const BoolVArg a)
{
return a;
}
NV_FORCE_INLINE VecU32V VecU32V_From_BoolV(const BoolVArg a)
{
return a;
}
NV_FORCE_INLINE VecI32V VecI32V_Merge(const VecI32VArg a, const VecI32VArg b, const VecI32VArg c, const VecI32VArg d)
{
return V4Merge(a, b, c, d);
}
/*
template<int a> NV_FORCE_INLINE VecI32V V4ISplat()
{
VecI32V result;
result.m128_i32[0] = a;
result.m128_i32[1] = a;
result.m128_i32[2] = a;
result.m128_i32[3] = a;
return result;
}
template<uint32_t a> NV_FORCE_INLINE VecU32V V4USplat()
{
VecU32V result;
result.m128_u32[0] = a;
result.m128_u32[1] = a;
result.m128_u32[2] = a;
result.m128_u32[3] = a;
return result;
}
*/
/*
NV_FORCE_INLINE void V4U16StoreAligned(VecU16V val, VecU16V* address)
{
*address = val;
}
*/
NV_FORCE_INLINE void V4U32StoreAligned(VecU32V val, VecU32V* address)
{
*address = val;
}
NV_FORCE_INLINE Vec4V V4Andc(const Vec4V a, const VecU32V b)
{
VecU32V result32(a);
result32 = V4U32Andc(result32, b);
return Vec4V(result32);
}
NV_FORCE_INLINE VecU32V V4IsGrtrV32u(const Vec4V a, const Vec4V b)
{
return V4IsGrtr(a, b);
}
NV_FORCE_INLINE VecU16V V4U16LoadAligned(VecU16V* addr)
{
return *addr;
}
NV_FORCE_INLINE VecU16V V4U16LoadUnaligned(VecU16V* addr)
{
return *addr;
}
// unsigned compares are not supported on x86
NV_FORCE_INLINE VecU16V V4U16CompareGt(VecU16V a, VecU16V b)
{
// _mm_cmpgt_epi16 doesn't work for unsigned values unfortunately
// return m128_I2F(_mm_cmpgt_epi16(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b)));
VecU16V result;
result.m128_u16[0] = uint16_t((a).m128_u16[0]>(b).m128_u16[0]);
result.m128_u16[1] = uint16_t((a).m128_u16[1]>(b).m128_u16[1]);
result.m128_u16[2] = uint16_t((a).m128_u16[2]>(b).m128_u16[2]);
result.m128_u16[3] = uint16_t((a).m128_u16[3]>(b).m128_u16[3]);
result.m128_u16[4] = uint16_t((a).m128_u16[4]>(b).m128_u16[4]);
result.m128_u16[5] = uint16_t((a).m128_u16[5]>(b).m128_u16[5]);
result.m128_u16[6] = uint16_t((a).m128_u16[6]>(b).m128_u16[6]);
result.m128_u16[7] = uint16_t((a).m128_u16[7]>(b).m128_u16[7]);
return result;
}
NV_FORCE_INLINE VecU16V V4I16CompareGt(VecU16V a, VecU16V b)
{
return internalWindowsSimd::m128_I2F(_mm_cmpgt_epi16(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b)));
}
NV_FORCE_INLINE Vec4V Vec4V_From_VecU32V(VecU32V a)
{
Vec4V result = V4LoadXYZW(float(a.m128_u32[0]), float(a.m128_u32[1]), float(a.m128_u32[2]), float(a.m128_u32[3]));
return result;
}
NV_FORCE_INLINE Vec4V Vec4V_From_VecI32V(VecI32V a)
{
return _mm_cvtepi32_ps(internalWindowsSimd::m128_F2I(a));
}
NV_FORCE_INLINE VecI32V VecI32V_From_Vec4V(Vec4V a)
{
return internalWindowsSimd::m128_I2F(_mm_cvttps_epi32(a));
}
NV_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecU32V(VecU32V a)
{
return Vec4V(a);
}
NV_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecI32V(VecI32V a)
{
return Vec4V(a);
}
NV_FORCE_INLINE VecU32V VecU32V_ReinterpretFrom_Vec4V(Vec4V a)
{
return VecU32V(a);
}
NV_FORCE_INLINE VecI32V VecI32V_ReinterpretFrom_Vec4V(Vec4V a)
{
return VecI32V(a);
}
template<int index> NV_FORCE_INLINE VecU32V V4U32SplatElement(VecU32V a)
{
return internalWindowsSimd::m128_I2F(_mm_shuffle_epi32(internalWindowsSimd::m128_F2I(a), _MM_SHUFFLE(index, index, index, index)));
}
template<int index> NV_FORCE_INLINE Vec4V V4SplatElement(Vec4V a)
{
return internalWindowsSimd::m128_I2F(_mm_shuffle_epi32(internalWindowsSimd::m128_F2I(a), _MM_SHUFFLE(index, index, index, index)));
}
template<int index> NV_FORCE_INLINE VecU16V V4U16SplatElement(VecU16V a)
{
VecU16V result = a; //AM: initializing to avoid nonsensical warning 4701 here with VC10.
for (int i = 0; i < 8; i ++)
result.m128_u16[i] = a.m128_u16[index];
return result;
}
template<int imm> NV_FORCE_INLINE VecI16V V4I16SplatImmediate()
{
VecI16V result;
result.m128_i16[0] = imm;
result.m128_i16[1] = imm;
result.m128_i16[2] = imm;
result.m128_i16[3] = imm;
result.m128_i16[4] = imm;
result.m128_i16[5] = imm;
result.m128_i16[6] = imm;
result.m128_i16[7] = imm;
return result;
}
template<uint16_t imm> NV_FORCE_INLINE VecU16V V4U16SplatImmediate()
{
VecU16V result;
result.m128_u16[0] = imm;
result.m128_u16[1] = imm;
result.m128_u16[2] = imm;
result.m128_u16[3] = imm;
result.m128_u16[4] = imm;
result.m128_u16[5] = imm;
result.m128_u16[6] = imm;
result.m128_u16[7] = imm;
return result;
}
NV_FORCE_INLINE VecU16V V4U16SubtractModulo(VecU16V a, VecU16V b)
{
return internalWindowsSimd::m128_I2F(_mm_sub_epi16(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b)));
}
NV_FORCE_INLINE VecU16V V4U16AddModulo(VecU16V a, VecU16V b)
{
return internalWindowsSimd::m128_I2F(_mm_add_epi16(internalWindowsSimd::m128_F2I(a), internalWindowsSimd::m128_F2I(b)));
}
NV_FORCE_INLINE VecU32V V4U16GetLo16(VecU16V a)
{
VecU32V result;
result.m128_u32[0] = a.m128_u16[0];
result.m128_u32[1] = a.m128_u16[2];
result.m128_u32[2] = a.m128_u16[4];
result.m128_u32[3] = a.m128_u16[6];
return result;
}
NV_FORCE_INLINE VecU32V V4U16GetHi16(VecU16V a)
{
VecU32V result;
result.m128_u32[0] = a.m128_u16[1];
result.m128_u32[1] = a.m128_u16[3];
result.m128_u32[2] = a.m128_u16[5];
result.m128_u32[3] = a.m128_u16[7];
return result;
}
NV_FORCE_INLINE VecU32V VecU32VLoadXYZW(uint32_t x, uint32_t y, uint32_t z, uint32_t w)
{
VecU32V result;
result.m128_u32[0] = x;
result.m128_u32[1] = y;
result.m128_u32[2] = z;
result.m128_u32[3] = w;
return result;
}
NV_FORCE_INLINE Vec4V V4ConvertFromI32V(const VecI32V in)
{
return _mm_cvtepi32_ps(internalWindowsSimd::m128_F2I(in));
}
//not used
/*
NV_FORCE_INLINE Vec4V V4LoadAligned(Vec4V* addr)
{
return *addr;
}
*/
/*
NV_FORCE_INLINE Vec4V V4LoadUnaligned(Vec4V* addr)
{
return Vec4V_From_F32Array((float*)addr);
}
*/
/*
NV_FORCE_INLINE Vec4V V4Ceil(const Vec4V a)
{
return Vec4V_From_XYZW(NvCeil(a.m128_f32[0]), NvCeil(a.m128_f32[1]), NvCeil(a.m128_f32[2]), NvCeil(a.m128_f32[3]));
}
*/
/*
NV_FORCE_INLINE Vec4V V4Floor(const Vec4V a)
{
return Vec4V_From_XYZW(NvFloor(a.m128_f32[0]), NvFloor(a.m128_f32[1]), NvFloor(a.m128_f32[2]), NvFloor(a.m128_f32[3]));
}
*/
/*
NV_FORCE_INLINE VecU32V V4ConvertToU32VSaturate(const Vec4V a, uint32_t power)
{
NV_ASSERT(power == 0 && "Non-zero power not supported in convertToU32VSaturate");
NV_UNUSED(power); // prevent warning in release builds
float ffffFFFFasFloat = float(0xFFFF0000);
VecU32V result;
result.m128_u32[0] = uint32_t(NvClamp<float>((a).m128_f32[0], 0.0f, ffffFFFFasFloat));
result.m128_u32[1] = uint32_t(NvClamp<float>((a).m128_f32[1], 0.0f, ffffFFFFasFloat));
result.m128_u32[2] = uint32_t(NvClamp<float>((a).m128_f32[2], 0.0f, ffffFFFFasFloat));
result.m128_u32[3] = uint32_t(NvClamp<float>((a).m128_f32[3], 0.0f, ffffFFFFasFloat));
return result;
}
*/
#endif //PS_WINDOWS_INLINE_AOS_H
| 96,498 | C | 28.73775 | 143 | 0.641329 |
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/platform/windows/NsWindowsIntrinsics.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef NV_WINDOWS_NSWINDOWSINTRINSICS_H
#define NV_WINDOWS_NSWINDOWSINTRINSICS_H
#include "Ns.h"
#include "NvAssert.h"
// this file is for internal intrinsics - that is, intrinsics that are used in
// cross platform code but do not appear in the API
#if !(NV_WINDOWS_FAMILY || NV_WINRT)
#error "This file should only be included by Windows or WIN8ARM builds!!"
#endif
#pragma warning(push)
//'symbol' is not defined as a preprocessor macro, replacing with '0' for 'directives'
#pragma warning(disable : 4668)
#if NV_VC == 10
#pragma warning(disable : 4987) // nonstandard extension used: 'throw (...)'
#endif
#include <intrin.h>
#pragma warning(pop)
#pragma warning(push)
#pragma warning(disable : 4985) // 'symbol name': attributes not present on previous declaration
#include <math.h>
#pragma warning(pop)
#include <float.h>
#include <mmintrin.h>
#pragma intrinsic(_BitScanForward)
#pragma intrinsic(_BitScanReverse)
namespace nvidia
{
namespace shdfnd
{
/*
* Implements a memory barrier
*/
NV_FORCE_INLINE void memoryBarrier()
{
_ReadWriteBarrier();
/* long Barrier;
__asm {
xchg Barrier, eax
}*/
}
/*!
Returns the index of the highest set bit. Not valid for zero arg.
*/
NV_FORCE_INLINE uint32_t highestSetBitUnsafe(uint32_t v)
{
unsigned long retval;
_BitScanReverse(&retval, v);
return retval;
}
/*!
Returns the index of the lowest set bit. Undefined for zero arg.
*/
NV_FORCE_INLINE uint32_t lowestSetBitUnsafe(uint32_t v)
{
unsigned long retval;
_BitScanForward(&retval, v);
return retval;
}
/*!
Returns the number of leading zeros in v. Returns 32 for v=0.
*/
NV_FORCE_INLINE uint32_t countLeadingZeros(uint32_t v)
{
if(v)
{
unsigned long bsr = (unsigned long)-1;
_BitScanReverse(&bsr, v);
return 31 - bsr;
}
else
return 32;
}
/*!
Prefetch the cache line containing \c ptr+offset.
*/
#if !NV_ARM
NV_FORCE_INLINE void prefetchLine(const void* ptr, uint32_t offset = 0)
{
// cache line on X86/X64 is 64-bytes so a 128-byte prefetch would require 2 prefetches.
// However, we can only dispatch a limited number of prefetch instructions so we opt to prefetch just 1 cache line
/*_mm_prefetch(((const char*)ptr + offset), _MM_HINT_T0);*/
// We get slightly better performance prefetching to non-temporal addresses instead of all cache levels
_mm_prefetch(((const char*)ptr + offset), _MM_HINT_NTA);
}
#else
NV_FORCE_INLINE void prefetchLine(const void* ptr, uint32_t offset = 0)
{
// ARM does have a 32-byte cache line size
__prefetch(((const char*)ptr + offset));
}
#endif
/*!
Prefetch \c count bytes starting at \c ptr.
*/
#if !NV_ARM
NV_FORCE_INLINE void prefetch(const void* ptr, uint32_t count = 1)
{
const char* cp = (char*)ptr;
uint64_t p = size_t(ptr);
uint64_t startLine = p >> 6, endLine = (p + count - 1) >> 6;
uint64_t lines = endLine - startLine + 1;
do
{
prefetchLine(cp);
cp += 64;
} while(--lines);
}
#else
NV_FORCE_INLINE void prefetch(const void* ptr, uint32_t count = 1)
{
const char* cp = (char*)ptr;
uint32_t p = size_t(ptr);
uint32_t startLine = p >> 5, endLine = (p + count - 1) >> 5;
uint32_t lines = endLine - startLine + 1;
do
{
prefetchLine(cp);
cp += 32;
} while(--lines);
}
#endif
//! \brief platform-specific reciprocal
NV_CUDA_CALLABLE NV_FORCE_INLINE float recipFast(float a)
{
return 1.0f / a;
}
//! \brief platform-specific fast reciprocal square root
NV_CUDA_CALLABLE NV_FORCE_INLINE float recipSqrtFast(float a)
{
return 1.0f / ::sqrtf(a);
}
//! \brief platform-specific floor
NV_CUDA_CALLABLE NV_FORCE_INLINE float floatFloor(float x)
{
return ::floorf(x);
}
#define NS_EXPECT_TRUE(x) x
#define NS_EXPECT_FALSE(x) x
} // namespace shdfnd
} // namespace nvidia
#endif // #ifndef NV_WINDOWS_NSWINDOWSINTRINSICS_H
| 5,588 | C | 28.415789 | 118 | 0.700608 |
NVIDIA-Omniverse/PhysX/blast/source/shared/utils/AssetGenerator.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef ASSETGENERATOR_H
#define ASSETGENERATOR_H
#include "NvBlast.h"
#include <vector>
#include <cmath>
class GeneratorAsset
{
public:
struct Vec3
{
float x, y, z;
Vec3() {}
Vec3(float x_, float y_, float z_) : x(x_), y(y_), z(z_) {}
Vec3 operator * (float v) const { return Vec3(x * v, y * v, z * v); }
Vec3 operator * (const Vec3& v) const { return Vec3(x * v.x, y * v.y, z * v.z); }
Vec3 operator + (const Vec3& v) const { return Vec3(x + v.x, y + v.y, z + v.z); }
Vec3 operator - (const Vec3& v) const { return Vec3(x - v.x, y - v.y, z - v.z); }
Vec3 getNormalized() const
{
return (*this)*(1.0f / sqrt(x*x + y*y + z*z));
}
};
struct BlastChunkCube
{
BlastChunkCube(Vec3 position_, Vec3 extents_)
{
position = position_;
extents = extents_;
}
Vec3 position;
Vec3 extents;
};
std::vector<NvBlastChunkDesc> solverChunks;
std::vector<NvBlastBondDesc> solverBonds;
std::vector<BlastChunkCube> chunks;
Vec3 extents;
};
class CubeAssetGenerator
{
public:
struct DepthInfo
{
DepthInfo(GeneratorAsset::Vec3 slices = GeneratorAsset::Vec3(1, 1, 1), NvBlastChunkDesc::Flags flag_ = NvBlastChunkDesc::Flags::NoFlags)
: slicesPerAxis(slices), flag(flag_) {}
GeneratorAsset::Vec3 slicesPerAxis;
NvBlastChunkDesc::Flags flag;
};
enum BondFlags
{
NO_BONDS = 0,
X_BONDS = 1 << 0,
Y_BONDS = 1 << 1,
Z_BONDS = 1 << 2,
X_PLUS_WORLD_BONDS = 1 << 3,
X_MINUS_WORLD_BONDS = 1 << 4,
Y_PLUS_WORLD_BONDS = 1 << 5,
Y_MINUS_WORLD_BONDS = 1 << 6,
Z_PLUS_WORLD_BONDS = 1 << 7,
Z_MINUS_WORLD_BONDS = 1 << 8,
ALL_INTERNAL_BONDS = X_BONDS | Y_BONDS | Z_BONDS
};
struct Settings
{
Settings() : bondFlags(BondFlags::ALL_INTERNAL_BONDS) {}
std::vector<DepthInfo> depths;
GeneratorAsset::Vec3 extents;
BondFlags bondFlags;
};
static void generate(GeneratorAsset& asset, const Settings& settings);
private:
static void fillBondDesc(std::vector<NvBlastBondDesc>& bondDescs, uint32_t id0, uint32_t id1, GeneratorAsset::Vec3 pos0, GeneratorAsset::Vec3 pos1, GeneratorAsset::Vec3 size, float area);
};
inline CubeAssetGenerator::BondFlags operator | (CubeAssetGenerator::BondFlags a, CubeAssetGenerator::BondFlags b)
{
return static_cast<CubeAssetGenerator::BondFlags>(static_cast<int>(a) | static_cast<int>(b));
}
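// Usage sketch (added for illustration; the values below are arbitrary and not part of the
// original header): builds a cube asset with one root chunk and a 4x4x4 grid of support
// chunks, bonded internally along all three axes.
/*
GeneratorAsset asset;
CubeAssetGenerator::Settings settings;
settings.extents = GeneratorAsset::Vec3(2.0f, 2.0f, 2.0f);
settings.depths.push_back(CubeAssetGenerator::DepthInfo(GeneratorAsset::Vec3(1, 1, 1)));  // depth 0: root
settings.depths.push_back(CubeAssetGenerator::DepthInfo(GeneratorAsset::Vec3(4, 4, 4),
    NvBlastChunkDesc::Flags::SupportFlag));                                               // depth 1: support grid
settings.bondFlags = CubeAssetGenerator::BondFlags::ALL_INTERNAL_BONDS;
CubeAssetGenerator::generate(asset, settings);
// asset.solverChunks and asset.solverBonds can then be used to fill an NvBlastAssetDesc.
*/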
#endif // #ifndef ASSETGENERATOR_H
| 4,248 | C | 33.266129 | 191 | 0.64807 |
NVIDIA-Omniverse/PhysX/blast/source/shared/utils/AssetGenerator.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "AssetGenerator.h"
#include <cstring>
void CubeAssetGenerator::generate(GeneratorAsset& asset, const Settings& settings)
{
// cleanup
asset.solverChunks.clear();
asset.solverBonds.clear();
asset.chunks.clear();
// initial params
std::vector<uint32_t> depthStartIDs;
std::vector<GeneratorAsset::Vec3> depthSlicesPerAxisTotal;
uint32_t currentID = 0;
GeneratorAsset::Vec3 extents = settings.extents;
asset.extents = extents;
// Iterate over depths and create children
for (uint32_t depth = 0; depth < settings.depths.size(); depth++)
{
GeneratorAsset::Vec3 slicesPerAxis = settings.depths[depth].slicesPerAxis;
GeneratorAsset::Vec3 slicesPerAxisTotal = (depth == 0) ? slicesPerAxis : slicesPerAxis * (depthSlicesPerAxisTotal[depth - 1]);
depthSlicesPerAxisTotal.push_back(slicesPerAxisTotal);
depthStartIDs.push_back(currentID);
extents.x /= slicesPerAxis.x;
extents.y /= slicesPerAxis.y;
extents.z /= slicesPerAxis.z;
for (uint32_t z = 0; z < (uint32_t)slicesPerAxisTotal.z; ++z)
{
uint32_t parent_z = z / (uint32_t)slicesPerAxis.z;
for (uint32_t y = 0; y < (uint32_t)slicesPerAxisTotal.y; ++y)
{
uint32_t parent_y = y / (uint32_t)slicesPerAxis.y;
for (uint32_t x = 0; x < (uint32_t)slicesPerAxisTotal.x; ++x)
{
uint32_t parent_x = x / (uint32_t)slicesPerAxis.x;
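// (Added comment) Parent index = start of the previous depth + linear (x-major, then y,
// then z) index of the parent cell in that depth's grid; the root depth has no parent.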
uint32_t parentID = depth == 0 ? UINT32_MAX :
depthStartIDs[depth - 1] + parent_x + (uint32_t)depthSlicesPerAxisTotal[depth - 1].x*(parent_y + (uint32_t)depthSlicesPerAxisTotal[depth - 1].y*parent_z);
GeneratorAsset::Vec3 position;
position.x = ((float)x - (slicesPerAxisTotal.x / 2) + 0.5f) * extents.x;
position.y = ((float)y - (slicesPerAxisTotal.y / 2) + 0.5f) * extents.y;
position.z = ((float)z - (slicesPerAxisTotal.z / 2) + 0.5f) * extents.z;
NvBlastChunkDesc chunkDesc;
memcpy(chunkDesc.centroid, &position.x, 3 * sizeof(float));
chunkDesc.volume = extents.x * extents.y * extents.z;
chunkDesc.flags = settings.depths[depth].flag;
chunkDesc.userData = currentID++;
chunkDesc.parentChunkDescIndex = parentID;
asset.solverChunks.push_back(chunkDesc);
if (settings.depths[depth].flag & NvBlastChunkDesc::Flags::SupportFlag)
{
// Internal bonds
// x-neighbor
if (x > 0 && (settings.bondFlags & BondFlags::X_BONDS))
{
GeneratorAsset::Vec3 xNeighborPosition = position - GeneratorAsset::Vec3(extents.x, 0, 0);
uint32_t neighborID = chunkDesc.userData - 1;
fillBondDesc(asset.solverBonds, chunkDesc.userData, neighborID, position, xNeighborPosition, extents, extents.y * extents.z);
}
// y-neighbor
if (y > 0 && (settings.bondFlags & BondFlags::Y_BONDS))
{
GeneratorAsset::Vec3 yNeighborPosition = position - GeneratorAsset::Vec3(0, extents.y, 0);
uint32_t neighborID = chunkDesc.userData - (uint32_t)slicesPerAxisTotal.x;
fillBondDesc(asset.solverBonds, chunkDesc.userData, neighborID, position, yNeighborPosition, extents, extents.z * extents.x);
}
// z-neighbor
if (z > 0 && (settings.bondFlags & BondFlags::Z_BONDS))
{
GeneratorAsset::Vec3 zNeighborPosition = position - GeneratorAsset::Vec3(0, 0, extents.z);
uint32_t neighborID = chunkDesc.userData - (uint32_t)slicesPerAxisTotal.x*(uint32_t)slicesPerAxisTotal.y;
fillBondDesc(asset.solverBonds, chunkDesc.userData, neighborID, position, zNeighborPosition, extents, extents.x * extents.y);
}
// World bonds (only one per chunk is enough, otherwise they will be removed as duplicated, thus 'else if')
// -x world bond
if (x == 0 && (settings.bondFlags & BondFlags::X_MINUS_WORLD_BONDS))
{
GeneratorAsset::Vec3 xNeighborPosition = position - GeneratorAsset::Vec3(extents.x, 0, 0);
fillBondDesc(asset.solverBonds, chunkDesc.userData, UINT32_MAX, position, xNeighborPosition, extents, extents.y * extents.z);
}
// +x world bond
else if (x == slicesPerAxisTotal.x - 1 && (settings.bondFlags & BondFlags::X_PLUS_WORLD_BONDS))
{
GeneratorAsset::Vec3 xNeighborPosition = position + GeneratorAsset::Vec3(extents.x, 0, 0);
fillBondDesc(asset.solverBonds, chunkDesc.userData, UINT32_MAX, position, xNeighborPosition, extents, extents.y * extents.z);
}
// -y world bond
else if (y == 0 && (settings.bondFlags & BondFlags::Y_MINUS_WORLD_BONDS))
{
GeneratorAsset::Vec3 yNeighborPosition = position - GeneratorAsset::Vec3(0, extents.y, 0);
fillBondDesc(asset.solverBonds, chunkDesc.userData, UINT32_MAX, position, yNeighborPosition, extents, extents.z * extents.x);
}
// +y world bond
else if (y == slicesPerAxisTotal.y - 1 && (settings.bondFlags & BondFlags::Y_PLUS_WORLD_BONDS))
{
GeneratorAsset::Vec3 yNeighborPosition = position + GeneratorAsset::Vec3(0, extents.y, 0);
fillBondDesc(asset.solverBonds, chunkDesc.userData, UINT32_MAX, position, yNeighborPosition, extents, extents.z * extents.x);
}
// -z world bond
else if (z == 0 && (settings.bondFlags & BondFlags::Z_MINUS_WORLD_BONDS))
{
GeneratorAsset::Vec3 zNeighborPosition = position - GeneratorAsset::Vec3(0, 0, extents.z);
fillBondDesc(asset.solverBonds, chunkDesc.userData, UINT32_MAX, position, zNeighborPosition, extents, extents.x * extents.y);
}
// +z world bond
else if (z == slicesPerAxisTotal.z - 1 && (settings.bondFlags & BondFlags::Z_PLUS_WORLD_BONDS))
{
GeneratorAsset::Vec3 zNeighborPosition = position + GeneratorAsset::Vec3(0, 0, extents.z);
fillBondDesc(asset.solverBonds, chunkDesc.userData, UINT32_MAX, position, zNeighborPosition, extents, extents.x * extents.y);
}
}
asset.chunks.push_back(GeneratorAsset::BlastChunkCube(position, extents));
}
}
}
}
// Reorder chunks
std::vector<uint32_t> chunkReorderMap(asset.solverChunks.size());
std::vector<char> scratch(asset.solverChunks.size() * sizeof(NvBlastChunkDesc));
NvBlastBuildAssetDescChunkReorderMap(chunkReorderMap.data(), asset.solverChunks.data(), (uint32_t)asset.solverChunks.size(), scratch.data(), nullptr);
std::vector<GeneratorAsset::BlastChunkCube> chunksTemp = asset.chunks;
for (uint32_t i = 0; i < chunkReorderMap.size(); ++i)
{
asset.chunks[chunkReorderMap[i]] = chunksTemp[i];
}
NvBlastApplyAssetDescChunkReorderMapInPlace(asset.solverChunks.data(), (uint32_t)asset.solverChunks.size(), asset.solverBonds.data(), (uint32_t)asset.solverBonds.size(), chunkReorderMap.data(), true, scratch.data(), nullptr);
}
void CubeAssetGenerator::fillBondDesc(std::vector<NvBlastBondDesc>& bondDescs, uint32_t id0, uint32_t id1, GeneratorAsset::Vec3 pos0, GeneratorAsset::Vec3 pos1, GeneratorAsset::Vec3 size, float area)
{
NV_UNUSED(size);
NvBlastBondDesc bondDesc = NvBlastBondDesc();
bondDesc.chunkIndices[0] = id0;
bondDesc.chunkIndices[1] = id1;
bondDesc.bond.area = area;
GeneratorAsset::Vec3 centroid = (pos0 + pos1) * 0.5f;
bondDesc.bond.centroid[0] = centroid.x;
bondDesc.bond.centroid[1] = centroid.y;
bondDesc.bond.centroid[2] = centroid.z;
GeneratorAsset::Vec3 normal = (pos0 - pos1).getNormalized();
bondDesc.bond.normal[0] = normal.x;
bondDesc.bond.normal[1] = normal.y;
bondDesc.bond.normal[2] = normal.z;
bondDescs.push_back(bondDesc);
}
| 10,612 | C++ | 54.565445 | 229 | 0.595929 |
NVIDIA-Omniverse/PhysX/blast/include/extensions/authoring/NvBlastExtAuthoringFractureTool.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
//! @brief Defines the API for the NvBlastExtAuthoring blast sdk extension's FractureTool
#ifndef NVBLASTAUTHORINGFRACTURETOOL_H
#define NVBLASTAUTHORINGFRACTURETOOL_H
#include "NvBlastExtAuthoringTypes.h"
namespace Nv
{
namespace Blast
{
class SpatialAccelerator;
class Triangulator;
class Mesh;
class CutoutSet;
/*
Transform used for chunk scaling (uniform scale + translation only)
*/
struct TransformST
{
NvcVec3 t; // Translation
float s; // Uniform scale
static TransformST identity() { return {{0.0f, 0.0f, 0.0f}, 1.0f}; }
/* Point and vector transformations. Note, normals are invariant (up to normalization) under TransformST transformations. */
NvcVec3 transformPos(const NvcVec3& p) const { return {s * p.x + t.x, s * p.y + t.y, s * p.z + t.z}; }
NvcVec3 transformDir(const NvcVec3& d) const { return {s * d.x, s * d.y, s * d.z}; }
NvcVec3 invTransformPos(const NvcVec3& p) const { return {(p.x - t.x) / s, (p.y - t.y) / s, (p.z - t.z) / s}; }
NvcVec3 invTransformDir(const NvcVec3& d) const { return {d.x / s, d.y / s, d.z / s}; }
};
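// (Added note) transformPos/invTransformPos are exact inverses up to float rounding, since the
// transform is just a uniform scale s followed by a translation t; directions only scale by s,
// which is why normals are invariant up to normalization, as stated above.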
/*
Chunk data; chunks with parentChunkId == -1 are the source meshes.
*/
struct ChunkInfo
{
ChunkInfo();
enum ChunkFlags
{
NO_FLAGS = 0,
APPROXIMATE_BONDING = 1 // Created by island splitting, chunk merging, etc.; such chunks should be checked for inexact bonds
};
protected:
/**
* The mesh is transformed to fit within a unit cube centered at the origin.
* This transform puts the mesh back into its original space.
* These fields are protected so that only an authoring class can access them.
* It is important that the tmToWorld be set based upon the mesh bounds and parent tmToWorld.
*/
TransformST tmToWorld;
Mesh* meshData;
/**
* Parent ID is set to this value initially, as opposed to -1 (which is a valid parent ID denoting "no parent")
*/
enum { UninitializedID = INT32_MIN };
public:
int32_t parentChunkId;
int32_t chunkId;
uint32_t flags;
bool isLeaf;
bool isChanged;
const TransformST& getTmToWorld() const { return tmToWorld; }
Mesh* getMesh() const { return meshData; }
};
inline ChunkInfo::ChunkInfo() :
tmToWorld(TransformST::identity()),
meshData(nullptr),
parentChunkId(UninitializedID),
chunkId(-1),
flags(NO_FLAGS),
isLeaf(false),
isChanged(true)
{
}
/**
Abstract base class for user-defined random value generator.
*/
class RandomGeneratorBase
{
public:
// Generates uniformly distributed value in [0, 1] range.
virtual float getRandomValue() = 0;
// Seeds random value generator
virtual void seed(int32_t seed) = 0;
virtual ~RandomGeneratorBase(){};
};
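/*
Example (illustrative sketch, not part of the original header): a minimal RandomGeneratorBase
implementation backed by std::mt19937. The class name is a placeholder; requires <random>.

    class SimpleRandomGenerator : public Nv::Blast::RandomGeneratorBase
    {
    public:
        float getRandomValue() override
        {
            return std::uniform_real_distribution<float>(0.0f, 1.0f)(m_engine);
        }
        void seed(int32_t seedValue) override
        {
            m_engine.seed(static_cast<uint32_t>(seedValue));
        }
    private:
        std::mt19937 m_engine;
    };
*/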
/*
Noise fracturing configuration for chunks' faces
*/
struct NoiseConfiguration
{
/**
Noisy slicing configuration:
Amplitude of cutting surface noise. If it is 0, noise is disabled.
*/
float amplitude = 0.f;
/**
Frequency of cutting surface noise.
*/
float frequency = 1.f;
/**
Octave number in slicing surface noise.
*/
uint32_t octaveNumber = 1;
/**
Sampling interval for surface grid.
*/
NvcVec3 samplingInterval = { 1, 1, 1 };
};
/*
Slicing fracturing configuration
*/
struct SlicingConfiguration
{
/**
Number of slices in each direction
*/
int32_t x_slices = 1, y_slices = 1, z_slices = 1;
/**
Offset variation, value in [0, 1]
*/
float offset_variations = 0.f;
/**
Angle variation, value in [0, 1]
*/
float angle_variations = 0.f;
/*
Noise parameters for faces between sliced chunks
*/
NoiseConfiguration noise;
};
/**
Cutout fracturing configuration
*/
struct CutoutConfiguration
{
/**
Set of grouped convex loop patterns for cutout in normal direction.
Not required for PLANE_ONLY mode
*/
CutoutSet* cutoutSet = nullptr;
/**
Transform for initial pattern position and orientation.
By default the 2D pattern lies in the XY plane (Y is up) and the center of the pattern is (0, 0)
*/
NvcTransform transform = {{0, 0, 0, 1}, {0, 0, 0}};
/**
Scale for pattern. Unscaled pattern has size (1, 1).
For a negative scale the pattern will be placed at the center of the chunk and scaled by the maximum distance between
points of its AABB
*/
NvcVec2 scale = { -1, -1 };
/**
Conic aperture in degrees; for a cylindrical cutout set it to 0.
*/
float aperture = 0.f;
/**
If relative transform is set, the position is a displacement vector from the chunk's center. Otherwise it is measured
from the global origin.
*/
bool isRelativeTransform = true;
/**
Add generated faces to the same smoothing group as the original face without noise
*/
bool useSmoothing = false;
/**
Noise parameters for cutout surface, see NoiseConfiguration.
*/
NoiseConfiguration noise;
};
/**
Class for voronoi sites generation inside supplied mesh.
*/
class VoronoiSitesGenerator
{
public:
virtual ~VoronoiSitesGenerator() {}
/**
Release VoronoiSitesGenerator memory
*/
virtual void release() = 0;
/**
Set base fracture mesh
*/
virtual void setBaseMesh(const Mesh* mesh) = 0;
/**
Access to generated voronoi sites.
\param[out] sites Pointer to generated voronoi sites
\return Count of generated voronoi sites.
*/
virtual uint32_t getVoronoiSites(const NvcVec3*& sites) = 0;
/**
Add site in particular point
\param[in] site Site coordinates
*/
virtual void addSite(const NvcVec3& site) = 0;
/**
Uniformly generate sites inside the mesh
\param[in] numberOfSites Number of generated sites
*/
virtual void uniformlyGenerateSitesInMesh(uint32_t numberOfSites) = 0;
/**
Generate sites in clustered fashion
\param[in] numberOfClusters Number of generated clusters
\param[in] sitesPerCluster Number of sites in each cluster
\param[in] clusterRadius Voronoi cells cluster radius
*/
virtual void clusteredSitesGeneration(uint32_t numberOfClusters, uint32_t sitesPerCluster, float clusterRadius) = 0;
/**
Radial pattern of sites generation
\param[in] center Center of generated pattern
\param[in] normal Normal to plane in which sites are generated
\param[in] radius Pattern radius
\param[in] angularSteps Number of angular steps
\param[in] radialSteps Number of radial steps
\param[in] angleOffset Angle offset at each radial step
\param[in] variability Randomness of sites distribution
*/
virtual void radialPattern(const NvcVec3& center, const NvcVec3& normal, float radius, int32_t angularSteps,
int32_t radialSteps, float angleOffset = 0.0f, float variability = 0.0f) = 0;
/**
Generate sites inside sphere
\param[in] count Count of generated sites
\param[in] radius Radius of sphere
\param[in] center Center of sphere
*/
virtual void generateInSphere(const uint32_t count, const float radius, const NvcVec3& center) = 0;
/**
Set stencil mesh. With a stencil mesh, sites are generated only inside both the fracture and stencil meshes.
\param[in] stencil Stencil mesh.
*/
virtual void setStencil(const Mesh* stencil) = 0;
/**
Removes stencil mesh
*/
virtual void clearStencil() = 0;
/**
Deletes sites inside supplied sphere
\param[in] radius Radius of sphere
\param[in] center Center of sphere
\param[in] eraserProbability Probability of removing some particular site
*/
virtual void deleteInSphere(const float radius, const NvcVec3& center, const float eraserProbability = 1) = 0;
};
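/*
Example (illustrative sketch, not part of the original header): generating uniformly distributed voronoi
sites. 'mesh' and 'rng' are assumed to be valid objects created by the caller; the factory function is
declared in NvBlastExtAuthoring.h.

    Nv::Blast::VoronoiSitesGenerator* gen = NvBlastExtAuthoringCreateVoronoiSitesGenerator(mesh, rng);
    gen->uniformlyGenerateSitesInMesh(64);                  // scatter 64 sites inside the mesh volume
    const NvcVec3* sites = nullptr;
    const uint32_t siteCount = gen->getVoronoiSites(sites); // read back the generated sites
    // ... pass 'sites' / 'siteCount' to FractureTool::voronoiFracturing() ...
    gen->release();
*/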
/**
FractureTool class provides methods to fracture provided mesh and generate Blast asset data
*/
class FractureTool
{
public:
virtual ~FractureTool() {}
/**
Release FractureTool memory
*/
virtual void release() = 0;
/**
Reset FractureTool state.
*/
virtual void reset() = 0;
/**
Set input meshes which will be fractured, FractureTool will be reset.
If ids != nullptr, it must point to an array of length meshSizes.
Each mesh will be assigned to a chunk with ID given by the corresponding element in ids.
If the corresponding element is negative, or ids is NULL, then the chunk will be assigned
an arbitrary (but currently unused) ID.
Returns true iff all meshes were assigned chunks with valid IDs.
*/
virtual bool setSourceMeshes(Mesh const * const * meshes, uint32_t meshesSize, const int32_t* ids = nullptr) = 0;
/**
Set chunk mesh, parentId should be valid, return id of new chunk.
*/
virtual int32_t setChunkMesh(const Mesh* mesh, int32_t parentId, int32_t chunkId = -1) = 0;
/**
Set the material id to use for new interior faces. Defaults to kMaterialInteriorId
*/
virtual void setInteriorMaterialId(int32_t materialId) = 0;
/**
Gets the material id to use for new interior faces
*/
virtual int32_t getInteriorMaterialId() const = 0;
/**
Replaces a material id on faces with a new one
*/
virtual void replaceMaterialId(int32_t oldMaterialId, int32_t newMaterialId) = 0;
/**
Get chunk mesh in polygonal representation. User's code should release it after usage.
This function welds vertices based upon vertex position and normal. If splitUVs == true,
UV coordinates are also considered in vertex welding.
*/
virtual Mesh* createChunkMesh(int32_t chunkInfoIndex, bool splitUVs = true) = 0;
/**
Fractures specified chunk with voronoi method.
\param[in] chunkId Chunk to fracture
\param[in] cellCount Number of voronoi sites
\param[in] cellPoints Array of voronoi sites
\param[in] replaceChunk if 'true', newly generated chunks will replace the source chunk, if 'false', newly
generated chunks will be at the next depth level and the source chunk will be their parent. The case
replaceChunk == true && chunkId == 0 is considered invalid input.
\return If 0, fracturing is successful.
*/
virtual int32_t
voronoiFracturing(uint32_t chunkId, uint32_t cellCount, const NvcVec3* cellPoints, bool replaceChunk) = 0;
/**
Fractures specified chunk with voronoi method. Cells can be scaled along x,y,z axes.
\param[in] chunkId Chunk to fracture
\param[in] cellCount Number of voronoi sites
\param[in] cellPoints Array of voronoi sites
\param[in] scale Voronoi cells scaling factor
\param[in] rotation Voronoi cells rotation. Has no effect without a cells scale factor
\param[in] replaceChunk if 'true', newly generated chunks will replace the source chunk, if 'false', newly
generated chunks will be at the next depth level and the source chunk will be their parent. The case
replaceChunk == true && chunkId == 0 is considered invalid input.
\return If 0, fracturing is successful.
*/
virtual int32_t voronoiFracturing(uint32_t chunkId, uint32_t cellCount, const NvcVec3* cellPoints,
const NvcVec3& scale, const NvcQuat& rotation, bool replaceChunk) = 0;
/**
Fractures specified chunk with slicing method.
\param[in] chunkId Chunk to fracture
\param[in] conf Slicing parameters, see SlicingConfiguration.
\param[in] replaceChunk if 'true', newly generated chunks will replace the source chunk, if 'false', newly
generated chunks will be at the next depth level and the source chunk will be their parent.
The case replaceChunk == true && chunkId == 0 is considered invalid input.
\param[in] rnd User supplied random number generator
\return If 0, fracturing is successful.
*/
virtual int32_t
slicing(uint32_t chunkId, const SlicingConfiguration& conf, bool replaceChunk, RandomGeneratorBase* rnd) = 0;
/**
Cut chunk with plane.
\param[in] chunkId Chunk to fracture
\param[in] normal Plane normal
\param[in] position Point on plane
\param[in] noise Noise configuration for plane-chunk intersection, see NoiseConfiguration.
\param[in] replaceChunk if 'true', newly generated chunks will replace the source chunk, if 'false', newly
generated chunks will be at the next depth level and the source chunk will be their parent.
The case replaceChunk == true && chunkId == 0 is considered invalid input.
\param[in] rnd User supplied random number generator
\return If 0, fracturing is successful.
*/
virtual int32_t cut(uint32_t chunkId, const NvcVec3& normal, const NvcVec3& position,
const NoiseConfiguration& noise, bool replaceChunk, RandomGeneratorBase* rnd) = 0;
/**
Cutout fracture for specified chunk.
\param[in] chunkId Chunk to fracture
\param[in] conf Cutout parameters, see CutoutConfiguration.
\param[in] replaceChunk if 'true', newly generated chunks will replace the source chunk, if 'false', newly
generated chunks will be at the next depth level and the source chunk will be their parent.
The case replaceChunk == true && chunkId == 0 is considered invalid input.
\param[in] rnd User supplied random number generator
\return If 0, fracturing is successful.
*/
virtual int32_t cutout(uint32_t chunkId, CutoutConfiguration conf, bool replaceChunk, RandomGeneratorBase* rnd) = 0;
/**
Creates resulting fractured mesh geometry from intermediate format
*/
virtual void finalizeFracturing() = 0;
/**
Returns overall number of chunks in fracture.
*/
virtual uint32_t getChunkCount() const = 0;
/**
Get chunk information
*/
virtual const ChunkInfo& getChunkInfo(int32_t chunkInfoIndex) = 0;
/**
Get percentage of mesh overlap.
The percentage is computed as volume(intersection(meshA, meshB)) / volume(meshA)
\param[in] meshA Mesh A
\param[in] meshB Mesh B
\return mesh overlap percentage
*/
virtual float getMeshOverlap(const Mesh& meshA, const Mesh& meshB) = 0;
/**
Get chunk base mesh
\param[in] chunkIndex Chunk index
\param[out] output Array of triangles to be filled
\return number of triangles in base mesh
*/
virtual uint32_t getBaseMesh(int32_t chunkIndex, Triangle*& output) = 0;
/**
Update chunk base mesh
\note Doesn't allocate the output array; Triangle* output should be preallocated by the user
\param[in] chunkIndex Chunk index
\param[out] output Array of triangles to be filled
\return number of triangles in base mesh
*/
virtual uint32_t updateBaseMesh(int32_t chunkIndex, Triangle* output) = 0;
/**
Return info index of chunk with specified chunkId
\param[in] chunkId Chunk ID
\return Chunk info index in internal buffer; if it does not exist, -1 is returned.
*/
virtual int32_t getChunkInfoIndex(int32_t chunkId) const = 0;
/**
Return id of chunk with specified info index.
\param[in] chunkInfoIndex Chunk info index
\return Chunk id or -1 if there is no such chunk.
*/
virtual int32_t getChunkId(int32_t chunkInfoIndex) const = 0;
/**
Return depth level of the given chunk
\param[in] chunkId Chunk ID
\return Chunk depth or -1 if there is no such chunk.
*/
virtual int32_t getChunkDepth(int32_t chunkId) const = 0;
/**
Return array of chunks IDs with given depth.
\param[in] depth Chunk depth
\param[out] chunkIds Pointer to array of chunk IDs
\return Number of chunks in array
*/
virtual uint32_t getChunksIdAtDepth(uint32_t depth, int32_t*& chunkIds) const = 0;
/**
Get result geometry without noise as vertex and index buffers, where index buffers contain series of triplets
which represent triangles.
\param[out] vertexBuffer Array of vertices to be filled
\param[out] indexBuffer Array of indices to be filled
\param[out] indexBufferOffsets Array of offsets in indexBuffer for each base mesh.
Contains getChunkCount() + 1 elements. Last one is indexBuffer size
\return Number of vertices in vertexBuffer
*/
virtual uint32_t
getBufferedBaseMeshes(Vertex*& vertexBuffer, uint32_t*& indexBuffer, uint32_t*& indexBufferOffsets) = 0;
/**
Set automatic island removal. May cause instabilities.
\param[in] isRemoveIslands Flag whether remove or not islands.
*/
virtual void setRemoveIslands(bool isRemoveIslands) = 0;
/**
Try to find islands and remove them on a specific chunk. If the chunk has children, island removal can lead to
wrong results! Apply it before further chunk splitting.
\param[in] chunkId Chunk ID which should be checked for islands
\return Number of found islands
*/
virtual int32_t islandDetectionAndRemoving(int32_t chunkId, bool createAtNewDepth = false) = 0;
/**
Check if input mesh contains open edges. Open edges can lead to wrong fracturing results.
\return true if mesh contains open edges
*/
virtual bool isMeshContainOpenEdges(const Mesh* input) = 0;
/**
Delete all children for specified chunk (also recursively delete children of children).
\param[in] chunkId Chunk ID which children should be deleted
\param[in] deleteRoot (optional) If true, deletes the given chunk too
\return true if one or more chunks were removed
*/
virtual bool deleteChunkSubhierarchy(int32_t chunkId, bool deleteRoot = false) = 0;
/**
Optimize chunk hierarchy for better runtime performance.
It tries to unite chunks into groups of some size in order to transform a flat hierarchy (all chunks are children of a
single root) into a tree-like hierarchy with a limited number of children for each chunk.
\param[in] threshold If the number of children of some chunk is less than this threshold, it is considered already
optimized and skipped.
\param[in] targetClusterSize Target number of children for processed chunks.
\param[in] chunksToMerge Which chunks are merge candidate. If NULL, all chunks will be a merge candidate.
\param[in] mergeChunkCount size of chunksToMerge array, if chunksToMerge != NULL.
\param[in] adjChunks Optional index pairs to describe chunk adjacency. May be NULL.
\param[in] adjChunksSize If 'adjChunks' is not NULL, the number of index pairs in the adjChunks array.
\param[in] removeOriginalChunks If true, original chunks that are merged are removed.
*/
virtual void uniteChunks(uint32_t threshold, uint32_t targetClusterSize,
const uint32_t* chunksToMerge, uint32_t mergeChunkCount,
const NvcVec2i* adjChunks, uint32_t adjChunksSize,
bool removeOriginalChunks = false) = 0;
/**
Set the APPROXIMATE_BONDING flag in the chunk's ChunkInfo
\param[in] chunkInfoIndex chunk info index - use getChunkInfoIndex(ID)
\param[in] useApproximateBonding value of flag to set
\return true if the chunk ID is found, false otherwise
*/
virtual bool setApproximateBonding(uint32_t chunkInfoIndex, bool useApproximateBonding) = 0;
/**
Rescale interior uv coordinates of given chunk to fit square of given size.
\param[in] side Size of square side
\param[in] chunkId Chunk ID for which UVs should be scaled.
*/
virtual void fitUvToRect(float side, uint32_t chunkId) = 0;
/**
Rescale interior uv coordinates of all existing chunks to fit square of given size, relative sizes will be
preserved.
\param[in] side Size of square side
*/
virtual void fitAllUvToRect(float side) = 0;
};
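/*
Example (illustrative sketch, not part of the original header): a typical fracture workflow. 'tool' is
assumed to come from NvBlastExtAuthoringCreateFractureTool(), and 'srcMesh', 'sites' and 'siteCount' are
assumed to be prepared by the caller (e.g. with a VoronoiSitesGenerator).

    Nv::Blast::Mesh* meshes[] = { srcMesh };
    tool->setSourceMeshes(meshes, 1);                               // the source mesh becomes the root chunk
    const int32_t rootId = tool->getChunkId(0);                     // chunk info index 0 is the first source mesh
    tool->voronoiFracturing((uint32_t)rootId, siteCount, sites, false);
    tool->finalizeFracturing();                                     // build the resulting geometry
    for (uint32_t i = 0; i < tool->getChunkCount(); ++i)
    {
        const Nv::Blast::ChunkInfo& info = tool->getChunkInfo((int32_t)i);
        // ... inspect info.chunkId, info.parentChunkId, info.getTmToWorld(), ...
    }
    tool->release();
*/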
} // namespace Blast
} // namespace Nv
#endif // ifndef NVBLASTAUTHORINGFRACTURETOOL_H
| 22,590 | C | 37.031986 | 129 | 0.656662 |
NVIDIA-Omniverse/PhysX/blast/include/extensions/authoring/NvBlastExtAuthoringBooleanTool.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
//! @brief Defines the API for the NvBlastExtAuthoring blast sdk extension's BooleanTool
#ifndef NVBLASTAUTHORINGBOOLEANTOOL_H
#define NVBLASTAUTHORINGBOOLEANTOOL_H
#include "NvBlastExtAuthoringTypes.h"
namespace Nv
{
namespace Blast
{
// Forward declaration
class Mesh;
class SpatialAccelerator;
/**
Tool for performing boolean operations on polygonal meshes.
Tool supports only closed meshes. Performing a boolean operation on meshes with holes can lead to unexpected behavior, e.g. holes in the result geometry.
*/
class BooleanTool
{
public:
virtual ~BooleanTool() {}
/**
* Release BooleanTool memory
*/
virtual void release() = 0;
/**
* Operation to perform
*/
enum Op
{
Intersection,
Union,
Difference
};
/**
* Perform boolean operation on two polygonal meshes (A and B).
* \param[in] meshA Mesh A
* \param[in] accelA Spatial accelerator for meshA. Can be nullptr.
* \param[in] meshB Mesh B
* \param[in] accelB Spatial accelerator for meshB. Can be nullptr.
* \param[in] op Boolean operation type (see BooleanTool::Op)
* \return new mesh result of the boolean operation. If nullptr, result is the empty set.
*/
virtual Mesh* performBoolean(const Mesh* meshA, SpatialAccelerator* accelA, const Mesh* meshB, SpatialAccelerator* accelB, Op op) = 0;
/**
* Test whether point contained in mesh.
* \param[in] mesh Mesh geometry
* \param[in] accel Spatial accelerator for mesh. Can be nullptr.
* \param[in] point Point which should be tested
* \return true iff point is inside of mesh
*/
virtual bool pointInMesh(const Mesh* mesh, SpatialAccelerator* accel, const NvcVec3& point) = 0;
};
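/*
Example (illustrative sketch, not part of the original header): intersecting two closed meshes. 'meshA'
and 'meshB' are assumed to be valid closed meshes; the factory function is declared in
NvBlastExtAuthoring.h. Passing nullptr accelerators makes the tool run without spatial acceleration.

    Nv::Blast::BooleanTool* boolTool = NvBlastExtAuthoringCreateBooleanTool();
    Nv::Blast::Mesh* result = boolTool->performBoolean(meshA, nullptr, meshB, nullptr,
                                                       Nv::Blast::BooleanTool::Intersection);
    if (result != nullptr)
    {
        // ... use the intersection mesh, then release it ...
    }
    boolTool->release();
*/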
} // namespace Blast
} // namespace Nv
#endif // ifndef NVBLASTAUTHORINGBOOLEANTOOL_H
| 3,463 | C | 35.851063 | 141 | 0.711522 |
NVIDIA-Omniverse/PhysX/blast/include/extensions/authoring/NvBlastExtAuthoringBondGenerator.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
//! @brief Defines the API for the NvBlastExtAuthoring blast sdk extension's BlastBondGenerator
#ifndef NVBLASTEXTAUTHORINGBONDGENERATOR_H
#define NVBLASTEXTAUTHORINGBONDGENERATOR_H
#include "NvBlastExtAuthoringTypes.h"
struct NvBlastBondDesc;
struct NvBlastChunkDesc;
struct NvBlastBond;
namespace Nv
{
namespace Blast
{
// Forward declarations
class FractureTool;
class TriangleProcessor;
struct PlaneChunkIndexer;
/**
Bond interface generation configuration
EXACT - the common surface will be searched for
AVERAGE - the interface is approximated by projections or intersections with a midplane
maxSeparation - for AVERAGE mode. Maximum distance between chunks and the midplane, used to decide whether to create a bond or whether the chunks are too far from each other.
*/
struct BondGenerationConfig
{
enum BondGenMode { EXACT, AVERAGE };
float maxSeparation;
BondGenMode bondMode;
};
struct PlaneChunkIndexer
{
int32_t chunkId;
int32_t trId;
NvcPlane plane;
};
/**
Tool for gathering bond information from provided mesh geometry
*/
class BlastBondGenerator
{
public:
virtual ~BlastBondGenerator() {}
/**
Release BlastBondGenerator memory
*/
virtual void release() = 0;
/**
This method is based on marking triangles during the fracture process, so it can be used only with internally fractured meshes.
\note User should call NVBLAST_FREE for resultBondDescs when it is not needed anymore
\param[in] tool FractureTool which contains chunks representation, tool->finalizeFracturing() should be called before.
\param[in] chunkIsSupport Pointer to array of flags, if true - chunk is support. Array size should be equal to chunk count in tool.
\param[out] resultBondDescs Pointer to array of created bond descriptors.
\param[out] resultChunkDescriptors Pointer to array of created chunk descriptors.
\return Number of created bonds
*/
virtual int32_t buildDescFromInternalFracture(FractureTool* tool, const bool* chunkIsSupport,
NvBlastBondDesc*& resultBondDescs, NvBlastChunkDesc*& resultChunkDescriptors) = 0;
/**
Creates bond description between two meshes
\param[in] meshACount Number of triangles in mesh A
\param[in] meshA Pointer to array of triangles of mesh A.
\param[in] meshBCount Number of triangles in mesh B
\param[in] meshB Pointer to array of triangles of mesh B.
\param[out] resultBond Result bond description.
\param[in] conf Bond creation mode.
\return 0 if success
*/
virtual int32_t createBondBetweenMeshes(uint32_t meshACount, const Triangle* meshA, uint32_t meshBCount, const Triangle* meshB,
NvBlastBond& resultBond, BondGenerationConfig conf) = 0;
/**
Creates bond description between number of meshes
\note User should call NVBLAST_FREE for resultBondDescs when it is not needed anymore
\param[in] meshCount Number of meshes
\param[in] geometryOffset Pointer to array of triangle offsets for each mesh.
Contains meshCount + 1 elements; the last one is the total number of triangles in the geometry
\param[in] geometry Pointer to array of triangles.
Triangles from geometryOffset[i] to geometryOffset[i+1] correspond to i-th mesh.
\param[in] overlapsCount Number of overlaps
\param[in] overlapsA, overlapsB Pointers to arrays of chunk index pairs (overlapsA[i], overlapsB[i]) for which a bond should be created.
\param[out] resultBond Pointer to array of result bonds.
\param[in] cfg Bond creation mode.
\return Number of created bonds
*/
virtual int32_t createBondBetweenMeshes(uint32_t meshCount, const uint32_t* geometryOffset, const Triangle* geometry,
uint32_t overlapsCount, const uint32_t* overlapsA, const uint32_t* overlapsB,
NvBlastBondDesc*& resultBond, BondGenerationConfig cfg) = 0;
/**
Creates bond description for prefractured meshes, when there is no info about which chunks should be connected with bond.
\note User should call NVBLAST_FREE for resultBondDescs when it is not needed anymore
\param[in] meshCount Number of meshes
\param[in] geometryOffset Pointer to array of triangle offsets for each mesh.
Contains meshCount + 1 elements; the last one is the total number of triangles in the geometry
\param[in] geometry Pointer to array of triangles.
Triangles from geometryOffset[i] to geometryOffset[i+1] correspond to i-th mesh.
\param[in] chunkIsSupport Pointer to array of flags, if true - chunk is support. Array size should be equal to chunk count in tool.
\param[out] resultBondDescs Pointer to array of result bonds.
\param[in] conf Bond creation mode.
\return Number of created bonds
*/
virtual int32_t bondsFromPrefractured(uint32_t meshCount, const uint32_t* geometryOffset, const Triangle* geometry,
const bool* chunkIsSupport, NvBlastBondDesc*& resultBondDescs,
BondGenerationConfig conf) = 0;
/**
Creates bond description for prefractured meshes, when there is no info about which chunks should be connected with bond.
This uses the same process as bondsFromPrefractured with BondGenMode::AVERAGE; however, the existing collision data is used.
\note User should call NVBLAST_FREE for resultBondDescs when it is not needed anymore.
\param[in] meshCount Number of meshes
\param[in] convexHullOffset Pointer to array of convex hull offsets for each mesh.
Contains meshCount + 1 elements; the last one is the total number of hulls in the geometry
\param[in] chunkHulls Pointer to array of convex hulls.
Hulls from convexHullOffset[i] to convexHullOffset[i+1] correspond to i-th mesh.
\param[in] chunkIsSupport Pointer to array of flags, if true - chunk is support. Array size should be equal to chunk count in tool.
\param[in] meshGroups Pointer to array of group ids for each mesh; bonds will not be generated between meshes of the same group. If null, each mesh is assumed to be in its own group.
\param[out] resultBondDescs Pointer to array of result bonds.
\return Number of created bonds
*/
virtual int32_t bondsFromPrefractured(uint32_t meshCount, const uint32_t* convexHullOffset, const CollisionHull** chunkHulls,
const bool* chunkIsSupport, const uint32_t* meshGroups, NvBlastBondDesc*& resultBondDescs, float maxSeparation) = 0;
};
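/*
Example (illustrative sketch, not part of the original header): building bond and chunk descriptors from
an internally fractured FractureTool. 'bondGenerator' is assumed to come from
NvBlastExtAuthoringCreateBondGenerator() and 'tool' to have been fractured and finalized already.

    const uint32_t chunkCount = tool->getChunkCount();
    bool* chunkIsSupport = new bool[chunkCount];
    for (uint32_t i = 0; i < chunkCount; ++i)
        chunkIsSupport[i] = tool->getChunkInfo((int32_t)i).isLeaf;  // mark leaf chunks as support
    NvBlastBondDesc* bondDescs = nullptr;
    NvBlastChunkDesc* chunkDescs = nullptr;
    const int32_t bondCount = bondGenerator->buildDescFromInternalFracture(tool, chunkIsSupport,
                                                                           bondDescs, chunkDescs);
    // ... feed bondDescs/chunkDescs into an NvBlastAssetDesc ...
    delete[] chunkIsSupport;
    NVBLAST_FREE(bondDescs);   // per the note above; assumes NvBlastGlobals.h is available
    NVBLAST_FREE(chunkDescs);  // assumption: chunk descriptors are released the same way
*/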
} // namespace Blast
} // namespace Nv
#endif // NVBLASTEXTAUTHORINGBONDGENERATOR_H | 8,626 | C | 49.156976 | 195 | 0.695224 |
NVIDIA-Omniverse/PhysX/blast/include/extensions/authoring/NvBlastExtAuthoringCutout.h | // THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
//
// Information and code furnished is believed to be accurate and reliable.
// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
// information or for any infringement of patents or other rights of third parties that may
// result from its use. No license is granted by implication or otherwise under any patent
// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
// This code supersedes and replaces all information previously supplied.
// NVIDIA Corporation products are not authorized for use as critical
// components in life support devices or systems without express written approval of
// NVIDIA Corporation.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
//! @brief Defines the API for the NvBlastExtAuthoring blast sdk extension's CutoutSet, used for cutout fracturing
#ifndef NVBLASTAUTHORINGCUTOUT_H
#define NVBLASTAUTHORINGCUTOUT_H
#include "NvBlastExtAuthoringTypes.h"
namespace Nv
{
namespace Blast
{
/**
Interface to a "cutout set," used with chippable fracturing. A cutout set is created from a bitmap. The
result is turned into cutouts which are applied to the mesh. For example, a bitmap which looks like a brick
pattern will generate a cutout for each "brick," forming the cutout set.
Each cutout is a 2D entity, meant to be projected onto various faces of a mesh. They are represented
by a set of 2D vertices, which form closed loops. More than one loop may represent a single cutout, if
the loops are forced to be convex. Otherwise, a cutout is represented by a single loop.
*/
class CutoutSet
{
public:
/** Returns the number of cutouts in the set. */
virtual uint32_t getCutoutCount() const = 0;
/**
Applies to the cutout indexed by cutoutIndex:
Returns the number of vertices in the loop indexed by loopIndex.
*/
virtual uint32_t getCutoutVertexCount(uint32_t cutoutIndex, uint32_t loopIndex) const = 0;
/**
Applies to the cutout indexed by cutoutIndex:
Returns the number of loops in this cutout.
*/
virtual uint32_t getCutoutLoopCount(uint32_t cutoutIndex) const = 0;
/**
Applies to the cutout indexed by cutoutIndex:
Returns the vertex indexed by vertexIndex. (Only the X and Y coordinates are used.)
*/
virtual const NvcVec3& getCutoutVertex(uint32_t cutoutIndex, uint32_t loopIndex, uint32_t vertexIndex) const = 0;
/**
Returns true if the smoothing group should be changed for faces adjacent to this vertex
*/
virtual bool isCutoutVertexToggleSmoothingGroup(uint32_t cutoutIndex, uint32_t loopIndex, uint32_t vertexIndex) const = 0;
/**
Whether or not this cutout set is to be tiled.
*/
virtual bool isPeriodic() const = 0;
/**
The dimensions of the fracture map used to create the cutout set.
*/
virtual const NvcVec2& getDimensions() const = 0;
/** Releases all memory and deletes itself. */
virtual void release() = 0;
protected:
/** Protected destructor. Use the release() method. */
virtual ~CutoutSet() {}
};
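/*
Example (illustrative sketch, not part of the original header): iterating over all cutouts, loops and
vertices of a CutoutSet. 'cutoutSet' is assumed to have been built with NvBlastExtAuthoringBuildCutoutSet().

    for (uint32_t c = 0; c < cutoutSet->getCutoutCount(); ++c)
    {
        for (uint32_t l = 0; l < cutoutSet->getCutoutLoopCount(c); ++l)
        {
            for (uint32_t v = 0; v < cutoutSet->getCutoutVertexCount(c, l); ++v)
            {
                const NvcVec3& vert = cutoutSet->getCutoutVertex(c, l, v);  // only x and y are used
                // ... consume vert.x, vert.y ...
            }
        }
    }
*/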
} // namespace Blast
} // namespace Nv
#endif // ifndef NVBLASTAUTHORINGCUTOUT_H
| 3,464 | C | 36.663043 | 145 | 0.707852 |
NVIDIA-Omniverse/PhysX/blast/include/extensions/authoring/NvBlastExtAuthoring.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
//! @brief Defines the API for the NvBlastExtAuthoring blast sdk extension
#ifndef NVBLASTAUTHORING_H
#define NVBLASTAUTHORING_H
#include "NvBlastExtAuthoringTypes.h"
namespace Nv
{
namespace Blast
{
class Mesh;
class VoronoiSitesGenerator;
class CutoutSet;
class RandomGeneratorBase;
class FractureTool;
class ConvexMeshBuilder;
class BlastBondGenerator;
class MeshCleaner;
class PatternGenerator;
class SpatialGrid;
class SpatialAccelerator;
class BooleanTool;
} // namespace Blast
} // namespace Nv
struct NvBlastExtAssetUtilsBondDesc;
/**
Constructs mesh object from array of triangles.
User should call release() after usage.
\param[in] positions Array for vertex positions, 3 * verticesCount floats will be read
\param[in] normals Array for vertex normals, 3 * verticesCount floats will be read
\param[in] uv Array for vertex uv coordinates, 2 * verticesCount floats will be read
\param[in] verticesCount Number of vertices in mesh
\param[in] indices Array of vertex indices. Indices contain vertex index triplets which form a mesh triangle.
\param[in] indicesCount Indices count (should be equal to numberOfTriangles * 3)
\return pointer to Nv::Blast::Mesh if it was created successfully, otherwise nullptr
*/
NV_C_API Nv::Blast::Mesh*
NvBlastExtAuthoringCreateMesh(const NvcVec3* positions, const NvcVec3* normals, const NvcVec2* uv,
uint32_t verticesCount, const uint32_t* indices, uint32_t indicesCount);
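/*
Example (illustrative sketch, not part of the original documentation): building a single-triangle mesh
from raw arrays. The data below is made up for illustration.

    const NvcVec3 positions[3] = { { 0, 0, 0 }, { 1, 0, 0 }, { 0, 1, 0 } };
    const NvcVec3 normals[3]   = { { 0, 0, 1 }, { 0, 0, 1 }, { 0, 0, 1 } };
    const NvcVec2 uvs[3]       = { { 0, 0 }, { 1, 0 }, { 0, 1 } };
    const uint32_t indices[3]  = { 0, 1, 2 };
    Nv::Blast::Mesh* mesh = NvBlastExtAuthoringCreateMesh(positions, normals, uvs, 3, indices, 3);
    // ... fracture or otherwise process 'mesh', then release it ...
*/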
/**
Constructs mesh object from triangles represented as arrays of vertices, indices and per facet material.
User should call Mesh::release() after usage.
\param[in] vertices Array for vertex positions, 3 * verticesCount floats will be read
\param[in] verticesCount Number of vertices in mesh
\param[in] indices Array of vertex indices. Indices contain vertex index triplets which form a mesh triangle.
\param[in] indexCount Indices count (should be equal to numberOfTriangles * 3)
\param[in] materials Array of material indices per triangle. If not set default material (0) will be assigned.
\param[in] materialStride Stride for material indices
\return pointer to Nv::Blast::Mesh if it was created successfully, otherwise nullptr
*/
NV_C_API Nv::Blast::Mesh*
NvBlastExtAuthoringCreateMeshOnlyTriangles(const void* vertices, uint32_t verticesCount, uint32_t* indices,
uint32_t indexCount, void* materials = nullptr, uint32_t materialStride = 4);
/**
Constructs mesh object from array of vertices, edges and facets.
User should call release() after usage.
\param[in] vertices Array for Nv::Blast::Vertex
\param[in] edges Array for Nv::Blast::Edge
\param[in] facets Array for Nv::Blast::Facet
\param[in] verticesCount Number of vertices in mesh
\param[in] edgesCount Number of edges in mesh
\param[in] facetsCount Number of facets in mesh
\return pointer to Nv::Blast::Mesh if it was created successfully, otherwise nullptr
*/
NV_C_API Nv::Blast::Mesh*
NvBlastExtAuthoringCreateMeshFromFacets(const void* vertices, const void* edges, const void* facets,
uint32_t verticesCount, uint32_t edgesCount, uint32_t facetsCount);
/**
Voronoi sites should not be generated outside of the fractured mesh, so VoronoiSitesGenerator
should be supplied with fracture mesh.
\param[in] mesh Fracture mesh
\param[in] rng User supplied random value generator.
\return Pointer to VoronoiSitesGenerator. User's code should release it after usage.
*/
NV_C_API Nv::Blast::VoronoiSitesGenerator*
NvBlastExtAuthoringCreateVoronoiSitesGenerator(Nv::Blast::Mesh* mesh, Nv::Blast::RandomGeneratorBase* rng);
/** Instantiates a blank CutoutSet */
NV_C_API Nv::Blast::CutoutSet* NvBlastExtAuthoringCreateCutoutSet();
/**
Builds a cutout set (which must have been initially created by createCutoutSet()).
Uses a bitmap described by pixelBuffer, bufferWidth, and bufferHeight. Each pixel is represented
by one byte in the buffer.
\param cutoutSet the CutoutSet to build
\param pixelBuffer pointer to be beginning of the pixel buffer
\param bufferWidth the width of the buffer in pixels
\param bufferHeight the height of the buffer in pixels
\param segmentationErrorThreshold Reduce the number of vertices on a curve until the segmentation error is smaller than
specified. By default set it to 0.001.
\param snapThreshold the pixel distance at which neighboring cutout vertices and
segments may be fudged into alignment. By default set it to 1.
\param periodic whether or not to use periodic boundary conditions when creating cutouts from the map
\param expandGaps expand cutout regions to gaps or keep it as is
*/
NV_C_API void
NvBlastExtAuthoringBuildCutoutSet(Nv::Blast::CutoutSet& cutoutSet, const uint8_t* pixelBuffer, uint32_t bufferWidth,
uint32_t bufferHeight, float segmentationErrorThreshold, float snapThreshold,
bool periodic, bool expandGaps);
/**
Create FractureTool object.
\return Pointer to create FractureTool. User's code should release it after usage.
*/
NV_C_API Nv::Blast::FractureTool* NvBlastExtAuthoringCreateFractureTool();
/**
Create BlastBondGenerator
\return Pointer to created BlastBondGenerator. User's code should release it after usage.
*/
NV_C_API Nv::Blast::BlastBondGenerator* NvBlastExtAuthoringCreateBondGenerator(Nv::Blast::ConvexMeshBuilder* builder);
/**
Build convex mesh decomposition.
\param[in] mesh Triangle mesh to decompose.
\param[in] triangleCount Number of triangles in mesh.
\param[in] params Parameters for convex mesh decomposition builder.
\param[out] convexes The resulting convex hulls.
\return Number of created convex hulls.
*/
NV_C_API int32_t NvBlastExtAuthoringBuildMeshConvexDecomposition(Nv::Blast::ConvexMeshBuilder* cmb,
const Nv::Blast::Triangle* mesh,
uint32_t triangleCount,
const Nv::Blast::ConvexDecompositionParams& params,
Nv::Blast::CollisionHull**& convexes);
/**
Convex geometry trimming.
Using slicing with noised slicing surface can result in intersecting collision geometry.
It leads to unstable behaviour of rigid body simulation.
This method trims all intersecting parts of collision geometry.
As a drawback, trimming collision geometry can lead to penetrating render meshes during simulation.
\param[in] chunksCount Number of chunks
\param[in,out] in ConvexHull geometry which should be clipped.
\param[in] chunkDepth Array of depth levels of the chunks corresponding to the convex hulls.
*/
NV_C_API void NvBlastExtAuthoringTrimCollisionGeometry(Nv::Blast::ConvexMeshBuilder* cmb, uint32_t chunksCount,
Nv::Blast::CollisionHull** in, const uint32_t* chunkDepth);
/**
Transforms collision hull in place using scale, rotation, translation.
\param[in, out] hull Pointer to the hull to be transformed (modified).
\param[in] scale Pointer to scale to be applied. Can be nullptr.
\param[in] rotation Pointer to rotation to be applied. Can be nullptr.
\param[in] translation Pointer to translation to be applied. Can be nullptr.
*/
NV_C_API void NvBlastExtAuthoringTransformCollisionHullInPlace(Nv::Blast::CollisionHull* hull, const NvcVec3* scaling,
const NvcQuat* rotation, const NvcVec3* translation);
/**
Creates a new collision hull by transforming the input hull using scale, rotation, translation.
\param[in] hull Pointer to the hull to be transformed (the input hull is not modified).
\param[in] scale Pointer to scale to be applied. Can be nullptr.
\param[in] rotation Pointer to rotation to be applied. Can be nullptr.
\param[in] translation Pointer to translation to be applied. Can be nullptr.
*/
NV_C_API Nv::Blast::CollisionHull*
NvBlastExtAuthoringTransformCollisionHull(const Nv::Blast::CollisionHull* hull, const NvcVec3* scaling,
const NvcQuat* rotation, const NvcVec3* translation);
/**
Performs pending fractures and generates fractured asset, render and collision geometry
\param[in] fTool Fracture tool created by NvBlastExtAuthoringCreateFractureTool
\param[in] bondGenerator Bond generator created by NvBlastExtAuthoringCreateBondGenerator
\param[in] collisionBuilder Collision builder created by NvBlastExtAuthoringCreateConvexMeshBuilder
\param[in] defaultSupportDepth All new chunks will be marked as support if their depth is equal to defaultSupportDepth.
By default leaves (chunks without children) are marked as support.
\param[in] collisionParam Parameters of collision hulls generation.
\return Authoring result
*/
NV_C_API Nv::Blast::AuthoringResult*
NvBlastExtAuthoringProcessFracture(Nv::Blast::FractureTool& fTool, Nv::Blast::BlastBondGenerator& bondGenerator,
Nv::Blast::ConvexMeshBuilder& collisionBuilder,
const Nv::Blast::ConvexDecompositionParams& collisionParam,
int32_t defaultSupportDepth = -1);
/**
Releases collision data for AuthoringResult. AuthoringResult should be created by NvBlast.
*/
NV_C_API void NvBlastExtAuthoringReleaseAuthoringResultCollision(Nv::Blast::ConvexMeshBuilder& collisionBuilder, Nv::Blast::AuthoringResult* ar);
/**
Releases AuthoringResult data. AuthoringResult should be created by NvBlast.
*/
NV_C_API void NvBlastExtAuthoringReleaseAuthoringResult(Nv::Blast::ConvexMeshBuilder& collisionBuilder, Nv::Blast::AuthoringResult* ar);
/**
Updates graphics mesh only
\param[in] fTool Fracture tool created by NvBlastExtAuthoringCreateFractureTool
\param[out] ares AuthoringResult object which contains chunks, for which rendermeshes will be updated
(e.g. to tweak UVs). Initially should be created by NvBlastExtAuthoringProcessFracture.
*/
NV_C_API void NvBlastExtAuthoringUpdateGraphicsMesh(Nv::Blast::FractureTool& fTool, Nv::Blast::AuthoringResult& ares);
/**
Build collision meshes
\param[in,out] ares AuthoringResult object which contains chunks for which collision meshes will be built.
\param[in] collisionBuilder Reference to ConvexMeshBuilder instance.
\param[in] collisionParam Parameters of collision hulls generation.
\param[in] chunksToProcessCount Number of chunk indices in chunksToProcess memory buffer.
\param[in] chunksToProcess Chunk indices for which collision mesh should be built.
*/
NV_C_API void NvBlastExtAuthoringBuildCollisionMeshes(Nv::Blast::AuthoringResult& ares,
Nv::Blast::ConvexMeshBuilder& collisionBuilder,
const Nv::Blast::ConvexDecompositionParams& collisionParam,
uint32_t chunksToProcessCount, uint32_t* chunksToProcess);
/**
Creates MeshCleaner object
\return pointer to Nv::Blast::MeshCleaner if it was created successfully, otherwise nullptr
*/
NV_C_API Nv::Blast::MeshCleaner* NvBlastExtAuthoringCreateMeshCleaner();
/**
Finds bonds connecting chunks in a list of assets
New bond descriptors may be given to bond support chunks from different components.
An NvBlastAsset may appear more than once in the components array.
NOTE: This function allocates memory using the allocator in NvBlastGlobals, to create the new bond
descriptor arrays returned. The user must free this memory after use with NVBLAST_FREE
\param[in] components An array of assets to merge, of size componentCount.
\param[in] scales If not NULL, an array of size componentCount of scales to apply to the geometric data in
the chunks and bonds. If NULL, no scaling is applied.
\param[in] rotations If not NULL, an array of size componentCount of rotations to apply to the geometric data
in the chunks and bonds. The quaternions MUST be normalized. If NULL, no rotations are applied.
\param[in] translations If not NULL, an array of size componentCount of translations to apply to the
geometric data in the chunks and bonds. If NULL, no translations are applied.
\param[in] convexHullOffsets For each component, an array of size (chunk count + 1) specifying the start of the convex hulls for that
chunk inside the chunkHulls array for that component.
\param[in] chunkHulls For each component, an array of CollisionHull* specifying the collision geometry for the
chunks in that component.
\param[in] componentCount The size of the components, scales, rotations, and translations arrays.
\param[out] newBondDescs Descriptors of type NvBlastExtAssetUtilsBondDesc for new bonds between components.
\param[in] maxSeparation Maximal distance between chunks which can be connected by bond.
\return the number of bonds in newBondDescs
*/
NV_C_API uint32_t NvBlastExtAuthoringFindAssetConnectingBonds(
const NvBlastAsset** components, const NvcVec3* scales, const NvcQuat* rotations, const NvcVec3* translations,
const uint32_t** convexHullOffsets, const Nv::Blast::CollisionHull*** chunkHulls, uint32_t componentCount,
NvBlastExtAssetUtilsBondDesc*& newBondDescs, float maxSeparation = 0.0f);
/**
Returns pattern generator used for generating fracture patterns.
*/
NV_C_API Nv::Blast::PatternGenerator* NvBlastExtAuthoringCreatePatternGenerator();
/**
Create spatial grid for mesh.
Release using Nv::Blast::SpatialGrid::release()
*/
NV_C_API Nv::Blast::SpatialGrid* NvBlastExtAuthoringCreateSpatialGrid(uint32_t resolution, const Nv::Blast::Mesh* m);
/**
Create GridAccelerator - SpatialAccelerator which use Grid for faster mesh sampling.
Release using Nv::Blast::SpatialAccelerator::release()
*/
NV_C_API Nv::Blast::SpatialAccelerator* NvBlastExtAuthoringCreateGridAccelerator(Nv::Blast::SpatialGrid* parent);
/**
Create SweepingAccelerator - SpatialAccelerator which uses a sweep algorithm.
Release using Nv::Blast::SpatialAccelerator::release()
*/
NV_C_API Nv::Blast::SpatialAccelerator* NvBlastExtAuthoringCreateSweepingAccelerator(const Nv::Blast::Mesh* m);
/**
Create BBoxBasedAccelerator - SpatialAccelerator which uses a bbox/grid algorithm.
Release using Nv::Blast::SpatialAccelerator::release()
*/
NV_C_API Nv::Blast::SpatialAccelerator* NvBlastExtAuthoringCreateBBoxBasedAccelerator(uint32_t resolution, const Nv::Blast::Mesh* m);
#define kBBoxBasedAcceleratorDefaultResolution 10
/**
Create BooleanTool object.
\return Pointer to created BooleanTool. User's code should release it after usage.
*/
NV_C_API Nv::Blast::BooleanTool* NvBlastExtAuthoringCreateBooleanTool();
#endif // ifndef NVBLASTAUTHORING_H
| 16,932 | C | 49.395833 | 145 | 0.723482 |
NVIDIA-Omniverse/PhysX/blast/include/extensions/authoring/NvBlastExtAuthoringMeshCleaner.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
//! @brief Defines the API for the NvBlastExtAuthoring blast sdk extension's MeshCleaner utility
#ifndef NVBLASTEXTAUTHORINGMESHCLEANER_H
#define NVBLASTEXTAUTHORINGMESHCLEANER_H
#include "NvBlastExtAuthoringTypes.h"
/**
FractureTool has requirements on input meshes to fracture them successfully:
1) Mesh should be closed (watertight)
2) There should be no self-intersections or open edges.
*/
/**
Mesh cleaner input is a closed mesh with self-intersections and open edges (only in the interior).
It tries to track the outer hull to make the input mesh solid and meet the requirements of FractureTool. If the mesh contained internal cavities, they will be removed.
*/
namespace Nv
{
namespace Blast
{
class Mesh;
class MeshCleaner
{
public:
virtual ~MeshCleaner() {}
/**
Tries to remove self intersections and open edges in interior of mesh.
\param[in] mesh Mesh to be cleaned.
\return Cleaned mesh or nullptr if failed.
*/
virtual Mesh* cleanMesh(const Mesh* mesh) = 0;
virtual void release() = 0;
};
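/*
Example (illustrative sketch, not part of the original header): cleaning an input mesh before fracturing.
'inputMesh' is assumed to be a valid mesh created elsewhere; the factory function is declared in
NvBlastExtAuthoring.h, and Mesh objects are assumed to be released via their release() method.

    Nv::Blast::MeshCleaner* cleaner = NvBlastExtAuthoringCreateMeshCleaner();
    Nv::Blast::Mesh* cleaned = cleaner->cleanMesh(inputMesh);
    if (cleaned != nullptr)
    {
        // ... fracture 'cleaned' instead of 'inputMesh', then release it ...
    }
    cleaner->release();
*/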
} // namespace Blast
} // namespace Nv
#endif // ifndef NVBLASTEXTAUTHORINGMESHCLEANER_H | 2,742 | C | 36.575342 | 160 | 0.747994 |
NVIDIA-Omniverse/PhysX/blast/include/extensions/assetutils/NvBlastExtAssetUtils.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
//! @brief Defines the API for the NvBlastExtAssetUtils blast sdk extension
#ifndef NVBLASTEXTASSETUTILS_H
#define NVBLASTEXTASSETUTILS_H
#include "NvBlastTypes.h"
#include "NvCTypes.h"
#include <stdint.h>
/**
Reauthor the provided asset to create external bonds in the specified support chunks.
\param[in] asset Pointer to the original asset. Won't be modified.
\param[in] externalBoundChunks Array of support chunk indices which are to be bound to the external body.
\param[in] externalBoundChunkCount Size of externalBoundChunks array.
\param[in] bondDirections Array of normals for each bond (size externalBoundChunksCount)
\param[in] bondUserData Array of user data values for the new bonds, of size externalBoundChunksCount. May be NULL. If NULL, bond user data will be set to zero.
\return a new asset with added bonds if successful, NULL otherwise.
*/
NV_C_API NvBlastAsset* NvBlastExtAssetUtilsAddExternalBonds
(
const NvBlastAsset* asset,
const uint32_t* externalBoundChunks,
uint32_t externalBoundChunkCount,
const NvcVec3* bondDirections,
const uint32_t* bondUserData
);
// DEPRECATED: remove on next major version bump
#define NvBlastExtAssetUtilsAddWorldBonds NvBlastExtAssetUtilsAddExternalBonds
/**
Bond descriptor used to merge assets.
In addition to the NvBlastBondDesc fields, adds "component" indices to indicate
to which component asset the chunk indices in NvBlastBondDesc refer. Used in the
function NvBlastExtAssetUtilsMergeAssets.
*/
struct NvBlastExtAssetUtilsBondDesc : public NvBlastBondDesc
{
uint32_t componentIndices[2]; //!< The asset component for the corresponding chunkIndices[2] value.
};
/**
Creates an asset descriptor from an asset.
NOTE: This function allocates memory using the allocator in NvBlastGlobals, to create the new chunk and bond
descriptor arrays referenced in the returned NvBlastAssetDesc. The user must free this memory after use with
NVBLAST_FREE applied to the pointers in the returned NvBlastAssetDesc.
\param[in] asset The asset from which to create a descriptor.
\return an asset descriptor that will build an exact duplicate of the input asset.
*/
NV_C_API NvBlastAssetDesc NvBlastExtAssetUtilsCreateDesc(const NvBlastAsset* asset);
/**
Creates an asset descriptor which will build an asset that merges several assets. Each asset (or component)
is given a transform, applied to the geometric information in the chunk and bond descriptors.
New bond descriptors may be given to bond support chunks from different components.
An NvBlastAsset may appear more than once in the components array.
This function will call NvBlastEnsureAssetExactSupportCoverage on the returned chunk descriptors. It will also
call NvBlastReorderAssetDescChunks if the user passes in valid arrays for chunkReorderMap and chunkReorderMapSize.
Otherwise, the user must ensure that the returned chunk descriptors are in a valid order before using them.
NOTE: This function allocates memory using the allocator in NvBlastGlobals, to create the new chunk and bond
descriptor arrays referenced in the returned NvBlastAssetDesc. The user must free this memory after use with
NVBLAST_FREE applied to the pointers in the returned NvBlastAssetDesc.
\param[in] components An array of assets to merge, of size componentCount.
\param[in] scales An array of scales to apply to the geometric data in the chunks and bonds.
If NULL, no scales are applied. If not NULL, the array must be of size componentCount.
\param[in] rotations An array of rotations to apply to the geometric data in the chunks and bonds,
stored quaternion format. The quaternions MUST be normalized. If NULL, no rotations are applied.
If not NULL, the array must be of size componentCount.
\param[in] translations An array of translations to apply to the geometric data in the chunks and bonds.
If NULL, no translations are applied. If not NULL, the array must be of size componentCount.
\param[in] componentCount The size of the components and relativeTransforms arrays.
\param[in] newBondDescs Descriptors of type NvBlastExtAssetUtilsBondDesc for new bonds between components, of size newBondCount. If NULL, newBondCount must be 0.
\param[in] newBondCount The size of the newBondDescs array.
\param[in] chunkIndexOffsets If not NULL, must point to a uint32_t array of size componentCount. It will be filled with the starting elements in chunkReorderMap corresponding to
each component.
\param[in] chunkReorderMap If not NULL, the returned descriptor is run through NvBlastReorderAssetDescChunks, to ensure that it is a valid asset descriptor. In the process, chunks
may be reordered (in addition to their natural re-indexing due to them all being placed in one array). To map from the old chunk indexing for the various
component assets to the chunk indexing used in the returned descriptor, set chunkReorderMap to point to a uint32_t array of size equal to the total number
of chunks in all components, and pass in a non-NULL value to chunkIndexOffsets as described above. Then, for component index c and chunk index k within
that component, the new chunk index is given by: index = chunkReorderMap[ k + chunkIndexOffsets[c] ].
\param[in] chunkReorderMapSize The size of the array passed into chunkReorderMap, if chunkReorderMap is not NULL. This is for safety, so that this function does not overwrite chunkReorderMap.
\return an asset descriptor that will build an asset which merges the components, using NvBlastCreateAsset.
*/
NV_C_API NvBlastAssetDesc NvBlastExtAssetUtilsMergeAssets
(
const NvBlastAsset** components,
const NvcVec3* scales,
const NvcQuat* rotations,
const NvcVec3* translations,
uint32_t componentCount,
const NvBlastExtAssetUtilsBondDesc* newBondDescs,
uint32_t newBondCount,
uint32_t* chunkIndexOffsets,
uint32_t* chunkReorderMap,
uint32_t chunkReorderMapSize
);
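/*
Example (illustrative sketch, not part of the original documentation): merging two assets with a relative
translation and no new bonds. 'assetA' and 'assetB' are assumed to be valid assets, 'totalChunkCount' the
sum of their chunk counts; requires <vector>.

    const NvBlastAsset* components[2] = { assetA, assetB };
    const NvcVec3 translations[2] = { { 0, 0, 0 }, { 1, 0, 0 } };  // offset the second asset by +1 in x
    uint32_t chunkIndexOffsets[2];
    std::vector<uint32_t> chunkReorderMap(totalChunkCount);
    NvBlastAssetDesc desc = NvBlastExtAssetUtilsMergeAssets(components, nullptr, nullptr, translations, 2,
                                                            nullptr, 0, chunkIndexOffsets,
                                                            chunkReorderMap.data(),
                                                            (uint32_t)chunkReorderMap.size());
    // ... create the merged asset from 'desc' with NvBlastCreateAsset, then NVBLAST_FREE the chunk and bond
    // descriptor arrays the descriptor references ...
*/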
/**
Transforms asset in place using scale, rotation, translation.
Chunk centroids, chunk bond centroids and bond normals are being transformed.
Chunk volume and bond area are changed accordingly.
\param[in, out] asset Pointer to the asset to be transformed (modified).
\param[in] scale Pointer to scale to be applied. Can be nullptr.
\param[in] rotation Pointer to rotation to be applied. Can be nullptr.
\param[in] translation Pointer to translation to be applied. Can be nullptr.
*/
NV_C_API void NvBlastExtAssetTransformInPlace
(
NvBlastAsset* asset,
const NvcVec3* scale,
const NvcQuat* rotation,
const NvcVec3* translation
);
#endif // ifndef NVBLASTEXTASSETUTILS_H
| 8,605 | C | 51.797546 | 193 | 0.750261 |