file_path (string, len 21-207) | content (string, len 5-1.02M) | size (int64, 5-1.02M) | lang (string, 9 classes) | avg_line_length (float64, 1.33-100) | max_line_length (int64, 4-993) | alphanum_fraction (float64, 0.27-0.93) |
---|---|---|---|---|---|---|
NVIDIA-Omniverse/PhysX/physx/source/pvd/src/PxProfileZoneManagerImpl.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PROFILE_ZONE_MANAGER_IMPL_H
#define PX_PROFILE_ZONE_MANAGER_IMPL_H
#include "PxProfileZoneManager.h"
#include "PxProfileScopedMutexLock.h"
#include "PxPvdProfileZone.h"
#include "PxProfileAllocatorWrapper.h"
#include "foundation/PxArray.h"
#include "foundation/PxMutex.h"
namespace physx { namespace profile {
struct NullEventNameProvider : public PxProfileNameProvider
{
virtual PxProfileNames getProfileNames() const { return PxProfileNames( 0, 0 ); }
};
class ZoneManagerImpl : public PxProfileZoneManager
{
typedef ScopedLockImpl<PxMutex> TScopedLockType;
PxProfileAllocatorWrapper mWrapper;
PxProfileArray<PxProfileZone*> mZones;
PxProfileArray<PxProfileZoneHandler*> mHandlers;
PxMutex mMutex;
ZoneManagerImpl( const ZoneManagerImpl& inOther );
ZoneManagerImpl& operator=( const ZoneManagerImpl& inOther );
public:
ZoneManagerImpl(PxAllocatorCallback* inFoundation)
: mWrapper( inFoundation )
, mZones( mWrapper )
, mHandlers( mWrapper )
{}
virtual ~ZoneManagerImpl()
{
//This assert would mean that a profile zone is outliving us.
//This will cause a crash when the profile zone is released.
PX_ASSERT( mZones.size() == 0 );
while( mZones.size() )
removeProfileZone( *mZones.back() );
}
virtual void addProfileZone( PxProfileZone& inSDK )
{
TScopedLockType lock( &mMutex );
if ( inSDK.getProfileZoneManager() != NULL )
{
if ( inSDK.getProfileZoneManager() == this )
return;
else //there must be two managers in the system somehow.
{
PX_ASSERT( false );
inSDK.getProfileZoneManager()->removeProfileZone( inSDK );
}
}
mZones.pushBack( &inSDK );
inSDK.setProfileZoneManager( this );
for ( uint32_t idx =0; idx < mHandlers.size(); ++idx )
mHandlers[idx]->onZoneAdded( inSDK );
}
virtual void removeProfileZone( PxProfileZone& inSDK )
{
TScopedLockType lock( &mMutex );
if ( inSDK.getProfileZoneManager() == NULL )
return;
else if ( inSDK.getProfileZoneManager() != this )
{
PX_ASSERT( false );
inSDK.getProfileZoneManager()->removeProfileZone( inSDK );
return;
}
inSDK.setProfileZoneManager( NULL );
for ( uint32_t idx = 0; idx < mZones.size(); ++idx )
{
if ( mZones[idx] == &inSDK )
{
for ( uint32_t handler =0; handler < mHandlers.size(); ++handler )
mHandlers[handler]->onZoneRemoved( inSDK );
mZones.replaceWithLast( idx );
}
}
}
virtual void flushProfileEvents()
{
uint32_t sdkCount = mZones.size();
for ( uint32_t idx = 0; idx < sdkCount; ++idx )
mZones[idx]->flushProfileEvents();
}
virtual void addProfileZoneHandler( PxProfileZoneHandler& inHandler )
{
TScopedLockType lock( &mMutex );
mHandlers.pushBack( &inHandler );
for ( uint32_t idx = 0; idx < mZones.size(); ++idx )
inHandler.onZoneAdded( *mZones[idx] );
}
virtual void removeProfileZoneHandler( PxProfileZoneHandler& inHandler )
{
TScopedLockType lock( &mMutex );
for( uint32_t idx = 0; idx < mZones.size(); ++idx )
inHandler.onZoneRemoved( *mZones[idx] );
for( uint32_t idx = 0; idx < mHandlers.size(); ++idx )
{
if ( mHandlers[idx] == &inHandler )
mHandlers.replaceWithLast( idx );
}
}
virtual PxProfileZone& createProfileZone( const char* inSDKName, PxProfileNameProvider* inProvider, uint32_t inEventBufferByteSize )
{
NullEventNameProvider nullProvider;
if ( inProvider == NULL )
inProvider = &nullProvider;
return createProfileZone( inSDKName, inProvider->getProfileNames(), inEventBufferByteSize );
}
virtual PxProfileZone& createProfileZone( const char* inSDKName, PxProfileNames inNames, uint32_t inEventBufferByteSize )
{
PxProfileZone& retval( PxProfileZone::createProfileZone( &mWrapper.getAllocator(), inSDKName, inNames, inEventBufferByteSize ) );
addProfileZone( retval );
return retval;
}
virtual void release()
{
PX_PROFILE_DELETE( mWrapper.getAllocator(), this );
}
};
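// --- Editorial sketch (not part of the original file) ---
// A minimal handler wired through the manager. addProfileZoneHandler replays
// onZoneAdded for zones registered before the handler, so a handler observes
// every live zone exactly once regardless of registration order:
//
//   struct LoggingHandler : public PxProfileZoneHandler
//   {
//       virtual void onZoneAdded( PxProfileZone& z ) { /* subscribe to the zone */ PX_UNUSED(z); }
//       virtual void onZoneRemoved( PxProfileZone& z ) { /* unsubscribe */ PX_UNUSED(z); }
//   };
//   // manager.addProfileZoneHandler( handler ); // replays already-added zones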
} }
#endif
| 5,729 | C | 32.121387 | 134 | 0.713039 |
NVIDIA-Omniverse/PhysX/physx/source/pvd/src/PxPvdCommStreamTypes.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
#ifndef PX_PVD_COMM_STREAM_TYPES_H
#define PX_PVD_COMM_STREAM_TYPES_H
#include "foundation/PxErrorCallback.h"
#include "common/PxRenderBuffer.h"
#include "pvd/PxPvdTransport.h"
#include "PxPvdObjectModelBaseTypes.h"
#include "PxPvdCommStreamEvents.h"
#include "PxPvdDataStream.h"
#include "foundation/PxMutex.h"
namespace physx
{
namespace profile
{
class PxProfileZone;
class PxProfileMemoryEventBuffer;
}
namespace pvdsdk
{
struct PvdErrorMessage;
class PvdObjectModelMetaData;
DEFINE_PVD_TYPE_NAME_MAP(profile::PxProfileZone, "_debugger_", "PxProfileZone")
DEFINE_PVD_TYPE_NAME_MAP(profile::PxProfileMemoryEventBuffer, "_debugger_", "PxProfileMemoryEventBuffer")
DEFINE_PVD_TYPE_NAME_MAP(PvdErrorMessage, "_debugger_", "PvdErrorMessage")
// All event streams are on the 'events' property of objects of these types
static inline NamespacedName getMemoryEventTotalsClassName()
{
return NamespacedName("_debugger", "MemoryEventTotals");
}
class PvdOMMetaDataProvider
{
protected:
virtual ~PvdOMMetaDataProvider()
{
}
public:
virtual void addRef() = 0;
virtual void release() = 0;
virtual PvdObjectModelMetaData& lock() = 0;
virtual void unlock() = 0;
virtual bool createInstance(const NamespacedName& clsName, const void* instance) = 0;
virtual bool isInstanceValid(const void* instance) = 0;
virtual void destroyInstance(const void* instance) = 0;
virtual int32_t getInstanceClassType(const void* instance) = 0;
};
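// Editorial note (sketch): lock()/unlock() are intended to be paired; the RAII
// helper ScopedMetaData in PxPvdDataStream.cpp locks in its constructor and
// unlocks in its destructor, so callers cannot leak the lock on early return.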
class PvdCommStreamEmbeddedTypes
{
public:
static const char* getProfileEventStreamSemantic()
{
return "profile event stream";
}
static const char* getMemoryEventStreamSemantic()
{
return "memory event stream";
}
static const char* getRendererEventStreamSemantic()
{
return "render event stream";
}
};
class PvdCommStreamEventBufferClient;
template <typename TStreamType>
struct EventStreamifier : public PvdEventSerializer
{
TStreamType& mBuffer;
EventStreamifier(TStreamType& buf) : mBuffer(buf)
{
}
template <typename TDataType>
void write(const TDataType& type)
{
mBuffer.write(reinterpret_cast<const uint8_t*>(&type), sizeof(TDataType));
}
template <typename TDataType>
void write(const TDataType* type, uint32_t count)
{
mBuffer.write(reinterpret_cast<const uint8_t*>(type), count * sizeof(TDataType));
}
void writeRef(DataRef<const uint8_t> data)
{
uint32_t amount = static_cast<uint32_t>(data.size());
write(amount);
write(data.begin(), amount);
}
void writeRef(DataRef<StringHandle> data)
{
uint32_t amount = static_cast<uint32_t>(data.size());
write(amount);
write(data.begin(), amount);
}
template <typename TDataType>
void writeRef(DataRef<TDataType> data)
{
uint32_t amount = static_cast<uint32_t>(data.size());
write(amount);
for(uint32_t idx = 0; idx < amount; ++idx)
{
TDataType& dtype(const_cast<TDataType&>(data[idx]));
dtype.serialize(*this);
}
}
virtual void streamify(uint16_t& val)
{
write(val);
}
virtual void streamify(uint8_t& val)
{
write(val);
}
virtual void streamify(uint32_t& val)
{
write(val);
}
virtual void streamify(float& val)
{
write(val);
}
virtual void streamify(uint64_t& val)
{
write(val);
}
virtual void streamify(PxDebugText& val)
{
write(val.color);
write(val.position);
write(val.size);
streamify(val.string);
}
virtual void streamify(String& val)
{
uint32_t len = 0;
String temp = nonNull(val);
if(*temp)
len = static_cast<uint32_t>(strlen(temp) + 1);
write(len);
write(temp, len); // write through the non-null pointer; len includes the terminator
}
virtual void streamify(DataRef<const uint8_t>& val)
{
writeRef(val);
}
virtual void streamify(DataRef<NameHandleValue>& val)
{
writeRef(val);
}
virtual void streamify(DataRef<StreamPropMessageArg>& val)
{
writeRef(val);
}
virtual void streamify(DataRef<StringHandle>& val)
{
writeRef(val);
}
private:
EventStreamifier& operator=(const EventStreamifier&);
};
struct MeasureStream
{
uint32_t mSize;
MeasureStream() : mSize(0)
{
}
template <typename TDataType>
void write(const TDataType& val)
{
mSize += sizeof(val);
}
template <typename TDataType>
void write(const TDataType*, uint32_t count)
{
mSize += sizeof(TDataType) * count;
}
};
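// Editorial sketch: MeasureStream is a counting stand-in for a real stream,
// letting an event be sized before it is serialized for real (see
// PvdOutStream::addEvent in PxPvdDataStream.cpp):
//   MeasureStream measure;
//   PvdCommStreamEventSink::writeStreamEvent(evt, evtType, measure);
//   // measure.mSize now holds the payload size for the EventGroup header.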
struct DataStreamState
{
enum Enum
{
Open,
SetPropertyValue,
PropertyMessageGroup
};
};
} // pvdsdk
} // physx
#endif
| 5,865 | C | 24.504348 | 105 | 0.73572 |
NVIDIA-Omniverse/PhysX/physx/source/pvd/src/PxPvdDataStream.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
#include "foundation/PxAssert.h"
#include "PxPvdCommStreamEventSink.h"
#include "PxPvdDataStreamHelpers.h"
#include "PxPvdObjectModelInternalTypes.h"
#include "PxPvdImpl.h"
using namespace physx;
using namespace physx::pvdsdk;
namespace
{
struct ScopedMetaData
{
PvdOMMetaDataProvider& mProvider;
PvdObjectModelMetaData& mMeta;
ScopedMetaData(PvdOMMetaDataProvider& provider) : mProvider(provider), mMeta(provider.lock())
{
}
~ScopedMetaData()
{
mProvider.unlock();
}
PvdObjectModelMetaData* operator->()
{
return &mMeta;
}
private:
ScopedMetaData& operator=(const ScopedMetaData&);
};
struct PropertyDefinitionHelper : public PvdPropertyDefinitionHelper
{
PvdDataStream* mStream;
PvdOMMetaDataProvider& mProvider;
PxArray<char> mNameBuffer;
PxArray<uint32_t> mNameStack;
PxArray<NamedValue> mNamedValues;
PxArray<PropertyMessageArg> mPropertyMessageArgs;
PropertyDefinitionHelper(PvdOMMetaDataProvider& provider)
: mStream(NULL)
, mProvider(provider)
, mNameBuffer("PropertyDefinitionHelper::mNameBuffer")
, mNameStack("PropertyDefinitionHelper::mNameStack")
, mNamedValues("PropertyDefinitionHelper::mNamedValues")
, mPropertyMessageArgs("PropertyDefinitionHelper::mPropertyMessageArgs")
{
}
void setStream(PvdDataStream* stream)
{
mStream = stream;
}
inline void appendStrToBuffer(const char* str)
{
if(str == NULL)
return;
size_t strLen = strlen(str);
size_t endBufOffset = mNameBuffer.size();
size_t resizeLen = endBufOffset;
// account for null
if(mNameBuffer.empty())
resizeLen += 1;
else
endBufOffset -= 1;
mNameBuffer.resize(static_cast<uint32_t>(resizeLen + strLen));
char* endPtr = mNameBuffer.begin() + endBufOffset;
PxMemCopy(endPtr, str, static_cast<uint32_t>(strLen));
}
virtual void pushName(const char* nm, const char* appender = ".")
{
size_t nameBufLen = mNameBuffer.size();
mNameStack.pushBack(static_cast<uint32_t>(nameBufLen));
if(mNameBuffer.empty() == false)
appendStrToBuffer(appender);
appendStrToBuffer(nm);
mNameBuffer.back() = 0;
}
virtual void pushBracketedName(const char* inName, const char* leftBracket = "[", const char* rightBracket = "]")
{
size_t nameBufLen = mNameBuffer.size();
mNameStack.pushBack(static_cast<uint32_t>(nameBufLen));
appendStrToBuffer(leftBracket);
appendStrToBuffer(inName);
appendStrToBuffer(rightBracket);
mNameBuffer.back() = 0;
}
virtual void popName()
{
if(mNameStack.empty())
return;
mNameBuffer.resize(static_cast<uint32_t>(mNameStack.back()));
mNameStack.popBack();
if(mNameBuffer.empty() == false)
mNameBuffer.back() = 0;
}
virtual const char* getTopName()
{
if(mNameBuffer.size())
return mNameBuffer.begin();
return "";
}
virtual void clearNameStack()
{
mNameBuffer.clear();
mNameStack.clear();
}
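// Editorial sketch: the offset stack composes nested property names, e.g.
//   pushName("pose");          // top name: "pose"
//   pushBracketedName("0");    // top name: "pose[0]"
//   pushName("p");             // top name: "pose[0].p"
//   popName();                 // top name: "pose[0]" again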
virtual void addNamedValue(const char* name, uint32_t value)
{
mNamedValues.pushBack(NamedValue(name, value));
}
virtual void clearNamedValues()
{
mNamedValues.clear();
}
virtual DataRef<NamedValue> getNamedValues()
{
return DataRef<NamedValue>(mNamedValues.begin(), mNamedValues.size());
}
virtual void createProperty(const NamespacedName& clsName, const char* inSemantic, const NamespacedName& dtypeName,
PropertyType::Enum propType)
{
mStream->createProperty(clsName, getTopName(), inSemantic, dtypeName, propType, getNamedValues());
clearNamedValues();
}
const char* registerStr(const char* str)
{
ScopedMetaData scopedProvider(mProvider);
return scopedProvider->getStringTable().registerStr(str);
}
virtual void addPropertyMessageArg(const NamespacedName& inDatatype, uint32_t inOffset, uint32_t inSize)
{
mPropertyMessageArgs.pushBack(PropertyMessageArg(registerStr(getTopName()), inDatatype, inOffset, inSize));
}
virtual void addPropertyMessage(const NamespacedName& clsName, const NamespacedName& msgName,
uint32_t inStructSizeInBytes)
{
if(mPropertyMessageArgs.empty())
{
PX_ASSERT(false);
return;
}
mStream->createPropertyMessage(
clsName, msgName, DataRef<PropertyMessageArg>(mPropertyMessageArgs.begin(), mPropertyMessageArgs.size()),
inStructSizeInBytes);
}
virtual void clearPropertyMessageArgs()
{
mPropertyMessageArgs.clear();
}
private:
PropertyDefinitionHelper& operator=(const PropertyDefinitionHelper&);
};
class PvdMemPool
{
// Array of fixed-size memory pages
PxArray<uint8_t*> mMemBuffer;
uint32_t mLength;
uint32_t mBufIndex;
// 4k for one page
static const int BUFFER_LENGTH = 4096;
PX_NOCOPY(PvdMemPool)
public:
PvdMemPool(const char* bufDataName) : mMemBuffer(bufDataName), mLength(0), mBufIndex(0)
{
grow();
}
~PvdMemPool()
{
for(uint32_t i = 0; i < mMemBuffer.size(); i++)
{
PX_FREE(mMemBuffer[i]);
}
}
void grow()
{
if(mBufIndex + 1 < mMemBuffer.size())
{
mBufIndex++;
}
else
{
uint8_t* Buf = reinterpret_cast<uint8_t*>(PX_ALLOC(BUFFER_LENGTH, "PvdMemPool::mMemBuffer.buf"));
mMemBuffer.pushBack(Buf);
mBufIndex = mMemBuffer.size() - 1;
}
mLength = 0;
}
void* allocate(uint32_t length)
{
if(length > uint32_t(BUFFER_LENGTH))
return NULL;
if(length + mLength > uint32_t(BUFFER_LENGTH))
grow();
void* mem = reinterpret_cast<void*>(&mMemBuffer[mBufIndex][mLength]);
mLength += length;
return mem;
}
void clear()
{
mLength = 0;
mBufIndex = 0;
}
};
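// Editorial sketch: typical pool use, mirroring allocateMemForCmd /
// flushPvdCommand below. Requests larger than the 4 KiB page return NULL;
// everything else is bump-allocated from the current page:
//   PvdMemPool pool("example");
//   void* mem = pool.allocate(64);  // carved from the current page
//   pool.clear();                   // pages are retained for reuse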
struct PvdOutStream : public PvdDataStream, public PxUserAllocated
{
PxHashMap<String, uint32_t> mStringHashMap;
PvdOMMetaDataProvider& mMetaDataProvider;
PxArray<uint8_t> mTempBuffer;
PropertyDefinitionHelper mPropertyDefinitionHelper;
DataStreamState::Enum mStreamState;
ClassDescription mSPVClass;
PropertyMessageDescription mMessageDesc;
// Set property value and SetPropertyMessage calls require
// us to write the data out to a separate buffer
// when strings are involved.
ForwardingMemoryBuffer mSPVBuffer;
uint32_t mEventCount;
uint32_t mPropertyMessageSize;
bool mConnected;
uint64_t mStreamId;
PxArray<PvdCommand*> mPvdCommandArray;
PvdMemPool mPvdCommandPool;
PxPvdTransport& mTransport;
PvdOutStream(PxPvdTransport& transport, PvdOMMetaDataProvider& provider, uint64_t streamId)
: mStringHashMap("PvdOutStream::mStringHashMap")
, mMetaDataProvider(provider)
, mTempBuffer("PvdOutStream::mTempBuffer")
, mPropertyDefinitionHelper(mMetaDataProvider)
, mStreamState(DataStreamState::Open)
, mSPVBuffer("PvdCommStreamBufferedEventSink::mSPVBuffer")
, mEventCount(0)
, mPropertyMessageSize(0)
, mConnected(true)
, mStreamId(streamId)
, mPvdCommandArray("PvdCommStreamBufferedEventSink::mPvdCommandArray")
, mPvdCommandPool("PvdCommStreamBufferedEventSink::mPvdCommandPool")
, mTransport(transport)
{
mPropertyDefinitionHelper.setStream(this);
}
virtual ~PvdOutStream()
{
}
virtual void release()
{
PVD_DELETE(this);
}
StringHandle toStream(String nm)
{
if(nm == NULL || *nm == 0)
return 0;
const PxHashMap<String, uint32_t>::Entry* entry(mStringHashMap.find(nm));
if(entry)
return entry->second;
ScopedMetaData meta(mMetaDataProvider);
StringHandle hdl = meta->getStringTable().strToHandle(nm);
nm = meta->getStringTable().handleToStr(hdl);
handlePvdEvent(StringHandleEvent(nm, hdl));
mStringHashMap.insert(nm, hdl);
return hdl;
}
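// Editorial note: strings are interned once per stream. The first use of a
// string emits a StringHandleEvent that maps handle -> text; subsequent uses
// send only the 32-bit handle cached in mStringHashMap.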
StreamNamespacedName toStream(const NamespacedName& nm)
{
return StreamNamespacedName(toStream(nm.mNamespace), toStream(nm.mName));
}
bool isClassExist(const NamespacedName& nm)
{
ScopedMetaData meta(mMetaDataProvider);
return meta->findClass(nm).hasValue();
}
bool createMetaClass(const NamespacedName& nm)
{
ScopedMetaData meta(mMetaDataProvider);
meta->getOrCreateClass(nm);
return true;
}
bool deriveMetaClass(const NamespacedName& parent, const NamespacedName& child)
{
ScopedMetaData meta(mMetaDataProvider);
return meta->deriveClass(parent, child);
}
// You will notice that some functions are compiled out (via #if PX_DEBUG)
// throughout this file. This is because they are only called from asserts,
// so they are never called in release builds; defining them anyway triggers
// unused-function warnings under snc, which break the build.
#if PX_DEBUG
bool propertyExists(const NamespacedName& nm, String pname)
{
ScopedMetaData meta(mMetaDataProvider);
return meta->findProperty(nm, pname).hasValue();
}
#endif
PvdError boolToError(bool val)
{
if(val)
return PvdErrorType::Success;
return PvdErrorType::NetworkError;
}
// PvdMetaDataStream
virtual PvdError createClass(const NamespacedName& nm)
{
PX_ASSERT(mStreamState == DataStreamState::Open);
#if PX_DEBUG
PX_ASSERT(isClassExist(nm) == false);
#endif
createMetaClass(nm);
return boolToError(handlePvdEvent(CreateClass(toStream(nm))));
}
virtual PvdError deriveClass(const NamespacedName& parent, const NamespacedName& child)
{
PX_ASSERT(mStreamState == DataStreamState::Open);
#if PX_DEBUG
PX_ASSERT(isClassExist(parent));
PX_ASSERT(isClassExist(child));
#endif
deriveMetaClass(parent, child);
return boolToError(handlePvdEvent(DeriveClass(toStream(parent), toStream(child))));
}
template <typename TDataType>
TDataType* allocTemp(uint32_t numItems)
{
uint32_t desiredBytes = numItems * sizeof(TDataType);
if(desiredBytes > mTempBuffer.size())
mTempBuffer.resize(desiredBytes);
TDataType* retval = reinterpret_cast<TDataType*>(mTempBuffer.begin());
if(numItems)
{
PVD_FOREACH(idx, numItems) new (retval + idx) TDataType();
}
return retval;
}
#if PX_DEBUG
// Property datatypes need to be uniform.
// At this point, the data stream cannot handle property datatypes that
// require destruction (checked via mRequiresDestruction below).
// A struct with a float member and a char member would work.
// A struct with a float member and a long member would also work (more efficiently).
bool isValidPropertyDatatype(const NamespacedName& dtypeName)
{
ScopedMetaData meta(mMetaDataProvider);
ClassDescription clsDesc(meta->findClass(dtypeName));
return clsDesc.mRequiresDestruction == false;
}
#endif
NamespacedName createMetaProperty(const NamespacedName& clsName, String name, String semantic,
const NamespacedName& dtypeName, PropertyType::Enum propertyType)
{
ScopedMetaData meta(mMetaDataProvider);
int32_t dtypeType = meta->findClass(dtypeName)->mClassId;
NamespacedName typeName = dtypeName;
if(dtypeType == getPvdTypeForType<String>())
{
dtypeType = getPvdTypeForType<StringHandle>();
typeName = getPvdNamespacedNameForType<StringHandle>();
}
Option<PropertyDescription> propOpt =
meta->createProperty(meta->findClass(clsName)->mClassId, name, semantic, dtypeType, propertyType);
PX_ASSERT(propOpt.hasValue());
PX_UNUSED(propOpt);
return typeName;
}
virtual PvdError createProperty(const NamespacedName& clsName, String name, String semantic,
const NamespacedName& incomingDtypeName, PropertyType::Enum propertyType,
DataRef<NamedValue> values)
{
PX_ASSERT(mStreamState == DataStreamState::Open);
#if PX_DEBUG
PX_ASSERT(isClassExist(clsName));
PX_ASSERT(propertyExists(clsName, name) == false);
#endif
NamespacedName dtypeName(incomingDtypeName);
if(safeStrEq(dtypeName.mName, "VoidPtr"))
dtypeName.mName = "ObjectRef";
#if PX_DEBUG
PX_ASSERT(isClassExist(dtypeName));
PX_ASSERT(isValidPropertyDatatype(dtypeName));
#endif
NamespacedName typeName = createMetaProperty(clsName, name, semantic, dtypeName, propertyType);
// Can't have arrays of strings or arrays of string handles due to the difficulty
// of quickly dealing with them on the network receiving side.
if(propertyType == PropertyType::Array && safeStrEq(typeName.mName, "StringHandle"))
{
PX_ASSERT(false);
return PvdErrorType::ArgumentError;
}
uint32_t numItems = values.size();
NameHandleValue* streamValues = allocTemp<NameHandleValue>(numItems);
PVD_FOREACH(idx, numItems)
streamValues[idx] = NameHandleValue(toStream(values[idx].mName), values[idx].mValue);
CreateProperty evt(toStream(clsName), toStream(name), toStream(semantic), toStream(typeName), propertyType,
DataRef<NameHandleValue>(streamValues, numItems));
return boolToError(handlePvdEvent(evt));
}
bool createMetaPropertyMessage(const NamespacedName& cls, const NamespacedName& msgName,
DataRef<PropertyMessageArg> entries, uint32_t messageSizeInBytes)
{
ScopedMetaData meta(mMetaDataProvider);
return meta->createPropertyMessage(cls, msgName, entries, messageSizeInBytes).hasValue();
}
#if PX_DEBUG
bool messageExists(const NamespacedName& msgName)
{
ScopedMetaData meta(mMetaDataProvider);
return meta->findPropertyMessage(msgName).hasValue();
}
#endif
virtual PvdError createPropertyMessage(const NamespacedName& cls, const NamespacedName& msgName,
DataRef<PropertyMessageArg> entries, uint32_t messageSizeInBytes)
{
PX_ASSERT(mStreamState == DataStreamState::Open);
#if PX_DEBUG
PX_ASSERT(isClassExist(cls));
PX_ASSERT(messageExists(msgName) == false);
#endif
createMetaPropertyMessage(cls, msgName, entries, messageSizeInBytes);
uint32_t numItems = entries.size();
StreamPropMessageArg* streamValues = allocTemp<StreamPropMessageArg>(numItems);
PVD_FOREACH(idx, numItems)
streamValues[idx] =
StreamPropMessageArg(toStream(entries[idx].mPropertyName), toStream(entries[idx].mDatatypeName),
entries[idx].mMessageOffset, entries[idx].mByteSize);
CreatePropertyMessage evt(toStream(cls), toStream(msgName),
DataRef<StreamPropMessageArg>(streamValues, numItems), messageSizeInBytes);
return boolToError(handlePvdEvent(evt));
}
uint64_t toStream(const void* instance)
{
return PVD_POINTER_TO_U64(instance);
}
virtual PvdError createInstance(const NamespacedName& cls, const void* instance)
{
PX_ASSERT(isInstanceValid(instance) == false);
PX_ASSERT(mStreamState == DataStreamState::Open);
bool success = mMetaDataProvider.createInstance(cls, instance);
PX_ASSERT(success);
(void)success;
return boolToError(handlePvdEvent(CreateInstance(toStream(cls), toStream(instance))));
}
virtual bool isInstanceValid(const void* instance)
{
return mMetaDataProvider.isInstanceValid(instance);
}
#if PX_DEBUG
// Checks that the incoming value type can be marshalled to the declared
// property type (or, for pointer values, that the property is an ObjectRef).
bool checkPropertyType(const void* instance, String name, const NamespacedName& incomingType)
{
int32_t instType = mMetaDataProvider.getInstanceClassType(instance);
ScopedMetaData meta(mMetaDataProvider);
Option<PropertyDescription> prop = meta->findProperty(instType, name);
if(prop.hasValue() == false)
return false;
int32_t propType = prop->mDatatype;
int32_t incomingTypeId = meta->findClass(incomingType)->mClassId;
if(incomingTypeId != getPvdTypeForType<VoidPtr>())
{
MarshalQueryResult result = meta->checkMarshalling(incomingTypeId, propType);
bool possible = result.needsMarshalling == false || result.canMarshal;
return possible;
}
else
{
if(propType != getPvdTypeForType<ObjectRef>())
return false;
}
return true;
}
#endif
DataRef<const uint8_t> bufferPropertyValue(ClassDescriptionSizeInfo info, DataRef<const uint8_t> data)
{
uint32_t realSize = info.mByteSize;
uint32_t numItems = data.size() / realSize;
if(info.mPtrOffsets.size() != 0)
{
mSPVBuffer.clear();
PVD_FOREACH(item, numItems)
{
const uint8_t* itemPtr = data.begin() + item * realSize;
mSPVBuffer.write(itemPtr, realSize);
PVD_FOREACH(stringIdx, info.mPtrOffsets.size())
{
PtrOffset offset(info.mPtrOffsets[stringIdx]);
if(offset.mOffsetType == PtrOffsetType::VoidPtrOffset)
continue;
const char* strPtr;
physx::intrinsics::memCopy(&strPtr, itemPtr + offset.mOffset, sizeof(char*));
strPtr = nonNull(strPtr);
uint32_t len = safeStrLen(strPtr) + 1;
mSPVBuffer.write(strPtr, len);
}
}
data = DataRef<const uint8_t>(mSPVBuffer.begin(), mSPVBuffer.size());
}
return data;
}
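// Editorial note on the buffered layout produced above: for each item the raw
// struct bytes are written first, followed by the null-terminated text of
// every string member in mPtrOffsets order:
//   [item0 bytes][item0 str0]...[item1 bytes][item1 str0]...
// (Presumably the receiver uses the same offsets to re-point the strings.)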
virtual PvdError setPropertyValue(const void* instance, String name, DataRef<const uint8_t> data,
const NamespacedName& incomingTypeName)
{
PX_ASSERT(isInstanceValid(instance));
#if PX_DEBUG
PX_ASSERT(isClassExist(incomingTypeName));
#endif
PX_ASSERT(mStreamState == DataStreamState::Open);
ClassDescription clsDesc;
{
ScopedMetaData meta(mMetaDataProvider);
clsDesc = meta->findClass(incomingTypeName);
}
uint32_t realSize = clsDesc.getNativeSize();
uint32_t numItems = data.size() / realSize;
data = bufferPropertyValue(clsDesc.getNativeSizeInfo(), data);
SetPropertyValue evt(toStream(instance), toStream(name), data, toStream(incomingTypeName), numItems);
return boolToError(handlePvdEvent(evt));
}
// If instead the property is very large (e.g. contact reports), it can be
// sent in chunks via the begin/append/end calls below.
virtual PvdError beginSetPropertyValue(const void* instance, String name, const NamespacedName& incomingTypeName)
{
PX_ASSERT(isInstanceValid(instance));
#if PX_DEBUG
PX_ASSERT(isClassExist(incomingTypeName));
PX_ASSERT(checkPropertyType(instance, name, incomingTypeName));
#endif
PX_ASSERT(mStreamState == DataStreamState::Open);
mStreamState = DataStreamState::SetPropertyValue;
{
ScopedMetaData meta(mMetaDataProvider);
mSPVClass = meta->findClass(incomingTypeName);
}
BeginSetPropertyValue evt(toStream(instance), toStream(name), toStream(incomingTypeName));
return boolToError(handlePvdEvent(evt));
}
virtual PvdError appendPropertyValueData(DataRef<const uint8_t> data)
{
uint32_t realSize = mSPVClass.getNativeSize();
uint32_t numItems = data.size() / realSize;
data = bufferPropertyValue(mSPVClass.getNativeSizeInfo(), data);
PX_ASSERT(mStreamState == DataStreamState::SetPropertyValue);
return boolToError(handlePvdEvent(AppendPropertyValueData(data, numItems)));
}
virtual PvdError endSetPropertyValue()
{
PX_ASSERT(mStreamState == DataStreamState::SetPropertyValue);
mStreamState = DataStreamState::Open;
return boolToError(handlePvdEvent(EndSetPropertyValue()));
}
#if PX_DEBUG
bool checkPropertyMessage(const void* instance, const NamespacedName& msgName)
{
int32_t clsId = mMetaDataProvider.getInstanceClassType(instance);
ScopedMetaData meta(mMetaDataProvider);
PropertyMessageDescription desc(meta->findPropertyMessage(msgName));
bool retval = meta->isDerivedFrom(clsId, desc.mClassId);
return retval;
}
#endif
DataRef<const uint8_t> bufferPropertyMessage(const PropertyMessageDescription& desc, DataRef<const uint8_t> data)
{
if(desc.mStringOffsets.size())
{
mSPVBuffer.clear();
mSPVBuffer.write(data.begin(), data.size());
PVD_FOREACH(idx, desc.mStringOffsets.size())
{
const char* strPtr;
physx::intrinsics::memCopy(&strPtr, data.begin() + desc.mStringOffsets[idx], sizeof(char*));
strPtr = nonNull(strPtr);
uint32_t len = safeStrLen(strPtr) + 1;
mSPVBuffer.write(strPtr, len);
}
data = DataRef<const uint8_t>(mSPVBuffer.begin(), mSPVBuffer.end());
}
return data;
}
virtual PvdError setPropertyMessage(const void* instance, const NamespacedName& msgName, DataRef<const uint8_t> data)
{
ScopedMetaData meta(mMetaDataProvider);
PX_ASSERT(isInstanceValid(instance));
#if PX_DEBUG
PX_ASSERT(messageExists(msgName));
PX_ASSERT(checkPropertyMessage(instance, msgName));
#endif
PropertyMessageDescription desc(meta->findPropertyMessage(msgName));
if(data.size() < desc.mMessageByteSize)
{
PX_ASSERT(false);
return PvdErrorType::ArgumentError;
}
data = bufferPropertyMessage(desc, data);
PX_ASSERT(mStreamState == DataStreamState::Open);
return boolToError(handlePvdEvent(SetPropertyMessage(toStream(instance), toStream(msgName), data)));
}
#if PX_DEBUG
bool checkBeginPropertyMessageGroup(const NamespacedName& msgName)
{
ScopedMetaData meta(mMetaDataProvider);
PropertyMessageDescription desc(meta->findPropertyMessage(msgName));
return desc.mStringOffsets.size() == 0;
}
#endif
// If you need to send a lot of identical messages, this avoids a hashtable lookup per message.
virtual PvdError beginPropertyMessageGroup(const NamespacedName& msgName)
{
#if PX_DEBUG
PX_ASSERT(messageExists(msgName));
PX_ASSERT(checkBeginPropertyMessageGroup(msgName));
#endif
PX_ASSERT(mStreamState == DataStreamState::Open);
mStreamState = DataStreamState::PropertyMessageGroup;
ScopedMetaData meta(mMetaDataProvider);
mMessageDesc = meta->findPropertyMessage(msgName);
return boolToError(handlePvdEvent(BeginPropertyMessageGroup(toStream(msgName))));
}
virtual PvdError sendPropertyMessageFromGroup(const void* instance, DataRef<const uint8_t> data)
{
PX_ASSERT(mStreamState == DataStreamState::PropertyMessageGroup);
PX_ASSERT(isInstanceValid(instance));
#if PX_DEBUG
PX_ASSERT(checkPropertyMessage(instance, mMessageDesc.mMessageName));
#endif
if(mMessageDesc.mMessageByteSize != data.size())
{
PX_ASSERT(false);
return PvdErrorType::ArgumentError;
}
data = bufferPropertyMessage(mMessageDesc, data);
return boolToError(handlePvdEvent(SendPropertyMessageFromGroup(toStream(instance), data)));
}
virtual PvdError endPropertyMessageGroup()
{
PX_ASSERT(mStreamState == DataStreamState::PropertyMessageGroup);
mStreamState = DataStreamState::Open;
return boolToError(handlePvdEvent(EndPropertyMessageGroup()));
}
virtual PvdError pushBackObjectRef(const void* instance, String propName, const void* data)
{
PX_ASSERT(isInstanceValid(instance));
PX_ASSERT(isInstanceValid(data));
PX_ASSERT(mStreamState == DataStreamState::Open);
return boolToError(handlePvdEvent(PushBackObjectRef(toStream(instance), toStream(propName), toStream(data))));
}
virtual PvdError removeObjectRef(const void* instance, String propName, const void* data)
{
PX_ASSERT(isInstanceValid(instance));
PX_ASSERT(isInstanceValid(data));
PX_ASSERT(mStreamState == DataStreamState::Open);
return boolToError(handlePvdEvent(RemoveObjectRef(toStream(instance), toStream(propName), toStream(data))));
}
// Instance elimination.
virtual PvdError destroyInstance(const void* instance)
{
PX_ASSERT(isInstanceValid(instance));
PX_ASSERT(mStreamState == DataStreamState::Open);
mMetaDataProvider.destroyInstance(instance);
return boolToError(handlePvdEvent(DestroyInstance(toStream(instance))));
}
// Profiling hooks
virtual PvdError beginSection(const void* instance, String name)
{
PX_ASSERT(mStreamState == DataStreamState::Open);
return boolToError(handlePvdEvent(
BeginSection(toStream(instance), toStream(name), PxTime::getCurrentCounterValue())));
}
virtual PvdError endSection(const void* instance, String name)
{
PX_ASSERT(mStreamState == DataStreamState::Open);
return boolToError(handlePvdEvent(
EndSection(toStream(instance), toStream(name), PxTime::getCurrentCounterValue())));
}
virtual PvdError originShift(const void* scene, PxVec3 shift)
{
PX_ASSERT(mStreamState == DataStreamState::Open);
return boolToError(handlePvdEvent(OriginShift(toStream(scene), shift)));
}
virtual void addProfileZone(void* zone, const char* name)
{
handlePvdEvent(AddProfileZone(toStream(zone), name));
}
virtual void addProfileZoneEvent(void* zone, const char* name, uint16_t eventId, bool compileTimeEnabled)
{
handlePvdEvent(AddProfileZoneEvent(toStream(zone), name, eventId, compileTimeEnabled));
}
// add a variable sized event
void addEvent(const EventSerializeable& evt, PvdCommStreamEventTypes::Enum evtType)
{
MeasureStream measure;
PvdCommStreamEventSink::writeStreamEvent(evt, evtType, measure);
EventGroup evtGroup(measure.mSize, 1, mStreamId, PxTime::getCurrentCounterValue());
EventStreamifier<PxPvdTransport> streamifier(mTransport.lock());
evtGroup.serialize(streamifier);
PvdCommStreamEventSink::writeStreamEvent(evt, evtType, mTransport);
mTransport.unlock();
}
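// Editorial note on the wire format produced above (inferred from the code):
//   [EventGroup: size, count=1, streamId, timestamp][event payload bytes]
// MeasureStream computes the payload size without writing anything, so the
// EventGroup header can be emitted before the event itself.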
void setIsTopLevelUIElement(const void* instance, bool topLevel)
{
addEvent(SetIsTopLevel(static_cast<uint64_t>(reinterpret_cast<size_t>(instance)), topLevel),
getCommStreamEventType<SetIsTopLevel>());
}
void sendErrorMessage(uint32_t code, const char* message, const char* file, uint32_t line)
{
addEvent(ErrorMessage(code, message, file, line), getCommStreamEventType<ErrorMessage>());
}
void updateCamera(const char* name, const PxVec3& origin, const PxVec3& up, const PxVec3& target)
{
addEvent(SetCamera(name, origin, up, target), getCommStreamEventType<SetCamera>());
}
template <typename TEventType>
bool handlePvdEvent(const TEventType& evt)
{
addEvent(evt, getCommStreamEventType<TEventType>());
return mConnected;
}
virtual PvdPropertyDefinitionHelper& getPropertyDefinitionHelper()
{
mPropertyDefinitionHelper.clearBufferedData();
return mPropertyDefinitionHelper;
}
virtual bool isConnected()
{
return mConnected;
}
virtual void* allocateMemForCmd(uint32_t length)
{
return mPvdCommandPool.allocate(length);
}
virtual void pushPvdCommand(PvdCommand& cmd)
{
mPvdCommandArray.pushBack(&cmd);
}
virtual void flushPvdCommand()
{
uint32_t cmdQueueSize = mPvdCommandArray.size();
for(uint32_t i = 0; i < cmdQueueSize; i++)
{
if(mPvdCommandArray[i])
{
// if(mPvdCommandArray[i]->canRun(*this))
mPvdCommandArray[i]->run(*this);
mPvdCommandArray[i]->~PvdCommand();
}
}
mPvdCommandArray.clear();
mPvdCommandPool.clear();
}
PX_NOCOPY(PvdOutStream)
};
}
PvdDataStream* PvdDataStream::create(PxPvd* pvd)
{
if(pvd == NULL)
{
PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "PvdDataStream::create - pvd must be non-NULL!");
return NULL;
}
PvdImpl* pvdImpl = static_cast<PvdImpl*>(pvd);
return PVD_NEW(PvdOutStream)(*pvdImpl->getTransport(), pvdImpl->getMetaDataProvider(), pvdImpl->getNextStreamId());
}
| 27,599 | C++ | 30.98146 | 121 | 0.744302 |
NVIDIA-Omniverse/PhysX/physx/source/pvd/src/PxProfileScopedMutexLock.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PROFILE_SCOPED_MUTEX_LOCK_H
#define PX_PROFILE_SCOPED_MUTEX_LOCK_H
#include "foundation/Px.h"
namespace physx { namespace profile {
/**
* Generic class to wrap any mutex type that has lock and unlock methods
*/
template<typename TMutexType>
struct ScopedLockImpl
{
TMutexType* mMutex;
ScopedLockImpl( TMutexType* inM ) : mMutex( inM )
{
if ( mMutex ) mMutex->lock();
}
~ScopedLockImpl()
{
if ( mMutex ) mMutex->unlock();
}
};
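/**
* Usage sketch (editorial): with TMutexType = PxMutex,
*   ScopedLockImpl<PxMutex> lock( &mMutex );
* locks on construction, unlocks at scope exit, and tolerates a NULL mutex.
*/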
/**
* Null locking system that does nothing.
*/
struct NullLock
{
template<typename TDataType> NullLock( TDataType*) {}
};
}}
#endif
| 2,313 | C | 35.156249 | 74 | 0.740597 |
NVIDIA-Omniverse/PhysX/physx/source/pvd/src/PxProfileEventMutex.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
#ifndef PX_PROFILE_EVENT_MUTEX_H
#define PX_PROFILE_EVENT_MUTEX_H
#include "foundation/Px.h"
namespace physx { namespace profile {
/**
* Mutex interface that hides implementation around lock and unlock.
* The event system locks the mutex for every interaction.
*/
class PxProfileEventMutex
{
protected:
virtual ~PxProfileEventMutex(){}
public:
virtual void lock() = 0;
virtual void unlock() = 0;
};
/**
* Take any mutex type that implements lock and unlock and make an EventMutex out of it.
*/
template<typename TMutexType>
struct PxProfileEventMutexImpl : public PxProfileEventMutex
{
TMutexType* mMutex;
PxProfileEventMutexImpl( TMutexType* inMtx ) : mMutex( inMtx ) {}
virtual void lock() { mMutex->lock(); }
virtual void unlock() { mMutex->unlock(); }
};
} }
#endif
| 2,376 | C | 36.730158 | 89 | 0.748316 |
NVIDIA-Omniverse/PhysX/physx/source/pvd/src/PxPvdObjectRegistrar.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PVD_OBJECT_REGISTRAR_H
#define PX_PVD_OBJECT_REGISTRAR_H
/** \addtogroup pvd
@{
*/
#include "foundation/PxHashMap.h"
#include "foundation/PxMutex.h"
#if !PX_DOXYGEN
namespace physx
{
namespace pvdsdk
{
#endif
class ObjectRegistrar
{
PX_NOCOPY(ObjectRegistrar)
public:
ObjectRegistrar()
{
}
virtual ~ObjectRegistrar()
{
}
bool addItem(const void* inItem);
bool decItem(const void* inItem);
void clear();
private:
physx::PxHashMap<const void*, uint32_t> mRefCountMap;
physx::PxMutex mRefCountMapLock;
};
#if !PX_DOXYGEN
} // pvdsdk
} // physx
#endif
/** @} */
#endif
| 2,294 | C | 31.323943 | 74 | 0.750218 |
NVIDIA-Omniverse/PhysX/physx/source/pvd/src/PxPvdProfileZoneClient.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PVD_PROFILE_ZONE_CLIENT_H
#define PX_PVD_PROFILE_ZONE_CLIENT_H
#include "PxPvdClient.h"
#include "foundation/PxHashMap.h"
#include "foundation/PxMutex.h"
#include "PxProfileZoneManager.h"
namespace physx
{
namespace pvdsdk
{
class PvdImpl;
class PvdDataStream;
struct ProfileZoneClient;
class PvdProfileZoneClient : public PvdClient, public profile::PxProfileZoneHandler, public PxUserAllocated
{
PX_NOCOPY(PvdProfileZoneClient)
public:
PvdProfileZoneClient(PvdImpl& pvd);
virtual ~PvdProfileZoneClient();
bool isConnected() const;
void onPvdConnected();
void onPvdDisconnected();
void flush();
PvdDataStream* getDataStream();
// PxProfileZoneHandler
void onZoneAdded(profile::PxProfileZone& inSDK);
void onZoneRemoved(profile::PxProfileZone& inSDK);
private:
PxMutex mMutex; // onZoneAdded can be called from different threads
PvdImpl& mSDKPvd;
PvdDataStream* mPvdDataStream;
physx::PxArray<ProfileZoneClient*> mProfileZoneClients;
bool mIsConnected;
};
} // namespace pvdsdk
} // namespace physx
#endif
| 2,739 | C | 34.584415 | 107 | 0.773275 |
NVIDIA-Omniverse/PhysX/physx/source/pvd/src/PxProfileEventBufferClientManager.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
#ifndef PX_PROFILE_EVENT_BUFFER_CLIENT_MANAGER_H
#define PX_PROFILE_EVENT_BUFFER_CLIENT_MANAGER_H
#include "PxProfileEventBufferClient.h"
namespace physx { namespace profile {
/**
\brief Manager that keeps a collection of PxProfileEventBufferClient clients.
@see PxProfileEventBufferClient
*/
class PxProfileEventBufferClientManager
{
protected:
virtual ~PxProfileEventBufferClientManager(){}
public:
/**
\brief Adds new client.
\param inClient Client to add.
*/
virtual void addClient( PxProfileEventBufferClient& inClient ) = 0;
/**
\brief Removes a client.
\param inClient Client to remove.
*/
virtual void removeClient( PxProfileEventBufferClient& inClient ) = 0;
/**
\brief Check if manager has clients.
\return True if manager has added clients.
*/
virtual bool hasClients() const = 0;
};
/**
\brief Manager that keeps a collection of PxProfileZoneClient clients.
@see PxProfileZoneClient
*/
class PxProfileZoneClientManager
{
protected:
virtual ~PxProfileZoneClientManager(){}
public:
/**
\brief Adds new client.
\param inClient Client to add.
*/
virtual void addClient( PxProfileZoneClient& inClient ) = 0;
/**
\brief Removes a client.
\param inClient Client to remove.
*/
virtual void removeClient( PxProfileZoneClient& inClient ) = 0;
/**
\brief Check if manager has clients.
\return True if manager has added clients.
*/
virtual bool hasClients() const = 0;
};
} }
#endif
| 3,029 | C | 30.894737 | 74 | 0.745791 |
NVIDIA-Omniverse/PhysX/physx/source/pvd/src/PxProfileDataBuffer.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PROFILE_DATA_BUFFER_H
#define PX_PROFILE_DATA_BUFFER_H
#include "PxProfileAllocatorWrapper.h"
#include "PxProfileMemoryBuffer.h"
#include "PxProfileEventBufferClient.h"
namespace physx { namespace profile {
template<typename TMutex
, typename TScopedLock>
class DataBuffer //base class for buffers that cache data and then dump the data to clients.
{
public:
typedef TMutex TMutexType;
typedef TScopedLock TScopedLockType;
typedef PxProfileWrapperNamedAllocator TU8AllocatorType;
typedef MemoryBuffer<TU8AllocatorType > TMemoryBufferType;
typedef PxProfileArray<PxProfileEventBufferClient*> TBufferClientArray;
protected:
PxProfileAllocatorWrapper mWrapper;
TMemoryBufferType mDataArray;
TBufferClientArray mBufferClients;
uint32_t mBufferFullAmount;
EventContextInformation mEventContextInformation;
TMutexType* mBufferMutex;
volatile bool mHasClients;
EventSerializer<TMemoryBufferType > mSerializer;
public:
DataBuffer( PxAllocatorCallback* inFoundation
, uint32_t inBufferFullAmount
, TMutexType* inBufferMutex
, const char* inAllocationName )
: mWrapper( inFoundation )
, mDataArray( TU8AllocatorType( mWrapper, inAllocationName ) )
, mBufferClients( mWrapper )
, mBufferFullAmount( inBufferFullAmount )
, mBufferMutex( inBufferMutex )
, mHasClients( false )
, mSerializer( &mDataArray )
{
//The data array never actually reallocates at runtime; we reserve
//more capacity up front than it will ever need.
mDataArray.reserve( inBufferFullAmount + 68 );
}
virtual ~DataBuffer()
{
while(mBufferClients.size() )
{
removeClient( *mBufferClients[0] );
}
}
PxProfileAllocatorWrapper& getWrapper() { return mWrapper; }
TMutexType* getBufferMutex() { return mBufferMutex; }
void setBufferMutex(TMutexType* mutex) { mBufferMutex = mutex; }
void addClient( PxProfileEventBufferClient& inClient )
{
TScopedLockType lock( mBufferMutex );
mBufferClients.pushBack( &inClient );
mHasClients = true;
}
void removeClient( PxProfileEventBufferClient& inClient )
{
TScopedLockType lock( mBufferMutex );
for ( uint32_t idx =0; idx < mBufferClients.size(); ++idx )
{
if (mBufferClients[idx] == &inClient )
{
inClient.handleClientRemoved();
mBufferClients.replaceWithLast( idx );
break;
}
}
mHasClients = mBufferClients.size() != 0;
}
bool hasClients() const
{
return mHasClients;
}
virtual void flushEvents()
{
TScopedLockType lock(mBufferMutex);
const uint8_t* theData = mDataArray.begin();
uint32_t theDataSize = mDataArray.size();
sendDataToClients(theData, theDataSize);
mDataArray.clear();
clearCachedData();
}
//Used for chaining together event buffers.
virtual void handleBufferFlush( const uint8_t* inData, uint32_t inDataSize )
{
TScopedLockType lock( mBufferMutex );
if ( inData && inDataSize )
{
clearCachedData();
if ( mDataArray.size() + inDataSize >= mBufferFullAmount )
flushEvents();
if ( inDataSize >= mBufferFullAmount )
sendDataToClients( inData, inDataSize );
else
mDataArray.write( inData, inDataSize );
}
}
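// Editorial note: the three cases above are
//  1. cached + incoming data would cross the threshold -> flush the cache first;
//  2. the incoming block alone meets the threshold -> forward it directly;
//  3. otherwise -> append it to the cache for a later flush.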
protected:
virtual void clearCachedData()
{
}
private:
void sendDataToClients( const uint8_t* inData, uint32_t inDataSize )
{
uint32_t clientCount = mBufferClients.size();
for( uint32_t idx =0; idx < clientCount; ++idx )
mBufferClients[idx]->handleBufferFlush( inData, inDataSize );
}
};
}}
#endif
| 5,270 | C | 30.753012 | 93 | 0.725806 |
NVIDIA-Omniverse/PhysX/physx/source/pvd/src/PxPvdBits.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
#ifndef PX_PVD_BITS_H
#define PX_PVD_BITS_H
#include "PxPvdObjectModelBaseTypes.h"
namespace physx
{
namespace pvdsdk
{
// Marshallers cannot assume src is aligned, but they can assume dest is aligned.
typedef void (*TSingleMarshaller)(const uint8_t* src, uint8_t* dest);
typedef void (*TBlockMarshaller)(const uint8_t* src, uint8_t* dest, uint32_t numItems);
template <uint8_t ByteCount>
static inline void doSwapBytes(uint8_t* __restrict inData)
{
for(uint32_t idx = 0; idx < ByteCount / 2; ++idx)
{
uint32_t endIdx = ByteCount - idx - 1;
uint8_t theTemp = inData[idx];
inData[idx] = inData[endIdx];
inData[endIdx] = theTemp;
}
}
template <uint8_t ByteCount>
static inline void doSwapBytes(uint8_t* __restrict inData, uint32_t itemCount)
{
uint8_t* end = inData + itemCount * ByteCount;
for(; inData < end; inData += ByteCount)
doSwapBytes<ByteCount>(inData);
}
static inline void swapBytes(uint8_t* __restrict dataPtr, uint32_t numBytes, uint32_t itemWidth)
{
uint32_t numItems = numBytes / itemWidth;
switch(itemWidth)
{
case 1:
break;
case 2:
doSwapBytes<2>(dataPtr, numItems);
break;
case 4:
doSwapBytes<4>(dataPtr, numItems);
break;
case 8:
doSwapBytes<8>(dataPtr, numItems);
break;
case 16:
doSwapBytes<16>(dataPtr, numItems);
break;
default:
PX_ASSERT(false);
break;
}
}
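// Illustrative use (a sketch, not part of the original header): reversing the
// byte order of four uint32_t values in place, e.g. after receiving data from
// an opposite-endian producer:
//   uint32_t values[4] = { 1u, 2u, 3u, 4u };
//   swapBytes( reinterpret_cast<uint8_t*>(values), sizeof(values), sizeof(uint32_t) );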
static inline void swapBytes(uint8_t&)
{
}
static inline void swapBytes(int8_t&)
{
}
static inline void swapBytes(uint16_t& inData)
{
doSwapBytes<2>(reinterpret_cast<uint8_t*>(&inData));
}
static inline void swapBytes(int16_t& inData)
{
doSwapBytes<2>(reinterpret_cast<uint8_t*>(&inData));
}
static inline void swapBytes(uint32_t& inData)
{
doSwapBytes<4>(reinterpret_cast<uint8_t*>(&inData));
}
static inline void swapBytes(int32_t& inData)
{
doSwapBytes<4>(reinterpret_cast<uint8_t*>(&inData));
}
static inline void swapBytes(float& inData)
{
doSwapBytes<4>(reinterpret_cast<uint8_t*>(&inData));
}
static inline void swapBytes(uint64_t& inData)
{
doSwapBytes<8>(reinterpret_cast<uint8_t*>(&inData));
}
static inline void swapBytes(int64_t& inData)
{
doSwapBytes<8>(reinterpret_cast<uint8_t*>(&inData));
}
static inline void swapBytes(double& inData)
{
doSwapBytes<8>(reinterpret_cast<uint8_t*>(&inData));
}
static inline bool checkLength(const uint8_t* inStart, const uint8_t* inStop, uint32_t inLength)
{
return static_cast<uint32_t>(inStop - inStart) >= inLength;
}
}
}
#endif
| 3,989 | C | 29 | 96 | 0.73803 |
NVIDIA-Omniverse/PhysX/physx/source/pvd/src/PxPvdCommStreamEvents.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
#ifndef PX_PVD_COMM_STREAM_EVENTS_H
#define PX_PVD_COMM_STREAM_EVENTS_H
#include "foundation/PxVec3.h"
#include "foundation/PxFlags.h"
#include "foundation/PxTime.h"
#include "PxPvdObjectModelBaseTypes.h"
namespace physx
{
namespace pvdsdk
{
struct CommStreamFlagTypes
{
enum Enum
{
Is64BitPtr = 1
};
};
typedef PxFlags<CommStreamFlagTypes::Enum, uint32_t> CommStreamFlags;
template <typename TDataType>
struct PvdCommVariableSizedEventCheck
{
bool variable_size_check;
};
// Pick out the events that are possibly very large.
// This helps us keep our buffers close to the size the user requested.
#define DECLARE_TYPE_VARIABLE_SIZED(type) \
template <> \
struct PvdCommVariableSizedEventCheck<type> \
{ \
uint32_t variable_size_check; \
};
struct NameHandleValue;
struct StreamPropMessageArg;
struct StringHandleEvent;
struct CreateClass;
struct DeriveClass;
struct CreateProperty;
struct CreatePropertyMessage;
struct CreateInstance;
struct SetPropertyValue;
struct BeginSetPropertyValue;
struct AppendPropertyValueData;
struct EndSetPropertyValue;
struct SetPropertyMessage;
struct BeginPropertyMessageGroup;
struct SendPropertyMessageFromGroup;
struct EndPropertyMessageGroup;
struct CreateDestroyInstanceProperty;
struct PushBackObjectRef;
struct RemoveObjectRef;
struct BeginSection;
struct EndSection;
struct SetPickable;
struct SetColor;
struct SetIsTopLevel;
struct SetCamera;
struct AddProfileZone;
struct AddProfileZoneEvent;
struct StreamEndEvent;
struct ErrorMessage;
struct OriginShift;
struct DestroyInstance;
#define DECLARE_COMM_STREAM_EVENTS \
\
DECLARE_PVD_COMM_STREAM_EVENT(StringHandleEvent) \
DECLARE_PVD_COMM_STREAM_EVENT(CreateClass) \
DECLARE_PVD_COMM_STREAM_EVENT(DeriveClass) \
DECLARE_PVD_COMM_STREAM_EVENT(CreateProperty) \
DECLARE_PVD_COMM_STREAM_EVENT(CreatePropertyMessage) \
DECLARE_PVD_COMM_STREAM_EVENT(CreateInstance) \
DECLARE_PVD_COMM_STREAM_EVENT(SetPropertyValue) \
DECLARE_PVD_COMM_STREAM_EVENT(BeginSetPropertyValue) \
DECLARE_PVD_COMM_STREAM_EVENT(AppendPropertyValueData) \
DECLARE_PVD_COMM_STREAM_EVENT(EndSetPropertyValue) \
DECLARE_PVD_COMM_STREAM_EVENT(SetPropertyMessage) \
DECLARE_PVD_COMM_STREAM_EVENT(BeginPropertyMessageGroup) \
DECLARE_PVD_COMM_STREAM_EVENT(SendPropertyMessageFromGroup) \
DECLARE_PVD_COMM_STREAM_EVENT(EndPropertyMessageGroup) \
DECLARE_PVD_COMM_STREAM_EVENT(DestroyInstance) \
DECLARE_PVD_COMM_STREAM_EVENT(PushBackObjectRef) \
DECLARE_PVD_COMM_STREAM_EVENT(RemoveObjectRef) \
DECLARE_PVD_COMM_STREAM_EVENT(BeginSection) \
DECLARE_PVD_COMM_STREAM_EVENT(EndSection) \
DECLARE_PVD_COMM_STREAM_EVENT(SetPickable) \
DECLARE_PVD_COMM_STREAM_EVENT(SetColor) \
DECLARE_PVD_COMM_STREAM_EVENT(SetIsTopLevel) \
DECLARE_PVD_COMM_STREAM_EVENT(SetCamera) \
DECLARE_PVD_COMM_STREAM_EVENT(AddProfileZone) \
DECLARE_PVD_COMM_STREAM_EVENT(AddProfileZoneEvent) \
DECLARE_PVD_COMM_STREAM_EVENT(StreamEndEvent) \
DECLARE_PVD_COMM_STREAM_EVENT(ErrorMessage) \
DECLARE_PVD_COMM_STREAM_EVENT_NO_COMMA(OriginShift)
struct PvdCommStreamEventTypes
{
enum Enum
{
Unknown = 0,
#define DECLARE_PVD_COMM_STREAM_EVENT(x) x,
#define DECLARE_PVD_COMM_STREAM_EVENT_NO_COMMA(x) x
DECLARE_COMM_STREAM_EVENTS
#undef DECLARE_PVD_COMM_STREAM_EVENT_NO_COMMA
#undef DECLARE_PVD_COMM_STREAM_EVENT
, Last
};
};
template <typename TDataType>
struct DatatypeToCommEventType
{
bool compile_error;
};
template <PvdCommStreamEventTypes::Enum TEnumType>
struct CommEventTypeToDatatype
{
bool compile_error;
};
#define DECLARE_PVD_COMM_STREAM_EVENT(x) \
template <> \
struct DatatypeToCommEventType<x> \
{ \
enum Enum \
{ \
EEventTypeMap = PvdCommStreamEventTypes::x \
}; \
}; \
template <> \
struct CommEventTypeToDatatype<PvdCommStreamEventTypes::x> \
{ \
typedef x TEventType; \
};
#define DECLARE_PVD_COMM_STREAM_EVENT_NO_COMMA(x) \
\
template<> struct DatatypeToCommEventType<x> \
{ \
enum Enum \
{ \
EEventTypeMap = PvdCommStreamEventTypes::x \
}; \
}; \
\
template<> struct CommEventTypeToDatatype<PvdCommStreamEventTypes::x> \
{ \
typedef x TEventType; \
};
DECLARE_COMM_STREAM_EVENTS
#undef DECLARE_PVD_COMM_STREAM_EVENT_NO_COMMA
#undef DECLARE_PVD_COMM_STREAM_EVENT
template <typename TDataType>
PvdCommStreamEventTypes::Enum getCommStreamEventType()
{
return static_cast<PvdCommStreamEventTypes::Enum>(DatatypeToCommEventType<TDataType>::EEventTypeMap);
}
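// Illustrative use (a sketch): the two macros above generate a two-way,
// compile-time mapping between event structs and their wire-format tags:
//   PvdCommStreamEventTypes::Enum tag = getCommStreamEventType<CreateClass>();
//   // tag == PvdCommStreamEventTypes::CreateClass
//   CommEventTypeToDatatype<PvdCommStreamEventTypes::CreateClass>::TEventType evt; // a CreateClass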
struct StreamNamespacedName
{
StringHandle mNamespace; // StringHandle handles
StringHandle mName;
StreamNamespacedName(StringHandle ns = 0, StringHandle nm = 0) : mNamespace(ns), mName(nm)
{
}
};
class EventSerializeable;
class PvdEventSerializer
{
protected:
virtual ~PvdEventSerializer()
{
}
public:
virtual void streamify(uint8_t& val) = 0;
virtual void streamify(uint16_t& val) = 0;
virtual void streamify(uint32_t& val) = 0;
virtual void streamify(float& val) = 0;
virtual void streamify(uint64_t& val) = 0;
virtual void streamify(String& val) = 0;
virtual void streamify(DataRef<const uint8_t>& data) = 0;
virtual void streamify(DataRef<NameHandleValue>& data) = 0;
virtual void streamify(DataRef<StreamPropMessageArg>& data) = 0;
virtual void streamify(DataRef<StringHandle>& data) = 0;
void streamify(StringHandle& hdl)
{
streamify(hdl.mHandle);
}
void streamify(CommStreamFlags& flags)
{
uint32_t val(flags);
streamify(val);
flags = CommStreamFlags(val);
}
void streamify(PvdCommStreamEventTypes::Enum& val)
{
uint8_t detyped = static_cast<uint8_t>(val);
streamify(detyped);
val = static_cast<PvdCommStreamEventTypes::Enum>(detyped);
}
void streamify(PropertyType::Enum& val)
{
uint8_t detyped = static_cast<uint8_t>(val);
streamify(detyped);
val = static_cast<PropertyType::Enum>(detyped);
}
void streamify(bool& val)
{
uint8_t detyped = uint8_t(val ? 1 : 0);
streamify(detyped);
val = detyped ? true : false;
}
void streamify(StreamNamespacedName& name)
{
streamify(name.mNamespace);
streamify(name.mName);
}
void streamify(PvdColor& color)
{
streamify(color.r);
streamify(color.g);
streamify(color.b);
streamify(color.a);
}
void streamify(PxVec3& vec)
{
streamify(vec.x);
streamify(vec.y);
streamify(vec.z);
}
static uint32_t measure(const EventSerializeable& evt);
};
class EventSerializeable
{
protected:
virtual ~EventSerializeable()
{
}
public:
virtual void serialize(PvdEventSerializer& serializer) = 0;
};
/** Numbers generated from random.org
129919156 17973702 401496246 144984007 336950759
907025328 837150850 679717896 601529147 269478202
*/
struct StreamInitialization : public EventSerializeable
{
static uint32_t getStreamId()
{
return 837150850;
}
static uint32_t getStreamVersion()
{
return 1;
}
uint32_t mStreamId;
uint32_t mStreamVersion;
uint64_t mTimestampNumerator;
uint64_t mTimestampDenominator;
CommStreamFlags mStreamFlags;
StreamInitialization()
: mStreamId(getStreamId())
, mStreamVersion(getStreamVersion())
, mTimestampNumerator(physx::PxTime::getCounterFrequency().mNumerator * 10)
, mTimestampDenominator(physx::PxTime::getCounterFrequency().mDenominator)
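	// sets CommStreamFlagTypes::Is64BitPtr when compiled with 64-bit pointers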
, mStreamFlags(sizeof(void*) == 4 ? 0 : 1)
{
}
void serialize(PvdEventSerializer& s)
{
s.streamify(mStreamId);
s.streamify(mStreamVersion);
s.streamify(mTimestampNumerator);
s.streamify(mTimestampDenominator);
s.streamify(mStreamFlags);
}
};
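// The fixed random id doubles as a magic number: a reader can compare the
// deserialized mStreamId against getStreamId() to validate the stream (and a
// byte-swapped match would reveal a producer of opposite endianness).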
struct EventGroup : public EventSerializeable
{
uint32_t mDataSize; // in bytes, data directly follows this header
uint32_t mNumEvents;
uint64_t mStreamId;
uint64_t mTimestamp;
EventGroup(uint32_t dataSize = 0, uint32_t numEvents = 0, uint64_t streamId = 0, uint64_t ts = 0)
: mDataSize(dataSize), mNumEvents(numEvents), mStreamId(streamId), mTimestamp(ts)
{
}
void serialize(PvdEventSerializer& s)
{
s.streamify(mDataSize);
s.streamify(mNumEvents);
s.streamify(mStreamId);
s.streamify(mTimestamp);
}
};
struct StringHandleEvent : public EventSerializeable
{
String mString;
uint32_t mHandle;
StringHandleEvent(String str, uint32_t hdl) : mString(str), mHandle(hdl)
{
}
StringHandleEvent()
{
}
void serialize(PvdEventSerializer& s)
{
s.streamify(mString);
s.streamify(mHandle);
}
};
DECLARE_TYPE_VARIABLE_SIZED(StringHandleEvent)
typedef uint64_t Timestamp;
struct CreateClass : public EventSerializeable
{
StreamNamespacedName mName;
CreateClass(StreamNamespacedName nm) : mName(nm)
{
}
CreateClass()
{
}
void serialize(PvdEventSerializer& s)
{
s.streamify(mName);
}
};
struct DeriveClass : public EventSerializeable
{
StreamNamespacedName mParent;
StreamNamespacedName mChild;
DeriveClass(StreamNamespacedName p, StreamNamespacedName c) : mParent(p), mChild(c)
{
}
DeriveClass()
{
}
void serialize(PvdEventSerializer& s)
{
s.streamify(mParent);
s.streamify(mChild);
}
};
struct NameHandleValue : public EventSerializeable
{
StringHandle mName;
uint32_t mValue;
NameHandleValue(StringHandle name, uint32_t val) : mName(name), mValue(val)
{
}
NameHandleValue()
{
}
void serialize(PvdEventSerializer& s)
{
s.streamify(mName);
s.streamify(mValue);
}
};
/*virtual PvdError createProperty( StreamNamespacedName clsName, StringHandle name, StringHandle semantic
, StreamNamespacedName dtypeName, PropertyType::Enum propertyType
, DataRef<NamedValue> values = DataRef<NamedValue>() ) = 0; */
struct CreateProperty : public EventSerializeable
{
StreamNamespacedName mClass;
StringHandle mName;
StringHandle mSemantic;
StreamNamespacedName mDatatypeName;
PropertyType::Enum mPropertyType;
DataRef<NameHandleValue> mValues;
CreateProperty(StreamNamespacedName cls, StringHandle name, StringHandle semantic, StreamNamespacedName dtypeName,
PropertyType::Enum ptype, DataRef<NameHandleValue> values)
: mClass(cls), mName(name), mSemantic(semantic), mDatatypeName(dtypeName), mPropertyType(ptype), mValues(values)
{
}
CreateProperty()
{
}
void serialize(PvdEventSerializer& s)
{
s.streamify(mClass);
s.streamify(mName);
s.streamify(mSemantic);
s.streamify(mDatatypeName);
s.streamify(mPropertyType);
s.streamify(mValues);
}
};
struct StreamPropMessageArg : public EventSerializeable
{
StringHandle mPropertyName;
StreamNamespacedName mDatatypeName;
uint32_t mMessageOffset;
uint32_t mByteSize;
StreamPropMessageArg(StringHandle pname, StreamNamespacedName dtypeName, uint32_t offset, uint32_t byteSize)
: mPropertyName(pname), mDatatypeName(dtypeName), mMessageOffset(offset), mByteSize(byteSize)
{
}
StreamPropMessageArg()
{
}
void serialize(PvdEventSerializer& s)
{
s.streamify(mPropertyName);
s.streamify(mDatatypeName);
s.streamify(mMessageOffset);
s.streamify(mByteSize);
}
};
/*
virtual PvdError createPropertyMessage( StreamNamespacedName cls, StreamNamespacedName msgName,
                                        DataRef<PropertyMessageArg> entries, uint32_t messageSizeInBytes ) = 0; */
struct CreatePropertyMessage : public EventSerializeable
{
StreamNamespacedName mClass;
StreamNamespacedName mMessageName;
DataRef<StreamPropMessageArg> mMessageEntries;
uint32_t mMessageByteSize;
CreatePropertyMessage(StreamNamespacedName cls, StreamNamespacedName msgName, DataRef<StreamPropMessageArg> propArg,
uint32_t messageByteSize)
: mClass(cls), mMessageName(msgName), mMessageEntries(propArg), mMessageByteSize(messageByteSize)
{
}
CreatePropertyMessage()
{
}
void serialize(PvdEventSerializer& s)
{
s.streamify(mClass);
s.streamify(mMessageName);
s.streamify(mMessageEntries);
s.streamify(mMessageByteSize);
}
};
/**Changing immediate data on instances*/
// virtual PvdError createInstance( StreamNamespacedName cls, uint64_t instance ) = 0;
struct CreateInstance : public EventSerializeable
{
StreamNamespacedName mClass;
uint64_t mInstanceId;
CreateInstance(StreamNamespacedName cls, uint64_t streamId) : mClass(cls), mInstanceId(streamId)
{
}
CreateInstance()
{
}
void serialize(PvdEventSerializer& s)
{
s.streamify(mClass);
s.streamify(mInstanceId);
}
};
// virtual PvdError setPropertyValue( uint64_t instance, StringHandle name, DataRef<const uint8_t> data,
// StreamNamespacedName incomingTypeName ) = 0;
struct SetPropertyValue : public EventSerializeable
{
uint64_t mInstanceId;
StringHandle mPropertyName;
DataRef<const uint8_t> mData;
StreamNamespacedName mIncomingTypeName;
uint32_t mNumItems;
SetPropertyValue(uint64_t instance, StringHandle name, DataRef<const uint8_t> data,
StreamNamespacedName incomingTypeName, uint32_t numItems)
: mInstanceId(instance), mPropertyName(name), mData(data), mIncomingTypeName(incomingTypeName), mNumItems(numItems)
{
}
SetPropertyValue()
{
}
void serializeBeginning(PvdEventSerializer& s)
{
s.streamify(mInstanceId);
s.streamify(mPropertyName);
s.streamify(mIncomingTypeName);
s.streamify(mNumItems);
}
void serialize(PvdEventSerializer& s)
{
serializeBeginning(s);
s.streamify(mData);
}
};
DECLARE_TYPE_VARIABLE_SIZED(SetPropertyValue)
struct BeginSetPropertyValue : public EventSerializeable
{
uint64_t mInstanceId;
StringHandle mPropertyName;
StreamNamespacedName mIncomingTypeName;
BeginSetPropertyValue(uint64_t instance, StringHandle name, StreamNamespacedName incomingTypeName)
: mInstanceId(instance), mPropertyName(name), mIncomingTypeName(incomingTypeName)
{
}
BeginSetPropertyValue()
{
}
void serialize(PvdEventSerializer& s)
{
s.streamify(mInstanceId);
s.streamify(mPropertyName);
s.streamify(mIncomingTypeName);
}
};
// virtual PvdError appendPropertyValueData( DataRef<const uint8_t> data ) = 0;
struct AppendPropertyValueData : public EventSerializeable
{
DataRef<const uint8_t> mData;
uint32_t mNumItems;
AppendPropertyValueData(DataRef<const uint8_t> data, uint32_t numItems) : mData(data), mNumItems(numItems)
{
}
AppendPropertyValueData()
{
}
void serialize(PvdEventSerializer& s)
{
s.streamify(mData);
s.streamify(mNumItems);
}
};
DECLARE_TYPE_VARIABLE_SIZED(AppendPropertyValueData)
// virtual PvdError endSetPropertyValue() = 0;
struct EndSetPropertyValue : public EventSerializeable
{
EndSetPropertyValue()
{
}
void serialize(PvdEventSerializer&)
{
}
};
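// Taken together, BeginSetPropertyValue, AppendPropertyValueData and
// EndSetPropertyValue stream one large property value in chunks, so no single
// serialized event has to exceed the event buffer's byte budget.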
// virtual PvdError setPropertyMessage( uint64_t instance, StreamNamespacedName msgName, DataRef<const uint8_t> data ) = 0;
struct SetPropertyMessage : public EventSerializeable
{
uint64_t mInstanceId;
StreamNamespacedName mMessageName;
DataRef<const uint8_t> mData;
SetPropertyMessage(uint64_t instance, StreamNamespacedName msgName, DataRef<const uint8_t> data)
: mInstanceId(instance), mMessageName(msgName), mData(data)
{
}
SetPropertyMessage()
{
}
void serialize(PvdEventSerializer& s)
{
s.streamify(mInstanceId);
s.streamify(mMessageName);
s.streamify(mData);
}
};
DECLARE_TYPE_VARIABLE_SIZED(SetPropertyMessage)
// virtual PvdError beginPropertyMessageGroup( StreamNamespacedName msgName ) = 0;
struct BeginPropertyMessageGroup : public EventSerializeable
{
StreamNamespacedName mMsgName;
BeginPropertyMessageGroup(StreamNamespacedName msgName) : mMsgName(msgName)
{
}
BeginPropertyMessageGroup()
{
}
void serialize(PvdEventSerializer& s)
{
s.streamify(mMsgName);
}
};
// virtual PvdError sendPropertyMessageFromGroup( uint64_t instance, DataRef<const uint8_t> data ) = 0;
struct SendPropertyMessageFromGroup : public EventSerializeable
{
uint64_t mInstance;
DataRef<const uint8_t> mData;
SendPropertyMessageFromGroup(uint64_t instance, DataRef<const uint8_t> data) : mInstance(instance), mData(data)
{
}
SendPropertyMessageFromGroup()
{
}
void serialize(PvdEventSerializer& s)
{
s.streamify(mInstance);
s.streamify(mData);
}
};
DECLARE_TYPE_VARIABLE_SIZED(SendPropertyMessageFromGroup)
// virtual PvdError endPropertyMessageGroup() = 0;
struct EndPropertyMessageGroup : public EventSerializeable
{
EndPropertyMessageGroup()
{
}
void serialize(PvdEventSerializer&)
{
}
};
struct PushBackObjectRef : public EventSerializeable
{
uint64_t mInstanceId;
StringHandle mProperty;
uint64_t mObjectRef;
PushBackObjectRef(uint64_t instId, StringHandle prop, uint64_t objRef)
: mInstanceId(instId), mProperty(prop), mObjectRef(objRef)
{
}
PushBackObjectRef()
{
}
void serialize(PvdEventSerializer& s)
{
s.streamify(mInstanceId);
s.streamify(mProperty);
s.streamify(mObjectRef);
}
};
struct RemoveObjectRef : public EventSerializeable
{
uint64_t mInstanceId;
StringHandle mProperty;
uint64_t mObjectRef;
RemoveObjectRef(uint64_t instId, StringHandle prop, uint64_t objRef)
: mInstanceId(instId), mProperty(prop), mObjectRef(objRef)
{
}
RemoveObjectRef()
{
}
void serialize(PvdEventSerializer& s)
{
s.streamify(mInstanceId);
s.streamify(mProperty);
s.streamify(mObjectRef);
}
};
// virtual PvdError destroyInstance( uint64_t key ) = 0;
struct DestroyInstance : public EventSerializeable
{
uint64_t mInstanceId;
DestroyInstance(uint64_t instance) : mInstanceId(instance)
{
}
DestroyInstance()
{
}
void serialize(PvdEventSerializer& s)
{
s.streamify(mInstanceId);
}
};
// virtual PvdError beginSection( uint64_t sectionId, StringHandle name ) = 0;
struct BeginSection : public EventSerializeable
{
uint64_t mSectionId;
StringHandle mName;
Timestamp mTimestamp;
BeginSection(uint64_t sectionId, StringHandle name, uint64_t timestamp)
: mSectionId(sectionId), mName(name), mTimestamp(timestamp)
{
}
BeginSection()
{
}
void serialize(PvdEventSerializer& s)
{
s.streamify(mSectionId);
s.streamify(mName);
s.streamify(mTimestamp);
}
};
// virtual PvdError endSection( uint64_t sectionId, StringHandle name ) = 0;
struct EndSection : public EventSerializeable
{
uint64_t mSectionId;
StringHandle mName;
Timestamp mTimestamp;
EndSection(uint64_t sectionId, StringHandle name, uint64_t timestamp)
: mSectionId(sectionId), mName(name), mTimestamp(timestamp)
{
}
EndSection()
{
}
void serialize(PvdEventSerializer& s)
{
s.streamify(mSectionId);
s.streamify(mName);
s.streamify(mTimestamp);
}
};
// virtual void setPickable( void* instance, bool pickable ) = 0;
struct SetPickable : public EventSerializeable
{
uint64_t mInstanceId;
bool mPickable;
SetPickable(uint64_t instId, bool pick) : mInstanceId(instId), mPickable(pick)
{
}
SetPickable()
{
}
void serialize(PvdEventSerializer& s)
{
s.streamify(mInstanceId);
s.streamify(mPickable);
}
};
// virtual void setColor( void* instance, const PvdColor& color ) = 0;
struct SetColor : public EventSerializeable
{
uint64_t mInstanceId;
PvdColor mColor;
SetColor(uint64_t instId, PvdColor color) : mInstanceId(instId), mColor(color)
{
}
SetColor()
{
}
void serialize(PvdEventSerializer& s)
{
s.streamify(mInstanceId);
s.streamify(mColor);
}
};
// Marks whether an instance is treated as a top-level object in the PVD scene hierarchy.
struct SetIsTopLevel : public EventSerializeable
{
uint64_t mInstanceId;
bool mIsTopLevel;
SetIsTopLevel(uint64_t instId, bool topLevel) : mInstanceId(instId), mIsTopLevel(topLevel)
{
}
SetIsTopLevel() : mIsTopLevel(false)
{
}
void serialize(PvdEventSerializer& s)
{
s.streamify(mInstanceId);
s.streamify(mIsTopLevel);
}
};
struct SetCamera : public EventSerializeable
{
String mName;
PxVec3 mPosition;
PxVec3 mUp;
PxVec3 mTarget;
SetCamera(String name, const PxVec3& pos, const PxVec3& up, const PxVec3& target)
: mName(name), mPosition(pos), mUp(up), mTarget(target)
{
}
SetCamera() : mName(NULL)
{
}
void serialize(PvdEventSerializer& s)
{
s.streamify(mName);
s.streamify(mPosition);
s.streamify(mUp);
s.streamify(mTarget);
}
};
struct ErrorMessage : public EventSerializeable
{
uint32_t mCode;
String mMessage;
String mFile;
uint32_t mLine;
ErrorMessage(uint32_t code, String message, String file, uint32_t line)
: mCode(code), mMessage(message), mFile(file), mLine(line)
{
}
ErrorMessage() : mMessage(NULL), mFile(NULL)
{
}
void serialize(PvdEventSerializer& s)
{
s.streamify(mCode);
s.streamify(mMessage);
s.streamify(mFile);
s.streamify(mLine);
}
};
struct AddProfileZone : public EventSerializeable
{
uint64_t mInstanceId;
String mName;
AddProfileZone(uint64_t iid, String nm) : mInstanceId(iid), mName(nm)
{
}
AddProfileZone() : mName(NULL)
{
}
void serialize(PvdEventSerializer& s)
{
s.streamify(mInstanceId);
s.streamify(mName);
}
};
struct AddProfileZoneEvent : public EventSerializeable
{
uint64_t mInstanceId;
String mName;
uint16_t mEventId;
bool mCompileTimeEnabled;
AddProfileZoneEvent(uint64_t iid, String nm, uint16_t eid, bool cte)
: mInstanceId(iid), mName(nm), mEventId(eid), mCompileTimeEnabled(cte)
{
}
AddProfileZoneEvent()
{
}
void serialize(PvdEventSerializer& s)
{
s.streamify(mInstanceId);
s.streamify(mName);
s.streamify(mEventId);
s.streamify(mCompileTimeEnabled);
}
};
struct StreamEndEvent : public EventSerializeable
{
String mName;
StreamEndEvent() : mName("StreamEnd")
{
}
void serialize(PvdEventSerializer& s)
{
s.streamify(mName);
}
};
struct OriginShift : public EventSerializeable
{
uint64_t mInstanceId;
PxVec3 mShift;
OriginShift(uint64_t iid, const PxVec3& shift) : mInstanceId(iid), mShift(shift)
{
}
OriginShift()
{
}
void serialize(PvdEventSerializer& s)
{
s.streamify(mInstanceId);
s.streamify(mShift);
}
};
} // pvdsdk
} // physx
#endif
| 25,795 | C | 25.109312 | 120 | 0.673231 |
NVIDIA-Omniverse/PhysX/physx/source/pvd/src/PxPvdProfileZoneClient.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "PxPvdImpl.h"
#include "PxPvdProfileZoneClient.h"
#include "PxPvdProfileZone.h"
namespace physx
{
namespace pvdsdk
{
struct ProfileZoneClient : public profile::PxProfileZoneClient, public PxUserAllocated
{
profile::PxProfileZone& mZone;
PvdDataStream& mStream;
ProfileZoneClient(profile::PxProfileZone& zone, PvdDataStream& stream) : mZone(zone), mStream(stream)
{
}
~ProfileZoneClient()
{
mZone.removeClient(*this);
}
virtual void createInstance()
{
mStream.addProfileZone(&mZone, mZone.getName());
mStream.createInstance(&mZone);
mZone.addClient(*this);
profile::PxProfileNames names(mZone.getProfileNames());
PVD_FOREACH(idx, names.eventCount)
{
handleEventAdded(names.events[idx]);
}
}
virtual void handleEventAdded(const profile::PxProfileEventName& inName)
{
mStream.addProfileZoneEvent(&mZone, inName.name, inName.eventId.eventId, inName.eventId.compileTimeEnabled);
}
virtual void handleBufferFlush(const uint8_t* inData, uint32_t inLength)
{
mStream.setPropertyValue(&mZone, "events", inData, inLength);
}
virtual void handleClientRemoved()
{
mStream.destroyInstance(&mZone);
}
private:
ProfileZoneClient& operator=(const ProfileZoneClient&);
};
}
}
using namespace physx;
using namespace pvdsdk;
PvdProfileZoneClient::PvdProfileZoneClient(PvdImpl& pvd) : mSDKPvd(pvd), mPvdDataStream(NULL), mIsConnected(false)
{
}
PvdProfileZoneClient::~PvdProfileZoneClient()
{
mSDKPvd.removeClient(this);
	// all zones should have been removed by now
PX_ASSERT(mProfileZoneClients.size() == 0);
}
PvdDataStream* PvdProfileZoneClient::getDataStream()
{
return mPvdDataStream;
}
bool PvdProfileZoneClient::isConnected() const
{
return mIsConnected;
}
void PvdProfileZoneClient::onPvdConnected()
{
if(mIsConnected)
return;
mIsConnected = true;
mPvdDataStream = PvdDataStream::create(&mSDKPvd);
}
void PvdProfileZoneClient::onPvdDisconnected()
{
if(!mIsConnected)
return;
mIsConnected = false;
flush();
mPvdDataStream->release();
mPvdDataStream = NULL;
}
void PvdProfileZoneClient::flush()
{
PVD_FOREACH(idx, mProfileZoneClients.size())
mProfileZoneClients[idx]->mZone.flushProfileEvents();
}
void PvdProfileZoneClient::onZoneAdded(profile::PxProfileZone& zone)
{
PX_ASSERT(mIsConnected);
ProfileZoneClient* client = PVD_NEW(ProfileZoneClient)(zone, *mPvdDataStream);
mMutex.lock();
client->createInstance();
mProfileZoneClients.pushBack(client);
mMutex.unlock();
}
void PvdProfileZoneClient::onZoneRemoved(profile::PxProfileZone& zone)
{
for(uint32_t i = 0; i < mProfileZoneClients.size(); i++)
{
if(&zone == &mProfileZoneClients[i]->mZone)
{
mMutex.lock();
ProfileZoneClient* client = mProfileZoneClients[i];
mProfileZoneClients.replaceWithLast(i);
PVD_DELETE(client);
mMutex.unlock();
return;
}
}
}
| 4,503 | C++ | 26.975155 | 114 | 0.758383 |
NVIDIA-Omniverse/PhysX/physx/source/pvd/src/PxProfileEventBufferClient.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
#ifndef PX_PROFILE_EVENT_BUFFER_CLIENT_H
#define PX_PROFILE_EVENT_BUFFER_CLIENT_H
#include "PxProfileEventNames.h"
namespace physx { namespace profile {
/**
\brief Client handles the data when an event buffer flushes. This data
can be parsed (PxProfileEventHandler.h) as a binary set of events.
*/
class PxProfileEventBufferClient
{
protected:
virtual ~PxProfileEventBufferClient(){}
public:
/**
\brief Callback when the event buffer is full. This data is serialized profile events
and can be read back using: PxProfileEventHandler::parseEventBuffer.
\param inData Provided buffer data.
\param inLength Data length.
@see PxProfileEventHandler::parseEventBuffer.
*/
virtual void handleBufferFlush( const uint8_t* inData, uint32_t inLength ) = 0;
/**
\brief Happens if something removes all the clients from the manager.
*/
virtual void handleClientRemoved() = 0;
};
/**
\brief Client handles new profile event add.
*/
class PxProfileZoneClient : public PxProfileEventBufferClient
{
protected:
virtual ~PxProfileZoneClient(){}
public:
/**
\brief Callback when new profile event is added.
\param inName Added profile event name.
*/
virtual void handleEventAdded( const PxProfileEventName& inName ) = 0;
};
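	/**
	Illustrative minimal client (a sketch; the class name is hypothetical):
	\code
	class LoggingZoneClient : public PxProfileZoneClient
	{
	public:
		virtual void handleBufferFlush( const uint8_t* inData, uint32_t inLength )
		{
			// hand inData/inLength to PxProfileEventHandler::parseEventBuffer here
		}
		virtual void handleClientRemoved() {}
		virtual void handleEventAdded( const PxProfileEventName& ) {}
	};
	\endcode
	*/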
} }
#endif
| 2,848 | C | 34.172839 | 87 | 0.754565 |
NVIDIA-Omniverse/PhysX/physx/source/pvd/src/PxPvdUserRenderImpl.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
#ifndef PX_PVD_USER_RENDER_IMPL_H
#define PX_PVD_USER_RENDER_IMPL_H
#include "PxPvdUserRenderer.h"
namespace physx
{
namespace pvdsdk
{
struct PvdUserRenderTypes
{
enum Enum
{
Unknown = 0,
#define DECLARE_PVD_IMMEDIATE_RENDER_TYPE(type) type,
#define DECLARE_PVD_IMMEDIATE_RENDER_TYPE_NO_COMMA(type) type
#include "PxPvdUserRenderTypes.h"
#undef DECLARE_PVD_IMMEDIATE_RENDER_TYPE_NO_COMMA
#undef DECLARE_PVD_IMMEDIATE_RENDER_TYPE
};
};
class RenderSerializer
{
protected:
virtual ~RenderSerializer()
{
}
public:
virtual void streamify(uint64_t& val) = 0;
virtual void streamify(float& val) = 0;
virtual void streamify(uint32_t& val) = 0;
virtual void streamify(uint8_t& val) = 0;
virtual void streamify(DataRef<uint8_t>& val) = 0;
virtual void streamify(DataRef<PxDebugPoint>& val) = 0;
virtual void streamify(DataRef<PxDebugLine>& val) = 0;
virtual void streamify(DataRef<PxDebugTriangle>& val) = 0;
virtual void streamify(PxDebugText& val) = 0;
virtual bool isGood() = 0;
virtual uint32_t hasData() = 0;
void streamify(PvdUserRenderTypes::Enum& val)
{
uint8_t data = static_cast<uint8_t>(val);
streamify(data);
val = static_cast<PvdUserRenderTypes::Enum>(data);
}
void streamify(PxVec3& val)
{
streamify(val[0]);
streamify(val[1]);
streamify(val[2]);
}
void streamify(PvdColor& val)
{
streamify(val.r);
streamify(val.g);
streamify(val.b);
streamify(val.a);
}
void streamify(PxTransform& val)
{
streamify(val.q.x);
streamify(val.q.y);
streamify(val.q.z);
streamify(val.q.w);
streamify(val.p.x);
streamify(val.p.y);
streamify(val.p.z);
}
void streamify(bool& val)
{
uint8_t tempVal = uint8_t(val ? 1 : 0);
streamify(tempVal);
val = tempVal ? true : false;
}
};
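// Each streamify overload serves both directions depending on the concrete
// serializer: enum and bool values round-trip through a plain integer above, so
// the same code path handles serialization and deserialization.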
template <typename TBulkRenderType>
struct BulkRenderEvent
{
DataRef<TBulkRenderType> mData;
BulkRenderEvent(const TBulkRenderType* data, uint32_t count) : mData(data, count)
{
}
BulkRenderEvent()
{
}
void serialize(RenderSerializer& serializer)
{
serializer.streamify(mData);
}
};
struct SetInstanceIdRenderEvent
{
uint64_t mInstanceId;
SetInstanceIdRenderEvent(uint64_t iid) : mInstanceId(iid)
{
}
SetInstanceIdRenderEvent()
{
}
void serialize(RenderSerializer& serializer)
{
serializer.streamify(mInstanceId);
}
};
struct PointsRenderEvent : BulkRenderEvent<PxDebugPoint>
{
PointsRenderEvent(const PxDebugPoint* data, uint32_t count) : BulkRenderEvent<PxDebugPoint>(data, count)
{
}
PointsRenderEvent()
{
}
};
struct LinesRenderEvent : BulkRenderEvent<PxDebugLine>
{
LinesRenderEvent(const PxDebugLine* data, uint32_t count) : BulkRenderEvent<PxDebugLine>(data, count)
{
}
LinesRenderEvent()
{
}
};
struct TrianglesRenderEvent : BulkRenderEvent<PxDebugTriangle>
{
TrianglesRenderEvent(const PxDebugTriangle* data, uint32_t count) : BulkRenderEvent<PxDebugTriangle>(data, count)
{
}
TrianglesRenderEvent()
{
}
};
struct DebugRenderEvent
{
DataRef<PxDebugPoint> mPointData;
DataRef<PxDebugLine> mLineData;
DataRef<PxDebugTriangle> mTriangleData;
DebugRenderEvent(const PxDebugPoint* pointData, uint32_t pointCount, const PxDebugLine* lineData,
uint32_t lineCount, const PxDebugTriangle* triangleData, uint32_t triangleCount)
: mPointData(pointData, pointCount), mLineData(lineData, lineCount), mTriangleData(triangleData, triangleCount)
{
}
DebugRenderEvent()
{
}
void serialize(RenderSerializer& serializer)
{
serializer.streamify(mPointData);
serializer.streamify(mLineData);
serializer.streamify(mTriangleData);
}
};
struct TextRenderEvent
{
PxDebugText mText;
TextRenderEvent(const PxDebugText& text)
{
mText.color = text.color;
mText.position = text.position;
mText.size = text.size;
mText.string = text.string;
}
TextRenderEvent()
{
}
void serialize(RenderSerializer& serializer)
{
serializer.streamify(mText);
}
};
struct JointFramesRenderEvent
{
PxTransform parent;
PxTransform child;
JointFramesRenderEvent(const PxTransform& p, const PxTransform& c) : parent(p), child(c)
{
}
JointFramesRenderEvent()
{
}
void serialize(RenderSerializer& serializer)
{
serializer.streamify(parent);
serializer.streamify(child);
}
};
struct LinearLimitRenderEvent
{
PxTransform t0;
PxTransform t1;
float value;
bool active;
LinearLimitRenderEvent(const PxTransform& _t0, const PxTransform& _t1, float _value, bool _active)
: t0(_t0), t1(_t1), value(_value), active(_active)
{
}
LinearLimitRenderEvent()
{
}
void serialize(RenderSerializer& serializer)
{
serializer.streamify(t0);
serializer.streamify(t1);
serializer.streamify(value);
serializer.streamify(active);
}
};
struct AngularLimitRenderEvent
{
PxTransform t0;
float lower;
float upper;
bool active;
AngularLimitRenderEvent(const PxTransform& _t0, float _lower, float _upper, bool _active)
: t0(_t0), lower(_lower), upper(_upper), active(_active)
{
}
AngularLimitRenderEvent()
{
}
void serialize(RenderSerializer& serializer)
{
serializer.streamify(t0);
serializer.streamify(lower);
serializer.streamify(upper);
serializer.streamify(active);
}
};
struct LimitConeRenderEvent
{
PxTransform t;
float ySwing;
float zSwing;
bool active;
LimitConeRenderEvent(const PxTransform& _t, float _ySwing, float _zSwing, bool _active)
: t(_t), ySwing(_ySwing), zSwing(_zSwing), active(_active)
{
}
LimitConeRenderEvent()
{
}
void serialize(RenderSerializer& serializer)
{
serializer.streamify(t);
serializer.streamify(ySwing);
serializer.streamify(zSwing);
serializer.streamify(active);
}
};
struct DoubleConeRenderEvent
{
PxTransform t;
float angle;
bool active;
DoubleConeRenderEvent(const PxTransform& _t, float _angle, bool _active) : t(_t), angle(_angle), active(_active)
{
}
DoubleConeRenderEvent()
{
}
void serialize(RenderSerializer& serializer)
{
serializer.streamify(t);
serializer.streamify(angle);
serializer.streamify(active);
}
};
template <typename TDataType>
struct RenderSerializerMap
{
void serialize(RenderSerializer& s, TDataType& d)
{
d.serialize(s);
}
};
template <>
struct RenderSerializerMap<uint8_t>
{
void serialize(RenderSerializer& s, uint8_t& d)
{
s.streamify(d);
}
};
template <>
struct RenderSerializerMap<PxDebugPoint>
{
void serialize(RenderSerializer& s, PxDebugPoint& d)
{
s.streamify(d.pos);
s.streamify(d.color);
}
};
template <>
struct RenderSerializerMap<PxDebugLine>
{
void serialize(RenderSerializer& s, PxDebugLine& d)
{
s.streamify(d.pos0);
s.streamify(d.color0);
s.streamify(d.pos1);
s.streamify(d.color1);
}
};
template <>
struct RenderSerializerMap<PxDebugTriangle>
{
void serialize(RenderSerializer& s, PxDebugTriangle& d)
{
s.streamify(d.pos0);
s.streamify(d.color0);
s.streamify(d.pos1);
s.streamify(d.color1);
s.streamify(d.pos2);
s.streamify(d.color2);
}
};
template <typename TDataType>
struct PvdTypeToRenderType
{
bool compile_error;
};
#define DECLARE_PVD_IMMEDIATE_RENDER_TYPE(type) \
template <> \
struct PvdTypeToRenderType<type##RenderEvent> \
{ \
enum Enum \
{ \
EnumVal = PvdUserRenderTypes::type \
}; \
};
#include "PxPvdUserRenderTypes.h"
#undef DECLARE_PVD_IMMEDIATE_RENDER_TYPE
template <typename TDataType>
PvdUserRenderTypes::Enum getPvdRenderTypeFromType()
{
return static_cast<PvdUserRenderTypes::Enum>(PvdTypeToRenderType<TDataType>::EnumVal);
}
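// Illustrative use (a sketch): getPvdRenderTypeFromType<PointsRenderEvent>()
// yields PvdUserRenderTypes::Points, via the specializations generated above.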
}
}
#endif
| 9,152 | C | 22.712435 | 114 | 0.721045 |
NVIDIA-Omniverse/PhysX/physx/source/pvd/src/PxProfileZoneImpl.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PROFILE_ZONE_IMPL_H
#define PX_PROFILE_ZONE_IMPL_H
#include "PxPvdProfileZone.h"
#include "PxProfileZoneManager.h"
#include "PxProfileContextProviderImpl.h"
#include "PxProfileScopedMutexLock.h"
#include "PxProfileEventBufferAtomic.h"
#include "foundation/PxMutex.h"
namespace physx { namespace profile {
/**
\brief Simple event filter that enables all events.
*/
struct PxProfileNullEventFilter
{
void setEventEnabled( const PxProfileEventId&, bool) { PX_ASSERT(false); }
bool isEventEnabled( const PxProfileEventId&) const { return true; }
};
typedef PxMutexT<PxProfileWrapperReflectionAllocator<uint8_t> > TZoneMutexType;
typedef ScopedLockImpl<TZoneMutexType> TZoneLockType;
typedef EventBuffer< PxDefaultContextProvider, TZoneMutexType, TZoneLockType, PxProfileNullEventFilter > TZoneEventBufferType;
//typedef EventBufferAtomic< PxDefaultContextProvider, TZoneMutexType, TZoneLockType, PxProfileNullEventFilter > TZoneEventBufferType;
template<typename TNameProvider>
class ZoneImpl : TZoneEventBufferType //private inheritance intended
, public PxProfileZone
, public PxProfileEventBufferClient
{
typedef PxMutexT<PxProfileWrapperReflectionAllocator<uint8_t> > TMutexType;
typedef PxProfileHashMap<const char*, uint32_t> TNameToEvtIndexMap;
//ensure we don't reuse event ids.
typedef PxProfileHashMap<uint16_t, const char*> TEvtIdToNameMap;
typedef TMutexType::ScopedLock TLockType;
const char* mName;
mutable TMutexType mMutex;
PxProfileArray<PxProfileEventName> mEventNames;
// to avoid locking, read-only and read-write map exist
TNameToEvtIndexMap mNameToEvtIndexMapR;
TNameToEvtIndexMap mNameToEvtIndexMapRW;
//ensure we don't reuse event ids.
TEvtIdToNameMap mEvtIdToNameMap;
PxProfileZoneManager* mProfileZoneManager;
PxProfileArray<PxProfileZoneClient*> mZoneClients;
volatile bool mEventsActive;
PX_NOCOPY(ZoneImpl<TNameProvider>)
public:
ZoneImpl( PxAllocatorCallback* inAllocator, const char* inName, uint32_t bufferSize = 0x10000 /*64k*/, const TNameProvider& inProvider = TNameProvider() )
: TZoneEventBufferType( inAllocator, bufferSize, PxDefaultContextProvider(), NULL, PxProfileNullEventFilter() )
, mName( inName )
, mMutex( PxProfileWrapperReflectionAllocator<uint8_t>( mWrapper ) )
, mEventNames( mWrapper )
, mNameToEvtIndexMapR( mWrapper )
, mNameToEvtIndexMapRW( mWrapper )
, mEvtIdToNameMap( mWrapper )
, mProfileZoneManager( NULL )
, mZoneClients( mWrapper )
, mEventsActive( false )
{
TZoneEventBufferType::setBufferMutex( &mMutex );
//Initialize the event name structure with existing names from the name provider.
PxProfileNames theNames( inProvider.getProfileNames() );
for ( uint32_t idx = 0; idx < theNames.eventCount; ++idx )
{
const PxProfileEventName& theName (theNames.events[idx]);
doAddName( theName.name, theName.eventId.eventId, theName.eventId.compileTimeEnabled );
}
TZoneEventBufferType::addClient( *this );
}
virtual ~ZoneImpl() {
if ( mProfileZoneManager != NULL )
mProfileZoneManager->removeProfileZone( *this );
mProfileZoneManager = NULL;
TZoneEventBufferType::removeClient( *this );
}
void doAddName( const char* inName, uint16_t inEventId, bool inCompileTimeEnabled )
{
TLockType theLocker( mMutex );
mEvtIdToNameMap.insert( inEventId, inName );
uint32_t idx = static_cast<uint32_t>( mEventNames.size() );
mNameToEvtIndexMapRW.insert( inName, idx );
mEventNames.pushBack( PxProfileEventName( inName, PxProfileEventId( inEventId, inCompileTimeEnabled ) ) );
}
virtual void flushEventIdNameMap()
{
// copy the RW map into R map
if (mNameToEvtIndexMapRW.size())
{
for (TNameToEvtIndexMap::Iterator iter = mNameToEvtIndexMapRW.getIterator(); !iter.done(); ++iter)
{
mNameToEvtIndexMapR.insert(iter->first, iter->second);
}
mNameToEvtIndexMapRW.clear();
}
}
virtual uint16_t getEventIdForName( const char* inName )
{
return getEventIdsForNames( &inName, 1 );
}
virtual uint16_t getEventIdsForNames( const char** inNames, uint32_t inLen )
{
if ( inLen == 0 )
return 0;
// search the read-only map first
const TNameToEvtIndexMap::Entry* theEntry( mNameToEvtIndexMapR.find( inNames[0] ) );
if ( theEntry )
return mEventNames[theEntry->second].eventId;
TLockType theLocker(mMutex);
const TNameToEvtIndexMap::Entry* theReEntry(mNameToEvtIndexMapRW.find(inNames[0]));
if (theReEntry)
return mEventNames[theReEntry->second].eventId;
			//Otherwise generate a fresh block of event ids.
uint16_t nameSize = static_cast<uint16_t>( mEventNames.size() );
//We don't allow 0 as an event id.
uint16_t eventId = nameSize;
//Find a contiguous set of unique event ids
bool foundAnEventId = false;
do
{
foundAnEventId = false;
++eventId;
for ( uint16_t idx = 0; idx < inLen && foundAnEventId == false; ++idx )
foundAnEventId = mEvtIdToNameMap.find( uint16_t(eventId + idx) ) != NULL;
}
while( foundAnEventId );
uint32_t clientCount = mZoneClients.size();
for ( uint16_t nameIdx = 0; nameIdx < inLen; ++nameIdx )
{
uint16_t newId = uint16_t(eventId + nameIdx);
doAddName( inNames[nameIdx], newId, true );
for( uint32_t clientIdx =0; clientIdx < clientCount; ++clientIdx )
mZoneClients[clientIdx]->handleEventAdded( PxProfileEventName( inNames[nameIdx], PxProfileEventId( newId ) ) );
}
return eventId;
}
virtual void setProfileZoneManager(PxProfileZoneManager* inMgr)
{
mProfileZoneManager = inMgr;
}
virtual PxProfileZoneManager* getProfileZoneManager()
{
return mProfileZoneManager;
}
const char* getName() { return mName; }
PxProfileEventBufferClient* getEventBufferClient() { return this; }
//SDK implementation
void addClient( PxProfileZoneClient& inClient )
{
TLockType lock( mMutex );
mZoneClients.pushBack( &inClient );
mEventsActive = true;
}
void removeClient( PxProfileZoneClient& inClient )
{
TLockType lock( mMutex );
for ( uint32_t idx =0; idx < mZoneClients.size(); ++idx )
{
if (mZoneClients[idx] == &inClient )
{
inClient.handleClientRemoved();
mZoneClients.replaceWithLast( idx );
break;
}
}
mEventsActive = mZoneClients.size() != 0;
}
virtual bool hasClients() const
{
return mEventsActive;
}
virtual PxProfileNames getProfileNames() const
{
TLockType theLocker( mMutex );
const PxProfileEventName* theNames = mEventNames.begin();
uint32_t theEventCount = uint32_t(mEventNames.size());
return PxProfileNames( theEventCount, theNames );
}
virtual void release()
{
PX_PROFILE_DELETE( mWrapper.getAllocator(), this );
}
//Implementation chaining the buffer flush to our clients
virtual void handleBufferFlush( const uint8_t* inData, uint32_t inLength )
{
TLockType theLocker( mMutex );
uint32_t clientCount = mZoneClients.size();
for( uint32_t idx =0; idx < clientCount; ++idx )
mZoneClients[idx]->handleBufferFlush( inData, inLength );
}
//Happens if something removes all the clients from the manager.
virtual void handleClientRemoved() {}
		//Send a profile event, optionally with a context. Events are sorted by thread
		//and context on the client side.
virtual void startEvent( uint16_t inId, uint64_t contextId)
{
if( mEventsActive )
{
TZoneEventBufferType::startEvent( inId, contextId );
}
}
virtual void stopEvent( uint16_t inId, uint64_t contextId)
{
if( mEventsActive )
{
TZoneEventBufferType::stopEvent( inId, contextId );
}
}
virtual void startEvent( uint16_t inId, uint64_t contextId, uint32_t threadId)
{
if( mEventsActive )
{
TZoneEventBufferType::startEvent( inId, contextId, threadId );
}
}
virtual void stopEvent( uint16_t inId, uint64_t contextId, uint32_t threadId )
{
if( mEventsActive )
{
TZoneEventBufferType::stopEvent( inId, contextId, threadId );
}
}
virtual void atEvent(uint16_t inId, uint64_t contextId, uint32_t threadId, uint64_t start, uint64_t stop)
{
if (mEventsActive)
{
TZoneEventBufferType::startEvent(inId, threadId, contextId, 0, 0, start);
TZoneEventBufferType::stopEvent(inId, threadId, contextId, 0, 0, stop);
}
}
/**
		 * Set a specific event's value. This is different from the profiling value
		 * for the event; it is a value recorded and kept around without an associated
		 * timestamp. The value is displayed when the event itself is processed.
*/
virtual void eventValue( uint16_t inId, uint64_t contextId, int64_t inValue )
{
if( mEventsActive )
{
TZoneEventBufferType::eventValue( inId, contextId, inValue );
}
}
virtual void flushProfileEvents()
{
TZoneEventBufferType::flushProfileEvents();
}
};
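	// Illustrative usage (a sketch; MyNameProvider is a hypothetical provider
	// exposing getProfileNames(), and alloc is an assumed PxAllocatorCallback*):
	//   ZoneImpl<MyNameProvider> zone( alloc, "MyZone" );
	//   uint16_t evtId = zone.getEventIdForName( "simulate" );
	//   zone.startEvent( evtId, /*contextId=*/0 );
	//   // ... timed work ...
	//   zone.stopEvent( evtId, /*contextId=*/0 );
	//   zone.flushProfileEvents();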
}}
#endif
| 10,615 | C | 32.701587 | 156 | 0.723033 |
NVIDIA-Omniverse/PhysX/physx/source/pvd/src/PxProfileZoneManager.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
#ifndef PX_PROFILE_ZONE_MANAGER_H
#define PX_PROFILE_ZONE_MANAGER_H
#include "PxProfileEventSender.h"
#include "PxProfileEventNames.h"
namespace physx {
class PxAllocatorCallback;
namespace profile {
class PxProfileZone;
class PxProfileNameProvider;
/**
\brief Profile zone handler for zone add/remove notification.
*/
class PxProfileZoneHandler
{
protected:
virtual ~PxProfileZoneHandler(){}
public:
/**
\brief On zone added notification
\note Not a threadsafe call; handlers are expected to be able to handle
this from any thread.
\param inSDK Added zone.
*/
virtual void onZoneAdded( PxProfileZone& inSDK ) = 0;
/**
\brief On zone removed notification
\note Not a threadsafe call; handlers are expected to be able to handle
this from any thread.
\param inSDK removed zone.
*/
virtual void onZoneRemoved( PxProfileZone& inSDK ) = 0;
};
/**
	\brief The profiling system was set up in the expectation that there would be several
systems that each had its own island of profile information. PhysX, client code,
and APEX would be the first examples of these. Each one of these islands is represented
by a profile zone.
The Manager is a singleton-like object where all these different systems can be registered
so that clients of the profiling system can have one point to capture *all* profiling events.
Flushing the manager implies that you want to loop through all the profile zones and flush
each one.
@see PxProfileEventFlusher
*/
class PxProfileZoneManager
: public PxProfileEventFlusher //Tell all SDK's to flush their queue of profile events.
{
protected:
virtual ~PxProfileZoneManager(){}
public:
/**
\brief Add new profile zone for the manager.
\note Threadsafe call, can be done from any thread. Handlers that are already connected
will get a new callback on the current thread.
\param inSDK Profile zone to add.
*/
virtual void addProfileZone( PxProfileZone& inSDK ) = 0;
/**
\brief Removes profile zone from the manager.
\note Threadsafe call, can be done from any thread. Handlers that are already connected
will get a new callback on the current thread.
\param inSDK Profile zone to remove.
*/
virtual void removeProfileZone( PxProfileZone& inSDK ) = 0;
/**
\brief Add profile zone handler callback for the profile zone notifications.
\note Threadsafe call. The new handler will immediately be notified about all
known SDKs.
\param inHandler Profile zone handler to add.
*/
virtual void addProfileZoneHandler( PxProfileZoneHandler& inHandler ) = 0;
/**
\brief Removes profile zone handler callback for the profile zone notifications.
	\note Threadsafe call. The removed handler receives no further
	notifications about profile zone changes.
\param inHandler Profile zone handler to remove.
*/
virtual void removeProfileZoneHandler( PxProfileZoneHandler& inHandler ) = 0;
/**
\brief Create a new profile zone. This means you don't need access to a PxFoundation to
create your profile zone object, and your object is automatically registered with
the profile zone manager.
You still need to release your object when you are finished with it.
\param inSDKName Name of the SDK object.
\param inNames Option set of event id to name mappings.
	\param inEventBufferByteSize Approximate maximum size of the event buffer; it may be exceeded
	by the size of a single event. When the buffer fills, all listeners are called immediately.
*/
virtual PxProfileZone& createProfileZone( const char* inSDKName, PxProfileNames inNames = PxProfileNames(), uint32_t inEventBufferByteSize = 0x4000 /*16k*/ ) = 0;
/**
\brief Releases the profile manager instance.
*/
virtual void release() = 0;
/**
\brief Create the profile zone manager.
\param inAllocatorCallback Allocator callback.
*/
static PxProfileZoneManager& createProfileZoneManager(PxAllocatorCallback* inAllocatorCallback );
};
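	/**
	Illustrative setup (a sketch; alloc is an assumed PxAllocatorCallback*):
	\code
	PxProfileZoneManager& mgr = PxProfileZoneManager::createProfileZoneManager( alloc );
	PxProfileZone& zone = mgr.createProfileZone( "MySDK" );
	// ... emit events through the zone ...
	mgr.flushProfileEvents(); // inherited from PxProfileEventFlusher
	zone.release();
	mgr.release();
	\endcode
	*/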
} }
#endif
| 5,530 | C | 34.455128 | 164 | 0.752622 |
NVIDIA-Omniverse/PhysX/physx/source/pvd/src/PxPvdInternalByteStreams.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
#ifndef PX_PVD_INTERNAL_BYTE_STREAMS_H
#define PX_PVD_INTERNAL_BYTE_STREAMS_H
#include "PxPvdByteStreams.h"
#include "PxPvdFoundation.h"
namespace physx
{
namespace pvdsdk
{
struct MemPvdInputStream : public PvdInputStream
{
const uint8_t* mBegin;
const uint8_t* mEnd;
bool mGood;
MemPvdInputStream(const uint8_t* beg = NULL, const uint8_t* end = NULL)
{
mBegin = beg;
mEnd = end;
mGood = true;
}
uint32_t size() const
{
return mGood ? static_cast<uint32_t>(mEnd - mBegin) : 0;
}
bool isGood() const
{
return mGood;
}
void setup(uint8_t* start, uint8_t* stop)
{
mBegin = start;
mEnd = stop;
}
void nocopyRead(uint8_t*& buffer, uint32_t& len)
{
if(len == 0 || mGood == false)
{
len = 0;
buffer = NULL;
return;
}
uint32_t original = len;
len = PxMin(len, size());
if(mGood && len != original)
mGood = false;
buffer = const_cast<uint8_t*>(mBegin);
mBegin += len;
}
virtual bool read(uint8_t* buffer, uint32_t& len)
{
if(len == 0)
return true;
uint32_t original = len;
len = PxMin(len, size());
physx::intrinsics::memCopy(buffer, mBegin, len);
mBegin += len;
if(len < original)
physx::intrinsics::memZero(buffer + len, original - len);
mGood = mGood && len == original;
return mGood;
}
};
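// Illustrative usage sketch (assumed caller-side code, not part of the SDK):
// deserializing a uint32_t from a byte range. If the range is too short, read()
// zero-fills the remainder and the stream reports !isGood() afterwards.
//
//   MemPvdInputStream stream(bytes, bytes + byteCount); // bytes/byteCount assumed
//   uint32_t value = 0;
//   uint32_t len = sizeof(value);
//   stream.read(reinterpret_cast<uint8_t*>(&value), len);
//   bool ok = stream.isGood();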
}
}
#endif
| 2,853 | C | 27.54 | 74 | 0.709779 |
NVIDIA-Omniverse/PhysX/physx/source/pvd/src/PxProfileMemoryBuffer.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PROFILE_MEMORY_BUFFER_H
#define PX_PROFILE_MEMORY_BUFFER_H
#include "foundation/PxAllocator.h"
#include "foundation/PxMemory.h"
namespace physx { namespace profile {
template<typename TAllocator = typename PxAllocatorTraits<uint8_t>::Type >
class MemoryBuffer : public TAllocator
{
uint8_t* mBegin;
uint8_t* mEnd;
uint8_t* mCapacityEnd;
public:
MemoryBuffer( const TAllocator& inAlloc = TAllocator() ) : TAllocator( inAlloc ), mBegin( 0 ), mEnd( 0 ), mCapacityEnd( 0 ) {}
~MemoryBuffer()
{
if ( mBegin ) TAllocator::deallocate( mBegin );
}
uint32_t size() const { return static_cast<uint32_t>( mEnd - mBegin ); }
uint32_t capacity() const { return static_cast<uint32_t>( mCapacityEnd - mBegin ); }
uint8_t* begin() { return mBegin; }
uint8_t* end() { return mEnd; }
void setEnd(uint8_t* nEnd) { mEnd = nEnd; }
const uint8_t* begin() const { return mBegin; }
const uint8_t* end() const { return mEnd; }
void clear() { mEnd = mBegin; }
uint32_t write( uint8_t inValue )
{
growBuf( 1 );
*mEnd = inValue;
++mEnd;
return 1;
}
template<typename TDataType>
uint32_t write( const TDataType& inValue )
{
uint32_t writtenSize = sizeof(TDataType);
growBuf(writtenSize);
const uint8_t* __restrict readPtr = reinterpret_cast< const uint8_t* >( &inValue );
uint8_t* __restrict writePtr = mEnd;
for ( uint32_t idx = 0; idx < sizeof(TDataType); ++idx ) writePtr[idx] = readPtr[idx];
mEnd += writtenSize;
return writtenSize;
}
template<typename TDataType>
uint32_t write( const TDataType* inValue, uint32_t inLength )
{
if ( inValue && inLength )
{
uint32_t writeSize = inLength * sizeof( TDataType );
growBuf( writeSize );
PxMemCopy( mBegin + size(), inValue, writeSize );
mEnd += writeSize;
return writeSize;
}
return 0;
}
// used by atomic write. Store the data and write the end afterwards.
// We don't check the buffer size; it should not resize on the fly.
template<typename TDataType>
uint32_t write(const TDataType* inValue, uint32_t inLength, int32_t index)
{
if (inValue && inLength)
{
uint32_t writeSize = inLength * sizeof(TDataType);
PX_ASSERT(mBegin + index + writeSize < mCapacityEnd);
PxMemCopy(mBegin + index, inValue, writeSize);
return writeSize;
}
return 0;
}
void growBuf( uint32_t inAmount )
{
uint32_t newSize = size() + inAmount;
reserve( newSize );
}
void resize( uint32_t inAmount )
{
reserve( inAmount );
mEnd = mBegin + inAmount;
}
void reserve( uint32_t newSize )
{
uint32_t currentSize = size();
if ( newSize >= capacity() )
{
const uint32_t allocSize = mBegin ? newSize * 2 : newSize;
uint8_t* newData = static_cast<uint8_t*>(TAllocator::allocate(allocSize, PX_FL));
memset(newData, 0xf, allocSize); // fill fresh storage with a recognizable debug pattern
if ( mBegin )
{
PxMemCopy( newData, mBegin, currentSize );
TAllocator::deallocate( mBegin );
}
mBegin = newData;
mEnd = mBegin + currentSize;
mCapacityEnd = mBegin + allocSize;
}
}
};
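// Illustrative usage sketch (assumed caller-side code): typed writes append raw
// bytes, and reserve() grows the store geometrically once a backing buffer exists,
// so repeated small writes stay amortized O(1).
//
//   MemoryBuffer<> buf;
//   uint32_t count = 3;                      // assumed payload
//   float values[3] = { 1.0f, 2.0f, 3.0f };
//   buf.write(count);                        // sizeof(uint32_t) bytes
//   buf.write(values, 3);                    // 3 * sizeof(float) bytes
//   // buf.begin()..buf.end() now spans the serialized bytes.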
class TempMemoryBuffer
{
uint8_t* mBegin;
uint8_t* mEnd;
uint8_t* mCapacityEnd;
public:
TempMemoryBuffer(uint8_t* data, int32_t size) : mBegin(data), mEnd(data), mCapacityEnd(data + size) {}
~TempMemoryBuffer()
{
}
uint32_t size() const { return static_cast<uint32_t>(mEnd - mBegin); }
uint32_t capacity() const { return static_cast<uint32_t>(mCapacityEnd - mBegin); }
const uint8_t* begin() { return mBegin; }
uint8_t* end() { return mEnd; }
const uint8_t* begin() const { return mBegin; }
const uint8_t* end() const { return mEnd; }
uint32_t write(uint8_t inValue)
{
*mEnd = inValue;
++mEnd;
return 1;
}
template<typename TDataType>
uint32_t write(const TDataType& inValue)
{
uint32_t writtenSize = sizeof(TDataType);
const uint8_t* __restrict readPtr = reinterpret_cast<const uint8_t*>(&inValue);
uint8_t* __restrict writePtr = mEnd;
for (uint32_t idx = 0; idx < sizeof(TDataType); ++idx) writePtr[idx] = readPtr[idx];
mEnd += writtenSize;
return writtenSize;
}
template<typename TDataType>
uint32_t write(const TDataType* inValue, uint32_t inLength)
{
if (inValue && inLength)
{
uint32_t writeSize = inLength * sizeof(TDataType);
PxMemCopy(mBegin + size(), inValue, writeSize);
mEnd += writeSize;
return writeSize;
}
return 0;
}
};
}}
#endif
| 6,145 | C | 31.17801 | 128 | 0.683645 |
NVIDIA-Omniverse/PhysX/physx/source/pvd/src/PxPvdDefaultSocketTransport.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PVD_DEFAULT_SOCKET_TRANSPORT_H
#define PX_PVD_DEFAULT_SOCKET_TRANSPORT_H
#include "pvd/PxPvdTransport.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxSocket.h"
#include "foundation/PxMutex.h"
namespace physx
{
namespace pvdsdk
{
class PvdDefaultSocketTransport : public PxPvdTransport, public PxUserAllocated
{
PX_NOCOPY(PvdDefaultSocketTransport)
public:
PvdDefaultSocketTransport(const char* host, int port, unsigned int timeoutInMilliseconds);
virtual ~PvdDefaultSocketTransport();
virtual bool connect();
virtual void disconnect();
virtual bool isConnected();
virtual bool write(const uint8_t* inBytes, uint32_t inLength);
virtual void flush();
virtual PxPvdTransport& lock();
virtual void unlock();
virtual uint64_t getWrittenDataSize();
virtual void release();
private:
PxSocket mSocket;
const char* mHost;
uint16_t mPort;
unsigned int mTimeout;
bool mConnected;
uint64_t mWrittenData;
PxMutex mMutex;
bool mlocked;
};
} // pvdsdk
} // physx
#endif
| 2,717 | C | 32.975 | 91 | 0.767022 |
NVIDIA-Omniverse/PhysX/physx/source/pvd/src/PxPvdObjectRegistrar.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "PxPvdObjectRegistrar.h"
namespace physx
{
namespace pvdsdk
{
bool ObjectRegistrar::addItem(const void* inItem)
{
physx::PxMutex::ScopedLock lock(mRefCountMapLock);
if(mRefCountMap.find(inItem))
{
uint32_t& counter = mRefCountMap[inItem];
counter++;
return false;
}
else
{
mRefCountMap.insert(inItem, 1);
return true;
}
}
bool ObjectRegistrar::decItem(const void* inItem)
{
physx::PxMutex::ScopedLock lock(mRefCountMapLock);
const physx::PxHashMap<const void*, uint32_t>::Entry* entry = mRefCountMap.find(inItem);
if(entry)
{
uint32_t& retval(const_cast<uint32_t&>(entry->second));
if(retval)
--retval;
uint32_t theValue = retval;
if(theValue == 0)
{
mRefCountMap.erase(inItem);
return true;
}
}
return false;
}
void ObjectRegistrar::clear()
{
physx::PxMutex::ScopedLock lock(mRefCountMapLock);
mRefCountMap.clear();
}
} // pvdsdk
} // physx
| 2,599 | C++ | 31.5 | 89 | 0.744517 |
NVIDIA-Omniverse/PhysX/physx/source/pvd/src/PxPvdProfileZone.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
#ifndef PX_PVD_PROFILE_ZONE_H
#define PX_PVD_PROFILE_ZONE_H
#include "foundation/PxPreprocessor.h"
#include "PxProfileEventBufferClientManager.h"
#include "PxProfileEventNames.h"
#include "PxProfileEventSender.h"
namespace physx {
class PxAllocatorCallback;
namespace profile {
class PxProfileZoneManager;
/**
\brief The profiling system was setup in the expectation that there would be several
systems that each had its own island of profile information. PhysX, client code,
and APEX would be the first examples of these. Each one of these islands is represented
by a profile zone.
A profile zone combines a name, a place where all the events coming from its interface
can flushed, and a mapping from event number to full event name.
It also provides a top level filtering service where profile events
can be filtered by event id.
The profile zone implements a system where, if no one is listening to events,
it doesn't provide a mechanism to send them. In this way the event system is
short-circuited when there aren't any clients.
All functions on this interface should be considered threadsafe.
@see PxProfileZoneClientManager, PxProfileNameProvider, PxProfileEventSender, PxProfileEventFlusher
*/
class PxProfileZone : public PxProfileZoneClientManager
, public PxProfileNameProvider
, public PxProfileEventSender
, public PxProfileEventFlusher
{
protected:
virtual ~PxProfileZone(){}
public:
/**
\brief Get profile zone name.
\return Zone name.
*/
virtual const char* getName() = 0;
/**
\brief Release the profile zone.
*/
virtual void release() = 0;
/**
\brief Set profile zone manager for the zone.
\param inMgr Profile zone manager.
*/
virtual void setProfileZoneManager(PxProfileZoneManager* inMgr) = 0;
/**
\brief Get profile zone manager for the zone.
\return Profile zone manager.
*/
virtual PxProfileZoneManager* getProfileZoneManager() = 0;
/**
\brief Get or create a new event id for a given name.
If you pass in a previously defined event name (including one returned
from the name provider) you will just get the same event id back.
\param inName Profile event name.
*/
virtual uint16_t getEventIdForName( const char* inName ) = 0;
/**
\brief Specifies that it is a safe point to flush read-write name map into
read-only map. Make sure getEventIdForName is not called from a different thread.
*/
virtual void flushEventIdNameMap() = 0;
/**
\brief Reserve a contiguous set of profile event ids for a set of names.
This function does not do any meaningful error checking other than to ensure
that if it does generate new ids they are contiguous. If the first name is already
registered, that is the ID that will be returned regardless of what other
names are registered. Thus either use this function alone (without getEventIdForName)
or don't use it at all.
If you register "one","two","three" and the function returns an id of 4, then
"one" is mapped to 4, "two" is mapped to 5, and "three" is mapped to 6.
\param inNames set of names to register.
\param inLen Length of the name list.
\return The first id associated with the first name. The rest of the names
will be associated with monotonically incrementing uint16_t values from the first
id.
*/
virtual uint16_t getEventIdsForNames( const char** inNames, uint32_t inLen ) = 0;
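// Illustrative sketch (assumed caller-side code): reserving contiguous ids for a
// family of related events on an existing zone reference.
//
//   const char* names[] = { "sim.start", "sim.step", "sim.stop" };
//   uint16_t first = zone.getEventIdsForNames(names, 3);
//   // "sim.start" -> first, "sim.step" -> first + 1, "sim.stop" -> first + 2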
/**
\brief Create a new profile zone.
\param inAllocator Memory allocation is routed through this allocator callback if one is passed in.
\param inSDKName Name of the profile zone; useful for clients to understand where events came from.
\param inNames Mapping from event id -> event name.
\param inEventBufferByteSize Size of the canonical event buffer. This does not need to be a large number
as profile events are fairly small individually.
\return a profile zone implementation.
*/
static PxProfileZone& createProfileZone(PxAllocatorCallback* inAllocator, const char* inSDKName, PxProfileNames inNames = PxProfileNames(), uint32_t inEventBufferByteSize = 0x10000 /*64k*/);
};
} }
#endif
| 5,725 | C | 39.041958 | 192 | 0.751092 |
NVIDIA-Omniverse/PhysX/physx/source/pvd/src/PxProfileMemoryEvents.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PROFILE_MEMORY_EVENTS_H
#define PX_PROFILE_MEMORY_EVENTS_H
#include "PxProfileEvents.h"
//Memory events define their own event stream
namespace physx { namespace profile {
struct MemoryEventTypes
{
enum Enum
{
Unknown = 0,
StringTableEvent, //introduce a new mapping of const char* -> integer
AllocationEvent,
DeallocationEvent,
FullAllocationEvent
};
};
template<unsigned numBits, typename TDataType>
inline unsigned char convertToNBits( TDataType inType )
{
uint8_t conversion = static_cast<uint8_t>( inType );
PX_ASSERT( conversion < (1 << numBits) );
return conversion;
}
template<typename TDataType>
inline unsigned char convertToTwoBits( TDataType inType )
{
return convertToNBits<2>( inType );
}
template<typename TDataType>
inline unsigned char convertToFourBits( TDataType inType )
{
return convertToNBits<4>( inType );
}
inline EventStreamCompressionFlags::Enum fromNumber( uint8_t inNum ) { return static_cast<EventStreamCompressionFlags::Enum>( inNum ); }
template<unsigned lhs, unsigned rhs>
inline void compileCheckSize()
{
PX_COMPILE_TIME_ASSERT( lhs <= rhs );
}
//Used for predictable bit fields.
template<typename TDataType
, uint8_t TNumBits
, uint8_t TOffset
, typename TInputType>
struct BitMaskSetter
{
//Create a mask that masks out the original value, shifted into place
static TDataType createOffsetMask() { return TDataType(createMask() << TOffset); }
//Create a mask of TNumBits bits
static TDataType createMask() { return static_cast<TDataType>((1 << TNumBits) - 1); }
void setValue( TDataType& inCurrent, TInputType inData )
{
PX_ASSERT( inData < ( 1 << TNumBits ) );
//Create a mask to remove the current value.
TDataType theMask = TDataType(~(createOffsetMask()));
//Clear out current value.
inCurrent = TDataType(inCurrent & theMask);
//Create the new value.
TDataType theAddition = static_cast<TDataType>( inData << TOffset );
//or it into the existing value.
inCurrent = TDataType(inCurrent | theAddition);
}
TInputType getValue( TDataType inCurrent )
{
return static_cast<TInputType>( ( inCurrent >> TOffset ) & createMask() );
}
};
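// Illustrative sketch (assumed, mirroring the typedefs below): packing a 4-bit
// value at bit offset 0 of a uint16_t and reading it back.
//
//   BitMaskSetter<uint16_t, 4, 0, uint8_t> typeBits;
//   uint16_t word = 0;
//   typeBits.setValue(word, 5);              // low 4 bits of word now hold 5
//   uint8_t back = typeBits.getValue(word);  // back == 5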
struct MemoryEventHeader
{
uint16_t mValue;
typedef BitMaskSetter<uint16_t, 4, 0, uint8_t> TTypeBitmask;
typedef BitMaskSetter<uint16_t, 2, 4, uint8_t> TAddrCompressBitmask;
typedef BitMaskSetter<uint16_t, 2, 6, uint8_t> TTypeCompressBitmask;
typedef BitMaskSetter<uint16_t, 2, 8, uint8_t> TFnameCompressBitmask;
typedef BitMaskSetter<uint16_t, 2, 10, uint8_t> TSizeCompressBitmask;
typedef BitMaskSetter<uint16_t, 2, 12, uint8_t> TLineCompressBitmask;
//That leaves size as the only thing not usually compressed.
MemoryEventHeader( MemoryEventTypes::Enum inType = MemoryEventTypes::Unknown )
: mValue( 0 )
{
uint8_t defaultCompression( convertToTwoBits( EventStreamCompressionFlags::U64 ) );
TTypeBitmask().setValue( mValue, convertToFourBits( inType ) );
TAddrCompressBitmask().setValue( mValue, defaultCompression );
TTypeCompressBitmask().setValue( mValue, defaultCompression );
TFnameCompressBitmask().setValue( mValue, defaultCompression );
TSizeCompressBitmask().setValue( mValue, defaultCompression );
TLineCompressBitmask().setValue( mValue, defaultCompression );
}
MemoryEventTypes::Enum getType() const { return static_cast<MemoryEventTypes::Enum>( TTypeBitmask().getValue( mValue ) ); }
#define DEFINE_MEMORY_HEADER_COMPRESSION_ACCESSOR( name ) \
void set##name( EventStreamCompressionFlags::Enum inEnum ) { T##name##Bitmask().setValue( mValue, convertToTwoBits( inEnum ) ); } \
EventStreamCompressionFlags::Enum get##name() const { return fromNumber( T##name##Bitmask().getValue( mValue ) ); }
DEFINE_MEMORY_HEADER_COMPRESSION_ACCESSOR( AddrCompress )
DEFINE_MEMORY_HEADER_COMPRESSION_ACCESSOR( TypeCompress )
DEFINE_MEMORY_HEADER_COMPRESSION_ACCESSOR( FnameCompress )
DEFINE_MEMORY_HEADER_COMPRESSION_ACCESSOR( SizeCompress )
DEFINE_MEMORY_HEADER_COMPRESSION_ACCESSOR( LineCompress )
#undef DEFINE_MEMORY_HEADER_COMPRESSION_ACCESSOR
bool operator==( const MemoryEventHeader& inOther ) const
{
return mValue == inOther.mValue;
}
template<typename TStreamType>
void streamify( TStreamType& inStream )
{
inStream.streamify( "Header", mValue );
}
};
//Declaration of type level getMemoryEventType function that maps enumeration event types to datatypes
template<typename TDataType>
inline MemoryEventTypes::Enum getMemoryEventType() { PX_ASSERT( false ); return MemoryEventTypes::Unknown; }
inline bool safeStrEq( const char* lhs, const char* rhs )
{
if ( lhs == rhs )
return true;
//If they aren't equal, and one of them is null,
//then they can't be equal.
//This is assuming that the null char* is not equal to
//the empty "" char*.
if ( !lhs || !rhs )
return false;
return ::strcmp( lhs, rhs ) == 0;
}
struct StringTableEvent
{
const char* mString;
uint32_t mHandle;
void init( const char* inStr = "", uint32_t inHdl = 0 )
{
mString = inStr;
mHandle = inHdl;
}
void init( const StringTableEvent& inData )
{
mString = inData.mString;
mHandle = inData.mHandle;
}
bool operator==( const StringTableEvent& inOther ) const
{
return mHandle == inOther.mHandle
&& safeStrEq( mString, inOther.mString );
}
void setup( MemoryEventHeader& ) const {}
template<typename TStreamType>
void streamify( TStreamType& inStream, const MemoryEventHeader& )
{
inStream.streamify( "String", mString );
inStream.streamify( "Handle", mHandle );
}
};
template<> inline MemoryEventTypes::Enum getMemoryEventType<StringTableEvent>() { return MemoryEventTypes::StringTableEvent; }
struct MemoryEventData
{
uint64_t mAddress;
void init( uint64_t addr )
{
mAddress = addr;
}
void init( const MemoryEventData& inData)
{
mAddress = inData.mAddress;
}
bool operator==( const MemoryEventData& inOther ) const
{
return mAddress == inOther.mAddress;
}
void setup( MemoryEventHeader& inHeader ) const
{
inHeader.setAddrCompress( findCompressionValue( mAddress ) );
}
template<typename TStreamType>
void streamify( TStreamType& inStream, const MemoryEventHeader& inHeader )
{
inStream.streamify( "Address", mAddress, inHeader.getAddrCompress() );
}
};
struct AllocationEvent : public MemoryEventData
{
uint32_t mSize;
uint32_t mType;
uint32_t mFile;
uint32_t mLine;
void init( size_t size = 0, uint32_t type = 0, uint32_t file = 0, uint32_t line = 0, uint64_t addr = 0 )
{
MemoryEventData::init( addr );
mSize = static_cast<uint32_t>( size );
mType = type;
mFile = file;
mLine = line;
}
void init( const AllocationEvent& inData )
{
MemoryEventData::init( inData );
mSize = inData.mSize;
mType = inData.mType;
mFile = inData.mFile;
mLine = inData.mLine;
}
bool operator==( const AllocationEvent& inOther ) const
{
return MemoryEventData::operator==( inOther )
&& mSize == inOther.mSize
&& mType == inOther.mType
&& mFile == inOther.mFile
&& mLine == inOther.mLine;
}
void setup( MemoryEventHeader& inHeader ) const
{
inHeader.setTypeCompress( findCompressionValue( mType ) );
inHeader.setFnameCompress( findCompressionValue( mFile ) );
inHeader.setSizeCompress( findCompressionValue( mSize ) );
inHeader.setLineCompress( findCompressionValue( mLine ) );
MemoryEventData::setup( inHeader );
}
template<typename TStreamType>
void streamify( TStreamType& inStream, const MemoryEventHeader& inHeader )
{
inStream.streamify( "Size", mSize, inHeader.getSizeCompress() );
inStream.streamify( "Type", mType, inHeader.getTypeCompress() );
inStream.streamify( "File", mFile, inHeader.getFnameCompress() );
inStream.streamify( "Line", mLine, inHeader.getLineCompress() );
MemoryEventData::streamify( inStream, inHeader );
}
};
template<> inline MemoryEventTypes::Enum getMemoryEventType<AllocationEvent>() { return MemoryEventTypes::AllocationEvent; }
struct FullAllocationEvent : public MemoryEventData
{
size_t mSize;
const char* mType;
const char* mFile;
uint32_t mLine;
void init( size_t size, const char* type, const char* file, uint32_t line, uint64_t addr )
{
MemoryEventData::init( addr );
mSize = size;
mType = type;
mFile = file;
mLine = line;
}
void init( const FullAllocationEvent& inData )
{
MemoryEventData::init( inData );
mSize = inData.mSize;
mType = inData.mType;
mFile = inData.mFile;
mLine = inData.mLine;
}
bool operator==( const FullAllocationEvent& inOther ) const
{
return MemoryEventData::operator==( inOther )
&& mSize == inOther.mSize
&& safeStrEq( mType, inOther.mType )
&& safeStrEq( mFile, inOther.mFile )
&& mLine == inOther.mLine;
}
void setup( MemoryEventHeader& ) const {}
};
template<> inline MemoryEventTypes::Enum getMemoryEventType<FullAllocationEvent>() { return MemoryEventTypes::FullAllocationEvent; }
struct DeallocationEvent : public MemoryEventData
{
void init( uint64_t addr = 0 ) { MemoryEventData::init( addr ); }
void init( const DeallocationEvent& inData ) { MemoryEventData::init( inData ); }
};
template<> inline MemoryEventTypes::Enum getMemoryEventType<DeallocationEvent>() { return MemoryEventTypes::DeallocationEvent; }
class MemoryEvent
{
public:
typedef PX_PROFILE_UNION_5(StringTableEvent, AllocationEvent, DeallocationEvent, FullAllocationEvent, uint8_t) EventData;
private:
MemoryEventHeader mHeader;
EventData mData;
public:
MemoryEvent() {}
MemoryEvent( MemoryEventHeader inHeader, const EventData& inData = EventData() )
: mHeader( inHeader )
, mData( inData )
{
}
template<typename TDataType>
MemoryEvent( const TDataType& inType )
: mHeader( getMemoryEventType<TDataType>() )
, mData( inType )
{
//set the appropriate compression bits.
inType.setup( mHeader );
}
const MemoryEventHeader& getHeader() const { return mHeader; }
const EventData& getData() const { return mData; }
template<typename TDataType>
const TDataType& getValue() const { PX_ASSERT( mHeader.getType() == getMemoryEventType<TDataType>() ); return mData.toType<TDataType>(); }
template<typename TDataType>
TDataType& getValue() { PX_ASSERT( mHeader.getType() == getMemoryEventType<TDataType>() ); return mData.toType<TDataType>(); }
template<typename TRetVal, typename TOperator>
inline TRetVal visit( TOperator inOp ) const;
bool operator==( const MemoryEvent& inOther ) const
{
if ( !(mHeader == inOther.mHeader ) ) return false;
if ( mHeader.getType() )
return inOther.visit<bool>( EventDataEqualOperator<EventData>( mData ) );
return true;
}
};
template<typename TRetVal, typename TOperator>
inline TRetVal visit( MemoryEventTypes::Enum inEventType, const MemoryEvent::EventData& inData, TOperator inOperator )
{
switch( inEventType )
{
case MemoryEventTypes::StringTableEvent: return inOperator( inData.toType( Type2Type<StringTableEvent>() ) );
case MemoryEventTypes::AllocationEvent: return inOperator( inData.toType( Type2Type<AllocationEvent>() ) );
case MemoryEventTypes::DeallocationEvent: return inOperator( inData.toType( Type2Type<DeallocationEvent>() ) );
case MemoryEventTypes::FullAllocationEvent: return inOperator( inData.toType( Type2Type<FullAllocationEvent>() ) );
case MemoryEventTypes::Unknown: return inOperator( static_cast<uint8_t>( inEventType ) );
}
return TRetVal();
}
template<typename TRetVal, typename TOperator>
inline TRetVal MemoryEvent::visit( TOperator inOp ) const
{
return physx::profile::visit<TRetVal>( mHeader.getType(), mData, inOp );
}
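// Illustrative sketch (assumed caller-side code): a visitor that reacts only to
// allocation events; every other payload type falls through to the templated
// fallback overload.
//
//   struct AllocPrinter
//   {
//       bool operator()(const AllocationEvent& e) { return e.mSize != 0; }
//       template<typename T> bool operator()(const T&) { return false; }
//   };
//   bool wasAlloc = event.visit<bool>(AllocPrinter());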
}}
#endif
| 13,543 | C | 31.953771 | 140 | 0.72148 |
NVIDIA-Omniverse/PhysX/physx/source/pvd/src/PxPvdFoundation.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
#ifndef PX_PVD_FOUNDATION_H
#define PX_PVD_FOUNDATION_H
#include "foundation/PxVec3.h"
#include "foundation/PxTransform.h"
#include "foundation/PxBounds3.h"
#include "foundation/PxHashSet.h"
#include "foundation/PxHashMap.h"
#include "foundation/PxArray.h"
#include "foundation/PxString.h"
#include "foundation/PxPool.h"
#include "PxPvdObjectModelBaseTypes.h"
namespace physx
{
namespace pvdsdk
{
extern PxAllocatorCallback* gPvdAllocatorCallback;
class ForwardingAllocator : public PxAllocatorCallback
{
void* allocate(size_t size, const char* typeName, const char* filename, int line)
{
return PxGetBroadcastAllocator()->allocate(size, typeName, filename, line);
}
void deallocate(void* ptr)
{
PxGetBroadcastAllocator()->deallocate(ptr);
}
};
class RawMemoryBuffer
{
uint8_t* mBegin;
uint8_t* mEnd;
uint8_t* mCapacityEnd;
const char* mBufDataName;
public:
RawMemoryBuffer(const char* name) : mBegin(0), mEnd(0), mCapacityEnd(0),mBufDataName(name)
{
PX_UNUSED(mBufDataName);
}
~RawMemoryBuffer()
{
PX_FREE(mBegin);
}
uint32_t size() const
{
return static_cast<uint32_t>(mEnd - mBegin);
}
uint32_t capacity() const
{
return static_cast<uint32_t>(mCapacityEnd - mBegin);
}
uint8_t* begin()
{
return mBegin;
}
uint8_t* end()
{
return mEnd;
}
const uint8_t* begin() const
{
return mBegin;
}
const uint8_t* end() const
{
return mEnd;
}
void clear()
{
mEnd = mBegin;
}
const char* cStr()
{
if(mEnd && (*mEnd != 0))
write(0);
return reinterpret_cast<const char*>(mBegin);
}
uint32_t write(uint8_t inValue)
{
*growBuf(1) = inValue;
return 1;
}
template <typename TDataType>
uint32_t write(const TDataType& inValue)
{
const uint8_t* __restrict readPtr = reinterpret_cast<const uint8_t*>(&inValue);
uint8_t* __restrict writePtr = growBuf(sizeof(TDataType));
for(uint32_t idx = 0; idx < sizeof(TDataType); ++idx)
writePtr[idx] = readPtr[idx];
return sizeof(TDataType);
}
template <typename TDataType>
uint32_t write(const TDataType* inValue, uint32_t inLength)
{
uint32_t writeSize = inLength * sizeof(TDataType);
if(inValue && inLength)
{
physx::intrinsics::memCopy(growBuf(writeSize), inValue, writeSize);
}
if(inLength && !inValue)
{
PX_ASSERT(false);
// Writing nothing here would desynchronize the stream and crash
// the receiving end, so pad with zeros instead.
for(uint32_t idx = 0; idx < writeSize; ++idx)
write(0);
}
return writeSize;
}
uint8_t* growBuf(uint32_t inAmount)
{
uint32_t offset = size();
uint32_t newSize = offset + inAmount;
reserve(newSize);
mEnd += inAmount;
return mBegin + offset;
}
void writeZeros(uint32_t inAmount)
{
uint32_t offset = size();
growBuf(inAmount);
physx::intrinsics::memZero(begin() + offset, inAmount);
}
void reserve(uint32_t newSize)
{
uint32_t currentSize = size();
if(newSize && newSize >= capacity())
{
uint32_t newDataSize = newSize > 4096 ? newSize + (newSize >> 2) : newSize*2;
uint8_t* newData = static_cast<uint8_t*>(PX_ALLOC(newDataSize, mBufDataName));
if(mBegin)
{
physx::intrinsics::memCopy(newData, mBegin, currentSize);
PX_FREE(mBegin);
}
mBegin = newData;
mEnd = mBegin + currentSize;
mCapacityEnd = mBegin + newDataSize;
}
}
};
struct ForwardingMemoryBuffer : public RawMemoryBuffer
{
ForwardingMemoryBuffer(const char* bufDataName) : RawMemoryBuffer(bufDataName)
{
}
ForwardingMemoryBuffer& operator<<(const char* inString)
{
if(inString && *inString)
{
uint32_t len = static_cast<uint32_t>(strlen(inString));
write(inString, len);
}
return *this;
}
template <typename TDataType>
inline ForwardingMemoryBuffer& toStream(const char* inFormat, const TDataType inData)
{
char buffer[128] = { 0 };
Pxsnprintf(buffer, 128, inFormat, inData);
*this << buffer;
return *this;
}
inline ForwardingMemoryBuffer& operator<<(bool inData)
{
*this << (inData ? "true" : "false");
return *this;
}
inline ForwardingMemoryBuffer& operator<<(int32_t inData)
{
return toStream("%d", inData);
}
inline ForwardingMemoryBuffer& operator<<(uint16_t inData)
{
return toStream("%u", uint32_t(inData));
}
inline ForwardingMemoryBuffer& operator<<(uint8_t inData)
{
return toStream("%u", uint32_t(inData));
}
inline ForwardingMemoryBuffer& operator<<(char inData)
{
return toStream("%c", inData);
}
inline ForwardingMemoryBuffer& operator<<(uint32_t inData)
{
return toStream("%u", inData);
}
inline ForwardingMemoryBuffer& operator<<(uint64_t inData)
{
return toStream("%I64u", inData);
}
inline ForwardingMemoryBuffer& operator<<(int64_t inData)
{
return toStream("%I64d", inData);
}
inline ForwardingMemoryBuffer& operator<<(const void* inData)
{
return *this << static_cast<uint64_t>(reinterpret_cast<size_t>(inData));
}
inline ForwardingMemoryBuffer& operator<<(float inData)
{
return toStream("%g", double(inData));
}
inline ForwardingMemoryBuffer& operator<<(double inData)
{
return toStream("%g", inData);
}
inline ForwardingMemoryBuffer& operator<<(const PxVec3& inData)
{
*this << inData[0];
*this << " ";
*this << inData[1];
*this << " ";
*this << inData[2];
return *this;
}
inline ForwardingMemoryBuffer& operator<<(const PxQuat& inData)
{
*this << inData.x;
*this << " ";
*this << inData.y;
*this << " ";
*this << inData.z;
*this << " ";
*this << inData.w;
return *this;
}
inline ForwardingMemoryBuffer& operator<<(const PxTransform& inData)
{
*this << inData.q;
*this << " ";
*this << inData.p;
return *this;
}
inline ForwardingMemoryBuffer& operator<<(const PxBounds3& inData)
{
*this << inData.minimum;
*this << " ";
*this << inData.maximum;
return *this;
}
};
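// Illustrative usage sketch (assumed caller-side code): the chained stream
// operators build a human-readable representation in the underlying raw buffer.
//
//   ForwardingMemoryBuffer buf("DebugText"); // name is used as the allocation tag
//   buf << "pos: " << PxVec3(1.0f, 2.0f, 3.0f);
//   const char* text = buf.cStr();           // null-terminated view of the bytes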
template <typename TDataType>
inline void* PvdAllocate(const char* typeName, const char* file, int line)
{
PX_ASSERT(gPvdAllocatorCallback);
return gPvdAllocatorCallback->allocate(sizeof(TDataType), typeName, file, line);
}
template <typename TDataType>
inline void PvdDeleteAndDeallocate(TDataType* inDType)
{
PX_ASSERT(gPvdAllocatorCallback);
if(inDType)
{
inDType->~TDataType();
gPvdAllocatorCallback->deallocate(inDType);
}
}
}
}
#define PVD_NEW(dtype) new (PvdAllocate<dtype>(#dtype, PX_FL)) dtype
#define PVD_DELETE(obj) PvdDeleteAndDeallocate(obj);
//#define PVD_NEW(dtype) PX_NEW(dtype)
//#define PVD_DELETE(obj) PX_DELETE(obj)
#define PVD_FOREACH(varname, stop) for(uint32_t varname = 0; varname < stop; ++varname)
#define PVD_POINTER_TO_U64(ptr) static_cast<uint64_t>(reinterpret_cast<size_t>(ptr))
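// Illustrative usage sketch (assumption): PVD_NEW placement-constructs into memory
// obtained from gPvdAllocatorCallback and PVD_DELETE runs the destructor before
// deallocating, so the two must always be paired. "MyPvdHelper" is a hypothetical type.
//
//   MyPvdHelper* helper = PVD_NEW(MyPvdHelper);
//   // ... use helper ...
//   PVD_DELETE(helper);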
#endif
| 8,132 | C | 24.737342 | 91 | 0.703763 |
NVIDIA-Omniverse/PhysX/physx/source/pvd/include/PxPvdErrorCodes.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
#ifndef PX_PVD_ERROR_CODES_H
#define PX_PVD_ERROR_CODES_H
/** \addtogroup pvd
@{
*/
#include "foundation/Px.h"
#if !PX_DOXYGEN
namespace physx
{
namespace pvdsdk
{
#endif
struct PvdErrorType
{
enum Enum
{
Success = 0,
NetworkError,
ArgumentError,
Disconnect,
InternalProblem
};
};
typedef PvdErrorType::Enum PvdError;
#if !PX_DOXYGEN
}
}
#endif
/** @} */
#endif
| 1,945 | C | 29.40625 | 74 | 0.750643 |
NVIDIA-Omniverse/PhysX/physx/source/pvd/include/PxPvdUserRenderer.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
#ifndef PX_PVD_USER_RENDERER_H
#define PX_PVD_USER_RENDERER_H
/** \addtogroup pvd
@{
*/
#include "foundation/PxVec3.h"
#include "foundation/PxTransform.h"
#include "common/PxRenderBuffer.h"
#include "pvd/PxPvd.h"
#include "PxPvdDataStream.h"
#include "foundation/PxUserAllocated.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxPvd;
#if !PX_DOXYGEN
namespace pvdsdk
{
#endif
class RendererEventClient;
class PvdUserRenderer : public PxUserAllocated
{
protected:
virtual ~PvdUserRenderer()
{
}
public:
virtual void release() = 0;
virtual void setClient(RendererEventClient* client) = 0;
// Instance to associate further rendering with.
virtual void setInstanceId(const void* instanceId) = 0;
// Draw these points associated with this instance
virtual void drawPoints(const PxDebugPoint* points, uint32_t count) = 0;
// Draw these lines associated with this instance
virtual void drawLines(const PxDebugLine* lines, uint32_t count) = 0;
// Draw these triangles associated with this instance
virtual void drawTriangles(const PxDebugTriangle* triangles, uint32_t count) = 0;
// Draw this text associated with this instance
virtual void drawText(const PxDebugText& text) = 0;
// Draw SDK debug render
virtual void drawRenderbuffer(const PxDebugPoint* pointData, uint32_t pointCount, const PxDebugLine* lineData,
uint32_t lineCount, const PxDebugTriangle* triangleData, uint32_t triangleCount) = 0;
// Constraint visualization routines
virtual void visualizeJointFrames(const PxTransform& parent, const PxTransform& child) = 0;
virtual void visualizeLinearLimit(const PxTransform& t0, const PxTransform& t1, float value) = 0;
virtual void visualizeAngularLimit(const PxTransform& t0, float lower, float upper) = 0;
virtual void visualizeLimitCone(const PxTransform& t, float tanQSwingY, float tanQSwingZ) = 0;
virtual void visualizeDoubleCone(const PxTransform& t, float angle) = 0;
// Clear the immediate buffer.
virtual void flushRenderEvents() = 0;
static PvdUserRenderer* create(uint32_t bufferSize = 0x2000);
};
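// Illustrative usage sketch (assumed caller-side code): immediate-mode debug
// drawing tied to an instance, then flushed to the attached RendererEventClient.
//
//   PvdUserRenderer* renderer = PvdUserRenderer::create();
//   renderer->setInstanceId(myActor);        // myActor assumed
//   renderer->drawLines(lines, lineCount);   // lines/lineCount assumed
//   renderer->flushRenderEvents();
//   renderer->release();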
class RendererEventClient
{
public:
virtual ~RendererEventClient(){}
virtual void handleBufferFlush(const uint8_t* inData, uint32_t inLength) = 0;
};
#if !PX_DOXYGEN
}
}
#endif
/** @} */
#endif
| 3,856 | C | 34.385321 | 116 | 0.759855 |
NVIDIA-Omniverse/PhysX/physx/source/pvd/include/PxPvdClient.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PVD_CLIENT_H
#define PX_PVD_CLIENT_H
/** \addtogroup pvd
@{
*/
#include "foundation/PxFlags.h"
#include "foundation/PxVec3.h"
#if !PX_DOXYGEN
namespace physx
{
namespace pvdsdk
{
#endif
class PvdDataStream;
class PvdUserRenderer;
/**
\brief PvdClient is the per-client connection to PVD.
It provides callbacks for when PVD is connected/disconnected.
It provides access to the internal objects so that advanced users can create extension clients.
*/
class PvdClient
{
public:
virtual PvdDataStream* getDataStream() = 0;
virtual bool isConnected() const = 0;
virtual void onPvdConnected() = 0;
virtual void onPvdDisconnected() = 0;
virtual void flush() = 0;
protected:
virtual ~PvdClient()
{
}
};
#if !PX_DOXYGEN
} // namespace pvdsdk
} // namespace physx
#endif
/** @} */
#endif
| 2,497 | C | 31.441558 | 93 | 0.752103 |
NVIDIA-Omniverse/PhysX/physx/source/pvd/include/PxPvdDataStream.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
#ifndef PX_PVD_DATA_STREAM_H
#define PX_PVD_DATA_STREAM_H
/** \addtogroup pvd
@{
*/
#include "pvd/PxPvd.h"
#include "PxPvdErrorCodes.h"
#include "PxPvdObjectModelBaseTypes.h"
#if !PX_DOXYGEN
namespace physx
{
namespace pvdsdk
{
#endif
class PvdPropertyDefinitionHelper;
class PvdMetaDataStream
{
protected:
virtual ~PvdMetaDataStream()
{
}
public:
virtual PvdError createClass(const NamespacedName& nm) = 0;
template <typename TDataType>
PvdError createClass()
{
return createClass(getPvdNamespacedNameForType<TDataType>());
}
virtual PvdError deriveClass(const NamespacedName& parent, const NamespacedName& child) = 0;
template <typename TParentType, typename TChildType>
PvdError deriveClass()
{
return deriveClass(getPvdNamespacedNameForType<TParentType>(), getPvdNamespacedNameForType<TChildType>());
}
virtual bool isClassExist(const NamespacedName& nm) = 0;
template <typename TDataType>
bool isClassExist()
{
return isClassExist(getPvdNamespacedNameForType<TDataType>());
}
virtual PvdError createProperty(const NamespacedName& clsName, const char* name, const char* semantic,
const NamespacedName& dtypeName, PropertyType::Enum propertyType,
DataRef<NamedValue> values = DataRef<NamedValue>()) = 0;
template <typename TClsType, typename TDataType>
PvdError createProperty(String name, String semantic = "", PropertyType::Enum propertyType = PropertyType::Scalar,
DataRef<NamedValue> values = DataRef<NamedValue>())
{
return createProperty(getPvdNamespacedNameForType<TClsType>(), name, semantic,
getPvdNamespacedNameForType<TDataType>(), propertyType, values);
}
virtual PvdError createPropertyMessage(const NamespacedName& cls, const NamespacedName& msgName,
DataRef<PropertyMessageArg> entries, uint32_t messageSizeInBytes) = 0;
template <typename TClsType, typename TMsgType>
PvdError createPropertyMessage(DataRef<PropertyMessageArg> entries)
{
return createPropertyMessage(getPvdNamespacedNameForType<TClsType>(), getPvdNamespacedNameForType<TMsgType>(),
entries, sizeof(TMsgType));
}
};
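// Illustrative sketch (assumed caller-side code): registering a hypothetical
// mirror class and a scalar float property on it. A full PvdDataStream (declared
// below) exposes both this meta-data interface and the instance interface, and the
// sketch assumes MyActorMirror has a registered PVD namespaced name.
//
//   stream.createClass<MyActorMirror>();
//   stream.createProperty<MyActorMirror, float>("mass", "kg");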
class PvdInstanceDataStream
{
protected:
virtual ~PvdInstanceDataStream()
{
}
public:
virtual PvdError createInstance(const NamespacedName& cls, const void* instance) = 0;
template <typename TDataType>
PvdError createInstance(const TDataType* inst)
{
return createInstance(getPvdNamespacedNameForType<TDataType>(), inst);
}
virtual bool isInstanceValid(const void* instance) = 0;
// Use this variant if the property fits in, or is already completely in, memory.
virtual PvdError setPropertyValue(const void* instance, String name, DataRef<const uint8_t> data,
const NamespacedName& incomingTypeName) = 0;
template <typename TDataType>
PvdError setPropertyValue(const void* instance, String name, const TDataType& value)
{
const uint8_t* dataStart = reinterpret_cast<const uint8_t*>(&value);
return setPropertyValue(instance, name, DataRef<const uint8_t>(dataStart, dataStart + sizeof(TDataType)),
getPvdNamespacedNameForType<TDataType>());
}
template <typename TDataType>
PvdError setPropertyValue(const void* instance, String name, const TDataType* value, uint32_t numItems)
{
const uint8_t* dataStart = reinterpret_cast<const uint8_t*>(value);
return setPropertyValue(instance, name,
DataRef<const uint8_t>(dataStart, dataStart + sizeof(TDataType) * numItems),
getPvdNamespacedNameForType<TDataType>());
}
// If the property is very large (e.g. contact reports), you can instead send it in chunks.
virtual PvdError beginSetPropertyValue(const void* instance, String name, const NamespacedName& incomingTypeName) = 0;
template <typename TDataType>
PvdError beginSetPropertyValue(const void* instance, String name)
{
return beginSetPropertyValue(instance, name, getPvdNamespacedNameForType<TDataType>());
}
virtual PvdError appendPropertyValueData(DataRef<const uint8_t> data) = 0;
template <typename TDataType>
PvdError appendPropertyValueData(const TDataType* value, uint32_t numItems)
{
const uint8_t* dataStart = reinterpret_cast<const uint8_t*>(value);
return appendPropertyValueData(DataRef<const uint8_t>(dataStart, dataStart + numItems * sizeof(TDataType)));
}
virtual PvdError endSetPropertyValue() = 0;
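// Illustrative sketch (assumed caller-side code): streaming a large array property
// in chunks instead of one setPropertyValue call. "instance", "items", "itemCount"
// and "chunkSize" are assumed, and MyItemType is a hypothetical element type.
//
//   stream.beginSetPropertyValue<MyItemType>(instance, "items");
//   for(uint32_t i = 0; i < itemCount; i += chunkSize)
//       stream.appendPropertyValueData(items + i, PxMin(chunkSize, itemCount - i));
//   stream.endSetPropertyValue();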
// Set a set of properties to various values on an object.
virtual PvdError setPropertyMessage(const void* instance, const NamespacedName& msgName,
DataRef<const uint8_t> data) = 0;
template <typename TDataType>
PvdError setPropertyMessage(const void* instance, const TDataType& value)
{
const uint8_t* dataStart = reinterpret_cast<const uint8_t*>(&value);
return setPropertyMessage(instance, getPvdNamespacedNameForType<TDataType>(),
DataRef<const uint8_t>(dataStart, sizeof(TDataType)));
}
// If you need to send a lot of identical messages, this avoids a hashtable lookup per message.
virtual PvdError beginPropertyMessageGroup(const NamespacedName& msgName) = 0;
template <typename TDataType>
PvdError beginPropertyMessageGroup()
{
return beginPropertyMessageGroup(getPvdNamespacedNameForType<TDataType>());
}
virtual PvdError sendPropertyMessageFromGroup(const void* instance, DataRef<const uint8_t> data) = 0;
template <typename TDataType>
PvdError sendPropertyMessageFromGroup(const void* instance, const TDataType& value)
{
const uint8_t* dataStart = reinterpret_cast<const uint8_t*>(&value);
return sendPropertyMessageFromGroup(instance, DataRef<const uint8_t>(dataStart, sizeof(TDataType)));
}
virtual PvdError endPropertyMessageGroup() = 0;
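// Illustrative sketch (assumed caller-side code): sending many identical property
// messages through a group so the message-name lookup happens only once.
// MyMsgType, bodies and messages are hypothetical.
//
//   stream.beginPropertyMessageGroup<MyMsgType>();
//   for(uint32_t i = 0; i < bodyCount; ++i)
//       stream.sendPropertyMessageFromGroup(bodies[i], messages[i]);
//   stream.endPropertyMessageGroup();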
// These functions ensure the target array doesn't contain duplicates
virtual PvdError pushBackObjectRef(const void* instId, String propName, const void* objRef) = 0;
virtual PvdError removeObjectRef(const void* instId, String propName, const void* objRef) = 0;
// Instance elimination.
virtual PvdError destroyInstance(const void* key) = 0;
// Profiling hooks
virtual PvdError beginSection(const void* instance, String name) = 0;
virtual PvdError endSection(const void* instance, String name) = 0;
// Origin Shift
virtual PvdError originShift(const void* scene, PxVec3 shift) = 0;
public:
/*For some cases, a pvd command cannot be run immediately. For example, when creating
*joints while the actors are still pending insertion, the joint update commands must
*be run deferred.
*/
class PvdCommand
{
public:
// Assignment operator is needed for copying
PvdCommand(const PvdCommand&)
{
}
PvdCommand& operator=(const PvdCommand&)
{
return *this;
}
public:
PvdCommand()
{
}
virtual ~PvdCommand()
{
}
// Not pure virtual so a default PvdCommand object can exist
virtual bool canRun(PvdInstanceDataStream&)
{
return false;
}
virtual void run(PvdInstanceDataStream&)
{
}
};
// The PVD SDK provides this helper function to allocate a command's memory; the memory is released after the command queue is flushed.
virtual void* allocateMemForCmd(uint32_t length) = 0;
// PVD will call the destructor of each PvdCommand object at the end of flushPvdCommand.
virtual void pushPvdCommand(PvdCommand& cmd) = 0;
virtual void flushPvdCommand() = 0;
};
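// Illustrative sketch (assumed caller-side code): deferring work with a PvdCommand.
// Memory comes from allocateMemForCmd, the object is placement-new constructed, and
// the stream runs the destructor after flushPvdCommand. MyDeferredCmd is hypothetical.
//
//   struct MyDeferredCmd : public PvdInstanceDataStream::PvdCommand
//   {
//       virtual bool canRun(PvdInstanceDataStream&) { return true; }
//       virtual void run(PvdInstanceDataStream& s) { /* issue deferred writes */ }
//   };
//   void* mem = stream.allocateMemForCmd(sizeof(MyDeferredCmd));
//   stream.pushPvdCommand(*(new (mem) MyDeferredCmd()));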
class PvdDataStream : public PvdInstanceDataStream, public PvdMetaDataStream
{
protected:
virtual ~PvdDataStream()
{
}
public:
virtual void release() = 0;
virtual bool isConnected() = 0;
virtual void addProfileZone(void* zone, const char* name) = 0;
virtual void addProfileZoneEvent(void* zone, const char* name, uint16_t eventId, bool compileTimeEnabled) = 0;
virtual PvdPropertyDefinitionHelper& getPropertyDefinitionHelper() = 0;
virtual void setIsTopLevelUIElement(const void* instance, bool topLevel) = 0;
virtual void sendErrorMessage(uint32_t code, const char* message, const char* file, uint32_t line) = 0;
virtual void updateCamera(const char* name, const PxVec3& origin, const PxVec3& up, const PxVec3& target) = 0;
/**
\brief Create a new PvdDataStream.
\param pvd A pointer to a valid PxPvd instance. This must be non-null.
*/
static PvdDataStream* create(PxPvd* pvd);
};
#if !PX_DOXYGEN
} // pvdsdk
} // physx
#endif
/** @} */
#endif
| 9,961 | C | 35.357664 | 119 | 0.740086 |
NVIDIA-Omniverse/PhysX/physx/source/pvd/include/PsPvd.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PS_PVD_H
#define PS_PVD_H
/** \addtogroup pvd
@{
*/
#include "pvd/PxPvd.h"
#include "foundation/PxBroadcast.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxPvdTransport;
#if !PX_DOXYGEN
namespace pvdsdk
{
#endif
class PvdDataStream;
class PvdClient;
class PvdOMMetaDataProvider;
// PsPvd is intended for advanced users; it supports the custom PVD client API
class PsPvd : public physx::PxPvd, public PxAllocationListener
{
public:
virtual void addClient(PvdClient* client) = 0;
virtual void removeClient(PvdClient* client) = 0;
virtual bool registerObject(const void* inItem) = 0;
virtual bool unRegisterObject(const void* inItem) = 0;
//AllocationListener
	virtual void onAllocation(size_t size, const char* typeName, const char* filename, int line, void* allocatedMemory) = 0;
	virtual void onDeallocation(void* addr) = 0;
virtual PvdOMMetaDataProvider& getMetaDataProvider() = 0;
virtual uint64_t getNextStreamId() = 0;
// Call to flush events to PVD
virtual void flush() = 0;
};
#if !PX_DOXYGEN
} // namespace pvdsdk
} // namespace physx
#endif
/** @} */
#endif
| 2,775 | C | 32.047619 | 113 | 0.752793 |
NVIDIA-Omniverse/PhysX/physx/source/pvd/include/PxPvdDataStreamHelpers.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
#ifndef PX_PVD_DATA_STREAM_HELPERS_H
#define PX_PVD_DATA_STREAM_HELPERS_H
/** \addtogroup pvd
@{
*/
#include "PxPvdObjectModelBaseTypes.h"
#if !PX_DOXYGEN
namespace physx
{
namespace pvdsdk
{
#endif
class PvdPropertyDefinitionHelper
{
protected:
virtual ~PvdPropertyDefinitionHelper()
{
}
public:
/**
	Push a name c so that it is appended as in a.b.c.
*/
virtual void pushName(const char* inName, const char* inAppendStr = ".") = 0;
/**
	Push a name c so that it is appended as in a.b[c]
*/
virtual void pushBracketedName(const char* inName, const char* leftBracket = "[", const char* rightBracket = "]") = 0;
/**
* Pop the current name
*/
virtual void popName() = 0;
virtual void clearNameStack() = 0;
/**
* Get the current name at the top of the name stack.
* Would return "a.b.c" or "a.b[c]" in the above examples.
*/
virtual const char* getTopName() = 0;
virtual void addNamedValue(const char* name, uint32_t value) = 0;
virtual void clearNamedValues() = 0;
virtual DataRef<NamedValue> getNamedValues() = 0;
/**
* Define a property using the top of the name stack and the passed-in semantic
*/
virtual void createProperty(const NamespacedName& clsName, const char* inSemantic, const NamespacedName& dtypeName,
PropertyType::Enum propType = PropertyType::Scalar) = 0;
template <typename TClsType, typename TDataType>
void createProperty(const char* inSemantic = "", PropertyType::Enum propType = PropertyType::Scalar)
{
createProperty(getPvdNamespacedNameForType<TClsType>(), inSemantic, getPvdNamespacedNameForType<TDataType>(),
propType);
}
	// The datatype used for instances needs to be a pointer unless you actually have
	// pvdsdk::InstanceId members on your value structs.
virtual void addPropertyMessageArg(const NamespacedName& inDatatype, uint32_t inOffset, uint32_t inSize) = 0;
template <typename TDataType>
void addPropertyMessageArg(uint32_t offset)
{
addPropertyMessageArg(getPvdNamespacedNameForType<TDataType>(), offset, static_cast<uint32_t>(sizeof(TDataType)));
}
virtual void addPropertyMessage(const NamespacedName& clsName, const NamespacedName& msgName,
uint32_t inStructSizeInBytes) = 0;
template <typename TClsType, typename TMsgType>
void addPropertyMessage()
{
addPropertyMessage(getPvdNamespacedNameForType<TClsType>(), getPvdNamespacedNameForType<TMsgType>(),
static_cast<uint32_t>(sizeof(TMsgType)));
}
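	// Example (illustrative sketch): defining a property and a mirrored
	// property message. `MyActor` and `MyPoseMsg` are hypothetical user types;
	// PX_OFFSET_OF is the foundation offset-of helper.
	//
	//   helper.pushName("globalPose");
	//   helper.createProperty<MyActor, PxTransform>();
	//   helper.popName();
	//   helper.addPropertyMessageArg<PxTransform>(PX_OFFSET_OF(MyPoseMsg, pose));
	//   helper.addPropertyMessage<MyActor, MyPoseMsg>();
	//   helper.clearBufferedData();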
virtual void clearPropertyMessageArgs() = 0;
void clearBufferedData()
{
clearNameStack();
clearPropertyMessageArgs();
clearNamedValues();
}
};
#if !PX_DOXYGEN
} // pvdsdk
} // physx
#endif
/** @} */
#endif
| 4,289 | C | 34.163934 | 119 | 0.73094 |
NVIDIA-Omniverse/PhysX/physx/source/pvd/include/PxPvdObjectModelBaseTypes.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
#ifndef PX_PVD_OBJECT_MODEL_BASE_TYPES_H
#define PX_PVD_OBJECT_MODEL_BASE_TYPES_H
/** \addtogroup pvd
@{
*/
#include "foundation/PxAssert.h"
#if !PX_DOXYGEN
namespace physx
{
namespace pvdsdk
{
#endif
using namespace physx;
inline const char* nonNull(const char* str)
{
return str ? str : "";
}
// strcmp will crash if passed a null string, so we need
// to make sure that doesn't happen. We do that by equating
// NULL and the empty string, "".
inline bool safeStrEq(const char* lhs, const char* rhs)
{
return ::strcmp(nonNull(lhs), nonNull(rhs)) == 0;
}
// Does this string have useful information in it?
inline bool isMeaningful(const char* str)
{
return *(nonNull(str)) > 0;
}
inline uint32_t safeStrLen(const char* str)
{
str = nonNull(str);
return static_cast<uint32_t>(strlen(str));
}
struct ObjectRef
{
int32_t mInstanceId;
ObjectRef(int32_t iid = -1) : mInstanceId(iid)
{
}
operator int32_t() const
{
return mInstanceId;
}
bool hasValue() const
{
return mInstanceId > 0;
}
};
struct U32Array4
{
uint32_t mD0;
uint32_t mD1;
uint32_t mD2;
uint32_t mD3;
U32Array4(uint32_t d0, uint32_t d1, uint32_t d2, uint32_t d3) : mD0(d0), mD1(d1), mD2(d2), mD3(d3)
{
}
U32Array4() : mD0(0), mD1(0), mD2(0), mD3(0)
{
}
};
typedef bool PvdBool;
typedef const char* String;
typedef void* VoidPtr;
typedef double PvdF64;
typedef float PvdF32;
typedef int64_t PvdI64;
typedef uint64_t PvdU64;
typedef int32_t PvdI32;
typedef uint32_t PvdU32;
typedef int16_t PvdI16;
typedef uint16_t PvdU16;
typedef int8_t PvdI8;
typedef uint8_t PvdU8;
struct PvdColor
{
uint8_t r;
uint8_t g;
uint8_t b;
uint8_t a;
PvdColor(uint8_t _r, uint8_t _g, uint8_t _b, uint8_t _a = 255) : r(_r), g(_g), b(_b), a(_a)
{
}
PvdColor() : r(0), g(0), b(0), a(255)
{
}
PvdColor(uint32_t abgr)
{
uint8_t* valPtr = reinterpret_cast<uint8_t*>(&abgr);
r = valPtr[0];
g = valPtr[1];
b = valPtr[2];
a = valPtr[3];
}
};
struct StringHandle
{
uint32_t mHandle;
StringHandle(uint32_t val = 0) : mHandle(val)
{
}
operator uint32_t() const
{
return mHandle;
}
};
#define DECLARE_TYPES \
DECLARE_BASE_PVD_TYPE(PvdI8) \
DECLARE_BASE_PVD_TYPE(PvdU8) \
DECLARE_BASE_PVD_TYPE(PvdI16) \
DECLARE_BASE_PVD_TYPE(PvdU16) \
DECLARE_BASE_PVD_TYPE(PvdI32) \
DECLARE_BASE_PVD_TYPE(PvdU32) \
DECLARE_BASE_PVD_TYPE(PvdI64) \
DECLARE_BASE_PVD_TYPE(PvdU64) \
DECLARE_BASE_PVD_TYPE(PvdF32) \
DECLARE_BASE_PVD_TYPE(PvdF64) \
DECLARE_BASE_PVD_TYPE(PvdBool) \
DECLARE_BASE_PVD_TYPE(PvdColor) \
DECLARE_BASE_PVD_TYPE(String) \
DECLARE_BASE_PVD_TYPE(StringHandle) \
DECLARE_BASE_PVD_TYPE(ObjectRef) \
DECLARE_BASE_PVD_TYPE(VoidPtr) \
DECLARE_BASE_PVD_TYPE(PxVec2) \
DECLARE_BASE_PVD_TYPE(PxVec3) \
DECLARE_BASE_PVD_TYPE(PxVec4) \
DECLARE_BASE_PVD_TYPE(PxBounds3) \
DECLARE_BASE_PVD_TYPE(PxQuat) \
DECLARE_BASE_PVD_TYPE(PxTransform) \
DECLARE_BASE_PVD_TYPE(PxMat33) \
DECLARE_BASE_PVD_TYPE(PxMat44) \
DECLARE_BASE_PVD_TYPE(U32Array4)
struct PvdBaseType
{
enum Enum
{
None = 0,
InternalStart = 1,
InternalStop = 64,
#define DECLARE_BASE_PVD_TYPE(type) type,
DECLARE_TYPES
Last
#undef DECLARE_BASE_PVD_TYPE
};
};
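// The X-macro above expands each DECLARE_BASE_PVD_TYPE(type) entry to a plain
// enum value (PvdI8, PvdU8, ..., U32Array4), so the base datatypes occupy a
// contiguous range starting right after InternalStop.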
struct NamespacedName
{
String mNamespace;
String mName;
NamespacedName(String ns, String nm) : mNamespace(ns), mName(nm)
{
}
NamespacedName(String nm = "") : mNamespace(""), mName(nm)
{
}
bool operator==(const NamespacedName& other) const
{
return safeStrEq(mNamespace, other.mNamespace) && safeStrEq(mName, other.mName);
}
};
struct NamedValue
{
String mName;
uint32_t mValue;
NamedValue(String nm = "", uint32_t val = 0) : mName(nm), mValue(val)
{
}
};
template <typename T>
struct BaseDataTypeToTypeMap
{
bool compile_error;
};
template <PvdBaseType::Enum>
struct BaseTypeToDataTypeMap
{
bool compile_error;
};
// Users can extend this mapping with new datatypes.
template <typename T>
struct PvdDataTypeToNamespacedNameMap
{
bool Name;
};
// This mapping tells you what class id to use for the base datatypes
//
#define DECLARE_BASE_PVD_TYPE(type) \
template <> \
struct BaseDataTypeToTypeMap<type> \
{ \
enum Enum \
{ \
BaseTypeEnum = PvdBaseType::type \
}; \
}; \
template <> \
struct BaseDataTypeToTypeMap<const type&> \
{ \
enum Enum \
{ \
BaseTypeEnum = PvdBaseType::type \
}; \
}; \
template <> \
struct BaseTypeToDataTypeMap<PvdBaseType::type> \
{ \
typedef type TDataType; \
}; \
template <> \
struct PvdDataTypeToNamespacedNameMap<type> \
{ \
NamespacedName Name; \
PvdDataTypeToNamespacedNameMap<type>() : Name("physx3", #type) \
{ \
} \
}; \
template <> \
struct PvdDataTypeToNamespacedNameMap<const type&> \
{ \
NamespacedName Name; \
PvdDataTypeToNamespacedNameMap<const type&>() : Name("physx3", #type) \
{ \
} \
};
DECLARE_TYPES
#undef DECLARE_BASE_PVD_TYPE
template <typename TDataType>
inline int32_t getPvdTypeForType()
{
return static_cast<PvdBaseType::Enum>(BaseDataTypeToTypeMap<TDataType>::BaseTypeEnum);
}
template <typename TDataType>
inline NamespacedName getPvdNamespacedNameForType()
{
return PvdDataTypeToNamespacedNameMap<TDataType>().Name;
}
#define DEFINE_PVD_TYPE_NAME_MAP(type, ns, name) \
template <> \
struct PvdDataTypeToNamespacedNameMap<type> \
{ \
NamespacedName Name; \
PvdDataTypeToNamespacedNameMap<type>() : Name(ns, name) \
{ \
} \
};
#define DEFINE_PVD_TYPE_ALIAS(newType, oldType) \
template <> \
struct PvdDataTypeToNamespacedNameMap<newType> \
{ \
NamespacedName Name; \
PvdDataTypeToNamespacedNameMap<newType>() : Name(PvdDataTypeToNamespacedNameMap<oldType>().Name) \
{ \
} \
};
DEFINE_PVD_TYPE_ALIAS(const void*, void*)
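// Example (illustrative sketch): registering a custom user datatype with the
// name map so it can be streamed. `MyDebugVertex` is a hypothetical struct.
//
//   struct MyDebugVertex { PxVec3 pos; PvdColor color; };
//   DEFINE_PVD_TYPE_NAME_MAP(MyDebugVertex, "myApp", "MyDebugVertex")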
struct ArrayData
{
uint8_t* mBegin;
uint8_t* mEnd;
	uint8_t* mCapacity; // >= mEnd
ArrayData(uint8_t* beg = NULL, uint8_t* end = NULL, uint8_t* cap = NULL) : mBegin(beg), mEnd(end), mCapacity(cap)
{
}
uint8_t* begin()
{
return mBegin;
}
uint8_t* end()
{
return mEnd;
}
uint32_t byteCapacity()
{
return static_cast<uint32_t>(mCapacity - mBegin);
}
uint32_t byteSize() const
{
return static_cast<uint32_t>(mEnd - mBegin);
} // in bytes
uint32_t numberOfItems(uint32_t objectByteSize)
{
if(objectByteSize)
return byteSize() / objectByteSize;
return 0;
}
void forgetData()
{
mBegin = mEnd = mCapacity = 0;
}
};
template <typename T>
class DataRef
{
const T* mBegin;
const T* mEnd;
public:
DataRef(const T* b, uint32_t count) : mBegin(b), mEnd(b + count)
{
}
DataRef(const T* b = NULL, const T* e = NULL) : mBegin(b), mEnd(e)
{
}
DataRef(const DataRef& o) : mBegin(o.mBegin), mEnd(o.mEnd)
{
}
DataRef& operator=(const DataRef& o)
{
mBegin = o.mBegin;
mEnd = o.mEnd;
return *this;
}
uint32_t size() const
{
return static_cast<uint32_t>(mEnd - mBegin);
}
const T* begin() const
{
return mBegin;
}
const T* end() const
{
return mEnd;
}
const T& operator[](uint32_t idx) const
{
PX_ASSERT(idx < size());
return mBegin[idx];
}
const T& back() const
{
PX_ASSERT(mEnd > mBegin);
return *(mEnd - 1);
}
};
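// Example (illustrative sketch): wrapping an existing buffer without copying.
// `verts` is a hypothetical local array.
//
//   PxVec3 verts[3];
//   DataRef<PxVec3> ref(verts, 3); // ref.size() == 3, ref[0] aliases verts[0]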
struct PropertyType
{
enum Enum
{
Unknown = 0,
Scalar,
Array
};
};
// argument to the create property message function
struct PropertyMessageArg
{
String mPropertyName;
NamespacedName mDatatypeName;
// where in the message this property starts.
uint32_t mMessageOffset;
// size of this entry object
uint32_t mByteSize;
PropertyMessageArg(String propName, NamespacedName dtype, uint32_t msgOffset, uint32_t byteSize)
: mPropertyName(propName), mDatatypeName(dtype), mMessageOffset(msgOffset), mByteSize(byteSize)
{
}
PropertyMessageArg() : mPropertyName(""), mMessageOffset(0), mByteSize(0)
{
}
};
class PvdUserRenderer;
DEFINE_PVD_TYPE_NAME_MAP(PvdUserRenderer, "_debugger_", "PvdUserRenderer")
#if !PX_DOXYGEN
}
}
#endif
/** @} */
#endif
| 14,371 | C | 32.501165 | 120 | 0.455431 |
NVIDIA-Omniverse/PhysX/physx/source/pvd/include/PxProfileAllocatorWrapper.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PROFILE_ALLOCATOR_WRAPPER_H
#define PX_PROFILE_ALLOCATOR_WRAPPER_H
#include "foundation/PxPreprocessor.h"
#include "foundation/PxAllocatorCallback.h"
#include "foundation/PxErrorCallback.h"
#include "foundation/PxAssert.h"
#include "foundation/PxHashMap.h"
#include "foundation/PxArray.h"
namespace physx { namespace profile {
/**
\brief Helper struct to encapsulate the user allocator callback
Useful for array and hash templates
*/
struct PxProfileAllocatorWrapper
{
PxAllocatorCallback* mUserAllocator;
PxProfileAllocatorWrapper( PxAllocatorCallback& inUserAllocator )
: mUserAllocator( &inUserAllocator )
{
}
PxProfileAllocatorWrapper( PxAllocatorCallback* inUserAllocator )
: mUserAllocator( inUserAllocator )
{
}
PxAllocatorCallback& getAllocator() const
{
PX_ASSERT( NULL != mUserAllocator );
return *mUserAllocator;
}
};
/**
\brief Helper class to encapsulate the reflection allocator
*/
template <typename T>
class PxProfileWrapperReflectionAllocator
{
static const char* getName()
{
#if PX_LINUX || PX_OSX || PX_EMSCRIPTEN || PX_SWITCH
return __PRETTY_FUNCTION__;
#else
return typeid(T).name();
#endif
}
PxProfileAllocatorWrapper* mWrapper;
public:
PxProfileWrapperReflectionAllocator(PxProfileAllocatorWrapper& inWrapper) : mWrapper( &inWrapper ) {}
PxProfileWrapperReflectionAllocator( const PxProfileWrapperReflectionAllocator& inOther )
: mWrapper( inOther.mWrapper )
{
}
PxProfileWrapperReflectionAllocator& operator=( const PxProfileWrapperReflectionAllocator& inOther )
{
mWrapper = inOther.mWrapper;
return *this;
}
PxAllocatorCallback& getAllocator() { return mWrapper->getAllocator(); }
void* allocate(size_t size, const char* filename, int line)
{
#if PX_CHECKED // checked and debug builds
if(!size)
return 0;
return getAllocator().allocate(size, getName(), filename, line);
#else
return getAllocator().allocate(size, "<no allocation names in this config>", filename, line);
#endif
}
void deallocate(void* ptr)
{
if(ptr)
getAllocator().deallocate(ptr);
}
};
/**
\brief Helper class to encapsulate the named allocator
*/
struct PxProfileWrapperNamedAllocator
{
PxProfileAllocatorWrapper* mWrapper;
const char* mAllocationName;
PxProfileWrapperNamedAllocator(PxProfileAllocatorWrapper& inWrapper, const char* inAllocationName)
: mWrapper( &inWrapper )
, mAllocationName( inAllocationName )
{}
PxProfileWrapperNamedAllocator( const PxProfileWrapperNamedAllocator& inOther )
: mWrapper( inOther.mWrapper )
, mAllocationName( inOther.mAllocationName )
{
}
PxProfileWrapperNamedAllocator& operator=( const PxProfileWrapperNamedAllocator& inOther )
{
mWrapper = inOther.mWrapper;
mAllocationName = inOther.mAllocationName;
return *this;
}
PxAllocatorCallback& getAllocator() { return mWrapper->getAllocator(); }
void* allocate(size_t size, const char* filename, int line)
{
if(!size)
return 0;
return getAllocator().allocate(size, mAllocationName, filename, line);
}
void deallocate(void* ptr)
{
if(ptr)
getAllocator().deallocate(ptr);
}
};
/**
\brief Helper struct to encapsulate the array
*/
template<class T>
struct PxProfileArray : public PxArray<T, PxProfileWrapperReflectionAllocator<T> >
{
typedef PxProfileWrapperReflectionAllocator<T> TAllocatorType;
PxProfileArray( PxProfileAllocatorWrapper& inWrapper )
: PxArray<T, TAllocatorType >( TAllocatorType( inWrapper ) )
{
}
PxProfileArray( const PxProfileArray< T >& inOther )
: PxArray<T, TAllocatorType >( inOther, inOther )
{
}
};
/**
	\brief Helper struct to encapsulate the hash map
*/
template<typename TKeyType, typename TValueType, typename THashType=PxHash<TKeyType> >
struct PxProfileHashMap : public PxHashMap<TKeyType, TValueType, THashType, PxProfileWrapperReflectionAllocator< TValueType > >
{
typedef PxHashMap<TKeyType, TValueType, THashType, PxProfileWrapperReflectionAllocator< TValueType > > THashMapType;
typedef PxProfileWrapperReflectionAllocator<TValueType> TAllocatorType;
PxProfileHashMap( PxProfileAllocatorWrapper& inWrapper )
: THashMapType( TAllocatorType( inWrapper ) )
{
}
};
/**
\brief Helper function to encapsulate the profile allocation
*/
template<typename TDataType>
inline TDataType* PxProfileAllocate( PxAllocatorCallback* inAllocator, const char* file, int inLine )
{
PxProfileAllocatorWrapper wrapper( inAllocator );
typedef PxProfileWrapperReflectionAllocator< TDataType > TAllocator;
TAllocator theAllocator( wrapper );
return reinterpret_cast<TDataType*>( theAllocator.allocate( sizeof( TDataType ), file, inLine ) );
}
/**
\brief Helper function to encapsulate the profile allocation
*/
template<typename TDataType>
inline TDataType* PxProfileAllocate( PxAllocatorCallback& inAllocator, const char* file, int inLine )
{
return PxProfileAllocate<TDataType>( &inAllocator, file, inLine );
}
/**
\brief Helper function to encapsulate the profile deallocation
*/
template<typename TDataType>
inline void PxProfileDeleteAndDeallocate( PxProfileAllocatorWrapper& inAllocator, TDataType* inDType )
{
PX_ASSERT(inDType);
PxAllocatorCallback& allocator( inAllocator.getAllocator() );
inDType->~TDataType();
allocator.deallocate( inDType );
}
/**
\brief Helper function to encapsulate the profile deallocation
*/
template<typename TDataType>
inline void PxProfileDeleteAndDeallocate( PxAllocatorCallback& inAllocator, TDataType* inDType )
{
PxProfileAllocatorWrapper wrapper( &inAllocator );
PxProfileDeleteAndDeallocate( wrapper, inDType );
}
} }
#define PX_PROFILE_NEW( allocator, dtype ) new (physx::profile::PxProfileAllocate<dtype>( allocator, PX_FL)) dtype
#define PX_PROFILE_DELETE( allocator, obj ) physx::profile::PxProfileDeleteAndDeallocate( allocator, obj );
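// Example (illustrative sketch): allocating and releasing an object through the
// wrapper macros. `MyZoneData` is a hypothetical type and `allocator` a valid
// PxAllocatorCallback.
//
//   MyZoneData* data = PX_PROFILE_NEW(&allocator, MyZoneData);
//   ...
//   PX_PROFILE_DELETE(allocator, data)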
#endif
| 7,606 | C | 32.073913 | 128 | 0.756902 |
NVIDIA-Omniverse/PhysX/physx/source/physxcooking/src/Cooking.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef COOKING_H
#define COOKING_H
#include "foundation/PxMemory.h"
#include "cooking/PxCooking.h"
#include "foundation/PxUserAllocated.h"
namespace physx
{
class TriangleMeshBuilder;
class TetrahedronMeshBuilder;
class ConvexMeshBuilder;
class ConvexHullLib;
class PxInsertionCallback;
struct PxTriangleMeshInternalData;
struct PxBVHInternalData;
}
#endif
| 2,060 | C | 41.061224 | 74 | 0.777184 |
NVIDIA-Omniverse/PhysX/physx/source/physxcooking/src/Cooking.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "Cooking.h"
#include "GuCooking.h"
#include "GuBVH.h"
///////////////////////////////////////////////////////////////////////////////
using namespace physx;
using namespace Gu;
#include "cooking/PxCookingInternal.h"
#include "GuTriangleMeshBV4.h"
physx::PxTriangleMesh* PxCreateTriangleMeshInternal(const physx::PxTriangleMeshInternalData& data)
{
TriangleMesh* np;
PX_NEW_SERIALIZED(np, BV4TriangleMesh)(data);
return np;
}
physx::PxBVH* PxCreateBVHInternal(const physx::PxBVHInternalData& data)
{
BVH* np;
PX_NEW_SERIALIZED(np, BVH)(data);
return np;
}
///////////////////////////////////////////////////////////////////////////////
PxInsertionCallback* PxGetStandaloneInsertionCallback()
{
return immediateCooking::getInsertionCallback();
}
bool PxCookBVH(const PxBVHDesc& desc, PxOutputStream& stream)
{
return immediateCooking::cookBVH(desc, stream);
}
PxBVH* PxCreateBVH(const PxBVHDesc& desc, PxInsertionCallback& insertionCallback)
{
return immediateCooking::createBVH(desc, insertionCallback);
}
bool PxCookHeightField(const PxHeightFieldDesc& desc, PxOutputStream& stream)
{
return immediateCooking::cookHeightField(desc, stream);
}
PxHeightField* PxCreateHeightField(const PxHeightFieldDesc& desc, PxInsertionCallback& insertionCallback)
{
return immediateCooking::createHeightField(desc, insertionCallback);
}
bool PxCookConvexMesh(const PxCookingParams& params, const PxConvexMeshDesc& desc, PxOutputStream& stream, PxConvexMeshCookingResult::Enum* condition)
{
return immediateCooking::cookConvexMesh(params, desc, stream, condition);
}
PxConvexMesh* PxCreateConvexMesh(const PxCookingParams& params, const PxConvexMeshDesc& desc, PxInsertionCallback& insertionCallback, PxConvexMeshCookingResult::Enum* condition)
{
return immediateCooking::createConvexMesh(params, desc, insertionCallback, condition);
}
bool PxValidateConvexMesh(const PxCookingParams& params, const PxConvexMeshDesc& desc)
{
return immediateCooking::validateConvexMesh(params, desc);
}
bool PxComputeHullPolygons(const PxCookingParams& params, const PxSimpleTriangleMesh& mesh, PxAllocatorCallback& inCallback, PxU32& nbVerts, PxVec3*& vertices, PxU32& nbIndices, PxU32*& indices, PxU32& nbPolygons, PxHullPolygon*& hullPolygons)
{
return immediateCooking::computeHullPolygons(params, mesh, inCallback, nbVerts, vertices, nbIndices, indices, nbPolygons, hullPolygons);
}
bool PxValidateTriangleMesh(const PxCookingParams& params, const PxTriangleMeshDesc& desc)
{
return immediateCooking::validateTriangleMesh(params, desc);
}
PxTriangleMesh* PxCreateTriangleMesh(const PxCookingParams& params, const PxTriangleMeshDesc& desc, PxInsertionCallback& insertionCallback, PxTriangleMeshCookingResult::Enum* condition)
{
return immediateCooking::createTriangleMesh(params, desc, insertionCallback, condition);
}
bool PxCookTriangleMesh(const PxCookingParams& params, const PxTriangleMeshDesc& desc, PxOutputStream& stream, PxTriangleMeshCookingResult::Enum* condition)
{
return immediateCooking::cookTriangleMesh(params, desc, stream, condition);
}
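// Example (illustrative sketch): cooking a triangle mesh to a memory stream.
// Assumes `desc` is a filled-in, valid PxTriangleMeshDesc;
// PxDefaultMemoryOutputStream comes from the extensions library.
//
//   PxCookingParams params(PxTolerancesScale());
//   PxDefaultMemoryOutputStream out;
//   if(PxCookTriangleMesh(params, desc, out))
//   {
//       // out.getData() / out.getSize() now hold the cooked mesh data
//   }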
bool PxCookTetrahedronMesh(const PxCookingParams& params, const PxTetrahedronMeshDesc& meshDesc, PxOutputStream& stream)
{
return immediateCooking::cookTetrahedronMesh(params, meshDesc, stream);
}
PxTetrahedronMesh* PxCreateTetrahedronMesh(const PxCookingParams& params, const PxTetrahedronMeshDesc& meshDesc, PxInsertionCallback& insertionCallback)
{
return immediateCooking::createTetrahedronMesh(params, meshDesc, insertionCallback);
}
bool PxCookSoftBodyMesh(const PxCookingParams& params, const PxTetrahedronMeshDesc& simulationMeshDesc, const PxTetrahedronMeshDesc& collisionMeshDesc, const PxSoftBodySimulationDataDesc& softbodyDataDesc, PxOutputStream& stream)
{
return immediateCooking::cookSoftBodyMesh(params, simulationMeshDesc, collisionMeshDesc, softbodyDataDesc, stream);
}
PxSoftBodyMesh* PxCreateSoftBodyMesh(const PxCookingParams& params, const PxTetrahedronMeshDesc& simulationMeshDesc, const PxTetrahedronMeshDesc& collisionMeshDesc, const PxSoftBodySimulationDataDesc& softbodyDataDesc, PxInsertionCallback& insertionCallback)
{
return immediateCooking::createSoftBodyMesh(params, simulationMeshDesc, collisionMeshDesc, softbodyDataDesc, insertionCallback);
}
PxCollisionMeshMappingData* PxComputeModelsMapping(const PxCookingParams& params, PxTetrahedronMeshData& simulationMesh, const PxTetrahedronMeshData& collisionMesh, const PxSoftBodyCollisionData& collisionData, const PxBoundedData* vertexToTet)
{
return immediateCooking::computeModelsMapping(params, simulationMesh, collisionMesh, collisionData, vertexToTet);
}
PxCollisionTetrahedronMeshData* PxComputeCollisionData(const PxCookingParams& params, const PxTetrahedronMeshDesc& collisionMeshDesc)
{
return immediateCooking::computeCollisionData(params, collisionMeshDesc);
}
PxSimulationTetrahedronMeshData* PxComputeSimulationData(const PxCookingParams& params, const PxTetrahedronMeshDesc& simulationMeshDesc)
{
return immediateCooking::computeSimulationData(params, simulationMeshDesc);
}
PxSoftBodyMesh* PxAssembleSoftBodyMesh(PxTetrahedronMeshData& simulationMesh, PxSoftBodySimulationData& simulationData, PxTetrahedronMeshData& collisionMesh, PxSoftBodyCollisionData& collisionData, PxCollisionMeshMappingData& mappingData, PxInsertionCallback& insertionCallback)
{
return immediateCooking::assembleSoftBodyMesh(simulationMesh, simulationData, collisionMesh, collisionData, mappingData, insertionCallback);
}
PxSoftBodyMesh* PxAssembleSoftBodyMesh_Sim(PxSimulationTetrahedronMeshData& simulationMesh, PxCollisionTetrahedronMeshData& collisionMesh, PxCollisionMeshMappingData& mappingData, PxInsertionCallback& insertionCallback)
{
return immediateCooking::assembleSoftBodyMesh_Sim(simulationMesh, collisionMesh, mappingData, insertionCallback);
}
| 7,525 | C++ | 45.745341 | 278 | 0.807575 |
NVIDIA-Omniverse/PhysX/physx/source/physxcooking/src/windows/WindowsCookingDelayLoadHook.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "common/windows/PxWindowsDelayLoadHook.h"
#include "foundation/windows/PxWindowsInclude.h"
#include "windows/CmWindowsLoadLibrary.h"
static const physx::PxDelayLoadHook* gCookingDelayLoadHook = NULL;
void physx::PxSetPhysXCookingDelayLoadHook(const physx::PxDelayLoadHook* hook)
{
gCookingDelayLoadHook = hook;
}
// delay loading is enabled only for non-static configurations
#if !defined PX_PHYSX_STATIC_LIB
// Prior to Visual Studio 2015 Update 3, these hooks were non-const.
#define DELAYIMP_INSECURE_WRITABLE_HOOKS
#include <delayimp.h>
using namespace physx;
#pragma comment(lib, "delayimp")
FARPROC WINAPI cookingDelayHook(unsigned dliNotify, PDelayLoadInfo pdli)
{
switch (dliNotify) {
case dliStartProcessing :
break;
case dliNotePreLoadLibrary :
{
return Cm::physXCommonDliNotePreLoadLibrary(pdli->szDll,gCookingDelayLoadHook);
}
break;
case dliNotePreGetProcAddress :
break;
case dliFailLoadLib :
break;
case dliFailGetProc :
break;
case dliNoteEndProcessing :
break;
default :
return NULL;
}
return NULL;
}
PfnDliHook __pfnDliNotifyHook2 = cookingDelayHook;
#endif
| 2,833 | C++ | 31.574712 | 82 | 0.769149 |
NVIDIA-Omniverse/PhysX/physx/source/task/src/TaskManager.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
#include "task/PxTask.h"
#include "foundation/PxErrors.h"
#include "foundation/PxHashMap.h"
#include "foundation/PxAllocator.h"
#include "foundation/PxAtomic.h"
#include "foundation/PxMutex.h"
#include "foundation/PxArray.h"
#include "foundation/PxThread.h"
#define LOCK() PxMutex::ScopedLock _lock_(mMutex)
namespace physx
{
const int EOL = -1;
typedef PxHashMap<const char *, PxTaskID> PxTaskNameToIDMap;
struct PxTaskDepTableRow
{
PxTaskID mTaskID;
int mNextDep;
};
typedef PxArray<PxTaskDepTableRow> PxTaskDepTable;
class PxTaskTableRow
{
public:
PxTaskTableRow() : mRefCount( 1 ), mStartDep(EOL), mLastDep(EOL) {}
void addDependency( PxTaskDepTable& depTable, PxTaskID taskID )
{
int newDep = int(depTable.size());
PxTaskDepTableRow row;
row.mTaskID = taskID;
row.mNextDep = EOL;
depTable.pushBack( row );
if( mLastDep == EOL )
{
mStartDep = mLastDep = newDep;
}
else
{
depTable[ uint32_t(mLastDep) ].mNextDep = newDep;
mLastDep = newDep;
}
}
PxTask * mTask;
volatile int mRefCount;
PxTaskType::Enum mType;
int mStartDep;
int mLastDep;
};
typedef PxArray<PxTaskTableRow> PxTaskTable;
/* Implementation of PxTaskManager abstract API */
class PxTaskMgr : public PxTaskManager, public PxUserAllocated
{
PX_NOCOPY(PxTaskMgr)
public:
PxTaskMgr(PxErrorCallback& , PxCpuDispatcher*);
~PxTaskMgr();
void setCpuDispatcher( PxCpuDispatcher& ref )
{
mCpuDispatcher = &ref;
}
PxCpuDispatcher* getCpuDispatcher() const
{
return mCpuDispatcher;
}
void resetDependencies();
void startSimulation();
void stopSimulation();
void taskCompleted( PxTask& task );
PxTaskID getNamedTask( const char *name );
PxTaskID submitNamedTask( PxTask *task, const char *name, PxTaskType::Enum type = PxTaskType::eCPU );
PxTaskID submitUnnamedTask( PxTask& task, PxTaskType::Enum type = PxTaskType::eCPU );
PxTask* getTaskFromID( PxTaskID );
void dispatchTask( PxTaskID taskID );
void resolveRow( PxTaskID taskID );
void release();
void finishBefore( PxTask& task, PxTaskID taskID );
void startAfter( PxTask& task, PxTaskID taskID );
void addReference( PxTaskID taskID );
void decrReference( PxTaskID taskID );
int32_t getReference( PxTaskID taskID ) const;
void decrReference( PxLightCpuTask& lighttask );
void addReference( PxLightCpuTask& lighttask );
PxErrorCallback& mErrorCallback;
PxCpuDispatcher *mCpuDispatcher;
PxTaskNameToIDMap mName2IDmap;
volatile int mPendingTasks;
PxMutex mMutex;
PxTaskDepTable mDepTable;
PxTaskTable mTaskTable;
PxArray<PxTaskID> mStartDispatch;
};
PxTaskManager* PxTaskManager::createTaskManager(PxErrorCallback& errorCallback, PxCpuDispatcher* cpuDispatcher)
{
return PX_NEW(PxTaskMgr)(errorCallback, cpuDispatcher);
}
PxTaskMgr::PxTaskMgr(PxErrorCallback& errorCallback, PxCpuDispatcher* cpuDispatcher)
: mErrorCallback (errorCallback)
, mCpuDispatcher( cpuDispatcher )
, mPendingTasks( 0 )
, mDepTable("PxTaskDepTable")
, mTaskTable("PxTaskTable")
, mStartDispatch("StartDispatch")
{
}
PxTaskMgr::~PxTaskMgr()
{
}
void PxTaskMgr::release()
{
PX_DELETE_THIS;
}
void PxTaskMgr::decrReference(PxLightCpuTask& lighttask)
{
/* This does not need a lock! */
if (!PxAtomicDecrement(&lighttask.mRefCount))
{
PX_ASSERT(mCpuDispatcher);
if (mCpuDispatcher)
{
mCpuDispatcher->submitTask(lighttask);
}
else
{
lighttask.release();
}
}
}
void PxTaskMgr::addReference(PxLightCpuTask& lighttask)
{
/* This does not need a lock! */
PxAtomicIncrement(&lighttask.mRefCount);
}
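/* Example (illustrative sketch): the light-task continuation pattern that
 * drives the two functions above. `myTask` and `cont` are hypothetical
 * PxLightCpuTask objects.
 *
 *   myTask.setContinuation(*tm, &cont); // sets refCount to 1, references cont
 *   myTask.removeReference();           // count hits 0 -> submitted to dispatcher
 */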
/*
* Called by the owner (Scene) at the start of every frame, before
* asking for tasks to be submitted.
*/
void PxTaskMgr::resetDependencies()
{
	PX_ASSERT( !mPendingTasks ); // only valid if you don't resubmit named tasks; this is true for the SDK
PX_ASSERT( mCpuDispatcher );
mTaskTable.clear();
mDepTable.clear();
mName2IDmap.clear();
mPendingTasks = 0;
}
/*
* Called by the owner (Scene) to start simulating the task graph.
* Dispatch all tasks with refCount == 1
*/
void PxTaskMgr::startSimulation()
{
PX_ASSERT( mCpuDispatcher );
/* Handle empty task graph */
if( mPendingTasks == 0 )
return;
for( PxTaskID i = 0 ; i < mTaskTable.size() ; i++ )
{
if( mTaskTable[ i ].mType == PxTaskType::eCOMPLETED )
{
continue;
}
if( !PxAtomicDecrement( &mTaskTable[ i ].mRefCount ) )
{
mStartDispatch.pushBack(i);
}
}
for( uint32_t i=0; i<mStartDispatch.size(); ++i)
{
dispatchTask( mStartDispatch[i] );
}
//mStartDispatch.resize(0);
mStartDispatch.forceSize_Unsafe(0);
}
void PxTaskMgr::stopSimulation()
{
}
PxTaskID PxTaskMgr::getNamedTask( const char *name )
{
const PxTaskNameToIDMap::Entry *ret;
{
LOCK();
ret = mName2IDmap.find( name );
}
if( ret )
{
return ret->second;
}
else
{
// create named entry in task table, without a task
return submitNamedTask( NULL, name, PxTaskType::eNOT_PRESENT );
}
}
PxTask* PxTaskMgr::getTaskFromID( PxTaskID id )
{
LOCK(); // todo: reader lock necessary?
return mTaskTable[ id ].mTask;
}
/* If called at runtime, must be thread-safe */
PxTaskID PxTaskMgr::submitNamedTask( PxTask *task, const char *name, PxTaskType::Enum type )
{
if( task )
{
task->mTm = this;
task->submitted();
}
LOCK();
const PxTaskNameToIDMap::Entry *ret = mName2IDmap.find( name );
if( ret )
{
PxTaskID prereg = ret->second;
if( task )
{
/* name was registered for us by a dependent task */
PX_ASSERT( !mTaskTable[ prereg ].mTask );
PX_ASSERT( mTaskTable[ prereg ].mType == PxTaskType::eNOT_PRESENT );
mTaskTable[ prereg ].mTask = task;
mTaskTable[ prereg ].mType = type;
task->mTaskID = prereg;
}
return prereg;
}
else
{
PxAtomicIncrement(&mPendingTasks);
PxTaskID id = static_cast<PxTaskID>(mTaskTable.size());
mName2IDmap[ name ] = id;
if( task )
{
task->mTaskID = id;
}
PxTaskTableRow r;
r.mTask = task;
r.mType = type;
mTaskTable.pushBack(r);
return id;
}
}
/*
* Add an unnamed task to the task table
*/
PxTaskID PxTaskMgr::submitUnnamedTask( PxTask& task, PxTaskType::Enum type )
{
PxAtomicIncrement(&mPendingTasks);
task.mTm = this;
task.submitted();
LOCK();
task.mTaskID = static_cast<PxTaskID>(mTaskTable.size());
PxTaskTableRow r;
r.mTask = &task;
r.mType = type;
mTaskTable.pushBack(r);
return task.mTaskID;
}
/* Called by worker threads (or cooperating application threads) when a
 * PxTask has completed. Propagate dependencies, decrementing all
* referenced tasks' refCounts. If any of those reach zero, activate
* those tasks.
*/
void PxTaskMgr::taskCompleted( PxTask& task )
{
LOCK();
resolveRow(task.mTaskID);
}
/* ================== Private Functions ======================= */
/*
* Add a dependency to force 'task' to complete before the
* referenced 'taskID' is allowed to be dispatched.
*/
void PxTaskMgr::finishBefore( PxTask& task, PxTaskID taskID )
{
LOCK();
PX_ASSERT( mTaskTable[ taskID ].mType != PxTaskType::eCOMPLETED );
mTaskTable[ task.mTaskID ].addDependency( mDepTable, taskID );
PxAtomicIncrement( &mTaskTable[ taskID ].mRefCount );
}
/*
* Add a dependency to force 'task' to wait for the referenced 'taskID'
* to complete before it is allowed to be dispatched.
*/
void PxTaskMgr::startAfter( PxTask& task, PxTaskID taskID )
{
LOCK();
PX_ASSERT( mTaskTable[ taskID ].mType != PxTaskType::eCOMPLETED );
mTaskTable[ taskID ].addDependency( mDepTable, task.mTaskID );
PxAtomicIncrement( &mTaskTable[ task.mTaskID ].mRefCount );
}
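/* Example (illustrative sketch): making a hypothetical `narrowPhase` task wait
 * for `broadPhase`. PxTask::startAfter/finishBefore forward to these methods.
 *
 *   PxTaskID bpID = tm->submitNamedTask(&broadPhase, "BroadPhase");
 *   tm->submitNamedTask(&narrowPhase, "NarrowPhase");
 *   narrowPhase.startAfter(bpID); // narrowPhase waits for broadPhase to finish
 */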
void PxTaskMgr::addReference( PxTaskID taskID )
{
LOCK();
PxAtomicIncrement( &mTaskTable[ taskID ].mRefCount );
}
/*
* Remove one reference count from a task. Must be done here to make it thread safe.
*/
void PxTaskMgr::decrReference( PxTaskID taskID )
{
LOCK();
if( !PxAtomicDecrement( &mTaskTable[ taskID ].mRefCount ) )
{
dispatchTask(taskID);
}
}
int32_t PxTaskMgr::getReference(PxTaskID taskID) const
{
return mTaskTable[ taskID ].mRefCount;
}
/*
 * A task has completed; decrement all dependencies and submit tasks
 * that are ready to run. Signal simulation end if there are no more
* pending tasks.
*/
void PxTaskMgr::resolveRow( PxTaskID taskID )
{
int depRow = mTaskTable[ taskID ].mStartDep;
while( depRow != EOL )
{
PxTaskDepTableRow& row = mDepTable[ uint32_t(depRow) ];
PxTaskTableRow& dtt = mTaskTable[ row.mTaskID ];
if( !PxAtomicDecrement( &dtt.mRefCount ) )
{
dispatchTask( row.mTaskID );
}
depRow = row.mNextDep;
}
PxAtomicDecrement( &mPendingTasks );
}
/*
* Submit a ready task to its appropriate dispatcher.
*/
void PxTaskMgr::dispatchTask( PxTaskID taskID )
{
LOCK(); // todo: reader lock necessary?
PxTaskTableRow& tt = mTaskTable[ taskID ];
// prevent re-submission
if( tt.mType == PxTaskType::eCOMPLETED )
{
mErrorCallback.reportError(PxErrorCode::eDEBUG_WARNING, "PxTask dispatched twice", PX_FL);
return;
}
switch ( tt.mType )
{
case PxTaskType::eCPU:
mCpuDispatcher->submitTask( *tt.mTask );
break;
case PxTaskType::eNOT_PRESENT:
/* No task registered with this taskID, resolve its dependencies */
PX_ASSERT(!tt.mTask);
//PxGetFoundation().error(PX_INFO, "unregistered task resolved");
resolveRow( taskID );
break;
case PxTaskType::eCOMPLETED:
default:
mErrorCallback.reportError(PxErrorCode::eDEBUG_WARNING, "Unknown task type", PX_FL);
resolveRow( taskID );
break;
}
tt.mType = PxTaskType::eCOMPLETED;
}
}// end physx namespace
| 11,436 | C++ | 24.701124 | 111 | 0.689314 |
NVIDIA-Omniverse/PhysX/physx/source/compiler/windows/resource/resource.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
//{{NO_DEPENDENCIES}}
// Microsoft Visual C++ generated include file.
// Used by *.rc files
//
// Next default values for new objects
//
#ifdef APSTUDIO_INVOKED
#ifndef APSTUDIO_READONLY_SYMBOLS
#define _APS_NEXT_RESOURCE_VALUE 101
#define _APS_NEXT_COMMAND_VALUE 40001
#define _APS_NEXT_CONTROL_VALUE 1000
#define _APS_NEXT_SYMED_VALUE 101
#endif
#endif
#include "..\..\..\..\include\foundation\PxPhysicsVersion.h"
#define RC_STRINGIFY(x) #x
#define RC_GETSTR(x) RC_STRINGIFY(x)
#define RC_PHYSX_VER PX_PHYSICS_VERSION_MAJOR,PX_PHYSICS_VERSION_MINOR,PX_PHYSICS_VERSION_BUGFIX,0
#define RC_PHYSX_VER_STR RC_GETSTR(PX_PHYSICS_VERSION_MAJOR) "." RC_GETSTR(PX_PHYSICS_VERSION_MINOR) "." RC_GETSTR(PX_PHYSICS_VERSION_BUGFIX) ".0"
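// e.g. for major.minor.bugfix = 5.1.3 these expand to 5,1,3,0 and "5.1.3.0"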
#define RC_COMPANY_NAME_STR "NVIDIA Corporation"
#define RC_LEGAL_COPYRIGHT_STR "Copyright (C) 2023 NVIDIA Corporation"
#if defined(_WIN64)
#define RC_PTR_STR "64"
#elif defined(_WIN32)
#define RC_PTR_STR "32"
#endif
#define RC_PRODUCT_NAME_STR "PhysX"
| 2,720 | C | 42.190476 | 146 | 0.747059 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuAABBTreeUpdateMap.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_AABB_TREE_UPDATE_MAP_H
#define GU_AABB_TREE_UPDATE_MAP_H
#include "common/PxPhysXCommonConfig.h"
#include "GuPrunerTypedef.h"
#include "foundation/PxArray.h"
namespace physx
{
namespace Gu
{
class AABBTree;
// Maps pruning pool indices to AABB-tree indices (i.e. locates the object's box in the aabb-tree nodes pool)
//
// The map spans pool indices from 0..N-1, where N is the number of pool entries when the map was created from a tree.
//
// It maps:
// to node indices in the range 0..M-1, where M is the number of nodes in the tree the map was created from,
// or to INVALID_NODE_ID if the pool entry was removed or pool index is outside input domain.
//
// The map is the inverse of the tree mapping: (node[map[poolID]].primitive == poolID) is true at all times.
class AABBTreeUpdateMap
{
public:
AABBTreeUpdateMap() {}
~AABBTreeUpdateMap() {}
void release()
{
mMapping.reset();
}
// indices offset used when indices are shifted from objects (used for merged trees)
PX_PHYSX_COMMON_API void initMap(PxU32 numPoolObjects, const AABBTree& tree);
PX_PHYSX_COMMON_API void invalidate(PoolIndex poolIndex, PoolIndex replacementPoolIndex, AABBTree& tree);
PX_FORCE_INLINE TreeNodeIndex operator[](PxU32 poolIndex) const
{
return poolIndex < mMapping.size() ? mMapping[poolIndex] : INVALID_NODE_ID;
}
private:
// maps from prunerIndex (index in the PruningPool) to treeNode index
// this will only map to leaf tree nodes
PxArray<TreeNodeIndex> mMapping;
};
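	// Debug sketch (illustrative): checking the inverse-mapping invariant for a
	// single pool entry; `nodeReferencesPrimitive` is a hypothetical helper that
	// scans the leaf node's primitive list.
	//
	//   const TreeNodeIndex ni = map[poolIndex];
	//   if(ni != INVALID_NODE_ID)
	//       PX_ASSERT(nodeReferencesPrimitive(tree.getNodes()[ni], poolIndex));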
}
}
#endif
| 3,359 | C | 39.975609 | 119 | 0.719857 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuIncrementalAABBPruner.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_INCREMENTAL_AABB_PRUNER_H
#define GU_INCREMENTAL_AABB_PRUNER_H
#include "common/PxPhysXCommonConfig.h"
#include "GuPruner.h"
#include "GuPruningPool.h"
#include "GuIncrementalAABBTree.h"
#include "GuSqInternal.h"
namespace physx
{
class PxRenderOutput;
namespace Gu
{
class IncrementalAABBPruner : public Pruner
{
public:
PX_PHYSX_COMMON_API IncrementalAABBPruner(PxU32 sceneLimit, PxU64 contextID);
virtual ~IncrementalAABBPruner();
// BasePruner
DECLARE_BASE_PRUNER_API
//~BasePruner
// Pruner
DECLARE_PRUNER_API_COMMON
//~Pruner
// direct access for test code
PX_FORCE_INLINE const IncrementalAABBTree* getAABBTree() const { return mAABBTree; }
private:
void release();
void fullRebuildAABBTree();
void test();
void updateMapping(const PoolIndex poolIndex, IncrementalAABBTreeNode* node);
IncrementalAABBTree* mAABBTree;
PruningPool mPool; // Pool of AABBs
PxArray<IncrementalAABBTreeNode*> mMapping;
PxU64 mContextID;
NodeList mChangedLeaves;
};
}
}
#endif
| 2,849 | C | 34.185185 | 88 | 0.729379 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuAABBTreeUpdateMap.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuAABBTreeUpdateMap.h"
#include "GuAABBTree.h"
#include "GuAABBTreeNode.h"
using namespace physx;
using namespace Gu;
static const PxU32 SHRINK_THRESHOLD = 1024;
void AABBTreeUpdateMap::initMap(PxU32 nbObjects, const AABBTree& tree)
{
if(!nbObjects)
{
release();
return;
}
// Memory management
{
const PxU32 mapSize = nbObjects;
const PxU32 targetCapacity = mapSize + (mapSize>>2);
PxU32 currentCapacity = mMapping.capacity();
if( ( targetCapacity < (currentCapacity>>1) ) && ( (currentCapacity-targetCapacity) > SHRINK_THRESHOLD ) )
{
// trigger reallocation of a smaller array; the capacity drop is large enough to make the memory saving worthwhile
currentCapacity = 0;
}
if(mapSize > currentCapacity)
{
// the mapping values are invalid and reset below in any case
// so there is no need to copy the values at all
mMapping.reset();
mMapping.reserve(targetCapacity); // since size is 0, reserve will also just allocate
}
mMapping.forceSize_Unsafe(mapSize);
for(PxU32 i=0;i<mapSize;i++)
mMapping[i] = INVALID_NODE_ID;
}
const PxU32 nbNodes = tree.getNbNodes();
const BVHNode* nodes = tree.getNodes();
const PxU32* indices = tree.getIndices();
for(TreeNodeIndex i=0;i<nbNodes;i++)
{
if(nodes[i].isLeaf())
{
const PxU32 nbPrims = nodes[i].getNbRuntimePrimitives();
// PT: with multiple primitives per node, several mapping entries will point to the same node.
PX_ASSERT(nbPrims<16);
for(PxU32 j=0;j<nbPrims;j++)
{
const PxU32 index = nodes[i].getPrimitives(indices)[j];
PX_ASSERT(index<nbObjects);
mMapping[index] = i;
}
}
}
}
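// A minimal, uncompiled sketch of how this map is meant to be used once initMap() has run.
// It assumes the map exposes an indexed lookup (written as map[i] here) returning the tree
// node that currently stores a given pool/object index.
#if 0
void exampleMarkDirty(AABBTree& tree, const AABBTreeUpdateMap& map, PxU32 dirtyObjectIndex)
{
	const TreeNodeIndex leaf = map[dirtyObjectIndex];	// O(1): object index -> leaf node
	if(leaf != INVALID_NODE_ID)
		tree.markNodeForRefit(leaf);					// queue just this leaf (and its ancestors) for refit
}
#endif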
void AABBTreeUpdateMap::invalidate(PoolIndex prunerIndex0, PoolIndex prunerIndex1, AABBTree& tree)
{
// prunerIndex0 and prunerIndex1 are both indices into the pool, not handles
// prunerIndex0 is the index in the pruning pool for the node that was just removed
// prunerIndex1 is the index in the pruning pool for the last object, which gets swapped into the removed object's slot
const TreeNodeIndex nodeIndex0 = prunerIndex0<mMapping.size() ? mMapping[prunerIndex0] : INVALID_NODE_ID;
const TreeNodeIndex nodeIndex1 = prunerIndex1<mMapping.size() ? mMapping[prunerIndex1] : INVALID_NODE_ID;
//printf("map invalidate pi0:%x ni0:%x\t",prunerIndex0,nodeIndex0);
//printf(" replace with pi1:%x ni1:%x\n",prunerIndex1,nodeIndex1);
// if nodeIndex0 exists:
// invalidate node 0
// invalidate map prunerIndex0
// if nodeIndex1 exists:
// point node 1 to prunerIndex0
// map prunerIndex0 to node 1
// invalidate map prunerIndex1
// eventually:
// - node 0 is invalid
// - prunerIndex0 is mapped to node 1 or
// is not mapped if prunerIndex1 is not mapped
// is not mapped if prunerIndex0==prunerIndex1
// - node 1 points to prunerIndex0 or
// is invalid if prunerIndex1 is not mapped
// is invalid if prunerIndex0==prunerIndex1
// - prunerIndex1 is not mapped
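// Worked example (illustrative): object A sits at pool index 2, stored in leaf N5; the
// last object B sits at pool index 7, stored in leaf N9. Removing A calls invalidate(2, 7):
// - N5 drops pool index 2 from its primitive list and mMapping[2] is cleared,
// - B is swapped into pool slot 2, so N9 is repointed at index 2 and mMapping[2] = N9,
// - mMapping[7] is cleared, since slot 7 no longer holds a live object.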
BVHNode* nodes = tree.getNodes();
if(nodeIndex0!=INVALID_NODE_ID)
{
PX_ASSERT(nodeIndex0 < tree.getNbNodes());
PX_ASSERT(nodes[nodeIndex0].isLeaf());
BVHNode* node0 = nodes + nodeIndex0;
const PxU32 nbPrims = node0->getNbRuntimePrimitives();
PX_ASSERT(nbPrims < 16);
// retrieve the primitives pointer
PxU32* primitives = node0->getPrimitives(tree.getIndices());
PX_ASSERT(primitives);
// PT: look for desired pool index in the leaf
bool foundIt = false;
for(PxU32 i=0;i<nbPrims;i++)
{
PX_ASSERT(mMapping[primitives[i]] == nodeIndex0); // PT: all primitives should point to the same leaf node
if(prunerIndex0 == primitives[i])
{
foundIt = true;
const PxU32 last = nbPrims-1;
node0->setNbRunTimePrimitives(last);
primitives[i] = INVALID_POOL_ID; // Mark primitive index as invalid in the node
mMapping[prunerIndex0] = INVALID_NODE_ID; // invalidate the node index for pool 0
// PT: swap within the leaf node. No need to update the mapping since they should all point
// to the same tree node anyway.
if(last!=i)
PxSwap(primitives[i], primitives[last]);
break;
}
}
PX_ASSERT(foundIt);
PX_UNUSED(foundIt);
}
if (nodeIndex1!=INVALID_NODE_ID)
{
// PT: with multiple primitives per leaf, tree nodes may very well be the same for different pool indices.
// However the pool indices may be the same when a swap has been skipped in the pruning pool, in which
// case there is nothing to do.
if(prunerIndex0!=prunerIndex1)
{
PX_ASSERT(nodeIndex1 < tree.getNbNodes());
PX_ASSERT(nodes[nodeIndex1].isLeaf());
BVHNode* node1 = nodes + nodeIndex1;
const PxU32 nbPrims = node1->getNbRuntimePrimitives();
PX_ASSERT(nbPrims < 16);
// retrieve the primitives pointer
PxU32* primitives = node1->getPrimitives(tree.getIndices());
PX_ASSERT(primitives);
// PT: look for desired pool index in the leaf
bool foundIt = false;
for(PxU32 i=0;i<nbPrims;i++)
{
PX_ASSERT(mMapping[primitives[i]] == nodeIndex1); // PT: all primitives should point to the same leaf node
if(prunerIndex1 == primitives[i])
{
foundIt = true;
primitives[i] = prunerIndex0; // point node 1 to the pool object moved to ID 0
mMapping[prunerIndex0] = nodeIndex1; // pool 0 is pointed at by node 1 now
mMapping[prunerIndex1] = INVALID_NODE_ID; // pool 1 is no longer stored in the tree
break;
}
}
PX_ASSERT(foundIt);
PX_UNUSED(foundIt);
}
}
}
| 7,030 | C++ | 34.510101 | 110 | 0.714936 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuQuery.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_QUERY_H
#define GU_QUERY_H
#include "GuBounds.h"
#include "GuBVHTestsSIMD.h"
namespace physx
{
namespace Gu
{
// PT: TODO: the various V3LoadUs in the base tests like SphereAABBTest could be avoided
// PT: TODO: check inflation is consistent in all of these. Looks like it's not.
struct DefaultOBBAABBTest : OBBAABBTest
{
PX_FORCE_INLINE DefaultOBBAABBTest(const ShapeData& queryVolume) :
OBBAABBTest(queryVolume.getPrunerWorldPos(),
queryVolume.getPrunerWorldRot33(),
queryVolume.getPrunerBoxGeomExtentsInflated()) {}
};
struct DefaultAABBAABBTest : AABBAABBTest
{
PX_FORCE_INLINE DefaultAABBAABBTest(const ShapeData& queryVolume) :
AABBAABBTest(queryVolume.getPrunerInflatedWorldAABB()) {}
};
struct DefaultSphereAABBTest : SphereAABBTest
{
PX_FORCE_INLINE DefaultSphereAABBTest(const ShapeData& queryVolume) :
SphereAABBTest( queryVolume.getGuSphere().center,
queryVolume.getGuSphere().radius) {}
};
struct DefaultCapsuleAABBTest : CapsuleAABBTest
{
PX_FORCE_INLINE DefaultCapsuleAABBTest(const ShapeData& queryVolume, float inflation) :
CapsuleAABBTest(queryVolume.getGuCapsule().p1,
queryVolume.getPrunerWorldRot33().column0,
queryVolume.getCapsuleHalfHeight()*2.0f,
PxVec3(queryVolume.getGuCapsule().radius*inflation)) {}
};
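// A minimal, uncompiled sketch of how these adapters plug into the templated tree queries;
// the AABBTreeOverlap signature is the one used by the pruners in this module
// (e.g. GuIncrementalAABBPruner.cpp).
#if 0
template<class Tree, class Node, class Callback>
bool overlapOBB(const ShapeData& queryVolume, const AABBTreeBounds& bounds, const Tree& tree, Callback& cb)
{
	const DefaultOBBAABBTest test(queryVolume);	// unpacks pose/extents from the ShapeData once
	return AABBTreeOverlap<true, OBBAABBTest, Tree, Node, Callback>()(bounds, tree, test, cb);
}
#endif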
}
}
#endif
| 3,032 | C | 38.38961 | 89 | 0.762863 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuIncrementalAABBPruner.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
// PT: TODO: this class isn't actually used at the moment
#define COMPILE_INCREMENTAL_AABB_PRUNER
#ifdef COMPILE_INCREMENTAL_AABB_PRUNER
#include "common/PxProfileZone.h"
#include "CmVisualization.h"
#include "foundation/PxBitUtils.h"
#include "GuIncrementalAABBPruner.h"
#include "GuIncrementalAABBTree.h"
#include "GuCallbackAdapter.h"
#include "GuAABBTree.h"
#include "GuAABBTreeQuery.h"
#include "GuSphere.h"
#include "GuBox.h"
#include "GuCapsule.h"
#include "GuQuery.h"
using namespace physx;
using namespace Gu;
// PT: TODO: this is copied from SqBounds.h, should be either moved to Gu and shared or passed as a user parameter
#define SQ_PRUNER_EPSILON 0.005f
#define SQ_PRUNER_INFLATION (1.0f + SQ_PRUNER_EPSILON) // pruner test shape inflation (not narrow phase shape)
#define PARANOIA_CHECKS 0
IncrementalAABBPruner::IncrementalAABBPruner(PxU32 sceneLimit, PxU64 contextID) :
mAABBTree (NULL),
mPool (contextID, TRANSFORM_CACHE_GLOBAL),
mContextID (contextID)
{
mMapping.resizeUninitialized(sceneLimit);
mPool.preallocate(sceneLimit);
mChangedLeaves.reserve(sceneLimit);
}
IncrementalAABBPruner::~IncrementalAABBPruner()
{
release();
}
bool IncrementalAABBPruner::addObjects(PrunerHandle* results, const PxBounds3* bounds, const PrunerPayload* data, const PxTransform* transforms, PxU32 count, bool )
{
PX_PROFILE_ZONE("SceneQuery.prunerAddObjects", mContextID);
if(!count)
return true;
const PxU32 valid = mPool.addObjects(results, bounds, data, transforms, count);
if(mAABBTree)
{
for(PxU32 i=0;i<valid;i++)
{
const PrunerHandle& handle = results[i];
const PoolIndex poolIndex = mPool.getIndex(handle);
mChangedLeaves.clear();
IncrementalAABBTreeNode* node = mAABBTree->insert(poolIndex, mPool.getCurrentWorldBoxes(), mChangedLeaves);
updateMapping(poolIndex, node);
}
#if PARANOIA_CHECKS
test();
#endif
}
return valid==count;
}
void IncrementalAABBPruner::updateMapping(const PoolIndex poolIndex, IncrementalAABBTreeNode* node)
{
// resize mapping if needed
if(mMapping.size() <= poolIndex)
{
mMapping.resize(mMapping.size() * 2);
}
// if a node was split we need to update the node indices and also the sibling indices
if(!mChangedLeaves.empty())
{
if(node && node->isLeaf())
{
for(PxU32 j = 0; j < node->getNbPrimitives(); j++)
{
mMapping[node->getPrimitives(NULL)[j]] = node;
}
}
for(PxU32 i = 0; i < mChangedLeaves.size(); i++)
{
IncrementalAABBTreeNode* changedNode = mChangedLeaves[i];
PX_ASSERT(changedNode->isLeaf());
for(PxU32 j = 0; j < changedNode->getNbPrimitives(); j++)
{
mMapping[changedNode->getPrimitives(NULL)[j]] = changedNode;
}
}
}
else
{
mMapping[poolIndex] = node;
}
}
void IncrementalAABBPruner::updateObjects(const PrunerHandle* handles, PxU32 count, float inflation, const PxU32* boundsIndices, const PxBounds3* newBounds, const PxTransform32* newTransforms)
{
PX_PROFILE_ZONE("SceneQuery.prunerUpdateObjects", mContextID);
if(!count)
return;
if(handles && boundsIndices && newBounds)
mPool.updateAndInflateBounds(handles, boundsIndices, newBounds, newTransforms, count, inflation);
if(!mAABBTree)
return;
const PxBounds3* poolBounds = mPool.getCurrentWorldBoxes();
for(PxU32 i=0; i<count; i++)
{
const PrunerHandle h = handles[i];
const PoolIndex poolIndex = mPool.getIndex(h);
mChangedLeaves.clear();
IncrementalAABBTreeNode* node = mAABBTree->update(mMapping[poolIndex], poolIndex, poolBounds, mChangedLeaves);
// the object may have been moved to a different leaf during the update, so refresh the mapping
updateMapping(poolIndex, node);
}
#if PARANOIA_CHECKS
test();
#endif
}
void IncrementalAABBPruner::removeObjects(const PrunerHandle* handles, PxU32 count, PrunerPayloadRemovalCallback* removalCallback)
{
PX_PROFILE_ZONE("SceneQuery.prunerRemoveObjects", mContextID);
if(!count)
return;
for(PxU32 i=0; i<count; i++)
{
const PrunerHandle h = handles[i];
const PoolIndex poolIndex = mPool.getIndex(h); // save the pool index for removed object
const PoolIndex poolRelocatedLastIndex = mPool.removeObject(h, removalCallback); // save the lastIndex returned by removeObject
if(mAABBTree)
{
IncrementalAABBTreeNode* node = mAABBTree->remove(mMapping[poolIndex], poolIndex, mPool.getCurrentWorldBoxes());
// if node moved to its parent
if (node && node->isLeaf())
{
for (PxU32 j = 0; j < node->getNbPrimitives(); j++)
{
const PoolIndex index = node->getPrimitives(NULL)[j];
mMapping[index] = node;
}
}
mMapping[poolIndex] = mMapping[poolRelocatedLastIndex];
// fix indices if we made a swap
if(poolRelocatedLastIndex != poolIndex)
mAABBTree->fixupTreeIndices(mMapping[poolIndex], poolRelocatedLastIndex, poolIndex);
if(!mAABBTree->getNodes())
{
release();
}
}
}
#if PARANOIA_CHECKS
test();
#endif
}
bool IncrementalAABBPruner::overlap(const ShapeData& queryVolume, PrunerOverlapCallback& pcbArgName) const
{
bool again = true;
if(mAABBTree && mAABBTree->getNodes())
{
OverlapCallbackAdapter pcb(pcbArgName, mPool);
switch(queryVolume.getType())
{
case PxGeometryType::eBOX:
{
if(queryVolume.isOBB())
{
const DefaultOBBAABBTest test(queryVolume);
again = AABBTreeOverlap<true, OBBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, OverlapCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, test, pcb);
}
else
{
const DefaultAABBAABBTest test(queryVolume);
again = AABBTreeOverlap<true, AABBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, OverlapCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, test, pcb);
}
}
break;
case PxGeometryType::eCAPSULE:
{
const DefaultCapsuleAABBTest test(queryVolume, SQ_PRUNER_INFLATION);
again = AABBTreeOverlap<true, CapsuleAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, OverlapCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, test, pcb);
}
break;
case PxGeometryType::eSPHERE:
{
const DefaultSphereAABBTest test(queryVolume);
again = AABBTreeOverlap<true, SphereAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, OverlapCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, test, pcb);
}
break;
case PxGeometryType::eCONVEXMESH:
{
const DefaultOBBAABBTest test(queryVolume);
again = AABBTreeOverlap<true, OBBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, OverlapCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, test, pcb);
}
break;
default:
PX_ALWAYS_ASSERT_MESSAGE("unsupported overlap query volume geometry type");
}
}
return again;
}
bool IncrementalAABBPruner::sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& pcbArgName) const
{
bool again = true;
if(mAABBTree && mAABBTree->getNodes())
{
const PxBounds3& aabb = queryVolume.getPrunerInflatedWorldAABB();
RaycastCallbackAdapter pcb(pcbArgName, mPool);
again = AABBTreeRaycast<true, true, IncrementalAABBTree, IncrementalAABBTreeNode, RaycastCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, aabb.getCenter(), unitDir, inOutDistance, aabb.getExtents(), pcb);
}
return again;
}
bool IncrementalAABBPruner::raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& pcbArgName) const
{
bool again = true;
if(mAABBTree && mAABBTree->getNodes())
{
RaycastCallbackAdapter pcb(pcbArgName, mPool);
again = AABBTreeRaycast<false, true, IncrementalAABBTree, IncrementalAABBTreeNode, RaycastCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, origin, unitDir, inOutDistance, PxVec3(0.0f), pcb);
}
return again;
}
// This isn't part of the pruner virtual interface, but it is part of the public interface
// of this pruner - it gets called by SqManager to force a rebuild, and requires a commit()
// before queries can take place
void IncrementalAABBPruner::purge()
{
release();
}
// Commit builds the tree from scratch if it does not exist yet. Unlike AABBPruner there is
// no background-rebuilt second tree to swap in: this pruner maintains its tree incrementally
void IncrementalAABBPruner::commit()
{
PX_PROFILE_ZONE("SceneQuery.prunerCommit", mContextID);
if (!mAABBTree)
{
fullRebuildAABBTree();
return;
}
}
void IncrementalAABBPruner::fullRebuildAABBTree()
{
// Don't bother building an AABB-tree if there isn't a single object
const PxU32 nbObjects = mPool.getNbActiveObjects();
if (!nbObjects)
return;
const PxU32 indicesSize = PxNextPowerOfTwo(nbObjects);
if(indicesSize > mMapping.size())
{
mMapping.resizeUninitialized(indicesSize);
}
// build a fresh incremental tree over the current pool bounds
mAABBTree = PX_NEW(IncrementalAABBTree)();
mAABBTree->build(AABBTreeBuildParams(INCR_NB_OBJECTS_PER_NODE, nbObjects, &mPool.getCurrentAABBTreeBounds()), mMapping);
#if PARANOIA_CHECKS
test();
#endif
}
void IncrementalAABBPruner::shiftOrigin(const PxVec3& shift)
{
mPool.shiftOrigin(shift);
if(mAABBTree)
mAABBTree->shiftOrigin(shift);
}
void IncrementalAABBPruner::visualize(PxRenderOutput& out, PxU32 primaryColor, PxU32 /*secondaryColor*/) const
{
// getAABBTree() asserts when pruner is dirty. NpScene::visualization() does not enforce flushUpdate. see DE7834
visualizeTree(out, primaryColor, mAABBTree);
// Render added objects not yet in the tree
//out << PxTransform(PxIdentity);
//out << PxU32(PxDebugColor::eARGB_WHITE);
}
void IncrementalAABBPruner::release() // this can be called from purge()
{
PX_DELETE(mAABBTree);
}
void IncrementalAABBPruner::test()
{
if(mAABBTree)
{
mAABBTree->hierarchyCheck(mPool.getNbActiveObjects(), mPool.getCurrentWorldBoxes());
for(PxU32 i = 0; i < mPool.getNbActiveObjects(); i++)
{
mAABBTree->checkTreeLeaf(mMapping[i], i);
}
}
}
void IncrementalAABBPruner::merge(const void* )
{
//const AABBPrunerMergeData& pruningStructure = *reinterpret_cast<const AABBPrunerMergeData*> (mergeParams);
//if(mAABBTree)
//{
// // index in pruning pool, where new objects were added
// const PxU32 pruningPoolIndex = mPool.getNbActiveObjects() - pruningStructure.mNbObjects;
// // create tree from given nodes and indices
// AABBTreeMergeData aabbTreeMergeParams(pruningStructure.mNbNodes, pruningStructure.mAABBTreeNodes,
// pruningStructure.mNbObjects, pruningStructure.mAABBTreeIndices, pruningPoolIndex);
// if (!mIncrementalRebuild)
// {
// // merge tree directly
// mAABBTree->mergeTree(aabbTreeMergeParams);
// }
// else
// {
// mBucketPruner.addTree(aabbTreeMergeParams, mTimeStamp);
// }
//}
}
void IncrementalAABBPruner::getGlobalBounds(PxBounds3& bounds) const
{
if(mAABBTree && mAABBTree->getNodes())
{
StoreBounds(bounds, mAABBTree->getNodes()->mBVMin, mAABBTree->getNodes()->mBVMax);
}
else
bounds.setEmpty();
}
#endif
| 12,574 | C++ | 30.516291 | 222 | 0.745904 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuPruningPool.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_PRUNING_POOL_H
#define GU_PRUNING_POOL_H
#include "common/PxPhysXCommonConfig.h"
#include "GuPrunerTypedef.h"
#include "GuPrunerPayload.h"
#include "GuBounds.h"
#include "GuAABBTreeBounds.h"
namespace physx
{
namespace Gu
{
enum TransformCacheMode
{
TRANSFORM_CACHE_UNUSED,
TRANSFORM_CACHE_LOCAL,
TRANSFORM_CACHE_GLOBAL
};
// This class is designed to maintain a two-way mapping between (PrunerPayload/userdata, AABB) pairs and PrunerHandles
// Internally there's also an index for handles (AP: can be simplified?)
// This class effectively stores bounded pruner payloads/userdata, returns a PrunerHandle and allows O(1)
// access to them using a PrunerHandle
// Supported operations are add, remove, update bounds
class PX_PHYSX_COMMON_API PruningPool : public PxUserAllocated
{
PX_NOCOPY(PruningPool)
public:
PruningPool(PxU64 contextID, TransformCacheMode mode/*=TRANSFORM_CACHE_UNUSED*/);
~PruningPool();
PX_FORCE_INLINE const PrunerPayload& getPayloadData(PrunerHandle handle, PrunerPayloadData* data=NULL) const
{
const PoolIndex index = getIndex(handle);
if(data)
{
PxBounds3* wb = const_cast<PxBounds3*>(mWorldBoxes.getBounds());
data->mBounds = wb + index;
data->mTransform = mTransforms ? mTransforms + index : NULL;
}
return mObjects[index];
}
void shiftOrigin(const PxVec3& shift);
// PT: adds 'count' objects to the pool. Needs 'count' bounds and 'count' payloads passed as input. Writes out 'count' handles
// in 'results' array. Function returns number of successfully added objects, ideally 'count' but can be less in case we run
// out of memory.
PxU32 addObjects(PrunerHandle* results, const PxBounds3* bounds, const PrunerPayload* data, const PxTransform* transforms, PxU32 count);
// this function swaps the last object into the hole left by the removed PrunerHandle's object
// and returns the last object's previous index in the pool
PoolIndex removeObject(PrunerHandle h, PrunerPayloadRemovalCallback* removalCallback);
// Data access
PX_FORCE_INLINE PoolIndex getIndex(PrunerHandle h)const { return mHandleToIndex[h]; }
PX_FORCE_INLINE PrunerPayload* getObjects() const { return mObjects; }
PX_FORCE_INLINE const PxTransform* getTransforms() const { return mTransforms; }
PX_FORCE_INLINE PxTransform* getTransforms() { return mTransforms; }
PX_FORCE_INLINE bool setTransform(PrunerHandle handle, const PxTransform& transform)
{
if(!mTransforms)
return false;
mTransforms[getIndex(handle)] = transform;
return true;
}
PX_FORCE_INLINE PxU32 getNbActiveObjects() const { return mNbObjects; }
PX_FORCE_INLINE const PxBounds3* getCurrentWorldBoxes() const { return mWorldBoxes.getBounds(); }
PX_FORCE_INLINE PxBounds3* getCurrentWorldBoxes() { return mWorldBoxes.getBounds(); }
PX_FORCE_INLINE const AABBTreeBounds& getCurrentAABBTreeBounds() const { return mWorldBoxes; }
void updateAndInflateBounds(const PrunerHandle* handles, const PxU32* boundsIndices, const PxBounds3* newBounds, const PxTransform32* newTransforms, PxU32 count, float epsilon);
void preallocate(PxU32 entries);
// protected:
PxU32 mNbObjects; //!< Current number of objects
PxU32 mMaxNbObjects; //!< Max. number of objects (capacity for mWorldBoxes, mObjects)
//!< these arrays are parallel
AABBTreeBounds mWorldBoxes; //!< List of world boxes, stores mNbObjects, capacity=mMaxNbObjects
PrunerPayload* mObjects; //!< List of objects, stores mNbObjects, capacity=mMaxNbObjects
PxTransform* mTransforms;
const TransformCacheMode mTransformCacheMode;
// private:
PoolIndex* mHandleToIndex; //!< Maps from PrunerHandle to internal index (payload/userData index in mObjects)
PrunerHandle* mIndexToHandle; //!< Inverse map from objectIndex to PrunerHandle
// this is the head of a list of holes formed in mHandleToIndex by removed handles
// the rest of the list is stored in holes in mHandleToIndex (in place)
PrunerHandle mFirstRecycledHandle;
PxU64 mContextID;
bool resize(PxU32 newCapacity);
};
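// A minimal, uncompiled sketch of the removal contract documented above. 'onSwap' is a
// hypothetical callback standing in for whatever index-based side structure the caller
// maintains (e.g. the mMapping array in the incremental pruner).
#if 0
void exampleRemove(PruningPool& pool, PrunerHandle h)
{
	const PoolIndex removed = pool.getIndex(h);			// slot about to become a hole
	const PoolIndex last = pool.removeObject(h, NULL);	// old index of the object swapped into 'removed'
	onSwap(last, removed);								// mirror the swap in any parallel arrays
}
#endif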
}
}
#endif
| 6,129 | C | 46.153846 | 187 | 0.715614 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuWindingNumberT.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_WINDING_NUMBER_T_H
#define GU_WINDING_NUMBER_T_H
/** \addtogroup geomutils
@{
*/
#include "GuTriangle.h"
#include "foundation/PxArray.h"
#include "foundation/PxHashMap.h"
#include "foundation/PxVec3.h"
#include "GuBVH.h"
#include "GuAABBTreeQuery.h"
#include "GuAABBTreeNode.h"
#include "GuWindingNumberCluster.h"
namespace physx
{
namespace Gu
{
using Triangle = Gu::IndexedTriangleT<PxI32>;
template<typename R, typename V3>
struct SecondOrderClusterApproximationT : public ClusterApproximationT<R, V3>
{
PxMat33 WeightedOuterProductSum;
PX_FORCE_INLINE SecondOrderClusterApproximationT() {}
PX_FORCE_INLINE SecondOrderClusterApproximationT(R radius, R areaSum, const V3& weightedCentroid, const V3& weightedNormalSum, const PxMat33& weightedOuterProductSum) :
ClusterApproximationT<R, V3>(radius, areaSum, weightedCentroid, weightedNormalSum), WeightedOuterProductSum(weightedOuterProductSum)
{ }
};
//Evaluates a first order winding number approximation for a given cluster (cluster = bunch of triangles)
template<typename R, typename V3>
PX_FORCE_INLINE R firstOrderClusterApproximation(const V3& weightedCentroid, const V3& weightedNormalSum,
const V3& evaluationPoint)
{
const V3 dir = weightedCentroid - evaluationPoint;
const R l = dir.magnitude();
return (R(0.25 / 3.141592653589793238462643383) / (l * l * l)) * weightedNormalSum.dot(dir);
}
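//Note: this is the far-field "dipole" term of the winding number expansion,
//    w(q) ~= (1/(4*pi)) * WeightedNormalSum . (c - q) / |c - q|^3
//with c the area-weighted centroid and WeightedNormalSum = sum_t(A_t * n_t);
//cf. Barill et al., "Fast Winding Numbers for Soups and Clouds" (SIGGRAPH 2018)
//for the general expansion.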
template<typename R, typename V3>
PX_FORCE_INLINE R clusterApproximation(const ClusterApproximationT<R, V3>& c, const V3& evaluationPoint)
{
return firstOrderClusterApproximation(c.WeightedCentroid, c.WeightedNormalSum, evaluationPoint);
}
//Evaluates a second order winding number approximation for a given cluster (cluster = bunch of triangles)
template<typename R, typename V3>
PX_FORCE_INLINE R secondOrderClusterApproximation(const V3& weightedCentroid, const V3& weightedNormalSum,
const PxMat33& weightedOuterProductSum, const V3& evaluationPoint)
{
const V3 dir = weightedCentroid - evaluationPoint;
const R l = dir.magnitude();
const R l2 = l * l;
const R scaling = R(0.25 / 3.141592653589793238462643383) / (l2 * l);
const R firstOrder = scaling * weightedNormalSum.dot(dir);
const R scaling2 = -R(3.0) * scaling / l2;
const R m11 = scaling + scaling2 * dir.x * dir.x, m12 = scaling2 * dir.x * dir.y, m13 = scaling2 * dir.x * dir.z;
const R m21 = scaling2 * dir.y * dir.x, m22 = scaling + scaling2 * dir.y * dir.y, m23 = scaling2 * dir.y * dir.z;
const R m31 = scaling2 * dir.z * dir.x, m32 = scaling2 * dir.z * dir.y, m33 = scaling + scaling2 * dir.z * dir.z;
return firstOrder + (weightedOuterProductSum.column0.x * m11 + weightedOuterProductSum.column1.x * m12 + weightedOuterProductSum.column2.x * m13 +
weightedOuterProductSum.column0.y * m21 + weightedOuterProductSum.column1.y * m22 + weightedOuterProductSum.column2.y * m23 +
weightedOuterProductSum.column0.z * m31 + weightedOuterProductSum.column1.z * m32 + weightedOuterProductSum.column2.z * m33);
}
template<typename R, typename V3>
PX_FORCE_INLINE R clusterApproximation(const SecondOrderClusterApproximationT<R, V3>& c, const V3& evaluationPoint)
{
return secondOrderClusterApproximation(c.WeightedCentroid, c.WeightedNormalSum, c.WeightedOuterProductSum, evaluationPoint);
}
//Computes parameters to approximately represent a cluster (cluster = bunch of triangles) to be used to compute a winding number approximation
template<typename R, typename V3>
void approximateCluster(const PxArray<PxI32>& triangleSet, PxU32 start, PxU32 end, const PxU32* triangles, const V3* points,
const PxArray<R>& triangleAreas, const PxArray<V3>& triangleNormalsTimesTriangleArea, const PxArray<V3>& triangleCentroids, ClusterApproximationT<R, V3>& cluster)
{
V3 weightedCentroid(0., 0., 0.);
R areaSum = 0;
V3 weightedNormalSum(0., 0., 0.);
for (PxU32 i = start; i < end; ++i)
{
PxI32 triId = triangleSet[i];
areaSum += triangleAreas[triId];
weightedCentroid += triangleCentroids[triId] * triangleAreas[triId];
weightedNormalSum += triangleNormalsTimesTriangleArea[triId];
}
weightedCentroid = weightedCentroid / areaSum;
R radiusSquared = 0;
for (PxU32 i = start; i < end; ++i)
{
PxI32 triId = triangleSet[i];
const PxU32* tri = &triangles[3 * triId];
R d2 = (weightedCentroid - points[tri[0]]).magnitudeSquared();
if (d2 > radiusSquared) radiusSquared = d2;
d2 = (weightedCentroid - points[tri[1]]).magnitudeSquared();
if (d2 > radiusSquared) radiusSquared = d2;
d2 = (weightedCentroid - points[tri[2]]).magnitudeSquared();
if (d2 > radiusSquared) radiusSquared = d2;
}
cluster = ClusterApproximationT<R, V3>(PxSqrt(radiusSquared), areaSum, weightedCentroid, weightedNormalSum/*, weightedOuterProductSum*/);
}
//Computes parameters to approximately represent a cluster (cluster = bunch of triangles) to be used to compute a winding number approximation
template<typename R, typename V3>
void approximateCluster(const PxArray<PxI32>& triangleSet, PxU32 start, PxU32 end, const PxU32* triangles, const V3* points,
const PxArray<R>& triangleAreas, const PxArray<V3>& triangleNormalsTimesTriangleArea, const PxArray<V3>& triangleCentroids, SecondOrderClusterApproximationT<R, V3>& cluster)
{
V3 weightedCentroid(0., 0., 0.);
R areaSum = 0;
V3 weightedNormalSum(0., 0., 0.);
for (PxU32 i = start; i < end; ++i)
{
PxI32 triId = triangleSet[i];
areaSum += triangleAreas[triId];
weightedCentroid += triangleCentroids[triId] * triangleAreas[triId];
weightedNormalSum += triangleNormalsTimesTriangleArea[triId];
}
weightedCentroid = weightedCentroid / areaSum;
R radiusSquared = 0;
PxMat33 weightedOuterProductSum(PxZERO::PxZero);
for (PxU32 i = start; i < end; ++i)
{
PxI32 triId = triangleSet[i];
const PxU32* tri = &triangles[3 * triId];
R d2 = (weightedCentroid - points[tri[0]]).magnitudeSquared();
if (d2 > radiusSquared) radiusSquared = d2;
d2 = (weightedCentroid - points[tri[1]]).magnitudeSquared();
if (d2 > radiusSquared) radiusSquared = d2;
d2 = (weightedCentroid - points[tri[2]]).magnitudeSquared();
if (d2 > radiusSquared) radiusSquared = d2;
weightedOuterProductSum = weightedOuterProductSum + PxMat33::outer(triangleCentroids[triId] - weightedCentroid, triangleNormalsTimesTriangleArea[triId]);
}
cluster = SecondOrderClusterApproximationT<R, V3>(PxSqrt(radiusSquared), areaSum, weightedCentroid, weightedNormalSum, weightedOuterProductSum);
}
//Exact winding number evaluation, needs to be called for every triangle close to the winding number query point
template<typename R, typename V3>
PX_FORCE_INLINE R evaluateExact(V3 a, V3 b, V3 c, const V3& p)
{
const R twoOver4PI = R(0.5 / 3.141592653589793238462643383);
a -= p;
b -= p;
c -= p;
const R la = a.magnitude(),
lb = b.magnitude(),
lc = c.magnitude();
const R y = a.x * b.y * c.z - a.x * b.z * c.y - a.y * b.x * c.z + a.y * b.z * c.x + a.z * b.x * c.y - a.z * b.y * c.x;
const R x = (la * lb * lc + (a.x * b.x + a.y * b.y + a.z * b.z) * lc +
(b.x * c.x + b.y * c.y + b.z * c.z) * la + (c.x * a.x + c.y * a.y + c.z * a.z) * lb);
return twoOver4PI * PxAtan2(y, x);
}
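//Note: the formula above is the Van Oosterom-Strackee solid-angle formula,
//    tan(Omega/2) = det[a b c] / (|a||b||c| + (a.b)|c| + (b.c)|a| + (c.a)|b|)
//so the per-triangle contribution is Omega/(4*pi) = atan2(y, x)/(2*pi), which is
//exactly what twoOver4PI * PxAtan2(y, x) computes.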
struct Section
{
PxI32 start;
PxI32 end;
Section(PxI32 s, PxI32 e) : start(s), end(e)
{}
};
//Helper method that traverses the given BVH tree (iteratively, with an explicit stack) and computes a cluster approximation for every node and links it to the node
template<typename R, typename V3>
void precomputeClusterInformation(PxI32 nodeId, const BVHNode* tree, const PxU32* triangles, const PxU32 numTriangles,
const V3* points, PxHashMap<PxU32, ClusterApproximationT<R, V3>>& infos, const PxArray<R> triangleAreas,
const PxArray<V3>& triangleNormalsTimesTriangleArea, const PxArray<V3>& triangleCentroids)
{
PxArray<PxI32> stack;
stack.pushBack(nodeId);
PxArray<Section> returnStack;
PxArray<PxI32> triIndices;
triIndices.reserve(numTriangles);
infos.reserve(PxU32(1.2f*numTriangles));
while (stack.size() > 0)
{
nodeId = stack.popBack();
if (nodeId >= 0)
{
const BVHNode& node = tree[nodeId];
if (node.isLeaf())
{
triIndices.pushBack(node.getPrimitiveIndex());
returnStack.pushBack(Section(triIndices.size() - 1, triIndices.size()));
continue;
}
stack.pushBack(-nodeId - 1); //Marker for return index
stack.pushBack(node.getPosIndex());
stack.pushBack(node.getPosIndex() + 1);
}
else
{
Section trianglesA = returnStack.popBack();
Section trianglesB = returnStack.popBack();
Section sum(trianglesB.start, trianglesA.end);
nodeId = -nodeId - 1;
ClusterApproximationT<R, V3> c;
approximateCluster<R, V3>(triIndices, sum.start, sum.end, triangles, points, triangleAreas, triangleNormalsTimesTriangleArea, triangleCentroids, c);
infos.insert(PxU32(nodeId), c);
returnStack.pushBack(sum);
}
}
}
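//Note on the loop above: interior nodes are handled in two passes. On the first visit,
//(-nodeId - 1) is pushed as a return marker before both children; when a negative id is
//popped, both children's triangle ranges are already adjacent at the top of the return
//stack, so they merge into one contiguous section and the node's cluster is approximated
//over it. Leaves always append exactly one triangle, which keeps the ranges contiguous.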
//Precomputes a cluster approximation for every node in the BVH tree
template<typename R, typename V3>
void precomputeClusterInformation(const BVHNode* tree, const PxU32* triangles, const PxU32 numTriangles,
const V3* points, PxHashMap<PxU32, ClusterApproximationT<R, V3>>& result, PxI32 rootNodeIndex)
{
PxArray<R> triangleAreas;
triangleAreas.resize(numTriangles);
PxArray<V3> triangleNormalsTimesTriangleArea;
triangleNormalsTimesTriangleArea.resize(numTriangles);
PxArray<V3> triangleCentroids;
triangleCentroids.resize(numTriangles);
for (PxU32 i = 0; i < numTriangles; ++i)
{
const PxU32* tri = &triangles[3 * i];
const V3& a = points[tri[0]];
const V3& b = points[tri[1]];
const V3& c = points[tri[2]];
triangleNormalsTimesTriangleArea[i] = (b - a).cross(c - a) * R(0.5);
triangleAreas[i] = triangleNormalsTimesTriangleArea[i].magnitude();
triangleCentroids[i] = (a + b + c) * R(1.0 / 3.0);
}
result.clear();
precomputeClusterInformation(rootNodeIndex, tree, triangles, numTriangles, points, result, triangleAreas, triangleNormalsTimesTriangleArea, triangleCentroids);
}
template<typename R, typename V3>
class WindingNumberTraversalController
{
public:
R mWindingNumber = 0;
private:
const PxU32* mTriangles;
const V3* mPoints;
const PxHashMap<PxU32, ClusterApproximationT<R, V3>>& mClusters;
V3 mQueryPoint;
R mDistanceThresholdBeta;
public:
PX_FORCE_INLINE WindingNumberTraversalController(const PxU32* triangles, const V3* points,
const PxHashMap<PxU32, ClusterApproximationT<R, V3>>& clusters, const V3& queryPoint, R distanceThresholdBeta = 2)
: mTriangles(triangles), mPoints(points), mClusters(clusters), mQueryPoint(queryPoint), mDistanceThresholdBeta(distanceThresholdBeta)
{ }
PX_FORCE_INLINE Gu::TraversalControl::Enum analyze(const BVHNode& node, PxI32 nodeIndex)
{
if (node.isLeaf())
{
PX_ASSERT(node.getNbPrimitives() == 1);
const PxU32* tri = &mTriangles[3 * node.getPrimitiveIndex()];
mWindingNumber += evaluateExact<R, V3>(mPoints[tri[0]], mPoints[tri[1]], mPoints[tri[2]], mQueryPoint);
return Gu::TraversalControl::eDontGoDeeper;
}
const ClusterApproximationT<R, V3>& cluster = mClusters.find(nodeIndex)->second;
const R distSquared = (mQueryPoint - cluster.WeightedCentroid).magnitudeSquared();
const R threshold = mDistanceThresholdBeta * cluster.Radius;
if (distSquared > threshold * threshold)
{
//mWindingNumber += secondOrderClusterApproximation(cluster.WeightedCentroid, cluster.WeightedNormalSum, cluster.WeightedOuterProductSum, mQueryPoint);
mWindingNumber += firstOrderClusterApproximation<R, V3>(cluster.WeightedCentroid, cluster.WeightedNormalSum, mQueryPoint);
return Gu::TraversalControl::eDontGoDeeper;
}
return Gu::TraversalControl::eGoDeeper;
}
private:
PX_NOCOPY(WindingNumberTraversalController)
};
template<typename R, typename V3>
R computeWindingNumber(const BVHNode* tree, const V3& q, R beta, const PxHashMap<PxU32, ClusterApproximationT<R, V3>>& clusters,
const PxU32* triangles, const V3* points)
{
WindingNumberTraversalController<R, V3> c(triangles, points, clusters, q, beta);
traverseBVH<WindingNumberTraversalController<R, V3>>(tree, c);
return c.mWindingNumber;
}
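//A minimal, uncompiled usage sketch. For a closed, consistently wound mesh the winding
//number is ~1 inside and ~0 outside, so thresholding at 0.5 gives an inside test; beta
//trades accuracy for speed (the traversal above defaults it to 2).
#if 0
bool isInside(const BVHNode* tree, const PxU32* triangles, const PxVec3d* points,
	const PxHashMap<PxU32, ClusterApproximationT<PxF64, PxVec3d>>& clusters, const PxVec3d& q)
{
	// 'clusters' assumed filled by precomputeClusterInformation() above
	return computeWindingNumber<PxF64, PxVec3d>(tree, q, PxF64(2.0), clusters, triangles, points) > 0.5;
}
#endif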
}
}
/** @} */
#endif
| 14,073 | C | 41.264264 | 261 | 0.732822 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuAABBTree.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_AABBTREE_H
#define GU_AABBTREE_H
#include "foundation/PxMemory.h"
#include "foundation/PxArray.h"
#include "foundation/PxBounds3.h"
#include "foundation/PxUserAllocated.h"
#include "common/PxPhysXCommonConfig.h"
#include "GuPrunerTypedef.h"
namespace physx
{
namespace Gu
{
struct BVHNode;
struct SAH_Buffers;
class NodeAllocator;
struct BuildStats;
class AABBTreeBounds;
// PT: TODO: sometimes we export member functions, sometimes we export the whole class. What's the story here?
#if PX_VC
#pragma warning(push)
#pragma warning( disable : 4251 ) // class needs to have dll-interface to be used by clients of class
#endif
//! Contains AABB-tree build parameters
class PX_PHYSX_COMMON_API AABBTreeBuildParams : public PxUserAllocated
{
public:
AABBTreeBuildParams(PxU32 limit = 1, PxU32 nb_prims = 0, const AABBTreeBounds* bounds = NULL, BVHBuildStrategy bs = BVH_SPLATTER_POINTS) :
mLimit (limit),
mNbPrimitives (nb_prims),
mBounds (bounds),
mCache (NULL),
mBuildStrategy (bs)
{
}
~AABBTreeBuildParams()
{
reset();
}
PX_FORCE_INLINE void reset()
{
mLimit = mNbPrimitives = 0;
mBounds = NULL;
PX_FREE(mCache);
}
PxU32 mLimit; //!< Limit number of primitives / node. If limit is 1, build a complete tree (2*N-1 nodes)
PxU32 mNbPrimitives; //!< Number of (source) primitives.
const AABBTreeBounds* mBounds; //!< Shortcut to an app-controlled array of AABBs.
mutable PxVec3* mCache; //!< Cache for AABB centers - managed by build code.
BVHBuildStrategy mBuildStrategy;
};
//! AABB tree node used for building
class PX_PHYSX_COMMON_API AABBTreeBuildNode : public PxUserAllocated
{
public:
PX_FORCE_INLINE AABBTreeBuildNode() {}
PX_FORCE_INLINE ~AABBTreeBuildNode() {}
PX_FORCE_INLINE const PxBounds3& getAABB() const { return mBV; }
PX_FORCE_INLINE const AABBTreeBuildNode* getPos() const { return mPos; }
PX_FORCE_INLINE const AABBTreeBuildNode* getNeg() const { const AABBTreeBuildNode* P = mPos; return P ? P + 1 : NULL; }
PX_FORCE_INLINE bool isLeaf() const { return !getPos(); }
PxBounds3 mBV; //!< Global bounding-volume enclosing all the node-related primitives
const AABBTreeBuildNode* mPos; //!< "Positive" & "Negative" children
PxU32 mNodeIndex; //!< Index of node-related primitives (in the tree's mIndices array)
PxU32 mNbPrimitives; //!< Number of primitives for this node
PX_FORCE_INLINE PxU32 getNbPrimitives() const { return mNbPrimitives; }
PX_FORCE_INLINE PxU32 getNbRuntimePrimitives() const { return mNbPrimitives; }
PX_FORCE_INLINE void setNbRunTimePrimitives(PxU32 val) { mNbPrimitives = val; }
PX_FORCE_INLINE const PxU32* getPrimitives(const PxU32* base) const { return base + mNodeIndex; }
PX_FORCE_INLINE PxU32* getPrimitives(PxU32* base) { return base + mNodeIndex; }
void subdivide(const AABBTreeBuildParams& params, BuildStats& stats, NodeAllocator& allocator, PxU32* const indices);
void subdivideSAH(const AABBTreeBuildParams& params, SAH_Buffers& sah, BuildStats& stats, NodeAllocator& allocator, PxU32* const indices);
void _buildHierarchy(const AABBTreeBuildParams& params, BuildStats& stats, NodeAllocator& allocator, PxU32* const indices);
void _buildHierarchySAH(const AABBTreeBuildParams& params, SAH_Buffers& sah, BuildStats& stats, NodeAllocator& allocator, PxU32* const indices);
};
//! For complete trees we can predict the final number of nodes and preallocate them. For incomplete trees we can't.
//! But we don't want to allocate nodes one by one (which would be quite slow), so we use this helper class to
//! allocate N nodes at once, while minimizing the amount of nodes allocated for nothing. An initial amount of
//! nodes is estimated using the max number for a complete tree, and the user-defined number of primitives per leaf.
//! In ideal cases this estimated number will be quite close to the final number of nodes. When that number is not
//! enough though, slabs of N=1024 extra nodes are allocated until the build is complete.
class PX_PHYSX_COMMON_API NodeAllocator : public PxUserAllocated
{
public:
NodeAllocator();
~NodeAllocator();
void release();
void init(PxU32 nbPrimitives, PxU32 limit);
AABBTreeBuildNode* getBiNode();
AABBTreeBuildNode* mPool;
struct Slab
{
PX_FORCE_INLINE Slab() {}
PX_FORCE_INLINE Slab(AABBTreeBuildNode* pool, PxU32 nbUsedNodes, PxU32 maxNbNodes) : mPool(pool), mNbUsedNodes(nbUsedNodes), mMaxNbNodes(maxNbNodes) {}
AABBTreeBuildNode* mPool;
PxU32 mNbUsedNodes;
PxU32 mMaxNbNodes;
};
PxArray<Slab> mSlabs;
PxU32 mCurrentSlabIndex;
PxU32 mTotalNbNodes;
};
#if PX_VC
#pragma warning(pop)
#endif
/*
* \brief Builds an AABB tree from the given parameters.
* \param params [in/out] AABBTree build params
* \param nodeAllocator [in/out] Node allocator
* \param stats [out] Statistics
* \return Indices buffer allocated during build, or NULL if failed
*/
PX_PHYSX_COMMON_API PxU32* buildAABBTree(const AABBTreeBuildParams& params, NodeAllocator& nodeAllocator, BuildStats& stats);
// PT: TODO: explain how users should call these functions and maybe revisit this
PX_PHYSX_COMMON_API void flattenTree(const NodeAllocator& nodeAllocator, BVHNode* dest, const PxU32* remap = NULL);
PX_PHYSX_COMMON_API void buildAABBTree(PxU32 nbBounds, const AABBTreeBounds& bounds, PxArray<BVHNode>& tree);
PxU32 reshuffle(PxU32 nb, PxU32* const PX_RESTRICT prims, const PxVec3* PX_RESTRICT centers, float splitValue, PxU32 axis);
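// A minimal, uncompiled sketch of the build entry points above. The convenience overload
// hides the params/allocator/flatten sequence that the low-level buildAABBTree()/flattenTree()
// pair exposes:
#if 0
void exampleBuild(const AABBTreeBounds& bounds, PxU32 nbPrims, PxArray<BVHNode>& outTree)
{
	buildAABBTree(nbPrims, bounds, outTree);	// builds, then flattens into outTree
}
#endif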
class BitArray
{
public:
BitArray() : mBits(NULL), mSize(0) {}
BitArray(PxU32 nb_bits) { init(nb_bits); }
~BitArray() { PX_FREE(mBits); }
bool init(PxU32 nb_bits);
// Data management
PX_FORCE_INLINE void setBit(PxU32 bit_number)
{
mBits[bit_number>>5] |= 1<<(bit_number&31);
}
PX_FORCE_INLINE void clearBit(PxU32 bit_number)
{
mBits[bit_number>>5] &= ~(1<<(bit_number&31));
}
PX_FORCE_INLINE void toggleBit(PxU32 bit_number)
{
mBits[bit_number>>5] ^= 1<<(bit_number&31);
}
PX_FORCE_INLINE void clearAll() { PxMemZero(mBits, mSize*4); }
PX_FORCE_INLINE void setAll() { PxMemSet(mBits, 0xff, mSize*4); }
void resize(PxU32 maxBitNumber);
// Data access
PX_FORCE_INLINE PxIntBool isSet(PxU32 bit_number) const
{
return PxIntBool(mBits[bit_number>>5] & (1<<(bit_number&31)));
}
PX_FORCE_INLINE const PxU32* getBits() const { return mBits; }
PX_FORCE_INLINE PxU32 getSize() const { return mSize; }
protected:
PxU32* mBits; //!< Array of bits
PxU32 mSize; //!< Size of the array in dwords
};
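// Trivial, uncompiled usage sketch for the bit array above:
#if 0
BitArray refitFlags(1024);	// capacity is managed in whole 32-bit words
refitFlags.clearAll();
refitFlags.setBit(42);
PX_ASSERT(refitFlags.isSet(42));
refitFlags.clearBit(42);
#endif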
//! Contains AABB-tree merge parameters
class AABBTreeMergeData
{
public:
AABBTreeMergeData(PxU32 nbNodes, const BVHNode* nodes, PxU32 nbIndices, const PxU32* indices, PxU32 indicesOffset) :
mNbNodes(nbNodes), mNodes(nodes), mNbIndices(nbIndices), mIndices(indices), mIndicesOffset(indicesOffset)
{
}
~AABBTreeMergeData() {}
PX_FORCE_INLINE const BVHNode& getRootNode() const { return *mNodes; }
public:
PxU32 mNbNodes; //!< Number of nodes of AABB tree merge
const BVHNode* mNodes; //!< Nodes of AABB tree merge
PxU32 mNbIndices; //!< Number of indices of AABB tree merge
const PxU32* mIndices; //!< Indices of AABB tree merge
PxU32 mIndicesOffset; //!< Indices offset from pruning pool
};
// Progressive building
class FIFOStack;
//~Progressive building
// PT: base class used to share some data and code between Gu::AABBtree and Gu::BVH. This is WIP and subject to change.
// Design dictated by refactoring necessities rather than a grand vision of something.
class BVHCoreData : public PxUserAllocated
{
public:
BVHCoreData() : mNbIndices(0), mNbNodes(0), mNodes(NULL), mIndices(NULL) {}
PX_FORCE_INLINE PxU32 getNbIndices() const { return mNbIndices; }
PX_FORCE_INLINE const PxU32* getIndices() const { return mIndices; }
PX_FORCE_INLINE PxU32* getIndices() { return mIndices; }
PX_FORCE_INLINE void setIndices(PxU32* indices) { mIndices = indices; }
PX_FORCE_INLINE PxU32 getNbNodes() const { return mNbNodes; }
PX_FORCE_INLINE const BVHNode* getNodes() const { return mNodes; }
PX_FORCE_INLINE BVHNode* getNodes() { return mNodes; }
PX_PHYSX_COMMON_API void fullRefit(const PxBounds3* boxes);
// PT: I'm leaving the above accessors here to avoid refactoring the SQ code using them, but members became public.
PxU32 mNbIndices; //!< Nb indices
PxU32 mNbNodes; //!< Number of nodes in the tree.
BVHNode* mNodes; //!< Linear pool of nodes.
PxU32* mIndices; //!< Indices in the app list. Indices are reorganized during build (permutation).
};
class BVHPartialRefitData : public BVHCoreData
{
public:
PX_PHYSX_COMMON_API BVHPartialRefitData();
PX_PHYSX_COMMON_API ~BVHPartialRefitData();
PX_PHYSX_COMMON_API void releasePartialRefitData(bool clearRefitMap);
// adds node[index] to a list of nodes to refit when refitMarkedNodes is called
// Note that this includes updating the hierarchy up the chain
PX_PHYSX_COMMON_API void markNodeForRefit(TreeNodeIndex nodeIndex);
PX_PHYSX_COMMON_API void refitMarkedNodes(const PxBounds3* boxes);
PX_FORCE_INLINE PxU32* getUpdateMap() { return mUpdateMap; }
protected:
PxU32* mParentIndices; //!< PT: hot/cold split, keep parent data in separate array
PxU32* mUpdateMap; //!< PT: Local index to tree node index
BitArray mRefitBitmask; //!< bit is set for each node index in markForRefit
PxU32 mRefitHighestSetWord;
PxU32* getParentIndices();
public:
void createUpdateMap(PxU32 nbObjects);
};
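// A minimal, uncompiled sketch of the partial-refit protocol above; assumes
// createUpdateMap() has been called so the update map is valid.
#if 0
void examplePartialRefit(BVHPartialRefitData& tree, const PxBounds3* boxes, PxU32 dirtyObjectIndex)
{
	const PxU32* map = tree.getUpdateMap();			// local/object index -> tree node index
	tree.markNodeForRefit(map[dirtyObjectIndex]);	// marks the leaf and its ancestor chain
	tree.refitMarkedNodes(boxes);					// one bottom-up pass over marked nodes only
}
#endif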
//! AABB-tree, N primitives/leaf
// PT: TODO: each PX_PHYSX_COMMON_API is a cross-DLL call, should we split that class in Gu/Sq parts to minimize this?
class AABBTree : public BVHPartialRefitData
{
public:
PX_PHYSX_COMMON_API AABBTree();
PX_PHYSX_COMMON_API ~AABBTree();
// Build
PX_PHYSX_COMMON_API bool build(const AABBTreeBuildParams& params, NodeAllocator& nodeAllocator);
// Progressive building
PX_PHYSX_COMMON_API PxU32 progressiveBuild(const AABBTreeBuildParams& params, NodeAllocator& nodeAllocator, BuildStats& stats, PxU32 progress, PxU32 limit);
//~Progressive building
PX_PHYSX_COMMON_API void release(bool clearRefitMap=true);
// Merge tree with another one
PX_PHYSX_COMMON_API void mergeTree(const AABBTreeMergeData& tree);
// Initialize tree from given merge data
PX_PHYSX_COMMON_API void initTree(const AABBTreeMergeData& tree);
// Data access
PX_FORCE_INLINE PxU32 getTotalPrims() const { return mTotalPrims; }
PX_PHYSX_COMMON_API void shiftOrigin(const PxVec3& shift);
// Shift indices of the tree by offset. Used for merged trees, when initial indices need to be shifted to match indices in the current pruning pool
PX_PHYSX_COMMON_API void shiftIndices(PxU32 offset);
#if PX_DEBUG
void validate() {}
#endif
private:
PxU32 mTotalPrims; //!< Copy of final BuildStats::mTotalPrims
// Progressive building
FIFOStack* mStack;
//~Progressive building
bool buildInit(const AABBTreeBuildParams& params, NodeAllocator& nodeAllocator, BuildStats& stats);
void buildEnd(const AABBTreeBuildParams& params, NodeAllocator& nodeAllocator, const BuildStats& stats);
// tree merge
void mergeRuntimeNode(BVHNode& targetNode, const AABBTreeMergeData& tree, PxU32 targetNodeIndex);
void mergeRuntimeLeaf(BVHNode& targetNode, const AABBTreeMergeData& tree, PxU32 targetNodeIndex);
void addRuntimeChilds(PxU32& nodeIndex, const AABBTreeMergeData& tree);
void traverseRuntimeNode(BVHNode& targetNode, const AABBTreeMergeData& tree, PxU32 nodeIndex);
};
} // namespace Gu
}
#endif // GU_AABBTREE_H
| 14,093 | C | 41.197605 | 161 | 0.698858 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuMaverickNode.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuMaverickNode.h"
using namespace physx;
using namespace Gu;
const PxU32 MaverickNode::mIndices[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
bool MaverickNode::addObject(const PrunerPayload& object, PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform, PxU32 timeStamp)
{
if(mNbFree<FREE_PRUNER_SIZE)
{
const PxU32 index = mNbFree++;
mFreeObjects[index] = object;
mFreeHandles[index] = handle;
mFreeBounds[index] = worldAABB;
mFreeTransforms[index] = transform;
mFreeStamps[index] = timeStamp;
return true;
}
return false;
}
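// Note: everything below is a linear scan over at most FREE_PRUNER_SIZE (16) entries;
// at that size a flat array is typically cheaper than any tree update, which is the point
// of keeping recently added objects in this "free list" ahead of the main tree.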
bool MaverickNode::updateObject(const PrunerPayload& object, const PxBounds3& worldAABB, const PxTransform& transform)
{
for(PxU32 i=0;i<mNbFree;i++)
{
if(mFreeObjects[i]==object)
{
mFreeBounds[i] = worldAABB;
mFreeTransforms[i] = transform;
return true;
}
}
return false;
}
bool MaverickNode::updateObject(PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform)
{
for(PxU32 i=0;i<mNbFree;i++)
{
if(mFreeHandles[i]==handle)
{
mFreeBounds[i] = worldAABB;
mFreeTransforms[i] = transform;
return true;
}
}
return false;
}
void MaverickNode::remove(PxU32 index)
{
mNbFree--;
if(index!=mNbFree)
{
mFreeBounds[index] = mFreeBounds[mNbFree];
mFreeTransforms[index] = mFreeTransforms[mNbFree];
mFreeObjects[index] = mFreeObjects[mNbFree];
mFreeHandles[index] = mFreeHandles[mNbFree];
mFreeStamps[index] = mFreeStamps[mNbFree];
}
}
bool MaverickNode::removeObject(const PrunerPayload& object, PxU32& timeStamp)
{
for(PxU32 i=0;i<mNbFree;i++)
{
if(mFreeObjects[i]==object)
{
// We found the object we want to remove. Close the gap as usual.
timeStamp = mFreeStamps[i];
remove(i);
return true;
}
}
return false;
}
bool MaverickNode::removeObject(PrunerHandle handle, PxU32& timeStamp)
{
for(PxU32 i=0;i<mNbFree;i++)
{
if(mFreeHandles[i]==handle)
{
// We found the object we want to remove. Close the gap as usual.
timeStamp = mFreeStamps[i];
remove(i);
return true;
}
}
return false;
}
PxU32 MaverickNode::removeMarkedObjects(PxU32 timeStamp)
{
PxU32 nbRemoved=0;
PxU32 i=0;
while(i<mNbFree)
{
if(mFreeStamps[i]==timeStamp)
{
nbRemoved++;
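// remove() moves the last entry into slot i, so do not advance i here:
// the moved entry must be re-tested against the timestamp.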
remove(i);
}
else i++;
}
return nbRemoved;
}
void MaverickNode::shiftOrigin(const PxVec3& shift)
{
for(PxU32 i=0;i<mNbFree;i++)
{
mFreeBounds[i].minimum -= shift;
mFreeBounds[i].maximum -= shift;
mFreeTransforms[i].p -= shift;
}
}
| 4,206 | C++ | 27.619047 | 153 | 0.725392 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuBucketPruner.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_BUCKET_PRUNER_H
#define GU_BUCKET_PRUNER_H
#include "common/PxPhysXCommonConfig.h"
#include "GuPruner.h"
#include "GuSqInternal.h"
#include "GuPruningPool.h"
#include "foundation/PxHash.h"
#define FREE_PRUNER_SIZE 16
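// Capacity of the small linear "free" arrays used as a fast path for recently added
// objects; once full, new objects go to the main (core) arrays instead.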
//#define USE_REGULAR_HASH_MAP
#ifdef USE_REGULAR_HASH_MAP
#include "foundation/PxHashMap.h"
#endif
namespace physx
{
class PxRenderOutput;
namespace Gu
{
typedef PxU32 BucketWord;
#if PX_VC
#pragma warning(push)
#pragma warning( disable : 4324 ) // Padding was added at the end of a structure because of a __declspec(align) value.
#endif
PX_ALIGN_PREFIX(16) struct BucketBox
{
PxVec3 mCenter;
PxU32 mData0; // Integer-encoded min value along sorting axis
PxVec3 mExtents;
PxU32 mData1; // Integer-encoded max value along sorting axis
#ifdef _DEBUG
// PT: we need the original min value for debug checks. Using the center/extents version
// fails because recomputing the min from them introduces FPU accuracy errors in the values.
float mDebugMin;
#endif
PX_FORCE_INLINE PxVec3 getMin() const
{
return mCenter - mExtents;
}
PX_FORCE_INLINE PxVec3 getMax() const
{
return mCenter + mExtents;
}
PX_FORCE_INLINE void setEmpty()
{
mCenter = PxVec3(0.0f);
mExtents = PxVec3(-PX_MAX_BOUNDS_EXTENTS);
#ifdef _DEBUG
mDebugMin = PX_MAX_BOUNDS_EXTENTS;
#endif
}
}PX_ALIGN_SUFFIX(16);
PX_ALIGN_PREFIX(16) struct BucketPrunerNode
{
BucketPrunerNode();
void classifyBoxes( float limitX, float limitZ,
PxU32 nb,
BucketBox* PX_RESTRICT boxes,
const PrunerPayload* PX_RESTRICT objects,
const PxTransform* PX_RESTRICT transforms,
BucketBox* PX_RESTRICT sortedBoxes,
PrunerPayload* PX_RESTRICT sortedObjects,
PxTransform* PX_RESTRICT sortedTransforms,
bool isCrossBucket, PxU32 sortAxis);
PX_FORCE_INLINE void initCounters()
{
for(PxU32 i=0;i<5;i++)
mCounters[i] = 0;
for(PxU32 i=0;i<5;i++)
mOffsets[i] = 0;
}
BucketWord mCounters[5]; // Number of objects in each of the 5 children
BucketWord mOffsets[5]; // Start index of objects for each of the 5 children
BucketBox mBucketBox[5]; // AABBs around objects for each of the 5 children
PxU16 mOrder[8]; // PNS: 5 children => 3 bits/index => 3*5=15 bits total, for each of the 8 canonical directions
}PX_ALIGN_SUFFIX(16);
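// Hash a PrunerPayload by folding its two pointer-sized words into a single 64-bit
// value. On 64-bit platforms only the lower 32 bits of each word are kept, which keeps
// the hash cheap at the cost of ignoring the upper bits.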
PX_FORCE_INLINE PxU32 PxComputeHash(const PrunerPayload& payload)
{
#if PX_P64_FAMILY
// const PxU32 h0 = PxHash((const void*)payload.data[0]);
// const PxU32 h1 = PxHash((const void*)payload.data[1]);
const PxU32 h0 = PxU32(PX_MAX_U32 & payload.data[0]);
const PxU32 h1 = PxU32(PX_MAX_U32 & payload.data[1]);
return physx::PxComputeHash(PxU64(h0)|(PxU64(h1)<<32));
#else
return physx::PxComputeHash(PxU64(payload.data[0])|(PxU64(payload.data[1])<<32));
#endif
}
#ifdef USE_REGULAR_HASH_MAP
struct BucketPrunerPair : public PxUserAllocated
{
PX_FORCE_INLINE BucketPrunerPair() {}
PX_FORCE_INLINE BucketPrunerPair(PxU32 index, PxU32 stamp) : mCoreIndex(index), mTimeStamp(stamp) {}
PxU32 mCoreIndex; // index in mCoreObjects
PxU32 mTimeStamp;
};
typedef PxHashMap<PrunerPayload, BucketPrunerPair> BucketPrunerMap;
#else
struct BucketPrunerPair : public PxUserAllocated
{
PrunerPayload mData;
PxU32 mCoreIndex; // index in mCoreObjects
PxU32 mTimeStamp;
};
// Custom hash-map - currently faster than the regular hash-map (PxHashMap), in particular for 'find-and-erase' operations.
class BucketPrunerMap : public PxUserAllocated
{
public:
BucketPrunerMap();
~BucketPrunerMap();
void purge();
void shrinkMemory();
BucketPrunerPair* addPair (const PrunerPayload& payload, PxU32 coreIndex, PxU32 timeStamp);
bool removePair (const PrunerPayload& payload, PxU32& coreIndex, PxU32& timeStamp);
const BucketPrunerPair* findPair (const PrunerPayload& payload) const;
PX_FORCE_INLINE PxU32 getPairIndex(const BucketPrunerPair* pair) const
{
return (PxU32((size_t(pair) - size_t(mActivePairs)))/sizeof(BucketPrunerPair));
}
PxU32 mHashSize;
PxU32 mMask;
PxU32 mNbActivePairs;
PxU32* mHashTable;
PxU32* mNext;
BucketPrunerPair* mActivePairs;
PxU32 mReservedMemory;
PX_FORCE_INLINE BucketPrunerPair* findPair(const PrunerPayload& payload, PxU32 hashValue) const;
void removePairInternal(const PrunerPayload& payload, PxU32 hashValue, PxU32 pairIndex);
void reallocPairs();
void reserveMemory(PxU32 memSize);
};
#endif
class BucketPrunerCore : public PxUserAllocated
{
public:
PX_PHYSX_COMMON_API BucketPrunerCore(bool externalMemory=true);
PX_PHYSX_COMMON_API ~BucketPrunerCore();
void release();
void setExternalMemory(PxU32 nbObjects, PxBounds3* boxes, PrunerPayload* objects, PxTransform* transforms);
PX_PHYSX_COMMON_API bool addObject(const PrunerPayload& object, const PxBounds3& worldAABB, const PxTransform& transform, PxU32 timeStamp=0);
bool removeObject(const PrunerPayload& object, PxU32& timeStamp);
bool updateObject(const PxBounds3& worldAABB, const PrunerPayload& object, const PxTransform& transform);
// PT: look for objects marked with input timestamp everywhere in the structure, and remove them. This is the same
// as calling 'removeObject' individually for all these objects, but much more efficient. Returns number of removed objects.
PxU32 removeMarkedObjects(PxU32 timeStamp);
PX_PHYSX_COMMON_API bool raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback&) const;
PX_PHYSX_COMMON_API bool overlap(const ShapeData& queryVolume, PrunerOverlapCallback&) const;
PX_PHYSX_COMMON_API bool sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback&) const;
void getGlobalBounds(PxBounds3& bounds) const;
void shiftOrigin(const PxVec3& shift);
void visualize(PxRenderOutput& out, PxU32 color) const;
PX_FORCE_INLINE void build() { classifyBoxes(); }
#ifdef FREE_PRUNER_SIZE
PX_FORCE_INLINE PxU32 getNbObjects() const { return mNbFree + mCoreNbObjects; }
#else
PX_FORCE_INLINE PxU32 getNbObjects() const { return mCoreNbObjects; }
#endif
// private:
PxU32 mCoreNbObjects; // Current number of objects in core arrays
PxU32 mCoreCapacity; // Capacity of core arrays
PxBounds3* mCoreBoxes; // Core array
PrunerPayload* mCoreObjects; // Core array
PxTransform* mCoreTransforms;
PxU32* mCoreRemap; // Remaps core index to sorted index, i.e. sortedIndex = mCoreRemap[coreIndex]
BucketBox* mSortedWorldBoxes; // Sorted array
PrunerPayload* mSortedObjects; // Sorted array
PxTransform* mSortedTransforms;
#ifdef FREE_PRUNER_SIZE
PxU32 mNbFree; // Current number of objects in the "free array" (mFreeObjects/mFreeBounds)
PrunerPayload mFreeObjects[FREE_PRUNER_SIZE]; // mNbFree objects are stored here
PxBounds3 mFreeBounds[FREE_PRUNER_SIZE]; // mNbFree object bounds are stored here
PxTransform mFreeTransforms[FREE_PRUNER_SIZE]; // mNbFree transforms are stored here
PxU32 mFreeStamps[FREE_PRUNER_SIZE];
#endif
BucketPrunerMap mMap; // Maps (PrunerPayload) object to corresponding index in core array.
// Objects in the free array do not appear in this map.
PxU32 mSortedNb;
PxU32 mSortedCapacity;
PxU32 mSortAxis;
BucketBox mGlobalBox; // Global bounds around all objects in the structure (except the ones in the "free" array)
BucketPrunerNode mLevel1;
BucketPrunerNode mLevel2[5];
BucketPrunerNode mLevel3[5][5];
bool mDirty;
bool mOwnMemory;
private:
PX_PHYSX_COMMON_API void classifyBoxes();
void allocateSortedMemory(PxU32 nb);
void resizeCore();
PX_FORCE_INLINE void addObjectInternal(const PrunerPayload& object, const PxBounds3& worldAABB, const PxTransform& transform, PxU32 timeStamp);
};
#if PX_VC
#pragma warning(pop)
#endif
class BucketPruner : public Pruner
{
public:
PX_PHYSX_COMMON_API BucketPruner(PxU64 contextID);
virtual ~BucketPruner();
// BasePruner
DECLARE_BASE_PRUNER_API
//~BasePruner
// Pruner
DECLARE_PRUNER_API_COMMON
//~Pruner
private:
BucketPrunerCore mCore;
PruningPool mPool;
};
}
}
#endif
| 10,356 | C | 35.46831 | 149 | 0.708285 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuCookingSDF.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_COOKING_SDF_BUILDER_H
#define GU_COOKING_SDF_BUILDER_H
#include "foundation/PxArray.h"
#include "common/PxPhysXCommonConfig.h"
namespace physx
{
class PxTriangleMeshDesc;
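// Builds a signed distance field (SDF) for the given triangle mesh: 'sdf' receives the
// grid samples, while 'sdfDataSubgrids'/'sdfSubgridsStartSlots' receive the data used by
// the sparse (subgrid-based) SDF representation, as suggested by the parameter names.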
PX_PHYSX_COMMON_API bool buildSDF(PxTriangleMeshDesc& desc, PxArray<PxReal>& sdf, PxArray<PxU8>& sdfDataSubgrids, PxArray<PxU32>& sdfSubgridsStartSlots);
}
#endif
| 2,049 | C | 46.674418 | 154 | 0.770132 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuSweepSharedTests.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_SWEEP_SHARED_TESTS_H
#define GU_SWEEP_SHARED_TESTS_H
#include "GuBoxConversion.h"
namespace physx
{
PX_FORCE_INLINE void computeWorldToBoxMatrix(PxMat34& worldToBox, const physx::Gu::Box& box)
{
PxMat34 boxToWorld;
physx::buildMatrixFromBox(boxToWorld, box);
worldToBox = boxToWorld.getInverseRT();
}
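// Remaps loop index i so that the cached triangle (e.g. the previously hit one) is
// tested first: indices 0 and cachedIndex are swapped, all others are left unchanged.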
PX_FORCE_INLINE PxU32 getTriangleIndex(PxU32 i, PxU32 cachedIndex)
{
PxU32 triangleIndex;
if(i==0) triangleIndex = cachedIndex;
else if(i==cachedIndex) triangleIndex = 0;
else triangleIndex = i;
return triangleIndex;
}
}
#endif
| 2,258 | C | 40.072727 | 92 | 0.763508 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuCallbackAdapter.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_CALLBACK_ADAPTER_H
#define GU_CALLBACK_ADAPTER_H
#include "GuPruner.h"
#include "GuPruningPool.h"
namespace physx
{
namespace Gu
{
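// Thin adapters that forward pruner callback invocations while supplying the payload
// and transform arrays owned by the pruning pool.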
struct RaycastCallbackAdapter
{
PX_FORCE_INLINE RaycastCallbackAdapter(PrunerRaycastCallback& pcb, const PruningPool& pool) : mCallback(pcb), mPool(pool) {}
PX_FORCE_INLINE bool invoke(PxReal& distance, PxU32 primIndex)
{
return mCallback.invoke(distance, primIndex, mPool.getObjects(), mPool.getTransforms());
}
PrunerRaycastCallback& mCallback;
const PruningPool& mPool;
PX_NOCOPY(RaycastCallbackAdapter)
};
struct OverlapCallbackAdapter
{
PX_FORCE_INLINE OverlapCallbackAdapter(PrunerOverlapCallback& pcb, const PruningPool& pool) : mCallback(pcb), mPool(pool) {}
PX_FORCE_INLINE bool invoke(PxU32 primIndex)
{
return mCallback.invoke(primIndex, mPool.getObjects(), mPool.getTransforms());
}
PrunerOverlapCallback& mCallback;
const PruningPool& mPool;
PX_NOCOPY(OverlapCallbackAdapter)
};
}
}
#endif
| 2,695 | C | 36.444444 | 126 | 0.762894 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuFactory.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuFactory.h"
#include "GuAABBPruner.h"
#include "GuBucketPruner.h"
#include "GuIncrementalAABBPruner.h"
using namespace physx;
using namespace Gu;
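// Minimal usage sketch (an assumption about ownership: the pruners below are created
// with PX_NEW, so the caller is expected to destroy them with PX_DELETE when done):
//
// Gu::Pruner* pruner = Gu::createBucketPruner(contextID);
// /* ... add objects, run queries ... */
// PX_DELETE(pruner);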
Pruner* physx::Gu::createBucketPruner(PxU64 contextID)
{
return PX_NEW(BucketPruner)(contextID);
}
Pruner* physx::Gu::createAABBPruner(PxU64 contextID, bool dynamic, CompanionPrunerType cpType, BVHBuildStrategy buildStrategy, PxU32 nbObjectsPerNode)
{
return PX_NEW(AABBPruner)(dynamic, contextID, cpType, buildStrategy, nbObjectsPerNode);
}
Pruner* physx::Gu::createIncrementalPruner(PxU64 contextID)
{
return PX_NEW(IncrementalAABBPruner)(32, contextID);
}
| 2,322 | C++ | 43.673076 | 150 | 0.773902 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuRaycastTests.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "geometry/PxSphereGeometry.h"
#include "geometry/PxConvexMeshGeometry.h"
#include "geometry/PxTetrahedronMeshGeometry.h"
#include "geometry/PxCustomGeometry.h"
#include "GuMidphaseInterface.h"
#include "GuInternal.h"
#include "GuIntersectionRayCapsule.h"
#include "GuIntersectionRaySphere.h"
#include "GuIntersectionRayPlane.h"
#include "GuHeightFieldUtil.h"
#include "GuDistancePointSegment.h"
#include "GuConvexMesh.h"
#include "CmScaling.h"
using namespace physx;
using namespace Gu;
////////////////////////////////////////////////// raycasts //////////////////////////////////////////////////////////////////
PxU32 raycast_box(GU_RAY_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::eBOX);
PX_ASSERT(maxHits && hits);
PX_UNUSED(threadContext);
PX_UNUSED(maxHits);
PX_UNUSED(stride);
const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom);
const PxTransform& absPose = pose;
PxVec3 localOrigin = rayOrigin - absPose.p;
localOrigin = absPose.q.rotateInv(localOrigin);
const PxVec3 localDir = absPose.q.rotateInv(rayDir);
PxVec3 localImpact;
PxReal t;
PxU32 rval = rayAABBIntersect2(-boxGeom.halfExtents, boxGeom.halfExtents, localOrigin, localDir, localImpact, t);
if(!rval)
return 0;
if(t>maxDist)
return 0;
hits->distance = t; //worldRay.orig.distance(hit.worldImpact); //should be the same, assuming ray dir was normalized!!
hits->faceIndex = 0xffffffff;
hits->u = 0.0f;
hits->v = 0.0f;
PxHitFlags outFlags = PxHitFlags(0);
if((hitFlags & PxHitFlag::ePOSITION))
{
outFlags |= PxHitFlag::ePOSITION;
if(t!=0.0f)
hits->position = absPose.transform(localImpact);
else
hits->position = rayOrigin;
}
// Compute additional information if needed
if(hitFlags & PxHitFlag::eNORMAL)
{
outFlags |= PxHitFlag::eNORMAL;
//Because rayAABBIntersect2 sets t = 0 if the start point is inside the shape
if(t == 0)
{
hits->normal = -rayDir;
}
else
{
//local space normal is:
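// rayAABBIntersect2 returns 1 + the index of the hit axis (0 means miss),
// so decrementing rval yields the axis of the hit face.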
rval--;
PxVec3 n(0.0f);
n[rval] = PxReal((localImpact[rval] > 0.0f) ? 1.0f : -1.0f);
hits->normal = absPose.q.rotate(n);
}
}
else
{
hits->normal = PxVec3(0.0f);
}
hits->flags = outFlags;
return 1;
}
PxU32 raycast_sphere(GU_RAY_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::eSPHERE);
PX_ASSERT(maxHits && hits);
PX_UNUSED(threadContext);
PX_UNUSED(maxHits);
PX_UNUSED(stride);
const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom);
if(!intersectRaySphere(rayOrigin, rayDir, maxDist, pose.p, sphereGeom.radius, hits->distance, &hits->position))
return 0;
/* // PT: should be useless now
hit.distance = worldRay.orig.distance(hit.worldImpact);
if(hit.distance>maxDist)
return false;
*/
// PT: we can't avoid computing the position here since it's needed to compute the normal anyway
hits->faceIndex = 0xffffffff;
hits->u = 0.0f;
hits->v = 0.0f;
// Compute additional information if needed
PxHitFlags outFlags = PxHitFlag::ePOSITION;
if(hitFlags & PxHitFlag::eNORMAL)
{
// User requested impact normal
//Because intersectRaySphere sets distance = 0 if the start point is inside the shape
if(hits->distance == 0.0f)
{
hits->normal = -rayDir;
}
else
{
hits->normal = hits->position - pose.p;
hits->normal.normalize();
}
outFlags |= PxHitFlag::eNORMAL;
}
else
{
hits->normal = PxVec3(0.0f);
}
hits->flags = outFlags;
return 1;
}
PxU32 raycast_capsule(GU_RAY_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::eCAPSULE);
PX_ASSERT(maxHits && hits);
PX_UNUSED(threadContext);
PX_UNUSED(maxHits);
PX_UNUSED(stride);
const PxCapsuleGeometry& capsuleGeom = static_cast<const PxCapsuleGeometry&>(geom);
// TODO: PT: could we simplify this ?
Capsule capsule;
getCapsuleSegment(pose, capsuleGeom, capsule);
capsule.radius = capsuleGeom.radius;
PxReal t = 0.0f;
if(!intersectRayCapsule(rayOrigin, rayDir, capsule, t))
return 0;
if(t<0.0f || t>maxDist)
return 0;
// PT: we can't avoid computing the position here since it's needed to compute the normal anyway
hits->position = rayOrigin + rayDir*t; // PT: will be rayOrigin for t=0.0f (i.e. what the spec wants)
hits->distance = t;
hits->faceIndex = 0xffffffff;
hits->u = 0.0f;
hits->v = 0.0f;
// Compute additional information if needed
PxHitFlags outFlags = PxHitFlag::ePOSITION;
if(hitFlags & PxHitFlag::eNORMAL)
{
outFlags |= PxHitFlag::eNORMAL;
if(t==0.0f)
{
hits->normal = -rayDir;
}
else
{
PxReal capsuleT;
distancePointSegmentSquared(capsule, hits->position, &capsuleT);
capsule.computePoint(hits->normal, capsuleT);
hits->normal = hits->position - hits->normal; //this should never be zero. It should have a magnitude of the capsule radius.
hits->normal.normalize();
}
}
else
{
hits->normal = PxVec3(0.0f);
}
hits->flags = outFlags;
return 1;
}
PxU32 raycast_plane(GU_RAY_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::ePLANE);
PX_ASSERT(maxHits && hits);
PX_UNUSED(threadContext);
PX_UNUSED(hitFlags);
PX_UNUSED(maxHits);
PX_UNUSED(stride);
PX_UNUSED(geom);
// const PxPlaneGeometry& planeGeom = static_cast<const PxPlaneGeometry&>(geom);
// Perform backface culling so that we can pick objects beyond planes
const PxPlane plane = getPlane(pose);
if(rayDir.dot(plane.n)>=0.0f)
return 0;
PxReal distanceAlongLine;
if(!intersectRayPlane(rayOrigin, rayDir, plane, distanceAlongLine, &hits->position))
return 0;
/*
PxReal test = worldRay.orig.distance(hit.worldImpact);
PxReal dd;
PxVec3 pp;
PxSegmentPlaneIntersect(worldRay.orig, worldRay.orig+worldRay.dir*1000.0f, plane, dd, pp);
*/
if(distanceAlongLine<0.0f)
return 0;
if(distanceAlongLine>maxDist)
return 0;
hits->distance = distanceAlongLine;
hits->faceIndex = 0xffffffff;
hits->u = 0.0f;
hits->v = 0.0f;
hits->flags = PxHitFlag::ePOSITION|PxHitFlag::eNORMAL;
hits->normal = plane.n;
return 1;
}
PxU32 raycast_convexMesh(GU_RAY_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::eCONVEXMESH);
PX_ASSERT(maxHits && hits);
PX_ASSERT(PxAbs(rayDir.magnitudeSquared()-1)<1e-4f);
PX_UNUSED(threadContext);
PX_UNUSED(maxHits);
PX_UNUSED(stride);
const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom);
ConvexMesh* convexMesh = static_cast<ConvexMesh*>(convexGeom.convexMesh);
PxGeomRaycastHit& hit = *hits;
//scaling: transform the ray to vertex space
const PxMat34 world2vertexSkew = convexGeom.scale.getInverse() * pose.getInverse();
//ConvexMesh* cmesh = static_cast<ConvexMesh*>(convexGeom.convexMesh);
const PxU32 nPolys = convexMesh->getNbPolygonsFast();
const HullPolygonData* PX_RESTRICT polysEA = convexMesh->getPolygons();
const HullPolygonData* polys = polysEA;
const PxVec3 vrayOrig = world2vertexSkew.transform(rayOrigin);
const PxVec3 vrayDir = world2vertexSkew.rotate(rayDir);
/*
Purely convex, plane-based algorithm.
Iterate over all planes of the convex, with the following rules:
* determine whether the ray origin is inside all of them or not.
* planes parallel to the ray direction are an immediate early-out if we're on their outer side (the plane normal is a separating axis)
* else
- for all planes the ray direction "enters" from the front side, track the one furthest along the ray direction (A)
- for all planes the ray direction "exits" from the back side, track the one furthest along the negative ray direction (B)
If the ray origin is outside the convex and, along the ray, A comes before B, the directed line stabs the convex at A.
*/
bool originInsideAllPlanes = true;
PxReal latestEntry = -FLT_MAX;
PxReal earliestExit = FLT_MAX;
// PxU32 bestPolygonIndex = 0;
hit.faceIndex = 0xffffffff;
for(PxU32 i=0;i<nPolys;i++)
{
const HullPolygonData& poly = polys[i];
const PxPlane& vertSpacePlane = poly.mPlane;
const PxReal distToPlane = vertSpacePlane.distance(vrayOrig);
const PxReal dn = vertSpacePlane.n.dot(vrayDir);
const PxReal distAlongRay = -distToPlane/dn; // PT: TODO: potential divide by zero here!
// PT: TODO: this is computed again in the last branch!
if(distToPlane > 0.0f)
originInsideAllPlanes = false; //origin not behind plane == ray starts outside the convex.
if(dn > 1E-7f) //the ray direction "exits" from the back side
{
earliestExit = physx::intrinsics::selectMin(earliestExit, distAlongRay);
}
else if(dn < -1E-7f) //the ray direction "enters" from the front side
{
if(distAlongRay > latestEntry)
{
latestEntry = distAlongRay;
hit.faceIndex = i;
}
}
else
{
//plane normal and ray dir are orthogonal
if(distToPlane > 0.0f)
return 0; //a plane is parallel to the ray -- and we're on its outer side -- so we definitely miss the entire convex!
}
}
if(originInsideAllPlanes) //ray starts inside convex
{
hit.distance = 0.0f;
hit.faceIndex = 0xffffffff;
hit.u = 0.0f;
hit.v = 0.0f;
hit.position = rayOrigin;
hit.normal = -rayDir;
hit.flags = PxHitFlag::eNORMAL|PxHitFlag::ePOSITION;
return 1;
}
// AP: changed to latestEntry < maxDist-1e-5f so that we have a conservatively negative result near end of ray
if(latestEntry < earliestExit && latestEntry > 0.0f && latestEntry < maxDist-1e-5f)
{
PxHitFlags outFlags = PxHitFlag::eFACE_INDEX;
if(hitFlags & PxHitFlag::ePOSITION)
{
outFlags |= PxHitFlag::ePOSITION;
const PxVec3 pointOnPlane = vrayOrig + latestEntry * vrayDir;
hit.position = pose.transform(Cm::toMat33(convexGeom.scale) * pointOnPlane);
}
hit.distance = latestEntry;
hit.u = 0.0f;
hit.v = 0.0f;
hit.normal = PxVec3(0.0f);
// Compute additional information if needed
if(hitFlags & PxHitFlag::eNORMAL)
{
outFlags |= PxHitFlag::eNORMAL;
//when we have nonuniform scaling we actually have to transform by the transpose of the inverse of vertex2worldSkew.M == transpose of world2vertexSkew:
hit.normal = world2vertexSkew.rotateTranspose(polys[hit.faceIndex].mPlane.n);
hit.normal.normalize();
}
hit.flags = outFlags;
return 1;
}
return 0;
}
PxU32 raycast_particlesystem(GU_RAY_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::ePARTICLESYSTEM);
PX_ASSERT(PxAbs(rayDir.magnitudeSquared() - 1)<1e-4f);
PX_UNUSED(threadContext);
PX_UNUSED(stride);
PX_UNUSED(rayDir);
PX_UNUSED(pose);
PX_UNUSED(rayOrigin);
PX_UNUSED(maxHits);
PX_UNUSED(maxDist);
PX_UNUSED(hits);
PX_UNUSED(hitFlags);
PX_UNUSED(geom);
return 0;
}
PxU32 raycast_softbody(GU_RAY_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::eTETRAHEDRONMESH);
PX_ASSERT(PxAbs(rayDir.magnitudeSquared() - 1)<1e-4f);
PX_UNUSED(threadContext);
PX_UNUSED(stride);
PX_UNUSED(rayDir);
PX_UNUSED(pose);
PX_UNUSED(rayOrigin);
PX_UNUSED(maxHits);
PX_UNUSED(maxDist);
PX_UNUSED(hits);
PX_UNUSED(hitFlags);
const PxTetrahedronMeshGeometry& meshGeom = static_cast<const PxTetrahedronMeshGeometry&>(geom);
PX_UNUSED(meshGeom);
//ML: need to implement raycastTetrahedronMesh
return 0;
}
PxU32 raycast_triangleMesh(GU_RAY_FUNC_PARAMS)
{
PX_UNUSED(threadContext);
PX_ASSERT(geom.getType() == PxGeometryType::eTRIANGLEMESH);
PX_ASSERT(PxAbs(rayDir.magnitudeSquared()-1)<1e-4f);
const PxTriangleMeshGeometry& meshGeom = static_cast<const PxTriangleMeshGeometry&>(geom);
TriangleMesh* meshData = static_cast<TriangleMesh*>(meshGeom.triangleMesh);
return Midphase::raycastTriangleMesh(meshData, meshGeom, pose, rayOrigin, rayDir, maxDist, hitFlags, maxHits, hits, stride);
}
PxU32 raycast_hairsystem(GU_RAY_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::eHAIRSYSTEM);
PX_ASSERT(PxAbs(rayDir.magnitudeSquared() - 1)<1e-4f);
PX_UNUSED(threadContext);
PX_UNUSED(stride);
PX_UNUSED(rayDir);
PX_UNUSED(pose);
PX_UNUSED(rayOrigin);
PX_UNUSED(maxHits);
PX_UNUSED(maxDist);
PX_UNUSED(hits);
PX_UNUSED(hitFlags);
PX_UNUSED(geom);
return 0;
}
namespace
{
struct HFTraceSegmentCallback
{
PX_NOCOPY(HFTraceSegmentCallback)
public:
PxU8* mHits;
const PxU32 mMaxHits;
const PxU32 mStride;
PxU32 mNbHits;
const HeightFieldUtil& mUtil;
const PxTransform& mPose;
const PxVec3& mRayDir;
const PxVec3& mLocalRayDir;
const PxVec3& mLocalRayOrig;
const PxHitFlags mHitFlags;
const bool mIsDoubleSided;
HFTraceSegmentCallback( PxGeomRaycastHit* hits, PxU32 maxHits, PxU32 stride, const PxHitFlags hitFlags, const HeightFieldUtil& hfUtil, const PxTransform& pose,
const PxVec3& rayDir, const PxVec3& localRayDir, const PxVec3& localRayOrig,
bool isDoubleSided) :
mHits (reinterpret_cast<PxU8*>(hits)),
mMaxHits (maxHits),
mStride (stride),
mNbHits (0),
mUtil (hfUtil),
mPose (pose),
mRayDir (rayDir),
mLocalRayDir (localRayDir),
mLocalRayOrig (localRayOrig),
mHitFlags (hitFlags),
mIsDoubleSided (isDoubleSided)
{
PX_ASSERT(maxHits > 0);
}
PX_FORCE_INLINE bool onEvent(PxU32, const PxU32*)
{
return true;
}
PX_FORCE_INLINE bool underFaceHit(const HeightFieldUtil&, const PxVec3&, const PxVec3&, PxF32, PxF32, PxF32, PxU32)
{
return true; // true means continue traversal
}
PxAgain faceHit(const HeightFieldUtil&, const PxVec3& aHitPoint, PxU32 aTriangleIndex, PxReal u, PxReal v)
{
// traversal is strictly sorted so there's no need to sort hits
if(mNbHits >= mMaxHits)
return false; // false = stop traversal
PxGeomRaycastHit& hit = *reinterpret_cast<PxGeomRaycastHit*>(mHits);
mNbHits++;
mHits += mStride;
hit.position = aHitPoint;
hit.faceIndex = aTriangleIndex;
hit.u = u;
hit.v = v;
hit.flags = PxHitFlag::eUV | PxHitFlag::eFACE_INDEX; // UVs and face index are always set
if(mHitFlags & PxHitFlag::eNORMAL)
{
// We need the normal for the dot product.
PxVec3 normal = mPose.q.rotate(mUtil.getNormalAtShapePoint(hit.position.x, hit.position.z));
normal.normalize();
if(mIsDoubleSided && normal.dot(mRayDir) > 0.0f) // comply with normal spec for double sided (should always face opposite rayDir)
hit.normal = -normal;
else
hit.normal = normal;
hit.flags |= PxHitFlag::eNORMAL;
}
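// Note: hit.position is still in shape space here; the distance must be computed
// before the world-space transform below.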
hit.distance = physx::intrinsics::selectMax(0.f, (hit.position - mLocalRayOrig).dot(mLocalRayDir));
if(mHitFlags & PxHitFlag::ePOSITION)
{
hit.position = mPose.transform(hit.position);
hit.flags |= PxHitFlag::ePOSITION;
}
return (mNbHits < mMaxHits); // true = continue traversal, false = stop traversal
}
};
}
PxU32 raycast_heightField(GU_RAY_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::eHEIGHTFIELD);
PX_ASSERT(maxHits && hits);
PX_UNUSED(threadContext);
const PxHeightFieldGeometry& hfGeom = static_cast<const PxHeightFieldGeometry&>(geom);
const PxTransform invAbsPose = pose.getInverse();
const PxVec3 localRayOrig = invAbsPose.transform(rayOrigin);
const PxVec3 localRayDir = invAbsPose.rotate(rayDir);
const bool isDoubleSided = hfGeom.heightFieldFlags.isSet(PxMeshGeometryFlag::eDOUBLE_SIDED);
const bool bothSides = isDoubleSided || (hitFlags & PxHitFlag::eMESH_BOTH_SIDES);
const HeightFieldTraceUtil hfUtil(hfGeom);
PxVec3 normRayDir = localRayDir;
normRayDir.normalizeSafe(); // nothing will happen if length is < PX_NORMALIZATION_EPSILON
// pretest whether we intersect the HF bounds: if we don't, early-exit; if we do, move the origin and shorten the maxDist
// to deal with precision issues with large maxDist
PxBounds3 hfLocalBounds;
hfUtil.computeLocalBounds(hfLocalBounds);
// PT: inflate the bounds like we do in the scene-tree (see PX-1179)
const PxVec3 center = hfLocalBounds.getCenter();
const PxVec3 extents = hfLocalBounds.getExtents() * 1.01f; //SQ_PRUNER_INFLATION;
hfLocalBounds.minimum = center - extents;
hfLocalBounds.maximum = center + extents;
PxVec3 localImpact;
PxReal t; // closest intersection, t==0 hit inside
PxU32 rval = rayAABBIntersect2(hfLocalBounds.minimum, hfLocalBounds.maximum, localRayOrig, localRayDir, localImpact, t);
// early exit if we miss the AABB
if (!rval)
return 0;
if (t > maxDist)
return 0;
// PT: if eMESH_ANY is used then eMESH_MULTIPLE won't be, and we'll stop the query after 1 hit is found. There is no difference
// between 'any hit' and 'closest hit' for HFs since hits are reported in order.
HFTraceSegmentCallback callback(hits, hitFlags.isSet(PxHitFlag::eMESH_MULTIPLE) ? maxHits : 1, stride, hitFlags, hfUtil, pose,
rayDir, localRayDir, localRayOrig, isDoubleSided); // make sure we return only 1 hit without eMESH_MULTIPLE
PxReal offset = 0.0f;
PxReal maxDistOffset = maxDist;
PxVec3 localRayOrigOffset = localRayOrig;
// if we don't start inside the AABB box, offset the start pos, because of precision issues with large maxDist
if(t > 0.0f)
{
offset = t - GU_RAY_SURFACE_OFFSET;
// move the rayOrig to offset start pos
localRayOrigOffset = localRayOrig + normRayDir*offset;
}
// shorten maxDist by the offset that was cut off, and clip it:
// we keep the shortened maxDist unless it is huge, in which case we clip it against the HF extents
maxDistOffset = PxMin(maxDist - offset, GU_RAY_SURFACE_OFFSET + 2.0f * PxMax(hfLocalBounds.maximum.x - hfLocalBounds.minimum.x, PxMax(hfLocalBounds.maximum.y - hfLocalBounds.minimum.y, hfLocalBounds.maximum.z - hfLocalBounds.minimum.z)));
hfUtil.traceSegment<HFTraceSegmentCallback, false, false>(localRayOrigOffset, normRayDir, maxDistOffset,
&callback, hfLocalBounds, !bothSides);
return callback.mNbHits;
}
static PxU32 raycast_custom(GU_RAY_FUNC_PARAMS)
{
const PxCustomGeometry& customGeom = static_cast<const PxCustomGeometry&>(geom);
if(customGeom.isValid())
return customGeom.callbacks->raycast(rayOrigin, rayDir, geom, pose, maxDist, hitFlags, maxHits, hits, stride, threadContext);
return 0;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// PT: table is not static because it's accessed as 'extern' within Gu (bypassing the function call).
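// The entry order must match PxGeometryType::Enum, since the table is indexed by geometry type.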
RaycastFunc gRaycastMap[] =
{
raycast_sphere,
raycast_plane,
raycast_capsule,
raycast_box,
raycast_convexMesh,
raycast_particlesystem,
raycast_softbody,
raycast_triangleMesh,
raycast_heightField,
raycast_hairsystem,
raycast_custom
};
PX_COMPILE_TIME_ASSERT(sizeof(gRaycastMap) / sizeof(gRaycastMap[0]) == PxGeometryType::eGEOMETRY_COUNT);
// PT: the function is used by external modules (Np, CCT, Sq)
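// Minimal usage sketch (hedged: parameter order follows the GU_RAY_FUNC_PARAMS convention
// used by the functions above; 'geom', 'pose', 'origin' and 'unitDir' are caller-provided):
//
// const RaycastFunc func = Gu::getRaycastFuncTable()[geom.getType()];
// PxGeomRaycastHit hit;
// const PxU32 nbHits = func(geom, pose, origin, unitDir, maxDist,
// PxHitFlag::eDEFAULT, 1, &hit, sizeof(PxGeomRaycastHit), NULL);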
const Gu::GeomRaycastTable& Gu::getRaycastFuncTable()
{
return gRaycastMap;
}
| 20,109 | C++ | 30.619497 | 239 | 0.718385 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuWindingNumber.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuWindingNumberT.h"
#include "GuWindingNumber.h"
namespace physx
{
namespace Gu
{
PxF32 computeWindingNumber(const Gu::BVHNode* tree, const PxVec3& q, PxF32 beta, const PxHashMap<PxU32, ClusterApproximation>& clusters,
const PxU32* triangles, const PxVec3* points)
{
return Gu::computeWindingNumber<PxF32, PxVec3>(tree, q, beta, clusters, triangles, points);
}
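// Same as above, with a default accuracy parameter (beta = 2).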
PxF32 computeWindingNumber(const Gu::BVHNode* tree, const PxVec3& q, const PxHashMap<PxU32, ClusterApproximation>& clusters,
const PxU32* triangles, const PxVec3* points)
{
return Gu::computeWindingNumber<PxF32, PxVec3>(tree, q, 2.0f, clusters, triangles, points);
}
void precomputeClusterInformation(const Gu::BVHNode* tree, const PxU32* triangles, const PxU32 numTriangles,
const PxVec3* points, PxHashMap<PxU32, ClusterApproximation>& result, PxI32 rootNodeIndex)
{
Gu::precomputeClusterInformation<PxF32, PxVec3>(tree, triangles, numTriangles, points, result, rootNodeIndex);
}
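// Brute-force reference version: sums the exact per-triangle contributions directly,
// without the BVH/cluster acceleration data. For a closed mesh the generalized winding
// number is approximately 1 for points inside and 0 for points outside.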
PxF32 computeWindingNumber(const PxVec3& q, const PxU32* triangles, const PxU32 numTriangles, const PxVec3* points)
{
PxReal windingNumber = 0.0f;
for (PxU32 i = 0; i < numTriangles; ++i)
{
const PxU32* tri = &triangles[3 * i];
windingNumber += Gu::evaluateExact<PxReal, PxVec3>(points[tri[0]], points[tri[1]], points[tri[2]], q);
}
return windingNumber;
}
}
}
| 3,055 | C++ | 45.30303 | 137 | 0.756465 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuAABBTree.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuAABBTreeBounds.h"
#include "GuAABBTree.h"
#include "GuAABBTreeBuildStats.h"
#include "GuBounds.h"
#include "GuAABBTreeNode.h"
#include "GuSAH.h"
#include "foundation/PxMathUtils.h"
#include "foundation/PxFPU.h"
using namespace physx;
using namespace Gu;
///////////////////////////////////////////////////////////////////////////////
void AABBTreeBounds::init(PxU32 nbBounds, const PxBounds3* bounds)
{
PX_FREE(mBounds);
// PT: we always allocate one extra box, to make sure we can safely use V4 loads on the array
mBounds = PX_ALLOCATE(PxBounds3, (nbBounds + 1), "AABBTreeBounds");
if(bounds)
PxMemCopy(mBounds, bounds, nbBounds*sizeof(PxBounds3));
}
void AABBTreeBounds::resize(PxU32 newSize, PxU32 previousSize)
{
PxBounds3* newBounds = PX_ALLOCATE(PxBounds3, (newSize + 1), "AABBTreeBounds");
if(mBounds && previousSize)
PxMemCopy(newBounds, mBounds, sizeof(PxBounds3)*previousSize);
PX_FREE(mBounds);
mBounds = newBounds;
}
void AABBTreeBounds::release()
{
if(!mUserAllocated)
PX_FREE(mBounds);
}
///////////////////////////////////////////////////////////////////////////////
NodeAllocator::NodeAllocator() : mPool(NULL), mCurrentSlabIndex(0), mTotalNbNodes(0)
{
}
NodeAllocator::~NodeAllocator()
{
release();
}
void NodeAllocator::release()
{
const PxU32 nbSlabs = mSlabs.size();
for (PxU32 i = 0; i<nbSlabs; i++)
{
Slab& s = mSlabs[i];
PX_DELETE_ARRAY(s.mPool);
}
mSlabs.reset();
mCurrentSlabIndex = 0;
mTotalNbNodes = 0;
}
void NodeAllocator::init(PxU32 nbPrimitives, PxU32 limit)
{
const PxU32 maxSize = nbPrimitives * 2 - 1; // PT: max possible #nodes for a complete tree
const PxU32 estimatedFinalSize = maxSize <= 1024 ? maxSize : maxSize / limit;
mPool = PX_NEW(AABBTreeBuildNode)[estimatedFinalSize];
PxMemZero(mPool, sizeof(AABBTreeBuildNode)*estimatedFinalSize);
// Setup initial node. Here we have a complete permutation of the app's primitives.
mPool->mNodeIndex = 0;
mPool->mNbPrimitives = nbPrimitives;
mSlabs.pushBack(Slab(mPool, 1, estimatedFinalSize));
mCurrentSlabIndex = 0;
mTotalNbNodes = 1;
}
// PT: TODO: inline this?
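// Children are always allocated as an adjacent pair, which is why callers can
// address the second child as "first child + 1".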
AABBTreeBuildNode* NodeAllocator::getBiNode()
{
mTotalNbNodes += 2;
Slab& currentSlab = mSlabs[mCurrentSlabIndex];
if (currentSlab.mNbUsedNodes + 2 <= currentSlab.mMaxNbNodes)
{
AABBTreeBuildNode* biNode = currentSlab.mPool + currentSlab.mNbUsedNodes;
currentSlab.mNbUsedNodes += 2;
return biNode;
}
else
{
// Allocate new slab
const PxU32 size = 1024;
AABBTreeBuildNode* pool = PX_NEW(AABBTreeBuildNode)[size];
PxMemZero(pool, sizeof(AABBTreeBuildNode)*size);
mSlabs.pushBack(Slab(pool, 2, size));
mCurrentSlabIndex++;
return pool;
}
}
///////////////////////////////////////////////////////////////////////////////
PxU32 Gu::reshuffle(PxU32 nb, PxU32* const PX_RESTRICT prims, const PxVec3* PX_RESTRICT centers, float splitValue, PxU32 axis)
{
// PT: to avoid calling the unsafe [] operator
const size_t ptrValue = size_t(centers) + axis*sizeof(float);
const PxVec3* PX_RESTRICT centersX = reinterpret_cast<const PxVec3*>(ptrValue);
// Loop through all node-related primitives. Their indices range from mNodePrimitives[0] to mNodePrimitives[mNbPrimitives-1].
// Those indices map the global list in the tree builder.
PxU32 nbPos = 0;
for(PxU32 i=0; i<nb; i++)
{
// Get index in global list
const PxU32 index = prims[i];
// Test against the splitting value. The primitive value is tested against the enclosing-box center.
// [We only need an approximate partition of the enclosing box here.]
const float primitiveValue = centersX[index].x;
PX_ASSERT(primitiveValue == centers[index][axis]);
// Reorganize the list of indices in this order: positive - negative.
if (primitiveValue > splitValue)
{
// Swap entries
prims[i] = prims[nbPos];
prims[nbPos] = index;
// Count primitives assigned to positive space
nbPos++;
}
}
return nbPos;
}
static PxU32 split(const PxBounds3& box, PxU32 nb, PxU32* const PX_RESTRICT prims, PxU32 axis, const AABBTreeBuildParams& params)
{
// Get node split value
float splitValue = 0.0f;
//float defaultSplitValue = box.getCenter(axis);
//(void)defaultSplitValue;
if(params.mBuildStrategy==BVH_SPLATTER_POINTS_SPLIT_GEOM_CENTER)
{
// PT: experimental attempt at replicating BV4_SPLATTER_POINTS_SPLIT_GEOM_CENTER, but with boxes instead of triangles.
const PxBounds3* bounds = params.mBounds->getBounds();
for(PxU32 i=0;i<nb;i++)
{
const PxBounds3& current = bounds[prims[i]];
splitValue += current.getCenter(axis);
// splitValue += (*VP.Vertex[0])[axis];
// splitValue += (*VP.Vertex[1])[axis];
// splitValue += (*VP.Vertex[2])[axis];
}
// splitValue /= float(nb*3);
splitValue /= float(nb);
}
else
{
// Default split value = middle of the axis (using only the box)
splitValue = box.getCenter(axis);
}
return reshuffle(nb, prims, params.mCache, splitValue, axis);
}
void AABBTreeBuildNode::subdivide(const AABBTreeBuildParams& params, BuildStats& stats, NodeAllocator& allocator, PxU32* const indices)
{
PxU32* const PX_RESTRICT primitives = indices + mNodeIndex;
const PxU32 nbPrims = mNbPrimitives;
// Compute global box & means for current node. The box is stored in mBV.
Vec4V meansV;
{
const PxBounds3* PX_RESTRICT boxes = params.mBounds->getBounds();
PX_ASSERT(boxes);
PX_ASSERT(primitives);
PX_ASSERT(nbPrims);
Vec4V minV = V4LoadU(&boxes[primitives[0]].minimum.x);
Vec4V maxV = V4LoadU(&boxes[primitives[0]].maximum.x);
meansV = V4LoadU(¶ms.mCache[primitives[0]].x);
for (PxU32 i = 1; i<nbPrims; i++)
{
const PxU32 index = primitives[i];
const Vec4V curMinV = V4LoadU(&boxes[index].minimum.x);
const Vec4V curMaxV = V4LoadU(&boxes[index].maximum.x);
meansV = V4Add(meansV, V4LoadU(¶ms.mCache[index].x));
minV = V4Min(minV, curMinV);
maxV = V4Max(maxV, curMaxV);
}
StoreBounds(mBV, minV, maxV);
const float coeff = 1.0f / float(nbPrims);
meansV = V4Scale(meansV, FLoad(coeff));
}
// Check the user-defined limit. Also ensures we stop subdividing if we reach a leaf node.
if (nbPrims <= params.mLimit)
return;
bool validSplit = true;
PxU32 nbPos;
{
// Compute variances
Vec4V varsV = V4Zero();
for (PxU32 i = 0; i<nbPrims; i++)
{
const PxU32 index = primitives[i];
Vec4V centerV = V4LoadU(¶ms.mCache[index].x);
centerV = V4Sub(centerV, meansV);
centerV = V4Mul(centerV, centerV);
varsV = V4Add(varsV, centerV);
}
const float coeffNb1 = 1.0f / float(nbPrims - 1);
varsV = V4Scale(varsV, FLoad(coeffNb1));
PX_ALIGN(16, PxVec4) vars;
V4StoreA(varsV, &vars.x);
// Choose axis with greatest variance
const PxU32 axis = PxLargestAxis(PxVec3(vars.x, vars.y, vars.z));
// Split along the axis
nbPos = split(mBV, nbPrims, primitives, axis, params);
// Check split validity
if (!nbPos || nbPos == nbPrims)
validSplit = false;
}
// Check the subdivision has been successful
if (!validSplit)
{
// Here, all boxes lie in the same sub-space. Two strategies:
// - if we are over the split limit, make an arbitrary 50-50 split
// - else stop subdividing
if (nbPrims>params.mLimit)
{
nbPos = nbPrims >> 1;
}
else return;
}
// Now create children and assign their pointers.
mPos = allocator.getBiNode();
stats.increaseCount(2);
// Assign children
PX_ASSERT(!isLeaf());
AABBTreeBuildNode* Pos = const_cast<AABBTreeBuildNode*>(mPos);
AABBTreeBuildNode* Neg = Pos + 1;
Pos->mNodeIndex = mNodeIndex;
Pos->mNbPrimitives = nbPos;
Neg->mNodeIndex = mNodeIndex + nbPos;
Neg->mNbPrimitives = mNbPrimitives - nbPos;
}
void AABBTreeBuildNode::_buildHierarchy(const AABBTreeBuildParams& params, BuildStats& stats, NodeAllocator& nodeBase, PxU32* const indices)
{
// Subdivide current node
subdivide(params, stats, nodeBase, indices);
// Recurse
if (!isLeaf())
{
AABBTreeBuildNode* Pos = const_cast<AABBTreeBuildNode*>(getPos());
PX_ASSERT(Pos);
AABBTreeBuildNode* Neg = Pos + 1;
Pos->_buildHierarchy(params, stats, nodeBase, indices);
Neg->_buildHierarchy(params, stats, nodeBase, indices);
}
stats.mTotalPrims += mNbPrimitives;
}
void AABBTreeBuildNode::subdivideSAH(const AABBTreeBuildParams& params, SAH_Buffers& buffers, BuildStats& stats, NodeAllocator& allocator, PxU32* const indices)
{
PxU32* const PX_RESTRICT primitives = indices + mNodeIndex;
const PxU32 nbPrims = mNbPrimitives;
// Compute global box for current node. The box is stored in mBV.
computeGlobalBox(mBV, nbPrims, params.mBounds->getBounds(), primitives);
// Check the user-defined limit. Also ensures we stop subdividing if we reach a leaf node.
if (nbPrims <= params.mLimit)
return;
/////
PxU32 leftCount;
if(!buffers.split(leftCount, nbPrims, primitives, params.mBounds->getBounds(), params.mCache))
{
// Invalid split => fallback to previous strategy
subdivide(params, stats, allocator, indices);
return;
}
/////
// Now create children and assign their pointers.
mPos = allocator.getBiNode();
stats.increaseCount(2);
// Assign children
PX_ASSERT(!isLeaf());
AABBTreeBuildNode* Pos = const_cast<AABBTreeBuildNode*>(mPos);
AABBTreeBuildNode* Neg = Pos + 1;
Pos->mNodeIndex = mNodeIndex;
Pos->mNbPrimitives = leftCount;
Neg->mNodeIndex = mNodeIndex + leftCount;
Neg->mNbPrimitives = mNbPrimitives - leftCount;
}
void AABBTreeBuildNode::_buildHierarchySAH(const AABBTreeBuildParams& params, SAH_Buffers& sah, BuildStats& stats, NodeAllocator& nodeBase, PxU32* const indices)
{
// Subdivide current node
subdivideSAH(params, sah, stats, nodeBase, indices);
// Recurse
if (!isLeaf())
{
AABBTreeBuildNode* Pos = const_cast<AABBTreeBuildNode*>(getPos());
PX_ASSERT(Pos);
AABBTreeBuildNode* Neg = Pos + 1;
Pos->_buildHierarchySAH(params, sah, stats, nodeBase, indices);
Neg->_buildHierarchySAH(params, sah, stats, nodeBase, indices);
}
stats.mTotalPrims += mNbPrimitives;
}
///////////////////////////////////////////////////////////////////////////////
static PxU32* initAABBTreeBuild(const AABBTreeBuildParams& params, NodeAllocator& nodeAllocator, BuildStats& stats)
{
const PxU32 numPrimitives = params.mNbPrimitives;
if(!numPrimitives)
return NULL;
// Init stats
stats.setCount(1);
// Initialize indices. This list will be modified during build.
PxU32* indices = PX_ALLOCATE(PxU32, numPrimitives, "AABB tree indices");
// Identity permutation
for(PxU32 i=0;i<numPrimitives;i++)
indices[i] = i;
// Allocate a pool of nodes
nodeAllocator.init(numPrimitives, params.mLimit);
// Compute box centers only once and cache them
params.mCache = PX_ALLOCATE(PxVec3, (numPrimitives+1), "cache");
const PxBounds3* PX_RESTRICT boxes = params.mBounds->getBounds();
const float half = 0.5f;
const FloatV halfV = FLoad(half);
for(PxU32 i=0;i<numPrimitives;i++)
{
const Vec4V curMinV = V4LoadU(&boxes[i].minimum.x);
const Vec4V curMaxV = V4LoadU(&boxes[i].maximum.x);
const Vec4V centerV = V4Scale(V4Add(curMaxV, curMinV), halfV);
V4StoreU(centerV, ¶ms.mCache[i].x);
}
return indices;
}
PxU32* Gu::buildAABBTree(const AABBTreeBuildParams& params, NodeAllocator& nodeAllocator, BuildStats& stats)
{
// initialize the build first
PxU32* indices = initAABBTreeBuild(params, nodeAllocator, stats);
if(!indices)
return NULL;
// Build the hierarchy
if(params.mBuildStrategy==BVH_SAH)
{
SAH_Buffers buffers(params.mNbPrimitives);
nodeAllocator.mPool->_buildHierarchySAH(params, buffers, stats, nodeAllocator, indices);
}
else
nodeAllocator.mPool->_buildHierarchy(params, stats, nodeAllocator, indices);
return indices;
}
void Gu::flattenTree(const NodeAllocator& nodeAllocator, BVHNode* dest, const PxU32* remap)
{
// PT: gathers all build nodes allocated so far and flatten them to a linear destination array of smaller runtime nodes
PxU32 offset = 0;
const PxU32 nbSlabs = nodeAllocator.mSlabs.size();
for(PxU32 s=0;s<nbSlabs;s++)
{
const NodeAllocator::Slab& currentSlab = nodeAllocator.mSlabs[s];
AABBTreeBuildNode* pool = currentSlab.mPool;
for(PxU32 i=0;i<currentSlab.mNbUsedNodes;i++)
{
dest[offset].mBV = pool[i].mBV;
if(pool[i].isLeaf())
{
PxU32 index = pool[i].mNodeIndex;
if(remap)
index = remap[index];
const PxU32 nbPrims = pool[i].getNbPrimitives();
PX_ASSERT(nbPrims<16);
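// Leaf encoding: bit 0 = leaf flag, bits 1-4 = primitive count (max 15),
// bits 5 and up = start index into the (remapped) primitive array.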
dest[offset].mData = (index<<5)|((nbPrims&15)<<1)|1;
}
else
{
PX_ASSERT(pool[i].mPos);
PxU32 localNodeIndex = 0xffffffff;
PxU32 nodeBase = 0;
for(PxU32 j=0;j<nbSlabs;j++)
{
if(pool[i].mPos >= nodeAllocator.mSlabs[j].mPool && pool[i].mPos < nodeAllocator.mSlabs[j].mPool + nodeAllocator.mSlabs[j].mNbUsedNodes)
{
localNodeIndex = PxU32(pool[i].mPos - nodeAllocator.mSlabs[j].mPool);
break;
}
nodeBase += nodeAllocator.mSlabs[j].mNbUsedNodes;
}
const PxU32 nodeIndex = nodeBase + localNodeIndex;
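// Internal-node encoding: bit 0 is clear, the remaining bits hold the index of the
// first child (the second child is stored right after it).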
dest[offset].mData = nodeIndex << 1;
}
offset++;
}
}
}
void Gu::buildAABBTree(PxU32 nbBounds, const AABBTreeBounds& bounds, PxArray<BVHNode>& tree)
{
PX_SIMD_GUARD
// build the BVH
BuildStats stats;
NodeAllocator nodeAllocator;
PxU32* indices = buildAABBTree(AABBTreeBuildParams(1, nbBounds, &bounds), nodeAllocator, stats);
PX_ASSERT(indices);
// store the computed hierarchy
tree.resize(stats.getCount());
PX_ASSERT(tree.size() == nodeAllocator.mTotalNbNodes);
// store the results into BVHNode list
flattenTree(nodeAllocator, tree.begin(), indices);
PX_FREE(indices); // PT: we don't need the indices for a complete tree
}
///////////////////////////////////////////////////////////////////////////////
// Progressive building
class Gu::FIFOStack : public PxUserAllocated
{
public:
FIFOStack() : mStack("SQFIFOStack"), mCurIndex(0) {}
~FIFOStack() {}
PX_FORCE_INLINE PxU32 getNbEntries() const { return mStack.size(); }
PX_FORCE_INLINE void push(AABBTreeBuildNode* entry) { mStack.pushBack(entry); }
bool pop(AABBTreeBuildNode*& entry);
private:
PxArray<AABBTreeBuildNode*> mStack;
PxU32 mCurIndex; //!< Current index within the container
};
bool Gu::FIFOStack::pop(AABBTreeBuildNode*& entry)
{
const PxU32 NbEntries = mStack.size(); // Get current number of entries
if (!NbEntries)
return false; // Nothing has been pushed yet, so there is nothing to pop. This is an invalid pop call.
entry = mStack[mCurIndex++]; // Get oldest entry, move to next one
if (mCurIndex == NbEntries)
{
// All values have been popped
mStack.clear();
mCurIndex = 0;
}
return true;
}
//~Progressive building
///////////////////////////////////////////////////////////////////////////////
BVHPartialRefitData::BVHPartialRefitData() : mParentIndices(NULL), mUpdateMap(NULL), mRefitHighestSetWord(0)
{
}
BVHPartialRefitData::~BVHPartialRefitData()
{
releasePartialRefitData(true);
}
void BVHPartialRefitData::releasePartialRefitData(bool clearRefitMap)
{
PX_FREE(mParentIndices);
PX_FREE(mUpdateMap);
if(clearRefitMap)
mRefitBitmask.clearAll();
mRefitHighestSetWord = 0;
}
static void createParentArray(PxU32 totalNbNodes, PxU32* parentIndices, const BVHNode* parentNode, const BVHNode* currentNode, const BVHNode* root)
{
const PxU32 parentIndex = PxU32(parentNode - root);
const PxU32 currentIndex = PxU32(currentNode - root);
PX_ASSERT(parentIndex<totalNbNodes);
PX_ASSERT(currentIndex<totalNbNodes);
PX_UNUSED(totalNbNodes);
parentIndices[currentIndex] = parentIndex;
if(!currentNode->isLeaf())
{
createParentArray(totalNbNodes, parentIndices, currentNode, currentNode->getPos(root), root);
createParentArray(totalNbNodes, parentIndices, currentNode, currentNode->getNeg(root), root);
}
}
PxU32* BVHPartialRefitData::getParentIndices()
{
// PT: lazy-create parent array. Memory is not wasted for purely static trees, or dynamic trees that only do "full refit".
if(!mParentIndices)
{
mParentIndices = PX_ALLOCATE(PxU32, mNbNodes, "AABB parent indices");
createParentArray(mNbNodes, mParentIndices, mNodes, mNodes, mNodes);
}
return mParentIndices;
}
void BVHPartialRefitData::createUpdateMap(PxU32 nbObjects)
{
// PT: we need an "update map" for PxBVH
// PT: TODO: consider refactoring with the AABBtree version
PX_FREE(mUpdateMap);
if(!nbObjects)
return;
mUpdateMap = PX_ALLOCATE(PxU32, nbObjects, "UpdateMap");
PxMemSet(mUpdateMap, 0xff, sizeof(PxU32)*nbObjects);
const PxU32 nbNodes = mNbNodes;
const BVHNode* nodes = mNodes;
const PxU32* indices = mIndices;
for(TreeNodeIndex i=0;i<nbNodes;i++)
{
if(nodes[i].isLeaf())
{
const PxU32 nbPrims = nodes[i].getNbRuntimePrimitives();
if(indices)
{
// PT: with multiple primitives per node, several mapping entries will point to the same node.
PX_ASSERT(nbPrims<16);
for(PxU32 j=0;j<nbPrims;j++)
{
const PxU32 index = nodes[i].getPrimitives(indices)[j];
PX_ASSERT(index<nbObjects);
mUpdateMap[index] = i;
}
}
else
{
PX_ASSERT(nbPrims==1);
const PxU32 index = nodes[i].getPrimitiveIndex();
PX_ASSERT(index<nbObjects);
mUpdateMap[index] = i;
}
}
}
}
///////////////////////////////////////////////////////////////////////////////
static PX_FORCE_INLINE PxU32 BitsToDwords(PxU32 nb_bits)
{
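// Number of 32-bit words needed to hold nb_bits bits, e.g. BitsToDwords(32)==1, BitsToDwords(33)==2.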
return (nb_bits>>5) + ((nb_bits&31) ? 1 : 0);
}
bool BitArray::init(PxU32 nb_bits)
{
mSize = BitsToDwords(nb_bits);
// Get ram for n bits
PX_FREE(mBits);
mBits = PX_ALLOCATE(PxU32, mSize, "BitArray::mBits");
// Set all bits to 0
clearAll();
return true;
}
void BitArray::resize(PxU32 maxBitNumber)
{
const PxU32 newSize = BitsToDwords(maxBitNumber);
if (newSize <= mSize)
return;
PxU32* newBits = PX_ALLOCATE(PxU32, newSize, "BitArray::mBits");
PxMemZero(newBits + mSize, (newSize - mSize) * sizeof(PxU32));
PxMemCopy(newBits, mBits, mSize*sizeof(PxU32));
PX_FREE(mBits);
mBits = newBits;
mSize = newSize;
}
///////////////////////////////////////////////////////////////////////////////
static PX_FORCE_INLINE PxU32 getNbPrimitives(PxU32 data) { return (data>>1)&15; }
static PX_FORCE_INLINE const PxU32* getPrimitives(const PxU32* base, PxU32 data) { return base + (data>>5); }
static PX_FORCE_INLINE const BVHNode* getPos(const BVHNode* base, PxU32 data) { return base + (data>>1); }
static PX_FORCE_INLINE PxU32 isLeaf(PxU32 data) { return data&1; }
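// Example of the node data encoding these helpers decode, as written by flattenTree:
// - leaf node:     data = (firstPrim<<5)|((nbPrims&15)<<1)|1  => isLeaf()!=0, getNbPrimitives()==nbPrims, prims at base+(data>>5)
// - internal node: data = (posChildIndex<<1)                  => isLeaf()==0, pos child at base+(data>>1), neg child right after it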
template<const bool hasIndices>
static PX_FORCE_INLINE void refitNode(BVHNode* PX_RESTRICT current, const PxBounds3* PX_RESTRICT boxes, const PxU32* PX_RESTRICT indices, BVHNode* PX_RESTRICT const nodeBase)
{
// PT: we can safely use V4 loads on both boxes and nodes here:
// - it's safe on boxes because we allocated one extra box in the pruning pool
// - it's safe on nodes because there's always some data within the node, after the BV
const PxU32 data = current->mData;
Vec4V resultMinV, resultMaxV;
if(isLeaf(data))
{
const PxU32 nbPrims = getNbPrimitives(data);
if(nbPrims)
{
if(hasIndices)
{
const PxU32* primitives = getPrimitives(indices, data);
resultMinV = V4LoadU(&boxes[*primitives].minimum.x);
resultMaxV = V4LoadU(&boxes[*primitives].maximum.x);
if(nbPrims>1)
{
const PxU32* last = primitives + nbPrims;
primitives++;
while(primitives!=last)
{
resultMinV = V4Min(resultMinV, V4LoadU(&boxes[*primitives].minimum.x));
resultMaxV = V4Max(resultMaxV, V4LoadU(&boxes[*primitives].maximum.x));
primitives++;
}
}
}
else
{
PX_ASSERT(nbPrims==1);
const PxU32 primIndex = data>>5;
resultMinV = V4LoadU(&boxes[primIndex].minimum.x);
resultMaxV = V4LoadU(&boxes[primIndex].maximum.x);
}
}
else
{
// Might happen after a node has been invalidated
const float max = GU_EMPTY_BOUNDS_EXTENTS;
resultMinV = V4Load(max);
resultMaxV = V4Load(-max);
}
}
else
{
const BVHNode* pos = getPos(nodeBase, data);
const BVHNode* neg = pos+1;
const PxBounds3& posBox = pos->mBV;
const PxBounds3& negBox = neg->mBV;
resultMinV = V4Min(V4LoadU(&posBox.minimum.x), V4LoadU(&negBox.minimum.x));
// resultMaxV = V4Max(V4LoadU(&posBox.maximum.x), V4LoadU(&negBox.maximum.x));
#if PX_INTEL_FAMILY && !defined(PX_SIMD_DISABLED)
Vec4V posMinV = V4LoadU(&posBox.minimum.z);
Vec4V negMinV = V4LoadU(&negBox.minimum.z);
posMinV = _mm_shuffle_ps(posMinV, posMinV, _MM_SHUFFLE(0, 3, 2, 1));
negMinV = _mm_shuffle_ps(negMinV, negMinV, _MM_SHUFFLE(0, 3, 2, 1));
resultMaxV = V4Max(posMinV, negMinV);
#else
// PT: fixes the perf issue but not really convincing
resultMaxV = Vec4V_From_Vec3V(V3Max(V3LoadU(&posBox.maximum.x), V3LoadU(&negBox.maximum.x)));
#endif
}
// PT: the V4 stores overwrite the data after the BV, but we just put it back afterwards
V4StoreU(resultMinV, &current->mBV.minimum.x);
V4StoreU(resultMaxV, &current->mBV.maximum.x);
current->mData = data;
}
template<const bool hasIndices>
static void refitLoop(const PxBounds3* PX_RESTRICT boxes, BVHNode* const PX_RESTRICT nodeBase, const PxU32* PX_RESTRICT indices, PxU32 nbNodes)
{
PX_ASSERT(boxes);
PX_ASSERT(nodeBase);
// Bottom-up update
PxU32 index = nbNodes;
while(index--)
{
BVHNode* current = nodeBase + index;
if(index)
PxPrefetch(current - 1);
// PxBounds3 before = current->mBV;
if(hasIndices)
refitNode<1>(current, boxes, indices, nodeBase);
else
refitNode<0>(current, boxes, indices, nodeBase);
// if(current->mBV.minimum==before.minimum && current->mBV.maximum==before.maximum)
// break;
}
}
void BVHCoreData::fullRefit(const PxBounds3* boxes)
{
if(mIndices)
refitLoop<1>(boxes, mNodes, mIndices, mNbNodes);
else
refitLoop<0>(boxes, mNodes, mIndices, mNbNodes);
}
void BVHPartialRefitData::markNodeForRefit(TreeNodeIndex nodeIndex)
{
BitArray* PX_RESTRICT refitBitmask = &mRefitBitmask;
if(!refitBitmask->getBits())
refitBitmask->init(mNbNodes);
PX_ASSERT(nodeIndex<mNbNodes);
const PxU32* PX_RESTRICT parentIndices = getParentIndices();
PxU32 refitHighestSetWord = mRefitHighestSetWord;
PxU32 currentIndex = nodeIndex;
while(1)
{
PX_ASSERT(currentIndex<mNbNodes);
if(refitBitmask->isSet(currentIndex))
{
// We can early exit if we already visited the node!
goto Exit;
}
else
{
refitBitmask->setBit(currentIndex);
const PxU32 currentMarkedWord = currentIndex>>5;
refitHighestSetWord = PxMax(refitHighestSetWord, currentMarkedWord);
const PxU32 parentIndex = parentIndices[currentIndex];
PX_ASSERT(parentIndex == 0 || parentIndex < currentIndex);
if(currentIndex == parentIndex)
break;
currentIndex = parentIndex;
}
}
Exit:
mRefitHighestSetWord = refitHighestSetWord;
}
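// Typical partial-refit usage (a sketch; assumes 'boxes' is the same bounds array the
// tree indexes into, and 'updateMap' maps object indices to node indices, see createUpdateMap):
//
//	for each moved object o:
//		data.markNodeForRefit(updateMap[o]);	// marks the node and its path to the root
//	data.refitMarkedNodes(boxes);				// single bottom-up pass over the marked nodes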
#define FIRST_VERSION
#ifdef FIRST_VERSION
template<const bool hasIndices>
static void refitMarkedLoop(const PxBounds3* PX_RESTRICT boxes, BVHNode* const PX_RESTRICT nodeBase, const PxU32* PX_RESTRICT indices, PxU32* PX_RESTRICT bits, PxU32 nbToGo)
{
#ifdef _DEBUG
PxU32 nbRefit=0;
#endif
PxU32 size = nbToGo;
while(size--)
{
// Test 32 bits at a time
const PxU32 currentBits = bits[size];
if(!currentBits)
continue;
PxU32 index = (size+1)<<5;
PxU32 mask = PxU32(1<<((index-1)&31));
PxU32 count=32;
while(count--)
{
index--;
PxPrefetch(nodeBase + index);
PX_ASSERT(size==index>>5);
PX_ASSERT(mask==PxU32(1<<(index&31)));
if(currentBits & mask)
{
if(hasIndices)
refitNode<1>(nodeBase + index, boxes, indices, nodeBase);
else
refitNode<0>(nodeBase + index, boxes, indices, nodeBase);
#ifdef _DEBUG
nbRefit++;
#endif
}
mask>>=1;
}
bits[size] = 0;
}
}
void BVHPartialRefitData::refitMarkedNodes(const PxBounds3* boxes)
{
if(!mRefitBitmask.getBits())
return; // No refit needed
{
/*const*/ PxU32* bits = const_cast<PxU32*>(mRefitBitmask.getBits());
PxU32 size = mRefitHighestSetWord+1;
#ifdef _DEBUG
if(1)
{
const PxU32 totalSize = mRefitBitmask.getSize();
for(PxU32 i=size;i<totalSize;i++)
{
PX_ASSERT(!bits[i]);
}
}
#endif
if(mIndices)
refitMarkedLoop<1>(boxes, mNodes, mIndices, bits, size);
else
refitMarkedLoop<0>(boxes, mNodes, mIndices, bits, size);
mRefitHighestSetWord = 0;
// mRefitBitmask.clearAll();
}
}
#endif
//#define SECOND_VERSION
#ifdef SECOND_VERSION
void BVHPartialRefitData::refitMarkedNodes(const PxBounds3* boxes)
{
	/*const*/ PxU32* bits = const_cast<PxU32*>(mRefitBitmask.getBits());
	if(!bits)
		return;	// No refit needed
	const PxU32 lastSetBit = mRefitBitmask.findLast();
	const PxU32* indices = mIndices;
	BVHNode* const nodeBase = mNodes;
	// PT: ### bitmap iterator pattern
	// Refit must run bottom-up (children before parents). Parent indices are always smaller
	// than child indices, so we walk the marked words from the highest one down, and within
	// each word from the highest set bit down.
	PxU32 w = (lastSetBit >> 5) + 1;
	while(w--)
	{
		PxU32 b = bits[w];
		while(b)
		{
			const PxU32 bitIndex = PxHighestSetBit(b);
			b &= ~(1u << bitIndex);
			const PxU32 index = (w << 5) | bitIndex;
			if(indices)
				refitNode<1>(nodeBase + index, boxes, indices, nodeBase);
			else
				refitNode<0>(nodeBase + index, boxes, indices, nodeBase);
		}
		bits[w] = 0;
	}
	mRefitHighestSetWord = 0;
//	mRefitBitmask.clearAll();
}
#endif
///////////////////////////////////////////////////////////////////////////////
AABBTree::AABBTree() : mTotalPrims(0)
{
// Progressive building
mStack = NULL;
//~Progressive building
}
AABBTree::~AABBTree()
{
release(false);
}
void AABBTree::release(bool clearRefitMap)
{
// Progressive building
PX_DELETE(mStack);
//~Progressive building
releasePartialRefitData(clearRefitMap);
// PT: TODO: move some to BVHCoreData dtor
PX_DELETE_ARRAY(mNodes);
PX_FREE(mIndices);
mNbNodes = 0;
mNbIndices = 0;
}
// Initialize nodes/indices from the input tree merge data
void AABBTree::initTree(const AABBTreeMergeData& tree)
{
PX_ASSERT(mIndices == NULL);
PX_ASSERT(mNodes == NULL);
PX_ASSERT(mParentIndices == NULL);
// allocate and copy the indices
mIndices = PX_ALLOCATE(PxU32, tree.mNbIndices, "AABB tree indices");
mNbIndices = tree.mNbIndices;
PxMemCopy(mIndices, tree.mIndices, sizeof(PxU32)*tree.mNbIndices);
// allocate and copy the nodes
mNodes = PX_NEW(BVHNode)[tree.mNbNodes];
mNbNodes = tree.mNbNodes;
PxMemCopy(mNodes, tree.mNodes, sizeof(BVHNode)*tree.mNbNodes);
}
// Shift the tree indices by the given offset. Used for merged trees, when the initial indices need to be shifted to match the indices in the current pruning pool
void AABBTree::shiftIndices(PxU32 offset)
{
for (PxU32 i = 0; i < mNbIndices; i++)
{
mIndices[i] += offset;
}
}
bool AABBTree::buildInit(const AABBTreeBuildParams& params, NodeAllocator& nodeAllocator, BuildStats& stats)
{
// Checks
const PxU32 nbPrimitives = params.mNbPrimitives;
if(!nbPrimitives)
return false;
// Release previous tree
release();
// Initialize indices. This list will be modified during build.
mNbIndices = nbPrimitives;
PxU32* indices = initAABBTreeBuild(params, nodeAllocator, stats);
if(!indices)
return false;
PX_ASSERT(!mIndices);
mIndices = indices;
return true;
}
void AABBTree::buildEnd(const AABBTreeBuildParams& params, NodeAllocator& nodeAllocator, const BuildStats& stats)
{
PX_FREE(params.mCache);
// Get back total number of nodes
mNbNodes = stats.getCount();
mTotalPrims = stats.mTotalPrims;
mNodes = PX_NEW(BVHNode)[mNbNodes];
PX_ASSERT(mNbNodes==nodeAllocator.mTotalNbNodes);
flattenTree(nodeAllocator, mNodes);
nodeAllocator.release();
}
bool AABBTree::build(const AABBTreeBuildParams& params, NodeAllocator& nodeAllocator)
{
const PxU32 nbPrimitives = params.mNbPrimitives;
if(!nbPrimitives)
return false;
// Release previous tree
release();
BuildStats stats;
mNbIndices = nbPrimitives;
mIndices = buildAABBTree(params, nodeAllocator, stats);
if(!mIndices)
return false;
buildEnd(params, nodeAllocator, stats);
return true;
}
void AABBTree::shiftOrigin(const PxVec3& shift)
{
BVHNode* const nodeBase = mNodes;
const PxU32 totalNbNodes = mNbNodes;
for(PxU32 i=0; i<totalNbNodes; i++)
{
BVHNode& current = nodeBase[i];
if((i+1) < totalNbNodes)
PxPrefetch(nodeBase + i + 1);
current.mBV.minimum -= shift;
current.mBV.maximum -= shift;
}
}
// Progressive building
static PxU32 incrementalBuildHierarchy(FIFOStack& stack, AABBTreeBuildNode* node, const AABBTreeBuildParams& params, BuildStats& stats, NodeAllocator& nodeBase, PxU32* const indices)
{
node->subdivide(params, stats, nodeBase, indices);
if(!node->isLeaf())
{
AABBTreeBuildNode* pos = const_cast<AABBTreeBuildNode*>(node->getPos());
PX_ASSERT(pos);
AABBTreeBuildNode* neg = pos + 1;
stack.push(neg);
stack.push(pos);
}
stats.mTotalPrims += node->mNbPrimitives;
return node->mNbPrimitives;
}
PxU32 AABBTree::progressiveBuild(const AABBTreeBuildParams& params, NodeAllocator& nodeAllocator, BuildStats& stats, PxU32 progress, PxU32 limit)
{
if(progress==0)
{
if(!buildInit(params, nodeAllocator, stats))
return PX_INVALID_U32;
mStack = PX_NEW(FIFOStack);
mStack->push(nodeAllocator.mPool);
return ++progress;	// move on to the build phase (returning 0 here would signal "done", see below)
}
else if(progress==1)
{
PxU32 stackCount = mStack->getNbEntries();
if(stackCount)
{
PxU32 Total = 0;
const PxU32 Limit = limit;
while(Total<Limit)
{
AABBTreeBuildNode* Entry;
if(mStack->pop(Entry))
Total += incrementalBuildHierarchy(*mStack, Entry, params, stats, nodeAllocator, mIndices);
else
break;
}
return progress;
}
buildEnd(params, nodeAllocator, stats);
PX_DELETE(mStack);
return 0; // Done!
}
return PX_INVALID_U32;
}
//~Progressive building
PX_FORCE_INLINE static void setLeafData(PxU32& leafData, const BVHNode& node, const PxU32 indicesOffset)
{
const PxU32 index = indicesOffset + (node.mData >> 5);
const PxU32 nbPrims = node.getNbPrimitives();
PX_ASSERT(nbPrims < 16);
leafData = (index << 5) | ((nbPrims & 15) << 1) | 1;
}
// Copy the tree into nodes. Update node indices, leaf indices.
void AABBTree::addRuntimeChilds(PxU32& nodeIndex, const AABBTreeMergeData& treeParams)
{
PX_ASSERT(nodeIndex < mNbNodes + treeParams.mNbNodes + 1);
const PxU32 baseNodeIndex = nodeIndex;
// copy the src tree into dest tree nodes, update its data
for (PxU32 i = 0; i < treeParams.mNbNodes; i++)
{
PX_ASSERT(nodeIndex < mNbNodes + treeParams.mNbNodes + 1);
mNodes[nodeIndex].mBV = treeParams.mNodes[i].mBV;
if (treeParams.mNodes[i].isLeaf())
{
setLeafData(mNodes[nodeIndex].mData, treeParams.mNodes[i], mNbIndices);
}
else
{
const PxU32 srcNodeIndex = baseNodeIndex + (treeParams.mNodes[i].getPosIndex());
mNodes[nodeIndex].mData = srcNodeIndex << 1;
mParentIndices[srcNodeIndex] = nodeIndex;
mParentIndices[srcNodeIndex + 1] = nodeIndex;
}
nodeIndex++;
}
}
// Merge tree into targetNode, where target node is a leaf
// 1. Allocate new nodes/parent, copy all the nodes/parents
// 2. Create new node at the end, copy the data from target node
// 3. Copy the merge tree after the new node, create the parent map for them, update the leaf indices
// Schematic view:
// Target Nodes: ...Tn...
// Input tree: R1->Rc0, Rc1...
// Merged tree: ...Tnc->...->Nc0,R1->Rc0,Rc1...
// where new node: Nc0==Tn and Tnc is not a leaf anymore and points to Nc0
void AABBTree::mergeRuntimeLeaf(BVHNode& targetNode, const AABBTreeMergeData& treeParams, PxU32 targetMergeNodeIndex)
{
PX_ASSERT(mParentIndices);
PX_ASSERT(targetNode.isLeaf());
// 1. Allocate new nodes/parent, copy all the nodes/parents
// allocate new runtime pool with max combine number of nodes
// we allocate only 1 additional node each merge
BVHNode* newRuntimePool = PX_NEW(BVHNode)[mNbNodes + treeParams.mNbNodes + 1];
PxU32* newParentIndices = PX_ALLOCATE(PxU32, (mNbNodes + treeParams.mNbNodes + 1), "AABB parent indices");
// copy the whole target nodes, we will add the new node at the end together with the merge tree
PxMemCopy(newRuntimePool, mNodes, sizeof(BVHNode)*(mNbNodes));
PxMemCopy(newParentIndices, mParentIndices, sizeof(PxU32)*(mNbNodes));
// 2. Create new node at the end, copy the data from target node
PxU32 nodeIndex = mNbNodes;
// copy the targetNode at the end of the new nodes
newRuntimePool[nodeIndex].mBV = targetNode.mBV;
newRuntimePool[nodeIndex].mData = targetNode.mData;
// update the parent information
newParentIndices[nodeIndex] = targetMergeNodeIndex;
// mark for refit
if (mRefitBitmask.getBits() && mRefitBitmask.isSet(targetMergeNodeIndex))
{
mRefitBitmask.setBit(nodeIndex);
const PxU32 currentMarkedWord = nodeIndex >> 5;
mRefitHighestSetWord = PxMax(mRefitHighestSetWord, currentMarkedWord);
}
// swap pointers
PX_DELETE_ARRAY(mNodes);
mNodes = newRuntimePool;
PX_FREE(mParentIndices);
mParentIndices = newParentIndices;
// 3. Copy the merge tree after the new node, create the parent map for them, update the leaf indices
nodeIndex++;
addRuntimeChilds(nodeIndex, treeParams);
PX_ASSERT(nodeIndex == mNbNodes + 1 + treeParams.mNbNodes);
// update the parent information for the input tree root node
mParentIndices[mNbNodes + 1] = targetMergeNodeIndex;
// fix the child information for the target node, was a leaf before
mNodes[targetMergeNodeIndex].mData = mNbNodes << 1;
// update the total number of nodes
mNbNodes = mNbNodes + 1 + treeParams.mNbNodes;
}
// Merge tree into targetNode, where target node is not a leaf
// 1. Allocate new nodes/parent, copy the nodes/parents till targetNodePosIndex
// 2. Create new node , copy the data from target node
// 3. Copy the rest of the target tree nodes/parents at the end -> targetNodePosIndex + 1 + treeParams.mNbNodes
// 4. Copy the merge tree after the new node, create the parent map for them, update the leaf indices
// 5. Go through the nodes copied at the end and fix the parents/children
// Schematic view:
// Target Nodes: ...Tn->...->Tc0,Tc1...
// Input tree: R1->Rc0, Rc1...
// Merged tree: ...Tn->...->Nc0,R1->Rc0,Rc1...,Tc0,Tc1...
// where new node: Nc0->...->Tc0,Tc1
void AABBTree::mergeRuntimeNode(BVHNode& targetNode, const AABBTreeMergeData& treeParams, PxU32 targetMergeNodeIndex)
{
PX_ASSERT(mParentIndices);
PX_ASSERT(!targetNode.isLeaf());
// Get the target node child pos, this is where we insert the new node and the input tree
const PxU32 targetNodePosIndex = targetNode.getPosIndex();
// 1. Allocate new nodes/parent, copy the nodes/parents till targetNodePosIndex
// allocate new runtime pool with max combine number of nodes
// we allocate only 1 additional node each merge
BVHNode* newRuntimePool = PX_NEW(BVHNode)[mNbNodes + treeParams.mNbNodes + 1];
PxU32* newParentIndices = PX_ALLOCATE(PxU32, (mNbNodes + treeParams.mNbNodes + 1), "AABB parent indices");
// copy the untouched part of the nodes and parents
PxMemCopy(newRuntimePool, mNodes, sizeof(BVHNode)*(targetNodePosIndex));
PxMemCopy(newParentIndices, mParentIndices, sizeof(PxU32)*(targetNodePosIndex));
PxU32 nodeIndex = targetNodePosIndex;
// 2. Create new node , copy the data from target node
newRuntimePool[nodeIndex].mBV = targetNode.mBV;
newRuntimePool[nodeIndex].mData = ((targetNode.mData >> 1) + 1 + treeParams.mNbNodes) << 1;
// update parent information
newParentIndices[nodeIndex] = targetMergeNodeIndex;
// handle mark for refit
if(mRefitBitmask.getBits() && mRefitBitmask.isSet(targetMergeNodeIndex))
{
mRefitBitmask.setBit(nodeIndex);
const PxU32 currentMarkedWord = nodeIndex >> 5;
mRefitHighestSetWord = PxMax(mRefitHighestSetWord, currentMarkedWord);
}
// 3. Copy the rest of the target tree nodes/parents at the end -> targetNodePosIndex + 1 + treeParams.mNbNodes
if(mNbNodes - targetNodePosIndex)
{
PX_ASSERT(mNbNodes - targetNodePosIndex > 0);
PxMemCopy(newRuntimePool + targetNodePosIndex + 1 + treeParams.mNbNodes, mNodes + targetNodePosIndex, sizeof(BVHNode)*(mNbNodes - targetNodePosIndex));
PxMemCopy(newParentIndices + targetNodePosIndex + 1 + treeParams.mNbNodes, mParentIndices + targetNodePosIndex, sizeof(PxU32)*(mNbNodes - targetNodePosIndex));
}
// swap the pointers, release the old memory
PX_DELETE_ARRAY(mNodes);
mNodes = newRuntimePool;
PX_FREE(mParentIndices);
mParentIndices = newParentIndices;
// 4. Copy the merge tree after the new node, create the parent map for them, update the leaf indices
nodeIndex++;
addRuntimeChilds(nodeIndex, treeParams);
PX_ASSERT(nodeIndex == targetNodePosIndex + 1 + treeParams.mNbNodes);
// update the total number of nodes
mNbNodes = mNbNodes + 1 + treeParams.mNbNodes;
// update the parent information for the input tree root node
mParentIndices[targetNodePosIndex + 1] = targetMergeNodeIndex;
// 5. Go through the nodes copied at the end and fix the parents/children
for (PxU32 i = targetNodePosIndex + 1 + treeParams.mNbNodes; i < mNbNodes; i++)
{
// check if the parent is the targetNode, if yes update the parent to new node
if(mParentIndices[i] == targetMergeNodeIndex)
{
mParentIndices[i] = targetNodePosIndex;
}
else
{
// if parent node has been moved, update the parent node
if(mParentIndices[i] >= targetNodePosIndex)
{
mParentIndices[i] = mParentIndices[i] + 1 + treeParams.mNbNodes;
}
else
{
// if parent has not been moved, update its child information
const PxU32 parentIndex = mParentIndices[i];
// update the child information to point to Pos child
if(i % 2 != 0)
{
const PxU32 srcNodeIndex = mNodes[parentIndex].getPosIndex();
// if child index points to a node that has been moved, update the child index
PX_ASSERT(!mNodes[parentIndex].isLeaf());
PX_ASSERT(srcNodeIndex > targetNodePosIndex);
mNodes[parentIndex].mData = (1 + treeParams.mNbNodes + srcNodeIndex) << 1;
}
}
}
if(!mNodes[i].isLeaf())
{
// update the child node index
const PxU32 srcNodeIndex = 1 + treeParams.mNbNodes + mNodes[i].getPosIndex();
mNodes[i].mData = srcNodeIndex << 1;
}
}
}
// traverse the target node, the tree is inside the targetNode, and find the best place where merge the tree
void AABBTree::traverseRuntimeNode(BVHNode& targetNode, const AABBTreeMergeData& treeParams, PxU32 nodeIndex)
{
const BVHNode& srcNode = treeParams.getRootNode();
PX_ASSERT(srcNode.mBV.isInside(targetNode.mBV));
// Check if the srcNode (tree) can fit inside any of the target children. If yes, traverse into that child
BVHNode& targetPosChild = *targetNode.getPos(mNodes);
if(srcNode.mBV.isInside(targetPosChild.mBV))
{
return traverseRuntimeNode(targetPosChild, treeParams, targetNode.getPosIndex());
}
BVHNode& targetNegChild = *targetNode.getNeg(mNodes);
if (srcNode.mBV.isInside(targetNegChild.mBV))
{
return traverseRuntimeNode(targetNegChild, treeParams, targetNode.getNegIndex());
}
// we cannot traverse target anymore, lets add the srcTree to current target node
if(targetNode.isLeaf())
mergeRuntimeLeaf(targetNode, treeParams, nodeIndex);
else
mergeRuntimeNode(targetNode, treeParams, nodeIndex);
}
// Merge the input tree into the current tree.
// Traverse the tree and find the smallest node where the whole new tree fits. When we find the node,
// we create one new node pointing to the original children and to the input tree root.
void AABBTree::mergeTree(const AABBTreeMergeData& treeParams)
{
// allocate new indices buffer
PxU32* newIndices = PX_ALLOCATE(PxU32, (mNbIndices + treeParams.mNbIndices), "AABB tree indices");
PxMemCopy(newIndices, mIndices, sizeof(PxU32)*mNbIndices);
PX_FREE(mIndices);
mIndices = newIndices;
mTotalPrims += treeParams.mNbIndices;
// copy the new indices, re-indexing with the provided indicesOffset. Note that indicesOffset
// must be provided, as the original mNbIndices can differ from indicesOffset due to object releases.
for (PxU32 i = 0; i < treeParams.mNbIndices; i++)
{
mIndices[mNbIndices + i] = treeParams.mIndicesOffset + treeParams.mIndices[i];
}
// check the mRefitBitmask if we fit all the new nodes
mRefitBitmask.resize(mNbNodes + treeParams.mNbNodes + 1);
// create the parent information so we can update it
getParentIndices();
// if new tree is inside the root AABB we will traverse the tree to find better node where to attach the tree subnodes
// if the root is a leaf we merge with the root.
if(treeParams.getRootNode().mBV.isInside(mNodes[0].mBV) && !mNodes[0].isLeaf())
{
traverseRuntimeNode(mNodes[0], treeParams, 0);
}
else
{
if(mNodes[0].isLeaf())
{
mergeRuntimeLeaf(mNodes[0], treeParams, 0);
}
else
{
mergeRuntimeNode(mNodes[0], treeParams, 0);
}
// increase the tree root AABB
mNodes[0].mBV.include(treeParams.getRootNode().mBV);
}
#ifdef _DEBUG
//verify parent indices
for (PxU32 i = 0; i < mNbNodes; i++)
{
if (i)
{
PX_ASSERT(mNodes[mParentIndices[i]].getPosIndex() == i || mNodes[mParentIndices[i]].getNegIndex() == i);
}
if (!mNodes[i].isLeaf())
{
PX_ASSERT(mParentIndices[mNodes[i].getPosIndex()] == i);
PX_ASSERT(mParentIndices[mNodes[i].getNegIndex()] == i);
}
}
// verify the tree nodes, leafs
for (PxU32 i = 0; i < mNbNodes; i++)
{
if (mNodes[i].isLeaf())
{
const PxU32 index = mNodes[i].mData >> 5;
const PxU32 nbPrim = mNodes[i].getNbPrimitives();
PX_ASSERT(index + nbPrim <= mNbIndices + treeParams.mNbIndices);
}
else
{
const PxU32 nodeIndex = (mNodes[i].getPosIndex());
PX_ASSERT(nodeIndex < mNbNodes);
}
}
#endif // _DEBUG
mNbIndices += treeParams.mNbIndices;
}
| 43,247 | C++ | 29.456338 | 182 | 0.703864 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuCapsule.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxIntrinsics.h"
#include "GuInternal.h"
#include "GuBox.h"
#include "GuCapsule.h"
using namespace physx;
/**
* Computes an OBB surrounding the capsule.
* \param capsule [in] the capsule
* \param box [out] the OBB
*/
void Gu::computeBoxAroundCapsule(const Gu::Capsule& capsule, Gu::Box& box)
{
// Box center = center of the two capsule's endpoints
box.center = capsule.computeCenter();
// Box extents
const PxF32 d = (capsule.p0 - capsule.p1).magnitude();
box.extents.x = capsule.radius + (d * 0.5f);
box.extents.y = capsule.radius;
box.extents.z = capsule.radius;
// Box orientation
if(d==0.0f)
{
box.rot = PxMat33(PxIdentity);
}
else
{
PxVec3 dir, right, up;
PxComputeBasisVectors(capsule.p0, capsule.p1, dir, right, up);
box.setAxes(dir, right, up);
}
}
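// Worked example: for a capsule of radius 1.0 with endpoints (0,0,0) and (2,0,0),
// d = 2, so the OBB center is (1,0,0) and its extents are (2,1,1) - the x extent
// covers the half-segment plus the radius, y/z cover the radius only.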
| 2,469 | C++ | 38.206349 | 74 | 0.742001 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuSAH.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_SAH_H
#define GU_SAH_H
/** \addtogroup geomutils
@{
*/
#include "foundation/PxBounds3.h"
#include "CmRadixSort.h"
namespace physx
{
namespace Gu
{
struct SAH_Buffers
{
SAH_Buffers(PxU32 nb_prims);
~SAH_Buffers();
bool split(PxU32& leftCount, PxU32 nb, const PxU32* PX_RESTRICT prims, const PxBounds3* PX_RESTRICT boxes, const PxVec3* PX_RESTRICT centers);
Cm::RadixSortBuffered mSorters[3];
float* mKeys;
float* mCumulativeLower;
float* mCumulativeUpper;
PxU32 mNb;
};
}
}
/** @} */
#endif
| 2,254 | C | 35.967213 | 148 | 0.740018 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuExtendedBucketPruner.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_EXTENDED_BUCKET_PRUNER_H
#define GU_EXTENDED_BUCKET_PRUNER_H
#include "GuPrunerTypedef.h"
#include "GuAABBTreeUpdateMap.h"
#include "foundation/PxHashMap.h"
#include "GuAABBTreeBounds.h"
#include "GuSecondaryPruner.h"
namespace physx
{
class PxRenderOutput;
namespace Gu
{
class AABBTreeMergeData;
// Extended bucket pruner data: if an object belongs to the tree of trees, we need to
// remember the node of its subtree, the tree it belongs to and the main tree node
struct ExtendedBucketPrunerData
{
PxU32 mTimeStamp; // timestamp
TreeNodeIndex mSubTreeNode; // sub tree node index
PxU32 mMergeIndex; // index in bounds and merged trees array
};
// Merged tree structure, holds a tree and its timestamp. Released when no objects are in the tree
// or its timestamped objects are released
struct MergedTree
{
AABBTree* mTree; // AABB tree
size_t mTimeStamp; // timestamp
};
// hashing function for PrunerPayload key
// PT: TODO: move this to PrunerPayload?
struct ExtendedBucketPrunerHash
{
PX_FORCE_INLINE uint32_t operator()(const PrunerPayload& payload) const
{
#if PX_P64_FAMILY
// const PxU32 h0 = PxHash((const void*)payload.data[0]);
// const PxU32 h1 = PxHash((const void*)payload.data[1]);
const PxU32 h0 = PxU32(PX_MAX_U32 & payload.data[0]);
const PxU32 h1 = PxU32(PX_MAX_U32 & payload.data[1]);
return physx::PxComputeHash(PxU64(h0) | (PxU64(h1) << 32));
#else
return physx::PxComputeHash(PxU64(payload.data[0]) | (PxU64(payload.data[1]) << 32));
#endif
}
PX_FORCE_INLINE bool equal(const PrunerPayload& k0, const PrunerPayload& k1) const
{
return (k0.data[0] == k1.data[0]) && (k0.data[1] == k1.data[1]);
}
};
// A.B. replace, this is useless, need to be able to traverse the map and release while traversing, also eraseAt failed
typedef PxHashMap<PrunerPayload, ExtendedBucketPrunerData, ExtendedBucketPrunerHash> ExtendedBucketPrunerMap;
// Extended bucket pruner holds single objects in a bucket pruner and AABBtrees in a tree of trees.
// Base usage of ExtendedBucketPruner is for dynamic AABBPruner new objects, that did not make it
// into new tree. Single objects go directly into a bucket pruner, while merged AABBtrees
// go into a tree of trees.
// PT: TODO: this is not a Pruner (doesn't use the Pruner API) so its name should be e.g. "ExtendedBucketPrunerCore".
// And it's also not always using a bucket pruner... so the whole "ExtendedBucketPruner" name everywhere is wrong.
class ExtendedBucketPruner
{
public:
ExtendedBucketPruner(PxU64 contextID, CompanionPrunerType type, const PruningPool* pool);
~ExtendedBucketPruner();
// release
void release();
// add single object into a bucket pruner directly
PX_FORCE_INLINE bool addObject(const PrunerPayload& object, PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform, PxU32 timeStamp, const PoolIndex poolIndex)
{
return mCompanion ? mCompanion->addObject(object, handle, worldAABB, transform, timeStamp, poolIndex) : true;
}
// add AABB tree from pruning structure - adds new primitive into main AABB tree
void addTree(const AABBTreeMergeData& mergeData, PxU32 timeStamp);
// update object
bool updateObject(const PxBounds3& worldAABB, const PxTransform& transform, const PrunerPayload& object, PrunerHandle handle, const PoolIndex poolIndex);
// remove object, removed object is replaced in pruning pool by swapped object, indices need to be updated
bool removeObject(const PrunerPayload& object, PrunerHandle handle, PxU32 objectIndex, const PrunerPayload& swapObject, PxU32 swapObjectIndex);
// swap object index, the object index can be in core pruner or tree of trees
void swapIndex(PxU32 objectIndex, const PrunerPayload& swapObject, PxU32 swapObjectIndex, bool corePrunerIncluded = true);
// refit marked nodes in tree of trees
void refitMarkedNodes(const PxBounds3* boxes);
// notify timestampChange - swap trees in incremental pruner
PX_FORCE_INLINE void timeStampChange()
{
if(mCompanion)
mCompanion->timeStampChange();
}
// look for objects marked with input timestamp everywhere in the structure, and remove them. This is the same
// as calling 'removeObject' individually for all these objects, but much more efficient. Returns number of removed objects.
PxU32 removeMarkedObjects(PxU32 timeStamp);
// queries against the pruner
bool raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback&) const;
bool overlap(const ShapeData& queryVolume, PrunerOverlapCallback&) const;
bool sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback&) const;
// origin shift
void shiftOrigin(const PxVec3& shift);
// debug visualize
void visualize(PxRenderOutput& out, PxU32 color) const;
PX_FORCE_INLINE void build()
{
if(mCompanion)
mCompanion->build();
}
PX_FORCE_INLINE PxU32 getNbObjects() const
{
const PxU32 nb = mCompanion ? mCompanion->getNbObjects() : 0;
return nb + mExtendedBucketPrunerMap.size();
}
void getGlobalBounds(PxBounds3&) const;
private:
// separate call for indices invalidation, object can be either in AABBPruner or Bucket pruner, but the swapped object can be
// in the tree of trees
void invalidateObject(const ExtendedBucketPrunerData& object, PxU32 objectIndex, const PrunerPayload& swapObject, PxU32 swapObjectIndex);
void resize(PxU32 size);
void buildMainAABBTree();
void cleanTrees();
#if PX_DEBUG
// Extended bucket pruner validity check
bool checkValidity();
#endif
CompanionPruner* mCompanion; // Companion pruner for single objects
const PruningPool* mPruningPool; // Pruning pool from AABB pruner
ExtendedBucketPrunerMap mExtendedBucketPrunerMap; // Map holding objects from tree merge - objects in tree of trees
AABBTree* mMainTree; // Main tree holding merged trees
AABBTreeUpdateMap mMainTreeUpdateMap; // Main tree updated map - merged trees index to nodes
AABBTreeUpdateMap mMergeTreeUpdateMap; // Merged tree update map used while tree is merged
AABBTreeBounds mBounds; // Merged trees bounds used for main tree building
MergedTree* mMergedTrees; // Merged trees
PxU32 mCurrentTreeIndex; // Current trees index
PxU32 mCurrentTreeCapacity; // Current tress capacity
bool mTreesDirty; // Dirty marker
};
}
}
#endif
| 8,527 | C | 44.121693 | 188 | 0.718658 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuSqInternal.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuSqInternal.h"
#include "CmVisualization.h"
#include "GuAABBTree.h"
#include "GuAABBTreeNode.h"
#include "GuIncrementalAABBTree.h"
#include "GuBVH.h"
using namespace physx;
using namespace Cm;
using namespace Gu;
static void drawBVH(const BVHNode* root, const BVHNode* node, PxRenderOutput& out_)
{
renderOutputDebugBox(out_, node->mBV);
if(node->isLeaf())
return;
drawBVH(root, node->getPos(root), out_);
drawBVH(root, node->getNeg(root), out_);
}
void visualizeTree(PxRenderOutput& out, PxU32 color, const BVH* tree)
{
if(tree && tree->getNodes())
{
out << PxTransform(PxIdentity);
out << color;
drawBVH(tree->getNodes(), tree->getNodes(), out);
}
}
void visualizeTree(PxRenderOutput& out, PxU32 color, const AABBTree* tree)
{
if(tree && tree->getNodes())
{
out << PxTransform(PxIdentity);
out << color;
drawBVH(tree->getNodes(), tree->getNodes(), out);
}
}
void visualizeTree(PxRenderOutput& out, PxU32 color, const IncrementalAABBTree* tree, DebugVizCallback* cb)
{
if(tree && tree->getNodes())
{
struct Local
{
static void _draw(const IncrementalAABBTreeNode* root, const IncrementalAABBTreeNode* node, PxRenderOutput& out_, DebugVizCallback* cb_)
{
PxBounds3 bounds;
V4StoreU(node->mBVMin, &bounds.minimum.x);
PX_ALIGN(16, PxVec4) max4;
V4StoreA(node->mBVMax, &max4.x);
bounds.maximum = PxVec3(max4.x, max4.y, max4.z);
bool discard = false;
if(cb_)
discard = cb_->visualizeNode(*node, bounds);
if(!discard)
Cm::renderOutputDebugBox(out_, bounds);
if(node->isLeaf())
return;
_draw(root, node->getPos(root), out_, cb_);
_draw(root, node->getNeg(root), out_, cb_);
}
};
out << PxTransform(PxIdentity);
out << color;
Local::_draw(tree->getNodes(), tree->getNodes(), out, cb);
}
}
| 3,507 | C++ | 33.392157 | 139 | 0.718848 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuBox.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxIntrinsics.h"
#include "GuBoxConversion.h"
#include "GuInternal.h"
using namespace physx;
void Gu::Box::create(const Gu::Capsule& capsule)
{
// Box center = center of the two LSS's endpoints
center = capsule.computeCenter();
// Box orientation
const PxVec3 dir = capsule.p1 - capsule.p0;
const float d = dir.magnitude();
if(d!=0.0f)
{
rot.column0 = dir / d;
PxComputeBasisVectors(rot.column0, rot.column1, rot.column2);
}
else
rot = PxMat33(PxIdentity);
// Box extents
extents.x = capsule.radius + (d * 0.5f);
extents.y = capsule.radius;
extents.z = capsule.radius;
}
/**
Returns edges.
\return 24 indices (12 edges) indexing the list returned by ComputePoints()
*/
const PxU8* Gu::getBoxEdges()
{
// 7+------+6 0 = ---
// /| /| 1 = +--
// / | / | 2 = ++-
// / 4+---/--+5 3 = -+-
// 3+------+2 / y z 4 = --+
// | / | / | / 5 = +-+
// |/ |/ |/ 6 = +++
// 0+------+1 *---x 7 = -++
static PxU8 Indices[] = {
0, 1, 1, 2, 2, 3, 3, 0,
7, 6, 6, 5, 5, 4, 4, 7,
1, 5, 6, 2,
3, 7, 4, 0
};
return Indices;
}
void Gu::computeOBBPoints(PxVec3* PX_RESTRICT pts, const PxVec3& center, const PxVec3& extents, const PxVec3& base0, const PxVec3& base1, const PxVec3& base2)
{
PX_ASSERT(pts);
// "Rotated extents"
const PxVec3 axis0 = base0 * extents.x;
const PxVec3 axis1 = base1 * extents.y;
const PxVec3 axis2 = base2 * extents.z;
// 7+------+6 0 = ---
// /| /| 1 = +--
// / | / | 2 = ++-
// / 4+---/--+5 3 = -+-
// 3+------+2 / y z 4 = --+
// | / | / | / 5 = +-+
// |/ |/ |/ 6 = +++
// 0+------+1 *---x 7 = -++
// Original code: 24 vector ops
/* pts[0] = box.center - Axis0 - Axis1 - Axis2;
pts[1] = box.center + Axis0 - Axis1 - Axis2;
pts[2] = box.center + Axis0 + Axis1 - Axis2;
pts[3] = box.center - Axis0 + Axis1 - Axis2;
pts[4] = box.center - Axis0 - Axis1 + Axis2;
pts[5] = box.center + Axis0 - Axis1 + Axis2;
pts[6] = box.center + Axis0 + Axis1 + Axis2;
pts[7] = box.center - Axis0 + Axis1 + Axis2;*/
// Rewritten: 12 vector ops
pts[0] = pts[3] = pts[4] = pts[7] = center - axis0;
pts[1] = pts[2] = pts[5] = pts[6] = center + axis0;
PxVec3 tmp = axis1 + axis2;
pts[0] -= tmp;
pts[1] -= tmp;
pts[6] += tmp;
pts[7] += tmp;
tmp = axis1 - axis2;
pts[2] += tmp;
pts[3] += tmp;
pts[4] -= tmp;
pts[5] -= tmp;
}
| 4,125 | C++ | 31.234375 | 158 | 0.620848 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuAABBTreeQuery.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_AABBTREEQUERY_H
#define GU_AABBTREEQUERY_H
#include "GuBVHTestsSIMD.h"
#include "GuAABBTreeBounds.h"
#include "foundation/PxInlineArray.h"
#include "GuAABBTreeNode.h"
namespace physx
{
namespace Gu
{
#define RAW_TRAVERSAL_STACK_SIZE 256
//////////////////////////////////////////////////////////////////////////
static PX_FORCE_INLINE void getBoundsTimesTwo(Vec4V& center, Vec4V& extents, const PxBounds3* bounds, PxU32 poolIndex)
{
const PxBounds3* objectBounds = bounds + poolIndex;
// PT: it's safe to V4LoadU because the pointer comes from the AABBTreeBounds class
const Vec4V minV = V4LoadU(&objectBounds->minimum.x);
const Vec4V maxV = V4LoadU(&objectBounds->maximum.x);
center = V4Add(maxV, minV);
extents = V4Sub(maxV, minV);
}
//////////////////////////////////////////////////////////////////////////
template<const bool tHasIndices, typename Test, typename Node, typename QueryCallback>
static PX_FORCE_INLINE bool doOverlapLeafTest(const Test& test, const Node* node, const PxBounds3* bounds, const PxU32* indices, QueryCallback& visitor)
{
PxU32 nbPrims = node->getNbPrimitives();
const bool doBoxTest = nbPrims > 1;
const PxU32* prims = tHasIndices ? node->getPrimitives(indices) : NULL;
while(nbPrims--)
{
const PxU32 primIndex = tHasIndices ? *prims++ : node->getPrimitiveIndex();
if(doBoxTest)
{
Vec4V center2, extents2;
getBoundsTimesTwo(center2, extents2, bounds, primIndex);
const float half = 0.5f;
const FloatV halfV = FLoad(half);
const Vec4V extents_ = V4Scale(extents2, halfV);
const Vec4V center_ = V4Scale(center2, halfV);
if(!test(Vec3V_From_Vec4V(center_), Vec3V_From_Vec4V(extents_)))
continue;
}
if(!visitor.invoke(primIndex))
return false;
}
return true;
}
template<const bool tHasIndices, typename Test, typename Tree, typename Node, typename QueryCallback>
class AABBTreeOverlap
{
public:
bool operator()(const AABBTreeBounds& treeBounds, const Tree& tree, const Test& test, QueryCallback& visitor)
{
const PxBounds3* bounds = treeBounds.getBounds();
PxInlineArray<const Node*, RAW_TRAVERSAL_STACK_SIZE> stack;
stack.forceSize_Unsafe(RAW_TRAVERSAL_STACK_SIZE);
const Node* const nodeBase = tree.getNodes();
stack[0] = nodeBase;
PxU32 stackIndex = 1;
while(stackIndex > 0)
{
const Node* node = stack[--stackIndex];
Vec3V center, extents;
node->getAABBCenterExtentsV(&center, &extents);
while(test(center, extents))
{
if(node->isLeaf())
{
if(!doOverlapLeafTest<tHasIndices, Test, Node>(test, node, bounds, tree.getIndices(), visitor))
return false;
break;
}
const Node* children = node->getPos(nodeBase);
node = children;
stack[stackIndex++] = children + 1;
if(stackIndex == stack.capacity())
stack.resizeUninitialized(stack.capacity() * 2);
node->getAABBCenterExtentsV(&center, &extents);
}
}
return true;
}
};
//////////////////////////////////////////////////////////////////////////
template <const bool tInflate, const bool tHasIndices, typename Node, typename QueryCallback> // use inflate=true for sweeps, inflate=false for raycasts
static PX_FORCE_INLINE bool doLeafTest( const Node* node, Gu::RayAABBTest& test, const PxBounds3* bounds, const PxU32* indices, PxReal& maxDist, QueryCallback& pcb)
{
PxU32 nbPrims = node->getNbPrimitives();
const bool doBoxTest = nbPrims > 1;
const PxU32* prims = tHasIndices ? node->getPrimitives(indices) : NULL;
while(nbPrims--)
{
const PxU32 primIndex = tHasIndices ? *prims++ : node->getPrimitiveIndex();
if(doBoxTest)
{
Vec4V center_, extents_;
getBoundsTimesTwo(center_, extents_, bounds, primIndex);
if(!test.check<tInflate>(Vec3V_From_Vec4V(center_), Vec3V_From_Vec4V(extents_)))
continue;
}
// PT:
// - 'maxDist' is the current best distance. It can be seen as a "maximum allowed distance" (as passed to the
// template by users initially) but also as the "current minimum impact distance", so the name is misleading.
// Either way this is where we write & communicate the final/best impact distance to users.
//
// - the invoke function also takes a distance parameter, and this one is in/out. In input we must pass the
// current best distance to the leaf node, so that subsequent leaf-level queries can cull things away as
// much as possible. In output users return a shrunk distance value if they found a hit. We need to pass a
// copy of 'maxDist' ('md') since it would be too dangerous to rely on the arbitrary user code to always do
// the right thing. In particular if we'd pass 'maxDist' to invoke directly, and the called code would NOT
// respect the passed max value, it could potentially return a hit further than the best 'maxDist'. At which
// point the '(md < oldMaxDist)' test would fail but the damage would have already been done ('maxDist' would
// have already been overwritten with a larger value than before). Hence, we need 'md'.
//
// - now 'oldMaxDist' however is more subtle. In theory we wouldn't need it and we could just use '(md < maxDist)'
// in the test below. But that opens the door to subtle bugs: 'maxDist' is a reference to some value somewhere
// in the user's code, and we call the same user in invoke. It turns out that the invoke code can access and
// modify 'maxDist' on their side, even if we do not pass it to invoke. It's basically the same problem as
// before, but much more difficult to see. It does happen with the current PhysX implementations of the invoke
// functions: they modify the 'md' that we send them, but *also* 'maxDist' without the code below knowing
// about it. So the subsequent test fails again because md == maxDist. A potential solution would have been to
// work on a local copy of 'maxDist' in operator(), only writing out the final distance when returning from the
// function. Another solution used below is to introduce that local copy just here in the leaf code: that's
// where 'oldMaxDist' comes from.
PxReal oldMaxDist = maxDist;
PxReal md = maxDist;
if(!pcb.invoke(md, primIndex))
return false;
if(md < oldMaxDist)
{
maxDist = md;
test.setDistance(md);
}
}
return true;
}
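// A minimal sketch of a well-behaved query callback honoring the contract described
// above (hypothetical type and narrow-phase test, not part of this header): the
// callback must only ever shrink the distance it is given, never grow it.
//
//	struct ClosestHitCB
//	{
//		PxU32	mClosestPrim;
//		bool	invoke(PxReal& dist, PxU32 primIndex)
//		{
//			PxReal t;
//			if(rayHitsPrimitive(primIndex, t) && t < dist)	// hypothetical narrow-phase test
//			{
//				dist = t;			// shrink only
//				mClosestPrim = primIndex;
//			}
//			return true;			// false would abort the traversal
//		}
//	};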
//////////////////////////////////////////////////////////////////////////
template <const bool tInflate, const bool tHasIndices, typename Tree, typename Node, typename QueryCallback> // use inflate=true for sweeps, inflate=false for raycasts
class AABBTreeRaycast
{
public:
bool operator()(
const AABBTreeBounds& treeBounds, const Tree& tree,
const PxVec3& origin, const PxVec3& unitDir, PxReal& maxDist, const PxVec3& inflation,
QueryCallback& pcb)
{
const PxBounds3* bounds = treeBounds.getBounds();
// PT: we will pass center*2 and extents*2 to the ray-box code, to save some work per-box
// So we initialize the test with values multiplied by 2 as well, to get correct results
Gu::RayAABBTest test(origin*2.0f, unitDir*2.0f, maxDist, inflation*2.0f);
PxInlineArray<const Node*, RAW_TRAVERSAL_STACK_SIZE> stack;
stack.forceSize_Unsafe(RAW_TRAVERSAL_STACK_SIZE);
const Node* const nodeBase = tree.getNodes();
stack[0] = nodeBase;
PxU32 stackIndex = 1;
while(stackIndex--)
{
const Node* node = stack[stackIndex];
Vec3V center, extents;
node->getAABBCenterExtentsV2(&center, &extents);
if(test.check<tInflate>(center, extents)) // TODO: try timestamp ray shortening to skip this
{
while(!node->isLeaf())
{
const Node* children = node->getPos(nodeBase);
Vec3V c0, e0;
children[0].getAABBCenterExtentsV2(&c0, &e0);
const PxU32 b0 = test.check<tInflate>(c0, e0);
Vec3V c1, e1;
children[1].getAABBCenterExtentsV2(&c1, &e1);
const PxU32 b1 = test.check<tInflate>(c1, e1);
if(b0 && b1) // if both intersect, push the one with the further center on the stack for later
{
// & 1 because FAllGrtr behavior differs across platforms
const PxU32 bit = FAllGrtr(V3Dot(V3Sub(c1, c0), test.mDir), FZero()) & 1;
stack[stackIndex++] = children + bit;
node = children + (1 - bit);
if(stackIndex == stack.capacity())
stack.resizeUninitialized(stack.capacity() * 2);
}
else if(b0)
node = children;
else if(b1)
node = children + 1;
else
goto skip_leaf_code;
}
if(!doLeafTest<tInflate, tHasIndices, Node>(node, test, bounds, tree.getIndices(), maxDist, pcb))
return false;
skip_leaf_code:;
}
}
return true;
}
};
struct TraversalControl
{
enum Enum {
eDontGoDeeper,
eGoDeeper,
eGoDeeperNegFirst,
eAbort
};
};
template<typename T>
void traverseBVH(const Gu::BVHNode* nodes, T& traversalController, PxI32 rootNodeIndex = 0)
{
PxI32 index = rootNodeIndex;
PxInlineArray<PxI32, RAW_TRAVERSAL_STACK_SIZE> todoStack;
while (true)
{
const Gu::BVHNode& a = nodes[index];
TraversalControl::Enum control = traversalController.analyze(a, index);
if (control == TraversalControl::eAbort)
return;
if (!a.isLeaf() && (control == TraversalControl::eGoDeeper || control == TraversalControl::eGoDeeperNegFirst))
{
if (control == TraversalControl::eGoDeeperNegFirst)
{
todoStack.pushBack(a.getPosIndex());
index = a.getNegIndex(); //index gets processed next - assign negative index to it
}
else
{
todoStack.pushBack(a.getNegIndex());
index = a.getPosIndex(); //index gets processed next - assign positive index to it
}
continue;
}
if (todoStack.empty()) break;
index = todoStack.popBack();
}
}
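	// Example (illustrative sketch, not part of the PhysX API): a minimal traversal
	// controller for traverseBVH() that counts leaf nodes. Any type exposing an
	// analyze() member with this signature can be used as the controller.
	struct LeafCountingController
	{
		PxU32 mNbLeaves;
		LeafCountingController() : mNbLeaves(0) {}
		TraversalControl::Enum analyze(const Gu::BVHNode& node, PxI32 /*nodeIndex*/)
		{
			if(node.isLeaf())
			{
				mNbLeaves++;
				return TraversalControl::eDontGoDeeper;	// nothing below a leaf
			}
			return TraversalControl::eGoDeeper;	// visit both children, positive first
		}
	};
	// usage: LeafCountingController c; traverseBVH(bvh.getNodes(), c);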
}
}
#endif // SQ_AABBTREEQUERY_H
| 11,774 | C | 37.733553 | 169 | 0.667658 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuSweepTests.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "geometry/PxSphereGeometry.h"
#include "geometry/PxCustomGeometry.h"
#include "GuSweepTests.h"
#include "GuVecCapsule.h"
#include "GuVecBox.h"
#include "GuVecTriangle.h"
#include "GuSweepTriangleUtils.h"
#include "GuInternal.h"
#include "GuGJKRaycast.h"
using namespace physx;
using namespace Gu;
using namespace Cm;
using namespace physx::aos;
//#define USE_VIRTUAL_GJK
#ifdef USE_VIRTUAL_GJK
static bool virtualGjkRaycastPenetration(const GjkConvex& a, const GjkConvex& b, const aos::Vec3VArg initialDir, const aos::FloatVArg initialLambda, const aos::Vec3VArg s, const aos::Vec3VArg r, aos::FloatV& lambda,
aos::Vec3V& normal, aos::Vec3V& closestA, const PxReal _inflation, const bool initialOverlap)
{
return gjkRaycastPenetration<GjkConvex, GjkConvex >(a, b, initialDir, initialLambda, s, r, lambda, normal, closestA, _inflation, initialOverlap);
}
#endif
bool sweepCapsule_BoxGeom(GU_CAPSULE_SWEEP_FUNC_PARAMS)
{
PX_UNUSED(hitFlags);
PX_UNUSED(threadContext);
using namespace aos;
PX_ASSERT(geom.getType() == PxGeometryType::eBOX);
const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom);
const FloatV zero = FZero();
const Vec3V zeroV = V3Zero();
const Vec3V boxExtents0 = V3LoadU(boxGeom.halfExtents);
const FloatV dist = FLoad(distance);
const Vec3V worldDir = V3LoadU(unitDir);
const PxTransformV capPos = loadTransformU(capsulePose_);
const PxTransformV boxPos = loadTransformU(pose);
const PxMatTransformV aToB(boxPos.transformInv(capPos));
const FloatV capsuleHalfHeight = FLoad(capsuleGeom_.halfHeight);
const FloatV capsuleRadius = FLoad(lss.radius);
BoxV box(zeroV, boxExtents0);
CapsuleV capsule(aToB.p, aToB.rotate(V3Scale(V3UnitX(), capsuleHalfHeight)), capsuleRadius);
const Vec3V dir = boxPos.rotateInv(V3Neg(V3Scale(worldDir, dist)));
const bool isMtd = hitFlags & PxHitFlag::eMTD;
FloatV toi = FMax();
	Vec3V closestA, normal;	// closestA and normal are in the local space of the box
const LocalConvex<CapsuleV> convexA(capsule);
const LocalConvex<BoxV> convexB(box);
const Vec3V initialSearchDir = V3Sub(capsule.getCenter(), box.getCenter());
#ifdef USE_VIRTUAL_GJK
if(!virtualGjkRaycastPenetration(convexA, convexB, initialSearchDir, zero, zeroV, dir, toi, normal, closestA, lss.radius + inflation, isMtd))
return false;
#else
if(!gjkRaycastPenetration<LocalConvex<CapsuleV>, LocalConvex<BoxV> >(convexA, convexB, initialSearchDir, zero, zeroV, dir, toi, normal, closestA, lss.radius + inflation, isMtd))
return false;
#endif
sweepHit.flags = PxHitFlag::eNORMAL;
if(FAllGrtrOrEq(zero, toi))
{
//initial overlap
if(isMtd)
{
sweepHit.flags |= PxHitFlag::ePOSITION;
const Vec3V worldPointA = boxPos.transform(closestA);
const Vec3V destNormal = boxPos.rotate(normal);
const FloatV length = toi;
const Vec3V destWorldPointA = V3NegScaleSub(destNormal, length, worldPointA);
V3StoreU(destWorldPointA, sweepHit.position);
V3StoreU(destNormal, sweepHit.normal);
FStore(length, &sweepHit.distance);
}
else
{
sweepHit.distance = 0.0f;
sweepHit.normal = -unitDir;
}
}
else
{
sweepHit.flags |= PxHitFlag::ePOSITION;
const Vec3V worldPointA = boxPos.transform(closestA);
const Vec3V destNormal = boxPos.rotate(normal);
const FloatV length = FMul(dist, toi);
const Vec3V destWorldPointA = V3ScaleAdd(worldDir, length, worldPointA);
V3StoreU(destNormal, sweepHit.normal);
V3StoreU(destWorldPointA, sweepHit.position);
FStore(length, &sweepHit.distance);
}
return true;
}
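// Note on the pattern above (editorial, derived from the code): gjkRaycastPenetration()
// returns a normalized time of impact 'toi' along the sweep. toi <= 0 flags an
// initial overlap; with eMTD requested the code stores 'toi' itself as the hit
// distance (a depenetration depth) and derives the contact from the MTD normal,
// otherwise it reports distance 0 with the normal opposed to the sweep direction.
// For a regular hit the distance is the sweep length scaled by toi, i.e. FMul(dist, toi).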
bool sweepBox_SphereGeom(GU_BOX_SWEEP_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::eSPHERE);
PX_UNUSED(threadContext);
PX_UNUSED(hitFlags);
PX_UNUSED(boxGeom_);
const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom);
const FloatV zero = FZero();
const Vec3V zeroV = V3Zero();
const Vec3V boxExtents = V3LoadU(box.extents);
const FloatV worldDist = FLoad(distance);
const Vec3V unitDirV = V3LoadU(unitDir);
const FloatV sphereRadius = FLoad(sphereGeom.radius);
const PxTransformV spherePos = loadTransformU(pose);
const PxTransformV boxPos = loadTransformU(boxPose_);
const PxMatTransformV aToB(boxPos.transformInv(spherePos));
const BoxV boxV(zeroV, boxExtents);
const CapsuleV capsuleV(aToB.p, sphereRadius);
//transform into b space
const Vec3V dir = boxPos.rotateInv(V3Scale(unitDirV, worldDist));
const bool isMtd = hitFlags & PxHitFlag::eMTD;
FloatV toi;
	Vec3V closestA, normal;	// closestA and normal are in the local space of the box
const Vec3V initialSearchDir = V3Sub(capsuleV.getCenter(), boxV.getCenter());
const LocalConvex<CapsuleV> convexA(capsuleV);
const LocalConvex<BoxV> convexB(boxV);
#ifdef USE_VIRTUAL_GJK
if(!virtualGjkRaycastPenetration(convexA, convexB, initialSearchDir, zero, zeroV, dir, toi, normal, closestA, sphereGeom.radius+inflation, isMtd))
return false;
#else
if(!gjkRaycastPenetration<LocalConvex<CapsuleV>, LocalConvex<BoxV> >(convexA, convexB, initialSearchDir, zero, zeroV, dir, toi, normal, closestA, sphereGeom.radius+inflation, isMtd))
return false;
#endif
sweepHit.flags = PxHitFlag::eNORMAL;
//initial overlap
if(FAllGrtrOrEq(zero, toi))
{
if(isMtd)
{
sweepHit.flags |= PxHitFlag::ePOSITION;
const Vec3V destWorldPointA = boxPos.transform(closestA);
const Vec3V destNormal = V3Neg(boxPos.rotate(normal));
const FloatV length = toi;
V3StoreU(destNormal, sweepHit.normal);
V3StoreU(destWorldPointA, sweepHit.position);
FStore(length, &sweepHit.distance);
}
else
{
sweepHit.distance = 0.0f;
sweepHit.normal = -unitDir;
}
}
else
{
sweepHit.flags |= PxHitFlag::ePOSITION;
const Vec3V destWorldPointA = boxPos.transform(closestA);
const Vec3V destNormal = V3Neg(boxPos.rotate(normal));
const FloatV length = FMul(worldDist, toi);
V3StoreU(destNormal, sweepHit.normal);
V3StoreU(destWorldPointA, sweepHit.position);
FStore(length, &sweepHit.distance);
}
return true;
}
bool sweepBox_CapsuleGeom(GU_BOX_SWEEP_FUNC_PARAMS)
{
using namespace aos;
PX_ASSERT(geom.getType() == PxGeometryType::eCAPSULE);
PX_UNUSED(threadContext);
PX_UNUSED(hitFlags);
PX_UNUSED(boxGeom_);
const PxCapsuleGeometry& capsuleGeom = static_cast<const PxCapsuleGeometry&>(geom);
const FloatV capsuleHalfHeight = FLoad(capsuleGeom.halfHeight);
const FloatV capsuleRadius = FLoad(capsuleGeom.radius);
const FloatV zero = FZero();
const Vec3V zeroV = V3Zero();
const Vec3V boxExtents = V3LoadU(box.extents);
const FloatV worldDist = FLoad(distance);
const Vec3V unitDirV = V3LoadU(unitDir);
const PxTransformV capPos = loadTransformU(pose);
const PxTransformV boxPos = loadTransformU(boxPose_);
const PxMatTransformV aToB(boxPos.transformInv(capPos));
const BoxV boxV(zeroV, boxExtents);
const CapsuleV capsuleV(aToB.p, aToB.rotate(V3Scale(V3UnitX(), capsuleHalfHeight)), capsuleRadius);
//transform into b space
const Vec3V dir = boxPos.rotateInv(V3Scale(unitDirV, worldDist));
const bool isMtd = hitFlags & PxHitFlag::eMTD;
FloatV toi;
	Vec3V closestA, normal;	// closestA and normal are in the local space of the box
const Vec3V initialSearchDir = V3Sub(capsuleV.getCenter(), boxV.getCenter());
const LocalConvex<CapsuleV> convexA(capsuleV);
const LocalConvex<BoxV> convexB(boxV);
#ifdef USE_VIRTUAL_GJK
if(!virtualGjkRaycastPenetration(convexA, convexB, initialSearchDir, zero, zeroV, dir, toi, normal, closestA, capsuleGeom.radius+inflation, isMtd))
return false;
#else
if(!gjkRaycastPenetration<LocalConvex<CapsuleV>, LocalConvex<BoxV> >(convexA, convexB, initialSearchDir, zero, zeroV, dir, toi, normal, closestA, capsuleGeom.radius+inflation, isMtd))
return false;
#endif
sweepHit.flags = PxHitFlag::eNORMAL;
//initial overlap
if(FAllGrtrOrEq(zero, toi))
{
if(isMtd)
{
sweepHit.flags |= PxHitFlag::ePOSITION;
//initial overlap is toi < 0
const FloatV length = toi;
const Vec3V destWorldPointA = boxPos.transform(closestA);
const Vec3V destNormal = boxPos.rotate(normal);
V3StoreU(V3Neg(destNormal), sweepHit.normal);
V3StoreU(destWorldPointA, sweepHit.position);
FStore(length, &sweepHit.distance);
}
else
{
sweepHit.distance = 0.0f;
sweepHit.normal = -unitDir;
}
return true;
}
else
{
sweepHit.flags |= PxHitFlag::ePOSITION;
const Vec3V destWorldPointA = boxPos.transform(closestA);
const Vec3V destNormal = boxPos.rotate(normal);
const FloatV length = FMul(worldDist, toi);
V3StoreU(V3Neg(destNormal), sweepHit.normal);
V3StoreU(destWorldPointA, sweepHit.position);
FStore(length, &sweepHit.distance);
}
return true;
}
bool sweepBox_BoxGeom(GU_BOX_SWEEP_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::eBOX);
PX_UNUSED(threadContext);
PX_UNUSED(boxGeom_);
const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom);
const FloatV zero = FZero();
const Vec3V zeroV = V3Zero();
const Vec3V boxExtents0 = V3LoadU(boxGeom.halfExtents);
const Vec3V boxExtents1 = V3LoadU(box.extents);
const FloatV worldDist = FLoad(distance);
const Vec3V unitDirV = V3LoadU(unitDir);
const PxTransformV boxTrans0 = loadTransformU(pose);
const PxTransformV boxTrans1 = loadTransformU(boxPose_);
const PxMatTransformV aToB(boxTrans1.transformInv(boxTrans0));
const BoxV box0(zeroV, boxExtents0);
const BoxV box1(zeroV, boxExtents1);
//transform into b space
const Vec3V dir = boxTrans1.rotateInv(V3Scale(unitDirV, worldDist));
const bool isMtd = hitFlags & PxHitFlag::eMTD;
FloatV toi;
	Vec3V closestA, normal;	// closestA and normal are in the local space of the box
const RelativeConvex<BoxV> convexA(box0, aToB);
const LocalConvex<BoxV> convexB(box1);
#ifdef USE_VIRTUAL_GJK
if(!virtualGjkRaycastPenetration(convexA, convexB, aToB.p, zero, zeroV, dir, toi, normal, closestA, inflation, isMtd))
return false;
#else
if(!gjkRaycastPenetration<RelativeConvex<BoxV>, LocalConvex<BoxV> >(convexA, convexB, aToB.p, zero, zeroV, dir, toi, normal, closestA, inflation, isMtd))
return false;
#endif
sweepHit.flags = PxHitFlag::eNORMAL;
if(FAllGrtrOrEq(zero, toi))
{
if(isMtd)
{
sweepHit.flags |= PxHitFlag::ePOSITION;
const FloatV length = toi;
const Vec3V destWorldPointA = boxTrans1.transform(closestA);
const Vec3V destNormal = V3Normalize(boxTrans1.rotate(normal));
V3StoreU(V3Neg(destNormal), sweepHit.normal);
V3StoreU(destWorldPointA, sweepHit.position);
FStore(length, &sweepHit.distance);
}
else
{
sweepHit.distance = 0.0f;
sweepHit.normal = -unitDir;
}
}
else
{
sweepHit.flags |= PxHitFlag::ePOSITION;
const Vec3V destWorldPointA = boxTrans1.transform(closestA);
const Vec3V destNormal = V3Normalize(boxTrans1.rotate(normal));
const FloatV length = FMul(worldDist, toi);
V3StoreU(V3Neg(destNormal), sweepHit.normal);
V3StoreU(destWorldPointA, sweepHit.position);
FStore(length, &sweepHit.distance);
}
return true;
}
bool Gu::sweepBoxTriangles(GU_SWEEP_TRIANGLES_FUNC_PARAMS(PxBoxGeometry))
{
PX_UNUSED(hitFlags);
if(!nbTris)
return false;
const bool meshBothSides = hitFlags & PxHitFlag::eMESH_BOTH_SIDES;
const bool doBackfaceCulling = !doubleSided && !meshBothSides;
Box box;
buildFrom(box, pose.p, geom.halfExtents, pose.q);
PxGeomSweepHit sweepHit;
// Move to AABB space
PxMat34 worldToBox;
computeWorldToBoxMatrix(worldToBox, box);
const PxVec3 localDir = worldToBox.rotate(unitDir);
const PxVec3 localMotion = localDir * distance;
const Vec3V base0 = V3LoadU(worldToBox.m.column0);
const Vec3V base1 = V3LoadU(worldToBox.m.column1);
const Vec3V base2 = V3LoadU(worldToBox.m.column2);
const Mat33V matV(base0, base1, base2);
const Vec3V p = V3LoadU(worldToBox.p);
const PxMatTransformV worldToBoxV(p, matV);
const FloatV zero = FZero();
const Vec3V zeroV = V3Zero();
const Vec3V boxExtents = V3LoadU(box.extents);
const Vec3V boxDir = V3LoadU(localDir);
const FloatV inflationV = FLoad(inflation);
const Vec3V absBoxDir = V3Abs(boxDir);
const FloatV boxRadiusV = FAdd(V3Dot(absBoxDir, boxExtents), inflationV);
BoxV boxV(zeroV, boxExtents);
#if PX_DEBUG
PxU32 totalTestsExpected = nbTris;
PxU32 totalTestsReal = 0;
PX_UNUSED(totalTestsExpected);
PX_UNUSED(totalTestsReal);
#endif
Vec3V boxLocalMotion = V3LoadU(localMotion);
Vec3V minClosestA = zeroV, minNormal = zeroV;
PxU32 minTriangleIndex = 0;
PxVec3 bestTriNormal(0.0f);
FloatV dist = FLoad(distance);
const PxTransformV boxPos = loadTransformU(pose);
bool status = false;
const PxU32 idx = cachedIndex ? *cachedIndex : 0;
for(PxU32 ii=0;ii<nbTris;ii++)
{
const PxU32 triangleIndex = getTriangleIndex(ii, idx);
const Vec3V localV0 = V3LoadU(triangles[triangleIndex].verts[0]);
const Vec3V localV1 = V3LoadU(triangles[triangleIndex].verts[1]);
const Vec3V localV2 = V3LoadU(triangles[triangleIndex].verts[2]);
const Vec3V triV0 = worldToBoxV.transform(localV0);
const Vec3V triV1 = worldToBoxV.transform(localV1);
const Vec3V triV2 = worldToBoxV.transform(localV2);
const Vec3V triNormal = V3Cross(V3Sub(triV2, triV1),V3Sub(triV0, triV1));
if(doBackfaceCulling && FAllGrtrOrEq(V3Dot(triNormal, boxLocalMotion), zero)) // backface culling
continue;
const FloatV dp0 = V3Dot(triV0, boxDir);
const FloatV dp1 = V3Dot(triV1, boxDir);
const FloatV dp2 = V3Dot(triV2, boxDir);
const FloatV dp = FMin(dp0, FMin(dp1, dp2));
const Vec3V dpV = V3Merge(dp0, dp1, dp2);
const FloatV temp1 = FAdd(boxRadiusV, dist);
const BoolV con0 = FIsGrtr(dp, temp1);
const BoolV con1 = V3IsGrtr(zeroV, dpV);
if(BAllEqTTTT(BOr(con0, con1)))
continue;
#if PX_DEBUG
totalTestsReal++;
#endif
TriangleV triangleV(triV0, triV1, triV2);
FloatV lambda;
		Vec3V closestA, normal;	// closestA and normal are in the local space of the convex hull
const LocalConvex<TriangleV> convexA(triangleV);
const LocalConvex<BoxV> convexB(boxV);
const Vec3V initialSearchDir = V3Sub(triangleV.getCenter(), boxV.getCenter());
#ifdef USE_VIRTUAL_GJK
if(virtualGjkRaycastPenetration(convexA, convexB, initialSearchDir, zero, zeroV, boxLocalMotion, lambda, normal, closestA, inflation, false))
#else
if(gjkRaycastPenetration<LocalConvex<TriangleV>, LocalConvex<BoxV> >(convexA, convexB, initialSearchDir, zero, zeroV, boxLocalMotion, lambda, normal, closestA, inflation, false))
#endif
{
//hitCount++;
if(FAllGrtrOrEq(zero, lambda))
{
hit.distance = 0.0f;
hit.faceIndex = triangleIndex;
hit.normal = -unitDir;
hit.flags = PxHitFlag::eNORMAL;
return true;
}
dist = FMul(dist, lambda);
boxLocalMotion = V3Scale(boxDir, dist);
minClosestA = closestA;
minNormal = normal;
minTriangleIndex = triangleIndex;
V3StoreU(triNormal, bestTriNormal);
status = true;
if(hitFlags & PxHitFlag::eMESH_ANY)
break;
}
}
if(!status)
return false;
hit.faceIndex = minTriangleIndex;
const Vec3V destNormal = V3Neg(V3Normalize(boxPos.rotate(minNormal)));
const Vec3V destWorldPointA = boxPos.transform(minClosestA);
V3StoreU(destNormal, hit.normal);
V3StoreU(destWorldPointA, hit.position);
FStore(dist, &hit.distance);
// PT: by design, returned normal is opposed to the sweep direction.
if(shouldFlipNormal(hit.normal, meshBothSides, doubleSided, bestTriNormal, unitDir))
hit.normal = -hit.normal;
hit.flags = PxHitFlag::ePOSITION|PxHitFlag::eNORMAL;
return true;
}
bool sweepCapsule_SphereGeom (GU_CAPSULE_SWEEP_FUNC_PARAMS);
bool sweepCapsule_PlaneGeom (GU_CAPSULE_SWEEP_FUNC_PARAMS);
bool sweepCapsule_CapsuleGeom (GU_CAPSULE_SWEEP_FUNC_PARAMS);
bool sweepCapsule_BoxGeom (GU_CAPSULE_SWEEP_FUNC_PARAMS);
bool sweepCapsule_BoxGeom_Precise (GU_CAPSULE_SWEEP_FUNC_PARAMS);
bool sweepCapsule_ConvexGeom (GU_CAPSULE_SWEEP_FUNC_PARAMS);
bool sweepCapsule_MeshGeom (GU_CAPSULE_SWEEP_FUNC_PARAMS);
bool sweepCapsule_HeightFieldGeom (GU_CAPSULE_SWEEP_FUNC_PARAMS);
bool sweepBox_SphereGeom (GU_BOX_SWEEP_FUNC_PARAMS);
bool sweepBox_SphereGeom_Precise (GU_BOX_SWEEP_FUNC_PARAMS);
bool sweepBox_PlaneGeom (GU_BOX_SWEEP_FUNC_PARAMS);
bool sweepBox_CapsuleGeom (GU_BOX_SWEEP_FUNC_PARAMS);
bool sweepBox_CapsuleGeom_Precise (GU_BOX_SWEEP_FUNC_PARAMS);
bool sweepBox_BoxGeom (GU_BOX_SWEEP_FUNC_PARAMS);
bool sweepBox_BoxGeom_Precise (GU_BOX_SWEEP_FUNC_PARAMS);
bool sweepBox_ConvexGeom (GU_BOX_SWEEP_FUNC_PARAMS);
bool sweepBox_MeshGeom (GU_BOX_SWEEP_FUNC_PARAMS);
bool sweepBox_HeightFieldGeom (GU_BOX_SWEEP_FUNC_PARAMS);
bool sweepBox_HeightFieldGeom_Precise(GU_BOX_SWEEP_FUNC_PARAMS);
bool sweepConvex_SphereGeom (GU_CONVEX_SWEEP_FUNC_PARAMS);
bool sweepConvex_PlaneGeom (GU_CONVEX_SWEEP_FUNC_PARAMS);
bool sweepConvex_CapsuleGeom (GU_CONVEX_SWEEP_FUNC_PARAMS);
bool sweepConvex_BoxGeom (GU_CONVEX_SWEEP_FUNC_PARAMS);
bool sweepConvex_ConvexGeom (GU_CONVEX_SWEEP_FUNC_PARAMS);
bool sweepConvex_MeshGeom (GU_CONVEX_SWEEP_FUNC_PARAMS);
bool sweepConvex_HeightFieldGeom (GU_CONVEX_SWEEP_FUNC_PARAMS);
static bool sweepCapsule_InvalidGeom(GU_CAPSULE_SWEEP_FUNC_PARAMS)
{
PX_UNUSED(threadContext);
PX_UNUSED(capsuleGeom_);
PX_UNUSED(capsulePose_);
PX_UNUSED(geom);
PX_UNUSED(pose);
PX_UNUSED(lss);
PX_UNUSED(unitDir);
PX_UNUSED(distance);
PX_UNUSED(sweepHit);
PX_UNUSED(hitFlags);
PX_UNUSED(inflation);
return false;
}
static bool sweepBox_InvalidGeom(GU_BOX_SWEEP_FUNC_PARAMS)
{
PX_UNUSED(threadContext);
PX_UNUSED(boxPose_);
PX_UNUSED(boxGeom_);
PX_UNUSED(geom);
PX_UNUSED(pose);
PX_UNUSED(box);
PX_UNUSED(unitDir);
PX_UNUSED(distance);
PX_UNUSED(sweepHit);
PX_UNUSED(hitFlags);
PX_UNUSED(inflation);
return false;
}
static bool sweepConvex_InvalidGeom(GU_CONVEX_SWEEP_FUNC_PARAMS)
{
PX_UNUSED(threadContext);
PX_UNUSED(geom);
PX_UNUSED(pose);
PX_UNUSED(convexGeom);
PX_UNUSED(convexPose);
PX_UNUSED(unitDir);
PX_UNUSED(distance);
PX_UNUSED(sweepHit);
PX_UNUSED(hitFlags);
PX_UNUSED(inflation);
return false;
}
static bool sweepCapsule_CustomGeom(GU_CAPSULE_SWEEP_FUNC_PARAMS)
{
PX_UNUSED(lss);
if(geom.getType() == PxGeometryType::eCUSTOM)
return static_cast<const PxCustomGeometry&>(geom).callbacks->sweep(unitDir, distance, geom, pose, capsuleGeom_, capsulePose_, sweepHit, hitFlags, inflation, threadContext);
return false;
}
static bool sweepBox_CustomGeom(GU_BOX_SWEEP_FUNC_PARAMS)
{
PX_UNUSED(box);
if(geom.getType() == PxGeometryType::eCUSTOM)
return static_cast<const PxCustomGeometry&>(geom).callbacks->sweep(unitDir, distance, geom, pose, boxGeom_, boxPose_, sweepHit, hitFlags, inflation, threadContext);
return false;
}
static bool sweepConvex_CustomGeom(GU_CONVEX_SWEEP_FUNC_PARAMS)
{
if(geom.getType() == PxGeometryType::eCUSTOM)
return static_cast<const PxCustomGeometry&>(geom).callbacks->sweep(unitDir, distance, geom, pose, convexGeom, convexPose, sweepHit, hitFlags, inflation, threadContext);
return false;
}
Gu::GeomSweepFuncs gGeomSweepFuncs =
{
{
sweepCapsule_SphereGeom,
sweepCapsule_PlaneGeom,
sweepCapsule_CapsuleGeom,
sweepCapsule_BoxGeom,
sweepCapsule_ConvexGeom,
sweepCapsule_InvalidGeom,
sweepCapsule_InvalidGeom,
sweepCapsule_MeshGeom,
sweepCapsule_HeightFieldGeom,
sweepCapsule_InvalidGeom,
sweepCapsule_CustomGeom
},
{
sweepCapsule_SphereGeom,
sweepCapsule_PlaneGeom,
sweepCapsule_CapsuleGeom,
sweepCapsule_BoxGeom_Precise,
sweepCapsule_ConvexGeom,
sweepCapsule_InvalidGeom,
sweepCapsule_InvalidGeom,
		sweepCapsule_MeshGeom,
sweepCapsule_HeightFieldGeom,
sweepCapsule_InvalidGeom,
sweepCapsule_CustomGeom
},
{
sweepBox_SphereGeom,
sweepBox_PlaneGeom,
sweepBox_CapsuleGeom,
sweepBox_BoxGeom,
sweepBox_ConvexGeom,
sweepBox_InvalidGeom,
sweepBox_InvalidGeom,
sweepBox_MeshGeom,
sweepBox_HeightFieldGeom,
sweepBox_InvalidGeom,
sweepBox_CustomGeom
},
{
sweepBox_SphereGeom_Precise,
sweepBox_PlaneGeom,
sweepBox_CapsuleGeom_Precise,
sweepBox_BoxGeom_Precise,
sweepBox_ConvexGeom,
sweepBox_InvalidGeom,
sweepBox_InvalidGeom,
sweepBox_MeshGeom,
sweepBox_HeightFieldGeom_Precise,
sweepBox_InvalidGeom,
sweepBox_CustomGeom
},
{
sweepConvex_SphereGeom, // 0
sweepConvex_PlaneGeom, // 1
sweepConvex_CapsuleGeom, // 2
sweepConvex_BoxGeom, // 3
sweepConvex_ConvexGeom, // 4
sweepConvex_InvalidGeom, // 5
sweepConvex_InvalidGeom, // 6
sweepConvex_MeshGeom, // 7
sweepConvex_HeightFieldGeom,// 8
sweepConvex_InvalidGeom, // 9
sweepConvex_CustomGeom // 10
}
};
PX_PHYSX_COMMON_API const GeomSweepFuncs& Gu::getSweepFuncTable()
{
return gGeomSweepFuncs;
}
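// Example (illustrative sketch; the 'SweepCapsuleFunc' type and 'capsuleMap'
// member names are assumptions, not confirmed by this file): dispatching a
// capsule sweep through the table. Each inner array is indexed by the
// PxGeometryType of the geometry being swept against, mirroring the ordering of
// the initializers above:
//
//	const Gu::GeomSweepFuncs& funcs = Gu::getSweepFuncTable();
//	const SweepCapsuleFunc f = funcs.capsuleMap[geom.getType()];
//	const bool hit = f(threadContext, capsuleGeom, capsulePose, geom, pose, lss,
//	                   unitDir, distance, sweepHit, hitFlags, inflation);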
| 22,163 | C++ | 31.982143 | 216 | 0.753328 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuIncrementalAABBTree.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxMemory.h"
#include "foundation/PxVecMath.h"
#include "foundation/PxFPU.h"
#include "foundation/PxMathUtils.h"
#include "GuIncrementalAABBTree.h"
#include "GuAABBTreeBuildStats.h"
#include "GuAABBTreeNode.h"
#include "GuBVH.h"
using namespace physx;
using namespace aos;
using namespace Gu;
#define SUPPORT_TREE_ROTATION 1
#define DEALLOCATE_RESET 0
IncrementalAABBTree::IncrementalAABBTree():
mIndicesPool("AABBTreeIndicesPool", 256),
mNodesPool("AABBTreeNodesPool", 256 ),
mRoot(NULL)
{
}
IncrementalAABBTree::~IncrementalAABBTree()
{
release();
}
void IncrementalAABBTree::release()
{
if(mRoot)
{
releaseNode(mRoot);
mRoot = NULL;
}
}
void IncrementalAABBTree::releaseNode(IncrementalAABBTreeNode* node)
{
PX_ASSERT(node);
if(node->isLeaf())
{
mIndicesPool.deallocate(node->mIndices);
}
else
{
releaseNode(node->mChilds[0]);
releaseNode(node->mChilds[1]);
}
if(!node->mParent)
{
mNodesPool.deallocate(reinterpret_cast<IncrementalAABBTreeNodePair*>(node));
return;
}
if(node->mParent->mChilds[1] == node)
{
mNodesPool.deallocate(reinterpret_cast<IncrementalAABBTreeNodePair*>(node->mParent->mChilds[0]));
}
}
// check if node is inside the given bounds
PX_FORCE_INLINE static bool nodeInsideBounds(const Vec4V& nodeMin, const Vec4V& nodeMax, const Vec4V& parentMin, const Vec4V& parentMax)
{
return !(PxIntBool(V4AnyGrtr3(parentMin, nodeMin)) || PxIntBool(V4AnyGrtr3(nodeMax, parentMax)));
}
// update the node parent hierarchy when an insert happens; we can early exit when the node is inside its parent
// and no further update is needed
PX_FORCE_INLINE static void updateHierarchyAfterInsert(IncrementalAABBTreeNode* node)
{
IncrementalAABBTreeNode* parent = node->mParent;
IncrementalAABBTreeNode* testNode = node;
while(parent)
{
// check if we can early exit
if(!nodeInsideBounds(testNode->mBVMin, testNode->mBVMax, parent->mBVMin, parent->mBVMax))
{
parent->mBVMin = V4Min(parent->mChilds[0]->mBVMin, parent->mChilds[1]->mBVMin);
parent->mBVMax = V4Max(parent->mChilds[0]->mBVMax, parent->mChilds[1]->mBVMax);
}
else
break;
testNode = parent;
parent = parent->mParent;
}
}
// add an index into the leaf indices list and update the node bounds
PX_FORCE_INLINE static void addPrimitiveIntoNode(IncrementalAABBTreeNode* node, const PoolIndex index, const Vec4V& minV, const Vec4V& maxV)
{
PX_ASSERT(node->isLeaf());
AABBTreeIndices& nodeIndices = *node->mIndices;
PX_ASSERT(nodeIndices.nbIndices < INCR_NB_OBJECTS_PER_NODE);
// store the new handle
nodeIndices.indices[nodeIndices.nbIndices++] = index;
// increase the node bounds
node->mBVMin = V4Min(node->mBVMin, minV);
node->mBVMax = V4Max(node->mBVMax, maxV);
updateHierarchyAfterInsert(node);
}
// check if the node intersects the given bounds
PX_FORCE_INLINE static bool nodeIntersection(IncrementalAABBTreeNode& node, const Vec4V& minV, const Vec4V& maxV)
{
return !(PxIntBool(V4AnyGrtr3(node.mBVMin, maxV)) || PxIntBool(V4AnyGrtr3(minV, node.mBVMax)));
}
// traversal strategy
PX_FORCE_INLINE static PxU32 traversalDirection(const IncrementalAABBTreeNode& child0, const IncrementalAABBTreeNode& child1, const Vec4V& testCenterV,
bool testRotation, bool& rotateNode, PxU32& largesRotateNode)
{
// traverse in the direction of a node which is closer
// we compare the node and object centers
const Vec4V centerCh0V = V4Add(child0.mBVMax, child0.mBVMin);
const Vec4V centerCh1V = V4Add(child1.mBVMax, child1.mBVMin);
const Vec4V ch0D = V4Sub(testCenterV, centerCh0V);
const Vec4V ch1D = V4Sub(testCenterV, centerCh1V);
if(testRotation)
{
		// if one child's volume is 3x larger than the other's, we do a rotation
const float volumeCompare = 3.0f;
PX_ALIGN(16, PxVec4) sizeCh0;
PX_ALIGN(16, PxVec4) sizeCh1;
const Vec4V sizeCh0V = V4Sub(child0.mBVMax, child0.mBVMin);
const Vec4V sizeCh1V = V4Sub(child1.mBVMax, child1.mBVMin);
V4StoreA(sizeCh0V, &sizeCh0.x);
V4StoreA(sizeCh1V, &sizeCh1.x);
const float volumeCh0 = sizeCh0.x*sizeCh0.y*sizeCh0.z;
const float volumeCh1 = sizeCh1.x*sizeCh1.y*sizeCh1.z;
if((volumeCh0*volumeCompare < volumeCh1) || (volumeCh1*volumeCompare < volumeCh0))
{
largesRotateNode = (volumeCh0 > volumeCh1) ? 0u : 1u;
rotateNode = true;
}
}
const BoolV con = FIsGrtr(V4Dot3(ch0D, ch0D), V4Dot3(ch1D, ch1D));
return (BAllEqTTTT(con) == 1) ? PxU32(1) : PxU32(0);
}
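// Worked example of the rotation heuristic above (editorial): with child volumes
// 1.0 and 4.0 we get volumeCh0*3 = 3.0 < 4.0, so 'rotateNode' is set and
// 'largesRotateNode' points at child 1; with volumes 1.0 and 2.5 neither side is
// 3x bigger than the other, so no rotation is requested and only the closer
// child is traversed.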
// remove an index from the leaf
PX_FORCE_INLINE static void removePrimitiveFromNode(IncrementalAABBTreeNode* node, const PoolIndex index)
{
AABBTreeIndices& indices = *node->mIndices;
PX_ASSERT(indices.nbIndices > 1);
for (PxU32 i = indices.nbIndices; i--; )
{
if(node->mIndices->indices[i] == index)
{
node->mIndices->indices[i] = node->mIndices->indices[--indices.nbIndices];
return;
}
}
	// if the handle was not found, something is wrong here
PX_ASSERT(0);
}
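// Worked example of the swap-remove above (editorial): with indices = [7, 3, 9],
// removing handle 3 copies the last element over it, giving [7, 9] with
// nbIndices = 2. Ordering inside a leaf is irrelevant, so a linear scan plus an
// O(1) unordered removal is sufficient here.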
// check if the bounds are equal to the given node min/max
PX_FORCE_INLINE static bool boundsEqual(const Vec4V& testMin, const Vec4V& testMax, const Vec4V& nodeMin, const Vec4V& nodeMax)
{
return (PxIntBool(V4AllEq(nodeMin, testMin)) && PxIntBool(V4AllEq(testMax, nodeMax)));
}
// update the node hierarchy bounds when a removal happens; we can early exit if the bounds are equal
// and no bounds update happened
PX_FORCE_INLINE static void updateHierarchyAfterRemove(IncrementalAABBTreeNode* node, const PxBounds3* bounds)
{
if(node->isLeaf())
{
const AABBTreeIndices& indices = *node->mIndices;
PX_ASSERT(indices.nbIndices > 0);
Vec4V bvMin = V4LoadU(&bounds[indices.indices[0]].minimum.x);
Vec4V bvMax = V4LoadU(&bounds[indices.indices[0]].maximum.x);
for(PxU32 i = 1; i < indices.nbIndices; i++)
{
const Vec4V minV = V4LoadU(&bounds[indices.indices[i]].minimum.x);
const Vec4V maxV = V4LoadU(&bounds[indices.indices[i]].maximum.x);
bvMin = V4Min(bvMin, minV);
bvMax = V4Max(bvMax, maxV);
}
node->mBVMin = V4ClearW(bvMin);
node->mBVMax = V4ClearW(bvMax);
}
else
{
node->mBVMin = V4Min(node->mChilds[0]->mBVMin, node->mChilds[1]->mBVMin);
node->mBVMax = V4Max(node->mChilds[0]->mBVMax, node->mChilds[1]->mBVMax);
}
IncrementalAABBTreeNode* parent = node->mParent;
while(parent)
{
const Vec4V newMinV = V4Min(parent->mChilds[0]->mBVMin, parent->mChilds[1]->mBVMin);
const Vec4V newMaxV = V4Max(parent->mChilds[0]->mBVMax, parent->mChilds[1]->mBVMax);
const bool earlyExit = boundsEqual(newMinV, newMaxV, parent->mBVMin, parent->mBVMax);
if(earlyExit)
break;
parent->mBVMin = newMinV;
parent->mBVMax = newMaxV;
parent = parent->mParent;
}
}
// split the leaf node along the most significant axis
IncrementalAABBTreeNode* IncrementalAABBTree::splitLeafNode(IncrementalAABBTreeNode* node, const PoolIndex index, const Vec4V& minV, const Vec4V& maxV, const PxBounds3* bounds)
{
PX_ASSERT(node->isLeaf());
IncrementalAABBTreeNode* returnNode = NULL;
	// create a new pair of nodes; the parent will remain the node (the one we split)
IncrementalAABBTreeNode* child0 = reinterpret_cast<IncrementalAABBTreeNode*>(mNodesPool.allocate());
IncrementalAABBTreeNode* child1 = child0 + 1;
AABBTreeIndices* newIndices = mIndicesPool.allocate();
// get the split axis
PX_ALIGN(16, PxVec4) vars;
PX_ALIGN(16, PxVec4) center;
const float half = 0.5f;
const FloatV halfV = FLoad(half);
const Vec4V newMinV = V4Min(node->mBVMin, minV);
const Vec4V newMaxV = V4Max(node->mBVMax, maxV);
const Vec4V centerV = V4Scale(V4Add(newMaxV, newMinV), halfV);
const Vec4V varsV = V4Sub(newMaxV, newMinV);
V4StoreA(varsV, &vars.x);
	V4StoreA(centerV, &center.x);
const PxU32 axis = PxLargestAxis(PxVec3(vars.x, vars.y, vars.z));
// setup parent
child0->mParent = node;
child1->mParent = node;
child0->mIndices = node->mIndices;
child0->mChilds[1] = NULL;
child1->mIndices = newIndices;
child1->mChilds[1] = NULL;
AABBTreeIndices& child0Indices = *child0->mIndices; // the original node indices
AABBTreeIndices& child1Indices = *child1->mIndices; // new empty indices
child1Indices.nbIndices = 0;
// split the node
for(PxU32 i = child0Indices.nbIndices; i--;)
{
const PxBounds3& primitiveBounds = bounds[child0Indices.indices[i]];
const float pCenter = primitiveBounds.getCenter(axis);
if(center[axis] >= pCenter)
{
// move to new node
child1Indices.indices[child1Indices.nbIndices++] = child0Indices.indices[i];
child0Indices.nbIndices--;
child0Indices.indices[i] = child0Indices.indices[child0Indices.nbIndices];
}
}
	// check where to put the new primitive, if there is still free space
if(child0Indices.nbIndices == 0 || child1Indices.nbIndices == INCR_NB_OBJECTS_PER_NODE)
{
child0Indices.nbIndices = 1;
child0Indices.indices[0] = index;
returnNode = child0;
}
else
{
if(child0Indices.nbIndices == INCR_NB_OBJECTS_PER_NODE)
{
child1Indices.nbIndices = 1;
child1Indices.indices[0] = index;
returnNode = child1;
}
else
{
const PxBounds3& primitiveBounds = bounds[index];
const float pCenter = primitiveBounds.getCenter(axis);
if(center[axis] >= pCenter)
{
// move to new node
child1Indices.indices[child1Indices.nbIndices++] = index;
returnNode = child1;
}
else
{
// move to old node
child0Indices.indices[child0Indices.nbIndices++] = index;
returnNode = child0;
}
}
}
// update bounds for the new nodes
Vec4V bvMin = V4LoadU(&bounds[child0Indices.indices[0]].minimum.x);
Vec4V bvMax = V4LoadU(&bounds[child0Indices.indices[0]].maximum.x);
for(PxU32 i = 1; i < child0Indices.nbIndices; i++)
{
const Vec4V nodeMinV = V4LoadU(&bounds[child0Indices.indices[i]].minimum.x);
const Vec4V nodeMaxV = V4LoadU(&bounds[child0Indices.indices[i]].maximum.x);
bvMin = V4Min(bvMin, nodeMinV);
bvMax = V4Max(bvMax, nodeMaxV);
}
child0->mBVMin = V4ClearW(bvMin);
child0->mBVMax = V4ClearW(bvMax);
bvMin = V4LoadU(&bounds[child1Indices.indices[0]].minimum.x);
bvMax = V4LoadU(&bounds[child1Indices.indices[0]].maximum.x);
for(PxU32 i = 1; i < child1Indices.nbIndices; i++)
{
const Vec4V nodeMinV = V4LoadU(&bounds[child1Indices.indices[i]].minimum.x);
const Vec4V nodeMaxV = V4LoadU(&bounds[child1Indices.indices[i]].maximum.x);
bvMin = V4Min(bvMin, nodeMinV);
bvMax = V4Max(bvMax, nodeMaxV);
}
child1->mBVMin = V4ClearW(bvMin);
child1->mBVMax = V4ClearW(bvMax);
	// the node's parent stays the same; set up the new children
node->mChilds[0] = child0;
node->mChilds[1] = child1;
node->mBVMin = newMinV;
node->mBVMax = newMaxV;
updateHierarchyAfterInsert(node);
PX_ASSERT(returnNode);
return returnNode;
}
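// Walkthrough of splitLeafNode() (editorial): the merged bounds of the full leaf
// plus the incoming primitive select the largest axis; primitives whose center
// lies at or below the box center along that axis move to child1, the rest stay
// in child0. If the partition degenerates (one side empty or taking everything),
// the new primitive alone is placed in the starved child, so both children are
// always non-empty and the function always returns the leaf holding 'index'.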
void IncrementalAABBTree::rotateTree(IncrementalAABBTreeNode* node, NodeList& changedLeaf, PxU32 largesRotateNodeIn, const PxBounds3* bounds, bool rotateAgain)
{
PX_ASSERT(!node->isLeaf());
IncrementalAABBTreeNode* smallerNode = node->mChilds[(largesRotateNodeIn == 0) ? 1 : 0];
IncrementalAABBTreeNode* largerNode = node->mChilds[largesRotateNodeIn];
PX_ASSERT(!largerNode->isLeaf());
	// take a leaf from the larger node and add it to the smaller node
const Vec4V testCenterV = V4Add(smallerNode->mBVMax, smallerNode->mBVMin);
	IncrementalAABBTreeNode* rotationNode = NULL; // store a node that looks unbalanced
PxU32 largesRotateNode = 0;
bool rotateNode = false;
PxU32 traversalIndex = traversalDirection(*largerNode->mChilds[0], *largerNode->mChilds[1], testCenterV, false, rotateNode, largesRotateNode);
IncrementalAABBTreeNode* closestNode = largerNode->mChilds[traversalIndex];
while(!closestNode->isLeaf())
{
PxPrefetchLine(closestNode->mChilds[0]->mChilds[0]);
PxPrefetchLine(closestNode->mChilds[1]->mChilds[0]);
traversalIndex = traversalDirection(*closestNode->mChilds[0], *closestNode->mChilds[1], testCenterV, false, rotateNode, largesRotateNode);
closestNode = closestNode->mChilds[traversalIndex];
}
// we have the leaf that we want to rotate
	// remove the current leaf and fold its sibling into the parent
changedLeaf.findAndReplaceWithLast(closestNode);
IncrementalAABBTreeNode* parent = closestNode->mParent;
IncrementalAABBTreeNodePair* removedPair = reinterpret_cast<IncrementalAABBTreeNodePair*>(parent->mChilds[0]);
PX_ASSERT(!parent->isLeaf());
// copy the remaining child into parent
IncrementalAABBTreeNode* remainingChild = (parent->mChilds[0] == closestNode) ? parent->mChilds[1] : parent->mChilds[0];
parent->mBVMax = remainingChild->mBVMax;
parent->mBVMin = remainingChild->mBVMin;
if(remainingChild->isLeaf())
{
parent->mIndices = remainingChild->mIndices;
parent->mChilds[1] = NULL;
changedLeaf.findAndReplaceWithLast(remainingChild);
changedLeaf.pushBack(parent);
}
else
{
parent->mChilds[0] = remainingChild->mChilds[0];
parent->mChilds[0]->mParent = parent;
parent->mChilds[1] = remainingChild->mChilds[1];
parent->mChilds[1]->mParent = parent;
}
	// update the hierarchy after the node removal
if(parent->mParent)
{
updateHierarchyAfterRemove(parent->mParent, bounds);
}
// find new spot for the node
	// take a leaf from the larger node and add it to the smaller node
IncrementalAABBTreeNode* newSpotNode = NULL;
if(smallerNode->isLeaf())
{
newSpotNode = smallerNode;
}
else
{
const Vec4V testClosestNodeCenterV = V4Add(closestNode->mBVMax, closestNode->mBVMin);
		rotationNode = NULL; // store a node that looks unbalanced
largesRotateNode = 0;
rotateNode = false;
bool testRotation = rotateAgain;
traversalIndex = traversalDirection(*smallerNode->mChilds[0], *smallerNode->mChilds[1], testClosestNodeCenterV, testRotation, rotateNode, largesRotateNode);
if(rotateNode && !smallerNode->mChilds[largesRotateNode]->isLeaf())
{
rotationNode = smallerNode;
testRotation = false;
}
newSpotNode = smallerNode->mChilds[traversalIndex];
while(!newSpotNode->isLeaf())
{
PxPrefetchLine(newSpotNode->mChilds[0]->mChilds[0]);
PxPrefetchLine(newSpotNode->mChilds[1]->mChilds[0]);
traversalIndex = traversalDirection(*newSpotNode->mChilds[0], *newSpotNode->mChilds[1], testClosestNodeCenterV, testRotation, rotateNode, largesRotateNode);
if(!rotationNode && rotateNode && !newSpotNode->mChilds[largesRotateNode]->isLeaf())
{
rotationNode = newSpotNode;
testRotation = false;
}
newSpotNode = newSpotNode->mChilds[traversalIndex];
}
}
	// we have the closest leaf in the smaller child, let's merge it with the closestNode
if(newSpotNode->getNbPrimitives() + closestNode->getNbPrimitives() <= INCR_NB_OBJECTS_PER_NODE)
{
		// all primitives fit into the new spot, so we simply merge here
AABBTreeIndices* targetIndices = newSpotNode->mIndices;
const AABBTreeIndices* sourceIndices = closestNode->mIndices;
for(PxU32 i = 0; i < sourceIndices->nbIndices; i++)
{
targetIndices->indices[targetIndices->nbIndices++] = sourceIndices->indices[i];
}
PX_ASSERT(targetIndices->nbIndices <= INCR_NB_OBJECTS_PER_NODE);
if(changedLeaf.find(newSpotNode) == changedLeaf.end())
changedLeaf.pushBack(newSpotNode);
mIndicesPool.deallocate(closestNode->mIndices);
newSpotNode->mBVMin = V4Min(newSpotNode->mBVMin, closestNode->mBVMin);
newSpotNode->mBVMax = V4Max(newSpotNode->mBVMax, closestNode->mBVMax);
updateHierarchyAfterInsert(newSpotNode);
}
else
{
		// we need to make a new parent with newSpotNode and closestNode as children
		// create a new pair of nodes; the parent will remain the node (the one we split)
IncrementalAABBTreeNode* child0 = reinterpret_cast<IncrementalAABBTreeNode*>(mNodesPool.allocate());
IncrementalAABBTreeNode* child1 = child0 + 1;
// setup parent
child0->mParent = newSpotNode;
child1->mParent = newSpotNode;
child0->mIndices = newSpotNode->mIndices;
child0->mChilds[1] = NULL;
child0->mBVMin = newSpotNode->mBVMin;
child0->mBVMax = newSpotNode->mBVMax;
child1->mIndices = closestNode->mIndices;
child1->mChilds[1] = NULL;
child1->mBVMin = closestNode->mBVMin;
child1->mBVMax = closestNode->mBVMax;
		// the node's parent stays the same; set up the new children
newSpotNode->mChilds[0] = child0;
newSpotNode->mChilds[1] = child1;
newSpotNode->mBVMin = V4Min(child0->mBVMin, child1->mBVMin);
newSpotNode->mBVMax = V4Max(child0->mBVMax, child1->mBVMax);
updateHierarchyAfterInsert(newSpotNode);
changedLeaf.findAndReplaceWithLast(newSpotNode);
changedLeaf.pushBack(child0);
changedLeaf.pushBack(child1);
}
// deallocate the closestNode, it has been moved
#if DEALLOCATE_RESET
removedPair->mNode0.mChilds[0] = NULL;
removedPair->mNode0.mChilds[1] = NULL;
removedPair->mNode1.mChilds[0] = NULL;
removedPair->mNode1.mChilds[1] = NULL;
#endif
mNodesPool.deallocate(removedPair);
	// try one more rotation for the newly added part of the tree
if(rotationNode)
{
rotateTree(rotationNode, changedLeaf, largesRotateNode, bounds, false);
}
}
// insert new bounds into tree
IncrementalAABBTreeNode* IncrementalAABBTree::insert(const PoolIndex index, const PxBounds3* bounds, NodeList& changedLeaf)
{
PX_SIMD_GUARD;
// get the bounds, reset the W value
const Vec4V minV = V4ClearW(V4LoadU(&bounds[index].minimum.x));
const Vec4V maxV = V4ClearW(V4LoadU(&bounds[index].maximum.x));
// check if tree is empty
if(!mRoot)
{
// make it a leaf
AABBTreeIndices* indices = mIndicesPool.construct(index);
mRoot = reinterpret_cast<IncrementalAABBTreeNode*> (mNodesPool.allocate());
mRoot->mBVMin = minV;
mRoot->mBVMax = maxV;
mRoot->mIndices = indices;
mRoot->mChilds[1] = NULL;
mRoot->mParent = NULL;
return mRoot;
}
else
{
// check if root is a leaf
if(mRoot->isLeaf())
{
		// check if we can still insert the primitive into the leaf, or if we need to split
if(mRoot->getNbPrimitives() < INCR_NB_OBJECTS_PER_NODE)
{
// simply add the primitive into the current leaf
addPrimitiveIntoNode(mRoot, index, minV, maxV);
return mRoot;
}
else
{
// need to split the node
			// if the leaf is already marked as changed, we need to remove it from the list (it is about to be split)
if(!changedLeaf.empty())
{
PX_ASSERT(changedLeaf.size() == 1);
if(changedLeaf[0] == mRoot)
changedLeaf.popBack();
}
IncrementalAABBTreeNode* retNode = splitLeafNode(mRoot, index, minV, maxV, bounds);
mRoot = retNode->mParent;
IncrementalAABBTreeNode* sibling = (mRoot->mChilds[0] == retNode) ? mRoot->mChilds[1] : mRoot->mChilds[0];
if(sibling->isLeaf())
changedLeaf.pushBack(sibling);
changedLeaf.pushBack(retNode);
return retNode;
}
}
else
{
const Vec4V testCenterV = V4Add(maxV, minV);
IncrementalAABBTreeNode* returnNode = NULL;
IncrementalAABBTreeNode* rotationNode = NULL; // store a node that seems not balanced
PxU32 largesRotateNode = 0;
bool rotateNode = false;
#if SUPPORT_TREE_ROTATION
bool testRotation = true;
#else
bool testRotation = false;
#endif
		// we don't need to modify the root; let's traverse the tree to find the right spot
PxU32 traversalIndex = traversalDirection(*mRoot->mChilds[0], *mRoot->mChilds[1], testCenterV, testRotation, rotateNode, largesRotateNode);
if(rotateNode && !mRoot->mChilds[largesRotateNode]->isLeaf())
{
rotationNode = mRoot;
testRotation = false;
}
IncrementalAABBTreeNode* baseNode = mRoot->mChilds[traversalIndex];
while(!baseNode->isLeaf())
{
PxPrefetchLine(baseNode->mChilds[0]->mChilds[0]);
PxPrefetchLine(baseNode->mChilds[1]->mChilds[0]);
traversalIndex = traversalDirection(*baseNode->mChilds[0], *baseNode->mChilds[1], testCenterV, testRotation, rotateNode, largesRotateNode);
if(!rotationNode && rotateNode && !baseNode->mChilds[largesRotateNode]->isLeaf())
{
rotationNode = baseNode;
testRotation = false;
}
baseNode = baseNode->mChilds[traversalIndex];
}
		// check if we can still insert the primitive into the leaf, or if we need to split
if(baseNode->getNbPrimitives() < INCR_NB_OBJECTS_PER_NODE)
{
// simply add the primitive into the current leaf
addPrimitiveIntoNode(baseNode, index, minV, maxV);
returnNode = baseNode;
if(!changedLeaf.empty())
{
PX_ASSERT(changedLeaf.size() == 1);
if(changedLeaf[0] != baseNode)
changedLeaf.pushBack(baseNode);
}
else
changedLeaf.pushBack(baseNode);
}
else
{
// split
			// if the leaf is already marked as changed, we need to remove it from the list (it is about to be split)
if(!changedLeaf.empty())
{
PX_ASSERT(changedLeaf.size() == 1);
if(changedLeaf[0] == baseNode)
changedLeaf.popBack();
}
IncrementalAABBTreeNode* retNode = splitLeafNode(baseNode, index, minV, maxV, bounds);
const IncrementalAABBTreeNode* splitParent = retNode->mParent;
changedLeaf.pushBack(splitParent->mChilds[0]);
changedLeaf.pushBack(splitParent->mChilds[1]);
returnNode = retNode;
}
if(rotationNode)
{
rotateTree(rotationNode, changedLeaf, largesRotateNode, bounds, true);
returnNode = NULL;
}
return returnNode;
}
}
}
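// Usage sketch (editorial): inserting a new object and keeping an external
// object->leaf mapping up to date. A NULL return means a rotation moved leaves
// around, so the mapping must be refreshed from the changed-leaf list instead:
//
//	NodeList changedLeaves;
//	IncrementalAABBTreeNode* leaf = tree.insert(poolIndex, bounds, changedLeaves);
//	// if 'leaf' is NULL, walk 'changedLeaves' and remap every primitive stored
//	// in each changed leaf to that leaf.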
// update the index, do a full remove/insert update
IncrementalAABBTreeNode* IncrementalAABBTree::update(IncrementalAABBTreeNode* node, const PoolIndex index, const PxBounds3* bounds, NodeList& changedLeaf)
{
PX_SIMD_GUARD;
IncrementalAABBTreeNode* removedNode = remove(node, index, bounds);
if(removedNode && removedNode->isLeaf())
{
changedLeaf.pushBack(removedNode);
}
return insert(index, bounds, changedLeaf);
}
// update the index, faster version with a lazy update of objects that moved just a bit
IncrementalAABBTreeNode* IncrementalAABBTree::updateFast(IncrementalAABBTreeNode* node, const PoolIndex index, const PxBounds3* bounds, NodeList& changedLeaf)
{
PX_SIMD_GUARD;
const Vec4V minV = V4ClearW(V4LoadU(&bounds[index].minimum.x));
const Vec4V maxV = V4ClearW(V4LoadU(&bounds[index].maximum.x));
	// for updateFast, we don't care if the tree slowly gets unbalanced, since we are already building a new tree
if(nodeIntersection(*node, minV, maxV))
{
updateHierarchyAfterRemove(node, bounds);
return node;
}
else
{
IncrementalAABBTreeNode* removedNode = remove(node, index, bounds);
if(removedNode && removedNode->isLeaf())
{
changedLeaf.pushBack(removedNode);
}
return insert(index, bounds, changedLeaf);
}
}
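// Note on the fast path above (editorial): when the new bounds still intersect
// the node's current volume, the object stays in its leaf and only the node and
// its ancestors are refitted via updateHierarchyAfterRemove(); the full
// remove+insert (and any rebalancing) is deferred to the background tree rebuild.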
// remove a primitive from the tree; returns the parent node if the removed leaf's sibling was folded into it
IncrementalAABBTreeNode* IncrementalAABBTree::remove(IncrementalAABBTreeNode* node, const PoolIndex index, const PxBounds3* bounds)
{
PX_SIMD_GUARD;
PX_ASSERT(node->isLeaf());
	// if the leaf holds more than one primitive, we just remove the primitive from the list
if(node->getNbPrimitives() > 1)
{
removePrimitiveFromNode(node, index);
// update the hierarchy
updateHierarchyAfterRemove(node, bounds);
return NULL;
}
else
{
		// if this is the root node and its last primitive, remove the root
if(node == mRoot)
{
#if DEALLOCATE_RESET
IncrementalAABBTreeNodePair* removedPair = reinterpret_cast<IncrementalAABBTreeNodePair*>(node);
removedPair->mNode0.mChilds[0] = NULL;
removedPair->mNode0.mChilds[1] = NULL;
removedPair->mNode1.mChilds[0] = NULL;
removedPair->mNode1.mChilds[1] = NULL;
#endif
mNodesPool.deallocate(reinterpret_cast<IncrementalAABBTreeNodePair*>(node));
mRoot = NULL;
return NULL;
}
else
{
			// remove the current leaf and fold its sibling into the parent
IncrementalAABBTreeNode* parent = node->mParent;
IncrementalAABBTreeNodePair* removedPair = reinterpret_cast<IncrementalAABBTreeNodePair*>(parent->mChilds[0]);
PX_ASSERT(!parent->isLeaf());
// copy the remaining child into parent
IncrementalAABBTreeNode* remainingChild = (parent->mChilds[0] == node) ? parent->mChilds[1] : parent->mChilds[0];
parent->mBVMax = remainingChild->mBVMax;
parent->mBVMin = remainingChild->mBVMin;
if(remainingChild->isLeaf())
{
parent->mIndices = remainingChild->mIndices;
parent->mChilds[1] = NULL;
}
else
{
parent->mChilds[0] = remainingChild->mChilds[0];
parent->mChilds[0]->mParent = parent;
parent->mChilds[1] = remainingChild->mChilds[1];
parent->mChilds[1]->mParent = parent;
}
if(parent->mParent)
{
updateHierarchyAfterRemove(parent->mParent, bounds);
}
mIndicesPool.deallocate(node->mIndices);
#if DEALLOCATE_RESET
removedPair->mNode0.mChilds[0] = NULL;
removedPair->mNode0.mChilds[1] = NULL;
removedPair->mNode1.mChilds[0] = NULL;
removedPair->mNode1.mChilds[1] = NULL;
#endif
mNodesPool.deallocate(removedPair);
return parent;
}
}
}
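// Usage sketch (editorial): when remove() returns a non-NULL parent that became
// a leaf, the primitives of the absorbed sibling now live in that parent, so an
// external object->leaf mapping must be redirected to it (this is why update()
// above pushes such nodes into 'changedLeaf').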
// fixup the indices
void IncrementalAABBTree::fixupTreeIndices(IncrementalAABBTreeNode* node, const PoolIndex index, const PoolIndex newIndex)
{
PX_ASSERT(node->isLeaf());
AABBTreeIndices& indices = *node->mIndices;
for(PxU32 i = 0; i < indices.nbIndices; i++)
{
if(indices.indices[i] == index)
{
indices.indices[i] = newIndex;
return;
}
}
PX_ASSERT(0);
}
// shift node
static void shiftNode(IncrementalAABBTreeNode* node, const Vec4V& shiftV)
{
node->mBVMax = V4Sub(node->mBVMax, shiftV);
node->mBVMin = V4Sub(node->mBVMin, shiftV);
if(!node->isLeaf())
{
shiftNode(node->mChilds[0], shiftV);
shiftNode(node->mChilds[1], shiftV);
}
}
// shift origin
void IncrementalAABBTree::shiftOrigin(const PxVec3& shift)
{
if(mRoot)
{
const Vec4V shiftV = V4ClearW(V4LoadU(&shift.x));
shiftNode(mRoot, shiftV);
}
}
static void checkNode(IncrementalAABBTreeNode* node, IncrementalAABBTreeNode* parent, const PxBounds3* bounds, PoolIndex maxIndex, PxU32& numIndices, PxU32& numNodes)
{
PX_ASSERT(node->mParent == parent);
PX_ASSERT(!parent->isLeaf());
PX_ASSERT(parent->mChilds[0] == node || parent->mChilds[1] == node);
numNodes++;
if(!node->isLeaf())
{
PX_ASSERT(nodeInsideBounds(node->mChilds[0]->mBVMin, node->mChilds[0]->mBVMax, node->mBVMin, node->mBVMax));
PX_ASSERT(nodeInsideBounds(node->mChilds[1]->mBVMin, node->mChilds[1]->mBVMax, node->mBVMin, node->mBVMax));
const Vec4V testMinV = V4Min(parent->mChilds[0]->mBVMin, parent->mChilds[1]->mBVMin);
const Vec4V testMaxV = V4Max(parent->mChilds[0]->mBVMax, parent->mChilds[1]->mBVMax);
PX_UNUSED(testMinV);
PX_UNUSED(testMaxV);
PX_ASSERT(nodeInsideBounds(node->mBVMin, node->mBVMax, testMinV, testMaxV));
checkNode(node->mChilds[0], node, bounds, maxIndex, numIndices, numNodes);
checkNode(node->mChilds[1], node, bounds, maxIndex, numIndices, numNodes);
}
else
{
const AABBTreeIndices& indices = *node->mIndices;
PX_ASSERT(indices.nbIndices);
Vec4V testMinV = V4ClearW(V4LoadU(&bounds[indices.indices[0]].minimum.x));
Vec4V testMaxV = V4ClearW(V4LoadU(&bounds[indices.indices[0]].maximum.x));
for(PxU32 i = 0; i < indices.nbIndices; i++)
{
PX_ASSERT(indices.indices[i] < maxIndex);
numIndices++;
const Vec4V minV = V4ClearW(V4LoadU(&bounds[indices.indices[i]].minimum.x));
const Vec4V maxV = V4ClearW(V4LoadU(&bounds[indices.indices[i]].maximum.x));
testMinV = V4Min(testMinV, minV);
testMaxV = V4Max(testMaxV, maxV);
PX_ASSERT(nodeInsideBounds(minV, maxV, node->mBVMin, node->mBVMax));
}
PX_ASSERT(boundsEqual(testMinV, testMaxV, node->mBVMin, node->mBVMax));
}
}
void IncrementalAABBTree::hierarchyCheck(PoolIndex maxIndex, const PxBounds3* bounds)
{
PxU32 numHandles = 0;
PxU32 numPosNodes = 0;
PxU32 numNegNodes = 0;
if(mRoot && !mRoot->isLeaf())
{
checkNode(mRoot->mChilds[0], mRoot, bounds, maxIndex, numHandles, numPosNodes);
checkNode(mRoot->mChilds[1], mRoot, bounds, maxIndex, numHandles, numNegNodes);
PX_ASSERT(numHandles == maxIndex);
}
}
void IncrementalAABBTree::hierarchyCheck(const PxBounds3* bounds)
{
PxU32 numHandles = 0;
PxU32 numPosNodes = 0;
PxU32 numNegNodes = 0;
if(mRoot && !mRoot->isLeaf())
{
checkNode(mRoot->mChilds[0], mRoot, bounds, 0xFFFFFFFF, numHandles, numPosNodes);
checkNode(mRoot->mChilds[1], mRoot, bounds, 0xFFFFFFFF, numHandles, numNegNodes);
}
}
void IncrementalAABBTree::checkTreeLeaf(IncrementalAABBTreeNode* leaf, PoolIndex h)
{
PX_ASSERT(leaf->isLeaf());
const AABBTreeIndices& indices = *leaf->mIndices;
bool found = false;
for(PxU32 i = 0; i < indices.nbIndices; i++)
{
if(indices.indices[i] == h)
{
found = true;
break;
}
}
PX_UNUSED(found);
PX_ASSERT(found);
}
PxU32 IncrementalAABBTree::getTreeLeafDepth(IncrementalAABBTreeNode* leaf)
{
PxU32 depth = 1;
IncrementalAABBTreeNode* parent = leaf->mParent;
while(parent)
{
depth++;
parent = parent->mParent;
}
return depth;
}
// build the tree from given bounds
bool IncrementalAABBTree::build(const AABBTreeBuildParams& params, PxArray<IncrementalAABBTreeNode*>& mapping)
{
// Init stats
BuildStats stats;
const PxU32 nbPrimitives = params.mNbPrimitives;
if (!nbPrimitives)
return false;
PxU32* indices = buildAABBTree(params, mNodeAllocator, stats);
PX_ASSERT(indices);
PX_FREE(params.mCache);
IncrementalAABBTreeNode** treeNodes = PX_ALLOCATE(IncrementalAABBTreeNode*, stats.getCount(), "temp node helper array");
PxMemSet(treeNodes, 0, sizeof(IncrementalAABBTreeNode*)*(stats.getCount()));
clone(mapping, indices, treeNodes);
mRoot = treeNodes[0];
mRoot->mParent = NULL;
PX_FREE(indices);
PX_FREE(treeNodes);
mNodeAllocator.release();
return true;
}
// clone the tree; the tree was computed in the NodeAllocator, similar to AABBTree flatten
void IncrementalAABBTree::clone(PxArray<IncrementalAABBTreeNode*>& mapping, const PxU32* _indices, IncrementalAABBTreeNode** treeNodes)
{
PxU32 offset = 0;
const PxU32 nbSlabs = mNodeAllocator.mSlabs.size();
for (PxU32 s = 0; s<nbSlabs; s++)
{
const NodeAllocator::Slab& currentSlab = mNodeAllocator.mSlabs[s];
AABBTreeBuildNode* pool = currentSlab.mPool;
for (PxU32 i = 0; i < currentSlab.mNbUsedNodes; i++)
{
IncrementalAABBTreeNode* destNode = treeNodes[offset];
if(!destNode)
{
destNode = reinterpret_cast<IncrementalAABBTreeNode*>(mNodesPool.allocate());
treeNodes[offset] = destNode;
}
destNode->mBVMin = V4ClearW(V4LoadU(&pool[i].mBV.minimum.x));
destNode->mBVMax = V4ClearW(V4LoadU(&pool[i].mBV.maximum.x));
if (pool[i].isLeaf())
{
AABBTreeIndices* indices = mIndicesPool.allocate();
destNode->mIndices = indices;
destNode->mChilds[1] = NULL;
indices->nbIndices = pool[i].getNbPrimitives();
PX_ASSERT(indices->nbIndices <= 16);
const PxU32* sourceIndices = _indices + pool[i].mNodeIndex;
for (PxU32 iIndices = 0; iIndices < indices->nbIndices; iIndices++)
{
const PxU32 sourceIndex = sourceIndices[iIndices];
indices->indices[iIndices] = sourceIndex;
PX_ASSERT(sourceIndex < mapping.size());
mapping[sourceIndex] = destNode;
}
}
else
{
PX_ASSERT(pool[i].mPos);
PxU32 localNodeIndex = 0xffffffff;
PxU32 nodeBase = 0;
for (PxU32 j = 0; j<nbSlabs; j++)
{
if (pool[i].mPos >= mNodeAllocator.mSlabs[j].mPool && pool[i].mPos < mNodeAllocator.mSlabs[j].mPool + mNodeAllocator.mSlabs[j].mNbUsedNodes)
{
localNodeIndex = PxU32(pool[i].mPos - mNodeAllocator.mSlabs[j].mPool);
break;
}
nodeBase += mNodeAllocator.mSlabs[j].mNbUsedNodes;
}
const PxU32 nodeIndex = nodeBase + localNodeIndex;
IncrementalAABBTreeNode* child0 = treeNodes[nodeIndex];
IncrementalAABBTreeNode* child1 = treeNodes[nodeIndex + 1];
if(!child0)
{
PX_ASSERT(!child1);
child0 = reinterpret_cast<IncrementalAABBTreeNode*>(mNodesPool.allocate());
child1 = child0 + 1;
treeNodes[nodeIndex] = child0;
treeNodes[nodeIndex + 1] = child1;
}
destNode->mChilds[0] = child0;
destNode->mChilds[1] = child1;
child0->mParent = destNode;
child1->mParent = destNode;
}
offset++;
}
}
}
void IncrementalAABBTree::copyNode(IncrementalAABBTreeNode& destNode, const BVHNode& sourceNode,
const BVHNode* nodeBase, IncrementalAABBTreeNode* parent, const PxU32* primitivesBase,
PxArray<IncrementalAABBTreeNode*>& mapping)
{
destNode.mParent = parent;
destNode.mBVMin = V4ClearW(V4LoadU(&sourceNode.mBV.minimum.x));
destNode.mBVMax = V4ClearW(V4LoadU(&sourceNode.mBV.maximum.x));
if(sourceNode.isLeaf())
{
AABBTreeIndices* indices = mIndicesPool.allocate();
destNode.mIndices = indices;
indices->nbIndices = sourceNode.getNbPrimitives();
const PxU32* sourceIndices = sourceNode.getPrimitives(primitivesBase);
for(PxU32 i = 0; i < indices->nbIndices; i++)
{
const PxU32 sourceIndex = sourceIndices[i];
indices->indices[i] = sourceIndex;
mapping[sourceIndex] = &destNode;
}
}
else
{
IncrementalAABBTreeNodePair* nodePair = mNodesPool.construct();
IncrementalAABBTreeNode* child0 = &nodePair->mNode0;
IncrementalAABBTreeNode* child1 = &nodePair->mNode1;
destNode.mChilds[0] = child0;
destNode.mChilds[1] = child1;
copyNode(*destNode.mChilds[0], *sourceNode.getPos(nodeBase), nodeBase, &destNode, primitivesBase, mapping);
copyNode(*destNode.mChilds[1], *sourceNode.getNeg(nodeBase), nodeBase, &destNode, primitivesBase, mapping);
}
}
// build the tree from the prebuilt AABB tree
void IncrementalAABBTree::copy(const BVH& bvh, PxArray<IncrementalAABBTreeNode*>& mapping)
{
if(bvh.getNbBounds() == 0)
return;
IncrementalAABBTreeNodePair* nodePair = mNodesPool.construct();
mRoot = &nodePair->mNode0;
const BVHNode* nodes = bvh.getNodes();
copyNode(*mRoot, *nodes, nodes, NULL, bvh.getIndices(), mapping);
}
| 34,872 | C++ | 31.622077 | 178 | 0.722385 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuMTD.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_MTD_H
#define GU_MTD_H
#include "foundation/PxVec3.h"
#include "foundation/PxTransform.h"
#include "geometry/PxGeometry.h"
namespace physx
{
namespace Gu
{
// PT: we use a define to be able to quickly change the signature of all MTD functions.
// (this also ensures they all use consistent names for passed parameters).
// \param[out] mtd computed depenetration dir
// \param[out] depth computed depenetration depth
// \param[in] geom0 first geometry object
// \param[in] pose0 pose of first geometry object
// \param[in] geom1 second geometry object
// \param[in] pose1 pose of second geometry object
// \param[in] cache optional cached data for triggers
#define GU_MTD_FUNC_PARAMS PxVec3& mtd, PxF32& depth, \
const PxGeometry& geom0, const PxTransform32& pose0, \
const PxGeometry& geom1, const PxTransform32& pose1
// PT: function pointer for Geom-indexed MTD functions
// See GU_MTD_FUNC_PARAMS for function parameters details.
// \return true if an overlap was found, false otherwise
// \note depenetration vector D is equal to mtd * depth. It should be applied to the 1st object, to get out of the 2nd object.
typedef bool (*GeomMTDFunc) (GU_MTD_FUNC_PARAMS);
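// Usage sketch (illustrative only; mtdFn stands for any function matching
// GeomMTDFunc and is not defined in this header):
//
//   PxVec3 mtd; PxF32 depth;
//   if(mtdFn(mtd, depth, geom0, pose0, geom1, pose1))
//   {
//       const PxVec3 D = mtd * depth;   // depenetration vector
//       newPose0.p = pose0.p + D;       // move the 1st object out of the 2nd
//   }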
PX_FORCE_INLINE PxF32 manualNormalize(PxVec3& mtd, const PxVec3& normal, PxReal lenSq)
{
const PxF32 len = PxSqrt(lenSq);
// We do a *manual* normalization to check for singularity condition
if(lenSq < 1e-6f)
mtd = PxVec3(1.0f, 0.0f, 0.0f); // PT: zero normal => pick an arbitrary one
else
mtd = normal * 1.0f / len;
return len;
}
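// Example behaviour (illustrative): manualNormalize(mtd, PxVec3(0.0f), 0.0f)
// sets mtd to (1, 0, 0) and returns 0.0f, so callers always receive a
// unit-length direction even for degenerate inputs.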
}
}
#endif
| 3,284 | C | 42.799999 | 128 | 0.740865 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuSDF.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_SDF_H
#define GU_SDF_H
/** \addtogroup geomutils
@{
*/
#include "common/PxPhysXCommonConfig.h"
#include "foundation/PxVec3.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxArray.h"
#include "foundation/PxMathUtils.h"
namespace physx
{
class PxSDFBuilder;
class PxSerializationContext;
class PxDeserializationContext;
namespace Gu
{
/**
\brief Represents dimensions of signed distance field
*/
class Dim3
{
public:
/**
\brief Constructor
*/
Dim3()
{
}
/**
\brief Constructor
*/
Dim3(PxZERO d) : x(0), y(0), z(0)
{
PX_UNUSED(d);
}
/**
\brief Constructor
*/
Dim3(PxU32 _x, PxU32 _y, PxU32 _z) : x(_x), y(_y), z(_z)
{
}
/**
\brief Copy constructor
*/
Dim3(const Dim3& d) : x(d.x), y(d.y), z(d.z)
{
}
PxU32 x; //!< Size of X dimension
PxU32 y; //!< Size of Y dimension
PxU32 z; //!< Size of Z dimension
};
/**
\brief Represents a signed distance field.
*/
class SDF : public PxUserAllocated
{
public:
// PX_SERIALIZATION
SDF(const PxEMPTY) : mOwnsMemory(false) {}
void exportExtraData(PxSerializationContext& context);
void importExtraData(PxDeserializationContext& context);
static void getBinaryMetaData(PxOutputStream& stream);
//~PX_SERIALIZATION
/**
\brief Constructor
*/
SDF() : mSdf(NULL), mSubgridStartSlots(NULL), mSubgridSdf(NULL), mOwnsMemory(true)
{
}
/**
\brief Constructor
*/
SDF(PxZERO s)
: mMeshLower(PxZero), mSpacing(0.0f), mDims(PxZero), mNumSdfs(0), mSdf(NULL),
mSubgridSize(PxZero), mNumStartSlots(0), mSubgridStartSlots(NULL), mNumSubgridSdfs(0), mSubgridSdf(NULL), mSdfSubgrids3DTexBlockDim(PxZero),
mSubgridsMinSdfValue(0.0f), mSubgridsMaxSdfValue(0.0f), mBytesPerSparsePixel(0), mOwnsMemory(true)
{
PX_UNUSED(s);
}
/**
\brief Copy constructor
*/
SDF(const SDF& sdf)
: mMeshLower(sdf.mMeshLower), mSpacing(sdf.mSpacing), mDims(sdf.mDims), mNumSdfs(sdf.mNumSdfs), mSdf(sdf.mSdf),
mSubgridSize(sdf.mSubgridSize), mNumStartSlots(sdf.mNumStartSlots), mSubgridStartSlots(sdf.mSubgridStartSlots), mNumSubgridSdfs(sdf.mNumSubgridSdfs), mSubgridSdf(sdf.mSubgridSdf), mSdfSubgrids3DTexBlockDim(sdf.mSdfSubgrids3DTexBlockDim),
mSubgridsMinSdfValue(sdf.mSubgridsMinSdfValue), mSubgridsMaxSdfValue(sdf.mSubgridsMaxSdfValue), mBytesPerSparsePixel(sdf.mBytesPerSparsePixel),
mOwnsMemory(true)
{
}
static PX_FORCE_INLINE void decodeTriple(PxU32 id, PxU32& x, PxU32& y, PxU32& z)
{
x = id & 0x000003FF;
id = id >> 10;
y = id & 0x000003FF;
id = id >> 10;
z = id & 0x000003FF;
}
static PX_FORCE_INLINE PxReal decodeSample(PxU8* data, PxU32 index, PxU32 bytesPerSparsePixel, PxReal subgridsMinSdfValue, PxReal subgridsMaxSdfValue)
{
switch (bytesPerSparsePixel)
{
case 1:
return PxReal(data[index]) * (1.0f / 255.0f) * (subgridsMaxSdfValue - subgridsMinSdfValue) + subgridsMinSdfValue;
case 2:
{
PxU16* ptr = reinterpret_cast<PxU16*>(data);
return PxReal(ptr[index]) * (1.0f / 65535.0f) * (subgridsMaxSdfValue - subgridsMinSdfValue) + subgridsMinSdfValue;
}
case 4:
{
//If 4 bytes per subgrid pixel are available, then normal floats are used. No need to
//de-normalize integer values since the floats already contain real distance values
PxReal* ptr = reinterpret_cast<PxReal*>(data);
return ptr[index];
}
default:
PX_ASSERT(0);
}
return 0;
}
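// Worked example (illustrative): with bytesPerSparsePixel == 1,
// subgridsMinSdfValue == -0.1f and subgridsMaxSdfValue == 0.3f, a stored byte
// of 0 decodes to -0.1f, 255 decodes to 0.3f and 128 decodes to roughly 0.1f,
// i.e. the 8 and 16 bit formats are normalized into the [min, max] range.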
PX_PHYSX_COMMON_API PxReal decodeSparse(PxI32 xx, PxI32 yy, PxI32 zz) const;
PX_PHYSX_COMMON_API PxReal decodeDense(PxI32 x, PxI32 y, PxI32 z) const;
PX_FORCE_INLINE PxU32 nbSubgridsX() const
{
return mDims.x / mSubgridSize;
}
PX_FORCE_INLINE PxU32 nbSubgridsY() const
{
return mDims.y / mSubgridSize;
}
PX_FORCE_INLINE PxU32 nbSubgridsZ() const
{
return mDims.z / mSubgridSize;
}
PX_FORCE_INLINE PxVec3 getCellSize() const
{
return PxVec3(mSpacing);
}
PX_FORCE_INLINE bool subgridExists(PxU32 sgX, PxU32 sgY, PxU32 sgZ) const
{
const PxU32 nbX = mDims.x / mSubgridSize;
const PxU32 nbY = mDims.y / mSubgridSize;
//const PxU32 nbZ = mDims.z / mSubgridSize;
PxU32 startId = mSubgridStartSlots[sgZ * (nbX) * (nbY) + sgY * (nbX) + sgX];
return startId != 0xFFFFFFFFu;
}
/**
\brief Destructor
*/
~SDF();
PxReal* allocateSdfs(const PxVec3& meshLower, const PxReal& spacing, const PxU32 dimX, const PxU32 dimY, const PxU32 dimZ,
const PxU32 subgridSize, const PxU32 sdfSubgrids3DTexBlockDimX, const PxU32 sdfSubgrids3DTexBlockDimY, const PxU32 sdfSubgrids3DTexBlockDimZ,
PxReal subgridsMinSdfValue, PxReal subgridsMaxSdfValue, PxU32 bytesPerSparsePixel);
PxVec3 mMeshLower; //!< Lower bound of the original mesh
PxReal mSpacing; //!< Spacing of each sdf voxel
Dim3 mDims; //!< Dimension of the sdf
PxU32 mNumSdfs; //!< Number of sdf values
PxReal* mSdf; //!< Array of sdf
// Additional data to support sparse grid SDFs
PxU32 mSubgridSize; //!< The number of cells in a sparse subgrid block (full block has mSubgridSize^3 cells and (mSubgridSize+1)^3 samples). If set to zero, this indicates that only a dense background grid SDF is used without sparse blocks
PxU32 mNumStartSlots; //!< Array length of mSubgridStartSlots. Only used for serialization
PxU32* mSubgridStartSlots; //!< Array with start indices into the subgrid texture for every subgrid block. 10bits for z coordinate, 10bits for y and 10bits for x
PxU32 mNumSubgridSdfs; //!< Array length of mSubgridSdf. Only used for serialization
PxU8* mSubgridSdf; //!< The data to create the 3d texture containing the packed subgrid blocks. Stored as PxU8 to support multiple formats (8, 16 and 32 bits per pixel)
Dim3 mSdfSubgrids3DTexBlockDim; //!< Subgrid sdf is laid out as a 3d texture including packed blocks of size (mSubgridSize+1)^3
PxReal mSubgridsMinSdfValue; //!< The minimum value over all subgrid blocks. Used if normalized textures are used which is the case for 8 and 16bit formats
PxReal mSubgridsMaxSdfValue; //!< The maximum value over all subgrid blocks. Used if normalized textures are used which is the case for 8 and 16bit formats
PxU32 mBytesPerSparsePixel; //!< The number of bytes per subgrid pixel
bool mOwnsMemory; //!< Only false for binary deserialized data
};
/**
\brief Returns the number of times a point is enclosed by a triangle mesh. Therefore points with a winding number of 0 lie outside of the mesh, others lie inside. The sign of the winding number
is dependent on the triangle orientation. For closed meshes, a robust inside/outside check should not test for a value of 0 exactly; inside = PxAbs(windingNumber) > 0.5f should be preferred.
\param[in] vertices The triangle mesh's vertices
\param[in] indices The triangle mesh's indices
\param[in] numTriangleIndices The number of indices
\param[in] width The number of grid points along the x direction
\param[in] height The number of grid points along the y direction
\param[in] depth The number of grid points along the z direction
\param[out] windingNumbers The winding number for the center of every grid cell, index rule is: index = z * width * height + y * width + x
\param[in] minExtents The grid's lower corner
\param[in] maxExtents The grid's upper corner
\param[out] sampleLocations Optional buffer to output the grid sample locations, index rule is: index = z * width * height + y * width + x
*/
PX_PHYSX_COMMON_API void windingNumbers(const PxVec3* vertices, const PxU32* indices, PxU32 numTriangleIndices, PxU32 width, PxU32 height, PxU32 depth,
PxReal* windingNumbers, PxVec3 minExtents, PxVec3 maxExtents, PxVec3* sampleLocations = NULL);
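// Usage sketch (illustrative; verts/inds/bounds are hypothetical caller data):
//
//   PxArray<PxReal> w(width * height * depth);
//   windingNumbers(verts, inds, numIndices, width, height, depth,
//       w.begin(), bounds.minimum, bounds.maximum);
//   // robust inside test for the sample at (x, y, z):
//   const bool inside = PxAbs(w[z * width * height + y * width + x]) > 0.5f;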
/**
\brief Returns if a point is enclosed by a triangle mesh.
\param[in] vertices The triangle mesh's vertices
\param[in] indices The triangle mesh's indices
\param[in] numTriangleIndices The number of indices
\param[in] width The number of grid points along the x direction
\param[in] height The number of grid points along the y direction
\param[in] depth The number of grid points along the z direction
\param[out] insideResult Booleans that indicate if the center of a grid cell is inside or outside, index rule is: index = z * width * height + y * width + x
\param[in] minExtents The grid's lower corner, the box formed by minExtent and maxExtent must include all vertices
\param[in] maxExtents The grid's upper corner, the box formed by minExtent and maxExtent must include all vertices
\param[out] sampleLocations Optional buffer to output the grid sample locations, index rule is: index = z * width * height + y * width + x
*/
PX_PHYSX_COMMON_API void windingNumbersInsideCheck(const PxVec3* vertices, const PxU32* indices, PxU32 numTriangleIndices, PxU32 width, PxU32 height, PxU32 depth,
bool* insideResult, PxVec3 minExtents, PxVec3 maxExtents, PxVec3* sampleLocations = NULL);
/**
\brief Returns the distance to the mesh's surface for all samples in a grid. The sign is dependent on the triangle orientation. Negative distances indicate that a sample is inside the mesh, positive
distances mean the sample is outside of the mesh.
\param[in] vertices The triangle mesh's vertices
\param[in] indices The triangle mesh's indices
\param[in] numTriangleIndices The number of indices
\param[in] width The number of grid points along the x direction
\param[in] height The number of grid points along the y direction
\param[in] depth The number of grid points along the z direction
\param[out] sdf The signed distance field (negative values indicate that a point is inside of the mesh), index rule is: index = z * width * height + y * width + x
\param[in] minExtents The grid's lower corner, the box formed by minExtent and maxExtent must include all vertices
\param[in] maxExtents The grid's upper corner, the box formed by minExtent and maxExtent must include all vertices
\param[out] sampleLocations Optional buffer to output the grid sample locations, index rule is: index = z * width * height + y * width + x
\param[in] cellCenteredSamples Determines if the sample points are chosen at cell centers or at cell origins
\param[in] numThreads The number of cpu threads to use during the computation
\param[in] sdfBuilder Optional pointer to a sdf builder to accelerate the sdf construction. The pointer is owned by the caller and must remain valid until the function terminates.
*/
PX_PHYSX_COMMON_API void SDFUsingWindingNumbers(const PxVec3* vertices, const PxU32* indices, PxU32 numTriangleIndices, PxU32 width, PxU32 height, PxU32 depth,
PxReal* sdf, PxVec3 minExtents, PxVec3 maxExtents, PxVec3* sampleLocations = NULL, bool cellCenteredSamples = true,
PxU32 numThreads = 1, PxSDFBuilder* sdfBuilder = NULL);
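// Usage sketch (illustrative; verts/inds/bounds are hypothetical caller data):
//
//   PxArray<PxReal> sdf(width * height * depth);
//   SDFUsingWindingNumbers(verts, inds, numIndices, width, height, depth,
//       sdf.begin(), bounds.minimum, bounds.maximum);
//   // sdf[z * width * height + y * width + x] < 0.0f => sample is inside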
/**
\brief Returns the distance to the mesh's surface for all samples in a grid. The sign is dependent on the triangle orientation. Negative distances indicate that a sample is inside the mesh, positive
distances mean the sample is outside of the mesh. Near mesh surfaces, a higher resolution is available than further away from the surface (sparse sdf format) to save memory.
The samples are not cell centered but located at the cell origin. This is a requirement of the sparse grid format.
\param[in] vertices The triangle mesh's vertices
\param[in] indices The triangle mesh's indices
\param[in] numTriangleIndices The number of indices
\param[in] width The number of grid points along the x direction
\param[in] height The number of grid points along the y direction
\param[in] depth The number of grid points along the z direction
\param[in] minExtents The grid's lower corner, the box formed by minExtent and maxExtent must include all vertices
\param[in] maxExtents The grid's upper corner, the box formed by minExtent and maxExtent must include all vertices
\param[in] narrowBandThicknessRelativeToExtentDiagonal The thickness of the narrow band as a fraction of the sdf box diagonal length. Can be as small as 0 but a value of at least 0.01 is recommended.
\param[in] cellsPerSubgrid The number of cells in a sparse subgrid block (full block has cellsPerSubgrid^3 cells and (cellsPerSubgrid+1)^3 samples)
\param[out] sdfCoarse The coarse sdf as a dense 3d array of lower resolution (resolution is (width/cellsPerSubgrid+1, height/cellsPerSubgrid+1, depth/cellsPerSubgrid+1))
\param[out] sdfFineStartSlots The start slot indices of the subgrid blocks. If a subgrid block is empty, the start slot will be 0xFFFFFFFF
\param[out] subgridData The array containing subgrid data blocks
\param[out] denseSdf Provides access to the dense sdf that is used for computation internally
\param[out] subgridsMinSdfValue The minimum value over all subgrid blocks. Used if normalized textures are used which is the case for 8 and 16bit formats
\param[out] subgridsMaxSdfValue The maximum value over all subgrid blocks. Used if normalized textures are used which is the case for 8 and 16bit formats
\param[in] numThreads The number of cpu threads to use during the computation
\param[in] sdfBuilder Optional pointer to a sdf builder to accelerate the sdf construction. The pointer is owned by the caller and must remain valid until the function terminates.
*/
PX_PHYSX_COMMON_API void SDFUsingWindingNumbersSparse(const PxVec3* vertices, const PxU32* indices, PxU32 numTriangleIndices, PxU32 width, PxU32 height, PxU32 depth,
const PxVec3& minExtents, const PxVec3& maxExtents, PxReal narrowBandThicknessRelativeToExtentDiagonal, PxU32 cellsPerSubgrid,
PxArray<PxReal>& sdfCoarse, PxArray<PxU32>& sdfFineStartSlots, PxArray<PxReal>& subgridData, PxArray<PxReal>& denseSdf,
PxReal& subgridsMinSdfValue, PxReal& subgridsMaxSdfValue, PxU32 numThreads = 1, PxSDFBuilder* sdfBuilder = NULL);
PX_PHYSX_COMMON_API void analyzeAndFixMesh(const PxVec3* vertices, const PxU32* indicesOrig, PxU32 numTriangleIndices, PxArray<PxU32>& repairedIndices);
/**
\brief Converts a sparse grid sdf to a format that can be used to create a 3d texture. 3d textures support very efficient
trilinear interpolation on the GPU which is very important during sdf evaluation.
\param[in] width The number of grid points along the x direction
\param[in] height The number of grid points along the y direction
\param[in] depth The number of grid points along the z direction
\param[in] cellsPerSubgrid The number of cells in a sparse subgrid block (full block has cellsPerSubgrid^3 cells and (cellsPerSubgrid+1)^3 samples)
\param[in,out] sdfFineStartSlots Array with linear start indices into the subgrid data array. This array gets converted by this method to start indices for every subgrid block in the 3d texture. The result uses 10bits for z coordinate, 10bits for y and 10bits for x
\param[in] sdfFineSubgridsIn Subgrid data array
\param[in] sdfFineSubgridsSize Number of elements in sdfFineSubgridsIn
\param[out] subgrids3DTexFormat The subgrid data organized in a 3d texture compatible order
\param[out] numSubgridsX Number of subgrid blocks in the 3d texture along x. The full texture dimension along x will be numSubgridsX*(cellsPerSubgrid+1).
\param[out] numSubgridsY Number of subgrid blocks in the 3d texture along y. The full texture dimension along y will be numSubgridsY*(cellsPerSubgrid+1).
\param[out] numSubgridsZ Number of subgrid blocks in the 3d texture along z. The full texture dimension along z will be numSubgridsZ*(cellsPerSubgrid+1).
*/
PX_PHYSX_COMMON_API void convertSparseSDFTo3DTextureLayout(PxU32 width, PxU32 height, PxU32 depth, PxU32 cellsPerSubgrid,
PxU32* sdfFineStartSlots, const PxReal* sdfFineSubgridsIn, PxU32 sdfFineSubgridsSize, PxArray<PxReal>& subgrids3DTexFormat,
PxU32& numSubgridsX, PxU32& numSubgridsY, PxU32& numSubgridsZ);
/**
\brief Extracts an isosurface as a triangular mesh from a signed distance function
\param[in] sdf The signed distance function
\param[out] isosurfaceVertices The vertices of the extracted isosurface
\param[out] isosurfaceTriangleIndices The triangles of the extracted isosurface
\param[in] numThreads The number of threads to use
*/
PX_PHYSX_COMMON_API void extractIsosurfaceFromSDF(const Gu::SDF& sdf, PxArray<PxVec3>& isosurfaceVertices, PxArray<PxU32>& isosurfaceTriangleIndices, PxU32 numThreads = 1);
/**
\brief A class that allows points to be projected efficiently onto the surface of a triangle mesh.
*/
class PxPointOntoTriangleMeshProjector
{
public:
/**
\brief Projects a point onto the surface of a triangle mesh.
\param[in] point The point to project
\return the projected point
*/
virtual PxVec3 projectPoint(const PxVec3& point) = 0;
/**
\brief Projects a point onto the surface of a triangle mesh.
\param[in] point The point to project
\param[out] closestTriangleIndex The index of the triangle on which the projected point is located
\return the projected point
*/
virtual PxVec3 projectPoint(const PxVec3& point, PxU32& closestTriangleIndex) = 0;
/**
\brief Releases the instance and its data
*/
virtual void release() = 0;
};
/**
\brief Creates a helper class that allows points to be projected efficiently onto the surface of a triangle mesh.
\param[in] vertices The triangle mesh's vertices
\param[in] triangleIndices The triangle mesh's indices
\param[in] numTriangles The number of triangles
\return A point onto triangle mesh projector instance. The caller needs to destroy the instance by calling its release method once it is no longer used
*/
PX_PHYSX_COMMON_API PxPointOntoTriangleMeshProjector* PxCreatePointOntoTriangleMeshProjector(const PxVec3* vertices, const PxU32* triangleIndices, PxU32 numTriangles);
/**
\brief Utility to convert from a linear index to x/y/z indices given the grid size (only sizeX and sizeY required)
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE void idToXYZ(PxU32 id, PxU32 sizeX, PxU32 sizeY, PxU32& xi, PxU32& yi, PxU32& zi)
{
xi = id % sizeX; id /= sizeX;
yi = id % sizeY;
zi = id / sizeY;
}
/**
\brief Utility to convert from x/y/z indices to a linear index given the grid size (only width and height required)
*/
PX_FORCE_INLINE PX_CUDA_CALLABLE PxU32 idx3D(PxU32 x, PxU32 y, PxU32 z, PxU32 width, PxU32 height)
{
return (z * height + y) * width + x;
}
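// Round-trip sketch (illustrative): for x < width and y < height the two
// utilities above are inverse to each other:
//
//   const PxU32 id = idx3D(x, y, z, width, height);
//   PxU32 xi, yi, zi;
//   idToXYZ(id, width, height, xi, yi, zi); // xi == x, yi == y, zi == z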
/**
\brief Utility to encode 3 indices into a single integer. Each index is allowed to use up to 10 bits.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxU32 encodeTriple(PxU32 x, PxU32 y, PxU32 z)
{
PX_ASSERT(x >= 0 && x < 1024);
PX_ASSERT(y >= 0 && y < 1024);
PX_ASSERT(z >= 0 && z < 1024);
return (z << 20) | (y << 10) | x;
}
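// Round-trip sketch (illustrative): encodeTriple is the inverse of
// SDF::decodeTriple above, so for any x/y/z below 1024:
//
//   const PxU32 packed = encodeTriple(x, y, z);
//   PxU32 dx, dy, dz;
//   SDF::decodeTriple(packed, dx, dy, dz); // dx == x, dy == y, dz == z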
/**
\brief Computes sample point locations from x/y/z indices
*/
PX_ALIGN_PREFIX(16)
struct GridQueryPointSampler
{
private:
PxVec3 mOrigin;
PxVec3 mCellSize;
PxI32 mOffsetX, mOffsetY, mOffsetZ;
PxI32 mStepX, mStepY, mStepZ;
public:
PX_CUDA_CALLABLE GridQueryPointSampler() {}
PX_CUDA_CALLABLE GridQueryPointSampler(const PxVec3& origin, const PxVec3& cellSize, bool cellCenteredSamples,
PxI32 offsetX = 0, PxI32 offsetY = 0, PxI32 offsetZ = 0, PxI32 stepX = 1, PxI32 stepY = 1, PxI32 stepZ = 1)
: mCellSize(cellSize), mOffsetX(offsetX), mOffsetY(offsetY), mOffsetZ(offsetZ), mStepX(stepX), mStepY(stepY), mStepZ(stepZ)
{
if (cellCenteredSamples)
mOrigin = origin + 0.5f * cellSize;
else
mOrigin = origin;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 getOrigin() const
{
return mOrigin;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 getActiveCellSize() const
{
return PxVec3(mCellSize.x * mStepX, mCellSize.y * mStepY, mCellSize.z * mStepZ);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 getPoint(PxI32 x, PxI32 y, PxI32 z) const
{
return PxVec3(mOrigin.x + (x * mStepX + mOffsetX) * mCellSize.x,
mOrigin.y + (y * mStepY + mOffsetY) * mCellSize.y,
mOrigin.z + (z * mStepZ + mOffsetZ) * mCellSize.z);
}
}
PX_ALIGN_SUFFIX(16);
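// Usage sketch (illustrative, assuming a dense cell-centered sampling of a
// width x height x depth grid starting at minExtents):
//
//   GridQueryPointSampler sampler(minExtents, cellSize, /*cellCenteredSamples=*/true);
//   for(PxU32 z = 0; z < depth; z++)
//       for(PxU32 y = 0; y < height; y++)
//           for(PxU32 x = 0; x < width; x++)
//               samples[idx3D(x, y, z, width, height)] =
//                   sampler.getPoint(PxI32(x), PxI32(y), PxI32(z));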
/**
\brief Represents a dense SDF and allows to evaluate it. Uses trilinear interpolation between samples.
*/
class DenseSDF
{
public:
PxU32 mWidth, mHeight, mDepth;
private:
PxReal* mSdf;
public:
PX_INLINE PX_CUDA_CALLABLE DenseSDF(PxU32 width, PxU32 height, PxU32 depth, PxReal* sdf)
{
initialize(width, height, depth, sdf);
}
DenseSDF() {}
PX_FORCE_INLINE PX_CUDA_CALLABLE void initialize(PxU32 width, PxU32 height, PxU32 depth, PxReal* sdf)
{
this->mWidth = width;
this->mHeight = height;
this->mDepth = depth;
this->mSdf = sdf;
}
PX_FORCE_INLINE PxU32 memoryConsumption()
{
return mWidth * mHeight * mDepth * sizeof(PxReal);
}
PX_INLINE PX_CUDA_CALLABLE PxReal sampleSDFDirect(const PxVec3& samplePoint)
{
const PxU32 xBase = PxClamp(PxU32(samplePoint.x), 0u, mWidth - 2);
const PxU32 yBase = PxClamp(PxU32(samplePoint.y), 0u, mHeight - 2);
const PxU32 zBase = PxClamp(PxU32(samplePoint.z), 0u, mDepth - 2);
return Interpolation::PxTriLerp(
mSdf[idx3D(xBase, yBase, zBase, mWidth, mHeight)],
mSdf[idx3D(xBase + 1, yBase, zBase, mWidth, mHeight)],
mSdf[idx3D(xBase, yBase + 1, zBase, mWidth, mHeight)],
mSdf[idx3D(xBase + 1, yBase + 1, zBase, mWidth, mHeight)],
mSdf[idx3D(xBase, yBase, zBase + 1, mWidth, mHeight)],
mSdf[idx3D(xBase + 1, yBase, zBase + 1, mWidth, mHeight)],
mSdf[idx3D(xBase, yBase + 1, zBase + 1, mWidth, mHeight)],
mSdf[idx3D(xBase + 1, yBase + 1, zBase + 1, mWidth, mHeight)], samplePoint.x - xBase, samplePoint.y - yBase, samplePoint.z - zBase);
}
};
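// Usage sketch (illustrative): sampleSDFDirect expects a point in grid
// coordinates, so a world-space position has to be mapped first. Assuming a
// Gu::SDF instance 'sdf' with a dense grid and samples at the cell origins:
//
//   DenseSDF dense(sdf.mDims.x, sdf.mDims.y, sdf.mDims.z, sdf.mSdf);
//   const PxVec3 gridPoint = (worldPoint - sdf.mMeshLower) / sdf.mSpacing;
//   const PxReal dist = dense.sampleSDFDirect(gridPoint);
//
// (for cell-centered samples an additional offset of half a cell would apply)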
}
}
/** @} */
#endif
| 23,659 | C | 45.120858 | 267 | 0.728602 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuMaverickNode.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_MAVERICK_NODE_H
#define GU_MAVERICK_NODE_H
#include "foundation/PxBounds3.h"
#include "foundation/PxTransform.h"
#include "common/PxPhysXCommonConfig.h"
#include "GuPrunerPayload.h"
#include "GuPrunerTypedef.h"
#define FREE_PRUNER_SIZE 16
#ifdef FREE_PRUNER_SIZE
namespace physx
{
namespace Gu
{
class MaverickNode
{
public:
MaverickNode() : mNbFree(0) {}
~MaverickNode() {}
PX_FORCE_INLINE void release() { mNbFree = 0; }
PX_FORCE_INLINE const PxU32* getPrimitives(const PxU32*) const { return mIndices; }
PX_FORCE_INLINE PxU32 getPrimitiveIndex() const { return 0; }
PX_FORCE_INLINE PxU32 getNbPrimitives() const { return mNbFree; }
bool addObject(const PrunerPayload& object, PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform, PxU32 timeStamp);
bool updateObject(const PrunerPayload& object, const PxBounds3& worldAABB, const PxTransform& transform);
bool updateObject(PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform);
bool removeObject(const PrunerPayload& object, PxU32& timeStamp);
bool removeObject(PrunerHandle handle, PxU32& timeStamp);
PxU32 removeMarkedObjects(PxU32 timeStamp);
void shiftOrigin(const PxVec3& shift);
void remove(PxU32 index);
PxU32 mNbFree; // Current number of objects in the "free array" (mFreeObjects/mFreeBounds)
PrunerPayload mFreeObjects[FREE_PRUNER_SIZE]; // mNbFree objects are stored here
PrunerHandle mFreeHandles[FREE_PRUNER_SIZE]; // mNbFree handles are stored here
PxBounds3 mFreeBounds[FREE_PRUNER_SIZE]; // mNbFree object bounds are stored here
PxTransform mFreeTransforms[FREE_PRUNER_SIZE]; // mNbFree transforms are stored here
PxU32 mFreeStamps[FREE_PRUNER_SIZE];
static const PxU32 mIndices[FREE_PRUNER_SIZE];
};
}
}
#endif
#endif
| 3,637 | C | 42.831325 | 148 | 0.739071 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuPruningPool.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuPruningPool.h"
#include "foundation/PxMemory.h"
#include "common/PxProfileZone.h"
using namespace physx;
using namespace Gu;
PruningPool::PruningPool(PxU64 contextID, TransformCacheMode mode) :
mNbObjects (0),
mMaxNbObjects (0),
mObjects (NULL),
mTransforms (NULL),
mTransformCacheMode (mode),
mHandleToIndex (NULL),
mIndexToHandle (NULL),
mFirstRecycledHandle(INVALID_PRUNERHANDLE),
mContextID (contextID)
{
}
PruningPool::~PruningPool()
{
mWorldBoxes.release();
PX_FREE(mIndexToHandle);
PX_FREE(mHandleToIndex);
PX_FREE(mTransforms);
PX_FREE(mObjects);
}
bool PruningPool::resize(PxU32 newCapacity)
{
PX_PROFILE_ZONE("PruningPool::resize", mContextID);
const bool useTransforms = mTransformCacheMode!=TRANSFORM_CACHE_UNUSED;
PxTransform* newTransforms = useTransforms ? PX_ALLOCATE(PxTransform, newCapacity, "Pruner transforms") : NULL;
if(useTransforms && !newTransforms)
return false;
PrunerPayload* newData = PX_ALLOCATE(PrunerPayload, newCapacity, "PrunerPayload*");
PrunerHandle* newIndexToHandle = PX_ALLOCATE(PrunerHandle, newCapacity, "Pruner Index Mapping");
PoolIndex* newHandleToIndex = PX_ALLOCATE(PoolIndex, newCapacity, "Pruner Index Mapping");
if( (!newData) || (!newIndexToHandle) || (!newHandleToIndex))
{
PX_FREE(newHandleToIndex);
PX_FREE(newIndexToHandle);
PX_FREE(newTransforms);
PX_FREE(newData);
return false;
}
mWorldBoxes.resize(newCapacity, mNbObjects);
if(mObjects) PxMemCopy(newData, mObjects, mNbObjects*sizeof(PrunerPayload));
if(mTransforms) PxMemCopy(newTransforms, mTransforms, mNbObjects*sizeof(PxTransform));
if(mIndexToHandle) PxMemCopy(newIndexToHandle, mIndexToHandle, mNbObjects*sizeof(PrunerHandle));
if(mHandleToIndex) PxMemCopy(newHandleToIndex, mHandleToIndex, mMaxNbObjects*sizeof(PoolIndex)); // PT: why mMaxNbObjects here? on purpose?
mMaxNbObjects = newCapacity;
PX_FREE(mIndexToHandle);
PX_FREE(mHandleToIndex);
PX_FREE(mTransforms);
PX_FREE(mObjects);
mObjects = newData;
mTransforms = newTransforms;
mHandleToIndex = newHandleToIndex;
mIndexToHandle = newIndexToHandle;
return true;
}
void PruningPool::preallocate(PxU32 newCapacity)
{
if(newCapacity>mMaxNbObjects)
resize(newCapacity);
}
PxU32 PruningPool::addObjects(PrunerHandle* results, const PxBounds3* bounds, const PrunerPayload* data, const PxTransform* transforms, PxU32 count)
{
PX_PROFILE_ZONE("PruningPool::addObjects", mContextID);
PX_ASSERT((!transforms && mTransformCacheMode==TRANSFORM_CACHE_UNUSED) || (transforms && mTransformCacheMode!=TRANSFORM_CACHE_UNUSED));
for(PxU32 i=0;i<count;i++)
{
if(mNbObjects==mMaxNbObjects) // increase the capacity on overflow
{
const PxU32 newCapacity = PxU32(float(mMaxNbObjects)*1.5f);
if(!resize(PxMax<PxU32>(newCapacity, 64)))
//if(!resize(PxMax<PxU32>(mMaxNbObjects*2, 64)))
{
// pool can return an invalid handle if memory alloc fails
// should probably have an error here or not handle this
results[i] = INVALID_PRUNERHANDLE; // PT: we need to write the potentially invalid handle to let users know which object failed first
return i;
}
}
PX_ASSERT(mNbObjects!=mMaxNbObjects);
const PoolIndex index = mNbObjects++;
// update mHandleToIndex and mIndexToHandle mappings
PrunerHandle handle;
if(mFirstRecycledHandle != INVALID_PRUNERHANDLE)
{
// mFirstRecycledHandle is an entry into a freelist for removed slots
// this path is only taken if we have any removed slots
handle = mFirstRecycledHandle;
mFirstRecycledHandle = mHandleToIndex[handle];
}
else
{
handle = index;
}
// PT: TODO: investigate why we added mIndexToHandle/mHandleToIndex. The initial design with 'Prunable' objects didn't need these arrays.
// PT: these arrays are "parallel"
mWorldBoxes.getBounds() [index] = bounds[i]; // store the payload/userData and AABB in parallel arrays
mObjects [index] = data[i];
mIndexToHandle [index] = handle;
if(transforms && mTransforms)
mTransforms [index] = transforms[i];
mHandleToIndex[handle] = index;
results[i] = handle;
}
return count;
}
PoolIndex PruningPool::removeObject(PrunerHandle h, PrunerPayloadRemovalCallback* removalCallback)
{
PX_PROFILE_ZONE("PruningPool::removeObject", mContextID);
PX_ASSERT(mNbObjects);
// remove the object and its AABB by provided PrunerHandle and update mHandleToIndex and mIndexToHandle mappings
const PoolIndex indexOfRemovedObject = mHandleToIndex[h]; // retrieve object's index from handle
if(removalCallback)
removalCallback->invoke(1, &mObjects[indexOfRemovedObject]);
const PoolIndex indexOfLastObject = --mNbObjects; // swap the object at last index with index
if(indexOfLastObject!=indexOfRemovedObject)
{
// PT: move last object's data to recycled spot (from removed object)
// PT: the last object has moved so we need to handle the mappings for this object
// PT: TODO: investigate where this double-mapping comes from. It was not needed in the original design.
// PT: these arrays are "parallel"
PxBounds3* bounds = mWorldBoxes.getBounds();
const PrunerHandle handleOfLastObject = mIndexToHandle[indexOfLastObject];
bounds [indexOfRemovedObject] = bounds [indexOfLastObject];
mObjects [indexOfRemovedObject] = mObjects [indexOfLastObject];
if(mTransforms)
mTransforms [indexOfRemovedObject] = mTransforms [indexOfLastObject];
mIndexToHandle [indexOfRemovedObject] = handleOfLastObject;
mHandleToIndex[handleOfLastObject] = indexOfRemovedObject;
}
// mHandleToIndex also stores the freelist for removed handles (in place of holes formed by removed handles)
mHandleToIndex[h] = mFirstRecycledHandle; // update linked list of available recycled handles
mFirstRecycledHandle = h; // update the list head
return indexOfLastObject;
}
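// Mapping invariant maintained above (illustrative summary, not original
// code): for every live handle h and every live pool index i
//
//   mIndexToHandle[mHandleToIndex[h]] == h
//   mHandleToIndex[mIndexToHandle[i]] == i
//
// removeObject() preserves this by moving the last object into the freed slot
// and re-pointing both mappings before pushing h onto the recycled-handle list.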
void PruningPool::shiftOrigin(const PxVec3& shift)
{
PX_PROFILE_ZONE("PruningPool::shiftOrigin", mContextID);
const PxU32 nb = mNbObjects;
PxBounds3* bounds = mWorldBoxes.getBounds();
for(PxU32 i=0; i<nb; i++)
{
bounds[i].minimum -= shift;
bounds[i].maximum -= shift;
}
if(mTransforms && mTransformCacheMode==TRANSFORM_CACHE_GLOBAL)
{
for(PxU32 i=0; i<nb; i++)
mTransforms[i].p -= shift;
}
}
template<const bool hasTransforms>
static void updateAndInflateBounds(PruningPool& pool, const PrunerHandle* PX_RESTRICT handles, const PxU32* PX_RESTRICT boundsIndices, const PxBounds3* PX_RESTRICT newBounds,
const PxTransform32* PX_RESTRICT newTransforms, PxU32 count, float epsilon)
{
PxBounds3* PX_RESTRICT bounds = pool.mWorldBoxes.getBounds();
PxTransform* PX_RESTRICT transforms = hasTransforms ? pool.mTransforms : NULL;
if(boundsIndices)
{
while(count--)
{
const PoolIndex poolIndex = pool.getIndex(*handles++);
PX_ASSERT(poolIndex!=INVALID_PRUNERHANDLE);
const PxU32 remappedIndex = *boundsIndices++;
if(hasTransforms)
transforms[poolIndex] = newTransforms[remappedIndex];
inflateBounds<true>(bounds[poolIndex], newBounds[remappedIndex], epsilon);
}
}
else
{
while(count--)
{
const PoolIndex poolIndex = pool.getIndex(*handles++);
PX_ASSERT(poolIndex!=INVALID_PRUNERHANDLE);
if(hasTransforms)
{
transforms[poolIndex] = *newTransforms;
newTransforms++;
}
inflateBounds<true>(bounds[poolIndex], *newBounds++, epsilon);
}
}
}
void PruningPool::updateAndInflateBounds(const PrunerHandle* handles, const PxU32* boundsIndices, const PxBounds3* newBounds,
const PxTransform32* newTransforms, PxU32 count, float epsilon)
{
PX_PROFILE_ZONE("PruningPool::updateAndInflateBounds", mContextID);
if(mTransforms)
::updateAndInflateBounds<1>(*this, handles, boundsIndices, newBounds, newTransforms, count, epsilon);
else
::updateAndInflateBounds<0>(*this, handles, boundsIndices, newBounds, NULL, count, epsilon);
}
| 9,531 | C++ | 34.834586 | 174 | 0.754066 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuActorShapeMap.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuActorShapeMap.h"
#include "foundation/PxMemory.h"
using namespace physx;
using namespace Gu;
namespace physx
{
namespace Gu
{
/*PX_FORCE_INLINE*/ uint32_t PxComputeHash(const ActorShapeMap::ActorShape& owner)
{
PX_ASSERT(!(size_t(owner.mActor)&3));
PX_ASSERT(!(size_t(owner.mShape)&3));
const uint32_t id0 = uint32_t(size_t(owner.mActor)>>2);
const uint32_t id1 = uint32_t(size_t(owner.mShape)>>2);
const uint64_t mix = (uint64_t(id0)<<32)|uint64_t(id1);
return ::PxComputeHash(mix);
}
}
}
ActorShapeMap::ActorShapeMap() : mCacheSize(0), mCache(NULL)
{
}
ActorShapeMap::~ActorShapeMap()
{
PX_FREE(mCache);
}
void ActorShapeMap::resizeCache(PxU32 index)
{
PxU32 size = mCacheSize ? mCacheSize*2 : 64;
const PxU32 minSize = index+1;
if(minSize>size)
size = minSize*2;
Cache* items = PX_ALLOCATE(Cache, size, "Cache");
if(mCache)
PxMemCopy(items, mCache, mCacheSize*sizeof(Cache));
PxMemZero(items+mCacheSize, (size-mCacheSize)*sizeof(Cache));
PX_FREE(mCache);
mCache = items;
mCacheSize = size;
}
bool ActorShapeMap::add(PxU32 actorIndex, const void* actor, const void* shape, ActorShapeData actorShapeData)
{
if(actorIndex!=PX_INVALID_INDEX)
{
if(actorIndex>=mCacheSize)
resizeCache(actorIndex);
//if(!mCache[actorIndex].mActor)
if(!mCache[actorIndex].mShape)
{
//mCache[actorIndex].mActor = actor;
mCache[actorIndex].mShape = shape;
mCache[actorIndex].mData = actorShapeData;
return true;
}
//PX_ASSERT(mCache[actorIndex].mActor==actor);
PX_ASSERT(mCache[actorIndex].mShape);
if(mCache[actorIndex].mShape==shape)
{
mCache[actorIndex].mData = actorShapeData;
return false;
}
}
return mDatabase.insert(ActorShape(actor, shape), actorShapeData);
}
bool ActorShapeMap::remove(PxU32 actorIndex, const void* actor, const void* shape, ActorShapeData* removed)
{
if(actorIndex!=PX_INVALID_INDEX)
{
//if(mCache[actorIndex].mActor==actor && mCache[actorIndex].mShape==shape)
if(mCache[actorIndex].mShape==shape)
{
//mCache[actorIndex].mActor = NULL;
mCache[actorIndex].mShape = NULL;
PX_ASSERT(!mDatabase.erase(ActorShape(actor, shape)));
if(removed)
*removed = mCache[actorIndex].mData;
return true;
}
}
PxHashMap<ActorShape, ActorShapeData>::Entry removedEntry;
const bool found = mDatabase.erase(ActorShape(actor, shape), removedEntry);
if(found && removed)
*removed = removedEntry.second;
return found;
}
ActorShapeData ActorShapeMap::find(PxU32 actorIndex, const void* actor, const void* shape) const
{
if(actorIndex!=PX_INVALID_INDEX)
{
if(mCache[actorIndex].mShape==shape)
//if(mCache[actorIndex].mActor==actor && mCache[actorIndex].mShape==shape)
{
return mCache[actorIndex].mData;
}
}
const PxHashMap<ActorShape, ActorShapeData>::Entry* e = mDatabase.find(ActorShape(actor, shape));
PX_ASSERT(e);
return e->second;
}
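// Usage sketch (illustrative): callers that can provide a stable actor index
// get the O(1) cache path, PX_INVALID_INDEX falls back to the hash map:
//
//   ActorShapeMap map;
//   map.add(actorIndex, actor, shape, data); // cached if the slot is free
//   const ActorShapeData d = map.find(actorIndex, actor, shape);
//   map.remove(actorIndex, actor, shape, NULL);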
| 4,580 | C++ | 31.260563 | 110 | 0.731878 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuGeometryChecks.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_GEOMETRY_CHECKS_H
#define GU_GEOMETRY_CHECKS_H
#include "geometry/PxBoxGeometry.h"
#include "geometry/PxSphereGeometry.h"
#include "geometry/PxCapsuleGeometry.h"
#include "geometry/PxPlaneGeometry.h"
#include "geometry/PxConvexMeshGeometry.h"
#include "geometry/PxParticleSystemGeometry.h"
#include "geometry/PxTetrahedronMeshGeometry.h"
#include "geometry/PxTriangleMeshGeometry.h"
#include "geometry/PxHeightFieldGeometry.h"
#include "geometry/PxHairSystemGeometry.h"
#include "geometry/PxCustomGeometry.h"
namespace physx
{
// We sometimes overload capsule code for spheres, so every sphere should have
// valid capsule data (height = 0). This is preferable to a typedef so that we
// can maintain traits separately for a sphere, but some care is required to deal
// with the fact that when a reference to a capsule is extracted, it may have its
// type field set to eSPHERE
template <typename T>
struct PxcGeometryTraits
{
enum {TypeID = PxGeometryType::eINVALID };
};
template <typename T> struct PxcGeometryTraits<const T> { enum { TypeID = PxcGeometryTraits<T>::TypeID }; };
template <> struct PxcGeometryTraits<PxBoxGeometry> { enum { TypeID = PxGeometryType::eBOX }; };
template <> struct PxcGeometryTraits<PxSphereGeometry> { enum { TypeID = PxGeometryType::eSPHERE }; };
template <> struct PxcGeometryTraits<PxCapsuleGeometry> { enum { TypeID = PxGeometryType::eCAPSULE }; };
template <> struct PxcGeometryTraits<PxPlaneGeometry> { enum { TypeID = PxGeometryType::ePLANE }; };
template <> struct PxcGeometryTraits<PxParticleSystemGeometry> { enum { TypeID = PxGeometryType::ePARTICLESYSTEM}; };
template <> struct PxcGeometryTraits<PxConvexMeshGeometry> { enum { TypeID = PxGeometryType::eCONVEXMESH }; };
template <> struct PxcGeometryTraits<PxTriangleMeshGeometry> { enum { TypeID = PxGeometryType::eTRIANGLEMESH }; };
template <> struct PxcGeometryTraits<PxTetrahedronMeshGeometry> { enum { TypeID = PxGeometryType::eTETRAHEDRONMESH }; };
template <> struct PxcGeometryTraits<PxHeightFieldGeometry> { enum { TypeID = PxGeometryType::eHEIGHTFIELD }; };
template <> struct PxcGeometryTraits<PxHairSystemGeometry> { enum { TypeID = PxGeometryType::eHAIRSYSTEM }; };
template <> struct PxcGeometryTraits<PxCustomGeometry> { enum { TypeID = PxGeometryType::eCUSTOM }; };
template<class T> PX_CUDA_CALLABLE PX_FORCE_INLINE void checkType(const PxGeometry& geometry)
{
PX_ASSERT(PxU32(geometry.getType()) == PxU32(PxcGeometryTraits<T>::TypeID));
PX_UNUSED(geometry);
}
template<> PX_CUDA_CALLABLE PX_FORCE_INLINE void checkType<PxCapsuleGeometry>(const PxGeometry& geometry)
{
PX_ASSERT(geometry.getType() == PxGeometryType::eCAPSULE || geometry.getType() == PxGeometryType::eSPHERE);
PX_UNUSED(geometry);
}
template<> PX_CUDA_CALLABLE PX_FORCE_INLINE void checkType<const PxCapsuleGeometry>(const PxGeometry& geometry)
{
PX_ASSERT(geometry.getType()== PxGeometryType::eCAPSULE || geometry.getType() == PxGeometryType::eSPHERE);
PX_UNUSED(geometry);
}
}
#if !defined(__CUDACC__)
// the shape structure relies on punning capsules and spheres
PX_COMPILE_TIME_ASSERT(PX_OFFSET_OF(physx::PxCapsuleGeometry, radius) == PX_OFFSET_OF(physx::PxSphereGeometry, radius));
#endif
#endif
| 4,973 | C | 50.27835 | 122 | 0.760708 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuSecondaryPruner.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_SECONDARY_PRUNER_H
#define GU_SECONDARY_PRUNER_H
#include "common/PxPhysXCommonConfig.h"
#include "GuPruner.h"
namespace physx
{
class PxRenderOutput;
namespace Gu
{
class PruningPool;
class CompanionPruner : public PxUserAllocated
{
public:
CompanionPruner() {}
virtual ~CompanionPruner() {}
virtual bool addObject(const PrunerPayload& object, PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform, PxU32 timeStamp, PoolIndex poolIndex) = 0;
virtual bool updateObject(const PrunerPayload& object, PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform, PoolIndex poolIndex) = 0;
virtual bool removeObject(const PrunerPayload& object, PrunerHandle handle, PxU32 objectIndex, PxU32 swapObjectIndex) = 0;
virtual void swapIndex(PxU32 objectIndex, PxU32 swapObjectIndex) = 0;
virtual PxU32 removeMarkedObjects(PxU32 timeStamp) = 0;
virtual void shiftOrigin(const PxVec3& shift) = 0;
virtual void timeStampChange() = 0;
virtual void build() = 0;
virtual PxU32 getNbObjects() const = 0;
virtual void release() = 0;
virtual void visualize(PxRenderOutput& out, PxU32 color) const = 0;
virtual bool raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& prunerCallback) const = 0;
virtual bool overlap(const ShapeData& queryVolume, PrunerOverlapCallback& prunerCallback) const = 0;
virtual bool sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& prunerCallback) const = 0;
virtual void getGlobalBounds(PxBounds3&) const = 0;
};
CompanionPruner* createCompanionPruner(PxU64 contextID, CompanionPrunerType type, const PruningPool* pool);
}
}
#endif
| 3,775 | C | 52.183098 | 175 | 0.705166 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuAABBTreeNode.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_AABBTREE_NODE_H
#define GU_AABBTREE_NODE_H
#include "common/PxPhysXCommonConfig.h"
#include "foundation/PxBounds3.h"
#include "foundation/PxVecMath.h"
#include "foundation/PxUserAllocated.h"
namespace physx
{
using namespace aos;
namespace Gu
{
struct BVHNode : public PxUserAllocated
{
public:
PX_FORCE_INLINE BVHNode() {}
PX_FORCE_INLINE ~BVHNode() {}
PX_FORCE_INLINE PxU32 isLeaf() const { return mData&1; }
PX_FORCE_INLINE const PxU32* getPrimitives(const PxU32* base) const { return base + (mData>>5); }
PX_FORCE_INLINE PxU32* getPrimitives(PxU32* base) { return base + (mData>>5); }
PX_FORCE_INLINE PxU32 getPrimitiveIndex() const { return mData>>5; }
PX_FORCE_INLINE PxU32 getNbPrimitives() const { return (mData>>1)&15; }
PX_FORCE_INLINE PxU32 getPosIndex() const { return mData>>1; }
PX_FORCE_INLINE PxU32 getNegIndex() const { return (mData>>1) + 1; }
PX_FORCE_INLINE const BVHNode* getPos(const BVHNode* base) const { return base + (mData>>1); }
PX_FORCE_INLINE const BVHNode* getNeg(const BVHNode* base) const { const BVHNode* P = getPos(base); return P ? P+1 : NULL; }
PX_FORCE_INLINE BVHNode* getPos(BVHNode* base) { return base + (mData >> 1); }
PX_FORCE_INLINE BVHNode* getNeg(BVHNode* base) { BVHNode* P = getPos(base); return P ? P + 1 : NULL; }
PX_FORCE_INLINE PxU32 getNbRuntimePrimitives() const { return (mData>>1)&15; }
PX_FORCE_INLINE void setNbRunTimePrimitives(PxU32 val)
{
PX_ASSERT(val<16);
PxU32 data = mData & ~(15<<1);
data |= val<<1;
mData = data;
}
PX_FORCE_INLINE void getAABBCenterExtentsV(Vec3V* center, Vec3V* extents) const
{
const Vec4V minV = V4LoadU(&mBV.minimum.x);
const Vec4V maxV = V4LoadU(&mBV.maximum.x);
const float half = 0.5f;
const FloatV halfV = FLoad(half);
*extents = Vec3V_From_Vec4V(V4Scale(V4Sub(maxV, minV), halfV));
*center = Vec3V_From_Vec4V(V4Scale(V4Add(maxV, minV), halfV));
}
PX_FORCE_INLINE void getAABBCenterExtentsV2(Vec3V* center, Vec3V* extents) const
{
const Vec4V minV = V4LoadU(&mBV.minimum.x);
const Vec4V maxV = V4LoadU(&mBV.maximum.x);
*extents = Vec3V_From_Vec4V(V4Sub(maxV, minV));
*center = Vec3V_From_Vec4V(V4Add(maxV, minV));
}
PxBounds3 mBV; // Global bounding-volume enclosing all the node-related primitives
PxU32 mData; // 27 bits node or prim index|4 bits #prims|1 bit leaf
};
} // namespace Gu
}
#endif // GU_AABBTREE_NODE_H
| 4,429 | C | 43.747474 | 129 | 0.67487 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuCCTSweepTests.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "geometry/PxSphereGeometry.h"
#include "GuSweepTests.h"
#include "GuHeightFieldUtil.h"
#include "GuEntityReport.h"
#include "GuDistanceSegmentBox.h"
#include "GuDistancePointBox.h"
#include "GuSweepBoxSphere.h"
#include "GuSweepCapsuleBox.h"
#include "GuSweepBoxBox.h"
#include "GuSweepBoxTriangle_SAT.h"
#include "GuSweepTriangleUtils.h"
#include "GuInternal.h"
#include "foundation/PxVecMath.h"
using namespace physx;
using namespace Gu;
using namespace Cm;
using namespace aos;
static const bool gValidateBoxRadiusComputation = false;
///////////////////////////////////////////
bool sweepCapsule_BoxGeom_Precise(GU_CAPSULE_SWEEP_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::eBOX);
PX_UNUSED(threadContext);
PX_UNUSED(inflation);
PX_UNUSED(capsulePose_);
PX_UNUSED(capsuleGeom_);
const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom);
if (lss.p0 == lss.p1) // The capsule is actually a sphere
{
//TODO: Check if this is really faster than using a "sphere-aware" version of sweepCapsuleBox
Box box; buildFrom(box, pose.p, boxGeom.halfExtents, pose.q);
if(!sweepBoxSphere(box, lss.radius, lss.p0, unitDir, distance, sweepHit.distance, sweepHit.normal, hitFlags))
return false;
sweepHit.normal = -sweepHit.normal;
sweepHit.flags = PxHitFlag::eNORMAL;
if(hitFlags & PxHitFlag::ePOSITION && sweepHit.distance!=0.0f)
{
// The sweep test doesn't compute the impact point automatically, so we have to do it here.
const PxVec3 newSphereCenter = lss.p0 + unitDir * sweepHit.distance;
PxVec3 closest;
const PxReal d = distancePointBoxSquared(newSphereCenter, box.center, box.extents, box.rot, &closest);
PX_UNUSED(d);
// Compute point on the box, after sweep
closest = box.rotate(closest);
sweepHit.position = closest + box.center;
sweepHit.flags |= PxHitFlag::ePOSITION;
}
}
else
{
if(!sweepCapsuleBox(lss, pose, boxGeom.halfExtents, unitDir, distance, sweepHit.position, sweepHit.distance, sweepHit.normal, hitFlags))
return false;
sweepHit.flags = PxHitFlag::eNORMAL;
if((hitFlags & PxHitFlag::ePOSITION) && sweepHit.distance!=0.0f)
{
// The sweep test doesn't compute the impact point automatically, so we have to do it here.
Capsule movedCaps = lss;
movedCaps.p0 += unitDir * sweepHit.distance;
movedCaps.p1 += unitDir * sweepHit.distance;
Box box;
buildFrom(box, pose.p, boxGeom.halfExtents, pose.q);
PxVec3 closest;
const PxReal d = distanceSegmentBoxSquared(movedCaps, box, NULL, &closest);
PX_UNUSED(d);
// Compute point on the box, after sweep
closest = pose.q.rotate(closest);
sweepHit.position = closest + pose.p;
sweepHit.flags |= PxHitFlag::ePOSITION;
}
}
return true;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
bool sweepBox_SphereGeom_Precise(GU_BOX_SWEEP_FUNC_PARAMS)
{
PX_UNUSED(threadContext);
PX_UNUSED(boxPose_);
PX_UNUSED(boxGeom_);
PX_ASSERT(geom.getType() == PxGeometryType::eSPHERE);
const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom);
// PT: move to relative space
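	// (translating the problem so the sphere sits at the origin keeps coordinates small,
	// which helps floating-point precision for sweeps taking place far from the world origin)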
const Box relBox(box.center - pose.p, box.extents, box.rot);
const PxReal sphereRadius = sphereGeom.radius + inflation;
if(!sweepBoxSphere(relBox, sphereRadius, PxVec3(0), -unitDir, distance, sweepHit.distance, sweepHit.normal, hitFlags))
return false;
sweepHit.flags = PxHitFlag::eNORMAL;
if((hitFlags & PxHitFlag::ePOSITION) && sweepHit.distance!=0.0f)
{
// The sweep test doesn't compute the impact point automatically, so we have to do it here.
const PxVec3 motion = sweepHit.distance * unitDir;
const PxVec3 newSphereCenter = - motion;
PxVec3 closest;
const PxReal d = distancePointBoxSquared(newSphereCenter, relBox.center, relBox.extents, relBox.rot, &closest);
PX_UNUSED(d);
// Compute point on the box, after sweep
sweepHit.position = relBox.rotate(closest) + box.center + motion; // PT: undo move to local space here
sweepHit.flags |= PxHitFlag::ePOSITION;
}
return true;
}
bool sweepBox_CapsuleGeom_Precise(GU_BOX_SWEEP_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::eCAPSULE);
PX_UNUSED(inflation);
PX_UNUSED(boxGeom_);
PX_UNUSED(threadContext);
const PxCapsuleGeometry& capsuleGeom = static_cast<const PxCapsuleGeometry&>(geom);
// PT: move to relative space
const PxVec3 delta = box.center - pose.p;
Box relBox(delta, box.extents, box.rot);
Capsule capsule;
const PxVec3 halfHeightVector = getCapsuleHalfHeightVector(pose, capsuleGeom);
capsule.p0 = halfHeightVector;
capsule.p1 = -halfHeightVector;
capsule.radius = capsuleGeom.radius;
	// PT: TODO: remove this. We convert to PxTransform here but inside sweepCapsuleBox we convert back to a matrix.
const PxTransform boxWorldPose(delta, boxPose_.q);
PxVec3 n;
if(!sweepCapsuleBox(capsule, boxWorldPose, relBox.extents, -unitDir, distance, sweepHit.position, sweepHit.distance, n, hitFlags))
return false;
sweepHit.normal = -n;
sweepHit.flags = PxHitFlag::eNORMAL;
if((hitFlags & PxHitFlag::ePOSITION) && sweepHit.distance!=0.0f)
{
// The sweep test doesn't compute the impact point automatically, so we have to do it here.
relBox.center += (unitDir * sweepHit.distance);
PxVec3 closest;
const PxReal d = distanceSegmentBoxSquared(capsule, relBox, NULL, &closest);
PX_UNUSED(d);
// Compute point on the box, after sweep
sweepHit.position = relBox.transform(closest) + pose.p; // PT: undo move to local space here
sweepHit.flags |= PxHitFlag::ePOSITION;
}
return true;
}
bool sweepBox_BoxGeom_Precise(GU_BOX_SWEEP_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::eBOX);
PX_UNUSED(threadContext);
PX_UNUSED(inflation);
PX_UNUSED(boxPose_);
PX_UNUSED(boxGeom_);
const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom);
// PT: move to local space
const Box relBox(box.center - pose.p, box.extents, box.rot);
Box staticBox; buildFrom(staticBox, PxVec3(0), boxGeom.halfExtents, pose.q);
if(!sweepBoxBox(relBox, staticBox, unitDir, distance, hitFlags, sweepHit))
return false;
if(sweepHit.distance!=0.0f)
sweepHit.position += pose.p; // PT: undo move to local space
return true;
}
// PT: test: new version for CCT, based on code for general sweeps. Just to check whether it works with rotations.
// TODO: refactor this and the similar code in sweptBox for box-vs-mesh. Not so easy though.
static bool sweepBoxVsTriangles(PxU32 nbTris, const PxTriangle* triangles, const Box& box, const PxVec3& unitDir, const PxReal distance, PxGeomSweepHit& sweepHit,
PxHitFlags hitFlags, bool isDoubleSided, const PxU32* cachedIndex)
{
if(!nbTris)
return false;
const bool meshBothSides = hitFlags & PxHitFlag::eMESH_BOTH_SIDES;
const bool doBackfaceCulling = !isDoubleSided && !meshBothSides;
// Move to AABB space
PxMat34 worldToBox;
computeWorldToBoxMatrix(worldToBox, box);
const PxVec3 localDir = worldToBox.rotate(unitDir);
const PxVec3 localMotion = localDir * distance;
bool status = false;
sweepHit.distance = distance; //was PX_MAX_F32, but that may trigger an assert in the caller!
const PxVec3 oneOverMotion(
localDir.x!=0.0f ? 1.0f/localMotion.x : 0.0f,
localDir.y!=0.0f ? 1.0f/localMotion.y : 0.0f,
localDir.z!=0.0f ? 1.0f/localMotion.z : 0.0f);
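	// The reciprocals of the motion components are precomputed once here so that the
	// per-triangle sweep below can use multiplications instead of divisions in its
	// slab tests; axes along which there is no motion are flagged with 0.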
// PT: experimental code, don't clean up before I test it more and validate it
// Project box
/*float boxRadius0 =
PxAbs(dir.x) * box.extents.x
+ PxAbs(dir.y) * box.extents.y
+ PxAbs(dir.z) * box.extents.z;*/
float boxRadius =
PxAbs(localDir.x) * box.extents.x
+ PxAbs(localDir.y) * box.extents.y
+ PxAbs(localDir.z) * box.extents.z;
if(gValidateBoxRadiusComputation) // PT: run this to check the box radius is correctly computed
{
PxVec3 boxVertices2[8];
box.computeBoxPoints(boxVertices2);
float dpmin = FLT_MAX;
float dpmax = -FLT_MAX;
for(int i=0;i<8;i++)
{
const float dp = boxVertices2[i].dot(unitDir);
if(dp<dpmin) dpmin = dp;
if(dp>dpmax) dpmax = dp;
}
const float goodRadius = (dpmax-dpmin)/2.0f;
PX_UNUSED(goodRadius);
}
const float dpc0 = box.center.dot(unitDir);
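	// boxRadius is the box's half-extent projected onto the sweep direction, and dpc0 is
	// the box center projected onto the same axis. Together they let cullTriangle quickly
	// reject triangles whose projected interval cannot intersect the swept box's interval.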
float localMinDist = 1.0f;
#if PX_DEBUG
PxU32 totalTestsExpected = nbTris;
PxU32 totalTestsReal = 0;
PX_UNUSED(totalTestsExpected);
PX_UNUSED(totalTestsReal);
#endif
const PxU32 idx = cachedIndex ? *cachedIndex : 0;
PxVec3 bestTriNormal(0.0f);
for(PxU32 ii=0;ii<nbTris;ii++)
{
const PxU32 triangleIndex = getTriangleIndex(ii, idx);
const PxTriangle& tri = triangles[triangleIndex];
if(!cullTriangle(tri.verts, unitDir, boxRadius, localMinDist*distance, dpc0))
continue;
#if PX_DEBUG
totalTestsReal++;
#endif
// Move to box space
const PxTriangle currentTriangle(
worldToBox.transform(tri.verts[0]),
worldToBox.transform(tri.verts[1]),
worldToBox.transform(tri.verts[2]));
PxF32 t = PX_MAX_F32; // could be better!
if(triBoxSweepTestBoxSpace(currentTriangle, box.extents, localMotion, oneOverMotion, localMinDist, t, doBackfaceCulling))
{
if(t < localMinDist)
{
// PT: test if shapes initially overlap
if(t==0.0f)
return setInitialOverlapResults(sweepHit, unitDir, triangleIndex);
localMinDist = t;
sweepHit.distance = t * distance;
sweepHit.faceIndex = triangleIndex;
status = true;
// PT: TODO: optimize this.... already computed in triBoxSweepTestBoxSpace...
currentTriangle.denormalizedNormal(bestTriNormal);
if(hitFlags & PxHitFlag::eMESH_ANY)
break;
}
}
}
if(status)
{
sweepHit.flags = PxHitFlag::eFACE_INDEX;
// PT: TODO: refactor with computeBoxLocalImpact (TA34704)
if(hitFlags & (PxHitFlag::eNORMAL|PxHitFlag::ePOSITION))
{
const PxTriangle& tri = triangles[sweepHit.faceIndex];
// Move to box space
const PxTriangle currentTriangle(
worldToBox.transform(tri.verts[0]),
worldToBox.transform(tri.verts[1]),
worldToBox.transform(tri.verts[2]));
computeBoxTriImpactData(sweepHit.position, sweepHit.normal, box.extents, localDir, currentTriangle, sweepHit.distance);
if(hitFlags & PxHitFlag::eNORMAL)
{
PxVec3 localNormal = sweepHit.normal; // PT: both local space & local variable
localNormal.normalize();
if(shouldFlipNormal(localNormal, meshBothSides, isDoubleSided, bestTriNormal, localDir))
localNormal = -localNormal;
sweepHit.normal = box.rotate(localNormal);
sweepHit.flags |= PxHitFlag::eNORMAL;
}
if(hitFlags & PxHitFlag::ePOSITION)
{
sweepHit.position = box.rotate(sweepHit.position) + box.center;
sweepHit.flags |= PxHitFlag::ePOSITION;
}
}
}
return status;
}
bool sweepBox_HeightFieldGeom_Precise(GU_BOX_SWEEP_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::eHEIGHTFIELD);
PX_UNUSED(threadContext);
PX_UNUSED(inflation);
PX_UNUSED(boxPose_);
PX_UNUSED(boxGeom_);
const PxHeightFieldGeometry& heightFieldGeom = static_cast<const PxHeightFieldGeometry&>(geom);
// Compute swept box
Box sweptBox;
computeSweptBox(sweptBox, box.extents, box.center, box.rot, unitDir, distance);
//### Temp hack until we can directly collide the OBB against the HF
const PxTransform sweptBoxTR = sweptBox.getTransform();
const PxBounds3 bounds = PxBounds3::poseExtent(sweptBoxTR, sweptBox.extents);
sweepHit.distance = PX_MAX_F32;
struct LocalReport : OverlapReport
{
virtual bool reportTouchedTris(PxU32 nb, const PxU32* indices)
{
for(PxU32 i=0; i<nb; i++)
{
const PxU32 triangleIndex = indices[i];
PxTriangle currentTriangle; // in world space
mHFUtil->getTriangle(*mPose, currentTriangle, NULL, NULL, triangleIndex, true, true);
PxGeomSweepHit sweepHit_;
				const bool b = sweepBoxVsTriangles(1, &currentTriangle, mBox, mDir, mDist, sweepHit_, mHitFlags, mIsDoubleSided, NULL);
if(b && sweepHit_.distance<mHit->distance)
{
*mHit = sweepHit_;
mHit->faceIndex = triangleIndex;
mStatus = true;
}
}
return true;
}
const HeightFieldUtil* mHFUtil;
const PxTransform* mPose;
PxGeomSweepHit* mHit;
bool mStatus;
Box mBox;
PxVec3 mDir;
float mDist;
PxHitFlags mHitFlags;
bool mIsDoubleSided;
} myReport;
HeightFieldUtil hfUtil(heightFieldGeom);
myReport.mBox = box;
myReport.mDir = unitDir;
myReport.mDist = distance;
myReport.mHitFlags = hitFlags;
myReport.mHFUtil = &hfUtil;
myReport.mStatus = false;
myReport.mPose = &pose;
myReport.mHit = &sweepHit;
const PxU32 meshBothSides = hitFlags & PxHitFlag::eMESH_BOTH_SIDES;
myReport.mIsDoubleSided = (heightFieldGeom.heightFieldFlags & PxMeshGeometryFlag::eDOUBLE_SIDED) || meshBothSides;
hfUtil.overlapAABBTriangles(pose, bounds, myReport);
return myReport.mStatus;
}
bool Gu::sweepBoxTriangles_Precise(GU_SWEEP_TRIANGLES_FUNC_PARAMS(PxBoxGeometry))
{
PX_UNUSED(inflation);
Box box;
buildFrom(box, pose.p, geom.halfExtents, pose.q);
return sweepBoxVsTriangles(nbTris, triangles, box, unitDir, distance, hit, hitFlags, doubleSided, cachedIndex);
}
| 14,727 | C++ | 32.096629 | 162 | 0.723705 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuSweepMTD.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "geometry/PxConvexMeshGeometry.h"
#include "GuHeightFieldUtil.h"
#include "GuEntityReport.h"
#include "GuConvexMesh.h"
#include "GuSweepSharedTests.h"
#include "GuConvexUtilsInternal.h"
#include "GuTriangleMesh.h"
#include "GuVecBox.h"
#include "GuVecTriangle.h"
#include "GuVecConvexHullNoScale.h"
#include "GuMidphaseInterface.h"
#include "GuPCMContactConvexCommon.h"
#include "GuSweepMTD.h"
#include "GuPCMShapeConvex.h"
#include "GuDistanceSegmentSegment.h"
#include "GuDistancePointSegment.h"
#include "GuInternal.h"
#include "GuConvexEdgeFlags.h"
#include "GuMTD.h"
#include "CmMatrix34.h"
using namespace physx;
using namespace Gu;
using namespace Cm;
using namespace aos;
#define BATCH_TRIANGLE_NUMBER 32u
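// Triangles returned by the midphase are processed in fixed-size batches so that the
// temporary MTDTriangle array can live on the stack.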
struct MTDTriangle : public PxTriangle
{
public:
PxU8 extraTriData;//active edge flag data
};
struct MeshMTDGenerationCallback : MeshHitCallback<PxGeomRaycastHit>
{
public:
PxArray<PxU32>& container;
MeshMTDGenerationCallback(PxArray<PxU32>& tempContainer)
: MeshHitCallback<PxGeomRaycastHit>(CallbackMode::eMULTIPLE), container(tempContainer)
{
}
virtual PxAgain processHit(
const PxGeomRaycastHit& hit, const PxVec3&, const PxVec3&, const PxVec3&, PxReal&, const PxU32*)
{
container.pushBack(hit.faceIndex);
return true;
}
void operator=(const MeshMTDGenerationCallback&) {}
};
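// Scans the per-triangle manifold contacts for the deepest penetration and, if it is
// deeper than the current overall best (deepestPen), promotes it to the new MTD candidate.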
static bool getMTDPerTriangle(const MeshPersistentContact* manifoldContacts, const PxU32 numContacts, const PxU32 triangleIndex, Vec3V& normal, Vec3V& closestA, Vec3V& closestB, PxU32& faceIndex, FloatV& deepestPen)
{
FloatV deepest = V4GetW(manifoldContacts[0].mLocalNormalPen);
PxU32 index = 0;
for(PxU32 k=1; k<numContacts; ++k)
{
const FloatV pen = V4GetW(manifoldContacts[k].mLocalNormalPen);
if(FAllGrtr(deepest, pen))
{
deepest = pen;
index = k;
}
}
if(FAllGrtr(deepestPen, deepest))
{
PX_ASSERT(triangleIndex == manifoldContacts[index].mFaceIndex);
faceIndex = triangleIndex;
deepestPen = deepest;
normal = Vec3V_From_Vec4V(manifoldContacts[index].mLocalNormalPen);
closestA = manifoldContacts[index].mLocalPointB;
closestB = manifoldContacts[index].mLocalPointA;
return true;
}
return false;
}
static void midPhaseQuery(const PxTriangleMeshGeometry& meshGeom, const PxTransform& pose, const Box& bound, PxArray<PxU32>& tempContainer)
{
TriangleMesh* meshData = static_cast<TriangleMesh*>(meshGeom.triangleMesh);
Box vertexSpaceBox;
computeVertexSpaceOBB(vertexSpaceBox, bound, pose, meshGeom.scale);
MeshMTDGenerationCallback callback(tempContainer);
Midphase::intersectOBB(meshData, vertexSpaceBox, callback, true);
}
// PT: TODO: refactor with EntityReportContainerCallback
struct MidPhaseQueryLocalReport : OverlapReport
{
MidPhaseQueryLocalReport(PxArray<PxU32>& _container) : container(_container)
{
}
virtual bool reportTouchedTris(PxU32 nb, const PxU32* indices)
{
for(PxU32 i=0; i<nb; i++)
container.pushBack(indices[i]);
return true;
}
PxArray<PxU32>& container;
private:
MidPhaseQueryLocalReport operator=(MidPhaseQueryLocalReport& report);
};
static void midPhaseQuery(const HeightFieldUtil& hfUtil, const PxTransform& pose, const PxBounds3& bounds, PxArray<PxU32>& tempContainer)
{
MidPhaseQueryLocalReport localReport(tempContainer);
hfUtil.overlapAABBTriangles(pose, bounds, localReport);
}
static bool calculateMTD( const CapsuleV& capsuleV, const FloatVArg inflatedRadiusV, const bool isDoubleSide, const MTDTriangle* triangles, const PxU32 nbTriangles, const PxU32 startIndex, MeshPersistentContact* manifoldContacts,
PxU32& numContacts, Vec3V& normal, Vec3V& closestA, Vec3V& closestB, PxU32& faceIndex, FloatV& mtd)
{
const FloatV zero = FZero();
bool hadContacts = false;
FloatV deepestPen = mtd;
for(PxU32 j=0; j<nbTriangles; ++j)
{
numContacts = 0;
const MTDTriangle& curTri = triangles[j];
TriangleV triangleV;
triangleV.verts[0] = V3LoadU(curTri.verts[0]);
triangleV.verts[1] = V3LoadU(curTri.verts[1]);
triangleV.verts[2] = V3LoadU(curTri.verts[2]);
const PxU8 triFlag = curTri.extraTriData;
const Vec3V triangleNormal = triangleV.normal();
const Vec3V v = V3Sub(capsuleV.getCenter(), triangleV.verts[0]);
const FloatV dotV = V3Dot(triangleNormal, v);
// Backface culling
const bool culled = !isDoubleSide && (FAllGrtr(zero, dotV));
if(culled)
continue;
PCMCapsuleVsMeshContactGeneration::processTriangle(triangleV, j+startIndex, capsuleV, inflatedRadiusV, triFlag, manifoldContacts, numContacts);
		if(numContacts == 0)
continue;
hadContacts = true;
getMTDPerTriangle(manifoldContacts, numContacts, j + startIndex, normal, closestA, closestB, faceIndex, deepestPen);
}
mtd = deepestPen;
return hadContacts;
}
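// Converts the accumulated depenetration translation into the sweep-hit convention used
// here: hit.distance is the negated translation length (i.e. a penetration depth reported
// as a negative distance) and hit.normal is the unit depenetration direction.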
static PX_FORCE_INLINE bool finalizeMTD(PxGeomSweepHit& hit, const Vec3VArg translationV, const Vec3VArg posV, PxU32 triangleIndex, bool foundInitial)
{
if(foundInitial)
{
const FloatV translationF = V3Length(translationV);
const FloatV distV = FNeg(translationF);
const BoolV con = FIsGrtr(translationF, FZero());
const Vec3V nrm = V3Sel(con, V3ScaleInv(translationV, translationF), V3Zero());
FStore(distV, &hit.distance);
V3StoreU(posV, hit.position);
V3StoreU(nrm, hit.normal);
hit.faceIndex = triangleIndex;
}
return foundInitial;
}
bool physx::Gu::computeCapsule_TriangleMeshMTD( const PxTriangleMeshGeometry& triMeshGeom, const PxTransform& pose, CapsuleV& capsuleV, PxReal inflatedRadius,
bool isDoubleSided, PxGeomSweepHit& hit)
{
TriangleMesh* triMesh = static_cast<TriangleMesh*>(triMeshGeom.triangleMesh);
const PxU8* extraTrigData = triMesh->getExtraTrigData();
const bool flipsNormal = triMeshGeom.scale.hasNegativeDeterminant();
	//Inflate the capsule by 15% in case of some disagreement between the sweep and the MTD calculation. If the sweep said initial overlap, but the MTD has a positive separation,
	//we are still able to return a valid normal, but we should zero the distance.
const FloatV inflatedRadiusV = FLoad(inflatedRadius*1.15f);
const PxMat34 vertexToWorldSkew = pose * triMeshGeom.scale;
const Vec3V zeroV = V3Zero();
Vec3V closestA = zeroV, closestB = zeroV, normal = zeroV;
/////
MeshPersistentContact manifoldContacts[64];
PxU32 numContacts = 0;
PxArray<PxU32> tempContainer;
tempContainer.reserve(128);
PxU32 triangleIndex = 0xfffffff;
Vec3V translation = zeroV;
bool foundInitial = false;
const PxU32 iterations = 4;
/////
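	// Iterative depenetration: each pass gathers candidate triangles around the inflated
	// capsule, finds the deepest penetration, then moves the capsule out along the MTD
	// normal. Up to 'iterations' passes are run so that contacts uncovered by previous
	// moves are taken into account; the accumulated translation yields the final MTD.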
for(PxU32 i=0; i<iterations; ++i)
{
tempContainer.forceSize_Unsafe(0);
{
Capsule inflatedCapsule;
V3StoreU(capsuleV.p0, inflatedCapsule.p0);
V3StoreU(capsuleV.p1, inflatedCapsule.p1);
inflatedCapsule.radius = inflatedRadius;
Box capsuleBox;
computeBoxAroundCapsule(inflatedCapsule, capsuleBox);
midPhaseQuery(triMeshGeom, pose, capsuleBox, tempContainer);
}
// Get results
const PxU32 nbTriangles = tempContainer.size();
if(!nbTriangles)
break;
FloatV mtd;
{
bool hadContacts = false;
const PxU32 nbBatches = (nbTriangles + BATCH_TRIANGLE_NUMBER - 1)/BATCH_TRIANGLE_NUMBER;
mtd = FMax();
MTDTriangle triangles[BATCH_TRIANGLE_NUMBER];
for(PxU32 a = 0; a < nbBatches; ++a)
{
const PxU32 startIndex = a * BATCH_TRIANGLE_NUMBER;
const PxU32 nbTrigs = PxMin(nbTriangles - startIndex, BATCH_TRIANGLE_NUMBER);
for(PxU32 k=0; k<nbTrigs; k++)
{
//triangle world space
const PxU32 currentTriangleIndex = tempContainer[startIndex+k];
triMesh->computeWorldTriangle(triangles[k], currentTriangleIndex, vertexToWorldSkew, flipsNormal);
triangles[k].extraTriData = getConvexEdgeFlags(extraTrigData, currentTriangleIndex);
}
//ML: mtd has back face culling, so if the capsule's center is below the triangle, we won't generate any contacts
hadContacts = calculateMTD(capsuleV, inflatedRadiusV, isDoubleSided, triangles, nbTrigs, startIndex, manifoldContacts, numContacts, normal, closestA, closestB, triangleIndex, mtd) || hadContacts;
}
if(!hadContacts)
break;
triangleIndex = tempContainer[triangleIndex];
foundInitial = true;
}
//move the capsule to depenetrate it
const FloatV distV = FSub(mtd, capsuleV.radius);
if(FAllGrtr(FZero(), distV))
{
Vec3V center = capsuleV.getCenter();
const Vec3V t = V3Scale(normal, distV);
translation = V3Sub(translation, t);
center = V3Sub(center, t);
capsuleV.setCenter(center);
}
else
{
if(i == 0)
{
//First iteration so keep this normal
hit.distance = 0.0f;
V3StoreU(closestA, hit.position);
V3StoreU(normal, hit.normal);
hit.faceIndex = triangleIndex;
return true;
}
break;
}
}
return finalizeMTD(hit, translation, closestA, triangleIndex, foundInitial);
}
bool physx::Gu::computeCapsule_HeightFieldMTD(const PxHeightFieldGeometry& heightFieldGeom, const PxTransform& pose, CapsuleV& capsuleV, PxReal inflatedRadius, bool isDoubleSided, PxGeomSweepHit& hit)
{
	//Inflate the capsule by 1% in case of some disagreement between the sweep and the MTD calculation. If the sweep said initial overlap, but the MTD has a positive separation,
	//we are still able to return a valid normal, but we should zero the distance.
const FloatV inflatedRadiusV = FLoad(inflatedRadius*1.01f);
const HeightFieldUtil hfUtil(heightFieldGeom);
const Vec3V zeroV = V3Zero();
Vec3V closestA = zeroV, closestB = zeroV, normal = zeroV;
/////
MeshPersistentContact manifoldContacts[64];
PxU32 numContacts = 0;
PxArray<PxU32> tempContainer;
tempContainer.reserve(128);
PxU32 triangleIndex = 0xfffffff;
Vec3V translation = zeroV;
bool foundInitial = false;
const PxU32 iterations = 4;
/////
for(PxU32 i=0; i<iterations; ++i)
{
tempContainer.forceSize_Unsafe(0);
{
Capsule inflatedCapsule;
V3StoreU(capsuleV.p0, inflatedCapsule.p0);
V3StoreU(capsuleV.p1, inflatedCapsule.p1);
inflatedCapsule.radius = inflatedRadius;
Box capsuleBox;
computeBoxAroundCapsule(inflatedCapsule, capsuleBox);
const PxTransform capsuleBoxTransform = capsuleBox.getTransform();
const PxBounds3 bounds = PxBounds3::poseExtent(capsuleBoxTransform, capsuleBox.extents);
midPhaseQuery(hfUtil, pose, bounds, tempContainer);
}
// Get results
const PxU32 nbTriangles = tempContainer.size();
if(!nbTriangles)
break;
FloatV mtd;
{
bool hadContacts = false;
const PxU32 nbBatches = (nbTriangles + BATCH_TRIANGLE_NUMBER - 1)/BATCH_TRIANGLE_NUMBER;
mtd = FMax();
MTDTriangle triangles[BATCH_TRIANGLE_NUMBER];
for(PxU32 a = 0; a < nbBatches; ++a)
{
const PxU32 startIndex = a * BATCH_TRIANGLE_NUMBER;
const PxU32 nbTrigs = PxMin(nbTriangles - startIndex, BATCH_TRIANGLE_NUMBER);
for(PxU32 k=0; k<nbTrigs; k++)
{
//triangle vertex space
const PxU32 currentTriangleIndex = tempContainer[startIndex+k];
hfUtil.getTriangle(pose, triangles[k], NULL, NULL, currentTriangleIndex, true);
triangles[k].extraTriData = ETD_CONVEX_EDGE_ALL;
}
//ML: mtd has back face culling, so if the capsule's center is below the triangle, we won't generate any contacts
hadContacts = calculateMTD(capsuleV, inflatedRadiusV, isDoubleSided, triangles, nbTrigs, startIndex, manifoldContacts, numContacts, normal, closestA, closestB, triangleIndex, mtd) || hadContacts;
}
if(!hadContacts)
break;
triangleIndex = tempContainer[triangleIndex];
foundInitial = true;
}
const FloatV distV = FSub(mtd, capsuleV.radius);
if(FAllGrtr(FZero(), distV))
{
//move the capsule to depenetrate it
Vec3V center = capsuleV.getCenter();
const Vec3V t = V3Scale(normal, distV);
translation = V3Sub(translation, t);
center = V3Sub(center, t);
capsuleV.setCenter(center);
}
else
{
if(i == 0)
{
//First iteration so keep this normal
hit.distance = 0.0f;
V3StoreU(closestA, hit.position);
V3StoreU(normal, hit.normal);
hit.faceIndex = triangleIndex;
return true;
}
break;
}
}
return finalizeMTD(hit, translation, closestA, triangleIndex, foundInitial);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
static bool calculateMTD( const PolygonalData& polyData, const SupportLocal* polyMap, const PxTransformV& convexTransform, const PxMatTransformV& meshToConvex, bool isDoubleSided, const FloatVArg inflation, const MTDTriangle* triangles, PxU32 nbTriangles, PxU32 startIndex,
MeshPersistentContact* manifoldContacts, PxU32& numContacts, Vec3V& normal, Vec3V& closestA, Vec3V& closestB, PxU32& faceIndex, FloatV& mtd)
{
bool hadContacts = false;
FloatV deepestPen = mtd;
for(PxU32 j=0; j<nbTriangles; ++j)
{
numContacts = 0;
const MTDTriangle& curTri = triangles[j];
const PxU8 triFlag = curTri.extraTriData;
PCMConvexVsMeshContactGeneration::processTriangle(polyData, polyMap, curTri.verts, j+startIndex, triFlag, inflation, isDoubleSided, convexTransform, meshToConvex, manifoldContacts, numContacts);
		if(numContacts == 0)
continue;
hadContacts = true;
getMTDPerTriangle(manifoldContacts, numContacts, j+startIndex, normal, closestA, closestB, faceIndex, deepestPen);
}
mtd = deepestPen;
return hadContacts;
}
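// The box and convex variants below follow the same iterative depenetration scheme as the
// capsule versions, but generate contacts through the PCM convex-vs-mesh path: each
// iteration re-derives a mesh-to-convex transform and feeds triangle batches to the
// convex overload of calculateMTD() above.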
bool physx::Gu::computeBox_TriangleMeshMTD(const PxTriangleMeshGeometry& triMeshGeom, const PxTransform& pose, const Box& _box, const PxTransform& boxTransform, PxReal inflation, bool isDoubleSided, PxGeomSweepHit& hit)
{
TriangleMesh* triMesh = static_cast<TriangleMesh*>(triMeshGeom.triangleMesh);
const PxU8* extraTrigData = triMesh->getExtraTrigData();
const bool flipsNormal = triMeshGeom.scale.hasNegativeDeterminant();
const Vec3V zeroV = V3Zero();
Vec3V closestA = zeroV, closestB = zeroV, normal = zeroV;
Vec3V worldNormal = zeroV, worldContactA = zeroV;//, worldContactB = zeroV;
Box box = _box;
const QuatV q0 = QuatVLoadU(&boxTransform.q.x);
const Vec3V p0 = V3LoadU(&boxTransform.p.x);
const Vec3V boxExtents = V3LoadU(box.extents);
const FloatV minMargin = CalculateMTDBoxMargin(boxExtents);
const FloatV inflationV = FAdd(FLoad(inflation), minMargin);
PxReal boundInflation;
FStore(inflationV, &boundInflation);
box.extents += PxVec3(boundInflation);
const BoxV boxV(zeroV, boxExtents);
Vec3V boxCenter = V3LoadU(box.center);
//create the polyData based on the original data
PolygonalData polyData;
const PCMPolygonalBox polyBox(_box.extents);
polyBox.getPolygonalData(&polyData);
const Mat33V identity = M33Identity();
const PxMat34 meshToWorldSkew = pose * triMeshGeom.scale;
PxTransformV boxTransformV(p0, q0);//box
/////
MeshPersistentContact manifoldContacts[64];
PxU32 numContacts = 0;
PxArray<PxU32> tempContainer;
tempContainer.reserve(128);
PxU32 triangleIndex = 0xfffffff;
Vec3V translation = zeroV;
bool foundInitial = false;
const PxU32 iterations = 4;
/////
for(PxU32 i=0; i<iterations; ++i)
{
tempContainer.forceSize_Unsafe(0);
{
midPhaseQuery(triMeshGeom, pose, box, tempContainer);
}
// Get results
const PxU32 nbTriangles = tempContainer.size();
if(!nbTriangles)
break;
boxTransformV.p = boxCenter;
SupportLocalImpl<BoxV> boxMap(boxV, boxTransformV, identity, identity, true);
boxMap.setShapeSpaceCenterofMass(zeroV);
// Move to AABB space
PxMat34 WorldToBox;
computeWorldToBoxMatrix(WorldToBox, box);
const PxMat34 meshToBox = WorldToBox*meshToWorldSkew;
const Mat33V rot(V3LoadU(meshToBox.m.column0), V3LoadU(meshToBox.m.column1), V3LoadU(meshToBox.m.column2));
const PxMatTransformV meshToConvex(V3LoadU(meshToBox.p), rot);
FloatV mtd;
{
bool hadContacts = false;
const PxU32 nbBatches = (nbTriangles + BATCH_TRIANGLE_NUMBER - 1)/BATCH_TRIANGLE_NUMBER;
mtd = FMax();
MTDTriangle triangles[BATCH_TRIANGLE_NUMBER];
for(PxU32 a = 0; a < nbBatches; ++a)
{
const PxU32 startIndex = a * BATCH_TRIANGLE_NUMBER;
const PxU32 nbTrigs = PxMin(nbTriangles - startIndex, BATCH_TRIANGLE_NUMBER);
for(PxU32 k=0; k<nbTrigs; k++)
{
//triangle vertex space
const PxU32 currentTriangleIndex = tempContainer[startIndex+k];
triMesh->getLocalTriangle(triangles[k], currentTriangleIndex, flipsNormal);
triangles[k].extraTriData = getConvexEdgeFlags(extraTrigData, currentTriangleIndex);
}
				//ML: mtd has back face culling, so if the box's center is below the triangle, we won't generate any contacts
hadContacts = calculateMTD(polyData, &boxMap, boxTransformV, meshToConvex, isDoubleSided, inflationV, triangles, nbTrigs, startIndex, manifoldContacts, numContacts, normal, closestA, closestB, triangleIndex, mtd) || hadContacts;
}
if(!hadContacts)
break;
triangleIndex = tempContainer[triangleIndex];
foundInitial = true;
}
const FloatV distV = mtd;
worldNormal = boxTransformV.rotate(normal);
worldContactA = boxTransformV.transform(closestA);
if(FAllGrtr(FZero(), distV))
{
const Vec3V t = V3Scale(worldNormal, mtd);
translation = V3Sub(translation, t);
boxCenter = V3Sub(boxCenter, t);
V3StoreU(boxCenter, box.center);
}
else
{
if(i == 0)
{
//First iteration so keep this normal
hit.distance = 0.0f;
V3StoreU(worldContactA, hit.position);
V3StoreU(worldNormal, hit.normal);
hit.faceIndex = triangleIndex;
return true;
}
break;
}
}
return finalizeMTD(hit, translation, worldContactA, triangleIndex, foundInitial);
}
bool physx::Gu::computeBox_HeightFieldMTD(const PxHeightFieldGeometry& heightFieldGeom, const PxTransform& pose, const Box& _box, const PxTransform& boxTransform, PxReal inflation, bool isDoubleSided, PxGeomSweepHit& hit)
{
const HeightFieldUtil hfUtil(heightFieldGeom);
const Vec3V zeroV = V3Zero();
Vec3V closestA = zeroV, closestB = zeroV, normal = zeroV;
Vec3V worldNormal = zeroV, worldContactA = zeroV;//, worldContactB = zeroV;
Box box = _box;
const QuatV q0 = QuatVLoadU(&boxTransform.q.x);
const Vec3V p0 = V3LoadU(&boxTransform.p.x);
const Vec3V boxExtents = V3LoadU(box.extents);
const FloatV minMargin = CalculateMTDBoxMargin(boxExtents);
const FloatV inflationV = FAdd(FLoad(inflation), minMargin);
//const FloatV inflationV = FLoad(inflation);
PxReal boundInflation;
FStore(inflationV, &boundInflation);
box.extents += PxVec3(boundInflation);
const BoxV boxV(zeroV, boxExtents);
Vec3V boxCenter = V3LoadU(box.center);
//create the polyData based on the original box
PolygonalData polyData;
const PCMPolygonalBox polyBox(_box.extents);
polyBox.getPolygonalData(&polyData);
const Mat33V identity = M33Identity();
const Matrix34FromTransform meshToWorldSkew(pose);
PxTransformV boxTransformV(p0, q0);//box
/////
MeshPersistentContact manifoldContacts[64];
PxU32 numContacts = 0;
PxArray<PxU32> tempContainer;
tempContainer.reserve(128);
PxU32 triangleIndex = 0xfffffff;
Vec3V translation = zeroV;
bool foundInitial = false;
const PxU32 iterations = 4;
/////
for(PxU32 i=0; i<iterations; ++i)
{
tempContainer.forceSize_Unsafe(0);
{
const PxBounds3 bounds = PxBounds3::poseExtent(box.getTransform(), box.extents);
midPhaseQuery(hfUtil, pose, bounds, tempContainer);
}
// Get results
const PxU32 nbTriangles = tempContainer.size();
if(!nbTriangles)
break;
boxTransformV.p = boxCenter;
SupportLocalImpl<BoxV> boxMap(boxV, boxTransformV, identity, identity, true);
boxMap.setShapeSpaceCenterofMass(zeroV);
// Move to AABB space
PxMat34 WorldToBox;
computeWorldToBoxMatrix(WorldToBox, box);
const PxMat34 meshToBox = WorldToBox*meshToWorldSkew;
const Mat33V rot(V3LoadU(meshToBox.m.column0), V3LoadU(meshToBox.m.column1), V3LoadU(meshToBox.m.column2));
const PxMatTransformV meshToConvex(V3LoadU(meshToBox.p), rot);
FloatV mtd;
{
bool hadContacts = false;
const PxU32 nbBatches = (nbTriangles + BATCH_TRIANGLE_NUMBER - 1)/BATCH_TRIANGLE_NUMBER;
mtd = FMax();
MTDTriangle triangles[BATCH_TRIANGLE_NUMBER];
for(PxU32 a = 0; a < nbBatches; ++a)
{
const PxU32 startIndex = a * BATCH_TRIANGLE_NUMBER;
const PxU32 nbTrigs = PxMin(nbTriangles - startIndex, BATCH_TRIANGLE_NUMBER);
for(PxU32 k=0; k<nbTrigs; k++)
{
//triangle vertex space
const PxU32 currentTriangleIndex = tempContainer[startIndex+k];
hfUtil.getTriangle(pose, triangles[k], NULL, NULL, currentTriangleIndex, false, false);
triangles[k].extraTriData = ETD_CONVEX_EDGE_ALL;
}
//ML: mtd has back face culling, so if the box's center is below the triangle, we won't generate any contacts
hadContacts = calculateMTD(polyData, &boxMap, boxTransformV, meshToConvex, isDoubleSided, inflationV, triangles, nbTrigs, startIndex, manifoldContacts, numContacts, normal, closestA, closestB, triangleIndex, mtd) || hadContacts;
}
if(!hadContacts)
break;
triangleIndex = tempContainer[triangleIndex];
foundInitial = true;
}
const FloatV distV = mtd;
worldNormal = boxTransformV.rotate(normal);
worldContactA = boxTransformV.transform(closestA);
if(FAllGrtr(FZero(), distV))
{
//worldContactB = boxTransformV.transform(closestB);
const Vec3V t = V3Scale(worldNormal, mtd);
translation = V3Sub(translation, t);
boxCenter = V3Sub(boxCenter, t);
V3StoreU(boxCenter, box.center);
}
else
{
if(i == 0)
{
//First iteration so keep this normal
hit.distance = 0.0f;
V3StoreU(worldContactA, hit.position);
V3StoreU(worldNormal, hit.normal);
hit.faceIndex = triangleIndex;
return true;
}
break;
}
}
return finalizeMTD(hit, translation, worldContactA, triangleIndex, foundInitial);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
bool physx::Gu::computeConvex_TriangleMeshMTD( const PxTriangleMeshGeometry& triMeshGeom, const PxTransform& pose, const PxConvexMeshGeometry& convexGeom, const PxTransform& convexPose, PxReal inflation,
bool isDoubleSided, PxGeomSweepHit& hit)
{
const Vec3V zeroV = V3Zero();
TriangleMesh* triMesh = static_cast<TriangleMesh*>(triMeshGeom.triangleMesh);
ConvexMesh* cm = static_cast<ConvexMesh*>(convexGeom.convexMesh);
const PxU8* extraTrigData = triMesh->getExtraTrigData();
const bool flipsNormal = triMeshGeom.scale.hasNegativeDeterminant();
ConvexHullData* hullData = &cm->getHull();
const bool idtScaleConvex = convexGeom.scale.isIdentity();
FastVertex2ShapeScaling convexScaling;
if(!idtScaleConvex)
convexScaling.init(convexGeom.scale);
const PxVec3 _shapeSpaceCenterOfMass = convexScaling * hullData->mCenterOfMass;
const Vec3V shapeSpaceCenterOfMass = V3LoadU(_shapeSpaceCenterOfMass);
const QuatV q0 = QuatVLoadU(&convexPose.q.x);
const Vec3V p0 = V3LoadU(&convexPose.p.x);
PxTransformV convexTransformV(p0, q0);
const Vec3V vScale = V3LoadU_SafeReadW(convexGeom.scale.scale); // PT: safe because 'rotation' follows 'scale' in PxMeshScale
const QuatV vQuat = QuatVLoadU(&convexGeom.scale.rotation.x);
const ConvexHullV convexHull(hullData, V3Zero(), vScale, vQuat, idtScaleConvex);
PX_ALIGN(16, PxU8 convexBuff[sizeof(SupportLocalImpl<ConvexHullV>)]);
const FloatV convexMargin = CalculateMTDConvexMargin(hullData, vScale);
const FloatV inflationV = FAdd(FLoad(inflation), convexMargin);
PxReal boundInflation;
FStore(inflationV, &boundInflation);
Vec3V closestA = zeroV, closestB = zeroV, normal = zeroV;
const PxMat34 meshToWorldSkew = pose * triMeshGeom.scale;
PolygonalData polyData;
getPCMConvexData(convexHull, idtScaleConvex, polyData);
Vec3V center = p0;
PxTransform tempConvexPose = convexPose;
Vec3V worldNormal = zeroV, worldContactA = zeroV;//, worldContactB = zeroV;
/////
MeshPersistentContact manifoldContacts[64];
PxU32 numContacts = 0;
PxArray<PxU32> tempContainer;
tempContainer.reserve(128);
PxU32 triangleIndex = 0xfffffff;
Vec3V translation = zeroV;
bool foundInitial = false;
const PxU32 iterations = 2; // PT: TODO: why 2 here instead of 4?
/////
for(PxU32 i=0; i<iterations; ++i)
{
tempContainer.forceSize_Unsafe(0);
SupportLocal* convexMap;
{
			//ML: construct convex hull data
V3StoreU(center, tempConvexPose.p);
convexTransformV.p = center;
convexMap = idtScaleConvex ? static_cast<SupportLocal*>(PX_PLACEMENT_NEW(convexBuff, SupportLocalImpl<ConvexHullNoScaleV>)(static_cast<const ConvexHullNoScaleV&>(convexHull), convexTransformV, convexHull.vertex2Shape, convexHull.shape2Vertex, idtScaleConvex)) :
static_cast<SupportLocal*>(PX_PLACEMENT_NEW(convexBuff, SupportLocalImpl<ConvexHullV>)(convexHull, convexTransformV, convexHull.vertex2Shape, convexHull.shape2Vertex, idtScaleConvex));
convexMap->setShapeSpaceCenterofMass(shapeSpaceCenterOfMass);
Box hullOBB;
computeOBBAroundConvex(hullOBB, convexGeom, cm, tempConvexPose);
hullOBB.extents += PxVec3(boundInflation);
midPhaseQuery(triMeshGeom, pose, hullOBB, tempContainer);
}
// Get results
const PxU32 nbTriangles = tempContainer.size();
if(!nbTriangles)
break;
// Move to AABB space
const Matrix34FromTransform worldToConvex(tempConvexPose.getInverse());
const PxMat34 meshToConvex = worldToConvex*meshToWorldSkew;
const Mat33V rot(V3LoadU(meshToConvex.m.column0), V3LoadU(meshToConvex.m.column1), V3LoadU(meshToConvex.m.column2));
const PxMatTransformV meshToConvexV(V3LoadU(meshToConvex.p), rot);
FloatV mtd;
{
bool hadContacts = false;
const PxU32 nbBatches = (nbTriangles + BATCH_TRIANGLE_NUMBER - 1)/BATCH_TRIANGLE_NUMBER;
mtd = FMax();
MTDTriangle triangles[BATCH_TRIANGLE_NUMBER];
for(PxU32 a = 0; a < nbBatches; ++a)
{
const PxU32 startIndex = a * BATCH_TRIANGLE_NUMBER;
const PxU32 nbTrigs = PxMin(nbTriangles - startIndex, BATCH_TRIANGLE_NUMBER);
for(PxU32 k=0; k<nbTrigs; k++)
{
//triangle vertex space
const PxU32 currentTriangleIndex = tempContainer[startIndex+k];
triMesh->getLocalTriangle(triangles[k], currentTriangleIndex, flipsNormal);
triangles[k].extraTriData = getConvexEdgeFlags(extraTrigData, currentTriangleIndex);
}
				//ML: mtd has back face culling, so if the convex's center is below the triangle, we won't generate any contacts
hadContacts = calculateMTD(polyData, convexMap, convexTransformV, meshToConvexV, isDoubleSided, inflationV, triangles, nbTrigs, startIndex, manifoldContacts, numContacts, normal, closestA, closestB, triangleIndex, mtd) || hadContacts;
}
if(!hadContacts)
break;
triangleIndex = tempContainer[triangleIndex];
foundInitial = true;
}
const FloatV distV = mtd;
worldNormal = convexTransformV.rotate(normal);
worldContactA = convexTransformV.transform(closestA);
if(FAllGrtr(FZero(), distV))
{
const Vec3V t = V3Scale(worldNormal, mtd);
translation = V3Sub(translation, t);
center = V3Sub(center, t);
}
else
{
if(i == 0)
{
//First iteration so keep this normal
hit.distance = 0.0f;
V3StoreU(worldContactA, hit.position);
V3StoreU(worldNormal, hit.normal);
hit.faceIndex = triangleIndex;
return true;
}
break;
}
}
return finalizeMTD(hit, translation, worldContactA, triangleIndex, foundInitial);
}
bool physx::Gu::computeConvex_HeightFieldMTD(const PxHeightFieldGeometry& heightFieldGeom, const PxTransform& pose, const PxConvexMeshGeometry& convexGeom, const PxTransform& convexPose, PxReal inflation, bool isDoubleSided, PxGeomSweepHit& hit)
{
const HeightFieldUtil hfUtil(heightFieldGeom);
const Vec3V zeroV = V3Zero();
ConvexMesh* cm = static_cast<ConvexMesh*>(convexGeom.convexMesh);
ConvexHullData* hullData = &cm->getHull();
const bool idtScaleConvex = convexGeom.scale.isIdentity();
FastVertex2ShapeScaling convexScaling;
if(!idtScaleConvex)
convexScaling.init(convexGeom.scale);
const PxVec3 _shapeSpaceCenterOfMass = convexScaling * hullData->mCenterOfMass;
const Vec3V shapeSpaceCenterOfMass = V3LoadU(_shapeSpaceCenterOfMass);
const QuatV q0 = QuatVLoadU(&convexPose.q.x);
const Vec3V p0 = V3LoadU(&convexPose.p.x);
PxTransformV convexTransformV(p0, q0);
const Vec3V vScale = V3LoadU_SafeReadW(convexGeom.scale.scale); // PT: safe because 'rotation' follows 'scale' in PxMeshScale
const QuatV vQuat = QuatVLoadU(&convexGeom.scale.rotation.x);
const ConvexHullV convexHull(hullData, zeroV, vScale, vQuat, idtScaleConvex);
PX_ALIGN(16, PxU8 convexBuff[sizeof(SupportLocalImpl<ConvexHullV>)]);
const FloatV convexMargin = CalculateMTDConvexMargin(hullData, vScale);
const FloatV inflationV = FAdd(FLoad(inflation), convexMargin);
PxReal boundInflation;
FStore(inflationV, &boundInflation);
Vec3V closestA = zeroV, closestB = zeroV, normal = zeroV;
Vec3V worldNormal = zeroV, worldContactA = zeroV;//, worldContactB = zeroV;
PolygonalData polyData;
getPCMConvexData(convexHull, idtScaleConvex, polyData);
Vec3V center = p0;
PxTransform tempConvexPose = convexPose;
const Matrix34FromTransform meshToWorldSkew(pose);
/////
MeshPersistentContact manifoldContacts[64];
PxU32 numContacts = 0;
PxArray<PxU32> tempContainer;
tempContainer.reserve(128);
PxU32 triangleIndex = 0xfffffff;
Vec3V translation = zeroV;
bool foundInitial = false;
const PxU32 iterations = 2; // PT: TODO: why 2 here instead of 4?
/////
for(PxU32 i=0; i<iterations; ++i)
{
tempContainer.forceSize_Unsafe(0);
SupportLocal* convexMap;
{
			//ML: construct convex hull data
V3StoreU(center, tempConvexPose.p);
convexTransformV.p = center;
convexMap = idtScaleConvex ? static_cast<SupportLocal*>(PX_PLACEMENT_NEW(convexBuff, SupportLocalImpl<ConvexHullNoScaleV>)(static_cast<const ConvexHullNoScaleV&>(convexHull), convexTransformV, convexHull.vertex2Shape, convexHull.shape2Vertex, idtScaleConvex)) :
static_cast<SupportLocal*>(PX_PLACEMENT_NEW(convexBuff, SupportLocalImpl<ConvexHullV>)(convexHull, convexTransformV, convexHull.vertex2Shape, convexHull.shape2Vertex, idtScaleConvex));
convexMap->setShapeSpaceCenterofMass(shapeSpaceCenterOfMass);
Box hullOBB;
computeOBBAroundConvex(hullOBB, convexGeom, cm, tempConvexPose);
hullOBB.extents += PxVec3(boundInflation);
const PxBounds3 bounds = PxBounds3::basisExtent(hullOBB.center, hullOBB.rot, hullOBB.extents);
midPhaseQuery(hfUtil, pose, bounds, tempContainer);
}
// Get results
const PxU32 nbTriangles = tempContainer.size();
if(!nbTriangles)
break;
// Move to AABB space
const Matrix34FromTransform worldToConvex(tempConvexPose.getInverse());
const PxMat34 meshToConvex = worldToConvex*meshToWorldSkew;
const Mat33V rot(V3LoadU(meshToConvex.m.column0), V3LoadU(meshToConvex.m.column1), V3LoadU(meshToConvex.m.column2));
const PxMatTransformV meshToConvexV(V3LoadU(meshToConvex.p), rot);
FloatV mtd;
{
bool hadContacts = false;
const PxU32 nbBatches = (nbTriangles + BATCH_TRIANGLE_NUMBER - 1)/BATCH_TRIANGLE_NUMBER;
mtd = FMax();
MTDTriangle triangles[BATCH_TRIANGLE_NUMBER];
for(PxU32 a = 0; a < nbBatches; ++a)
{
const PxU32 startIndex = a * BATCH_TRIANGLE_NUMBER;
const PxU32 nbTrigs = PxMin(nbTriangles - startIndex, BATCH_TRIANGLE_NUMBER);
for(PxU32 k=0; k<nbTrigs; k++)
{
//triangle vertex space
const PxU32 currentTriangleIndex = tempContainer[startIndex+k];
hfUtil.getTriangle(pose, triangles[k], NULL, NULL, currentTriangleIndex, false, false);
triangles[k].extraTriData = ETD_CONVEX_EDGE_ALL;
}
				//ML: mtd has back face culling, so if the convex's center is below the triangle, we won't generate any contacts
hadContacts = calculateMTD(polyData, convexMap, convexTransformV, meshToConvexV, isDoubleSided, inflationV, triangles, nbTrigs, startIndex, manifoldContacts, numContacts, normal, closestA, closestB, triangleIndex, mtd) || hadContacts;
}
if(!hadContacts)
break;
triangleIndex = tempContainer[triangleIndex];
foundInitial = true;
}
const FloatV distV = mtd;
worldNormal = convexTransformV.rotate(normal);
worldContactA = convexTransformV.transform(closestA);
if(FAllGrtr(FZero(), distV))
{
const Vec3V t = V3Scale(worldNormal, mtd);
translation = V3Sub(translation, t);
center = V3Sub(center, t);
}
else
{
if(i == 0)
{
//First iteration so keep this normal
hit.distance = 0.0f;
V3StoreU(worldContactA, hit.position);
V3StoreU(worldNormal, hit.normal);
hit.faceIndex = triangleIndex;
return true;
}
break;
}
}
return finalizeMTD(hit, translation, worldContactA, triangleIndex, foundInitial);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
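// For the primitive-vs-primitive MTD helpers below, hit.distance is the signed separation
// (negative when the shapes overlap, in which case its magnitude is the penetration depth)
// and hit.position lies on the surface of one of the two shapes along hit.normal.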
bool physx::Gu::computeSphere_SphereMTD(const Sphere& sphere0, const Sphere& sphere1, PxGeomSweepHit& hit)
{
const PxVec3 delta = sphere1.center - sphere0.center;
const PxReal d2 = delta.magnitudeSquared();
const PxReal radiusSum = sphere0.radius + sphere1.radius;
const PxReal d = manualNormalize(hit.normal, delta, d2);
hit.distance = d - radiusSum;
hit.position = sphere0.center + hit.normal * sphere0.radius;
return true;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
bool physx::Gu::computeSphere_CapsuleMTD( const Sphere& sphere, const Capsule& capsule, PxGeomSweepHit& hit)
{
const PxReal radiusSum = sphere.radius + capsule.radius;
PxReal u;
distancePointSegmentSquared(capsule, sphere.center, &u);
const PxVec3 normal = capsule.getPointAt(u) - sphere.center;
const PxReal lenSq = normal.magnitudeSquared();
const PxF32 d = manualNormalize(hit.normal, normal, lenSq);
hit.distance = d - radiusSum;
hit.position = sphere.center + hit.normal * sphere.radius;
return true;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
bool physx::Gu::computeCapsule_CapsuleMTD(const Capsule& capsule0, const Capsule& capsule1, PxGeomSweepHit& hit)
{
PxReal s,t;
distanceSegmentSegmentSquared(capsule0, capsule1, &s, &t);
const PxReal radiusSum = capsule0.radius + capsule1.radius;
const PxVec3 pointAtCapsule0 = capsule0.getPointAt(s);
const PxVec3 pointAtCapsule1 = capsule1.getPointAt(t);
const PxVec3 normal = pointAtCapsule0 - pointAtCapsule1;
const PxReal lenSq = normal.magnitudeSquared();
const PxF32 len = manualNormalize(hit.normal, normal, lenSq);
hit.distance = len - radiusSum;
hit.position = pointAtCapsule1 + hit.normal * capsule1.radius;
return true;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
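// For the plane cases the MTD is simply the signed plane distance of the most deeply
// penetrating point of the other shape, with hit.normal being the plane normal.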
bool physx::Gu::computePlane_CapsuleMTD(const PxPlane& plane, const Capsule& capsule, PxGeomSweepHit& hit)
{
const PxReal d0 = plane.distance(capsule.p0);
const PxReal d1 = plane.distance(capsule.p1);
PxReal dmin;
PxVec3 point;
if(d0 < d1)
{
dmin = d0;
point = capsule.p0;
}
else
{
dmin = d1;
point = capsule.p1;
}
hit.normal = plane.n;
hit.distance = dmin - capsule.radius;
hit.position = point - hit.normal * dmin;
return true;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
bool physx::Gu::computePlane_BoxMTD(const PxPlane& plane, const Box& box, PxGeomSweepHit& hit)
{
PxVec3 pts[8];
box.computeBoxPoints(pts);
PxReal dmin = plane.distance(pts[0]);
PxU32 index = 0;
for(PxU32 i=1;i<8;i++)
{
const PxReal d = plane.distance(pts[i]);
if(dmin > d)
{
index = i;
dmin = d;
}
}
hit.normal = plane.n;
hit.distance = dmin;
hit.position = pts[index] - plane.n*dmin;
return true;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
bool physx::Gu::computePlane_ConvexMTD(const PxPlane& plane, const PxConvexMeshGeometry& convexGeom, const PxTransform& convexPose, PxGeomSweepHit& hit)
{
const ConvexMesh* convexMesh = static_cast<const ConvexMesh*>(convexGeom.convexMesh);
const FastVertex2ShapeScaling convexScaling(convexGeom.scale);
PxU32 nbVerts = convexMesh->getNbVerts();
const PxVec3* PX_RESTRICT verts = convexMesh->getVerts();
PxVec3 worldPointMin = convexPose.transform(convexScaling * verts[0]);
PxReal dmin = plane.distance(worldPointMin);
for(PxU32 i=1;i<nbVerts;i++)
{
const PxVec3 worldPoint = convexPose.transform(convexScaling * verts[i]);
const PxReal d = plane.distance(worldPoint);
if(dmin > d)
{
dmin = d;
worldPointMin = worldPoint;
}
}
hit.normal = plane.n;
hit.distance = dmin;
hit.position = worldPointMin - plane.n * dmin;
return true;
}
| 38,568 | C++ | 32.480035 | 274 | 0.718601 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuGjkQuery.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "geometry/PxGjkQuery.h"
#include "GuInternal.h"
#include "GuOverlapTests.h"
#include "GuSweepTests.h"
#include "GuRaycastTests.h"
#include "GuBoxConversion.h"
#include "GuTriangleMesh.h"
#include "GuMTD.h"
#include "GuBounds.h"
#include "GuDistancePointSegment.h"
#include "GuConvexMesh.h"
#include "GuDistancePointBox.h"
#include "GuMidphaseInterface.h"
#include "foundation/PxFPU.h"
using namespace physx;
using namespace Gu;
#include "GuGJK.h"
#include "GuGJKPenetration.h"
#include "GuGJKRaycast.h"
#include "GuEPA.h"
#include "geomutils/PxContactBuffer.h"
using namespace aos;
static PX_SUPPORT_INLINE PxVec3 Vec3V_To_PxVec3(const Vec3V& a)
{
PxVec3 v;
V3StoreU(a, v);
return v;
}
static PX_SUPPORT_INLINE PxReal FloatV_To_PxReal(const FloatV& a)
{
PxF32 f;
FStore(a, &f);
return f;
}
struct CustomConvexV : ConvexV
{
const PxGjkQuery::Support* s;
PxReal supportScale;
CustomConvexV(const PxGjkQuery::Support& _s) : ConvexV(Gu::ConvexType::eCUSTOM), s(&_s), supportScale(1.0f)
{
setMinMargin(FLoad(0.001f));
setSweepMargin(FLoad(0.001f));
}
PX_SUPPORT_INLINE Vec3V supportPoint(const PxI32 /*index*/) const
{
return supportLocal(V3LoadU(PxVec3(1, 0, 0)));
}
PX_SUPPORT_INLINE Vec3V supportLocal(const Vec3V& dir) const
{
return V3Scale(V3LoadU(s->supportLocal(Vec3V_To_PxVec3(dir))), FLoad(supportScale));
}
PX_SUPPORT_INLINE Vec3V supportLocal(const Vec3V& dir, PxI32& index) const
{
index = 0;
return supportLocal(dir);
}
PX_SUPPORT_INLINE Vec3V supportRelative(const Vec3V& dir, const PxMatTransformV& aTob, const PxMatTransformV& aTobT) const
{
const Vec3V _dir = aTobT.rotate(dir);
const Vec3V p = supportLocal(_dir);
return aTob.transform(p);
}
PX_SUPPORT_INLINE Vec3V supportRelative(const Vec3V& dir, const PxMatTransformV& aTob, const PxMatTransformV& aTobT, PxI32& index) const
{
index = 0;
return supportRelative(dir, aTob, aTobT);
}
};
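// Illustrative sketch (not part of the original file): the PxGjkQuery::Support interface
// only requires a margin and a local-space support function, so, assuming the interface
// declared in PxGjkQuery.h, a sphere can be expressed as a point with a margin:
//
//	struct SphereSupport : PxGjkQuery::Support
//	{
//		PxReal radius;
//		virtual PxReal getMargin() const { return radius; }
//		virtual PxVec3 supportLocal(const PxVec3& /*dir*/) const { return PxVec3(0.0f); }
//	};
//
// CustomConvexV above adapts such a user-provided Support to the internal ConvexV
// interface so the GJK/EPA code below can consume it.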
bool PxGjkQuery::proximityInfo(const Support& a, const Support& b, const PxTransform& poseA, const PxTransform& poseB, PxReal contactDistance, PxReal toleranceLength, PxVec3& pointA, PxVec3& pointB, PxVec3& separatingAxis, PxReal& separation)
{
const PxTransformV transf0 = loadTransformU(poseA);
const PxTransformV transf1 = loadTransformU(poseB);
const PxTransformV curRTrans(transf1.transformInv(transf0));
const PxMatTransformV aToB(curRTrans);
const PxReal degenerateScale = 0.001f;
CustomConvexV supportA(a);
CustomConvexV supportB(b);
const RelativeConvex<CustomConvexV> convexA(supportA, aToB);
const LocalConvex<CustomConvexV> convexB(supportB);
Vec3V initialSearchDir = aToB.p;
FloatV contactDist = FLoad((a.getMargin() + b.getMargin()) + contactDistance);
Vec3V aPoints[4];
Vec3V bPoints[4];
PxU8 size = 0;
GjkOutput output;
GjkStatus status = gjkPenetration(convexA, convexB, initialSearchDir, contactDist, true, aPoints, bPoints, size, output);
if (status == GJK_DEGENERATE)
{
supportA.supportScale = supportB.supportScale = 1.0f - degenerateScale;
status = gjkPenetration(convexA, convexB, initialSearchDir, contactDist, true, aPoints, bPoints, size, output);
supportA.supportScale = supportB.supportScale = 1.0f;
}
if (status == GJK_CONTACT || status == GJK_DEGENERATE)
{
separatingAxis = poseB.rotate(Vec3V_To_PxVec3(output.normal).getNormalized());
pointA = poseB.transform(Vec3V_To_PxVec3(output.closestA)) - separatingAxis * a.getMargin();
pointB = poseB.transform(Vec3V_To_PxVec3(output.closestB)) + separatingAxis * b.getMargin();
separation = (pointA - pointB).dot(separatingAxis);
return true;
}
if (status == EPA_CONTACT)
{
status = epaPenetration(convexA, convexB, aPoints, bPoints, size, true, FLoad(toleranceLength), output);
if (status == EPA_CONTACT || status == EPA_DEGENERATE)
{
separatingAxis = poseB.rotate(Vec3V_To_PxVec3(output.normal).getNormalized());
pointA = poseB.transform(Vec3V_To_PxVec3(output.closestA)) - separatingAxis * a.getMargin();
pointB = poseB.transform(Vec3V_To_PxVec3(output.closestB)) + separatingAxis * b.getMargin();
separation = (pointA - pointB).dot(separatingAxis);
return true;
}
}
return false;
}
struct PointConvexV : ConvexV
{
Vec3V zero;
PointConvexV() : ConvexV(Gu::ConvexType::eCUSTOM)
{
zero = V3Zero();
setMinMargin(FLoad(0.001f));
setSweepMargin(FLoad(0.001f));
}
PX_SUPPORT_INLINE Vec3V supportPoint(const PxI32 /*index*/) const
{
return zero;
}
PX_SUPPORT_INLINE Vec3V supportLocal(const Vec3V& /*dir*/) const
{
return zero;
}
PX_SUPPORT_INLINE Vec3V supportLocal(const Vec3V& dir, PxI32& index) const
{
index = 0;
return supportLocal(dir);
}
PX_SUPPORT_INLINE Vec3V supportRelative(const Vec3V& dir, const PxMatTransformV& aTob, const PxMatTransformV& aTobT) const
{
const Vec3V _dir = aTobT.rotate(dir);
const Vec3V p = supportLocal(_dir);
return aTob.transform(p);
}
PX_SUPPORT_INLINE Vec3V supportRelative(const Vec3V& dir, const PxMatTransformV& aTob, const PxMatTransformV& aTobT, PxI32& index) const
{
index = 0;
return supportRelative(dir, aTob, aTobT);
}
};
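// PointConvexV models the ray origin as a degenerate (single-point) convex: raycast()
// below then reduces to a GJK-based sweep of the point against the shape along
// r = unitDir * maxDist, with the returned lambda in [0,1] rescaled by maxDist.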
bool PxGjkQuery::raycast(const Support& shape, const PxTransform& pose, const PxVec3& rayStart, const PxVec3& unitDir, PxReal maxDist, PxReal& t, PxVec3& n, PxVec3& p)
{
const PxTransformV transf0 = loadTransformU(pose);
const PxTransformV transf1 = PxTransformV(V3LoadU(rayStart));
const PxTransformV curRTrans(transf1.transformInv(transf0));
const PxMatTransformV aToB(curRTrans);
CustomConvexV supportA(shape);
PointConvexV supportB;
const RelativeConvex<CustomConvexV> convexA(supportA, aToB);
const LocalConvex<PointConvexV> convexB(supportB);
Vec3V initialDir = aToB.p;
FloatV initialLambda = FLoad(0);
Vec3V s = V3Zero();
Vec3V r = V3LoadU(unitDir * maxDist);
FloatV lambda;
Vec3V normal, closestA;
if (gjkRaycast(convexA, convexB, initialDir, initialLambda, s, r, lambda, normal, closestA, shape.getMargin()))
{
t = FloatV_To_PxReal(lambda) * maxDist;
n = -Vec3V_To_PxVec3(normal).getNormalized();
p = Vec3V_To_PxVec3(closestA) + n * shape.getMargin() + rayStart;
return true;
}
return false;
}
bool PxGjkQuery::overlap(const Support& a, const Support& b, const PxTransform& poseA, const PxTransform& poseB)
{
const PxTransformV transf0 = loadTransformU(poseA);
const PxTransformV transf1 = loadTransformU(poseB);
const PxTransformV curRTrans(transf1.transformInv(transf0));
const PxMatTransformV aToB(curRTrans);
CustomConvexV supportA(a);
CustomConvexV supportB(b);
const RelativeConvex<CustomConvexV> convexA(supportA, aToB);
const LocalConvex<CustomConvexV> convexB(supportB);
Vec3V initialSearchDir = aToB.p;
FloatV contactDist = FLoad(a.getMargin() + b.getMargin());
Vec3V closestA, closestB, normal;
FloatV distance;
GjkStatus status = gjk(convexA, convexB, initialSearchDir, contactDist, closestA, closestB, normal, distance);
return status == GJK_CLOSE || status == GJK_CONTACT;
}
bool PxGjkQuery::sweep(const Support& a, const Support& b, const PxTransform& poseA, const PxTransform& poseB, const PxVec3& unitDir, PxReal maxDist, PxReal& t, PxVec3& n, PxVec3& p)
{
const PxTransformV transf0 = loadTransformU(poseA);
const PxTransformV transf1 = loadTransformU(poseB);
const PxTransformV curRTrans(transf1.transformInv(transf0));
const PxMatTransformV aToB(curRTrans);
CustomConvexV supportA(a);
CustomConvexV supportB(b);
const RelativeConvex<CustomConvexV> convexA(supportA, aToB);
const LocalConvex<CustomConvexV> convexB(supportB);
Vec3V initialDir = aToB.p;
FloatV initialLambda = FLoad(0);
Vec3V s = V3Zero();
Vec3V r = V3LoadU(poseB.rotateInv(unitDir * maxDist));
FloatV lambda;
Vec3V normal, closestA;
if (gjkRaycast(convexA, convexB, initialDir, initialLambda, s, r, lambda, normal, closestA, a.getMargin() + b.getMargin()))
{
t = FloatV_To_PxReal(lambda) * maxDist;
n = poseB.rotate(-(Vec3V_To_PxVec3(normal)).getNormalized());
p = poseB.transform(Vec3V_To_PxVec3(closestA)) + n * a.getMargin();
return true;
}
return false;
}
| 9,711 | C++ | 33.935252 | 242 | 0.748739 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuAABBPruner.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "common/PxProfileZone.h"
#include "foundation/PxIntrinsics.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxBitUtils.h"
#include "GuAABBPruner.h"
#include "GuPrunerMergeData.h"
#include "GuCallbackAdapter.h"
#include "GuSphere.h"
#include "GuBox.h"
#include "GuCapsule.h"
#include "GuAABBTreeQuery.h"
#include "GuAABBTreeNode.h"
#include "GuQuery.h"
#include "CmVisualization.h"
using namespace physx;
using namespace Gu;
using namespace Cm;
// PT: TODO: this is copied from SqBounds.h, should be either moved to Gu and shared or passed as a user parameter
#define SQ_PRUNER_EPSILON 0.005f
#define SQ_PRUNER_INFLATION (1.0f + SQ_PRUNER_EPSILON) // pruner test shape inflation (not narrow phase shape)
AABBPruner::AABBPruner(bool incrementalRebuild, PxU64 contextID, CompanionPrunerType cpType, BVHBuildStrategy buildStrategy, PxU32 nbObjectsPerNode) :
mAABBTree (NULL),
mNewTree (NULL),
mNbCachedBoxes (0),
mNbCalls (0),
mTimeStamp (0),
mBucketPruner (contextID, cpType, &mPool),
mProgress (BUILD_NOT_STARTED),
mRebuildRateHint (100),
mAdaptiveRebuildTerm(0),
mNbObjectsPerNode (nbObjectsPerNode),
mBuildStrategy (buildStrategy),
mPool (contextID, TRANSFORM_CACHE_GLOBAL),
mIncrementalRebuild (incrementalRebuild),
mUncommittedChanges (false),
mNeedsNewTree (false),
mNewTreeFixups ("AABBPruner::mNewTreeFixups")
{
PX_ASSERT(nbObjectsPerNode<16);
}
AABBPruner::~AABBPruner()
{
release();
}
bool AABBPruner::addObjects(PrunerHandle* results, const PxBounds3* bounds, const PrunerPayload* data, const PxTransform* transforms, PxU32 count, bool hasPruningStructure)
{
PX_PROFILE_ZONE("SceneQuery.prunerAddObjects", mPool.mContextID);
if(!count)
return true;
// no need to do refitMarked for added objects since they are not in the tree
	// if a pruning structure was provided, we will merge it; the changes will be applied after the objects have been added
if(!hasPruningStructure || !mAABBTree)
mUncommittedChanges = true;
// PT: TODO: 'addObjects' for bucket pruner too. Not urgent since we always call the function with count=1 at the moment
const PxU32 valid = mPool.addObjects(results, bounds, data, transforms, count);
// Bucket pruner is only used while the dynamic pruner is rebuilding
	// For the static pruner a full rebuild will happen in commit() every time we modify something. This is not true if a
	// pruning structure was provided: the objects' tree will be merged directly into the static tree and no rebuild will be triggered.
if(mIncrementalRebuild && mAABBTree)
{
PX_PROFILE_ZONE("SceneQuery.bucketPrunerAddObjects", mPool.mContextID);
mNeedsNewTree = true; // each add forces a tree rebuild
		// if a pruning structure is provided, we don't move the new objects into the bucket pruner;
		// the pruning structure itself will be merged into the bucket pruner
if(!hasPruningStructure)
{
for(PxU32 i=0;i<valid;i++)
{
// PT: poolIndex fetched in vain for bucket pruner companion...
// Since the incremental tree references the same pool we could just retrieve the poolIndex there, from the handle...
const PrunerHandle handle = results[i];
const PoolIndex poolIndex = mPool.getIndex(handle);
mBucketPruner.addObject(data[i], handle, bounds[i], transforms[i], mTimeStamp, poolIndex);
}
}
}
return valid==count;
}
void AABBPruner::updateObjects(const PrunerHandle* handles, PxU32 count, float inflation, const PxU32* boundsIndices, const PxBounds3* newBounds, const PxTransform32* newTransforms)
{
PX_PROFILE_ZONE("SceneQuery.prunerUpdateObjects", mPool.mContextID);
if(!count)
return;
mUncommittedChanges = true;
if(handles && boundsIndices && newBounds)
mPool.updateAndInflateBounds(handles, boundsIndices, newBounds, newTransforms, count, inflation);
if(mIncrementalRebuild && mAABBTree)
{
mNeedsNewTree = true; // each update forces a tree rebuild
const PxBounds3* currentBounds = mPool.getCurrentWorldBoxes();
const PxTransform* currentTransforms = mPool.getTransforms();
const PrunerPayload* data = mPool.getObjects();
const bool addToRefit = mProgress == BUILD_NEW_MAPPING || mProgress == BUILD_FULL_REFIT || mProgress==BUILD_LAST_FRAME;
for(PxU32 i=0; i<count; i++)
{
const PrunerHandle handle = handles[i];
const PoolIndex poolIndex = mPool.getIndex(handle);
const TreeNodeIndex treeNodeIndex = mTreeMap[poolIndex];
if(treeNodeIndex != INVALID_NODE_ID) // this means it's in the current tree still and hasn't been removed
mAABBTree->markNodeForRefit(treeNodeIndex);
else // otherwise it means it should be in the bucket pruner
{
PX_ASSERT(&data[poolIndex]==&mPool.getPayloadData(handle));
bool found = mBucketPruner.updateObject(currentBounds[poolIndex], currentTransforms[poolIndex], data[poolIndex], handle, poolIndex);
PX_UNUSED(found); PX_ASSERT(found);
}
if(addToRefit)
mToRefit.pushBack(poolIndex);
}
}
}
void AABBPruner::removeObjects(const PrunerHandle* handles, PxU32 count, PrunerPayloadRemovalCallback* removalCallback)
{
PX_PROFILE_ZONE("SceneQuery.prunerRemoveObjects", mPool.mContextID);
if(!count)
return;
mUncommittedChanges = true;
for(PxU32 i=0; i<count; i++)
{
const PrunerHandle h = handles[i];
// copy the payload/userdata before removing it since we need to know the payload/userdata to remove it from the bucket pruner
const PrunerPayload removedData = mPool.getPayloadData(h);
const PoolIndex poolIndex = mPool.getIndex(h); // save the pool index for removed object
const PoolIndex poolRelocatedLastIndex = mPool.removeObject(h, removalCallback); // save the lastIndex returned by removeObject
if(mIncrementalRebuild && mAABBTree)
{
mNeedsNewTree = true;
const TreeNodeIndex treeNodeIndex = mTreeMap[poolIndex]; // already removed from pool but still in tree map
const PrunerPayload swappedData = mPool.getObjects()[poolIndex];
if(treeNodeIndex!=INVALID_NODE_ID) // can be invalid if removed
{
mAABBTree->markNodeForRefit(treeNodeIndex); // mark the spot as blank
mBucketPruner.swapIndex(poolIndex, swappedData, poolRelocatedLastIndex); // if swapped index is in bucket pruner
}
else
{
bool status = mBucketPruner.removeObject(removedData, h, poolIndex, swappedData, poolRelocatedLastIndex);
// PT: removed assert to avoid crashing all UTs
//PX_ASSERT(status);
PX_UNUSED(status);
}
mTreeMap.invalidate(poolIndex, poolRelocatedLastIndex, *mAABBTree);
if(mNewTree)
mNewTreeFixups.pushBack(NewTreeFixup(poolIndex, poolRelocatedLastIndex));
}
}
if (mPool.getNbActiveObjects()==0)
{
// this is just to make sure we release all the internal data once all the objects are out of the pruner
// since this is the only place we know that and we don't want to keep memory reserved
release();
// Pruner API requires a commit before the next query, even if we ended up removing the entire tree here. This
// forces that to happen.
mUncommittedChanges = true;
}
}
bool AABBPruner::overlap(const ShapeData& queryVolume, PrunerOverlapCallback& pcbArgName) const
{
PX_ASSERT(!mUncommittedChanges);
bool again = true;
if(mAABBTree)
{
OverlapCallbackAdapter pcb(pcbArgName, mPool);
switch(queryVolume.getType())
{
case PxGeometryType::eBOX:
{
if(queryVolume.isOBB())
{
const DefaultOBBAABBTest test(queryVolume);
again = AABBTreeOverlap<true, OBBAABBTest, AABBTree, BVHNode, OverlapCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, test, pcb);
}
else
{
const DefaultAABBAABBTest test(queryVolume);
again = AABBTreeOverlap<true, AABBAABBTest, AABBTree, BVHNode, OverlapCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, test, pcb);
}
}
break;
case PxGeometryType::eCAPSULE:
{
const DefaultCapsuleAABBTest test(queryVolume, SQ_PRUNER_INFLATION);
again = AABBTreeOverlap<true, CapsuleAABBTest, AABBTree, BVHNode, OverlapCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, test, pcb);
}
break;
case PxGeometryType::eSPHERE:
{
const DefaultSphereAABBTest test(queryVolume);
again = AABBTreeOverlap<true, SphereAABBTest, AABBTree, BVHNode, OverlapCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, test, pcb);
}
break;
case PxGeometryType::eCONVEXMESH:
{
const DefaultOBBAABBTest test(queryVolume);
again = AABBTreeOverlap<true, OBBAABBTest, AABBTree, BVHNode, OverlapCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, test, pcb);
}
break;
default:
PX_ALWAYS_ASSERT_MESSAGE("unsupported overlap query volume geometry type");
}
}
if(again && mIncrementalRebuild && mBucketPruner.getNbObjects())
again = mBucketPruner.overlap(queryVolume, pcbArgName);
return again;
}
bool AABBPruner::sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& pcbArgName) const
{
PX_ASSERT(!mUncommittedChanges);
bool again = true;
if(mAABBTree)
{
RaycastCallbackAdapter pcb(pcbArgName, mPool);
const PxBounds3& aabb = queryVolume.getPrunerInflatedWorldAABB();
again = AABBTreeRaycast<true, true, AABBTree, BVHNode, RaycastCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, aabb.getCenter(), unitDir, inOutDistance, aabb.getExtents(), pcb);
}
if(again && mIncrementalRebuild && mBucketPruner.getNbObjects())
again = mBucketPruner.sweep(queryVolume, unitDir, inOutDistance, pcbArgName);
return again;
}
bool AABBPruner::raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& pcbArgName) const
{
PX_ASSERT(!mUncommittedChanges);
bool again = true;
if(mAABBTree)
{
RaycastCallbackAdapter pcb(pcbArgName, mPool);
again = AABBTreeRaycast<false, true, AABBTree, BVHNode, RaycastCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, origin, unitDir, inOutDistance, PxVec3(0.0f), pcb);
}
if(again && mIncrementalRebuild && mBucketPruner.getNbObjects())
again = mBucketPruner.raycast(origin, unitDir, inOutDistance, pcbArgName);
return again;
}
// This isn't part of the pruner virtual interface, but it is part of the public interface
// of AABBPruner - it gets called by SqManager to force a rebuild, and requires a commit() before
// queries can take place
void AABBPruner::purge()
{
release();
mUncommittedChanges = true; // this ensures a commit() must happen before any query
}
void AABBPruner::setRebuildRateHint(PxU32 nbStepsForRebuild)
{
PX_ASSERT(nbStepsForRebuild > 3);
mRebuildRateHint = (nbStepsForRebuild-3); // looks like a magic number to account for the rebuild pipeline latency
mAdaptiveRebuildTerm = 0;
}
// Commit either performs a refit if background rebuild is not yet finished
// or swaps the current tree for the second tree rebuilt in the background
void AABBPruner::commit()
{
PX_PROFILE_ZONE("SceneQuery.prunerCommit", mPool.mContextID);
if(!mUncommittedChanges && (mProgress != BUILD_FINISHED))
// Q: seems like this is both for refit and finalization so is this is correct?
// i.e. in a situation when we started rebuilding a tree and didn't add anything since
// who is going to set mUncommittedChanges to true?
// A: it's set in buildStep at final stage, so that finalization is forced.
// Seems a bit difficult to follow and verify correctness.
return;
mUncommittedChanges = false;
if(!mAABBTree || !mIncrementalRebuild)
{
if(!mIncrementalRebuild && mAABBTree)
PxGetFoundation().error(PxErrorCode::ePERF_WARNING, PX_FL, "SceneQuery static AABB Tree rebuilt, because a shape attached to a static actor was added, removed or moved, and PxSceneQueryDesc::staticStructure is set to eSTATIC_AABB_TREE.");
fullRebuildAABBTree();
return;
}
// Note: it is not safe to call AABBPruner::build() here
// because the first thread will perform one step of the incremental update,
// continue raycasting, while the second thread performs the next step in
// the incremental update
// Calling Refit() below is safe. It will call
// StaticPruner::build() when necessary. Both will early
// exit if the tree is already up to date, if it is not already, then we
// must be the first thread performing raycasts on a dirty tree and other
// scene query threads will be locked out by the write lock in
// PrunerManager::flushUpdates()
if (mProgress != BUILD_FINISHED)
{
// Calling refit because the second tree is not ready to be swapped in (mProgress != BUILD_FINISHED)
		// Generally speaking, as long as things keep moving, the second build will never catch up with the true state
refitUpdatedAndRemoved();
}
else
{
PX_PROFILE_ZONE("SceneQuery.prunerNewTreeFinalize", mPool.mContextID);
{
PX_PROFILE_ZONE("SceneQuery.prunerNewTreeSwitch", mPool.mContextID);
PX_DELETE(mAABBTree); // delete the old tree
mCachedBoxes.release();
mProgress = BUILD_NOT_STARTED; // reset the build state to initial
// Adjust adaptive term to get closer to specified rebuild rate.
// perform an even division correction to make sure the rebuild rate adds up
if (mNbCalls > mRebuildRateHint)
mAdaptiveRebuildTerm++;
else if (mNbCalls < mRebuildRateHint)
mAdaptiveRebuildTerm--;
// Switch trees
#if PX_DEBUG
mNewTree->validate();
#endif
mAABBTree = mNewTree; // set current tree to progressively rebuilt tree
mNewTree = NULL; // clear out the progressively rebuild tree pointer
mNodeAllocator.release();
}
{
PX_PROFILE_ZONE("SceneQuery.prunerNewTreeMapping", mPool.mContextID);
// rebuild the tree map to match the current (newly built) tree
mTreeMap.initMap(PxMax(mPool.getNbActiveObjects(), mNbCachedBoxes), *mAABBTree);
// The new mapping has been computed using only indices stored in the new tree. Those indices map the pruning pool
// we had when starting to build the tree. We need to re-apply recorded moves to fix the tree that finished rebuilding.
// AP: the problem here is while we are rebuilding the tree there are ongoing modifications to the current tree
// but the background build has a cached copy of all the AABBs at the time it was started
// (and will produce indices referencing those)
// Things that can happen in the meantime: update, remove, add, commit
for(NewTreeFixup* r = mNewTreeFixups.begin(); r < mNewTreeFixups.end(); r++)
{
// PT: we're not doing a full refit after this point anymore, so the remaining deleted objects must be manually marked for
// refit (otherwise their AABB in the tree would remain valid, leading to crashes when the corresponding index is 0xffffffff).
// We must do this before invalidating the corresponding tree nodes in the map, obviously (otherwise we'd be reading node
// indices that we already invalidated).
const PoolIndex poolIndex = r->removedIndex;
const TreeNodeIndex treeNodeIndex = mTreeMap[poolIndex];
if(treeNodeIndex!=INVALID_NODE_ID)
mAABBTree->markNodeForRefit(treeNodeIndex);
mTreeMap.invalidate(r->removedIndex, r->relocatedLastIndex, *mAABBTree);
}
mNewTreeFixups.clear(); // clear out the fixups since we just applied them all
}
{
PX_PROFILE_ZONE("SceneQuery.prunerNewTreeFinalRefit", mPool.mContextID);
const PxU32 size = mToRefit.size();
for(PxU32 i=0;i<size;i++)
{
const PoolIndex poolIndex = mToRefit[i];
const TreeNodeIndex treeNodeIndex = mTreeMap[poolIndex];
if(treeNodeIndex!=INVALID_NODE_ID)
mAABBTree->markNodeForRefit(treeNodeIndex);
}
mToRefit.clear();
refitUpdatedAndRemoved();
}
{
PX_PROFILE_ZONE("SceneQuery.prunerNewTreeRemoveObjects", mPool.mContextID);
PxU32 nbRemovedPairs = mBucketPruner.removeMarkedObjects(mTimeStamp-1);
PX_UNUSED(nbRemovedPairs);
mNeedsNewTree = mBucketPruner.getNbObjects()>0;
}
}
updateBucketPruner();
}
void AABBPruner::shiftOrigin(const PxVec3& shift)
{
mPool.shiftOrigin(shift);
if(mAABBTree)
mAABBTree->shiftOrigin(shift);
if(mIncrementalRebuild)
mBucketPruner.shiftOrigin(shift);
if(mNewTree)
mNewTree->shiftOrigin(shift);
}
void AABBPruner::visualize(PxRenderOutput& out, PxU32 primaryColor, PxU32 secondaryColor) const
{
// getAABBTree() asserts when pruner is dirty. NpScene::visualization() does not enforce flushUpdate. see DE7834
visualizeTree(out, primaryColor, mAABBTree);
// Render added objects not yet in the tree
out << PxTransform(PxIdentity);
out << PxU32(PxDebugColor::eARGB_WHITE);
if(mIncrementalRebuild && mBucketPruner.getNbObjects())
mBucketPruner.visualize(out, secondaryColor);
}
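// Advances the incremental rebuild state machine by one step:
// BUILD_NOT_STARTED -> BUILD_INIT (prepareBuild caches current bounds and resets the builder)
// BUILD_INIT -> BUILD_IN_PROGRESS (first progressiveBuild call + work-unit estimation)
// BUILD_IN_PROGRESS -> BUILD_NEW_MAPPING (progressiveBuild returns 0, tree topology done)
// BUILD_NEW_MAPPING -> BUILD_FULL_REFIT -> BUILD_LAST_FRAME -> BUILD_FINISHED
// commit() finally swaps the finished tree in and resets the state to BUILD_NOT_STARTED.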
bool AABBPruner::buildStep(bool synchronousCall)
{
PX_PROFILE_ZONE("SceneQuery.prunerBuildStep", mPool.mContextID);
PX_ASSERT(mIncrementalRebuild);
if(mNeedsNewTree)
{
if(mProgress==BUILD_NOT_STARTED)
{
if(!synchronousCall || !prepareBuild())
return false;
}
else if(mProgress==BUILD_INIT)
{
mNewTree->progressiveBuild(mBuilder, mNodeAllocator, mBuildStats, 0, 0);
mProgress = BUILD_IN_PROGRESS;
mNbCalls = 0;
// Use a heuristic to estimate the number of work units needed for rebuilding the tree.
// The general idea is to use the number of work units of the previous tree to build the new tree.
// This works fine as long as the number of leaves remains more or less the same for the old and the
// new tree. If that is not the case, this estimate can be way off and the work units per step will
// be either much too small or too large. Hence, in that case we will try to estimate the number of work
// units based on the number of leaves of the new tree as follows:
//
// - Assume new tree with n leaves is perfectly-balanced
// - Compute the depth of perfectly-balanced tree with n leaves
// - Estimate number of working units for the new tree
const PxU32 depth = PxILog2(mBuilder.mNbPrimitives); // Note: This is the depth without counting the leaf layer
const PxU32 estimatedNbWorkUnits = depth * mBuilder.mNbPrimitives; // Estimated number of work units for new tree
const PxU32 estimatedNbWorkUnitsOld = mAABBTree ? mAABBTree->getTotalPrims() : 0;
if ((estimatedNbWorkUnits <= (estimatedNbWorkUnitsOld << 1)) && (estimatedNbWorkUnits >= (estimatedNbWorkUnitsOld >> 1)))
// The two estimates do not differ by more than a factor 2
mTotalWorkUnits = estimatedNbWorkUnitsOld;
else
{
mAdaptiveRebuildTerm = 0;
mTotalWorkUnits = estimatedNbWorkUnits;
}
const PxI32 totalWorkUnits = PxI32(mTotalWorkUnits + (mAdaptiveRebuildTerm * mBuilder.mNbPrimitives));
mTotalWorkUnits = PxU32(PxMax(totalWorkUnits, 0));
}
else if(mProgress==BUILD_IN_PROGRESS)
{
mNbCalls++;
const PxU32 Limit = 1 + (mTotalWorkUnits / mRebuildRateHint);
// looks like progressiveRebuild returns 0 when finished
if(!mNewTree->progressiveBuild(mBuilder, mNodeAllocator, mBuildStats, 1, Limit))
{
// Done
mProgress = BUILD_NEW_MAPPING;
#if PX_DEBUG
mNewTree->validate();
#endif
}
}
else if(mProgress==BUILD_NEW_MAPPING)
{
mNbCalls++;
mProgress = BUILD_FULL_REFIT;
// PT: we can't call fullRefit without creating the new mapping first: the refit function will fetch boxes from
// the pool using "primitive indices" captured in the tree. But some of these indices may have been invalidated
// if objects got removed while the tree was built. So we need to invalidate the corresponding nodes before refit,
			// that way the #prims will be zero and the code won't fetch a wrong box (which may now belong to a different object).
{
PX_PROFILE_ZONE("SceneQuery.prunerNewTreeMapping", mPool.mContextID);
if(mNewTreeFixups.size())
{
mNewTreeMap.initMap(PxMax(mPool.getNbActiveObjects(), mNbCachedBoxes), *mNewTree);
// The new mapping has been computed using only indices stored in the new tree. Those indices map the pruning pool
// we had when starting to build the tree. We need to re-apply recorded moves to fix the tree.
for(NewTreeFixup* r = mNewTreeFixups.begin(); r < mNewTreeFixups.end(); r++)
mNewTreeMap.invalidate(r->removedIndex, r->relocatedLastIndex, *mNewTree);
mNewTreeFixups.clear();
#if PX_DEBUG
mNewTree->validate();
#endif
}
}
}
else if(mProgress==BUILD_FULL_REFIT)
{
mNbCalls++;
mProgress = BUILD_LAST_FRAME;
{
PX_PROFILE_ZONE("SceneQuery.prunerNewTreeFullRefit", mPool.mContextID);
// We need to refit the new tree because objects may have moved while we were building it.
mNewTree->fullRefit(mPool.getCurrentWorldBoxes());
}
}
else if(mProgress==BUILD_LAST_FRAME)
{
mProgress = BUILD_FINISHED;
}
		// This flag is required because commit() handles both refit and a portion of build finalization (why?)
		// This is overly conservative: it is only necessary when there were no updates at all to the tree since the last tree swap.
		// It is also overly conservative in the sense that it could be set only when mProgress was just set to BUILD_FINISHED.
		// If run asynchronously from a different thread we only touched the new-tree build phase, so we should not mark the main tree as dirty.
if(synchronousCall)
mUncommittedChanges = true;
return mProgress==BUILD_FINISHED;
}
return false;
}
bool AABBPruner::prepareBuild()
{
PX_PROFILE_ZONE("SceneQuery.prepareBuild", mPool.mContextID);
PX_ASSERT(mIncrementalRebuild);
if(mNeedsNewTree)
{
if(mProgress==BUILD_NOT_STARTED)
{
const PxU32 nbObjects = mPool.getNbActiveObjects();
if(!nbObjects)
return false;
mNodeAllocator.release();
PX_DELETE(mNewTree);
mNewTree = PX_NEW(AABBTree);
mNbCachedBoxes = nbObjects;
mCachedBoxes.init(nbObjects, mPool.getCurrentWorldBoxes());
// PT: objects currently in the bucket pruner will be in the new tree. They are marked with the
// current timestamp (mTimeStamp). However more objects can get added while we compute the new tree,
// and those ones will not be part of it. These new objects will be marked with the new timestamp
// value (mTimeStamp+1), and we can use these different values to remove the proper objects from
// the bucket pruner (when switching to the new tree).
mTimeStamp++;
// notify the incremental pruner to swap trees (for incremental pruner companion)
mBucketPruner.timeStampChange();
mBuilder.reset();
mBuilder.mNbPrimitives = mNbCachedBoxes;
mBuilder.mBounds = &mCachedBoxes;
mBuilder.mLimit = mNbObjectsPerNode;
mBuilder.mBuildStrategy = mBuildStrategy;
mBuildStats.reset();
// start recording modifications to the tree made during rebuild to reapply (fix the new tree) eventually
PX_ASSERT(mNewTreeFixups.size()==0);
mProgress = BUILD_INIT;
}
}
else
return false;
return true;
}
/**
* Builds an AABB-tree for objects in the pruning pool.
* \return true if success
*/
bool AABBPruner::fullRebuildAABBTree()
{
PX_PROFILE_ZONE("SceneQuery.prunerFullRebuildAABBTree", mPool.mContextID);
// Release possibly already existing tree
PX_DELETE(mAABBTree);
// Don't bother building an AABB-tree if there isn't a single static object
const PxU32 nbObjects = mPool.getNbActiveObjects();
if(!nbObjects)
return true;
bool Status;
{
// Create a new tree
mAABBTree = PX_NEW(AABBTree);
Status = mAABBTree->build(AABBTreeBuildParams(mNbObjectsPerNode, nbObjects, &mPool.getCurrentAABBTreeBounds(), mBuildStrategy), mNodeAllocator);
}
// No need for the tree map for static pruner
if(mIncrementalRebuild)
mTreeMap.initMap(PxMax(nbObjects, mNbCachedBoxes), *mAABBTree);
return Status;
}
// called in the end of commit(), but only if mIncrementalRebuild is true
void AABBPruner::updateBucketPruner()
{
PX_PROFILE_ZONE("SceneQuery.prunerUpdateBucketPruner", mPool.mContextID);
PX_ASSERT(mIncrementalRebuild);
mBucketPruner.build();
}
void AABBPruner::release() // this can be called from purge()
{
mBucketPruner.release();
mTimeStamp = 0;
mTreeMap.release();
mNewTreeMap.release();
mCachedBoxes.release();
mBuilder.reset();
mNodeAllocator.release();
PX_DELETE(mNewTree);
PX_DELETE(mAABBTree);
mNbCachedBoxes = 0;
mProgress = BUILD_NOT_STARTED;
mNewTreeFixups.clear();
mUncommittedChanges = false;
}
// Refit current tree
void AABBPruner::refitUpdatedAndRemoved()
{
PX_PROFILE_ZONE("SceneQuery.prunerRefitUpdatedAndRemoved", mPool.mContextID);
PX_ASSERT(mIncrementalRebuild);
AABBTree* tree = getAABBTree();
if(!tree)
return;
#if PX_DEBUG
tree->validate();
#endif
//### missing a way to skip work if not needed
const PxU32 nbObjects = mPool.getNbActiveObjects();
// At this point there still can be objects in the tree that are blanked out so it's an optimization shortcut (not required)
if(!nbObjects)
return;
mBucketPruner.refitMarkedNodes(mPool.getCurrentWorldBoxes());
tree->refitMarkedNodes(mPool.getCurrentWorldBoxes());
}
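// Merges a precomputed pruning structure into this pruner: directly into the main AABB tree for
// the static (non-incremental) pruner, or into the bucket pruner companion for the incremental
// one, where the merged objects live until the next tree swap.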
void AABBPruner::merge(const void* mergeParams)
{
const AABBPrunerMergeData& pruningStructure = *reinterpret_cast<const AABBPrunerMergeData*> (mergeParams);
if(!pruningStructure.mAABBTreeNodes)
return;
if(mAABBTree)
{
// index in pruning pool, where new objects were added
const PxU32 pruningPoolIndex = mPool.getNbActiveObjects() - pruningStructure.mNbObjects;
// create tree from given nodes and indices
AABBTreeMergeData aabbTreeMergeParams(pruningStructure.mNbNodes, pruningStructure.mAABBTreeNodes,
pruningStructure.mNbObjects, pruningStructure.mAABBTreeIndices, pruningPoolIndex);
if(!mIncrementalRebuild)
{
// merge tree directly
mAABBTree->mergeTree(aabbTreeMergeParams);
}
else
{
mBucketPruner.addTree(aabbTreeMergeParams, mTimeStamp);
}
}
}
void AABBPruner::getGlobalBounds(PxBounds3& bounds) const
{
if(mAABBTree && mAABBTree->getNodes())
bounds = mAABBTree->getNodes()->mBV;
else
bounds.setEmpty();
if(mIncrementalRebuild && mBucketPruner.getNbObjects())
{
PxBounds3 extBounds;
mBucketPruner.getGlobalBounds(extBounds);
bounds.include(extBounds);
}
}
| 27,670 | C++ | 34.843264 | 241 | 0.744742 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuSDF.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuSDF.h"
#include "GuAABBTreeNode.h"
#include "GuAABBTree.h"
#include "GuAABBTreeBounds.h"
#include "GuWindingNumber.h"
#include "GuAABBTreeNode.h"
#include "GuDistancePointBox.h"
#include "GuDistancePointTriangle.h"
#include "GuAABBTreeQuery.h"
#include "GuIntersectionRayTriangle.h"
#include "GuIntersectionRayBox.h"
#include "foundation/PxAtomic.h"
#include "foundation/PxThread.h"
#include "common/GuMeshAnalysis.h"
#include "GuMeshAnalysis.h"
#include "PxSDFBuilder.h"
#include "GuDistancePointSegment.h"
#include "common/PxSerialFramework.h"
#define EXTENDED_DEBUG 0
namespace physx
{
namespace Gu
{
SDF::~SDF()
{
if(mOwnsMemory)
{
PX_FREE(mSdf);
PX_FREE(mSubgridStartSlots);
PX_FREE(mSubgridSdf);
}
}
PxReal* SDF::allocateSdfs(const PxVec3& meshLower, const PxReal& spacing, const PxU32 dimX, const PxU32 dimY, const PxU32 dimZ,
const PxU32 subgridSize, const PxU32 sdfSubgrids3DTexBlockDimX, const PxU32 sdfSubgrids3DTexBlockDimY, const PxU32 sdfSubgrids3DTexBlockDimZ,
PxReal minSdfValueSubgrids, PxReal maxSdfValueSubgrids, PxU32 sparsePixelNumBytes)
{
PX_ASSERT(!mSdf);
PX_ASSERT(!mSubgridStartSlots);
PX_ASSERT(!mSubgridSdf);
mMeshLower = meshLower;
mSpacing = spacing;
mDims.x = dimX;
mDims.y = dimY;
mDims.z = dimZ;
mSubgridSize = subgridSize;
mSdfSubgrids3DTexBlockDim.x = sdfSubgrids3DTexBlockDimX;
mSdfSubgrids3DTexBlockDim.y = sdfSubgrids3DTexBlockDimY;
mSdfSubgrids3DTexBlockDim.z = sdfSubgrids3DTexBlockDimZ;
mSubgridsMinSdfValue = minSdfValueSubgrids;
mSubgridsMaxSdfValue = maxSdfValueSubgrids;
mBytesPerSparsePixel = sparsePixelNumBytes;
if (subgridSize > 0)
{
//Sparse sdf
PX_ASSERT(dimX % subgridSize == 0);
PX_ASSERT(dimY % subgridSize == 0);
PX_ASSERT(dimZ % subgridSize == 0);
PxU32 x = dimX / subgridSize;
PxU32 y = dimY / subgridSize;
PxU32 z = dimZ / subgridSize;
mNumSdfs = (x + 1) * (y + 1) * (z + 1);
mNumSubgridSdfs = mBytesPerSparsePixel * sdfSubgrids3DTexBlockDimX * (subgridSize + 1) * sdfSubgrids3DTexBlockDimY * (subgridSize + 1) * sdfSubgrids3DTexBlockDimZ * (subgridSize + 1);
mNumStartSlots = x * y * z;
mSubgridSdf = PX_ALLOCATE(PxU8, mNumSubgridSdfs, "PxU8");
mSubgridStartSlots = PX_ALLOCATE(PxU32, mNumStartSlots, "PxU32");
mSdf = PX_ALLOCATE(PxReal, mNumSdfs, "PxReal");
}
else
{
//Dense sdf - no sparse grid data required
mSubgridStartSlots = NULL;
mSubgridSdf = NULL;
			mNumSdfs = dimX * dimY * dimZ;
mNumSubgridSdfs = 0;
mNumStartSlots = 0;
mSdf = PX_ALLOCATE(PxReal, mNumSdfs, "PxReal");
}
return mSdf;
}
void SDF::exportExtraData(PxSerializationContext& context)
{
if (mSdf)
{
context.alignData(PX_SERIAL_ALIGN);
context.writeData(mSdf, mNumSdfs * sizeof(PxReal));
}
if (mNumStartSlots)
{
context.alignData(PX_SERIAL_ALIGN);
context.writeData(mSubgridStartSlots, mNumStartSlots * sizeof(PxU32));
}
if (mSubgridSdf)
{
context.alignData(PX_SERIAL_ALIGN);
context.writeData(mSubgridSdf, mNumSubgridSdfs * sizeof(PxU8));
}
}
void SDF::importExtraData(PxDeserializationContext& context)
{
if (mSdf)
mSdf = context.readExtraData<PxReal, PX_SERIAL_ALIGN>(mNumSdfs);
if (mSubgridStartSlots)
mSubgridStartSlots = context.readExtraData<PxU32, PX_SERIAL_ALIGN>(mNumStartSlots);
if (mSubgridSdf)
mSubgridSdf = context.readExtraData<PxU8, PX_SERIAL_ALIGN>(mNumSubgridSdfs);
}
void buildTree(const PxU32* triangles, const PxU32 numTriangles, const PxVec3* points, PxArray<Gu::BVHNode>& tree, PxF32 enlargement = 1e-4f)
{
//Computes a bounding box for every triangle in triangles
Gu::AABBTreeBounds boxes;
boxes.init(numTriangles);
for (PxU32 i = 0; i < numTriangles; ++i)
{
const PxU32* tri = &triangles[3 * i];
PxBounds3 box = PxBounds3::empty();
box.include(points[tri[0]]);
box.include(points[tri[1]]);
box.include(points[tri[2]]);
box.fattenFast(enlargement);
boxes.getBounds()[i] = box;
}
Gu::buildAABBTree(numTriangles, boxes, tree);
}
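	// BVH traversal controller that reports whether a line segment intersects any triangle of the
	// mesh; traversal aborts as soon as the first intersection is found.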
class LineSegmentTrimeshIntersectionTraversalController
{
private:
const PxU32* mTriangles;
const PxVec3* mPoints;
PxVec3 mSegmentStart;
PxVec3 mSegmentEnd;
PxVec3 mDirection;
bool mIntersects;
public:
LineSegmentTrimeshIntersectionTraversalController(const PxU32* triangles, const PxVec3* points, PxVec3 segmentStart, PxVec3 segmentEnd)
: mTriangles(triangles), mPoints(points), mSegmentStart(segmentStart), mSegmentEnd(segmentEnd), mDirection(segmentEnd - segmentStart), mIntersects(false)
{
}
void reset(PxVec3 segmentStart, PxVec3 segmentEnd)
{
mSegmentStart = segmentStart;
mSegmentEnd = segmentEnd;
mDirection = segmentEnd - segmentStart;
mIntersects = false;
}
bool intersectionDetected() const
{
return mIntersects;
}
PX_FORCE_INLINE Gu::TraversalControl::Enum analyze(const Gu::BVHNode& node, PxI32)
{
if (node.isLeaf())
{
PxI32 j = node.getPrimitiveIndex();
const PxU32* tri = &mTriangles[3 * j];
PxReal at, au, av;
if (Gu::intersectRayTriangle(mSegmentStart, mDirection, mPoints[tri[0]], mPoints[tri[1]], mPoints[tri[2]], at, au, av, false, 1e-4f) && at >= 0.0f && at <= 1.0f)
{
mIntersects = true;
return TraversalControl::eAbort;
}
return TraversalControl::eDontGoDeeper;
}
PxReal tnear, tfar;
if (Gu::intersectRayAABB(node.mBV.minimum, node.mBV.maximum, mSegmentStart, mDirection, tnear, tfar) >= 0 && ((tnear >= 0.0f && tnear <= 1.0f) || (tfar >= 0.0f && tfar <= 1.0f) || node.mBV.contains(mSegmentStart)))
return TraversalControl::eGoDeeper;
return TraversalControl::eDontGoDeeper;
}
private:
PX_NOCOPY(LineSegmentTrimeshIntersectionTraversalController)
};
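	// BVH traversal controller that finds the closest point (and triangle) on the mesh to a query
	// point. Nodes are culled by their squared point-box distance and the nearer child is visited
	// first; setClosestStart can warm-start the search with an upper bound from a previous query.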
class ClosestDistanceToTrimeshTraversalController
{
private:
PxReal mClosestDistanceSquared;
const PxU32* mTriangles;
const PxVec3* mPoints;
const Gu::BVHNode* mNodes;
PxVec3 mQueryPoint;
PxVec3 mClosestPoint;
PxI32 mClosestTriId;
public:
PX_FORCE_INLINE ClosestDistanceToTrimeshTraversalController(){}
PX_FORCE_INLINE ClosestDistanceToTrimeshTraversalController(const PxU32* triangles, const PxVec3* points, Gu::BVHNode* nodes) :
mTriangles(triangles), mPoints(points), mNodes(nodes), mQueryPoint(0.0f), mClosestPoint(0.0f), mClosestTriId(-1)
{
initialize(triangles, points, nodes);
}
void initialize(const PxU32* triangles, const PxVec3* points, Gu::BVHNode* nodes)
{
mTriangles = triangles;
mPoints = points;
mNodes = nodes;
mQueryPoint = PxVec3(0.0f);
mClosestPoint = PxVec3(0.0f);
mClosestTriId = -1;
mClosestDistanceSquared = PX_MAX_F32;
}
PX_FORCE_INLINE void setQueryPoint(const PxVec3& queryPoint)
{
this->mQueryPoint = queryPoint;
mClosestDistanceSquared = FLT_MAX;
mClosestPoint = PxVec3(0.0f);
mClosestTriId = -1;
}
PX_FORCE_INLINE const PxVec3& getClosestPoint() const
{
return mClosestPoint;
}
PX_FORCE_INLINE PxReal distancePointBoxSquared(const PxBounds3& box, const PxVec3& point)
{
PxVec3 closestPt = box.minimum.maximum(box.maximum.minimum(point));
return (closestPt - point).magnitudeSquared();
}
PX_FORCE_INLINE Gu::TraversalControl::Enum analyze(const Gu::BVHNode& node, PxI32)
{
if (distancePointBoxSquared(node.mBV, mQueryPoint) >= mClosestDistanceSquared)
return Gu::TraversalControl::eDontGoDeeper;
if (node.isLeaf())
{
const PxI32 j = node.getPrimitiveIndex();
const PxU32* tri = &mTriangles[3 * j];
aos::FloatV t1, t2;
aos::Vec3V q = V3LoadU(mQueryPoint);
aos::Vec3V a = V3LoadU(mPoints[tri[0]]);
aos::Vec3V b = V3LoadU(mPoints[tri[1]]);
aos::Vec3V c = V3LoadU(mPoints[tri[2]]);
aos::Vec3V cp;
aos::FloatV d = Gu::distancePointTriangleSquared2UnitBox(q, a, b, c, t1, t2, cp);
PxReal d2;
FStore(d, &d2);
PxVec3 closest;
V3StoreU(cp, closest);
//const PxVec3 closest = closestPtPointTriangle2UnitBox(mQueryPoint, mPoints[tri[0]], mPoints[tri[1]], mPoints[tri[2]]);
//PxReal d2 = (closest - mQueryPoint).magnitudeSquared();
if (d2 < mClosestDistanceSquared)
{
mClosestDistanceSquared = d2;
mClosestTriId = j;
mClosestPoint = closest;
}
return Gu::TraversalControl::eDontGoDeeper;
}
const Gu::BVHNode& nodePos = mNodes[node.getPosIndex()];
const PxReal distSquaredPos = distancePointBoxSquared(nodePos.mBV, mQueryPoint);
const Gu::BVHNode& nodeNeg = mNodes[node.getNegIndex()];
const PxReal distSquaredNeg = distancePointBoxSquared(nodeNeg.mBV, mQueryPoint);
if (distSquaredPos < distSquaredNeg)
{
if (distSquaredPos < mClosestDistanceSquared)
return Gu::TraversalControl::eGoDeeper;
}
else
{
if (distSquaredNeg < mClosestDistanceSquared)
return Gu::TraversalControl::eGoDeeperNegFirst;
}
return Gu::TraversalControl::eDontGoDeeper;
}
PxI32 getClosestTriId() const { return mClosestTriId; }
void setClosestStart(const PxReal closestDistanceSquared, PxI32 closestTriangle, const PxVec3& closestPoint)
{
mClosestDistanceSquared = closestDistanceSquared;
mClosestTriId = closestTriangle;
mClosestPoint = closestPoint;
}
private:
PX_NOCOPY(ClosestDistanceToTrimeshTraversalController)
};
class PointOntoTriangleMeshProjector : public PxPointOntoTriangleMeshProjector, public PxUserAllocated
{
PxArray<Gu::BVHNode> mNodes;
ClosestDistanceToTrimeshTraversalController mEvaluator;
public:
PointOntoTriangleMeshProjector(const PxVec3* vertices, const PxU32* indices, PxU32 numTriangles)
{
buildTree(indices, numTriangles, vertices, mNodes);
mEvaluator.initialize(indices, vertices, mNodes.begin());
}
virtual PxVec3 projectPoint(const PxVec3& point) PX_OVERRIDE
{
mEvaluator.setQueryPoint(point);
Gu::traverseBVH(mNodes.begin(), mEvaluator);
PxVec3 closestPoint = mEvaluator.getClosestPoint();
return closestPoint;
}
		virtual PxVec3 projectPoint(const PxVec3& point, PxU32& closestTriangleIndex) PX_OVERRIDE
{
mEvaluator.setQueryPoint(point);
Gu::traverseBVH(mNodes.begin(), mEvaluator);
PxVec3 closestPoint = mEvaluator.getClosestPoint();
			closestTriangleIndex = mEvaluator.getClosestTriId();
return closestPoint;
}
virtual void release() PX_OVERRIDE
{
mNodes.reset();
PX_FREE_THIS;
}
};
PxPointOntoTriangleMeshProjector* PxCreatePointOntoTriangleMeshProjector(const PxVec3* vertices, const PxU32* indices, PxU32 numTriangleIndices)
{
return PX_NEW(PointOntoTriangleMeshProjector)(vertices, indices, numTriangleIndices);
}
void windingNumbers(const PxVec3* vertices, const PxU32* indices, PxU32 numTriangleIndices, PxU32 width, PxU32 height, PxU32 depth,
PxReal* windingNumbers, PxVec3 min, PxVec3 max, PxVec3* sampleLocations)
{
const PxVec3 extents(max - min);
const PxVec3 delta(extents.x / width, extents.y / height, extents.z / depth);
const PxVec3 offset = min + PxVec3(0.5f * delta.x, 0.5f * delta.y, 0.5f * delta.z);
PxArray<Gu::BVHNode> tree;
buildTree(indices, numTriangleIndices / 3, vertices, tree);
PxHashMap<PxU32, Gu::ClusterApproximation> clusters;
Gu::precomputeClusterInformation(tree.begin(), indices, numTriangleIndices / 3, vertices, clusters);
for (PxU32 x = 0; x < width; ++x)
{
for (PxU32 y = 0; y < height; ++y)
{
for (PxU32 z = 0; z < depth; ++z)
{
PxVec3 queryPoint(x * delta.x + offset.x, y * delta.y + offset.y, z * delta.z + offset.z);
PxReal windingNumber = Gu::computeWindingNumber(tree.begin(), queryPoint, clusters, indices, vertices);
windingNumbers[z * width * height + y * width + x] = windingNumber; // > 0.5f ? PxU32(-1) : 0;
if (sampleLocations)
sampleLocations[z * width * height + y * width + x] = queryPoint;
}
}
}
}
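	// Index interval along one grid axis together with the inside/outside classification at both
	// interval ends; used by the bisection-based inside/outside sweeps below.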
struct Range
{
PxI32 mStart;
PxI32 mEnd;
bool mInsideStart;
bool mInsideEnd;
Range(PxI32 start, PxI32 end, bool insideStart, bool insideEnd) : mStart(start), mEnd(end), mInsideStart(insideStart), mInsideEnd(insideEnd) { }
};
struct SDFCalculationData
{
const PxVec3* vertices;
const PxU32* indices;
PxU32 numTriangleIndices;
PxU32 width;
PxU32 height;
PxU32 depth;
PxReal* sdf;
PxVec3* sampleLocations;
GridQueryPointSampler* pointSampler;
PxArray<Gu::BVHNode>* tree;
PxHashMap<PxU32, Gu::ClusterApproximation>* clusters;
PxI32 batchSize = 32;
PxI32 end;
PxI32* progress;
bool optimizeInsideOutsideCalculation; //Toggle to enable an additional optimization for faster inside/outside classification
bool signOnly;
};
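	// Classifies grid samples as inside/outside via winding numbers without evaluating every
	// sample: each scanline is processed by recursive bisection over Range intervals, and an
	// interval is filled wholesale when the segment connecting its endpoints does not intersect
	// the mesh (the classification cannot change without crossing the surface).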
void windingNumbersInsideCheck(const PxVec3* vertices, const PxU32* indices, PxU32 numTriangleIndices, PxU32 width, PxU32 height, PxU32 depth,
bool* insideResult, PxVec3 min, PxVec3 max, PxVec3* sampleLocations)
{
#if PX_DEBUG
PxBounds3 bounds(min, max);
for (PxU32 i = 0; i < numTriangleIndices; ++i)
PX_ASSERT(bounds.contains(vertices[indices[i]]));
#endif
const PxVec3 extents(max - min);
const PxVec3 delta(extents.x / width, extents.y / height, extents.z / depth);
const PxVec3 offset = min + PxVec3(0.5f * delta.x, 0.5f * delta.y, -0.5f * delta.z);
PxArray<Gu::BVHNode> tree;
buildTree(indices, numTriangleIndices / 3, vertices, tree);
PxHashMap<PxU32, Gu::ClusterApproximation> clusters;
Gu::precomputeClusterInformation(tree.begin(), indices, numTriangleIndices / 3, vertices, clusters);
LineSegmentTrimeshIntersectionTraversalController intersector(indices, vertices, PxVec3(0.0f), PxVec3(0.0f));
PxArray<Range> stack;
for (PxU32 x = 0; x < width; ++x)
{
for (PxU32 y = 0; y < height; ++y)
{
stack.pushBack(Range(0, depth+2, false, false));
while (stack.size() > 0)
{
Range r = stack.popBack();
PxI32 center = (r.mStart + r.mEnd) / 2;
if (center == r.mStart)
{
if (r.mStart > 0 && r.mStart <= PxI32(depth))
{
insideResult[(r.mStart - 1) * width * height + y * width + x] = r.mInsideStart;
if (sampleLocations)
sampleLocations[(r.mStart - 1) * width * height + y * width + x] = PxVec3(x * delta.x + offset.x, y * delta.y + offset.y, r.mStart * delta.z + offset.z);
}
continue;
}
PxVec3 queryPoint = PxVec3(x * delta.x + offset.x, y * delta.y + offset.y, center * delta.z + offset.z);
bool inside = Gu::computeWindingNumber(tree.begin(), queryPoint, clusters, indices, vertices) > 0.5f;
if (inside != r.mInsideStart)
stack.pushBack(Range(r.mStart, center, r.mInsideStart, inside));
else
{
PxVec3 p = PxVec3(x * delta.x + offset.x, y * delta.y + offset.y, r.mStart * delta.z + offset.z);
intersector.reset(p, queryPoint);
Gu::traverseBVH(tree.begin(), intersector);
if (!intersector.intersectionDetected())
{
PxI32 e = PxMin(center, PxI32(depth) + 1);
for (PxI32 z = PxMax(1, r.mStart); z < e; ++z)
{
insideResult[(z - 1) * width * height + y * width + x] = inside;
if (sampleLocations)
sampleLocations[(z - 1) * width * height + y * width + x] = queryPoint;
}
}
else
stack.pushBack(Range(r.mStart, center, r.mInsideStart, inside));
}
if (inside != r.mInsideEnd)
stack.pushBack(Range(center, r.mEnd, inside, r.mInsideEnd));
else
{
PxVec3 p = PxVec3(x * delta.x + offset.x, y * delta.y + offset.y, r.mEnd * delta.z + offset.z);
intersector.reset(queryPoint, p);
Gu::traverseBVH(tree.begin(), intersector);
if (!intersector.intersectionDetected())
{
PxI32 e = PxMin(r.mEnd, PxI32(depth) + 1);
for (PxI32 z = PxMax(1, center); z < e; ++z)
{
insideResult[(z - 1) * width * height + y * width + x] = inside;
if (sampleLocations)
sampleLocations[(z - 1) * width * height + y * width + x] = queryPoint;
}
}
else
stack.pushBack(Range(center, r.mEnd, inside, r.mInsideEnd));
}
}
}
}
}
void idToXY(PxU32 id, PxU32 sizeX, PxU32& xi, PxU32& yi)
{
xi = id % sizeX;
yi = id / sizeX;
}
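	// Worker entry point: threads atomically grab batches of (y, z) scanlines, optionally run the
	// bisection-based inside/outside pass to flip signs in place, and then compute exact distances
	// through closest-point queries warm-started with the triangle found for the previous sample.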
void* computeSDFThreadJob(void* data)
{
SDFCalculationData& d = *reinterpret_cast<SDFCalculationData*>(data);
PxI32 lastTriangle = -1;
PxArray<Range> stack;
LineSegmentTrimeshIntersectionTraversalController intersector(d.indices, d.vertices, PxVec3(0.0f), PxVec3(0.0f));
PxI32 start = physx::PxAtomicAdd(d.progress, d.batchSize) - d.batchSize;
while (start < d.end)
{
PxI32 end = PxMin(d.end, start + d.batchSize);
PxU32 yStart, zStart;
idToXY(start, d.height, yStart, zStart);
for (PxI32 id = start; id < end; ++id)
{
PxU32 y, z;
idToXY(id, d.height, y, z);
if (y < yStart)
yStart = 0;
if (d.optimizeInsideOutsideCalculation)
{
stack.pushBack(Range(0, d.width + 2, false, false));
while (stack.size() > 0)
{
Range r = stack.popBack();
PxI32 center = (r.mStart + r.mEnd) / 2;
if (center == r.mStart)
{
if (r.mStart > 0 && r.mStart <= PxI32(d.width))
{
if (r.mInsideStart)
d.sdf[z * d.width * d.height + y * d.width + (r.mStart - 1)] *= -1.0f;
}
continue;
}
PxVec3 queryPoint = d.pointSampler->getPoint(center - 1, y, z);
bool inside = false;
bool computeWinding = true;
if (id > start && y > yStart)
{
PxReal s = d.sdf[z * d.width * d.height + (y - 1) * d.width + (center - 1)];
if (PxAbs(s) > d.pointSampler->getActiveCellSize().y)
{
inside = s < 0.0f;
computeWinding = false;
}
}
if (computeWinding)
inside = Gu::computeWindingNumber(d.tree->begin(), queryPoint, *d.clusters, d.indices, d.vertices) > 0.5f;
if (inside != r.mInsideStart)
stack.pushBack(Range(r.mStart, center, r.mInsideStart, inside));
else
{
PxVec3 p = d.pointSampler->getPoint(r.mStart - 1, y, z);
intersector.reset(p, queryPoint);
Gu::traverseBVH(d.tree->begin(), intersector);
if (!intersector.intersectionDetected())
{
PxI32 e = PxMin(center, PxI32(d.width) + 1);
for (PxI32 x = PxMax(1, r.mStart); x < e; ++x)
{
if (inside)
d.sdf[z * d.width * d.height + y * d.width + (x - 1)] *= -1.0f;
}
}
else
stack.pushBack(Range(r.mStart, center, r.mInsideStart, inside));
}
if (inside != r.mInsideEnd)
stack.pushBack(Range(center, r.mEnd, inside, r.mInsideEnd));
else
{
PxVec3 p = d.pointSampler->getPoint(r.mEnd - 1, y, z);
intersector.reset(queryPoint, p);
Gu::traverseBVH(d.tree->begin(), intersector);
if (!intersector.intersectionDetected())
{
PxI32 e = PxMin(r.mEnd, PxI32(d.width) + 1);
for (PxI32 x = PxMax(1, center); x < e; ++x)
{
if (inside)
d.sdf[z * d.width * d.height + y * d.width + (x - 1)] *= -1.0f;
}
}
else
stack.pushBack(Range(center, r.mEnd, inside, r.mInsideEnd));
}
}
}
if (!d.signOnly)
{
for (PxU32 x = 0; x < d.width; ++x)
{
const PxU32 index = z * d.width * d.height + y * d.width + x;
PxVec3 queryPoint = d.pointSampler->getPoint(x, y, z);
ClosestDistanceToTrimeshTraversalController cd(d.indices, d.vertices, d.tree->begin());
cd.setQueryPoint(queryPoint);
if (lastTriangle != -1)
{
//Warm-start the query with a lower-bound distance based on the triangle found by the previous query.
//This helps to cull the tree traversal more effectively in the closest point query.
PxU32 i0 = d.indices[3 * lastTriangle];
PxU32 i1 = d.indices[3 * lastTriangle + 1];
PxU32 i2 = d.indices[3 * lastTriangle + 2];
//const PxVec3 closest = Gu::closestPtPointTriangle2UnitBox(queryPoint, d.vertices[i0], d.vertices[i1], d.vertices[i2]);
//PxReal d2 = (closest - queryPoint).magnitudeSquared();
aos::FloatV t1, t2;
aos::Vec3V q = aos::V3LoadU(queryPoint);
aos::Vec3V a = aos::V3LoadU(d.vertices[i0]);
aos::Vec3V b = aos::V3LoadU(d.vertices[i1]);
aos::Vec3V c = aos::V3LoadU(d.vertices[i2]);
aos::Vec3V cp;
aos::FloatV dist2 = Gu::distancePointTriangleSquared2UnitBox(q, a, b, c, t1, t2, cp);
PxReal d2;
aos::FStore(dist2, &d2);
PxVec3 closest;
aos::V3StoreU(cp, closest);
cd.setClosestStart(d2, lastTriangle, closest);
}
Gu::traverseBVH(d.tree->begin(), cd);
PxVec3 closestPoint = cd.getClosestPoint();
PxReal closestDistance = (closestPoint - queryPoint).magnitude();
lastTriangle = cd.getClosestTriId();
PxReal sign = 1.f;
if (!d.optimizeInsideOutsideCalculation)
{
PxReal windingNumber = Gu::computeWindingNumber(d.tree->begin(), queryPoint, *d.clusters, d.indices, d.vertices);
sign = windingNumber > 0.5f ? -1.f : 1.f;
}
d.sdf[index] *= closestDistance * sign;
if (d.sampleLocations)
d.sampleLocations[index] = queryPoint;
}
}
}
start = physx::PxAtomicAdd(d.progress, d.batchSize) - d.batchSize;
}
return NULL;
}
struct PxI32x3
{
PxI32x3(PxI32 x_, PxI32 y_, PxI32 z_) : x(x_), y(y_), z(z_)
{}
PxI32 x;
PxI32 y;
PxI32 z;
};
	//Applies per-pixel operations, similar to the ones used by fast marching methods, to build SDFs out of binary image bitmaps.
	//This makes it possible to fill in correct distance values in regions where meshes have holes.
struct PixelProcessor
{
PxVec3 mCellSize;
PxI32 mWidth;
PxI32 mHeight;
PxI32 mDepth;
PixelProcessor(PxVec3 cellSize, PxI32 width, PxI32 height, PxI32 depth) :
mCellSize(cellSize), mWidth(width), mHeight(height), mDepth(depth)
{
}
		//Estimates distance values near mesh holes by estimating the location of the mesh surface. This can be done by analyzing
//the sign change of the imperfect SDF. The signs are computed using winding numbers which are immune to meshes with holes.
bool init(PxI32x3 p, const PxReal* sdf, PxReal& newValue) const
{
PxReal initialValue = sdf[idx3D(p.x, p.y, p.z, mWidth, mHeight)];
newValue = PxAbs(initialValue);
for (PxI32 z = PxMax(0, p.z - 1); z <= PxMin(mDepth - 1, p.z + 1); ++z)
for (PxI32 y = PxMax(0, p.y - 1); y <= PxMin(mHeight - 1, p.y + 1); ++y)
for (PxI32 x = PxMax(0, p.x - 1); x <= PxMin(mWidth - 1, p.x + 1); ++x)
{
if (x == p.x && y == p.y && z == p.z)
continue;
PxReal value = sdf[idx3D(x, y, z, mWidth, mHeight)];
if (PxSign(initialValue) != PxSign(value))
{
PxReal distance = 0;
if (x != p.x)
distance += mCellSize.x*mCellSize.x;
if (y != p.y)
distance += mCellSize.y*mCellSize.y;
if (z != p.z)
distance += mCellSize.z*mCellSize.z;
distance = PxSqrt(distance);
PxReal delta = PxAbs(value - initialValue);
if (0.99f * delta > distance)
{
PxReal scaling = distance / delta;
PxReal v = 0.99f * scaling * initialValue;
newValue = PxMin(newValue, PxAbs(v));
}
}
}
if (initialValue < 0)
newValue = -newValue;
			if (newValue != initialValue)
return true;
return false;
}
		//Processes a pixel of a 3D SDF by applying the update rule from the fast marching method. Only neighbor pixels with the same sign contribute to the update.
bool process(PxI32x3 p, PxReal* sdf, PxReal& newValue) const
{
PxReal initialValue = sdf[idx3D(p.x, p.y, p.z, mWidth, mHeight)];
if (initialValue == 0.0f)
return false;
PxReal sign = PxSign(initialValue);
newValue = PxAbs(initialValue);
for (PxI32 z = PxMax(0, p.z - 1); z <= PxMin(mDepth - 1, p.z + 1); ++z)
for (PxI32 y = PxMax(0, p.y - 1); y <= PxMin(mHeight - 1, p.y + 1); ++y)
for (PxI32 x = PxMax(0, p.x - 1); x <= PxMin(mWidth - 1, p.x + 1); ++x)
{
if (x == p.x && y == p.y && z == p.z)
continue;
PxReal value = sdf[idx3D(x, y, z, mWidth, mHeight)];
if (sign == PxSign(value))
{
PxReal distance = 0;
if (x != p.x)
distance += mCellSize.x*mCellSize.x;
if (y != p.y)
distance += mCellSize.y*mCellSize.y;
if (z != p.z)
distance += mCellSize.z*mCellSize.z;
distance = PxSqrt(distance);
PxReal absValue = PxAbs(value);
if(absValue + 1.01f*distance < newValue)
newValue = absValue + distance;
}
}
newValue = sign * newValue;
if (newValue != initialValue)
{
sdf[idx3D(p.x, p.y, p.z, mWidth, mHeight)] = newValue;
return true;
}
return false;
}
};
	//Stores the new value of an SDF pixel so the change can be applied later. This avoids the need for double-buffering the SDF data.
struct Mutation
{
PxI32x3 mIndex;
PxReal mNewValue;
Mutation(const PxI32x3& index, PxReal newValue) : mIndex(index), mNewValue(newValue)
{
}
};
void applyMutations(PxArray<Mutation>& mutations, PxU32 start, PxU32 end, PxReal* sdfs, PxU32 width, PxU32 height)
{
for (PxU32 i = start; i < end; ++i)
{
Mutation m = mutations[i];
sdfs[idx3D(m.mIndex.x, m.mIndex.y, m.mIndex.z, width, height)] = m.mNewValue;
}
}
//Approximates the solution of an Eikonal equation on a dense grid
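	//Two passes: distance values adjacent to a sign change are first re-seeded from the estimated
	//surface location (PixelProcessor::init); corrected values are then propagated to same-signed
	//neighbors (PixelProcessor::process) until no pixel changes or the iteration cap is reached.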
void fixSdfForNonClosedGeometry(PxU32 width, PxU32 height, PxU32 depth,
PxReal* sdf, const PxVec3& cellSize)
{
PxArray<Mutation> mutations;
PixelProcessor processor(cellSize, width, height, depth);
for (PxU32 z = 0; z < depth; ++z)
for (PxU32 y = 0; y < height; ++y)
for (PxU32 x = 0; x < width; ++x)
{
//Process only cells where a sign change occurs
PxReal newValue;
if (processor.init(PxI32x3(x, y, z), sdf, newValue))
mutations.pushBack(Mutation(PxI32x3(x, y, z), newValue));
}
//printf("numMutations: %i\n", mutations.size());
applyMutations(mutations, 0, mutations.size(), sdf, width, height);
PxU32 maxMutationLoops = 1000;
PxU32 counter = 0;
while (mutations.size() > 0 && counter < maxMutationLoops)
{
PxU32 size = mutations.size();
for (PxU32 i = 0; i < size; ++i)
{
PxI32x3 p = mutations[i].mIndex;
//Process neighbors of item on stack
for (PxI32 z = PxMax(0, p.z - 1); z <= PxMin(PxI32(depth) - 1, p.z + 1); ++z)
for (PxI32 y = PxMax(0, p.y - 1); y <= PxMin(PxI32(height) - 1, p.y + 1); ++y)
for (PxI32 x = PxMax(0, p.x - 1); x <= PxMin(PxI32(width) - 1, p.x + 1); ++x)
{
if (x == p.x && y == p.y && z == p.z)
continue;
PxReal newValue;
if (processor.process(PxI32x3(x, y, z), sdf, newValue))
mutations.pushBack(Mutation(PxI32x3(x, y, z), newValue));
}
}
mutations.removeRange(0, size);
++counter;
}
//For safety reasons: Check all cells again
for (PxU32 z = 0; z < depth; ++z)
for (PxU32 y = 0; y < height; ++y)
for (PxU32 x = 0; x < width; ++x)
{
//Look at all neighbors
PxReal newValue;
if (processor.init(PxI32x3(x, y, z), sdf, newValue))
mutations.pushBack(Mutation(PxI32x3(x, y, z), newValue));
}
counter = 0;
while (mutations.size() > 0 && counter < maxMutationLoops)
{
PxU32 size = mutations.size();
for (PxU32 i = 0; i < size; ++i)
{
PxI32x3 p = mutations[i].mIndex;
//Process neighbors of item on stack
for (PxI32 z = PxMax(0, p.z - 1); z <= PxMin(PxI32(depth) - 1, p.z + 1); ++z)
for (PxI32 y = PxMax(0, p.y - 1); y <= PxMin(PxI32(height) - 1, p.y + 1); ++y)
for (PxI32 x = PxMax(0, p.x - 1); x <= PxMin(PxI32(width) - 1, p.x + 1); ++x)
{
if (x == p.x && y == p.y && z == p.z)
continue;
PxReal newValue;
if (processor.process(PxI32x3(x, y, z), sdf, newValue))
mutations.pushBack(Mutation(PxI32x3(x, y, z), newValue));
}
}
mutations.removeRange(0, size);
++counter;
}
}
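	//Dense SDF construction: every sample starts at 1.0, the (optional) inside/outside pass flips
	//the sign in place, and the distance pass multiplies by the closest distance, so both passes
	//compose via in-place multiplication. Non-watertight meshes get a hole-filling post-process.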
void SDFUsingWindingNumbers(PxArray<Gu::BVHNode>& tree, PxHashMap<PxU32, Gu::ClusterApproximation>& clusters, const PxVec3* vertices, const PxU32* indices, PxU32 numTriangleIndices, PxU32 width, PxU32 height, PxU32 depth,
PxReal* sdf, GridQueryPointSampler& sampler, PxVec3* sampleLocations, PxU32 numThreads, bool isWatertight, bool allVerticesInsideSamplingBox)
{
bool optimizeInsideOutsideCalculation = allVerticesInsideSamplingBox && isWatertight;
numThreads = PxMax(numThreads, 1u);
PxI32 progress = 0;
PxArray<PxThread*> threads;
PxArray<SDFCalculationData> perThreadData;
for (PxU32 i = 0; i < numThreads; ++i)
{
perThreadData.pushBack(SDFCalculationData());
SDFCalculationData& d = perThreadData[i];
d.vertices = vertices;
d.indices = indices;
d.numTriangleIndices = numTriangleIndices;
d.width = width;
d.height = height;
d.depth = depth;
d.sdf = sdf;
d.sampleLocations = sampleLocations;
d.optimizeInsideOutsideCalculation = optimizeInsideOutsideCalculation;
d.pointSampler = &sampler;
d.progress = &progress;
d.tree = &tree;
d.clusters = &clusters;
d.end = depth * height;
d.signOnly = false;
}
PxU32 l = width * height * depth;
for (PxU32 i = 0; i < l; ++i)
sdf[i] = 1.0f;
for (PxU32 i = 0; i < numThreads; ++i)
{
if (perThreadData.size() == 1)
computeSDFThreadJob(&perThreadData[i]);
else
{
threads.pushBack(PX_NEW(PxThread)(computeSDFThreadJob, &perThreadData[i], "thread"));
threads[i]->start();
}
}
for (PxU32 i = 0; i < threads.size(); ++i)
{
threads[i]->waitForQuit();
}
for (PxU32 i = 0; i < threads.size(); ++i)
{
threads[i]->~PxThreadT();
PX_FREE(threads[i]);
}
if (!isWatertight)
fixSdfForNonClosedGeometry(width, height, depth, sdf, sampler.getActiveCellSize());
}
	//Helper struct that stores a triangle's vertex indices in canonical sorted order while remembering winding flips; used to detect duplicate triangles
struct SortedTriangle
{
public:
PxI32 mA;
PxI32 mB;
PxI32 mC;
bool mFlipped;
PX_FORCE_INLINE SortedTriangle(PxI32 a, PxI32 b, PxI32 c)
{
mA = a; mB = b; mC = c; mFlipped = false;
if (mA > mB) { PxSwap(mA, mB); mFlipped = !mFlipped; }
if (mB > mC) { PxSwap(mB, mC); mFlipped = !mFlipped; }
if (mA > mB) { PxSwap(mA, mB); mFlipped = !mFlipped; }
}
};
struct TriangleHash
{
PX_FORCE_INLINE std::size_t operator()(const SortedTriangle& k) const
{
return k.mA ^ k.mB ^ k.mC;
}
PX_FORCE_INLINE bool equal(const SortedTriangle& first, const SortedTriangle& second) const
{
return first.mA == second.mA && first.mB == second.mB && first.mC == second.mC;
}
};
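	//Signed volume of a triangle mesh via the divergence theorem: sum of a.dot(b.cross(c)) / 6
	//over all triangles; a negative result indicates globally inward-facing winding.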
PxReal signedVolume(const PxVec3* points, const PxU32* triangles, PxU32 numTriangles, const PxU32* triangleSubset = NULL, PxU32 setLength = 0)
{
PxReal signedVolume = 0;
const PxU32 l = triangleSubset ? setLength : numTriangles;
for (PxU32 j = 0; j < l; ++j)
{
const PxU32 i = triangleSubset ? triangleSubset[j] : j;
const PxU32* tri = &triangles[3 * i];
PxVec3 a = points[tri[0]];
PxVec3 b = points[tri[1]];
PxVec3 c = points[tri[2]];
PxReal y = a.dot(b.cross(c));
signedVolume += y;
}
signedVolume *= (1.0f / 6.0f);
return signedVolume;
}
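	//Mesh repair pass run before SDF generation: welds duplicate vertices, collapses duplicate
	//triangles to a single canonical winding, makes triangle orientation consistent, flips the
	//whole mesh if its signed volume is negative, and reports whether the mesh is watertight.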
void analyzeAndFixMesh(const PxVec3* vertices, const PxU32* indicesOrig, PxU32 numTriangleIndices, PxArray<PxU32>& repairedIndices)
{
const PxU32* indices = indicesOrig;
PxI32 numVertices = -1;
for (PxU32 i = 0; i < numTriangleIndices; ++i)
numVertices = PxMax(numVertices, PxI32(indices[i]));
++numVertices;
//Check for duplicate vertices
PxArray<PxI32> map;
MeshAnalyzer::mapDuplicatePoints<PxVec3, PxReal>(vertices, PxU32(numVertices), map, 0.0f);
bool hasDuplicateVertices = false;
for (PxU32 i = 0; i < map.size(); ++i)
{
if (map[i] != PxI32(i))
{
hasDuplicateVertices = true;
break;
}
}
if (hasDuplicateVertices)
{
repairedIndices.resize(numTriangleIndices);
for (PxU32 i = 0; i < numTriangleIndices; ++i)
repairedIndices[i] = map[indices[i]];
indices = repairedIndices.begin();
}
//Check for duplicate triangles
PxHashMap<SortedTriangle, PxI32, TriangleHash> tris;
bool hasDuplicateTriangles = false;
for (PxU32 i = 0; i < numTriangleIndices; i += 3)
{
SortedTriangle tri(indices[i], indices[i + 1], indices[i + 2]);
if (const PxPair<const SortedTriangle, PxI32>* ptr = tris.find(tri))
{
tris[tri] = ptr->second + 1;
hasDuplicateTriangles = true;
}
else
tris.insert(tri, 1);
}
if (hasDuplicateTriangles)
{
repairedIndices.clear();
for (PxHashMap<SortedTriangle, PxI32, TriangleHash>::Iterator iter = tris.getIterator(); !iter.done(); ++iter)
{
repairedIndices.pushBack(iter->first.mA);
if (iter->first.mFlipped)
{
repairedIndices.pushBack(iter->first.mC);
repairedIndices.pushBack(iter->first.mB);
}
else
{
repairedIndices.pushBack(iter->first.mB);
repairedIndices.pushBack(iter->first.mC);
}
}
}
else
{
if (!hasDuplicateVertices) //repairedIndices is already initialized if hasDuplicateVertices is true
{
repairedIndices.resize(numTriangleIndices);
for (PxU32 i = 0; i < numTriangleIndices; ++i)
repairedIndices[i] = indices[i];
}
}
PxHashMap<PxU64, PxI32> edges;
PxArray<bool> flipTriangle;
PxArray<PxArray<PxU32>> connectedTriangleGroups;
Triangle* triangles = reinterpret_cast<Triangle*>(repairedIndices.begin());
bool success = MeshAnalyzer::buildConsistentTriangleOrientationMap(triangles, repairedIndices.size() / 3, flipTriangle, edges, connectedTriangleGroups);
bool meshIsWatertight = true;
for (PxHashMap<PxU64, PxI32>::Iterator iter = edges.getIterator(); !iter.done(); ++iter)
{
if (iter->second != -1)
{
meshIsWatertight = false;
break;
}
}
if (success)
{
if (hasDuplicateTriangles && meshIsWatertight && connectedTriangleGroups.size() == 1)
{
for (PxU32 i = 0; i < flipTriangle.size(); ++i)
{
Triangle& t = triangles[i];
if (flipTriangle[i])
PxSwap(t[0], t[1]);
}
if (signedVolume(vertices, repairedIndices.begin(), repairedIndices.size() / 3) < 0.0f)
{
PxU32 numTriangles = repairedIndices.size() / 3;
for (PxU32 j = 0; j < numTriangles; ++j)
{
PxU32* tri = &repairedIndices[j * 3];
PxSwap(tri[1], tri[2]);
}
}
}
}
else
{
//Here it is not possible to guarantee that the mesh fixing can succeed
PxGetFoundation().error(PxErrorCode::eDEBUG_WARNING, PX_FL, "SDF creation: Mesh is not suitable for SDF (non-manifold, not watertight, duplicated triangles, ...) and automatic repair failed. The computed SDF might not work as expected. If collisions are not good, try to improve the mesh structure e.g., by applying remeshing.");
//connectedTriangleGroups won't have any elements, so return
return;
}
if (!meshIsWatertight)
{
PxGetFoundation().error(PxErrorCode::eDEBUG_WARNING, PX_FL, "SDF creation: Input mesh is not watertight. The SDF will try to close the holes.");
}
}
void SDFUsingWindingNumbers(const PxVec3* vertices, const PxU32* indicesOrig, PxU32 numTriangleIndices, PxU32 width, PxU32 height, PxU32 depth,
PxReal* sdf, PxVec3 minExtents, PxVec3 maxExtents, PxVec3* sampleLocations, bool cellCenteredSamples, PxU32 numThreads, PxSDFBuilder* sdfBuilder)
{
PxArray<PxU32> repairedIndices;
//Analyze the mesh to catch and fix some special cases
//There are meshes where every triangle is present once with cw and once with ccw orientation. Try to filter out only one set
analyzeAndFixMesh(vertices, indicesOrig, numTriangleIndices, repairedIndices);
const PxU32* indices = repairedIndices.size() > 0 ? repairedIndices.begin() : indicesOrig;
if (repairedIndices.size() > 0)
numTriangleIndices = repairedIndices.size();
if (sdfBuilder)
{
PxI32 numVertices = -1;
for (PxU32 i = 0; i < numTriangleIndices; ++i)
numVertices = PxMax(numVertices, PxI32(indices[i]));
++numVertices;
sdfBuilder->buildSDF(vertices, PxU32(numVertices), indices, numTriangleIndices, width, height, depth, minExtents, maxExtents, cellCenteredSamples, sdf);
}
else
{
PxArray<Gu::BVHNode> tree;
buildTree(indices, numTriangleIndices / 3, vertices, tree);
PxHashMap<PxU32, Gu::ClusterApproximation> clusters;
Gu::precomputeClusterInformation(tree.begin(), indices, numTriangleIndices / 3, vertices, clusters);
const PxVec3 extents(maxExtents - minExtents);
GridQueryPointSampler sampler(minExtents, PxVec3(extents.x / width, extents.y / height, extents.z / depth), cellCenteredSamples);
bool isWatertight = MeshAnalyzer::checkMeshWatertightness(reinterpret_cast<const Triangle*>(indices), numTriangleIndices / 3);
bool allSamplesInsideBox = true;
PxBounds3 box(minExtents, maxExtents);
for (PxU32 i = 0; i < numTriangleIndices; ++i)
{
PxVec3 v = vertices[indices[i]];
if (!box.contains(v))
{
allSamplesInsideBox = false;
break;
}
}
SDFUsingWindingNumbers(tree, clusters, vertices, indices, numTriangleIndices, width, height, depth, sdf, sampler, sampleLocations, numThreads, isWatertight, allSamplesInsideBox);
}
#if EXTENDED_DEBUG
bool debug = false;
if (debug && sdfBuilder)
{
PX_UNUSED(sdfBuilder);
PxArray<PxReal> sdf2;
sdf2.resize(width * height * depth);
PxI32 numVertices = -1;
for (PxU32 i = 0; i < numTriangleIndices; ++i)
numVertices = PxMax(numVertices, PxI32(indices[i]));
++numVertices;
sdfBuilder->buildSDF(vertices, PxU32(numVertices), indices, numTriangleIndices, width, height, depth, minExtents, maxExtents, cellCenteredSamples, sdf2.begin());
for (PxU32 i = 0; i < sdf2.size(); ++i)
{
PxReal diff = sdf[i] - sdf2[i];
//PxReal diffOfAbs = PxAbs(sdf[i]) - PxAbs(sdf2[i]);
if(PxAbs(diff) > 1e-3f)
PxGetFoundation().error(physx::PxErrorCode::eDEBUG_WARNING, PX_FL, "SDFs don't match %f %f", PxF64(sdf[i]), PxF64(sdf2[i]));
}
}
#endif
}
void convertSparseSDFTo3DTextureLayout(PxU32 width, PxU32 height, PxU32 depth, PxU32 cellsPerSubgrid,
PxU32* sdfFineStartSlots, const PxReal* sdfFineSubgridsIn, PxU32 sparseSDFNumEntries, PxArray<PxReal>& subgrids3DTexFormat,
PxU32& numSubgridsX, PxU32& numSubgridsY, PxU32& numSubgridsZ)
{
PxU32 valuesPerSubgrid = (cellsPerSubgrid + 1)*(cellsPerSubgrid + 1)*(cellsPerSubgrid + 1);
PX_ASSERT(sparseSDFNumEntries % valuesPerSubgrid == 0);
PxU32 numSubgrids = sparseSDFNumEntries / valuesPerSubgrid;
PxReal cubicRoot = PxPow(PxReal(numSubgrids), 1.0f / 3.0f);
PxU32 up = PxMax(1u, PxU32(PxCeil(cubicRoot)));
PxU32 debug = numSubgrids;
//Arrange numSubgrids in a 3d layout
PxU32 n = numSubgrids;
numSubgridsX = PxMin(up, n);
n = (n + up - 1) / up;
numSubgridsY = PxMin(up, n);
n = (n + up - 1) / up;
numSubgridsZ = PxMin(up, n);
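//Illustration: for numSubgrids = 10, up = ceil(10^(1/3)) = 3, so numSubgridsX = min(3, 10) = 3,
//then n = ceil(10/3) = 4 gives numSubgridsY = 3, then n = ceil(4/3) = 2 gives numSubgridsZ = 2;
//the 3x3x2 = 18 slots are enough to hold all 10 subgrids.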
PxU32 debug2 = numSubgridsX * numSubgridsY * numSubgridsZ;
PX_ASSERT(debug2 >= debug);
PX_UNUSED(debug);
PX_UNUSED(debug2);
PxU32 size = valuesPerSubgrid * numSubgridsX * numSubgridsY * numSubgridsZ;
PxReal placeholder = 1234567;
subgrids3DTexFormat.resize(size, placeholder);
PxU32 w = width / cellsPerSubgrid;
PxU32 h = height / cellsPerSubgrid;
PxU32 d = depth / cellsPerSubgrid;
PxU32 l = (w)*(h)*(d);
for (PxU32 i = 0; i < l; ++i)
{
PxU32 startSlot = sdfFineStartSlots[i];
if (startSlot != 0xFFFFFFFF)
{
PxU32 baseIndex = startSlot * (cellsPerSubgrid + 1) * (cellsPerSubgrid + 1) * (cellsPerSubgrid + 1);
const PxReal* sdfFine = &sdfFineSubgridsIn[baseIndex];
PxU32 startSlotX, startSlotY, startSlotZ;
idToXYZ(startSlot, numSubgridsX, numSubgridsY, startSlotX, startSlotY, startSlotZ);
sdfFineStartSlots[i] = encodeTriple(startSlotX, startSlotY, startSlotZ);
for (PxU32 zLocal = 0; zLocal <= cellsPerSubgrid; ++zLocal)
{
for (PxU32 yLocal = 0; yLocal <= cellsPerSubgrid; ++yLocal)
{
for (PxU32 xLocal = 0; xLocal <= cellsPerSubgrid; ++xLocal)
{
PxReal sdfValue = sdfFine[idx3D(xLocal, yLocal, zLocal, cellsPerSubgrid+1, cellsPerSubgrid+1)];
PxU32 index = idx3D(xLocal + startSlotX * (cellsPerSubgrid + 1), yLocal + startSlotY * (cellsPerSubgrid + 1), zLocal + startSlotZ * (cellsPerSubgrid + 1),
numSubgridsX * (cellsPerSubgrid + 1), numSubgridsY * (cellsPerSubgrid + 1));
PX_ASSERT(subgrids3DTexFormat[index] == placeholder);
subgrids3DTexFormat[index] = sdfValue;
PX_ASSERT(PxIsFinite(sdfValue));
}
}
}
}
}
}
struct Interval
{
PxReal min;
PxReal max;
PX_CUDA_CALLABLE Interval() : min(FLT_MAX), max(-FLT_MAX)
{}
PX_CUDA_CALLABLE Interval(PxReal min_, PxReal max_) : min(min_), max(max_)
{}
PX_FORCE_INLINE PX_CUDA_CALLABLE bool overlaps(const Interval& i) const
{
return !(min > i.max || i.min > max);
}
};
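//Builds a two-level (sparse) SDF: a dense SDF is computed first, then downsampled to a coarse
//background grid with (w+1)*(h+1)*(d+1) samples where w = width/cellsPerSubgrid etc. Blocks whose
//value range overlaps the narrow band additionally store a full (cellsPerSubgrid+1)^3 subgrid;
//all other blocks keep 0xFFFFFFFF in sdfFineStartSlots and are represented by the coarse samples only.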
void SDFUsingWindingNumbersSparse(const PxVec3* vertices, const PxU32* indices, PxU32 numTriangleIndices, PxU32 width, PxU32 height, PxU32 depth,
const PxVec3& minExtents, const PxVec3& maxExtents, PxReal narrowBandThickness, PxU32 cellsPerSubgrid,
PxArray<PxReal>& sdfCoarse, PxArray<PxU32>& sdfFineStartSlots, PxArray<PxReal>& subgridData, PxArray<PxReal>& denseSdf,
PxReal& subgridsMinSdfValue, PxReal& subgridsMaxSdfValue, PxU32 numThreads, PxSDFBuilder* sdfBuilder)
{
PX_ASSERT(width % cellsPerSubgrid == 0);
PX_ASSERT(height % cellsPerSubgrid == 0);
PX_ASSERT(depth % cellsPerSubgrid == 0);
const PxVec3 extents(maxExtents - minExtents);
const PxVec3 delta(extents.x / width, extents.y / height, extents.z / depth);
const PxU32 w = width / cellsPerSubgrid;
const PxU32 h = height / cellsPerSubgrid;
const PxU32 d = depth / cellsPerSubgrid;
denseSdf.resize((width + 1) * (height + 1) * (depth + 1));
SDFUsingWindingNumbers(vertices, indices, numTriangleIndices, width + 1, height + 1, depth + 1, denseSdf.begin(), minExtents, maxExtents + delta, NULL, false, numThreads, sdfBuilder);
sdfCoarse.clear();
sdfFineStartSlots.clear();
subgridData.clear();
sdfCoarse.reserve((w + 1) * (h + 1) * (d + 1));
sdfFineStartSlots.reserve(w * h * d);
for (PxU32 zBlock = 0; zBlock < d; ++zBlock)
for (PxU32 yBlock = 0; yBlock < h; ++yBlock)
for (PxU32 xBlock = 0; xBlock < w; ++xBlock)
{
sdfFineStartSlots.pushBack(0xFFFFFFFF);
}
for (PxU32 zBlock = 0; zBlock <= d; ++zBlock)
for (PxU32 yBlock = 0; yBlock <= h; ++yBlock)
for (PxU32 xBlock = 0; xBlock <= w; ++xBlock)
{
PxU32 x = xBlock * cellsPerSubgrid;
PxU32 y = yBlock * cellsPerSubgrid;
PxU32 z = zBlock * cellsPerSubgrid;
const PxU32 index = idx3D(x, y, z, width+1, height+1);
PX_ASSERT(index < denseSdf.size());
PxReal sdfValue = denseSdf[index];
sdfCoarse.pushBack(sdfValue);
}
#if PX_DEBUG
for (PxU32 zBlock = 0; zBlock <= d; ++zBlock)
for (PxU32 yBlock = 0; yBlock <= h; ++yBlock)
for (PxU32 xBlock = 0; xBlock <= w; ++xBlock)
{
PxU32 x = xBlock * cellsPerSubgrid;
PxU32 y = yBlock * cellsPerSubgrid;
PxU32 z = zBlock * cellsPerSubgrid;
const PxU32 index = idx3D(x, y, z, width+1, height+1);
const PxU32 indexCoarse = idx3D(xBlock, yBlock, zBlock, w+1, h+1);
PX_ASSERT(sdfCoarse[indexCoarse] == denseSdf[index]);
PX_UNUSED(indexCoarse);
PX_UNUSED(index);
}
#endif
Interval narrowBandInterval(-narrowBandThickness, narrowBandThickness);
DenseSDF coarseEval(w + 1, h + 1, d + 1, sdfCoarse.begin());
PxReal s = 1.0f / cellsPerSubgrid;
const PxReal errorThreshold = 1e-6f * extents.magnitude();
PxU32 subgridIndexer = 0;
subgridsMaxSdfValue = -FLT_MAX;
subgridsMinSdfValue = FLT_MAX;
for (PxU32 zBlock = 0; zBlock < d; ++zBlock)
{
for (PxU32 yBlock = 0; yBlock < h; ++yBlock)
{
for (PxU32 xBlock = 0; xBlock < w; ++xBlock)
{
bool subgridRequired = false;
Interval interval;
PxReal maxAbsError = 0.0f;
for (PxU32 zLocal = 0; zLocal <= cellsPerSubgrid; ++zLocal)
{
for (PxU32 yLocal = 0; yLocal <= cellsPerSubgrid; ++yLocal)
{
for (PxU32 xLocal = 0; xLocal <= cellsPerSubgrid; ++xLocal)
{
PxU32 x = xBlock * cellsPerSubgrid + xLocal;
PxU32 y = yBlock * cellsPerSubgrid + yLocal;
PxU32 z = zBlock * cellsPerSubgrid + zLocal;
const PxU32 index = idx3D(x, y, z, width+1, height+1);
PxReal sdfValue = denseSdf[index];
interval.max = PxMax(interval.max, sdfValue);
interval.min = PxMin(interval.min, sdfValue);
maxAbsError = PxMax(maxAbsError, PxAbs(sdfValue - coarseEval.sampleSDFDirect(PxVec3(xBlock + xLocal * s, yBlock + yLocal * s, zBlock + zLocal * s))));
}
}
}
subgridRequired = narrowBandInterval.overlaps(interval);
if (maxAbsError < errorThreshold)
subgridRequired = false; //No need for a subgrid if the coarse SDF is already almost exact
if (subgridRequired)
{
subgridsMaxSdfValue = PxMax(subgridsMaxSdfValue, interval.max);
subgridsMinSdfValue = PxMin(subgridsMinSdfValue, interval.min);
for (PxU32 zLocal = 0; zLocal <= cellsPerSubgrid; ++zLocal)
{
for (PxU32 yLocal = 0; yLocal <= cellsPerSubgrid; ++yLocal)
{
for (PxU32 xLocal = 0; xLocal <= cellsPerSubgrid; ++xLocal)
{
PxU32 x = xBlock * cellsPerSubgrid + xLocal;
PxU32 y = yBlock * cellsPerSubgrid + yLocal;
PxU32 z = zBlock * cellsPerSubgrid + zLocal;
const PxU32 index = z * (width + 1) * (height + 1) + y * (width + 1) + x;
PxReal sdfValue = denseSdf[index];
subgridData.pushBack(sdfValue);
}
}
}
sdfFineStartSlots[idx3D(xBlock, yBlock, zBlock, w, h)] = subgridIndexer;
++subgridIndexer;
}
}
}
}
}
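//Decodes one sample of a sparse SDF at integer grid coordinates, e.g. for mSubgridSize = 6
//the query (13, 0, 6) maps to block (2, 0, 1) with local offset (1, 0, 0). If the block
//stores a subgrid, the sample is fetched from the 3D texture layout and dequantized;
//otherwise the coarse background grid is interpolated. Out-of-range queries return a
//positive value that counts as "outside".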
PX_INLINE PxReal decodeSparse2(const SDF& sdf, PxI32 xx, PxI32 yy, PxI32 zz)
{
if (xx < 0 || yy < 0 || zz < 0 || xx > PxI32(sdf.mDims.x) || yy > PxI32(sdf.mDims.y) || zz > PxI32(sdf.mDims.z))
return 1.0f; //Return a value >0 that counts as outside
const PxU32 nbX = sdf.mDims.x / sdf.mSubgridSize;
const PxU32 nbY = sdf.mDims.y / sdf.mSubgridSize;
const PxU32 nbZ = sdf.mDims.z / sdf.mSubgridSize;
PxU32 xBase = xx / sdf.mSubgridSize;
PxU32 yBase = yy / sdf.mSubgridSize;
PxU32 zBase = zz / sdf.mSubgridSize;
PxU32 x = xx % sdf.mSubgridSize;
PxU32 y = yy % sdf.mSubgridSize;
PxU32 z = zz % sdf.mSubgridSize;
if (xBase == nbX)
{
--xBase;
x = sdf.mSubgridSize;
}
if (yBase == nbY)
{
--yBase;
y = sdf.mSubgridSize;
}
if (zBase == nbZ)
{
--zBase;
z = sdf.mSubgridSize;
}
PxU32 startId = sdf.mSubgridStartSlots[zBase * nbX * nbY + yBase * nbX + xBase];
if (startId != 0xFFFFFFFFu)
{
SDF::decodeTriple(startId, xBase, yBase, zBase);
/*if (xBase >= mSdfSubgrids3DTexBlockDim.x || yBase >= mSdfSubgrids3DTexBlockDim.y || zBase >= mSdfSubgrids3DTexBlockDim.z)
{
PxGetFoundation().error(::physx::PxErrorCode::eINTERNAL_ERROR, PX_FL, "Out of bounds subgrid index\n");
//printf("%i %i %i %i\n", PxI32(startId), PxI32(xBase), PxI32(yBase), PxI32(zBase));
}*/
xBase *= (sdf.mSubgridSize + 1);
yBase *= (sdf.mSubgridSize + 1);
zBase *= (sdf.mSubgridSize + 1);
const PxU32 w = sdf.mSdfSubgrids3DTexBlockDim.x * (sdf.mSubgridSize + 1);
const PxU32 h = sdf.mSdfSubgrids3DTexBlockDim.y * (sdf.mSubgridSize + 1);
const PxU32 index = idx3D(xBase + x, yBase + y, zBase + z, w, h);
//if (mBytesPerSparsePixel * index >= mNumSubgridSdfs)
// PxGetFoundation().error(::physx::PxErrorCode::eINTERNAL_ERROR, PX_FL, "Out of bounds sdf subgrid access\n");
return SDF::decodeSample(sdf.mSubgridSdf, index,
sdf.mBytesPerSparsePixel, sdf.mSubgridsMinSdfValue, sdf.mSubgridsMaxSdfValue);
}
else
{
DenseSDF coarseEval(nbX + 1, nbY + 1, nbZ + 1, sdf.mSdf);
PxReal s = 1.0f / sdf.mSubgridSize;
PxReal result = coarseEval.sampleSDFDirect(PxVec3(xBase + x * s, yBase + y * s, zBase + z * s));
return result;
}
}
PxReal SDF::decodeSparse(PxI32 xx, PxI32 yy, PxI32 zz) const
{
return decodeSparse2(*this, xx, yy, zz);
}
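//Packs three signed cell indices into one 64-bit key: each index is biased by 2^19 and
//stored in a 21-bit field, so indices in roughly [-524288, 1572863] stay unique.
//E.g. xId = -1 encodes as 524287 in the lowest 21 bits.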
PX_FORCE_INLINE PxU64 key(PxI32 xId, PxI32 yId, PxI32 zId)
{
const PxI32 offset = 1 << 19;
return (PxU64(zId + offset) << 42) | (PxU64(yId + offset) << 21) | (PxU64(xId + offset) << 0);
}
const PxI32 offsets[3][3][3] = { { {0,-1,0}, {0,-1,-1}, {0,0,-1} },
{ {0,0,-1}, {-1,0,-1}, {-1,0,0} } ,
{ {-1,0,0}, {-1,-1,0}, {0,-1,0} } };
const PxI32 projections[3][2] = { {1, 2}, {2, 0}, {0, 1} };
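//For a sign change on a grid edge along axis dim, offsets[dim] lists the three neighbor
//cells that share this edge with the current cell; together the four cells contribute the
//corners of one quad. projections[dim] selects the two axes orthogonal to dim, which
//dirSign uses for a 2D cross product test to orient that quad.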
PX_FORCE_INLINE PxReal dirSign(PxI32 principalDirection, const PxVec3& start, const PxVec3& middle, const PxVec3& end)
{
PxReal a0 = middle[projections[principalDirection][0]] - start[projections[principalDirection][0]];
PxReal a1 = middle[projections[principalDirection][1]] - start[projections[principalDirection][1]];
PxReal b0 = end[projections[principalDirection][0]] - middle[projections[principalDirection][0]];
PxReal b1 = end[projections[principalDirection][1]] - middle[projections[principalDirection][1]];
return a0 * b1 - a1 * b0;
}
PX_FORCE_INLINE PxI32 indexOfMostConcaveCorner(PxI32 principalDirection, const PxVec3& a, const PxVec3& b, const PxVec3& c, const PxVec3& d)
{
PxReal minimum = 0;
PxI32 result = -1;
PxReal s = dirSign(principalDirection, a, b, c);
if (s <= minimum)
{
minimum = s;
result = 1;
}
s = dirSign(principalDirection, b, c, d);
if (s <= minimum)
{
minimum = s;
result = 2;
}
s = dirSign(principalDirection, c, d, a);
if (s <= minimum)
{
minimum = s;
result = 3;
}
s = dirSign(principalDirection, d, a, b);
if (s <= minimum)
{
minimum = s;
result = 0;
}
return result;
}
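//Surface-nets style vertex placement: if the SDF changes sign inside the cell, the zero
//crossings on the (up to 12) cell edges are averaged to a single representative point.
//The three double loops below scan the four edges parallel to z, y and x respectively;
//each crossing is located by linear interpolation of the corner values.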
bool generatePointInCell(const Gu::SDF& sdf, PxI32 x, PxI32 y, PxI32 z, PxVec3& point, PxReal corners[2][2][2])
{
const PxReal threshold = 0.0f;
PxU32 positiveCounter = 0;
PxU32 negativeCounter = 0;
for (PxI32 xx = 0; xx <= 1; ++xx) for (PxI32 yy = 0; yy <= 1; ++yy) for (PxI32 zz = 0; zz <= 1; ++zz)
{
PxReal v = corners[xx][yy][zz];
if (v > 0)
++positiveCounter;
if (v < 0)
++negativeCounter;
}
PxBounds3 box;
box.minimum = sdf.mMeshLower + PxVec3(x * sdf.mSpacing, y * sdf.mSpacing, z * sdf.mSpacing);
box.maximum = box.minimum + PxVec3(sdf.mSpacing);
if (positiveCounter == 8 || negativeCounter == 8)
{
//Nothing to do because surface does not cross the current cell
}
else
{
//If the cell is not completely inside or outside, then find a point inside the cube that divides it into 8 cuboids
PxU32 counter = 0;
PxVec3 sum(0.0f);
for (PxI32 a = 0; a <= 1; ++a) for (PxI32 b = 0; b <= 1; ++b)
{
PxReal p = corners[a][b][0];
PxReal q = corners[a][b][1];
if ((p <= threshold && q >= threshold) || (q <= threshold && p >= threshold))
{
PxReal t = (q != p) ? PxClamp((threshold - p) / (q - p), 0.0f, 1.0f) : 0.5f;
sum += PxVec3(PxReal(a), PxReal(b), t);
++counter;
}
}
for (PxI32 a = 0; a <= 1; ++a) for (PxI32 b = 0; b <= 1; ++b)
{
PxReal p = corners[b][0][a];
PxReal q = corners[b][1][a];
if ((p <= threshold && q >= threshold) || (q <= threshold && p >= threshold))
{
PxReal t = (q != p) ? PxClamp((threshold - p) / (q - p), 0.0f, 1.0f) : 0.5f;
sum += PxVec3(PxReal(b), t, PxReal(a));
++counter;
}
}
for (PxI32 a = 0; a <= 1; ++a) for (PxI32 b = 0; b <= 1; ++b)
{
PxReal p = corners[0][a][b];
PxReal q = corners[1][a][b];
if ((p <= threshold && q >= threshold) || (q <= threshold && p >= threshold))
{
PxReal t = (q != p) ? PxClamp((threshold - p) / (q - p), 0.0f, 1.0f) : 0.5f;
sum += PxVec3(t, PxReal(a), PxReal(b));
++counter;
}
}
if (counter > 0)
{
point = box.minimum + sum * (sdf.mSpacing / counter);
return true;
}
}
return false;
}
PX_FORCE_INLINE bool generatePointInCell(const Gu::SDF& sdf, PxI32 x, PxI32 y, PxI32 z, PxVec3& point)
{
PxReal corners[2][2][2];
for (PxI32 xx = 0; xx <= 1; ++xx) for (PxI32 yy = 0; yy <= 1; ++yy) for (PxI32 zz = 0; zz <= 1; ++zz)
{
PxReal v = sdf.decodeSparse(x + xx, y + yy, z + zz);
corners[xx][yy][zz] = v;
}
return generatePointInCell(sdf, x, y, z, point, corners);
}
PX_FORCE_INLINE bool generatePointInCellUsingCache(const Gu::SDF& sdf, PxI32 xBase, PxI32 yBase, PxI32 zBase, PxI32 x, PxI32 y, PxI32 z, PxVec3& point, const PxArray<PxReal>& cache)
{
const PxU32 s = sdf.mSubgridSize + 1;
PxReal corners[2][2][2];
for (PxI32 xx = 0; xx <= 1; ++xx) for (PxI32 yy = 0; yy <= 1; ++yy) for (PxI32 zz = 0; zz <= 1; ++zz)
{
PxReal v = cache[idx3D(x + xx, y + yy, z + zz, s, s)];
corners[xx][yy][zz] = v;
}
return generatePointInCell(sdf, xBase * sdf.mSubgridSize + x, yBase * sdf.mSubgridSize + y, zBase * sdf.mSubgridSize + z, point, corners);
}
PxReal SDF::decodeDense(PxI32 x, PxI32 y, PxI32 z) const
{
if (x < 0 || y < 0 || z < 0 || x >= PxI32(mDims.x) || y >= PxI32(mDims.y) || z >= PxI32(mDims.z))
return 1.0f; //Return a value >0 that counts as outside
return mSdf[idx3D(x, y, z, mDims.x, mDims.y)];
}
PX_FORCE_INLINE bool generatePointInCellDense(const Gu::SDF& sdf, PxI32 x, PxI32 y, PxI32 z, PxVec3& point)
{
PxReal corners[2][2][2];
for (PxI32 xx = 0; xx <= 1; ++xx) for (PxI32 yy = 0; yy <= 1; ++yy) for (PxI32 zz = 0; zz <= 1; ++zz)
{
PxReal v = sdf.decodeDense(x + xx, y + yy, z + zz);
corners[xx][yy][zz] = v;
}
return generatePointInCell(sdf, x, y, z, point, corners);
}
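//A coarse block that stores no subgrid can be skipped during isosurface extraction when
//all eight of its coarse corner samples lie on the same side of the surface (with a
//tolerance of 0.1 * mSpacing): no triangle can originate from such a block.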
PX_FORCE_INLINE bool canSkipSubgrid(const Gu::SDF& sdf, PxI32 i, PxI32 j, PxI32 k)
{
const PxReal t = 0.1f * sdf.mSpacing;
const PxI32 nbX = sdf.mDims.x / sdf.mSubgridSize;
const PxI32 nbY = sdf.mDims.y / sdf.mSubgridSize;
const PxI32 nbZ = sdf.mDims.z / sdf.mSubgridSize;
if (i < 0 || j < 0 || k < 0 || i >= nbX || j >= nbY || k >= nbZ)
return false;
if (sdf.mSubgridStartSlots[k * nbX * nbY + j * nbX + i] == 0xFFFFFFFFu)
{
PxU32 positiveCounter = 0;
PxU32 negativeCounter = 0;
for (PxI32 xx = 0; xx <= 1; ++xx) for (PxI32 yy = 0; yy <= 1; ++yy) for (PxI32 zz = 0; zz <= 1; ++zz)
{
PxReal v = decodeSparse2(sdf, (i + xx)* sdf.mSubgridSize, (j + yy) * sdf.mSubgridSize, (k + zz) * sdf.mSubgridSize);
if (v > t)
++positiveCounter;
if (v < t)
++negativeCounter;
}
if (positiveCounter == 8 || negativeCounter == 8)
return true;
}
return false;
}
struct Map : public PxHashMap<PxU64, PxU32>, public PxUserAllocated
{
Map()
{
}
};
struct CellToPoint
{
PxArray<Map*> cellToPoint;
CellToPoint(PxU32 numThreads)
{
cellToPoint.resize(numThreads);
for (PxU32 i = 0; i < cellToPoint.size(); ++i)
cellToPoint[i] = PX_NEW(Map);
}
~CellToPoint()
{
for (PxU32 i = 0; i < cellToPoint.size(); ++i)
{
PX_DELETE(cellToPoint[i]);
}
}
const PxPair<const PxU64, PxU32>* find(PxI32 xId, PxI32 yId, PxI32 zId) const
{
PxU64 k = key(xId, yId, zId);
for (PxU32 i = 0; i < cellToPoint.size(); ++i)
{
const PxPair<const PxU64, PxU32>* f = cellToPoint[i]->find(k);
if (f)
return f;
}
return NULL;
}
void insert(PxI32 threadId, PxI32 xId, PxI32 yId, PxI32 zId, PxU32 value)
{
cellToPoint[threadId]->insert(key(xId, yId, zId), value);
}
};
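//Emits triangles for every sign change between cell (xId, yId, zId) and its three
//positive-axis neighbors. The four cells around the crossed edge each provide one
//previously generated vertex (looked up via cellToPoint); the resulting quad is split
//into two triangles along the diagonal chosen by indexOfMostConcaveCorner, with the
//winding derived from the direction of the sign change.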
PX_INLINE void createTriangles(PxI32 xId, PxI32 yId, PxI32 zId, PxReal d0, PxReal ds[3],
const CellToPoint& cellToPoint, const PxArray<PxVec3>& points, PxArray<PxU32>& triangleIndices)
{
bool flipTriangleOrientation = false;
const PxReal threshold = 0.0f;
PxI32 num = 0;
for (PxI32 dim = 0; dim < 3; dim++)
{
PxReal d = ds[dim];
if ((d0 <= threshold && d >= threshold) || (d <= threshold && d0 >= threshold))
num++;
}
if (num == 0)
return;
PxI32 buffer[4];
const PxPair<const PxU64, PxU32>* f = cellToPoint.find(xId, yId, zId);
if (!f)
return;
buffer[0] = f->second;
PxVec3 v0 = points[buffer[0]];
for (PxI32 dim = 0; dim < 3; dim++)
{
PxReal d = ds[dim];
bool b1 = d0 <= threshold && d >= threshold;
bool b2 = d <= threshold && d0 >= threshold;
if (b1 || b2)
{
bool flip = flipTriangleOrientation == b1;
bool skip = false;
for (PxI32 ii = 0; ii < 3; ++ii)
{
f = cellToPoint.find(xId + offsets[dim][ii][0], yId + offsets[dim][ii][1], zId + offsets[dim][ii][2]);
if (f)
buffer[ii + 1] = f->second;
else
skip = true;
}
if (skip)
continue;
PxI32 shift = PxMax(0, indexOfMostConcaveCorner(dim, v0, points[buffer[1]], points[buffer[2]], points[buffer[3]])) % 2;
//Split the quad into two triangles
for (PxI32 ii = 0; ii < 2; ++ii)
{
triangleIndices.pushBack(buffer[shift]);
if (flip)
{
for (PxI32 jj = 2; jj >= 1; --jj)
triangleIndices.pushBack(buffer[(ii + jj + shift) % 4]);
}
else
{
for (PxI32 jj = 1; jj < 3; ++jj)
triangleIndices.pushBack(buffer[(ii + jj + shift) % 4]);
}
}
}
}
}
PX_INLINE void populateSubgridCache(const Gu::SDF& sdf, PxArray<PxReal>& sdfCache, PxI32 i, PxI32 j, PxI32 k)
{
const PxU32 s = sdf.mSubgridSize + 1;
for (PxU32 z = 0; z <= sdf.mSubgridSize; ++z)
for (PxU32 y = 0; y <= sdf.mSubgridSize; ++y)
for (PxU32 x = 0; x <= sdf.mSubgridSize; ++x)
{
sdfCache[idx3D(x, y, z, s, s)] =
decodeSparse2(sdf, i * PxI32(sdf.mSubgridSize) + PxI32(x),
j * PxI32(sdf.mSubgridSize) + PxI32(y),
k * PxI32(sdf.mSubgridSize) + PxI32(z));
}
}
struct IsosurfaceThreadData
{
const Gu::SDF& sdf;
PxArray<PxVec3> isosurfaceVertices;
const PxArray<PxVec3>& allIsosurfaceVertices;
PxArray<PxU32> isosurfaceTriangleIndices;
PxArray<PxReal> sdfCache;
CellToPoint& cellToPoint;
PxU32 startIndex;
PxU32 endIndex;
PxU32 threadIndex;
PxI32 nbX;
PxI32 nbY;
PxI32 nbZ;
IsosurfaceThreadData(const Gu::SDF& sdf_, CellToPoint& cellToPoint_, const PxArray<PxVec3>& allIsosurfaceVertices_) :
sdf(sdf_), allIsosurfaceVertices(allIsosurfaceVertices_), cellToPoint(cellToPoint_)
{ }
};
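//The parallel isosurface extraction runs in two passes over the same block range:
//computeIsosurfaceVerticesThreadJob generates one vertex per sign-crossing cell into
//thread-local arrays, then (after an index fixup) computeIsosurfaceTrianglesThreadJob
//connects the merged vertices into triangles.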
void* computeIsosurfaceVerticesThreadJob(void* data)
{
IsosurfaceThreadData & d = *reinterpret_cast<IsosurfaceThreadData*>(data);
for (PxU32 indexer = d.startIndex; indexer < d.endIndex; ++indexer)
{
PxU32 ii, jj, kk;
idToXYZ(indexer, d.nbX, d.nbY, ii, jj, kk);
PxI32 i = PxI32(ii) - 1;
PxI32 j = PxI32(jj) - 1;
PxI32 k = PxI32(kk) - 1;
if (canSkipSubgrid(d.sdf, i, j, k))
continue;
populateSubgridCache(d.sdf, d.sdfCache, i, j, k);
//Process the subgrid
for (PxU32 z = 0; z < d.sdf.mSubgridSize; ++z)
{
for (PxU32 y = 0; y < d.sdf.mSubgridSize; ++y)
{
for (PxU32 x = 0; x < d.sdf.mSubgridSize; ++x)
{
PxVec3 p;
if (generatePointInCellUsingCache(d.sdf, i, j, k, x, y, z, p, d.sdfCache))
{
PxU32 xId = i * d.sdf.mSubgridSize + x;
PxU32 yId = j * d.sdf.mSubgridSize + y;
PxU32 zId = k * d.sdf.mSubgridSize + z;
d.cellToPoint.insert(d.threadIndex, xId, yId, zId, d.isosurfaceVertices.size());
d.isosurfaceVertices.pushBack(p);
}
}
}
}
}
return NULL;
}
void* computeIsosurfaceTrianglesThreadJob(void* data)
{
IsosurfaceThreadData & d = *reinterpret_cast<IsosurfaceThreadData*>(data);
const PxU32 s = d.sdf.mSubgridSize + 1;
for (PxU32 indexer = d.startIndex; indexer < d.endIndex; ++indexer)
{
PxU32 ii, jj, kk;
idToXYZ(indexer, d.nbX, d.nbY, ii, jj, kk);
PxI32 i = PxI32(ii) - 1;
PxI32 j = PxI32(jj) - 1;
PxI32 k = PxI32(kk) - 1;
if (canSkipSubgrid(d.sdf, i, j, k))
continue;
populateSubgridCache(d.sdf, d.sdfCache, i, j, k);
PxReal ds[3];
//Process the subgrid
for (PxU32 z = 0; z < d.sdf.mSubgridSize; ++z)
{
for (PxU32 y = 0; y < d.sdf.mSubgridSize; ++y)
{
for (PxU32 x = 0; x < d.sdf.mSubgridSize; ++x)
{
PxReal d0 = d.sdfCache[idx3D(x, y, z, s, s)];
ds[0] = d.sdfCache[idx3D(x + 1, y, z, s, s)];
ds[1] = d.sdfCache[idx3D(x, y + 1, z, s, s)];
ds[2] = d.sdfCache[idx3D(x, y, z + 1, s, s)];
createTriangles(x + i * d.sdf.mSubgridSize, y + j * d.sdf.mSubgridSize, z + k * d.sdf.mSubgridSize, d0, ds,
d.cellToPoint, d.allIsosurfaceVertices, d.isosurfaceTriangleIndices);
}
}
}
}
return NULL;
}
void extractIsosurfaceFromSDFSerial(const Gu::SDF& sdf, PxArray<PxVec3>& isosurfaceVertices, PxArray<PxU32>& isosurfaceTriangleIndices)
{
isosurfaceVertices.clear();
isosurfaceTriangleIndices.clear();
const PxI32 nbX = sdf.mDims.x / PxMax(1u, sdf.mSubgridSize);
const PxI32 nbY = sdf.mDims.y / PxMax(1u, sdf.mSubgridSize);
const PxI32 nbZ = sdf.mDims.z / PxMax(1u, sdf.mSubgridSize);
PxU32 sizeEstimate = PxU32(PxSqrt(PxReal(nbX * nbY * nbZ)));
CellToPoint cellToPoint(1);
isosurfaceVertices.reserve(sizeEstimate);
isosurfaceTriangleIndices.reserve(sizeEstimate);
PxArray<PxReal> sdfCache;
sdfCache.resize((sdf.mSubgridSize + 1) * (sdf.mSubgridSize + 1) * (sdf.mSubgridSize + 1));
if (sdf.mSubgridSize == 0)
{
//Dense SDF
for (PxI32 k = -1; k <= nbZ; ++k)
for (PxI32 j = -1; j <= nbY; ++j)
for (PxI32 i = -1; i <= nbX; ++i)
{
PxVec3 p;
if (generatePointInCellDense(sdf, i, j, k, p))
{
cellToPoint.insert(0, i, j, k, isosurfaceVertices.size());
isosurfaceVertices.pushBack(p);
}
}
}
else
{
for (PxI32 k = -1; k <= nbZ; ++k)
{
for (PxI32 j = -1; j <= nbY; ++j)
{
for (PxI32 i = -1; i <= nbX; ++i)
{
if (canSkipSubgrid(sdf, i, j, k))
continue;
populateSubgridCache(sdf, sdfCache, i, j, k);
//Process the subgrid
for (PxU32 z = 0; z < sdf.mSubgridSize; ++z)
{
for (PxU32 y = 0; y < sdf.mSubgridSize; ++y)
{
for (PxU32 x = 0; x < sdf.mSubgridSize; ++x)
{
PxVec3 p;
if (generatePointInCellUsingCache(sdf, i, j, k, x, y, z, p, sdfCache))
{
PxU32 xId = i * sdf.mSubgridSize + x;
PxU32 yId = j * sdf.mSubgridSize + y;
PxU32 zId = k * sdf.mSubgridSize + z;
cellToPoint.insert(0, xId, yId, zId, isosurfaceVertices.size());
isosurfaceVertices.pushBack(p);
}
}
}
}
}
}
}
}
if (sdf.mSubgridSize == 0)
{
for (PxI32 k = -1; k <= nbZ; ++k)
for (PxI32 j = -1; j <= nbY; ++j)
for (PxI32 i = -1; i <= nbX; ++i)
{
PxReal d0 = sdf.decodeDense(i, j, k);
PxReal ds[3];
ds[0] = sdf.decodeDense(i + 1, j, k);
ds[1] = sdf.decodeDense(i, j + 1, k);
ds[2] = sdf.decodeDense(i, j, k + 1);
createTriangles(i, j, k, d0, ds, cellToPoint, isosurfaceVertices, isosurfaceTriangleIndices);
}
}
else
{
const PxU32 s = sdf.mSubgridSize + 1;
for (PxI32 k = -1; k <= nbZ; ++k)
{
for (PxI32 j = -1; j <= nbY; ++j)
{
for (PxI32 i = -1; i <= nbX; ++i)
{
if (canSkipSubgrid(sdf, i, j, k))
continue;
populateSubgridCache(sdf, sdfCache, i, j, k);
PxReal ds[3];
//Process the subgrid
for (PxU32 z = 0; z < sdf.mSubgridSize; ++z)
{
for (PxU32 y = 0; y < sdf.mSubgridSize; ++y)
{
for (PxU32 x = 0; x < sdf.mSubgridSize; ++x)
{
PxReal d0 = sdfCache[idx3D(x, y, z, s, s)];
ds[0] = sdfCache[idx3D(x + 1, y, z, s, s)];
ds[1] = sdfCache[idx3D(x, y + 1, z, s, s)];
ds[2] = sdfCache[idx3D(x, y, z + 1, s, s)];
createTriangles(x + i * sdf.mSubgridSize, y + j * sdf.mSubgridSize, z + k * sdf.mSubgridSize, d0, ds,
cellToPoint, isosurfaceVertices, isosurfaceTriangleIndices);
}
}
}
}
}
}
}
}
void extractIsosurfaceFromSDF(const Gu::SDF& sdf, PxArray<PxVec3>& isosurfaceVertices, PxArray<PxU32>& isosurfaceTriangleIndices, PxU32 numThreads)
{
if (sdf.mSubgridSize == 0)
{
//Handle dense SDFs using the serial fallback
extractIsosurfaceFromSDFSerial(sdf, isosurfaceVertices, isosurfaceTriangleIndices);
return;
}
numThreads = PxMax(1u, numThreads);
PxArray<PxThread*> threads;
PxArray<IsosurfaceThreadData> perThreadData;
CellToPoint cellToPoint(numThreads);
const PxI32 nbX = sdf.mDims.x / PxMax(1u, sdf.mSubgridSize);
const PxI32 nbY = sdf.mDims.y / PxMax(1u, sdf.mSubgridSize);
const PxI32 nbZ = sdf.mDims.z / PxMax(1u, sdf.mSubgridSize);
PxU32 l = (nbX + 2) * (nbY + 2) * (nbZ + 2);
PxU32 range = l / numThreads;
for (PxU32 i = 0; i < numThreads; ++i)
{
perThreadData.pushBack(IsosurfaceThreadData(sdf, cellToPoint, isosurfaceVertices));
IsosurfaceThreadData& d = perThreadData[i];
d.startIndex = i * range;
d.endIndex = (i + 1) * range;
if (i == numThreads - 1)
d.endIndex = l;
d.nbX = nbX + 2;
d.nbY = nbY + 2;
d.nbZ = nbZ + 2;
d.sdfCache.resize((sdf.mSubgridSize + 1) * (sdf.mSubgridSize + 1) * (sdf.mSubgridSize + 1));
d.threadIndex = i;
}
for (PxU32 i = 0; i < numThreads; ++i)
{
if (perThreadData.size() == 1)
computeIsosurfaceVerticesThreadJob(&perThreadData[i]);
else
{
threads.pushBack(PX_NEW(PxThread)(computeIsosurfaceVerticesThreadJob, &perThreadData[i], "thread"));
threads[i]->start();
}
}
for (PxU32 i = 0; i < threads.size(); ++i)
{
threads[i]->waitForQuit();
}
for (PxU32 i = 0; i < threads.size(); ++i)
{
threads[i]->~PxThreadT();
PX_FREE(threads[i]);
}
//Collect vertices
PxU32 sum = 0;
for (PxU32 i = 0; i < perThreadData.size(); ++i)
{
IsosurfaceThreadData& d = perThreadData[i];
if (sum > 0)
{
for (PxHashMap<PxU64, PxU32>::Iterator iter = cellToPoint.cellToPoint[i]->getIterator(); !iter.done(); ++iter)
iter->second += sum;
}
sum += d.isosurfaceVertices.size();
}
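//After this fixup every cellToPoint entry indexes into the concatenated vertex array assembled below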
isosurfaceVertices.reserve(sum);
for (PxU32 i = 0; i < perThreadData.size(); ++i)
{
IsosurfaceThreadData& d = perThreadData[i];
for (PxU32 j = 0; j < d.isosurfaceVertices.size(); ++j)
isosurfaceVertices.pushBack(d.isosurfaceVertices[j]);
d.isosurfaceVertices.reset(); //Release memory that is not needed anymore
}
threads.clear();
for (PxU32 i = 0; i < numThreads; ++i)
{
if (perThreadData.size() == 1)
computeIsosurfaceTrianglesThreadJob(&perThreadData[i]);
else
{
threads.pushBack(PX_NEW(PxThread)(computeIsosurfaceTrianglesThreadJob, &perThreadData[i], "thread"));
threads[i]->start();
}
}
for (PxU32 i = 0; i < threads.size(); ++i)
{
threads[i]->waitForQuit();
}
for (PxU32 i = 0; i < threads.size(); ++i)
{
threads[i]->~PxThreadT();
PX_FREE(threads[i]);
}
//Collect triangles
sum = 0;
for (PxU32 i = 0; i < perThreadData.size(); ++i)
sum += perThreadData[i].isosurfaceTriangleIndices.size();
isosurfaceTriangleIndices.reserve(sum);
for (PxU32 i = 0; i < perThreadData.size(); ++i)
{
IsosurfaceThreadData& d = perThreadData[i];
for (PxU32 j = 0; j < d.isosurfaceTriangleIndices.size(); ++j)
isosurfaceTriangleIndices.pushBack(d.isosurfaceTriangleIndices[j]);
}
}
}
}
| 68,852 | C++ | 29.931267 | 332 | 0.639008 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuExtendedBucketPruner.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxBitMap.h"
#include "GuExtendedBucketPruner.h"
#include "GuAABBTree.h"
#include "GuAABBTreeQuery.h"
#include "GuQuery.h"
#include "GuCallbackAdapter.h"
#include "GuSqInternal.h"
using namespace physx;
using namespace Gu;
#define EXT_NB_OBJECTS_PER_NODE 4
// PT: TODO: this is copied from SqBounds.h, should be either moved to Gu and shared or passed as a user parameter
#define SQ_PRUNER_EPSILON 0.005f
#define SQ_PRUNER_INFLATION (1.0f + SQ_PRUNER_EPSILON) // pruner test shape inflation (not narrow phase shape)
ExtendedBucketPruner::ExtendedBucketPruner(PxU64 contextID, CompanionPrunerType type, const PruningPool* pool) :
mCompanion (createCompanionPruner(contextID, type, pool)),
mPruningPool (pool),
mMainTree (NULL),
mMergedTrees (NULL),
mCurrentTreeIndex (0),
mTreesDirty (false)
{
// preallocated size for bounds, trees
mCurrentTreeCapacity = 32;
mBounds.init(mCurrentTreeCapacity);
mMergedTrees = PX_ALLOCATE(MergedTree, mCurrentTreeCapacity, "AABB trees");
mExtendedBucketPrunerMap.reserve(mCurrentTreeCapacity);
// create empty main tree
mMainTree = PX_NEW(AABBTree);
// create empty merge trees
for (PxU32 i = 0; i < mCurrentTreeCapacity; i++)
{
mMergedTrees[i].mTimeStamp = 0;
mMergedTrees[i].mTree = PX_NEW(AABBTree);
}
}
//////////////////////////////////////////////////////////////////////////
ExtendedBucketPruner::~ExtendedBucketPruner()
{
// release main tree
PX_DELETE(mMainTree);
// release merged trees
for (PxU32 i = 0; i < mCurrentTreeCapacity; i++)
{
AABBTree* aabbTree = mMergedTrees[i].mTree;
PX_DELETE(aabbTree);
}
mBounds.release();
PX_FREE(mMergedTrees);
PX_DELETE(mCompanion);
}
//////////////////////////////////////////////////////////////////////////
// release all objects in bucket pruner
void ExtendedBucketPruner::release()
{
if(mCompanion)
mCompanion->release();
mMainTreeUpdateMap.release();
mMergeTreeUpdateMap.release();
// release all objects from the map
mExtendedBucketPrunerMap.clear();
// release all merged trees
for (PxU32 i = 0; i < mCurrentTreeCapacity; i++)
{
mMergedTrees[i].mTimeStamp = 0;
mMergedTrees[i].mTree->release();
}
// reset current tree index
mCurrentTreeIndex = 0;
}
//////////////////////////////////////////////////////////////////////////
// Add a tree from a pruning structure
// 1. get new tree index
// 2. initialize merged tree, bounds
// 3. create update map for the merged tree
// 4. build new tree of trees from given trees bounds
// 5. add new objects into extended bucket pruner map
// 6. shift indices in the merged tree
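// The result is a two-level structure: the main tree is built over the root bounds of
// all merged trees, so a query first traverses the main tree and then descends only
// into those merged trees whose root bounds are hit.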
void ExtendedBucketPruner::addTree(const AABBTreeMergeData& mergeData, PxU32 timeStamp)
{
// check if we have to resize
if(mCurrentTreeIndex == mCurrentTreeCapacity)
{
resize(mCurrentTreeCapacity*2);
}
// get current merge tree index
const PxU32 mergeTreeIndex = mCurrentTreeIndex++;
// get payloads/userdata pointers - the pointers start at mIndicesOffset, that's where all
// objects were added before merge was called
const PrunerPayload* data = &mPruningPool->getObjects()[mergeData.mIndicesOffset];
// setup merged tree with the merge data and timestamp
mMergedTrees[mergeTreeIndex].mTimeStamp = timeStamp;
AABBTree& mergedTree = *mMergedTrees[mergeTreeIndex].mTree;
mergedTree.initTree(mergeData);
// set bounds
mBounds.getBounds()[mergeTreeIndex] = mergeData.getRootNode().mBV;
// update the temporary update map for the current merge tree; the map is used to set up the base extended bucket pruner map
mMergeTreeUpdateMap.initMap(mergeData.mNbIndices, mergedTree);
// create new base tree of trees
buildMainAABBTree();
// Add each object into extended bucket pruner hash map
for (PxU32 i = 0; i < mergeData.mNbIndices; i++)
{
ExtendedBucketPrunerData mapData;
mapData.mMergeIndex = mergeTreeIndex;
mapData.mTimeStamp = timeStamp;
PX_ASSERT(mMergeTreeUpdateMap[i] < mergedTree.getNbNodes());
// get node information from the merge tree update map
mapData.mSubTreeNode = mMergeTreeUpdateMap[i];
mExtendedBucketPrunerMap.insert(data[i], mapData);
}
// the merged tree indices need to be shifted now; we cannot shift them in init - the update map
// could not be constructed otherwise, as the indices would not start from 0. The indices
// need to be shifted by the offset from the pruning pool, where the new objects were added into the pruning pool.
mergedTree.shiftIndices(mergeData.mIndicesOffset);
#if PX_DEBUG
checkValidity();
#endif // PX_DEBUG
}
//////////////////////////////////////////////////////////////////////////
// Builds the new main AABB tree with given current active merged trees and its bounds
void ExtendedBucketPruner::buildMainAABBTree()
{
// create the AABB tree from given merged trees bounds
NodeAllocator nodeAllocator;
bool status = mMainTree->build(AABBTreeBuildParams(EXT_NB_OBJECTS_PER_NODE, mCurrentTreeIndex, &mBounds), nodeAllocator);
PX_UNUSED(status);
PX_ASSERT(status);
// Init main tree update map for the new main tree
mMainTreeUpdateMap.initMap(mCurrentTreeIndex, *mMainTree);
}
//////////////////////////////////////////////////////////////////////////
// resize internal memory, buffers
void ExtendedBucketPruner::resize(PxU32 size)
{
PX_ASSERT(size > mCurrentTreeCapacity);
mBounds.resize(size, mCurrentTreeCapacity);
// allocate new merged trees
MergedTree* newMergeTrees = PX_ALLOCATE(MergedTree, size, "AABB trees");
// copy previous merged trees
PxMemCopy(newMergeTrees, mMergedTrees, sizeof(MergedTree)*mCurrentTreeCapacity);
PX_FREE(mMergedTrees);
mMergedTrees = newMergeTrees;
// allocate new trees for merged trees
for (PxU32 i = mCurrentTreeCapacity; i < size; i++)
{
mMergedTrees[i].mTimeStamp = 0;
mMergedTrees[i].mTree = PX_NEW(AABBTree);
}
mCurrentTreeCapacity = size;
}
//////////////////////////////////////////////////////////////////////////
// Update object
bool ExtendedBucketPruner::updateObject(const PxBounds3& worldAABB, const PxTransform& transform, const PrunerPayload& object, PrunerHandle handle, const PoolIndex poolIndex)
{
const ExtendedBucketPrunerMap::Entry* extendedPrunerEntry = mExtendedBucketPrunerMap.find(object);
// if object is not in tree of trees, it is in bucket pruner core
if(!extendedPrunerEntry)
{
if(mCompanion)
mCompanion->updateObject(object, handle, worldAABB, transform, poolIndex);
}
else
{
const ExtendedBucketPrunerData& data = extendedPrunerEntry->second;
PX_ASSERT(data.mMergeIndex < mCurrentTreeIndex);
// update tree where objects belongs to
AABBTree& tree = *mMergedTrees[data.mMergeIndex].mTree;
PX_ASSERT(data.mSubTreeNode < tree.getNbNodes());
// mark for refit node in merged tree
tree.markNodeForRefit(data.mSubTreeNode);
PX_ASSERT(mMainTreeUpdateMap[data.mMergeIndex] < mMainTree->getNbNodes());
// mark for refit node in main aabb tree
mMainTree->markNodeForRefit(mMainTreeUpdateMap[data.mMergeIndex]);
mTreesDirty = true;
}
return true;
}
//////////////////////////////////////////////////////////////////////////
// refit merged nodes
// 1. refit nodes in merged trees
// 2. check if the root node is still valid after the refit - an edge case can occur
// where all objects were released and the root node becomes invalid;
// in this edge case we need to compact the merged trees array
// and create a new main AABB tree
// 3. If all merged trees bounds are valid - refit main tree
// 4. If bounds are invalid create new main AABB tree
void ExtendedBucketPruner::refitMarkedNodes(const PxBounds3* boxes)
{
// if no tree needs update early exit
if(!mTreesDirty)
return;
// refit trees and update bounds for main tree
PxU32 nbValidTrees = 0;
for (PxU32 i = mCurrentTreeIndex; i--; )
{
AABBTree& tree = *mMergedTrees[i].mTree;
tree.refitMarkedNodes(boxes);
const PxBounds3& bounds = tree.getNodes()[0].mBV;
// check if the bounds are valid; if all objects of the tree were released, the bounds
// will be invalid and we cannot use this tree anymore.
if(bounds.isValid())
{
nbValidTrees++;
}
mBounds.getBounds()[i] = bounds;
}
if(nbValidTrees == mCurrentTreeIndex)
{
// no tree has been removed refit main tree
mMainTree->refitMarkedNodes(mBounds.getBounds());
}
else
{
// edge case path: a tree does not have valid root node bounds - all objects from the tree were released
// we might even want to fire a perf warning here
// compact the tree array - no holes in the array, remember the swap position
PxU32* swapMap = PX_ALLOCATE(PxU32, (mCurrentTreeIndex + 1), "Swap Map");
PxU32 writeIndex = 0;
for (PxU32 i = 0; i < mCurrentTreeIndex; i++)
{
AABBTree& tree = *mMergedTrees[i].mTree;
if(tree.getNodes()[0].mBV.isValid())
{
// we have to store the tree into an empty location
if(i != writeIndex)
{
PX_ASSERT(writeIndex < i);
AABBTree* ptr = mMergedTrees[writeIndex].mTree;
mMergedTrees[writeIndex] = mMergedTrees[i];
mMergedTrees[i].mTree = ptr;
mBounds.getBounds()[writeIndex] = mBounds.getBounds()[i];
}
// remember the swap location
swapMap[i] = writeIndex;
writeIndex++;
}
else
{
// tree is not valid, release it
tree.release();
mMergedTrees[i].mTimeStamp = 0;
}
// remember the swap
swapMap[mCurrentTreeIndex] = i;
}
PX_ASSERT(writeIndex == nbValidTrees);
// new merged trees size
mCurrentTreeIndex = nbValidTrees;
if(mCurrentTreeIndex)
{
// trees have changed, we need to rebuild the main tree
buildMainAABBTree();
// fixup the object entries, the merge index has changed
for (ExtendedBucketPrunerMap::Iterator iter = mExtendedBucketPrunerMap.getIterator(); !iter.done(); ++iter)
{
ExtendedBucketPrunerData& data = iter->second;
PX_ASSERT(swapMap[data.mMergeIndex] < nbValidTrees);
data.mMergeIndex = swapMap[data.mMergeIndex];
}
}
else
{
// if there is no tree release the main tree
mMainTree->release();
}
PX_FREE(swapMap);
}
#if PX_DEBUG
checkValidity();
#endif
mTreesDirty = false;
}
//////////////////////////////////////////////////////////////////////////
// remove object
bool ExtendedBucketPruner::removeObject(const PrunerPayload& object, PrunerHandle handle, PxU32 objectIndex, const PrunerPayload& swapObject, PxU32 swapObjectIndex)
{
ExtendedBucketPrunerMap::Entry dataEntry;
// if object is not in tree of trees, it is in bucket pruner core
if (!mExtendedBucketPrunerMap.erase(object, dataEntry))
{
// we need to call swapIndex, as it might happen that the swapped object
// does belong to the extended bucket pruner, in which case the object's index
// needs to be swapped.
// do not call the additional companion pruner swap, that already happens during remove
swapIndex(objectIndex, swapObject, swapObjectIndex, false);
return mCompanion ? mCompanion->removeObject(object, handle, objectIndex, swapObjectIndex) : true;
}
else
{
const ExtendedBucketPrunerData& data = dataEntry.second;
// mark tree nodes where objects belongs to
AABBTree& tree = *mMergedTrees[data.mMergeIndex].mTree;
PX_ASSERT(data.mSubTreeNode < tree.getNbNodes());
// mark the merged tree for refit
tree.markNodeForRefit(data.mSubTreeNode);
PX_ASSERT(mMainTreeUpdateMap[data.mMergeIndex] < mMainTree->getNbNodes());
// mark the main tree for refit
mMainTree->markNodeForRefit(mMainTreeUpdateMap[data.mMergeIndex]);
// call invalidate object to swap the object indices in the merged trees
invalidateObject(data, objectIndex, swapObject, swapObjectIndex);
mTreesDirty = true;
}
#if PX_DEBUG
checkValidity();
#endif // PX_DEBUG
return true;
}
//////////////////////////////////////////////////////////////////////////
// invalidate object
// remove the objectIndex from the merged tree
void ExtendedBucketPruner::invalidateObject(const ExtendedBucketPrunerData& data, PxU32 objectIndex, const PrunerPayload& swapObject, PxU32 swapObjectIndex)
{
// get the merged tree
AABBTree& tree = *mMergedTrees[data.mMergeIndex].mTree;
PX_ASSERT(data.mSubTreeNode < tree.getNbNodes());
PX_ASSERT(tree.getNodes()[data.mSubTreeNode].isLeaf());
// get merged tree node
BVHNode& node0 = tree.getNodes()[data.mSubTreeNode];
const PxU32 nbPrims = node0.getNbRuntimePrimitives();
PX_ASSERT(nbPrims <= EXT_NB_OBJECTS_PER_NODE);
// retrieve the primitives pointer
PxU32* primitives = node0.getPrimitives(tree.getIndices());
PX_ASSERT(primitives);
// Look for desired pool index in the leaf
bool foundIt = false;
for (PxU32 i = 0; i < nbPrims; i++)
{
if (objectIndex == primitives[i])
{
foundIt = true;
const PxU32 last = nbPrims - 1;
node0.setNbRunTimePrimitives(last);
primitives[i] = INVALID_POOL_ID; // Mark primitive index as invalid in the node
// Swap within the leaf node. No need to update the mapping since they should all point
// to the same tree node anyway.
if (last != i)
PxSwap(primitives[i], primitives[last]);
break;
}
}
PX_ASSERT(foundIt);
PX_UNUSED(foundIt);
swapIndex(objectIndex, swapObject, swapObjectIndex);
}
// Swap object index
// if swapObject is in a merged tree its index needs to be swapped with objectIndex
void ExtendedBucketPruner::swapIndex(PxU32 objectIndex, const PrunerPayload& swapObject, PxU32 swapObjectIndex, bool corePrunerIncluded)
{
PX_UNUSED(corePrunerIncluded);
if (objectIndex == swapObjectIndex)
return;
const ExtendedBucketPrunerMap::Entry* extendedPrunerSwapEntry = mExtendedBucketPrunerMap.find(swapObject);
// if swapped object index is in extended pruner, we have to fix the primitives index
if (extendedPrunerSwapEntry)
{
const ExtendedBucketPrunerData& swapData = extendedPrunerSwapEntry->second;
AABBTree& swapTree = *mMergedTrees[swapData.mMergeIndex].mTree;
// With multiple primitives per leaf, tree nodes may very well be the same for different pool indices.
// However the pool indices may be the same when a swap has been skipped in the pruning pool, in which
// case there is nothing to do.
PX_ASSERT(swapData.mSubTreeNode < swapTree.getNbNodes());
PX_ASSERT(swapTree.getNodes()[swapData.mSubTreeNode].isLeaf());
BVHNode* node1 = swapTree.getNodes() + swapData.mSubTreeNode;
const PxU32 nbPrims = node1->getNbRuntimePrimitives();
PX_ASSERT(nbPrims <= EXT_NB_OBJECTS_PER_NODE);
// retrieve the primitives pointer
PxU32* primitives = node1->getPrimitives(swapTree.getIndices());
PX_ASSERT(primitives);
// look for desired pool index in the leaf
bool foundIt = false;
for (PxU32 i = 0; i < nbPrims; i++)
{
if (swapObjectIndex == primitives[i])
{
foundIt = true;
primitives[i] = objectIndex; // point node to the pool object moved to
break;
}
}
PX_ASSERT(foundIt);
PX_UNUSED(foundIt);
}
else
{
if(corePrunerIncluded)
if(mCompanion)
mCompanion->swapIndex(objectIndex, swapObjectIndex);
}
}
//////////////////////////////////////////////////////////////////////////
// Optimized removal of timestamped objects from the extended bucket pruner
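// The merged trees array is ordered by timestamp and never reordered, e.g. with
// timestamps [1, 1, 2, 3] removing timestamp 1 releases the first two trees and
// shifts the remaining trees to the front of the array.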
PxU32 ExtendedBucketPruner::removeMarkedObjects(PxU32 timeStamp)
{
// remove objects from the core bucket pruner
PxU32 retVal = mCompanion ? mCompanion->removeMarkedObjects(timeStamp) : 0;
// nothing to be removed
if(!mCurrentTreeIndex)
return retVal;
// if last merged tree is the timeStamp to remove, we can clear all
// this is safe as the merged trees array is time ordered, never shifted
if(mMergedTrees[mCurrentTreeIndex - 1].mTimeStamp == timeStamp)
{
retVal += mExtendedBucketPrunerMap.size();
cleanTrees();
return retVal;
}
// get the highest index in the merged trees array where the timeStamp matches,
// we then release all trees up to that index
PxU32 highestTreeIndex = 0xFFFFFFFF;
for (PxU32 i = 0; i < mCurrentTreeIndex; i++)
{
if(mMergedTrees[i].mTimeStamp == timeStamp)
highestTreeIndex = i;
else
break;
}
// if no timestamp found early exit
if(highestTreeIndex == 0xFFFFFFFF)
return retVal;
PX_ASSERT(highestTreeIndex < mCurrentTreeIndex);
// get offset, where valid trees start
const PxU32 mergeTreeOffset = highestTreeIndex + 1;
// shrink the array to merged trees with a valid timeStamp
mCurrentTreeIndex = mCurrentTreeIndex - mergeTreeOffset;
// go over trees and swap released trees with valid trees from the back (valid trees are at the back)
for (PxU32 i = 0; i < mCurrentTreeIndex; i++)
{
// store bounds, timestamp
mBounds.getBounds()[i] = mMergedTrees[mergeTreeOffset + i].mTree->getNodes()[0].mBV;
mMergedTrees[i].mTimeStamp = mMergedTrees[mergeTreeOffset + i].mTimeStamp;
// release the tree with timestamp
AABBTree* ptr = mMergedTrees[i].mTree;
ptr->release();
// store the valid tree
mMergedTrees[i].mTree = mMergedTrees[mergeTreeOffset + i].mTree;
// store the release tree at the offset
mMergedTrees[mergeTreeOffset + i].mTree = ptr;
mMergedTrees[mergeTreeOffset + i].mTimeStamp = 0;
}
// release the rest of the trees with not valid timestamp
for (PxU32 i = mCurrentTreeIndex; i <= highestTreeIndex; i++)
{
mMergedTrees[i].mTree->release();
mMergedTrees[i].mTimeStamp = 0;
}
// build new main AABB tree with only the trees that have a valid timeStamp
buildMainAABBTree();
// remove all unnecessary trees and map entries
bool removeEntry = false;
PxU32 numRemovedEntries = 0;
ExtendedBucketPrunerMap::EraseIterator eraseIterator = mExtendedBucketPrunerMap.getEraseIterator();
ExtendedBucketPrunerMap::Entry* entry = eraseIterator.eraseCurrentGetNext(removeEntry);
while (entry)
{
ExtendedBucketPrunerData& data = entry->second;
// data to be removed
if (data.mTimeStamp == timeStamp)
{
removeEntry = true;
numRemovedEntries++;
}
else
{
// update the merge index and main tree node index
PX_ASSERT(highestTreeIndex < data.mMergeIndex);
data.mMergeIndex -= mergeTreeOffset;
removeEntry = false;
}
entry = eraseIterator.eraseCurrentGetNext(removeEntry);
}
#if PX_DEBUG
checkValidity();
#endif // PX_DEBUG
// return the number of removed objects
return retVal + numRemovedEntries;
}
//////////////////////////////////////////////////////////////////////////
// clean all trees, all objects have been released
void ExtendedBucketPruner::cleanTrees()
{
for (PxU32 i = 0; i < mCurrentTreeIndex; i++)
{
mMergedTrees[i].mTree->release();
mMergedTrees[i].mTimeStamp = 0;
}
mExtendedBucketPrunerMap.clear();
mCurrentTreeIndex = 0;
mMainTree->release();
}
//////////////////////////////////////////////////////////////////////////
// shift origin
void ExtendedBucketPruner::shiftOrigin(const PxVec3& shift)
{
mMainTree->shiftOrigin(shift);
for(PxU32 i=0; i<mCurrentTreeIndex; i++)
mMergedTrees[i].mTree->shiftOrigin(shift);
if(mCompanion)
mCompanion->shiftOrigin(shift);
}
//////////////////////////////////////////////////////////////////////////
// Queries implementation
//////////////////////////////////////////////////////////////////////////
// Raycast/sweeps callback for main AABB tree
template<const bool tInflate>
struct MainTreeRaycastPrunerCallback
{
MainTreeRaycastPrunerCallback(const PxVec3& origin, const PxVec3& unitDir, const PxVec3& extent, PrunerRaycastCallback& prunerCallback, const PruningPool* pool, const MergedTree* mergedTrees)
: mOrigin(origin), mUnitDir(unitDir), mExtent(extent), mPrunerCallback(prunerCallback), mPruningPool(pool), mMergedTrees(mergedTrees)
{
}
bool invoke(PxReal& distance, PxU32 primIndex)
{
const AABBTree* aabbTree = mMergedTrees[primIndex].mTree;
// raycast the merged tree
RaycastCallbackAdapter pcb(mPrunerCallback, *mPruningPool);
return AABBTreeRaycast<tInflate, true, AABBTree, BVHNode, RaycastCallbackAdapter>()(mPruningPool->getCurrentAABBTreeBounds(), *aabbTree, mOrigin, mUnitDir, distance, mExtent, pcb);
}
PX_NOCOPY(MainTreeRaycastPrunerCallback)
private:
const PxVec3& mOrigin;
const PxVec3& mUnitDir;
const PxVec3& mExtent;
PrunerRaycastCallback& mPrunerCallback;
const PruningPool* mPruningPool;
const MergedTree* mMergedTrees;
};
//////////////////////////////////////////////////////////////////////////
// raycast against the extended bucket pruner
bool ExtendedBucketPruner::raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& prunerCallback) const
{
bool again = mCompanion ? mCompanion->raycast(origin, unitDir, inOutDistance, prunerCallback) : true;
if(again && mExtendedBucketPrunerMap.size())
{
const PxVec3 extent(0.0f);
// main tree callback
MainTreeRaycastPrunerCallback<false> pcb(origin, unitDir, extent, prunerCallback, mPruningPool, mMergedTrees);
// traverse the main tree
again = AABBTreeRaycast<false, true, AABBTree, BVHNode, MainTreeRaycastPrunerCallback<false>>()(mBounds, *mMainTree, origin, unitDir, inOutDistance, extent, pcb);
}
return again;
}
//////////////////////////////////////////////////////////////////////////
// overlap main tree callback
template<typename Test>
struct MainTreeOverlapPrunerCallback
{
MainTreeOverlapPrunerCallback(const Test& test, PrunerOverlapCallback& prunerCallback, const PruningPool* pool, const MergedTree* mergedTrees)
: mTest(test), mPrunerCallback(prunerCallback), mPruningPool(pool), mMergedTrees(mergedTrees)
{
}
bool invoke(PxU32 primIndex)
{
const AABBTree* aabbTree = mMergedTrees[primIndex].mTree;
// overlap the merged tree
OverlapCallbackAdapter pcb(mPrunerCallback, *mPruningPool);
return AABBTreeOverlap<true, Test, AABBTree, BVHNode, OverlapCallbackAdapter>()(mPruningPool->getCurrentAABBTreeBounds(), *aabbTree, mTest, pcb);
}
PX_NOCOPY(MainTreeOverlapPrunerCallback)
private:
const Test& mTest;
PrunerOverlapCallback& mPrunerCallback;
const PruningPool* mPruningPool;
const MergedTree* mMergedTrees;
};
//////////////////////////////////////////////////////////////////////////
// overlap implementation
bool ExtendedBucketPruner::overlap(const ShapeData& queryVolume, PrunerOverlapCallback& prunerCallback) const
{
bool again = mCompanion ? mCompanion->overlap(queryVolume, prunerCallback) : true;
if(again && mExtendedBucketPrunerMap.size())
{
switch (queryVolume.getType())
{
case PxGeometryType::eBOX:
{
if (queryVolume.isOBB())
{
const DefaultOBBAABBTest test(queryVolume);
MainTreeOverlapPrunerCallback<OBBAABBTest> pcb(test, prunerCallback, mPruningPool, mMergedTrees);
again = AABBTreeOverlap<true, OBBAABBTest, AABBTree, BVHNode, MainTreeOverlapPrunerCallback<OBBAABBTest>>()(mBounds, *mMainTree, test, pcb);
}
else
{
const DefaultAABBAABBTest test(queryVolume);
MainTreeOverlapPrunerCallback<AABBAABBTest> pcb(test, prunerCallback, mPruningPool, mMergedTrees);
again = AABBTreeOverlap<true, AABBAABBTest, AABBTree, BVHNode, MainTreeOverlapPrunerCallback<AABBAABBTest>>()(mBounds, *mMainTree, test, pcb);
}
}
break;
case PxGeometryType::eCAPSULE:
{
const DefaultCapsuleAABBTest test(queryVolume, SQ_PRUNER_INFLATION);
MainTreeOverlapPrunerCallback<CapsuleAABBTest> pcb(test, prunerCallback, mPruningPool, mMergedTrees);
again = AABBTreeOverlap<true, CapsuleAABBTest, AABBTree, BVHNode, MainTreeOverlapPrunerCallback<CapsuleAABBTest>>()(mBounds, *mMainTree, test, pcb);
}
break;
case PxGeometryType::eSPHERE:
{
const DefaultSphereAABBTest test(queryVolume);
MainTreeOverlapPrunerCallback<SphereAABBTest> pcb(test, prunerCallback, mPruningPool, mMergedTrees);
again = AABBTreeOverlap<true, SphereAABBTest, AABBTree, BVHNode, MainTreeOverlapPrunerCallback<SphereAABBTest>>()(mBounds, *mMainTree, test, pcb);
}
break;
case PxGeometryType::eCONVEXMESH:
{
const DefaultOBBAABBTest test(queryVolume);
MainTreeOverlapPrunerCallback<OBBAABBTest> pcb(test, prunerCallback, mPruningPool, mMergedTrees);
again = AABBTreeOverlap<true, OBBAABBTest, AABBTree, BVHNode, MainTreeOverlapPrunerCallback<OBBAABBTest>>()(mBounds, *mMainTree, test, pcb);
}
break;
default:
PX_ALWAYS_ASSERT_MESSAGE("unsupported overlap query volume geometry type");
}
}
return again;
}
//////////////////////////////////////////////////////////////////////////
// sweep implementation
bool ExtendedBucketPruner::sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& prunerCallback) const
{
bool again = mCompanion ? mCompanion->sweep(queryVolume, unitDir, inOutDistance, prunerCallback) : true;
if(again && mExtendedBucketPrunerMap.size())
{
const PxBounds3& aabb = queryVolume.getPrunerInflatedWorldAABB();
const PxVec3 extents = aabb.getExtents();
const PxVec3 center = aabb.getCenter();
MainTreeRaycastPrunerCallback<true> pcb(center, unitDir, extents, prunerCallback, mPruningPool, mMergedTrees);
again = AABBTreeRaycast<true, true, AABBTree, BVHNode, MainTreeRaycastPrunerCallback<true>>()(mBounds, *mMainTree, center, unitDir, inOutDistance, extents, pcb);
}
return again;
}
//////////////////////////////////////////////////////////////////////////
void ExtendedBucketPruner::getGlobalBounds(PxBounds3& bounds) const
{
if(mCompanion)
mCompanion->getGlobalBounds(bounds);
else
bounds.setEmpty();
if(mExtendedBucketPrunerMap.size() && mMainTree && mMainTree->getNodes())
bounds.include(mMainTree->getNodes()->mBV);
}
//////////////////////////////////////////////////////////////////////////
void ExtendedBucketPruner::visualize(PxRenderOutput& out, PxU32 color) const
{
visualizeTree(out, color, mMainTree);
for(PxU32 i=0; i<mCurrentTreeIndex; i++)
visualizeTree(out, color, mMergedTrees[i].mTree);
if(mCompanion)
mCompanion->visualize(out, color);
}
//////////////////////////////////////////////////////////////////////////
#if PX_DEBUG
// extended bucket pruner validity check
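// The check verifies three invariants: every merged tree is referenced exactly once
// from a main-tree leaf, every pooled object is referenced exactly once from a
// merged-tree leaf, and the bookkeeping (bounds, update map, pruner map) agrees with both.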
bool ExtendedBucketPruner::checkValidity()
{
PxBitMap testBitmap;
testBitmap.resizeAndClear(mCurrentTreeIndex);
for (PxU32 i = 0; i < mMainTree->getNbNodes(); i++)
{
const BVHNode& node = mMainTree->getNodes()[i];
if(node.isLeaf())
{
const PxU32 nbPrims = node.getNbRuntimePrimitives();
PX_ASSERT(nbPrims <= EXT_NB_OBJECTS_PER_NODE);
const PxU32* primitives = node.getPrimitives(mMainTree->getIndices());
for (PxU32 j = 0; j < nbPrims; j++)
{
const PxU32 index = primitives[j];
// check if index is correct
PX_ASSERT(index < mCurrentTreeIndex);
					// mark the index in the test bitmap; each index must be set exactly once - every merged tree must appear in the main tree
PX_ASSERT(testBitmap.test(index) == PxIntFalse);
testBitmap.set(index);
}
}
}
PxBitMap mergeTreeTestBitmap;
mergeTreeTestBitmap.resizeAndClear(mPruningPool->getNbActiveObjects());
for (PxU32 i = 0; i < mCurrentTreeIndex; i++)
{
// check if bounds are the same as the merged tree root bounds
PX_ASSERT(mBounds.getBounds()[i].maximum.x == mMergedTrees[i].mTree->getNodes()[0].mBV.maximum.x);
PX_ASSERT(mBounds.getBounds()[i].maximum.y == mMergedTrees[i].mTree->getNodes()[0].mBV.maximum.y);
PX_ASSERT(mBounds.getBounds()[i].maximum.z == mMergedTrees[i].mTree->getNodes()[0].mBV.maximum.z);
PX_ASSERT(mBounds.getBounds()[i].minimum.x == mMergedTrees[i].mTree->getNodes()[0].mBV.minimum.x);
PX_ASSERT(mBounds.getBounds()[i].minimum.y == mMergedTrees[i].mTree->getNodes()[0].mBV.minimum.y);
PX_ASSERT(mBounds.getBounds()[i].minimum.z == mMergedTrees[i].mTree->getNodes()[0].mBV.minimum.z);
// check each tree
const AABBTree& mergedTree = *mMergedTrees[i].mTree;
for (PxU32 j = 0; j < mergedTree.getNbNodes(); j++)
{
const BVHNode& node = mergedTree.getNodes()[j];
if (node.isLeaf())
{
const PxU32 nbPrims = node.getNbRuntimePrimitives();
PX_ASSERT(nbPrims <= EXT_NB_OBJECTS_PER_NODE);
const PxU32* primitives = node.getPrimitives(mergedTree.getIndices());
for (PxU32 k = 0; k < nbPrims; k++)
{
const PxU32 index = primitives[k];
// check if index is correct
PX_ASSERT(index < mPruningPool->getNbActiveObjects());
					// mark the index in the test bitmap; each index must be set exactly once - every object must appear in exactly one merged tree
PX_ASSERT(mergeTreeTestBitmap.test(index) == PxIntFalse);
mergeTreeTestBitmap.set(index);
const PrunerPayload& payload = mPruningPool->getObjects()[index];
const ExtendedBucketPrunerMap::Entry* extendedPrunerSwapEntry = mExtendedBucketPrunerMap.find(payload);
PX_ASSERT(extendedPrunerSwapEntry);
const ExtendedBucketPrunerData& data = extendedPrunerSwapEntry->second;
PX_ASSERT(data.mMergeIndex == i);
PX_ASSERT(data.mSubTreeNode == j);
}
}
}
}
for (PxU32 i = mCurrentTreeIndex; i < mCurrentTreeCapacity; i++)
{
PX_ASSERT(mMergedTrees[i].mTree->getIndices() == NULL);
PX_ASSERT(mMergedTrees[i].mTree->getNodes() == NULL);
}
for (ExtendedBucketPrunerMap::Iterator iter = mExtendedBucketPrunerMap.getIterator(); !iter.done(); ++iter)
{
const ExtendedBucketPrunerData& data = iter->second;
PX_ASSERT(mMainTreeUpdateMap[data.mMergeIndex] < mMainTree->getNbNodes());
PX_ASSERT(data.mMergeIndex < mCurrentTreeIndex);
PX_ASSERT(data.mSubTreeNode < mMergedTrees[data.mMergeIndex].mTree->getNbNodes());
}
return true;
}
#endif
| 30,845 | C++ | 34.577855 | 192 | 0.70462 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuBounds.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuBounds.h"
#include "geometry/PxBoxGeometry.h"
#include "geometry/PxSphereGeometry.h"
#include "geometry/PxPlaneGeometry.h"
#include "geometry/PxConvexMeshGeometry.h"
#include "geometry/PxTetrahedronMeshGeometry.h"
#include "geometry/PxTriangleMeshGeometry.h"
#include "geometry/PxHeightFieldGeometry.h"
#include "geometry/PxCustomGeometry.h"
#include "GuInternal.h"
#include "CmUtils.h"
#include "GuConvexMesh.h"
#include "GuConvexMeshData.h"
#include "GuTriangleMesh.h"
#include "GuTetrahedronMesh.h"
#include "GuHeightFieldData.h"
#include "GuHeightField.h"
#include "GuConvexUtilsInternal.h"
#include "GuBoxConversion.h"
using namespace physx;
using namespace Gu;
using namespace aos;
// Compute the global box enclosing the bounds of the given primitives. The result is written to 'bounds'.
void Gu::computeGlobalBox(PxBounds3& bounds, PxU32 nbPrims, const PxBounds3* PX_RESTRICT boxes, const PxU32* PX_RESTRICT primitives)
{
PX_ASSERT(boxes);
PX_ASSERT(primitives);
PX_ASSERT(nbPrims);
Vec4V minV = V4LoadU(&boxes[primitives[0]].minimum.x);
Vec4V maxV = V4LoadU(&boxes[primitives[0]].maximum.x);
for (PxU32 i=1; i<nbPrims; i++)
{
const PxU32 index = primitives[i];
minV = V4Min(minV, V4LoadU(&boxes[index].minimum.x));
maxV = V4Max(maxV, V4LoadU(&boxes[index].maximum.x));
}
StoreBounds(bounds, minV, maxV);
}
void Gu::computeBoundsAroundVertices(PxBounds3& bounds, PxU32 nbVerts, const PxVec3* PX_RESTRICT verts)
{
// PT: we can safely V4LoadU the first N-1 vertices. We must V3LoadU the last vertex, to make sure we don't read
// invalid memory. Since we have to special-case that last vertex anyway, we reuse that code to also initialize
// the minV/maxV values (bypassing the need for a 'setEmpty()' initialization).
if(!nbVerts)
{
bounds.setEmpty();
return;
}
PxU32 nbSafe = nbVerts-1;
// PT: read last (unsafe) vertex using V3LoadU, initialize minV/maxV
const Vec4V lastVertexV = Vec4V_From_Vec3V(V3LoadU(&verts[nbSafe].x));
Vec4V minV = lastVertexV;
Vec4V maxV = lastVertexV;
// PT: read N-1 first (safe) vertices using V4LoadU
while(nbSafe--)
{
const Vec4V vertexV = V4LoadU(&verts->x);
verts++;
minV = V4Min(minV, vertexV);
maxV = V4Max(maxV, vertexV);
}
StoreBounds(bounds, minV, maxV);
}
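// For reference, a minimal scalar equivalent of the function above (illustrative
// sketch only, not part of the build - it skips the SIMD safe-load trick entirely):
#if 0
static void computeBoundsAroundVerticesScalar(PxBounds3& bounds, PxU32 nbVerts, const PxVec3* verts)
{
	bounds.setEmpty();
	for(PxU32 i=0; i<nbVerts; i++)
		bounds.include(verts[i]);	// grows min/max one vertex at a time
}
#endif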
void Gu::computeLocalBoundsAndGeomEpsilon(const PxVec3* vertices, PxU32 nbVertices, PxBounds3& localBounds, PxReal& geomEpsilon)
{
	computeBoundsAroundVertices(localBounds, nbVertices, vertices);
// Derive a good geometric epsilon from local bounds. We must do this before bounds extrusion for heightfields.
//
// From Charles Bloom:
// "Epsilon must be big enough so that the consistency condition abs(D(Hit))
// <= Epsilon is satisfied for all queries. You want the smallest epsilon
// you can have that meets that constraint. Normal floats have a 24 bit
// mantissa. When you do any float addition, you may have round-off error
// that makes the result off by roughly 2^-24 * result. Our result is
// scaled by the position values. If our world is strictly required to be
// in a box of world size W (each coordinate in -W to W), then the maximum
// error is 2^-24 * W. Thus Epsilon must be at least >= 2^-24 * W. If
// you're doing coordinate transforms, that may scale your error up by some
// amount, so you'll need a bigger epsilon. In general something like
// 2^-22*W is reasonable. If you allow scaled transforms, it needs to be
// something like 2^-22*W*MAX_SCALE."
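	// Worked example: if the local bounds span [-1000, 1000] units on each axis
	// (W = 1000), the epsilon computed below is 1000 * 2^-22 = 1000/4194304 ~= 2.4e-4.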
// PT: TODO: runtime checkings for this
PxReal eps = 0.0f;
for (PxU32 i = 0; i < 3; i++)
eps = PxMax(eps, PxMax(PxAbs(localBounds.maximum[i]), PxAbs(localBounds.minimum[i])));
eps *= powf(2.0f, -22.0f);
geomEpsilon = eps;
}
static PX_FORCE_INLINE void transformNoEmptyTest(PxVec3p& c, PxVec3p& ext, const PxMat33& rot, const PxVec3& pos, const CenterExtentsPadded& bounds)
{
c = rot.transform(bounds.mCenter) + pos;
ext = Cm::basisExtent(rot.column0, rot.column1, rot.column2, bounds.mExtents);
}
// PT: this one may have duplicates in GuBV4_BoxSweep_Internal.h & GuBV4_Raycast.cpp
static PX_FORCE_INLINE Vec4V multiply3x3V(const Vec4V p, const PxMat33Padded& mat_Padded)
{
Vec4V ResV = V4Scale(V4LoadU(&mat_Padded.column0.x), V4GetX(p));
ResV = V4Add(ResV, V4Scale(V4LoadU(&mat_Padded.column1.x), V4GetY(p)));
ResV = V4Add(ResV, V4Scale(V4LoadU(&mat_Padded.column2.x), V4GetZ(p)));
return ResV;
}
static PX_FORCE_INLINE void transformNoEmptyTestV(PxVec3p& c, PxVec3p& ext, const PxMat33Padded& rot, const PxVec3& pos, const CenterExtentsPadded& bounds)
{
const Vec4V boundsCenterV = V4LoadU(&bounds.mCenter.x); // PT: this load is safe since extents follow center in the class
// PT: unfortunately we can't V4LoadU 'pos' directly (it can come directly from users!). So we have to live with this for now:
const Vec4V posV = Vec4V_From_Vec3V(V3LoadU(&pos.x));
// PT: but eventually we'd like to use the "unsafe" version (e.g. by switching p&q in PxTransform), which would save 6 instructions on Win32
const Vec4V cV = V4Add(multiply3x3V(boundsCenterV, rot), posV);
// const Vec4V cV = V4Add(multiply3x3V(boundsCenterV, rot), V4LoadU(&pos.x)); // ### unsafe
V4StoreU(cV, &c.x);
// extended basis vectors
const Vec4V boundsExtentsV = V4LoadU(&bounds.mExtents.x); // PT: this load is safe since bounds are padded
const Vec4V c0V = V4Scale(V4LoadU(&rot.column0.x), V4GetX(boundsExtentsV));
const Vec4V c1V = V4Scale(V4LoadU(&rot.column1.x), V4GetY(boundsExtentsV));
const Vec4V c2V = V4Scale(V4LoadU(&rot.column2.x), V4GetZ(boundsExtentsV));
// find combination of base vectors that produces max. distance for each component = sum of abs()
Vec4V extentsV = V4Add(V4Abs(c0V), V4Abs(c1V));
extentsV = V4Add(extentsV, V4Abs(c2V));
V4StoreU(extentsV, &ext.x);
}
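// Scalar version of the extent computation above, for reference (illustrative
// sketch only, not part of the build):
#if 0
static PxVec3 rotatedBoxExtents(const PxMat33& rot, const PxVec3& e)
{
	// each world-space extent is the sum of the absolute contributions of the
	// three (scaled) basis vectors - see also Cm::basisExtent
	return PxVec3(	PxAbs(rot.column0.x*e.x) + PxAbs(rot.column1.x*e.y) + PxAbs(rot.column2.x*e.z),
					PxAbs(rot.column0.y*e.x) + PxAbs(rot.column1.y*e.y) + PxAbs(rot.column2.y*e.z),
					PxAbs(rot.column0.z*e.x) + PxAbs(rot.column1.z*e.y) + PxAbs(rot.column2.z*e.z));
}
#endif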
static PX_FORCE_INLINE PxU32 isNonIdentity(const PxVec3& scale)
{
#define IEEE_1_0 0x3f800000 //!< integer representation of 1.0
const PxU32* binary = reinterpret_cast<const PxU32*>(&scale.x);
return (binary[0] - IEEE_1_0)|(binary[1] - IEEE_1_0)|(binary[2] - IEEE_1_0);
}
// PT: please don't inline this one - 300+ lines of rarely used code
static void computeScaledMatrix(PxMat33Padded& rot, const PxMeshScale& scale)
{
rot = rot * Cm::toMat33(scale);
}
static PX_FORCE_INLINE void transformNoEmptyTest(PxVec3p& c, PxVec3p& ext, const PxTransform& transform, const PxMeshScale& scale, const CenterExtentsPadded& bounds)
{
PxMat33Padded rot(transform.q);
if(isNonIdentity(scale.scale))
computeScaledMatrix(rot, scale);
transformNoEmptyTestV(c, ext, rot, transform.p, bounds);
}
static PX_FORCE_INLINE void transformNoEmptyTest(PxVec3p& c, PxVec3p& ext, const PxVec3& pos, const PxMat33Padded& rot, const PxMeshScale& scale, const CenterExtentsPadded& bounds)
{
if(scale.isIdentity())
transformNoEmptyTest(c, ext, rot, pos, bounds);
else
transformNoEmptyTest(c, ext, rot * Cm::toMat33(scale), pos, bounds);
}
static void computeMeshBounds(const PxTransform& pose, const CenterExtentsPadded* PX_RESTRICT localSpaceBounds, const PxMeshScale& meshScale, PxVec3p& origin, PxVec3p& extent)
{
transformNoEmptyTest(origin, extent, pose, meshScale, *localSpaceBounds);
}
static void computePlaneBounds(PxBounds3& bounds, const PxTransform& pose, float contactOffset, float inflation)
{
// PT: A plane is infinite, so usually the bounding box covers the whole world.
// Now, in particular cases when the plane is axis-aligned, we can take
// advantage of this to compute a smaller bounding box anyway.
// PT: we use PX_MAX_BOUNDS_EXTENTS to be compatible with PxBounds3::setMaximal,
// and to make sure that the value doesn't collide with the BP's sentinels.
const PxF32 bigValue = PX_MAX_BOUNDS_EXTENTS;
// const PxF32 bigValue = 1000000.0f;
PxVec3 minPt = PxVec3(-bigValue, -bigValue, -bigValue);
PxVec3 maxPt = PxVec3(bigValue, bigValue, bigValue);
const PxVec3 planeNormal = pose.q.getBasisVector0();
const PxPlane plane(pose.p, planeNormal);
const float nx = PxAbs(planeNormal.x);
const float ny = PxAbs(planeNormal.y);
const float nz = PxAbs(planeNormal.z);
const float epsilon = 1e-6f;
const float oneMinusEpsilon = 1.0f - epsilon;
if(nx>oneMinusEpsilon && ny<epsilon && nz<epsilon)
{
if(planeNormal.x>0.0f) maxPt.x = -plane.d + contactOffset;
else minPt.x = plane.d - contactOffset;
}
else if(nx<epsilon && ny>oneMinusEpsilon && nz<epsilon)
{
if(planeNormal.y>0.0f) maxPt.y = -plane.d + contactOffset;
else minPt.y = plane.d - contactOffset;
}
else if(nx<epsilon && ny<epsilon && nz>oneMinusEpsilon)
{
if(planeNormal.z>0.0f) maxPt.z = -plane.d + contactOffset;
else minPt.z = plane.d - contactOffset;
}
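	// Worked example: a plane posed at pose.p = (5,0,0) with normal +X gives
	// plane.d = -5, so maxPt.x becomes 5 + contactOffset and the half-space
	// x <= 5 (+ offset) is covered, while the other axes stay at PX_MAX_BOUNDS_EXTENTS.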
// PT: it is important to compute the min/max form directly without going through the
// center/extents intermediate form. With PX_MAX_BOUNDS_EXTENTS, those back-and-forth
// computations destroy accuracy.
	// PT: inflation actually destroys the bounds here. We keep it to please UTs but this is broken (DE10595).
// (e.g. for SQ 1% of PX_MAX_BOUNDS_EXTENTS is still a huge number, effectively making the AABB infinite and defeating the point of the above computation)
if(inflation!=1.0f)
{
const PxVec3 c = (maxPt + minPt)*0.5f;
const PxVec3 e = (maxPt - minPt)*0.5f*inflation;
minPt = c - e;
maxPt = c + e;
}
bounds.minimum = minPt;
bounds.maximum = maxPt;
}
static PX_FORCE_INLINE void inflateBounds(PxBounds3& bounds, const PxVec3p& origin, const PxVec3p& extents, float contactOffset, float inflation)
{
Vec4V extentsV = V4LoadU(&extents.x);
extentsV = V4Add(extentsV, V4Load(contactOffset));
extentsV = V4Scale(extentsV, FLoad(inflation));
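	// i.e. final extents = (extents + contactOffset) * inflation, per component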
const Vec4V originV = V4LoadU(&origin.x);
const Vec4V minV = V4Sub(originV, extentsV);
const Vec4V maxV = V4Add(originV, extentsV);
StoreBounds(bounds, minV, maxV);
}
static PX_FORCE_INLINE Vec4V basisExtentV(const PxMat33Padded& basis, const PxVec3& extent, float offset, float inflation)
{
// extended basis vectors
const Vec4V c0V = V4Scale(V4LoadU(&basis.column0.x), FLoad(extent.x));
const Vec4V c1V = V4Scale(V4LoadU(&basis.column1.x), FLoad(extent.y));
const Vec4V c2V = V4Scale(V4LoadU(&basis.column2.x), FLoad(extent.z));
// find combination of base vectors that produces max. distance for each component = sum of abs()
Vec4V extentsV = V4Add(V4Abs(c0V), V4Abs(c1V));
extentsV = V4Add(extentsV, V4Abs(c2V));
extentsV = V4Add(extentsV, V4Load(offset));
extentsV = V4Scale(extentsV, FLoad(inflation));
return extentsV;
}
static PX_FORCE_INLINE void computeMeshBounds(PxBounds3& bounds, float contactOffset, float inflation, const PxTransform& pose, const CenterExtentsPadded* PX_RESTRICT localSpaceBounds, const PxMeshScale& scale)
{
PxVec3p origin, extents;
computeMeshBounds(pose, localSpaceBounds, scale, origin, extents);
::inflateBounds(bounds, origin, extents, contactOffset, inflation);
}
void Gu::computeTightBounds(PxBounds3& bounds, PxU32 nb, const PxVec3* PX_RESTRICT v, const PxTransform& pose, const PxMeshScale& scale, float contactOffset, float inflation)
{
if(!nb)
{
bounds.setEmpty();
return;
}
PxMat33Padded rot(pose.q);
if(isNonIdentity(scale.scale))
computeScaledMatrix(rot, scale);
// PT: we can safely V4LoadU the first N-1 vertices. We must V3LoadU the last vertex, to make sure we don't read
// invalid memory. Since we have to special-case that last vertex anyway, we reuse that code to also initialize
// the minV/maxV values (bypassing the need for a 'setEmpty()' initialization).
PxU32 nbSafe = nb-1;
// PT: read last (unsafe) vertex using V3LoadU, initialize minV/maxV
const Vec4V lastVertexV = multiply3x3V(Vec4V_From_Vec3V(V3LoadU(&v[nbSafe].x)), rot);
Vec4V minV = lastVertexV;
Vec4V maxV = lastVertexV;
// PT: read N-1 first (safe) vertices using V4LoadU
while(nbSafe--)
{
const Vec4V vertexV = multiply3x3V(V4LoadU(&v->x), rot);
v++;
minV = V4Min(minV, vertexV);
maxV = V4Max(maxV, vertexV);
}
const Vec4V offsetV = V4Load(contactOffset);
minV = V4Sub(minV, offsetV);
maxV = V4Add(maxV, offsetV);
const Vec4V posV = Vec4V_From_Vec3V(V3LoadU(&pose.p.x));
maxV = V4Add(maxV, posV);
minV = V4Add(minV, posV);
// Inflation
{
const Vec4V centerV = V4Scale(V4Add(maxV, minV), FLoad(0.5f));
const Vec4V extentsV = V4Scale(V4Sub(maxV, minV), FLoad(0.5f*inflation));
maxV = V4Add(centerV, extentsV);
minV = V4Sub(centerV, extentsV);
}
StoreBounds(bounds, minV, maxV);
}
void Gu::computeBounds(PxBounds3& bounds, const PxGeometry& geometry, const PxTransform& pose, float contactOffset, float inflation)
{
// Box, Convex, Mesh and HeightField will compute local bounds and pose to world space.
// Sphere, Capsule & Plane will compute world space bounds directly.
switch(geometry.getType())
{
case PxGeometryType::eSPHERE:
{
const PxSphereGeometry& shape = static_cast<const PxSphereGeometry&>(geometry);
const PxVec3 extents((shape.radius+contactOffset)*inflation);
bounds.minimum = pose.p - extents;
bounds.maximum = pose.p + extents;
}
break;
case PxGeometryType::ePLANE:
{
computePlaneBounds(bounds, pose, contactOffset, inflation);
}
break;
case PxGeometryType::eCAPSULE:
{
computeCapsuleBounds(bounds, static_cast<const PxCapsuleGeometry&>(geometry), pose, contactOffset, inflation);
}
break;
case PxGeometryType::eBOX:
{
const PxBoxGeometry& shape = static_cast<const PxBoxGeometry&>(geometry);
const PxVec3p origin(pose.p);
const PxMat33Padded basis(pose.q);
const Vec4V extentsV = basisExtentV(basis, shape.halfExtents, contactOffset, inflation);
const Vec4V originV = V4LoadU(&origin.x);
const Vec4V minV = V4Sub(originV, extentsV);
const Vec4V maxV = V4Add(originV, extentsV);
StoreBounds(bounds, minV, maxV);
}
break;
case PxGeometryType::eCONVEXMESH:
{
const PxConvexMeshGeometry& shape = static_cast<const PxConvexMeshGeometry&>(geometry);
const Gu::ConvexHullData& hullData = static_cast<const Gu::ConvexMesh*>(shape.convexMesh)->getHull();
const bool useTightBounds = shape.meshFlags & PxConvexMeshGeometryFlag::eTIGHT_BOUNDS;
if(useTightBounds)
computeTightBounds(bounds, hullData.mNbHullVertices, hullData.getHullVertices(), pose, shape.scale, contactOffset, inflation);
else
computeMeshBounds(bounds, contactOffset, inflation, pose, &hullData.getPaddedBounds(), shape.scale);
}
break;
case PxGeometryType::eTRIANGLEMESH:
{
const PxTriangleMeshGeometry& shape = static_cast<const PxTriangleMeshGeometry&>(geometry);
const TriangleMesh* triangleMesh = static_cast<const TriangleMesh*>(shape.triangleMesh);
const bool useTightBounds = shape.meshFlags & PxMeshGeometryFlag::eTIGHT_BOUNDS;
if(useTightBounds)
computeTightBounds(bounds, triangleMesh->getNbVerticesFast(), triangleMesh->getVerticesFast(), pose, shape.scale, contactOffset, inflation);
else
computeMeshBounds(bounds, contactOffset, inflation, pose, &triangleMesh->getPaddedBounds(), shape.scale);
}
break;
case PxGeometryType::eHEIGHTFIELD:
{
const PxHeightFieldGeometry& shape = static_cast<const PxHeightFieldGeometry&>(geometry);
computeMeshBounds(bounds, contactOffset, inflation, pose, &static_cast<const Gu::HeightField*>(shape.heightField)->getData().getPaddedBounds(), PxMeshScale(PxVec3(shape.rowScale, shape.heightScale, shape.columnScale)));
}
break;
case PxGeometryType::eTETRAHEDRONMESH:
{
const PxTetrahedronMeshGeometry& shape = static_cast<const PxTetrahedronMeshGeometry&>(geometry);
computeMeshBounds(bounds, contactOffset, inflation, pose, &static_cast<const Gu::TetrahedronMesh*>(shape.tetrahedronMesh)->getPaddedBounds(), PxMeshScale());
}
break;
case PxGeometryType::ePARTICLESYSTEM:
{
// implement!
PX_ASSERT(0);
}
break;
case PxGeometryType::eHAIRSYSTEM:
{
// jcarius: Hairsystem bounds only available on GPU
bounds.setEmpty();
}
break;
case PxGeometryType::eCUSTOM:
{
const PxCustomGeometry& shape = static_cast<const PxCustomGeometry&>(geometry);
PxVec3p centre(0), extents(0);
if (shape.callbacks)
{
const PxBounds3 b = shape.callbacks->getLocalBounds(shape);
centre = b.getCenter(); extents = b.getExtents();
}
const PxVec3p origin(pose.transform(centre));
const PxMat33Padded basis(pose.q);
const Vec4V extentsV = basisExtentV(basis, extents, contactOffset, inflation);
const Vec4V originV = V4LoadU(&origin.x);
const Vec4V minV = V4Sub(originV, extentsV);
const Vec4V maxV = V4Add(originV, extentsV);
StoreBounds(bounds, minV, maxV);
}
break;
default:
{
PX_ASSERT(0);
PxGetFoundation().error(PxErrorCode::eINTERNAL_ERROR, PX_FL, "Gu::computeBounds: Unknown shape type.");
}
}
}
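// Minimal usage sketch for Gu::computeBounds (illustrative only, not part of the
// build - the geometry and pose values are made up):
#if 0
static void computeBoundsExample()
{
	const PxSphereGeometry sphere(2.0f);
	const PxTransform pose(PxVec3(1.0f, 0.0f, 0.0f));
	PxBounds3 bounds;
	Gu::computeBounds(bounds, sphere, pose, 0.0f, 1.0f);	// no contact offset, no inflation
	// bounds is now [(-1,-2,-2), (3,2,2)]
}
#endif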
static PX_FORCE_INLINE void computeBoxExtentsAroundCapsule(PxVec3& extents, const PxCapsuleGeometry& capsuleGeom, float inflation)
{
extents.x = (capsuleGeom.radius + capsuleGeom.halfHeight) * inflation;
extents.y = capsuleGeom.radius * inflation;
extents.z = capsuleGeom.radius * inflation;
}
static const PxReal SQ_PRUNER_INFLATION = 1.01f; // pruner test shape inflation (not narrow phase shape)
static void computeMeshBounds(const PxVec3& pos, const PxMat33Padded& rot, const CenterExtentsPadded* PX_RESTRICT localSpaceBounds, const PxMeshScale& meshScale, PxVec3p& origin, PxVec3p& extent)
{
	PxPrefetchLine(localSpaceBounds);	// PT: this one helps reduce L2 misses in transformNoEmptyTest
transformNoEmptyTest(origin, extent, pos, rot, meshScale, *localSpaceBounds);
}
// PT: warning: this writes 4 bytes after the end of 'bounds'. Calling code must ensure it is safe to do so.
static PX_FORCE_INLINE void computeMinMaxBounds(PxBounds3* PX_RESTRICT bounds, const PxVec3p& c, const PxVec3p& e, float prunerInflation, float offset)
{
const Vec4V extentsV = V4Scale(V4Add(V4LoadU(&e.x), V4Load(offset)), FLoad(prunerInflation));
const Vec4V centerV = V4LoadU(&c.x);
const Vec4V minV = V4Sub(centerV, extentsV);
const Vec4V maxV = V4Add(centerV, extentsV);
V4StoreU(minV, &bounds->minimum.x);
V4StoreU(maxV, &bounds->maximum.x);
}
ShapeData::ShapeData(const PxGeometry& g, const PxTransform& t, PxReal inflation)
{
using namespace physx::aos;
// PT: this cast to matrix is already done in GeometryUnion::computeBounds (e.g. for boxes). So we do it first,
// then we'll pass the matrix directly to computeBoundsShapeData, to avoid the double conversion.
const bool isOBB = PxAbs(t.q.w) < 0.999999f;
if(isOBB)
{
// PT: writes 4 bytes after 'rot' but it's safe since we then write 'center' just afterwards
buildFrom(mGuBox, t.q);
}
else
{
mGuBox.rot = PxMat33(PxIdentity);
}
// PT: can't use V4Load here since there's no guarantee on 't.p'
// PT: must store 'center' after 'rot' now
mGuBox.center = t.p;
// Compute AABB, used by the BucketPruner as cullBox
switch(g.getType())
{
case PxGeometryType::eSPHERE:
{
const PxSphereGeometry& shape = static_cast<const PxSphereGeometry&>(g);
computeMinMaxBounds(&mPrunerInflatedAABB, mGuBox.center, PxVec3(0.0f), SQ_PRUNER_INFLATION, shape.radius+inflation);
//
reinterpret_cast<Sphere&>(mGuSphere) = Sphere(t.p, shape.radius);
}
break;
case PxGeometryType::eCAPSULE:
{
const PxCapsuleGeometry& shape = static_cast<const PxCapsuleGeometry&>(g);
const PxVec3p extents = mGuBox.rot.column0.abs() * shape.halfHeight;
computeMinMaxBounds(&mPrunerInflatedAABB, mGuBox.center, extents, SQ_PRUNER_INFLATION, shape.radius+inflation);
//
Capsule& dstWorldCapsule = reinterpret_cast<Capsule&>(mGuCapsule); // store a narrow phase version copy
getCapsule(dstWorldCapsule, shape, t);
mGuBox.extents.x = shape.halfHeight;
// compute PxBoxGeometry pruner geom around input capsule geom; transform remains unchanged
computeBoxExtentsAroundCapsule(mPrunerBoxGeomExtents, shape, SQ_PRUNER_INFLATION);
}
break;
case PxGeometryType::eBOX:
{
const PxBoxGeometry& shape = static_cast<const PxBoxGeometry&>(g);
// PT: cast is safe because 'rot' followed by other members
Vec4V extentsV = basisExtentV(static_cast<const PxMat33Padded&>(mGuBox.rot), shape.halfExtents, inflation, SQ_PRUNER_INFLATION);
// PT: c/e-to-m/M conversion
const Vec4V centerV = V4LoadU(&mGuBox.center.x);
const Vec4V minV = V4Sub(centerV, extentsV);
const Vec4V maxV = V4Add(centerV, extentsV);
V4StoreU(minV, &mPrunerInflatedAABB.minimum.x);
V4StoreU(maxV, &mPrunerInflatedAABB.maximum.x); // PT: WARNING: writes past end of class
//
mGuBox.extents = shape.halfExtents; // PT: TODO: use SIMD
mPrunerBoxGeomExtents = shape.halfExtents*SQ_PRUNER_INFLATION;
}
break;
case PxGeometryType::eCONVEXMESH:
{
const PxConvexMeshGeometry& shape = static_cast<const PxConvexMeshGeometry&>(g);
const ConvexMesh* cm = static_cast<const ConvexMesh*>(shape.convexMesh);
const ConvexHullData* hullData = &cm->getHull();
// PT: cast is safe since 'rot' is followed by other members of the box
PxVec3p center, extents;
computeMeshBounds(mGuBox.center, static_cast<const PxMat33Padded&>(mGuBox.rot), &hullData->getPaddedBounds(), shape.scale, center, extents);
computeMinMaxBounds(&mPrunerInflatedAABB, center, extents, SQ_PRUNER_INFLATION, inflation);
//
Box prunerBox;
computeOBBAroundConvex(prunerBox, shape, cm, t);
mGuBox.rot = prunerBox.rot; // PT: TODO: optimize this copy
// AP: pruners are now responsible for growing the OBB by 1% for overlap/sweep/GJK accuracy
mPrunerBoxGeomExtents = prunerBox.extents*SQ_PRUNER_INFLATION;
mGuBox.center = prunerBox.center;
}
break;
default:
PX_ALWAYS_ASSERT_MESSAGE("PhysX internal error: Invalid shape in ShapeData contructor.");
}
// PT: WARNING: these writes must stay after the above code
mIsOBB = PxU32(isOBB);
mType = PxU16(g.getType());
}
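// Minimal usage sketch for ShapeData (illustrative only, not part of the build):
#if 0
static void shapeDataExample()
{
	const PxBoxGeometry box(PxVec3(1.0f, 2.0f, 3.0f));
	const PxTransform pose(PxVec3(0.0f), PxQuat(PxIdentity));
	const ShapeData queryVolume(box, pose, 0.0f);	// no extra inflation
	// queryVolume now caches the pruner-inflated AABB, OBB etc. for scene queries
}
#endif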
| 23,441 | C++ | 36.932039 | 222 | 0.738194 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuMeshFactory.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "common/PxInsertionCallback.h"
#include "GuCooking.h"
#include "GuMeshFactory.h"
#include "GuTriangleMeshBV4.h"
#include "GuTriangleMeshRTree.h"
#include "GuTetrahedronMesh.h"
#include "GuConvexMesh.h"
#include "GuBVH.h"
#include "GuHeightField.h"
#if PX_SUPPORT_OMNI_PVD
# define OMNI_PVD_NOTIFY_ADD(OBJECT) notifyListenersAdd(OBJECT)
# define OMNI_PVD_NOTIFY_REMOVE(OBJECT) notifyListenersRemove(OBJECT)
#else
# define OMNI_PVD_NOTIFY_ADD(OBJECT)
# define OMNI_PVD_NOTIFY_REMOVE(OBJECT)
#endif
using namespace physx;
using namespace Gu;
using namespace Cm;
///////////////////////////////////////////////////////////////////////////////
PX_IMPLEMENT_OUTPUT_ERROR
///////////////////////////////////////////////////////////////////////////////
// PT: TODO: refactor all this with a dedicated container
MeshFactory::MeshFactory() :
mTriangleMeshes ("mesh factory triangle mesh hash"),
mConvexMeshes ("mesh factory convex mesh hash"),
mHeightFields ("mesh factory height field hash"),
mBVHs ("BVH factory hash"),
mFactoryListeners ("FactoryListeners")
{
}
MeshFactory::~MeshFactory()
{
}
///////////////////////////////////////////////////////////////////////////////
template<class T>
static void releaseObjects(PxCoalescedHashSet<T*>& objects)
{
while(objects.size())
{
T* object = objects.getEntries()[0];
PX_ASSERT(RefCountable_getRefCount(*object)==1);
object->release();
}
}
// PT: needed because Gu::BVH is not a PxRefCounted object, although it derives from RefCountable
static void releaseObjects(PxCoalescedHashSet<Gu::BVH*>& objects)
{
while(objects.size())
{
Gu::BVH* object = objects.getEntries()[0];
PX_ASSERT(object->getRefCount()==1);
object->release();
}
}
void MeshFactory::release()
{
// Release all objects in case the user didn't do it
releaseObjects(mTriangleMeshes);
releaseObjects(mTetrahedronMeshes);
releaseObjects(mSoftBodyMeshes);
releaseObjects(mConvexMeshes);
releaseObjects(mHeightFields);
releaseObjects(mBVHs);
PX_DELETE_THIS;
}
template <typename T>
static void addToHash(PxCoalescedHashSet<T*>& hash, T* element, PxMutex* mutex)
{
if(!element)
return;
if(mutex)
mutex->lock();
hash.insert(element);
if(mutex)
mutex->unlock();
}
///////////////////////////////////////////////////////////////////////////////
static void read8BitIndices(PxInputStream& stream, void* tris, PxU32 nbIndices, const bool has16BitIndices)
{
PxU8 x;
if(has16BitIndices)
{
PxU16* tris16 = reinterpret_cast<PxU16*>(tris);
for(PxU32 i=0;i<nbIndices;i++)
{
stream.read(&x, sizeof(PxU8));
*tris16++ = x;
}
}
else
{
PxU32* tris32 = reinterpret_cast<PxU32*>(tris);
for(PxU32 i=0;i<nbIndices;i++)
{
stream.read(&x, sizeof(PxU8));
*tris32++ = x;
}
}
}
static void read16BitIndices(PxInputStream& stream, void* tris, PxU32 nbIndices, const bool has16BitIndices, const bool mismatch)
{
if(has16BitIndices)
{
PxU16* tris16 = reinterpret_cast<PxU16*>(tris);
stream.read(tris16, nbIndices*sizeof(PxU16));
if(mismatch)
{
for(PxU32 i=0;i<nbIndices;i++)
flip(tris16[i]);
}
}
else
{
PxU32* tris32 = reinterpret_cast<PxU32*>(tris);
PxU16 x;
for(PxU32 i=0;i<nbIndices;i++)
{
stream.read(&x, sizeof(PxU16));
if(mismatch)
flip(x);
*tris32++ = x;
}
}
}
static void read32BitIndices(PxInputStream& stream, void* tris, PxU32 nbIndices, const bool has16BitIndices, const bool mismatch)
{
if(has16BitIndices)
{
PxU32 x;
PxU16* tris16 = reinterpret_cast<PxU16*>(tris);
for(PxU32 i=0;i<nbIndices;i++)
{
stream.read(&x, sizeof(PxU32));
if(mismatch)
flip(x);
*tris16++ = PxTo16(x);
}
}
else
{
PxU32* tris32 = reinterpret_cast<PxU32*>(tris);
stream.read(tris32, nbIndices*sizeof(PxU32));
if(mismatch)
{
for(PxU32 i=0;i<nbIndices;i++)
flip(tris32[i]);
}
}
}
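// The three helpers above convert between the serialized index width (8/16/32-bit)
// and the runtime index width (16 or 32-bit), flipping endianness where the stream
// was cooked on a platform with a different byte order (8-bit data needs no flip).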
static TriangleMeshData* loadMeshData(PxInputStream& stream)
{
// Import header
PxU32 version;
bool mismatch;
if(!readHeader('M', 'E', 'S', 'H', version, mismatch, stream))
return NULL;
PxU32 midphaseID = PxMeshMidPhase::eBVH33; // Default before version 14
if(version>=14) // this refers to PX_MESH_VERSION
midphaseID = readDword(mismatch, stream);
// Check if old (incompatible) mesh format is loaded
if (version <= 9) // this refers to PX_MESH_VERSION
{
outputError<PxErrorCode::eINTERNAL_ERROR>(__LINE__, "Loading triangle mesh failed: "
"Deprecated mesh cooking format. Please recook your mesh in a new cooking format.");
PX_ALWAYS_ASSERT_MESSAGE("Obsolete cooked mesh found. Mesh version has been updated, please recook your meshes.");
return NULL;
}
// Import serialization flags
const PxU32 serialFlags = readDword(mismatch, stream);
// Import misc values
if (version <= 12) // this refers to PX_MESH_VERSION
{
// convexEdgeThreshold was removed in 3.4.0
readFloat(mismatch, stream);
}
TriangleMeshData* data;
if(midphaseID==PxMeshMidPhase::eBVH33)
data = PX_NEW(RTreeTriangleData);
else if(midphaseID==PxMeshMidPhase::eBVH34)
data = PX_NEW(BV4TriangleData);
else return NULL;
// Import mesh
PxVec3* verts = data->allocateVertices(readDword(mismatch, stream));
const PxU32 nbTris = readDword(mismatch, stream);
const bool force32 = (serialFlags & (IMSF_8BIT_INDICES|IMSF_16BIT_INDICES)) == 0;
//ML: this will allocate CPU triangle indices and GPU triangle indices if we have GRB data built
void* tris = data->allocateTriangles(nbTris, force32, serialFlags & IMSF_GRB_DATA);
stream.read(verts, sizeof(PxVec3)*data->mNbVertices);
if(mismatch)
{
for(PxU32 i=0;i<data->mNbVertices;i++)
{
flip(verts[i].x);
flip(verts[i].y);
flip(verts[i].z);
}
}
//TODO: stop support for format conversion on load!!
const PxU32 nbIndices = 3*data->mNbTriangles;
if(serialFlags & IMSF_8BIT_INDICES)
read8BitIndices(stream, tris, nbIndices, data->has16BitIndices());
else if(serialFlags & IMSF_16BIT_INDICES)
read16BitIndices(stream, tris, nbIndices, data->has16BitIndices(), mismatch);
else
read32BitIndices(stream, tris, nbIndices, data->has16BitIndices(), mismatch);
if(serialFlags & IMSF_MATERIALS)
{
PxU16* materials = data->allocateMaterials();
stream.read(materials, sizeof(PxU16)*data->mNbTriangles);
if(mismatch)
{
for(PxU32 i=0;i<data->mNbTriangles;i++)
flip(materials[i]);
}
}
if(serialFlags & IMSF_FACE_REMAP)
{
PxU32* remap = data->allocateFaceRemap();
readIndices(readDword(mismatch, stream), data->mNbTriangles, remap, stream, mismatch);
}
if(serialFlags & IMSF_ADJACENCIES)
{
PxU32* adj = data->allocateAdjacencies();
stream.read(adj, sizeof(PxU32)*data->mNbTriangles*3);
if(mismatch)
{
for(PxU32 i=0;i<data->mNbTriangles*3;i++)
flip(adj[i]);
}
}
// PT: TODO better
if(midphaseID==PxMeshMidPhase::eBVH33)
{
if(!static_cast<RTreeTriangleData*>(data)->mRTree.load(stream, version, mismatch))
{
outputError<PxErrorCode::eINTERNAL_ERROR>(__LINE__, "RTree binary image load error.");
PX_DELETE(data);
return NULL;
}
}
else if(midphaseID==PxMeshMidPhase::eBVH34)
{
BV4TriangleData* bv4data = static_cast<BV4TriangleData*>(data);
if(!bv4data->mBV4Tree.load(stream, mismatch))
{
outputError<PxErrorCode::eINTERNAL_ERROR>(__LINE__, "BV4 binary image load error.");
PX_DELETE(data);
return NULL;
}
bv4data->mMeshInterface.setNbTriangles(nbTris);
bv4data->mMeshInterface.setNbVertices(data->mNbVertices);
if(data->has16BitIndices())
bv4data->mMeshInterface.setPointers(NULL, reinterpret_cast<IndTri16*>(tris), verts);
else
bv4data->mMeshInterface.setPointers(reinterpret_cast<IndTri32*>(tris), NULL, verts);
bv4data->mBV4Tree.mMeshInterface = &bv4data->mMeshInterface;
}
else PX_ASSERT(0);
// Import local bounds
data->mGeomEpsilon = readFloat(mismatch, stream);
readFloatBuffer(&data->mAABB.minimum.x, 6, mismatch, stream);
PxU32 nb = readDword(mismatch, stream);
if(nb)
{
PX_ASSERT(nb==data->mNbTriangles);
data->allocateExtraTrigData();
// No need to convert those bytes
stream.read(data->mExtraTrigData, nb*sizeof(PxU8));
}
if(serialFlags & IMSF_GRB_DATA)
{
		PxU32 GRB_meshAdjVerticesTotal = 0;
		if(version < 15)
			GRB_meshAdjVerticesTotal = readDword(mismatch, stream);
//read grb triangle indices
PX_ASSERT(data->mGRB_primIndices);
if(serialFlags & IMSF_8BIT_INDICES)
read8BitIndices(stream, data->mGRB_primIndices, nbIndices, data->has16BitIndices());
else if(serialFlags & IMSF_16BIT_INDICES)
read16BitIndices(stream, data->mGRB_primIndices, nbIndices, data->has16BitIndices(), mismatch);
else
read32BitIndices(stream, data->mGRB_primIndices, nbIndices, data->has16BitIndices(), mismatch);
data->mGRB_primAdjacencies = PX_ALLOCATE(PxU32, data->mNbTriangles*4, "mGRB_primAdjacencies");
data->mGRB_faceRemap = PX_ALLOCATE(PxU32, data->mNbTriangles, "mGRB_faceRemap");
if(serialFlags & IMSF_GRB_INV_REMAP)
data->mGRB_faceRemapInverse = PX_ALLOCATE(PxU32, data->mNbTriangles, "mGRB_faceRemapInverse");
stream.read(data->mGRB_primAdjacencies, sizeof(PxU32)*data->mNbTriangles*4);
if (version < 15)
{
//stream.read(data->mGRB_vertValency, sizeof(PxU32)*data->mNbVertices);
for (PxU32 i = 0; i < data->mNbVertices; ++i)
readDword(mismatch, stream);
//stream.read(data->mGRB_adjVertStart, sizeof(PxU32)*data->mNbVertices);
for (PxU32 i = 0; i < data->mNbVertices; ++i)
readDword(mismatch, stream);
			//stream.read(data->mGRB_adjVertices, sizeof(PxU32)*GRB_meshAdjVerticesTotal);
			for (PxU32 i = 0; i < GRB_meshAdjVerticesTotal; ++i)
readDword(mismatch, stream);
}
stream.read(data->mGRB_faceRemap, sizeof(PxU32)*data->mNbTriangles);
if(data->mGRB_faceRemapInverse)
stream.read(data->mGRB_faceRemapInverse, sizeof(PxU32)*data->mNbTriangles);
if(mismatch)
{
for(PxU32 i=0;i<data->mNbTriangles*4;i++)
flip(reinterpret_cast<PxU32 *>(data->mGRB_primIndices)[i]);
for(PxU32 i=0;i<data->mNbTriangles*4;i++)
flip(reinterpret_cast<PxU32 *>(data->mGRB_primAdjacencies)[i]);
}
//read BV32
data->mGRB_BV32Tree = PX_NEW(BV32Tree);
if (!data->mGRB_BV32Tree->load(stream, mismatch))
{
outputError<PxErrorCode::eINTERNAL_ERROR>(__LINE__, "BV32 binary image load error.");
PX_DELETE(data);
return NULL;
}
if (serialFlags & IMSF_VERT_MAPPING)
{
//import vertex mapping data
data->mNbTrianglesReferences = readDword(mismatch, stream);
data->mAccumulatedTrianglesRef = PX_ALLOCATE(PxU32, data->mNbVertices, "mAccumulatedTrianglesRef");
data->mTrianglesReferences = PX_ALLOCATE(PxU32, data->mNbTrianglesReferences, "mTrianglesReferences");
stream.read(data->mAccumulatedTrianglesRef, data->mNbVertices * sizeof(PxU32));
stream.read(data->mTrianglesReferences, data->mNbTrianglesReferences * sizeof(PxU32));
}
}
if (serialFlags & IMSF_SDF)
{
// Import sdf
SDF& sdfData = data->mSdfData;
sdfData.mMeshLower.x = readFloat(mismatch, stream);
sdfData.mMeshLower.y = readFloat(mismatch, stream);
sdfData.mMeshLower.z = readFloat(mismatch, stream);
sdfData.mSpacing = readFloat(mismatch, stream);
sdfData.mDims.x = readDword(mismatch, stream);
sdfData.mDims.y = readDword(mismatch, stream);
sdfData.mDims.z = readDword(mismatch, stream);
sdfData.mNumSdfs = readDword(mismatch, stream);
sdfData.mNumSubgridSdfs = readDword(mismatch, stream);
sdfData.mNumStartSlots = readDword(mismatch, stream);
sdfData.mSubgridSize = readDword(mismatch, stream);
sdfData.mSdfSubgrids3DTexBlockDim.x = readDword(mismatch, stream);
sdfData.mSdfSubgrids3DTexBlockDim.y = readDword(mismatch, stream);
sdfData.mSdfSubgrids3DTexBlockDim.z = readDword(mismatch, stream);
sdfData.mSubgridsMinSdfValue = readFloat(mismatch, stream);
sdfData.mSubgridsMaxSdfValue = readFloat(mismatch, stream);
sdfData.mBytesPerSparsePixel = readDword(mismatch, stream);
PxReal* sdf = sdfData.allocateSdfs(sdfData.mMeshLower, sdfData.mSpacing, sdfData.mDims.x, sdfData.mDims.y, sdfData.mDims.z,
sdfData.mSubgridSize, sdfData.mSdfSubgrids3DTexBlockDim.x, sdfData.mSdfSubgrids3DTexBlockDim.y, sdfData.mSdfSubgrids3DTexBlockDim.z,
sdfData.mSubgridsMinSdfValue, sdfData.mSubgridsMaxSdfValue, sdfData.mBytesPerSparsePixel);
stream.read(sdf, sizeof(PxReal) * sdfData.mNumSdfs);
readByteBuffer(sdfData.mSubgridSdf, sdfData.mNumSubgridSdfs, stream);
readIntBuffer(sdfData.mSubgridStartSlots, sdfData.mNumStartSlots, mismatch, stream);
}
if (serialFlags & IMSF_INERTIA)
{
// Import inertia
stream.read(&data->mMass, sizeof(PxReal));
readFloatBuffer(&data->mInertia(0, 0), 9, mismatch, stream);
readFloatBuffer(&data->mLocalCenterOfMass.x, 3, mismatch, stream);
}
return data;
}
static void readIndices(const PxU32 serialFlags, void* indices, const PxU32 nbIndices,
const bool has16BitIndices, const bool mismatch, PxInputStream& stream)
{
if(serialFlags & IMSF_8BIT_INDICES)
read8BitIndices(stream, indices, nbIndices, has16BitIndices);
else if(serialFlags & IMSF_16BIT_INDICES)
read16BitIndices(stream, indices, nbIndices, has16BitIndices, mismatch);
else
read32BitIndices(stream, indices, nbIndices, has16BitIndices, mismatch);
}
void MeshFactory::addTriangleMesh(TriangleMesh* np, bool lock)
{
addToHash(mTriangleMeshes, np, lock ? &mTrackingMutex : NULL);
OMNI_PVD_NOTIFY_ADD(np);
}
PxTriangleMesh* MeshFactory::createTriangleMesh(TriangleMeshData& data)
{
TriangleMesh* np;
if(data.mType==PxMeshMidPhase::eBVH33)
{
PX_NEW_SERIALIZED(np, RTreeTriangleMesh)(this, data);
}
else if(data.mType==PxMeshMidPhase::eBVH34)
{
PX_NEW_SERIALIZED(np, BV4TriangleMesh)(this, data);
}
else return NULL;
if(np)
addTriangleMesh(np);
return np;
}
// data injected by cooking lib for runtime cooking
PxTriangleMesh* MeshFactory::createTriangleMesh(void* data)
{
return createTriangleMesh(*reinterpret_cast<TriangleMeshData*>(data));
}
PxTriangleMesh* MeshFactory::createTriangleMesh(PxInputStream& desc)
{
TriangleMeshData* data = ::loadMeshData(desc);
if(!data)
return NULL;
PxTriangleMesh* m = createTriangleMesh(*data);
PX_DELETE(data);
return m;
}
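// Minimal usage sketch (illustrative only, not part of this file's build). It assumes
// a PxInputStream implementation such as PxDefaultMemoryInputData from the
// PhysXExtensions library (extensions/PxDefaultStreams.h):
#if 0
static PxTriangleMesh* loadCookedTriangleMesh(MeshFactory& factory, void* cookedData, PxU32 cookedSize)
{
	PxDefaultMemoryInputData stream(static_cast<PxU8*>(cookedData), cookedSize);
	return factory.createTriangleMesh(stream);	// NULL if the header/version check fails
}
#endif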
bool MeshFactory::removeTriangleMesh(PxTriangleMesh& m)
{
TriangleMesh* gu = static_cast<TriangleMesh*>(&m);
OMNI_PVD_NOTIFY_REMOVE(gu);
PxMutex::ScopedLock lock(mTrackingMutex);
bool found = mTriangleMeshes.erase(gu);
return found;
}
PxU32 MeshFactory::getNbTriangleMeshes() const
{
PxMutex::ScopedLock lock(mTrackingMutex);
return mTriangleMeshes.size();
}
PxU32 MeshFactory::getTriangleMeshes(PxTriangleMesh** userBuffer, PxU32 bufferSize, PxU32 startIndex) const
{
PxMutex::ScopedLock lock(mTrackingMutex);
return getArrayOfPointers(userBuffer, bufferSize, startIndex, mTriangleMeshes.getEntries(), mTriangleMeshes.size());
}
///////////////////////////////////////////////////////////////////////////////
static TetrahedronMeshData* loadTetrahedronMeshData(PxInputStream& stream)
{
// Import header
PxU32 version;
bool mismatch;
if (!readHeader('T', 'E', 'M', 'E', version, mismatch, stream))
return NULL;
// Import serialization flags
const PxU32 serialFlags = readDword(mismatch, stream);
TetrahedronMeshData* data = PX_NEW(TetrahedronMeshData);
// Import mesh
const PxU32 nbVerts = readDword(mismatch, stream);
PxVec3* verts = data->allocateVertices(nbVerts);
//const PxU32 nbSurfaceTriangles = readDword(mismatch, stream);
const PxU32 nbTetrahedrons = readDword(mismatch, stream);
//ML: this will allocate CPU tetrahedron indices and GPU tetrahedron indices and other GPU data if we have GRB data built
//void* tets = data->allocateTetrahedrons(nbTetrahedrons, serialFlags & IMSF_GRB_DATA);
data->allocateTetrahedrons(nbTetrahedrons, 1);
void* tets = data->mTetrahedrons;
stream.read(verts, sizeof(PxVec3)*data->mNbVertices);
//stream.read(restPoses, sizeof(PxMat33) * data->mNbTetrahedrons);
if (mismatch)
{
for (PxU32 i = 0; i < data->mNbVertices; i++)
{
flip(verts[i].x);
flip(verts[i].y);
flip(verts[i].z);
}
}
//TODO: stop support for format conversion on load!!
const PxU32 nbTetIndices = 4 * data->mNbTetrahedrons;
readIndices(serialFlags, tets, nbTetIndices, data->has16BitIndices(), mismatch, stream);
// Import local bounds
data->mGeomEpsilon = readFloat(mismatch, stream);
readFloatBuffer(&data->mAABB.minimum.x, 6, mismatch, stream);
return data;
}
static bool loadSoftBodyMeshData(PxInputStream& stream, SoftBodyMeshData& data)
{
// Import header
PxU32 version;
bool mismatch;
if (!readHeader('S', 'O', 'M', 'E', version, mismatch, stream))
return false;
// Import serialization flags
const PxU32 serialFlags = readDword(mismatch, stream);
// Import mesh
const PxU32 nbVerts = readDword(mismatch, stream);
PxVec3* verts = data.mCollisionMesh.allocateVertices(nbVerts);
//const PxU32 nbSurfaceTriangles = readDword(mismatch, stream);
const PxU32 nbTetrahedrons= readDword(mismatch, stream);
//ML: this will allocate CPU tetrahedron indices and GPU tetrahedron indices and other GPU data if we have GRB data built
//void* tets = data.allocateTetrahedrons(nbTetrahedrons, serialFlags & IMSF_GRB_DATA);
data.mCollisionMesh.allocateTetrahedrons(nbTetrahedrons, 1);
if (serialFlags & IMSF_GRB_DATA)
data.mCollisionData.allocateCollisionData(nbTetrahedrons);
void* tets = data.mCollisionMesh.mTetrahedrons;
//void* surfaceTriangles = data.mCollisionData.allocateSurfaceTriangles(nbSurfaceTriangles);
//void* restPoses = data.mTetraRestPoses;
stream.read(verts, sizeof(PxVec3)*nbVerts);
//stream.read(restPoses, sizeof(PxMat33) * data.mNbTetrahedrons);
if (mismatch)
{
for (PxU32 i = 0; i< nbVerts; i++)
{
flip(verts[i].x);
flip(verts[i].y);
flip(verts[i].z);
}
}
//TODO: stop support for format conversion on load!!
const PxU32 nbTetIndices = 4 * nbTetrahedrons;
readIndices(serialFlags, tets, nbTetIndices, data.mCollisionMesh.has16BitIndices(), mismatch, stream);
//const PxU32 nbSurfaceTriangleIndices = 3 * nbSurfaceTriangles;
//readIndices(serialFlags, surfaceTriangles, nbSurfaceTriangleIndices, data.mCollisionMesh.has16BitIndices(), mismatch, stream);
////using IMSF_ADJACENCIES for tetMesh tetrahedron surface hint
//if (serialFlags & IMSF_ADJACENCIES)
//{
// PxU8* surfaceHints = reinterpret_cast<PxU8*>(data.mTetraSurfaceHint);
// stream.read(surfaceHints, sizeof(PxU8)*data.mNbTetrahedrons);
//}
if (serialFlags & IMSF_MATERIALS)
{
PxU16* materials = data.mCollisionMesh.allocateMaterials();
stream.read(materials, sizeof(PxU16)*nbTetrahedrons);
if (mismatch)
{
for (PxU32 i = 0; i < nbTetrahedrons; i++)
flip(materials[i]);
}
}
if (serialFlags & IMSF_FACE_REMAP)
{
PxU32* remap = data.mCollisionData.allocateFaceRemap(nbTetrahedrons);
readIndices(readDword(mismatch, stream), nbTetrahedrons, remap, stream, mismatch);
}
/*if (serialFlags & IMSF_ADJACENCIES)
{
PxU32* adj = data.allocateAdjacencies();
stream.read(adj, sizeof(PxU32)*data.mNbTetrahedrons * 4);
if (mismatch)
{
for (PxU32 i = 0; i<data.mNbTetrahedrons * 4; i++)
flip(adj[i]);
}
}*/
SoftBodyMeshData* bv4data = &data;
if (!bv4data->mCollisionData.mBV4Tree.load(stream, mismatch))
{
outputError<PxErrorCode::eINTERNAL_ERROR>(__LINE__, "BV4 binary image load error.");
//PX_DELETE(data);
return false;
}
bv4data->mCollisionData.mMeshInterface.setNbTetrahedrons(nbTetrahedrons);
bv4data->mCollisionData.mMeshInterface.setNbVertices(nbVerts);
if (data.mCollisionMesh.has16BitIndices())
bv4data->mCollisionData.mMeshInterface.setPointers(NULL, reinterpret_cast<IndTetrahedron16*>(tets), verts);
else
bv4data->mCollisionData.mMeshInterface.setPointers(reinterpret_cast<IndTetrahedron32*>(tets), NULL, verts);
bv4data->mCollisionData.mBV4Tree.mMeshInterface = &bv4data->mCollisionData.mMeshInterface;
// Import local bounds
data.mCollisionMesh.mGeomEpsilon = readFloat(mismatch, stream);
readFloatBuffer(&data.mCollisionMesh.mAABB.minimum.x, 6, mismatch, stream);
if (serialFlags & IMSF_GRB_DATA)
{
/*PxU32 GRB_meshAdjVerticiesTotal = 0;
if (version < 15)
GRB_meshAdjVerticiesTotal = readDword(mismatch, stream);*/
//read grb tetrahedron indices
PX_ASSERT(data.mCollisionData.mGRB_primIndices);
//read tetrahedron indices
readIndices(serialFlags, data.mCollisionData.mGRB_primIndices, nbTetIndices, data.mCollisionMesh.has16BitIndices(), mismatch, stream);
//data.mGRB_primAdjacencies = static_cast<void *>(PX_NEW(PxU32)[data.mNbTetrahedrons * 4]);
//data.mGRB_surfaceTriIndices = static_cast<void *>(PX_NEW(PxU32)[data.mNbTriangles * 3]);
data.mCollisionData.mGRB_faceRemap = PX_ALLOCATE(PxU32, data.mCollisionMesh.mNbTetrahedrons, "mGRB_faceRemap");
data.mCollisionData.mGRB_faceRemapInverse = PX_ALLOCATE(PxU32, data.mCollisionMesh.mNbTetrahedrons, "mGRB_faceRemapInverse");
//data.mGRB_surfaceTriangleIndice = PX_NEW(PxU32)[data.mNbSurfaceTriangles * 3];
//stream.read(data.mGRB_primAdjacencies, sizeof(PxU32)*data.mNbTetrahedrons * 4);
stream.read(data.mCollisionData.mGRB_tetraSurfaceHint, sizeof(PxU8) * data.mCollisionMesh.mNbTetrahedrons);
stream.read(data.mCollisionData.mGRB_faceRemap, sizeof(PxU32) * data.mCollisionMesh.mNbTetrahedrons);
stream.read(data.mCollisionData.mGRB_faceRemapInverse, sizeof(PxU32) * data.mCollisionMesh.mNbTetrahedrons);
//stream.read(data.mGRB_surfaceTriangleIndice, sizeof(PxU32) * data.mNbSurfaceTriangles * 3);
stream.read(data.mCollisionData.mTetraRestPoses, sizeof(PxMat33) * nbTetrahedrons);
if (mismatch)
{
for (PxU32 i = 0; i<data.mCollisionMesh.mNbTetrahedrons * 4; i++)
flip(reinterpret_cast<PxU32 *>(data.mCollisionData.mGRB_primIndices)[i]);
}
//read BV32
data.mCollisionData.mGRB_BV32Tree = PX_NEW(BV32Tree);
if (!data.mCollisionData.mGRB_BV32Tree->load(stream, mismatch))
{
outputError<PxErrorCode::eINTERNAL_ERROR>(__LINE__, "BV32 binary image load error.");
//PX_DELETE(data);
return false;
}
const PxU32 nbGridModelTetrahedrons = readDword(mismatch, stream);
const PxU32 nbGridModelVertices = readDword(mismatch, stream);
const PxU32 nbGridModelPartitions = readDword(mismatch, stream);
const PxU32 nbGMMaxTetsPerPartition = readDword(mismatch, stream);
const PxU32 nbGMRemapOutputSize = readDword(mismatch, stream);
PxU32 numTetsPerElement = 1;
if(version >= 2)
numTetsPerElement = readDword(mismatch, stream);
const PxU32 nbGMTotalTetReferenceCount = readDword(mismatch, stream);
const PxU32 nbTetRemapSize = readDword(mismatch, stream);
const PxU32 numVertsPerElement = (numTetsPerElement == 5 || numTetsPerElement == 6) ? 8 : 4;
const PxU32 numSimElements = nbGridModelTetrahedrons / numTetsPerElement;
data.mSimulationData.mGridModelMaxTetsPerPartitions = nbGMMaxTetsPerPartition;
data.mSimulationData.mNumTetsPerElement = numTetsPerElement;
data.mMappingData.mTetsRemapSize = nbTetRemapSize;
/*data.allocateGridModelData(nbGridModelTetrahedrons, nbGridModelVertices,
data.mCollisionMesh.mNbVertices, nbGridModelPartitions, nbGMRemapOutputSize,
nbGMTotalTetReferenceCount, nbTetRemapSize, data.mCollisionMesh.mNbTetrahedrons,
serialFlags & IMSF_GRB_DATA);*/
data.mSimulationMesh.allocateTetrahedrons(nbGridModelTetrahedrons, serialFlags & IMSF_GRB_DATA);
data.mSimulationMesh.allocateVertices(nbGridModelVertices, serialFlags & IMSF_GRB_DATA);
data.mSimulationData.allocateGridModelData(nbGridModelTetrahedrons, nbGridModelVertices,
data.mCollisionMesh.mNbVertices, nbGridModelPartitions, nbGMRemapOutputSize, numTetsPerElement, serialFlags & IMSF_GRB_DATA);
data.mMappingData.allocatemappingData(data.mCollisionMesh.mNbVertices, nbTetRemapSize, data.mCollisionMesh.mNbTetrahedrons, serialFlags & IMSF_GRB_DATA);
data.mMappingData.allocateTetRefData(nbGMTotalTetReferenceCount, data.mCollisionMesh.mNbVertices, serialFlags & IMSF_GRB_DATA);
const PxU32 nbGridModelIndices = 4 * nbGridModelTetrahedrons;
readIndices(serialFlags, data.mSimulationMesh.mTetrahedrons, nbGridModelIndices, data.mSimulationMesh.has16BitIndices(), mismatch, stream);
//stream.read(data.mGridModelVerticesInvMass, sizeof(PxVec4) * nbGridModelVertices);
stream.read(data.mSimulationMesh.mVertices, sizeof(PxVec3) * nbGridModelVertices);
if (serialFlags & IMSF_MATERIALS)
{
PxU16* materials = data.mSimulationMesh.allocateMaterials();
stream.read(materials, sizeof(PxU16)*nbGridModelTetrahedrons);
if (mismatch)
{
				for (PxU32 i = 0; i < nbGridModelTetrahedrons; i++)
flip(materials[i]);
}
}
stream.read(data.mSimulationData.mGridModelInvMass, sizeof(PxReal) * nbGridModelVertices);
stream.read(data.mSimulationData.mGridModelTetraRestPoses, sizeof(PxMat33) * nbGridModelTetrahedrons);
stream.read(data.mSimulationData.mGridModelOrderedTetrahedrons, sizeof(PxU32) * numSimElements);
stream.read(data.mSimulationData.mGMRemapOutputCP, sizeof(PxU32) * numSimElements * numVertsPerElement);
stream.read(data.mSimulationData.mGMAccumulatedPartitionsCP, sizeof(PxU32) * nbGridModelPartitions);
stream.read(data.mSimulationData.mGMAccumulatedCopiesCP, sizeof(PxU32) * data.mSimulationMesh.mNbVertices);
stream.read(data.mMappingData.mCollisionAccumulatedTetrahedronsRef, sizeof(PxU32) * data.mCollisionMesh.mNbVertices);
stream.read(data.mMappingData.mCollisionTetrahedronsReferences, sizeof(PxU32) * data.mMappingData.mCollisionNbTetrahedronsReferences);
stream.read(data.mMappingData.mCollisionSurfaceVertsHint, sizeof(PxU8) * data.mCollisionMesh.mNbVertices);
stream.read(data.mMappingData.mCollisionSurfaceVertToTetRemap, sizeof(PxU32) * data.mCollisionMesh.mNbVertices);
//stream.read(data->mVertsBarycentricInGridModel, sizeof(PxReal) * 4 * data->mNbVertices);
stream.read(data.mSimulationData.mGMPullIndices, sizeof(PxU32) * numSimElements * numVertsPerElement);
//stream.read(data->mVertsBarycentricInGridModel, sizeof(PxReal) * 4 * data->mNbVertices);
stream.read(data.mMappingData.mVertsBarycentricInGridModel, sizeof(PxReal) * 4 * data.mCollisionMesh.mNbVertices);
stream.read(data.mMappingData.mVertsRemapInGridModel, sizeof(PxU32) * data.mCollisionMesh.mNbVertices);
stream.read(data.mMappingData.mTetsRemapColToSim, sizeof(PxU32) *nbTetRemapSize);
stream.read(data.mMappingData.mTetsAccumulatedRemapColToSim, sizeof(PxU32) * data.mCollisionMesh.mNbTetrahedrons);
}
return true;
}
void MeshFactory::addTetrahedronMesh(TetrahedronMesh* np, bool lock)
{
addToHash(mTetrahedronMeshes, np, lock ? &mTrackingMutex : NULL);
OMNI_PVD_NOTIFY_ADD(np);
}
void MeshFactory::addSoftBodyMesh(SoftBodyMesh* np, bool lock)
{
addToHash(mSoftBodyMeshes, np, lock ? &mTrackingMutex : NULL);
OMNI_PVD_NOTIFY_ADD(np);
}
PxSoftBodyMesh* MeshFactory::createSoftBodyMesh(PxInputStream& desc)
{
TetrahedronMeshData mSimulationMesh;
SoftBodySimulationData mSimulationData;
TetrahedronMeshData mCollisionMesh;
SoftBodyCollisionData mCollisionData;
CollisionMeshMappingData mMappingData;
SoftBodyMeshData data(mSimulationMesh, mSimulationData, mCollisionMesh, mCollisionData, mMappingData);
if (!::loadSoftBodyMeshData(desc, data))
return NULL;
PxSoftBodyMesh* m = createSoftBodyMesh(data);
return m;
}
PxTetrahedronMesh* MeshFactory::createTetrahedronMesh(PxInputStream& desc)
{
TetrahedronMeshData* data = ::loadTetrahedronMeshData(desc);
if (!data)
return NULL;
PxTetrahedronMesh* m = createTetrahedronMesh(*data);
PX_DELETE(data);
return m;
}
PxTetrahedronMesh* MeshFactory::createTetrahedronMesh(TetrahedronMeshData& data)
{
TetrahedronMesh* np = NULL;
PX_NEW_SERIALIZED(np, TetrahedronMesh)(this, data);
if (np)
addTetrahedronMesh(np);
return np;
}
// data injected by cooking lib for runtime cooking
PxTetrahedronMesh* MeshFactory::createTetrahedronMesh(void* data)
{
return createTetrahedronMesh(*reinterpret_cast<TetrahedronMeshData*>(data));
}
PxSoftBodyMesh* MeshFactory::createSoftBodyMesh(Gu::SoftBodyMeshData& data)
{
SoftBodyMesh* np = NULL;
PX_NEW_SERIALIZED(np, SoftBodyMesh)(this, data);
if (np)
addSoftBodyMesh(np);
return np;
}
// data injected by cooking lib for runtime cooking
PxSoftBodyMesh* MeshFactory::createSoftBodyMesh(void* data)
{
return createSoftBodyMesh(*reinterpret_cast<SoftBodyMeshData*>(data));
}
bool MeshFactory::removeSoftBodyMesh(PxSoftBodyMesh& tetMesh)
{
SoftBodyMesh* gu = static_cast<SoftBodyMesh*>(&tetMesh);
OMNI_PVD_NOTIFY_REMOVE(gu);
PxMutex::ScopedLock lock(mTrackingMutex);
bool found = mSoftBodyMeshes.erase(gu);
return found;
}
bool MeshFactory::removeTetrahedronMesh(PxTetrahedronMesh& tetMesh)
{
TetrahedronMesh* gu = static_cast<TetrahedronMesh*>(&tetMesh);
OMNI_PVD_NOTIFY_REMOVE(gu);
PxMutex::ScopedLock lock(mTrackingMutex);
bool found = mTetrahedronMeshes.erase(gu);
return found;
}
PxU32 MeshFactory::getNbSoftBodyMeshes() const
{
PxMutex::ScopedLock lock(mTrackingMutex);
return mSoftBodyMeshes.size();
}
PxU32 MeshFactory::getNbTetrahedronMeshes() const
{
PxMutex::ScopedLock lock(mTrackingMutex);
return mTetrahedronMeshes.size();
}
PxU32 MeshFactory::getTetrahedronMeshes(PxTetrahedronMesh** userBuffer, PxU32 bufferSize, PxU32 startIndex) const
{
PxMutex::ScopedLock lock(mTrackingMutex);
return getArrayOfPointers(userBuffer, bufferSize, startIndex, mTetrahedronMeshes.getEntries(), mTetrahedronMeshes.size());
}
PxU32 MeshFactory::getSoftBodyMeshes(PxSoftBodyMesh** userBuffer, PxU32 bufferSize, PxU32 startIndex) const
{
PxMutex::ScopedLock lock(mTrackingMutex);
return getArrayOfPointers(userBuffer, bufferSize, startIndex, mSoftBodyMeshes.getEntries(), mSoftBodyMeshes.size());
}
///////////////////////////////////////////////////////////////////////////////
void MeshFactory::addConvexMesh(ConvexMesh* np, bool lock)
{
addToHash(mConvexMeshes, np, lock ? &mTrackingMutex : NULL);
OMNI_PVD_NOTIFY_ADD(np);
}
// data injected by cooking lib for runtime cooking
PxConvexMesh* MeshFactory::createConvexMesh(void* data)
{
return createConvexMesh(*reinterpret_cast<ConvexHullInitData*>(data));
}
PxConvexMesh* MeshFactory::createConvexMesh(ConvexHullInitData& data)
{
ConvexMesh* np;
PX_NEW_SERIALIZED(np, ConvexMesh)(this, data);
if (np)
addConvexMesh(np);
return np;
}
PxConvexMesh* MeshFactory::createConvexMesh(PxInputStream& desc)
{
ConvexMesh* np;
PX_NEW_SERIALIZED(np, ConvexMesh)(this);
if(!np)
return NULL;
if(!np->load(desc))
{
Cm::deletePxBase(np);
return NULL;
}
addConvexMesh(np);
return np;
}
bool MeshFactory::removeConvexMesh(PxConvexMesh& m)
{
ConvexMesh* gu = static_cast<ConvexMesh*>(&m);
OMNI_PVD_NOTIFY_REMOVE(gu);
PxMutex::ScopedLock lock(mTrackingMutex);
bool found = mConvexMeshes.erase(gu);
return found;
}
PxU32 MeshFactory::getNbConvexMeshes() const
{
PxMutex::ScopedLock lock(mTrackingMutex);
return mConvexMeshes.size();
}
PxU32 MeshFactory::getConvexMeshes(PxConvexMesh** userBuffer, PxU32 bufferSize, PxU32 startIndex) const
{
PxMutex::ScopedLock lock(mTrackingMutex);
return getArrayOfPointers(userBuffer, bufferSize, startIndex, mConvexMeshes.getEntries(), mConvexMeshes.size());
}
///////////////////////////////////////////////////////////////////////////////
void MeshFactory::addHeightField(HeightField* np, bool lock)
{
addToHash(mHeightFields, np, lock ? &mTrackingMutex : NULL);
OMNI_PVD_NOTIFY_ADD(np);
}
PxHeightField* MeshFactory::createHeightField(void* heightFieldMeshData)
{
HeightField* np;
PX_NEW_SERIALIZED(np, HeightField)(this, *reinterpret_cast<HeightFieldData*>(heightFieldMeshData));
if(np)
addHeightField(np);
return np;
}
PxHeightField* MeshFactory::createHeightField(PxInputStream& stream)
{
HeightField* np;
PX_NEW_SERIALIZED(np, HeightField)(this);
if(!np)
return NULL;
if(!np->load(stream))
{
Cm::deletePxBase(np);
return NULL;
}
addHeightField(np);
return np;
}
bool MeshFactory::removeHeightField(PxHeightField& hf)
{
HeightField* gu = static_cast<HeightField*>(&hf);
OMNI_PVD_NOTIFY_REMOVE(gu);
PxMutex::ScopedLock lock(mTrackingMutex);
bool found = mHeightFields.erase(gu);
return found;
}
PxU32 MeshFactory::getNbHeightFields() const
{
PxMutex::ScopedLock lock(mTrackingMutex);
return mHeightFields.size();
}
PxU32 MeshFactory::getHeightFields(PxHeightField** userBuffer, PxU32 bufferSize, PxU32 startIndex) const
{
PxMutex::ScopedLock lock(mTrackingMutex);
return getArrayOfPointers(userBuffer, bufferSize, startIndex, mHeightFields.getEntries(), mHeightFields.size());
}
///////////////////////////////////////////////////////////////////////////////
void MeshFactory::addFactoryListener(Gu::MeshFactoryListener& listener )
{
PxMutex::ScopedLock lock(mTrackingMutex);
mFactoryListeners.pushBack( &listener );
}
void MeshFactory::removeFactoryListener(Gu::MeshFactoryListener& listener )
{
PxMutex::ScopedLock lock(mTrackingMutex);
for ( PxU32 idx = 0; idx < mFactoryListeners.size(); ++idx )
{
if ( mFactoryListeners[idx] == &listener )
{
mFactoryListeners.replaceWithLast( idx );
--idx;
}
}
}
void MeshFactory::notifyFactoryListener(const PxBase* base, PxType typeID)
{
const PxU32 nbListeners = mFactoryListeners.size();
for(PxU32 i=0; i<nbListeners; i++)
mFactoryListeners[i]->onMeshFactoryBufferRelease(base, typeID);
}
#if PX_SUPPORT_OMNI_PVD
void MeshFactory::notifyListenersAdd(const PxBase* base)
{
for (PxU32 i = 0; i < mFactoryListeners.size(); i++)
mFactoryListeners[i]->onObjectAdd(base);
}
void MeshFactory::notifyListenersRemove(const PxBase* base)
{
for (PxU32 i = 0; i < mFactoryListeners.size(); i++)
mFactoryListeners[i]->onObjectRemove(base);
}
#endif
///////////////////////////////////////////////////////////////////////////////
void MeshFactory::addBVH(BVH* np, bool lock)
{
addToHash(mBVHs, np, lock ? &mTrackingMutex : NULL);
OMNI_PVD_NOTIFY_ADD(np);
}
// data injected by cooking lib for runtime cooking
PxBVH* MeshFactory::createBVH(void* data)
{
return createBVH(*reinterpret_cast<BVHData*>(data));
}
PxBVH* MeshFactory::createBVH(BVHData& data)
{
BVH* np;
PX_NEW_SERIALIZED(np, BVH)(this, data);
if (np)
addBVH(np);
return np;
}
PxBVH* MeshFactory::createBVH(PxInputStream& desc)
{
BVH* np;
PX_NEW_SERIALIZED(np, BVH)(this);
if(!np)
return NULL;
if(!np->load(desc))
{
Cm::deletePxBase(np);
return NULL;
}
addBVH(np);
return np;
}
bool MeshFactory::removeBVH(PxBVH& m)
{
BVH* gu = static_cast<BVH*>(&m);
OMNI_PVD_NOTIFY_REMOVE(gu);
PxMutex::ScopedLock lock(mTrackingMutex);
bool found = mBVHs.erase(gu);
return found;
}
PxU32 MeshFactory::getNbBVHs() const
{
PxMutex::ScopedLock lock(mTrackingMutex);
return mBVHs.size();
}
PxU32 MeshFactory::getBVHs(PxBVH** userBuffer, PxU32 bufferSize, PxU32 startIndex) const
{
PxMutex::ScopedLock lock(mTrackingMutex);
return getArrayOfPointers(userBuffer, bufferSize, startIndex, mBVHs.getEntries(), mBVHs.size());
}
///////////////////////////////////////////////////////////////////////////////
bool MeshFactory::remove(PxBase& obj)
{
const PxType type = obj.getConcreteType();
if(type==PxConcreteType::eHEIGHTFIELD)
return removeHeightField(static_cast<PxHeightField&>(obj));
else if(type==PxConcreteType::eCONVEX_MESH)
return removeConvexMesh(static_cast<PxConvexMesh&>(obj));
else if(type==PxConcreteType::eTRIANGLE_MESH_BVH33 || type==PxConcreteType::eTRIANGLE_MESH_BVH34)
return removeTriangleMesh(static_cast<PxTriangleMesh&>(obj));
else if(type==PxConcreteType::eTETRAHEDRON_MESH)
return removeTetrahedronMesh(static_cast<PxTetrahedronMesh&>(obj));
else if (type == PxConcreteType::eSOFTBODY_MESH)
return removeSoftBodyMesh(static_cast<PxSoftBodyMesh&>(obj));
else if(type==PxConcreteType::eBVH)
return removeBVH(static_cast<PxBVH&>(obj));
return false;
}
///////////////////////////////////////////////////////////////////////////////
namespace
{
class StandaloneInsertionCallback : public PxInsertionCallback
{
public:
StandaloneInsertionCallback() {}
virtual PxBase* buildObjectFromData(PxConcreteType::Enum type, void* data)
{
if(type == PxConcreteType::eTRIANGLE_MESH_BVH33)
{
TriangleMesh* np;
PX_NEW_SERIALIZED(np, RTreeTriangleMesh)(NULL, *reinterpret_cast<TriangleMeshData*>(data));
return np;
}
if(type == PxConcreteType::eTRIANGLE_MESH_BVH34)
{
TriangleMesh* np;
PX_NEW_SERIALIZED(np, BV4TriangleMesh)(NULL, *reinterpret_cast<TriangleMeshData*>(data));
return np;
}
if(type == PxConcreteType::eCONVEX_MESH)
{
ConvexMesh* np;
PX_NEW_SERIALIZED(np, ConvexMesh)(NULL, *reinterpret_cast<ConvexHullInitData*>(data));
return np;
}
if(type == PxConcreteType::eHEIGHTFIELD)
{
HeightField* np;
PX_NEW_SERIALIZED(np, HeightField)(NULL, *reinterpret_cast<HeightFieldData*>(data));
return np;
}
if(type == PxConcreteType::eBVH)
{
BVH* np;
PX_NEW_SERIALIZED(np, BVH)(NULL, *reinterpret_cast<BVHData*>(data));
return np;
}
if (type == PxConcreteType::eTETRAHEDRON_MESH)
{
TetrahedronMesh* np;
PX_NEW_SERIALIZED(np, TetrahedronMesh)(NULL, *reinterpret_cast<TetrahedronMeshData*>(data));
return np;
}
if (type == PxConcreteType::eSOFTBODY_MESH)
{
SoftBodyMesh* np;
PX_NEW_SERIALIZED(np, SoftBodyMesh)(NULL, *reinterpret_cast<SoftBodyMeshData*>(data));
return np;
}
outputError<PxErrorCode::eINTERNAL_ERROR>(__LINE__, "Inserting object failed: "
"Object type not supported for buildObjectFromData.");
return NULL;
}
}gSAIC;
}
PxInsertionCallback* physx::immediateCooking::getInsertionCallback()
{
return &gSAIC;
}
| 39,213 | C++ | 30.752227 | 155 | 0.731339 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuBVH.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_BVH_H
#define GU_BVH_H
/** \addtogroup geomutils
@{
*/
#include "geometry/PxBVH.h"
#include "CmRefCountable.h"
#include "foundation/PxVecMath.h"
#include "foundation/PxUserAllocated.h"
#include "GuAABBTreeBounds.h"
#include "GuAABBTree.h"
namespace physx
{
struct PxBVHInternalData;
namespace Gu
{
class MeshFactory;
struct BVHNode;
class ShapeData;
class BVHData : public BVHPartialRefitData
{
public:
BVHData() {}
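// Transfer "copy" constructor: takes ownership of the node/index buffers and the
// bounds from 'other' and leaves it empty (note the non-const reference).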
BVHData(BVHData& other)
{
mNbIndices = other.mNbIndices;
mNbNodes = other.mNbNodes;
mIndices = other.mIndices;
mNodes = other.mNodes;
mBounds.moveFrom(other.mBounds);
other.mIndices = NULL;
other.mNodes = NULL;
}
~BVHData()
{
if(mBounds.ownsMemory())
{
mBounds.release();
PX_FREE(mIndices);
PX_FREE(mNodes); // PT: TODO: fix this, unify with AABBTree version
}
mNbNodes = 0;
mNbIndices = 0;
}
PX_PHYSX_COMMON_API bool build(PxU32 nbBounds, const void* boundsData, PxU32 boundsStride, float enlargement, PxU32 numPrimsPerLeaf, BVHBuildStrategy bs);
PX_PHYSX_COMMON_API bool save(PxOutputStream& stream, bool endian) const;
AABBTreeBounds mBounds;
};
/**
\brief Represents a BVH.
*/
class BVH : public PxBVH, public PxUserAllocated, public Cm::RefCountable
{
public:
// PT: TODO: revisit these PX_PHYSX_COMMON_API calls. At the end of the day the issue is that things like PxUserAllocated aren't exported.
PX_PHYSX_COMMON_API BVH(MeshFactory* factory);
PX_PHYSX_COMMON_API BVH(MeshFactory* factory, BVHData& data);
PX_PHYSX_COMMON_API BVH(const PxBVHInternalData& data);
virtual ~BVH();
PX_PHYSX_COMMON_API bool init(PxU32 nbPrims, AABBTreeBounds* bounds, const void* boundsData, PxU32 stride, BVHBuildStrategy bs, PxU32 nbPrimsPerLeaf, float enlargement);
bool load(PxInputStream& desc);
void release();
// PxBVH
virtual bool raycast(const PxVec3& origin, const PxVec3& unitDir, float distance, RaycastCallback& cb, PxGeometryQueryFlags flags) const PX_OVERRIDE;
virtual bool overlap(const PxGeometry& geom, const PxTransform& pose, OverlapCallback& cb, PxGeometryQueryFlags flags) const PX_OVERRIDE;
virtual bool sweep(const PxGeometry& geom, const PxTransform& pose, const PxVec3& unitDir, float distance, RaycastCallback& cb, PxGeometryQueryFlags flags) const PX_OVERRIDE;
virtual bool cull(PxU32 nbPlanes, const PxPlane* planes, OverlapCallback& cb, PxGeometryQueryFlags flags) const PX_OVERRIDE;
virtual PxU32 getNbBounds() const PX_OVERRIDE { return mData.mNbIndices; }
virtual const PxBounds3* getBounds() const PX_OVERRIDE { return mData.mBounds.getBounds(); }
virtual void refit() PX_OVERRIDE;
virtual bool updateBounds(PxU32 boundsIndex, const PxBounds3& newBounds) PX_OVERRIDE;
virtual void partialRefit() PX_OVERRIDE;
virtual bool traverse(TraversalCallback& cb) const PX_OVERRIDE;
//~PxBVH
// Cm::RefCountable
virtual void onRefCountZero() PX_OVERRIDE;
//~Cm::RefCountable
PX_FORCE_INLINE const BVHNode* getNodes() const { return mData.mNodes; }
PX_FORCE_INLINE const PxU32* getIndices() const { return mData.mIndices; }
PX_FORCE_INLINE const BVHData& getData() const { return mData; }
bool getInternalData(PxBVHInternalData&, bool) const;
bool updateBoundsInternal(PxU32 localIndex, const PxBounds3& bounds);
// PT: alternative implementations directly working on shape data
bool overlap(const ShapeData& shapeData, OverlapCallback& cb, PxGeometryQueryFlags flags) const;
bool sweep(const ShapeData& shapeData, const PxVec3& unitDir, float distance, RaycastCallback& cb, PxGeometryQueryFlags flags) const;
private:
MeshFactory* mMeshFactory;
BVHData mData;
};
}
}
/** @} */
#endif
| 5,708 | C | 38.645833 | 182 | 0.710757 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuWindingNumber.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_WINDING_NUMBER_H
#define GU_WINDING_NUMBER_H
/** \addtogroup geomutils
@{
*/
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxHashMap.h"
#include "foundation/PxArray.h"
#include "GuWindingNumberCluster.h"
namespace physx
{
namespace Gu
{
struct BVHNode;
typedef ClusterApproximationT<PxReal, PxVec3> ClusterApproximation;
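// Per-BVH-node approximations precomputed by precomputeClusterInformation(); they
// let computeWindingNumber() evaluate distant subtrees with a single approximate
// term instead of iterating over all of their triangles.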
PX_PHYSX_COMMON_API PxF32 computeWindingNumber(const Gu::BVHNode* tree, const PxVec3& q, const PxHashMap<PxU32, ClusterApproximation>& clusters,
const PxU32* triangles, const PxVec3* points);
PX_PHYSX_COMMON_API PxF32 computeWindingNumber(const Gu::BVHNode* tree, const PxVec3& q, PxF32 beta, const PxHashMap<PxU32, ClusterApproximation>& clusters,
const PxU32* triangles, const PxVec3* points);
PX_PHYSX_COMMON_API void precomputeClusterInformation(const Gu::BVHNode* tree, const PxU32* triangles, const PxU32 numTriangles,
const PxVec3* points, PxHashMap<PxU32, ClusterApproximation>& result, PxI32 rootNodeIndex = 0);
//Quite slow; only useful for a few query points - otherwise it is worth constructing a tree for acceleration
PX_PHYSX_COMMON_API PxF32 computeWindingNumber(const PxVec3& q, const PxU32* triangles, const PxU32 numTriangles, const PxVec3* points);
}
}
/** @} */
#endif
| 2,951 | C | 44.415384 | 157 | 0.771264 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuGeometryQuery.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "geometry/PxGeometryQuery.h"
#include "geometry/PxSphereGeometry.h"
#include "geometry/PxBoxGeometry.h"
#include "geometry/PxPlaneGeometry.h"
#include "geometry/PxCapsuleGeometry.h"
#include "geometry/PxTriangleMeshGeometry.h"
#include "geometry/PxConvexMeshGeometry.h"
#include "geometry/PxHeightFieldGeometry.h"
#include "geometry/PxHairSystemGeometry.h"
#include "geometry/PxParticleSystemGeometry.h"
#include "geometry/PxCustomGeometry.h"
#include "foundation/PxAtomic.h"
#include "GuInternal.h"
#include "GuOverlapTests.h"
#include "GuSweepTests.h"
#include "GuRaycastTests.h"
#include "GuBoxConversion.h"
#include "GuTriangleMesh.h"
#include "GuMTD.h"
#include "GuBounds.h"
#include "GuDistancePointSegment.h"
#include "GuConvexMesh.h"
#include "GuDistancePointBox.h"
#include "GuMidphaseInterface.h"
#include "foundation/PxFPU.h"
#include "GuConvexEdgeFlags.h"
#include "GuVecBox.h"
#include "GuVecConvexHull.h"
#include "GuPCMShapeConvex.h"
#include "GuPCMContactConvexCommon.h"
using namespace physx;
using namespace Gu;
extern GeomSweepFuncs gGeomSweepFuncs;
extern GeomOverlapTable gGeomOverlapMethodTable[];
extern RaycastFunc gRaycastMap[PxGeometryType::eGEOMETRY_COUNT];
///////////////////////////////////////////////////////////////////////////////
bool PxGeometryQuery::isValid(const PxGeometry& g)
{
switch(PxU32(g.getType()))
{
case PxGeometryType::eSPHERE: return static_cast<const PxSphereGeometry&>(g).isValid();
case PxGeometryType::ePLANE: return static_cast<const PxPlaneGeometry&>(g).isValid();
case PxGeometryType::eCAPSULE: return static_cast<const PxCapsuleGeometry&>(g).isValid();
case PxGeometryType::eBOX: return static_cast<const PxBoxGeometry&>(g).isValid();
case PxGeometryType::eCONVEXMESH: return static_cast<const PxConvexMeshGeometry&>(g).isValid();
case PxGeometryType::eTRIANGLEMESH: return static_cast<const PxTriangleMeshGeometry&>(g).isValid();
case PxGeometryType::eHEIGHTFIELD: return static_cast<const PxHeightFieldGeometry&>(g).isValid();
case PxGeometryType::eTETRAHEDRONMESH: return static_cast<const PxTetrahedronMeshGeometry&>(g).isValid();
case PxGeometryType::ePARTICLESYSTEM: return static_cast<const PxParticleSystemGeometry&>(g).isValid();
case PxGeometryType::eHAIRSYSTEM: return static_cast<const PxHairSystemGeometry&>(g).isValid();
case PxGeometryType::eCUSTOM: return static_cast<const PxCustomGeometry&>(g).isValid();
}
return false;
}
///////////////////////////////////////////////////////////////////////////////
bool PxGeometryQuery::sweep(const PxVec3& unitDir, const PxReal distance,
const PxGeometry& geom0, const PxTransform& pose0,
const PxGeometry& geom1, const PxTransform& pose1,
PxGeomSweepHit& sweepHit, PxHitFlags hitFlags,
const PxReal inflation, PxGeometryQueryFlags queryFlags, PxSweepThreadContext* threadContext)
{
PX_SIMD_GUARD_CNDT(queryFlags & PxGeometryQueryFlag::eSIMD_GUARD)
PX_CHECK_AND_RETURN_VAL(pose0.isValid(), "PxGeometryQuery::sweep(): pose0 is not valid.", false);
PX_CHECK_AND_RETURN_VAL(pose1.isValid(), "PxGeometryQuery::sweep(): pose1 is not valid.", false);
PX_CHECK_AND_RETURN_VAL(unitDir.isFinite(), "PxGeometryQuery::sweep(): unitDir is not valid.", false);
PX_CHECK_AND_RETURN_VAL(PxIsFinite(distance), "PxGeometryQuery::sweep(): distance is not valid.", false);
PX_CHECK_AND_RETURN_VAL((distance >= 0.0f && !(hitFlags & PxHitFlag::eASSUME_NO_INITIAL_OVERLAP)) || distance > 0.0f,
"PxGeometryQuery::sweep(): sweep distance must be >=0 or >0 with eASSUME_NO_INITIAL_OVERLAP.", 0);
#if PX_CHECKED
if(!PxGeometryQuery::isValid(geom0))
return PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "Provided geometry 0 is not valid");
if(!PxGeometryQuery::isValid(geom1))
return PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "Provided geometry 1 is not valid");
#endif
const GeomSweepFuncs& sf = gGeomSweepFuncs;
switch(geom0.getType())
{
case PxGeometryType::eSPHERE:
{
const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom0);
const PxCapsuleGeometry capsuleGeom(sphereGeom.radius, 0.0f);
const Capsule worldCapsule(pose0.p, pose0.p, sphereGeom.radius);
const bool precise = hitFlags & PxHitFlag::ePRECISE_SWEEP;
const SweepCapsuleFunc func = precise ? sf.preciseCapsuleMap[geom1.getType()] : sf.capsuleMap[geom1.getType()];
return func(geom1, pose1, capsuleGeom, pose0, worldCapsule, unitDir, distance, sweepHit, hitFlags, inflation, threadContext);
}
case PxGeometryType::eCAPSULE:
{
const PxCapsuleGeometry& capsuleGeom = static_cast<const PxCapsuleGeometry&>(geom0);
Capsule worldCapsule;
getCapsule(worldCapsule, capsuleGeom, pose0);
const bool precise = hitFlags & PxHitFlag::ePRECISE_SWEEP;
const SweepCapsuleFunc func = precise ? sf.preciseCapsuleMap[geom1.getType()] : sf.capsuleMap[geom1.getType()];
return func(geom1, pose1, capsuleGeom, pose0, worldCapsule, unitDir, distance, sweepHit, hitFlags, inflation, threadContext);
}
case PxGeometryType::eBOX:
{
const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom0);
Box box;
buildFrom(box, pose0.p, boxGeom.halfExtents, pose0.q);
const bool precise = hitFlags & PxHitFlag::ePRECISE_SWEEP;
const SweepBoxFunc func = precise ? sf.preciseBoxMap[geom1.getType()] : sf.boxMap[geom1.getType()];
return func(geom1, pose1, boxGeom, pose0, box, unitDir, distance, sweepHit, hitFlags, inflation, threadContext);
}
case PxGeometryType::eCONVEXMESH:
{
const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom0);
const SweepConvexFunc func = sf.convexMap[geom1.getType()];
return func(geom1, pose1, convexGeom, pose0, unitDir, distance, sweepHit, hitFlags, inflation, threadContext);
}
default:
PX_CHECK_MSG(false, "PxGeometryQuery::sweep(): first geometry object parameter must be sphere, capsule, box or convex geometry.");
}
return false;
}
///////////////////////////////////////////////////////////////////////////////
bool PxGeometryQuery::overlap( const PxGeometry& geom0, const PxTransform& pose0,
const PxGeometry& geom1, const PxTransform& pose1,
PxGeometryQueryFlags queryFlags, PxOverlapThreadContext* threadContext)
{
PX_SIMD_GUARD_CNDT(queryFlags & PxGeometryQueryFlag::eSIMD_GUARD)
return Gu::overlap(geom0, pose0, geom1, pose1, gGeomOverlapMethodTable, threadContext);
}
///////////////////////////////////////////////////////////////////////////////
PxU32 PxGeometryQuery::raycast( const PxVec3& rayOrigin, const PxVec3& rayDir,
const PxGeometry& geom, const PxTransform& pose,
PxReal maxDist, PxHitFlags hitFlags, PxU32 maxHits, PxGeomRaycastHit* PX_RESTRICT rayHits, PxU32 stride,
PxGeometryQueryFlags queryFlags, PxRaycastThreadContext* threadContext)
{
PX_SIMD_GUARD_CNDT(queryFlags & PxGeometryQueryFlag::eSIMD_GUARD)
PX_CHECK_AND_RETURN_VAL(rayDir.isFinite(), "PxGeometryQuery::raycast(): rayDir is not valid.", 0);
PX_CHECK_AND_RETURN_VAL(rayOrigin.isFinite(), "PxGeometryQuery::raycast(): rayOrigin is not valid.", 0);
PX_CHECK_AND_RETURN_VAL(pose.isValid(), "PxGeometryQuery::raycast(): pose is not valid.", 0);
PX_CHECK_AND_RETURN_VAL(maxDist >= 0.0f, "PxGeometryQuery::raycast(): maxDist is negative.", false);
PX_CHECK_AND_RETURN_VAL(PxIsFinite(maxDist), "PxGeometryQuery::raycast(): maxDist is not valid.", false);
PX_CHECK_AND_RETURN_VAL(PxAbs(rayDir.magnitudeSquared()-1)<1e-4f, "PxGeometryQuery::raycast(): ray direction must be unit vector.", false);
const RaycastFunc func = gRaycastMap[geom.getType()];
return func(geom, pose, rayOrigin, rayDir, maxDist, hitFlags, maxHits, rayHits, stride, threadContext);
}
///////////////////////////////////////////////////////////////////////////////
bool pointConvexDistance(PxVec3& normal_, PxVec3& closestPoint_, PxReal& sqDistance, const PxVec3& pt, const ConvexMesh* convexMesh, const PxMeshScale& meshScale, const PxTransform32& convexPose);
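// Returns the squared distance from 'point' to 'geom' (0.0f if the point is inside),
// or -1.0f for unsupported geometry types / invalid poses. 'closestPoint' and
// 'closestIndex' are optional outputs.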
PxReal PxGeometryQuery::pointDistance(const PxVec3& point, const PxGeometry& geom, const PxTransform& pose, PxVec3* closestPoint, PxU32* closestIndex, PxGeometryQueryFlags queryFlags)
{
PX_SIMD_GUARD_CNDT(queryFlags & PxGeometryQueryFlag::eSIMD_GUARD)
PX_CHECK_AND_RETURN_VAL(pose.isValid(), "PxGeometryQuery::pointDistance(): pose is not valid.", -1.0f);
switch(geom.getType())
{
case PxGeometryType::eSPHERE:
{
const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom);
const PxReal r = sphereGeom.radius;
PxVec3 delta = point - pose.p;
const PxReal d = delta.magnitude();
if(d<=r)
return 0.0f;
if(closestPoint)
{
delta /= d;
*closestPoint = pose.p + delta * r;
}
return (d - r)*(d - r);
}
case PxGeometryType::eCAPSULE:
{
const PxCapsuleGeometry& capsGeom = static_cast<const PxCapsuleGeometry&>(geom);
Capsule capsule;
getCapsule(capsule, capsGeom, pose);
const PxReal r = capsGeom.radius;
PxReal param;
const PxReal sqDistance = distancePointSegmentSquared(capsule, point, &param);
if(sqDistance<=r*r)
return 0.0f;
const PxReal d = physx::intrinsics::sqrt(sqDistance);
if(closestPoint)
{
const PxVec3 cp = capsule.getPointAt(param);
PxVec3 delta = point - cp;
delta.normalize();
*closestPoint = cp + delta * r;
}
return (d - r)*(d - r);
}
case PxGeometryType::eBOX:
{
const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom);
Box obb;
buildFrom(obb, pose.p, boxGeom.halfExtents, pose.q);
PxVec3 boxParam;
const PxReal sqDistance = distancePointBoxSquared(point, obb, &boxParam);
if(closestPoint && sqDistance!=0.0f)
{
*closestPoint = obb.transform(boxParam);
}
return sqDistance;
}
case PxGeometryType::eCONVEXMESH:
{
const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom);
const PxTransform32 poseA(pose);
PxVec3 normal, cp;
PxReal sqDistance;
const bool intersect = pointConvexDistance(normal, cp, sqDistance, point, static_cast<ConvexMesh*>(convexGeom.convexMesh), convexGeom.scale, poseA);
if(!intersect && closestPoint)
*closestPoint = cp;
return sqDistance;
}
case PxGeometryType::eTRIANGLEMESH:
{
const PxTriangleMeshGeometry& meshGeom = static_cast<const PxTriangleMeshGeometry&>(geom);
PxU32 index;
float dist;
PxVec3 cp;
Midphase::pointMeshDistance(static_cast<TriangleMesh*>(meshGeom.triangleMesh), meshGeom, pose, point, FLT_MAX, index, dist, cp);
if(closestPoint)
*closestPoint = cp;
if(closestIndex)
*closestIndex = index;
return dist*dist;
}
default:
PX_CHECK_MSG(false, "PxGeometryQuery::pointDistance(): geometry object parameter must be sphere, capsule, box, convex or mesh geometry.");
break;
}
return -1.0f;
}
///////////////////////////////////////////////////////////////////////////////
void PxGeometryQuery::computeGeomBounds(PxBounds3& bounds, const PxGeometry& geom, const PxTransform& pose, float offset, float inflation, PxGeometryQueryFlags queryFlags)
{
PX_SIMD_GUARD_CNDT(queryFlags & PxGeometryQueryFlag::eSIMD_GUARD)
PX_CHECK_AND_RETURN(pose.isValid(), "PxGeometryQuery::computeGeomBounds(): pose is not valid.");
Gu::computeBounds(bounds, geom, pose, offset, inflation);
PX_ASSERT(bounds.isValid());
}
///////////////////////////////////////////////////////////////////////////////
extern GeomMTDFunc gGeomMTDMethodTable[][PxGeometryType::eGEOMETRY_COUNT];
bool PxGeometryQuery::computePenetration( PxVec3& mtd, PxF32& depth,
const PxGeometry& geom0, const PxTransform& pose0,
const PxGeometry& geom1, const PxTransform& pose1, PxGeometryQueryFlags queryFlags)
{
PX_SIMD_GUARD_CNDT(queryFlags & PxGeometryQueryFlag::eSIMD_GUARD)
PX_CHECK_AND_RETURN_VAL(pose0.isValid(), "PxGeometryQuery::computePenetration(): pose0 is not valid.", false);
PX_CHECK_AND_RETURN_VAL(pose1.isValid(), "PxGeometryQuery::computePenetration(): pose1 is not valid.", false);
const PxTransform32 pose0A(pose0);
const PxTransform32 pose1A(pose1);
if(geom0.getType() > geom1.getType())
{
GeomMTDFunc mtdFunc = gGeomMTDMethodTable[geom1.getType()][geom0.getType()];
PX_ASSERT(mtdFunc);
if(!mtdFunc(mtd, depth, geom1, pose1A, geom0, pose0A))
return false;
mtd = -mtd;
return true;
}
else
{
GeomMTDFunc mtdFunc = gGeomMTDMethodTable[geom0.getType()][geom1.getType()];
PX_ASSERT(mtdFunc);
return mtdFunc(mtd, depth, geom0, pose0A, geom1, pose1A);
}
}
///////////////////////////////////////////////////////////////////////////////
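// Runs single-triangle PCM contact generation (capsule/box/convex vs. one triangle)
// and appends the resulting manifold points to 'contactBuffer'. Returns true if any
// contact was added (see the oldCount comparison at the end).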
bool PxGeometryQuery::generateTriangleContacts(const PxGeometry& geom, const PxTransform& pose, const PxVec3 triangleVertices[3], PxU32 triangleIndex, PxReal contactDistance, PxReal meshContactMargin, PxReal toleranceLength, PxContactBuffer& contactBuffer)
{
using namespace aos;
const PxU32 triangleIndices[3]{ 0, 1, 2 };
PxInlineArray<PxU32, LOCAL_PCM_CONTACTS_SIZE> deferredContacts;
Gu::MultiplePersistentContactManifold multiManifold;
multiManifold.initialize();
PxContactBuffer contactBuffer0; contactBuffer0.reset();
const PxTransformV geomTransform = loadTransformU(pose);
const PxTransformV triangleTransform = loadTransformU(PxTransform(PxIdentity));
float radius0 = 0;
float radius1 = meshContactMargin;
PxU32 oldCount = contactBuffer.count;
switch (geom.getType())
{
case PxGeometryType::eCAPSULE:
{
const PxCapsuleGeometry& capsule = static_cast<const PxCapsuleGeometry&>(geom);
radius0 = capsule.radius;
const FloatV capsuleRadius = FLoad(capsule.radius);
const FloatV contactDist = FLoad(contactDistance + meshContactMargin);
const FloatV replaceBreakingThreshold = FMul(capsuleRadius, FLoad(0.001f));
const PxTransformV capsuleTransform = geomTransform;
const PxTransformV meshTransform = triangleTransform;
multiManifold.setRelativeTransform(capsuleTransform);
const Gu::CapsuleV capsuleV(V3LoadU(pose.p), V3LoadU(pose.q.rotate(PxVec3(capsule.halfHeight, 0, 0))), capsuleRadius);
Gu::PCMCapsuleVsMeshContactGeneration contactGeneration(capsuleV, contactDist, replaceBreakingThreshold, capsuleTransform, meshTransform, multiManifold, contactBuffer0, &deferredContacts);
contactGeneration.processTriangle(triangleVertices, triangleIndex, Gu::ETD_CONVEX_EDGE_ALL, triangleIndices);
contactGeneration.processContacts(GU_CAPSULE_MANIFOLD_CACHE_SIZE, false);
break;
}
case PxGeometryType::eBOX:
{
const PxBoxGeometry& box = static_cast<const PxBoxGeometry&>(geom);
const PxBounds3 hullAABB(-box.halfExtents, box.halfExtents);
const Vec3V boxExtents = V3LoadU(box.halfExtents);
const FloatV minMargin = Gu::CalculatePCMBoxMargin(boxExtents, toleranceLength, GU_PCM_MESH_MANIFOLD_EPSILON);
Cm::FastVertex2ShapeScaling idtScaling;
const FloatV contactDist = FLoad(contactDistance + meshContactMargin);
const FloatV replaceBreakingThreshold = FMul(minMargin, FLoad(0.05f));
const BoxV boxV(V3Zero(), boxExtents);
const PxTransformV boxTransform = geomTransform;
const PxTransformV meshTransform = triangleTransform;
PolygonalData polyData;
PCMPolygonalBox polyBox(box.halfExtents);
polyBox.getPolygonalData(&polyData);
const Mat33V identity = M33Identity();
SupportLocalImpl<BoxV> boxMap(boxV, boxTransform, identity, identity, true);
Gu::PCMConvexVsMeshContactGeneration contactGeneration(contactDist, replaceBreakingThreshold, boxTransform, meshTransform, multiManifold, contactBuffer0, polyData, &boxMap, &deferredContacts, idtScaling, true, true, NULL);
contactGeneration.processTriangle(triangleVertices, triangleIndex, Gu::ETD_CONVEX_EDGE_ALL, triangleIndices);
contactGeneration.processContacts(GU_SINGLE_MANIFOLD_CACHE_SIZE, false);
break;
}
case PxGeometryType::eCONVEXMESH:
{
const PxConvexMeshGeometry& convex = static_cast<const PxConvexMeshGeometry&>(geom);
const ConvexHullData* hullData = _getHullData(convex);
Cm::FastVertex2ShapeScaling convexScaling;
PxBounds3 hullAABB;
PolygonalData polyData;
const bool idtConvexScale = getPCMConvexData(convex, convexScaling, hullAABB, polyData);
const QuatV vQuat = QuatVLoadU(&convex.scale.rotation.x);
const Vec3V vScale = V3LoadU_SafeReadW(convex.scale.scale);
const FloatV minMargin = CalculatePCMConvexMargin(hullData, vScale, toleranceLength, GU_PCM_MESH_MANIFOLD_EPSILON);
const ConvexHullV convexHull(hullData, V3Zero(), vScale, vQuat, idtConvexScale);
const FloatV contactDist = FLoad(contactDistance + meshContactMargin);
const FloatV replaceBreakingThreshold = FMul(minMargin, FLoad(0.05f));
const PxTransformV convexTransform = geomTransform;
const PxTransformV meshTransform = triangleTransform;
SupportLocalImpl<Gu::ConvexHullV> convexMap(convexHull, convexTransform, convexHull.vertex2Shape, convexHull.shape2Vertex, false);
Gu::PCMConvexVsMeshContactGeneration contactGeneration(contactDist, replaceBreakingThreshold, convexTransform, meshTransform, multiManifold, contactBuffer0, polyData, &convexMap, &deferredContacts, convexScaling, idtConvexScale, true, NULL);
contactGeneration.processTriangle(triangleVertices, triangleIndex, Gu::ETD_CONVEX_EDGE_ALL, triangleIndices);
contactGeneration.processContacts(GU_SINGLE_MANIFOLD_CACHE_SIZE, false);
break;
}
default:
break;
}
for (PxU32 manifoldIndex = 0; manifoldIndex < multiManifold.mNumManifolds; ++manifoldIndex)
{
Gu::SinglePersistentContactManifold& manifold = *multiManifold.getManifold(manifoldIndex);
PxVec3 normal; V3StoreU(manifold.getWorldNormal(triangleTransform), normal);
for (PxU32 contactIndex = 0; contactIndex < manifold.getNumContacts(); ++contactIndex)
{
Gu::MeshPersistentContact& meshContact = manifold.getContactPoint(contactIndex);
PxContactPoint contact;
PxVec3 p0; V3StoreU(geomTransform.transform(meshContact.mLocalPointA), p0); p0 -= normal * radius0;
PxVec3 p1; V3StoreU(meshContact.mLocalPointB, p1); p1 += normal * radius1;
contact.point = (p0 + p1) * 0.5f;
contact.normal = normal;
contact.separation = normal.dot(p0 - p1);
contact.internalFaceIndex1 = triangleIndex;
contactBuffer.contact(contact);
}
}
return oldCount < contactBuffer.count;
}
///////////////////////////////////////////////////////////////////////////////
PxU32 PxCustomGeometry::getUniqueID()
{
static PxU32 uniqueID(0);
PxAtomicIncrement(reinterpret_cast<volatile PxI32*>(&uniqueID));
return uniqueID;
}
| 20,270 | C++ | 40.709876 | 256 | 0.732116 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuWindingNumberCluster.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_WINDING_NUMBER_CLUSTER_H
#define GU_WINDING_NUMBER_CLUSTER_H
/** \addtogroup geomutils
@{
*/
namespace physx
{
namespace Gu
{
template<typename R, typename V3>
struct ClusterApproximationT
{
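// Aggregate data for one BVH subtree: a bounding radius around the area-weighted
// centroid, plus the summed triangle areas and area-weighted normals - enough to
// approximate the subtree's total solid-angle contribution at a distant query point.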
R Radius;
R AreaSum;
V3 WeightedCentroid;
V3 WeightedNormalSum;
PX_FORCE_INLINE ClusterApproximationT() {}
PX_FORCE_INLINE ClusterApproximationT(R radius, R areaSum, const V3& weightedCentroid, const V3& weightedNormalSum) :
Radius(radius), AreaSum(areaSum), WeightedCentroid(weightedCentroid), WeightedNormalSum(weightedNormalSum)
{ }
};
}
}
/** @} */
#endif
| 2,284 | C | 37.728813 | 119 | 0.76007 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuSAH.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxAssert.h"
#include "foundation/PxAllocator.h"
#include "foundation/PxMemory.h"
#include "GuSAH.h"
using namespace physx;
using namespace Gu;
static PX_FORCE_INLINE float getSurfaceArea(const PxBounds3& bounds)
{
const PxVec3 e = bounds.maximum - bounds.minimum;
return 2.0f * (e.x * e.y + e.x * e.z + e.y * e.z);
}
SAH_Buffers::SAH_Buffers(PxU32 nb_prims)
{
mKeys = PX_ALLOCATE(float, nb_prims, "temp");
mCumulativeLower = PX_ALLOCATE(float, nb_prims, "temp");
mCumulativeUpper = PX_ALLOCATE(float, nb_prims, "temp");
mNb = nb_prims;
}
SAH_Buffers::~SAH_Buffers()
{
PX_FREE(mKeys);
PX_FREE(mCumulativeLower);
PX_FREE(mCumulativeUpper);
}
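// Evaluates a Surface Area Heuristic split: for each axis, primitives are sorted by
// centroid and bounds are accumulated from both ends so that the cost
// area(left)*count(left) + area(right)*count(right) can be evaluated for every split
// position in O(n). The cheapest (axis, index) pair wins and 'prims' is reordered
// along that axis; returns false when the best split is degenerate (one side would
// get all primitives, or all but one).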
bool SAH_Buffers::split(PxU32& leftCount, PxU32 nb, const PxU32* PX_RESTRICT prims, const PxBounds3* PX_RESTRICT boxes, const PxVec3* PX_RESTRICT centers)
{
PxU32 bestAxis = 0;
PxU32 bestIndex = 0;
float bestCost = PX_MAX_F32;
PX_ASSERT(nb<=mNb);
for(PxU32 axis=0;axis<3;axis++)
{
const PxU32* sorted;
{
float* keys = mKeys;
for(PxU32 i=0;i<nb;i++)
{
const PxU32 index = prims[i];
const float center = centers[index][axis];
keys[i] = center;
}
sorted = mSorters[axis].Sort(keys, nb).GetRanks();
}
float* cumulativeLower = mCumulativeLower;
float* cumulativeUpper = mCumulativeUpper;
/* if(0)
{
PxBounds3 bbox = PxBounds3::empty();
for(PxU32 i=0; i<nb; i++)
{
bbox.include(bboxes[references[axis][i]]);
bbox.include(boxes[prims[nb-sortedIndex-1]]);
}
for (size_t i = end - 1; i > begin; --i) {
bbox.extend(bboxes[references[axis][i]]);
costs[axis][i] = bbox.half_area() * (end - i);
}
bbox = BoundingBox<Scalar>::empty();
auto best_split = std::pair<Scalar, size_t>(std::numeric_limits<Scalar>::max(), end);
for (size_t i = begin; i < end - 1; ++i) {
bbox.extend(bboxes[references[axis][i]]);
auto cost = bbox.half_area() * (i + 1 - begin) + costs[axis][i + 1];
if (cost < best_split.first)
best_split = std::make_pair(cost, i + 1);
}
return best_split;
}*/
if(1)
{
// single pass over the sorted prims: lower bounds accumulate from the front, upper bounds from the back
PxBounds3 lower = PxBounds3::empty();
PxBounds3 upper = PxBounds3::empty();
// lower.minimum = lower.maximum = PxVec3(0.0f);
// upper.minimum = upper.maximum = PxVec3(0.0f);
#if PX_ENABLE_ASSERTS
float prevLowerCenter = -PX_MAX_F32;
float prevUpperCenter = PX_MAX_F32;
#endif
for(PxU32 i=0; i<nb; ++i)
{
const PxU32 lowSortedIndex = sorted[i];
const PxU32 highSortedIndex = sorted[nb-i-1];
//lower.Union(m_faceBounds[faces[i]]);
PX_ASSERT(centers[prims[lowSortedIndex]][axis]>=prevLowerCenter);
lower.include(boxes[prims[lowSortedIndex]]);
#if PX_ENABLE_ASSERTS
prevLowerCenter = centers[prims[lowSortedIndex]][axis];
#endif
//upper.Union(m_faceBounds[faces[numFaces - i - 1]]);
PX_ASSERT(centers[prims[highSortedIndex]][axis]<=prevUpperCenter);
upper.include(boxes[prims[highSortedIndex]]);
#if PX_ENABLE_ASSERTS
prevUpperCenter = centers[prims[highSortedIndex]][axis];
#endif
cumulativeLower[i] = getSurfaceArea(lower);
cumulativeUpper[nb - i - 1] = getSurfaceArea(upper);
}
// const float invTotalSA = 1.0f / cumulativeUpper[0];
// test all split positions
for (PxU32 i = 0; i < nb - 1; ++i)
{
const float pBelow = cumulativeLower[i];// * invTotalSA;
const float pAbove = cumulativeUpper[i];// * invTotalSA;
// const float cost = 0.125f + (pBelow * i + pAbove * float(nb - i));
const float cost = (pBelow * i + pAbove * float(nb - i));
if(cost <= bestCost)
{
bestCost = cost;
bestIndex = i;
bestAxis = axis;
}
}
}
}
leftCount = bestIndex + 1;
if(leftCount==1 || leftCount==nb)
{
// Invalid split
return false;
}
/*
// re-sort by best axis
FaceSorter predicate(&m_vertices[0], &m_indices[0], m_numFaces * 3, bestAxis);
std::sort(faces, faces + numFaces, predicate);
return bestIndex + 1;
*/
{
PxU32* tmp = reinterpret_cast<PxU32*>(mKeys);
PxMemCopy(tmp, prims, nb*sizeof(PxU32));
const PxU32* bestOrder = mSorters[bestAxis].GetRanks();
PxU32* dst = const_cast<PxU32*>(prims);
for(PxU32 i=0;i<nb;i++)
dst[i] = tmp[bestOrder[i]];
}
return true;
}
| 5,974 | C++ | 29.958549 | 154 | 0.67777 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuInternal.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxBounds3.h"
#include "geometry/PxCapsuleGeometry.h"
#include "foundation/PxIntrinsics.h"
#include "GuInternal.h"
#include "GuBox.h"
#include "GuVecPlane.h"
#include "foundation/PxVecMath.h"
using namespace physx::aos;
using namespace physx;
/**
Computes the aabb points.
\param pts [out] 8 box points
*/
void Gu::computeBoxPoints(const PxBounds3& bounds, PxVec3* PX_RESTRICT pts)
{
PX_ASSERT(pts);
// Get box corners
const PxVec3& minimum = bounds.minimum;
const PxVec3& maximum = bounds.maximum;
// 7+------+6 0 = ---
// /| /| 1 = +--
// / | / | 2 = ++-
// / 4+---/--+5 3 = -+-
// 3+------+2 / y z 4 = --+
// | / | / | / 5 = +-+
// |/ |/ |/ 6 = +++
// 0+------+1 *---x 7 = -++
// Generate 8 corners of the bbox
pts[0] = PxVec3(minimum.x, minimum.y, minimum.z);
pts[1] = PxVec3(maximum.x, minimum.y, minimum.z);
pts[2] = PxVec3(maximum.x, maximum.y, minimum.z);
pts[3] = PxVec3(minimum.x, maximum.y, minimum.z);
pts[4] = PxVec3(minimum.x, minimum.y, maximum.z);
pts[5] = PxVec3(maximum.x, minimum.y, maximum.z);
pts[6] = PxVec3(maximum.x, maximum.y, maximum.z);
pts[7] = PxVec3(minimum.x, maximum.y, maximum.z);
}
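// PhysX plane convention: the plane normal is the pose's X basis vector and the
// plane passes through pose.p, hence d = -p.dot(n).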
PxPlane Gu::getPlane(const PxTransform& pose)
{
const PxVec3 n = pose.q.getBasisVector0();
return PxPlane(n, -pose.p.dot(n));
}
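// Builds an OBB enclosing the volume swept by an oriented box (center/extents/rot)
// translated by unitDir*distance. The swept box's first axis is the sweep direction
// itself; the second axis is derived from the source box axis least aligned with it,
// projected into the plane perpendicular to the sweep.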
void Gu::computeSweptBox(Gu::Box& dest, const PxVec3& extents, const PxVec3& center, const PxMat33& rot, const PxVec3& unitDir, const PxReal distance)
{
PxVec3 R1, R2;
PxComputeBasisVectors(unitDir, R1, R2);
PxReal dd[3];
dd[0] = PxAbs(rot.column0.dot(unitDir));
dd[1] = PxAbs(rot.column1.dot(unitDir));
dd[2] = PxAbs(rot.column2.dot(unitDir));
PxReal dmax = dd[0];
PxU32 ax0=1;
PxU32 ax1=2;
if(dd[1]>dmax)
{
dmax=dd[1];
ax0=0;
ax1=2;
}
if(dd[2]>dmax)
{
dmax=dd[2];
ax0=0;
ax1=1;
}
if(dd[ax1]<dd[ax0])
PxSwap(ax0, ax1);
R1 = rot[ax0];
R1 -= (R1.dot(unitDir))*unitDir; // Project to plane whose normal is dir
R1.normalize();
R2 = unitDir.cross(R1);
dest.setAxes(unitDir, R1, R2);
PxReal offset[3];
offset[0] = distance;
offset[1] = distance*(unitDir.dot(R1));
offset[2] = distance*(unitDir.dot(R2));
for(PxU32 r=0; r<3; r++)
{
const PxVec3& R = dest.rot[r];
dest.extents[r] = offset[r]*0.5f + PxAbs(rot.column0.dot(R))*extents.x + PxAbs(rot.column1.dot(R))*extents.y + PxAbs(rot.column2.dot(R))*extents.z;
}
dest.center = center + unitDir*distance*0.5f;
}
| 4,162 | C++ | 32.304 | 150 | 0.677559 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuBVHTestsSIMD.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_BVH_TESTS_SIMD_H
#define GU_BVH_TESTS_SIMD_H
#include "foundation/PxTransform.h"
#include "foundation/PxBounds3.h"
#include "geometry/PxBoxGeometry.h"
#include "geometry/PxSphereGeometry.h"
#include "geometry/PxCapsuleGeometry.h"
#include "foundation/PxVecMath.h"
namespace physx
{
using namespace aos;
namespace Gu
{
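// SIMD ray/segment-vs-AABB overlap test: combines an AABB overlap check between the
// (possibly inflated) node bounds and the bounds of the ray segment with three
// cross-axis (ray direction x box edge) separating-axis tests. check<true> inflates
// the node extents by mInflation, which is how the capsule test below reuses it.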
struct RayAABBTest
{
PX_FORCE_INLINE RayAABBTest(const PxVec3& origin_, const PxVec3& unitDir_, const PxReal maxDist, const PxVec3& inflation_)
: mOrigin(V3LoadU(origin_))
, mDir(V3LoadU(unitDir_))
, mDirYZX(V3PermYZX(mDir))
, mInflation(V3LoadU(inflation_))
, mAbsDir(V3Abs(mDir))
, mAbsDirYZX(V3PermYZX(mAbsDir))
{
const PxVec3 ext = maxDist >= PX_MAX_F32 ? PxVec3( unitDir_.x == 0 ? origin_.x : PxSign(unitDir_.x)*PX_MAX_F32,
unitDir_.y == 0 ? origin_.y : PxSign(unitDir_.y)*PX_MAX_F32,
unitDir_.z == 0 ? origin_.z : PxSign(unitDir_.z)*PX_MAX_F32)
: origin_ + unitDir_ * maxDist;
mRayMin = V3Min(mOrigin, V3LoadU(ext));
mRayMax = V3Max(mOrigin, V3LoadU(ext));
}
PX_FORCE_INLINE void setDistance(PxReal distance)
{
const Vec3V ext = V3ScaleAdd(mDir, FLoad(distance), mOrigin);
mRayMin = V3Min(mOrigin, ext);
mRayMax = V3Max(mOrigin, ext);
}
template<bool TInflate>
PX_FORCE_INLINE PxU32 check(const Vec3V center, const Vec3V extents) const
{
const Vec3V iExt = TInflate ? V3Add(extents, mInflation) : extents;
// coordinate axes
const Vec3V nodeMax = V3Add(center, iExt);
const Vec3V nodeMin = V3Sub(center, iExt);
// cross axes
const Vec3V offset = V3Sub(mOrigin, center);
const Vec3V offsetYZX = V3PermYZX(offset);
const Vec3V iExtYZX = V3PermYZX(iExt);
const Vec3V f = V3NegMulSub(mDirYZX, offset, V3Mul(mDir, offsetYZX));
const Vec3V g = V3MulAdd(iExt, mAbsDirYZX, V3Mul(iExtYZX, mAbsDir));
const BoolV
maskA = V3IsGrtrOrEq(nodeMax, mRayMin),
maskB = V3IsGrtrOrEq(mRayMax, nodeMin),
maskC = V3IsGrtrOrEq(g, V3Abs(f));
const BoolV andABCMasks = BAnd(BAnd(maskA, maskB), maskC);
return BAllEqTTTT(andABCMasks);
}
const Vec3V mOrigin, mDir, mDirYZX, mInflation, mAbsDir, mAbsDirYZX;
Vec3V mRayMin, mRayMax;
protected:
RayAABBTest& operator=(const RayAABBTest&);
};
// probably not worth having a SIMD version of this unless the traversal passes Vec3Vs
struct AABBAABBTest
{
PX_FORCE_INLINE AABBAABBTest(const PxTransform&t, const PxBoxGeometry&b)
: mCenter(V3LoadU(t.p))
, mExtents(V3LoadU(b.halfExtents))
{ }
PX_FORCE_INLINE AABBAABBTest(const PxBounds3& b)
: mCenter(V3LoadU(b.getCenter()))
, mExtents(V3LoadU(b.getExtents()))
{ }
PX_FORCE_INLINE PxIntBool operator()(const Vec3V center, const Vec3V extents) const
{
//PxVec3 c; PxVec3_From_Vec3V(center, c);
//PxVec3 e; PxVec3_From_Vec3V(extents, e);
//if(PxAbs(c.x - mCenter.x) > mExtents.x + e.x) return IntFalse;
//if(PxAbs(c.y - mCenter.y) > mExtents.y + e.y) return IntFalse;
//if(PxAbs(c.z - mCenter.z) > mExtents.z + e.z) return IntFalse;
//return IntTrue;
return PxIntBool(V3AllGrtrOrEq(V3Add(mExtents, extents), V3Abs(V3Sub(center, mCenter))));
}
private:
AABBAABBTest& operator=(const AABBAABBTest&);
const Vec3V mCenter, mExtents;
};
struct SphereAABBTest
{
PX_FORCE_INLINE SphereAABBTest(const PxTransform& t, const PxSphereGeometry& s)
: mCenter(V3LoadU(t.p))
, mRadius2(FLoad(s.radius * s.radius))
{}
PX_FORCE_INLINE SphereAABBTest(const PxVec3& center, PxF32 radius)
: mCenter(V3LoadU(center))
, mRadius2(FLoad(radius * radius))
{}
PX_FORCE_INLINE PxIntBool operator()(const Vec3V boxCenter, const Vec3V boxExtents) const
{
const Vec3V offset = V3Sub(mCenter, boxCenter);
const Vec3V closest = V3Clamp(offset, V3Neg(boxExtents), boxExtents);
const Vec3V d = V3Sub(offset, closest);
return PxIntBool(BAllEqTTTT(FIsGrtrOrEq(mRadius2, V3Dot(d, d))));
}
private:
SphereAABBTest& operator=(const SphereAABBTest&);
const Vec3V mCenter;
const FloatV mRadius2;
};
// The Opcode capsule-AABB traversal test seems to be *exactly* the same as the ray-box test inflated by the capsule radius (so not a true capsule/box test)
// and the code for the ray-box test is better. TODO: check the zero length case and use the sphere traversal if this one fails.
// (OTOH it's not that hard to adapt the Ray-AABB test to a capsule test)
struct CapsuleAABBTest: private RayAABBTest
{
PX_FORCE_INLINE CapsuleAABBTest(const PxVec3& origin, const PxVec3& unitDir, const PxReal length, const PxVec3& inflation)
: RayAABBTest(origin, unitDir, length, inflation)
{}
PX_FORCE_INLINE PxIntBool operator()(const Vec3VArg center, const Vec3VArg extents) const
{
return PxIntBool(RayAABBTest::check<true>(center, extents));
}
};
template<bool fullTest>
struct OBBAABBTests
{
OBBAABBTests(const PxVec3& pos, const PxMat33& rot, const PxVec3& halfExtentsInflated)
{
const Vec3V eps = V3Load(1e-6f);
mT = V3LoadU(pos);
mExtents = V3LoadU(halfExtentsInflated);
// storing the transpose matrices yields a simpler SIMD test
mRT = Mat33V_From_PxMat33(rot.getTranspose());
mART = Mat33V(V3Add(V3Abs(mRT.col0), eps), V3Add(V3Abs(mRT.col1), eps), V3Add(V3Abs(mRT.col2), eps));
mBB_xyz = M33TrnspsMulV3(mART, mExtents);
if(fullTest)
{
const Vec3V eYZX = V3PermYZX(mExtents), eZXY = V3PermZXY(mExtents);
mBB_123 = V3MulAdd(eYZX, V3PermZXY(mART.col0), V3Mul(eZXY, V3PermYZX(mART.col0)));
mBB_456 = V3MulAdd(eYZX, V3PermZXY(mART.col1), V3Mul(eZXY, V3PermYZX(mART.col1)));
mBB_789 = V3MulAdd(eYZX, V3PermZXY(mART.col2), V3Mul(eZXY, V3PermYZX(mART.col2)));
}
}
// TODO: force inline it?
PxIntBool operator()(const Vec3V center, const Vec3V extents) const
{
const Vec3V t = V3Sub(mT, center);
// class I - axes of AABB
if(V3OutOfBounds(t, V3Add(extents, mBB_xyz)))
return PxIntFalse;
const Vec3V rX = mRT.col0, rY = mRT.col1, rZ = mRT.col2;
const Vec3V arX = mART.col0, arY = mART.col1, arZ = mART.col2;
const FloatV eX = V3GetX(extents), eY = V3GetY(extents), eZ = V3GetZ(extents);
const FloatV tX = V3GetX(t), tY = V3GetY(t), tZ = V3GetZ(t);
// class II - axes of OBB
{
const Vec3V v = V3ScaleAdd(rZ, tZ, V3ScaleAdd(rY, tY, V3Scale(rX, tX)));
const Vec3V v2 = V3ScaleAdd(arZ, eZ, V3ScaleAdd(arY, eY, V3ScaleAdd(arX, eX, mExtents)));
if(V3OutOfBounds(v, v2))
return PxIntFalse;
}
if(!fullTest)
return PxIntTrue;
// class III - edge cross products. Almost all OBB tests early-out with type I or type II,
// so early-outs here probably aren't useful (TODO: profile)
const Vec3V va = V3NegScaleSub(rZ, tY, V3Scale(rY, tZ));
const Vec3V va2 = V3ScaleAdd(arY, eZ, V3ScaleAdd(arZ, eY, mBB_123));
const BoolV ba = BOr(V3IsGrtr(va, va2), V3IsGrtr(V3Neg(va2), va));
const Vec3V vb = V3NegScaleSub(rX, tZ, V3Scale(rZ, tX));
const Vec3V vb2 = V3ScaleAdd(arX, eZ, V3ScaleAdd(arZ, eX, mBB_456));
const BoolV bb = BOr(V3IsGrtr(vb, vb2), V3IsGrtr(V3Neg(vb2), vb));
const Vec3V vc = V3NegScaleSub(rY, tX, V3Scale(rX, tY));
const Vec3V vc2 = V3ScaleAdd(arX, eY, V3ScaleAdd(arY, eX, mBB_789));
const BoolV bc = BOr(V3IsGrtr(vc, vc2), V3IsGrtr(V3Neg(vc2), vc));
return PxIntBool(BAllEqFFFF(BOr(ba, BOr(bb,bc))));
}
Vec3V mExtents; // extents of OBB
Vec3V mT; // translation of OBB
Mat33V mRT; // transpose of rotation matrix of OBB
Mat33V mART; // transpose of mRT, padded by epsilon
Vec3V mBB_xyz; // extents of OBB along coordinate axes
Vec3V mBB_123; // projections of extents onto edge-cross axes
Vec3V mBB_456;
Vec3V mBB_789;
};
typedef OBBAABBTests<true> OBBAABBTest;
}
}
#endif
| 9,259 | C | 34.891473 | 156 | 0.718652 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuAABBTreeBounds.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_AABBTREE_BOUNDS_H
#define GU_AABBTREE_BOUNDS_H
#include "common/PxPhysXCommonConfig.h"
namespace physx
{
namespace Gu
{
class PX_PHYSX_COMMON_API AABBTreeBounds
{
public:
AABBTreeBounds() : mBounds(NULL), mUserAllocated(false) {}
~AABBTreeBounds() { release(); }
void init(PxU32 nbBounds, const PxBounds3* bounds=NULL);
void resize(PxU32 newSize, PxU32 previousSize);
void release();
PX_FORCE_INLINE PxBounds3* getBounds() { return mBounds; }
PX_FORCE_INLINE const PxBounds3* getBounds() const { return mBounds; }
PX_FORCE_INLINE void moveFrom(AABBTreeBounds& source)
{
mBounds = source.mBounds;
source.mBounds = NULL;
}
PX_FORCE_INLINE void takeOwnership() { mUserAllocated = true; }
PX_FORCE_INLINE bool ownsMemory() const { return mUserAllocated==false; }
PX_FORCE_INLINE void setBounds(PxBounds3* bounds) { mBounds = bounds; mUserAllocated=true; }
private:
PxBounds3* mBounds;
PxU32 mUserAllocated;
};
} // namespace Gu
}
#endif // GU_AABBTREE_BOUNDS_H
| 2,842 | C | 39.614285 | 97 | 0.717101 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuBucketPruner.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxMemory.h"
#include "foundation/PxBitUtils.h"
#include "GuBucketPruner.h"
#include "GuInternal.h"
#include "CmVisualization.h"
#include "CmRadixSort.h"
using namespace physx::aos;
using namespace physx;
using namespace Gu;
#define INVALID_HANDLE 0xffffffff
/*
TODO:
- if Core is always available, mSortedObjects could be replaced with just indices to mCoreObjects => less memory.
- UTS:
- test that queries against empty boxes all return false
- invalidate after 16 removes
- check shiftOrigin stuff (esp what happens to emptied boxes)
- isn't there a very hard-to-find bug waiting to happen in there,
when the shift touches the empty box and overwrites mData0/mData1 with "wrong" values that break the sort?
- revisit updateObject/removeObject
- optimize/cache computation of free global bounds before clipRay
- remove temp memory buffers (sorted arrays)
- take care of code duplication
- better code to generate SIMD 0x7fffffff
- refactor SIMD tests
- optimize:
- better split values
- optimize update (bitmap, less data copy, etc)
- use ray limits in traversal code too?
- the SIMD XBOX code operates on Min/Max rather than C/E. Change format?
- or just try the alternative ray-box code (as on PC) ==> pretty much exactly the same speed
*/
//#define VERIFY_SORT
//#define BRUTE_FORCE_LIMIT 32
#define LOCAL_SIZE 256 // Size of various local arrays. Dynamic allocations occur if exceeded.
#define USE_SIMD // Use SIMD code or not (sanity performance check)
#define NODE_SORT // Enable/disable node sorting
#define NODE_SORT_MIN_COUNT 16 // Limit above which node sorting is performed
#if PX_INTEL_FAMILY
#if COMPILE_VECTOR_INTRINSICS
#define CAN_USE_MOVEMASK
#endif
#endif
#define ALIGN16(size) ((unsigned(size)+15) & unsigned(~15))
#ifdef _DEBUG
#define AlignedLoad V4LoadU
#define AlignedStore V4StoreU
#else
#define AlignedLoad V4LoadA
#define AlignedStore V4StoreA
#endif
// SAT-based ray-box overlap test has accuracy issues for long rays, so we clip them against the global AABB to limit these issues.
static void clipRay(const PxVec3& rayOrig, const PxVec3& rayDir, float& maxDist, const PxVec3& boxMin, const PxVec3& boxMax)
{
const PxVec3 boxCenter = (boxMax + boxMin)*0.5f;
const PxVec3 boxExtents = (boxMax - boxMin)*0.5f;
const float dpc = boxCenter.dot(rayDir);
const float extentsMagnitude = boxExtents.magnitude();
const float dpMin = dpc - extentsMagnitude;
const float dpMax = dpc + extentsMagnitude;
const float dpO = rayOrig.dot(rayDir);
const float boxLength = extentsMagnitude * 2.0f;
const float distToBox = PxMin(PxAbs(dpMin - dpO), PxAbs(dpMax - dpO));
maxDist = distToBox + boxLength * 2.0f;
}
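// Minimal usage sketch (illustrative, not part of PhysX): clamp an "infinite"
// raycast against known world bounds before traversal, mirroring what stab()
// does further below when maxDist==PX_MAX_F32. 'world' is an assumed input.
static PX_FORCE_INLINE float clampRayLength(const PxVec3& orig, const PxVec3& unitDir, const PxBounds3& world)
{
	float maxDist = PX_MAX_F32;
	clipRay(orig, unitDir, maxDist, world.minimum, world.maximum);
	return maxDist;
}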
BucketPrunerNode::BucketPrunerNode()
{
for(PxU32 i=0;i<5;i++)
mBucketBox[i].setEmpty();
}
static const PxU8 gCodes[] = { 4, 4, 4, 4, 4, 3, 2, 2,
4, 1, 0, 0, 4, 1, 0, 0,
4, 1, 0, 0, 2, 1, 0, 0,
3, 1, 0, 0, 2, 1, 0, 0};
#ifdef CAN_USE_MOVEMASK
/*static PX_FORCE_INLINE PxU32 classifyBox_x86(const BucketBox& box, const PxVec4& limits, const bool useY, const bool isCrossBucket)
{
const Vec4V extents = AlignedLoad(&box.mExtents.x);
const Vec4V center = AlignedLoad(&box.mCenter.x);
const Vec4V plus = V4Add(extents, center);
const Vec4V minus = V4Sub(extents, center);
Vec4V tmp;
if(useY) // PT: this is a constant so branch prediction works here
tmp = _mm_shuffle_ps(plus, minus, _MM_SHUFFLE(0,1,0,1));
else
tmp = _mm_shuffle_ps(plus, minus, _MM_SHUFFLE(0,2,0,2));
const Vec4V comp = _mm_shuffle_ps(tmp, tmp, _MM_SHUFFLE(0,2,1,3)); // oh well, nm
const PxU32 Code = (PxU32)_mm_movemask_ps(V4IsGrtr(V4LoadA(&limits.x), comp));
return gCodes[Code | PxU32(isCrossBucket)<<4];
}*/
static PX_FORCE_INLINE PxU32 classifyBox_x86(const Vec4V boxMin, const Vec4V boxMax, const PxVec4& limits, const bool useY, const bool isCrossBucket)
{
const Vec4V plus = boxMax;
const Vec4V minus = V4Neg(boxMin);
Vec4V tmp;
if(useY) // PT: this is a constant so branch prediction works here
tmp = _mm_shuffle_ps(plus, minus, _MM_SHUFFLE(0,1,0,1));
else
tmp = _mm_shuffle_ps(plus, minus, _MM_SHUFFLE(0,2,0,2));
const Vec4V comp = _mm_shuffle_ps(tmp, tmp, _MM_SHUFFLE(0,2,1,3)); // oh well, nm
const PxU32 Code = PxU32(_mm_movemask_ps(V4IsGrtr(V4LoadA(&limits.x), comp)));
return gCodes[Code | PxU32(isCrossBucket)<<4];
}
#endif
#ifdef CAN_USE_MOVEMASK
#if PX_DEBUG
#define USE_CLASSIFY_BOX
#endif
#else
#define USE_CLASSIFY_BOX
#endif
#ifdef USE_CLASSIFY_BOX
static PX_FORCE_INLINE PxU32 classifyBox(const BucketBox& box, const float limitX, const float limitYZ, const PxU32 yz, const bool isCrossBucket)
{
const bool upperPart = (box.mCenter[yz] + box.mExtents[yz])<limitYZ;
const bool lowerPart = (box.mCenter[yz] - box.mExtents[yz])>limitYZ;
const bool leftPart = (box.mCenter.x + box.mExtents.x)<limitX;
const bool rightPart = (box.mCenter.x - box.mExtents.x)>limitX;
// Table-based box classification avoids many branches
const PxU32 Code = PxU32(rightPart)|(PxU32(leftPart)<<1)|(PxU32(lowerPart)<<2)|(PxU32(upperPart)<<3);
return gCodes[Code + (isCrossBucket ? 16 : 0)];
}
#endif
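// Illustrative sanity check (relies only on the gCodes table and the bit
// layout above): with the leftPart and upperPart bits set, the region code is
// (1<<1)|(1<<3) = 10, which gCodes maps to child bucket 0. Code 0 (a box
// straddling both limits) maps to bucket 4, the "cross" bucket, and the upper
// 16 table entries handle the isCrossBucket case.
static PX_FORCE_INLINE void gCodesExample()
{
	const PxU32 code = (1u<<1)|(1u<<3);	// leftPart + upperPart
	PX_ASSERT(gCodes[code]==0);
	PX_UNUSED(code);
}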
void BucketPrunerNode::classifyBoxes( float limitX, float limitYZ,
PxU32 nb, BucketBox* PX_RESTRICT boxes, const PrunerPayload* PX_RESTRICT objects,
const PxTransform* PX_RESTRICT transforms,
BucketBox* PX_RESTRICT sortedBoxes, PrunerPayload* PX_RESTRICT sortedObjects,
PxTransform* PX_RESTRICT sortedTransforms,
bool isCrossBucket, PxU32 sortAxis)
{
const PxU32 yz = PxU32(sortAxis == 1 ? 2 : 1);
#ifdef _DEBUG
{
float prev = boxes[0].mDebugMin;
for(PxU32 i=1;i<nb;i++)
{
const float current = boxes[i].mDebugMin;
PX_ASSERT(current>=prev);
prev = current;
}
}
#endif
// Local (stack-based) min/max bucket bounds
PX_ALIGN(16, PxVec4) bucketBoxMin[5];
PX_ALIGN(16, PxVec4) bucketBoxMax[5];
{
const PxBounds3 empty = PxBounds3::empty();
for(PxU32 i=0;i<5;i++)
{
mCounters[i] = 0;
bucketBoxMin[i] = PxVec4(empty.minimum, 0.0f);
bucketBoxMax[i] = PxVec4(empty.maximum, 0.0f);
}
}
{
#ifdef CAN_USE_MOVEMASK
// DS: order doesn't play nice with x86 shuffles :-|
PX_ALIGN(16, PxVec4) limits(-limitX, limitX, -limitYZ, limitYZ);
const bool useY = yz==1;
#endif
// Determine in which bucket each object falls, update bucket bounds
for(PxU32 i=0;i<nb;i++)
{
const Vec4V boxCenterV = AlignedLoad(&boxes[i].mCenter.x);
const Vec4V boxExtentsV = AlignedLoad(&boxes[i].mExtents.x);
const Vec4V boxMinV = V4Sub(boxCenterV, boxExtentsV);
const Vec4V boxMaxV = V4Add(boxCenterV, boxExtentsV);
#ifdef CAN_USE_MOVEMASK
// const PxU32 index = classifyBox_x86(boxes[i], limits, useY, isCrossBucket);
const PxU32 index = classifyBox_x86(boxMinV, boxMaxV, limits, useY, isCrossBucket);
#if PX_DEBUG
const PxU32 index_ = classifyBox(boxes[i], limitX, limitYZ, yz, isCrossBucket);
PX_ASSERT(index == index_);
#endif
#else
const PxU32 index = classifyBox(boxes[i], limitX, limitYZ, yz, isCrossBucket);
#endif
// Merge boxes
{
const Vec4V mergedMinV = V4Min(V4LoadA(&bucketBoxMin[index].x), boxMinV);
const Vec4V mergedMaxV = V4Max(V4LoadA(&bucketBoxMax[index].x), boxMaxV);
V4StoreA(mergedMinV, &bucketBoxMin[index].x);
V4StoreA(mergedMaxV, &bucketBoxMax[index].x);
}
boxes[i].mData0 = index; // Store bucket index for current box in this temporary location
mCounters[index]++;
}
}
{
// Regenerate offsets
mOffsets[0]=0;
for(PxU32 i=0;i<4;i++)
mOffsets[i+1] = mOffsets[i] + mCounters[i];
}
{
// Group boxes with same bucket index together
for(PxU32 i=0;i<nb;i++)
{
const PxU32 bucketOffset = mOffsets[boxes[i].mData0]++; // Bucket index for current box was stored in mData0 by previous loop
// The 2 following lines are the same as:
// sortedBoxes[bucketOffset] = boxes[i];
AlignedStore(AlignedLoad(&boxes[i].mCenter.x), &sortedBoxes[bucketOffset].mCenter.x);
AlignedStore(AlignedLoad(&boxes[i].mExtents.x), &sortedBoxes[bucketOffset].mExtents.x);
#ifdef _DEBUG
sortedBoxes[bucketOffset].mDebugMin = boxes[i].mDebugMin;
#endif
sortedObjects[bucketOffset] = objects[i];
sortedTransforms[bucketOffset] = transforms[i];
}
}
{
// Regenerate offsets
mOffsets[0]=0;
for(PxU32 i=0;i<4;i++)
mOffsets[i+1] = mOffsets[i] + mCounters[i];
}
{
// Convert local (stack-based) min/max bucket bounds to persistent center/extents format
const float Half = 0.5f;
const FloatV HalfV = FLoad(Half);
PX_ALIGN(16, PxVec4) bucketCenter;
PX_ALIGN(16, PxVec4) bucketExtents;
for(PxU32 i=0;i<5;i++)
{
// The following lines are the same as:
// mBucketBox[i].mCenter = bucketBox[i].getCenter();
// mBucketBox[i].mExtents = bucketBox[i].getExtents();
const Vec4V bucketBoxMinV = V4LoadA(&bucketBoxMin[i].x);
const Vec4V bucketBoxMaxV = V4LoadA(&bucketBoxMax[i].x);
const Vec4V bucketBoxCenterV = V4Scale(V4Add(bucketBoxMaxV, bucketBoxMinV), HalfV);
const Vec4V bucketBoxExtentsV = V4Scale(V4Sub(bucketBoxMaxV, bucketBoxMinV), HalfV);
V4StoreA(bucketBoxCenterV, &bucketCenter.x);
V4StoreA(bucketBoxExtentsV, &bucketExtents.x);
mBucketBox[i].mCenter = PxVec3(bucketCenter.x, bucketCenter.y, bucketCenter.z);
mBucketBox[i].mExtents = PxVec3(bucketExtents.x, bucketExtents.y, bucketExtents.z);
}
}
#ifdef _DEBUG
for(PxU32 j=0;j<5;j++)
{
const PxU32 count = mCounters[j];
if(count)
{
const BucketBox* base = sortedBoxes + mOffsets[j];
float prev = base[0].mDebugMin;
for(PxU32 i=1;i<count;i++)
{
const float current = base[i].mDebugMin;
PX_ASSERT(current>=prev);
prev = current;
}
}
}
#endif
}
///////////////////////////////////////////////////////////////////////////////
static void processChildBuckets(PxU32 nbAllocated,
BucketBox* sortedBoxesInBucket, PrunerPayload* sortedObjectsInBucket,
PxTransform* sortedTransformsInBucket,
const BucketPrunerNode& bucket, BucketPrunerNode* PX_RESTRICT childBucket,
BucketBox* PX_RESTRICT baseBucketsBoxes, PrunerPayload* PX_RESTRICT baseBucketsObjects,
PxTransform* baseBucketTransforms,
PxU32 sortAxis)
{
PX_UNUSED(nbAllocated);
const PxU32 yz = PxU32(sortAxis == 1 ? 2 : 1);
for(PxU32 i=0;i<5;i++)
{
const PxU32 nbInBucket = bucket.mCounters[i];
if(!nbInBucket)
{
childBucket[i].initCounters();
continue;
}
BucketBox* bucketsBoxes = baseBucketsBoxes + bucket.mOffsets[i];
PrunerPayload* bucketsObjects = baseBucketsObjects + bucket.mOffsets[i];
PxTransform* bucketTransforms = baseBucketTransforms + bucket.mOffsets[i];
PX_ASSERT(nbInBucket<=nbAllocated);
const float limitX = bucket.mBucketBox[i].mCenter.x;
const float limitYZ = bucket.mBucketBox[i].mCenter[yz];
const bool isCrossBucket = i==4;
childBucket[i].classifyBoxes(limitX, limitYZ, nbInBucket, bucketsBoxes, bucketsObjects,
bucketTransforms,
sortedBoxesInBucket, sortedObjectsInBucket,
sortedTransformsInBucket,
isCrossBucket, sortAxis);
PxMemCopy(bucketsBoxes, sortedBoxesInBucket, sizeof(BucketBox)*nbInBucket);
PxMemCopy(bucketsObjects, sortedObjectsInBucket, sizeof(PrunerPayload)*nbInBucket);
PxMemCopy(bucketTransforms, sortedTransformsInBucket, sizeof(PxTransform)*nbInBucket);
}
}
///////////////////////////////////////////////////////////////////////////////
static PX_FORCE_INLINE PxU32 encodeFloat(PxU32 newPos)
{
//we may need to special-case -0 and +0,
//but it should make no practical difference.
if(newPos & PX_SIGN_BITMASK) //negative?
return ~newPos;//reverse sequence of negative numbers
else
return newPos | PX_SIGN_BITMASK; // flip sign
}
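// Minimal illustration (assumes IEEE-754 floats, as the rest of this file
// does): encodeFloat maps float ordering onto unsigned integer ordering, so
// the sorted-axis early-outs below can compare encoded bounds with plain
// integer comparisons.
static PX_FORCE_INLINE bool encodeFloatOrderExample()
{
	const PxU32 ea = encodeFloat(PxUnionCast<PxU32, PxF32>(-1.0f));
	const PxU32 eb = encodeFloat(PxUnionCast<PxU32, PxF32>(0.5f));
	const PxU32 ec = encodeFloat(PxUnionCast<PxU32, PxF32>(2.0f));
	return ea<eb && eb<ec;	// ordering preserved: -1.0f < 0.5f < 2.0f
}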
static PX_FORCE_INLINE void computeRayLimits(float& rayMin, float& rayMax, const PxVec3& rayOrig, const PxVec3& rayDir, float maxDist, PxU32 sortAxis)
{
const float rayOrigValue = rayOrig[sortAxis];
const float rayDirValue = rayDir[sortAxis] * maxDist;
rayMin = PxMin(rayOrigValue, rayOrigValue + rayDirValue);
rayMax = PxMax(rayOrigValue, rayOrigValue + rayDirValue);
}
static PX_FORCE_INLINE void computeRayLimits(float& rayMin, float& rayMax, const PxVec3& rayOrig, const PxVec3& rayDir, float maxDist, const PxVec3& inflate, PxU32 sortAxis)
{
const float inflateValue = inflate[sortAxis];
const float rayOrigValue = rayOrig[sortAxis];
const float rayDirValue = rayDir[sortAxis] * maxDist;
rayMin = PxMin(rayOrigValue, rayOrigValue + rayDirValue) - inflateValue;
rayMax = PxMax(rayOrigValue, rayOrigValue + rayDirValue) + inflateValue;
}
static PX_FORCE_INLINE void encodeBoxMinMax(BucketBox& box, const PxU32 axis)
{
const float min = box.mCenter[axis] - box.mExtents[axis];
const float max = box.mCenter[axis] + box.mExtents[axis];
const PxU32* binaryMin = reinterpret_cast<const PxU32*>(&min);
const PxU32* binaryMax = reinterpret_cast<const PxU32*>(&max);
box.mData0 = encodeFloat(binaryMin[0]);
box.mData1 = encodeFloat(binaryMax[0]);
}
///////////////////////////////////////////////////////////////////////////////
BucketPrunerCore::BucketPrunerCore(bool externalMemory) :
mCoreNbObjects (0),
mCoreCapacity (0),
mCoreBoxes (NULL),
mCoreObjects (NULL),
mCoreTransforms (NULL),
mCoreRemap (NULL),
mSortedWorldBoxes (NULL),
mSortedObjects (NULL),
mSortedTransforms (NULL),
#ifdef FREE_PRUNER_SIZE
mNbFree (0),
#endif
mSortedNb (0),
mSortedCapacity (0),
mSortAxis (0),
mDirty (false),
mOwnMemory (!externalMemory)
{
mGlobalBox.setEmpty();
mLevel1.initCounters();
for(PxU32 i=0;i<5;i++)
mLevel2[i].initCounters();
for(PxU32 j=0;j<5;j++)
for(PxU32 i=0;i<5;i++)
mLevel3[j][i].initCounters();
}
BucketPrunerCore::~BucketPrunerCore()
{
release();
}
void BucketPrunerCore::release()
{
mDirty = true;
mCoreNbObjects = 0;
mCoreCapacity = 0;
if(mOwnMemory)
{
PX_FREE(mCoreBoxes);
PX_FREE(mCoreObjects);
PX_FREE(mCoreTransforms);
PX_FREE(mCoreRemap);
}
PX_FREE(mSortedWorldBoxes);
PX_FREE(mSortedObjects);
PX_FREE(mSortedTransforms);
mSortedNb = 0;
mSortedCapacity = 0;
#ifdef FREE_PRUNER_SIZE
mNbFree = 0;
#endif
#ifdef USE_REGULAR_HASH_MAP
mMap.clear();
#else
mMap.purge();
#endif
}
void BucketPrunerCore::setExternalMemory(PxU32 nbObjects, PxBounds3* boxes, PrunerPayload* objects, PxTransform* transforms)
{
PX_ASSERT(!mOwnMemory);
mCoreNbObjects = nbObjects;
mCoreBoxes = boxes;
mCoreObjects = objects;
mCoreTransforms = transforms;
mCoreRemap = NULL;
}
void BucketPrunerCore::allocateSortedMemory(PxU32 nb)
{
mSortedNb = nb;
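// Reuse the current allocation when the new count still fits and isn't
// wastefully small (i.e. at least half the capacity); otherwise reallocate
// to the next power of two.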
if(nb<=mSortedCapacity && (nb>=mSortedCapacity/2))
return;
const PxU32 capacity = PxNextPowerOfTwo(nb);
mSortedCapacity = capacity;
PxU32 bytesNeededForBoxes = capacity*sizeof(BucketBox);
bytesNeededForBoxes = ALIGN16(bytesNeededForBoxes);
PxU32 bytesNeededForObjects = capacity*sizeof(PrunerPayload);
bytesNeededForObjects = ALIGN16(bytesNeededForObjects);
// PT: TODO: I don't remember what this alignment is for, maybe we don't need it
PxU32 bytesNeededForTransforms = capacity*sizeof(PxTransform);
bytesNeededForTransforms = ALIGN16(bytesNeededForTransforms);
PX_FREE(mSortedObjects);
PX_FREE(mSortedWorldBoxes);
PX_FREE(mSortedTransforms);
mSortedWorldBoxes = reinterpret_cast<BucketBox*>(PX_ALLOC(bytesNeededForBoxes, "BucketPruner"));
mSortedObjects = reinterpret_cast<PrunerPayload*>(PX_ALLOC(bytesNeededForObjects, "BucketPruner"));
mSortedTransforms = reinterpret_cast<PxTransform*>(PX_ALLOC(bytesNeededForTransforms, "BucketPruner"));
PX_ASSERT(!(size_t(mSortedWorldBoxes)&15));
PX_ASSERT(!(size_t(mSortedObjects)&15));
PX_ASSERT(!(size_t(mSortedTransforms)&15));
}
///////////////////////////////////////////////////////////////////////////////
void BucketPrunerCore::resizeCore()
{
const PxU32 capacity = mCoreCapacity ? mCoreCapacity*2 : 32;
mCoreCapacity = capacity;
const PxU32 bytesNeededForBoxes = capacity*sizeof(PxBounds3);
const PxU32 bytesNeededForObjects = capacity*sizeof(PrunerPayload);
const PxU32 bytesNeededForTransforms = capacity*sizeof(PxTransform);
const PxU32 bytesNeededForRemap = capacity*sizeof(PxU32);
PxBounds3* newCoreBoxes = reinterpret_cast<PxBounds3*>(PX_ALLOC(bytesNeededForBoxes, "BucketPruner"));
PrunerPayload* newCoreObjects = reinterpret_cast<PrunerPayload*>(PX_ALLOC(bytesNeededForObjects, "BucketPruner"));
PxTransform* newCoreTransforms = reinterpret_cast<PxTransform*>(PX_ALLOC(bytesNeededForTransforms, "BucketPruner"));
PxU32* newCoreRemap = reinterpret_cast<PxU32*>(PX_ALLOC(bytesNeededForRemap, "BucketPruner"));
if(mCoreBoxes)
{
PxMemCopy(newCoreBoxes, mCoreBoxes, mCoreNbObjects*sizeof(PxBounds3));
PX_FREE(mCoreBoxes);
}
if(mCoreObjects)
{
PxMemCopy(newCoreObjects, mCoreObjects, mCoreNbObjects*sizeof(PrunerPayload));
PX_FREE(mCoreObjects);
}
if(mCoreTransforms)
{
PxMemCopy(newCoreTransforms, mCoreTransforms, mCoreNbObjects*sizeof(PxTransform));
PX_FREE(mCoreTransforms);
}
if(mCoreRemap)
{
PxMemCopy(newCoreRemap, mCoreRemap, mCoreNbObjects*sizeof(PxU32));
PX_FREE(mCoreRemap);
}
mCoreBoxes = newCoreBoxes;
mCoreObjects = newCoreObjects;
mCoreTransforms = newCoreTransforms;
mCoreRemap = newCoreRemap;
}
PX_FORCE_INLINE void BucketPrunerCore::addObjectInternal(const PrunerPayload& object, const PxBounds3& worldAABB, const PxTransform& transform, PxU32 timeStamp)
{
if(mCoreNbObjects==mCoreCapacity)
resizeCore();
const PxU32 index = mCoreNbObjects++;
mCoreObjects[index] = object;
mCoreBoxes[index] = worldAABB; // PT: TODO: check assembly here
mCoreTransforms[index] = transform; // PT: TODO: check assembly here
mCoreRemap[index] = 0xffffffff;
// Objects are only inserted into the map once they're part of the main/core arrays.
#ifdef USE_REGULAR_HASH_MAP
bool ok = mMap.insert(object, BucketPrunerPair(index, timeStamp));
#else
BucketPrunerPair* ok = mMap.addPair(object, index, timeStamp);
#endif
PX_UNUSED(ok);
PX_ASSERT(ok);
}
bool BucketPrunerCore::addObject(const PrunerPayload& object, const PxBounds3& worldAABB, const PxTransform& transform, PxU32 timeStamp)
{
/*
We should probably use a bigger payload/userData struct here, which would also contain the external handle.
(EDIT: we can't even do that, because of the setExternalMemory function)
When asked to update/remove an object it would be O(n) to find the proper object in the mSortedObjects array.
-
For removing it we can simply empty the corresponding box, and the object will never be returned from queries.
Maybe this isn't even true, since boxes are sorted along one axis. So marking a box as empty could break the code relying on a sorted order.
An alternative is to mark the external handle as invalid, and ignore the object when a hit is found.
(EDIT: the sorting is now tested via data0/data1 anyway so we could mark the box as empty without breaking this)
-
For updating an object we would need to keep the (sub) array sorted (not the whole thing, only the array within a bucket).
We don't know the range (what part of the array maps to our bucket) but we may have the bucket ID somewhere? If we'd have this
we could parse the array left/right and resort just the right boxes. If we don't have this we may be able to "quickly" find the
range by traversing the tree, looking for the proper bucket. In any case I don't think there's a mapping to update within a bucket,
unlike in SAP or MBP. So we should be able to shuffle a bucket without having to update anything. For example there's no mapping
between the Core array and the Sorted array. It's a shame in a way because we'd need one, but it's not there - and in fact I think
we can free the Core array once Sorted is created, we don't need it at all.
If we don't want to re-sort the full bucket we can just mark it as dirty and ignore the sort-based early exits in the queries. Then we
can incrementally resort it over N frames or something.
This only works if the updated object remains in the same bucket though. If it moves to another bucket it becomes tempting to just remove
the object and re-insert it.
-
Now for adding an object, we can first have a "free pruner" and do the next 16 entries brute-force. Rebuilding every 16 objects might
give a good speedup already. Otherwise we need to do something more complicated.
*/
PX_ASSERT(mOwnMemory);
PX_ASSERT(!mDirty || !mNbFree);
if(!mDirty)
{
#ifdef FREE_PRUNER_SIZE
// In this path the structure is marked as valid. We do not want to invalidate it for each new object...
if(mNbFree<FREE_PRUNER_SIZE)
{
// ...so as long as there is space in the "free array", we store the newly added object there and
// return immediately. Subsequent queries will parse the free array as if it was a free pruner.
const PxU32 index = mNbFree++;
mFreeObjects[index] = object;
mFreeBounds[index] = worldAABB;
mFreeTransforms[index] = transform;
mFreeStamps[index] = timeStamp;
return true;
}
// If we reach this place, the free array is full. We must transfer the objects from the free array to
// the main (core) arrays, mark the structure as invalid, and still deal with the incoming object.
// First we transfer free objects, reset the number of free objects, and mark the structure as
// invalid/dirty (the core arrays will need rebuilding).
for(PxU32 i=0;i<mNbFree;i++)
addObjectInternal(mFreeObjects[i], mFreeBounds[i], mFreeTransforms[i], mFreeStamps[i]);
mNbFree = 0;
#endif
mDirty = true;
// mSortedNb = 0; // PT: TODO: investigate if this should be done here
// After that we still need to deal with the new incoming object (so far we only
// transferred the already existing objects from the full free array). This will
// happen automatically by letting the code continue to the regular codepath below.
}
// If we reach this place, the structure must be invalid and the incoming object
// must be added to the main arrays.
PX_ASSERT(mDirty);
addObjectInternal(object, worldAABB, transform, timeStamp);
return true;
}
bool BucketPrunerCore::removeObject(const PrunerPayload& object, PxU32& timeStamp)
{
// Even if the structure is already marked as dirty, we still need to update the
// core arrays and the map.
// The map only contains core objects, so we can use it to determine if the object
// exists in the core arrays or in the free array.
#ifdef USE_REGULAR_HASH_MAP
/* BucketPrunerPair entry;
if(mMap.findAndErase(object, entry))
{
PxU32 coreIndex = entry.mCoreIndex;
timeStamp = entry.mTimeStamp;*/
const BucketPrunerMap::Entry* removedEntry = mMap.find(object);
if(removedEntry)
{
PxU32 coreIndex = removedEntry->second.mCoreIndex;
timeStamp = removedEntry->second.mTimeStamp;
#else
PxU32 coreIndex; // This is the object's index in the core arrays.
if(mMap.removePair(object, coreIndex, timeStamp))
{
#endif
// In this codepath, the object we want to remove exists in the core arrays.
// We will need to remove it from both the core arrays & the sorted arrays.
const PxU32 sortedIndex = mCoreRemap[coreIndex]; // This is the object's index in the sorted arrays.
#ifdef USE_REGULAR_HASH_MAP
bool status = mMap.erase(object);
PX_ASSERT(status);
PX_UNUSED(status);
#endif
// First let's deal with the core arrays
mCoreNbObjects--;
if(coreIndex!=mCoreNbObjects)
{
// If it wasn't the last object in the array, close the gaps as usual
const PrunerPayload& movedObject = mCoreObjects[mCoreNbObjects];
mCoreBoxes[coreIndex] = mCoreBoxes[mCoreNbObjects];
mCoreTransforms[coreIndex] = mCoreTransforms[mCoreNbObjects];
mCoreObjects[coreIndex] = movedObject;
mCoreRemap[coreIndex] = mCoreRemap[mCoreNbObjects];
// Since we just moved the last object, its index in the core arrays has changed.
// We must reflect this change in the map.
#ifdef USE_REGULAR_HASH_MAP
BucketPrunerMap::Entry* movedEntry = const_cast<BucketPrunerMap::Entry*>(mMap.find(movedObject));
PX_ASSERT(movedEntry->second.mCoreIndex==mCoreNbObjects);
movedEntry->second.mCoreIndex = coreIndex;
#else
BucketPrunerPair* movedEntry = const_cast<BucketPrunerPair*>(mMap.findPair(movedObject));
PX_ASSERT(movedEntry->mCoreIndex==mCoreNbObjects);
movedEntry->mCoreIndex = coreIndex;
#endif
}
// Now, let's deal with the sorted arrays.
// If the structure is dirty, the sorted arrays will be rebuilt from scratch so there's no need to
// update them right now.
if(!mDirty)
{
// If the structure is valid, we want to keep it this way to avoid rebuilding sorted arrays after
// each removal. We can't "close the gaps" easily here because order of objects in the arrays matters.
// Instead we just invalidate the object by setting its bounding box as empty.
// Queries against empty boxes will never return a hit, so this effectively "removes" the object
// from any subsequent query results. Sorted arrays now contain a "disabled" object, until next build.
// Invalidating the box does not invalidate the sorting, since it's now captured in mData0/mData1.
// That is, mData0/mData1 keep their previous integer-encoded values, as if the box/object was still here.
mSortedWorldBoxes[sortedIndex].mCenter = PxVec3(0.0f);
mSortedWorldBoxes[sortedIndex].mExtents = PxVec3(-GU_EMPTY_BOUNDS_EXTENTS);
// Note that we don't touch mSortedObjects here. We could, but this is not necessary.
}
return true;
}
#ifdef FREE_PRUNER_SIZE
// Here, the object we want to remove exists in the free array. So we just parse it.
for(PxU32 i=0;i<mNbFree;i++)
{
if(mFreeObjects[i]==object)
{
// We found the object we want to remove. Close the gap as usual.
timeStamp = mFreeStamps[i];
mNbFree--;
mFreeBounds[i] = mFreeBounds[mNbFree];
mFreeTransforms[i] = mFreeTransforms[mNbFree];
mFreeObjects[i] = mFreeObjects[mNbFree];
mFreeStamps[i] = mFreeStamps[mNbFree];
return true;
}
}
#endif
// We didn't find the object. Can happen with a double remove. PX_ASSERT might be an option here.
return false;
}
bool BucketPrunerCore::updateObject(const PxBounds3& worldAABB, const PrunerPayload& object, const PxTransform& transform)
{
PxU32 timeStamp;
if(!removeObject(object, timeStamp))
return false;
return addObject(object, worldAABB, transform, timeStamp);
}
PxU32 BucketPrunerCore::removeMarkedObjects(PxU32 timeStamp)
{
PxU32 nbRemoved=0;
// PT: objects can be either in the hash-map, or in the 'free' array. First we look in the hash-map...
#ifdef USE_REGULAR_HASH_MAP
if(mMap.size())
#else
if(mMap.mNbActivePairs)
#endif
{
PxBounds3 empty;
empty.setEmpty();
const PxVec3 emptyCenter = empty.getCenter();
const PxVec3 emptyExtents = empty.getExtents();
// PT: hash-map is coalesced so we just parse it in linear order, no holes
PxU32 i=0;
#ifdef USE_REGULAR_HASH_MAP
PxU32 nbActivePairs = mMap.size();
const BucketPrunerMap::Entry* entries = mMap.mBase.getEntries();
#else
PxU32 nbActivePairs = mMap.mNbActivePairs;
#endif
PxU32 coreNbObjects = mCoreNbObjects; // PT: to avoid LHS
while(i<nbActivePairs)
{
#ifdef USE_REGULAR_HASH_MAP
const BucketPrunerMap::Entry& p = entries[i];
if(p.second.mTimeStamp==timeStamp)
#else
const BucketPrunerPair& p = mMap.mActivePairs[i];
if(p.mTimeStamp==timeStamp)
#endif
{
// PT: timestamps match. We must remove this object.
// PT: we replicate here what we do in BucketPrunerCore::removeObject(). See that function for details.
#ifdef USE_REGULAR_HASH_MAP
const PxU32 coreIndex = p.second.mCoreIndex;
#else
const PxU32 coreIndex = p.mCoreIndex;
#endif
if(!mDirty)
{
// PT: invalidating the box does not invalidate the sorting, since it's now captured in mData0/mData1
const PxU32 sortedIndex = mCoreRemap[coreIndex];
mSortedWorldBoxes[sortedIndex].mCenter = emptyCenter;
mSortedWorldBoxes[sortedIndex].mExtents = emptyExtents;
}
coreNbObjects--;
if(coreIndex!=coreNbObjects)
{
const PrunerPayload& movedObject = mCoreObjects[coreNbObjects];
mCoreBoxes[coreIndex] = mCoreBoxes[coreNbObjects];
mCoreTransforms[coreIndex] = mCoreTransforms[coreNbObjects];
mCoreObjects[coreIndex] = movedObject;
mCoreRemap[coreIndex] = mCoreRemap[coreNbObjects];
#ifdef USE_REGULAR_HASH_MAP
BucketPrunerMap::Entry* movedEntry = const_cast<BucketPrunerMap::Entry*>(mMap.find(movedObject));
PX_ASSERT(movedEntry->second.mCoreIndex==coreNbObjects);
movedEntry->second.mCoreIndex = coreIndex;
#else
BucketPrunerPair* movedEntry = const_cast<BucketPrunerPair*>(mMap.findPair(movedObject));
PX_ASSERT(movedEntry->mCoreIndex==coreNbObjects);
movedEntry->mCoreIndex = coreIndex;
#endif
}
nbRemoved++;
#ifdef USE_REGULAR_HASH_MAP
bool status = mMap.erase(p.first);
PX_ASSERT(status);
PX_UNUSED(status);
#else
const PxU32 hashValue = PxComputeHash(p.mData) & mMap.mMask;
mMap.removePairInternal(p.mData, hashValue, i);
#endif
nbActivePairs--;
}
else i++;
}
mCoreNbObjects = coreNbObjects;
#ifdef USE_REGULAR_HASH_MAP
#else
mMap.shrinkMemory();
#endif
}
#ifdef FREE_PRUNER_SIZE
// PT: ...then we look in the 'free' array
PxU32 i=0;
while(i<mNbFree)
{
if(mFreeStamps[i]==timeStamp)
{
nbRemoved++;
mNbFree--;
mFreeBounds[i] = mFreeBounds[mNbFree];
mFreeTransforms[i] = mFreeTransforms[mNbFree];
mFreeObjects[i] = mFreeObjects[mNbFree];
mFreeStamps[i] = mFreeStamps[mNbFree];
}
else i++;
}
#endif
return nbRemoved;
}
///////////////////////////////////////////////////////////////////////////////
static PxU32 sortBoxes( PxU32 nb, const PxBounds3* PX_RESTRICT boxes, const PrunerPayload* PX_RESTRICT objects,
const PxTransform* PX_RESTRICT transforms,
BucketBox& _globalBox, BucketBox* PX_RESTRICT sortedBoxes, PrunerPayload* PX_RESTRICT sortedObjects
, PxTransform* PX_RESTRICT sortedTransforms)
{
// Compute global box & sort axis
PxU32 sortAxis;
{
PX_ASSERT(nb>0);
Vec4V mergedMinV = V4LoadU(&boxes[nb-1].minimum.x);
Vec4V mergedMaxV = Vec4V_From_Vec3V(V3LoadU(&boxes[nb-1].maximum.x));
for(PxU32 i=0;i<nb-1;i++)
{
mergedMinV = V4Min(mergedMinV, V4LoadU(&boxes[i].minimum.x));
mergedMaxV = V4Max(mergedMaxV, V4LoadU(&boxes[i].maximum.x));
}
/* PX_ALIGN(16, PxVec4) mergedMin;
PX_ALIGN(16, PxVec4) mergedMax;
V4StoreA(mergedMinV, &mergedMin.x);
V4StoreA(mergedMaxV, &mergedMax.x);
_globalBox.mCenter.x = (mergedMax.x + mergedMin.x)*0.5f;
_globalBox.mCenter.y = (mergedMax.y + mergedMin.y)*0.5f;
_globalBox.mCenter.z = (mergedMax.z + mergedMin.z)*0.5f;
_globalBox.mExtents.x = (mergedMax.x - mergedMin.x)*0.5f;
_globalBox.mExtents.y = (mergedMax.y - mergedMin.y)*0.5f;
_globalBox.mExtents.z = (mergedMax.z - mergedMin.z)*0.5f;*/
const float Half = 0.5f;
const FloatV HalfV = FLoad(Half);
PX_ALIGN(16, PxVec4) mergedCenter;
PX_ALIGN(16, PxVec4) mergedExtents;
const Vec4V mergedCenterV = V4Scale(V4Add(mergedMaxV, mergedMinV), HalfV);
const Vec4V mergedExtentsV = V4Scale(V4Sub(mergedMaxV, mergedMinV), HalfV);
V4StoreA(mergedCenterV, &mergedCenter.x);
V4StoreA(mergedExtentsV, &mergedExtents.x);
_globalBox.mCenter = PxVec3(mergedCenter.x, mergedCenter.y, mergedCenter.z);
_globalBox.mExtents = PxVec3(mergedExtents.x, mergedExtents.y, mergedExtents.z);
const PxF32 absY = PxAbs(_globalBox.mExtents.y);
const PxF32 absZ = PxAbs(_globalBox.mExtents.z);
sortAxis = PxU32(absY < absZ ? 1 : 2);
// printf("Sort axis: %d\n", sortAxis);
}
float* keys = reinterpret_cast<float*>(sortedObjects);
for(PxU32 i=0;i<nb;i++)
keys[i] = boxes[i].minimum[sortAxis];
Cm::RadixSortBuffered rs; // ###TODO: some allocs here, remove
const PxU32* ranks = rs.Sort(keys, nb).GetRanks();
const float Half = 0.5f;
const FloatV HalfV = FLoad(Half);
for(PxU32 i=0;i<nb;i++)
{
const PxU32 index = *ranks++;
//const PxU32 index = local[i].index;
// sortedBoxes[i].mCenter = boxes[index].getCenter();
// sortedBoxes[i].mExtents = boxes[index].getExtents();
const Vec4V bucketBoxMinV = V4LoadU(&boxes[index].minimum.x);
const Vec4V bucketBoxMaxV = Vec4V_From_Vec3V(V3LoadU(&boxes[index].maximum.x));
const Vec4V bucketBoxCenterV = V4Scale(V4Add(bucketBoxMaxV, bucketBoxMinV), HalfV);
const Vec4V bucketBoxExtentsV = V4Scale(V4Sub(bucketBoxMaxV, bucketBoxMinV), HalfV);
// We don't need to preserve data0/data1 here
AlignedStore(bucketBoxCenterV, &sortedBoxes[i].mCenter.x);
AlignedStore(bucketBoxExtentsV, &sortedBoxes[i].mExtents.x);
#ifdef _DEBUG
sortedBoxes[i].mDebugMin = boxes[index].minimum[sortAxis];
#endif
sortedObjects[i] = objects[index];
sortedTransforms[i] = transforms[index];
}
return sortAxis;
}
#ifdef NODE_SORT
template<class T>
PX_CUDA_CALLABLE PX_FORCE_INLINE void tswap(T& x, T& y)
{
T tmp = x;
x = y;
y = tmp;
}
/* PX_FORCE_INLINE __m128 DotV(const __m128 a, const __m128 b)
{
const __m128 dot1 = _mm_mul_ps(a, b);
const __m128 shuf1 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(dot1), _MM_SHUFFLE(0,0,0,0)));
const __m128 shuf2 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(dot1), _MM_SHUFFLE(1,1,1,1)));
const __m128 shuf3 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(dot1), _MM_SHUFFLE(2,2,2,2)));
return _mm_add_ps(_mm_add_ps(shuf1, shuf2), shuf3);
}*/
// PT: hmmm, by construction, isn't the order always the same for all bucket pruners?
// => maybe not because the bucket boxes are still around the merged aabbs, not around the bucket
// Still we could do something here
static /*PX_FORCE_INLINE*/ PxU32 sort(const BucketPrunerNode& parent, const PxVec3& rayDir)
{
const PxU32 totalCount = parent.mCounters[0]+parent.mCounters[1]+parent.mCounters[2]+parent.mCounters[3]+parent.mCounters[4];
if(totalCount<NODE_SORT_MIN_COUNT)
return 0|(1<<3)|(2<<6)|(3<<9)|(4<<12);
float dp[5];
/* const __m128 rayDirV = _mm_loadu_ps(&rayDir.x);
__m128 dp0V = DotV(rayDirV, _mm_loadu_ps(&parent.mBucketBox[0].mCenter.x)); _mm_store_ss(&dp[0], dp0V);
__m128 dp1V = DotV(rayDirV, _mm_loadu_ps(&parent.mBucketBox[1].mCenter.x)); _mm_store_ss(&dp[1], dp1V);
__m128 dp2V = DotV(rayDirV, _mm_loadu_ps(&parent.mBucketBox[2].mCenter.x)); _mm_store_ss(&dp[2], dp2V);
__m128 dp3V = DotV(rayDirV, _mm_loadu_ps(&parent.mBucketBox[3].mCenter.x)); _mm_store_ss(&dp[3], dp3V);
__m128 dp4V = DotV(rayDirV, _mm_loadu_ps(&parent.mBucketBox[4].mCenter.x)); _mm_store_ss(&dp[4], dp4V);
*/
#ifdef VERIFY_SORT
PxU32 code;
{
dp[0] = parent.mCounters[0] ? PxAbs(parent.mBucketBox[0].mCenter.dot(rayDir)) : PX_MAX_F32;
dp[1] = parent.mCounters[1] ? PxAbs(parent.mBucketBox[1].mCenter.dot(rayDir)) : PX_MAX_F32;
dp[2] = parent.mCounters[2] ? PxAbs(parent.mBucketBox[2].mCenter.dot(rayDir)) : PX_MAX_F32;
dp[3] = parent.mCounters[3] ? PxAbs(parent.mBucketBox[3].mCenter.dot(rayDir)) : PX_MAX_F32;
dp[4] = parent.mCounters[4] ? PxAbs(parent.mBucketBox[4].mCenter.dot(rayDir)) : PX_MAX_F32;
PxU32 ii0 = 0;
PxU32 ii1 = 1;
PxU32 ii2 = 2;
PxU32 ii3 = 3;
PxU32 ii4 = 4;
// PT: using integer cmps since we used fabsf above
// const PxU32* values = reinterpret_cast<const PxU32*>(dp);
const PxU32* values = PxUnionCast<PxU32*, PxF32*>(dp);
PxU32 value0 = values[0];
PxU32 value1 = values[1];
PxU32 value2 = values[2];
PxU32 value3 = values[3];
PxU32 value4 = values[4];
for(PxU32 j=0;j<5-1;j++)
{
if(value1<value0)
{
tswap(value0, value1);
tswap(ii0, ii1);
}
if(value2<value1)
{
tswap(value1, value2);
tswap(ii1, ii2);
}
if(value3<value2)
{
tswap(value2, value3);
tswap(ii2, ii3);
}
if(value4<value3)
{
tswap(value3, value4);
tswap(ii3, ii4);
}
}
//return ii0|(ii1<<3)|(ii2<<6)|(ii3<<9)|(ii4<<12);
code = ii0|(ii1<<3)|(ii2<<6)|(ii3<<9)|(ii4<<12);
}
#endif
dp[0] = parent.mCounters[0] ? parent.mBucketBox[0].mCenter.dot(rayDir) : PX_MAX_F32;
dp[1] = parent.mCounters[1] ? parent.mBucketBox[1].mCenter.dot(rayDir) : PX_MAX_F32;
dp[2] = parent.mCounters[2] ? parent.mBucketBox[2].mCenter.dot(rayDir) : PX_MAX_F32;
dp[3] = parent.mCounters[3] ? parent.mBucketBox[3].mCenter.dot(rayDir) : PX_MAX_F32;
dp[4] = parent.mCounters[4] ? parent.mBucketBox[4].mCenter.dot(rayDir) : PX_MAX_F32;
const PxU32* values = PxUnionCast<PxU32*, PxF32*>(dp);
// const PxU32 mask = ~7U;
const PxU32 mask = 0x7ffffff8;
PxU32 value0 = (values[0]&mask);
PxU32 value1 = (values[1]&mask)|1;
PxU32 value2 = (values[2]&mask)|2;
PxU32 value3 = (values[3]&mask)|3;
PxU32 value4 = (values[4]&mask)|4;
#define SORT_BLOCK \
if(value1<value0) tswap(value0, value1); \
if(value2<value1) tswap(value1, value2); \
if(value3<value2) tswap(value2, value3); \
if(value4<value3) tswap(value3, value4);
SORT_BLOCK
SORT_BLOCK
SORT_BLOCK
SORT_BLOCK
const PxU32 ii0 = value0&7;
const PxU32 ii1 = value1&7;
const PxU32 ii2 = value2&7;
const PxU32 ii3 = value3&7;
const PxU32 ii4 = value4&7;
const PxU32 code2 = ii0|(ii1<<3)|(ii2<<6)|(ii3<<9)|(ii4<<12);
#ifdef VERIFY_SORT
PX_ASSERT(code2==code);
#endif
return code2;
}
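// Illustrative decode (sketch): sort() packs the five bucket indices as 3-bit
// fields, front-to-back; the traversal code below consumes them with "&7" and
// ">>=3", equivalent to:
static PX_FORCE_INLINE void decodeBucketOrder(PxU32 order, PxU32 out[5])
{
	for(PxU32 i=0;i<5;i++)
	{
		out[i] = order&7;
		order >>= 3;
	}
}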
static void gPrecomputeSort(BucketPrunerNode& node, const PxVec3* PX_RESTRICT dirs)
{
for(int i=0;i<8;i++)
node.mOrder[i] = PxTo16(sort(node, dirs[i]));
}
#endif
void BucketPrunerCore::classifyBoxes()
{
if(!mDirty)
return;
mDirty = false;
const PxU32 nb = mCoreNbObjects;
if(!nb)
{
mSortedNb=0;
return;
}
PX_ASSERT(!mNbFree);
#ifdef BRUTE_FORCE_LIMIT
if(nb<=BRUTE_FORCE_LIMIT)
{
allocateSortedMemory(nb);
BucketBox* sortedBoxes = mSortedWorldBoxes;
PrunerPayload* sortedObjects = mSortedObjects;
const float Half = 0.5f;
const __m128 HalfV = _mm_load1_ps(&Half);
PX_ALIGN(16, PxVec4) bucketCenter;
PX_ALIGN(16, PxVec4) bucketExtents;
for(PxU32 i=0;i<nb;i++)
{
const __m128 bucketBoxMinV = _mm_loadu_ps(&mCoreBoxes[i].minimum.x);
const __m128 bucketBoxMaxV = _mm_loadu_ps(&mCoreBoxes[i].maximum.x);
const __m128 bucketBoxCenterV = _mm_mul_ps(_mm_add_ps(bucketBoxMaxV, bucketBoxMinV), HalfV);
const __m128 bucketBoxExtentsV = _mm_mul_ps(_mm_sub_ps(bucketBoxMaxV, bucketBoxMinV), HalfV);
_mm_store_ps(&bucketCenter.x, bucketBoxCenterV);
_mm_store_ps(&bucketExtents.x, bucketBoxExtentsV);
sortedBoxes[i].mCenter = PxVec3(bucketCenter.x, bucketCenter.y, bucketCenter.z);
sortedBoxes[i].mExtents = PxVec3(bucketExtents.x, bucketExtents.y, bucketExtents.z);
sortedObjects[i] = mCoreObjects[i];
}
return;
}
#endif
size_t* remap = reinterpret_cast<size_t*>(PX_ALLOC(nb*sizeof(size_t), ""));
for(PxU32 i=0;i<nb;i++)
{
remap[i] = mCoreObjects[i].data[0];
mCoreObjects[i].data[0] = i;
}
// printf("Nb objects: %d\n", nb);
PrunerPayload localTempObjects[LOCAL_SIZE];
BucketBox localTempBoxes[LOCAL_SIZE];
PxTransform localTempTransforms[LOCAL_SIZE];
PrunerPayload* tempObjects;
PxTransform* tempTransforms;
BucketBox* tempBoxes;
if(nb>LOCAL_SIZE)
{
tempObjects = PX_ALLOCATE(PrunerPayload, nb, "BucketPruner");
tempBoxes = PX_ALLOCATE(BucketBox, nb, "BucketPruner");
tempTransforms = PX_ALLOCATE(PxTransform, nb, "BucketPruner");
}
else
{
tempObjects = localTempObjects;
tempBoxes = localTempBoxes;
tempTransforms = localTempTransforms;
}
mSortAxis = sortBoxes(nb, mCoreBoxes, mCoreObjects, mCoreTransforms, mGlobalBox, tempBoxes, tempObjects, tempTransforms);
PX_ASSERT(mSortAxis);
allocateSortedMemory(nb);
BucketBox* sortedBoxes = mSortedWorldBoxes;
PrunerPayload* sortedObjects = mSortedObjects;
PxTransform* sortedTransforms = mSortedTransforms;
const PxU32 yz = PxU32(mSortAxis == 1 ? 2 : 1);
const float limitX = mGlobalBox.mCenter.x;
const float limitYZ = mGlobalBox.mCenter[yz];
mLevel1.classifyBoxes(limitX, limitYZ, nb, tempBoxes, tempObjects, tempTransforms, sortedBoxes, sortedObjects, sortedTransforms, false, mSortAxis);
processChildBuckets(nb, tempBoxes, tempObjects, tempTransforms, mLevel1, mLevel2, mSortedWorldBoxes, mSortedObjects, mSortedTransforms, mSortAxis);
for(PxU32 j=0;j<5;j++)
processChildBuckets(nb, tempBoxes, tempObjects, tempTransforms, mLevel2[j], mLevel3[j], mSortedWorldBoxes + mLevel1.mOffsets[j], mSortedObjects + mLevel1.mOffsets[j], mSortedTransforms + mLevel1.mOffsets[j], mSortAxis);
{
for(PxU32 i=0;i<nb;i++)
{
encodeBoxMinMax(mSortedWorldBoxes[i], mSortAxis);
}
}
if(nb>LOCAL_SIZE)
{
PX_FREE(tempTransforms);
PX_FREE(tempBoxes);
PX_FREE(tempObjects);
}
for(PxU32 i=0;i<nb;i++)
{
const PxU32 coreIndex = PxU32(mSortedObjects[i].data[0]);
const size_t saved = remap[coreIndex];
mSortedObjects[i].data[0] = saved;
mCoreObjects[coreIndex].data[0] = saved;
if(mCoreRemap)
mCoreRemap[coreIndex] = i;
// remap[i] = mCoreObjects[i].data[0];
// mCoreObjects[i].data[0] = i;
}
PX_FREE(remap);
/* if(mOwnMemory)
{
PX_FREE(mCoreBoxes);
PX_FREE(mCoreObjects);
}*/
#ifdef NODE_SORT
{
PxVec3 dirs[8];
dirs[0] = PxVec3(1.0f, 1.0f, 1.0f);
dirs[1] = PxVec3(1.0f, 1.0f, -1.0f);
dirs[2] = PxVec3(1.0f, -1.0f, 1.0f);
dirs[3] = PxVec3(1.0f, -1.0f, -1.0f);
dirs[4] = PxVec3(-1.0f, 1.0f, 1.0f);
dirs[5] = PxVec3(-1.0f, 1.0f, -1.0f);
dirs[6] = PxVec3(-1.0f, -1.0f, 1.0f);
dirs[7] = PxVec3(-1.0f, -1.0f, -1.0f);
for(int i=0;i<8;i++)
dirs[i].normalize();
gPrecomputeSort(mLevel1, dirs);
for(PxU32 i=0;i<5;i++)
gPrecomputeSort(mLevel2[i], dirs);
for(PxU32 j=0;j<5;j++)
{
for(PxU32 i=0;i<5;i++)
gPrecomputeSort(mLevel3[j][i], dirs);
}
}
#endif
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#ifdef CAN_USE_MOVEMASK
namespace
{
struct RayParams
{
PX_ALIGN(16, PxVec3 mData2); float padding0;
PX_ALIGN(16, PxVec3 mFDir); float padding1;
PX_ALIGN(16, PxVec3 mData); float padding2;
PX_ALIGN(16, PxVec3 mInflate); float padding3;
};
}
static PX_FORCE_INLINE void precomputeRayData(RayParams* PX_RESTRICT rayParams, const PxVec3& rayOrig, const PxVec3& rayDir, float maxDist)
{
#ifdef USE_SIMD
const float Half = 0.5f * maxDist;
const __m128 HalfV = _mm_load1_ps(&Half);
const __m128 DataV = _mm_mul_ps(_mm_loadu_ps(&rayDir.x), HalfV);
const __m128 Data2V = _mm_add_ps(_mm_loadu_ps(&rayOrig.x), DataV);
const PxU32 MaskI = 0x7fffffff;
const __m128 FDirV = _mm_and_ps(_mm_load1_ps(reinterpret_cast<const float*>(&MaskI)), DataV);
_mm_store_ps(&rayParams->mData.x, DataV);
_mm_store_ps(&rayParams->mData2.x, Data2V);
_mm_store_ps(&rayParams->mFDir.x, FDirV);
#else
const PxVec3 data = 0.5f * rayDir * maxDist;
rayParams->mData = data;
rayParams->mData2 = rayOrig + data;
rayParams->mFDir.x = PxAbs(data.x);
rayParams->mFDir.y = PxAbs(data.y);
rayParams->mFDir.z = PxAbs(data.z);
#endif
}
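// The precomputed values encode the ray segment for the separating-axis test
// in segmentAABB below: mData is half the segment vector, mData2 the segment
// midpoint, and mFDir the component-wise absolute value of mData.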
template <int inflateT>
static PX_FORCE_INLINE PxIntBool segmentAABB(const BucketBox& box, const RayParams* PX_RESTRICT params)
{
#ifdef USE_SIMD
const PxU32 maskI = 0x7fffffff;
const __m128 fdirV = _mm_load_ps(¶ms->mFDir.x);
// #ifdef _DEBUG
const __m128 extentsV = inflateT ? _mm_add_ps(_mm_loadu_ps(&box.mExtents.x), _mm_load_ps(¶ms->mInflate.x)) : _mm_loadu_ps(&box.mExtents.x);
const __m128 DV = _mm_sub_ps(_mm_load_ps(¶ms->mData2.x), _mm_loadu_ps(&box.mCenter.x));
/* #else
const __m128 extentsV = inflateT ? _mm_add_ps(_mm_load_ps(&box.mExtents.x), _mm_load_ps(¶ms->mInflate.x)) : _mm_load_ps(&box.mExtents.x);
const __m128 DV = _mm_sub_ps(_mm_load_ps(¶ms->mData2.x), _mm_load_ps(&box.mCenter.x));
#endif*/
__m128 absDV = _mm_and_ps(DV, _mm_load1_ps(reinterpret_cast<const float*>(&maskI)));
absDV = _mm_cmpgt_ps(absDV, _mm_add_ps(extentsV, fdirV));
const PxU32 test = PxU32(_mm_movemask_ps(absDV));
if(test&7)
return 0;
const __m128 dataZYX_V = _mm_load_ps(¶ms->mData.x);
const __m128 dataXZY_V = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(dataZYX_V), _MM_SHUFFLE(3,0,2,1)));
const __m128 DXZY_V = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(DV), _MM_SHUFFLE(3,0,2,1)));
const __m128 fV = _mm_sub_ps(_mm_mul_ps(dataZYX_V, DXZY_V), _mm_mul_ps(dataXZY_V, DV));
const __m128 fdirZYX_V = _mm_load_ps(¶ms->mFDir.x);
const __m128 fdirXZY_V = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(fdirZYX_V), _MM_SHUFFLE(3,0,2,1)));
const __m128 extentsXZY_V = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(extentsV), _MM_SHUFFLE(3,0,2,1)));
const __m128 fg = _mm_add_ps(_mm_mul_ps(extentsV, fdirXZY_V), _mm_mul_ps(extentsXZY_V, fdirZYX_V));
__m128 absfV = _mm_and_ps(fV, _mm_load1_ps(reinterpret_cast<const float*>(&maskI)));
absfV = _mm_cmpgt_ps(absfV, fg);
const PxU32 test2 = PxU32(_mm_movemask_ps(absfV));
if(test2&7)
return 0;
return 1;
#else
const float boxExtentsx = inflateT ? box.mExtents.x + params->mInflate.x : box.mExtents.x;
const float Dx = params->mData2.x - box.mCenter.x; if(fabsf(Dx) > boxExtentsx + params->mFDir.x) return PxIntFalse;
const float boxExtentsz = inflateT ? box.mExtents.z + params->mInflate.z : box.mExtents.z;
const float Dz = params->mData2.z - box.mCenter.z; if(fabsf(Dz) > boxExtentsz + params->mFDir.z) return PxIntFalse;
const float boxExtentsy = inflateT ? box.mExtents.y + params->mInflate.y : box.mExtents.y;
const float Dy = params->mData2.y - box.mCenter.y; if(fabsf(Dy) > boxExtentsy + params->mFDir.y) return PxIntFalse;
float f;
f = params->mData.y * Dz - params->mData.z * Dy; if(fabsf(f) > boxExtentsy*params->mFDir.z + boxExtentsz*params->mFDir.y) return PxIntFalse;
f = params->mData.z * Dx - params->mData.x * Dz; if(fabsf(f) > boxExtentsx*params->mFDir.z + boxExtentsz*params->mFDir.x) return PxIntFalse;
f = params->mData.x * Dy - params->mData.y * Dx; if(fabsf(f) > boxExtentsx*params->mFDir.y + boxExtentsy*params->mFDir.x) return PxIntFalse;
return PxIntTrue;
#endif
}
#else
#include "GuBVHTestsSIMD.h"
typedef RayAABBTest BPRayAABBTest;
template <int inflateT>
static PX_FORCE_INLINE PxIntBool segmentAABB(const BucketBox& box, const BPRayAABBTest& test)
{
return static_cast<PxIntBool>(test.check<inflateT>(V3LoadU(box.mCenter), V3LoadU(box.mExtents)));
}
/*static PX_FORCE_INLINE IntBool segmentAABB(const BucketBox& box, const BPRayAABBTest& test, PxU32 rayMinLimitX, PxU32 rayMaxLimitX)
{
if(rayMinLimitX>box.mData1)
return 0;
if(rayMaxLimitX<box.mData0)
return 0;
return test(Vec3V_From_PxVec3(box.mCenter), Vec3V_From_PxVec3(box.mExtents));
}*/
#endif
namespace
{
struct BucketPrunerRaycastAdapter
{
PX_FORCE_INLINE BucketPrunerRaycastAdapter(PrunerRaycastCallback& pcb, const PrunerPayload* payloads, const PxTransform* transforms) :
mCallback(pcb), mPayloads(payloads), mTransforms(transforms) {}
PX_FORCE_INLINE bool invoke(PxReal& distance, PxU32 primIndex)
{
return mCallback.invoke(distance, primIndex, mPayloads, mTransforms);
}
PrunerRaycastCallback& mCallback;
const PrunerPayload* mPayloads;
const PxTransform* mTransforms;
PX_NOCOPY(BucketPrunerRaycastAdapter)
};
struct BucketPrunerOverlapAdapter
{
PX_FORCE_INLINE BucketPrunerOverlapAdapter(PrunerOverlapCallback& pcb, const PrunerPayload* payloads, const PxTransform* transforms) :
mCallback(pcb), mPayloads(payloads), mTransforms(transforms) {}
PX_FORCE_INLINE bool invoke(PxU32 primIndex)
{
return mCallback.invoke(primIndex, mPayloads, mTransforms);
}
PrunerOverlapCallback& mCallback;
const PrunerPayload* mPayloads;
const PxTransform* mTransforms;
PX_NOCOPY(BucketPrunerOverlapAdapter)
};
}
template <int inflateT>
static bool processBucket(
PxU32 nb, const BucketBox* PX_RESTRICT baseBoxes, const PrunerPayload* PX_RESTRICT baseObjects,
const PxTransform* PX_RESTRICT baseTransforms, PxU32 offset, PxU32 totalAllocated,
const PxVec3& rayOrig, const PxVec3& rayDir, float& maxDist,
#ifdef CAN_USE_MOVEMASK
RayParams* PX_RESTRICT rayParams,
#else
BPRayAABBTest& test, const PxVec3& inflate,
#endif
PrunerRaycastCallback& pcbArgName, PxU32& _rayMinLimitInt, PxU32& _rayMaxLimitInt, PxU32 sortAxis)
{
PX_UNUSED(totalAllocated);
const BucketBox* PX_RESTRICT _boxes = baseBoxes + offset;
BucketPrunerRaycastAdapter pcb(pcbArgName, baseObjects + offset, baseTransforms + offset);
PxU32 rayMinLimitInt = _rayMinLimitInt;
PxU32 rayMaxLimitInt = _rayMaxLimitInt;
const BucketBox* last = _boxes + nb;
PxU32 objectID = 0;
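// Boxes are sorted by their integer-encoded min bound along the sort axis:
// a box whose encoded max (mData1) lies below the ray's min can be skipped,
// and the first box whose encoded min (mData0) exceeds the ray's max ends
// the whole bucket.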
while(_boxes!=last)
{
const BucketBox& currentBox = *_boxes++;
const PxU32 currentID = objectID++;
if(currentBox.mData1<rayMinLimitInt)
continue;
if(currentBox.mData0>rayMaxLimitInt)
goto Exit;
#ifdef CAN_USE_MOVEMASK
if(!segmentAABB<inflateT>(currentBox, rayParams))
continue;
#else
if(!segmentAABB<inflateT>(currentBox, test))
continue;
#endif
const float MaxDist = maxDist;
const bool again = pcb.invoke(maxDist, currentID);
if(!again)
return false;
if(maxDist < MaxDist)
{
float rayMinLimit, rayMaxLimit;
#ifdef CAN_USE_MOVEMASK
if(inflateT)
computeRayLimits(rayMinLimit, rayMaxLimit, rayOrig, rayDir, maxDist, rayParams->mInflate, sortAxis);
else
computeRayLimits(rayMinLimit, rayMaxLimit, rayOrig, rayDir, maxDist, sortAxis);
precomputeRayData(rayParams, rayOrig, rayDir, maxDist);
#else
if(inflateT)
computeRayLimits(rayMinLimit, rayMaxLimit, rayOrig, rayDir, maxDist, inflate, sortAxis);
else
computeRayLimits(rayMinLimit, rayMaxLimit, rayOrig, rayDir, maxDist, sortAxis);
test.setDistance(maxDist);
#endif
const PxU32* binaryMinLimit = reinterpret_cast<const PxU32*>(&rayMinLimit);
const PxU32* binaryMaxLimit = reinterpret_cast<const PxU32*>(&rayMaxLimit);
rayMinLimitInt = encodeFloat(binaryMinLimit[0]);
rayMaxLimitInt = encodeFloat(binaryMaxLimit[0]);
}
}
Exit:
_rayMinLimitInt = rayMinLimitInt;
_rayMaxLimitInt = rayMaxLimitInt;
return true;
}
#ifdef NODE_SORT
static PxU32 computeDirMask(const PxVec3& dir)
{
const PxU32* binary = reinterpret_cast<const PxU32*>(&dir.x);
const PxU32 X = (binary[0])>>31;
const PxU32 Y = (binary[1])>>31;
const PxU32 Z = (binary[2])>>31;
return Z|(Y<<1)|(X<<2);
}
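// Example (illustrative): dir = (-1, 1, -1) has sign bits X=1, Y=0, Z=1, so
// the mask is Z|(Y<<1)|(X<<2) = 1|0|4 = 5, selecting the precomputed
// traversal order mOrder[5] below.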
#endif
template <int inflateT>
static bool stab(const BucketPrunerCore& core, PrunerRaycastCallback& pcbArgName, const PxVec3& rayOrig, const PxVec3& rayDir, float& maxDist, const PxVec3 inflate)
{
const PxU32 nb = core.mSortedNb;
if(!nb
#ifdef FREE_PRUNER_SIZE
&& !core.mNbFree
#endif
)
return true;
if(maxDist==PX_MAX_F32)
{
/*const*/ PxVec3 boxMin = core.mGlobalBox.getMin() - inflate;
/*const*/ PxVec3 boxMax = core.mGlobalBox.getMax() + inflate;
#ifdef FREE_PRUNER_SIZE
if(core.mNbFree)
{
// TODO: optimize this
PxBounds3 freeGlobalBounds;
freeGlobalBounds.setEmpty();
for(PxU32 i=0;i<core.mNbFree;i++)
freeGlobalBounds.include(core.mFreeBounds[i]);
freeGlobalBounds.minimum -= inflate;
freeGlobalBounds.maximum += inflate;
boxMin = boxMin.minimum(freeGlobalBounds.minimum);
boxMax = boxMax.maximum(freeGlobalBounds.maximum);
}
#endif
clipRay(rayOrig, rayDir, maxDist, boxMin, boxMax);
}
#ifdef CAN_USE_MOVEMASK
RayParams rayParams;
#ifdef USE_SIMD
rayParams.padding0 = rayParams.padding1 = rayParams.padding2 = rayParams.padding3 = 0.0f;
#endif
if(inflateT)
rayParams.mInflate = inflate;
precomputeRayData(&rayParams, rayOrig, rayDir, maxDist);
#else
BPRayAABBTest test(rayOrig, rayDir, maxDist, inflateT ? inflate : PxVec3(0.0f));
#endif
#ifdef FREE_PRUNER_SIZE
BucketPrunerRaycastAdapter pcb(pcbArgName, core.mFreeObjects, core.mFreeTransforms);
for(PxU32 i=0;i<core.mNbFree;i++)
{
BucketBox tmp;
tmp.mCenter = core.mFreeBounds[i].getCenter();
tmp.mExtents = core.mFreeBounds[i].getExtents();
#ifdef CAN_USE_MOVEMASK
if(segmentAABB<inflateT>(tmp, &rayParams))
#else
if(segmentAABB<inflateT>(tmp, test))
#endif
{
if(!pcb.invoke(maxDist, i))
return false;
}
}
#endif
if(!nb)
return true;
#ifdef CAN_USE_MOVEMASK
if(!segmentAABB<inflateT>(core.mGlobalBox, &rayParams))
return true;
#else
if(!segmentAABB<inflateT>(core.mGlobalBox, test))
return true;
#endif
const PxU32 sortAxis = core.mSortAxis;
float rayMinLimit, rayMaxLimit;
if(inflateT)
computeRayLimits(rayMinLimit, rayMaxLimit, rayOrig, rayDir, maxDist, inflate, sortAxis);
else
computeRayLimits(rayMinLimit, rayMaxLimit, rayOrig, rayDir, maxDist, sortAxis);
const PxU32* binaryMinLimit = reinterpret_cast<const PxU32*>(&rayMinLimit);
const PxU32* binaryMaxLimit = reinterpret_cast<const PxU32*>(&rayMaxLimit);
PxU32 rayMinLimitInt = encodeFloat(binaryMinLimit[0]);
PxU32 rayMaxLimitInt = encodeFloat(binaryMaxLimit[0]);
/*
float rayMinLimitX, rayMaxLimitX;
if(inflateT)
computeRayLimits(rayMinLimitX, rayMaxLimitX, rayOrig, rayDir, maxDist, inflate, 0);
else
computeRayLimits(rayMinLimitX, rayMaxLimitX, rayOrig, rayDir, maxDist, 0);
PxU32 rayMinLimitIntX = encodeFloat(PX_IR(rayMinLimitX));
PxU32 rayMaxLimitIntX = encodeFloat(PX_IR(rayMaxLimitX));
*/
float currentDist = maxDist;
#ifdef NODE_SORT
const PxU32 dirIndex = computeDirMask(rayDir);
PxU32 orderi = core.mLevel1.mOrder[dirIndex];
// PxU32 orderi = sort(core.mLevel1, rayDir);
for(PxU32 i_=0;i_<5;i_++)
{
const PxU32 i = orderi&7; orderi>>=3;
#else
for(PxU32 i=0;i<5;i++)
{
#endif
#ifdef CAN_USE_MOVEMASK
if(core.mLevel1.mCounters[i] && segmentAABB<inflateT>(core.mLevel1.mBucketBox[i], &rayParams))
#else
if(core.mLevel1.mCounters[i] && segmentAABB<inflateT>(core.mLevel1.mBucketBox[i], test))
// if(core.mLevel1.mCounters[i] && segmentAABB<inflateT>(core.mLevel1.mBucketBox[i], test, rayMinLimitIntX, rayMaxLimitIntX))
#endif
{
#ifdef NODE_SORT
PxU32 orderj = core.mLevel2[i].mOrder[dirIndex];
// PxU32 orderj = sort(core.mLevel2[i], rayDir);
for(PxU32 j_=0;j_<5;j_++)
{
const PxU32 j = orderj&7; orderj>>=3;
#else
for(PxU32 j=0;j<5;j++)
{
#endif
#ifdef CAN_USE_MOVEMASK
if(core.mLevel2[i].mCounters[j] && segmentAABB<inflateT>(core.mLevel2[i].mBucketBox[j], &rayParams))
#else
if(core.mLevel2[i].mCounters[j] && segmentAABB<inflateT>(core.mLevel2[i].mBucketBox[j], test))
// if(core.mLevel2[i].mCounters[j] && segmentAABB<inflateT>(core.mLevel2[i].mBucketBox[j], test, rayMinLimitIntX, rayMaxLimitIntX))
#endif
{
const BucketPrunerNode& parent = core.mLevel3[i][j];
const PxU32 parentOffset = core.mLevel1.mOffsets[i] + core.mLevel2[i].mOffsets[j];
#ifdef NODE_SORT
PxU32 orderk = parent.mOrder[dirIndex];
// PxU32 orderk = sort(parent, rayDir);
for(PxU32 k_=0;k_<5;k_++)
{
const PxU32 k = orderk&7; orderk>>=3;
#else
for(PxU32 k=0;k<5;k++)
{
#endif
const PxU32 nbInBucket = parent.mCounters[k];
#ifdef CAN_USE_MOVEMASK
if(nbInBucket && segmentAABB<inflateT>(parent.mBucketBox[k], &rayParams))
#else
if(nbInBucket && segmentAABB<inflateT>(parent.mBucketBox[k], test))
// if(nbInBucket && segmentAABB<inflateT>(parent.mBucketBox[k], test, rayMinLimitIntX, rayMaxLimitIntX))
#endif
{
const PxU32 offset = parentOffset + parent.mOffsets[k];
const bool again = processBucket<inflateT>( nbInBucket, core.mSortedWorldBoxes, core.mSortedObjects,
core.mSortedTransforms,
offset, core.mSortedNb,
rayOrig, rayDir, currentDist,
#ifdef CAN_USE_MOVEMASK
&rayParams,
#else
test, inflate,
#endif
pcbArgName,
rayMinLimitInt, rayMaxLimitInt,
sortAxis);
if(!again)
return false;
}
}
}
}
}
}
maxDist = currentDist;
return true;
}
bool BucketPrunerCore::raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& pcb) const
{
return ::stab<0>(*this, pcb, origin, unitDir, inOutDistance, PxVec3(0.0f));
}
bool BucketPrunerCore::sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& pcb) const
{
const PxVec3 extents = queryVolume.getPrunerInflatedWorldAABB().getExtents();
return ::stab<1>(*this, pcb, queryVolume.getPrunerInflatedWorldAABB().getCenter(), unitDir, inOutDistance, extents);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// PT: TODO: decoupling the pruner callback revealed quite a bug here: we call this processBucket function with an inflateT param,
// which is re-interpreted as "doAssert" for overlaps! What happened here?
template<bool doAssert, typename Test>
static PX_FORCE_INLINE bool processBucket( PxU32 nb, const BucketBox* PX_RESTRICT baseBoxes, const PrunerPayload* PX_RESTRICT baseObjects,
const PxTransform* PX_RESTRICT baseTransforms,
PxU32 offset, PxU32 totalAllocated,
const Test& test, PrunerOverlapCallback& pcbArgName,
PxU32 minLimitInt, PxU32 maxLimitInt)
{
PX_UNUSED(totalAllocated);
const BucketBox* PX_RESTRICT boxes = baseBoxes + offset;
BucketPrunerOverlapAdapter pcb(pcbArgName, baseObjects + offset, baseTransforms + offset);
for(PxU32 i=0;i<nb;i++)
{
const BucketBox& currentBox = *boxes++;
if(currentBox.mData1<minLimitInt)
{
if(doAssert)
PX_ASSERT(!test(currentBox));
continue;
}
if(currentBox.mData0>maxLimitInt)
{
if(doAssert)
PX_ASSERT(!test(currentBox));
return true;
}
if(test(currentBox))
{
if(!pcb.invoke(i))
return false;
}
}
return true;
}
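// Note: in the overlap version above, mData0/mData1 hold each box's min/max along the sort
// axis, pre-encoded into unsigned ints; since the boxes are sorted by min along that axis, a
// box starting beyond the query's max ends the whole scan. The early-outs rely on a monotonic
// mapping from IEEE-754 floats to PxU32 so that float ordering survives plain integer
// compares. A typical implementation of such a mapping is sketched below (hypothetical name;
// the actual encodeFloat used by this file may differ in details):
static PX_FORCE_INLINE PxU32 encodeFloatSketch(PxU32 ir)
{
	if(ir & 0x80000000)
		return ~ir;				// negative float: flip all bits to reverse the descending order
	return ir | 0x80000000;		// positive float: set the sign bit to rank above all negatives
}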
template<typename Test, bool isPrecise>
class BucketPrunerOverlapTraversal
{
public:
PX_FORCE_INLINE BucketPrunerOverlapTraversal() {}
/*PX_FORCE_INLINE*/ bool operator()(const BucketPrunerCore& core, const Test& test, PrunerOverlapCallback& pcbArgName, const PxBounds3& cullBox) const
{
#ifdef FREE_PRUNER_SIZE
BucketPrunerOverlapAdapter pcb(pcbArgName, core.mFreeObjects, core.mFreeTransforms);
for(PxU32 i=0;i<core.mNbFree;i++)
{
if(test(core.mFreeBounds[i]))
{
if(!pcb.invoke(i))
return false;
}
}
#endif
const PxU32 nb = core.mSortedNb;
if(!nb)
return true;
#ifdef BRUTE_FORCE_LIMIT
	if(nb<=BRUTE_FORCE_LIMIT)
	{
		// Brute-force path over the sorted arrays. The adapter must be built here: the
		// free-object adapter above is only in scope when FREE_PRUNER_SIZE is defined, and
		// overlap adapters take a single index (there is no distance for overlaps).
		BucketPrunerOverlapAdapter bruteForceCB(pcbArgName, core.mSortedObjects, core.mSortedTransforms);
		for(PxU32 i=0;i<nb;i++)
		{
			if(test(core.mSortedWorldBoxes[i]))
			{
				if(!bruteForceCB.invoke(i))
					return false;
			}
		}
		return true;
	}
#endif
if(!test(core.mGlobalBox))
return true;
const PxU32 sortAxis = core.mSortAxis;
const float boxMinLimit = cullBox.minimum[sortAxis];
const float boxMaxLimit = cullBox.maximum[sortAxis];
const PxU32* binaryMinLimit = reinterpret_cast<const PxU32*>(&boxMinLimit);
const PxU32* binaryMaxLimit = reinterpret_cast<const PxU32*>(&boxMaxLimit);
const PxU32 rayMinLimitInt = encodeFloat(binaryMinLimit[0]);
const PxU32 rayMaxLimitInt = encodeFloat(binaryMaxLimit[0]);
for(PxU32 i=0;i<5;i++)
{
if(core.mLevel1.mCounters[i] && test(core.mLevel1.mBucketBox[i]))
{
for(PxU32 j=0;j<5;j++)
{
if(core.mLevel2[i].mCounters[j] && test(core.mLevel2[i].mBucketBox[j]))
{
for(PxU32 k=0;k<5;k++)
{
const PxU32 nbInBucket = core.mLevel3[i][j].mCounters[k];
if(nbInBucket && test(core.mLevel3[i][j].mBucketBox[k]))
{
const PxU32 offset = core.mLevel1.mOffsets[i] + core.mLevel2[i].mOffsets[j] + core.mLevel3[i][j].mOffsets[k];
if(!processBucket<isPrecise>(nbInBucket, core.mSortedWorldBoxes, core.mSortedObjects,
core.mSortedTransforms,
offset, core.mSortedNb, test, pcbArgName, rayMinLimitInt, rayMaxLimitInt))
return false;
}
}
}
}
}
}
return true;
}
};
///////////////////////////////////////////////////////////////////////////////
#ifdef CAN_USE_MOVEMASK
PX_FORCE_INLINE PxU32 BAllTrue3_R(const BoolV a)
{
const PxI32 moveMask = _mm_movemask_ps(a);
return PxU32((moveMask & 0x7) == (0x7));
}
#endif
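// Note on the reduction above: _mm_movemask_ps packs the sign bit of each of the four SIMD
// lanes into the low four bits of an integer. Masking with 0x7 keeps only the x/y/z lanes,
// so BAllTrue3_R returns 1 exactly when the first three lanes of the comparison result are
// all-ones (the w lane is ignored).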
#ifdef USE_SIMD
struct SphereAABBTest_SIMD
{
PX_FORCE_INLINE SphereAABBTest_SIMD(const Sphere& sphere) :
#ifdef CAN_USE_MOVEMASK
mCenter (V4LoadU(&sphere.center.x)),
#else
mCenter (V3LoadU(sphere.center)),
#endif
mRadius2(FLoad(sphere.radius * sphere.radius))
{}
PX_FORCE_INLINE PxIntBool operator()(const BucketBox& box) const
{
#ifdef CAN_USE_MOVEMASK
const Vec4V boxCenter = AlignedLoad(&box.mCenter.x);
const Vec4V boxExtents = AlignedLoad(&box.mExtents.x);
//
const Vec4V offset = V4Sub(mCenter, boxCenter);
const Vec4V closest = V4Clamp(offset, V4Neg(boxExtents), boxExtents);
const Vec4V d = V4Sub(offset, closest);
const FloatV dot = V4Dot3(d,d);
return PxIntBool(BAllTrue3_R(FIsGrtrOrEq(mRadius2, dot)));
#else
const Vec3V boxCenter = V3LoadU(box.mCenter);
const Vec3V boxExtents = V3LoadU(box.mExtents);
//
const Vec3V offset = V3Sub(mCenter, boxCenter);
const Vec3V closest = V3Clamp(offset, V3Neg(boxExtents), boxExtents);
const Vec3V d = V3Sub(offset, closest);
return PxIntBool(BAllEqTTTT(FIsGrtrOrEq(mRadius2, V3Dot(d, d))));
#endif
}
PX_FORCE_INLINE PxIntBool operator()(const PxBounds3& bounds) const
{
BucketBox tmp;
tmp.mCenter = bounds.getCenter();
tmp.mExtents = bounds.getExtents();
return (*this)(tmp);
}
private:
SphereAABBTest_SIMD& operator=(const SphereAABBTest_SIMD&);
#ifdef CAN_USE_MOVEMASK
const Vec4V mCenter;
#else
const Vec3V mCenter;
#endif
const FloatV mRadius2;
};
#else
struct SphereAABBTest_Scalar
{
PX_FORCE_INLINE SphereAABBTest_Scalar(const Sphere& sphere) :
mCenter (sphere.center),
mRadius2(sphere.radius * sphere.radius)
{}
PX_FORCE_INLINE PxIntBool operator()(const BucketBox& box) const
{
const PxVec3 minimum = box.getMin();
const PxVec3 maximum = box.getMax();
float d = 0.0f;
//find the square of the distance
//from the sphere to the box
for(PxU32 i=0;i<3;i++)
{
if(mCenter[i]<minimum[i])
{
const float s = mCenter[i] - minimum[i];
d += s*s;
}
else if(mCenter[i]>maximum[i])
{
const float s = mCenter[i] - maximum[i];
d += s*s;
}
}
return d <= mRadius2;
}
private:
SphereAABBTest_Scalar& operator=(const SphereAABBTest_Scalar&);
const PxVec3 mCenter;
float mRadius2;
};
#endif
#ifdef USE_SIMD
typedef SphereAABBTest_SIMD BucketPrunerSphereAABBTest;
#else
typedef SphereAABBTest_Scalar BucketPrunerSphereAABBTest;
#endif
///////////////////////////////////////////////////////////////////////////////
struct BucketPrunerAABBAABBTest
{
PX_FORCE_INLINE BucketPrunerAABBAABBTest(const PxBounds3& queryBox) : mBox(queryBox) {}
PX_FORCE_INLINE PxIntBool operator()(const BucketBox& box) const
{
// PT: we don't use PxBounds3::intersects() because isValid() asserts on our empty boxes!
const PxVec3 bucketMin = box.getMin();
const PxVec3 bucketMax = box.getMax();
return !(mBox.minimum.x > bucketMax.x || bucketMin.x > mBox.maximum.x ||
mBox.minimum.y > bucketMax.y || bucketMin.y > mBox.maximum.y ||
mBox.minimum.z > bucketMax.z || bucketMin.z > mBox.maximum.z);
}
PX_FORCE_INLINE PxIntBool operator()(const PxBounds3& bounds) const
{
// PT: we don't use PxBounds3::intersects() because isValid() asserts on our empty boxes!
const PxVec3& bucketMin = bounds.minimum;
const PxVec3& bucketMax = bounds.maximum;
return !(mBox.minimum.x > bucketMax.x || bucketMin.x > mBox.maximum.x ||
mBox.minimum.y > bucketMax.y || bucketMin.y > mBox.maximum.y ||
mBox.minimum.z > bucketMax.z || bucketMin.z > mBox.maximum.z);
}
private:
BucketPrunerAABBAABBTest& operator=(const BucketPrunerAABBAABBTest&);
const PxBounds3 mBox;
};
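// For illustration: the min/max test above is equivalent to the center/extents form used by
// the commented-out SIMD variant below - two AABBs overlap iff, on every axis, the distance
// between centers does not exceed the sum of extents. A minimal scalar sketch (hypothetical
// helper, not used by the pruner):
static PX_FORCE_INLINE PxIntBool aabbOverlapCenterExtentsSketch(const PxVec3& c0, const PxVec3& e0,
																const PxVec3& c1, const PxVec3& e1)
{
	return PxIntBool(	PxAbs(c0.x - c1.x) <= e0.x + e1.x &&
						PxAbs(c0.y - c1.y) <= e0.y + e1.y &&
						PxAbs(c0.z - c1.z) <= e0.z + e1.z);
}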
/*struct BucketPrunerAABBAABBTest_SIMD
{
PX_FORCE_INLINE BucketPrunerAABBAABBTest_SIMD(const PxBounds3& b)
: mCenter(V3LoadU(b.getCenter()))
, mExtents(V3LoadU(b.getExtents()))
{}
PX_FORCE_INLINE PxIntBool operator()(const BucketBox& box) const
{
return V3AllGrtrOrEq(V3Add(mExtents, AlignedLoad(&box.mExtents.x)), V3Abs(V3Sub(AlignedLoad(&box.mCenter.x), mCenter)));
}
private:
BucketPrunerAABBAABBTest_SIMD& operator=(const BucketPrunerAABBAABBTest_SIMD&);
const Vec3V mCenter, mExtents;
};*/
///////////////////////////////////////////////////////////////////////////////
#ifdef USE_SIMD
struct OBBAABBTest_SIMD
{
OBBAABBTest_SIMD(const PxMat33& rotation, const PxVec3& translation, const PxVec3& extents)
{
const Vec3V eps = V3Load(1e-6f);
mT = V3LoadU(translation);
mExtents = V3LoadU(extents);
// storing the transpose matrices yields a simpler SIMD test
mRT = Mat33V_From_PxMat33(rotation.getTranspose());
mART = Mat33V(V3Add(V3Abs(mRT.col0), eps), V3Add(V3Abs(mRT.col1), eps), V3Add(V3Abs(mRT.col2), eps));
mBB_xyz = M33TrnspsMulV3(mART, mExtents);
/* if(fullTest)
{
const Vec3V eYZX = V3PermYZX(mExtents), eZXY = V3PermZXY(mExtents);
mBB_123 = V3MulAdd(eYZX, V3PermZXY(mART.col0), V3Mul(eZXY, V3PermYZX(mART.col0)));
mBB_456 = V3MulAdd(eYZX, V3PermZXY(mART.col1), V3Mul(eZXY, V3PermYZX(mART.col1)));
mBB_789 = V3MulAdd(eYZX, V3PermZXY(mART.col2), V3Mul(eZXY, V3PermYZX(mART.col2)));
}*/
}
PX_FORCE_INLINE PxIntBool operator()(const BucketBox& box) const
{
const Vec3V extentsV = V3LoadU(box.mExtents);
const Vec3V t = V3Sub(mT, V3LoadU(box.mCenter));
// class I - axes of AABB
if(V3OutOfBounds(t, V3Add(extentsV, mBB_xyz)))
return PxIntFalse;
const Vec3V rX = mRT.col0, rY = mRT.col1, rZ = mRT.col2;
const Vec3V arX = mART.col0, arY = mART.col1, arZ = mART.col2;
const FloatV eX = V3GetX(extentsV), eY = V3GetY(extentsV), eZ = V3GetZ(extentsV);
const FloatV tX = V3GetX(t), tY = V3GetY(t), tZ = V3GetZ(t);
// class II - axes of OBB
{
const Vec3V v = V3ScaleAdd(rZ, tZ, V3ScaleAdd(rY, tY, V3Scale(rX, tX)));
const Vec3V v2 = V3ScaleAdd(arZ, eZ, V3ScaleAdd(arY, eY, V3ScaleAdd(arX, eX, mExtents)));
if(V3OutOfBounds(v, v2))
return PxIntFalse;
}
// if(!fullTest)
return PxIntTrue;
/* // class III - edge cross products. Almost all OBB tests early-out with type I or type II,
// so early-outs here probably aren't useful (TODO: profile)
const Vec3V va = V3NegScaleSub(rZ, tY, V3Scale(rY, tZ));
const Vec3V va2 = V3ScaleAdd(arY, eZ, V3ScaleAdd(arZ, eY, mBB_123));
const BoolV ba = BOr(V3IsGrtr(va, va2), V3IsGrtr(V3Neg(va2), va));
const Vec3V vb = V3NegScaleSub(rX, tZ, V3Scale(rZ, tX));
const Vec3V vb2 = V3ScaleAdd(arX, eZ, V3ScaleAdd(arZ, eX, mBB_456));
const BoolV bb = BOr(V3IsGrtr(vb, vb2), V3IsGrtr(V3Neg(vb2), vb));
const Vec3V vc = V3NegScaleSub(rY, tX, V3Scale(rX, tY));
const Vec3V vc2 = V3ScaleAdd(arX, eY, V3ScaleAdd(arY, eX, mBB_789));
const BoolV bc = BOr(V3IsGrtr(vc, vc2), V3IsGrtr(V3Neg(vc2), vc));
return BAllEq(BOr(ba, BOr(bb,bc)), BFFFF());*/
}
PX_FORCE_INLINE PxIntBool operator()(const PxBounds3& bounds) const
{
BucketBox tmp;
tmp.mCenter = bounds.getCenter();
tmp.mExtents = bounds.getExtents();
return (*this)(tmp);
}
Vec3V mExtents; // extents of OBB
Vec3V mT; // translation of OBB
Mat33V mRT; // transpose of rotation matrix of OBB
Mat33V mART; // transpose of mRT, padded by epsilon
Vec3V mBB_xyz; // extents of OBB along coordinate axes
/* Vec3V mBB_123; // projections of extents onto edge-cross axes
Vec3V mBB_456;
Vec3V mBB_789;*/
};
#else
struct OBBAABBTest_Scalar
{
OBBAABBTest_Scalar(const PxMat33& rotation, const PxVec3& translation, const PxVec3& extents)
{
mR = rotation;
mT = translation;
mExtents = extents;
const PxVec3 eps(1e-6f);
mAR = PxMat33(mR[0].abs() + eps, mR[1].abs() + eps, mR[2].abs() + eps); // Epsilon prevents floating-point inaccuracies (strategy borrowed from RAPID)
mBB_xyz = mAR.transform(mExtents); // Precompute box-box data - Courtesy of Erwin de Vries
/* PxReal ex = mExtents.x, ey = mExtents.y, ez = mExtents.z;
mBB_1 = ey*mAR[2].x + ez*mAR[1].x; mBB_2 = ez*mAR[0].x + ex*mAR[2].x; mBB_3 = ex*mAR[1].x + ey*mAR[0].x;
mBB_4 = ey*mAR[2].y + ez*mAR[1].y; mBB_5 = ez*mAR[0].y + ex*mAR[2].y; mBB_6 = ex*mAR[1].y + ey*mAR[0].y;
mBB_7 = ey*mAR[2].z + ez*mAR[1].z; mBB_8 = ez*mAR[0].z + ex*mAR[2].z; mBB_9 = ex*mAR[1].z + ey*mAR[0].z;*/
}
PX_FORCE_INLINE PxIntBool operator()(const BucketBox& box) const
{
const PxVec3& c = box.mCenter;
const PxVec3& e = box.mExtents;
const PxVec3 T = mT - c;
// Class I : A's basis vectors
if(PxAbs(T.x) > e.x + mBB_xyz.x) return PxIntFalse;
if(PxAbs(T.y) > e.y + mBB_xyz.y) return PxIntFalse;
if(PxAbs(T.z) > e.z + mBB_xyz.z) return PxIntFalse;
// Class II : B's basis vectors
if(PxAbs(T.dot(mR[0])) > e.dot(mAR[0]) + mExtents.x) return PxIntFalse;
if(PxAbs(T.dot(mR[1])) > e.dot(mAR[1]) + mExtents.y) return PxIntFalse;
if(PxAbs(T.dot(mR[2])) > e.dot(mAR[2]) + mExtents.z) return PxIntFalse;
// Class III : 9 cross products
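		// Note: the class III tests below are intentionally disabled (if(0)). Enabling them
		// would also require restoring the mBB_1..mBB_9 precomputation commented out in the
		// constructor, otherwise those members are read uninitialized.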
if(0)
{
if(PxAbs(T.z*mR[0].y - T.y*mR[0].z) > e.y*mAR[0].z + e.z*mAR[0].y + mBB_1) return PxIntFalse; // L = A0 x B0
if(PxAbs(T.z*mR[1].y - T.y*mR[1].z) > e.y*mAR[1].z + e.z*mAR[1].y + mBB_2) return PxIntFalse; // L = A0 x B1
if(PxAbs(T.z*mR[2].y - T.y*mR[2].z) > e.y*mAR[2].z + e.z*mAR[2].y + mBB_3) return PxIntFalse; // L = A0 x B2
if(PxAbs(T.x*mR[0].z - T.z*mR[0].x) > e.x*mAR[0].z + e.z*mAR[0].x + mBB_4) return PxIntFalse; // L = A1 x B0
if(PxAbs(T.x*mR[1].z - T.z*mR[1].x) > e.x*mAR[1].z + e.z*mAR[1].x + mBB_5) return PxIntFalse; // L = A1 x B1
if(PxAbs(T.x*mR[2].z - T.z*mR[2].x) > e.x*mAR[2].z + e.z*mAR[2].x + mBB_6) return PxIntFalse; // L = A1 x B2
if(PxAbs(T.y*mR[0].x - T.x*mR[0].y) > e.x*mAR[0].y + e.y*mAR[0].x + mBB_7) return PxIntFalse; // L = A2 x B0
if(PxAbs(T.y*mR[1].x - T.x*mR[1].y) > e.x*mAR[1].y + e.y*mAR[1].x + mBB_8) return PxIntFalse; // L = A2 x B1
if(PxAbs(T.y*mR[2].x - T.x*mR[2].y) > e.x*mAR[2].y + e.y*mAR[2].x + mBB_9) return PxIntFalse; // L = A2 x B2
}
return PxIntTrue;
}
private:
PxMat33 mR; // rotation matrix
PxMat33 mAR; // absolute rotation matrix
PxVec3 mT; // translation from obb space to model space
PxVec3 mExtents;
PxVec3 mBB_xyz;
float mBB_1, mBB_2, mBB_3;
float mBB_4, mBB_5, mBB_6;
float mBB_7, mBB_8, mBB_9;
};
#endif
#ifdef USE_SIMD
typedef OBBAABBTest_SIMD BucketPrunerOBBAABBTest;
#else
typedef OBBAABBTest_Scalar BucketPrunerOBBAABBTest;
#endif
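// Note on the OBB-vs-AABB tests above: both variants are separating-axis tests. Class I axes
// are the AABB's coordinate axes, class II the OBB's local axes, and class III the nine edge
// cross products. Class III is skipped here, which makes the test conservative (it can report
// an overlap where none exists); this is consistent with the OBB traversals being instantiated
// with isPrecise=false, so processBucket does not assert on such false positives.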
///////////////////////////////////////////////////////////////////////////////
bool BucketPrunerCore::overlap(const ShapeData& queryVolume, PrunerOverlapCallback& pcb) const
{
PX_ASSERT(!mDirty);
bool again = true;
const PxBounds3& cullBox = queryVolume.getPrunerInflatedWorldAABB();
switch(queryVolume.getType())
{
case PxGeometryType::eBOX:
{
if(queryVolume.isOBB())
{
const BucketPrunerOverlapTraversal<BucketPrunerOBBAABBTest, false> overlap;
again = overlap(*this,
BucketPrunerOBBAABBTest(
queryVolume.getPrunerWorldRot33(), queryVolume.getPrunerWorldPos(),
queryVolume.getPrunerBoxGeomExtentsInflated()),
pcb, cullBox);
}
else
{
const BucketPrunerOverlapTraversal<BucketPrunerAABBAABBTest, true> overlap;
again = overlap(*this, BucketPrunerAABBAABBTest(cullBox), pcb, cullBox);
}
}
break;
case PxGeometryType::eCAPSULE:
{
const BucketPrunerOverlapTraversal<BucketPrunerOBBAABBTest, false> overlap;
again = overlap(*this,
BucketPrunerOBBAABBTest(
queryVolume.getPrunerWorldRot33(), queryVolume.getPrunerWorldPos(),
queryVolume.getPrunerBoxGeomExtentsInflated()),
pcb, cullBox);
}
break;
case PxGeometryType::eSPHERE:
{
const Sphere& sphere = queryVolume.getGuSphere();
const BucketPrunerOverlapTraversal<BucketPrunerSphereAABBTest, true> overlap;
again = overlap(*this, BucketPrunerSphereAABBTest(sphere), pcb, cullBox);
}
break;
case PxGeometryType::eCONVEXMESH:
{
const BucketPrunerOverlapTraversal<BucketPrunerOBBAABBTest, false> overlap;
again = overlap(*this,
BucketPrunerOBBAABBTest(
queryVolume.getPrunerWorldRot33(), queryVolume.getPrunerWorldPos(),
queryVolume.getPrunerBoxGeomExtentsInflated()),
pcb, cullBox);
}
break;
default:
PX_ALWAYS_ASSERT_MESSAGE("unsupported overlap query volume geometry type");
}
return again;
}
///////////////////////////////////////////////////////////////////////////////
void BucketPrunerCore::getGlobalBounds(PxBounds3& bounds) const
{
// PT: TODO: refactor with similar code above in the file
const Vec4V centerV = V4LoadU(&mGlobalBox.mCenter.x);
const Vec4V extentsV = V4LoadU(&mGlobalBox.mExtents.x);
Vec4V minV = V4Sub(centerV, extentsV);
Vec4V maxV = V4Add(centerV, extentsV);
#ifdef FREE_PRUNER_SIZE
PxU32 nbFree = mNbFree;
if(nbFree)
{
const PxBounds3* freeBounds = mFreeBounds;
while(nbFree--)
{
minV = V4Min(minV, V4LoadU(&freeBounds->minimum.x));
maxV = V4Max(maxV, V4LoadU(&freeBounds->maximum.x));
freeBounds++;
}
}
#endif
StoreBounds(bounds, minV, maxV);
}
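// For illustration: the SIMD code above is a componentwise min/max merge of bounds. A scalar
// sketch of the same operation (hypothetical helper, not used by the pruner):
static PX_FORCE_INLINE void mergeBoundsSketch(PxBounds3& bounds, const PxBounds3& other)
{
	bounds.minimum = bounds.minimum.minimum(other.minimum);
	bounds.maximum = bounds.maximum.maximum(other.maximum);
}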
///////////////////////////////////////////////////////////////////////////////
void BucketPrunerCore::shiftOrigin(const PxVec3& shift)
{
#ifdef FREE_PRUNER_SIZE
for(PxU32 i=0;i<mNbFree;i++)
{
mFreeBounds[i].minimum -= shift;
mFreeBounds[i].maximum -= shift;
mFreeTransforms[i].p -= shift;
}
#endif
const PxU32 nb = mCoreNbObjects;
//if (nb)
{
mGlobalBox.mCenter -= shift;
#ifdef _DEBUG
mGlobalBox.mDebugMin -= shift[mSortAxis];
#endif
encodeBoxMinMax(mGlobalBox, mSortAxis);
for(PxU32 i=0; i<nb; i++)
{
mCoreBoxes[i].minimum -= shift;
mCoreBoxes[i].maximum -= shift;
mCoreTransforms[i].p -= shift;
}
for(PxU32 i=0; i<mSortedNb; i++)
{
mSortedWorldBoxes[i].mCenter -= shift;
#ifdef _DEBUG
mSortedWorldBoxes[i].mDebugMin -= shift[mSortAxis];
#endif
encodeBoxMinMax(mSortedWorldBoxes[i], mSortAxis);
mSortedTransforms[i].p -= shift;
}
for(PxU32 i=0; i < 5; i++)
mLevel1.mBucketBox[i].mCenter -= shift;
for(PxU32 i=0; i < 5; i++)
for(PxU32 j=0; j < 5; j++)
mLevel2[i].mBucketBox[j].mCenter -= shift;
for(PxU32 i=0; i < 5; i++)
for(PxU32 j=0; j < 5; j++)
for(PxU32 k=0; k < 5; k++)
mLevel3[i][j].mBucketBox[k].mCenter -= shift;
}
}
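// Note: shiftOrigin is called when the application teleports the simulation origin by 'shift'.
// All cached world-space positions are translated by -shift, and the encoded sort-axis
// min/max values are recomputed, so that subsequent queries (expressed in the new frame) keep
// working against the cached data.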
///////////////////////////////////////////////////////////////////////////////
static void visualize(PxRenderOutput& out, const BucketBox& bounds)
{
Cm::renderOutputDebugBox(out, PxBounds3(bounds.getMin(), bounds.getMax()));
}
void BucketPrunerCore::visualize(PxRenderOutput& out, PxU32 color) const
{
const PxTransform idt = PxTransform(PxIdentity);
out << idt;
out << color;
::visualize(out, mGlobalBox);
for(PxU32 i=0;i<5;i++)
{
if(!mLevel1.mCounters[i])
continue;
::visualize(out, mLevel1.mBucketBox[i]);
for(PxU32 j=0;j<5;j++)
{
if(!mLevel2[i].mCounters[j])
continue;
::visualize(out, mLevel2[i].mBucketBox[j]);
for(PxU32 k=0;k<5;k++)
{
if(!mLevel3[i][j].mCounters[k])
continue;
::visualize(out, mLevel3[i][j].mBucketBox[k]);
}
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
BucketPruner::BucketPruner(PxU64 contextID) : mPool(contextID, TRANSFORM_CACHE_GLOBAL)
{
}
BucketPruner::~BucketPruner()
{
}
static PX_FORCE_INLINE void setExternalMemory(BucketPrunerCore& core, PruningPool& pool)
{
core.mDirty = true;
core.setExternalMemory(pool.getNbActiveObjects(), pool.getCurrentWorldBoxes(), pool.getObjects(), pool.getTransforms());
}
bool BucketPruner::addObjects(PrunerHandle* results, const PxBounds3* bounds, const PrunerPayload* data, const PxTransform* transforms, PxU32 count, bool)
{
if(!count)
return true;
const PxU32 valid = mPool.addObjects(results, bounds, data, transforms, count);
::setExternalMemory(mCore, mPool);
return valid == count;
}
void BucketPruner::removeObjects(const PrunerHandle* handles, PxU32 count, PrunerPayloadRemovalCallback* removalCallback)
{
if(!count)
return;
for(PxU32 i=0;i<count;i++)
mPool.removeObject(handles[i], removalCallback);
::setExternalMemory(mCore, mPool);
}
void BucketPruner::updateObjects(const PrunerHandle* handles, PxU32 count, float inflation, const PxU32* boundsIndices, const PxBounds3* newBounds, const PxTransform32* newTransforms)
{
if(!count)
return;
if(handles && boundsIndices && newBounds)
mPool.updateAndInflateBounds(handles, boundsIndices, newBounds, newTransforms, count, inflation);
::setExternalMemory(mCore, mPool);
}
void BucketPruner::purge()
{
}
void BucketPruner::commit()
{
mCore.build();
}
void BucketPruner::merge(const void*)
{
// merge not implemented for bucket pruner
}
void BucketPruner::shiftOrigin(const PxVec3& shift)
{
mCore.shiftOrigin(shift);
}
bool BucketPruner::sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& pcb) const
{
PX_ASSERT(!mCore.mDirty);
if(mCore.mDirty)
return true; // it may crash otherwise
return mCore.sweep(queryVolume, unitDir, inOutDistance, pcb);
}
bool BucketPruner::overlap(const ShapeData& queryVolume, PrunerOverlapCallback& pcb) const
{
PX_ASSERT(!mCore.mDirty);
if(mCore.mDirty)
return true; // it may crash otherwise
return mCore.overlap(queryVolume, pcb);
}
bool BucketPruner::raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& pcb) const
{
PX_ASSERT(!mCore.mDirty);
if(mCore.mDirty)
return true; // it may crash otherwise
return mCore.raycast(origin, unitDir, inOutDistance, pcb);
}
void BucketPruner::visualize(PxRenderOutput& out, PxU32 primaryColor, PxU32 /*secondaryColor*/) const
{
mCore.visualize(out, primaryColor);
}
void BucketPruner::getGlobalBounds(PxBounds3& bounds) const
{
mCore.getGlobalBounds(bounds);
}
#define MBP_ALLOC(x) PX_ALLOC(x, "BucketPruner")
#define MBP_ALLOC_TMP(x) PX_ALLOC(x, "BucketPruner")
#define MBP_FREE(x) PX_FREE(x)
#define INVALID_ID 0xffffffff
#ifndef USE_REGULAR_HASH_MAP
static PX_FORCE_INLINE bool differentPair(const BucketPrunerPair& p, const PrunerPayload& data)
{
const bool same = p.mData == data;
return !same;
}
///////////////////////////////////////////////////////////////////////////////
BucketPrunerMap::BucketPrunerMap() :
mHashSize (0),
mMask (0),
mNbActivePairs (0),
mHashTable (NULL),
mNext (NULL),
mActivePairs (NULL),
mReservedMemory (0)
{
}
///////////////////////////////////////////////////////////////////////////////
BucketPrunerMap::~BucketPrunerMap()
{
purge();
}
///////////////////////////////////////////////////////////////////////////////
void BucketPrunerMap::purge()
{
MBP_FREE(mNext);
MBP_FREE(mActivePairs);
MBP_FREE(mHashTable);
mHashSize = 0;
mMask = 0;
mNbActivePairs = 0;
}
///////////////////////////////////////////////////////////////////////////////
const BucketPrunerPair* BucketPrunerMap::findPair(const PrunerPayload& payload) const
{
if(!mHashTable)
return NULL; // Nothing has been allocated yet
// Compute hash value for this pair
const PxU32 hashValue = PxComputeHash(payload) & mMask;
const BucketPrunerPair* PX_RESTRICT activePairs = mActivePairs;
const PxU32* PX_RESTRICT next = mNext;
// Look for it in the table
PxU32 offset = mHashTable[hashValue];
while(offset!=INVALID_ID && differentPair(activePairs[offset], payload))
{
offset = next[offset]; // Better to have a separate array for this
}
if(offset==INVALID_ID)
return NULL;
PX_ASSERT(offset<mNbActivePairs);
// Match mActivePairs[offset] => the pair is persistent
return &activePairs[offset];
}
// Internal version saving hash computation
PX_FORCE_INLINE BucketPrunerPair* BucketPrunerMap::findPair(const PrunerPayload& payload, PxU32 hashValue) const
{
if(!mHashTable)
return NULL; // Nothing has been allocated yet
BucketPrunerPair* PX_RESTRICT activePairs = mActivePairs;
const PxU32* PX_RESTRICT next = mNext;
// Look for it in the table
PxU32 offset = mHashTable[hashValue];
while(offset!=INVALID_ID && differentPair(activePairs[offset], payload))
{
offset = next[offset]; // Better to have a separate array for this
}
if(offset==INVALID_ID)
return NULL;
PX_ASSERT(offset<mNbActivePairs);
// Match mActivePairs[offset] => the pair is persistent
return &activePairs[offset];
}
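// Note on the layout used by findPair/addPair: this is a chained hash table stored in flat
// arrays. mHashTable[h] holds the index of the first active pair whose bucket is h (or
// INVALID_ID), and mNext[i] links pair i to the next pair in the same bucket. Lookup walks
// the chain until the payload matches or INVALID_ID terminates it; mActivePairs stays dense,
// which is what removePairInternal works to preserve below.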
///////////////////////////////////////////////////////////////////////////////
BucketPrunerPair* BucketPrunerMap::addPair(const PrunerPayload& payload, PxU32 coreIndex, PxU32 timeStamp)
{
PxU32 hashValue = PxComputeHash(payload) & mMask;
{
BucketPrunerPair* PX_RESTRICT p = findPair(payload, hashValue);
if(p)
{
PX_ASSERT(p->mCoreIndex==coreIndex);
PX_ASSERT(p->mTimeStamp==timeStamp);
return p; // Persistent pair
}
}
// This is a new pair
if(mNbActivePairs >= mHashSize)
{
// Get more entries
mHashSize = PxNextPowerOfTwo(mNbActivePairs+1);
mMask = mHashSize-1;
reallocPairs();
// Recompute hash value with new hash size
hashValue = PxComputeHash(payload) & mMask; // ### redundant hash computation here?
}
BucketPrunerPair* PX_RESTRICT p = &mActivePairs[mNbActivePairs];
p->mData = payload;
p->mCoreIndex = coreIndex;
p->mTimeStamp = timeStamp;
mNext[mNbActivePairs] = mHashTable[hashValue];
mHashTable[hashValue] = mNbActivePairs++;
return p;
}
///////////////////////////////////////////////////////////////////////////////
void BucketPrunerMap::removePairInternal(const PrunerPayload& /*payload*/, PxU32 hashValue, PxU32 pairIndex)
{
// Walk the hash table to fix mNext
{
PxU32 offset = mHashTable[hashValue];
PX_ASSERT(offset!=INVALID_ID);
PxU32 previous=INVALID_ID;
while(offset!=pairIndex)
{
previous = offset;
offset = mNext[offset];
}
		// Unlink the pair: make the previous entry in the chain (or the bucket head) skip over it
if(previous!=INVALID_ID)
{
PX_ASSERT(mNext[previous]==pairIndex);
mNext[previous] = mNext[pairIndex];
}
// else we were the first
else mHashTable[hashValue] = mNext[pairIndex];
// we're now free to reuse mNext[pairIndex] without breaking the list
}
#if PX_DEBUG
mNext[pairIndex]=INVALID_ID;
#endif
// Invalidate entry
// Fill holes
if(1)
{
// 1) Remove last pair
const PxU32 lastPairIndex = mNbActivePairs-1;
if(lastPairIndex==pairIndex)
{
mNbActivePairs--;
}
else
{
const BucketPrunerPair* last = &mActivePairs[lastPairIndex];
const PxU32 lastHashValue = PxComputeHash(last->mData) & mMask;
// Walk the hash table to fix mNext
PxU32 offset = mHashTable[lastHashValue];
PX_ASSERT(offset!=INVALID_ID);
PxU32 previous=INVALID_ID;
while(offset!=lastPairIndex)
{
previous = offset;
offset = mNext[offset];
}
			// Unlink the last pair: make the previous entry in its chain (or the bucket head) skip over it
if(previous!=INVALID_ID)
{
PX_ASSERT(mNext[previous]==lastPairIndex);
mNext[previous] = mNext[lastPairIndex];
}
// else we were the first
else mHashTable[lastHashValue] = mNext[lastPairIndex];
// we're now free to reuse mNext[lastPairIndex] without breaking the list
#if PX_DEBUG
mNext[lastPairIndex]=INVALID_ID;
#endif
// Don't invalidate entry since we're going to shrink the array
// 2) Re-insert in free slot
mActivePairs[pairIndex] = mActivePairs[lastPairIndex];
#if PX_DEBUG
PX_ASSERT(mNext[pairIndex]==INVALID_ID);
#endif
mNext[pairIndex] = mHashTable[lastHashValue];
mHashTable[lastHashValue] = pairIndex;
mNbActivePairs--;
}
}
}
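// Note: removal keeps mActivePairs dense in two steps - first the removed pair is unlinked
// from its hash chain, then the last active pair is moved into the freed slot and re-linked
// into its own chain at the new index. This avoids holes in the array at the cost of
// relocating one pair per removal.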
///////////////////////////////////////////////////////////////////////////////
bool BucketPrunerMap::removePair(const PrunerPayload& payload, PxU32& coreIndex, PxU32& timeStamp)
{
const PxU32 hashValue = PxComputeHash(payload) & mMask;
const BucketPrunerPair* p = findPair(payload, hashValue);
if(!p)
return false;
PX_ASSERT(p->mData==payload);
coreIndex = p->mCoreIndex;
timeStamp = p->mTimeStamp;
removePairInternal(payload, hashValue, getPairIndex(p));
shrinkMemory();
return true;
}
///////////////////////////////////////////////////////////////////////////////
void BucketPrunerMap::shrinkMemory()
{
// Check correct memory against actually used memory
const PxU32 correctHashSize = PxNextPowerOfTwo(mNbActivePairs);
if(mHashSize==correctHashSize)
return;
if(mReservedMemory && correctHashSize < mReservedMemory)
return;
// Reduce memory used
mHashSize = correctHashSize;
mMask = mHashSize-1;
reallocPairs();
}
///////////////////////////////////////////////////////////////////////////////
static PX_FORCE_INLINE void storeDwords(PxU32* dest, PxU32 nb, PxU32 value)
{
while(nb--)
*dest++ = value;
}
void BucketPrunerMap::reallocPairs()
{
MBP_FREE(mHashTable);
mHashTable = reinterpret_cast<PxU32*>(MBP_ALLOC(mHashSize*sizeof(PxU32)));
storeDwords(mHashTable, mHashSize, INVALID_ID);
// Get some bytes for new entries
BucketPrunerPair* newPairs = reinterpret_cast<BucketPrunerPair*>(MBP_ALLOC(mHashSize * sizeof(BucketPrunerPair)));
PX_ASSERT(newPairs);
PxU32* newNext = reinterpret_cast<PxU32*>(MBP_ALLOC(mHashSize * sizeof(PxU32)));
PX_ASSERT(newNext);
// Copy old data if needed
if(mNbActivePairs)
PxMemCopy(newPairs, mActivePairs, mNbActivePairs*sizeof(BucketPrunerPair));
	// ### re-linking every pair is required here since the hash table was rebuilt from scratch;
	// the hash of a payload is constant, but the bucket index (hash & mMask) changes with the
	// mask, so each pair's bucket must be recomputed with the new mask.
for(PxU32 i=0;i<mNbActivePairs;i++)
{
const PxU32 hashValue = PxComputeHash(mActivePairs[i].mData) & mMask; // New hash value with new mask
newNext[i] = mHashTable[hashValue];
mHashTable[hashValue] = i;
}
// Delete old data
MBP_FREE(mNext);
MBP_FREE(mActivePairs);
// Assign new pointer
mActivePairs = newPairs;
mNext = newNext;
}
///////////////////////////////////////////////////////////////////////////////
void BucketPrunerMap::reserveMemory(PxU32 memSize)
{
if(!memSize)
return;
if(!PxIsPowerOfTwo(memSize))
memSize = PxNextPowerOfTwo(memSize);
mHashSize = memSize;
mMask = mHashSize-1;
mReservedMemory = memSize;
reallocPairs();
}
///////////////////////////////////////////////////////////////////////////////
#endif
| 87,715 | C++ | 31.130403 | 221 | 0.691581 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuIncrementalAABBPrunerCore.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_INCREMENTAL_AABB_PRUNER_CORE_H
#define GU_INCREMENTAL_AABB_PRUNER_CORE_H
#include "GuPruner.h"
#include "GuIncrementalAABBTree.h"
#include "GuPruningPool.h"
#include "GuAABBTreeUpdateMap.h"
#include "foundation/PxHashMap.h"
namespace physx
{
class PxRenderOutput;
namespace Gu
{
typedef PxHashMap<PoolIndex, IncrementalAABBTreeNode*> IncrementalPrunerMap;
struct CoreTree
{
PX_FORCE_INLINE CoreTree() : timeStamp(0), tree(NULL) {}
PxU32 timeStamp;
IncrementalAABBTree* tree;
IncrementalPrunerMap mapping;
};
class IncrementalAABBPrunerCore : public PxUserAllocated
{
public:
IncrementalAABBPrunerCore(const PruningPool* pool);
~IncrementalAABBPrunerCore();
void release();
bool addObject(const PoolIndex poolIndex, PxU32 timeStamp);
bool removeObject(const PoolIndex poolIndex, const PoolIndex poolRelocatedLastIndex, PxU32& timeStamp);
	// called when an object in the bucket pruner swaps its pool index with an object in the regular AABB pruner
void swapIndex(const PoolIndex poolIndex, const PoolIndex poolRelocatedLastIndex);
bool updateObject(const PoolIndex poolIndex);
PxU32 removeMarkedObjects(PxU32 timeStamp);
bool raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback&) const;
bool overlap(const ShapeData& queryVolume, PrunerOverlapCallback&) const;
bool sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback&) const;
void getGlobalBounds(PxBounds3&) const;
void shiftOrigin(const PxVec3& shift);
void visualize(PxRenderOutput& out, PxU32 color) const;
PX_FORCE_INLINE void timeStampChange()
{
// swap current and last tree
mLastTree = (mLastTree + 1) % 2;
mCurrentTree = (mCurrentTree + 1) % 2;
}
void build() {}
PX_FORCE_INLINE PxU32 getNbObjects() const { return mAABBTree[0].mapping.size() + mAABBTree[1].mapping.size(); }
private:
void updateMapping(IncrementalPrunerMap& mapping, const PoolIndex poolIndex, IncrementalAABBTreeNode* node);
void test(bool hierarchyCheck = true);
private:
static const PxU32 NUM_TREES = 2;
PxU32 mCurrentTree;
PxU32 mLastTree;
CoreTree mAABBTree[NUM_TREES];
const PruningPool* mPool; // Pruning pool from AABB pruner
NodeList mChangedLeaves;
};
}}
#endif
| 4,211 | C | 37.290909 | 126 | 0.725481 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuMTD.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuMTD.h"
#include "GuDistancePointSegment.h"
#include "GuDistanceSegmentSegment.h"
#include "GuDistanceSegmentBox.h"
#include "GuVecBox.h"
#include "GuVecCapsule.h"
#include "GuVecConvexHullNoScale.h"
#include "GuInternal.h"
#include "GuContactMethodImpl.h"
#include "GuBoxConversion.h"
#include "GuPCMShapeConvex.h"
#include "GuPCMContactGen.h"
#include "GuConvexMesh.h"
#include "GuGJK.h"
#include "GuSphere.h"
#include "geomutils/PxContactBuffer.h"
using namespace physx;
using namespace Gu;
static PX_FORCE_INLINE float validateDepth(float depth)
{
// PT: penetration depth must always be positive or null, but FPU accuracy being what it is, we sometimes
// end up with very small, epsilon-sized negative depths. We clamp those to zero, since they don't indicate
	// real bugs in the MTD functions. Anything larger than epsilon used to be flagged by the assert below.
	const float epsilon = 1.e-3f;
	//ML: because we are shrinking the shape at this point, the depth might legitimately be larger than eps, so this assert is no longer valid
	//PX_ASSERT(depth>=-epsilon);
PX_UNUSED(epsilon);
return PxMax(depth, 0.0f);
}
///////////////////////////////////////////////////////////////////////////////
// PT: the function names should follow the order in which the PxGeometryTypes are listed,
// i.e. computeMTD_Type0Type1 with Type0<=Type1. This is to guarantee that the proper results
// (following the desired convention) are returned from the PxGeometryQuery-level call.
///////////////////////////////////////////////////////////////////////////////
static bool computeMTD_SphereSphere(PxVec3& mtd, PxF32& depth, const Sphere& sphere0, const Sphere& sphere1)
{
const PxVec3 delta = sphere0.center - sphere1.center;
const PxReal d2 = delta.magnitudeSquared();
const PxReal radiusSum = sphere0.radius + sphere1.radius;
if(d2 > radiusSum*radiusSum)
return false;
const PxF32 d = manualNormalize(mtd, delta, d2);
depth = validateDepth(radiusSum - d);
return true;
}
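// For illustration, a minimal usage sketch of the sphere-sphere case above (hypothetical
// helper, assuming the Gu::Sphere (center, radius) constructor): two unit spheres whose
// centers are 1.5 apart overlap, the MTD points from sphere1 towards sphere0, and the depth
// is the radius sum minus the center distance (2.0 - 1.5 = 0.5).
static PX_FORCE_INLINE bool exampleSphereSphereMTD()
{
	const Sphere s0(PxVec3(0.0f), 1.0f);
	const Sphere s1(PxVec3(1.5f, 0.0f, 0.0f), 1.0f);
	PxVec3 mtd;
	PxF32 depth;
	const bool hit = computeMTD_SphereSphere(mtd, depth, s0, s1);
	// expected: hit == true, mtd == (-1,0,0), depth == 0.5
	return hit && depth > 0.0f;
}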
///////////////////////////////////////////////////////////////////////////////
static bool computeMTD_SphereCapsule(PxVec3& mtd, PxF32& depth, const Sphere& sphere, const Capsule& capsule)
{
const PxReal radiusSum = sphere.radius + capsule.radius;
PxReal u;
const PxReal d2 = distancePointSegmentSquared(capsule, sphere.center, &u);
if(d2 > radiusSum*radiusSum)
return false;
const PxVec3 normal = sphere.center - capsule.getPointAt(u);
const PxReal lenSq = normal.magnitudeSquared();
const PxF32 d = manualNormalize(mtd, normal, lenSq);
depth = validateDepth(radiusSum - d);
return true;
}
///////////////////////////////////////////////////////////////////////////////
//This version is ported 1:1 from novodex
static PX_FORCE_INLINE bool ContactSphereBox(const PxVec3& sphereOrigin,
PxReal sphereRadius,
const PxVec3& boxExtents,
// const PxcCachedTransforms& boxCacheTransform,
const PxTransform32& boxTransform,
PxVec3& point,
PxVec3& normal,
PxReal& separation,
PxReal contactDistance)
{
//returns true on contact
const PxVec3 delta = sphereOrigin - boxTransform.p; // s1.center - s2.center;
PxVec3 dRot = boxTransform.rotateInv(delta); //transform delta into OBB body coords.
	//check if delta is outside the AABB - and clip the vector to the AABB.
bool outside = false;
if(dRot.x < -boxExtents.x)
{
outside = true;
dRot.x = -boxExtents.x;
}
else if(dRot.x > boxExtents.x)
{
outside = true;
dRot.x = boxExtents.x;
}
if(dRot.y < -boxExtents.y)
{
outside = true;
dRot.y = -boxExtents.y;
}
else if(dRot.y > boxExtents.y)
{
outside = true;
dRot.y = boxExtents.y;
}
if(dRot.z < -boxExtents.z)
{
outside = true;
dRot.z =-boxExtents.z;
}
else if(dRot.z > boxExtents.z)
{
outside = true;
dRot.z = boxExtents.z;
}
if(outside) //if clipping was done, sphere center is outside of box.
{
point = boxTransform.rotate(dRot); //get clipped delta back in world coords.
normal = delta - point; //what we clipped away.
const PxReal lenSquared = normal.magnitudeSquared();
const PxReal inflatedDist = sphereRadius + contactDistance;
if(lenSquared > inflatedDist * inflatedDist)
return false; //disjoint
//normalize to make it into the normal:
separation = PxRecipSqrt(lenSquared);
normal *= separation;
separation *= lenSquared;
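		// at this point separation == (1/len) * len^2 == len, i.e. the distance from the
		// clipped point on the box surface to the sphere center, computed without a second sqrt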
//any plane that touches the sphere is tangential, so a vector from contact point to sphere center defines normal.
//we could also use point here, which has same direction.
//this is either a faceFace or a vertexFace contact depending on whether the box's face or vertex collides, but we did not distinguish.
//We'll just use vertex face for now, this info isn't really being used anyway.
//contact point is point on surface of cube closest to sphere center.
point += boxTransform.p;
separation -= sphereRadius;
return true;
}
else
{
//center is in box, we definitely have a contact.
PxVec3 locNorm; //local coords contact normal
PxVec3 absdRot;
absdRot = PxVec3(PxAbs(dRot.x), PxAbs(dRot.y), PxAbs(dRot.z));
PxVec3 distToSurface = boxExtents - absdRot; //dist from embedded center to box surface along 3 dimensions.
//find smallest element of distToSurface
if(distToSurface.y < distToSurface.x)
{
if(distToSurface.y < distToSurface.z)
{
//y
locNorm = PxVec3(0.0f, dRot.y > 0.0f ? 1.0f : -1.0f, 0.0f);
separation = -distToSurface.y;
}
else
{
//z
locNorm = PxVec3(0.0f,0.0f, dRot.z > 0.0f ? 1.0f : -1.0f);
separation = -distToSurface.z;
}
}
else
{
if(distToSurface.x < distToSurface.z)
{
//x
locNorm = PxVec3(dRot.x > 0.0f ? 1.0f : -1.0f, 0.0f, 0.0f);
separation = -distToSurface.x;
}
else
{
//z
locNorm = PxVec3(0.0f,0.0f, dRot.z > 0.0f ? 1.0f : -1.0f);
separation = -distToSurface.z;
}
}
//separation so far is just the embedding of the center point; we still have to push out all of the radius.
point = sphereOrigin;
normal = boxTransform.rotate(locNorm);
separation -= sphereRadius;
return true;
}
}
static bool computeMTD_SphereBox(PxVec3& mtd, PxF32& depth, const Sphere& sphere, const Box& box)
{
PxVec3 point;
if(!ContactSphereBox( sphere.center, sphere.radius,
box.extents, PxTransform32(box.center, PxQuat(box.rot)),
point, mtd, depth, 0.0f))
return false;
depth = validateDepth(-depth);
return true;
}
///////////////////////////////////////////////////////////////////////////////
static bool computeMTD_CapsuleCapsule(PxVec3& mtd, PxF32& depth, const Capsule& capsule0, const Capsule& capsule1)
{
PxReal s,t;
const PxReal d2 = distanceSegmentSegmentSquared(capsule0, capsule1, &s, &t);
const PxReal radiusSum = capsule0.radius + capsule1.radius;
if(d2 > radiusSum*radiusSum)
return false;
const PxVec3 normal = capsule0.getPointAt(s) - capsule1.getPointAt(t);
const PxReal lenSq = normal.magnitudeSquared();
const PxF32 d = manualNormalize(mtd, normal, lenSq);
depth = validateDepth(radiusSum - d);
return true;
}
///////////////////////////////////////////////////////////////////////////////
static PX_FORCE_INLINE void reorderMTD(PxVec3& mtd, const PxVec3& center0, const PxVec3& center1)
{
const PxVec3 witness = center0 - center1;
if(mtd.dot(witness) < 0.0f)
mtd = -mtd;
}
static PX_FORCE_INLINE void projectBox(PxReal& min, PxReal& max, const PxVec3& axis, const Box& box)
{
const PxReal boxCen = box.center.dot(axis);
const PxReal boxExt =
PxAbs(box.rot.column0.dot(axis)) * box.extents.x
+ PxAbs(box.rot.column1.dot(axis)) * box.extents.y
+ PxAbs(box.rot.column2.dot(axis)) * box.extents.z;
min = boxCen - boxExt;
max = boxCen + boxExt;
}
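// Note: projectBox computes the interval of the box projected onto 'axis' as [c - r, c + r],
// where c is the projection of the box center and r = |a.R0|*ex + |a.R1|*ey + |a.R2|*ez is
// the box's support radius along the axis. Two convex sets are disjoint iff some axis yields
// non-overlapping intervals (separating axis theorem), which is what PxcTestAxis checks below.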
static bool PxcTestAxis(const PxVec3& axis, const Segment& segment, PxReal radius, const Box& box, PxReal& depth)
{
// Project capsule
PxReal min0 = segment.p0.dot(axis);
PxReal max0 = segment.p1.dot(axis);
if(min0>max0) PxSwap(min0, max0);
min0 -= radius;
max0 += radius;
// Project box
PxReal Min1, Max1;
projectBox(Min1, Max1, axis, box);
// Test projections
if(max0<Min1 || Max1<min0)
return false;
const PxReal d0 = max0 - Min1;
PX_ASSERT(d0>=0.0f);
const PxReal d1 = Max1 - min0;
PX_ASSERT(d1>=0.0f);
depth = physx::intrinsics::selectMin(d0, d1);
return true;
}
static bool PxcCapsuleOBBOverlap3(const Segment& segment, PxReal radius, const Box& box, PxReal* t=NULL, PxVec3* pp=NULL)
{
PxVec3 Sep(0.0f);
PxReal PenDepth = PX_MAX_REAL;
// Test normals
for(PxU32 i=0;i<3;i++)
{
PxReal d;
if(!PxcTestAxis(box.rot[i], segment, radius, box, d))
return false;
if(d<PenDepth)
{
PenDepth = d;
Sep = box.rot[i];
}
}
// Test edges
PxVec3 CapsuleAxis(segment.p1 - segment.p0);
CapsuleAxis = CapsuleAxis.getNormalized();
for(PxU32 i=0;i<3;i++)
{
PxVec3 Cross = CapsuleAxis.cross(box.rot[i]);
if(!isAlmostZero(Cross))
{
Cross = Cross.getNormalized();
PxReal d;
if(!PxcTestAxis(Cross, segment, radius, box, d))
return false;
if(d<PenDepth)
{
PenDepth = d;
Sep = Cross;
}
}
}
reorderMTD(Sep, segment.computeCenter(), box.center);
if(t)
*t = validateDepth(PenDepth);
if(pp)
*pp = Sep;
return true;
}
static bool computeMTD_CapsuleBox(PxVec3& mtd, PxF32& depth, const Capsule& capsule, const Box& box)
{
PxReal t;
PxVec3 onBox;
const PxReal d2 = distanceSegmentBoxSquared(capsule.p0, capsule.p1, box.center, box.extents, box.rot, &t, &onBox);
if(d2 > capsule.radius*capsule.radius)
return false;
if(d2 != 0.0f)
{
// PT: the capsule segment doesn't intersect the box => distance-based version
const PxVec3 onSegment = capsule.getPointAt(t);
onBox = box.center + box.rot.transform(onBox);
PxVec3 normal = onSegment - onBox;
PxReal normalLen = normal.magnitude();
if(normalLen != 0.0f)
{
normal *= 1.0f/normalLen;
mtd = normal;
depth = validateDepth(capsule.radius - PxSqrt(d2));
return true;
}
}
// PT: the capsule segment intersects the box => penetration-based version
return PxcCapsuleOBBOverlap3(capsule, capsule.radius, box, &depth, &mtd);
}
///////////////////////////////////////////////////////////////////////////////
static bool PxcTestAxis(const PxVec3& axis, const Box& box0, const Box& box1, PxReal& depth)
{
// Project box
PxReal min0, max0;
projectBox(min0, max0, axis, box0);
// Project box
PxReal Min1, Max1;
projectBox(Min1, Max1, axis, box1);
// Test projections
if(max0<Min1 || Max1<min0)
return false;
const PxReal d0 = max0 - Min1;
PX_ASSERT(d0>=0.0f);
const PxReal d1 = Max1 - min0;
PX_ASSERT(d1>=0.0f);
depth = physx::intrinsics::selectMin(d0, d1);
return true;
}
static PX_FORCE_INLINE bool testBoxBoxAxis(PxVec3& mtd, PxF32& depth, const PxVec3& axis, const Box& box0, const Box& box1)
{
PxF32 d;
if(!PxcTestAxis(axis, box0, box1, d))
return false;
if(d<depth)
{
depth = d;
mtd = axis;
}
return true;
}
static bool computeMTD_BoxBox(PxVec3& _mtd, PxF32& _depth, const Box& box0, const Box& box1)
{
PxVec3 mtd;
PxF32 depth = PX_MAX_F32;
if(!testBoxBoxAxis(mtd, depth, box0.rot.column0, box0, box1))
return false;
if(!testBoxBoxAxis(mtd, depth, box0.rot.column1, box0, box1))
return false;
if(!testBoxBoxAxis(mtd, depth, box0.rot.column2, box0, box1))
return false;
if(!testBoxBoxAxis(mtd, depth, box1.rot.column0, box0, box1))
return false;
if(!testBoxBoxAxis(mtd, depth, box1.rot.column1, box0, box1))
return false;
if(!testBoxBoxAxis(mtd, depth, box1.rot.column2, box0, box1))
return false;
for(PxU32 j=0;j<3;j++)
{
for(PxU32 i=0;i<3;i++)
{
PxVec3 cross = box0.rot[i].cross(box1.rot[j]);
if(!isAlmostZero(cross))
{
cross = cross.getNormalized();
if(!testBoxBoxAxis(mtd, depth, cross, box0, box1))
return false;
}
}
}
reorderMTD(mtd, box1.center, box0.center);
_mtd = -mtd;
_depth = validateDepth(depth);
return true;
}
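// Note: the box-box MTD above is a full SAT over the classic 15 candidate axes - 3 face
// normals per box plus the 9 pairwise edge cross products - keeping the axis of minimum
// penetration. Near-parallel edge pairs are skipped via isAlmostZero, since their cross
// product is degenerate and the face-normal axes already cover those directions.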
///////////////////////////////////////////////////////////////////////////////
using namespace physx::aos;
bool pointConvexDistance(PxVec3& normal_, PxVec3& closestPoint_, PxReal& sqDistance, const PxVec3& pt, const ConvexMesh* convexMesh, const PxMeshScale& meshScale, const PxTransform32& convexPose)
{
const PxTransform transform0(pt);
PxVec3 onSegment, onConvex;
using namespace aos;
const Vec3V zeroV = V3Zero();
Vec3V closA, closB, normalV;
GjkStatus status;
FloatV dist;
{
const ConvexHullData* hullData = &convexMesh->getHull();
const Vec3V vScale = V3LoadU_SafeReadW(meshScale.scale); // PT: safe because 'rotation' follows 'scale' in PxMeshScale
const QuatV vQuat = QuatVLoadU(&meshScale.rotation.x);
const ConvexHullV convexHull_(hullData, zeroV, vScale, vQuat, meshScale.isIdentity());
const PxMatTransformV aToB(convexPose.transformInv(transform0));
//const CapsuleV capsule(zeroV, zeroV, FZero());//this is a point
const CapsuleV capsule_(aToB.p, FZero());//this is a point
const LocalConvex<CapsuleV> capsule(capsule_);
const LocalConvex<ConvexHullV> convexHull(convexHull_);
status = gjk<LocalConvex<CapsuleV>, LocalConvex<ConvexHullV> >(capsule, convexHull, aToB.p, FMax(), closA, closB, normalV, dist);
}
bool intersect = status == GJK_CONTACT;
if(intersect)
{
sqDistance = 0.0f;
}
else
{
const FloatV sqDist = FMul(dist, dist);
FStore(sqDist, &sqDistance);
V3StoreU(normalV, normal_);
V3StoreU(closB, closestPoint_);
normal_ = convexPose.rotate(normal_);
closestPoint_ = convexPose.transform(closestPoint_);
}
return intersect;
}
static bool computeMTD_SphereConvex(PxVec3& mtd, PxF32& depth, const Sphere& sphere, const PxConvexMeshGeometry& convexGeom, const PxTransform32& convexPose)
{
PxReal d2;
const ConvexMesh* convexMesh = static_cast<const ConvexMesh*>(convexGeom.convexMesh);
PxVec3 dummy;
if(!pointConvexDistance(mtd, dummy, d2, sphere.center, convexMesh, convexGeom.scale, convexPose))
{
if(d2 > sphere.radius*sphere.radius)
return false;
depth = validateDepth(sphere.radius - PxSqrt(d2));
mtd = -mtd;
return true;
}
// PT: if we reach this place, the sphere center touched the convex => switch to penetration-based code
PxU32 nbPolygons = convexMesh->getNbPolygonsFast();
const HullPolygonData* polygons = convexMesh->getPolygons();
const PxVec3 localSphereCenter = convexPose.transformInv(sphere.center);
PxReal dmax = -PX_MAX_F32;
while(nbPolygons--)
{
const HullPolygonData& polygon = *polygons++;
const PxF32 d = polygon.mPlane.distance(localSphereCenter);
if(d>dmax)
{
dmax = d;
mtd = convexPose.rotate(polygon.mPlane.n);
}
}
depth = validateDepth(sphere.radius - dmax);
return true;
}
///////////////////////////////////////////////////////////////////////////////
//ML : capsule will be in the local space of convexHullV
static bool internalComputeMTD_CapsuleConvex(const CapsuleV& capsule, const bool idtScale, const ConvexHullV& convexHullV, const aos::PxTransformV& transf1,
aos::FloatV& penetrationDepth, aos::Vec3V& normal)
{
PolygonalData polyData;
getPCMConvexData(convexHullV, idtScale, polyData);
PX_ALIGN(16, PxU8 buff[sizeof(SupportLocalImpl<ConvexHullV>)]);
SupportLocal* map = (idtScale ? static_cast<SupportLocal*>(PX_PLACEMENT_NEW(buff, SupportLocalImpl<ConvexHullNoScaleV>)(static_cast<const ConvexHullNoScaleV&>(convexHullV), transf1, convexHullV.vertex2Shape, convexHullV.shape2Vertex, idtScale)) :
static_cast<SupportLocal*>(PX_PLACEMENT_NEW(buff, SupportLocalImpl<ConvexHullV>)(convexHullV, transf1, convexHullV.vertex2Shape, convexHullV.shape2Vertex, idtScale)));
return computeMTD(capsule, polyData, map, penetrationDepth, normal);
}
static bool computeMTD_CapsuleConvex(PxVec3& mtd, PxF32& depth, const Capsule& capsule, const PxTransform32& capsulePose, const PxConvexMeshGeometry& convexGeom, const PxTransform32& convexPose)
{
const FloatV capsuleHalfHeight = FLoad(capsule.length()*0.5f);
const FloatV capsuleRadius = FLoad(capsule.radius);
const Vec3V zeroV = V3Zero();
// Convex mesh
const ConvexMesh* convexMesh = static_cast<const ConvexMesh*>(convexGeom.convexMesh);
const ConvexHullData* hull = &convexMesh->getHull();
const Vec3V vScale = V3LoadU_SafeReadW(convexGeom.scale.scale); // PT: safe because 'rotation' follows 'scale' in PxMeshScale
const QuatV vQuat = QuatVLoadU(&convexGeom.scale.rotation.x);
const ConvexHullV convexHullV(hull, zeroV, vScale, vQuat, convexGeom.scale.isIdentity());
//~Convex mesh
const QuatV q0 = QuatVLoadU(&capsulePose.q.x);
const Vec3V p0 = V3LoadU(&capsulePose.p.x);
const QuatV q1 = QuatVLoadU(&convexPose.q.x);
const Vec3V p1 = V3LoadU(&convexPose.p.x);
const PxTransformV transf0(p0, q0);
const PxTransformV transf1(p1, q1);
const PxTransformV curRTrans(transf1.transformInv(transf0));
const PxMatTransformV aToB(curRTrans);
Vec3V normal = zeroV;
FloatV penetrationDepth = FZero();
const CapsuleV capsuleV(aToB.p, aToB.rotate(V3Scale(V3UnitX(), capsuleHalfHeight)), capsuleRadius);
const bool idtScale = convexGeom.scale.isIdentity();
bool hasContacts = internalComputeMTD_CapsuleConvex(capsuleV, idtScale, convexHullV, transf1, penetrationDepth, normal);
if(hasContacts)
{
FStore(penetrationDepth, &depth);
depth = validateDepth(depth);
V3StoreU(normal, mtd);
}
return hasContacts;
}
///////////////////////////////////////////////////////////////////////////////
static bool internalComputeMTD_BoxConvex(const PxVec3 halfExtents, const BoxV& box, const bool idtScale, const ConvexHullV& convexHullV, const aos::PxTransformV& transf0, const aos::PxTransformV& transf1,
aos::FloatV& penetrationDepth, aos::Vec3V& normal)
{
PolygonalData polyData0;
PCMPolygonalBox polyBox0(halfExtents);
polyBox0.getPolygonalData(&polyData0);
polyData0.mPolygonVertexRefs = gPCMBoxPolygonData;
PolygonalData polyData1;
getPCMConvexData(convexHullV, idtScale, polyData1);
const Mat33V identity = M33Identity();
SupportLocalImpl<BoxV> map0(box, transf0, identity, identity, true);
PX_ALIGN(16, PxU8 buff[sizeof(SupportLocalImpl<ConvexHullV>)]);
SupportLocal* map1 = (idtScale ? static_cast<SupportLocal*>(PX_PLACEMENT_NEW(buff, SupportLocalImpl<ConvexHullNoScaleV>)(static_cast<const ConvexHullNoScaleV&>(convexHullV), transf1, convexHullV.vertex2Shape, convexHullV.shape2Vertex, idtScale)) :
static_cast<SupportLocal*>(PX_PLACEMENT_NEW(buff, SupportLocalImpl<ConvexHullV>)(convexHullV, transf1, convexHullV.vertex2Shape, convexHullV.shape2Vertex, idtScale)));
return computeMTD(polyData0, polyData1, &map0, map1, penetrationDepth, normal);
}
static bool computeMTD_BoxConvex(PxVec3& mtd, PxF32& depth, const Box& box, const PxConvexMeshGeometry& convexGeom, const PxTransform32& convexPose)
{
const Vec3V zeroV = V3Zero();
const PxTransform boxPose = box.getTransform();
const Vec3V boxExtents = V3LoadU(box.extents);
const BoxV boxV(zeroV, boxExtents);
// Convex mesh
const ConvexMesh* convexMesh = static_cast<const ConvexMesh*>(convexGeom.convexMesh);
const ConvexHullData* hull = &convexMesh->getHull();
const Vec3V vScale = V3LoadU_SafeReadW(convexGeom.scale.scale); // PT: safe because 'rotation' follows 'scale' in PxMeshScale
const QuatV vQuat = QuatVLoadU(&convexGeom.scale.rotation.x);
const ConvexHullV convexHullV(hull, zeroV, vScale, vQuat, convexGeom.scale.isIdentity());
//~Convex mesh
const QuatV q0 = QuatVLoadU(&boxPose.q.x);
const Vec3V p0 = V3LoadU(&boxPose.p.x);
const QuatV q1 = QuatVLoadU(&convexPose.q.x);
const Vec3V p1 = V3LoadU(&convexPose.p.x);
const PxTransformV transf0(p0, q0);
const PxTransformV transf1(p1, q1);
Vec3V normal=zeroV;
FloatV penetrationDepth=FZero();
const bool idtScale = convexGeom.scale.isIdentity();
bool hasContacts = internalComputeMTD_BoxConvex(box.extents, boxV, idtScale, convexHullV, transf0, transf1, penetrationDepth, normal);
if(hasContacts)
{
FStore(penetrationDepth, &depth);
depth = validateDepth(depth);
V3StoreU(normal, mtd);
}
return hasContacts;
}
static bool internalComputeMTD_ConvexConvex(const bool idtScale0, const bool idtScale1, const ConvexHullV& convexHullV0, const ConvexHullV& convexHullV1, const aos::PxTransformV& transf0, const aos::PxTransformV& transf1,
aos::FloatV& penetrationDepth, aos::Vec3V& normal)
{
PolygonalData polyData0, polyData1;
getPCMConvexData(convexHullV0, idtScale0, polyData0);
getPCMConvexData(convexHullV1, idtScale1, polyData1);
PX_ALIGN(16, PxU8 buff0[sizeof(SupportLocalImpl<ConvexHullV>)]);
PX_ALIGN(16, PxU8 buff1[sizeof(SupportLocalImpl<ConvexHullV>)]);
SupportLocal* map0 = (idtScale0 ? static_cast<SupportLocal*>(PX_PLACEMENT_NEW(buff0, SupportLocalImpl<ConvexHullNoScaleV>)(static_cast<const ConvexHullNoScaleV&>(convexHullV0), transf0, convexHullV0.vertex2Shape, convexHullV0.shape2Vertex, idtScale0)) :
static_cast<SupportLocal*>(PX_PLACEMENT_NEW(buff0, SupportLocalImpl<ConvexHullV>)(convexHullV0, transf0, convexHullV0.vertex2Shape, convexHullV0.shape2Vertex, idtScale0)));
SupportLocal* map1 = (idtScale1 ? static_cast<SupportLocal*>(PX_PLACEMENT_NEW(buff1, SupportLocalImpl<ConvexHullNoScaleV>)(static_cast<const ConvexHullNoScaleV&>(convexHullV1), transf1, convexHullV1.vertex2Shape, convexHullV1.shape2Vertex, idtScale1)) :
static_cast<SupportLocal*>(PX_PLACEMENT_NEW(buff1, SupportLocalImpl<ConvexHullV>)(convexHullV1, transf1, convexHullV1.vertex2Shape, convexHullV1.shape2Vertex, idtScale1)));
return computeMTD(polyData0, polyData1, map0, map1, penetrationDepth, normal);
}
///////////////////////////////////////////////////////////////////////////////
static bool computeMTD_ConvexConvex(PxVec3& mtd, PxF32& depth, const PxConvexMeshGeometry& convexGeom0, const PxTransform32& convexPose0, const PxConvexMeshGeometry& convexGeom1, const PxTransform32& convexPose1)
{
using namespace aos;
const Vec3V zeroV = V3Zero();
// Convex mesh
const ConvexMesh* convexMesh0 = static_cast<const ConvexMesh*>(convexGeom0.convexMesh);
const ConvexHullData* hull0 = &convexMesh0->getHull();
const Vec3V vScale0 = V3LoadU_SafeReadW(convexGeom0.scale.scale); // PT: safe because 'rotation' follows 'scale' in PxMeshScale
const QuatV vQuat0 = QuatVLoadU(&convexGeom0.scale.rotation.x);
const ConvexHullV convexHullV0(hull0, zeroV, vScale0, vQuat0, convexGeom0.scale.isIdentity());
//~Convex mesh
// Convex mesh
const ConvexMesh* convexMesh1 = static_cast<const ConvexMesh*>(convexGeom1.convexMesh);
const ConvexHullData* hull1 = &convexMesh1->getHull();
const Vec3V vScale1 = V3LoadU_SafeReadW(convexGeom1.scale.scale); // PT: safe because 'rotation' follows 'scale' in PxMeshScale
const QuatV vQuat1 = QuatVLoadU(&convexGeom1.scale.rotation.x);
const ConvexHullV convexHullV1(hull1, zeroV, vScale1, vQuat1, convexGeom1.scale.isIdentity());
//~Convex mesh
const QuatV q0 = QuatVLoadU(&convexPose0.q.x);
const Vec3V p0 = V3LoadU(&convexPose0.p.x);
const QuatV q1 = QuatVLoadU(&convexPose1.q.x);
const Vec3V p1 = V3LoadU(&convexPose1.p.x);
const PxTransformV transf0(p0, q0);
const PxTransformV transf1(p1, q1);
Vec3V normal = zeroV;
FloatV penetrationDepth = FZero();
const bool idtScale0 = convexGeom0.scale.isIdentity();
const bool idtScale1 = convexGeom1.scale.isIdentity();
bool hasContacts = internalComputeMTD_ConvexConvex(idtScale0, idtScale1, convexHullV0, convexHullV1, transf0, transf1, penetrationDepth, normal);
if(hasContacts)
{
FStore(penetrationDepth, &depth);
depth = validateDepth(depth);
V3StoreU(normal, mtd);
}
return hasContacts;
}
///////////////////////////////////////////////////////////////////////////////
static bool computeMTD_SpherePlane(PxVec3& mtd, PxF32& depth, const Sphere& sphere, const PxPlane& plane)
{
const PxReal d = plane.distance(sphere.center);
if(d>sphere.radius)
return false;
mtd = plane.n;
depth = validateDepth(sphere.radius - d);
return true;
}
static bool computeMTD_PlaneBox(PxVec3& mtd, PxF32& depth, const PxPlane& plane, const Box& box)
{
PxVec3 pts[8];
box.computeBoxPoints(pts);
PxReal dmin = plane.distance(pts[0]);
for(PxU32 i=1;i<8;i++)
{
const PxReal d = plane.distance(pts[i]);
dmin = physx::intrinsics::selectMin(dmin, d);
}
if(dmin>0.0f)
return false;
mtd = -plane.n;
depth = validateDepth(-dmin);
return true;
}
static bool computeMTD_PlaneCapsule(PxVec3& mtd, PxF32& depth, const PxPlane& plane, const Capsule& capsule)
{
const PxReal d0 = plane.distance(capsule.p0);
const PxReal d1 = plane.distance(capsule.p1);
const PxReal dmin = physx::intrinsics::selectMin(d0, d1) - capsule.radius;
if(dmin>0.0f)
return false;
mtd = -plane.n;
depth = validateDepth(-dmin);
return true;
}
static bool computeMTD_PlaneConvex(PxVec3& mtd, PxF32& depth, const PxPlane& plane, const PxConvexMeshGeometry& convexGeom, const PxTransform32& convexPose)
{
const ConvexMesh* convexMesh = static_cast<const ConvexMesh*>(convexGeom.convexMesh);
PxU32 nbVerts = convexMesh->getNbVerts();
const PxVec3* PX_RESTRICT verts = convexMesh->getVerts();
PxReal dmin = plane.distance(convexPose.transform(verts[0]));
for(PxU32 i=1;i<nbVerts;i++)
{
const PxReal d = plane.distance(convexPose.transform(verts[i]));
dmin = physx::intrinsics::selectMin(dmin, d);
}
if(dmin>0.0f)
return false;
mtd = -plane.n;
depth = validateDepth(-dmin);
return true;
}
///////////////////////////////////////////////////////////////////////////////
static bool processContacts(PxVec3& mtd, PxF32& depth, const PxU32 nbContacts, const PxContactPoint* contacts)
{
if(nbContacts)
{
PxVec3 mn(0.0f), mx(0.0f);
for(PxU32 i=0; i<nbContacts; i++)
{
const PxContactPoint& ct = contacts[i];
PxVec3 depenetration = ct.separation * ct.normal;
mn = mn.minimum(depenetration);
mx = mx.maximum(depenetration);
}
// Even if the object is already moving in the separation direction we still want to
// depenetrate, so there is no velocity/dot-product test here.
// Below we attempt to equalize separations that push in opposing directions along each axis.
PxVec3 mn1, mx1;
mn1.x = (mn.x == 0.0f) ? mx.x : mn.x;
mn1.y = (mn.y == 0.0f) ? mx.y : mn.y;
mn1.z = (mn.z == 0.0f) ? mx.z : mn.z;
mx1.x = (mx.x == 0.0f) ? mn.x : mx.x;
mx1.y = (mx.y == 0.0f) ? mn.y : mx.y;
mx1.z = (mx.z == 0.0f) ? mn.z : mx.z;
PxVec3 sepDir((mn1 + mx1)*0.5f);
if(sepDir.magnitudeSquared() < 1e-10f)
return false;
mtd = -sepDir.getNormalized();
depth = sepDir.magnitude();
}
return nbContacts!=0;
}
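// Worked example of the equalization above (illustrative values): two contacts with
// normals +X and +Y and separations -0.4 and -0.2 give depenetration vectors
// (-0.4, 0, 0) and (0, -0.2, 0), so mn = (-0.4, -0.2, 0) and mx = (0, 0, 0).
// On each axis a zero bound copies the opposing bound, so mn1 = mx1 = (-0.4, -0.2, 0)
// and sepDir = (-0.4, -0.2, 0). The result is depth = |sepDir| ~= 0.447 and
// mtd = -sepDir.getNormalized() ~= (0.894, 0.447, 0).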
static bool computeMTD_SphereMesh(PxVec3& mtd, PxF32& depth, const Sphere& sphere, const PxTriangleMeshGeometry& meshGeom, const PxTransform32& meshPose)
{
Cache cache;
PxContactBuffer contactBuffer;
contactBuffer.reset();
if(!contactSphereMesh(PxSphereGeometry(sphere.radius), meshGeom, PxTransform32(sphere.center), meshPose, NarrowPhaseParams(0.0f, 0.0f, 1.0f), cache, contactBuffer, NULL))
return false;
return processContacts(mtd, depth, contactBuffer.count, contactBuffer.contacts);
}
static bool computeMTD_CapsuleMesh(PxVec3& mtd, PxF32& depth, const Capsule& capsule, const PxTriangleMeshGeometry& meshGeom, const PxTransform32& meshPose)
{
PxReal halfHeight;
const PxTransform32 capsuleTransform(PxTransformFromSegment(capsule.p0, capsule.p1, &halfHeight));
Cache cache;
PxContactBuffer contactBuffer;
contactBuffer.reset();
if(!contactCapsuleMesh(PxCapsuleGeometry(capsule.radius, halfHeight), meshGeom, capsuleTransform, meshPose, NarrowPhaseParams(0.0f, 0.0f, 1.0f), cache, contactBuffer, NULL))
return false;
return processContacts(mtd, depth, contactBuffer.count, contactBuffer.contacts);
}
static bool computeMTD_BoxMesh(PxVec3& mtd, PxF32& depth, const Box& box, const PxTriangleMeshGeometry& meshGeom, const PxTransform32& meshPose)
{
const PxTransform32 boxPose(box.center, PxQuat(box.rot));
Cache cache;
PxContactBuffer contactBuffer;
contactBuffer.reset();
if(!contactBoxMesh(PxBoxGeometry(box.extents), meshGeom, boxPose, meshPose, NarrowPhaseParams(0.0f, 0.0f, 1.0f), cache, contactBuffer, NULL))
return false;
return processContacts(mtd, depth, contactBuffer.count, contactBuffer.contacts);
}
static bool computeMTD_ConvexMesh(PxVec3& mtd, PxF32& depth, const PxConvexMeshGeometry& convexGeom, const PxTransform32& convexPose, const PxTriangleMeshGeometry& meshGeom, const PxTransform32& meshPose)
{
Cache cache;
PxContactBuffer contactBuffer;
contactBuffer.reset();
if(!contactConvexMesh(convexGeom, meshGeom, convexPose, meshPose, NarrowPhaseParams(0.0f, 0.0f, 1.0f), cache, contactBuffer, NULL))
return false;
return processContacts(mtd, depth, contactBuffer.count, contactBuffer.contacts);
}
static bool computeMTD_SphereHeightField(PxVec3& mtd, PxF32& depth, const Sphere& sphere, const PxHeightFieldGeometry& meshGeom, const PxTransform32& meshPose)
{
Cache cache;
PxContactBuffer contactBuffer;
contactBuffer.reset();
const PxTransform32 spherePose(sphere.center);
if(!contactSphereHeightfield(PxSphereGeometry(sphere.radius), meshGeom, spherePose, meshPose, NarrowPhaseParams(0.0f, 0.0f, 1.0f), cache, contactBuffer, NULL))
return false;
return processContacts(mtd, depth, contactBuffer.count, contactBuffer.contacts);
}
static bool computeMTD_CapsuleHeightField(PxVec3& mtd, PxF32& depth, const Capsule& capsule, const PxHeightFieldGeometry& meshGeom, const PxTransform32& meshPose)
{
PxReal halfHeight;
const PxTransform32 capsuleTransform(PxTransformFromSegment(capsule.p0, capsule.p1, &halfHeight));
Cache cache;
PxContactBuffer contactBuffer;
contactBuffer.reset();
if(!contactCapsuleHeightfield(PxCapsuleGeometry(capsule.radius, halfHeight), meshGeom, capsuleTransform, meshPose, NarrowPhaseParams(0.0f, 0.0f, 1.0f), cache, contactBuffer, NULL))
return false;
return processContacts(mtd, depth, contactBuffer.count, contactBuffer.contacts);
}
static bool computeMTD_BoxHeightField(PxVec3& mtd, PxF32& depth, const Box& box, const PxHeightFieldGeometry& meshGeom, const PxTransform32& meshPose)
{
const PxTransform32 boxPose(box.center, PxQuat(box.rot));
Cache cache;
PxContactBuffer contactBuffer;
contactBuffer.reset();
if(!contactBoxHeightfield(PxBoxGeometry(box.extents), meshGeom, boxPose, meshPose, NarrowPhaseParams(0.0f, 0.0f, 1.0f), cache, contactBuffer, NULL))
return false;
return processContacts(mtd, depth, contactBuffer.count, contactBuffer.contacts);
}
static bool computeMTD_ConvexHeightField(PxVec3& mtd, PxF32& depth, const PxConvexMeshGeometry& convexGeom, const PxTransform32& convexPose, const PxHeightFieldGeometry& meshGeom, const PxTransform32& meshPose)
{
Cache cache;
PxContactBuffer contactBuffer;
contactBuffer.reset();
if(!contactConvexHeightfield(convexGeom, meshGeom, convexPose, meshPose, NarrowPhaseParams(0.0f, 0.0f, 1.0f), cache, contactBuffer, NULL))
return false;
return processContacts(mtd, depth, contactBuffer.count, contactBuffer.contacts);
}
static bool computeMTD_CustomGeometry(PxVec3& mtd, PxF32& depth, const PxCustomGeometry& geom0, const PxTransform32& pose0, const PxGeometry& geom1, const PxTransform32& pose1)
{
Cache cache;
PxContactBuffer contactBuffer;
contactBuffer.reset();
if(!geom0.callbacks->generateContacts(geom0, geom1, pose0, pose1, FLT_EPSILON, FLT_EPSILON, 1.0f, contactBuffer))
return false;
return processContacts(mtd, depth, contactBuffer.count, contactBuffer.contacts);
}
static bool GeomMTDCallback_NotSupported(GU_MTD_FUNC_PARAMS)
{
PX_ALWAYS_ASSERT_MESSAGE("NOT SUPPORTED");
PX_UNUSED(mtd); PX_UNUSED(depth); PX_UNUSED(geom0); PX_UNUSED(geom1); PX_UNUSED(pose0); PX_UNUSED(pose1);
return false;
}
static bool GeomMTDCallback_SphereSphere(GU_MTD_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eSPHERE);
PX_ASSERT(geom1.getType()==PxGeometryType::eSPHERE);
const PxSphereGeometry& sphereGeom0 = static_cast<const PxSphereGeometry&>(geom0);
const PxSphereGeometry& sphereGeom1 = static_cast<const PxSphereGeometry&>(geom1);
return computeMTD_SphereSphere(mtd, depth, Sphere(pose0.p, sphereGeom0.radius), Sphere(pose1.p, sphereGeom1.radius));
}
static bool GeomMTDCallback_SpherePlane(GU_MTD_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eSPHERE);
PX_ASSERT(geom1.getType()==PxGeometryType::ePLANE);
PX_UNUSED(geom1);
const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom0);
return computeMTD_SpherePlane(mtd, depth, Sphere(pose0.p, sphereGeom.radius), getPlane(pose1));
}
static bool GeomMTDCallback_SphereCapsule(GU_MTD_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eSPHERE);
PX_ASSERT(geom1.getType()==PxGeometryType::eCAPSULE);
const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom0);
const PxCapsuleGeometry& capsuleGeom = static_cast<const PxCapsuleGeometry&>(geom1);
Capsule capsule;
getCapsuleSegment(pose1, capsuleGeom, capsule);
capsule.radius = capsuleGeom.radius;
return computeMTD_SphereCapsule(mtd, depth, Sphere(pose0.p, sphereGeom.radius), capsule);
}
static bool GeomMTDCallback_SphereBox(GU_MTD_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eSPHERE);
PX_ASSERT(geom1.getType()==PxGeometryType::eBOX);
const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom0);
const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom1);
Box obb;
buildFrom(obb, pose1.p, boxGeom.halfExtents, pose1.q);
return computeMTD_SphereBox(mtd, depth, Sphere(pose0.p, sphereGeom.radius), obb);
}
static bool GeomMTDCallback_SphereConvex(GU_MTD_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eSPHERE);
PX_ASSERT(geom1.getType()==PxGeometryType::eCONVEXMESH);
const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom0);
const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom1);
return computeMTD_SphereConvex(mtd, depth, Sphere(pose0.p, sphereGeom.radius), convexGeom, pose1);
}
static bool GeomMTDCallback_SphereMesh(GU_MTD_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eSPHERE);
PX_ASSERT(geom1.getType()==PxGeometryType::eTRIANGLEMESH);
const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom0);
const PxTriangleMeshGeometry& meshGeom = static_cast<const PxTriangleMeshGeometry&>(geom1);
return computeMTD_SphereMesh(mtd, depth, Sphere(pose0.p, sphereGeom.radius), meshGeom, pose1);
}
static bool GeomMTDCallback_PlaneCapsule(GU_MTD_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::ePLANE);
PX_ASSERT(geom1.getType()==PxGeometryType::eCAPSULE);
PX_UNUSED(geom0);
const PxCapsuleGeometry& capsuleGeom = static_cast<const PxCapsuleGeometry&>(geom1);
Capsule capsule;
getCapsuleSegment(pose1, capsuleGeom, capsule);
capsule.radius = capsuleGeom.radius;
return computeMTD_PlaneCapsule(mtd, depth, getPlane(pose0), capsule);
}
static bool GeomMTDCallback_PlaneBox(GU_MTD_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::ePLANE);
PX_ASSERT(geom1.getType()==PxGeometryType::eBOX);
PX_UNUSED(geom0);
const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom1);
Box obb;
buildFrom(obb, pose1.p, boxGeom.halfExtents, pose1.q);
return computeMTD_PlaneBox(mtd, depth, getPlane(pose0), obb);
}
static bool GeomMTDCallback_PlaneConvex(GU_MTD_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::ePLANE);
PX_ASSERT(geom1.getType()==PxGeometryType::eCONVEXMESH);
PX_UNUSED(geom0);
const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom1);
return computeMTD_PlaneConvex(mtd, depth, getPlane(pose0), convexGeom, pose1);
}
static bool GeomMTDCallback_CapsuleCapsule(GU_MTD_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eCAPSULE);
PX_ASSERT(geom1.getType()==PxGeometryType::eCAPSULE);
const PxCapsuleGeometry& capsuleGeom0 = static_cast<const PxCapsuleGeometry&>(geom0);
const PxCapsuleGeometry& capsuleGeom1 = static_cast<const PxCapsuleGeometry&>(geom1);
Capsule capsule0;
getCapsuleSegment(pose0, capsuleGeom0, capsule0);
capsule0.radius = capsuleGeom0.radius;
Capsule capsule1;
getCapsuleSegment(pose1, capsuleGeom1, capsule1);
capsule1.radius = capsuleGeom1.radius;
return computeMTD_CapsuleCapsule(mtd, depth, capsule0, capsule1);
}
static bool GeomMTDCallback_CapsuleBox(GU_MTD_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eCAPSULE);
PX_ASSERT(geom1.getType()==PxGeometryType::eBOX);
const PxCapsuleGeometry& capsuleGeom = static_cast<const PxCapsuleGeometry&>(geom0);
const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom1);
Capsule capsule;
getCapsuleSegment(pose0, capsuleGeom, capsule);
capsule.radius = capsuleGeom.radius;
Box obb;
buildFrom(obb, pose1.p, boxGeom.halfExtents, pose1.q);
return computeMTD_CapsuleBox(mtd, depth, capsule, obb);
}
static bool GeomMTDCallback_CapsuleConvex(GU_MTD_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eCAPSULE);
PX_ASSERT(geom1.getType()==PxGeometryType::eCONVEXMESH);
const PxCapsuleGeometry& capsuleGeom = static_cast<const PxCapsuleGeometry&>(geom0);
const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom1);
Capsule capsule;
getCapsuleSegment(pose0, capsuleGeom, capsule);
capsule.radius = capsuleGeom.radius;
return computeMTD_CapsuleConvex(mtd, depth, capsule, pose0, convexGeom, pose1);
}
static bool GeomMTDCallback_CapsuleMesh(GU_MTD_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eCAPSULE);
PX_ASSERT(geom1.getType()==PxGeometryType::eTRIANGLEMESH);
const PxCapsuleGeometry& capsuleGeom = static_cast<const PxCapsuleGeometry&>(geom0);
const PxTriangleMeshGeometry& meshGeom = static_cast<const PxTriangleMeshGeometry&>(geom1);
Capsule capsule;
getCapsuleSegment(pose0, capsuleGeom, capsule);
capsule.radius = capsuleGeom.radius;
return computeMTD_CapsuleMesh(mtd, depth, capsule, meshGeom, pose1);
}
static bool GeomMTDCallback_BoxBox(GU_MTD_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eBOX);
PX_ASSERT(geom1.getType()==PxGeometryType::eBOX);
const PxBoxGeometry& boxGeom0 = static_cast<const PxBoxGeometry&>(geom0);
const PxBoxGeometry& boxGeom1 = static_cast<const PxBoxGeometry&>(geom1);
Box obb0;
buildFrom(obb0, pose0.p, boxGeom0.halfExtents, pose0.q);
Box obb1;
buildFrom(obb1, pose1.p, boxGeom1.halfExtents, pose1.q);
return computeMTD_BoxBox(mtd, depth, obb0, obb1);
}
static bool GeomMTDCallback_BoxConvex(GU_MTD_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eBOX);
PX_ASSERT(geom1.getType()==PxGeometryType::eCONVEXMESH);
const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom0);
const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom1);
Box obb;
buildFrom(obb, pose0.p, boxGeom.halfExtents, pose0.q);
return computeMTD_BoxConvex(mtd, depth, obb, convexGeom, pose1);
}
static bool GeomMTDCallback_BoxMesh(GU_MTD_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eBOX);
PX_ASSERT(geom1.getType()==PxGeometryType::eTRIANGLEMESH);
const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom0);
const PxTriangleMeshGeometry& meshGeom = static_cast<const PxTriangleMeshGeometry&>(geom1);
Box obb;
buildFrom(obb, pose0.p, boxGeom.halfExtents, pose0.q);
return computeMTD_BoxMesh(mtd, depth, obb, meshGeom, pose1);
}
static bool GeomMTDCallback_ConvexConvex(GU_MTD_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eCONVEXMESH);
PX_ASSERT(geom1.getType()==PxGeometryType::eCONVEXMESH);
const PxConvexMeshGeometry& convexGeom0 = static_cast<const PxConvexMeshGeometry&>(geom0);
const PxConvexMeshGeometry& convexGeom1 = static_cast<const PxConvexMeshGeometry&>(geom1);
return computeMTD_ConvexConvex(mtd, depth, convexGeom0, pose0, convexGeom1, pose1);
}
static bool GeomMTDCallback_ConvexMesh(GU_MTD_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eCONVEXMESH);
PX_ASSERT(geom1.getType()==PxGeometryType::eTRIANGLEMESH);
const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom0);
const PxTriangleMeshGeometry& meshGeom = static_cast<const PxTriangleMeshGeometry&>(geom1);
return computeMTD_ConvexMesh(mtd, depth, convexGeom, pose0, meshGeom, pose1);
}
static bool GeomMTDCallback_SphereHeightField(GU_MTD_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eSPHERE);
PX_ASSERT(geom1.getType()==PxGeometryType::eHEIGHTFIELD);
const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom0);
const PxHeightFieldGeometry& meshGeom = static_cast<const PxHeightFieldGeometry&>(geom1);
const Sphere sphere(pose0.p, sphereGeom.radius);
return computeMTD_SphereHeightField(mtd, depth, sphere, meshGeom, pose1);
}
static bool GeomMTDCallback_CapsuleHeightField(GU_MTD_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eCAPSULE);
PX_ASSERT(geom1.getType()==PxGeometryType::eHEIGHTFIELD);
const PxCapsuleGeometry& capsuleGeom = static_cast<const PxCapsuleGeometry&>(geom0);
const PxHeightFieldGeometry& meshGeom = static_cast<const PxHeightFieldGeometry&>(geom1);
Capsule capsule;
getCapsuleSegment(pose0, capsuleGeom, capsule);
capsule.radius = capsuleGeom.radius;
return computeMTD_CapsuleHeightField(mtd, depth, capsule, meshGeom, pose1);
}
static bool GeomMTDCallback_BoxHeightField(GU_MTD_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eBOX);
PX_ASSERT(geom1.getType()==PxGeometryType::eHEIGHTFIELD);
const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom0);
const PxHeightFieldGeometry& meshGeom = static_cast<const PxHeightFieldGeometry&>(geom1);
Box obb;
buildFrom(obb, pose0.p, boxGeom.halfExtents, pose0.q);
return computeMTD_BoxHeightField(mtd, depth, obb, meshGeom, pose1);
}
static bool GeomMTDCallback_ConvexHeightField(GU_MTD_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eCONVEXMESH);
PX_ASSERT(geom1.getType()==PxGeometryType::eHEIGHTFIELD);
const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom0);
const PxHeightFieldGeometry& meshGeom = static_cast<const PxHeightFieldGeometry&>(geom1);
return computeMTD_ConvexHeightField(mtd, depth, convexGeom, pose0, meshGeom, pose1);
}
static bool GeomMTDCallback_CustomGeometryGeometry(GU_MTD_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType() == PxGeometryType::eCUSTOM);
const PxCustomGeometry& customGeom = static_cast<const PxCustomGeometry&>(geom0);
return computeMTD_CustomGeometry(mtd, depth, customGeom, pose0, geom1, pose1);
}
static bool GeomMTDCallback_GeometryCustomGeometry(GU_MTD_FUNC_PARAMS)
{
PX_ASSERT(geom1.getType() == PxGeometryType::eCUSTOM);
const PxCustomGeometry& customGeom = static_cast<const PxCustomGeometry&>(geom1);
if (computeMTD_CustomGeometry(mtd, depth, customGeom, pose1, geom0, pose0))
{
mtd = -mtd;
return true;
}
return false;
}
Gu::GeomMTDFunc gGeomMTDMethodTable[][PxGeometryType::eGEOMETRY_COUNT] =
{
//PxGeometryType::eSPHERE
{
GeomMTDCallback_SphereSphere, //PxGeometryType::eSPHERE
GeomMTDCallback_SpherePlane, //PxGeometryType::ePLANE
GeomMTDCallback_SphereCapsule, //PxGeometryType::eCAPSULE
GeomMTDCallback_SphereBox, //PxGeometryType::eBOX
GeomMTDCallback_SphereConvex, //PxGeometryType::eCONVEXMESH
GeomMTDCallback_NotSupported, //PxGeometryType::ePARTICLESYSTEM
GeomMTDCallback_NotSupported, //PxGeometryType::eTETRAHEDRONMESH
GeomMTDCallback_SphereMesh, //PxGeometryType::eTRIANGLEMESH
GeomMTDCallback_SphereHeightField, //PxGeometryType::eHEIGHTFIELD
GeomMTDCallback_NotSupported, //PxGeometryType::eHAIRSYSTEM
GeomMTDCallback_GeometryCustomGeometry, //PxGeometryType::eCUSTOM
},
//PxGeometryType::ePLANE
{
0, //PxGeometryType::eSPHERE
GeomMTDCallback_NotSupported, //PxGeometryType::ePLANE
GeomMTDCallback_PlaneCapsule, //PxGeometryType::eCAPSULE
GeomMTDCallback_PlaneBox, //PxGeometryType::eBOX
GeomMTDCallback_PlaneConvex, //PxGeometryType::eCONVEXMESH
GeomMTDCallback_NotSupported, //PxGeometryType::ePARTICLESYSTEM
GeomMTDCallback_NotSupported, //PxGeometryType::eTETRAHEDRONMESH
GeomMTDCallback_NotSupported, //PxGeometryType::eTRIANGLEMESH
GeomMTDCallback_NotSupported, //PxGeometryType::eHEIGHTFIELD
GeomMTDCallback_NotSupported, //PxGeometryType::eHAIRSYSTEM
GeomMTDCallback_GeometryCustomGeometry, //PxGeometryType::eCUSTOM
},
//PxGeometryType::eCAPSULE
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
GeomMTDCallback_CapsuleCapsule, //PxGeometryType::eCAPSULE
GeomMTDCallback_CapsuleBox, //PxGeometryType::eBOX
GeomMTDCallback_CapsuleConvex, //PxGeometryType::eCONVEXMESH
GeomMTDCallback_NotSupported, //PxGeometryType::ePARTICLESYSTEM
GeomMTDCallback_NotSupported, //PxGeometryType::eTETRAHEDRONMESH
GeomMTDCallback_CapsuleMesh, //PxGeometryType::eTRIANGLEMESH
GeomMTDCallback_CapsuleHeightField, //PxGeometryType::eHEIGHTFIELD
GeomMTDCallback_NotSupported, //PxGeometryType::eHAIRSYSTEM
GeomMTDCallback_GeometryCustomGeometry, //PxGeometryType::eCUSTOM
},
//PxGeometryType::eBOX
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
0, //PxGeometryType::eCAPSULE
GeomMTDCallback_BoxBox, //PxGeometryType::eBOX
GeomMTDCallback_BoxConvex, //PxGeometryType::eCONVEXMESH
GeomMTDCallback_NotSupported, //PxGeometryType::ePARTICLESYSTEM
GeomMTDCallback_NotSupported, //PxGeometryType::eTETRAHEDRONMESH
GeomMTDCallback_BoxMesh, //PxGeometryType::eTRIANGLEMESH
GeomMTDCallback_BoxHeightField, //PxGeometryType::eHEIGHTFIELD
GeomMTDCallback_NotSupported, //PxGeometryType::eHAIRSYSTEM
GeomMTDCallback_GeometryCustomGeometry, //PxGeometryType::eCUSTOM
},
//PxGeometryType::eCONVEXMESH
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
0, //PxGeometryType::eCAPSULE
0, //PxGeometryType::eBOX
GeomMTDCallback_ConvexConvex, //PxGeometryType::eCONVEXMESH
GeomMTDCallback_NotSupported, //PxGeometryType::ePARTICLESYSTEM
GeomMTDCallback_NotSupported, //PxGeometryType::eTETRAHEDRONMESH
GeomMTDCallback_ConvexMesh, //PxGeometryType::eTRIANGLEMESH
GeomMTDCallback_ConvexHeightField, //PxGeometryType::eHEIGHTFIELD
GeomMTDCallback_NotSupported, //PxGeometryType::eHAIRSYSTEM
GeomMTDCallback_GeometryCustomGeometry, //PxGeometryType::eCUSTOM
},
//PxGeometryType::ePARTICLESYSTEM
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
0, //PxGeometryType::eCAPSULE
0, //PxGeometryType::eBOX
0, //PxGeometryType::eCONVEXMESH
GeomMTDCallback_NotSupported, //PxGeometryType::ePARTICLESYSTEM
GeomMTDCallback_NotSupported, //PxGeometryType::eTETRAHEDRONMESH
GeomMTDCallback_NotSupported, //PxGeometryType::eTRIANGLEMESH
GeomMTDCallback_NotSupported, //PxGeometryType::eHEIGHTFIELD
GeomMTDCallback_NotSupported, //PxGeometryType::eHAIRSYSTEM
GeomMTDCallback_NotSupported, //PxGeometryType::eCUSTOM
},
//PxGeometryType::eTETRAHEDRONMESH
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
0, //PxGeometryType::eCAPSULE
0, //PxGeometryType::eBOX
0, //PxGeometryType::eCONVEXMESH
0, //PxGeometryType::ePARTICLESYSTEM
GeomMTDCallback_NotSupported, //PxGeometryType::eTETRAHEDRONMESH
GeomMTDCallback_NotSupported, //PxGeometryType::eTRIANGLEMESH
GeomMTDCallback_NotSupported, //PxGeometryType::eHEIGHTFIELD
GeomMTDCallback_NotSupported, //PxGeometryType::eHAIRSYSTEM
GeomMTDCallback_NotSupported, //PxGeometryType::eCUSTOM
},
//PxGeometryType::eTRIANGLEMESH
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
0, //PxGeometryType::eCAPSULE
0, //PxGeometryType::eBOX
0, //PxGeometryType::eCONVEXMESH
0, //PxGeometryType::ePARTICLESYSTEM
0, //PxGeometryType::eTETRAHEDRONMESH
GeomMTDCallback_NotSupported, //PxGeometryType::eTRIANGLEMESH
GeomMTDCallback_NotSupported, //PxGeometryType::eHEIGHTFIELD
GeomMTDCallback_NotSupported, //PxGeometryType::eHAIRSYSTEM
GeomMTDCallback_GeometryCustomGeometry, //PxGeometryType::eCUSTOM
},
//PxGeometryType::eHEIGHTFIELD
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
0, //PxGeometryType::eCAPSULE
0, //PxGeometryType::eBOX
0, //PxGeometryType::eCONVEXMESH
0, //PxGeometryType::ePARTICLESYSTEM
0, //PxGeometryType::eTETRAHEDRONMESH
0, //PxGeometryType::eTRIANGLEMESH
GeomMTDCallback_NotSupported, //PxGeometryType::eHEIGHTFIELD
GeomMTDCallback_NotSupported, //PxGeometryType::eHAIRSYSTEM
GeomMTDCallback_GeometryCustomGeometry, //PxGeometryType::eCUSTOM
},
//PxGeometryType::eHAIRSYSTEM
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
0, //PxGeometryType::eCAPSULE
0, //PxGeometryType::eBOX
0, //PxGeometryType::eCONVEXMESH
0, //PxGeometryType::ePARTICLESYSTEM
0, //PxGeometryType::eTETRAHEDRONMESH
0, //PxGeometryType::eTRIANGLEMESH
0, //PxGeometryType::eHEIGHTFIELD
GeomMTDCallback_NotSupported, //PxGeometryType::eHAIRSYSTEM
GeomMTDCallback_GeometryCustomGeometry, //PxGeometryType::eCUSTOM
},
//PxGeometryType::eCUSTOM
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
0, //PxGeometryType::eCAPSULE
0, //PxGeometryType::eBOX
0, //PxGeometryType::eCONVEXMESH
0, //PxGeometryType::ePARTICLESYSTEM
0, //PxGeometryType::eTETRAHEDRONMESH
0, //PxGeometryType::eTRIANGLEMESH
0, //PxGeometryType::eHEIGHTFIELD
0, //PxGeometryType::eHAIRSYSTEM
GeomMTDCallback_CustomGeometryGeometry, //PxGeometryType::eCUSTOM
},
};
PX_COMPILE_TIME_ASSERT(sizeof(gGeomMTDMethodTable) / sizeof(gGeomMTDMethodTable[0]) == PxGeometryType::eGEOMETRY_COUNT);
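// Illustrative sketch of how a caller might use the table (hypothetical function, not
// part of this file): the table is upper-triangular, so the pair is ordered by geometry
// type before dispatch. The parameter order is assumed from the callback bodies above,
// and flipping the reported direction on a swap is one plausible caller convention.
#if 0
bool exampleComputeMTD(PxVec3& mtd, PxF32& depth,
	const PxGeometry& g0, const PxTransform32& p0,
	const PxGeometry& g1, const PxTransform32& p1)
{
	const bool swapped = g0.getType() > g1.getType();
	const Gu::GeomMTDFunc func = swapped ? gGeomMTDMethodTable[g1.getType()][g0.getType()]
										 : gGeomMTDMethodTable[g0.getType()][g1.getType()];
	PX_ASSERT(func);	// zero entries are unreachable once the pair is ordered
	const bool hit = swapped ? func(mtd, depth, g1, p1, g0, p0)
							 : func(mtd, depth, g0, p0, g1, p1);
	if(hit && swapped)
		mtd = -mtd;	// report the direction relative to the original first operand
	return hit;
}
#endif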
| 51,608 | C++ | 34.276145 | 255 | 0.733239 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuMeshFactory.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_MESH_FACTORY_H
#define GU_MESH_FACTORY_H
#include "foundation/PxIO.h"
#include "foundation/PxHashSet.h"
#include "foundation/PxUserAllocated.h"
#include "geometry/PxTriangleMesh.h"
#include "geometry/PxTetrahedronMesh.h"
#include "geometry/PxConvexMesh.h"
#include "geometry/PxHeightField.h"
#include "geometry/PxBVH.h"
#include "PxPhysXConfig.h"
#include "foundation/PxMutex.h"
#include "foundation/PxArray.h"
// PT: added for platforms that compile the onRefCountZero template immediately
#include "CmUtils.h"
#include "foundation/PxFoundation.h"
namespace physx
{
namespace Gu
{
class ConvexMesh;
class HeightField;
class TriangleMesh;
class TriangleMeshData;
class SoftBodyMesh;
class SoftBodyMeshData;
class TetrahedronMesh;
class TetrahedronMeshData;
class BVH;
struct ConvexHullInitData;
class BVHData;
class MeshFactoryListener
{
protected:
virtual ~MeshFactoryListener(){}
public:
virtual void onMeshFactoryBufferRelease(const PxBase* object, PxType type) = 0;
#if PX_SUPPORT_OMNI_PVD
virtual void onObjectAdd(const PxBase*) {}
virtual void onObjectRemove(const PxBase*) {}
#endif
};
#if PX_VC
#pragma warning(push)
#pragma warning( disable : 4251 ) // class needs to have dll-interface to be used by clients of class
#endif
class PX_PHYSX_COMMON_API MeshFactory : public PxUserAllocated
{
PX_NOCOPY(MeshFactory)
public:
MeshFactory();
protected:
virtual ~MeshFactory();
public:
void release();
// Triangle meshes
void addTriangleMesh(Gu::TriangleMesh* np, bool lock=true);
PxTriangleMesh* createTriangleMesh(PxInputStream& stream);
PxTriangleMesh* createTriangleMesh(void* triangleMeshData);
bool removeTriangleMesh(PxTriangleMesh&);
PxU32 getNbTriangleMeshes() const;
PxU32 getTriangleMeshes(PxTriangleMesh** userBuffer, PxU32 bufferSize, PxU32 startIndex) const;
// Tetrahedron meshes
void addTetrahedronMesh(Gu::TetrahedronMesh* np, bool lock = true);
PxTetrahedronMesh* createTetrahedronMesh(PxInputStream& stream);
PxTetrahedronMesh* createTetrahedronMesh(void* tetrahedronMeshData);
bool removeTetrahedronMesh(PxTetrahedronMesh&);
PxU32 getNbTetrahedronMeshes() const;
PxU32 getTetrahedronMeshes(PxTetrahedronMesh** userBuffer, PxU32 bufferSize, PxU32 startIndex) const;
// SoftBody meshes
void addSoftBodyMesh(Gu::SoftBodyMesh* np, bool lock = true);
PxSoftBodyMesh* createSoftBodyMesh(PxInputStream& stream);
PxSoftBodyMesh* createSoftBodyMesh(void* tetrahedronMeshData);
bool removeSoftBodyMesh(PxSoftBodyMesh&);
PxU32 getNbSoftBodyMeshes() const;
PxU32 getSoftBodyMeshes(PxSoftBodyMesh** userBuffer, PxU32 bufferSize, PxU32 startIndex) const;
// Convexes
void addConvexMesh(Gu::ConvexMesh* np, bool lock=true);
PxConvexMesh* createConvexMesh(PxInputStream&);
PxConvexMesh* createConvexMesh(void* convexMeshData);
bool removeConvexMesh(PxConvexMesh&);
PxU32 getNbConvexMeshes() const;
PxU32 getConvexMeshes(PxConvexMesh** userBuffer, PxU32 bufferSize, PxU32 startIndex) const;
// Heightfields
void addHeightField(Gu::HeightField* np, bool lock=true);
PxHeightField* createHeightField(void* heightFieldMeshData);
PxHeightField* createHeightField(PxInputStream&);
bool removeHeightField(PxHeightField&);
PxU32 getNbHeightFields() const;
PxU32 getHeightFields(PxHeightField** userBuffer, PxU32 bufferSize, PxU32 startIndex) const;
// BVH
void addBVH(Gu::BVH* np, bool lock=true);
PxBVH* createBVH(PxInputStream&);
PxBVH* createBVH(void* bvhData);
bool removeBVH(PxBVH&);
PxU32 getNbBVHs() const;
PxU32 getBVHs(PxBVH** userBuffer, PxU32 bufferSize, PxU32 startIndex) const;
void addFactoryListener(MeshFactoryListener& listener);
void removeFactoryListener(MeshFactoryListener& listener);
void notifyFactoryListener(const PxBase*, PxType typeID);
bool remove(PxBase&);
protected:
PxTriangleMesh* createTriangleMesh(Gu::TriangleMeshData& data);
PxTetrahedronMesh* createTetrahedronMesh(Gu::TetrahedronMeshData& data);
PxSoftBodyMesh* createSoftBodyMesh(Gu::SoftBodyMeshData& data);
PxConvexMesh* createConvexMesh(Gu::ConvexHullInitData& data);
PxBVH* createBVH(Gu::BVHData& data);
mutable PxMutex mTrackingMutex;
private:
PxCoalescedHashSet<Gu::TriangleMesh*> mTriangleMeshes;
PxCoalescedHashSet<Gu::TetrahedronMesh*> mTetrahedronMeshes;
PxCoalescedHashSet<Gu::SoftBodyMesh*> mSoftBodyMeshes;
PxCoalescedHashSet<Gu::ConvexMesh*> mConvexMeshes;
PxCoalescedHashSet<Gu::HeightField*> mHeightFields;
PxCoalescedHashSet<Gu::BVH*> mBVHs;
PxArray<MeshFactoryListener*> mFactoryListeners;
#if PX_SUPPORT_OMNI_PVD
protected:
void notifyListenersAdd(const PxBase*);
void notifyListenersRemove(const PxBase*);
#endif
};
#if PX_VC
#pragma warning(pop)
#endif
template<typename T>
PX_INLINE void onRefCountZero(T* object, Gu::MeshFactory* mf, bool cndt, const char* errorMsg)
{
if(mf)
{
if(cndt || mf->remove(*object))
{
const PxType type = object->getConcreteType();
Cm::deletePxBase(object);
mf->notifyFactoryListener(object, type);
return;
}
// PT: if we reach this point, we didn't find the mesh in the Physics object => don't delete!
// This prevents deleting the object twice.
PxGetFoundation().error(PxErrorCode::eINVALID_OPERATION, PX_FL, errorMsg);
}
else
Cm::deletePxBase(object);
}
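// Typical call site (illustrative; the class and member names below are placeholders,
// assuming a mesh class that forwards its zero-refcount event to its owning factory):
//
//     void TriangleMesh::onRefCountZero()
//     {
//         onRefCountZero(this, mMeshFactory, false, "PxTriangleMesh::release: double deletion detected!");
//     }
//
// Passing cndt==true short-circuits the factory lookup and deletes unconditionally.
// Note that notifyFactoryListener() runs after Cm::deletePxBase(), so listeners should
// treat the PxBase pointer purely as an identifier and never dereference it.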
}
}
#endif
| 7,371 | C++ | 35.86 | 109 | 0.737485 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuOverlapTests.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuOverlapTests.h"
#include "GuIntersectionBoxBox.h"
#include "GuIntersectionSphereBox.h"
#include "GuDistancePointSegment.h"
#include "GuDistanceSegmentBox.h"
#include "GuDistanceSegmentSegment.h"
#include "GuSphere.h"
#include "GuBoxConversion.h"
#include "GuInternal.h"
#include "GuVecCapsule.h"
#include "GuVecConvexHull.h"
#include "GuVecBox.h"
#include "GuConvexMesh.h"
#include "GuHillClimbing.h"
#include "GuGJK.h"
#include "geometry/PxSphereGeometry.h"
#include "geometry/PxCustomGeometry.h"
#include "CmMatrix34.h"
using namespace physx;
using namespace Cm;
using namespace Gu;
// PT: TODO: why don't we use ShapeData for overlaps?
//returns the maximal vertex in shape space
// PT: this function should be removed. We already have 2 different project hull functions in PxcShapeConvex & GuGJKObjectSupport, this one looks like a weird mix of both!
static PxVec3 projectHull_( const ConvexHullData& hull,
float& minimum, float& maximum,
const PxVec3& localDir, // expected to be normalized
const PxMat33& vert2ShapeSkew)
{
PX_ASSERT(localDir.isNormalized());
//use property that x|My == Mx|y for symmetric M to avoid having to transform vertices.
const PxVec3 vertexSpaceDir = vert2ShapeSkew * localDir;
const PxVec3* Verts = hull.getHullVertices();
const PxVec3* bestVert = NULL;
if(!hull.mBigConvexRawData) // Brute-force, local space. Experiments show break-even point is around 32 verts.
{
PxU32 NbVerts = hull.mNbHullVertices;
float min_ = PX_MAX_F32;
float max_ = -PX_MAX_F32;
while(NbVerts--)
{
const float dp = (*Verts).dot(vertexSpaceDir);
min_ = physx::intrinsics::selectMin(min_, dp);
if(dp > max_) { max_ = dp; bestVert = Verts; }
Verts++;
}
minimum = min_;
maximum = max_;
PX_ASSERT(bestVert != NULL);
return vert2ShapeSkew * *bestVert;
}
else // This version is better for objects with a lot of vertices
{
const PxU32 Offset = ComputeCubemapNearestOffset(vertexSpaceDir, hull.mBigConvexRawData->mSubdiv);
PxU32 MinID = hull.mBigConvexRawData->mSamples[Offset];
PxU32 MaxID = hull.mBigConvexRawData->getSamples2()[Offset];
localSearch(MinID, -vertexSpaceDir, Verts, hull.mBigConvexRawData);
localSearch(MaxID, vertexSpaceDir, Verts, hull.mBigConvexRawData);
minimum = (Verts[MinID].dot(vertexSpaceDir));
maximum = (Verts[MaxID].dot(vertexSpaceDir));
PX_ASSERT(maximum >= minimum);
return vert2ShapeSkew * Verts[MaxID];
}
}
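// The symmetry trick used above, in symbols: the vertex-to-shape matrix M is symmetric
// (as noted at the top of the function), so dot(M*v, d) == dot(v, M*d) for every vertex v.
// Transforming the direction once into vertex space therefore replaces one matrix
// multiply per vertex.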
static bool intersectSphereConvex(const PxTransform& sphereTransform, float radius, const ConvexMesh& mesh, const PxMeshScale& meshScale, const PxTransform& convexGlobalPose,
PxVec3*)
{
using namespace aos;
const Vec3V zeroV = V3Zero();
const ConvexHullData* hullData = &mesh.getHull();
const FloatV sphereRadius = FLoad(radius);
const Vec3V vScale = V3LoadU_SafeReadW(meshScale.scale); // PT: safe because 'rotation' follows 'scale' in PxMeshScale
const QuatV vQuat = QuatVLoadU(&meshScale.rotation.x);
const PxMatTransformV aToB(convexGlobalPose.transformInv(sphereTransform));
const ConvexHullV convexHull(hullData, zeroV, vScale, vQuat, meshScale.isIdentity());
const CapsuleV capsule(aToB.p, sphereRadius);
Vec3V contactA, contactB, normal;
FloatV dist;
const LocalConvex<CapsuleV> convexA(capsule);
const LocalConvex<ConvexHullV> convexB(convexHull);
const Vec3V initialSearchDir = V3Sub(capsule.getCenter(), convexHull.getCenter());
GjkStatus status = gjk(convexA, convexB, initialSearchDir, FZero(), contactA, contactB, normal, dist);
return status == GJK_CONTACT;
}
static bool intersectCapsuleConvex( const PxCapsuleGeometry& capsGeom, const PxTransform& capsGlobalPose,
const ConvexMesh& mesh, const PxMeshScale& meshScale, const PxTransform& convexGlobalPose,
PxVec3*)
{
using namespace aos;
const Vec3V zeroV = V3Zero();
const ConvexHullData* hull = &mesh.getHull();
const FloatV capsuleHalfHeight = FLoad(capsGeom.halfHeight);
const FloatV capsuleRadius = FLoad(capsGeom.radius);
const Vec3V vScale = V3LoadU_SafeReadW(meshScale.scale); // PT: safe because 'rotation' follows 'scale' in PxMeshScale
const QuatV vQuat = QuatVLoadU(&meshScale.rotation.x);
const PxMatTransformV aToB(convexGlobalPose.transformInv(capsGlobalPose));
const ConvexHullV convexHull(hull, zeroV, vScale, vQuat, meshScale.isIdentity());
const CapsuleV capsule(aToB.p, aToB.rotate(V3Scale(V3UnitX(), capsuleHalfHeight)), capsuleRadius);
Vec3V contactA, contactB, normal;
FloatV dist;
const LocalConvex<CapsuleV> convexA(capsule);
const LocalConvex<ConvexHullV> convexB(convexHull);
const Vec3V initialSearchDir = V3Sub(capsule.getCenter(), convexHull.getCenter());
GjkStatus status = gjk(convexA, convexB, initialSearchDir, FZero(), contactA, contactB, normal, dist);
return status == GJK_CONTACT;
}
static bool intersectBoxConvex(const PxBoxGeometry& boxGeom, const PxTransform& boxGlobalPose,
const ConvexMesh& mesh, const PxMeshScale& meshScale, const PxTransform& convexGlobalPose,
PxVec3*)
{
// AP: see archived non-GJK version in //sw/physx/dev/pterdiman/graveyard/contactConvexBox.cpp
using namespace aos;
const Vec3V zeroV = V3Zero();
const ConvexHullData* hull = &mesh.getHull();
const Vec3V vScale = V3LoadU_SafeReadW(meshScale.scale); // PT: safe because 'rotation' follows 'scale' in PxMeshScale
const QuatV vQuat = QuatVLoadU(&meshScale.rotation.x);
const Vec3V boxExtents = V3LoadU(boxGeom.halfExtents);
const PxMatTransformV aToB(convexGlobalPose.transformInv(boxGlobalPose));
const ConvexHullV convexHull(hull, zeroV, vScale, vQuat, meshScale.isIdentity());
const BoxV box(zeroV, boxExtents);
Vec3V contactA, contactB, normal;
FloatV dist;
const RelativeConvex<BoxV> convexA(box, aToB);
const LocalConvex<ConvexHullV> convexB(convexHull);
GjkStatus status = gjk(convexA, convexB, aToB.p, FZero(), contactA, contactB, normal, dist);
//PX_PRINTF("BOX status = %i, overlap = %i, PxVec3(%f, %f, %f)\n", status, overlap, boxGlobalPose.p.x, boxGlobalPose.p.y, boxGlobalPose.p.z);
return status == GJK_CONTACT;
}
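// The three helpers above share one pattern: express both shapes in the space of the
// convex mesh (aToB), wrap them in GJK support objects, and run gjk() with a zero
// separation margin (the FZero() argument) so that only actual touching or penetration
// reports GJK_CONTACT.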
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
static PX_FORCE_INLINE PxVec3* getCachedAxis(TriggerCache* cache)
{
if(cache && cache->state==TRIGGER_OVERLAP)
return &cache->dir;
else
return NULL;
}
static PX_FORCE_INLINE bool updateTriggerCache(bool overlap, TriggerCache* cache)
{
if(cache)
{
if(overlap)
cache->state = TRIGGER_OVERLAP;
else
cache->state = TRIGGER_DISJOINT;
}
return overlap;
}
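// Note on the cache protocol: callers may keep one TriggerCache per shape pair across
// queries. getCachedAxis() only exposes the stored axis while the previous query
// reported TRIGGER_OVERLAP, and updateTriggerCache() records the new state on the way
// out. The GJK-based convex tests above currently ignore the axis hint (their PxVec3*
// parameter is unnamed), so the plumbing appears to be kept for implementations that
// warm-started from a cached separating axis.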
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Sphere-vs-shape
static bool GeomOverlapCallback_SphereSphere(GU_OVERLAP_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eSPHERE);
PX_ASSERT(geom1.getType()==PxGeometryType::eSPHERE);
PX_UNUSED(cache);
PX_UNUSED(threadContext);
const PxSphereGeometry& sphereGeom0 = static_cast<const PxSphereGeometry&>(geom0);
const PxSphereGeometry& sphereGeom1 = static_cast<const PxSphereGeometry&>(geom1);
const PxVec3 delta = pose1.p - pose0.p;
const PxReal r = sphereGeom0.radius + sphereGeom1.radius;
return delta.magnitudeSquared() <= r*r; // PT: objects are defined as closed, so we return 'true' in case of equality
}
static bool GeomOverlapCallback_SpherePlane(GU_OVERLAP_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eSPHERE);
PX_ASSERT(geom1.getType()==PxGeometryType::ePLANE);
PX_UNUSED(cache);
PX_UNUSED(geom1);
PX_UNUSED(threadContext);
const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom0);
return getPlane(pose1).distance(pose0.p) <= sphereGeom.radius; // PT: objects are defined as closed, so we return 'true' in case of equality
}
static bool GeomOverlapCallback_SphereCapsule(GU_OVERLAP_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eSPHERE);
PX_ASSERT(geom1.getType()==PxGeometryType::eCAPSULE);
PX_UNUSED(cache);
PX_UNUSED(threadContext);
const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom0);
const PxCapsuleGeometry& capsuleGeom = static_cast<const PxCapsuleGeometry&>(geom1);
// PT: TODO: remove this useless conversion
const PxVec3 capsuleHalfHeightVector = getCapsuleHalfHeightVector(pose1, capsuleGeom);
const PxReal r = sphereGeom.radius + capsuleGeom.radius;
return distancePointSegmentSquared(capsuleHalfHeightVector, -capsuleHalfHeightVector, pose0.p - pose1.p) <= r*r; // PT: objects are defined as closed, so we return 'true' in case of equality
}
static bool GeomOverlapCallback_SphereBox(GU_OVERLAP_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eSPHERE);
PX_ASSERT(geom1.getType()==PxGeometryType::eBOX);
PX_UNUSED(cache);
PX_UNUSED(threadContext);
const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom0);
const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom1);
// PT: TODO: remove this useless conversion
Box obb;
buildFrom(obb, pose1.p, boxGeom.halfExtents, pose1.q);
return intersectSphereBox(Sphere(pose0.p, sphereGeom.radius), obb);
}
static bool GeomOverlapCallback_SphereConvex(GU_OVERLAP_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eSPHERE);
PX_ASSERT(geom1.getType()==PxGeometryType::eCONVEXMESH);
PX_UNUSED(threadContext);
const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom0);
const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom1);
ConvexMesh* cm = static_cast<ConvexMesh*>(convexGeom.convexMesh);
PxVec3 cachedSepAxis;
PxVec3* tmp = getCachedAxis(cache);
if(tmp)
cachedSepAxis = *tmp;
else
cachedSepAxis = PxVec3(0.0f, 0.0f, 1.0f);
const bool overlap = intersectSphereConvex(pose0, sphereGeom.radius,
*cm,
convexGeom.scale, pose1,
&cachedSepAxis);
if(cache && overlap)
cache->dir = cachedSepAxis;
return updateTriggerCache(overlap, cache);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Plane-vs-shape
static bool GeomOverlapCallback_PlaneCapsule(GU_OVERLAP_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::ePLANE);
PX_ASSERT(geom1.getType()==PxGeometryType::eCAPSULE);
PX_UNUSED(threadContext);
PX_UNUSED(cache);
PX_UNUSED(geom0);
// const PxPlaneGeometry& planeGeom = static_cast<const PxPlaneGeometry&>(geom0);
const PxCapsuleGeometry& capsuleGeom = static_cast<const PxCapsuleGeometry&>(geom1);
// PT: TODO: remove this useless conversion
Capsule capsule;
getCapsule(capsule, capsuleGeom, pose1);
const PxPlane plane = getPlane(pose0);
// We handle the capsule-plane collision with 2 sphere-plane collisions.
// Seems ok so far, since plane is infinite.
if(plane.distance(capsule.p0) <= capsule.radius) // PT: objects are defined as closed, so we return 'true' in case of equality
return true;
if(plane.distance(capsule.p1) <= capsule.radius) // PT: objects are defined as closed, so we return 'true' in case of equality
return true;
return false;
}
/*static bool intersectPlaneBox(const PxPlane& plane, const Box& box)
{
PxVec3 pts[8];
box.computeBoxPoints(pts);
for(PxU32 i=0;i<8;i++)
{
if(plane.distance(pts[i]) <= 0.0f) // PT: objects are defined as closed, so we return 'true' in case of equality
return true;
}
return false;
}*/
static bool GeomOverlapCallback_PlaneBox(GU_OVERLAP_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::ePLANE);
PX_ASSERT(geom1.getType()==PxGeometryType::eBOX);
PX_UNUSED(threadContext);
PX_UNUSED(cache);
PX_UNUSED(geom0);
// const PxPlaneGeometry& planeGeom = static_cast<const PxPlaneGeometry&>(geom0);
const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom1);
// I currently use the same code as for contact generation but maybe we could do something faster (in theory testing
// only 2 pts is enough).
const Matrix34FromTransform absPose(pose1);
const PxPlane worldPlane = getPlane(pose0);
for(int vx=-1; vx<=1; vx+=2)
for(int vy=-1; vy<=1; vy+=2)
for(int vz=-1; vz<=1; vz+=2)
{
const PxVec3 v = absPose.transform(PxVec3(PxReal(vx),PxReal(vy),PxReal(vz)).multiply(boxGeom.halfExtents));
if(worldPlane.distance(v) <= 0.0f) // PT: objects are defined as closed, so we return 'true' in case of equality
return true;
}
return false;
}
static bool GeomOverlapCallback_PlaneConvex(GU_OVERLAP_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::ePLANE);
PX_ASSERT(geom1.getType()==PxGeometryType::eCONVEXMESH);
PX_UNUSED(threadContext);
PX_UNUSED(cache);
PX_UNUSED(geom0);
// const PxPlaneGeometry& planeGeom = static_cast<const PxPlaneGeometry&>(geom0);
const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom1);
ConvexMesh* cm = static_cast<ConvexMesh*>(convexGeom.convexMesh);
//find plane normal in shape space of convex:
// PT:: tag: scalar transform*transform
const PxTransform plane2convex = pose1.getInverse().transform(pose0);
const PxPlane shapeSpacePlane = getPlane(plane2convex);
PxReal minimum, maximum;
projectHull_(cm->getHull(), minimum, maximum, shapeSpacePlane.n, toMat33(convexGeom.scale));
return (minimum <= -shapeSpacePlane.d);
}
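// Derivation of the test above: PxPlane::distance(x) is n.dot(x) + d, so a point x
// overlaps the plane when n.dot(x) + d <= 0. projectHull_ returns the hull's projection
// interval [minimum, maximum] along the shape-space plane normal, so the hull touches
// or crosses the plane exactly when its smallest projection satisfies
// minimum + d <= 0, i.e. minimum <= -d.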
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Capsule-vs-shape
static bool GeomOverlapCallback_CapsuleCapsule(GU_OVERLAP_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eCAPSULE);
PX_ASSERT(geom1.getType()==PxGeometryType::eCAPSULE);
PX_UNUSED(cache);
PX_UNUSED(threadContext);
const PxCapsuleGeometry& capsuleGeom0 = static_cast<const PxCapsuleGeometry&>(geom0);
const PxCapsuleGeometry& capsuleGeom1 = static_cast<const PxCapsuleGeometry&>(geom1);
// PT: move computation to local space for improved accuracy
const PxVec3 delta = pose1.p - pose0.p;
// PT: TODO: remove this useless conversion
const PxVec3 capsuleHalfHeightVector0 = getCapsuleHalfHeightVector(pose0, capsuleGeom0);
const PxVec3 capsuleHalfHeightVector1 = getCapsuleHalfHeightVector(pose1, capsuleGeom1);
const PxReal squareDist = distanceSegmentSegmentSquared(-capsuleHalfHeightVector0, capsuleHalfHeightVector0*2.0f,
delta-capsuleHalfHeightVector1, capsuleHalfHeightVector1*2.0f);
const PxReal r = capsuleGeom0.radius + capsuleGeom1.radius;
return squareDist <= r*r; // PT: objects are defined as closed, so we return 'true' in case of equality
}
static bool GeomOverlapCallback_CapsuleBox(GU_OVERLAP_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eCAPSULE);
PX_ASSERT(geom1.getType()==PxGeometryType::eBOX);
PX_UNUSED(cache);
PX_UNUSED(threadContext);
const PxCapsuleGeometry& capsuleGeom = static_cast<const PxCapsuleGeometry&>(geom0);
const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom1);
// PT: move computation to local space for improved accuracy
const PxVec3 delta = pose1.p - pose0.p;
// PT: TODO: remove this useless conversion
const PxVec3 capsuleHalfHeightVector = getCapsuleHalfHeightVector(pose0, capsuleGeom);
// PT: TODO: remove this useless conversion
const PxMat33Padded obbRot(pose1.q);
// PT: objects are defined as closed, so we return 'true' in case of equality
return distanceSegmentBoxSquared(capsuleHalfHeightVector, -capsuleHalfHeightVector, delta, boxGeom.halfExtents, obbRot) <= capsuleGeom.radius*capsuleGeom.radius;
}
static bool GeomOverlapCallback_CapsuleConvex(GU_OVERLAP_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eCAPSULE);
PX_ASSERT(geom1.getType()==PxGeometryType::eCONVEXMESH);
PX_UNUSED(threadContext);
const PxCapsuleGeometry& capsuleGeom = static_cast<const PxCapsuleGeometry&>(geom0);
const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom1);
ConvexMesh* cm = static_cast<ConvexMesh*>(convexGeom.convexMesh);
PxVec3 cachedSepAxis;
PxVec3* tmp = getCachedAxis(cache);
if(tmp)
cachedSepAxis = *tmp;
else
cachedSepAxis = PxVec3(0.0f, 0.0f, 1.0f);
const bool overlap = intersectCapsuleConvex(capsuleGeom, pose0, *cm, convexGeom.scale, pose1, &cachedSepAxis);
if(cache && overlap)
cache->dir = cachedSepAxis;
return updateTriggerCache(overlap, cache);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Box-vs-shape
static bool GeomOverlapCallback_BoxBox(GU_OVERLAP_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eBOX);
PX_ASSERT(geom1.getType()==PxGeometryType::eBOX);
PX_UNUSED(cache);
PX_UNUSED(threadContext);
const PxBoxGeometry& boxGeom0 = static_cast<const PxBoxGeometry&>(geom0);
const PxBoxGeometry& boxGeom1 = static_cast<const PxBoxGeometry&>(geom1);
// PT: TODO: remove this useless conversion
return intersectOBBOBB( boxGeom0.halfExtents, pose0.p, PxMat33Padded(pose0.q),
boxGeom1.halfExtents, pose1.p, PxMat33Padded(pose1.q), true);
}
static bool GeomOverlapCallback_BoxConvex(GU_OVERLAP_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eBOX);
PX_ASSERT(geom1.getType()==PxGeometryType::eCONVEXMESH);
PX_UNUSED(threadContext);
const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom0);
const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom1);
ConvexMesh* cm = static_cast<ConvexMesh*>(convexGeom.convexMesh);
PxVec3 cachedSepAxis;
PxVec3* tmp = getCachedAxis(cache);
if(tmp)
cachedSepAxis = *tmp;
else
cachedSepAxis = PxVec3(0.0f, 0.0f, 1.0f);
const bool overlap = intersectBoxConvex(boxGeom, pose0, *cm, convexGeom.scale, pose1, &cachedSepAxis);
if(cache && overlap)
cache->dir = cachedSepAxis;
return updateTriggerCache(overlap, cache);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Convex-vs-shape
static bool GeomOverlapCallback_ConvexConvex(GU_OVERLAP_FUNC_PARAMS)
{
using namespace aos;
PX_ASSERT(geom0.getType()==PxGeometryType::eCONVEXMESH);
PX_ASSERT(geom1.getType()==PxGeometryType::eCONVEXMESH);
PX_UNUSED(threadContext);
const Vec3V zeroV = V3Zero();
const PxConvexMeshGeometry& convexGeom0 = static_cast<const PxConvexMeshGeometry&>(geom0);
const PxConvexMeshGeometry& convexGeom1 = static_cast<const PxConvexMeshGeometry&>(geom1);
const ConvexMesh* cm0 = static_cast<ConvexMesh*>(convexGeom0.convexMesh);
const ConvexMesh* cm1 = static_cast<ConvexMesh*>(convexGeom1.convexMesh);
bool overlap;
{
const ConvexHullData* hullData0 = &cm0->getHull();
const ConvexHullData* hullData1 = &cm1->getHull();
const Vec3V vScale0 = V3LoadU_SafeReadW(convexGeom0.scale.scale); // PT: safe because 'rotation' follows 'scale' in PxMeshScale
const QuatV vQuat0 = QuatVLoadU(&convexGeom0.scale.rotation.x);
const Vec3V vScale1 = V3LoadU_SafeReadW(convexGeom1.scale.scale); // PT: safe because 'rotation' follows 'scale' in PxMeshScale
const QuatV vQuat1 = QuatVLoadU(&convexGeom1.scale.rotation.x);
const QuatV q0 = QuatVLoadU(&pose0.q.x);
const Vec3V p0 = V3LoadU(&pose0.p.x);
const QuatV q1 = QuatVLoadU(&pose1.q.x);
const Vec3V p1 = V3LoadU(&pose1.p.x);
const PxTransformV transf0(p0, q0);
const PxTransformV transf1(p1, q1);
const PxMatTransformV aToB(transf1.transformInv(transf0));
const ConvexHullV convexHull0(hullData0, zeroV, vScale0, vQuat0, convexGeom0.scale.isIdentity());
const ConvexHullV convexHull1(hullData1, zeroV, vScale1, vQuat1, convexGeom1.scale.isIdentity());
Vec3V contactA, contactB, normal;
FloatV dist;
const RelativeConvex<ConvexHullV> convexA(convexHull0, aToB);
const LocalConvex<ConvexHullV> convexB(convexHull1);
GjkStatus status = gjk(convexA, convexB, aToB.p, FZero(), contactA, contactB, normal, dist);
overlap = (status == GJK_CONTACT);
}
return updateTriggerCache(overlap, cache);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
static bool GeomOverlapCallback_NotSupported(GU_OVERLAP_FUNC_PARAMS)
{
PX_ALWAYS_ASSERT_MESSAGE("NOT SUPPORTED");
PX_UNUSED(threadContext);
PX_UNUSED(cache);
PX_UNUSED(pose0);
PX_UNUSED(pose1);
PX_UNUSED(geom0);
PX_UNUSED(geom1);
return false;
}
bool GeomOverlapCallback_SphereMesh (GU_OVERLAP_FUNC_PARAMS);
bool GeomOverlapCallback_CapsuleMesh (GU_OVERLAP_FUNC_PARAMS);
bool GeomOverlapCallback_BoxMesh (GU_OVERLAP_FUNC_PARAMS);
bool GeomOverlapCallback_ConvexMesh (GU_OVERLAP_FUNC_PARAMS);
bool GeomOverlapCallback_MeshMesh (GU_OVERLAP_FUNC_PARAMS);
bool GeomOverlapCallback_SphereHeightfield (GU_OVERLAP_FUNC_PARAMS);
bool GeomOverlapCallback_CapsuleHeightfield (GU_OVERLAP_FUNC_PARAMS);
bool GeomOverlapCallback_BoxHeightfield (GU_OVERLAP_FUNC_PARAMS);
bool GeomOverlapCallback_ConvexHeightfield (GU_OVERLAP_FUNC_PARAMS);
static bool GeomOverlapCallback_CustomGeometry(GU_OVERLAP_FUNC_PARAMS)
{
PX_UNUSED(cache);
if(geom0.getType() == PxGeometryType::eCUSTOM)
return static_cast<const PxCustomGeometry&>(geom0).callbacks->overlap(geom0, pose0, geom1, pose1, threadContext);
if(geom1.getType() == PxGeometryType::eCUSTOM)
return static_cast<const PxCustomGeometry&>(geom1).callbacks->overlap(geom1, pose1, geom0, pose0, threadContext);
return false;
}
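// When both geometries are custom, the first branch wins: only geom0's callbacks are
// consulted and geom1's overlap() is never called.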
GeomOverlapTable gGeomOverlapMethodTable[] =
{
//PxGeometryType::eSPHERE
{
GeomOverlapCallback_SphereSphere, //PxGeometryType::eSPHERE
GeomOverlapCallback_SpherePlane, //PxGeometryType::ePLANE
GeomOverlapCallback_SphereCapsule, //PxGeometryType::eCAPSULE
GeomOverlapCallback_SphereBox, //PxGeometryType::eBOX
GeomOverlapCallback_SphereConvex, //PxGeometryType::eCONVEXMESH
GeomOverlapCallback_NotSupported, //PxGeometryType::ePARTICLESYSTEM
GeomOverlapCallback_NotSupported, //PxGeometryType::eTETRAHEDRONMESH
GeomOverlapCallback_SphereMesh, //PxGeometryType::eTRIANGLEMESH
GeomOverlapCallback_SphereHeightfield, //PxGeometryType::eHEIGHTFIELD
GeomOverlapCallback_NotSupported, //PxGeometryType::eHAIRSYSTEM
GeomOverlapCallback_CustomGeometry, //PxGeometryType::eCUSTOM
},
//PxGeometryType::ePLANE
{
0, //PxGeometryType::eSPHERE
GeomOverlapCallback_NotSupported, //PxGeometryType::ePLANE
GeomOverlapCallback_PlaneCapsule, //PxGeometryType::eCAPSULE
GeomOverlapCallback_PlaneBox, //PxGeometryType::eBOX
GeomOverlapCallback_PlaneConvex, //PxGeometryType::eCONVEXMESH
GeomOverlapCallback_NotSupported, //PxGeometryType::ePARTICLESYSTEM
GeomOverlapCallback_NotSupported, //PxGeometryType::eTETRAHEDRONMESH
GeomOverlapCallback_NotSupported, //PxGeometryType::eTRIANGLEMESH
GeomOverlapCallback_NotSupported, //PxGeometryType::eHEIGHTFIELD
GeomOverlapCallback_NotSupported, //PxGeometryType::eHAIRSYSTEM
GeomOverlapCallback_CustomGeometry, //PxGeometryType::eCUSTOM
},
//PxGeometryType::eCAPSULE
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
GeomOverlapCallback_CapsuleCapsule, //PxGeometryType::eCAPSULE
GeomOverlapCallback_CapsuleBox, //PxGeometryType::eBOX
GeomOverlapCallback_CapsuleConvex, //PxGeometryType::eCONVEXMESH
GeomOverlapCallback_NotSupported, //PxGeometryType::ePARTICLESYSTEM
GeomOverlapCallback_NotSupported, //PxGeometryType::eTETRAHEDRONMESH
GeomOverlapCallback_CapsuleMesh, //PxGeometryType::eTRIANGLEMESH
GeomOverlapCallback_CapsuleHeightfield, //PxGeometryType::eHEIGHTFIELD
GeomOverlapCallback_NotSupported, //PxGeometryType::eHAIRSYSTEM
GeomOverlapCallback_CustomGeometry, //PxGeometryType::eCUSTOM
},
//PxGeometryType::eBOX
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
0, //PxGeometryType::eCAPSULE
GeomOverlapCallback_BoxBox, //PxGeometryType::eBOX
GeomOverlapCallback_BoxConvex, //PxGeometryType::eCONVEXMESH
GeomOverlapCallback_NotSupported, //PxGeometryType::ePARTICLESYSTEM
GeomOverlapCallback_NotSupported, //PxGeometryType::eTETRAHEDRONMESH
GeomOverlapCallback_BoxMesh, //PxGeometryType::eTRIANGLEMESH
GeomOverlapCallback_BoxHeightfield, //PxGeometryType::eHEIGHTFIELD
GeomOverlapCallback_NotSupported, //PxGeometryType::eHAIRSYSTEM
GeomOverlapCallback_CustomGeometry, //PxGeometryType::eCUSTOM
},
//PxGeometryType::eCONVEXMESH
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
0, //PxGeometryType::eCAPSULE
0, //PxGeometryType::eBOX
GeomOverlapCallback_ConvexConvex, //PxGeometryType::eCONVEXMESH
GeomOverlapCallback_NotSupported, //PxGeometryType::ePARTICLESYSTEM
GeomOverlapCallback_NotSupported, //PxGeometryType::eTETRAHEDRONMESH
GeomOverlapCallback_ConvexMesh, //PxGeometryType::eTRIANGLEMESH //not used: mesh always uses swept method for midphase.
GeomOverlapCallback_ConvexHeightfield, //PxGeometryType::eHEIGHTFIELD //TODO: make HF midphase that will mask this
GeomOverlapCallback_NotSupported, //PxGeometryType::eHAIRSYSTEM
GeomOverlapCallback_CustomGeometry, //PxGeometryType::eCUSTOM
},
//PxGeometryType::ePARTICLESYSTEM
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
0, //PxGeometryType::eCAPSULE
0, //PxGeometryType::eBOX
0, //PxGeometryType::eCONVEXMESH
GeomOverlapCallback_NotSupported, //PxGeometryType::ePARTICLESYSTEM
GeomOverlapCallback_NotSupported, //PxGeometryType::eTETRAHEDRONMESH
GeomOverlapCallback_NotSupported, //PxGeometryType::eTRIANGLEMESH
GeomOverlapCallback_NotSupported, //PxGeometryType::eHEIGHTFIELD
GeomOverlapCallback_NotSupported, //PxGeometryType::eHAIRSYSTEM
GeomOverlapCallback_NotSupported, //PxGeometryType::eCUSTOM
},
//PxGeometryType::eTETRAHEDRONMESH
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
0, //PxGeometryType::eCAPSULE
0, //PxGeometryType::eBOX
0, //PxGeometryType::eCONVEXMESH
0, //PxGeometryType::ePARTICLESYSTEM
GeomOverlapCallback_NotSupported, //PxGeometryType::eTETRAHEDRONMESH
GeomOverlapCallback_NotSupported, //PxGeometryType::eTRIANGLEMESH
GeomOverlapCallback_NotSupported, //PxGeometryType::eHEIGHTFIELD
GeomOverlapCallback_NotSupported, //PxGeometryType::eHAIRSYSTEM
GeomOverlapCallback_NotSupported, //PxGeometryType::eCUSTOM
},
//PxGeometryType::eTRIANGLEMESH
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
0, //PxGeometryType::eCAPSULE
0, //PxGeometryType::eBOX
0, //PxGeometryType::eCONVEXMESH
0, //PxGeometryType::ePARTICLESYSTEM
0, //PxGeometryType::eTETRAHEDRONMESH
GeomOverlapCallback_MeshMesh, //PxGeometryType::eTRIANGLEMESH
GeomOverlapCallback_NotSupported, //PxGeometryType::eHEIGHTFIELD
GeomOverlapCallback_NotSupported, //PxGeometryType::eHAIRSYSTEM
GeomOverlapCallback_CustomGeometry, //PxGeometryType::eCUSTOM
},
//PxGeometryType::eHEIGHTFIELD
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
0, //PxGeometryType::eCAPSULE
0, //PxGeometryType::eBOX
0, //PxGeometryType::eCONVEXMESH
0, //PxGeometryType::ePARTICLESYSTEM
0, //PxGeometryType::eTETRAHEDRONMESH
0, //PxGeometryType::eTRIANGLEMESH
GeomOverlapCallback_NotSupported, //PxGeometryType::eHEIGHTFIELD
GeomOverlapCallback_NotSupported, //PxGeometryType::eHAIRSYSTEM
GeomOverlapCallback_CustomGeometry, //PxGeometryType::eCUSTOM
},
//PxGeometryType::eHAIRSYSTEM
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
0, //PxGeometryType::eCAPSULE
0, //PxGeometryType::eBOX
0, //PxGeometryType::eCONVEXMESH
0, //PxGeometryType::ePARTICLESYSTEM
0, //PxGeometryType::eTETRAHEDRONMESH
0, //PxGeometryType::eTRIANGLEMESH
0, //PxGeometryType::eHEIGHTFIELD
GeomOverlapCallback_NotSupported, //PxGeometryType::eHAIRSYSTEM
GeomOverlapCallback_CustomGeometry, //PxGeometryType::eCUSTOM
},
//PxGeometryType::eCUSTOM
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
0, //PxGeometryType::eCAPSULE
0, //PxGeometryType::eBOX
0, //PxGeometryType::eCONVEXMESH
0, //PxGeometryType::ePARTICLESYSTEM
0, //PxGeometryType::eTETRAHEDRONMESH
0, //PxGeometryType::eTRIANGLEMESH
0, //PxGeometryType::eHEIGHTFIELD
0, //PxGeometryType::eHAIRSYSTEM
GeomOverlapCallback_CustomGeometry, //PxGeometryType::eCUSTOM
},
};
PX_COMPILE_TIME_ASSERT(sizeof(gGeomOverlapMethodTable) / sizeof(gGeomOverlapMethodTable[0]) == PxGeometryType::eGEOMETRY_COUNT);
const GeomOverlapTable* Gu::getOverlapFuncTable()
{
return gGeomOverlapMethodTable;
}
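// Usage sketch (hedged; the exact accessor lives in the GeomOverlapTable definition):
// with ordered geometry types t0 <= t1, a caller conceptually does
// bool hit = Gu::getOverlapFuncTable()[t0][t1](geom0, pose0, geom1, pose1, ...);
// where the inner index selects the per-type callback listed in each row above.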
| 30,560 | C++ | 37.489924 | 191 | 0.732919 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuMetaData.cpp |
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxIO.h"
#include "common/PxMetaData.h"
#include "GuHeightField.h"
#include "GuConvexMeshData.h"
#include "GuBigConvexData2.h"
#include "GuConvexMesh.h"
#include "GuTriangleMesh.h"
#include "GuTriangleMeshBV4.h"
#include "GuTriangleMeshRTree.h"
#include "foundation/PxIntrinsics.h"
using namespace physx;
using namespace Cm;
using namespace Gu;
///////////////////////////////////////////////////////////////////////////////
static void getBinaryMetaData_Valency(PxOutputStream& stream)
{
PX_DEF_BIN_METADATA_CLASS(stream, Valency)
PX_DEF_BIN_METADATA_ITEM(stream, Valency, PxU16, mCount, 0)
PX_DEF_BIN_METADATA_ITEM(stream, Valency, PxU16, mOffset, 0)
}
static void getBinaryMetaData_BigConvexRawData(PxOutputStream& stream)
{
PX_DEF_BIN_METADATA_CLASS(stream, BigConvexRawData)
PX_DEF_BIN_METADATA_ITEM(stream, BigConvexRawData, PxU16, mSubdiv, 0)
PX_DEF_BIN_METADATA_ITEM(stream, BigConvexRawData, PxU16, mNbSamples, 0)
PX_DEF_BIN_METADATA_ITEM(stream, BigConvexRawData, PxU8, mSamples, PxMetaDataFlag::ePTR)
PX_DEF_BIN_METADATA_ITEM(stream, BigConvexRawData, PxU32, mNbVerts, 0)
PX_DEF_BIN_METADATA_ITEM(stream, BigConvexRawData, PxU32, mNbAdjVerts, 0)
PX_DEF_BIN_METADATA_ITEM(stream, BigConvexRawData, Valency, mValencies, PxMetaDataFlag::ePTR)
PX_DEF_BIN_METADATA_ITEM(stream, BigConvexRawData, PxU8, mAdjacentVerts, PxMetaDataFlag::ePTR)
}
void SDF::getBinaryMetaData(PxOutputStream& stream)
{
PX_DEF_BIN_METADATA_CLASS(stream, Dim3)
PX_DEF_BIN_METADATA_ITEM(stream, Dim3, PxU32, x, 0)
PX_DEF_BIN_METADATA_ITEM(stream, Dim3, PxU32, y, 0)
PX_DEF_BIN_METADATA_ITEM(stream, Dim3, PxU32, z, 0)
PX_DEF_BIN_METADATA_CLASS(stream, SDF)
PX_DEF_BIN_METADATA_ITEM(stream, SDF, PxVec3, mMeshLower, 0)
PX_DEF_BIN_METADATA_ITEM(stream, SDF, PxReal, mSpacing, 0)
PX_DEF_BIN_METADATA_ITEM(stream, SDF, Dim3, mDims, 0)
PX_DEF_BIN_METADATA_ITEM(stream, SDF, PxU32, mNumSdfs, 0)
PX_DEF_BIN_METADATA_ITEM(stream, SDF, PxReal, mSdf, PxMetaDataFlag::ePTR)
PX_DEF_BIN_METADATA_ITEM(stream, SDF, PxU32, mSubgridSize, 0)
PX_DEF_BIN_METADATA_ITEM(stream, SDF, PxU32, mNumStartSlots, 0)
PX_DEF_BIN_METADATA_ITEM(stream, SDF, PxU32, mSubgridStartSlots, PxMetaDataFlag::ePTR)
PX_DEF_BIN_METADATA_ITEM(stream, SDF, PxU32, mNumSubgridSdfs, 0)
PX_DEF_BIN_METADATA_ITEM(stream, SDF, PxU8, mSubgridSdf, PxMetaDataFlag::ePTR)
PX_DEF_BIN_METADATA_ITEM(stream, SDF, Dim3, mSdfSubgrids3DTexBlockDim, 0)
PX_DEF_BIN_METADATA_ITEM(stream, SDF, PxReal, mSubgridsMinSdfValue, 0)
PX_DEF_BIN_METADATA_ITEM(stream, SDF, PxReal, mSubgridsMaxSdfValue, 0)
PX_DEF_BIN_METADATA_ITEM(stream, SDF, PxU32, mBytesPerSparsePixel, 0)
PX_DEF_BIN_METADATA_ITEM(stream, SDF, bool, mOwnsMemory, 0)
}
void BigConvexData::getBinaryMetaData(PxOutputStream& stream)
{
getBinaryMetaData_Valency(stream);
getBinaryMetaData_BigConvexRawData(stream);
PX_DEF_BIN_METADATA_CLASS(stream, BigConvexData)
PX_DEF_BIN_METADATA_ITEM(stream, BigConvexData, BigConvexRawData, mData, 0)
PX_DEF_BIN_METADATA_ITEM(stream, BigConvexData, void, mVBuffer, PxMetaDataFlag::ePTR)
//------ Extra-data ------
// mData.mSamples
// PT: can't use one array of PxU16 since we don't want to flip those bytes during conversion.
// PT: We only align the first array for DE1340, but the second one shouldn't be aligned since
// both are written as one unique block of memory.
PX_DEF_BIN_METADATA_EXTRA_ARRAY(stream, BigConvexData, PxU8, mData.mNbSamples, PX_SERIAL_ALIGN, 0)
PX_DEF_BIN_METADATA_EXTRA_ARRAY(stream, BigConvexData, PxU8, mData.mNbSamples, 0, 0)
// mData.mValencies
// PT: same here, we must only align the first array
PX_DEF_BIN_METADATA_EXTRA_ARRAY(stream, BigConvexData, Valency, mData.mNbVerts, PX_SERIAL_ALIGN, 0)
PX_DEF_BIN_METADATA_EXTRA_ALIGN(stream, BigConvexData, PX_SERIAL_ALIGN)
PX_DEF_BIN_METADATA_EXTRA_ARRAY(stream, BigConvexData, PxU8, mData.mNbAdjVerts, 0, 0)
}
static void getBinaryMetaData_InternalObjectsData(PxOutputStream& stream)
{
PX_DEF_BIN_METADATA_CLASS(stream, InternalObjectsData)
PX_DEF_BIN_METADATA_ITEM(stream, InternalObjectsData, PxReal, mRadius, 0)
PX_DEF_BIN_METADATA_ITEMS_AUTO(stream, InternalObjectsData, PxReal, mExtents, 0)
}
static void getBinaryMetaData_HullPolygonData(PxOutputStream& stream)
{
PX_DEF_BIN_METADATA_CLASS(stream, HullPolygonData)
PX_DEF_BIN_METADATA_ITEMS_AUTO(stream, HullPolygonData, PxReal, mPlane, 0)
PX_DEF_BIN_METADATA_ITEM(stream, HullPolygonData, PxU16, mVRef8, 0)
PX_DEF_BIN_METADATA_ITEM(stream, HullPolygonData, PxU8, mNbVerts, 0)
PX_DEF_BIN_METADATA_ITEM(stream, HullPolygonData, PxU8, mMinIndex, 0)
}
static void getBinaryMetaData_ConvexHullData(PxOutputStream& stream)
{
PX_DEF_BIN_METADATA_CLASS(stream, ConvexHullData)
PX_DEF_BIN_METADATA_ITEM(stream, ConvexHullData, PxBounds3, mAABB, 0)
PX_DEF_BIN_METADATA_ITEM(stream, ConvexHullData, PxVec3, mCenterOfMass, 0)
PX_DEF_BIN_METADATA_ITEM(stream, ConvexHullData, HullPolygonData, mPolygons, PxMetaDataFlag::ePTR)
PX_DEF_BIN_METADATA_ITEM(stream, ConvexHullData, BigConvexRawData, mBigConvexRawData, PxMetaDataFlag::ePTR)
PX_DEF_BIN_METADATA_ITEM(stream, ConvexHullData, SDF, mSdfData, PxMetaDataFlag::ePTR)
//ML: the most significant bit of mNbEdges is used to indicate whether we have GRB data or not. However, we don't support GRB data
//in serialization, so we have to mask the most significant bit and force contact gen to run on the CPU code path.
PX_DEF_BIN_METADATA_ITEM(stream, ConvexHullData, PxU16, mNbEdges, PxMetaDataFlag::eCOUNT_MASK_MSB)
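// Hedged illustration of eCOUNT_MASK_MSB: converters read the count with its top
// bit stripped, e.g. effectiveNbEdges = PxU16(mNbEdges & 0x7FFF);
// (hypothetical helper expression, not part of this file).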
PX_DEF_BIN_METADATA_ITEM(stream, ConvexHullData, PxU8, mNbHullVertices, 0)
PX_DEF_BIN_METADATA_ITEM(stream, ConvexHullData, PxU8, mNbPolygons, 0)
PX_DEF_BIN_METADATA_ITEM(stream, ConvexHullData, InternalObjectsData, mInternal, 0)
}
void Gu::ConvexMesh::getBinaryMetaData(PxOutputStream& stream)
{
getBinaryMetaData_InternalObjectsData(stream);
getBinaryMetaData_HullPolygonData(stream);
getBinaryMetaData_ConvexHullData(stream);
SDF::getBinaryMetaData(stream);
BigConvexData::getBinaryMetaData(stream);
PX_DEF_BIN_METADATA_VCLASS(stream,ConvexMesh)
PX_DEF_BIN_METADATA_BASE_CLASS(stream,ConvexMesh, PxBase)
//
PX_DEF_BIN_METADATA_ITEM(stream, ConvexMesh, ConvexHullData, mHullData, 0)
PX_DEF_BIN_METADATA_ITEM(stream, ConvexMesh, PxU32, mNb, 0)
PX_DEF_BIN_METADATA_ITEM(stream, ConvexMesh, SDF, mSdfData, PxMetaDataFlag::ePTR)
PX_DEF_BIN_METADATA_ITEM(stream, ConvexMesh, BigConvexData, mBigConvexData, PxMetaDataFlag::ePTR)
PX_DEF_BIN_METADATA_ITEM(stream, ConvexMesh, PxReal, mMass, 0)
PX_DEF_BIN_METADATA_ITEM(stream, ConvexMesh, PxMat33, mInertia, 0)
PX_DEF_BIN_METADATA_ITEM(stream, ConvexMesh, GuMeshFactory, mMeshFactory, PxMetaDataFlag::ePTR)
//------ Extra-data ------
// mHullData.mPolygons (Gu::HullPolygonData, PxVec3, PxU8*2, PxU8)
// PT: we only align the first array since the other ones are contained within it
PX_DEF_BIN_METADATA_EXTRA_ARRAY(stream, Gu::ConvexMesh, HullPolygonData, mHullData.mNbPolygons, PX_SERIAL_ALIGN, 0)
PX_DEF_BIN_METADATA_EXTRA_ARRAY(stream, Gu::ConvexMesh, PxVec3, mHullData.mNbHullVertices, 0, 0)
PX_DEF_BIN_METADATA_EXTRA_ARRAY(stream, Gu::ConvexMesh, PxU8, mHullData.mNbEdges, 0, PxMetaDataFlag::eCOUNT_MASK_MSB)
PX_DEF_BIN_METADATA_EXTRA_ARRAY(stream, Gu::ConvexMesh, PxU8, mHullData.mNbEdges, 0, PxMetaDataFlag::eCOUNT_MASK_MSB)
PX_DEF_BIN_METADATA_EXTRA_ARRAY(stream, Gu::ConvexMesh, PxU8, mHullData.mNbHullVertices, 0, 0)
PX_DEF_BIN_METADATA_EXTRA_ARRAY(stream, Gu::ConvexMesh, PxU8, mHullData.mNbHullVertices, 0, 0)
PX_DEF_BIN_METADATA_EXTRA_ARRAY(stream, Gu::ConvexMesh, PxU8, mHullData.mNbHullVertices, 0, 0)
PX_DEF_BIN_METADATA_EXTRA_ARRAY(stream, Gu::ConvexMesh, PxU8, mNb, 0, PxMetaDataFlag::eCOUNT_MASK_MSB)
PX_DEF_BIN_METADATA_EXTRA_ALIGN(stream, ConvexMesh, 4)
// mSdfData: this is currently broken
//PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, Gu::SDF, PxReal, mSdf, mNumSdfs, 0, PX_SERIAL_ALIGN)
//PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, Gu::SDF, PxU32, mSubgridStartSlots, mNumStartSlots, 0, PX_SERIAL_ALIGN)
//PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, Gu::SDF, PxU8, mSubgridSdf, mNumSubgridSdfs, 0, PX_SERIAL_ALIGN)
// mBigConvexData
PX_DEF_BIN_METADATA_EXTRA_ITEM(stream, Gu::ConvexMesh, BigConvexData, mBigConvexData, PX_SERIAL_ALIGN)
}
///////////////////////////////////////////////////////////////////////////////
static void getBinaryMetaData_PxHeightFieldSample(PxOutputStream& stream)
{
PX_DEF_BIN_METADATA_CLASS(stream, PxHeightFieldSample)
PX_DEF_BIN_METADATA_ITEM(stream, PxHeightFieldSample, PxI16, height, 0)
PX_DEF_BIN_METADATA_ITEM(stream, PxHeightFieldSample, PxBitAndByte, materialIndex0, 0)
PX_DEF_BIN_METADATA_ITEM(stream, PxHeightFieldSample, PxBitAndByte, materialIndex1, 0)
PX_DEF_BIN_METADATA_TYPEDEF(stream, PxBitAndByte, PxU8)
}
static void getBinaryMetaData_HeightFieldData(PxOutputStream& stream)
{
PX_DEF_BIN_METADATA_TYPEDEF(stream, PxHeightFieldFlags, PxU16)
PX_DEF_BIN_METADATA_TYPEDEF(stream, PxHeightFieldFormat::Enum, PxU32)
PX_DEF_BIN_METADATA_CLASS(stream, HeightFieldData)
PX_DEF_BIN_METADATA_ITEM(stream, HeightFieldData, PxBounds3, mAABB, 0)
PX_DEF_BIN_METADATA_ITEM(stream, HeightFieldData, PxU32, rows, 0)
PX_DEF_BIN_METADATA_ITEM(stream, HeightFieldData, PxU32, columns, 0)
PX_DEF_BIN_METADATA_ITEM(stream, HeightFieldData, PxU32, rowLimit, 0)
PX_DEF_BIN_METADATA_ITEM(stream, HeightFieldData, PxU32, colLimit, 0)
PX_DEF_BIN_METADATA_ITEM(stream, HeightFieldData, PxU32, nbColumns, 0)
PX_DEF_BIN_METADATA_ITEM(stream, HeightFieldData, PxHeightFieldSample, samples, PxMetaDataFlag::ePTR)
PX_DEF_BIN_METADATA_ITEM(stream, HeightFieldData, PxReal, convexEdgeThreshold, 0)
PX_DEF_BIN_METADATA_ITEM(stream, HeightFieldData, PxHeightFieldFlags, flags, 0)
#ifdef EXPLICIT_PADDING_METADATA
PX_DEF_BIN_METADATA_ITEM(stream, HeightFieldData, PxU16, paddAfterFlags, PxMetaDataFlag::ePADDING)
#endif
PX_DEF_BIN_METADATA_ITEM(stream, HeightFieldData, PxHeightFieldFormat::Enum, format, 0)
}
void Gu::HeightField::getBinaryMetaData(PxOutputStream& stream)
{
getBinaryMetaData_PxHeightFieldSample(stream);
getBinaryMetaData_HeightFieldData(stream);
PX_DEF_BIN_METADATA_TYPEDEF(stream, PxMaterialTableIndex, PxU16)
PX_DEF_BIN_METADATA_VCLASS(stream, HeightField)
PX_DEF_BIN_METADATA_BASE_CLASS(stream, HeightField, PxBase)
PX_DEF_BIN_METADATA_ITEM(stream, HeightField, HeightFieldData, mData, 0)
PX_DEF_BIN_METADATA_ITEM(stream, HeightField, PxU32, mSampleStride, 0)
PX_DEF_BIN_METADATA_ITEM(stream, HeightField, PxU32, mNbSamples, 0)
PX_DEF_BIN_METADATA_ITEM(stream, HeightField, PxReal, mMinHeight, 0)
PX_DEF_BIN_METADATA_ITEM(stream, HeightField, PxReal, mMaxHeight, 0)
PX_DEF_BIN_METADATA_ITEM(stream, HeightField, PxU32, mModifyCount, 0)
PX_DEF_BIN_METADATA_ITEM(stream, HeightField, GuMeshFactory, mMeshFactory, PxMetaDataFlag::ePTR)
//------ Extra-data ------
// mData.samples
PX_DEF_BIN_METADATA_EXTRA_ARRAY(stream, HeightField, PxHeightFieldSample, mNbSamples, PX_SERIAL_ALIGN, 0) // PT: ### try to remove mNbSamples later
}
///////////////////////////////////////////////////////////////////////////////
static void getBinaryMetaData_RTreePage(PxOutputStream& stream)
{
PX_DEF_BIN_METADATA_CLASS(stream, RTreePage)
PX_DEF_BIN_METADATA_ITEMS(stream, RTreePage, PxReal, minx, 0, RTREE_N)
PX_DEF_BIN_METADATA_ITEMS(stream, RTreePage, PxReal, miny, 0, RTREE_N)
PX_DEF_BIN_METADATA_ITEMS(stream, RTreePage, PxReal, minz, 0, RTREE_N)
PX_DEF_BIN_METADATA_ITEMS(stream, RTreePage, PxReal, maxx, 0, RTREE_N)
PX_DEF_BIN_METADATA_ITEMS(stream, RTreePage, PxReal, maxy, 0, RTREE_N)
PX_DEF_BIN_METADATA_ITEMS(stream, RTreePage, PxReal, maxz, 0, RTREE_N)
PX_DEF_BIN_METADATA_ITEMS(stream, RTreePage, PxU32, ptrs, 0, RTREE_N)
}
void RTree::getBinaryMetaData(PxOutputStream& stream)
{
getBinaryMetaData_RTreePage(stream);
PX_DEF_BIN_METADATA_CLASS(stream, RTree)
PX_DEF_BIN_METADATA_ITEM(stream, RTree, PxVec4, mBoundsMin, 0)
PX_DEF_BIN_METADATA_ITEM(stream, RTree, PxVec4, mBoundsMax, 0)
PX_DEF_BIN_METADATA_ITEM(stream, RTree, PxVec4, mInvDiagonal, 0)
PX_DEF_BIN_METADATA_ITEM(stream, RTree, PxVec4, mDiagonalScaler, 0)
PX_DEF_BIN_METADATA_ITEM(stream, RTree, PxU32, mPageSize, 0)
PX_DEF_BIN_METADATA_ITEM(stream, RTree, PxU32, mNumRootPages, 0)
PX_DEF_BIN_METADATA_ITEM(stream, RTree, PxU32, mNumLevels, 0)
PX_DEF_BIN_METADATA_ITEM(stream, RTree, PxU32, mTotalNodes, 0)
PX_DEF_BIN_METADATA_ITEM(stream, RTree, PxU32, mTotalPages, 0)
PX_DEF_BIN_METADATA_ITEM(stream, RTree, PxU32, mFlags, 0)
PX_DEF_BIN_METADATA_ITEM(stream, RTree, RTreePage, mPages, PxMetaDataFlag::ePTR)
//------ Extra-data ------
// mPages
PX_DEF_BIN_METADATA_EXTRA_ARRAY(stream,RTree, RTreePage, mTotalPages, 128, 0)
}
///////////////////////////////////////////////////////////////////////////////
void SourceMeshBase::getBinaryMetaData(PxOutputStream& stream)
{
PX_DEF_BIN_METADATA_VCLASS(stream, SourceMeshBase)
PX_DEF_BIN_METADATA_ITEM(stream, SourceMeshBase, PxU32, mNbVerts, 0)
PX_DEF_BIN_METADATA_ITEM(stream, SourceMeshBase, PxVec3, mVerts, PxMetaDataFlag::ePTR)
PX_DEF_BIN_METADATA_ITEM(stream, SourceMeshBase, PxU32, mType, 0)
PX_DEF_BIN_METADATA_ITEM(stream, SourceMeshBase, PxU32, mRemap, PxMetaDataFlag::ePTR)
}
void SourceMesh::getBinaryMetaData(PxOutputStream& stream)
{
// SourceMesh
PX_DEF_BIN_METADATA_VCLASS(stream, SourceMesh)
PX_DEF_BIN_METADATA_BASE_CLASS(stream, SourceMesh, SourceMeshBase)
PX_DEF_BIN_METADATA_ITEM(stream, SourceMesh, PxU32, mNbTris, 0)
PX_DEF_BIN_METADATA_ITEM(stream, SourceMesh, void, mTriangles32, PxMetaDataFlag::ePTR)
PX_DEF_BIN_METADATA_ITEM(stream, SourceMesh, void, mTriangles16, PxMetaDataFlag::ePTR)
}
static void getBinaryMetaData_BVDataPackedQ(PxOutputStream& stream)
{
PX_DEF_BIN_METADATA_CLASS(stream, QuantizedAABB)
PX_DEF_BIN_METADATA_ITEM(stream, QuantizedAABB, PxU16, mData[0].mExtents, 0)
PX_DEF_BIN_METADATA_ITEM(stream, QuantizedAABB, PxI16, mData[0].mCenter, 0)
PX_DEF_BIN_METADATA_ITEM(stream, QuantizedAABB, PxU16, mData[1].mExtents, 0)
PX_DEF_BIN_METADATA_ITEM(stream, QuantizedAABB, PxI16, mData[1].mCenter, 0)
PX_DEF_BIN_METADATA_ITEM(stream, QuantizedAABB, PxU16, mData[2].mExtents, 0)
PX_DEF_BIN_METADATA_ITEM(stream, QuantizedAABB, PxI16, mData[2].mCenter, 0)
PX_DEF_BIN_METADATA_CLASS(stream, BVDataPackedQ)
PX_DEF_BIN_METADATA_ITEM(stream, BVDataPackedQ, QuantizedAABB, mAABB, 0)
PX_DEF_BIN_METADATA_ITEM(stream, BVDataPackedQ, PxU32, mData, 0)
}
static void getBinaryMetaData_BVDataPackedNQ(PxOutputStream& stream)
{
PX_DEF_BIN_METADATA_CLASS(stream, CenterExtents)
PX_DEF_BIN_METADATA_ITEM(stream, CenterExtents, PxVec3, mCenter, 0)
PX_DEF_BIN_METADATA_ITEM(stream, CenterExtents, PxVec3, mExtents, 0)
PX_DEF_BIN_METADATA_CLASS(stream, BVDataPackedNQ)
PX_DEF_BIN_METADATA_ITEM(stream, BVDataPackedNQ, CenterExtents, mAABB, 0)
PX_DEF_BIN_METADATA_ITEM(stream, BVDataPackedNQ, PxU32, mData, 0)
}
void BV4Tree::getBinaryMetaData(PxOutputStream& stream)
{
getBinaryMetaData_BVDataPackedQ(stream);
getBinaryMetaData_BVDataPackedNQ(stream);
PX_DEF_BIN_METADATA_CLASS(stream, LocalBounds)
PX_DEF_BIN_METADATA_ITEM(stream, LocalBounds, PxVec3, mCenter, 0)
PX_DEF_BIN_METADATA_ITEM(stream, LocalBounds, float, mExtentsMagnitude, 0)
PX_DEF_BIN_METADATA_CLASS(stream, BV4Tree)
PX_DEF_BIN_METADATA_ITEM(stream, BV4Tree, void, mMeshInterface, PxMetaDataFlag::ePTR)
PX_DEF_BIN_METADATA_ITEM(stream, BV4Tree, LocalBounds, mLocalBounds, 0)
PX_DEF_BIN_METADATA_ITEM(stream, BV4Tree, PxU32, mNbNodes, 0)
PX_DEF_BIN_METADATA_ITEM(stream, BV4Tree, void, mNodes, PxMetaDataFlag::ePTR)
PX_DEF_BIN_METADATA_ITEM(stream, BV4Tree, PxU32, mInitData, 0)
PX_DEF_BIN_METADATA_ITEM(stream, BV4Tree, PxVec3, mCenterOrMinCoeff, 0)
PX_DEF_BIN_METADATA_ITEM(stream, BV4Tree, PxVec3, mExtentsOrMaxCoeff, 0)
PX_DEF_BIN_METADATA_ITEM(stream, BV4Tree, bool, mUserAllocated, 0)
PX_DEF_BIN_METADATA_ITEM(stream, BV4Tree, bool, mQuantized, 0)
PX_DEF_BIN_METADATA_ITEM(stream, BV4Tree, bool, mIsEdgeSet, 0)
PX_DEF_BIN_METADATA_ITEM(stream, BV4Tree, bool, mPadding, PxMetaDataFlag::ePADDING)
//------ Extra-data ------
// PX_DEF_BIN_METADATA_EXTRA_ARRAY(stream, BV4Tree, BVDataPackedQ, mNbNodes, 16, 0)
PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, BV4Tree, BVDataPackedQ, mQuantized, mNbNodes, PxMetaDataFlag::Enum(0), PX_SERIAL_ALIGN)
PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, BV4Tree, BVDataPackedNQ, mQuantized, mNbNodes, PxMetaDataFlag::eCONTROL_FLIP, PX_SERIAL_ALIGN)
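// Hedged reading of the two entries above: mQuantized is the control variable, so
// the BVDataPackedQ array is exported when it is true and, thanks to eCONTROL_FLIP
// on the second entry, the BVDataPackedNQ array when it is false.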
}
///////////////////////////////////////////////////////////////////////////////
void Gu::TriangleMesh::getBinaryMetaData(PxOutputStream& stream)
{
SDF::getBinaryMetaData(stream);
PX_DEF_BIN_METADATA_VCLASS(stream, TriangleMesh)
PX_DEF_BIN_METADATA_BASE_CLASS(stream, TriangleMesh, PxBase)
PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxU32, mNbVertices, 0)
PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxU32, mNbTriangles, 0)
PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxVec3, mVertices, PxMetaDataFlag::ePTR)
PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, void, mTriangles, PxMetaDataFlag::ePTR)
PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxBounds3, mAABB, 0) // PT: warning, this is actually a CenterExtents
PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxU8, mExtraTrigData, PxMetaDataFlag::ePTR)
PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxReal, mGeomEpsilon, 0)
PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxU8, mFlags, 0)
PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxU16, mMaterialIndices, PxMetaDataFlag::ePTR)
PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxU32, mFaceRemap, PxMetaDataFlag::ePTR)
PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxU32, mAdjacencies, PxMetaDataFlag::ePTR)
PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, GuMeshFactory, mMeshFactory, PxMetaDataFlag::ePTR)
PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, void, mEdgeList, PxMetaDataFlag::ePTR)
PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxReal, mMass, 0)
PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxMat33, mInertia, 0)
PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxVec3, mLocalCenterOfMass, 0)
PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, void, mGRB_triIndices, PxMetaDataFlag::ePTR)
PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, void, mGRB_triAdjacencies, PxMetaDataFlag::ePTR)
PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxU32, mGRB_faceRemap, PxMetaDataFlag::ePTR)
PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxU32, mGRB_faceRemapInverse, PxMetaDataFlag::ePTR)
PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, Gu::BV32Tree, mGRB_BV32Tree, PxMetaDataFlag::ePTR)
PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, SDF, mSdfData, 0)
PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxU32, mAccumulatedTrianglesRef, PxMetaDataFlag::ePTR)
PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxU32, mTrianglesReferences, PxMetaDataFlag::ePTR)
PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxU32, mNbTrianglesReferences, 0)
//------ Extra-data ------
// mVertices
PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, TriangleMesh, PxVec3, mVertices, mNbVertices, 0, PX_SERIAL_ALIGN)
// mTriangles
// PT: quite tricky here: we exported either an array of PxU16s or an array of PxU32s. We trick the converter by
// pretending we exported both, with the same control variable (the e16_BIT_INDICES bit of mFlags) but opposed control flags. Also there's
// no way to capture "mNbTriangles*3" using the macros, so we just pretend we exported 3 buffers instead of 1.
// But since in reality it's all the same buffer, only the first one is declared as aligned.
PX_DEF_BIN_METADATA_EXTRA_ITEMS_MASKED_CONTROL(stream, TriangleMesh, PxU16, mFlags, PxTriangleMeshFlag::e16_BIT_INDICES, mNbTriangles, 0, PX_SERIAL_ALIGN)
PX_DEF_BIN_METADATA_EXTRA_ITEMS_MASKED_CONTROL(stream, TriangleMesh, PxU16, mFlags, PxTriangleMeshFlag::e16_BIT_INDICES, mNbTriangles, 0, 0)
PX_DEF_BIN_METADATA_EXTRA_ITEMS_MASKED_CONTROL(stream, TriangleMesh, PxU16, mFlags, PxTriangleMeshFlag::e16_BIT_INDICES, mNbTriangles, 0, 0)
PX_DEF_BIN_METADATA_EXTRA_ITEMS_MASKED_CONTROL(stream, TriangleMesh, PxU32, mFlags, PxTriangleMeshFlag::e16_BIT_INDICES, mNbTriangles, PxMetaDataFlag::eCONTROL_FLIP, PX_SERIAL_ALIGN)
PX_DEF_BIN_METADATA_EXTRA_ITEMS_MASKED_CONTROL(stream, TriangleMesh, PxU32, mFlags, PxTriangleMeshFlag::e16_BIT_INDICES, mNbTriangles, PxMetaDataFlag::eCONTROL_FLIP, 0)
PX_DEF_BIN_METADATA_EXTRA_ITEMS_MASKED_CONTROL(stream, TriangleMesh, PxU32, mFlags, PxTriangleMeshFlag::e16_BIT_INDICES, mNbTriangles, PxMetaDataFlag::eCONTROL_FLIP, 0)
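// Hedged reading of the six entries above: when (mFlags & e16_BIT_INDICES) is set,
// only the PxU16 declarations apply; eCONTROL_FLIP inverts the test so the PxU32
// declarations apply otherwise. Each buffer is declared three times to encode
// mNbTriangles*3 indices, and only the first declaration carries the alignment.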
// mExtraTrigData
PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, TriangleMesh, PxU8, mExtraTrigData, mNbTriangles, 0, PX_SERIAL_ALIGN)
// mMaterialIndices
PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, TriangleMesh, PxU16, mMaterialIndices, mNbTriangles, 0, PX_SERIAL_ALIGN)
// mFaceRemap
PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, TriangleMesh, PxU32, mFaceRemap, mNbTriangles, 0, PX_SERIAL_ALIGN)
// mAdjacencies
PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, TriangleMesh, PxU32, mAdjacencies, mNbTriangles, 0, PX_SERIAL_ALIGN)
PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, TriangleMesh, PxU32, mAdjacencies, mNbTriangles, 0, 0)
PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, TriangleMesh, PxU32, mAdjacencies, mNbTriangles, 0, 0)
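// Same three-declarations trick as the triangle indices above: there are
// mNbTriangles*3 adjacency entries, so the buffer is declared three times with
// only the first declaration aligned.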
// GPU data missing!
// mSdfData: this is currently broken
//PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, TriangleMesh, PxReal, mSdfData.mSdf, mSdfData.mNumSdfs, 0, PX_SERIAL_ALIGN)
//PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, TriangleMesh, PxU32, mSdfData.mSubgridStartSlots, mSdfData.mNumStartSlots, 0, PX_SERIAL_ALIGN)
//PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, TriangleMesh, PxU8, mSdfData.mSubgridSdf, mSdfData.mNumSubgridSdfs, 0, PX_SERIAL_ALIGN)
// mAccumulatedTrianglesRef
// PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, TriangleMesh, PxU32, mAccumulatedTrianglesRef, mNbTrianglesReferences, 0, PX_SERIAL_ALIGN)
// mTrianglesReferences
// PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, TriangleMesh, PxU32, mTrianglesReferences, mNbTrianglesReferences, 0, PX_SERIAL_ALIGN)
#ifdef EXPLICIT_PADDING_METADATA
PX_DEF_BIN_METADATA_ITEMS_AUTO(stream, TriangleMesh, PxU32, mPaddingFromInternalMesh, PxMetaDataFlag::ePADDING)
#endif
}
void Gu::RTreeTriangleMesh::getBinaryMetaData(PxOutputStream& stream)
{
RTree::getBinaryMetaData(stream);
PX_DEF_BIN_METADATA_VCLASS(stream, RTreeTriangleMesh)
PX_DEF_BIN_METADATA_BASE_CLASS(stream, RTreeTriangleMesh, TriangleMesh)
PX_DEF_BIN_METADATA_ITEM(stream, RTreeTriangleMesh, RTree, mRTree, 0)
}
void Gu::BV4TriangleMesh::getBinaryMetaData(PxOutputStream& stream)
{
SourceMeshBase::getBinaryMetaData(stream);
SourceMesh::getBinaryMetaData(stream);
BV4Tree::getBinaryMetaData(stream);
PX_DEF_BIN_METADATA_VCLASS(stream, BV4TriangleMesh)
PX_DEF_BIN_METADATA_BASE_CLASS(stream, BV4TriangleMesh, TriangleMesh)
PX_DEF_BIN_METADATA_ITEM(stream, BV4TriangleMesh, SourceMesh, mMeshInterface, 0)
PX_DEF_BIN_METADATA_ITEM(stream, BV4TriangleMesh, BV4Tree, mBV4Tree, 0)
}
///////////////////////////////////////////////////////////////////////////////
| 25,089 | C++ | 50.519507 | 183 | 0.75009 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuQuerySystem.cpp |
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuQuerySystem.h"
#include "GuBounds.h"
#include "GuBVH.h"
#include "foundation/PxAlloca.h"
#include "common/PxProfileZone.h"
using namespace physx;
using namespace Gu;
///////////////////////////////////////////////////////////////////////////////
bool contains(PxArray<PxU32>& pruners, PxU32 index)
{
const PxU32 nb = pruners.size();
for(PxU32 i=0;i<nb;i++)
{
if(pruners[i]==index)
return true;
}
return false;
}
///////////////////////////////////////////////////////////////////////////////
QuerySystem::PrunerExt::PrunerExt(Pruner* pruner, PxU32 preallocated) : mPruner(pruner), mDirtyList("QuerySystem::PrunerExt::mDirtyList"), mNbStatic(0), mNbDynamic(0), mDirtyStatic(false)
{
if(pruner && preallocated)
pruner->preallocate(preallocated);
}
QuerySystem::PrunerExt::~PrunerExt()
{
PX_DELETE(mPruner);
}
void QuerySystem::PrunerExt::flushMemory()
{
if(!mDirtyList.size())
mDirtyList.reset();
// PT: TODO: flush bitmap here
// PT: TODO: flush pruner here?
}
// PT: ok things became more complicated than before here. We'd like to delay the update of *both* the transform and the bounds,
// since immediately updating only one of them doesn't make much sense (it invalidates the pruner's data structure anyway). When both
// are delayed it gives users the ability to query the pruners *without* committing the changes, i.e. they can query the old snapshot
// for as long as they please (e.g. a raycast wouldn't automatically trigger a structure update).
//
// Now the problem is that we need to store (at least) the transform until the update actually happens, and the initial code didn't
// support this. We also want to do this in an efficient way, which of course makes things more difficult.
//
// A naive version would simply use a per-pruner hashmap between the PrunerHandle and its data. Might be slower than before.
//
// Another version could build on the initial bitmap-based solution and use arrays of transforms/bounds as companions to the array
// of PrunerHandle (or we could mix all that data in a single structure). The issue with this is that two consecutive updates on the
// same object wouldn't work anymore: the second call would check the bitmap, see that the bit is set already, and skip the work.
// We'd need to update the cached data instead, i.e. we'd need a mapping between the PrunerHandle and its position in mDirtyList.
// And we don't have that.
//
// A potential way to fix this could be to allow the same PrunerHandle to appear multiple times in mDirtyList, with the assumption
// that users will not update the same object multiple times very often (...). The way it would work:
// - during "add", dirtyMap is set, handle/transform/bounds are pushed to mDirtyList.
// - during "remove", dirtyMap is reset *and that's it*. We don't bother purging mDirtyList (i.e. we kill the current O(n) search there)
// - during "process" we use dirtyMap to validate the update. If bit is cleared, ignore mDirtyList entry. Duplicate entries work as long
// as mDirtyList is processed in linear order. One issue is that the current mDirtyList is also passed to the pruner as-is for the
// update, so we'd need to rebuild a separate array for that and/or make sure all pruners accept duplicate entries in that array.
// Deep down that specific rabbit hole we'll actually find the recently discovered issue regarding the mToRefit array...
//
// Bit tricky. This is only for user-updates anyway (as opposed to sim updates) so this probably doesn't need ultimate perf? Note however
// that we "remove from dirty list" when an object is removed, which happens all the time with or without user updates (e.g. streaming etc).
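// A minimal sketch of the per-pruner hashmap alternative mentioned above
// (hypothetical, not what this file implements):
// struct PendingUpdate { PxTransform pose; PxBounds3 bounds; };
// PxHashMap<PrunerHandle, PendingUpdate> pending; // last write wins, so updating
// the same handle twice simply overwrites the cached pose/bounds.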
static const bool gUseOldCode = false;
void QuerySystem::PrunerExt::addToDirtyList(PrunerHandle handle, PxU32 dynamic, const PxTransform& transform, const PxBounds3* userBounds)
{
PxBitMap& dirtyMap = mDirtyMap;
{
if(dirtyMap.size() <= handle)
{
PxU32 size = PxMax<PxU32>(dirtyMap.size()*2, 1024);
const PxU32 minSize = handle+1;
if(minSize>size)
size = minSize*2;
dirtyMap.resize(size);
PX_ASSERT(handle<dirtyMap.size());
PX_ASSERT(!dirtyMap.test(handle));
}
}
if(gUseOldCode)
{
if(!dirtyMap.test(handle))
{
dirtyMap.set(handle);
mDirtyList.pushBack(handle);
}
}
else
{
dirtyMap.set(handle);
mDirtyList.pushBack(handle);
Data& d = mDirtyData.insert();
d.mPose = transform;
if(userBounds)
d.mBounds = *userBounds;
else
d.mBounds.setEmpty();
}
if(!dynamic)
mDirtyStatic = true;
}
void QuerySystem::PrunerExt::removeFromDirtyList(PrunerHandle handle)
{
PxBitMap& dirtyMap = mDirtyMap;
if(gUseOldCode)
{
if(dirtyMap.boundedTest(handle))
{
dirtyMap.reset(handle);
mDirtyList.findAndReplaceWithLast(handle);
}
}
else
{
dirtyMap.boundedReset(handle);
}
// PT: if we remove the object that made us set mDirtyStatic to true, tough luck,
// we don't bother fixing that bool here. It's going to potentially cause an
// unnecessary update of the character controller's caches, which is not a big deal.
}
bool QuerySystem::PrunerExt::processDirtyList(const Adapter& adapter, float inflation)
{
const PxU32 numDirtyList = mDirtyList.size();
if(!numDirtyList)
return false;
if(gUseOldCode)
{
const PrunerHandle* const prunerHandles = mDirtyList.begin();
for(PxU32 i=0; i<numDirtyList; i++)
{
const PrunerHandle handle = prunerHandles[i];
mDirtyMap.reset(handle);
// PT: we compute the new bounds and store them directly in the pruner structure to avoid copies. We delay the updateObjects() call
// to take advantage of batching.
PrunerPayloadData payloadData;
const PrunerPayload& pp = mPruner->getPayloadData(handle, &payloadData);
computeBounds(*payloadData.mBounds, adapter.getGeometry(pp), *payloadData.mTransform, 0.0f, inflation);
}
// PT: batch update happens after the loop instead of once per loop iteration
mPruner->updateObjects(prunerHandles, numDirtyList);
mDirtyList.clear();
}
else
{
// PT: TODO: this stuff is not 100% satisfying, since we do allow the same object to be updated multiple times.
// Would be nice to revisit & improve at some point.
PrunerHandle* prunerHandles = mDirtyList.begin();
PxU32 nbValid = 0;
for(PxU32 i=0; i<numDirtyList; i++)
{
const PrunerHandle handle = prunerHandles[i];
if(mDirtyMap.test(handle))
{
// PT: we compute the new bounds and store them directly in the pruner structure to avoid copies. We delay the updateObjects() call
// to take advantage of batching.
PrunerPayloadData payloadData;
const PrunerPayload& pp = mPruner->getPayloadData(handle, &payloadData);
*payloadData.mTransform = mDirtyData[i].mPose;
if(mDirtyData[i].mBounds.isEmpty())
computeBounds(*payloadData.mBounds, adapter.getGeometry(pp), mDirtyData[i].mPose, 0.0f, inflation);
else
*payloadData.mBounds = mDirtyData[i].mBounds;
prunerHandles[nbValid++] = handle;
}
else
{
// PT: if not set, object has been added to the list then removed
}
}
// PT: batch update happens after the loop instead of once per loop iteration
mPruner->updateObjects(prunerHandles, nbValid);
// PT: have to reset the bits *after* the above loop now. Unclear if clearing the
// whole map would be faster ("it depends" I guess).
while(nbValid--)
{
const PrunerHandle handle = *prunerHandles++;
mDirtyMap.reset(handle);
}
mDirtyList.clear();
mDirtyData.clear();
}
const bool ret = mDirtyStatic;
mDirtyStatic = false;
return ret;
}
///////////////////////////////////////////////////////////////////////////////
static PX_FORCE_INLINE QuerySystem::PrunerExt* checkPrunerIndex(PxU32 prunerIndex, const PxArray<QuerySystem::PrunerExt*>& prunerExt)
{
if(prunerIndex>=prunerExt.size() || !prunerExt[prunerIndex])
{
PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "Invalid pruner index");
return NULL;
}
return prunerExt[prunerIndex];
}
QuerySystem::QuerySystem(PxU64 contextID, float inflation, const Adapter& adapter, bool usesTreeOfPruners) :
mAdapter (adapter),
mTreeOfPruners (NULL),
mContextID (contextID),
mStaticTimestamp (0),
mInflation (inflation),
mPrunerNeedsUpdating (false),
mTimestampNeedsUpdating (false),
mUsesTreeOfPruners (usesTreeOfPruners)
//mBatchUserUpdates (batchUserUpdates)
{
}
QuerySystem::~QuerySystem()
{
PX_DELETE(mTreeOfPruners);
const PxU32 nb = mPrunerExt.size();
for(PxU32 i=0;i<nb;i++)
{
PrunerExt* pe = mPrunerExt[i]; // Can be NULL if the pruner has been removed
PX_DELETE(pe);
}
}
PxU32 QuerySystem::addPruner(Pruner* pruner, PxU32 preallocated)
{
PrunerExt* pe = PX_NEW(PrunerExt)(pruner, preallocated);
PxU32 prunerIndex;
if(mFreePruners.size())
{
prunerIndex = mFreePruners.popBack();
mPrunerExt[prunerIndex] = pe;
}
else
{
prunerIndex = mPrunerExt.size();
mPrunerExt.pushBack(pe);
}
return prunerIndex;
}
void QuerySystem::removePruner(PxU32 prunerIndex)
{
PrunerExt* pe = checkPrunerIndex(prunerIndex, mPrunerExt);
if(!pe)
return;
// PT: it is legal to delete a pruner that still contains objects, but we should still properly update the static timestamp.
if(pe->mNbStatic)
invalidateStaticTimestamp();
PX_DELETE(pe);
mPrunerExt[prunerIndex] = NULL;
mFreePruners.pushBack(prunerIndex);
// We don't bother searching mDirtyPruners since it's going to be cleared next frame
}
void QuerySystem::flushMemory()
{
const PxU32 nb = mPrunerExt.size();
for(PxU32 i=0;i<nb;i++)
{
PrunerExt* pe = mPrunerExt[i]; // Can be NULL if the pruner has been removed
if(pe)
pe->flushMemory();
}
}
ActorShapeData QuerySystem::addPrunerShape(const PrunerPayload& payload, PxU32 prunerIndex, bool dynamic, const PxTransform& transform, const PxBounds3* userBounds)
{
PrunerExt* pe = checkPrunerIndex(prunerIndex, mPrunerExt);
if(!pe)
return INVALID_ACTOR_SHAPE_DATA;
mPrunerNeedsUpdating = true;
if(dynamic)
{
pe->mNbDynamic++;
}
else
{
pe->mNbStatic++;
invalidateStaticTimestamp();
}
PX_ASSERT(pe->mPruner);
const PxBounds3* boundsPtr;
PxBounds3 bounds;
if(userBounds)
{
boundsPtr = userBounds;
}
else
{
computeBounds(bounds, mAdapter.getGeometry(payload), transform, 0.0f, 1.0f + mInflation);
boundsPtr = &bounds;
}
PrunerHandle handle;
pe->mPruner->addObjects(&handle, boundsPtr, &payload, &transform, 1, false);
return createActorShapeData(createPrunerInfo(prunerIndex, dynamic), handle);
}
void QuerySystem::removePrunerShape(ActorShapeData data, PrunerPayloadRemovalCallback* removalCallback)
{
const PrunerInfo info = getPrunerInfo(data);
const PxU32 prunerIndex = getPrunerIndex(info);
PrunerExt* pe = checkPrunerIndex(prunerIndex, mPrunerExt);
if(!pe)
return;
mPrunerNeedsUpdating = true;
const PxU32 dynamic = getDynamic(info);
const PrunerHandle handle = getPrunerHandle(data);
PX_ASSERT(pe->mPruner);
if(dynamic)
{
PX_ASSERT(pe->mNbDynamic);
pe->mNbDynamic--;
}
else
{
PX_ASSERT(pe->mNbStatic);
pe->mNbStatic--;
invalidateStaticTimestamp();
}
//if(mBatchUserUpdates)
pe->removeFromDirtyList(handle);
pe->mPruner->removeObjects(&handle, 1, removalCallback);
}
void QuerySystem::updatePrunerShape(ActorShapeData data, bool immediately, const PxTransform& transform, const PxBounds3* userBounds)
{
const PrunerInfo info = getPrunerInfo(data);
const PxU32 prunerIndex = getPrunerIndex(info);
PrunerExt* pe = checkPrunerIndex(prunerIndex, mPrunerExt);
if(!pe)
return;
mPrunerNeedsUpdating = true;
const PxU32 dynamic = getDynamic(info);
const PrunerHandle handle = getPrunerHandle(data);
PX_ASSERT(pe->mPruner);
Pruner* pruner = pe->mPruner;
if(immediately)
{
if(!dynamic)
invalidateStaticTimestamp();
PrunerPayloadData payloadData;
const PrunerPayload& pp = pruner->getPayloadData(handle, &payloadData);
*payloadData.mTransform = transform;
if(userBounds)
*payloadData.mBounds = *userBounds;
else
computeBounds(*payloadData.mBounds, mAdapter.getGeometry(pp), transform, 0.0f, 1.0f + mInflation);
// PT: TODO: would it be better to pass the bounds & transform directly to this function?
pruner->updateObjects(&handle, 1);
}
else
{
// PT: we don't update the static timestamp immediately, so that users can query the
// old state of the structure without invalidating their caches. This will be resolved
// in processDirtyLists.
if(gUseOldCode)
pruner->setTransform(handle, transform);
// PT: we don't shrink mDirtyList anymore in removePrunerShape so the size of that array can be reused as
// a flag telling us whether we already encountered this pruner or not. If not, we add its index to mDirtyPruners.
// Goal is to avoid processing all pruners in processDirtyLists.
if(!pe->mDirtyList.size())
{
PX_ASSERT(!contains(mDirtyPruners, prunerIndex));
mDirtyPruners.pushBack(prunerIndex);
}
else
{
PX_ASSERT(contains(mDirtyPruners, prunerIndex));
}
pe->addToDirtyList(handle, dynamic, transform, userBounds);
}
}
const PrunerPayload& QuerySystem::getPayloadData(ActorShapeData data, PrunerPayloadData* ppd) const
{
const PrunerInfo info = getPrunerInfo(data);
const PxU32 prunerIndex = getPrunerIndex(info);
PX_ASSERT(checkPrunerIndex(prunerIndex, mPrunerExt));
const PrunerHandle handle = getPrunerHandle(data);
PX_ASSERT(mPrunerExt[prunerIndex]->mPruner);
return mPrunerExt[prunerIndex]->mPruner->getPayloadData(handle, ppd);
}
void QuerySystem::processDirtyLists()
{
PX_PROFILE_ZONE("QuerySystem.processDirtyLists", mContextID);
const PxU32 nbDirtyPruners = mDirtyPruners.size();
if(!nbDirtyPruners)
return;
// must already have acquired writer lock here
const float inflation = 1.0f + mInflation;
bool mustInvalidateStaticTimestamp = false;
for(PxU32 ii=0;ii<nbDirtyPruners;ii++)
{
const PxU32 i = mDirtyPruners[ii];
PrunerExt* pe = mPrunerExt[i]; // Can be NULL if the pruner has been removed
if(pe && pe->processDirtyList(mAdapter, inflation))
mustInvalidateStaticTimestamp = true;
}
if(mustInvalidateStaticTimestamp)
invalidateStaticTimestamp();
mDirtyPruners.clear();
}
void QuerySystem::update(bool buildStep, bool commit)
{
PX_PROFILE_ZONE("QuerySystem::update", mContextID);
if(!buildStep && !commit)
{
//mPrunerNeedsUpdating = true; // PT: removed, why was it here?
return;
}
// flush user modified objects
// if(mBatchUserUpdates)
processDirtyLists();
const PxU32 nb = mPrunerExt.size();
for(PxU32 i=0;i<nb;i++)
{
PrunerExt* pe = mPrunerExt[i]; // Can be NULL if the pruner has been removed
if(!pe)
continue;
Pruner* pruner = pe->mPruner;
if(pruner)
{
if(buildStep && pruner->isDynamic())
static_cast<DynamicPruner*>(pruner)->buildStep(true);
if(commit)
pruner->commit();
}
}
if(commit)
{
if(mUsesTreeOfPruners)
createTreeOfPruners();
}
mPrunerNeedsUpdating = !commit;
}
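// Double-checked locking: commitUpdates() re-tests mPrunerNeedsUpdating under
// mSQLock after the unsynchronized fast-path read, and the PxMemoryBarrier()
// inside ensures the committed pruner state is visible before the flag is cleared.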
void QuerySystem::commitUpdates()
{
PX_PROFILE_ZONE("QuerySystem.commitUpdates", mContextID);
if(mPrunerNeedsUpdating)
{
mSQLock.lock();
if(mPrunerNeedsUpdating)
{
//if(mBatchUserUpdates)
processDirtyLists();
const PxU32 nb = mPrunerExt.size();
for(PxU32 i=0;i<nb;i++)
{
PrunerExt* pe = mPrunerExt[i]; // Can be NULL if the pruner has been removed
if(!pe)
continue;
Pruner* pruner = pe->mPruner;
if(pruner)
pruner->commit();
}
if(mUsesTreeOfPruners)
createTreeOfPruners();
PxMemoryBarrier();
mPrunerNeedsUpdating = false;
}
mSQLock.unlock();
}
}
PxU32 QuerySystem::startCustomBuildstep()
{
PX_PROFILE_ZONE("QuerySystem.startCustomBuildstep", mContextID);
mTimestampNeedsUpdating = false;
return mPrunerExt.size();
}
void QuerySystem::customBuildstep(PxU32 index)
{
PX_PROFILE_ZONE("QuerySystem.customBuildstep", mContextID);
PX_ASSERT(index<mPrunerExt.size());
// PT: TODO: would be better to not schedule the update of removed pruners at all
PrunerExt* pe = mPrunerExt[index]; // Can be NULL if the pruner has been removed
if(!pe)
return;
Pruner* pruner = pe->mPruner;
//void QuerySystem::processDirtyLists()
{
PX_PROFILE_ZONE("QuerySystem.processDirtyLists", mContextID);
// must already have acquired writer lock here
const float inflation = 1.0f + mInflation;
// PT: note that we don't use the mDirtyPruners array here
if(pe->processDirtyList(mAdapter, inflation))
mTimestampNeedsUpdating = true;
}
if(pruner)
{
if(pruner->isDynamic())
static_cast<DynamicPruner*>(pruner)->buildStep(true); // PT: "true" because that parameter was made for PxSceneQuerySystem::sceneQueryBuildStep(), not us
pruner->commit();
}
}
void QuerySystem::finishCustomBuildstep()
{
PX_PROFILE_ZONE("QuerySystem.finishCustomBuildstep", mContextID);
if(mUsesTreeOfPruners)
createTreeOfPruners();
mPrunerNeedsUpdating = false;
if(mTimestampNeedsUpdating)
invalidateStaticTimestamp();
mDirtyPruners.clear();
}
void QuerySystem::sync(PxU32 prunerIndex, const PrunerHandle* handles, const PxU32* boundsIndices, const PxBounds3* bounds, const PxTransform32* transforms, PxU32 count)
{
if(!count)
return;
PrunerExt* pe = checkPrunerIndex(prunerIndex, mPrunerExt);
if(!pe)
return;
Pruner* pruner = pe->mPruner;
if(pruner)
pruner->updateObjects(handles, count, mInflation, boundsIndices, bounds, transforms);
}
///////////////////////////////////////////////////////////////////////////////
namespace
{
struct LocalRaycastCB : PxBVH::RaycastCallback
{
LocalRaycastCB(const PxArray<QuerySystem::PrunerExt*>& pruners, const PrunerFilter* prunerFilter, const PxVec3& origin, const PxVec3& unitDir, PrunerRaycastCallback& cb) :
mPrunerExt(pruners), mPrunerFilter(prunerFilter), mOrigin(origin), mUnitDir(unitDir), mCB(cb) {}
virtual bool reportHit(PxU32 boundsIndex, PxReal& distance)
{
QuerySystem::PrunerExt* pe = mPrunerExt[boundsIndex]; // Can be NULL if the pruner has been removed
if(pe && (!mPrunerFilter || mPrunerFilter->processPruner(boundsIndex)))
{
Pruner* pruner = pe->mPruner;
if(!pruner->raycast(mOrigin, mUnitDir, distance, mCB))
return false;
}
return true;
}
const PxArray<QuerySystem::PrunerExt*>& mPrunerExt;
const PrunerFilter* mPrunerFilter;
const PxVec3& mOrigin;
const PxVec3& mUnitDir;
PrunerRaycastCallback& mCB;
PX_NOCOPY(LocalRaycastCB)
};
struct LocalOverlapCB : PxBVH::OverlapCallback
{
LocalOverlapCB(const PxArray<QuerySystem::PrunerExt*>& pruners, const PrunerFilter* prunerFilter, const ShapeData& queryVolume, PrunerOverlapCallback& cb) :
mPrunerExt(pruners), mPrunerFilter(prunerFilter), mQueryVolume(queryVolume), mCB(cb) {}
virtual bool reportHit(PxU32 boundsIndex)
{
QuerySystem::PrunerExt* pe = mPrunerExt[boundsIndex]; // Can be NULL if the pruner has been removed
if(pe && (!mPrunerFilter || mPrunerFilter->processPruner(boundsIndex)))
{
Pruner* pruner = pe->mPruner;
if(!pruner->overlap(mQueryVolume, mCB))
return false;
}
return true;
}
const PxArray<QuerySystem::PrunerExt*>& mPrunerExt;
const PrunerFilter* mPrunerFilter;
const ShapeData& mQueryVolume;
PrunerOverlapCallback& mCB;
PX_NOCOPY(LocalOverlapCB)
};
struct LocalSweepCB : PxBVH::RaycastCallback
{
LocalSweepCB(const PxArray<QuerySystem::PrunerExt*>& pruners, const PrunerFilter* prunerFilter, const ShapeData& queryVolume, const PxVec3& unitDir, PrunerRaycastCallback& cb) :
mPrunerExt(pruners), mPrunerFilter(prunerFilter), mQueryVolume(queryVolume), mUnitDir(unitDir), mCB(cb) {}
virtual bool reportHit(PxU32 boundsIndex, PxReal& distance)
{
QuerySystem::PrunerExt* pe = mPrunerExt[boundsIndex]; // Can be NULL if the pruner has been removed
if(pe && (!mPrunerFilter || mPrunerFilter->processPruner(boundsIndex)))
{
Pruner* pruner = pe->mPruner;
if(!pruner->sweep(mQueryVolume, mUnitDir, distance, mCB))
return false;
}
return true;
}
const PxArray<QuerySystem::PrunerExt*>& mPrunerExt;
const PrunerFilter* mPrunerFilter;
const ShapeData& mQueryVolume;
const PxVec3& mUnitDir;
PrunerRaycastCallback& mCB;
PX_NOCOPY(LocalSweepCB)
};
}
void QuerySystem::raycast(const PxVec3& origin, const PxVec3& unitDir, float& inOutDistance, PrunerRaycastCallback& cb, const PrunerFilter* prunerFilter) const
{
if(mTreeOfPruners)
{
LocalRaycastCB localCB(mPrunerExt, prunerFilter, origin, unitDir, cb);
mTreeOfPruners->raycast(origin, unitDir, inOutDistance, localCB, PxGeometryQueryFlag::Enum(0));
}
else
{
const PxU32 nb = mPrunerExt.size();
for(PxU32 i=0;i<nb;i++)
{
PrunerExt* pe = mPrunerExt[i]; // Can be NULL if the pruner has been removed
if(!pe)
continue;
if(!prunerFilter || prunerFilter->processPruner(i))
{
Pruner* pruner = pe->mPruner;
if(!pruner->raycast(origin, unitDir, inOutDistance, cb))
return;
}
}
}
}
void QuerySystem::overlap(const ShapeData& queryVolume, PrunerOverlapCallback& cb, const PrunerFilter* prunerFilter) const
{
if(mTreeOfPruners)
{
LocalOverlapCB localCB(mPrunerExt, prunerFilter, queryVolume, cb);
mTreeOfPruners->overlap(queryVolume, localCB, PxGeometryQueryFlag::Enum(0));
}
else
{
const PxU32 nb = mPrunerExt.size();
for(PxU32 i=0;i<nb;i++)
{
PrunerExt* pe = mPrunerExt[i]; // Can be NULL if the pruner has been removed
if(!pe)
continue;
if(!prunerFilter || prunerFilter->processPruner(i))
{
Pruner* pruner = pe->mPruner;
if(!pruner->overlap(queryVolume, cb))
return;
}
}
}
}
void QuerySystem::sweep(const ShapeData& queryVolume, const PxVec3& unitDir, float& inOutDistance, PrunerRaycastCallback& cb, const PrunerFilter* prunerFilter) const
{
if(mTreeOfPruners)
{
LocalSweepCB localCB(mPrunerExt, prunerFilter, queryVolume, unitDir, cb);
mTreeOfPruners->sweep(queryVolume, unitDir, inOutDistance, localCB, PxGeometryQueryFlag::Enum(0));
}
else
{
const PxU32 nb = mPrunerExt.size();
for(PxU32 i=0;i<nb;i++)
{
PrunerExt* pe = mPrunerExt[i]; // Can be NULL if the pruner has been removed
if(!pe)
continue;
if(!prunerFilter || prunerFilter->processPruner(i))
{
Pruner* pruner = pe->mPruner;
if(!pruner->sweep(queryVolume, unitDir, inOutDistance, cb))
return;
}
}
}
}
void QuerySystem::createTreeOfPruners()
{
PX_PROFILE_ZONE("QuerySystem.createTreeOfPruners", mContextID);
PX_DELETE(mTreeOfPruners);
mTreeOfPruners = PX_NEW(BVH)(NULL);
const PxU32 nb = mPrunerExt.size();
PxBounds3* prunerBounds = reinterpret_cast<PxBounds3*>(PxAlloca(sizeof(PxBounds3)*(nb+1)));
PxU32 nbBounds = 0;
for(PxU32 i=0;i<nb;i++)
{
PrunerExt* pe = mPrunerExt[i];
Pruner* pruner = pe->mPruner;
if(pruner)
pruner->getGlobalBounds(prunerBounds[nbBounds++]);
}
mTreeOfPruners->init(nbBounds, NULL, prunerBounds, sizeof(PxBounds3), BVH_SPLATTER_POINTS, 1, 0.01f);
}
| 24,617 | C++ | 28.660241 | 187 | 0.720153 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuInternal.h |
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_INTERNAL_H
#define GU_INTERNAL_H
#include "geometry/PxCapsuleGeometry.h"
#include "geometry/PxBoxGeometry.h"
#include "GuCapsule.h"
#include "foundation/PxTransform.h"
#include "foundation/PxMathUtils.h"
#include "foundation/PxUtilities.h"
#include "foundation/PxMat33.h"
#define GU_EPSILON_SAME_DISTANCE 1e-3f
namespace physx
{
namespace Gu
{
class Box;
// PT: TODO: now that the Gu files are not exposed to users anymore, we should move back capsule-related functions
// to GuCapsule.h, etc
PX_PHYSX_COMMON_API const PxU8* getBoxEdges();
PX_PHYSX_COMMON_API void computeBoxPoints(const PxBounds3& bounds, PxVec3* PX_RESTRICT pts);
void computeBoxAroundCapsule(const Capsule& capsule, Box& box);
PxPlane getPlane(const PxTransform& pose);
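// Note: PhysX capsules are aligned with their local x-axis, which is why the
// half-height vector below uses basis vector 0 of the pose's rotation.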
PX_FORCE_INLINE PxVec3 getCapsuleHalfHeightVector(const PxTransform& transform, const PxCapsuleGeometry& capsuleGeom)
{
return transform.q.getBasisVector0() * capsuleGeom.halfHeight;
}
PX_FORCE_INLINE void getCapsuleSegment(const PxTransform& transform, const PxCapsuleGeometry& capsuleGeom, Gu::Segment& segment)
{
const PxVec3 tmp = getCapsuleHalfHeightVector(transform, capsuleGeom);
segment.p0 = transform.p + tmp;
segment.p1 = transform.p - tmp;
}
PX_FORCE_INLINE void getCapsule(Gu::Capsule& capsule, const PxCapsuleGeometry& capsuleGeom, const PxTransform& pose)
{
getCapsuleSegment(pose, capsuleGeom, capsule);
capsule.radius = capsuleGeom.radius;
}
void computeSweptBox(Gu::Box& box, const PxVec3& extents, const PxVec3& center, const PxMat33& rot, const PxVec3& unitDir, const PxReal distance);
/**
* PT: computes "alignment value" used to select the "best" triangle in case of identical impact distances (for sweeps).
* This simply computes how much a triangle is aligned with a given sweep direction.
* Captured in a function to make sure it is always computed correctly, i.e. working for double-sided triangles.
*
* \param triNormal [in] triangle's normal
* \param unitDir [in] sweep direction (normalized)
* \return alignment value in [-1.0f, 0.0f]. -1.0f for fully aligned, 0.0f for fully orthogonal.
*/
PX_FORCE_INLINE PxReal computeAlignmentValue(const PxVec3& triNormal, const PxVec3& unitDir)
{
PX_ASSERT(triNormal.isNormalized());
// PT: initial dot product gives the angle between the two, with "best" triangles getting a +1 or -1 score
// depending on their winding. We take the absolute value to ignore the impact of winding. We negate the result
// to make the function compatible with the initial code, which assumed single-sided triangles and expected -1
// for best triangles.
return -PxAbs(triNormal.dot(unitDir));
}
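// e.g. a triangle whose normal is parallel or anti-parallel to unitDir scores
// -1.0f (best), while one whose normal is orthogonal to the sweep scores 0.0f.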
/**
* PT: sweeps: determines if a newly touched triangle is "better" than best one so far.
* In this context "better" means either clearly smaller impact distance, or a similar impact
* distance but a normal more aligned with the sweep direction.
*
* \param triImpactDistance [in] new triangle's impact distance
* \param triAlignmentValue [in] new triangle's alignment value (as computed by computeAlignmentValue)
* \param bestImpactDistance [in] current best triangle's impact distance
* \param bestAlignmentValue [in] current best triangle's alignment value (as computed by computeAlignmentValue)
* \param	maxDistance			[in] maximum distance of the query; a hit cannot be farther than this maxDistance
* \return true if new triangle is better
*/
PX_FORCE_INLINE bool keepTriangle( float triImpactDistance, float triAlignmentValue,
float bestImpactDistance, float bestAlignmentValue, float maxDistance)
{
// Reject triangle if further than the maxDistance
if(triImpactDistance > maxDistance)
return false;
// If initial overlap happens, keep the triangle
if(triImpactDistance == 0.0f)
return true;
// tris have "similar" impact distances if the difference is smaller than 2*distEpsilon
float distEpsilon = GU_EPSILON_SAME_DISTANCE; // pick a farther hit within distEpsilon that is more opposing than the previous closest hit
// PT: make it a relative epsilon to make sure it still works with large distances
distEpsilon *= PxMax(1.0f, PxMax(triImpactDistance, bestImpactDistance));
// If new distance is more than epsilon closer than old distance
if(triImpactDistance < bestImpactDistance - distEpsilon)
return true;
// If new distance is no more than epsilon farther than oldDistance and "face is more opposing than previous"
if(triImpactDistance < bestImpactDistance+distEpsilon && triAlignmentValue < bestAlignmentValue)
return true;
// If alignment value is the same, but the new triangle is closer than the best distance
if(triAlignmentValue == bestAlignmentValue && triImpactDistance < bestImpactDistance)
return true;
return false;
}
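// Illustrative sketch (not part of the original source): how computeAlignmentValue() and
// keepTriangle() are meant to cooperate when scanning candidate triangles in a sweep.
// The inputs 'triDists', 'triNormals' and 'nbTris' are hypothetical.
#if 0
	PX_FORCE_INLINE PxU32 selectBestTriangle(const PxReal* triDists, const PxVec3* triNormals, PxU32 nbTris, const PxVec3& unitDir, PxReal maxDistance)
	{
		PxU32 best = 0xffffffff;
		PxReal bestDist = PX_MAX_REAL;
		PxReal bestAlign = 1.0f;	// worse than any real alignment value in [-1, 0]
		for(PxU32 i=0; i<nbTris; i++)
		{
			const PxReal align = computeAlignmentValue(triNormals[i], unitDir);
			if(keepTriangle(triDists[i], align, bestDist, bestAlign, maxDistance))
			{
				best = i;
				bestDist = triDists[i];
				bestAlign = align;
			}
		}
		return best;	// 0xffffffff if no triangle was kept
	}
#endif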
PX_FORCE_INLINE bool keepTriangleBasic(float triImpactDistance, float bestImpactDistance, float maxDistance)
{
// Reject triangle if further than the maxDistance
if(triImpactDistance > maxDistance)
return false;
// If initial overlap happens, keep the triangle
if(triImpactDistance == 0.0f)
return true;
// If new distance is more than epsilon closer than old distance
if(triImpactDistance < bestImpactDistance)
return true;
return false;
}
PX_FORCE_INLINE PxVec3 cross100(const PxVec3& b)
{
return PxVec3(0.0f, -b.z, b.y);
}
PX_FORCE_INLINE PxVec3 cross010(const PxVec3& b)
{
return PxVec3(b.z, 0.0f, -b.x);
}
PX_FORCE_INLINE PxVec3 cross001(const PxVec3& b)
{
return PxVec3(-b.y, b.x, 0.0f);
}
//! Compute point as combination of barycentric coordinates
PX_FORCE_INLINE PxVec3 computeBarycentricPoint(const PxVec3& p0, const PxVec3& p1, const PxVec3& p2, PxReal u, PxReal v)
{
// This seems to confuse the compiler...
// return (1.0f - u - v)*p0 + u*p1 + v*p2;
const PxF32 w = 1.0f - u - v;
return PxVec3(w * p0.x + u * p1.x + v * p2.x, w * p0.y + u * p1.y + v * p2.y, w * p0.z + u * p1.z + v * p2.z);
}
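	// Example: u = v = 0 returns p0, (u,v) = (1,0) returns p1, (0,1) returns p2,
	// and (1/3, 1/3) returns the triangle centroid.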
PX_FORCE_INLINE PxReal computeTetrahedronVolume(const PxVec3& x0, const PxVec3& x1, const PxVec3& x2, const PxVec3& x3, PxMat33& edgeMatrix)
{
const PxVec3 u1 = x1 - x0;
const PxVec3 u2 = x2 - x0;
const PxVec3 u3 = x3 - x0;
edgeMatrix = PxMat33(u1, u2, u3);
const PxReal det = edgeMatrix.getDeterminant();
const PxReal volume = det / 6.0f;
return volume;
}
PX_FORCE_INLINE PxReal computeTetrahedronVolume(const PxVec3& x0, const PxVec3& x1, const PxVec3& x2, const PxVec3& x3)
{
PxMat33 edgeMatrix;
return computeTetrahedronVolume(x0, x1, x2, x3, edgeMatrix);
}
// IndexType should be PxU16 or PxU32.
template<typename IndexType>
PX_FORCE_INLINE PxReal computeTriangleMeshVolume(const PxVec3* vertices, const IndexType* indices,
const PxU32 numTriangles)
{
// See https://twitter.com/keenanisalive/status/1437178786286653445?lang=en
float volume = 0.0f;
for(PxU32 i = 0; i < numTriangles; ++i)
{
PxVec3 v0 = vertices[indices[3*i]];
PxVec3 v1 = vertices[indices[3 * i + 1]];
PxVec3 v2 = vertices[indices[3 * i + 2]];
PxVec3 v0v1 = v0.cross(v1);
volume += v0v1.dot(v2);
}
return volume / 6.0f;
}
// IndexType should be PxU16 or PxU32.
// W in PxVec4 of vertices are ignored.
template <typename IndexType>
PX_FORCE_INLINE PxReal computeTriangleMeshVolume(const PxVec4* vertices, const IndexType* indices,
const PxU32 numTriangles)
{
// See https://twitter.com/keenanisalive/status/1437178786286653445?lang=en
float volume = 0.0f;
for(PxU32 i = 0; i < numTriangles; ++i)
{
PxVec3 v0 = vertices[indices[3 * i]].getXYZ();
PxVec3 v1 = vertices[indices[3 * i + 1]].getXYZ();
PxVec3 v2 = vertices[indices[3 * i + 2]].getXYZ();
PxVec3 v0v1 = v0.cross(v1);
volume += v0v1.dot(v2);
}
return volume / 6.0f;
}
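// Illustrative check (not part of the original source, hypothetical data): the formula above
// computes a signed volume via the divergence theorem, so a closed mesh with outward-facing
// triangles yields a positive result (a unit cube returns 1.0f). Minimal tetrahedron case:
#if 0
	const PxVec3 verts[4] = { PxVec3(0,0,0), PxVec3(1,0,0), PxVec3(0,1,0), PxVec3(0,0,1) };
	const PxU32 tris[12] = { 0,2,1, 0,1,3, 0,3,2, 1,2,3 };			// outward winding
	const PxReal vol = computeTriangleMeshVolume(verts, tris, 4);	// expected: 1/6
#endif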
/*!
Extend an edge at both ends by a fixed length (note: fatCoeff is an absolute length, not a scale factor)
*/
PX_FORCE_INLINE void makeFatEdge(PxVec3& p0, PxVec3& p1, PxReal fatCoeff)
{
PxVec3 delta = p1 - p0;
const PxReal m = delta.magnitude();
if (m > 0.0f)
{
delta *= fatCoeff / m;
p0 -= delta;
p1 += delta;
}
}
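	// Example: p0 = (0,0,0), p1 = (2,0,0) with fatCoeff = 0.1f becomes p0 = (-0.1,0,0),
	// p1 = (2.1,0,0); degenerate edges (m == 0) are left untouched.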
#if 0
/*!
Extend an edge at both ends by a fixed length (note: fatCoeff is an absolute length, not a scale factor)
*/
PX_FORCE_INLINE void makeFatEdge(aos::Vec3V& p0, aos::Vec3V& p1, const aos::FloatVArg fatCoeff)
{
const aos::Vec3V delta = aos::V3Sub(p1, p0);
const aos::FloatV m = aos::V3Length(delta);
const aos::BoolV con = aos::FIsGrtr(m, aos::FZero());
const aos::Vec3V fatDelta = aos::V3Scale(aos::V3ScaleInv(delta, m), fatCoeff);
p0 = aos::V3Sel(con, aos::V3Sub(p0, fatDelta), p0);
p1 = aos::V3Sel(con, aos::V3Add(p1, fatDelta), p1);
}
#endif
PX_FORCE_INLINE PxU32 closestAxis(const PxVec3& v, PxU32& j, PxU32& k)
{
// find largest 2D plane projection
const PxF32 absPx = PxAbs(v.x);
const PxF32 absNy = PxAbs(v.y);
const PxF32 absNz = PxAbs(v.z);
PxU32 m = 0; // x biggest axis
j = 1;
k = 2;
if (absNy > absPx && absNy > absNz)
{
// y biggest
j = 2;
k = 0;
m = 1;
}
else if (absNz > absPx)
{
// z biggest
j = 0;
k = 1;
m = 2;
}
return m;
}
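	// Example: for v = (0, 3, -1) the largest component is y, so the function returns
	// m = 1 with j = 2, k = 0, i.e. the (z, x) plane is the best projection plane.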
PX_FORCE_INLINE bool isAlmostZero(const PxVec3& v)
{
if (PxAbs(v.x) > 1e-6f || PxAbs(v.y) > 1e-6f || PxAbs(v.z) > 1e-6f)
return false;
return true;
}
} // namespace Gu
}
#endif
| 11,197 | C | 34.66242 | 153 | 0.700188 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuIncrementalAABBPrunerCore.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "CmVisualization.h"
#include "GuIncrementalAABBPrunerCore.h"
#include "GuSqInternal.h"
#include "GuIncrementalAABBTree.h"
#include "GuCallbackAdapter.h"
#include "GuAABBTree.h"
#include "GuAABBTreeQuery.h"
#include "GuSphere.h"
#include "GuBox.h"
#include "GuCapsule.h"
#include "GuQuery.h"
using namespace physx;
using namespace Gu;
#define PARANOIA_CHECKS 0
// PT: TODO: this is copied from SqBounds.h, should be either moved to Gu and shared or passed as a user parameter
#define SQ_PRUNER_EPSILON 0.005f
#define SQ_PRUNER_INFLATION (1.0f + SQ_PRUNER_EPSILON) // pruner test shape inflation (not narrow phase shape)
IncrementalAABBPrunerCore::IncrementalAABBPrunerCore(const PruningPool* pool) :
mCurrentTree (1),
mLastTree (0),
mPool (pool)
{
mAABBTree[0].mapping.reserve(256);
mAABBTree[1].mapping.reserve(256);
mChangedLeaves.reserve(32);
}
IncrementalAABBPrunerCore::~IncrementalAABBPrunerCore()
{
release();
}
void IncrementalAABBPrunerCore::release() // this can be called from purge()
{
for(PxU32 i = 0; i < NUM_TREES; i++)
{
PX_DELETE(mAABBTree[i].tree);
mAABBTree[i].mapping.clear();
mAABBTree[i].timeStamp = 0;
}
mCurrentTree = 1;
mLastTree = 0;
}
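// Illustrative usage sketch (not part of the original source): the core keeps two trees,
// the "last" one holding objects from the previous timestamp and the "current" one
// collecting newly added objects. A hypothetical caller could drive it like this:
#if 0
	IncrementalAABBPrunerCore core(pool);		// 'pool' is the owning PruningPool
	core.addObject(poolIndex, timeStamp);		// inserted into the current tree
	core.updateObject(poolIndex);				// updates whichever tree holds the object
	core.removeMarkedObjects(lastTimeStamp);	// releases the whole last tree at once
#endif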
bool IncrementalAABBPrunerCore::addObject(const PoolIndex poolIndex, PxU32 timeStamp)
{
CoreTree& tree = mAABBTree[mCurrentTree];
if(!tree.tree || !tree.tree->getNodes())
{
if(!tree.tree)
tree.tree = PX_NEW(IncrementalAABBTree)();
tree.timeStamp = timeStamp;
}
PX_ASSERT(tree.timeStamp == timeStamp);
mChangedLeaves.clear();
IncrementalAABBTreeNode* node = tree.tree->insert(poolIndex, mPool->getCurrentWorldBoxes(), mChangedLeaves);
updateMapping(tree.mapping, poolIndex, node);
#if PARANOIA_CHECKS
test();
#endif
return true;
}
void IncrementalAABBPrunerCore::updateMapping(IncrementalPrunerMap& mapping, const PoolIndex poolIndex, IncrementalAABBTreeNode* node)
{
	// if some leaf nodes changed, we need to update the mapping
if(!mChangedLeaves.empty())
{
if(node && node->isLeaf())
{
for(PxU32 j = 0; j < node->getNbPrimitives(); j++)
{
const PoolIndex index = node->getPrimitives(NULL)[j];
mapping[index] = node;
}
}
for(PxU32 i = 0; i < mChangedLeaves.size(); i++)
{
IncrementalAABBTreeNode* changedNode = mChangedLeaves[i];
PX_ASSERT(changedNode->isLeaf());
for(PxU32 j = 0; j < changedNode->getNbPrimitives(); j++)
{
const PoolIndex index = changedNode->getPrimitives(NULL)[j];
mapping[index] = changedNode;
}
}
}
else
{
PX_ASSERT(node->isLeaf());
mapping[poolIndex] = node;
}
}
bool IncrementalAABBPrunerCore::removeObject(const PoolIndex poolIndex, const PoolIndex poolRelocatedLastIndex, PxU32& timeStamp)
{
// erase the entry and get the data
IncrementalPrunerMap::Entry entry;
bool foundEntry = true;
const PxU32 treeIndex = mAABBTree[mLastTree].mapping.erase(poolIndex, entry) ? mLastTree : mCurrentTree;
// if it was not found in the last tree look at the current tree
if(treeIndex == mCurrentTree)
foundEntry = mAABBTree[mCurrentTree].mapping.erase(poolIndex, entry);
	// exit, something is wrong here: the entry was not found
// PT: removed assert to avoid crashing all UTs
// PX_ASSERT(foundEntry);
if(!foundEntry)
return false;
// tree must exist
PX_ASSERT(mAABBTree[treeIndex].tree);
CoreTree& tree = mAABBTree[treeIndex];
timeStamp = tree.timeStamp;
	// remove the poolIndex from the tree, update the tree bounds immediately
IncrementalAABBTreeNode* node = tree.tree->remove(entry.second, poolIndex, mPool->getCurrentWorldBoxes());
if(node && node->isLeaf())
{
for(PxU32 j = 0; j < node->getNbPrimitives(); j++)
{
const PoolIndex index = node->getPrimitives(NULL)[j];
tree.mapping[index] = node;
}
}
// nothing to swap, last object, early exit
if(poolIndex == poolRelocatedLastIndex)
{
#if PARANOIA_CHECKS
test();
#endif
return true;
}
	// fix the indices, we need to swap the index with the last index
	// erase the relocated index from the tree it is in
IncrementalPrunerMap::Entry relocatedEntry;
const PxU32 treeRelocatedIndex = mAABBTree[mCurrentTree].mapping.erase(poolRelocatedLastIndex, relocatedEntry) ? mCurrentTree : mLastTree;
foundEntry = true;
if(treeRelocatedIndex == mLastTree)
foundEntry = mAABBTree[mLastTree].mapping.erase(poolRelocatedLastIndex, relocatedEntry);
if(foundEntry)
{
CoreTree& relocatedTree = mAABBTree[treeRelocatedIndex];
// set the new mapping
relocatedTree.mapping[poolIndex] = relocatedEntry.second;
// update the tree indices - swap
relocatedTree.tree->fixupTreeIndices(relocatedEntry.second, poolRelocatedLastIndex, poolIndex);
}
#if PARANOIA_CHECKS
test();
#endif
return true;
}
void IncrementalAABBPrunerCore::swapIndex(const PoolIndex poolIndex, const PoolIndex poolRelocatedLastIndex)
{
	// fix the indices, we need to swap the index with the last index
	// erase the relocated index from the tree it is in
IncrementalPrunerMap::Entry relocatedEntry;
const PxU32 treeRelocatedIndex = mAABBTree[mCurrentTree].mapping.erase(poolRelocatedLastIndex, relocatedEntry) ? mCurrentTree : mLastTree;
bool foundEntry = true;
if(treeRelocatedIndex == mLastTree)
foundEntry = mAABBTree[mLastTree].mapping.erase(poolRelocatedLastIndex, relocatedEntry);
// relocated index is not here
if(!foundEntry)
return;
CoreTree& relocatedTree = mAABBTree[treeRelocatedIndex];
// set the new mapping
relocatedTree.mapping[poolIndex] = relocatedEntry.second;
// update the tree indices - swap
relocatedTree.tree->fixupTreeIndices(relocatedEntry.second, poolRelocatedLastIndex, poolIndex);
}
bool IncrementalAABBPrunerCore::updateObject(const PoolIndex poolIndex)
{
const IncrementalPrunerMap::Entry* entry = mAABBTree[mLastTree].mapping.find(poolIndex);
const PxU32 treeIndex = entry ? mLastTree : mCurrentTree;
if(!entry)
entry = mAABBTree[mCurrentTree].mapping.find(poolIndex);
// we have not found it
PX_ASSERT(entry);
if(!entry)
return false;
CoreTree& tree = mAABBTree[treeIndex];
mChangedLeaves.clear();
IncrementalAABBTreeNode* node = tree.tree->updateFast(entry->second, poolIndex, mPool->getCurrentWorldBoxes(), mChangedLeaves);
if(!mChangedLeaves.empty() || node != entry->second)
updateMapping(tree.mapping, poolIndex, node);
#if PARANOIA_CHECKS
test(false);
#endif
return true;
}
PxU32 IncrementalAABBPrunerCore::removeMarkedObjects(PxU32 timeStamp)
{
	// early exit if no tree exists
if(!mAABBTree[mLastTree].tree || !mAABBTree[mLastTree].tree->getNodes())
{
PX_ASSERT(mAABBTree[mLastTree].mapping.size() == 0);
PX_ASSERT(!mAABBTree[mCurrentTree].tree || mAABBTree[mCurrentTree].timeStamp != timeStamp);
return 0;
}
PX_UNUSED(timeStamp);
PX_ASSERT(timeStamp == mAABBTree[mLastTree].timeStamp);
// release the last tree
CoreTree& tree = mAABBTree[mLastTree];
PxU32 nbObjects = tree.mapping.size();
tree.mapping.clear();
tree.timeStamp = 0;
tree.tree->release();
return nbObjects;
}
bool IncrementalAABBPrunerCore::overlap(const ShapeData& queryVolume, PrunerOverlapCallback& pcbArgName) const
{
bool again = true;
OverlapCallbackAdapter pcb(pcbArgName, *mPool);
for(PxU32 i = 0; i < NUM_TREES; i++)
{
const CoreTree& tree = mAABBTree[i];
if(tree.tree && tree.tree->getNodes() && again)
{
switch(queryVolume.getType())
{
case PxGeometryType::eBOX:
{
if(queryVolume.isOBB())
{
const DefaultOBBAABBTest test(queryVolume);
again = AABBTreeOverlap<true, OBBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, OverlapCallbackAdapter>()(mPool->getCurrentAABBTreeBounds(), *tree.tree, test, pcb);
}
else
{
const DefaultAABBAABBTest test(queryVolume);
again = AABBTreeOverlap<true, AABBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, OverlapCallbackAdapter>()(mPool->getCurrentAABBTreeBounds(), *tree.tree, test, pcb);
}
}
break;
case PxGeometryType::eCAPSULE:
{
const DefaultCapsuleAABBTest test(queryVolume, SQ_PRUNER_INFLATION);
again = AABBTreeOverlap<true, CapsuleAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, OverlapCallbackAdapter>()(mPool->getCurrentAABBTreeBounds(), *tree.tree, test, pcb);
}
break;
case PxGeometryType::eSPHERE:
{
const DefaultSphereAABBTest test(queryVolume);
again = AABBTreeOverlap<true, SphereAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, OverlapCallbackAdapter>()(mPool->getCurrentAABBTreeBounds(), *tree.tree, test, pcb);
}
break;
case PxGeometryType::eCONVEXMESH:
{
const DefaultOBBAABBTest test(queryVolume);
again = AABBTreeOverlap<true, OBBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, OverlapCallbackAdapter>()(mPool->getCurrentAABBTreeBounds(), *tree.tree, test, pcb);
}
break;
default:
PX_ALWAYS_ASSERT_MESSAGE("unsupported overlap query volume geometry type");
}
}
}
return again;
}
bool IncrementalAABBPrunerCore::sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& pcbArgName) const
{
bool again = true;
RaycastCallbackAdapter pcb(pcbArgName, *mPool);
for(PxU32 i = 0; i < NUM_TREES; i++)
{
const CoreTree& tree = mAABBTree[i];
if(tree.tree && tree.tree->getNodes() && again)
{
const PxBounds3& aabb = queryVolume.getPrunerInflatedWorldAABB();
again = AABBTreeRaycast<true, true, IncrementalAABBTree, IncrementalAABBTreeNode, RaycastCallbackAdapter>()(mPool->getCurrentAABBTreeBounds(), *tree.tree, aabb.getCenter(), unitDir, inOutDistance, aabb.getExtents(), pcb);
}
}
return again;
}
bool IncrementalAABBPrunerCore::raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& pcbArgName) const
{
bool again = true;
RaycastCallbackAdapter pcb(pcbArgName, *mPool);
for(PxU32 i = 0; i < NUM_TREES; i++)
{
const CoreTree& tree = mAABBTree[i];
if(tree.tree && tree.tree->getNodes() && again)
{
again = AABBTreeRaycast<false, true, IncrementalAABBTree, IncrementalAABBTreeNode, RaycastCallbackAdapter>()(mPool->getCurrentAABBTreeBounds(), *tree.tree, origin, unitDir, inOutDistance, PxVec3(0.0f), pcb);
}
}
return again;
}
void IncrementalAABBPrunerCore::getGlobalBounds(PxBounds3& bounds) const
{
bounds.setEmpty();
// PT: TODO: optimize this
for(PxU32 i=0; i<NUM_TREES; i++)
{
const CoreTree& tree = mAABBTree[i];
if(tree.tree && tree.tree->getNodes())
{
PxBounds3 tmp;
StoreBounds(tmp, tree.tree->getNodes()->mBVMin, tree.tree->getNodes()->mBVMax);
bounds.include(tmp);
}
}
}
void IncrementalAABBPrunerCore::shiftOrigin(const PxVec3& shift)
{
for(PxU32 i = 0; i < NUM_TREES; i++)
{
if(mAABBTree[i].tree)
{
mAABBTree[i].tree->shiftOrigin(shift);
}
}
}
void IncrementalAABBPrunerCore::visualize(PxRenderOutput& out, PxU32 color) const
{
for(PxU32 i = 0; i < NUM_TREES; i++)
{
visualizeTree(out, color, mAABBTree[i].tree);
// Render added objects not yet in the tree
//out << PxTransform(PxIdentity);
//out << PxU32(PxDebugColor::eARGB_WHITE);
}
}
void IncrementalAABBPrunerCore::test(bool hierarchyCheck)
{
PxU32 maxDepth[NUM_TREES] = { 0, 0 };
for(PxU32 i=0; i<NUM_TREES; i++)
{
if(mAABBTree[i].tree)
{
if(hierarchyCheck)
mAABBTree[i].tree->hierarchyCheck(mPool->getCurrentWorldBoxes());
for(IncrementalPrunerMap::Iterator iter = mAABBTree[i].mapping.getIterator(); !iter.done(); ++iter)
{
mAABBTree[i].tree->checkTreeLeaf(iter->second, iter->first);
const PxU32 depth = mAABBTree[i].tree->getTreeLeafDepth(iter->second);
if(depth > maxDepth[i])
maxDepth[i] = depth;
}
}
}
}
| 13,317 | C++ | 31.169082 | 224 | 0.735601 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuAABBTreeBuildStats.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_AABBTREE_BUILD_STATS_H
#define GU_AABBTREE_BUILD_STATS_H
#include "common/PxPhysXCommonConfig.h"
namespace physx
{
namespace Gu
{
//! Contains AABB-tree build statistics
struct PX_PHYSX_COMMON_API BuildStats
{
BuildStats() : mCount(0), mTotalPrims(0) {}
PxU32 mCount; //!< Number of nodes created
PxU32 mTotalPrims; //!< Total accumulated number of primitives. Should be much higher than the source
//!< number of prims, since it accumulates all prims covered by each node (i.e. internal
//!< nodes too, not just leaf ones)
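							//!< e.g. a complete binary tree over 1024 prims with one prim per leaf
							//!< accumulates 1024 prims on each of its 11 levels, so mTotalPrims ~ 11264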
// PT: everything's public so consider dropping these
PX_FORCE_INLINE void reset() { mCount = mTotalPrims = 0; }
PX_FORCE_INLINE void setCount(PxU32 nb) { mCount = nb; }
PX_FORCE_INLINE void increaseCount(PxU32 nb) { mCount += nb; }
PX_FORCE_INLINE PxU32 getCount() const { return mCount; }
};
} // namespace Gu
}
#endif // GU_AABBTREE_BUILD_STATS_H
| 2,663 | C | 44.152542 | 107 | 0.733383 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuSweepSharedTests.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "geometry/PxConvexMeshGeometry.h"
#include "geometry/PxSphereGeometry.h"
#include "GuSweepTests.h"
#include "GuHeightFieldUtil.h"
#include "CmScaling.h"
#include "GuConvexMesh.h"
#include "GuIntersectionRayPlane.h"
#include "GuVecBox.h"
#include "GuVecCapsule.h"
#include "GuVecConvexHull.h"
#include "GuSweepMTD.h"
#include "GuSweepSphereCapsule.h"
#include "GuSweepCapsuleCapsule.h"
#include "GuSweepTriangleUtils.h"
#include "GuSweepCapsuleTriangle.h"
#include "GuInternal.h"
#include "GuGJKRaycast.h"
using namespace physx;
using namespace Gu;
using namespace Cm;
using namespace physx::aos;
static const PxReal gEpsilon = .01f;
//#define USE_VIRTUAL_GJK
#ifdef USE_VIRTUAL_GJK
static bool virtualGjkRaycastPenetration(const GjkConvex& a, const GjkConvex& b, const aos::Vec3VArg initialDir, const aos::FloatVArg initialLambda, const aos::Vec3VArg s, const aos::Vec3VArg r, aos::FloatV& lambda,
aos::Vec3V& normal, aos::Vec3V& closestA, const PxReal _inflation, const bool initialOverlap)
{
return gjkRaycastPenetration<GjkConvex, GjkConvex >(a, b, initialDir, initialLambda, s, r, lambda, normal, closestA, _inflation, initialOverlap);
}
#endif
static PxU32 computeSweepConvexPlane(
const PxConvexMeshGeometry& convexGeom, ConvexHullData* hullData, const PxU32& nbPolys, const PxTransform& pose,
const PxVec3& impact_, const PxVec3& unitDir)
{
PX_ASSERT(nbPolys);
const PxVec3 impact = impact_ - unitDir * gEpsilon;
const PxVec3 localPoint = pose.transformInv(impact);
const PxVec3 localDir = pose.rotateInv(unitDir);
const FastVertex2ShapeScaling scaling(convexGeom.scale);
PxU32 minIndex = 0;
PxReal minD = PX_MAX_REAL;
for(PxU32 j=0; j<nbPolys; j++)
{
const PxPlane& pl = hullData->mPolygons[j].mPlane;
PxPlane plane;
scaling.transformPlaneToShapeSpace(pl.n, pl.d, plane.n, plane.d);
PxReal d = plane.distance(localPoint);
if(d<0.0f)
continue;
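		// PT: bias the distance by the angle between the plane normal and the sweep direction:
		// planes whose normal opposes the sweep (negative dot) get a slightly smaller distance,
		// so among near-equidistant planes we pick the one facing the sweep the most.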
const PxReal tweak = plane.n.dot(localDir) * gEpsilon;
d += tweak;
if(d<minD)
{
minIndex = j;
minD = d;
}
}
return minIndex;
}
static PX_FORCE_INLINE bool computeFaceIndex(PxGeomSweepHit& sweepHit, const PxHitFlags hitFlags, const PxConvexMeshGeometry& convexGeom, ConvexHullData* hullData, const PxTransform& pose, const PxVec3& unitDir)
{
if(hitFlags & PxHitFlag::eFACE_INDEX)
{
// PT: compute closest polygon using the same tweak as in swept-capsule-vs-mesh
sweepHit.faceIndex = computeSweepConvexPlane(convexGeom, hullData, hullData->mNbPolygons, pose, sweepHit.position, unitDir);
sweepHit.flags |= PxHitFlag::eFACE_INDEX;
}
return true;
}
static PX_FORCE_INLINE bool hasInitialOverlap(PxGeomSweepHit& sweepHit, const PxVec3& unitDir,
const FloatVArg toi,
const Vec3VArg normal, const Vec3VArg closestA,
const PxTransformV& convexPose,
const bool isMtd, const bool impactPointOnTheOtherShape)
{
sweepHit.flags = PxHitFlag::eNORMAL;
const FloatV zero = FZero();
if(FAllGrtrOrEq(zero, toi))
{
//ML: initial overlap
if(isMtd)
{
sweepHit.flags |= PxHitFlag::ePOSITION;
const FloatV length = toi;
const Vec3V worldPointA = convexPose.transform(closestA);
const Vec3V worldNormal = V3Normalize(convexPose.rotate(normal));
if(impactPointOnTheOtherShape)
{
const Vec3V destWorldPointA = V3NegScaleSub(worldNormal, length, worldPointA);
V3StoreU(worldNormal, sweepHit.normal);
V3StoreU(destWorldPointA, sweepHit.position);
}
else
{
const Vec3V destNormal = V3Neg(worldNormal);
V3StoreU(destNormal, sweepHit.normal);
V3StoreU(worldPointA, sweepHit.position);
}
FStore(length, &sweepHit.distance);
}
else
{
sweepHit.distance = 0.0f;
sweepHit.normal = -unitDir;
}
sweepHit.faceIndex = 0xffffffff;
return true;
}
return false;
}
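// Note on the convention above: a non-positive toi flags an initial overlap. With eMTD
// requested, toi is stored as the hit distance (so callers can read it as a signed
// separation value) and 'normal'/'closestA' arrive in the convex pose's local space,
// hence the transforms here.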
///////////////////////////////////////////////// sweepCapsule/Sphere //////////////////////////////////////////////////////
bool sweepCapsule_SphereGeom(GU_CAPSULE_SWEEP_FUNC_PARAMS)
{
PX_UNUSED(threadContext);
PX_UNUSED(capsuleGeom_);
PX_UNUSED(capsulePose_);
PX_ASSERT(geom.getType() == PxGeometryType::eSPHERE);
const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom);
const Sphere sphere(pose.p, sphereGeom.radius+inflation);
if(!sweepSphereCapsule(sphere, lss, -unitDir, distance, sweepHit.distance, sweepHit.position, sweepHit.normal, hitFlags))
return false;
const bool isMtd = hitFlags & PxHitFlag::eMTD;
if(isMtd)
{
sweepHit.flags = PxHitFlag::ePOSITION | PxHitFlag::eNORMAL;
if(sweepHit.distance == 0.f)
{
			// initial overlap
if(lss.p0 == lss.p1)
{
//sphere
return computeSphere_SphereMTD(sphere, Sphere(lss.p0, lss.radius), sweepHit);
}
else
{
//capsule
return computeSphere_CapsuleMTD(sphere, lss, sweepHit);
}
}
}
else
{
if(sweepHit.distance!=0.0f)
sweepHit.flags = PxHitFlag::ePOSITION | PxHitFlag::eNORMAL;
else
sweepHit.flags = PxHitFlag::eNORMAL;
}
return true;
}
bool sweepCapsule_PlaneGeom(GU_CAPSULE_SWEEP_FUNC_PARAMS)
{
PX_UNUSED(threadContext);
PX_UNUSED(capsuleGeom_);
PX_UNUSED(capsulePose_);
PX_ASSERT(geom.getType() == PxGeometryType::ePLANE);
PX_UNUSED(geom);
// const PxPlaneGeometry& planeGeom = static_cast<const PxPlaneGeometry&>(geom);
const PxPlane& worldPlane = getPlane(pose);
const PxF32 capsuleRadius = lss.radius + inflation;
PxU32 index = 0;
PxVec3 pts[2];
PxReal minDp = PX_MAX_REAL;
sweepHit.faceIndex = 0xFFFFffff; // spec says face index is undefined for planes
// Find extreme point on the capsule
	// AP: removed the (lss.p0 == lss.p1) clause because it wasn't properly computing minDp
pts[0] = lss.p0;
pts[1] = lss.p1;
for(PxU32 i=0; i<2; i++)
{
const PxReal dp = pts[i].dot(worldPlane.n);
if(dp<minDp)
{
minDp = dp;
index = i;
}
}
const bool isMtd = hitFlags & PxHitFlag::eMTD;
if(isMtd)
{
//initial overlap with the plane
if(minDp <= capsuleRadius - worldPlane.d)
{
sweepHit.flags = PxHitFlag::eNORMAL| PxHitFlag::ePOSITION;
return computePlane_CapsuleMTD(worldPlane, lss, sweepHit);
}
}
else
{
if(!(hitFlags & PxHitFlag::eASSUME_NO_INITIAL_OVERLAP))
{
// test if the capsule initially overlaps with plane
if(minDp <= capsuleRadius - worldPlane.d)
{
sweepHit.flags = PxHitFlag::eNORMAL;
sweepHit.distance = 0.0f;
sweepHit.normal = -unitDir;
return true;
}
}
}
const PxVec3 ptOnCapsule = pts[index] - worldPlane.n*capsuleRadius;
// Raycast extreme vertex against plane
bool hitPlane = intersectRayPlane(ptOnCapsule, unitDir, worldPlane, sweepHit.distance, &sweepHit.position);
if(hitPlane && sweepHit.distance > 0 && sweepHit.distance <= distance)
{
sweepHit.normal = worldPlane.n;
sweepHit.flags = PxHitFlag::ePOSITION | PxHitFlag::eNORMAL;
return true;
}
return false;
}
bool sweepCapsule_CapsuleGeom(GU_CAPSULE_SWEEP_FUNC_PARAMS)
{
PX_UNUSED(threadContext);
PX_UNUSED(capsuleGeom_);
PX_UNUSED(capsulePose_);
PX_ASSERT(geom.getType() == PxGeometryType::eCAPSULE);
const PxCapsuleGeometry& capsuleGeom = static_cast<const PxCapsuleGeometry&>(geom);
Capsule staticCapsule;
getCapsule(staticCapsule, capsuleGeom, pose);
	staticCapsule.radius += inflation;
const bool isMtd = hitFlags & PxHitFlag::eMTD;
PxU16 outFlags;
if(!sweepCapsuleCapsule(lss, staticCapsule, -unitDir, distance, sweepHit.distance, sweepHit.position, sweepHit.normal, hitFlags, outFlags))
return false;
sweepHit.flags = PxHitFlags(outFlags);
if(sweepHit.distance == 0.0f)
{
//initial overlap
if(isMtd)
{
sweepHit.flags |= PxHitFlag::ePOSITION;
return computeCapsule_CapsuleMTD(lss, staticCapsule, sweepHit);
}
}
return true;
}
bool sweepCapsule_ConvexGeom(GU_CAPSULE_SWEEP_FUNC_PARAMS)
{
PX_UNUSED(threadContext);
	PX_ASSERT(geom.getType() == PxGeometryType::eCONVEXMESH);
	using namespace aos;
const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom);
ConvexMesh* convexMesh = static_cast<ConvexMesh*>(convexGeom.convexMesh);
ConvexHullData* hullData = &convexMesh->getHull();
const Vec3V zeroV = V3Zero();
const FloatV zero = FZero();
const FloatV dist = FLoad(distance);
const Vec3V worldDir = V3LoadU(unitDir);
const PxTransformV capPose = loadTransformU(capsulePose_);
const PxTransformV convexPose = loadTransformU(pose);
const PxMatTransformV aToB(convexPose.transformInv(capPose));
const FloatV capsuleHalfHeight = FLoad(capsuleGeom_.halfHeight);
const FloatV capsuleRadius = FLoad(lss.radius);
const Vec3V vScale = V3LoadU_SafeReadW(convexGeom.scale.scale); // PT: safe because 'rotation' follows 'scale' in PxMeshScale
const QuatV vQuat = QuatVLoadU(&convexGeom.scale.rotation.x);
const CapsuleV capsule(aToB.p, aToB.rotate( V3Scale(V3UnitX(), capsuleHalfHeight)), capsuleRadius);
const ConvexHullV convexHull(hullData, zeroV, vScale, vQuat, convexGeom.scale.isIdentity());
const Vec3V dir = convexPose.rotateInv(V3Neg(V3Scale(worldDir, dist)));
bool isMtd = hitFlags & PxHitFlag::eMTD;
FloatV toi;
Vec3V closestA, normal;//closestA and normal is in the local space of convex hull
const LocalConvex<CapsuleV> convexA(capsule);
const LocalConvex<ConvexHullV> convexB(convexHull);
const Vec3V initialSearchDir = V3Sub(capsule.getCenter(), convexHull.getCenter());
#ifdef USE_VIRTUAL_GJK
if(!virtualGjkRaycastPenetration(convexA, convexB, initialSearchDir, zero, zeroV, dir, toi, normal, closestA, lss.radius + inflation, isMtd))
return false;
#else
if(!gjkRaycastPenetration<LocalConvex<CapsuleV>, LocalConvex<ConvexHullV> >(convexA, convexB, initialSearchDir, zero, zeroV, dir, toi, normal, closestA, lss.radius + inflation, isMtd))
return false;
#endif
if(hasInitialOverlap(sweepHit, unitDir, toi, normal, closestA, convexPose, isMtd, true))
return true;
sweepHit.flags |= PxHitFlag::ePOSITION;
const Vec3V worldPointA = convexPose.transform(closestA);
const FloatV length = FMul(dist, toi);
const Vec3V destNormal = V3Normalize(convexPose.rotate(normal));
const Vec3V destWorldPointA = V3ScaleAdd(worldDir, length, worldPointA);
V3StoreU(destNormal, sweepHit.normal);
V3StoreU(destWorldPointA, sweepHit.position);
FStore(length, &sweepHit.distance);
return computeFaceIndex(sweepHit, hitFlags, convexGeom, hullData, pose, unitDir);
}
///////////////////////////////////////////////// sweepBox //////////////////////////////////////////////////////
bool sweepBox_PlaneGeom(GU_BOX_SWEEP_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::ePLANE);
PX_UNUSED(threadContext);
PX_UNUSED(geom);
PX_UNUSED(boxPose_);
PX_UNUSED(boxGeom_);
// const PxPlaneGeometry& planeGeom = static_cast<const PxPlaneGeometry&>(geom);
sweepHit.faceIndex = 0xFFFFffff; // spec says face index is undefined for planes
PxPlane worldPlane = getPlane(pose);
	worldPlane.d -= inflation;
// Find extreme point on the box
PxVec3 boxPts[8];
box.computeBoxPoints(boxPts);
PxU32 index = 0;
PxReal minDp = PX_MAX_REAL;
for(PxU32 i=0;i<8;i++)
{
const PxReal dp = boxPts[i].dot(worldPlane.n);
if(dp<minDp)
{
minDp = dp;
index = i;
}
}
bool isMtd = hitFlags & PxHitFlag::eMTD;
if(isMtd)
{
// test if box initially overlap with plane
if(minDp <= -worldPlane.d)
{
sweepHit.flags = PxHitFlag::ePOSITION | PxHitFlag::eNORMAL;
//compute Mtd;
return computePlane_BoxMTD(worldPlane, box, sweepHit);
}
}
else
{
if(!(hitFlags & PxHitFlag::eASSUME_NO_INITIAL_OVERLAP))
{
// test if box initially overlap with plane
if(minDp <= -worldPlane.d)
{
sweepHit.flags = PxHitFlag::eNORMAL;
sweepHit.distance = 0.0f;
sweepHit.normal = -unitDir;
return true;
}
}
}
// Raycast extreme vertex against plane
bool hitPlane = intersectRayPlane(boxPts[index], unitDir, worldPlane, sweepHit.distance, &sweepHit.position);
if(hitPlane && sweepHit.distance > 0 && sweepHit.distance <= distance)
{
sweepHit.normal = worldPlane.n;
sweepHit.flags = PxHitFlag::ePOSITION | PxHitFlag::eNORMAL;
return true;
}
return false;
}
bool sweepBox_ConvexGeom(GU_BOX_SWEEP_FUNC_PARAMS)
{
PX_UNUSED(threadContext);
PX_UNUSED(boxGeom_);
using namespace aos;
PX_ASSERT(geom.getType() == PxGeometryType::eCONVEXMESH);
const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom);
ConvexMesh* convexMesh = static_cast<ConvexMesh*>(convexGeom.convexMesh);
ConvexHullData* hullData = &convexMesh->getHull();
const Vec3V zeroV = V3Zero();
const FloatV zero = FZero();
const PxTransformV boxPose = loadTransformU(boxPose_);
const PxTransformV convexPose = loadTransformU(pose);
const PxMatTransformV aToB(convexPose.transformInv(boxPose));
const Vec3V boxExtents = V3LoadU(box.extents);
const Vec3V vScale = V3LoadU_SafeReadW(convexGeom.scale.scale); // PT: safe because 'rotation' follows 'scale' in PxMeshScale
const QuatV vQuat = QuatVLoadU(&convexGeom.scale.rotation.x);
const BoxV boxV(zeroV, boxExtents);
const ConvexHullV convexHull(hullData, zeroV, vScale, vQuat, convexGeom.scale.isIdentity());
const Vec3V worldDir = V3LoadU(unitDir);
const FloatV dist = FLoad(distance);
const Vec3V dir = convexPose.rotateInv(V3Neg(V3Scale(worldDir, dist)));
bool isMtd = hitFlags & PxHitFlag::eMTD;
FloatV toi;
Vec3V closestA, normal;
const RelativeConvex<BoxV> convexA(boxV, aToB);
const LocalConvex<ConvexHullV> convexB(convexHull);
#ifdef USE_VIRTUAL_GJK
if(!virtualGjkRaycastPenetration(convexA, convexB, aToB.p, zero, zeroV, dir, toi, normal, closestA, inflation, isMtd))
return false;
#else
if(!gjkRaycastPenetration<RelativeConvex<BoxV>,LocalConvex<ConvexHullV> >(convexA, convexB, aToB.p, zero, zeroV, dir, toi, normal, closestA, inflation, isMtd))
return false;
#endif
if(hasInitialOverlap(sweepHit, unitDir, toi, normal, closestA, convexPose, isMtd, true))
return true;
sweepHit.flags |= PxHitFlag::ePOSITION;
const Vec3V destNormal = V3Normalize(convexPose.rotate(normal));
const FloatV length = FMul(dist, toi);
const Vec3V worldPointA = convexPose.transform(closestA);
const Vec3V destWorldPointA = V3ScaleAdd(worldDir, length, worldPointA);
V3StoreU(destNormal, sweepHit.normal);
V3StoreU(destWorldPointA, sweepHit.position);
FStore(length, &sweepHit.distance);
return computeFaceIndex(sweepHit, hitFlags, convexGeom, hullData, pose, unitDir);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
bool Gu::sweepCapsuleTriangles(GU_SWEEP_TRIANGLES_FUNC_PARAMS(PxCapsuleGeometry))
{
Capsule capsule;
getCapsule(capsule, geom, pose);
	capsule.radius += inflation;
// Compute swept box
Box capsuleBox;
computeBoxAroundCapsule(capsule, capsuleBox);
BoxPadded sweptBounds;
computeSweptBox(sweptBounds, capsuleBox.extents, capsuleBox.center, capsuleBox.rot, unitDir, distance);
PxVec3 triNormal;
return sweepCapsuleTriangles_Precise(nbTris, triangles, capsule, unitDir, distance, cachedIndex, hit, triNormal, hitFlags, doubleSided, &sweptBounds);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
bool sweepConvex_SphereGeom(GU_CONVEX_SWEEP_FUNC_PARAMS)
{
PX_UNUSED(threadContext);
PX_ASSERT(geom.getType() == PxGeometryType::eSPHERE);
const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom);
ConvexMesh* convexMesh = static_cast<ConvexMesh*>(convexGeom.convexMesh);
ConvexHullData* hullData = &convexMesh->getHull();
const Vec3V zeroV = V3Zero();
const FloatV zero= FZero();
const Vec3V vScale = V3LoadU_SafeReadW(convexGeom.scale.scale); // PT: safe because 'rotation' follows 'scale' in PxMeshScale
const QuatV vQuat = QuatVLoadU(&convexGeom.scale.rotation.x);
const FloatV sphereRadius = FLoad(sphereGeom.radius);
const PxTransformV sphereTransf = loadTransformU(pose);
const PxTransformV convexTransf = loadTransformU(convexPose);
const PxMatTransformV aToB(convexTransf.transformInv(sphereTransf));
const Vec3V worldDir = V3LoadU(unitDir);
const FloatV dist = FLoad(distance);
const Vec3V dir = convexTransf.rotateInv(V3Scale(worldDir, dist));
const ConvexHullV convexHull(hullData, zeroV, vScale, vQuat, convexGeom.scale.isIdentity());
//CapsuleV capsule(zeroV, sphereRadius);
const CapsuleV capsule(aToB.p, sphereRadius);
const bool isMtd = hitFlags & PxHitFlag::eMTD;
FloatV toi;
Vec3V closestA, normal;
const LocalConvex<CapsuleV> convexA(capsule);
const LocalConvex<ConvexHullV> convexB(convexHull);
const Vec3V initialSearchDir = V3Sub(capsule.getCenter(), convexHull.getCenter());
#ifdef USE_VIRTUAL_GJK
if(!virtualGjkRaycastPenetration(convexA, convexB, initialSearchDir, zero, zeroV, dir, toi, normal, closestA, sphereGeom.radius+inflation, isMtd))
return false;
#else
if(!gjkRaycastPenetration<LocalConvex<CapsuleV>, LocalConvex<ConvexHullV> >(convexA, convexB, initialSearchDir, zero, zeroV, dir, toi, normal, closestA, sphereGeom.radius+inflation, isMtd))
return false;
#endif
if(hasInitialOverlap(sweepHit, unitDir, toi, normal, closestA, convexPose, isMtd, false))
return true;
sweepHit.flags |= PxHitFlag::ePOSITION;
const Vec3V destNormal = V3Neg(V3Normalize(convexTransf.rotate(normal)));
const FloatV length = FMul(dist, toi);
const Vec3V destWorldPointA = convexTransf.transform(closestA);
V3StoreU(destNormal, sweepHit.normal);
V3StoreU(destWorldPointA, sweepHit.position);
FStore(length, &sweepHit.distance);
sweepHit.faceIndex = 0xffffffff;
return true;
}
bool sweepConvex_PlaneGeom(GU_CONVEX_SWEEP_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::ePLANE);
PX_UNUSED(hitFlags);
PX_UNUSED(geom);
PX_UNUSED(threadContext);
ConvexMesh* convexMesh = static_cast<ConvexMesh*>(convexGeom.convexMesh);
ConvexHullData* hullData = &convexMesh->getHull();
sweepHit.faceIndex = 0xFFFFffff; // spec says face index is undefined for planes
const PxVec3* PX_RESTRICT hullVertices = hullData->getHullVertices();
PxU32 numHullVertices = hullData->mNbHullVertices;
const bool isMtd = hitFlags & PxHitFlag::eMTD;
const FastVertex2ShapeScaling convexScaling(convexGeom.scale);
PxPlane plane = getPlane(pose);
	plane.d -= inflation;
sweepHit.distance = distance;
bool status = false;
bool initialOverlap = false;
while(numHullVertices--)
{
const PxVec3& vertex = *hullVertices++;
const PxVec3 worldPt = convexPose.transform(convexScaling * vertex);
float t;
PxVec3 pointOnPlane;
if(intersectRayPlane(worldPt, unitDir, plane, t, &pointOnPlane))
{
if(plane.distance(worldPt) <= 0.0f)
{
initialOverlap = true;
break;
//// Convex touches plane
//sweepHit.distance = 0.0f;
//sweepHit.flags = PxHitFlag::eNORMAL;
//sweepHit.normal = -unitDir;
//return true;
}
if(t > 0.0f && t <= sweepHit.distance)
{
sweepHit.distance = t;
sweepHit.flags = PxHitFlag::ePOSITION | PxHitFlag::eNORMAL;
sweepHit.position = pointOnPlane;
sweepHit.normal = plane.n;
status = true;
}
}
}
if(initialOverlap)
{
if(isMtd)
{
sweepHit.flags = PxHitFlag::ePOSITION | PxHitFlag::eNORMAL;
return computePlane_ConvexMTD(plane, convexGeom, convexPose, sweepHit);
}
else
{
sweepHit.distance = 0.0f;
sweepHit.flags = PxHitFlag::eNORMAL;
sweepHit.normal = -unitDir;
return true;
}
}
return status;
}
bool sweepConvex_CapsuleGeom(GU_CONVEX_SWEEP_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::eCAPSULE);
const PxCapsuleGeometry& capsuleGeom = static_cast<const PxCapsuleGeometry&>(geom);
Capsule capsule;
getCapsule(capsule, capsuleGeom, pose);
	// remove PxHitFlag::eFACE_INDEX, it is not needed for this computation
PxHitFlags tempHitFlags = hitFlags;
tempHitFlags &= ~PxHitFlag::eFACE_INDEX;
if(!sweepCapsule_ConvexGeom(convexGeom, convexPose, capsuleGeom, pose, capsule, -unitDir, distance, sweepHit, tempHitFlags, inflation, threadContext))
return false;
if(sweepHit.flags & PxHitFlag::ePOSITION)
sweepHit.position += unitDir * sweepHit.distance;
sweepHit.normal = -sweepHit.normal;
sweepHit.faceIndex = 0xffffffff;
return true;
}
bool sweepConvex_BoxGeom(GU_CONVEX_SWEEP_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::eBOX);
const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom);
Box box;
buildFrom(box, pose.p, boxGeom.halfExtents, pose.q);
	// remove PxHitFlag::eFACE_INDEX, it is not needed for this computation
PxHitFlags tempHitFlags = hitFlags;
tempHitFlags &= ~PxHitFlag::eFACE_INDEX;
if(!sweepBox_ConvexGeom(convexGeom, convexPose, boxGeom, pose, box, -unitDir, distance, sweepHit, tempHitFlags, inflation, threadContext))
return false;
if(sweepHit.flags & PxHitFlag::ePOSITION)
sweepHit.position += unitDir * sweepHit.distance;
sweepHit.normal = -sweepHit.normal;
sweepHit.faceIndex = 0xffffffff;
return true;
}
bool sweepConvex_ConvexGeom(GU_CONVEX_SWEEP_FUNC_PARAMS)
{
PX_UNUSED(threadContext);
using namespace aos;
PX_ASSERT(geom.getType() == PxGeometryType::eCONVEXMESH);
const PxConvexMeshGeometry& otherConvexGeom = static_cast<const PxConvexMeshGeometry&>(geom);
ConvexMesh& otherConvexMesh = *static_cast<ConvexMesh*>(otherConvexGeom.convexMesh);
ConvexMesh* convexMesh = static_cast<ConvexMesh*>(convexGeom.convexMesh);
ConvexHullData* hullData = &convexMesh->getHull();
ConvexHullData* otherHullData = &otherConvexMesh.getHull();
const Vec3V zeroV = V3Zero();
const FloatV zero = FZero();
const Vec3V otherVScale = V3LoadU_SafeReadW(otherConvexGeom.scale.scale); // PT: safe because 'rotation' follows 'scale' in PxMeshScale
const QuatV otherVQuat = QuatVLoadU(&otherConvexGeom.scale.rotation.x);
const Vec3V vScale = V3LoadU_SafeReadW(convexGeom.scale.scale); // PT: safe because 'rotation' follows 'scale' in PxMeshScale
const QuatV vQuat = QuatVLoadU(&convexGeom.scale.rotation.x);
const PxTransformV otherTransf = loadTransformU(pose);
const PxTransformV convexTransf = loadTransformU(convexPose);
const Vec3V worldDir = V3LoadU(unitDir);
const FloatV dist = FLoad(distance);
const Vec3V dir = convexTransf.rotateInv(V3Scale(worldDir, dist));
const PxMatTransformV aToB(convexTransf.transformInv(otherTransf));
const ConvexHullV otherConvexHull(otherHullData, zeroV, otherVScale, otherVQuat, otherConvexGeom.scale.isIdentity());
const ConvexHullV convexHull(hullData, zeroV, vScale, vQuat, convexGeom.scale.isIdentity());
const bool isMtd = hitFlags & PxHitFlag::eMTD;
FloatV toi;
Vec3V closestA, normal;
const RelativeConvex<ConvexHullV> convexA(otherConvexHull, aToB);
const LocalConvex<ConvexHullV> convexB(convexHull);
#ifdef USE_VIRTUAL_GJK
if(!virtualGjkRaycastPenetration(convexA, convexB, aToB.p, zero, zeroV, dir, toi, normal, closestA, inflation, isMtd))
return false;
#else
if(!gjkRaycastPenetration<RelativeConvex<ConvexHullV>, LocalConvex<ConvexHullV> >(convexA, convexB, aToB.p, zero, zeroV, dir, toi, normal, closestA, inflation, isMtd))
return false;
#endif
if(hasInitialOverlap(sweepHit, unitDir, toi, normal, closestA, convexPose, isMtd, false))
return true;
sweepHit.flags |= PxHitFlag::ePOSITION;
const Vec3V worldPointA = convexTransf.transform(closestA);
const Vec3V destNormal = V3Neg(V3Normalize(convexTransf.rotate(normal)));
const FloatV length = FMul(dist, toi);
V3StoreU(destNormal, sweepHit.normal);
V3StoreU(worldPointA, sweepHit.position);
FStore(length, &sweepHit.distance);
return computeFaceIndex(sweepHit, hitFlags, otherConvexGeom, otherHullData, pose, unitDir);
}
| 25,221 | C++ | 32.230566 | 216 | 0.736014 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuIncrementalAABBTree.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_INCREMENTAL_AABB_TREE_H
#define GU_INCREMENTAL_AABB_TREE_H
#include "foundation/PxBounds3.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxHashMap.h"
#include "foundation/PxVecMath.h"
#include "foundation/PxPool.h"
#include "common/PxPhysXCommonConfig.h"
#include "GuAABBTree.h"
#include "GuPrunerTypedef.h"
namespace physx
{
using namespace aos;
namespace Gu
{
struct BVHNode;
class BVH;
#define INCR_NB_OBJECTS_PER_NODE 4
	// tree indices, can change at runtime
struct AABBTreeIndices
{
PX_FORCE_INLINE AABBTreeIndices(PoolIndex index) : nbIndices(1)
{
indices[0] = index;
for(PxU32 i=1; i<INCR_NB_OBJECTS_PER_NODE; i++)
indices[i] = 0;
}
PxU32 nbIndices;
PoolIndex indices[INCR_NB_OBJECTS_PER_NODE];
};
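	// With INCR_NB_OBJECTS_PER_NODE == 4, a leaf stores up to four pool indices inline;
	// inserting a fifth object triggers splitLeafNode() and the leaf becomes an internal node.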
// tree node, has parent information
class IncrementalAABBTreeNode : public PxUserAllocated
{
public:
PX_FORCE_INLINE IncrementalAABBTreeNode() : mParent(NULL)
{
mChilds[0] = NULL;
mChilds[1] = NULL;
}
PX_FORCE_INLINE IncrementalAABBTreeNode(AABBTreeIndices* indices) : mParent(NULL)
{
mIndices = indices;
mChilds[1] = NULL;
}
PX_FORCE_INLINE ~IncrementalAABBTreeNode() {}
PX_FORCE_INLINE PxU32 isLeaf() const { return PxU32(mChilds[1]==0); }
PX_FORCE_INLINE const PxU32* getPrimitives(const PxU32*) const { return &mIndices->indices[0]; }
PX_FORCE_INLINE PxU32* getPrimitives(PxU32*) { return &mIndices->indices[0]; }
PX_FORCE_INLINE PxU32 getNbPrimitives() const { return mIndices->nbIndices; }
PX_FORCE_INLINE PxU32 getPrimitiveIndex() const { return PX_INVALID_U32; }
PX_FORCE_INLINE const IncrementalAABBTreeNode* getPos(const IncrementalAABBTreeNode*) const { return mChilds[0]; }
PX_FORCE_INLINE const IncrementalAABBTreeNode* getNeg(const IncrementalAABBTreeNode*) const { return mChilds[1]; }
PX_FORCE_INLINE IncrementalAABBTreeNode* getPos(IncrementalAABBTreeNode*) { return mChilds[0]; }
PX_FORCE_INLINE IncrementalAABBTreeNode* getNeg(IncrementalAABBTreeNode*) { return mChilds[1]; }
// PT: TODO: these functions are duplicates from the regular AABB tree node
PX_FORCE_INLINE void getAABBCenterExtentsV(physx::aos::Vec3V* center, physx::aos::Vec3V* extents) const
{
const float half = 0.5f;
const FloatV halfV = FLoad(half);
*extents = Vec3V_From_Vec4V((V4Scale(V4Sub(mBVMax, mBVMin), halfV)));
*center = Vec3V_From_Vec4V((V4Scale(V4Add(mBVMax, mBVMin), halfV)));
}
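		// Note: unlike getAABBCenterExtentsV() above, this variant skips the 0.5f scale and
		// returns 2*center and 2*extents - presumably to save a multiply in callers that
		// only need relative comparisons.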
PX_FORCE_INLINE void getAABBCenterExtentsV2(physx::aos::Vec3V* center, physx::aos::Vec3V* extents) const
{
*extents = Vec3V_From_Vec4V((V4Sub(mBVMax, mBVMin)));
*center = Vec3V_From_Vec4V((V4Add(mBVMax, mBVMin)));
}
Vec4V mBVMin; // Global bounding-volume min enclosing all the node-related primitives
Vec4V mBVMax; // Global bounding-volume max enclosing all the node-related primitives
IncrementalAABBTreeNode* mParent; // node parent
union
{
		IncrementalAABBTreeNode*	mChilds[2];		// children of the node if not a leaf
AABBTreeIndices* mIndices; // if leaf, indices information
};
};
struct IncrementalAABBTreeNodePair
{
IncrementalAABBTreeNode mNode0;
IncrementalAABBTreeNode mNode1;
};
typedef PxArray<IncrementalAABBTreeNode*> NodeList;
	// incremental AABB tree, all changes are immediately reflected to the tree
class IncrementalAABBTree : public PxUserAllocated
{
public:
PX_PHYSX_COMMON_API IncrementalAABBTree();
PX_PHYSX_COMMON_API ~IncrementalAABBTree();
// Build the tree for the first time
PX_PHYSX_COMMON_API bool build(const AABBTreeBuildParams& params, PxArray<IncrementalAABBTreeNode*>& mapping);
// insert a new index into the tree
PX_PHYSX_COMMON_API IncrementalAABBTreeNode* insert(const PoolIndex index, const PxBounds3* bounds, NodeList& changedLeaf);
// update the object in the tree - full update insert/remove
PX_PHYSX_COMMON_API IncrementalAABBTreeNode* update(IncrementalAABBTreeNode* node, const PoolIndex index, const PxBounds3* bounds, NodeList& changedLeaf);
	// update the object in the tree, faster method that may unbalance the tree
PX_PHYSX_COMMON_API IncrementalAABBTreeNode* updateFast(IncrementalAABBTreeNode* node, const PoolIndex index, const PxBounds3* bounds, NodeList& changedLeaf);
// remove object from the tree
PX_PHYSX_COMMON_API IncrementalAABBTreeNode* remove(IncrementalAABBTreeNode* node, const PoolIndex index, const PxBounds3* bounds);
// fixup the tree indices, if we swapped the objects in the pruning pool
PX_PHYSX_COMMON_API void fixupTreeIndices(IncrementalAABBTreeNode* node, const PoolIndex index, const PoolIndex newIndex);
// origin shift
PX_PHYSX_COMMON_API void shiftOrigin(const PxVec3& shift);
// get the tree root node
PX_FORCE_INLINE const IncrementalAABBTreeNode* getNodes() const { return mRoot; }
// define this function so we can share the scene query code with regular AABBTree
PX_FORCE_INLINE const PxU32* getIndices() const { return NULL; }
// paranoia checks
PX_PHYSX_COMMON_API void hierarchyCheck(PoolIndex maxIndex, const PxBounds3* bounds);
PX_PHYSX_COMMON_API void hierarchyCheck(const PxBounds3* bounds);
PX_PHYSX_COMMON_API void checkTreeLeaf(IncrementalAABBTreeNode* leaf, PoolIndex h);
PX_PHYSX_COMMON_API PxU32 getTreeLeafDepth(IncrementalAABBTreeNode* leaf);
PX_PHYSX_COMMON_API void release();
PX_PHYSX_COMMON_API void copy(const BVH& bvh, PxArray<IncrementalAABBTreeNode*>& mapping);
private:
// clone the tree from the generic AABB tree that was built
void clone(PxArray<IncrementalAABBTreeNode*>& mapping, const PxU32* indices, IncrementalAABBTreeNode** treeNodes);
void copyNode(IncrementalAABBTreeNode& destNode, const BVHNode& sourceNode, const BVHNode* nodeBase,
IncrementalAABBTreeNode* parent, const PxU32* primitivesBase, PxArray<IncrementalAABBTreeNode*>& mapping);
	// split a leaf node when a newly added object does not fit in
IncrementalAABBTreeNode* splitLeafNode(IncrementalAABBTreeNode* node, const PoolIndex index, const Vec4V& minV, const Vec4V& maxV, const PxBounds3* bounds);
	void rotateTree(IncrementalAABBTreeNode* node, NodeList& changedLeaf, PxU32 largestRotateNode, const PxBounds3* bounds, bool rotateAgain);
void releaseNode(IncrementalAABBTreeNode* node);
PxPool<AABBTreeIndices> mIndicesPool;
PxPool<IncrementalAABBTreeNodePair> mNodesPool;
IncrementalAABBTreeNode* mRoot;
NodeAllocator mNodeAllocator;
};
}
}
#endif
| 8,796 | C | 43.882653 | 166 | 0.701342 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuSecondaryPruner.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuSecondaryPruner.h"
#include "GuBucketPruner.h"
#include "GuIncrementalAABBPrunerCore.h"
//#define USE_DEBUG_PRINTF
#ifdef USE_DEBUG_PRINTF
#include <stdio.h>
#endif
using namespace physx;
using namespace Gu;
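// Both companion pruners below adapt an existing core (BucketPrunerCore or
// IncrementalAABBPrunerCore) to the common CompanionPruner interface; they differ mainly
// in which part of the (payload, handle, poolIndex) identification the underlying core consumes.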
class CompanionPrunerBucket : public CompanionPruner
{
public:
CompanionPrunerBucket() : mPrunerCore(false) {}
virtual ~CompanionPrunerBucket() {}
virtual bool addObject(const PrunerPayload& object, PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform, PxU32 timeStamp, PoolIndex poolIndex)
{
PX_UNUSED(poolIndex);
PX_UNUSED(handle);
return mPrunerCore.addObject(object, worldAABB, transform, timeStamp);
}
virtual bool updateObject(const PrunerPayload& object, PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform, PoolIndex poolIndex)
{
PX_UNUSED(poolIndex);
PX_UNUSED(handle);
return mPrunerCore.updateObject(worldAABB, object, transform);
}
virtual bool removeObject(const PrunerPayload& object, PrunerHandle handle, PxU32 objectIndex, PxU32 swapObjectIndex)
{
PX_UNUSED(objectIndex);
PX_UNUSED(swapObjectIndex);
PX_UNUSED(handle);
PxU32 timeStamp;
return mPrunerCore.removeObject(object, timeStamp);
}
virtual void swapIndex(PxU32 objectIndex, PxU32 swapObjectIndex)
{
PX_UNUSED(objectIndex);
PX_UNUSED(swapObjectIndex);
}
virtual PxU32 removeMarkedObjects(PxU32 timeStamp) { return mPrunerCore.removeMarkedObjects(timeStamp); }
virtual void shiftOrigin(const PxVec3& shift) { mPrunerCore.shiftOrigin(shift); }
virtual void timeStampChange() { }
virtual void build() { mPrunerCore.build(); }
virtual PxU32 getNbObjects() const { return mPrunerCore.getNbObjects(); }
virtual void release() { mPrunerCore.release(); }
virtual void visualize(PxRenderOutput& out, PxU32 color) const { mPrunerCore.visualize(out, color); }
virtual bool raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& prunerCallback) const
{
if(mPrunerCore.getNbObjects())
return mPrunerCore.raycast(origin, unitDir, inOutDistance, prunerCallback);
return true;
}
virtual bool overlap(const ShapeData& queryVolume, PrunerOverlapCallback& prunerCallback) const
{
if(mPrunerCore.getNbObjects())
return mPrunerCore.overlap(queryVolume, prunerCallback);
return true;
}
virtual bool sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& prunerCallback) const
{
if(mPrunerCore.getNbObjects())
return mPrunerCore.sweep(queryVolume, unitDir, inOutDistance, prunerCallback);
return true;
}
virtual void getGlobalBounds(PxBounds3& bounds) const
{
mPrunerCore.getGlobalBounds(bounds);
}
BucketPrunerCore mPrunerCore;
};
class CompanionPrunerIncremental : public CompanionPruner
{
public:
CompanionPrunerIncremental(const PruningPool* pool) : mPrunerCore(pool) {}
virtual ~CompanionPrunerIncremental() {}
virtual bool addObject(const PrunerPayload& object, PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform, PxU32 timeStamp, PoolIndex poolIndex)
{
PX_UNUSED(worldAABB);
PX_UNUSED(transform);
PX_UNUSED(object);
PX_UNUSED(handle);
return mPrunerCore.addObject(poolIndex, timeStamp);
}
virtual bool updateObject(const PrunerPayload& object, PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform, PoolIndex poolIndex)
{
PX_UNUSED(worldAABB);
PX_UNUSED(transform);
PX_UNUSED(object);
PX_UNUSED(handle);
return mPrunerCore.updateObject(poolIndex);
}
virtual bool removeObject(const PrunerPayload& object, PrunerHandle handle, PxU32 objectIndex, PxU32 swapObjectIndex)
{
PX_UNUSED(object);
PX_UNUSED(handle);
PxU32 timeStamp;
return mPrunerCore.removeObject(objectIndex, swapObjectIndex, timeStamp);
}
virtual void swapIndex(PxU32 objectIndex, PxU32 swapObjectIndex)
{
mPrunerCore.swapIndex(objectIndex, swapObjectIndex);
}
virtual PxU32 removeMarkedObjects(PxU32 timeStamp) { return mPrunerCore.removeMarkedObjects(timeStamp); }
virtual void shiftOrigin(const PxVec3& shift) { mPrunerCore.shiftOrigin(shift); }
virtual void timeStampChange() { mPrunerCore.timeStampChange(); }
virtual void build() { mPrunerCore.build(); }
virtual PxU32 getNbObjects() const { return mPrunerCore.getNbObjects(); }
virtual void release() { mPrunerCore.release(); }
virtual void visualize(PxRenderOutput& out, PxU32 color) const { mPrunerCore.visualize(out, color); }
virtual bool raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& prunerCallback) const
{
if(mPrunerCore.getNbObjects())
return mPrunerCore.raycast(origin, unitDir, inOutDistance, prunerCallback);
return true;
}
virtual bool overlap(const ShapeData& queryVolume, PrunerOverlapCallback& prunerCallback) const
{
if(mPrunerCore.getNbObjects())
return mPrunerCore.overlap(queryVolume, prunerCallback);
return true;
}
virtual bool sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& prunerCallback) const
{
if(mPrunerCore.getNbObjects())
return mPrunerCore.sweep(queryVolume, unitDir, inOutDistance, prunerCallback);
return true;
}
virtual void getGlobalBounds(PxBounds3& bounds) const
{
mPrunerCore.getGlobalBounds(bounds);
}
IncrementalAABBPrunerCore mPrunerCore;
};
#define USE_MAVERICK_NODE
#include "GuActorShapeMap.h"
#include "GuBVH.h"
#include "GuAABBTreeNode.h"
#include "GuAABBTreeBuildStats.h"
#include "GuAABBTreeQuery.h"
#include "GuQuery.h"
#ifdef USE_MAVERICK_NODE
#include "GuMaverickNode.h"
#endif
static const bool gUpdateTreeWhenRemovingObject = false;
static const bool gUpdateObjectBoundsWhenRemovingObject = true;
class CompanionPrunerAABBTree : public CompanionPruner
{
enum DirtyFlags
{
NEEDS_REBUILD = (1<<0),
NEEDS_REFIT = (1<<1)
};
public:
CompanionPrunerAABBTree(PxU64 contextID, const PruningPool* pool);
virtual ~CompanionPrunerAABBTree();
virtual bool addObject(const PrunerPayload& object, PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform, PxU32 timeStamp, PoolIndex poolIndex);
virtual bool updateObject(const PrunerPayload& object, PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform, PoolIndex poolIndex);
virtual bool removeObject(const PrunerPayload& object, PrunerHandle handle, PxU32 objectIndex, PxU32 swapObjectIndex);
virtual void swapIndex(PxU32 objectIndex, PxU32 swapObjectIndex);
virtual PxU32 removeMarkedObjects(PxU32 timeStamp);
virtual void shiftOrigin(const PxVec3& shift);
virtual void timeStampChange();
virtual void build();
virtual PxU32 getNbObjects() const;
virtual void release();
virtual void visualize(PxRenderOutput& out, PxU32 color) const;
virtual bool raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& prunerCallback) const;
virtual bool overlap(const ShapeData& queryVolume, PrunerOverlapCallback& prunerCallback) const;
virtual bool sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& prunerCallback) const;
virtual void getGlobalBounds(PxBounds3& bounds) const;
// PT: we have multiple options here, not sure which one is best:
// - use a Gu:BVH
// - use a Gu:AABBTree
// - use a full blown Pruner
// - use/reference the master PruningPool or not
// - use a hashmap
// - use PoolIndex
// - use PrunerHandle
// - somehow return our own local index to caller and use that
//
// The current implementation uses a PxBVH, a reference to the master PruningPool, and PrunerHandles.
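// Editor's note: hedged sketch (editorial, not normative) of the index chain this creates,
// using the same lookups that updateObject()/removeObject() below rely on:
/*
const PxU32 localIndex = mRemap[handle];				// PrunerHandle -> local index
PX_ASSERT(mLocalData[localIndex].mHandle == handle);	// local data points back at the handle
const PoolIndex poolIndex = mPool->getIndex(handle);	// master pool index, for payloads/transforms
*/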
#ifdef USE_MAVERICK_NODE
MaverickNode mMaverick;
#endif
const PruningPool* mPool;
struct LocalData
{
PX_FORCE_INLINE LocalData(PxU32 timestamp, PrunerHandle handle) : mTimestamp(timestamp), mHandle(handle) {}
PxU32 mTimestamp;
PrunerHandle mHandle;
PX_FORCE_INLINE void setRemoved()
{
mTimestamp = 0xffffffff;
mHandle = 0xffffffff;
}
PX_FORCE_INLINE bool isValid(PxU32 lastValidTimestamp) const
{
return mHandle != 0xffffffff && mTimestamp>=lastValidTimestamp;
}
};
PxArray<LocalData> mLocalData;
BVH* mBVH;
PxU32* mRemap; // Pruner handle to local index
PxU32 mMapSize;
PxU32 mDirtyFlags;
PxU32 mLastValidTimestamp;
PX_FORCE_INLINE PxU32 getNbObjectsFast() const { return mLocalData.size(); }
bool addObjectInternal(PrunerHandle handle, PxU32 timeStamp);
void releaseInternal();
void resizeMap(PxU32 index);
};
CompanionPrunerAABBTree::CompanionPrunerAABBTree(PxU64 /*contextID*/, const PruningPool* pool) : mPool(pool),
mBVH (NULL),
mRemap (NULL),
mMapSize (0),
mDirtyFlags (0),
mLastValidTimestamp (0)
{
}
CompanionPrunerAABBTree::~CompanionPrunerAABBTree()
{
releaseInternal();
}
void CompanionPrunerAABBTree::releaseInternal()
{
PX_DELETE(mBVH);
PX_FREE(mRemap);
mMapSize = 0;
mDirtyFlags = 0;
mLastValidTimestamp = 0;
}
void CompanionPrunerAABBTree::resizeMap(PxU32 index)
{
PxU32 size = mMapSize ? mMapSize*2 : 64;
const PxU32 minSize = index+1;
if(minSize>size)
size = minSize*2;
PxU32* items = PX_ALLOCATE(PxU32, size, "Map");
if(mRemap)
PxMemCopy(items, mRemap, mMapSize*sizeof(PxU32));
PxMemSet(items+mMapSize, 0xff, (size-mMapSize)*sizeof(PxU32));
PX_FREE(mRemap);
mRemap = items;
mMapSize = size;
}
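// Editor's note: illustrative trace (editorial) of the growth policy above: starting from an
// empty map, the size first defaults to the 64-entry floor, and since minSize exceeds it, the
// allocation doubles minSize instead; all new slots are initialized to 0xffffffff.
/*
resizeMap(70);	// mMapSize==0 -> size=64; minSize=71>64 -> size=142
PX_ASSERT(mMapSize == 142 && mRemap[70] == 0xffffffff);
*/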
bool CompanionPrunerAABBTree::addObjectInternal(PrunerHandle handle, PxU32 timeStamp)
{
const PxU32 localIndex = getNbObjectsFast();
#ifdef USE_DEBUG_PRINTF
printf("add %d %d to local %d\n", handle, timeStamp, localIndex);
#endif
PX_ASSERT(handle!=0xffffffff);
if(handle>=mMapSize)
resizeMap(handle);
PX_ASSERT(mRemap[handle]==0xffffffff || !mLocalData[mRemap[handle]].isValid(mLastValidTimestamp));
mRemap[handle] = localIndex;
mLocalData.pushBack(LocalData(timeStamp, handle));
PX_DELETE(mBVH);
mDirtyFlags = NEEDS_REBUILD;
// PT: TODO: why didn't we return a secondary pruner handle from here? Could have been stored in the padding bytes of the pruning pool's transform array for example
return true;
}
bool CompanionPrunerAABBTree::addObject(const PrunerPayload& object, PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform, PxU32 timeStamp, PoolIndex poolIndex)
{
PX_UNUSED(object);
PX_UNUSED(worldAABB);
PX_UNUSED(transform);
PX_UNUSED(timeStamp);
PX_UNUSED(poolIndex);
#ifdef USE_MAVERICK_NODE
if(mMaverick.addObject(object, handle, worldAABB, transform, timeStamp))
return true;
PxU32 nbToAdd = mMaverick.mNbFree;
for(PxU32 i=0;i<nbToAdd;i++)
addObjectInternal(mMaverick.mFreeHandles[i], mMaverick.mFreeStamps[i]);
mMaverick.mNbFree = 0;
#endif
return addObjectInternal(handle, timeStamp);
}
bool CompanionPrunerAABBTree::updateObject(const PrunerPayload& object, PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform, PoolIndex poolIndex)
{
PX_UNUSED(object);
PX_UNUSED(worldAABB);
PX_UNUSED(transform);
PX_UNUSED(poolIndex);
PX_UNUSED(handle);
#ifdef USE_MAVERICK_NODE
if(mMaverick.updateObject(handle, worldAABB, transform))
return true;
#endif
// PT: the bounds & transform have already been updated in the source pruning pool.
// We just need to mark the corresponding node for refit.
PX_ASSERT(handle<mMapSize);
const PxU32 localIndex = mRemap[handle];
PX_ASSERT(localIndex<getNbObjectsFast());
PX_ASSERT(localIndex!=0xffffffff);
PX_ASSERT(mLocalData[localIndex].mHandle==handle);
if(mBVH && mBVH->updateBoundsInternal(localIndex, worldAABB))
mDirtyFlags |= NEEDS_REFIT;
return true;
}
bool CompanionPrunerAABBTree::removeObject(const PrunerPayload& object, PrunerHandle handle, PxU32 objectIndex, PxU32 swapObjectIndex)
{
PX_UNUSED(object);
PX_UNUSED(objectIndex);
PX_UNUSED(swapObjectIndex);
PX_UNUSED(handle);
#ifdef USE_MAVERICK_NODE
PxU32 unused;
if(mMaverick.removeObject(handle, unused))
return true;
#endif
PX_ASSERT(handle<mMapSize);
const PxU32 localIndex = mRemap[handle];
PX_ASSERT(localIndex<getNbObjectsFast());
PX_ASSERT(localIndex!=0xffffffff);
PX_ASSERT(mLocalData[localIndex].mHandle==handle);
// PT: technically this is all we need to mark the object as removed. We can then test the handle against 0xffffffff during
// queries and skip the object. This is optimal in terms of remove performance, but not optimal in terms of query performance.
// There are a number of extra steps we could do here:
//
// - invalidate the *object* bounds. This means the tree structure itself doesn't change, it's only the object bounds used in
// leaf nodes that do. Empty bounds for removed objects mean we discard the object before reaching the previously mentioned
// handle test. This does not need an "update map".
//
// - update the number of primitives in the node. In this case we update the contents of a leaf node, which means decreasing
// the number of primitives there and reordering them so that there is no hole in the list. This requires an update map so
// it uses more memory and more CPU time during the remove call. It also has a really, really, really nasty side-effect of
// invalidating the optimization that skips the object bounds test in the traversal code when the number of primitives is 1. (*)
//
// - the next step would be to recompute the *node* bounds, to take into account the fact that one of the bounds involved in
// its computation is now empty. This would avoid visiting the node at all in some queries, so it is probably worth doing
// if we already do the previous step. (It also requires an update map and pretty much visiting the same memory).
//
// - finally the last step would be to then refit the branch involving that node. This is more complicated because it needs
// support for partial refit in the tree, i.e. links to parent nodes etc. If we do that though, the previous step can be
// skipped since the node bounds recomputation will happen automatically as part of the refit procedure. The previous step
// is only useful as a limited refit (limited to a single node) when parent pointers are not available. The previous step
// can also be used as an optimization if the recomputed bounds is the same as the old one, then we can skip the more
// costly refit procedure. In fact this could probably be used as an optimization for the refit loop: if the box is the
// same as before we could break out of the loop. Note that it is possible to skip this last step here because the new
// bounds are guaranteed to be smaller than or equal to the previous bounds. We couldn't skip this part in the "update
// object" codepath for example.
//
// (*) the optimization relies on the fact that the narrow-phase test is roughly as expensive as the AABB test within the
// tree, so it skips it if there is only one primitive in the node. (With multiple primitives it's worth doing the test
// anyway since one AABB test can skip N narrow-phase tests). The nasty bit is that removing an object can suddenly mean
// the AABB test isn't done anymore, and while it isn't a big deal in practice it's enough to break unit tests that don't
// expect that.
#ifdef USE_DEBUG_PRINTF
printf("remove %d %d from %d\n", handle, mLocalData[localIndex].mTimestamp, localIndex);
#endif
mRemap[handle] = 0xffffffff;
mLocalData[localIndex].setRemoved();
if(mBVH)
{
BVHData& data = const_cast<BVHData&>(mBVH->getData());
const PxU32 nbNodes = data.mNbNodes;
PX_UNUSED(nbNodes);
BVHNode* nodes = data.mNodes;
PxU32* indices = data.mIndices;
PxBounds3* bounds = data.mBounds.getBounds();
if(gUpdateObjectBoundsWhenRemovingObject)
{
// Invalidates the object bounds, not always needed
// The node bounds would need recomputing, and the branch refit
bounds[localIndex].minimum = PxVec3(GU_EMPTY_BOUNDS_EXTENTS);
bounds[localIndex].maximum = PxVec3(-GU_EMPTY_BOUNDS_EXTENTS);
}
PxU32* mMapping = data.getUpdateMap();
if(gUpdateTreeWhenRemovingObject && mMapping)
{
// PT: note: the following codepath has only one part (as opposed to the equivalent code in AABBTreeUpdateMap)
// because it operates on our local indices, not on (pruning) pool indices. The difference is that our local
// array can have holes in it for removed objects, while the AABBTree's update code works with the PruningPool
// (no holes).
const PxU32 treeNodeIndex = mMapping[localIndex];
if(treeNodeIndex!=0xffffffff)
{
PX_ASSERT(treeNodeIndex < nbNodes);
PX_ASSERT(nodes[treeNodeIndex].isLeaf());
BVHNode* node = nodes + treeNodeIndex;
const PxU32 nbPrims = node->getNbRuntimePrimitives();
PX_ASSERT(nbPrims < 16);
// retrieve the primitives pointer
PxU32* primitives = node->getPrimitives(indices);
PX_ASSERT(primitives);
// PT: look for desired local index in the leaf
bool foundIt = false;
for(PxU32 i=0;i<nbPrims;i++)
{
PX_ASSERT(mMapping[primitives[i]] == treeNodeIndex); // PT: all primitives should point to the same leaf node
if(localIndex == primitives[i])
{
foundIt = true;
const PxU32 last = nbPrims-1;
node->setNbRunTimePrimitives(last);
primitives[i] = 0xffffffff; // Mark primitive index as invalid in the node
mMapping[localIndex] = 0xffffffff; // invalidate the node index for pool 0
// PT: swap within the leaf node. No need to update the mapping since they should all point
// to the same tree node anyway.
if(last!=i)
PxSwap(primitives[i], primitives[last]);
// PT: breaking here means we couldn't reuse that loop to update the node bounds
break;
}
}
PX_ASSERT(foundIt);
PX_UNUSED(foundIt);
}
}
}
return true;
}
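// Editor's note: a minimal standalone sketch (editorial, slightly simplified versus the code
// above) of the leaf-node swap-remove: the removed primitive is swapped with the last one so
// the shrunk primitive list stays contiguous, and no other mapping entries need updating.
/*
PxU32 prims[3] = { 4, 7, 9 };	// hypothetical leaf content (local indices)
PxU32 nbPrims = 3;
// remove local index 7:
for(PxU32 i=0; i<nbPrims; i++)
{
	if(prims[i]==7)
	{
		PxSwap(prims[i], prims[--nbPrims]);	// live range of the leaf now holds { 4, 9 }
		break;
	}
}
*/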
void CompanionPrunerAABBTree::swapIndex(PxU32 objectIndex, PxU32 swapObjectIndex)
{
PX_UNUSED(objectIndex);
PX_UNUSED(swapObjectIndex);
}
PxU32 CompanionPrunerAABBTree::removeMarkedObjects(PxU32 timeStamp)
{
#ifdef USE_DEBUG_PRINTF
printf("removeMarkedObjects %d\n", timeStamp);
#endif
PX_UNUSED(timeStamp);
//printf("removeMarkedObjects %d\n", timeStamp);
mLastValidTimestamp = timeStamp+1;
// PT: TODO: consider updating our local data as well here but is it worth it?
if(0)
{
const PxU32 nbObjects = getNbObjectsFast();
for(PxU32 i=0;i<nbObjects;i++)
{
LocalData& localData = mLocalData[i];
if(localData.mTimestamp==timeStamp)
{
localData.setRemoved();
}
}
}
#ifdef USE_MAVERICK_NODE
mMaverick.removeMarkedObjects(timeStamp);
#endif
return 0;
}
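// Editor's note: hedged illustration (editorial) of the lazy removal above: rather than
// touching every LocalData entry, bumping mLastValidTimestamp makes isValid() filter out
// anything stamped at or before 'timeStamp' during queries and the next rebuild.
/*
LocalData d(timeStamp, someHandle);	// 'someHandle' is a placeholder
PX_ASSERT(!d.isValid(timeStamp+1));	// d.mTimestamp < mLastValidTimestamp => skipped
*/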
void CompanionPrunerAABBTree::shiftOrigin(const PxVec3& shift)
{
if(mBVH)
{
BVHData& data = const_cast<BVHData&>(mBVH->getData());
PxU32 nbNodes = data.mNbNodes;
BVHNode* nodes = data.mNodes;
while(nbNodes--)
{
nodes->mBV.minimum -= shift;
nodes->mBV.maximum -= shift;
nodes++;
}
PxU32 nbObjects = getNbObjectsFast();
PxBounds3* bounds = data.mBounds.getBounds();
while(nbObjects--)
{
if(!bounds->isEmpty())
{
bounds->minimum -= shift;
bounds->maximum -= shift;
}
bounds++;
}
}
#ifdef USE_MAVERICK_NODE
mMaverick.shiftOrigin(shift);
#endif
}
void CompanionPrunerAABBTree::timeStampChange()
{
}
void CompanionPrunerAABBTree::build()
{
if(!mDirtyFlags) // PT: necessary, extended bucket pruner calls this without checking first
return;
const PxU32 needsRebuild = mDirtyFlags & NEEDS_REBUILD;
const PxU32 needsRefit = mDirtyFlags & NEEDS_REFIT;
mDirtyFlags = 0;
// PT: we want fast build for this one
const PxU32 numPrimsPerLeaf = 15;
if(needsRebuild)
{
PX_DELETE(mBVH);
PxU32 nbObjects = getNbObjectsFast();
if(!nbObjects)
return;
if(1)
{
// PT: you know what, forget it, just rebuild the whole map
PX_FREE(mRemap);
PxU32* newRemap = PX_ALLOCATE(PxU32, mMapSize, "Map");
PxMemSet(newRemap, 0xff, mMapSize*sizeof(PxU32));
mRemap = newRemap;
PxU32 offset = 0;
PxU32 nb = nbObjects;
while(nb--)
{
if(!mLocalData[offset].isValid(mLastValidTimestamp))
{
if(0 && mLocalData[offset].mHandle!=0xffffffff)
{
//PX_ASSERT(mRemap[mLocalData[offset].mHandle]==offset);
mRemap[mLocalData[offset].mHandle] = 0xffffffff;
}
// This object has been removed, plug the hole
const LocalData& movedData = mLocalData[--nbObjects];
if(movedData.isValid(mLastValidTimestamp))
{
#ifdef USE_DEBUG_PRINTF
printf("move %d %d from %d to %d\n", movedData.mHandle, movedData.mTimestamp, nbObjects, offset);
if(movedData.mHandle==22)
{
int stop = 1;
(void)stop;
}
#endif
//PX_ASSERT(mRemap[movedData.mHandle]==nbObjects);
//mRemap[movedData.mHandle] = offset;
mRemap[movedData.mHandle] = offset;
}
#ifdef USE_DEBUG_PRINTF
else
printf("skip remap %d %d from %d to %d\n", movedData.mHandle, movedData.mTimestamp, nbObjects, offset);
#endif
mLocalData[offset] = movedData;
}
else
{
mRemap[mLocalData[offset].mHandle] = offset;
offset++;
}
}
nbObjects = offset;
mLocalData.forceSize_Unsafe(offset);
if(!nbObjects)
return;
}
if(1)
{
AABBTreeBounds bounds;
bounds.init(nbObjects);
// PT: TODO: inflation?
const PxBounds3* currentBounds = mPool->getCurrentWorldBoxes();
PxBounds3* dst = bounds.getBounds();
for(PxU32 i=0; i<nbObjects; i++)
{
const LocalData& localData = mLocalData[i];
const PoolIndex poolIndex = mPool->getIndex(localData.mHandle);
dst[i] = currentBounds[poolIndex];
}
mBVH = PX_NEW(BVH)(NULL);
bool status = mBVH->init(nbObjects, &bounds, NULL, 0, BVH_SPLATTER_POINTS, numPrimsPerLeaf, 0.0f);
PX_ASSERT(status);
PX_UNUSED(status);
}
{
BVHData& data = const_cast<BVHData&>(mBVH->getData());
data.createUpdateMap(getNbObjectsFast());
}
return;
}
if(needsRefit && mBVH)
{
BVHData& data = const_cast<BVHData&>(mBVH->getData());
data.refitMarkedNodes(data.mBounds.getBounds());
}
}
PxU32 CompanionPrunerAABBTree::getNbObjects() const
{
PxU32 nb = getNbObjectsFast();
#ifdef USE_MAVERICK_NODE
nb += mMaverick.getNbPrimitives();
#endif
return nb;
}
void CompanionPrunerAABBTree::release()
{
releaseInternal();
}
void CompanionPrunerAABBTree::visualize(PxRenderOutput& out, PxU32 color) const
{
visualizeTree(out, color, mBVH);
}
namespace
{
struct BVHTree
{
PX_FORCE_INLINE BVHTree(const BVHData& data) : mRootNode(data.mNodes), mIndices(data.mIndices) {}
const BVHNode* getNodes() const { return mRootNode; }
const PxU32* getIndices() const { return mIndices; }
const BVHNode* mRootNode;
const PxU32* mIndices;
};
struct RaycastAdapter
{
RaycastAdapter(const CompanionPrunerAABBTree& owner, PrunerRaycastCallback& cb, PxU32 lastValidTimestamp) : mOwner(owner), mCallback(cb), mLastValidTimestamp(lastValidTimestamp), mAbort(false) {}
PX_FORCE_INLINE bool invoke(PxReal& distance, PxU32 index)
{
if(!mOwner.mLocalData[index].isValid(mLastValidTimestamp))
return true; // PT: object has been removed, tree data hasn't been updated accordingly
const PxU32 handle = mOwner.mLocalData[index].mHandle;
// if(gUpdateTreeWhenRemovingObject)
{
PX_ASSERT(handle!=0xffffffff);
}
/* else
{
if(handle==0xffffffff)
{
// PT: object has been removed, tree data hasn't been updated accordingly
return true;
}
}*/
const PoolIndex poolIndex = mOwner.mPool->getIndex(handle);
const PxTransform* currentTransforms = mOwner.mPool->getTransforms();
const PrunerPayload* currentPayloads = mOwner.mPool->getObjects();
if(mAbort || !mCallback.invoke(distance, poolIndex, currentPayloads, currentTransforms))
{
mAbort = true;
return false;
}
return true;
}
const CompanionPrunerAABBTree& mOwner;
PrunerRaycastCallback& mCallback;
const PxU32 mLastValidTimestamp;
bool mAbort;
PX_NOCOPY(RaycastAdapter)
};
struct OverlapAdapter
{
OverlapAdapter(const CompanionPrunerAABBTree& owner, PrunerOverlapCallback& cb, PxU32 lastValidTimestamp) : mOwner(owner), mCallback(cb), mLastValidTimestamp(lastValidTimestamp), mAbort(false) {}
PX_FORCE_INLINE bool invoke(PxU32 index)
{
if(!mOwner.mLocalData[index].isValid(mLastValidTimestamp))
return true; // PT: object has been removed, tree data hasn't been updated accordingly
const PxU32 handle = mOwner.mLocalData[index].mHandle;
PX_ASSERT(handle!=0xffffffff);
const PoolIndex poolIndex = mOwner.mPool->getIndex(handle);
const PxTransform* currentTransforms = mOwner.mPool->getTransforms();
const PrunerPayload* currentPayloads = mOwner.mPool->getObjects();
if(mAbort || !mCallback.invoke(poolIndex, currentPayloads, currentTransforms))
{
mAbort = true;
return false;
}
return true;
}
const CompanionPrunerAABBTree& mOwner;
PrunerOverlapCallback& mCallback;
const PxU32 mLastValidTimestamp;
bool mAbort;
PX_NOCOPY(OverlapAdapter)
};
#ifdef USE_MAVERICK_NODE
struct MaverickRaycastAdapter
{
MaverickRaycastAdapter(const MaverickNode& owner, PrunerRaycastCallback& cb) : mOwner(owner), mCallback(cb), mAbort(false) {}
PX_FORCE_INLINE bool invoke(PxReal& distance, PxU32 index)
{
if(mAbort || !mCallback.invoke(distance, index, mOwner.mFreeObjects, mOwner.mFreeTransforms))
{
mAbort = true;
return false;
}
return true;
}
const MaverickNode& mOwner;
PrunerRaycastCallback& mCallback;
bool mAbort;
PX_NOCOPY(MaverickRaycastAdapter)
};
struct MaverickOverlapAdapter
{
MaverickOverlapAdapter(const MaverickNode& owner, PrunerOverlapCallback& cb) : mOwner(owner), mCallback(cb), mAbort(false) {}
PX_FORCE_INLINE bool invoke(PxU32 index)
{
if(mAbort || !mCallback.invoke(index, mOwner.mFreeObjects, mOwner.mFreeTransforms))
{
mAbort = true;
return false;
}
return true;
}
const MaverickNode& mOwner;
PrunerOverlapCallback& mCallback;
bool mAbort;
PX_NOCOPY(MaverickOverlapAdapter)
};
#endif
}
bool CompanionPrunerAABBTree::raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& prunerCallback) const
{
PX_UNUSED(origin);
PX_UNUSED(unitDir);
PX_UNUSED(inOutDistance);
PX_UNUSED(prunerCallback);
PX_ASSERT(!mDirtyFlags);
// if(mDirtyFlags)
// const_cast<CompanionPrunerAABBTree*>(this)->build();
#ifdef USE_MAVERICK_NODE
{
MaverickRaycastAdapter ra(mMaverick, prunerCallback);
Gu::RayAABBTest test(origin*2.0f, unitDir*2.0f, inOutDistance, PxVec3(0.0f));
if(!doLeafTest<false, true, MaverickNode, MaverickRaycastAdapter>(&mMaverick, test, mMaverick.mFreeBounds, NULL, inOutDistance, ra))
return false;
}
#endif
if(mBVH)
{
RaycastAdapter ra(*this, prunerCallback, mLastValidTimestamp);
return AABBTreeRaycast<false, true, BVHTree, BVHNode, RaycastAdapter>()(mBVH->getData().mBounds, BVHTree(mBVH->getData()), origin, unitDir, inOutDistance, PxVec3(0.0f), ra);
}
return true;
}
// PT: TODO: this is copied from SqBounds.h, should be either moved to Gu and shared or passed as a user parameter
#define SQ_PRUNER_EPSILON 0.005f
#define SQ_PRUNER_INFLATION (1.0f + SQ_PRUNER_EPSILON) // pruner test shape inflation (not narrow phase shape)
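// Editor's note: worked example (editorial, values from the defines above): with
// SQ_PRUNER_EPSILON = 0.005f, a capsule of radius 1.0 is tested against the tree as if its
// radius were 1.005. The inflation applies to the pruner test shape only, not to narrow phase.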
bool CompanionPrunerAABBTree::overlap(const ShapeData& queryVolume, PrunerOverlapCallback& prunerCallback) const
{
PX_UNUSED(queryVolume);
PX_UNUSED(prunerCallback);
PX_ASSERT(!mDirtyFlags);
// if(mDirtyFlags)
// const_cast<CompanionPrunerAABBTree*>(this)->build();
#ifdef USE_MAVERICK_NODE
{
MaverickOverlapAdapter ra(mMaverick, prunerCallback);
switch(queryVolume.getType())
{
case PxGeometryType::eBOX:
{
if(queryVolume.isOBB())
{
const DefaultOBBAABBTest test(queryVolume);
if(!doOverlapLeafTest<true, OBBAABBTest, MaverickNode, MaverickOverlapAdapter>(test, &mMaverick, mMaverick.mFreeBounds, NULL, ra))
return false;
}
else
{
const DefaultAABBAABBTest test(queryVolume);
if(!doOverlapLeafTest<true, AABBAABBTest, MaverickNode, MaverickOverlapAdapter>(test, &mMaverick, mMaverick.mFreeBounds, NULL, ra))
return false;
}
}
break;
case PxGeometryType::eCAPSULE:
{
const DefaultCapsuleAABBTest test(queryVolume, SQ_PRUNER_INFLATION);
if(!doOverlapLeafTest<true, CapsuleAABBTest, MaverickNode, MaverickOverlapAdapter>(test, &mMaverick, mMaverick.mFreeBounds, NULL, ra))
return false;
}
break;
case PxGeometryType::eSPHERE:
{
const DefaultSphereAABBTest test(queryVolume);
if(!doOverlapLeafTest<true, SphereAABBTest, MaverickNode, MaverickOverlapAdapter>(test, &mMaverick, mMaverick.mFreeBounds, NULL, ra))
return false;
}
break;
case PxGeometryType::eCONVEXMESH:
{
const DefaultOBBAABBTest test(queryVolume);
if(!doOverlapLeafTest<true, OBBAABBTest, MaverickNode, MaverickOverlapAdapter>(test, &mMaverick, mMaverick.mFreeBounds, NULL, ra))
return false;
}
break;
default:
PX_ALWAYS_ASSERT_MESSAGE("unsupported overlap query volume geometry type");
}
}
#endif
if(mBVH)
{
OverlapAdapter ra(*this, prunerCallback, mLastValidTimestamp);
switch(queryVolume.getType())
{
case PxGeometryType::eBOX:
{
if(queryVolume.isOBB())
{
const DefaultOBBAABBTest test(queryVolume);
return AABBTreeOverlap<true, OBBAABBTest, BVHTree, BVHNode, OverlapAdapter>()(mBVH->getData().mBounds, BVHTree(mBVH->getData()), test, ra);
}
else
{
const DefaultAABBAABBTest test(queryVolume);
return AABBTreeOverlap<true, AABBAABBTest, BVHTree, BVHNode, OverlapAdapter>()(mBVH->getData().mBounds, BVHTree(mBVH->getData()), test, ra);
}
}
case PxGeometryType::eCAPSULE:
{
const DefaultCapsuleAABBTest test(queryVolume, SQ_PRUNER_INFLATION);
//const DefaultCapsuleAABBTest test(queryVolume, 1.0f);
return AABBTreeOverlap<true, CapsuleAABBTest, BVHTree, BVHNode, OverlapAdapter>()(mBVH->getData().mBounds, BVHTree(mBVH->getData()), test, ra);
}
case PxGeometryType::eSPHERE:
{
const DefaultSphereAABBTest test(queryVolume);
return AABBTreeOverlap<true, SphereAABBTest, BVHTree, BVHNode, OverlapAdapter>()(mBVH->getData().mBounds, BVHTree(mBVH->getData()), test, ra);
}
case PxGeometryType::eCONVEXMESH:
{
const DefaultOBBAABBTest test(queryVolume);
return AABBTreeOverlap<true, OBBAABBTest, BVHTree, BVHNode, OverlapAdapter>()(mBVH->getData().mBounds, BVHTree(mBVH->getData()), test, ra);
}
default:
PX_ALWAYS_ASSERT_MESSAGE("unsupported overlap query volume geometry type");
}
}
return true;
}
bool CompanionPrunerAABBTree::sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& prunerCallback) const
{
PX_UNUSED(queryVolume);
PX_UNUSED(unitDir);
PX_UNUSED(inOutDistance);
PX_UNUSED(prunerCallback);
PX_ASSERT(!mDirtyFlags);
// if(mDirtyFlags)
// const_cast<CompanionPrunerAABBTree*>(this)->build();
#ifdef USE_MAVERICK_NODE
{
MaverickRaycastAdapter ra(mMaverick, prunerCallback);
const PxBounds3& aabb = queryVolume.getPrunerInflatedWorldAABB();
Gu::RayAABBTest test(aabb.getCenter()*2.0f, unitDir*2.0f, inOutDistance, aabb.getExtents());
if(!doLeafTest<true, true, MaverickNode, MaverickRaycastAdapter>(&mMaverick, test, mMaverick.mFreeBounds, NULL, inOutDistance, ra))
return false;
}
#endif
if(mBVH)
{
RaycastAdapter ra(*this, prunerCallback, mLastValidTimestamp);
const PxBounds3& aabb = queryVolume.getPrunerInflatedWorldAABB();
return AABBTreeRaycast<true, true, BVHTree, BVHNode, RaycastAdapter>()(mBVH->getData().mBounds, BVHTree(mBVH->getData()), aabb.getCenter(), unitDir, inOutDistance, aabb.getExtents(), ra);
}
return true;
}
class PxBounds3Padded : public PxBounds3
{
public:
PX_FORCE_INLINE PxBounds3Padded() {}
PX_FORCE_INLINE ~PxBounds3Padded() {}
PxU32 padding;
};
void CompanionPrunerAABBTree::getGlobalBounds(PxBounds3& bounds) const
{
PxBounds3Padded tmp;
if(mBVH)
{
tmp.minimum = mBVH->getNodes()->mBV.minimum;
tmp.maximum = mBVH->getNodes()->mBV.maximum;
}
else
tmp.setEmpty();
Vec4V minV = V4LoadU(&tmp.minimum.x);
Vec4V maxV = V4LoadU(&tmp.maximum.x);
#ifdef USE_MAVERICK_NODE
{
PxU32 nbFree = mMaverick.mNbFree;
if(nbFree)
{
const PxBounds3* freeBounds = mMaverick.mFreeBounds;
while(nbFree--)
{
minV = V4Min(minV, V4LoadU(&freeBounds->minimum.x));
maxV = V4Max(maxV, V4LoadU(&freeBounds->maximum.x));
freeBounds++;
}
}
}
#endif
StoreBounds(bounds, minV, maxV);
}
CompanionPruner* physx::Gu::createCompanionPruner(PxU64 contextID, CompanionPrunerType type, const PruningPool* pool)
{
if(0)
// return NULL;
return PX_NEW(CompanionPrunerAABBTree)(contextID, pool);
//return PX_NEW(CompanionPrunerBucket);
// return PX_NEW(CompanionPrunerIncremental)(pool);
PX_UNUSED(contextID);
switch(type)
{
case COMPANION_PRUNER_NONE: return NULL;
case COMPANION_PRUNER_BUCKET: return PX_NEW(CompanionPrunerBucket);
case COMPANION_PRUNER_INCREMENTAL: return PX_NEW(CompanionPrunerIncremental)(pool);
case COMPANION_PRUNER_AABB_TREE: return PX_NEW(CompanionPrunerAABBTree)(contextID, pool);
}
return NULL;
}
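// Editor's note: minimal usage sketch (editorial; ownership conventions assumed, not confirmed
// by this file). The factory can return NULL (COMPANION_PRUNER_NONE), so callers must cope:
/*
CompanionPruner* companion = createCompanionPruner(contextID, COMPANION_PRUNER_AABB_TREE, &pool);
if(companion)
{
	companion->build();	// flush any dirty state before querying
	// ... raycast()/overlap()/sweep() ...
	companion->release();
	PX_DELETE(companion);
}
*/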
| 35,587 | C++ | 32.197761 | 197 | 0.723242 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuSweepMTD.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_SWEEP_MTD_H
#define GU_SWEEP_MTD_H
namespace physx
{
class PxConvexMeshGeometry;
class PxTriangleMeshGeometry;
class PxGeometry;
class PxHeightFieldGeometry;
namespace Gu
{
class Sphere;
class Capsule;
bool computeCapsule_TriangleMeshMTD(const PxTriangleMeshGeometry& triMeshGeom, const PxTransform& pose, Gu::CapsuleV& capsuleV, PxReal inflatedRadius, bool isDoubleSided, PxGeomSweepHit& hit);
bool computeCapsule_HeightFieldMTD(const PxHeightFieldGeometry& heightFieldGeom, const PxTransform& pose, Gu::CapsuleV& capsuleV, PxReal inflatedRadius, bool isDoubleSided, PxGeomSweepHit& hit);
bool computeBox_TriangleMeshMTD(const PxTriangleMeshGeometry& triMeshGeom, const PxTransform& pose, const Gu::Box& box, const PxTransform& boxTransform, PxReal inflation,
bool isDoubleSided, PxGeomSweepHit& hit);
bool computeBox_HeightFieldMTD( const PxHeightFieldGeometry& heightFieldGeom, const PxTransform& pose, const Gu::Box& box, const PxTransform& boxTransform, PxReal inflation, bool isDoubleSided, PxGeomSweepHit& hit);
bool computeConvex_TriangleMeshMTD( const PxTriangleMeshGeometry& triMeshGeom, const PxTransform& pose, const PxConvexMeshGeometry& convexGeom, const PxTransform& convexTransform, PxReal inflation,
bool isDoubleSided, PxGeomSweepHit& hit);
bool computeConvex_HeightFieldMTD( const PxHeightFieldGeometry& heightFieldGeom, const PxTransform& pose, const PxConvexMeshGeometry& convexGeom, const PxTransform& convexTransform, PxReal inflation, bool isDoubleSided, PxGeomSweepHit& hit);
bool computeSphere_SphereMTD(const Sphere& sphere0, const Sphere& sphere1, PxGeomSweepHit& hit);
bool computeSphere_CapsuleMTD(const Sphere& sphere, const Capsule& capsule, PxGeomSweepHit& hit);
bool computeCapsule_CapsuleMTD(const Capsule& capsule0, const Capsule& capsule1, PxGeomSweepHit& hit);
bool computePlane_CapsuleMTD(const PxPlane& plane, const Capsule& capsule, PxGeomSweepHit& hit);
bool computePlane_BoxMTD(const PxPlane& plane, const Box& box, PxGeomSweepHit& hit);
bool computePlane_ConvexMTD(const PxPlane& plane, const PxConvexMeshGeometry& convexGeom, const PxTransform& convexPose, PxGeomSweepHit& hit);
// PT: wrapper just to avoid duplicating these lines.
PX_FORCE_INLINE void setupSweepHitForMTD(PxGeomSweepHit& sweepHit, bool hasContacts, const PxVec3& unitDir)
{
sweepHit.flags = PxHitFlag::eNORMAL | PxHitFlag::eFACE_INDEX;
if(!hasContacts)
{
sweepHit.distance = 0.0f;
sweepHit.normal = -unitDir;
}
else
{
// ML: touching contact. We need to overwrite the normal with the negative of the sweep direction
if(sweepHit.distance == 0.0f && sweepHit.normal.isZero())
sweepHit.normal = -unitDir;
sweepHit.flags |= PxHitFlag::ePOSITION;
}
}
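// Editor's note: hedged usage sketch (editorial) of the wrapper above. A typical MTD query
// runs one of the compute*MTD functions, then normalizes the hit flags/normal in one place:
/*
PxGeomSweepHit hit;
const bool hasContacts = computeSphere_CapsuleMTD(sphere, capsule, hit);
setupSweepHitForMTD(hit, hasContacts, unitDir);	// unitDir: the sweep direction used by the caller
*/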
}
}
#endif
| 4,469 | C | 48.120879 | 242 | 0.780712 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuAABBPruner.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_AABB_PRUNER_H
#define GU_AABB_PRUNER_H
#include "common/PxPhysXCommonConfig.h"
#include "GuExtendedBucketPruner.h"
#include "GuSqInternal.h"
#include "GuPruningPool.h"
#include "GuAABBTree.h"
#include "GuAABBTreeUpdateMap.h"
#include "GuAABBTreeBuildStats.h"
namespace physx
{
namespace Gu
{
// PT: we build the new tree over a number of frames/states, in order to limit perf spikes in 'updatePruningTrees'.
// The states are as follows:
//
// BUILD_NOT_STARTED (1 frame, AABBPruner):
//
// This is the initial state, before the new (AABBTree) build even starts. In this frame/state, we perform the AABBPruner-related
// memory allocations:
// - the new AABB tree is allocated
// - the array of cached bounding boxes is allocated and filled
//
// BUILD_INIT (1 frame, AABBTree):
//
// This is the first frame in which the new tree gets built. It deserves its own special state since various things happen in the
// first frame, that do not happen in subsequent frames. Basically most initial AABBTree-related allocations happen here (but no
// build step per se).
//
// BUILD_IN_PROGRESS (N frames, AABBTree):
//
// This is the core build function, actually building the tree. This should be mostly allocation-free, except here and there when
// building non-complete trees, and during the last call when the tree is finally built.
//
// BUILD_NEW_MAPPING (1 frame, AABBPruner):
//
// After the new AABBTree is built, we recreate an AABBTreeUpdateMap for the new tree, and use it to invalidate nodes whose objects
// have been removed during the build.
//
// We need to do that before doing a full refit in the next stage/frame. If we don't do that, the refit code will fetch a wrong box,
// that may very well belong to an entirely new object.
//
// Note that this mapping/update map (mNewTreeMap) is temporary, and only needed for the next stage.
//
// BUILD_FULL_REFIT (1 frame, AABBPruner):
//
// Once the new update map is available, we fully refit the new tree. AABBs of moved objects get updated. AABBs of removed objects
// become empty.
//
// BUILD_LAST_FRAME (1 frame, AABBPruner):
//
// This is an artificial frame used to delay the tree switching code. The switch happens as soon as we reach the BUILD_FINISHED
// state, but we don't want to execute BUILD_FULL_REFIT and the switch in the same frame. This extra BUILD_LAST_FRAME stage buys
// us one frame, i.e. we have one frame in which we do BUILD_FULL_REFIT, and in the next frame we'll do both BUILD_LAST_FRAME /
// BUILD_FINISHED / the switch.
//
// BUILD_FINISHED (1 frame, AABBPruner):
//
// Several things happen in this 'finalization' frame/stage:
// - We switch the trees (old one is deleted, cached boxes are deleted, new tree pointer is set up)
// - A new (final) update map is created (mTreeMap). The map is used to invalidate objects that may have been removed during
// the BUILD_NEW_MAPPING and BUILD_FULL_REFIT frames. The nodes containing these removed objects are marked for refit.
// - Nodes containing objects that have moved during the BUILD_NEW_MAPPING and BUILD_FULL_REFIT frames are marked for refit.
// - We do a partial refit on the new tree, to take these final changes into account. This small partial refit is usually much
// cheaper than the full refit we previously performed here.
// - We remove old objects from the bucket pruner
//
enum BuildStatus
{
BUILD_NOT_STARTED,
BUILD_INIT,
BUILD_IN_PROGRESS,
BUILD_NEW_MAPPING,
BUILD_FULL_REFIT,
BUILD_LAST_FRAME,
BUILD_FINISHED,
BUILD_FORCE_DWORD = 0xffffffff
};
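// Editor's note: a condensed sketch (editorial, not the actual control flow) of how the stages
// described above advance, one state per frame except BUILD_IN_PROGRESS:
/*
switch(mProgress)
{
	case BUILD_NOT_STARTED:	// allocate new tree + cached boxes			-> BUILD_INIT
	case BUILD_INIT:		// first AABBTree-related allocations		-> BUILD_IN_PROGRESS
	case BUILD_IN_PROGRESS:	// N incremental build steps, stay until done	-> BUILD_NEW_MAPPING
	case BUILD_NEW_MAPPING:	// create mNewTreeMap, invalidate removed	-> BUILD_FULL_REFIT
	case BUILD_FULL_REFIT:	// refit the whole new tree					-> BUILD_LAST_FRAME
	case BUILD_LAST_FRAME:	// buffer frame before the switch			-> BUILD_FINISHED
	case BUILD_FINISHED:	// swap trees, build mTreeMap, partial refit, prune bucket pruner
	default: break;
}
*/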
// This class implements the Pruner interface for internal SQ use with some additional specialized functions
// The underlying data structure is a binary AABB tree
// AABBPruner supports insertions, removals and updates for dynamic objects
// The tree is either entirely rebuilt in a single frame (static pruner) or progressively rebuilt over multiple frames (dynamic pruner)
// The rebuild happens on a copy of the tree
// the copy is then swapped with current tree at the time commit() is called (only if mBuildState is BUILD_FINISHED),
// otherwise commit() will perform a refit operation applying any pending changes to the current tree
// While the tree is being rebuilt a temporary data structure (BucketPruner) is also kept in sync and used to speed up
// queries on updated objects that are not yet in either old or new tree.
// The requirements on the order of calls:
// commit() is required to be called before any queries to apply modifications
// queries can be issued on multiple threads after commit is called
// commit, buildStep, add/remove/update have to be called from the same thread or otherwise strictly serialized by external code
// and cannot be issued while a query is running
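// Editor's note: hedged call-order sketch (editorial; method names follow the Pruner API as
// assumed here, not verified against GuSqInternal.h):
/*
AABBPruner pruner(true, contextID, COMPANION_PRUNER_INCREMENTAL);	// incremental rebuild mode
// ... add/remove/update objects from a single thread ...
pruner.commit();	// mandatory: applies pending changes (refit or tree swap)
// ... queries may now run, potentially from multiple threads ...
pruner.buildStep();	// per-frame progressive rebuild work, serialized with the updates
*/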
class AABBPruner : public DynamicPruner
{
PX_NOCOPY(AABBPruner)
public:
PX_PHYSX_COMMON_API AABBPruner(bool incrementalRebuild, PxU64 contextID, CompanionPrunerType cpType, BVHBuildStrategy buildStrategy=BVH_SPLATTER_POINTS, PxU32 nbObjectsPerNode=4); // true is equivalent to former dynamic pruner
virtual ~AABBPruner();
// BasePruner
DECLARE_BASE_PRUNER_API
//~BasePruner
// Pruner
DECLARE_PRUNER_API_COMMON
virtual bool isDynamic() const { return mIncrementalRebuild; }
//~Pruner
// DynamicPruner
virtual void setRebuildRateHint(PxU32 nbStepsForRebuild); // Besides the actual rebuild steps, 3 additional steps are needed.
virtual bool buildStep(bool synchronousCall = true); // returns true if finished
virtual bool prepareBuild(); // returns true if new tree is needed
//~DynamicPruner
// direct access for test code
PX_FORCE_INLINE PxU32 getNbAddedObjects() const { return mBucketPruner.getNbObjects(); }
PX_FORCE_INLINE const AABBTree* getAABBTree() const { PX_ASSERT(!mUncommittedChanges); return mAABBTree; }
PX_FORCE_INLINE AABBTree* getAABBTree() { PX_ASSERT(!mUncommittedChanges); return mAABBTree; }
PX_FORCE_INLINE void setAABBTree(AABBTree* tree) { mAABBTree = tree; }
PX_FORCE_INLINE const AABBTree* hasAABBTree() const { return mAABBTree; }
PX_FORCE_INLINE BuildStatus getBuildStatus() const { return mProgress; }
// local functions
// private:
NodeAllocator mNodeAllocator;
AABBTree* mAABBTree; // current active tree
AABBTreeBuildParams mBuilder; // this class deals with the details of the actual tree building
BuildStats mBuildStats;
// tree with build in progress, assigned to mAABBTree in commit, when mProgress is BUILD_FINISHED
// created in buildStep(), BUILD_NOT_STARTED
// This is non-null when there is a tree rebuild going on in progress
// and thus also indicates that we have to start saving the fixups
AABBTree* mNewTree;
// during rebuild the pool might change so we need a copy of boxes for the tree build
AABBTreeBounds mCachedBoxes;
PxU32 mNbCachedBoxes;
// incremented in commit(), serves as a progress counter for rebuild
PxU32 mNbCalls;
// PT: incremented each time we start building a new tree (i.e. effectively identifies a given tree)
// Timestamp is passed to bucket pruner to mark objects added there, linking them to a specific tree.
// When switching to the new tree, timestamp is used to remove old objects (now in the new tree) from
// the bucket pruner.
PxU32 mTimeStamp;
// this pruner is used for queries on objects that are not in the current tree yet
// includes both the objects in the tree being rebuilt and all the objects added later
ExtendedBucketPruner mBucketPruner;
BuildStatus mProgress; // current state of second tree build progress
// Fraction (as in 1/Nth) of the total number of primitives
// that should be processed per step by the AABB builder
// so if this value is 1, all primitives will be rebuilt, 2 => 1/2 of primitives per step etc.
// see also mNbCalls, mNbCalls varies from 0 to mRebuildRateHint-1
PxU32 mRebuildRateHint;
// Estimate for how much work has to be done to rebuild the tree.
PxU32 mTotalWorkUnits;
// Term to correct the work unit estimate if the rebuild rate is not matched
PxI32 mAdaptiveRebuildTerm;
const PxU32 mNbObjectsPerNode;
const BVHBuildStrategy mBuildStrategy;
PruningPool mPool; // Pool of AABBs
// maps pruning pool indices to aabb tree indices
// maps to INVALID_NODE_ID if the pool entry was removed or "pool index is outside input domain"
// The map is the inverse of the tree mapping: (node[map[poolID]].primitive == poolID)
// So:
// treeNodeIndex = mTreeMap.operator[](poolIndex)
// aabbTree->treeNodes[treeNodeIndex].primitives[0] == poolIndex
AABBTreeUpdateMap mTreeMap;
// Temporary update map, see BuildStatus notes above for details
AABBTreeUpdateMap mNewTreeMap;
// This is only set once in the constructor and is equivalent to isDynamicTree
// if it set to false then a 1-shot rebuild is performed in commit()
// bucket pruner is only used with incremental rebuild
const bool mIncrementalRebuild;
// A rebuild can be triggered even when the Pruner is not dirty
// mUncommittedChanges is set to true in add, remove, update and buildStep
// mUncommittedChanges is set to false in commit
// mUncommittedChanges has to be false (commit() has to be called) in order to run a query as defined by the
// Pruner higher level API
// mUncommittedChanges is not set to true in add, when pruning structure is provided. Scene query shapes
// are merged to current AABB tree directly
bool mUncommittedChanges;
// A new AABB tree is built if an object was added, removed or updated
// Changing objects during a build will trigger another rebuild right afterwards
// this is set to true if a new tree has to be created again after the current rebuild is done
bool mNeedsNewTree;
// This struct is used to record modifications made to the pruner state
// while a tree is building in the background
// this is so we can apply the modifications to the tree at the time of completion
// the recorded fixup information is: removedIndex (in ::remove()) and
// lastIndexMoved which is the last index in the pruner array
// (since the way we remove from PruningPool is by swapping last into removed slot,
// we need to apply a fixup so that it syncs up that operation in the new tree)
struct NewTreeFixup
{
PX_FORCE_INLINE NewTreeFixup(PxU32 removedIndex_, PxU32 relocatedLastIndex_)
: removedIndex(removedIndex_), relocatedLastIndex(relocatedLastIndex_) {}
PxU32 removedIndex;
PxU32 relocatedLastIndex;
};
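// Editor's note: worked example (editorial) of the fixup described above: if pool index r is
// removed while the background build runs, the pool swaps its last object (index n-1) into
// slot r, and the pending tree must replay that move when it completes:
/*
mNewTreeFixups.pushBack(NewTreeFixup(r, n-1));	// r, n-1: placeholders for the two indices
*/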
PxArray<NewTreeFixup> mNewTreeFixups;
PxArray<PoolIndex> mToRefit;
// Internal methods
bool fullRebuildAABBTree(); // full rebuild function, used with static pruner mode
void release();
void refitUpdatedAndRemoved();
void updateBucketPruner();
};
}
}
#endif
| 12,838 | C | 47.449056 | 233 | 0.73337 |