NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/tet/ExtRemesher.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. #include "ExtRemesher.h" #include "ExtBVH.h" #include "ExtMarchingCubesTable.h" #include "foundation/PxSort.h" #include "GuIntersectionTriangleBox.h" #include "GuBox.h" namespace physx { namespace Ext { // ------------------------------------------------------------------------------------- void Remesher::clear() { cells.clear(); vertices.clear(); normals.clear(); triIds.clear(); triNeighbors.clear(); } #define HASH_SIZE 170111 // ------------------------------------------------------------------------------------- PX_FORCE_INLINE static PxU32 hash(PxI32 xi, PxI32 yi, PxI32 zi) { PxU32 h = (xi * 92837111) ^ (yi * 689287499) ^ (zi * 283923481); return h % HASH_SIZE; } // ------------------------------------------------------------------------------------- void Remesher::addCell(PxI32 xi, PxI32 yi, PxI32 zi) { Cell c; c.init(xi, yi, zi); PxU32 h = hash(xi, yi, zi); c.next = firstCell[h]; firstCell[h] = PxI32(cells.size()); cells.pushBack(c); } // ------------------------------------------------------------------------------------- PxI32 Remesher::getCellNr(PxI32 xi, PxI32 yi, PxI32 zi) const { PxU32 h = hash(xi, yi, zi); PxI32 nr = firstCell[h]; while (nr >= 0) { const Cell& c = cells[nr]; if (c.xi == xi && c.yi == yi && c.zi == zi) return nr; nr = c.next; } return -1; } // ------------------------------------------------------------------------------------- PX_FORCE_INLINE bool Remesher::cellExists(PxI32 xi, PxI32 yi, PxI32 zi) const { return getCellNr(xi, yi, zi) >= 0; } // ------------------------------------------------------------------------------------- void Remesher::remesh(const PxArray<PxVec3>& inputVerts, const PxArray<PxU32>& inputTriIds, PxU32 resolution, PxArray<PxU32> *vertexMap) { remesh(inputVerts.begin(), inputVerts.size(), inputTriIds.begin(), inputTriIds.size(), resolution, vertexMap); } void Remesher::remesh(const PxVec3* inputVerts, PxU32 nbVertices, const PxU32* inputTriIds, PxU32 nbTriangleIndices, PxU32 resolution, PxArray<PxU32> *vertexMap) { clear(); 
PxBounds3 meshBounds; meshBounds.setEmpty(); for (PxU32 i = 0; i < nbVertices; i++) meshBounds.include(inputVerts[i]); PxVec3 dims = meshBounds.getDimensions(); float spacing = PxMax(dims.x, PxMax(dims.y, dims.z)) / resolution; meshBounds.fattenFast(3.0f * spacing); PxU32 numTris = nbTriangleIndices / 3; PxBounds3 triBounds, cellBounds; Gu::BoxPadded box; box.rot = PxMat33(PxIdentity); firstCell.clear(); firstCell.resize(HASH_SIZE, -1); // create sparse overlapping cells for (PxU32 i = 0; i < numTris; i++) { const PxVec3& p0 = inputVerts[inputTriIds[3 * i]]; const PxVec3& p1 = inputVerts[inputTriIds[3 * i + 1]]; const PxVec3& p2 = inputVerts[inputTriIds[3 * i + 2]]; triBounds.setEmpty(); triBounds.include(p0); triBounds.include(p1); triBounds.include(p2); PxI32 x0 = PxI32(PxFloor((triBounds.minimum.x - meshBounds.minimum.x) / spacing)); PxI32 y0 = PxI32(PxFloor((triBounds.minimum.y - meshBounds.minimum.y) / spacing)); PxI32 z0 = PxI32(PxFloor((triBounds.minimum.z - meshBounds.minimum.z) / spacing)); PxI32 x1 = PxI32(PxFloor((triBounds.maximum.x - meshBounds.minimum.x) / spacing)) + 1; PxI32 y1 = PxI32(PxFloor((triBounds.maximum.y - meshBounds.minimum.y) / spacing)) + 1; PxI32 z1 = PxI32(PxFloor((triBounds.maximum.z - meshBounds.minimum.z) / spacing)) + 1; for (PxI32 xi = x0; xi <= x1; xi++) { for (PxI32 yi = y0; yi <= y1; yi++) { for (PxI32 zi = z0; zi <= z1; zi++) { cellBounds.minimum.x = meshBounds.minimum.x + xi * spacing; cellBounds.minimum.y = meshBounds.minimum.y + yi * spacing; cellBounds.minimum.z = meshBounds.minimum.z + zi * spacing; cellBounds.maximum = cellBounds.minimum + PxVec3(spacing, spacing, spacing); cellBounds.fattenFast(1e-5f); box.center = cellBounds.getCenter(); box.extents = cellBounds.getExtents(); if (!Gu::intersectTriangleBox(box, p0, p1, p2)) continue; if (!cellExists(xi, yi, zi)) addCell(xi, yi, zi); } } } } // using marching cubes to create boundaries vertices.clear(); cellOfVertex.clear(); triIds.clear(); PxI32 edgeVertId[12]; PxVec3 cornerPos[8]; int cornerVoxelNr[8]; for (PxI32 i = 0; i < PxI32(cells.size()); i++) { Cell& c = cells[i]; // we need to handle a 2 x 2 x 2 block of cells to cover the boundary for (PxI32 dx = 0; dx < 2; dx++) { for (PxI32 dy = 0; dy < 2; dy++) { for (PxI32 dz = 0; dz < 2; dz++) { PxI32 xi = c.xi + dx; PxI32 yi = c.yi + dy; PxI32 zi = c.zi + dz; // are we responsible for this cell? 
PxI32 maxCellNr = i; for (PxI32 rx = xi - 1; rx <= xi; rx++) for (PxI32 ry = yi - 1; ry <= yi; ry++) for (PxI32 rz = zi - 1; rz <= zi; rz++) maxCellNr = PxMax(maxCellNr, getCellNr(rx, ry, rz)); if (maxCellNr != i) continue; PxI32 code = 0; for (PxI32 j = 0; j < 8; j++) { PxI32 mx = xi - 1 + marchingCubeCorners[j][0]; PxI32 my = yi - 1 + marchingCubeCorners[j][1]; PxI32 mz = zi - 1 + marchingCubeCorners[j][2]; cornerVoxelNr[j] = getCellNr(mx, my, mz); if (cornerVoxelNr[j] >= 0) code |= (1 << j); cornerPos[j].x = meshBounds.minimum.x + (mx + 0.5f) * spacing; cornerPos[j].y = meshBounds.minimum.y + (my + 0.5f) * spacing; cornerPos[j].z = meshBounds.minimum.z + (mz + 0.5f) * spacing; } PxI32 first = firstMarchingCubesId[code]; PxI32 num = (firstMarchingCubesId[code + 1] - first); // create vertices and tris for (PxI32 j = 0; j < 12; j++) edgeVertId[j] = -1; for (PxI32 j = num - 1; j >= 0; j--) { PxI32 edgeId = marchingCubesIds[first + j]; if (edgeVertId[edgeId] < 0) { PxI32 id0 = marchingCubeEdges[edgeId][0]; PxI32 id1 = marchingCubeEdges[edgeId][1]; PxVec3& p0 = cornerPos[id0]; PxVec3& p1 = cornerPos[id1]; edgeVertId[edgeId] = vertices.size(); vertices.pushBack((p0 + p1) * 0.5f); cellOfVertex.pushBack(PxMax(cornerVoxelNr[id0], cornerVoxelNr[id1])); } triIds.pushBack(edgeVertId[edgeId]); } } } } } removeDuplicateVertices(); pruneInternalSurfaces(); project(inputVerts, inputTriIds, nbTriangleIndices, 2.0f * spacing, 0.1f * spacing); if (vertexMap) createVertexMap(inputVerts, nbVertices, meshBounds.minimum, spacing, *vertexMap); computeNormals(); } // ------------------------------------------------------------------------------------- void Remesher::removeDuplicateVertices() { PxF32 eps = 1e-5f; struct Ref { PxF32 d; PxI32 id; bool operator < (const Ref& r) const { return d < r.d; } }; PxI32 numVerts = PxI32(vertices.size()); PxArray<Ref> refs(numVerts); for (PxI32 i = 0; i < numVerts; i++) { PxVec3& p = vertices[i]; refs[i].d = p.x + 0.3f * p.y + 0.1f * p.z; refs[i].id = i; } PxSort(refs.begin(), refs.size()); PxArray<PxI32> idMap(vertices.size(), -1); PxArray<PxVec3> oldVerts = vertices; PxArray<PxI32> oldCellOfVertex = cellOfVertex; vertices.clear(); cellOfVertex.clear(); PxI32 nr = 0; while (nr < numVerts) { Ref& r = refs[nr]; nr++; if (idMap[r.id] >= 0) continue; idMap[r.id] = vertices.size(); vertices.pushBack(oldVerts[r.id]); cellOfVertex.pushBack(oldCellOfVertex[r.id]); PxI32 i = nr; while (i < numVerts && fabsf(refs[i].d - r.d) < eps) { PxI32 id = refs[i].id; if ((oldVerts[r.id] - oldVerts[id]).magnitudeSquared() < eps * eps) idMap[id] = idMap[r.id]; i++; } } for (PxI32 i = 0; i < PxI32(triIds.size()); i++) triIds[i] = idMap[triIds[i]]; } // ------------------------------------------------------------------------------------- void Remesher::findTriNeighbors() { PxI32 numTris = PxI32(triIds.size()) / 3; triNeighbors.clear(); triNeighbors.resize(3 * numTris, -1); struct Edge { void init(PxI32 _id0, PxI32 _id1, PxI32 _triNr, PxI32 _edgeNr) { this->id0 = PxMin(_id0, _id1); this->id1 = PxMax(_id0, _id1); this->triNr = _triNr; this->edgeNr = _edgeNr; } bool operator < (const Edge& e) const { if (id0 < e.id0) return true; if (id0 > e.id0) return false; return id1 < e.id1; } bool operator == (const Edge& e) const { return id0 == e.id0 && id1 == e.id1; } PxI32 id0, id1, triNr, edgeNr; }; PxArray<Edge> edges(triIds.size()); for (PxI32 i = 0; i < numTris; i++) { for (PxI32 j = 0; j < 3; j++) { PxI32 id0 = triIds[3 * i + j]; PxI32 id1 = triIds[3 * i + (j + 1) % 3]; edges[3 * i + j].init(id0, id1, 
i, j); } } PxSort(edges.begin(), edges.size()); PxI32 nr = 0; while (nr < PxI32(edges.size())) { Edge& e0 = edges[nr]; nr++; while (nr < PxI32(edges.size()) && edges[nr] == e0) { Edge& e1 = edges[nr]; triNeighbors[3 * e0.triNr + e0.edgeNr] = e1.triNr; triNeighbors[3 * e1.triNr + e1.edgeNr] = e0.triNr; nr++; } } } // ------------------------------------------------------------------------------------- void Remesher::pruneInternalSurfaces() { // flood islands, if the enclosed volume is negative remove it findTriNeighbors(); PxI32 numTris = PxI32(triIds.size()) / 3; PxArray<PxI32> oldTriIds = triIds; triIds.clear(); PxArray<bool> visited(numTris, false); PxArray<PxI32> stack; for (PxI32 i = 0; i < numTris; i++) { if (visited[i]) continue; stack.clear(); stack.pushBack(i); PxI32 islandStart = PxI32(triIds.size()); float vol = 0.0f; while (!stack.empty()) { PxI32 triNr = stack.back(); stack.popBack(); if (visited[triNr]) continue; visited[triNr] = true; for (PxI32 j = 0; j < 3; j++) triIds.pushBack(oldTriIds[3 * triNr + j]); const PxVec3& p0 = vertices[oldTriIds[3 * triNr]]; const PxVec3& p1 = vertices[oldTriIds[3 * triNr + 1]]; const PxVec3& p2 = vertices[oldTriIds[3 * triNr + 2]]; vol += p0.cross(p1).dot(p2); for (PxI32 j = 0; j < 3; j++) { PxI32 n = triNeighbors[3 * triNr + j]; if (n >= 0 && !visited[n]) stack.pushBack(n); } } if (vol <= 0.0f) triIds.resize(islandStart); } // remove unreferenced vertices PxArray<PxI32> idMap(vertices.size(), -1); PxArray<PxVec3> oldVerts = vertices; PxArray<PxI32> oldCellOfVertex = cellOfVertex; vertices.clear(); cellOfVertex.clear(); for (int i = 0; i < PxI32(triIds.size()); i++) { PxI32 id = triIds[i]; if (idMap[id] < 0) { idMap[id] = vertices.size(); vertices.pushBack(oldVerts[id]); cellOfVertex.pushBack(oldCellOfVertex[id]); } triIds[i] = idMap[id]; } } // ----------------------------------------------------------------------------------- static void getClosestPointOnTriangle(const PxVec3& pos, const PxVec3& p0, const PxVec3& p1, const PxVec3& p2, PxVec3& closest, PxVec3& bary) { PxVec3 e0 = p1 - p0; PxVec3 e1 = p2 - p0; PxVec3 tmp = p0 - pos; float a = e0.dot(e0); float b = e0.dot(e1); float c = e1.dot(e1); float d = e0.dot(tmp); float e = e1.dot(tmp); PxVec3 coords, clampedCoords; coords.x = b * e - c * d; // s * det coords.y = b * d - a * e; // t * det coords.z = a * c - b * b; // det clampedCoords = PxVec3(PxZero); if (coords.x <= 0.0f) { if (c != 0.0f) clampedCoords.y = -e / c; } else if (coords.y <= 0.0f) { if (a != 0.0f) clampedCoords.x = -d / a; } else if (coords.x + coords.y > coords.z) { float denominator = a + c - b - b; float numerator = c + e - b - d; if (denominator != 0.0f) { clampedCoords.x = numerator / denominator; clampedCoords.y = 1.0f - clampedCoords.x; } } else { // all inside if (coords.z != 0.0f) { clampedCoords.x = coords.x / coords.z; clampedCoords.y = coords.y / coords.z; } } bary.y = PxMin(PxMax(clampedCoords.x, 0.0f), 1.0f); bary.z = PxMin(PxMax(clampedCoords.y, 0.0f), 1.0f); bary.x = 1.0f - bary.y - bary.z; closest = p0 * bary.x + p1 * bary.y + p2 * bary.z; } // ------------------------------------------------------------------------------------- void Remesher::project(const PxVec3* inputVerts, const PxU32* inputTriIds, PxU32 nbTriangleIndices, float searchDist, float surfaceDist) { // build a bvh for the input mesh PxI32 numInputTris = PxI32(nbTriangleIndices) / 3; if (numInputTris == 0) return; bvhBounds.resize(numInputTris); bvhTris.clear(); for (PxI32 i = 0; i < numInputTris; i++) { PxBounds3& b = bvhBounds[i]; 
b.setEmpty(); b.include(inputVerts[inputTriIds[3 * i]]); b.include(inputVerts[inputTriIds[3 * i + 1]]); b.include(inputVerts[inputTriIds[3 * i + 2]]); } BVHDesc bvh; BVHBuilder::build(bvh, &bvhBounds[0], bvhBounds.size()); // project vertices to closest point on surface PxBounds3 pb; for (PxU32 i = 0; i < vertices.size(); i++) { PxVec3& p = vertices[i]; pb.setEmpty(); pb.include(p); pb.fattenFast(searchDist); bvh.query(pb, bvhTris); float minDist2 = PX_MAX_F32; PxVec3 closest(PxZero); for (PxU32 j = 0; j < bvhTris.size(); j++) { PxI32 triNr = bvhTris[j]; const PxVec3& p0 = inputVerts[inputTriIds[3 * triNr]]; const PxVec3& p1 = inputVerts[inputTriIds[3 * triNr + 1]]; const PxVec3& p2 = inputVerts[inputTriIds[3 * triNr + 2]]; PxVec3 c, bary; getClosestPointOnTriangle(p, p0, p1, p2, c, bary); float dist2 = (c - p).magnitudeSquared(); if (dist2 < minDist2) { minDist2 = dist2; closest = c; } } if (minDist2 < PX_MAX_F32) { PxVec3 n = p - closest; n.normalize(); p = closest + n * surfaceDist; } } } static const int cellNeighbors[6][3] = { { -1,0,0 }, {1,0,0},{0,-1,0},{0,1,0},{0,0,-1},{0,0,1} }; // ------------------------------------------------------------------------------------- void Remesher::createVertexMap(const PxVec3* inputVerts, PxU32 nbVertices, const PxVec3 &gridOrigin, PxF32 &gridSpacing, PxArray<PxU32> &vertexMap) { PxArray<PxI32> vertexOfCell(cells.size(), -1); PxArray<PxI32 > front[2]; PxI32 frontNr = 0; // compute inverse links for (PxI32 i = 0; i < PxI32(vertices.size()); i++) { PxI32 cellNr = cellOfVertex[i]; if (cellNr >= 0) { if (vertexOfCell[cellNr] < 0) { vertexOfCell[cellNr] = i; front[frontNr].pushBack(cellNr); } } } // propagate cell->vertex links through the voxel mesh while (!front[frontNr].empty()) { front[1 - frontNr].clear(); for (PxI32 i = 0; i < PxI32(front[frontNr].size()); i++) { int cellNr = front[frontNr][i]; Cell& c = cells[cellNr]; for (PxI32 j = 0; j < 6; j++) { PxI32 n = getCellNr(c.xi + cellNeighbors[j][0], c.yi + cellNeighbors[j][1], c.zi + cellNeighbors[j][2]); if (n >= 0 && vertexOfCell[n] < 0) { vertexOfCell[n] = vertexOfCell[cellNr]; front[1 - frontNr].pushBack(n); } } } frontNr = 1 - frontNr; } // create the map vertexMap.clear(); vertexMap.resize(nbVertices, 0); for (PxU32 i = 0; i < nbVertices; i++) { const PxVec3& p = inputVerts[i]; PxI32 xi = PxI32(PxFloor((p.x - gridOrigin.x) / gridSpacing)); PxI32 yi = PxI32(PxFloor((p.y - gridOrigin.y) / gridSpacing)); PxI32 zi = PxI32(PxFloor((p.z - gridOrigin.z) / gridSpacing)); PxI32 cellNr = getCellNr(xi, yi, zi); vertexMap[i] = cellNr >= 0 ? vertexOfCell[cellNr] : 0; } } // ------------------------------------------------------------------------------------- void Remesher::computeNormals() { normals.clear(); normals.resize(vertices.size(), PxVec3(PxZero)); for (PxI32 i = 0; i < PxI32(triIds.size()); i += 3) { PxI32* ids = &triIds[i]; PxVec3& p0 = vertices[ids[0]]; PxVec3& p1 = vertices[ids[1]]; PxVec3& p2 = vertices[ids[2]]; PxVec3 n = (p1 - p0).cross(p2 - p0); normals[ids[0]] += n; normals[ids[1]] += n; normals[ids[2]] += n; } for (PxI32 i = 0; i < PxI32(normals.size()); i++) normals[i].normalize(); } // ------------------------------------------------------------------------------------- void Remesher::readBack(PxArray<PxVec3>& outputVertices, PxArray<PxU32>& outputTriIds) { outputVertices = vertices; outputTriIds.resize(triIds.size()); for (PxU32 i = 0; i < triIds.size(); i++) outputTriIds[i] = PxU32(triIds[i]); } } }
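A minimal usage sketch of the Remesher above (not part of the original file), assuming a closed input surface and an arbitrary resolution of 64; only the remesh() and readBack() members shown in this file are relied on.

#include "ExtRemesher.h"

// Remesh an input surface and read the marching-cubes result back out.
static void remeshSurface(const physx::PxArray<physx::PxVec3>& inputVerts,
                          const physx::PxArray<physx::PxU32>& inputTriIds)
{
    using namespace physx;

    Ext::Remesher remesher;

    // Voxelize at ~64 cells along the largest bounding-box axis, extract the
    // marching-cubes boundary and project it back onto the input mesh.
    PxArray<PxU32> vertexMap; // optional: maps each input vertex to a remeshed vertex
    remesher.remesh(inputVerts, inputTriIds, 64, &vertexMap);

    PxArray<PxVec3> outVerts;
    PxArray<PxU32>  outTris;
    remesher.readBack(outVerts, outTris);
}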
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/tet/ExtDelaunayBoundaryInserter.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. #ifndef EXT_DELAUNAY_BOUNDARY_INSERTER_H #define EXT_DELAUNAY_BOUNDARY_INSERTER_H #include "foundation/PxHashSet.h" #include "foundation/PxVec3.h" #include "foundation/PxMat33.h" #include "foundation/PxHashMap.h" #include "foundation/PxArray.h" #include "common/PxCoreUtilityTypes.h" #include "extensions/PxTriangleMeshAnalysisResult.h" #include "extensions/PxTetrahedronMeshAnalysisResult.h" #include "ExtDelaunayTetrahedralizer.h" namespace physx { namespace Ext { //Generates a tetmesh that matches the specified triangle mesh. All triangle mesh points are present in the tetmesh, additional points on edges and faces might be present. void generateTetmesh(const PxBoundedData& inputPoints, const PxBoundedData& inputTriangles, const bool has16bitIndices, PxArray<PxVec3>& tetPoints, PxArray<PxU32>& finalTets); //Generates a tetmesh that matches the surface of the input tetmesh approximately but creates very regularly shaped tetrahedra. void generateVoxelTetmesh(const PxBoundedData& inputPoints, const PxBoundedData& inputTets, PxU32 numVoxelsX, PxU32 numVoxelsY, PxU32 numVoxelsZ, PxArray<PxVec3>& voxelPoints, PxArray<PxU32>& voxelTets, PxI32* intputPointToOutputTetIndex, const PxU32* anchorNodeIndices = NULL, PxU32 numTetsPerVoxel = 5); //Generates a tetmesh that matches the surface of the input tetmesh approximately but creates very regularly shaped tetrahedra. void generateVoxelTetmesh(const PxBoundedData& inputPoints, const PxBoundedData& inputTets, PxReal voxelEdgeLength, PxArray<PxVec3>& voxelPoints, PxArray<PxU32>& voxelTets, PxI32* intputPointToOutputTetIndex, const PxU32* anchorNodeIndices = NULL, PxU32 numTetsPerVoxel = 5); //Generates a tetmesh that matches the surface of the input tetmesh approximately but creates very regularly shaped tetrahedra. 
void generateVoxelTetmesh(const PxBoundedData& inputPoints, const PxBoundedData& inputTets, PxU32 numVoxelsAlongLongestBoundingBoxAxis, PxArray<PxVec3>& voxelPoints, PxArray<PxU32>& voxelTets, PxI32* intputPointToOutputTetIndex, const PxU32* anchorNodeIndices = NULL, PxU32 numTetsPerVoxel = 5); //Extracts the surface triangles from the specified tetrahedra void extractTetmeshSurface(const PxArray<PxI32>& tets, PxArray<PxI32>& triangles); //Computes the lumped mass per vertex for the specified tetmesh void pointMasses(const PxArray<PxVec3>& tetVerts, const PxArray<PxU32>& tets, PxF32 density, PxArray<PxF32>& mass); //Computes a rest pose matrix for every tetrahedron in the specified tetmesh void restPoses(const PxArray<PxVec3>& tetVerts, const PxArray<PxU32>& tets, PxArray<PxMat33>& restPoses); //Computes a fiber direction for every tetrahedron in the specified tetmesh. Currently just returns dummy values. void tetFibers(const PxArray<PxVec3>& tetVerts, const PxArray<PxU32>& tets, PxArray<PxVec3>& tetFibers); //Analyzes the triangle mesh to get a report about deficiencies. Some deficiencies can be handled by the tetmesher, others cannot. PxTriangleMeshAnalysisResults validateTriangleMesh(const PxBoundedData& points, const PxBoundedData& triangles, const bool has16BitIndices, const PxReal minVolumeThreshold = 1e-6f, const PxReal minTriangleAngleRadians = 10.0f*3.1415926535898f / 180.0f); //Analyzes the tetrahedron mesh to get a report about deficiencies. Some deficiencies can be handled by the softbody cooker, others cannot. PxTetrahedronMeshAnalysisResults validateTetrahedronMesh(const PxBoundedData& points, const PxBoundedData& tetrahedra, const bool has16BitIndices, const PxReal minTetVolumeThreshold = 1e-8f); PxU32 removeDisconnectedIslands(PxI32* finalTets, PxU32 numTets); } } #endif
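A hedged sketch of calling the generateTetmesh() declared above, assuming the usual data/stride/count layout of PxBoundedData from common/PxCoreUtilityTypes.h; the surface arrays are placeholders supplied by the caller.

#include "ExtDelaunayBoundaryInserter.h"

// Wrap raw vertex/index arrays in PxBoundedData and generate a conforming tetmesh.
static void tetmeshFromSurface(const physx::PxArray<physx::PxVec3>& verts,
                               const physx::PxArray<physx::PxU32>& tris)
{
    using namespace physx;

    PxBoundedData points;
    points.data   = verts.begin();
    points.stride = sizeof(PxVec3);
    points.count  = verts.size();

    PxBoundedData triangles;
    triangles.data   = tris.begin();
    triangles.stride = 3 * sizeof(PxU32); // three 32-bit indices per triangle
    triangles.count  = tris.size() / 3;

    PxArray<PxVec3> tetPoints;
    PxArray<PxU32>  tetIndices;
    Ext::generateTetmesh(points, triangles, /*has16bitIndices=*/false, tetPoints, tetIndices);
}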
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/tet/ExtUtilities.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. #include "foundation/PxAssert.h" #include "ExtUtilities.h" #include "GuAABBTreeBuildStats.h" #include "foundation/PxFPU.h" namespace physx { namespace Ext { using namespace Gu; static PxVec3 toFloat(const PxVec3d& p) { return PxVec3(PxReal(p.x), PxReal(p.y), PxReal(p.z)); } void buildTree(const PxU32* triangles, const PxU32 numTriangles, const PxVec3d* points, PxArray<Gu::BVHNode>& tree, PxF32 enlargement) { //Computes a bounding box for every triangle in triangles AABBTreeBounds boxes; boxes.init(numTriangles); for (PxU32 i = 0; i < numTriangles; ++i) { const PxU32* tri = &triangles[3 * i]; PxBounds3 box = PxBounds3::empty(); box.include(toFloat(points[tri[0]])); box.include(toFloat(points[tri[1]])); box.include(toFloat(points[tri[2]])); box.fattenFast(enlargement); boxes.getBounds()[i] = box; } Gu::buildAABBTree(numTriangles, boxes, tree); } } }
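A brief sketch of how the buildTree() helper above could be driven, assuming it is declared in ExtUtilities.h and that Gu::BVHNode is reachable through that header; the 1e-4f enlargement is an arbitrary example value.

#include "ExtUtilities.h"

// Build an AABB tree over a triangle soup given in double precision.
static void buildSurfaceTree(const physx::PxArray<physx::PxU32>& triIndices,
                             const physx::PxArray<physx::PxVec3d>& points)
{
    using namespace physx;

    PxArray<Gu::BVHNode> tree;
    Ext::buildTree(triIndices.begin(), triIndices.size() / 3, points.begin(), tree, 1e-4f);
}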
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/tet/ExtOctreeTetrahedralizer.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef EXT_OCTREE_TETRAHEDRALIZER_H #define EXT_OCTREE_TETRAHEDRALIZER_H #include "ExtMultiList.h" #include "ExtVec3.h" #include "foundation/PxVec3.h" #include "ExtInsideTester.h" namespace physx { namespace Ext { class InsideTester; // ------------------------------------------------------------------------------ class OctreeTetrahedralizer { public: OctreeTetrahedralizer(); void clear(); void createTetMesh(const PxArray<PxVec3> &verts, const PxArray<PxU32> &triIds, bool includeOctreeNodes = true, PxI32 maxVertsPerCell = 20, PxI32 maxTreeDepth = 5); void readBack(PxArray<PxVec3> &tetVertices, PxArray<PxU32> &tetIndices); private: // input mesh PxArray<PxVec3> surfaceVerts; PxArray<PxI32> surfaceTriIds; // octree PxI32 maxVertsPerCell; PxI32 maxTreeDepth; struct Cell { void init() { firstChild = -1; orig = PxVec3d(0.0, 0.0, 0.0); size = 0.0; numVerts = 0; closestTetNr = -1; depth = 0; } PxI32 getChildNr(const PxVec3d& p); PX_FORCE_INLINE PxI32 getChildNr(const PxVec3& p) { return getChildNr(PxVec3d(PxF64(p.x), PxF64(p.y), PxF64(p.z))); } PxI32 firstChild; PxI32 firstCellVert; PxI32 firstCellTet; PxVec3d orig; double size; PxI32 numVerts; PxI32 closestTetNr; PxI32 depth; }; PxArray<Cell> cells; MultiList<PxI32> vertsOfCell; // tet mesh PxArray<PxVec3d> tetVerts; PxArray<PxI32> tetIds; PxArray<PxI32> tetNeighbors; PxArray<PxI32> tetMarks; PxI32 currentTetMark; PxArray<PxI32> stack; PxArray<PxI32> violatingTets; PxI32 firstABBVert; struct Edge { PxI32 id0, id1; PxI32 faceNr, tetNr; void init(PxI32 _id0, PxI32 _id1, PxI32 _tetNr, PxI32 _faceNr) { this->id0 = _id0 < _id1 ? _id0 : _id1; this->id1 = _id0 > _id1 ? 
_id0 : _id1; this->tetNr = _tetNr; this->faceNr = _faceNr; } PX_FORCE_INLINE bool operator < (Edge e) const { if (id0 < e.id0) return true; if (id0 > e.id0) return false; return id1 < e.id1; } PX_FORCE_INLINE bool operator == (Edge e) { return id0 == e.id0 && id1 == e.id1; } }; PxArray<Edge> edges; void clearTets(); void createTree(); void treeInsertVert(PxI32 cellNr, PxI32 vertNr); void createTetVerts(bool includeOctreeNodes); bool findSurroundingTet(const PxVec3d& p, PxI32 startTetNr, PxI32& tetNr); bool findSurroundingTet(const PxVec3d& p, PxI32& tetNr); void treeInsertTet(PxI32 tetNr); void treeRemoveTet(PxI32 tetNr); PxI32 firstFreeTet; PxI32 getNewTetNr(); void removeTetNr(PxI32 tetNr); PxVec3d getTetCenter(PxI32 tetNr) const; bool meshInsertTetVert(PxI32 vertNr); InsideTester insideTester; void pruneTets(); mutable float prevClip; mutable float prevScale; mutable PxArray<PxVec3> renderVerts; mutable PxArray<PxVec3> renderNormals; mutable PxArray<PxI32> renderTriIds; }; } } #endif
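A minimal usage sketch of the OctreeTetrahedralizer declared above, relying only on its public createTetMesh()/readBack() interface and the default cell and depth limits.

#include "ExtOctreeTetrahedralizer.h"

// Tetrahedralize a closed triangle surface with the octree-based scheme.
static void octreeTetmesh(const physx::PxArray<physx::PxVec3>& surfaceVerts,
                          const physx::PxArray<physx::PxU32>& surfaceTris)
{
    using namespace physx;

    Ext::OctreeTetrahedralizer tetrahedralizer;
    tetrahedralizer.createTetMesh(surfaceVerts, surfaceTris); // includeOctreeNodes = true, default limits

    PxArray<PxVec3> tetVerts;
    PxArray<PxU32>  tetIndices;
    tetrahedralizer.readBack(tetVerts, tetIndices);
}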
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/tet/ExtVoxelTetrahedralizer.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. #include "ExtVoxelTetrahedralizer.h" #include "CmRandom.h" #include "ExtTetUnionFind.h" namespace physx { namespace Ext { // ------------------------------------------------------------------------------------- static PxI32 cubeNeighbors[6][3] = { { -1,0,0 }, {1,0,0}, {0,-1,0}, {0,1,0}, {0,0,-1}, {0,0,1} }; static const PxI32 cubeCorners[8][3] = { {0,0,0}, {1,0,0},{1,1,0},{0,1,0}, {0,0,1}, {1,0,1},{1,1,1},{0,1,1} }; static const PxI32 cubeFaces[6][4] = { {0,3,7,4},{1,2,6,5},{0,1,5,4},{3,2,6,7},{0,1,2,3},{4,5,6,7} }; static const PxI32 oppNeighbor[6] = { 1,0,3,2,5,4 }; static const PxI32 tetEdges[12][2] = { {0,1},{1,2},{2,0},{0,3},{1,3},{2,3}, {1,0},{2,1},{0,2},{3,0},{3,1},{3,2} }; static PxI32 cubeSixTets[6][4] = { { 0, 4, 5, 7 },{ 1, 5, 6, 7 },{ 1, 0, 5, 7 },{ 1, 2, 3, 6 },{ 3, 1, 6, 7 },{ 0, 1, 3, 7 } }; static PxI32 cubeFiveTets[2][5][4] = { { { 0, 1, 2, 5 },{ 0, 2, 3, 7 },{ 0, 5, 2, 7 },{ 0, 5, 7, 4 },{ 2, 7, 5, 6 } }, { { 1, 2, 3, 6 },{ 1, 3, 0, 4 },{ 1, 6, 3, 4 },{ 1, 6, 4, 5 },{ 3, 4, 6, 7 } }, }; static PxI32 cubeSixSubdivTets[12][4] = { {0,4,5,8}, {0,5,1,8}, {3,2,6,8}, {3,6,7,8}, {0,3,7,8}, {0,7,4,8}, {1,5,6,8}, {1,6,2,8}, {0,1,3,8}, {1,2,3,8}, {5,4,7,8}, {5,7,6,8} }; static PxI32 cubeFiveSubdivTets[2][12][4] = { { {0,1,2,8}, {0,2,3,8}, {4,7,5,8}, {5,7,6,8}, {0,7,4,8}, {0,3,7,8}, {1,5,2,8}, {2,5,6,8}, {0,5,1,8}, {0,4,5,8}, {3,2,7,8}, {2,6,7,8} }, { {0,1,3,8}, {1,2,3,8}, {4,7,6,8}, {4,6,5,8}, {0,3,4,8}, {3,7,4,8}, {1,5,6,8}, {1,6,2,8}, {0,4,1,8}, {1,4,5,8}, {3,2,6,8}, {3,6,7,8} } }; static const PxI32 volIdOrder[4][3] = { {1, 3, 2}, {0, 2, 3}, {0, 3, 1}, {0, 1, 2} }; // ------------------------------------------------------------------------------------- static bool boxTriangleIntersection( PxVec3 p0, PxVec3 p1, PxVec3 p2, PxVec3 center, PxVec3 extents); static void getClosestPointOnTriangle( PxVec3 p1, PxVec3 p2, PxVec3 p3, PxVec3 p, PxVec3& closest, PxVec3& bary); // ------------------------------------------------------------------------------------- 
VoxelTetrahedralizer::VoxelTetrahedralizer() { clear(); } // ------------------------------------------------------------------------------------- void VoxelTetrahedralizer::clear() { surfaceVerts.clear(); surfaceTriIds.clear(); surfaceBounds.setEmpty(); tetVerts.clear(); origTetVerts.clear(); isSurfaceVert.clear(); targetVertPos.clear(); tetIds.clear(); voxels.clear(); gridOrigin = PxVec3(PxZero); gridSpacing = 0.0f; } // ----------------------------------------------------------------------------------- void VoxelTetrahedralizer::readBack(PxArray<PxVec3>& _tetVertices, PxArray<PxU32>& _tetIndices) { _tetVertices = tetVerts; _tetIndices.resize(tetIds.size()); for (PxU32 i = 0; i < tetIds.size(); i++) _tetIndices[i] = PxU32(tetIds[i]); } // ----------------------------------------------------------------------------------- void VoxelTetrahedralizer::createTetMesh(const PxArray<PxVec3>& verts, const PxArray<PxU32>& triIds, PxI32 resolution, PxI32 numRelaxationIters, PxF32 relMinTetVolume) { surfaceVerts = verts; surfaceTriIds.resize(triIds.size()); for (PxU32 i = 0; i < triIds.size(); i++) surfaceTriIds[i] = triIds[i]; surfaceBounds.setEmpty(); for (PxU32 i = 0; i < surfaceVerts.size(); i++) surfaceBounds.include(surfaceVerts[i]); buildBVH(); voxelize(resolution); bool subdivBorder = true; int numTetsPerVoxel = 5; // or 6 createTets(subdivBorder, numTetsPerVoxel); findTargetPositions(0.2f * gridSpacing); relax(numRelaxationIters, relMinTetVolume); } // ----------------------------------------------------------------------------------- void VoxelTetrahedralizer::buildBVH() { PxI32 numTris = PxI32(surfaceTriIds.size()) / 3; if (numTris == 0) return; PxArray<PxBounds3> bvhBounds(numTris); for (PxI32 i = 0; i < numTris; i++) { PxBounds3& b = bvhBounds[i]; b.setEmpty(); b.include(surfaceVerts[surfaceTriIds[3 * i]]); b.include(surfaceVerts[surfaceTriIds[3 * i + 1]]); b.include(surfaceVerts[surfaceTriIds[3 * i + 2]]); } BVHBuilder::build(bvh, &bvhBounds[0], bvhBounds.size()); } // ----------------------------------------------------------------------------------- void VoxelTetrahedralizer::voxelize(PxU32 resolution) { tetIds.clear(); tetVerts.clear(); PxBounds3 meshBounds; meshBounds.setEmpty(); for (PxU32 i = 0; i < surfaceVerts.size(); i++) meshBounds.include(surfaceVerts[i]); gridSpacing = meshBounds.getDimensions().magnitude() / resolution; meshBounds.fattenSafe(gridSpacing); gridOrigin = meshBounds.minimum; voxels.clear(); PxI32 numX = PxI32((meshBounds.maximum.x - meshBounds.minimum.x) / gridSpacing) + 1; PxI32 numY = PxI32((meshBounds.maximum.y - meshBounds.minimum.y) / gridSpacing) + 1; PxI32 numZ = PxI32((meshBounds.maximum.z - meshBounds.minimum.z) / gridSpacing) + 1; PxI32 numCells = numX * numY * numZ; PxArray<PxI32> voxelOfCell(numCells, -1); PxBounds3 voxelBounds, faceBounds; // create intersected voxels for (PxI32 i = 0; i < numCells; i++) { PxI32 zi = i % numZ; PxI32 yi = (i / numZ) % numY; PxI32 xi = (i / numZ / numY); voxelBounds.minimum = meshBounds.minimum + PxVec3(PxF32(xi), PxF32(yi), PxF32(zi)) * gridSpacing; voxelBounds.maximum = voxelBounds.minimum + PxVec3(gridSpacing); bvh.query(voxelBounds, queryTris); for (PxU32 j = 0; j < queryTris.size(); j++) { PxI32 triNr = queryTris[j]; const PxVec3& p0 = surfaceVerts[surfaceTriIds[3 * triNr]]; const PxVec3& p1 = surfaceVerts[surfaceTriIds[3 * triNr + 1]]; const PxVec3& p2 = surfaceVerts[surfaceTriIds[3 * triNr + 2]]; if (boxTriangleIntersection(p0, p1, p2, voxelBounds.getCenter(), voxelBounds.getExtents())) { // volume if 
(voxelOfCell[i] < 0) { voxelOfCell[i] = voxels.size(); voxels.resize(voxels.size() + 1); voxels.back().init(xi, yi, zi); } } } } // flood outside PxArray<PxI32> stack; stack.pushBack(0); while (!stack.empty()) { PxI32 nr = stack.back(); stack.popBack(); if (voxelOfCell[nr] == -1) { voxelOfCell[nr] = -2; // outside PxI32 z0 = nr % numZ; PxI32 y0 = (nr / numZ) % numY; PxI32 x0 = (nr / numZ / numY); for (PxI32 i = 0; i < 6; i++) { PxI32 xi = x0 + cubeNeighbors[i][0]; PxI32 yi = y0 + cubeNeighbors[i][1]; PxI32 zi = z0 + cubeNeighbors[i][2]; if (xi >= 0 && xi < numX && yi >= 0 && yi < numY && zi >= 0 && zi < numZ) { PxI32 adj = (xi * numY + yi) * numZ + zi; if (voxelOfCell[adj] == -1) stack.pushBack(adj); } } } } // create voxels for the inside for (PxI32 i = 0; i < numCells; i++) { if (voxelOfCell[i] == -1) { voxelOfCell[i] = voxels.size(); voxels.resize(voxels.size() + 1); PxI32 zi = i % numZ; PxI32 yi = (i / numZ) % numY; PxI32 xi = (i / numZ / numY); voxels.back().init(xi, yi, zi); voxels.back().inner = true; } } // find neighbors for (PxU32 i = 0; i < voxels.size(); i++) { Voxel& v = voxels[i]; voxelBounds.minimum = meshBounds.minimum + PxVec3(PxF32(v.xi), PxF32(v.yi), PxF32(v.zi)) * gridSpacing; voxelBounds.maximum = voxelBounds.minimum + PxVec3(gridSpacing); for (PxI32 j = 0; j < 6; j++) { PxI32 xi = v.xi + cubeNeighbors[j][0]; PxI32 yi = v.yi + cubeNeighbors[j][1]; PxI32 zi = v.zi + cubeNeighbors[j][2]; if (xi < 0 || xi >= numX || yi < 0 || yi >= numY || zi < 0 || zi >= numZ) continue; PxI32 neighbor = voxelOfCell[(xi * numY + yi) * numZ + zi]; if (neighbor < 0) continue; if (v.inner || voxels[neighbor].inner) { v.neighbors[j] = neighbor; continue; } faceBounds = voxelBounds; PxF32 eps = 1e-4f; switch (j) { case 0: faceBounds.maximum.x = faceBounds.minimum.x + eps; break; case 1: faceBounds.minimum.x = faceBounds.maximum.x - eps; break; case 2: faceBounds.maximum.y = faceBounds.minimum.y + eps; break; case 3: faceBounds.minimum.y = faceBounds.maximum.y - eps; break; case 4: faceBounds.maximum.z = faceBounds.minimum.z + eps; break; case 5: faceBounds.minimum.z = faceBounds.maximum.z - eps; break; } bvh.query(faceBounds, queryTris); bool intersected = false; for (PxU32 k = 0; k < queryTris.size(); k++) { PxI32 triNr = queryTris[k]; const PxVec3& p0 = surfaceVerts[surfaceTriIds[3 * triNr]]; const PxVec3& p1 = surfaceVerts[surfaceTriIds[3 * triNr + 1]]; const PxVec3& p2 = surfaceVerts[surfaceTriIds[3 * triNr + 2]]; if (boxTriangleIntersection(p0, p1, p2, faceBounds.getCenter(), faceBounds.getExtents())) { intersected = true; break; } } if (intersected) v.neighbors[j] = neighbor; } } } // ----------------------------------------------------------------------------------- void VoxelTetrahedralizer::createUniqueTetVertices() { // start with each voxel having its own vertices PxArray<PxVec3> verts; for (PxU32 i = 0; i < voxels.size(); i++) { Voxel& v = voxels[i]; for (PxI32 j = 0; j < 8; j++) { v.ids[j] = verts.size(); verts.pushBack(gridOrigin + PxVec3( PxF32(v.xi + cubeCorners[j][0]), PxF32(v.yi + cubeCorners[j][1]), PxF32(v.zi + cubeCorners[j][2])) * gridSpacing); } } // unify vertices UnionFind* u = new UnionFind(); u->init(verts.size()); for (PxU32 i = 0; i < voxels.size(); i++) { Voxel& v0 = voxels[i]; for (PxI32 j = 0; j < 6; j++) { PxI32 n = v0.neighbors[j]; if (n < 0) continue; Voxel& v1 = voxels[n]; for (PxI32 k = 0; k < 4; k++) { PxI32 id0 = v0.ids[cubeFaces[j][k]]; PxI32 id1 = v1.ids[cubeFaces[oppNeighbor[j]][k]]; u->makeSet(id0, id1); } } } u->computeSetNrs(); tetVerts.clear(); for 
(PxU32 i = 0; i < voxels.size(); i++) { Voxel& v = voxels[i]; for (PxI32 j = 0; j < 8; j++) { PxI32 setNr = u->getSetNr(v.ids[j]); if (PxI32(tetVerts.size()) <= setNr) tetVerts.resize(setNr + 1, PxVec3(PxZero)); tetVerts[setNr] = verts[v.ids[j]]; v.ids[j] = setNr; } } origTetVerts = tetVerts; delete u; } // ------------------------------------------------------------------------------------- void VoxelTetrahedralizer::findTargetPositions(PxF32 surfaceDist) { targetVertPos = tetVerts; for (PxU32 i = 0; i < voxels.size(); i++) { Voxel& v = voxels[i]; PxBounds3 voxelBounds; voxelBounds.minimum = gridOrigin + PxVec3(PxF32(v.xi), PxF32(v.yi), PxF32(v.zi)) * gridSpacing; voxelBounds.maximum = voxelBounds.minimum + PxVec3(gridSpacing); voxelBounds.fattenFast(0.1f * gridSpacing); bvh.query(voxelBounds, queryTris); for (PxI32 j = 0; j < 8; j++) { PxI32 id = v.ids[j]; if (!isSurfaceVert[id]) continue; PxVec3& p = tetVerts[id]; PxF32 minDist2 = PX_MAX_F32; PxVec3 closest(PxZero); for (PxU32 k = 0; k < queryTris.size(); k++) { PxI32 triNr = queryTris[k]; const PxVec3& p0 = surfaceVerts[surfaceTriIds[3 * triNr]]; const PxVec3& p1 = surfaceVerts[surfaceTriIds[3 * triNr + 1]]; const PxVec3& p2 = surfaceVerts[surfaceTriIds[3 * triNr + 2]]; PxVec3 c, bary; getClosestPointOnTriangle(p0, p1, p2, p, c, bary); PxF32 dist2 = (c - p).magnitudeSquared(); if (dist2 < minDist2) { minDist2 = dist2; closest = c; } } if (minDist2 < PX_MAX_F32) { PxVec3 n = p - closest; n.normalize(); targetVertPos[id] = closest + n * surfaceDist; } } } } // ----------------------------------------------------------------------------------- void VoxelTetrahedralizer::createTets(bool subdivBorder, PxU32 numTetsPerVoxel) { if (numTetsPerVoxel < 5 || numTetsPerVoxel > 6) return; createUniqueTetVertices(); PxArray<Voxel> prevVoxels; PxArray<PxI32> numVertVoxels(tetVerts.size(), 0); tetIds.clear(); for (PxU32 i = 0; i < voxels.size(); i++) { Voxel& v = voxels[i]; for (PxI32 j = 0; j < 8; j++) numVertVoxels[v.ids[j]]++; PxI32 parity = (v.xi + v.yi + v.zi) % 2; if (v.inner || !subdivBorder) { if (numTetsPerVoxel == 6) { for (PxI32 j = 0; j < 6; j++) { tetIds.pushBack(v.ids[cubeSixTets[j][0]]); tetIds.pushBack(v.ids[cubeSixTets[j][1]]); tetIds.pushBack(v.ids[cubeSixTets[j][2]]); tetIds.pushBack(v.ids[cubeSixTets[j][3]]); } } else if (numTetsPerVoxel == 5) { for (PxI32 j = 0; j < 5; j++) { tetIds.pushBack(v.ids[cubeFiveTets[parity][j][0]]); tetIds.pushBack(v.ids[cubeFiveTets[parity][j][1]]); tetIds.pushBack(v.ids[cubeFiveTets[parity][j][2]]); tetIds.pushBack(v.ids[cubeFiveTets[parity][j][3]]); } } } else { PxVec3 p(PxZero); for (PxI32 j = 0; j < 8; j++) p += tetVerts[v.ids[j]]; p /= 8.0; PxI32 newId = tetVerts.size(); tetVerts.pushBack(p); origTetVerts.pushBack(p); numVertVoxels.pushBack(8); for (PxI32 j = 0; j < 12; j++) { const int* localIds; if (numTetsPerVoxel == 6) localIds = cubeSixSubdivTets[j]; else localIds = cubeFiveSubdivTets[parity][j]; for (PxI32 k = 0; k < 4; k++) { PxI32 id = localIds[k] < 8 ? 
v.ids[localIds[k]] : newId; tetIds.pushBack(id); } } } } isSurfaceVert.resize(tetVerts.size(), false); for (PxU32 i = 0; i < tetVerts.size(); i++) isSurfaceVert[i] = numVertVoxels[i] < 8; // randomize tets PxU32 numTets = tetIds.size() / 4; //for (PxU32 i = 0; i < numTets - 1; i++) { // PxI32 ri = i + rand() % (numTets - i); // for (PxI32 j = 0; j < 4; j++) { // PxI32 id = tetIds[4 * i + j]; tetIds[4 * i + j] = tetIds[4 * ri + j]; tetIds[4 * ri + j] = id; // } //} // edges MultiList<int> adjVerts; edgeIds.clear(); adjVerts.clear(); adjVerts.reserve(tetVerts.size()); for (PxU32 i = 0; i < numTets; i++) { for (PxI32 j = 0; j < 6; j++) { PxI32 id0 = tetIds[4 * i + tetEdges[j][0]]; PxI32 id1 = tetIds[4 * i + tetEdges[j][1]]; if (!adjVerts.exists(id0, id1)) { edgeIds.pushBack(id0); edgeIds.pushBack(id1); adjVerts.addUnique(id0, id1); adjVerts.addUnique(id1, id0); } } } } // ----------------------------------------------------------------------------------- void VoxelTetrahedralizer::conserveVolume(PxF32 relMinVolume) { PxVec3 grads[4]; PxU32 numTets = tetIds.size() / 4; for (PxU32 i = 0; i < numTets; i++) { PxI32* ids = &tetIds[4 * i]; PxF32 w = 0.0f; for (PxI32 j = 0; j < 4; j++) { PxI32 id0 = ids[volIdOrder[j][0]]; PxI32 id1 = ids[volIdOrder[j][1]]; PxI32 id2 = ids[volIdOrder[j][2]]; grads[j] = (tetVerts[id1] - tetVerts[id0]).cross(tetVerts[id2] - tetVerts[id0]); w += grads[j].magnitudeSquared(); } if (w == 0.0f) continue; PxVec3& p0 = tetVerts[ids[0]]; PxF32 V = (tetVerts[ids[1]] - p0).cross(tetVerts[ids[2]] - p0).dot(tetVerts[ids[3]] - p0); PxVec3& origP0 = origTetVerts[ids[0]]; PxF32 origV = (origTetVerts[ids[1]] - origP0).cross(origTetVerts[ids[2]] - origP0).dot(origTetVerts[ids[3]] - origP0); PxF32 minV = relMinVolume * origV; if (V < minV) { PxF32 C = V - minV; PxF32 lambda = -C / w; for (PxI32 j = 0; j < 4; j++) { tetVerts[ids[j]] += grads[j] * lambda; } } } } // ------------------------------------------------------------------------------------- void VoxelTetrahedralizer::relax(PxI32 numIters, PxF32 relMinVolume) { const PxF32 targetScale = 0.3f; const PxF32 edgeScale = 0.3f; for (PxI32 iter = 0; iter < numIters; iter++) { PxU32 numVerts = tetVerts.size(); for (PxU32 i = 0; i < numVerts; i++) { if (isSurfaceVert[i]) { PxVec3 offset = (targetVertPos[i] - tetVerts[i]) * targetScale; tetVerts[i] += offset; } } for (PxU32 i = 0; i < edgeIds.size(); i += 2) { PxI32 id0 = edgeIds[i]; PxI32 id1 = edgeIds[i + 1]; PxF32 w0 = isSurfaceVert[id0] ? 0.0f : 1.0f; PxF32 w1 = isSurfaceVert[id1] ? 
0.0f : 1.0f; PxF32 w = w0 + w1; if (w == 0.0f) continue; PxVec3& p0 = tetVerts[id0]; PxVec3& p1 = tetVerts[id1]; PxVec3 e = (p1 - p0) * edgeScale; if (w == 1.0f) e *= 0.5f; p0 += w0 / w * e; p1 -= w1 / w * e; } conserveVolume(relMinVolume); } PxI32 volIters = 2; for (PxI32 volIter = 0; volIter < volIters; volIter++) conserveVolume(relMinVolume); } // ----------------------------------------------------------------------------------- static PxF32 max3(PxF32 f0, PxF32 f1, PxF32 f2) { return PxMax(f0, PxMax(f1, f2)); } static PxF32 min3(PxF32 f0, PxF32 f1, PxF32 f2) { return PxMin(f0, PxMin(f1, f2)); } static PxF32 minMax(PxF32 f0, PxF32 f1, PxF32 f2) { return PxMax(-max3(f0, f1, f2), min3(f0, f1, f2)); } // ----------------------------------------------------------------------------------- static bool boxTriangleIntersection( PxVec3 p0, PxVec3 p1, PxVec3 p2, PxVec3 center, PxVec3 extents) { PxVec3 v0 = p0 - center, v1 = p1 - center, v2 = p2 - center; PxVec3 f0 = p1 - p0, f1 = p2 - p1, f2 = p0 - p2; PxF32 r; PxVec3 n = f0.cross(f1); PxF32 d = n.dot(v0); r = extents.x * fabsf(n.x) + extents.y * fabsf(n.y) + extents.z * fabsf(n.z); if (d > r || d < -r) return false; if (max3(v0.x, v1.x, v2.x) < -extents.x || min3(v0.x, v1.x, v2.x) > extents.x) return false; if (max3(v0.y, v1.y, v2.y) < -extents.y || min3(v0.y, v1.y, v2.y) > extents.y) return false; if (max3(v0.z, v1.z, v2.z) < -extents.z || min3(v0.z, v1.z, v2.z) > extents.z) return false; PxVec3 a00(0.0f, -f0.z, f0.y); r = extents.y * fabsf(f0.z) + extents.z * fabsf(f0.y); if (minMax(v0.dot(a00), v1.dot(a00), v2.dot(a00)) > r) return false; PxVec3 a01(0.0f, -f1.z, f1.y); r = extents.y * fabsf(f1.z) + extents.z * fabsf(f1.y); if (minMax(v0.dot(a01), v1.dot(a01), v2.dot(a01)) > r) return false; PxVec3 a02(0.0f, -f2.z, f2.y); r = extents.y * fabsf(f2.z) + extents.z * fabsf(f2.y); if (minMax(v0.dot(a02), v1.dot(a02), v2.dot(a02)) > r) return false; PxVec3 a10(f0.z, 0.0f, -f0.x); r = extents.x * fabsf(f0.z) + extents.z * fabsf(f0.x); if (minMax(v0.dot(a10), v1.dot(a10), v2.dot(a10)) > r) return false; PxVec3 a11(f1.z, 0.0f, -f1.x); r = extents.x * fabsf(f1.z) + extents.z * fabsf(f1.x); if (minMax(v0.dot(a11), v1.dot(a11), v2.dot(a11)) > r) return false; PxVec3 a12(f2.z, 0.0f, -f2.x); r = extents.x * fabsf(f2.z) + extents.z * fabsf(f2.x); if (minMax(v0.dot(a12), v1.dot(a12), v2.dot(a12)) > r) return false; PxVec3 a20(-f0.y, f0.x, 0.0f); r = extents.x * fabsf(f0.y) + extents.y * fabsf(f0.x); if (minMax(v0.dot(a20), v1.dot(a20), v2.dot(a20)) > r) return false; PxVec3 a21(-f1.y, f1.x, 0.0f); r = extents.x * fabsf(f1.y) + extents.y * fabsf(f1.x); if (minMax(v0.dot(a21), v1.dot(a21), v2.dot(a21)) > r) return false; PxVec3 a22(-f2.y, f2.x, 0.0f); r = extents.x * fabsf(f2.y) + extents.y * fabsf(f2.x); if (minMax(v0.dot(a22), v1.dot(a22), v2.dot(a22)) > r) return false; return true; } // ----------------------------------------------------------------------------------- static void getClosestPointOnTriangle( PxVec3 p1, PxVec3 p2, PxVec3 p3, PxVec3 p, PxVec3& closest, PxVec3& bary) { PxVec3 e0 = p2 - p1; PxVec3 e1 = p3 - p1; PxVec3 tmp = p1 - p; PxF32 a = e0.dot(e0); PxF32 b = e0.dot(e1); PxF32 c = e1.dot(e1); PxF32 d = e0.dot(tmp); PxF32 e = e1.dot(tmp); PxVec3 coords, clampedCoords; coords.x = b * e - c * d; // s * det coords.y = b * d - a * e; // t * det coords.z = a * c - b * b; // det clampedCoords = PxVec3(0.0f, 0.0f, 0.0f); if (coords.x <= 0.0f) { if (c != 0.0f) clampedCoords.y = -e / c; } else if (coords.y <= 0.0f) { if (a != 0.0f) clampedCoords.x = 
-d / a; } else if (coords.x + coords.y > coords.z) { PxF32 denominator = a + c - b - b; PxF32 numerator = c + e - b - d; if (denominator != 0.0f) { clampedCoords.x = numerator / denominator; clampedCoords.y = 1.0f - clampedCoords.x; } } else { // all inside if (coords.z != 0.0f) { clampedCoords.x = coords.x / coords.z; clampedCoords.y = coords.y / coords.z; } } clampedCoords.x = PxMax(clampedCoords.x, 0.0f); clampedCoords.y = PxMax(clampedCoords.y, 0.0f); clampedCoords.x = PxMin(clampedCoords.x, 1.0f); clampedCoords.y = PxMin(clampedCoords.y, 1.0f); closest = p1 + e0 * clampedCoords.x + e1 * clampedCoords.y; bary.x = 1.0f - clampedCoords.x - clampedCoords.y; bary.y = clampedCoords.x; bary.z = clampedCoords.y; } } }
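A minimal usage sketch of the VoxelTetrahedralizer above; the grid resolution, relaxation iteration count and relative minimum tet volume are example values only.

#include "ExtVoxelTetrahedralizer.h"

// Build a voxel-based tetmesh that approximately matches a triangle surface.
static void voxelTetmesh(const physx::PxArray<physx::PxVec3>& surfaceVerts,
                         const physx::PxArray<physx::PxU32>& surfaceTris)
{
    using namespace physx;

    Ext::VoxelTetrahedralizer tetrahedralizer;

    // Voxelize, pull border vertices toward the surface over a few relaxation
    // iterations, and keep every tet above 10% of its rest volume.
    tetrahedralizer.createTetMesh(surfaceVerts, surfaceTris, 32, 5, 0.1f);

    PxArray<PxVec3> tetVerts;
    PxArray<PxU32>  tetIndices;
    tetrahedralizer.readBack(tetVerts, tetIndices);
}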
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/tet/ExtInsideTester.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. #include "ExtInsideTester.h" #include "foundation/PxBounds3.h" namespace physx { namespace Ext { // ------------------------------------------------------------------------ void InsideTester::init(const PxVec3 *vertices, PxI32 numVertices, const PxI32 *triIndices, PxI32 numTris) { PxArray<PxI32> newIds(numVertices, -1); mVertices.clear(); mIndices.clear(); for (PxI32 i = 0; i < 3 * numTris; i++) { PxI32 id = triIndices[i]; if (newIds[id] < 0) { newIds[id] = PxI32(mVertices.size()); mVertices.pushBack(vertices[id]); } mIndices.pushBack(newIds[id]); } mGrids[0].init(0, mVertices, mIndices); mGrids[1].init(1, mVertices, mIndices); mGrids[2].init(2, mVertices, mIndices); } // ------------------------------------------------------------------------ bool InsideTester::isInside(const PxVec3 &pos) { PxI32 vote = 0; vote += mGrids[0].numInside(pos, mVertices, mIndices); vote += mGrids[1].numInside(pos, mVertices, mIndices); vote += mGrids[2].numInside(pos, mVertices, mIndices); return (vote > 3); } // ------------------------------------------------------------------------ void InsideTester::Grid2d::init(PxI32 _dim0, const PxArray<PxVec3> &vertices, const PxArray<PxI32> &indices) { first.clear(); tris.clear(); next.clear(); num1 = num2 = 0; this->dim0 = _dim0; PxI32 dim1 = (_dim0 + 1) % 3; PxI32 dim2 = (_dim0 + 2) % 3; PxI32 numTris = PxI32(indices.size()) / 3; if (numTris == 0) return; PxBounds3 bounds, triBounds; bounds.setEmpty(); PxReal avgSize = 0.0f; for (PxI32 i = 0; i < numTris; i++) { triBounds.setEmpty(); triBounds.include(vertices[indices[3 * i]]); triBounds.include(vertices[indices[3 * i + 1]]); triBounds.include(vertices[indices[3 * i + 2]]); triBounds.minimum[dim0] = 0.0f; triBounds.maximum[dim0] = 0.0f; avgSize += triBounds.getDimensions().magnitude(); bounds.include(triBounds); } if (bounds.isEmpty()) return; avgSize /= PxReal(numTris); orig = bounds.minimum; spacing = avgSize; num1 = PxI32((bounds.maximum[dim1] - orig[dim1]) / spacing) + 2; num2 = 
PxI32((bounds.maximum[dim2] - orig[dim2]) / spacing) + 2; first.clear(); first.resize(num1 * num2, -1); for (PxI32 i = 0; i < numTris; i++) { triBounds.setEmpty(); triBounds.include(vertices[indices[3 * i]] - orig); triBounds.include(vertices[indices[3 * i + 1]] - orig); triBounds.include(vertices[indices[3 * i + 2]] - orig); PxI32 min1 = PxI32(triBounds.minimum[dim1] / spacing); PxI32 min2 = PxI32(triBounds.minimum[dim2] / spacing); PxI32 max1 = PxI32(triBounds.maximum[dim1] / spacing); PxI32 max2 = PxI32(triBounds.maximum[dim2] / spacing); for (PxI32 i1 = min1; i1 <= max1; i1++) { for (PxI32 i2 = min2; i2 <= max2; i2++) { PxI32 nr = i1 * num2 + i2; next.pushBack(first[nr]); first[nr] = PxI32(tris.size()); tris.pushBack(i); } } } } // ------------------------------------------------------------------------ PxI32 InsideTester::Grid2d::numInside(const PxVec3 &pos, const PxArray<PxVec3> &vertices, const PxArray<PxI32> &indices) { if (first.empty()) return 0; PxI32 dim1 = (dim0 + 1) % 3; PxI32 dim2 = (dim0 + 2) % 3; PxReal r = 1e-5f; PxVec3 p = pos; p[dim1] = pos[dim1] + rnd.rand(0.0f, r); p[dim2] = pos[dim2] + rnd.rand(0.0f, r); PxI32 i1 = PxI32((p[dim1] - orig[dim1]) / spacing); PxI32 i2 = PxI32((p[dim2] - orig[dim2]) / spacing); if (i1 < 0 || i1 >= num1 || i2 < 0 || i2 >= num2) return false; PxI32 count1 = 0; PxI32 count2 = 0; PxI32 nr = first[i1 * num2 + i2]; while (nr >= 0) { PxI32 triNr = tris[nr]; nr = next[nr]; const PxVec3 &p0 = vertices[indices[3 * triNr]]; const PxVec3 &p1 = vertices[indices[3 * triNr + 1]]; const PxVec3 &p2 = vertices[indices[3 * triNr + 2]]; bool side0 = (p1 - p0).cross(p - p0)[dim0] > 0.0f; bool side1 = (p2 - p1).cross(p - p1)[dim0] > 0.0f; bool side2 = (p0 - p2).cross(p - p2)[dim0] > 0.0f; if (side0 != side1 || side1 != side2) continue; // ray triangle intersection PxVec3 n = (p1 - p0).cross(p2 - p0); if (n[dim0] == 0.0f) continue; PxReal t = (p0 - p).dot(n) / n[dim0]; if (t > 0.0f) count1++; else if (t < 0.0f) count2++; } PxI32 num = 0; if ((count1 % 2) == 1) num++; if ((count2 % 2) == 1) num++; return num; } } }
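A short usage sketch of the InsideTester above: each of the three axis-aligned grids casts rays in both directions along its axis, so a sample counts as inside only when more than three of the six parity tests vote for it.

#include "ExtInsideTester.h"

// Classify sample points against a closed triangle surface.
static void markInsidePoints(const physx::PxArray<physx::PxVec3>& meshVerts,
                             const physx::PxArray<physx::PxI32>& meshTris,
                             const physx::PxArray<physx::PxVec3>& samples,
                             physx::PxArray<bool>& inside)
{
    using namespace physx;

    Ext::InsideTester tester;
    tester.init(meshVerts.begin(), PxI32(meshVerts.size()),
                meshTris.begin(), PxI32(meshTris.size()) / 3);

    inside.resize(samples.size());
    for (PxU32 i = 0; i < samples.size(); i++)
        inside[i] = tester.isInside(samples[i]);
}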
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/omnipvd/ExtOmniPvdRegistrationData.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #include "ExtOmniPvdRegistrationData.h" #if PX_SUPPORT_OMNI_PVD namespace physx { namespace Ext { void OmniPvdPxExtensionsRegistrationData::registerData(OmniPvdWriter& writer) { // auto-generate class/attribute registration code from object definition file #define OMNI_PVD_WRITER_VAR writer #include "omnipvd/CmOmniPvdAutoGenRegisterData.h" #include "OmniPvdPxExtensionsTypes.h" #include "omnipvd/CmOmniPvdAutoGenClearDefines.h" #undef OMNI_PVD_WRITER_VAR } } } #endif
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/omnipvd/OmniPvdPxExtensionsSampler.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef OMNI_PVD_EXTENSION_SAMPLER_H #define OMNI_PVD_EXTENSION_SAMPLER_H #if PX_SUPPORT_OMNI_PVD #include "foundation/PxUserAllocated.h" #include "ExtOmniPvdRegistrationData.h" namespace physx { class PxOmniPvd; } class OmniPvdPxExtensionsSampler : public physx::PxUserAllocated { public: OmniPvdPxExtensionsSampler(); ~OmniPvdPxExtensionsSampler(); void setOmniPvdInstance(physx::PxOmniPvd* omniPvdInstance); physx::PxOmniPvd* getOmniPvdInstance(); void registerClasses(); const physx::Ext::OmniPvdPxExtensionsRegistrationData& getRegistrationData() const { return mRegistrationData; } // OmniPvdPxExtensionsSampler singleton static bool createInstance(); static OmniPvdPxExtensionsSampler* getInstance(); static void destroyInstance(); private: physx::PxOmniPvd* mOmniPvdInstance; physx::Ext::OmniPvdPxExtensionsRegistrationData mRegistrationData; }; namespace physx { namespace Ext { const OmniPvdPxExtensionsRegistrationData* OmniPvdGetPxExtensionsRegistrationData(); PxOmniPvd* OmniPvdGetInstance(); } } #endif #endif
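// Example (not part of the original file): a sketch of how the singleton declared above could
// be driven, assuming a valid physx::PxOmniPvd* has already been created elsewhere. The
// function names startExtensionsOmniPvd/stopExtensionsOmniPvd are hypothetical; the actual
// call sites inside the extensions library are not shown here.
#if PX_SUPPORT_OMNI_PVD
#include "OmniPvdPxExtensionsSampler.h"

static bool startExtensionsOmniPvd(physx::PxOmniPvd* omniPvd)
{
    if (!OmniPvdPxExtensionsSampler::createInstance())
        return false;

    OmniPvdPxExtensionsSampler* sampler = OmniPvdPxExtensionsSampler::getInstance();
    sampler->setOmniPvdInstance(omniPvd);
    sampler->registerClasses();     // registers the extensions classes/attributes with the writer
    return true;
}

static void stopExtensionsOmniPvd()
{
    OmniPvdPxExtensionsSampler::destroyInstance();
}
#endif // PX_SUPPORT_OMNI_PVD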
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/omnipvd/ExtOmniPvdSetData.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef EXT_OMNI_PVD_SET_DATA_H #define EXT_OMNI_PVD_SET_DATA_H // // This header has to be included to use macros like OMNI_PVD_SET() (set attribute values, // object instance registration etc.) // #define OMNI_PVD_CONTEXT_HANDLE 1 #if PX_SUPPORT_OMNI_PVD #include "OmniPvdPxExtensionsSampler.h" #include "omnipvd/PxOmniPvd.h" // // Define the macros needed in CmOmniPvdAutoGenSetData.h // #undef OMNI_PVD_GET_WRITER #define OMNI_PVD_GET_WRITER(writer) \ physx::PxOmniPvd::ScopedExclusiveWriter writeLock(physx::Ext::OmniPvdGetInstance()); \ OmniPvdWriter* writer = writeLock.getWriter(); #undef OMNI_PVD_GET_REGISTRATION_DATA #define OMNI_PVD_GET_REGISTRATION_DATA(registrationData) \ const OmniPvdPxExtensionsRegistrationData* registrationData = physx::Ext::OmniPvdGetPxExtensionsRegistrationData(); #endif // PX_SUPPORT_OMNI_PVD #include "omnipvd/CmOmniPvdAutoGenSetData.h" // note: included in all cases since it will provide empty definitions of the helper macros such // that not all of them have to be guarded by PX_SUPPORT_OMNI_PVD #endif // EXT_OMNI_PVD_SET_DATA_H
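// Example (not part of the original file): what a hand-written call site would look like after
// the OMNI_PVD_GET_WRITER macro defined above has been expanded (PX_SUPPORT_OMNI_PVD enabled).
// The function name is hypothetical; real call sites are generated via CmOmniPvdAutoGenSetData.h
// rather than written out like this.
#if PX_SUPPORT_OMNI_PVD
static void exampleManualPvdWrite()
{
    // OMNI_PVD_GET_WRITER(writer) expands to the next two lines: hold the exclusive write lock
    // for the remainder of the scope and fetch the writer (NULL when no OmniPVD session is active).
    physx::PxOmniPvd::ScopedExclusiveWriter writeLock(physx::Ext::OmniPvdGetInstance());
    OmniPvdWriter* writer = writeLock.getWriter();
    if (writer)
    {
        // attribute writes would go here, looked up through
        // physx::Ext::OmniPvdGetPxExtensionsRegistrationData()
    }
}
#endif // PX_SUPPORT_OMNI_PVD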
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/omnipvd/OmniPvdPxExtensionsSampler.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #if PX_SUPPORT_OMNI_PVD #include "OmniPvdPxExtensionsSampler.h" #include "omnipvd/PxOmniPvd.h" using namespace physx; void OmniPvdPxExtensionsSampler::registerClasses() { PxOmniPvd::ScopedExclusiveWriter scope(mOmniPvdInstance); OmniPvdWriter* writer = scope.getWriter(); if (writer) { mRegistrationData.registerData(*mOmniPvdInstance->getWriter()); } } OmniPvdPxExtensionsSampler::OmniPvdPxExtensionsSampler() { mOmniPvdInstance = NULL; } OmniPvdPxExtensionsSampler::~OmniPvdPxExtensionsSampler() { } void OmniPvdPxExtensionsSampler::setOmniPvdInstance(physx::PxOmniPvd* omniPvdInstance) { mOmniPvdInstance = omniPvdInstance; } physx::PxOmniPvd* OmniPvdPxExtensionsSampler::getOmniPvdInstance() { return mOmniPvdInstance; } /////////////////////////////////////////////////////////////////////////////// static OmniPvdPxExtensionsSampler* gOmniPvdPxExtensionsSampler = NULL; bool OmniPvdPxExtensionsSampler::createInstance() { gOmniPvdPxExtensionsSampler = PX_NEW(OmniPvdPxExtensionsSampler)(); return gOmniPvdPxExtensionsSampler != NULL; } OmniPvdPxExtensionsSampler* OmniPvdPxExtensionsSampler::getInstance() { return gOmniPvdPxExtensionsSampler; } void OmniPvdPxExtensionsSampler::destroyInstance() { PX_DELETE(gOmniPvdPxExtensionsSampler); } namespace physx { namespace Ext { const OmniPvdPxExtensionsRegistrationData* OmniPvdGetPxExtensionsRegistrationData() { OmniPvdPxExtensionsSampler* sampler = OmniPvdPxExtensionsSampler::getInstance(); if (sampler) { return &sampler->getRegistrationData(); } else { return NULL; } } PxOmniPvd* OmniPvdGetInstance() { OmniPvdPxExtensionsSampler* sampler = OmniPvdPxExtensionsSampler::getInstance(); if (sampler) { return sampler->getOmniPvdInstance(); } else { return NULL; } } } } #endif
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/omnipvd/ExtOmniPvdRegistrationData.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef EXT_OMNI_PVD_REGISTRATION_DATA_H #define EXT_OMNI_PVD_REGISTRATION_DATA_H #if PX_SUPPORT_OMNI_PVD #include "../pvdruntime/include/OmniPvdWriter.h" #include "extensions/PxD6Joint.h" // for PxD6Motion #include "extensions/PxDistanceJoint.h" // for PxDistanceJointFlags #include "extensions/PxPrismaticJoint.h" // for PxPrismaticJointFlags #include "extensions/PxRevoluteJoint.h" // for PxRevoluteJointFlags #include "extensions/PxSphericalJoint.h" // for PxSphericalJointFlags #include "extensions/PxCustomGeometryExt.h" // for PxCustomGeometryExtBaseConvexCallbacks etc. namespace physx { class PxContactJoint; class PxFixedJoint; class PxGearJoint; class PxRackAndPinionJoint; namespace Ext { struct OmniPvdPxExtensionsRegistrationData { void registerData(OmniPvdWriter&); // auto-generate members and setter methods from object definition file #include "omnipvd/CmOmniPvdAutoGenCreateRegistrationStruct.h" #include "OmniPvdPxExtensionsTypes.h" #include "omnipvd/CmOmniPvdAutoGenClearDefines.h" }; } } #endif // PX_SUPPORT_OMNI_PVD #endif // EXT_OMNI_PVD_REGISTRATION_DATA_H
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/omnipvd/OmniPvdPxExtensionsTypes.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. // Declare OMNI_PVD Types and Attributes here! // The last two attribute parameters could now be derived from the other data, so could be removed in a refactor, // though explicit control may be better. // Note that HANDLE attributes have to use (Type const *) style, otherwise it won't compile! 
//////////////////////////////////////////////////////////////////////////////// // Enums //////////////////////////////////////////////////////////////////////////////// OMNI_PVD_ENUM_BEGIN (PxConstraintFlag) OMNI_PVD_ENUM_VALUE (PxConstraintFlag, eBROKEN) OMNI_PVD_ENUM_VALUE (PxConstraintFlag, eCOLLISION_ENABLED) OMNI_PVD_ENUM_VALUE (PxConstraintFlag, eVISUALIZATION) OMNI_PVD_ENUM_VALUE (PxConstraintFlag, eDRIVE_LIMITS_ARE_FORCES) OMNI_PVD_ENUM_VALUE (PxConstraintFlag, eIMPROVED_SLERP) OMNI_PVD_ENUM_VALUE (PxConstraintFlag, eDISABLE_PREPROCESSING) OMNI_PVD_ENUM_VALUE (PxConstraintFlag, eENABLE_EXTENDED_LIMITS) OMNI_PVD_ENUM_VALUE (PxConstraintFlag, eGPU_COMPATIBLE) OMNI_PVD_ENUM_VALUE (PxConstraintFlag, eALWAYS_UPDATE) OMNI_PVD_ENUM_VALUE (PxConstraintFlag, eDISABLE_CONSTRAINT) OMNI_PVD_ENUM_END (PxConstraintFlag) OMNI_PVD_ENUM_BEGIN (PxRevoluteJointFlag) OMNI_PVD_ENUM_VALUE (PxRevoluteJointFlag, eLIMIT_ENABLED) OMNI_PVD_ENUM_VALUE (PxRevoluteJointFlag, eDRIVE_ENABLED) OMNI_PVD_ENUM_VALUE (PxRevoluteJointFlag, eDRIVE_FREESPIN) OMNI_PVD_ENUM_END (PxRevoluteJointFlag) OMNI_PVD_ENUM_BEGIN (PxPrismaticJointFlag) OMNI_PVD_ENUM_VALUE (PxPrismaticJointFlag, eLIMIT_ENABLED) OMNI_PVD_ENUM_END (PxPrismaticJointFlag) OMNI_PVD_ENUM_BEGIN (PxDistanceJointFlag) OMNI_PVD_ENUM_VALUE (PxDistanceJointFlag, eMAX_DISTANCE_ENABLED) OMNI_PVD_ENUM_VALUE (PxDistanceJointFlag, eMIN_DISTANCE_ENABLED) OMNI_PVD_ENUM_VALUE (PxDistanceJointFlag, eSPRING_ENABLED) OMNI_PVD_ENUM_END (PxDistanceJointFlag) OMNI_PVD_ENUM_BEGIN (PxSphericalJointFlag) OMNI_PVD_ENUM_VALUE (PxSphericalJointFlag, eLIMIT_ENABLED) OMNI_PVD_ENUM_END (PxSphericalJointFlag) OMNI_PVD_ENUM_BEGIN (PxD6JointDriveFlag) OMNI_PVD_ENUM_VALUE (PxD6JointDriveFlag, eACCELERATION) OMNI_PVD_ENUM_END (PxD6JointDriveFlag) OMNI_PVD_ENUM_BEGIN (PxJointConcreteType) OMNI_PVD_ENUM_VALUE (PxJointConcreteType, eSPHERICAL) OMNI_PVD_ENUM_VALUE (PxJointConcreteType, eREVOLUTE) OMNI_PVD_ENUM_VALUE (PxJointConcreteType, ePRISMATIC) OMNI_PVD_ENUM_VALUE (PxJointConcreteType, eFIXED) OMNI_PVD_ENUM_VALUE (PxJointConcreteType, eDISTANCE) OMNI_PVD_ENUM_VALUE (PxJointConcreteType, eD6) OMNI_PVD_ENUM_VALUE (PxJointConcreteType, eCONTACT) OMNI_PVD_ENUM_VALUE (PxJointConcreteType, eGEAR) OMNI_PVD_ENUM_VALUE (PxJointConcreteType, eRACK_AND_PINION) OMNI_PVD_ENUM_END (PxJointConcreteType) OMNI_PVD_ENUM_BEGIN (PxD6Motion) OMNI_PVD_ENUM_VALUE (PxD6Motion, eLOCKED) OMNI_PVD_ENUM_VALUE (PxD6Motion, eLIMITED) OMNI_PVD_ENUM_VALUE (PxD6Motion, eFREE) OMNI_PVD_ENUM_END (PxD6Motion) //////////////////////////////////////////////////////////////////////////////// // Classes //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // PxJoint //////////////////////////////////////////////////////////////////////////////// OMNI_PVD_CLASS_BEGIN (PxJoint) OMNI_PVD_ATTRIBUTE_FLAG (PxJoint, type, PxJointConcreteType::Enum, PxJointConcreteType) OMNI_PVD_ATTRIBUTE (PxJoint, actor0, PxRigidActor* const, OmniPvdDataType::eOBJECT_HANDLE) OMNI_PVD_ATTRIBUTE (PxJoint, actor1, PxRigidActor* const, OmniPvdDataType::eOBJECT_HANDLE) OMNI_PVD_ATTRIBUTE_ARRAY_FIXED_SIZE (PxJoint, actor0LocalPose, PxTransform, OmniPvdDataType::eFLOAT32, 7) OMNI_PVD_ATTRIBUTE_ARRAY_FIXED_SIZE (PxJoint, actor1LocalPose, PxTransform, OmniPvdDataType::eFLOAT32, 7) OMNI_PVD_ATTRIBUTE (PxJoint, breakForce, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxJoint, breakTorque, PxReal, OmniPvdDataType::eFLOAT32) 
OMNI_PVD_ATTRIBUTE_FLAG (PxJoint, constraintFlags, PxConstraintFlags, PxConstraintFlag) OMNI_PVD_ATTRIBUTE (PxJoint, invMassScale0, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxJoint, invInertiaScale0, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxJoint, invMassScale1, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxJoint, invInertiaScale1, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE_STRING (PxJoint, name) OMNI_PVD_ATTRIBUTE_STRING (PxJoint, concreteTypeName) OMNI_PVD_CLASS_END (PxJoint) //////////////////////////////////////////////////////////////////////////////// // PxFixedJoint //////////////////////////////////////////////////////////////////////////////// OMNI_PVD_CLASS_DERIVED_BEGIN (PxFixedJoint, PxJoint) OMNI_PVD_CLASS_END (PxFixedJoint) //////////////////////////////////////////////////////////////////////////////// // PxPrismaticJoint //////////////////////////////////////////////////////////////////////////////// OMNI_PVD_CLASS_DERIVED_BEGIN (PxPrismaticJoint, PxJoint) OMNI_PVD_ATTRIBUTE (PxPrismaticJoint, position, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxPrismaticJoint, velocity, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxPrismaticJoint, limitLower, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxPrismaticJoint, limitUpper, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxPrismaticJoint, limitRestitution, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxPrismaticJoint, limitBounceThreshold, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxPrismaticJoint, limitStiffness, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxPrismaticJoint, limitDamping, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE_FLAG (PxPrismaticJoint, jointFlags, PxPrismaticJointFlags, PxPrismaticJointFlag) OMNI_PVD_CLASS_END (PxPrismaticJoint) //////////////////////////////////////////////////////////////////////////////// // PxRevoluteJoint //////////////////////////////////////////////////////////////////////////////// OMNI_PVD_CLASS_DERIVED_BEGIN (PxRevoluteJoint, PxJoint) OMNI_PVD_ATTRIBUTE (PxRevoluteJoint, angle, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxRevoluteJoint, velocity, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxRevoluteJoint, limitLower, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxRevoluteJoint, limitUpper, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxRevoluteJoint, limitRestitution, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxRevoluteJoint, limitBounceThreshold, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxRevoluteJoint, limitStiffness, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxRevoluteJoint, limitDamping, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxRevoluteJoint, driveVelocity, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxRevoluteJoint, driveForceLimit, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxRevoluteJoint, driveGearRatio, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE_FLAG (PxRevoluteJoint, jointFlags, PxRevoluteJointFlags, PxRevoluteJointFlag) OMNI_PVD_CLASS_END (PxRevoluteJoint) //////////////////////////////////////////////////////////////////////////////// // PxSphericalJoint //////////////////////////////////////////////////////////////////////////////// OMNI_PVD_CLASS_DERIVED_BEGIN (PxSphericalJoint, PxJoint) OMNI_PVD_ATTRIBUTE (PxSphericalJoint, swingYAngle, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE 
(PxSphericalJoint, swingZAngle, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxSphericalJoint, limitYAngle, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxSphericalJoint, limitZAngle, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxSphericalJoint, limitRestitution, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxSphericalJoint, limitBounceThreshold, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxSphericalJoint, limitStiffness, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxSphericalJoint, limitDamping, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE_FLAG (PxSphericalJoint, jointFlags, PxSphericalJointFlags, PxSphericalJointFlag) OMNI_PVD_CLASS_END (PxSphericalJoint) //////////////////////////////////////////////////////////////////////////////// // PxDistanceJoint //////////////////////////////////////////////////////////////////////////////// OMNI_PVD_CLASS_DERIVED_BEGIN (PxDistanceJoint, PxJoint) OMNI_PVD_ATTRIBUTE (PxDistanceJoint, distance, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxDistanceJoint, minDistance, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxDistanceJoint, maxDistance, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxDistanceJoint, tolerance, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxDistanceJoint, stiffness, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxDistanceJoint, damping, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE_FLAG (PxDistanceJoint, jointFlags, PxDistanceJointFlags, PxDistanceJointFlag) OMNI_PVD_CLASS_END (PxDistanceJoint) //////////////////////////////////////////////////////////////////////////////// // PxContactJoint //////////////////////////////////////////////////////////////////////////////// OMNI_PVD_CLASS_DERIVED_BEGIN (PxContactJoint, PxJoint) OMNI_PVD_ATTRIBUTE_ARRAY_FIXED_SIZE (PxContactJoint, point, PxVec3, OmniPvdDataType::eFLOAT32, 3) OMNI_PVD_ATTRIBUTE_ARRAY_FIXED_SIZE (PxContactJoint, normal, PxVec3, OmniPvdDataType::eFLOAT32, 3) OMNI_PVD_ATTRIBUTE (PxContactJoint, penetration, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxContactJoint, restitution, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxContactJoint, bounceThreshold, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_CLASS_END (PxContactJoint) //////////////////////////////////////////////////////////////////////////////// // PxGearJoint //////////////////////////////////////////////////////////////////////////////// OMNI_PVD_CLASS_DERIVED_BEGIN (PxGearJoint, PxJoint) OMNI_PVD_ATTRIBUTE (PxGearJoint, ratio, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE_ARRAY_VARIABLE_SIZE (PxGearJoint, hinges, PxBase* const, OmniPvdDataType::eOBJECT_HANDLE) OMNI_PVD_CLASS_END (PxGearJoint) //////////////////////////////////////////////////////////////////////////////// // PxRackAndPinionJoint //////////////////////////////////////////////////////////////////////////////// OMNI_PVD_CLASS_DERIVED_BEGIN (PxRackAndPinionJoint, PxJoint) OMNI_PVD_ATTRIBUTE (PxRackAndPinionJoint, ratio, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE_ARRAY_VARIABLE_SIZE (PxRackAndPinionJoint, joints, PxBase* const, OmniPvdDataType::eOBJECT_HANDLE) OMNI_PVD_CLASS_END (PxRackAndPinionJoint) //////////////////////////////////////////////////////////////////////////////// // PxD6Joint //////////////////////////////////////////////////////////////////////////////// OMNI_PVD_CLASS_DERIVED_BEGIN (PxD6Joint, PxJoint) OMNI_PVD_ATTRIBUTE (PxD6Joint, twistAngle, PxReal, 
OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxD6Joint, swingYAngle, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxD6Joint, swingZAngle, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE_ARRAY_VARIABLE_SIZE (PxD6Joint, motions, PxD6Motion::Enum, OmniPvdDataType::eUINT32) OMNI_PVD_ATTRIBUTE (PxD6Joint, distanceLimitValue, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxD6Joint, distanceLimitRestitution, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxD6Joint, distanceLimitBounceThreshold, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxD6Joint, distanceLimitStiffness, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxD6Joint, distanceLimitDamping, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE_ARRAY_VARIABLE_SIZE (PxD6Joint, linearLimitLower, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE_ARRAY_VARIABLE_SIZE (PxD6Joint, linearLimitUpper, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE_ARRAY_VARIABLE_SIZE (PxD6Joint, linearLimitRestitution, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE_ARRAY_VARIABLE_SIZE (PxD6Joint, linearLimitBounceThreshold, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE_ARRAY_VARIABLE_SIZE (PxD6Joint, linearLimitStiffness, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE_ARRAY_VARIABLE_SIZE (PxD6Joint, linearLimitDamping, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxD6Joint, twistLimitLower, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxD6Joint, twistLimitUpper, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxD6Joint, twistLimitRestitution, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxD6Joint, twistLimitBounceThreshold, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxD6Joint, twistLimitStiffness, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxD6Joint, twistLimitDamping, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxD6Joint, swingLimitYAngle, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxD6Joint, swingLimitZAngle, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxD6Joint, swingLimitRestitution, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxD6Joint, swingLimitBounceThreshold, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxD6Joint, swingLimitStiffness, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxD6Joint, swingLimitDamping, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxD6Joint, pyramidSwingLimitYAngleMin, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxD6Joint, pyramidSwingLimitYAngleMax, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxD6Joint, pyramidSwingLimitZAngleMin, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxD6Joint, pyramidSwingLimitZAngleMax, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxD6Joint, pyramidSwingLimitRestitution, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxD6Joint, pyramidSwingLimitBounceThreshold, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxD6Joint, pyramidSwingLimitStiffness, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxD6Joint, pyramidSwingLimitDamping, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE_ARRAY_VARIABLE_SIZE (PxD6Joint, driveForceLimit, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE_ARRAY_VARIABLE_SIZE (PxD6Joint, driveFlags, PxD6JointDriveFlags, OmniPvdDataType::eUINT32) OMNI_PVD_ATTRIBUTE_ARRAY_VARIABLE_SIZE (PxD6Joint, driveStiffness, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE_ARRAY_VARIABLE_SIZE (PxD6Joint, 
driveDamping, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE_ARRAY_FIXED_SIZE (PxD6Joint, drivePosition, PxTransform, OmniPvdDataType::eFLOAT32, 7) OMNI_PVD_ATTRIBUTE_ARRAY_FIXED_SIZE (PxD6Joint, driveLinVelocity, PxVec3, OmniPvdDataType::eFLOAT32, 3) OMNI_PVD_ATTRIBUTE_ARRAY_FIXED_SIZE (PxD6Joint, driveAngVelocity, PxVec3, OmniPvdDataType::eFLOAT32, 3) OMNI_PVD_CLASS_END (PxD6Joint) //////////////////////////////////////////////////////////////////////////////// // PxCustomGeometryExt::BaseConvexCallbacks //////////////////////////////////////////////////////////////////////////////// OMNI_PVD_CLASS_BEGIN (PxCustomGeometryExtBaseConvexCallbacks) OMNI_PVD_ATTRIBUTE (PxCustomGeometryExtBaseConvexCallbacks, margin, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_CLASS_END (PxCustomGeometryExtBaseConvexCallbacks) //////////////////////////////////////////////////////////////////////////////// // PxCustomGeometryExt::CylinderCallbacks //////////////////////////////////////////////////////////////////////////////// OMNI_PVD_CLASS_DERIVED_BEGIN (PxCustomGeometryExtCylinderCallbacks, PxCustomGeometryExtBaseConvexCallbacks) OMNI_PVD_ATTRIBUTE (PxCustomGeometryExtCylinderCallbacks, height, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxCustomGeometryExtCylinderCallbacks, radius, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxCustomGeometryExtCylinderCallbacks, axis, PxI32, OmniPvdDataType::eINT32) OMNI_PVD_CLASS_END (PxCustomGeometryExtCylinderCallbacks) //////////////////////////////////////////////////////////////////////////////// // PxCustomGeometryExt::ConeCallbacks //////////////////////////////////////////////////////////////////////////////// OMNI_PVD_CLASS_DERIVED_BEGIN (PxCustomGeometryExtConeCallbacks, PxCustomGeometryExtBaseConvexCallbacks) OMNI_PVD_ATTRIBUTE (PxCustomGeometryExtConeCallbacks, height, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxCustomGeometryExtConeCallbacks, radius, PxReal, OmniPvdDataType::eFLOAT32) OMNI_PVD_ATTRIBUTE (PxCustomGeometryExtConeCallbacks, axis, PxI32, OmniPvdDataType::eINT32) OMNI_PVD_CLASS_END (PxCustomGeometryExtConeCallbacks)
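// Example (not part of the original file): a hypothetical stand-alone consumer of the definition
// file above. Redefining every OMNI_PVD_* macro it uses to a printf and then including the file
// dumps the declared schema; the real expansions live in the CmOmniPvdAutoGen* headers and write
// into an OmniPvdWriter instead.
#include <cstdio>

#define OMNI_PVD_ENUM_BEGIN(e)                              printf("enum  %s\n", #e);
#define OMNI_PVD_ENUM_VALUE(e, v)                           printf("    %s::%s\n", #e, #v);
#define OMNI_PVD_ENUM_END(e)
#define OMNI_PVD_CLASS_BEGIN(c)                             printf("class %s\n", #c);
#define OMNI_PVD_CLASS_DERIVED_BEGIN(c, base)               printf("class %s : %s\n", #c, #base);
#define OMNI_PVD_CLASS_END(c)
#define OMNI_PVD_ATTRIBUTE(c, a, t, dt)                     printf("    %s.%s\n", #c, #a);
#define OMNI_PVD_ATTRIBUTE_FLAG(c, a, flagsT, flagT)        printf("    %s.%s (flags)\n", #c, #a);
#define OMNI_PVD_ATTRIBUTE_STRING(c, a)                     printf("    %s.%s (string)\n", #c, #a);
#define OMNI_PVD_ATTRIBUTE_ARRAY_FIXED_SIZE(c, a, t, dt, n) printf("    %s.%s[%s]\n", #c, #a, #n);
#define OMNI_PVD_ATTRIBUTE_ARRAY_VARIABLE_SIZE(c, a, t, dt) printf("    %s.%s[]\n", #c, #a);

void dumpExtensionsPvdSchema()
{
#include "OmniPvdPxExtensionsTypes.h"
}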
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/SnSerializationRegistry.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SN_SERIALIZATION_REGISTRY_H #define SN_SERIALIZATION_REGISTRY_H #include "extensions/PxSerialization.h" #include "extensions/PxRepXSerializer.h" #include "foundation/PxUserAllocated.h" #include "foundation/PxHashMap.h" #include "foundation/PxArray.h" namespace physx { namespace Cm { class Collection; } namespace Sn { class SerializationRegistry : public PxSerializationRegistry, public PxUserAllocated { public: SerializationRegistry(PxPhysics& physics); virtual ~SerializationRegistry(); virtual void release(){ PX_DELETE_THIS; } PxPhysics& getPhysics() const { return mPhysics; } //binary void registerSerializer(PxType type, PxSerializer& serializer); PxSerializer* unregisterSerializer(PxType type); void registerBinaryMetaDataCallback(PxBinaryMetaDataCallback callback); void getBinaryMetaData(PxOutputStream& stream) const; const PxSerializer* getSerializer(PxType type) const; const char* getSerializerName(PxU32 index) const; PxType getSerializerType(PxU32 index) const; PxU32 getNbSerializers() const { return mSerializers.size(); } //repx void registerRepXSerializer(PxType type, PxRepXSerializer& serializer); PxRepXSerializer* getRepXSerializer(const char* typeName) const; PxRepXSerializer* unregisterRepXSerializer(PxType type); protected: SerializationRegistry &operator=(const SerializationRegistry &); private: typedef PxCoalescedHashMap<PxType, PxSerializer*> SerializerMap; typedef PxHashMap<PxType, PxRepXSerializer*> RepXSerializerMap; PxPhysics& mPhysics; SerializerMap mSerializers; RepXSerializerMap mRepXSerializers; PxArray<PxBinaryMetaDataCallback> mMetaDataCallbacks; }; void sortCollection(Cm::Collection& collection, SerializationRegistry& sr, bool isRepx); } // Sn } // physx #endif
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/SnSerialUtils.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #include "extensions/PxSerialization.h" #include "foundation/PxPhysicsVersion.h" #include "SnSerialUtils.h" #include "foundation/PxString.h" #include "foundation/PxBasicTemplates.h" using namespace physx; namespace { #define SN_NUM_BINARY_PLATFORMS 9 const PxU32 sBinaryPlatformTags[SN_NUM_BINARY_PLATFORMS] = { PX_MAKE_FOURCC('W','_','3','2'), PX_MAKE_FOURCC('W','_','6','4'), PX_MAKE_FOURCC('L','_','3','2'), PX_MAKE_FOURCC('L','_','6','4'), PX_MAKE_FOURCC('M','_','3','2'), PX_MAKE_FOURCC('M','_','6','4'), PX_MAKE_FOURCC('N','X','3','2'), PX_MAKE_FOURCC('N','X','6','4'), PX_MAKE_FOURCC('L','A','6','4') }; const char* sBinaryPlatformNames[SN_NUM_BINARY_PLATFORMS] = { "win32", "win64", "linux32", "linux64", "mac32", "mac64", "switch32", "switch64", "linuxaarch64" }; } namespace physx { namespace Sn { PxU32 getBinaryPlatformTag() { #if PX_WINDOWS && PX_X86 return sBinaryPlatformTags[0]; #elif PX_WINDOWS && PX_X64 return sBinaryPlatformTags[1]; #elif PX_LINUX && PX_X86 return sBinaryPlatformTags[2]; #elif PX_LINUX && PX_X64 return sBinaryPlatformTags[3]; #elif PX_OSX && PX_X86 return sBinaryPlatformTags[4]; #elif PX_OSX && PX_X64 return sBinaryPlatformTags[5]; #elif PX_SWITCH && !PX_A64 return sBinaryPlatformTags[6]; #elif PX_SWITCH && PX_A64 return sBinaryPlatformTags[7]; #elif PX_LINUX && PX_A64 return sBinaryPlatformTags[8]; #else #error Unknown binary platform #endif } bool isBinaryPlatformTagValid(physx::PxU32 platformTag) { PxU32 platformIndex = 0; while (platformIndex < SN_NUM_BINARY_PLATFORMS && platformTag != sBinaryPlatformTags[platformIndex]) platformIndex++; return platformIndex < SN_NUM_BINARY_PLATFORMS; } const char* getBinaryPlatformName(physx::PxU32 platformTag) { PxU32 platformIndex = 0; while (platformIndex < SN_NUM_BINARY_PLATFORMS && platformTag != sBinaryPlatformTags[platformIndex]) platformIndex++; return (platformIndex == 
SN_NUM_BINARY_PLATFORMS) ? "unknown" : sBinaryPlatformNames[platformIndex]; } const char* getBinaryVersionGuid() { PX_COMPILE_TIME_ASSERT(sizeof(PX_BINARY_SERIAL_VERSION) == SN_BINARY_VERSION_GUID_NUM_CHARS + 1); return PX_BINARY_SERIAL_VERSION; } bool checkCompatibility(const char* binaryVersionGuidCandidate) { for(PxU32 i=0; i<SN_BINARY_VERSION_GUID_NUM_CHARS; i++) { if (binaryVersionGuidCandidate[i] != PX_BINARY_SERIAL_VERSION[i]) { return false; } } return true; } } // Sn } // physx
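// Example (not part of the original file): a standalone illustration of the four-character-code
// platform tags used above. makeFourCC is a local helper that is only assumed to pack the bytes
// in the same order as PX_MAKE_FOURCC; the PhysX macro itself is defined elsewhere in the SDK.
#include <cstdint>
#include <cstdio>

static constexpr uint32_t makeFourCC(char a, char b, char c, char d)
{
    return uint32_t(uint8_t(a)) | (uint32_t(uint8_t(b)) << 8) |
           (uint32_t(uint8_t(c)) << 16) | (uint32_t(uint8_t(d)) << 24);
}

// linear tag -> name lookup, mirroring getBinaryPlatformName() falling back to "unknown"
static const char* platformName(uint32_t tag)
{
    static const uint32_t    tags[]  = { makeFourCC('W','_','6','4'), makeFourCC('L','_','6','4'), makeFourCC('L','A','6','4') };
    static const char* const names[] = { "win64",                     "linux64",                   "linuxaarch64" };
    for (unsigned i = 0; i < sizeof(tags) / sizeof(tags[0]); ++i)
        if (tags[i] == tag)
            return names[i];
    return "unknown";
}

int main()
{
    printf("%s\n", platformName(makeFourCC('W','_','6','4')));  // prints "win64"
    printf("%s\n", platformName(0));                            // prints "unknown"
    return 0;
}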
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/SnSerializationRegistry.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #include "common/PxSerializer.h" #include "foundation/PxString.h" #include "PxPhysics.h" #include "PxPhysicsSerialization.h" #include "PxArticulationLink.h" #include "SnSerializationRegistry.h" #include "ExtSerialization.h" #include "CmCollection.h" using namespace physx; namespace { class CollectionSorter : public PxProcessPxBaseCallback { typedef PxPair<PxBase*, PxSerialObjectId> Object; class Element { public: Object object; PxArray<PxU32> children; bool isFinished; Element(PxBase* obj = NULL) : object(obj, PX_SERIAL_OBJECT_ID_INVALID), isFinished(false) {} }; public: CollectionSorter(Cm::Collection& collection, Sn::SerializationRegistry& sr, bool isRepx) : mCollection(collection), mSr(sr), mIsRepx(isRepx) {} virtual ~CollectionSorter(){} void process(PxBase& base) { addChild(&base); //ArticulationLink is not a repx serializer, so should require Articulation here if( mIsRepx && PxConcreteType::eARTICULATION_LINK == base.getConcreteType() ) { PxArticulationLink* link = static_cast<PxArticulationLink*>(&base); PxBase* a = reinterpret_cast<PxBase*>(&link->getArticulation()); if(mCurElement->object.first != a ) //don't require itself addChild(a); } } void sort() { Element element; PxU32 i; PxU32 nbObject = mCollection.internalGetNbObjects(); const Cm::Collection::ObjectToIdMap::Entry* objectdatas = mCollection.internalGetObjects(); for( i = 0; i < nbObject; ++i ) { element.object.first = objectdatas[i].first; element.object.second = objectdatas[i].second; mObjToIdMap.insert(objectdatas[i].first, mElements.size()); mElements.pushBack(element); } for( i = 0; i < nbObject; ++i ) { mCurElement = &mElements[i]; const PxSerializer* serializer = mSr.getSerializer(mCurElement->object.first->getConcreteType()); PX_ASSERT(serializer); serializer->requiresObjects(*mCurElement->object.first, *this); } for( i = 0; i < nbObject; ++i ) { if( 
mElements[i].isFinished ) continue; AddElement(mElements[i]); } mCollection.mObjects.clear(); for(PxArray<Object>::Iterator o = mSorted.begin(); o != mSorted.end(); ++o ) { mCollection.internalAdd(o->first, o->second); } } void AddElement(Element& e) { if( !e.isFinished ) { for( PxArray<PxU32>::Iterator child = e.children.begin(); child != e.children.end(); ++child ) { AddElement(mElements[*child]); } mSorted.pushBack(e.object); e.isFinished = true; } } private: PX_INLINE void addChild(PxBase* base) { PX_ASSERT(mCurElement); const PxHashMap<PxBase*, PxU32>::Entry* entry = mObjToIdMap.find(base); if(entry) mCurElement->children.pushBack(entry->second); } CollectionSorter& operator=(const CollectionSorter&); PxHashMap<PxBase*, PxU32> mObjToIdMap; PxArray<Element> mElements; Cm::Collection& mCollection; Sn::SerializationRegistry& mSr; PxArray<Object> mSorted; Element* mCurElement; bool mIsRepx; }; } namespace physx { namespace Sn { SerializationRegistry::SerializationRegistry(PxPhysics& physics) : mPhysics(physics) { PxRegisterPhysicsSerializers(*this); Ext::RegisterExtensionsSerializers(*this); registerBinaryMetaDataCallback(PxGetPhysicsBinaryMetaData); registerBinaryMetaDataCallback(Ext::GetExtensionsBinaryMetaData); } SerializationRegistry::~SerializationRegistry() { PxUnregisterPhysicsSerializers(*this); Ext::UnregisterExtensionsSerializers(*this); if(mSerializers.size() > 0) { PxGetFoundation().error(physx::PxErrorCode::eDEBUG_WARNING, PX_FL, "PxSerializationRegistry::release(): some registered PxSerializer instances were not unregistered"); } if(mRepXSerializers.size() > 0) { PxGetFoundation().error(physx::PxErrorCode::eDEBUG_WARNING, PX_FL, "PxSerializationRegistry::release(): some registered PxRepXSerializer instances were not unregistered"); } } void SerializationRegistry::registerSerializer(PxType type, PxSerializer& serializer) { if(mSerializers.find(type)) { PxGetFoundation().error(physx::PxErrorCode::eDEBUG_WARNING, PX_FL, "PxSerializationRegistry::registerSerializer: Type %d has already been registered", type); } mSerializers.insert(type, &serializer); } PxSerializer* SerializationRegistry::unregisterSerializer(PxType type) { const SerializerMap::Entry* e = mSerializers.find(type); PxSerializer* s = e ? e->second : NULL; if(!mSerializers.erase(type)) { PxGetFoundation().error(physx::PxErrorCode::eDEBUG_WARNING, PX_FL, "PxSerializationRegistry::unregisterSerializer: failed to find PxSerializer instance for type %d", type); } return s; } const PxSerializer* SerializationRegistry::getSerializer(PxType type) const { const SerializerMap::Entry* e = mSerializers.find(type); #if PX_CHECKED if (!e) { PxGetFoundation().error(physx::PxErrorCode::eDEBUG_WARNING, PX_FL, "PxSerializationRegistry::getSerializer: failed to find PxSerializer instance for type %d", type); } #endif return e ? 
e->second : NULL; } PxType SerializationRegistry::getSerializerType(PxU32 index) const { PX_ASSERT(index < mSerializers.size()); return mSerializers.getEntries()[index].first; } const char* SerializationRegistry::getSerializerName(PxU32 index) const { PX_ASSERT(index < mSerializers.size()); return mSerializers.getEntries()[index].second->getConcreteTypeName(); } void SerializationRegistry::registerBinaryMetaDataCallback(PxBinaryMetaDataCallback callback) { mMetaDataCallbacks.pushBack(callback); } void SerializationRegistry::getBinaryMetaData(PxOutputStream& stream) const { for(PxU32 i = 0; i < mMetaDataCallbacks.size(); i++) { mMetaDataCallbacks[i](stream); } } void SerializationRegistry::registerRepXSerializer(PxType type, PxRepXSerializer& serializer) { if(mRepXSerializers.find(type)) { PxGetFoundation().error(physx::PxErrorCode::eDEBUG_WARNING, PX_FL, "PxSerializationRegistry::registerRepXSerializer: Type %d has already been registered", type); } mRepXSerializers.insert(type, &serializer); } PxRepXSerializer* SerializationRegistry::getRepXSerializer(const char* typeName) const { SerializationRegistry* sr = const_cast<SerializationRegistry*>(this); for( RepXSerializerMap::Iterator iter = sr->mRepXSerializers.getIterator(); !iter.done(); ++iter) { if ( physx::Pxstricmp( iter->second->getTypeName(), typeName ) == 0 ) return iter->second; } return NULL; } PxRepXSerializer* SerializationRegistry::unregisterRepXSerializer(PxType type) { const RepXSerializerMap::Entry* e = mRepXSerializers.find(type); PxRepXSerializer* s = e ? e->second : NULL; if(!mRepXSerializers.erase(type)) { PxGetFoundation().error(physx::PxErrorCode::eDEBUG_WARNING, PX_FL, "PxSerializationRegistry::unregisterRepXSerializer: failed to find PxRepXSerializer instance for type %d", type); } return s; } void sortCollection(Cm::Collection& collection, SerializationRegistry& sr, bool isRepx) { CollectionSorter sorter(collection, sr, isRepx); sorter.sort(); } } // Sn PxSerializationRegistry* PxSerialization::createSerializationRegistry(PxPhysics& physics) { return PX_NEW(Sn::SerializationRegistry)(physics); } } // physx
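// Example (not part of the original file): a standalone sketch of the dependency-first ordering
// performed by CollectionSorter::sort()/AddElement() above. Each node lists the indices of the
// nodes it requires, and a depth-first pass emits those children before the node itself. The
// types below are local to this illustration, not the PhysX classes.
#include <cstdio>
#include <vector>

struct Node
{
    const char*      name;
    std::vector<int> children;      // indices of required nodes
    bool             finished;
};

static void addElement(std::vector<Node>& nodes, std::vector<int>& sorted, int index)
{
    Node& n = nodes[index];
    if (n.finished)
        return;
    n.finished = true;              // marking up front also guards against accidental cycles
    for (size_t i = 0; i < n.children.size(); ++i)
        addElement(nodes, sorted, n.children[i]);   // dependencies first
    sorted.push_back(index);
}

int main()
{
    // the joint requires both actors, so the emitted order is: actorA, actorB, joint
    std::vector<Node> nodes = { { "joint", { 1, 2 }, false }, { "actorA", {}, false }, { "actorB", {}, false } };
    std::vector<int> sorted;
    for (int i = 0; i < int(nodes.size()); ++i)
        addElement(nodes, sorted, i);
    for (size_t i = 0; i < sorted.size(); ++i)
        printf("%s\n", nodes[sorted[i]].name);
    return 0;
}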
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/SnSerialization.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #include "common/PxMetaData.h" #include "common/PxSerializer.h" #include "extensions/PxConstraintExt.h" #include "foundation/PxPhysicsVersion.h" #include "PxPhysicsAPI.h" #include "SnConvX.h" #include "SnSerializationRegistry.h" #include "SnSerialUtils.h" #include "ExtSerialization.h" #include "CmCollection.h" using namespace physx; using namespace Sn; namespace { struct RequiresCallback : public PxProcessPxBaseCallback { RequiresCallback(physx::PxCollection& c) : collection(c) {} void process(PxBase& base) { if(!collection.contains(base)) collection.add(base); } PxCollection& collection; PX_NOCOPY(RequiresCallback) }; struct CompleteCallback : public PxProcessPxBaseCallback { CompleteCallback(physx::PxCollection& r, physx::PxCollection& c, const physx::PxCollection* e) : required(r), complete(c), external(e) {} void process(PxBase& base) { if(complete.contains(base) || (external && external->contains(base))) return; if(!required.contains(base)) required.add(base); } PxCollection& required; PxCollection& complete; const PxCollection* external; PX_NOCOPY(CompleteCallback) }; void getRequiresCollection(PxCollection& required, PxCollection& collection, PxCollection& complete, const PxCollection* external, PxSerializationRegistry& sr, bool followJoints) { CompleteCallback callback(required, complete, external); for (PxU32 i = 0; i < collection.getNbObjects(); ++i) { PxBase& s = collection.getObject(i); const PxSerializer* serializer = sr.getSerializer(s.getConcreteType()); PX_ASSERT(serializer); serializer->requiresObjects(s, callback); if(followJoints) { PxRigidActor* actor = s.is<PxRigidActor>(); if(actor) { PxArray<PxConstraint*> objects(actor->getNbConstraints()); actor->getConstraints(objects.begin(), objects.size()); for(PxU32 j=0;j<objects.size();j++) { PxU32 typeId; PxJoint* joint = 
reinterpret_cast<PxJoint*>(objects[j]->getExternalReference(typeId)); if(typeId == PxConstraintExtIDs::eJOINT) { const PxSerializer* sj = sr.getSerializer(joint->getConcreteType()); PX_ASSERT(sj); sj->requiresObjects(*joint, callback); if(!required.contains(*joint)) required.add(*joint); } } } } } } } bool PxSerialization::isSerializable(PxCollection& collection, PxSerializationRegistry& sr, const PxCollection* externalReferences) { PxCollection* subordinateCollection = PxCreateCollection(); PX_ASSERT(subordinateCollection); for(PxU32 i = 0; i < collection.getNbObjects(); ++i) { PxBase& s = collection.getObject(i); const PxSerializer* serializer = sr.getSerializer(s.getConcreteType()); PX_ASSERT(serializer); if(serializer->isSubordinate()) subordinateCollection->add(s); if(externalReferences) { PxSerialObjectId id = collection.getId(s); if(id != PX_SERIAL_OBJECT_ID_INVALID) { PxBase* object = externalReferences->find(id); if(object && (object != &s)) { subordinateCollection->release(); PxGetFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, PX_FL, "PxSerialization::isSerializable: Reference id %" PX_PRIu64 " used both in current collection and in externalReferences. " "Please use unique identifiers.", id); return false; } } } } PxCollection* requiresCollection = PxCreateCollection(); PX_ASSERT(requiresCollection); RequiresCallback requiresCallback0(*requiresCollection); for (PxU32 i = 0; i < collection.getNbObjects(); ++i) { PxBase& s = collection.getObject(i); const PxSerializer* serializer = sr.getSerializer(s.getConcreteType()); PX_ASSERT(serializer); serializer->requiresObjects(s, requiresCallback0); Cm::Collection* cmRequiresCollection = static_cast<Cm::Collection*>(requiresCollection); for(PxU32 j = 0; j < cmRequiresCollection->getNbObjects(); ++j) { PxBase& s0 = cmRequiresCollection->getObject(j); if(subordinateCollection->contains(s0)) { subordinateCollection->remove(s0); continue; } bool requiredIsInCollection = collection.contains(s0); if(!requiredIsInCollection) { if(externalReferences) { if(!externalReferences->contains(s0)) { PxGetFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, PX_FL, "PxSerialization::isSerializable: Object of type %s references a missing object of type %s. " "The missing object needs to be added to either the current collection or the externalReferences collection.", s.getConcreteTypeName(), s0.getConcreteTypeName()); } else if(externalReferences->getId(s0) == PX_SERIAL_OBJECT_ID_INVALID) { PxGetFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, PX_FL, "PxSerialization::isSerializable: Object of type %s in externalReferences collection requires an id.", s0.getConcreteTypeName()); } else continue; } else { PxGetFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, PX_FL, "PxSerialization::isSerializable: Object of type %s references a missing serial object of type %s. 
" "Please completed the collection or specify an externalReferences collection containing the object.", s.getConcreteTypeName(), s0.getConcreteTypeName()); } subordinateCollection->release(); requiresCollection->release(); return false; } } cmRequiresCollection->mObjects.clear(); } requiresCollection->release(); PxU32 numOrphans = subordinateCollection->getNbObjects(); for(PxU32 j = 0; j < numOrphans; ++j) { PxBase& subordinate = subordinateCollection->getObject(j); PxGetFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, PX_FL, "PxSerialization::isSerializable: An object of type %s is subordinate but not required " "by other objects in the collection (orphan). Please remove the object from the collection or add its owner.", subordinate.getConcreteTypeName()); } subordinateCollection->release(); if(numOrphans>0) return false; if(externalReferences) { PxCollection* oppositeRequiresCollection = PxCreateCollection(); PX_ASSERT(oppositeRequiresCollection); RequiresCallback requiresCallback(*oppositeRequiresCollection); for (PxU32 i = 0; i < externalReferences->getNbObjects(); ++i) { PxBase& s = externalReferences->getObject(i); const PxSerializer* serializer = sr.getSerializer(s.getConcreteType()); PX_ASSERT(serializer); serializer->requiresObjects(s, requiresCallback); Cm::Collection* cmCollection = static_cast<Cm::Collection*>(oppositeRequiresCollection); for(PxU32 j = 0; j < cmCollection->getNbObjects(); ++j) { PxBase& s0 = cmCollection->getObject(j); if(collection.contains(s0)) { oppositeRequiresCollection->release(); PxGetFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, PX_FL, "PxSerialization::isSerializable: Object of type %s in externalReferences references an object " "of type %s in collection (circular dependency).", s.getConcreteTypeName(), s0.getConcreteTypeName()); return false; } } cmCollection->mObjects.clear(); } oppositeRequiresCollection->release(); } return true; } void PxSerialization::complete(PxCollection& collection, PxSerializationRegistry& sr, const PxCollection* exceptFor, bool followJoints) { PxCollection* curCollection = PxCreateCollection(); PX_ASSERT(curCollection); curCollection->add(collection); PxCollection* requiresCollection = PxCreateCollection(); PX_ASSERT(requiresCollection); do { getRequiresCollection(*requiresCollection, *curCollection, collection, exceptFor, sr, followJoints); collection.add(*requiresCollection); PxCollection* swap = curCollection; curCollection = requiresCollection; requiresCollection = swap; (static_cast<Cm::Collection*>(requiresCollection))->mObjects.clear(); }while(curCollection->getNbObjects() > 0); requiresCollection->release(); curCollection->release(); } void PxSerialization::createSerialObjectIds(PxCollection& collection, const PxSerialObjectId base) { PxSerialObjectId localBase = base; PxU32 nbObjects = collection.getNbObjects(); for (PxU32 i = 0; i < nbObjects; ++i) { while(collection.find(localBase)) { localBase++; } PxBase& s = collection.getObject(i); if(PX_SERIAL_OBJECT_ID_INVALID == collection.getId(s)) { collection.addId(s, localBase); localBase++; } } } namespace physx { namespace Sn { static PxU32 addToStringTable(physx::PxArray<char>& stringTable, const char* str) { if(!str) return 0xffffffff; PxI32 length = PxI32(stringTable.size()); const char* table = stringTable.begin(); const char* start = table; while(length) { if(Pxstrcmp(table, str)==0) return PxU32(table - start); const char* saved = table; while(*table++); length -= PxU32(table - saved); PX_ASSERT(length>=0); } const PxU32 offset = 
stringTable.size(); while(*str) stringTable.pushBack(*str++); stringTable.pushBack(0); return offset; } } } void PxSerialization::dumpBinaryMetaData(PxOutputStream& outputStream, PxSerializationRegistry& sr) { class MetaDataStream : public PxOutputStream { public: bool addNewType(const char* typeName) { for(PxU32 i=0;i<types.size();i++) { if(Pxstrcmp(types[i], typeName)==0) return false; } types.pushBack(typeName); return true; } virtual PxU32 write(const void* src, PxU32 count) { PX_ASSERT(count==sizeof(PxMetaDataEntry)); const PxMetaDataEntry* entry = reinterpret_cast<const PxMetaDataEntry*>(src); if(( entry->flags & PxMetaDataFlag::eTYPEDEF) || ((entry->flags & PxMetaDataFlag::eCLASS) && (!entry->name)) ) newType = addNewType(entry->type); if(newType) metaData.pushBack(*entry); return count; } PxArray<PxMetaDataEntry> metaData; PxArray<const char*> types; bool newType; }s; SerializationRegistry& sn = static_cast<SerializationRegistry&>(sr); sn.getBinaryMetaData(s); PxArray<char> stringTable; PxU32 nb = s.metaData.size(); PxMetaDataEntry* entries = s.metaData.begin(); for(PxU32 i=0;i<nb;i++) { entries[i].type = reinterpret_cast<const char*>(size_t(addToStringTable(stringTable, entries[i].type))); entries[i].name = reinterpret_cast<const char*>(size_t(addToStringTable(stringTable, entries[i].name))); } PxU32 platformTag = getBinaryPlatformTag(); const PxU32 gaussMapLimit = 32; const PxU32 header = PX_MAKE_FOURCC('M','E','T','A'); const PxU32 version = PX_PHYSICS_VERSION; const PxU32 ptrSize = sizeof(void*); outputStream.write(&header, 4); outputStream.write(&version, 4); outputStream.write(PX_BINARY_SERIAL_VERSION, SN_BINARY_VERSION_GUID_NUM_CHARS); outputStream.write(&ptrSize, 4); outputStream.write(&platformTag, 4); outputStream.write(&gaussMapLimit, 4); outputStream.write(&nb, 4); outputStream.write(entries, nb*sizeof(PxMetaDataEntry)); //concreteTypes to name PxU32 num = sn.getNbSerializers(); outputStream.write(&num, 4); for(PxU32 i=0; i<num; i++) { PxU16 type = sn.getSerializerType(i); PxU32 nameOffset = addToStringTable(stringTable, sn.getSerializerName(i)); outputStream.write(&type, 2); outputStream.write(&nameOffset, 4); } PxU32 length = stringTable.size(); const char* table = stringTable.begin(); outputStream.write(&length, 4); outputStream.write(table, length); } PxBinaryConverter* PxSerialization::createBinaryConverter() { return PX_NEW(ConvX)(); }
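// Example (not part of the original file): a usage sketch for the entry points implemented
// above, assuming a PxPhysics instance and a fully created actor exist elsewhere. Only
// complete(), createSerialObjectIds() and isSerializable() are exercised; actually writing the
// collection to a stream is a separate step that is not shown here.
#include "PxPhysicsAPI.h"
#include "extensions/PxSerialization.h"

static bool prepareCollectionForExport(physx::PxPhysics& physics, physx::PxRigidDynamic& actor)
{
    using namespace physx;

    PxSerializationRegistry* registry = PxSerialization::createSerializationRegistry(physics);
    PxCollection* collection = PxCreateCollection();

    collection->add(actor);                                         // seed with the objects of interest
    PxSerialization::complete(*collection, *registry, NULL, true);  // pull in required objects, follow joints
    PxSerialization::createSerialObjectIds(*collection, PxSerialObjectId(1));

    const bool ok = PxSerialization::isSerializable(*collection, *registry, NULL);

    collection->release();
    registry->release();
    return ok;
}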
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/SnSerialUtils.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SN_SERIAL_UTILS_H #define SN_SERIAL_UTILS_H #define SN_BINARY_VERSION_GUID_NUM_CHARS 32 namespace physx { namespace Sn { PxU32 getBinaryPlatformTag(); bool isBinaryPlatformTagValid(PxU32 platformTag); const char* getBinaryPlatformName(PxU32 platformTag); const char* getBinaryVersionGuid(); bool checkCompatibility(const char* binaryVersionGuidCandidate); } } #endif
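// --- Editor's note: illustrative sketch, not part of the PhysX sources ---
// checkCompatibility(), declared above, compares a GUID embedded in serialized
// data against the GUID this SDK build was compiled with, over the fixed
// SN_BINARY_VERSION_GUID_NUM_CHARS (32) characters. The sketch below shows that
// kind of fixed-length comparison; the GUID literal and function name are
// placeholders, not the SDK's real binary version GUID or API.

#include <cstdio>
#include <cstring>

static const char kBuildGuidPlaceholder[] = "0123456789abcdef0123456789abcdef"; // 32 chars, made up

static bool guidsCompatibleSketch(const char* candidate)
{
	// Both strings are expected to be exactly 32 characters long.
	return std::strncmp(candidate, kBuildGuidPlaceholder, 32) == 0;
}

int main()
{
	printf("%d\n", int(guidsCompatibleSketch("0123456789abcdef0123456789abcdef"))); // 1
	printf("%d\n", int(guidsCompatibleSketch("ffffffffffffffffffffffffffffffff"))); // 0
	return 0;
}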
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Binary/SnConvX_Output.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. #include "foundation/PxIO.h" #include "foundation/PxErrorCallback.h" #include "SnConvX.h" #if PX_VC #pragma warning(disable:4389) // signed/unsigned mismatch #endif using namespace physx; void Sn::ConvX::setNullPtr(bool flag) { mNullPtr = flag; } void Sn::ConvX::setNoOutput(bool flag) { mNoOutput = flag; } bool Sn::ConvX::initOutput(PxOutputStream& targetStream) { mOutStream = &targetStream; mOutputSize = 0; mNullPtr = false; mNoOutput = false; const MetaData* srcMetaData = getBinaryMetaData(META_DATA_SRC); PX_ASSERT(srcMetaData); const MetaData* dstMetaData = getBinaryMetaData(META_DATA_DST); PX_ASSERT(dstMetaData); mSrcPtrSize = srcMetaData->getPtrSize(); mDstPtrSize = dstMetaData->getPtrSize(); PX_ASSERT(!srcMetaData->getFlip()); mMustFlip = dstMetaData->getFlip(); return true; } void Sn::ConvX::closeOutput() { mOutStream = NULL; } int Sn::ConvX::getCurrentOutputSize() { return mOutputSize; } void Sn::ConvX::output(short value) { if(mNoOutput) return; if(mMustFlip) flip(value); PX_ASSERT(mOutStream); const size_t size = mOutStream->write(&value, 2); PX_ASSERT(size==2); mOutputSize += int(size); } void Sn::ConvX::output(int value) { if(mNoOutput) return; if(mMustFlip) flip(value); PX_ASSERT(mOutStream); const size_t size = mOutStream->write(&value, 4); PX_ASSERT(size==4); mOutputSize += int(size); } //ntohll is a macro on apple yosemite static PxU64 ntohll_internal(const PxU64 value) { union { PxU64 ull; PxU8 c[8]; } x; x.ull = value; PxU8 c = 0; c = x.c[0]; x.c[0] = x.c[7]; x.c[7] = c; c = x.c[1]; x.c[1] = x.c[6]; x.c[6] = c; c = x.c[2]; x.c[2] = x.c[5]; x.c[5] = c; c = x.c[3]; x.c[3] = x.c[4]; x.c[4] = c; return x.ull; } void Sn::ConvX::output(PxU64 value) { if(mNoOutput) return; if(mMustFlip) // flip(value); value = ntohll_internal(value); PX_ASSERT(mOutStream); const size_t size = mOutStream->write(&value, 8); PX_ASSERT(size==8); mOutputSize += int(size); } void Sn::ConvX::output(const char* buffer, int nbBytes) { if(mNoOutput) return; if(!nbBytes) return; 
PX_ASSERT(mOutStream); const PxU32 size = mOutStream->write(buffer, PxU32(nbBytes)); PX_ASSERT(size== PxU32(nbBytes)); mOutputSize += int(size); } void Sn::ConvX::convert8(const char* src, const PxMetaDataEntry& entry, const PxMetaDataEntry& dstEntry) { (void)dstEntry; if(mNoOutput) return; PX_ASSERT(entry.mSize==1*entry.mCount); PX_ASSERT(mOutStream); PX_ASSERT(entry.mSize==dstEntry.mSize); const PxU32 size = mOutStream->write(src, PxU32(entry.mSize)); PX_ASSERT(size== PxU32(entry.mSize)); mOutputSize += int(size); } // This is called to convert auto-generated "padding bytes" (or so we think). // We use a special converter to check the input bytes and issue warnings when it doesn't look like padding void Sn::ConvX::convertPad8(const char* src, const PxMetaDataEntry& entry, const PxMetaDataEntry& dstEntry) { (void)dstEntry; (void)src; if(mNoOutput) return; PX_ASSERT(entry.mSize); PX_ASSERT(entry.mSize==1*entry.mCount); PX_ASSERT(mOutStream); PX_ASSERT(entry.mSize==dstEntry.mSize); // PT: we don't output the source data on purpose, to catch missing meta-data // sschirm: changed that to 0xcd, so we can mark the output as "having marked pads" const unsigned char b = 0xcd; for(int i=0;i<entry.mSize;i++) { const size_t size = mOutStream->write(&b, 1); (void)size; } mOutputSize += entry.mSize; } void Sn::ConvX::convert16(const char* src, const PxMetaDataEntry& entry, const PxMetaDataEntry& dstEntry) { (void)dstEntry; if(mNoOutput) return; PX_ASSERT(entry.mSize==int(sizeof(short)*entry.mCount)); PX_ASSERT(mOutStream); PX_ASSERT(entry.mSize==dstEntry.mSize); const short* data = reinterpret_cast<const short*>(src); for(int i=0;i<entry.mCount;i++) { short value = *data++; if(mMustFlip) flip(value); const size_t size = mOutStream->write(&value, sizeof(short)); PX_ASSERT(size==sizeof(short)); mOutputSize += int(size); } } void Sn::ConvX::convert32(const char* src, const PxMetaDataEntry& entry, const PxMetaDataEntry& dstEntry) { (void)dstEntry; if(mNoOutput) return; PX_ASSERT(entry.mSize==int(sizeof(int)*entry.mCount)); PX_ASSERT(mOutStream); PX_ASSERT(entry.mSize==dstEntry.mSize); const int* data = reinterpret_cast<const int*>(src); for(int i=0;i<entry.mCount;i++) { int value = *data++; if(mMustFlip) flip(value); const size_t size = mOutStream->write(&value, sizeof(int)); PX_ASSERT(size==sizeof(int)); mOutputSize += int(size); } } void Sn::ConvX::convert64(const char* src, const PxMetaDataEntry& entry, const PxMetaDataEntry& dstEntry) { (void)dstEntry; if(mNoOutput) return; PX_ASSERT(entry.mSize==int(sizeof(PxU64)*entry.mCount)); PX_ASSERT(mOutStream); PX_ASSERT(entry.mSize==dstEntry.mSize); const PxU64* data = reinterpret_cast<const PxU64*>(src); for(int i=0;i<entry.mCount;i++) { PxU64 value = *data++; if(mMustFlip) value = ntohll_internal(value); const size_t size = mOutStream->write(&value, sizeof(PxU64)); PX_ASSERT(size==sizeof(PxU64)); mOutputSize += int(size); } } void Sn::ConvX::convertFloat(const char* src, const PxMetaDataEntry& entry, const PxMetaDataEntry& dstEntry) { (void)dstEntry; if(mNoOutput) return; PX_ASSERT(entry.mSize==int(sizeof(float)*entry.mCount)); PX_ASSERT(mOutStream); PX_ASSERT(entry.mSize==dstEntry.mSize); const float* data = reinterpret_cast<const float*>(src); for(int i=0;i<entry.mCount;i++) { float value = *data++; if(mMustFlip) flip(value); const size_t size = mOutStream->write(&value, sizeof(float)); PX_ASSERT(size==sizeof(float)); mOutputSize += int(size); } } void Sn::ConvX::convertPtr(const char* src, const PxMetaDataEntry& entry, const PxMetaDataEntry& dstEntry) { 
(void)dstEntry; if(mNoOutput) return; PX_ASSERT(entry.mSize==mSrcPtrSize*entry.mCount); PX_ASSERT(mOutStream); char buffer[16]; for(int i=0;i<entry.mCount;i++) { PxU64 testValue=0; // Src pointer can be 4 or 8 bytes so we can't use "void*" here if(mSrcPtrSize==4) { PX_ASSERT(sizeof(PxU32)==4); const PxU32* data = reinterpret_cast<const PxU32*>(src); PxU32 value = *data++; src = reinterpret_cast<const char*>(data); if(mPointerActiveRemap) { PxU32 ref; if(mPointerActiveRemap->getObjectRef(value, ref)) { value = ref; } else if(value) { // all pointers not in the pointer remap table get set as 0x12345678, this also applies to PhysX name properties (mName) value=0x12345678; } } else { //we should only get here during convertReferenceTables to build up the pointer map PxU32 ref; if (mPointerRemap.getObjectRef(value, ref)) { value = ref; } else if(value) { const PxU32 remappedRef = 0x80000000 | (mPointerRemapCounter++ +1); mPointerRemap.setObjectRef(value, remappedRef); value = remappedRef; } } if(mMustFlip) flip(value); if(mNullPtr) value = 0; *reinterpret_cast<PxU32*>(buffer) = value; } else { PX_ASSERT(mSrcPtrSize==8); PX_ASSERT(sizeof(PxU64)==8); const PxU64* data = reinterpret_cast<const PxU64*>(src); PxU64 value = *data++; src = reinterpret_cast<const char*>(data); if(mPointerActiveRemap) { PxU32 ref; if(mPointerActiveRemap->getObjectRef(value, ref)) { value = ref; } else if(value) { // all pointers not in the pointer remap table get set as 0x12345678, this also applies to PhysX name properties (mName) value=0x12345678; } } else { //we should only get here during convertReferenceTables to build up the pointer map PxU32 ref; if (mPointerRemap.getObjectRef(value, ref)) { value = ref; } else if(value) { const PxU32 remappedRef = 0x80000000 | (mPointerRemapCounter++ +1); mPointerRemap.setObjectRef(value, remappedRef); value = remappedRef; } } if(mNullPtr) value = 0; testValue = value; *reinterpret_cast<PxU64*>(buffer) = value; } if(mSrcPtrSize==mDstPtrSize) { const size_t size = mOutStream->write(buffer, PxU32(mSrcPtrSize)); PX_ASSERT(size==PxU32(mSrcPtrSize)); mOutputSize += int(size); } else { if(mDstPtrSize>mSrcPtrSize) { // 32bit to 64bit PX_ASSERT(mDstPtrSize==8); PX_ASSERT(mSrcPtrSize==4); // We need to output the lower 32bits first for PC. Might be different on a 64bit console.... // Output src ptr for the lower 32bits const size_t size = mOutStream->write(buffer, PxU32(mSrcPtrSize)); PX_ASSERT(size==PxU32(mSrcPtrSize)); mOutputSize += int(size); // Output zeros for the higher 32bits const int zero = 0; const size_t size0 = mOutStream->write(&zero, 4); PX_ASSERT(size0==4); mOutputSize += int(size0); } else { // 64bit to 32bit PX_ASSERT(mSrcPtrSize==8); PX_ASSERT(mDstPtrSize==4); // Not sure how we can safely convert 64bit ptrs to 32bit... just drop the high 32 bits?!? 
PxU32 ptr32 = *reinterpret_cast<PxU32*>(buffer); (void)ptr32; PxU32 ptr32b = PxU32(testValue); (void)ptr32b; if(mMustFlip) flip(ptr32b); // Output src ptr for the lower 32bits const size_t size = mOutStream->write(&ptr32b, 4); PX_ASSERT(size==4); mOutputSize += int(size); } } } } void Sn::ConvX::convertHandle16(const char* src, const PxMetaDataEntry& entry, const PxMetaDataEntry& dstEntry) { (void)dstEntry; if(mNoOutput) return; PX_ASSERT(strcmp(entry.mType, "PxU16") == 0); PX_ASSERT(entry.mSize==dstEntry.mSize); PX_ASSERT(mOutStream); const PxU16* handles = reinterpret_cast<const PxU16*>(src); for(int i=0;i<entry.mCount;i++) { PxU16 value = handles[i]; if (mHandle16ActiveRemap) { PxU16 ref; bool isMapped = mHandle16ActiveRemap->getObjectRef(value, ref); PX_UNUSED(isMapped); PX_ASSERT(isMapped); value = ref; } else { //we should only get here during convertReferenceTables to build up the pointer map PxU16 ref; if (mHandle16Remap.getObjectRef(value, ref)) { value = ref; } else { const PxU16 remappedRef = mHandle16RemapCounter++; mHandle16Remap.setObjectRef(value, remappedRef); value = remappedRef; } } if(mMustFlip) flip(value); const size_t size = mOutStream->write(&value, sizeof(PxU16)); PX_UNUSED(size); PX_ASSERT(size==sizeof(PxU16)); mOutputSize += sizeof(PxU16); } }
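// --- Editor's note: illustrative sketch, not part of the PhysX sources ---
// ntohll_internal() above reverses the byte order of a 64-bit value by
// mirroring the eight bytes of a union; it is used in place of flip() when the
// target platform's endianness differs from the host's. The standalone
// function below performs the same swap with a loop; names are made up.

#include <cstdint>
#include <cstdio>

static uint64_t byteSwap64Sketch(uint64_t value)
{
	union { uint64_t u; unsigned char c[8]; } x;
	x.u = value;
	for(int i = 0; i < 4; ++i)
	{
		const unsigned char tmp = x.c[i];
		x.c[i] = x.c[7 - i];
		x.c[7 - i] = tmp;
	}
	return x.u;
}

int main()
{
	const uint64_t v = 0x0102030405060708ull;
	printf("%016llx\n", static_cast<unsigned long long>(byteSwap64Sketch(v))); // 0807060504030201
	return 0;
}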
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Binary/SnBinaryDeserialization.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #include "common/PxSerializer.h" #include "foundation/PxHash.h" #include "foundation/PxHashMap.h" #include "foundation/PxString.h" #include "extensions/PxSerialization.h" #include "PxPhysics.h" #include "PxPhysicsSerialization.h" #include "SnFile.h" #include "SnSerializationContext.h" #include "SnConvX_Align.h" #include "serialization/SnSerializationRegistry.h" #include "serialization/SnSerialUtils.h" #include "CmCollection.h" using namespace physx; using namespace Sn; namespace { PX_INLINE PxU8* alignPtr(PxU8* ptr, PxU32 alignment = PX_SERIAL_ALIGN) { if(!alignment) return ptr; const PxU32 padding = getPadding(size_t(ptr), alignment); PX_ASSERT(!getPadding(size_t(ptr + padding), alignment)); return ptr + padding; } PX_FORCE_INLINE PxU32 read32(PxU8*& address) { const PxU32 value = *reinterpret_cast<PxU32*>(address); address += sizeof(PxU32); return value; } bool readHeader(PxU8*& address) { const PxU32 header = read32(address); PX_UNUSED(header); const PxU32 version = read32(address); PX_UNUSED(version); char binaryVersionGuid[SN_BINARY_VERSION_GUID_NUM_CHARS + 1]; PxMemCopy(binaryVersionGuid, address, SN_BINARY_VERSION_GUID_NUM_CHARS); binaryVersionGuid[SN_BINARY_VERSION_GUID_NUM_CHARS] = 0; address += SN_BINARY_VERSION_GUID_NUM_CHARS; PX_UNUSED(binaryVersionGuid); const PxU32 platformTag = read32(address); PX_UNUSED(platformTag); const PxU32 markedPadding = read32(address); PX_UNUSED(markedPadding); if (header != PX_MAKE_FOURCC('S','E','B','D')) { PxGetFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, PX_FL, "Buffer contains data with wrong header indicating invalid binary data."); return false; } if (!checkCompatibility(binaryVersionGuid)) { PxGetFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, PX_FL, "Buffer contains binary data version 0x%s and is incompatible with this PhysX sdk (0x%s).\n", binaryVersionGuid, 
getBinaryVersionGuid()); return false; } if (platformTag != getBinaryPlatformTag()) { PxGetFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, PX_FL, "Buffer contains data with platform mismatch:\nExpected: %s \nActual: %s\n", getBinaryPlatformName(getBinaryPlatformTag()), getBinaryPlatformName(platformTag)); return false; } return true; } bool checkImportReferences(const ImportReference* importReferences, PxU32 nbImportReferences, const Cm::Collection* externalRefs) { if (!externalRefs) { if (nbImportReferences > 0) { PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "PxSerialization::createCollectionFromBinary: External references needed but no externalRefs collection specified."); return false; } } else { for (PxU32 i=0; i<nbImportReferences;i++) { PxSerialObjectId id = importReferences[i].id; PxType type = importReferences[i].type; PxBase* referencedObject = externalRefs->find(id); if (!referencedObject) { PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "PxSerialization::createCollectionFromBinary: External reference %" PX_PRIu64 " expected in externalRefs collection but not found.", id); return false; } if (referencedObject->getConcreteType() != type) { PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "PxSerialization::createCollectionFromBinary: External reference %d type mismatch. Expected %d but found %d in externalRefs collection.", type, referencedObject->getConcreteType()); return false; } } } return true; } } PxCollection* PxSerialization::createCollectionFromBinary(void* memBlock, PxSerializationRegistry& sr, const PxCollection* pxExternalRefs) { if(size_t(memBlock) & (PX_SERIAL_FILE_ALIGN-1)) { PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "Buffer must be 128-bytes aligned."); return NULL; } PxU8* address = reinterpret_cast<PxU8*>(memBlock); const Cm::Collection* externalRefs = static_cast<const Cm::Collection*>(pxExternalRefs); if (!readHeader(address)) { return NULL; } ManifestEntry* manifestTable; PxU32 nbManifestEntries; PxU32 nbObjectsInCollection; PxU32 objectDataEndOffset; // read number of objects in collection address = alignPtr(address); nbObjectsInCollection = read32(address); // read manifest (PxU32 offset, PxConcreteType type) { address = alignPtr(address); nbManifestEntries = read32(address); PX_ASSERT(*reinterpret_cast<PxU32*>(address) == 0); //first offset is always 0 manifestTable = (nbManifestEntries > 0) ? reinterpret_cast<ManifestEntry*>(address) : NULL; address += nbManifestEntries*sizeof(ManifestEntry); objectDataEndOffset = read32(address); } ImportReference* importReferences; PxU32 nbImportReferences; // read import references { address = alignPtr(address); nbImportReferences = read32(address); importReferences = (nbImportReferences > 0) ? reinterpret_cast<ImportReference*>(address) : NULL; address += nbImportReferences*sizeof(ImportReference); } if (!checkImportReferences(importReferences, nbImportReferences, externalRefs)) { return NULL; } ExportReference* exportReferences; PxU32 nbExportReferences; // read export references { address = alignPtr(address); nbExportReferences = read32(address); exportReferences = (nbExportReferences > 0) ? 
reinterpret_cast<ExportReference*>(address) : NULL; address += nbExportReferences*sizeof(ExportReference); } // read internal references arrays PxU32 nbInternalPtrReferences = 0; PxU32 nbInternalHandle16References = 0; InternalReferencePtr* internalPtrReferences = NULL; InternalReferenceHandle16* internalHandle16References = NULL; { address = alignPtr(address); nbInternalPtrReferences = read32(address); internalPtrReferences = (nbInternalPtrReferences > 0) ? reinterpret_cast<InternalReferencePtr*>(address) : NULL; address += nbInternalPtrReferences*sizeof(InternalReferencePtr); nbInternalHandle16References = read32(address); internalHandle16References = (nbInternalHandle16References > 0) ? reinterpret_cast<InternalReferenceHandle16*>(address) : NULL; address += nbInternalHandle16References*sizeof(InternalReferenceHandle16); } // create internal references map InternalPtrRefMap internalPtrReferencesMap(nbInternalPtrReferences*2); { //create hash (we should load the hashes directly from memory) for (PxU32 i = 0; i < nbInternalPtrReferences; i++) { const InternalReferencePtr& ref = internalPtrReferences[i]; internalPtrReferencesMap.insertUnique(ref.reference, SerialObjectIndex(ref.objIndex)); } } InternalHandle16RefMap internalHandle16ReferencesMap(nbInternalHandle16References*2); { for (PxU32 i=0;i<nbInternalHandle16References;i++) { const InternalReferenceHandle16& ref = internalHandle16References[i]; internalHandle16ReferencesMap.insertUnique(ref.reference, SerialObjectIndex(ref.objIndex)); } } SerializationRegistry& sn = static_cast<SerializationRegistry&>(sr); Cm::Collection* collection = static_cast<Cm::Collection*>(PxCreateCollection()); PX_ASSERT(collection); collection->mObjects.reserve(nbObjectsInCollection*2); if(nbExportReferences > 0) collection->mIds.reserve(nbExportReferences*2); PxU8* addressObjectData = alignPtr(address); PxU8* addressExtraData = alignPtr(addressObjectData + objectDataEndOffset); DeserializationContext context(manifestTable, importReferences, addressObjectData, internalPtrReferencesMap, internalHandle16ReferencesMap, externalRefs, addressExtraData); // iterate over memory containing PxBase objects, create the instances, resolve the addresses, import the external data, add to collection. { PxU32 nbObjects = nbObjectsInCollection; while(nbObjects--) { address = alignPtr(address); context.alignExtraData(); // read PxBase header with type and get corresponding serializer. 
PxBase* header = reinterpret_cast<PxBase*>(address); const PxType classType = header->getConcreteType(); const PxSerializer* serializer = sn.getSerializer(classType); PX_ASSERT(serializer); PxBase* instance = serializer->createObject(address, context); if (!instance) { PxGetFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, PX_FL, "Cannot create class instance for concrete type %d.", classType); collection->release(); return NULL; } collection->internalAdd(instance); } } PX_ASSERT(nbObjectsInCollection == collection->internalGetNbObjects()); // update new collection with export references { bool manifestTableAccessError = false; PX_ASSERT(addressObjectData != NULL); for (PxU32 i=0;i<nbExportReferences;i++) { bool isExternal; PxU32 manifestIndex = exportReferences[i].objIndex.getIndex(isExternal); PX_ASSERT(!isExternal); if (manifestIndex < nbManifestEntries) { PxBase* obj = reinterpret_cast<PxBase*>(addressObjectData + manifestTable[manifestIndex].offset); collection->mIds.insertUnique(exportReferences[i].id, obj); collection->mObjects[obj] = exportReferences[i].id; } else { manifestTableAccessError = true; } } if (manifestTableAccessError) { PxGetFoundation().error(physx::PxErrorCode::eINTERNAL_ERROR, PX_FL, "Manifest table access error"); collection->release(); return NULL; } } PxAddCollectionToPhysics(*collection); return collection; }
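// --- Editor's note: illustrative sketch, not part of the PhysX sources ---
// readHeader() above consumes five fields in this order: a 'SEBD' FOURCC, the
// PX_PHYSICS_VERSION the data was exported with, a 32-character binary version
// GUID, a platform tag and a marked-padding flag. The standalone parser below
// mirrors that layout. The struct, the function name and the assumption that
// the FOURCC stores 'S' in the least significant byte are illustrative only.

#include <cstdint>
#include <cstdio>
#include <cstring>

struct SebdHeaderSketch
{
	uint32_t fourcc;         // expected to spell 'SEBD'
	uint32_t version;        // PX_PHYSICS_VERSION at export time
	char     guid[33];       // 32-character GUID, null-terminated here
	uint32_t platformTag;
	uint32_t markedPadding;  // non-zero if padding was written as 0xcd (checked builds)
};

static bool parseSebdHeaderSketch(const uint8_t* p, SebdHeaderSketch& out)
{
	std::memcpy(&out.fourcc,  p, 4); p += 4;
	std::memcpy(&out.version, p, 4); p += 4;
	std::memcpy(out.guid, p, 32); out.guid[32] = 0; p += 32;
	std::memcpy(&out.platformTag,   p, 4); p += 4;
	std::memcpy(&out.markedPadding, p, 4);

	// Assumed FOURCC packing: first character in the least significant byte.
	const uint32_t sebd = uint32_t('S') | (uint32_t('E') << 8) | (uint32_t('B') << 16) | (uint32_t('D') << 24);
	return out.fourcc == sebd;
}

int main()
{
	uint8_t buffer[48] = { 'S', 'E', 'B', 'D' }; // remaining fields left at zero for the demo
	SebdHeaderSketch header;
	printf("valid header: %d\n", int(parseSebdHeaderSketch(buffer, header)));
	return 0;
}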
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Binary/SnConvX_Align.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. #include "SnConvX.h" #include "SnConvX_Align.h" #include <assert.h> using namespace physx; void Sn::ConvX::alignTarget(int alignment) { const int outputSize = getCurrentOutputSize(); const PxU32 outPadding = getPadding(size_t(outputSize), PxU32(alignment)); if(outPadding) { assert(outPadding<CONVX_ZERO_BUFFER_SIZE); output(mZeros, int(outPadding)); } } const char* Sn::ConvX::alignStream(const char* buffer, int alignment) { const PxU32 padding = getPadding(size_t(buffer), PxU32(alignment)); assert(!getPadding(size_t(buffer + padding), PxU32(alignment))); const int outputSize = getCurrentOutputSize(); const PxU32 outPadding = getPadding(size_t(outputSize), PxU32(alignment)); if(outPadding==padding) { assert(outPadding<CONVX_ZERO_BUFFER_SIZE); output(mZeros, int(outPadding)); } else if(outPadding) { assert(outPadding<CONVX_ZERO_BUFFER_SIZE); output(mZeros, int(outPadding)); } return buffer + padding; }
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Binary/SnBinarySerialization.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #include "common/PxSerializer.h" #include "foundation/PxPhysicsVersion.h" #include "foundation/PxUtilities.h" #include "foundation/PxSort.h" #include "extensions/PxBinaryConverter.h" #include "SnSerializationContext.h" #include "serialization/SnSerialUtils.h" #include "serialization/SnSerializationRegistry.h" using namespace physx; using namespace Cm; using namespace Sn; //------------------------------------------------------------------------------------ //// Binary Serialized PxCollection, format documentation //------------------------------------------------------------------------------------ // // //------------------------------------------------------------------------------------ //// overview: //// header information //// manifest table //// import references //// export references //// internal references //// object data //// extra data //------------------------------------------------------------------------------------ // // //------------------------------------------------------------------------------------ //// header information: //// header tag plus various version and platform information //------------------------------------------------------------------------------------ // header SEBD // PX_PHYSICS_VERSION // PX_BINARY_SERIAL_VERSION // platform tag // markedPadding (on for PX_CHECKED) // nbObjectsInCollection // // //------------------------------------------------------------------------------------ //// manifest table: //// one entry per collected object //// offsets relative to object data buffer //------------------------------------------------------------------------------------ // alignment // PxU32 size // (PxU32 offset, PxType type)*size // PxU32 endOffset // // //------------------------------------------------------------------------------------ //// import references: //// one entry per required reference 
to external collection //------------------------------------------------------------------------------------ // alignment // PxU32 size // (PxSerialObjectId id, PxType type)*size // // //------------------------------------------------------------------------------------ //// export references: //// one entry per object in the collection with id //// object indices point into the manifest table (objects in the same collection) //------------------------------------------------------------------------------------ // alignment // PxU32 size // (PxSerialObjectId id, SerialObjectIndex objIndex)*size // // //------------------------------------------------------------------------------------ //// internal references: //// one entry per reference, kind pair //// object indices point either into the manifest table or into the import references //// depending on whether the entry references the same collection or the external one //// one section for pointer type references and one for index type references. //------------------------------------------------------------------------------------ // alignment // PxU32 sizePtrs; // (size_t reference, PxU32 kind, SerialObjectIndex objIndex)*sizePtrs // PxU32 sizeHandle16; // (PxU16 reference, PxU32 kind, SerialObjectIndex objIndex)*sizeHandle16 // // //------------------------------------------------------------------------------------ //// object data: //// serialized PxBase derived class instances //// each object size depends on specific class //// offsets are stored in manifest table //------------------------------------------------------------------------------------ // alignment // (PxConcreteType type, -----) // alignment // (PxConcreteType type, --------) // alignment // (PxConcreteType type, --) // . // . // // // ----------------------------------------------------------------------------------- //// extra data: //// extra data memory block //// serialized and deserialized by PxBase implementations ////---------------------------------------------------------------------------------- // extra data // //------------------------------------------------------------------------------------ namespace { class OutputStreamWriter { public: PX_INLINE OutputStreamWriter(PxOutputStream& stream) : mStream(stream) , mCount(0) {} PX_INLINE PxU32 write(const void* src, PxU32 offset) { PxU32 count = mStream.write(src, offset); mCount += count; return count; } PX_INLINE PxU32 getStoredSize() { return mCount; } private: OutputStreamWriter& operator=(const OutputStreamWriter&); PxOutputStream& mStream; PxU32 mCount; }; class LegacySerialStream : public PxSerializationContext { public: LegacySerialStream(OutputStreamWriter& writer, const PxCollection& collection, bool exportNames) : mWriter(writer), mCollection(collection), mExportNames(exportNames) {} void writeData(const void* buffer, PxU32 size) { mWriter.write(buffer, size); } PxU32 getTotalStoredSize() { return mWriter.getStoredSize(); } void alignData(PxU32 alignment) { if(!alignment) return; PxI32 bytesToPad = PxI32(getPadding(getTotalStoredSize(), alignment)); static const PxI32 BUFSIZE = 64; char buf[BUFSIZE]; PxMemSet(buf, 0, bytesToPad < BUFSIZE ? PxU32(bytesToPad) : PxU32(BUFSIZE)); while(bytesToPad > 0) { writeData(buf, bytesToPad < BUFSIZE ? 
PxU32(bytesToPad) : PxU32(BUFSIZE)); bytesToPad -= BUFSIZE; } PX_ASSERT(!getPadding(getTotalStoredSize(), alignment)); } virtual void registerReference(PxBase&, PxU32, size_t) { PxGetFoundation().error(physx::PxErrorCode::eINVALID_OPERATION, PX_FL, "Cannot register references during exportData, exportExtraData."); } virtual const PxCollection& getCollection() const { return mCollection; } virtual void writeName(const char* name) { PxU32 len = name && mExportNames ? PxU32(strlen(name)) + 1 : 0; writeData(&len, sizeof(len)); if(len) writeData(name, len); } private: LegacySerialStream& operator=(const LegacySerialStream&); OutputStreamWriter& mWriter; const PxCollection& mCollection; bool mExportNames; }; void writeHeader(PxSerializationContext& stream, bool hasDeserializedAssets) { PX_UNUSED(hasDeserializedAssets); //serialized binary data. const PxU32 header = PX_MAKE_FOURCC('S','E','B','D'); stream.writeData(&header, sizeof(PxU32)); PxU32 version = PX_PHYSICS_VERSION; stream.writeData(&version, sizeof(PxU32)); stream.writeData(PX_BINARY_SERIAL_VERSION, SN_BINARY_VERSION_GUID_NUM_CHARS); PxU32 platformTag = getBinaryPlatformTag(); stream.writeData(&platformTag, sizeof(PxU32)); PxU32 markedPadding = 0; #if PX_CHECKED if(!hasDeserializedAssets) markedPadding = 1; #endif stream.writeData(&markedPadding, sizeof(PxU32)); } template<typename InternalReferenceType> struct InternalReferencePredicate { PX_FORCE_INLINE bool operator()(InternalReferenceType& a, InternalReferenceType& b) const { return a.objIndex < b.objIndex; } }; } bool PxSerialization::serializeCollectionToBinary(PxOutputStream& outputStream, PxCollection& pxCollection, PxSerializationRegistry& sr, const PxCollection* pxExternalRefs, bool exportNames) { if(!PxSerialization::isSerializable(pxCollection, sr, pxExternalRefs)) return false; Collection& collection = static_cast<Collection&>(pxCollection); const Collection* externalRefs = static_cast<const Collection*>(pxExternalRefs); //temporary memory stream which allows fixing up data up stream SerializationRegistry& sn = static_cast<SerializationRegistry&>(sr); // sort collection by "order" value (this will be the order in which they get serialized) sortCollection(collection, sn, false); //initialized the context with the sorted collection. 
SerializationContext context(collection, externalRefs); // gather reference information bool hasDeserializedAssets = false; { const PxU32 nb = collection.internalGetNbObjects(); for(PxU32 i=0;i<nb;i++) { PxBase* s = collection.internalGetObject(i); PX_ASSERT(s && s->getConcreteType()); #if PX_CHECKED //can't guarantee marked padding for deserialized instances if(!(s->getBaseFlags() & PxBaseFlag::eOWNS_MEMORY)) hasDeserializedAssets = true; #endif const PxSerializer* serializer = sn.getSerializer(s->getConcreteType()); PX_ASSERT(serializer); serializer->registerReferences(*s, context); } } // now start the actual serialization into the output stream OutputStreamWriter writer(outputStream); LegacySerialStream stream(writer, collection, exportNames); writeHeader(stream, hasDeserializedAssets); // write size of collection stream.alignData(PX_SERIAL_ALIGN); PxU32 nbObjectsInCollection = collection.internalGetNbObjects(); stream.writeData(&nbObjectsInCollection, sizeof(PxU32)); // write the manifest table (PxU32 offset, PxConcreteType type) { PxArray<ManifestEntry> manifestTable(collection.internalGetNbObjects()); PxU32 headerOffset = 0; for(PxU32 i=0;i<collection.internalGetNbObjects();i++) { PxBase* s = collection.internalGetObject(i); PX_ASSERT(s && s->getConcreteType()); PxType concreteType = s->getConcreteType(); const PxSerializer* serializer = sn.getSerializer(concreteType); PX_ASSERT(serializer); manifestTable[i] = ManifestEntry(headerOffset, concreteType); PxU32 classSize = PxU32(serializer->getClassSize()); headerOffset += getPadding(classSize, PX_SERIAL_ALIGN) + classSize; } stream.alignData(PX_SERIAL_ALIGN); const PxU32 nb = manifestTable.size(); stream.writeData(&nb, sizeof(PxU32)); stream.writeData(manifestTable.begin(), manifestTable.size()*sizeof(ManifestEntry)); //store offset for end of object buffer (PxU32 offset) stream.writeData(&headerOffset, sizeof(PxU32)); } // write import references { const PxArray<ImportReference>& importReferences = context.getImportReferences(); stream.alignData(PX_SERIAL_ALIGN); const PxU32 nb = importReferences.size(); stream.writeData(&nb, sizeof(PxU32)); stream.writeData(importReferences.begin(), importReferences.size()*sizeof(ImportReference)); } // write export references { PxU32 nbIds = collection.getNbIds(); PxArray<ExportReference> exportReferences(nbIds); //we can't get quickly from id to object index in collection. 
//if we only need this here, its not worth to build a hash nbIds = 0; for (PxU32 i=0;i<collection.getNbObjects();i++) { PxBase& obj = collection.getObject(i); PxSerialObjectId id = collection.getId(obj); if (id != PX_SERIAL_OBJECT_ID_INVALID) { SerialObjectIndex objIndex(i, false); //i corresponds to manifest entry exportReferences[nbIds++] = ExportReference(id, objIndex); } } stream.alignData(PX_SERIAL_ALIGN); stream.writeData(&nbIds, sizeof(PxU32)); stream.writeData(exportReferences.begin(), exportReferences.size()*sizeof(ExportReference)); } // write internal references { InternalPtrRefMap& internalPtrReferencesMap = context.getInternalPtrReferencesMap(); PxArray<InternalReferencePtr> internalReferencesPtr(internalPtrReferencesMap.size()); PxU32 nbInternalPtrReferences = 0; InternalHandle16RefMap& internalHandle16ReferencesMap = context.getInternalHandle16ReferencesMap(); PxArray<InternalReferenceHandle16> internalReferencesHandle16(internalHandle16ReferencesMap.size()); PxU32 nbInternalHandle16References = 0; { for(InternalPtrRefMap::Iterator iter = internalPtrReferencesMap.getIterator(); !iter.done(); ++iter) internalReferencesPtr[nbInternalPtrReferences++] = InternalReferencePtr(iter->first, iter->second); for(InternalHandle16RefMap::Iterator iter = internalHandle16ReferencesMap.getIterator(); !iter.done(); ++iter) internalReferencesHandle16[nbInternalHandle16References++] = InternalReferenceHandle16(PxTo16(iter->first), iter->second); //sort InternalReferences according to SerialObjectIndex for determinism PxSort<InternalReferencePtr, InternalReferencePredicate<InternalReferencePtr> >(internalReferencesPtr.begin(), internalReferencesPtr.size(), InternalReferencePredicate<InternalReferencePtr>()); PxSort<InternalReferenceHandle16, InternalReferencePredicate<InternalReferenceHandle16> >(internalReferencesHandle16.begin(), internalReferencesHandle16.size(), InternalReferencePredicate<InternalReferenceHandle16>()); } stream.alignData(PX_SERIAL_ALIGN); stream.writeData(&nbInternalPtrReferences, sizeof(PxU32)); stream.writeData(internalReferencesPtr.begin(), internalReferencesPtr.size()*sizeof(InternalReferencePtr)); stream.writeData(&nbInternalHandle16References, sizeof(PxU32)); stream.writeData(internalReferencesHandle16.begin(), internalReferencesHandle16.size()*sizeof(InternalReferenceHandle16)); } // write object data { stream.alignData(PX_SERIAL_ALIGN); const PxU32 nb = collection.internalGetNbObjects(); for(PxU32 i=0;i<nb;i++) { PxBase* s = collection.internalGetObject(i); PX_ASSERT(s && s->getConcreteType()); const PxSerializer* serializer = sn.getSerializer(s->getConcreteType()); PX_ASSERT(serializer); stream.alignData(PX_SERIAL_ALIGN); serializer->exportData(*s, stream); } } // write extra data { const PxU32 nb = collection.internalGetNbObjects(); for(PxU32 i=0;i<nb;i++) { PxBase* s = collection.internalGetObject(i); PX_ASSERT(s && s->getConcreteType()); const PxSerializer* serializer = sn.getSerializer(s->getConcreteType()); PX_ASSERT(serializer); stream.alignData(PX_SERIAL_ALIGN); serializer->exportExtraData(*s, stream); } } return true; } bool PxSerialization::serializeCollectionToBinaryDeterministic(PxOutputStream& outputStream, PxCollection& pxCollection, PxSerializationRegistry& sr, const PxCollection* pxExternalRefs, bool exportNames) { PxDefaultMemoryOutputStream tmpOutputStream; if (!serializeCollectionToBinary(tmpOutputStream, pxCollection, sr, pxExternalRefs, exportNames)) return false; PxDefaultMemoryOutputStream metaDataOutput; dumpBinaryMetaData(metaDataOutput, 
sr); PxBinaryConverter* converter = createBinaryConverter(); PxDefaultMemoryInputData srcMetaData(metaDataOutput.getData(), metaDataOutput.getSize()); PxDefaultMemoryInputData dstMetaData(metaDataOutput.getData(), metaDataOutput.getSize()); if (!converter->setMetaData(srcMetaData, dstMetaData)) return false; PxDefaultMemoryInputData srcBinaryData(tmpOutputStream.getData(), tmpOutputStream.getSize()); bool ret = converter->convert(srcBinaryData, srcBinaryData.getLength(), outputStream); converter->release(); return ret; }
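// --- Editor's note: illustrative sketch, not part of the PhysX sources ---
// In the manifest loop above, each object's class data is placed at the current
// headerOffset and the offset then advances by the class size rounded up to the
// serialization alignment (headerOffset += getPadding(classSize, PX_SERIAL_ALIGN)
// + classSize). The sketch below reproduces that bookkeeping with made-up class
// sizes and an illustrative alignment of 16; the real PX_SERIAL_ALIGN value
// should be taken from the SDK headers, not from this sketch.

#include <cstdint>
#include <cstdio>

static uint32_t paddingSketch(uint32_t value, uint32_t alignment)
{
	const uint32_t mask = alignment - 1;
	return (alignment - (value & mask)) & mask;
}

int main()
{
	const uint32_t alignment = 16;                  // illustrative only
	const uint32_t classSizes[] = { 48, 200, 20 };  // made-up serializer class sizes

	uint32_t offset = 0;
	for(uint32_t size : classSizes)
	{
		printf("manifest entry: offset %u, class size %u\n", offset, size);
		offset += paddingSketch(size, alignment) + size; // same formula as the loop above
	}
	printf("object data end offset: %u\n", offset);      // stored right after the manifest table
	return 0;
}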
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Binary/SnConvX_Common.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. #ifndef SN_CONVX_COMMON_H #define SN_CONVX_COMMON_H #if PX_VC #pragma warning(disable:4121) // alignment of a member was sensitive to packing #endif #include "common/PxPhysXCommonConfig.h" #define inline_ PX_FORCE_INLINE #define PsArray physx::PxArray #endif
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Binary/SnConvX_Align.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. #ifndef SN_CONVX_ALIGN_H #define SN_CONVX_ALIGN_H namespace physx { namespace Sn { #define ALIGN_DEFAULT 16 #define ALIGN_FILE 128 PX_INLINE PxU32 getPadding(size_t value, PxU32 alignment) { const PxU32 mask = alignment-1; const PxU32 overhead = PxU32(value) & mask; return (alignment - overhead) & mask; } } } #endif
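// --- Editor's note: illustrative sketch, not part of the PhysX sources ---
// getPadding() above computes, for a power-of-two alignment, how many bytes
// must be added to a value to reach the next aligned boundary (0 if already
// aligned): (alignment - (value & (alignment - 1))) & (alignment - 1).
// A few worked values, checked with asserts:

#include <cassert>
#include <cstdint>
#include <cstddef>

static uint32_t paddingSketch(size_t value, uint32_t alignment)
{
	const uint32_t mask = alignment - 1;
	const uint32_t overhead = uint32_t(value) & mask;
	return (alignment - overhead) & mask;
}

int main()
{
	assert(paddingSketch(0,   16)  == 0);   // already aligned
	assert(paddingSketch(1,   16)  == 15);  // 1   -> 16
	assert(paddingSketch(17,  16)  == 15);  // 17  -> 32
	assert(paddingSketch(32,  16)  == 0);   // already aligned
	assert(paddingSketch(129, 128) == 127); // 129 -> 256 (ALIGN_FILE boundary)
	return 0;
}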
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Binary/SnSerializationContext.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #include "common/PxBase.h" #include "SnSerializationContext.h" using namespace physx; using namespace Sn; PxBase* DeserializationContext::resolveReference(PxU32 kind, size_t reference) const { SerialObjectIndex objIndex; if (kind == PX_SERIAL_REF_KIND_PXBASE) { const InternalPtrRefMap::Entry* entry0 = mInternalPtrReferencesMap.find(reference); PX_ASSERT(entry0); objIndex = entry0->second; } else if (kind == PX_SERIAL_REF_KIND_MATERIAL_IDX) { const InternalHandle16RefMap::Entry* entry0 = mInternalHandle16ReferencesMap.find(PxU16(reference)); PX_ASSERT(entry0); objIndex = entry0->second; } else { return NULL; } bool isExternal; PxU32 index = objIndex.getIndex(isExternal); PxBase* base = NULL; if (isExternal) { const ImportReference& entry = mImportReferences[index]; base = mExternalRefs->find(entry.id); } else { const ManifestEntry& entry = mManifestTable[index]; base = reinterpret_cast<PxBase*>(mObjectDataAddress + entry.offset); } PX_ASSERT(base); return base; } void SerializationContext::registerReference(PxBase& serializable, PxU32 kind, size_t reference) { #if PX_CHECKED if ((kind & PX_SERIAL_REF_KIND_PTR_TYPE_BIT) == 0 && reference > 0xffff) { PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "PxSerializationContext::registerReference: only 16 bit handles supported."); return; } #endif bool isExternal = mExternalRefs && mExternalRefs->contains(serializable); PxU32 index; if (isExternal) { PxSerialObjectId id = mExternalRefs->getId(serializable); PX_ASSERT(id != PX_SERIAL_OBJECT_ID_INVALID); if (const PxHashMap<PxSerialObjectId, PxU32>::Entry* entry = mImportReferencesMap.find(id)) { index = entry->second; } else { index = mImportReferences.size(); mImportReferencesMap.insert(id, index); mImportReferences.pushBack(ImportReference(id, serializable.getConcreteType())); } } else { PX_ASSERT(mCollection.contains(serializable)); index 
= mObjToCollectionIndexMap[&serializable]; } if (kind & PX_SERIAL_REF_KIND_PXBASE) { mInternalPtrReferencesMap[reference] = SerialObjectIndex(index, isExternal); } else if (kind & PX_SERIAL_REF_KIND_MATERIAL_IDX) { mInternalHandle16ReferencesMap[PxU16(reference)] = SerialObjectIndex(index, isExternal); } }
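// --- Editor's note: illustrative sketch, not part of the PhysX sources ---
// registerReference()/resolveReference() above map a raw reference value to an
// object index plus a flag saying whether that index points into the local
// manifest table or into the import-reference table. The sketch below reduces
// that register/resolve pair to a plain std::unordered_map; the struct and all
// values are illustrative only.

#include <cstdint>
#include <cstdio>
#include <cstddef>
#include <unordered_map>

struct ObjectIndexSketch
{
	uint32_t index;       // position in the manifest or import-reference table
	bool     isExternal;  // true: import-reference table, false: local manifest
};

int main()
{
	std::unordered_map<size_t, ObjectIndexSketch> ptrReferences;

	// "Register" during serialization: remember where each referenced object lives.
	ptrReferences[0x1000] = { 3, false }; // 4th entry of the local manifest
	ptrReferences[0x2000] = { 0, true  }; // 1st entry of the import references

	// "Resolve" during deserialization: look the reference up again.
	const ObjectIndexSketch& resolved = ptrReferences.at(0x1000);
	printf("index=%u external=%d\n", resolved.index, int(resolved.isExternal));
	return 0;
}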
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Binary/SnConvX_MetaData.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. #include "foundation/PxIO.h" #include "foundation/PxMemory.h" #include "foundation/PxString.h" #include "SnConvX.h" #include "common/PxSerialFramework.h" #include "serialization/SnSerialUtils.h" #include <assert.h> using namespace physx; using namespace physx::Sn; //#define REMOVE_EXPLICIT_PADDING static const char gVTablePtr[] = "v-table ptr"; static const char gAutoPadding[] = "auto-generated padding"; static const char gByte[] = "paddingByte"; /////////////////////////////////////////////////////////////////////////////// bool PxMetaDataEntry::isVTablePtr() const { return mType==gVTablePtr; } /////////////////////////////////////////////////////////////////////////////// bool MetaClass::getFieldByType(const char* type, PxMetaDataEntry& entry) const { assert(type); PxU32 nbFields = mFields.size(); for(PxU32 i=0;i<nbFields;i++) { if(Pxstrcmp(mFields[i].mType, type)==0) { entry = mFields[i]; return true; } } return false; } bool MetaClass::getFieldByName(const char* name, PxMetaDataEntry& entry) const { assert(name); PxU32 nbFields = mFields.size(); for(PxU32 i=0;i<nbFields;i++) { if(Pxstrcmp(mFields[i].mName, name)==0) { entry = mFields[i]; return true; } } return false; } void MetaClass::checkAndCompleteClass(const MetaData& owner, int& startOffset, int& nbBytes) { if(startOffset!=-1) { owner.mConvX.displayMessage(PxErrorCode::eDEBUG_INFO, "\n Adding %d padding bytes at offset %d in class %s.\n", nbBytes, startOffset, mClassName); // Leap of faith: add padding bytes there PxMetaDataEntry padding; padding.mType = gByte; padding.mName = gAutoPadding; padding.mOffset = startOffset; padding.mSize = nbBytes; padding.mCount = nbBytes; padding.mFlags = PxMetaDataFlag::ePADDING; mFields.pushBack(padding); startOffset = -1; } } bool MetaClass::check(const MetaData& owner) { owner.mConvX.displayMessage(PxErrorCode::eDEBUG_INFO, "Checking class: %s\n", mClassName); if(mCallback) return true; // Skip atomic types if(mMaster) return true; // Skip typedefs bool* map = 
reinterpret_cast<bool*>(PX_ALLOC(sizeof(bool)*mSize, "bool")); memset(map, 0, size_t(mSize)); const PxU32 nbFields = mFields.size(); for(PxU32 i=0;i<nbFields;i++) { const PxMetaDataEntry& field = mFields[i]; if(field.mFlags & PxMetaDataFlag::eEXTRA_DATA) continue; // if((field.mFlags & PxMetaDataFlag::eUNION) && !field.mSize) // continue; // Union type assert(field.mSize); const int byteStart = field.mOffset; const int byteEnd = field.mOffset + field.mSize; assert(byteStart>=0 && byteStart<mSize); assert(byteEnd>=0 && byteEnd<=mSize); int startOffset = -1; int nbBytes = 0; for(int j=byteStart;j<byteEnd;j++) { if(map[j]) { if(startOffset==-1) { startOffset = int(i); nbBytes = 0; } nbBytes++; // displayErrorMessage(" %s: found overlapping bytes!\n", mClassName); } else { if(startOffset!=-1) { owner.mConvX.displayMessage(PxErrorCode::eINTERNAL_ERROR, "PxBinaryConverter: %s: %d overlapping bytes at offset %d!\n", mClassName, nbBytes, startOffset); startOffset = -1; PX_FREE(map); return false; } } map[j] = true; } if(startOffset!=-1) { owner.mConvX.displayMessage(PxErrorCode::eINTERNAL_ERROR, "PxBinaryConverter: %s: %d overlapping bytes at offset %d!\n", mClassName, nbBytes, startOffset); startOffset = -1; PX_FREE(map); return false; } } { int startOffset = -1; int nbBytes = 0; for(int i=0;i<mSize;i++) { if(!map[i]) { if(startOffset==-1) { startOffset = i; nbBytes = 0; } nbBytes++; } else { checkAndCompleteClass(owner, startOffset, nbBytes); } } checkAndCompleteClass(owner, startOffset, nbBytes); } PX_FREE(map); // for(PxU32 i=0;i<nbFields;i++) { const PxMetaDataEntry& current = mFields[i]; if(current.mFlags & PxMetaDataFlag::ePTR) continue; MetaClass* fieldMetaClass = owner.mConvX.getMetaClass(current.mType, owner.getType()); if(!fieldMetaClass) { owner.mConvX.displayMessage(PxErrorCode::eINTERNAL_ERROR, "PxBinaryConverter: Missing meta-data for: %s\n", current.mType); return false; } else { if(current.mFlags & PxMetaDataFlag::eEXTRA_DATA) { owner.mConvX.displayMessage(PxErrorCode::eDEBUG_INFO, "Extra data: %s\n", current.mType); } else { assert(fieldMetaClass->mSize*current.mCount==current.mSize); } } } return true; } /////////////////////////////////////////////////////////////////////////////// MetaData::MetaData(ConvX& convx) : mConvX (convx), mType (META_DATA_NONE), mNbEntries (0), mEntries (NULL), mStringTable (NULL), mVersion (0), mSizeOfPtr (0), mPlatformTag (0), mGaussMapLimit (0), mFlip (false) { } MetaData::~MetaData() { PxU32 nbMetaClasses = mMetaClasses.size(); for(PxU32 i=0;i<nbMetaClasses;i++) { MetaClass* current = mMetaClasses[i]; PX_DELETE(current); } PX_FREE(mStringTable); PX_DELETE_ARRAY(mEntries); } MetaClass* MetaData::getMetaClass(const char* name) const { PxU32 nbMetaClasses = mMetaClasses.size(); for(PxU32 i=0;i<nbMetaClasses;i++) { MetaClass* current = mMetaClasses[i]; if(Pxstrcmp(current->mClassName, name)==0) { while(current->mMaster) current = current->mMaster; return current; } } return NULL; } MetaClass* MetaData::getMetaClass(PxConcreteType::Enum concreteType) const { for(PxU32 i=0; i< mConcreteTypeTable.size(); i++) { if(mConcreteTypeTable[i].first == concreteType) { const char* className = offsetToText(reinterpret_cast<const char*>(size_t(mConcreteTypeTable[i].second))); return getMetaClass(className); } } return NULL; } MetaClass* MetaData::addNewClass(const char* name, int size, MetaClass* master, ConvertCallback callback) { // PT: if you reach this assert, you used PX_DEF_BIN_METADATA_TYPEDEF twice on the same type assert(!getMetaClass(name)); MetaClass* mc 
= PX_NEW(MetaClass); mc->mCallback = callback; mc->mMaster = master; mc->mClassName = name; mc->mSize = size; mc->mDepth = 0; mc->mProcessed = false; // mc->mNbEntries = -1; mMetaClasses.pushBack(mc); return mc; } bool MetaData::load(PxInputStream& inputStream, MetaDataType type) { assert(type!=META_DATA_NONE); mConvX.displayMessage(PxErrorCode::eDEBUG_INFO, "Loading %s meta-data...\n", type==META_DATA_SRC ? "source" : "target"); mType = type; mFlip = false; { int header; inputStream.read(&header, 4); if(header==PX_MAKE_FOURCC('M','E','T','A')) { mFlip = false; } else if(header==PX_MAKE_FOURCC('A','T','E','M')) { mFlip = true; } else { mConvX.displayMessage(PxErrorCode::eINVALID_PARAMETER, "PxBinaryConverter: invalid meta-data file!\n"); return false; } if (type == META_DATA_SRC && mFlip) { mConvX.displayMessage(PxErrorCode::eINVALID_PARAMETER, "PxBinaryConverter: source meta data needs to match endianness with current system!"); return false; } inputStream.read(&mVersion, 4); if(mFlip) { flip(mVersion); } inputStream.read(mBinaryVersionGuid, SN_BINARY_VERSION_GUID_NUM_CHARS); mBinaryVersionGuid[SN_BINARY_VERSION_GUID_NUM_CHARS] = 0; if (!checkCompatibility(mBinaryVersionGuid)) { mConvX.displayMessage(PxErrorCode::eINVALID_PARAMETER, "PxBinaryConverter: binary data version 0x%s is incompatible with this PhysX sdk (0x%s).\n", mBinaryVersionGuid, getBinaryVersionGuid()); return false; } inputStream.read(&mSizeOfPtr, 4); if(mFlip) flip(mSizeOfPtr); inputStream.read(&mPlatformTag, 4); if(mFlip) flip(mPlatformTag); if (!Sn::isBinaryPlatformTagValid(PxU32(mPlatformTag))) { mConvX.displayMessage(PxErrorCode::eINVALID_PARAMETER, "PxBinaryConverter: Unknown meta data platform tag"); return false; } inputStream.read(&mGaussMapLimit, 4); if(mFlip) flip(mGaussMapLimit); inputStream.read(&mNbEntries, 4); if(mFlip) flip(mNbEntries); mEntries = PX_NEW(PxMetaDataEntry)[PxU32(mNbEntries)]; if(mSizeOfPtr==8) { for(int i=0;i<mNbEntries;i++) { MetaDataEntry64 tmp; inputStream.read(&tmp, sizeof(MetaDataEntry64)); if (mFlip) // important to flip them first, else the cast below might destroy information { flip(tmp.mType); flip(tmp.mName); } // We can safely cast to 32bits here since we transformed the pointers to offsets in the string table on export mEntries[i].mType = reinterpret_cast<const char*>(size_t(tmp.mType)); mEntries[i].mName = reinterpret_cast<const char*>(size_t(tmp.mName)); mEntries[i].mOffset = tmp.mOffset; mEntries[i].mSize = tmp.mSize; mEntries[i].mCount = tmp.mCount; mEntries[i].mOffsetSize = tmp.mOffsetSize; mEntries[i].mFlags = tmp.mFlags; mEntries[i].mAlignment = tmp.mAlignment; } } else { assert(mSizeOfPtr==4); // inputStream.read(mEntries, mNbEntries*sizeof(PxMetaDataEntry)); for(int i=0;i<mNbEntries;i++) { MetaDataEntry32 tmp; inputStream.read(&tmp, sizeof(MetaDataEntry32)); if (mFlip) { flip(tmp.mType); flip(tmp.mName); } mEntries[i].mType = reinterpret_cast<const char*>(size_t(tmp.mType)); mEntries[i].mName = reinterpret_cast<const char*>(size_t(tmp.mName)); mEntries[i].mOffset = tmp.mOffset; mEntries[i].mSize = tmp.mSize; mEntries[i].mCount = tmp.mCount; mEntries[i].mOffsetSize = tmp.mOffsetSize; mEntries[i].mFlags = tmp.mFlags; mEntries[i].mAlignment = tmp.mAlignment; } } if(mFlip) { for(int i=0;i<mNbEntries;i++) { // mEntries[i].mType and mEntries[i].mName have been flipped already because they need special treatment // on 64bit to 32bit platform conversions flip(mEntries[i].mOffset); flip(mEntries[i].mSize); flip(mEntries[i].mCount); flip(mEntries[i].mOffsetSize); 
flip(mEntries[i].mFlags); flip(mEntries[i].mAlignment); } } int nbConcreteType; inputStream.read(&nbConcreteType, 4); if(mFlip) flip(nbConcreteType); for(int i=0; i<nbConcreteType; i++) { PxU16 concreteType; PxU32 nameOffset; inputStream.read(&concreteType, 2); inputStream.read(&nameOffset, 4); if(mFlip) { flip(concreteType); flip(nameOffset); } mConcreteTypeTable.pushBack( PxPair<PxConcreteType::Enum, PxU32>(PxConcreteType::Enum(concreteType), nameOffset) ); } int tableSize; inputStream.read(&tableSize, 4); if(mFlip) flip(tableSize); mStringTable = reinterpret_cast<char*>(PX_ALLOC(sizeof(char)*tableSize, "MetaData StringTable")); inputStream.read(mStringTable, PxU32(tableSize)); } // Register atomic types { addNewClass("bool", 1, NULL, &ConvX::convert8); addNewClass("char", 1, NULL, &ConvX::convert8); addNewClass("short", 2, NULL, &ConvX::convert16); addNewClass("int", 4, NULL, &ConvX::convert32); addNewClass("PxU64", 8, NULL, &ConvX::convert64); addNewClass("float", 4, NULL, &ConvX::convertFloat); addNewClass("paddingByte", 1, NULL, &ConvX::convertPad8); } { MetaClass* currentClass = NULL; for(int i=0;i<mNbEntries;i++) { mEntries[i].mType = offsetToText(mEntries[i].mType); mEntries[i].mName = offsetToText(mEntries[i].mName); if(mEntries[i].mFlags & PxMetaDataFlag::eTYPEDEF) { mConvX.displayMessage(PxErrorCode::eDEBUG_INFO, "Found typedef: %s => %s\n", mEntries[i].mName, mEntries[i].mType); MetaClass* mc = getMetaClass(mEntries[i].mName); if(mc) addNewClass(mEntries[i].mType, mc->mSize, mc, mc->mCallback); else mConvX.displayMessage(PxErrorCode::eINTERNAL_ERROR, "PxBinaryConverter: Invalid typedef - Missing metadata for: %s, please check the source metadata.\n" , mEntries[i].mName); } else if(mEntries[i].mFlags & PxMetaDataFlag::eCLASS) { if(!mEntries[i].mName) { mConvX.displayMessage(PxErrorCode::eDEBUG_INFO, "Found class: %s\n", mEntries[i].mType); currentClass = addNewClass(mEntries[i].mType, mEntries[i].mSize); if(mEntries[i].mFlags & PxMetaDataFlag::eVIRTUAL) { PxMetaDataEntry vtable; vtable.mType = gVTablePtr; vtable.mName = gVTablePtr; vtable.mOffset = 0; vtable.mSize = mSizeOfPtr; vtable.mCount = 1; vtable.mFlags = PxMetaDataFlag::ePTR; currentClass->mFields.pushBack(vtable); } } else { assert(currentClass); mConvX.displayMessage(PxErrorCode::eDEBUG_INFO, " - inherits from: %s\n", mEntries[i].mName); currentClass->mBaseClasses.pushBack(mEntries[i]); } } else { const int isUnion = mEntries[i].mFlags & PxMetaDataFlag::eUNION; if(isUnion && !mEntries[i].mSize) { mConvX.registerUnionType(mEntries[i].mType, mEntries[i].mName, mEntries[i].mOffset); } else { if(isUnion) { mConvX.registerUnion(mEntries[i].mType); } const int isPadding = mEntries[i].mFlags & PxMetaDataFlag::ePADDING; assert(currentClass); #ifdef REMOVE_EXPLICIT_PADDING if(!isPadding) #endif currentClass->mFields.pushBack(mEntries[i]); if(isPadding) mConvX.displayMessage(PxErrorCode::eDEBUG_INFO, " - contains padding: %s - %s\n", mEntries[i].mType, mEntries[i].mName); else if(mEntries[i].mFlags & PxMetaDataFlag::eEXTRA_DATA) mConvX.displayMessage(PxErrorCode::eDEBUG_INFO, " - contains extra data: %s%s\n", mEntries[i].mType, mEntries[i].mFlags & PxMetaDataFlag::ePTR ? "*" : ""); else mConvX.displayMessage(PxErrorCode::eDEBUG_INFO, " - contains field: %s%s\n", mEntries[i].mType, mEntries[i].mFlags & PxMetaDataFlag::ePTR ? 
"*" : ""); } } } } // Sort classes by depth struct Local { static bool _computeDepth(const MetaData& md, MetaClass* current, int currentDepth, int& maxDepth) { if(currentDepth>maxDepth) maxDepth = currentDepth; PxU32 nbBases = current->mBaseClasses.size(); for(PxU32 i=0;i<nbBases;i++) { const PxMetaDataEntry& baseClassEntry = current->mBaseClasses[i]; MetaClass* baseClass = md.getMetaClass(baseClassEntry.mName); if(!baseClass) { md.mConvX.displayMessage(PxErrorCode::eINTERNAL_ERROR, "PxBinaryConverter: Can't find class %s metadata, please check the source metadata.\n", baseClassEntry.mName); return false; } if (!_computeDepth(md, baseClass, currentDepth+1, maxDepth)) return false; } return true; } static int compareClasses(const void* c0, const void* c1) { MetaClass** mc0 = reinterpret_cast<MetaClass**>(const_cast<void*>(c0)); MetaClass** mc1 = reinterpret_cast<MetaClass**>(const_cast<void*>(c1)); // return (*mc0)->mSize - (*mc1)->mSize; return (*mc0)->mDepth - (*mc1)->mDepth; } static int compareEntries(const void* c0, const void* c1) { PxMetaDataEntry* mc0 = reinterpret_cast<PxMetaDataEntry*>(const_cast<void*>(c0)); PxMetaDataEntry* mc1 = reinterpret_cast<PxMetaDataEntry*>(const_cast<void*>(c1)); //mOffset is used to access control information for extra data, and not for offsets of the data itself. assert(!(mc0->mFlags & PxMetaDataFlag::eEXTRA_DATA)); assert(!(mc1->mFlags & PxMetaDataFlag::eEXTRA_DATA)); return mc0->mOffset - mc1->mOffset; } }; { // Compute depths const PxU32 nbMetaClasses = mMetaClasses.size(); for(PxU32 i=0;i<nbMetaClasses;i++) { MetaClass* current = mMetaClasses[i]; int maxDepth = 0; if(!Local::_computeDepth(*this, current, 0, maxDepth)) return false; current->mDepth = maxDepth; } // Sort by depth MetaClass** metaClasses = &mMetaClasses[0]; qsort(metaClasses, size_t(nbMetaClasses), sizeof(MetaClass*), Local::compareClasses); } // Replicate fields from base classes { PxU32 nbMetaClasses = mMetaClasses.size(); for(PxU32 k=0;k<nbMetaClasses;k++) { MetaClass* current = mMetaClasses[k]; PxU32 nbBases = current->mBaseClasses.size(); // merge entries of base classes and current class in the right order // this is needed for extra data ordering, which is not covered by the mOffset sort // in the next stage below PsArray<PxMetaDataEntry> mergedEntries; for(PxU32 i=0;i<nbBases;i++) { const PxMetaDataEntry& baseClassEntry = current->mBaseClasses[i]; MetaClass* baseClass = getMetaClass(baseClassEntry.mName); assert(baseClass); assert(baseClass->mBaseClasses.size()==0 || baseClass->mProcessed); PxU32 nbBaseFields = baseClass->mFields.size(); for(PxU32 j=0;j<nbBaseFields;j++) { PxMetaDataEntry f = baseClass->mFields[j]; // Don't merge primary v-tables to avoid redundant v-table entries. // It means the base v-table won't be inherited & needs to be explicitly defined in the metadata. Seems reasonable. // Could be done better though. 
if(f.mType==gVTablePtr && !f.mOffset && !baseClassEntry.mOffset) continue; f.mOffset += baseClassEntry.mOffset; mergedEntries.pushBack(f); } current->mProcessed = true; } //append current fields to base class fields for (PxU32 i = 0; i < current->mFields.size(); i++) { mergedEntries.pushBack(current->mFields[i]); } current->mFields.clear(); current->mFields.assign(mergedEntries.begin(), mergedEntries.end()); } } // Check classes { PxU32 nbMetaClasses = mMetaClasses.size(); for(PxU32 i=0;i<nbMetaClasses;i++) { MetaClass* current = mMetaClasses[i]; if(!current->check(*this)) return false; } } // Sort meta-data by offset { PxU32 nbMetaClasses = mMetaClasses.size(); for(PxU32 i=0;i<nbMetaClasses;i++) { MetaClass* current = mMetaClasses[i]; PxU32 nbFields = current->mFields.size(); if(nbFields<2) continue; PxMetaDataEntry* entries = &current->mFields[0]; PxMetaDataEntry* newEntries = PX_NEW(PxMetaDataEntry)[nbFields]; PxU32 nb = 0; for(PxU32 j=0;j<nbFields;j++) if(!(entries[j].mFlags & PxMetaDataFlag::eEXTRA_DATA)) newEntries[nb++] = entries[j]; PxU32 nbToSort = nb; for(PxU32 j=0;j<nbFields;j++) if(entries[j].mFlags & PxMetaDataFlag::eEXTRA_DATA) newEntries[nb++] = entries[j]; assert(nb==nbFields); PxMemCopy(entries, newEntries, nb*sizeof(PxMetaDataEntry)); PX_DELETE_ARRAY(newEntries); qsort(entries, size_t(nbToSort), sizeof(PxMetaDataEntry), Local::compareEntries); } } return true; } namespace { //tool functions for MetaData::compare bool str_equal(const char* src, const char* dst) { if (src == dst) return true; if (src != NULL && dst != NULL) return Pxstrcmp(src, dst) == 0; return false; } const char* str_print(const char* str) { return str != NULL ? str : "(nullptr)"; } } #define COMPARE_METADATA_BOOL_MD(type, src, dst, field) if ((src).field != (dst).field) \ { mConvX.displayMessage(PxErrorCode::eDEBUG_INFO, "%s::%s missmatch: src %s dst %s\n", #type, #field, (src).field?"true":"false", (dst).field?"true":"false"); isEquivalent = false; } #define COMPARE_METADATA_INT_MD(type, src, dst, field) if ((src).field != (dst).field) \ { mConvX.displayMessage(PxErrorCode::eDEBUG_INFO, "%s::%s missmatch: src %d dst %d\n", #type, #field, (src).field, (dst).field); isEquivalent = false; } #define COMPARE_METADATA_STRING_MD(type, src, dst, field) \ if (!str_equal((src).field, (dst).field)) \ { \ mConvX.displayMessage(PxErrorCode::eDEBUG_INFO, "%s::%s missmatch: src %s dst %s\n", #type, #field, str_print((src).field), str_print((dst).field)); \ isEquivalent = false; \ } bool MetaData::compare(const MetaData& dst) const { bool isEquivalent = true; //mType COMPARE_METADATA_BOOL_MD(MetaData, *this, dst, mFlip) //mVersion COMPARE_METADATA_STRING_MD(MetaData, *this, dst, mBinaryVersionGuid) COMPARE_METADATA_INT_MD(MetaData, *this, dst, mSizeOfPtr) COMPARE_METADATA_INT_MD(MetaData, *this, dst, mPlatformTag) COMPARE_METADATA_INT_MD(MetaData, *this, dst, mGaussMapLimit) COMPARE_METADATA_INT_MD(MetaData, *this, dst, mNbEntries) //find classes missing in dst for (PxU32 i = 0; i<mMetaClasses.size(); i++) { MetaClass* mcSrc = mMetaClasses[i]; MetaClass* mcDst = dst.getMetaClass(mcSrc->mClassName); if (mcDst == NULL) { mConvX.displayMessage(PxErrorCode::eDEBUG_INFO, "dst is missing meta class %s", mcSrc->mClassName); } } //find classes missing in src for (PxU32 i = 0; i<dst.mMetaClasses.size(); i++) { MetaClass* mcDst = dst.mMetaClasses[i]; MetaClass* mcSrc = getMetaClass(mcDst->mClassName); if (mcSrc == NULL) { mConvX.displayMessage(PxErrorCode::eDEBUG_INFO, "src is missing meta class %s", mcDst->mClassName); } } 
//compare classes present in src and dst for (PxU32 i = 0; i<mMetaClasses.size(); i++) { const char* className = mMetaClasses[i]->mClassName; MetaClass* mcSrc = getMetaClass(className); MetaClass* mcDst = dst.getMetaClass(className); if (mcSrc != NULL && mcDst != NULL) { COMPARE_METADATA_INT_MD(MetaClass, *mcSrc, *mcDst, mCallback) COMPARE_METADATA_INT_MD(MetaClass, *mcSrc, *mcDst, mMaster) //should be 0 for both anyway COMPARE_METADATA_STRING_MD(MetaClass, *mcSrc, *mcDst, mClassName) COMPARE_METADATA_INT_MD(MetaClass, *mcSrc, *mcDst, mSize) COMPARE_METADATA_INT_MD(MetaClass, *mcSrc, *mcDst, mDepth) COMPARE_METADATA_INT_MD(MetaClass, *mcSrc, *mcDst, mBaseClasses.size()) if (mcSrc->mBaseClasses.size() == mcDst->mBaseClasses.size()) { for (PxU32 b = 0; b < mcSrc->mBaseClasses.size(); b++) { COMPARE_METADATA_STRING_MD(PxMetaDataEntry, mcSrc->mBaseClasses[b], mcDst->mBaseClasses[b], mName); } } COMPARE_METADATA_INT_MD(MetaClass, *mcSrc, *mcDst, mFields.size()) if (mcSrc->mFields.size() == mcDst->mFields.size()) { for (PxU32 f = 0; f < mcSrc->mFields.size(); f++) { PxMetaDataEntry srcMde = mcSrc->mFields[f]; PxMetaDataEntry dstMde = mcDst->mFields[f]; COMPARE_METADATA_STRING_MD(PxMetaDataEntry, srcMde, dstMde, mType) COMPARE_METADATA_STRING_MD(PxMetaDataEntry, srcMde, dstMde, mName) COMPARE_METADATA_INT_MD(PxMetaDataEntry, srcMde, dstMde, mOffset) COMPARE_METADATA_INT_MD(PxMetaDataEntry, srcMde, dstMde, mSize) COMPARE_METADATA_INT_MD(PxMetaDataEntry, srcMde, dstMde, mCount) COMPARE_METADATA_INT_MD(PxMetaDataEntry, srcMde, dstMde, mOffsetSize) COMPARE_METADATA_INT_MD(PxMetaDataEntry, srcMde, dstMde, mFlags) COMPARE_METADATA_INT_MD(PxMetaDataEntry, srcMde, dstMde, mAlignment) } } } } return isEquivalent; } #undef COMPARE_METADATA_BOOL_MD #undef COMPARE_METADATA_INT_MD #undef COMPARE_METADATA_STRING_MD /////////////////////////////////////////////////////////////////////////////// void ConvX::releaseMetaData() { PX_DELETE(mMetaData_Dst); PX_DELETE(mMetaData_Src); } const MetaData* ConvX::loadMetaData(PxInputStream& inputStream, MetaDataType type) { if (type != META_DATA_SRC && type != META_DATA_DST) { displayMessage(PxErrorCode::eINTERNAL_ERROR, "PxBinaryConverter: Wrong meta data type, please check the source metadata.\n"); return NULL; } PX_ASSERT(type == META_DATA_SRC || type == META_DATA_DST); MetaData*& metaDataPtr = (type == META_DATA_SRC) ? 
mMetaData_Src : mMetaData_Dst; metaDataPtr = PX_NEW(MetaData)(*this); if(!(metaDataPtr)->load(inputStream, type)) PX_DELETE(metaDataPtr); return metaDataPtr; } const MetaData* ConvX::getBinaryMetaData(MetaDataType type) { if(type==META_DATA_SRC) return mMetaData_Src; if(type==META_DATA_DST) return mMetaData_Dst; PX_ASSERT(0); return NULL; } int ConvX::getNbMetaClasses(MetaDataType type) { if(type==META_DATA_SRC) return mMetaData_Src->getNbMetaClasses(); if(type==META_DATA_DST) return mMetaData_Dst->getNbMetaClasses(); PX_ASSERT(0); return 0; } MetaClass* ConvX::getMetaClass(unsigned int i, MetaDataType type) const { if(type==META_DATA_SRC) return mMetaData_Src->getMetaClass(i); if(type==META_DATA_DST) return mMetaData_Dst->getMetaClass(i); PX_ASSERT(0); return NULL; } MetaClass* ConvX::getMetaClass(const char* name, MetaDataType type) const { if(type==META_DATA_SRC) return mMetaData_Src->getMetaClass(name); if(type==META_DATA_DST) return mMetaData_Dst->getMetaClass(name); PX_ASSERT(0); return NULL; } MetaClass* ConvX::getMetaClass(PxConcreteType::Enum concreteType, MetaDataType type) { MetaClass* metaClass = NULL; if(type==META_DATA_SRC) metaClass = mMetaData_Src->getMetaClass(concreteType); if(type==META_DATA_DST) metaClass = mMetaData_Dst->getMetaClass(concreteType); if(!metaClass) { displayMessage(PxErrorCode::eINTERNAL_ERROR, "PxBinaryConverter: Missing concreteType %d metadata! serialized a class without dumping metadata. Please check the metadata.", concreteType); return NULL; } return metaClass; } /////////////////////////////////////////////////////////////////////////////// // Peek & poke, yes sir. PxU64 physx::Sn::peek(int size, const char* buffer, int flags) { const int maskMSB = flags & PxMetaDataFlag::eCOUNT_MASK_MSB; const int skipIfOne = flags & PxMetaDataFlag::eCOUNT_SKIP_IF_ONE; switch(size) { case 1: { unsigned char value = *(reinterpret_cast<const unsigned char*>(buffer)); if(maskMSB) value &= 0x7f; if(skipIfOne && value==1) return 0; return PxU64(value); } case 2: { unsigned short value = *(reinterpret_cast<const unsigned short*>(buffer)); if(maskMSB) value &= 0x7fff; if(skipIfOne && value==1) return 0; return PxU64(value); } case 4: { unsigned int value = *(reinterpret_cast<const unsigned int*>(buffer)); if(maskMSB) value &= 0x7fffffff; if(skipIfOne && value==1) return 0; return PxU64(value); } case 8: { PxU64 value = *(reinterpret_cast<const PxU64*>(buffer)); if(maskMSB) value &= (PxU64(-1))>>1; if(skipIfOne && value==1) return 0; return value; } }; PX_ASSERT(0); return PxU64(-1); }
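// --- Illustrative sketch (not part of the original PhysX source) ---------------------
// Sn::peek() above reads a 1/2/4/8 byte counter out of a raw buffer, optionally clearing
// the most significant bit of the stored field (eCOUNT_MASK_MSB) and mapping a stored
// value of 1 to 0 (eCOUNT_SKIP_IF_ONE). A minimal standalone version of the same idea,
// written with plain fixed-width arithmetic and assuming a little-endian host (the
// original switches on the exact field type instead), could look like this:
#include <cstring>	// std::memcpy, only needed by this self-contained sketch
namespace peek_sketch
{
	// 'size' is assumed to be 1, 2, 4 or 8, matching the switch in Sn::peek() above.
	inline unsigned long long peekCounter(int size, const char* buffer, bool maskMSB, bool skipIfOne)
	{
		unsigned long long value = 0;
		std::memcpy(&value, buffer, size_t(size));		// low bytes first on a little-endian host
		if(maskMSB)
			value &= ~(1ull << (size * 8 - 1));			// clear the MSB of the size-byte field
		if(skipIfOne && value == 1)
			return 0;									// "exactly one element" is encoded implicitly
		return value;
	}
	// Usage note: peekCounter(2, buf, true, true) mirrors what peek(2, buf, flags) does
	// when both eCOUNT_MASK_MSB and eCOUNT_SKIP_IF_ONE are set in 'flags'.
}
// --------------------------------------------------------------------------------------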
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Binary/SnConvX_Output.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. #ifndef SN_CONVX_OUTPUT_H #define SN_CONVX_OUTPUT_H #include "foundation/PxSimpleTypes.h" namespace physx { namespace Sn { struct PxMetaDataEntry; class ConvX; typedef void (Sn::ConvX::*ConvertCallback) (const char* src, const PxMetaDataEntry& entry, const PxMetaDataEntry& dstEntry); inline_ void flip(PxI16& v) { PxI8* b = reinterpret_cast<PxI8*>(&v); PxI8 temp = b[0]; b[0] = b[1]; b[1] = temp; } inline_ void flip(PxU16& v) { flip(reinterpret_cast<PxI16&>(v)); } inline_ void flip32(PxI8* b) { PxI8 temp = b[0]; b[0] = b[3]; b[3] = temp; temp = b[1]; b[1] = b[2]; b[2] = temp; } inline_ void flip(PxI32& v) { PxI8* b = reinterpret_cast<PxI8*>(&v); flip32(b); } inline_ void flip(PxU32& v) { PxI8* b = reinterpret_cast<PxI8*>(&v); flip32(b); } inline_ void flip(PxI64& v) { PxI8* b = reinterpret_cast<PxI8*>(&v); PxI8 temp = b[0]; b[0] = b[7]; b[7] = temp; temp = b[1]; b[1] = b[6]; b[6] = temp; temp = b[2]; b[2] = b[5]; b[5] = temp; temp = b[3]; b[3] = b[4]; b[4] = temp; } inline_ void flip(PxF32& v) { PxI8* b = reinterpret_cast<PxI8*>(&v); flip32(b); } inline_ void flip(void*& v) { PxI8* b = reinterpret_cast<PxI8*>(&v); flip32(b); } inline_ void flip(const PxI8*& v) { PxI8* b = const_cast<PxI8*>(v); flip32(b); } } } #endif
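// --- Illustrative sketch (not part of the original PhysX header) ---------------------
// The flip() overloads above swap the byte order of 16/32/64-bit fields when source and
// target meta-data disagree on endianness. The same 32-bit swap can be written without
// pointer casts; a standalone equivalent (kept in a comment so the header itself stays
// unchanged) might look like this:
//
//     #include <cstdint>
//
//     inline std::uint32_t byteSwap32(std::uint32_t v)
//     {
//         return (v >> 24) | ((v >> 8) & 0x0000ff00u) |
//                ((v << 8) & 0x00ff0000u) | (v << 24);
//     }
//
//     // byteSwap32(0x11223344u) == 0x44332211u, i.e. the same result flip(PxU32&)
//     // produces in place via flip32().
// --------------------------------------------------------------------------------------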
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Binary/SnConvX_Convert.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. #include "foundation/PxErrorCallback.h" #include "extensions/PxDefaultStreams.h" #include "SnConvX.h" #include "serialization/SnSerialUtils.h" #include "foundation/PxAlloca.h" #include "foundation/PxString.h" #include "CmUtils.h" #include <assert.h> using namespace physx; using namespace physx::Sn; using namespace Cm; void Sn::ConvX::resetConvexFlags() { mConvexFlags.clear(); } void Sn::ConvX::_enumerateFields(const MetaClass* mc, ExtraDataEntry2* entries, int& nb, int baseOffset, MetaDataType type) const { PxU32 nbFields = mc->mFields.size(); int offsetCheck = baseOffset; PX_UNUSED(offsetCheck); for(PxU32 j=0;j<nbFields;j++) { const PxMetaDataEntry& entry = mc->mFields[j]; if(entry.mFlags & PxMetaDataFlag::eCLASS || entry.mFlags & PxMetaDataFlag::eEXTRA_DATA) continue; assert(offsetCheck == baseOffset + entry.mOffset); int currentOffset = baseOffset + entry.mOffset; //for(int c=0;c<entry.mCount;c++) { if(entry.mFlags & PxMetaDataFlag::eUNION) { entries[nb].entry = entry; entries[nb].offset = currentOffset; entries[nb].cb = 0; nb++; } else if(entry.mFlags & PxMetaDataFlag::ePTR) // This also takes care of the vtable pointer { entries[nb].entry = entry; entries[nb].offset = currentOffset; entries[nb].cb = &Sn::ConvX::convertPtr; nb++; } else if(entry.mFlags & PxMetaDataFlag::eHANDLE) { entries[nb].entry = entry; entries[nb].offset = currentOffset; entries[nb].cb = &Sn::ConvX::convertHandle16; nb++; } else { MetaClass* fieldType = getMetaClass(entry.mType, type); assert(fieldType); if(fieldType->mCallback) { entries[nb].entry = entry; entries[nb].offset = currentOffset; entries[nb].cb = fieldType->mCallback; nb++; } else { for(int c=0;c<entry.mCount;c++) { _enumerateFields(fieldType, entries, nb, currentOffset, type); currentOffset += entry.mSize/entry.mCount; } } } } offsetCheck += entry.mSize; } } void Sn::ConvX::_enumerateExtraData(const char* address, const MetaClass* mc, ExtraDataEntry* entries, int& nb, int offset, MetaDataType type) const { PxU32 nbFields = 
mc->mFields.size(); for(PxU32 j=0;j<nbFields;j++) { const PxMetaDataEntry& entry = mc->mFields[j]; if(entry.mFlags & PxMetaDataFlag::eCLASS /*|| entry.mFlags & PxMetaDataFlag::ePTR*/ || entry.mFlags & PxMetaDataFlag::eTYPEDEF) continue; const char* entryType = entry.mType; // // Insanely Twisted Shadow GeometryUnion // // Special code is needed as long as there are no meta data tags to describe our unions properly. The way it is done here is // not future-proof at all. There should be a tag to describe where the union type can be found and the number of bytes // this type id needs. Then a mapping needs to get added from each union type id to the proper meta class name. // if (entry.mFlags & PxMetaDataFlag::eUNION) { if (!mc->mClassName || Pxstrcmp(mc->mClassName, "GeometryUnion")!=0) continue; else { // ### hardcoded bit here, will only work when union type is the first int of the struct const int* tmp = reinterpret_cast<const int*>(address + offset); const int unionType = *tmp; ConvX* tmpConv = const_cast<ConvX*>(this); // ... don't ask const char* typeName = tmpConv->getTypeName(entry.mType, unionType); assert(typeName); bool isTriMesh = (Pxstrcmp(typeName, "PxTriangleMeshGeometryLL") == 0); bool isHeightField = (Pxstrcmp(typeName, "PxHeightFieldGeometryLL") == 0); if (!isTriMesh && !isHeightField) { continue; } else { entryType = typeName; } } } // MetaClass* extraDataType = getMetaClass(entry.mType, type); // if(!extraDataType) // continue; if(entry.mFlags & PxMetaDataFlag::eEXTRA_DATA) { entries[nb].entry = entry; entries[nb].offset = offset+entry.mOffset; nb++; } else { if(entry.mFlags & PxMetaDataFlag::ePTR) continue; MetaClass* extraDataType = getMetaClass(entryType, type); if(!extraDataType) continue; if(!extraDataType->mCallback) _enumerateExtraData(address, extraDataType, entries, nb, offset+entry.mOffset, type); } } } PxU64 Sn::ConvX::read64(const void*& buffer) { const PxU64* buf64 = reinterpret_cast<const PxU64*>(buffer); buffer = reinterpret_cast<const void*>(size_t(buffer) + sizeof(PxU64)); PxU64 value = *buf64; output(value); return value; } int Sn::ConvX::read32(const void*& buffer) { const int* buf32 = reinterpret_cast<const int*>(buffer); buffer = reinterpret_cast<const void*>(size_t(buffer) + sizeof(int)); int value = *buf32; output(value); return value; } short Sn::ConvX::read16(const void*& buffer) { const short* buf16 = reinterpret_cast<const short*>(buffer); buffer = reinterpret_cast<const void*>(size_t(buffer) + sizeof(short)); short value = *buf16; output(value); return value; } #if PX_CHECKED extern const char* gVTable; static bool compareEntries(const ExtraDataEntry2& e0, const ExtraDataEntry2& e1) { if(e0.entry.isVTablePtr() && e1.entry.isVTablePtr()) return true; if((e0.entry.mFlags & PxMetaDataFlag::eUNION) && (e1.entry.mFlags & PxMetaDataFlag::eUNION)) { if(e0.entry.mType && e1.entry.mType) { // We can't compare the ptrs since they index different string tables if(Pxstrcmp(e0.entry.mType, e1.entry.mType)==0) return true; } return false; } if(e0.entry.mName && e1.entry.mName) { // We can't compare the ptrs since they index different string tables if(Pxstrcmp(e0.entry.mName, e1.entry.mName)==0) return true; } return false; } #endif // TODO: optimize this bool Sn::ConvX::convertClass(const char* buffer, const MetaClass* mc, int offset) { // ---- big convex surgery ---- bool convexSurgery = false; bool foundNbVerts = false; bool removeBigData = false; // force reference (void)foundNbVerts; displayMessage(PxErrorCode::eDEBUG_INFO, "%s\n", mc->mClassName); 
displayMessage(PxErrorCode::eDEBUG_INFO, "+++++++++++++++++++++++++++++++++++++++++++++\n"); if(Pxstrcmp(mc->mClassName, "ConvexMesh")==0) { convexSurgery = true; } // ---- big convex surgery ---- int nbSrcEntries = 0; PX_ALLOCA(srcEntries, ExtraDataEntry2, 256); // ### painful ctors here int nbDstEntries = 0; PX_ALLOCA(dstEntries, ExtraDataEntry2, 256); // ### painful ctors here // Find corresponding meta-class for target platform const MetaClass* target_mc = getMetaClass(mc->mClassName, META_DATA_DST); assert(target_mc); if(mc->mCallback) { srcEntries[0].cb = mc->mCallback; srcEntries[0].offset = offset; srcEntries[0].entry.mType = mc->mClassName; srcEntries[0].entry.mName = mc->mClassName; srcEntries[0].entry.mOffset = offset; srcEntries[0].entry.mSize = mc->mSize; srcEntries[0].entry.mCount = 1; srcEntries[0].entry.mFlags = 0; nbSrcEntries = 1; assert(target_mc->mCallback); dstEntries[0].cb = target_mc->mCallback; dstEntries[0].offset = offset; dstEntries[0].entry.mType = target_mc->mClassName; dstEntries[0].entry.mName = target_mc->mClassName; dstEntries[0].entry.mOffset = offset; dstEntries[0].entry.mSize = target_mc->mSize; dstEntries[0].entry.mCount = 1; dstEntries[0].entry.mFlags = 0; nbDstEntries = 1; } else { nbSrcEntries = 0; _enumerateFields(mc, srcEntries, nbSrcEntries, 0, META_DATA_SRC); assert(nbSrcEntries<256); nbDstEntries = 0; _enumerateFields(target_mc, dstEntries, nbDstEntries, 0, META_DATA_DST); assert(nbDstEntries<256); // nb = mc->mNbEntries; // assert(nb>=0); // memcpy(entries, mc->mEntries, nb*sizeof(ExtraDataEntry2)); } int srcOffsetCheck = 0; int dstOffsetCheck = 0; PX_UNUSED(dstOffsetCheck); int j = 0; // Track cases where the vtable pointer location is different for different platforms. // The variables indicate whether a platform has a vtable pointer entry that has not been converted yet // and they will remember the index of the corrssponding entry. This works because there can only // be one open vtable pointer entry at a time. int srcOpenVTablePtrEntry = -1; int dstOpenVTablePtrEntry = -1; //if the src and dst platform place the vtable pointers at different locations some fiddling with the iteration count can be necessary. int addVTablePtrShiftIteration = 0; const int maxNb = nbSrcEntries > nbDstEntries ? nbSrcEntries : nbDstEntries; for(int i=0; i < (maxNb + addVTablePtrShiftIteration); i++) { if (i < nbSrcEntries) { displayMessage(PxErrorCode::eDEBUG_INFO, "\t0x%p\t%02x\t%d\t%d\t%s", buffer + srcOffsetCheck, static_cast<unsigned char>(buffer[srcOffsetCheck]), srcOffsetCheck, srcEntries[i].entry.mOffset, srcEntries[i].entry.mName); for (int byteCount = 1; byteCount < srcEntries[i].entry.mSize; ++byteCount) displayMessage(PxErrorCode::eDEBUG_INFO, "\t0x%p\t%02x\t%d\t%d\t.", buffer + srcOffsetCheck + byteCount, static_cast<unsigned char>(buffer[srcOffsetCheck + byteCount]), srcOffsetCheck + byteCount, srcEntries[i].entry.mOffset + byteCount); } bool handlePadding = true; bool skipLoop = false; while(handlePadding) { const int pad0 = i<nbSrcEntries ? srcEntries[i].entry.mFlags & PxMetaDataFlag::ePADDING : 0; const int pad1 = j<nbDstEntries ? dstEntries[j].entry.mFlags & PxMetaDataFlag::ePADDING : 0; if(pad0 || pad1) { if(pad0) { #if PX_CHECKED if (mMarkedPadding && (Pxstrcmp(srcEntries[i].entry.mType, "paddingByte")==0)) if(!checkPaddingBytes(buffer + srcOffsetCheck, srcEntries[i].entry.mSize)) { if(i>0) { displayMessage(PxErrorCode::eDEBUG_WARNING, "PxBinaryConverter warning: Bytes after %s::%s don't look like padding bytes. 
Likely mismatch between binary data and metadata.\n", mc->mClassName, srcEntries[i-1].entry.mName ); } else displayMessage(PxErrorCode::eDEBUG_WARNING, "PxBinaryConverter warning: Bytes after %s don't look like padding bytes. Likely mismatch between binary data and metadata.\n", mc->mClassName); } #endif if(pad1) { // Both have padding // ### check sizes, output bytes if(srcEntries[i].entry.mSize==dstEntries[j].entry.mSize) { // I guess we can just go on with the normal code here handlePadding = false; } else { // Output padding assert(srcEntries[i].cb); assert(srcEntries[i].offset == srcOffsetCheck); const int padSize = dstEntries[j].entry.mSize; char* paddingBytes = reinterpret_cast<char*>(PX_ALLOC(sizeof(char)*padSize, "paddingByte")); memset(paddingBytes, 0, size_t(padSize)); assert(dstEntries[j].cb); (this->*dstEntries[j].cb)(paddingBytes, dstEntries[j].entry, dstEntries[j].entry); assert(dstOffsetCheck==dstEntries[j].offset); dstOffsetCheck += padSize; PX_FREE(paddingBytes); // srcEntries[i].cb(buffer+srcOffsetCheck, srcEntries[i].entry, dstEntries[j].entry); // assert(dstOffsetCheck==dstEntries[j].offset); // dstOffsetCheck += dstEntries[j].entry.mSize; srcOffsetCheck += srcEntries[i].entry.mSize; // Skip dest padding field j++; // continue; // ### BUG, doesn't go back to the "for" skipLoop = true; handlePadding = false; } } else { // Src has padding, dst has not => skip conversion // Don't increase j skipLoop = true; handlePadding = false; srcOffsetCheck += srcEntries[i].entry.mSize; } } else { if(pad1) { // Dst has padding, src has not // Output padding const int padSize = dstEntries[j].entry.mSize; char* paddingBytes = reinterpret_cast<char*>(PX_ALLOC(sizeof(char)*padSize, "paddingByte")); memset(paddingBytes, 0, size_t(padSize)); assert(dstEntries[j].cb); (this->*dstEntries[j].cb)(paddingBytes, dstEntries[j].entry, dstEntries[j].entry); assert(dstOffsetCheck==dstEntries[j].offset); dstOffsetCheck += padSize; PX_FREE(paddingBytes); // Skip dest padding field, keep same src field j++; } else { assert(0); } } } else handlePadding = false; } if(skipLoop) continue; int modSrcOffsetCheck = srcOffsetCheck; const ExtraDataEntry2* srcEntryPtr = &srcEntries[i]; const ExtraDataEntry2* dstEntryPtr = &dstEntries[j]; bool isSrcVTablePtr = (i < nbSrcEntries) ? srcEntryPtr->entry.isVTablePtr() : false; if (isSrcVTablePtr && (dstOpenVTablePtrEntry != -1)) { // vtable ptr position mismatch: // this check is necessary to align src and dst index again when the // dst vtable pointer has been written already and the src vtable ptr // element is reached. // // i // src: | a | b | vt-ptr | c | ... // dst: | vt-ptr | a | b | c | ... // j // // it needs special treatment because the following case fails otherwise // i // src: | a | b | vt-ptr | c | vt-ptr | ... // dst: | vt-ptr | a | b | vt-ptr | c | ... // j // // This entry has been written already -> advance to next src entry // srcOffsetCheck += srcEntryPtr->entry.mSize; i++; isSrcVTablePtr = (i < nbSrcEntries) ? srcEntryPtr->entry.isVTablePtr() : false; PX_ASSERT(dstOpenVTablePtrEntry < nbDstEntries); PX_ASSERT(dstEntries[dstOpenVTablePtrEntry].entry.isVTablePtr()); dstOpenVTablePtrEntry = -1; PX_ASSERT(addVTablePtrShiftIteration == 0); } bool isDstVTablePtr = (j < nbDstEntries) ? dstEntryPtr->entry.isVTablePtr() : false; if (isDstVTablePtr && (srcOpenVTablePtrEntry != -1)) { // i // src: | vt-ptr | a | b | c | ... // dst: | a | b | vt-ptr | c | ... 
// j i--; // next iteration the current element should get processed isSrcVTablePtr = true; PX_ASSERT(srcOpenVTablePtrEntry < nbSrcEntries); srcEntryPtr = &srcEntries[srcOpenVTablePtrEntry]; PX_ASSERT(srcEntryPtr->entry.isVTablePtr()); modSrcOffsetCheck = srcEntryPtr->offset; srcOffsetCheck -= srcEntryPtr->entry.mSize; // to make sure total change is 0 after this iteration srcOpenVTablePtrEntry = -1; PX_ASSERT(addVTablePtrShiftIteration == 1); addVTablePtrShiftIteration = 0; } if(i==nbSrcEntries && j==nbDstEntries) { PX_ASSERT((srcOpenVTablePtrEntry == -1) && (dstOpenVTablePtrEntry == -1)); break; } if (isSrcVTablePtr || isDstVTablePtr) { if (!isSrcVTablePtr) { // i // src: | a | b | vt-ptr | c | ... // dst: | vt-ptr | a | b | c | ... // j PX_ASSERT(dstOpenVTablePtrEntry == -1); // the other case should be detected and treated earlier PX_ASSERT(srcOpenVTablePtrEntry == -1); PX_ASSERT(addVTablePtrShiftIteration == 0); int k; for(k=i+1; k < nbSrcEntries; k++) { if (srcEntries[k].entry.isVTablePtr()) break; } PX_ASSERT(k < nbSrcEntries); srcEntryPtr = &srcEntries[k]; modSrcOffsetCheck = srcEntryPtr->offset; srcOffsetCheck -= srcEntryPtr->entry.mSize; // to make sure total change is 0 after this iteration dstOpenVTablePtrEntry = j; i--; // to make sure the original entry gets processed in the next iteration } else if (!isDstVTablePtr) { // i ---> i // src: | vt-ptr | a | b | c | ... // dst: | a | b | vt-ptr | c | ... // j PX_ASSERT(srcOpenVTablePtrEntry == -1); // the other case should be detected and treated earlier PX_ASSERT(dstOpenVTablePtrEntry == -1); PX_ASSERT(addVTablePtrShiftIteration == 0); srcOffsetCheck += srcEntryPtr->entry.mSize; modSrcOffsetCheck = srcOffsetCheck; srcOpenVTablePtrEntry = i; i++; srcEntryPtr = &srcEntries[i]; addVTablePtrShiftIteration = 1; // additional iteration might be needed to process vtable pointer at the end of a class PX_ASSERT((i < nbSrcEntries) && ((srcEntryPtr->entry.mFlags & PxMetaDataFlag::ePADDING) == 0)); // if the second check fails, this whole section might have to be done before the padding bytes get processed. Not sure // what other consequences that might have though. } } #if PX_CHECKED else { if(!compareEntries(*srcEntryPtr, *dstEntryPtr)) { displayMessage(PxErrorCode::eINVALID_PARAMETER, "\rConvX::convertClass: %s, src meta data and dst meta data don't match!", mc->mClassName); return false; } } #endif const ExtraDataEntry2& srcEntry = *srcEntryPtr; const ExtraDataEntry2& dstEntry = *dstEntryPtr; if(srcEntry.entry.mFlags & PxMetaDataFlag::eUNION) { // ### hardcoded bit here, will only work when union type is the first int of the struct const int* tmp = reinterpret_cast<const int*>(buffer + modSrcOffsetCheck); const int unionType = *tmp; const char* typeName = getTypeName(srcEntry.entry.mType, unionType); assert(typeName); MetaClass* unionMC = getMetaClass(typeName, META_DATA_SRC); assert(unionMC); convertClass(buffer + modSrcOffsetCheck, unionMC, 0); // ### recurse dstOffsetCheck += dstEntry.entry.mSize; MetaClass* targetUnionMC = getMetaClass(typeName, META_DATA_DST); assert(targetUnionMC); const int delta = dstEntry.entry.mSize - targetUnionMC->mSize; char* deltaBytes = reinterpret_cast<char*>(PX_ALLOC(sizeof(char)*delta, "deltaBytes")); memset(deltaBytes, 0, size_t(delta)); output(deltaBytes, delta); // Skip unused bytes at the end of the union PX_FREE(deltaBytes); srcOffsetCheck += srcEntry.entry.mSize; // do not use modSrcOffsetCheck here! 
} else { assert(srcEntry.cb); assert(srcEntry.offset == modSrcOffsetCheck); // ---- big convex surgery ---- if(convexSurgery) { if(Pxstrcmp(srcEntry.entry.mName, "mNbHullVertices")==0) { assert(srcEntry.entry.mSize==1); const PxU8 nbVerts = static_cast<PxU8>(*(buffer+modSrcOffsetCheck)); assert(!foundNbVerts); foundNbVerts = true; const PxU8 gaussMapLimit = static_cast<PxU8>(getBinaryMetaData(META_DATA_DST)->getGaussMapLimit()); if(nbVerts > gaussMapLimit) { // We need a gauss map and we have one => keep it } else { // We don't need a gauss map and we have one => remove it removeBigData = true; } } else { if(removeBigData) { const bool isBigConvexData = Pxstrcmp(srcEntry.entry.mType, "BigConvexData")==0 || Pxstrcmp(srcEntry.entry.mType, "BigConvexRawData")==0; if(isBigConvexData) { assert(foundNbVerts); setNullPtr(true); } } } } // ---- big convex surgery ---- (this->*srcEntry.cb)(buffer+modSrcOffsetCheck, srcEntry.entry, dstEntry.entry); assert(dstOffsetCheck==dstEntry.offset); dstOffsetCheck += dstEntry.entry.mSize; srcOffsetCheck += srcEntry.entry.mSize; // do not use modSrcOffsetCheck here! // ---- big convex surgery ---- if(convexSurgery && removeBigData) setNullPtr(false); // ---- big convex surgery ---- } j++; } displayMessage(PxErrorCode::eDEBUG_INFO, "---------------------------------------------\n"); while(j<nbDstEntries) { assert(dstEntries[j].entry.mFlags & PxMetaDataFlag::ePADDING); if(dstEntries[j].entry.mFlags & PxMetaDataFlag::ePADDING) { dstOffsetCheck += dstEntries[j].entry.mSize; } j++; } assert(j==nbDstEntries); assert(dstOffsetCheck==target_mc->mSize); assert(srcOffsetCheck==mc->mSize); // ---- big convex surgery ---- if(convexSurgery) mConvexFlags.pushBack(removeBigData); // ---- big convex surgery ---- return true; } // Handles data defined with PX_DEF_BIN_METADATA_EXTRA_ARRAY const char* Sn::ConvX::convertExtraData_Array(const char* Address, const char* lastAddress, const char* objectAddress, const ExtraDataEntry& ed) { (void)lastAddress; MetaClass* mc = getMetaClass(ed.entry.mType, META_DATA_SRC); assert(mc); // PT: safe to cast to int here since we're reading a count. 
const int count = int(peek(ed.entry.mSize, objectAddress + ed.offset, ed.entry.mFlags)); // if(ed.entry.mCount) // Reused as align value if(ed.entry.mAlignment) { Address = alignStream(Address, ed.entry.mAlignment); // Address = alignStream(Address, ed.entry.mCount); assert(Address<=lastAddress); } for(int c=0;c<count;c++) { convertClass(Address, mc, 0); Address += mc->mSize; assert(Address<=lastAddress); } return Address; } const char* Sn::ConvX::convertExtraData_Ptr(const char* Address, const char* lastAddress, const PxMetaDataEntry& entry, int count, int ptrSize_Src, int ptrSize_Dst) { (void)lastAddress; PxMetaDataEntry tmpSrc = entry; tmpSrc.mCount = count; tmpSrc.mSize = count * ptrSize_Src; PxMetaDataEntry tmpDst = entry; tmpDst.mCount = count; tmpDst.mSize = count * ptrSize_Dst; displayMessage(PxErrorCode::eDEBUG_INFO, "extra data ptrs\n"); displayMessage(PxErrorCode::eDEBUG_INFO, "+++++++++++++++++++++++++++++++++++++++++++++\n"); displayMessage(PxErrorCode::eDEBUG_INFO, "\t0x%p\t%02x\t\t\t%s", Address, static_cast<unsigned char>(Address[0]), entry.mName); for (int byteCount = 1; byteCount < ptrSize_Src*count; ++byteCount) displayMessage(PxErrorCode::eDEBUG_INFO, "\t0x%p\t%02x\t\t\t.", Address + byteCount, static_cast<unsigned char>(Address[byteCount])); convertPtr(Address, tmpSrc, tmpDst); Address += count * ptrSize_Src; assert(Address<=lastAddress); return Address; } const char* Sn::ConvX::convertExtraData_Handle(const char* Address, const char* lastAddress, const PxMetaDataEntry& entry, int count) { (void)lastAddress; MetaClass* fieldType = getMetaClass(entry.mType, META_DATA_SRC); int typeSize = fieldType->mSize; PxMetaDataEntry tmpSrc = entry; tmpSrc.mCount = count; tmpSrc.mSize = count*typeSize; PxMetaDataEntry tmpDst = entry; tmpDst.mCount = count; tmpDst.mSize = count*typeSize; displayMessage(PxErrorCode::eDEBUG_INFO, "extra data handles\n"); displayMessage(PxErrorCode::eDEBUG_INFO, "+++++++++++++++++++++++++++++++++++++++++++++\n"); displayMessage(PxErrorCode::eDEBUG_INFO, "\t0x%p\t%02x\t\t\t%s", Address, static_cast<unsigned char>(Address[0]), entry.mName); for (int byteCount = 1; byteCount < tmpSrc.mSize; ++byteCount) displayMessage(PxErrorCode::eDEBUG_INFO, "\t0x%p\t%02x\t\t\t.", Address + byteCount, static_cast<unsigned char>(Address[byteCount])); convertHandle16(Address, tmpSrc, tmpDst); Address += count*typeSize; assert(Address<=lastAddress); return Address; } static bool decodeControl(PxU64 control, const ExtraDataEntry& ed, PxU64 controlMask = 0) { if(ed.entry.mFlags & PxMetaDataFlag::eCONTROL_FLIP) { if(controlMask) { return (control & controlMask) ? false : true; } else { return control==0; } } else { if(controlMask) { return (control & controlMask) ? 
true : false; } else { return control!=0; } } } // ### currently hardcoded, should change int Sn::ConvX::getConcreteType(const char* buffer) { MetaClass* mc = getMetaClass("PxBase", META_DATA_SRC); assert(mc); PxMetaDataEntry entry; if(mc->getFieldByType("PxType", entry)) { // PT: safe to cast to int here since we're reading our own PxType return int(peek(entry.mSize, buffer + entry.mOffset)); } assert(0); return 0xffffffff; } struct Item : public PxUserAllocated { MetaClass* mc; const char* address; }; bool Sn::ConvX::convertCollection(const void* buffer, int fileSize, int nbObjects) { const char* lastAddress = reinterpret_cast<const char*>(buffer) + fileSize; const char* Address = alignStream(reinterpret_cast<const char*>(buffer)); const int ptrSize_Src = mSrcPtrSize; const int ptrSize_Dst = mDstPtrSize; Item* objects = PX_NEW(Item)[PxU32(nbObjects)]; for(PxU32 i=0;i<PxU32(nbObjects);i++) { const float percents = float(i)/float(nbObjects); displayMessage(PxErrorCode::eDEBUG_INFO, "Object conversion: %d%%", int(percents*100.0f)); Address = alignStream(Address); assert(Address<=lastAddress); PxConcreteType::Enum classType = PxConcreteType::Enum(getConcreteType(Address)); MetaClass* metaClass = getMetaClass(classType, META_DATA_SRC); if(!metaClass) { PX_DELETE_ARRAY(objects); return false; } objects[i].mc = metaClass; objects[i].address = Address; if(!convertClass(Address, metaClass, 0)) { PX_DELETE_ARRAY(objects); return false; } Address += metaClass->mSize; assert(Address<=lastAddress); } // Fields / extra data if(1) { // ---- big convex surgery ---- unsigned int nbConvexes = 0; // ---- big convex surgery ---- //const char* StartAddress2 = Address; //int startDstSize2 = getCurrentOutputSize(); for(int i=0;i<nbObjects;i++) { //const char* StartAddress = Address; //int startDstSize = getCurrentOutputSize(); const float percents = float(i)/float(nbObjects); displayMessage(PxErrorCode::eDEBUG_INFO, "Extra data conversion: %d%%", int(percents*100.0f)); MetaClass* mc0 = objects[i].mc; const char* objectAddress = objects[i].address; // printf("%d: %s\n", i, mc->mClassName); // if(strcmp(mc->mClassName, "TriangleMesh")==0) // if(strcmp(mc->mClassName, "NpRigidDynamic")==0) if(Pxstrcmp(mc0->mClassName, "HybridModel")==0) { int stop=1; (void)(stop); } // ### we actually need to collect all extra data for this class, including data from embedded members. PX_ALLOCA(entries, ExtraDataEntry, 256); int nbEntries = 0; _enumerateExtraData(objectAddress, mc0, entries, nbEntries, 0, META_DATA_SRC); assert(nbEntries<256); Address = alignStream(Address); assert(Address<=lastAddress); for(int j=0;j<nbEntries;j++) { const ExtraDataEntry& ed = entries[j]; assert(ed.entry.mFlags & PxMetaDataFlag::eEXTRA_DATA); if(ed.entry.mFlags & PxMetaDataFlag::eEXTRA_ITEM) { // ---- big convex surgery ---- if(1) { const bool isBigConvexData = Pxstrcmp(ed.entry.mType, "BigConvexData")==0; if(isBigConvexData) { assert(nbConvexes<mConvexFlags.size()); if(mConvexFlags[nbConvexes++]) setNoOutput(true); } } // ---- big convex surgery ---- MetaClass* extraDataType = getMetaClass(ed.entry.mType, META_DATA_SRC); assert(extraDataType); //sschirm: we used to have ed.entry.mOffset here, but that made cloth deserialization fail. - sschirm: cloth is gone now... 
const char* controlAddress = objectAddress + ed.offset; const PxU64 controlValue = peek(ed.entry.mOffsetSize, controlAddress); if(controlValue) { if(ed.entry.mAlignment) { Address = alignStream(Address, ed.entry.mAlignment); assert(Address<=lastAddress); } const char* classAddress = Address; convertClass(Address, extraDataType, 0); Address += extraDataType->mSize; assert(Address<=lastAddress); // Enumerate extra data for this optional class, and convert it too. // This assumes the extra data for the optional class is always appended to the class itself, // which is something we'll need to enforce in the SDK. So far this is only to handle optional // inline arrays. // ### this should probably be recursive eventually PX_ALLOCA(entries2, ExtraDataEntry, 256); int nbEntries2 = 0; _enumerateExtraData(objectAddress, extraDataType, entries2, nbEntries2, 0, META_DATA_SRC); assert(nbEntries2<256); for(int k=0;k<nbEntries2;k++) { const ExtraDataEntry& ed2 = entries2[k]; assert(ed2.entry.mFlags & PxMetaDataFlag::eEXTRA_DATA); if(ed2.entry.mFlags & PxMetaDataFlag::eEXTRA_ITEMS) { const int controlOffset = ed2.entry.mOffset; const int controlSize = ed2.entry.mSize; const int countOffset = ed2.entry.mCount; const int countSize = ed2.entry.mOffsetSize; const PxU64 controlValue2 = peek(controlSize, classAddress + controlOffset); PxU64 controlMask = 0; if(ed2.entry.mFlags & PxMetaDataFlag::eCONTROL_MASK) { controlMask = PxU64(ed2.entry.mFlags & (PxMetaDataFlag::eCONTROL_MASK_RANGE << 16)); controlMask = controlMask >> 16; } if(decodeControl(controlValue2, ed2, controlMask)) { // PT: safe to cast to int here since we're reading a count int count = int(peek(countSize, classAddress + countOffset, ed2.entry.mFlags)); if(ed2.entry.mAlignment) { assert(0); // Never tested Address = alignStream(Address, ed2.entry.mAlignment); assert(Address<=lastAddress); } if(ed2.entry.mFlags & PxMetaDataFlag::ePTR) { assert(0); // Never tested } else { MetaClass* mc = getMetaClass(ed2.entry.mType, META_DATA_SRC); assert(mc); while(count--) { convertClass(Address, mc, 0); Address += mc->mSize; assert(Address<=lastAddress); } } } } else { if( (ed2.entry.mFlags & PxMetaDataFlag::eALIGNMENT) && ed2.entry.mAlignment) { Address = alignStream(Address, ed2.entry.mAlignment); assert(Address<=lastAddress); } else { // We assume it's an normal array, e.g. the ones from "big convexes" assert(!(ed2.entry.mFlags & PxMetaDataFlag::eEXTRA_ITEM)); Address = convertExtraData_Array(Address, lastAddress, classAddress, ed2); } } } } else { int stop = 0; (void)(stop); } // ---- big convex surgery ---- setNoOutput(false); // ---- big convex surgery ---- } else if(ed.entry.mFlags & PxMetaDataFlag::eEXTRA_ITEMS) { // PX_DEF_BIN_METADATA_EXTRA_ITEMS int reloc = ed.offset - ed.entry.mOffset; // ### because the enum code only fixed the "controlOffset"! 
const int controlOffset = ed.entry.mOffset; const int controlSize = ed.entry.mSize; const int countOffset = ed.entry.mCount; const int countSize = ed.entry.mOffsetSize; // const int controlValue2 = peek(controlSize, objectAddress + controlOffset); const PxU64 controlValue2 = peek(controlSize, objectAddress + controlOffset + reloc); PxU64 controlMask = 0; if(ed.entry.mFlags & PxMetaDataFlag::eCONTROL_MASK) { controlMask = PxU64(ed.entry.mFlags & (PxMetaDataFlag::eCONTROL_MASK_RANGE << 16)); controlMask = controlMask >> 16; } if(decodeControl(controlValue2, ed, controlMask)) { // PT: safe to cast to int here since we're reading a count // int count = peek(countSize, objectAddress + countOffset); // ### int count = int(peek(countSize, objectAddress + countOffset + reloc, ed.entry.mFlags)); // ### if(ed.entry.mAlignment) { Address = alignStream(Address, ed.entry.mAlignment); assert(Address<=lastAddress); } if(ed.entry.mFlags & PxMetaDataFlag::ePTR) { Address = convertExtraData_Ptr(Address, lastAddress, ed.entry, count, ptrSize_Src, ptrSize_Dst); } else if (ed.entry.mFlags & PxMetaDataFlag::eHANDLE) { Address = convertExtraData_Handle(Address, lastAddress, ed.entry, count); } else { MetaClass* mc = getMetaClass(ed.entry.mType, META_DATA_SRC); assert(mc); while(count--) { convertClass(Address, mc, 0); Address += mc->mSize; assert(Address<=lastAddress); } } } } else if(ed.entry.mFlags & PxMetaDataFlag::eALIGNMENT) { if(ed.entry.mAlignment) { displayMessage(PxErrorCode::eDEBUG_INFO, " align to %d bytes\n", ed.entry.mAlignment); displayMessage(PxErrorCode::eDEBUG_INFO, "---------------------------------------------\n"); Address = alignStream(Address, ed.entry.mAlignment); assert(Address<=lastAddress); } } else if(ed.entry.mFlags & PxMetaDataFlag::eEXTRA_NAME) { if(ed.entry.mAlignment) { Address = alignStream(Address, ed.entry.mAlignment); assert(Address<=lastAddress); } //get string count MetaClass* mc = getMetaClass("PxU32", META_DATA_SRC); assert(mc); //safe to cast to int here since we're reading a count. 
const int count = int(peek(mc->mSize, Address, 0)); displayMessage(PxErrorCode::eDEBUG_INFO, " convert %d bytes string\n", count); convertClass(Address, mc, 0); Address += mc->mSize; mc = getMetaClass(ed.entry.mType, META_DATA_SRC); assert(mc); for(int c=0;c<count;c++) { convertClass(Address, mc, 0); Address += mc->mSize; assert(Address<=lastAddress); } } else { Address = convertExtraData_Array(Address, lastAddress, objectAddress, ed); } } } PX_DELETE_ARRAY(objects); assert(nbConvexes==mConvexFlags.size()); } assert(Address==lastAddress); return true; } bool Sn::ConvX::convert(const void* buffer, int fileSize) { // Test initial alignment if(size_t(buffer) & (ALIGN_DEFAULT-1)) { assert(0); return false; } const int header = read32(buffer); fileSize -= 4; (void)header; if (header != PX_MAKE_FOURCC('S','E','B','D')) { displayMessage(physx::PxErrorCode::eINVALID_PARAMETER, "PxBinaryConverter: Buffer contains data with bad header indicating invalid serialized data."); return false; } const int version = read32(buffer); fileSize -= 4; (void)version; char binaryVersionGuid[SN_BINARY_VERSION_GUID_NUM_CHARS + 1]; memcpy(binaryVersionGuid, buffer, SN_BINARY_VERSION_GUID_NUM_CHARS); binaryVersionGuid[SN_BINARY_VERSION_GUID_NUM_CHARS] = 0; buffer = reinterpret_cast<const void*>(size_t(buffer) + SN_BINARY_VERSION_GUID_NUM_CHARS); fileSize -= SN_BINARY_VERSION_GUID_NUM_CHARS; output(binaryVersionGuid, SN_BINARY_VERSION_GUID_NUM_CHARS); if (!checkCompatibility(binaryVersionGuid)) { displayMessage(physx::PxErrorCode::eINVALID_PARAMETER, "PxBinaryConverter: Buffer contains binary data version 0x%s which is incompatible with this PhysX sdk (0x%s).\n", binaryVersionGuid, getBinaryVersionGuid()); return false; } //read src platform tag and write dst platform tag according dst meta data const int srcPlatformTag = *reinterpret_cast<const int*>(buffer); buffer = reinterpret_cast<const void*>(size_t(buffer) + 4); fileSize -= 4; const int dstPlatformTag = mMetaData_Dst->getPlatformTag(); output(dstPlatformTag); if (srcPlatformTag != mMetaData_Src->getPlatformTag()) { displayMessage(physx::PxErrorCode::eINVALID_PARAMETER, "PxBinaryConverter: Mismatch of platform tags of binary data and metadata:\n Binary Data: %s\n MetaData: %s\n", getBinaryPlatformName(PxU32(srcPlatformTag)), getBinaryPlatformName(PxU32(mMetaData_Src->getPlatformTag()))); return false; } //read whether input data has marked padding, and set it for the output data (since 0xcd is written into pads on conversion) const int srcMarkedPadding = *reinterpret_cast<const int*>(buffer); buffer = reinterpret_cast<const void*>(size_t(buffer) + 4); fileSize -= 4; mMarkedPadding = srcMarkedPadding != 0; const int dstMarkedPadding = 1; output(dstMarkedPadding); int nbObjectsInCollection; buffer = convertReferenceTables(buffer, fileSize, nbObjectsInCollection); if(!buffer) return false; bool ret = convertCollection(buffer, fileSize, nbObjectsInCollection); mMarkedPadding = false; return ret; } // PT: code below added to support 64bit-to-32bit conversions void Sn::ConvX::exportIntAsPtr(int value) { const int ptrSize_Src = mSrcPtrSize; const int ptrSize_Dst = mDstPtrSize; PxMetaDataEntry entry; const char* address = NULL; const PxU32 value32 = PxU32(value); const PxU64 value64 = PxU64(value)&0xffffffff; if(ptrSize_Src==4) { address = reinterpret_cast<const char*>(&value32); } else if(ptrSize_Src==8) { address = reinterpret_cast<const char*>(&value64); } else assert(0); convertExtraData_Ptr(address, address + ptrSize_Src, entry, 1, ptrSize_Src, ptrSize_Dst); } void 
Sn::ConvX::exportInt(int value) { output(value); } void Sn::ConvX::exportInt64(PxU64 value) { output(value); } PointerRemap::PointerRemap() { } PointerRemap::~PointerRemap() { } void PointerRemap::setObjectRef(PxU64 object64, PxU32 ref) { mData[object64] = ref; } bool PointerRemap::getObjectRef(PxU64 object64, PxU32& ref) const { const PointerMap::Entry* entry = mData.find(object64); if(entry) { ref = entry->second; return true; } return false; } Handle16Remap::Handle16Remap() { } Handle16Remap::~Handle16Remap() { } void Handle16Remap::setObjectRef(PxU16 object, PxU16 ref) { mData[object] = ref; } bool Handle16Remap::getObjectRef(PxU16 object, PxU16& ref) const { const Handle16Map::Entry* entry = mData.find(object); if(entry) { ref = entry->second; return true; } return false; } /** Converting the PxBase object offsets in the manifest table is fairly complicated now. It would be good to have an easy callback mechanism for custom things like this. */ const void* Sn::ConvX::convertManifestTable(const void* buffer, int& fileSize) { PxU32 padding = getPadding(size_t(buffer), ALIGN_DEFAULT); buffer = alignStream(reinterpret_cast<const char*>(buffer)); fileSize -= padding; int nb = read32(buffer); fileSize -= 4; MetaClass* mc_src = getMetaClass("Sn::ManifestEntry", META_DATA_SRC); assert(mc_src); MetaClass* mc_dst = getMetaClass("Sn::ManifestEntry", META_DATA_DST); assert(mc_dst); bool mdOk; PxMetaDataEntry srcTypeField; mdOk = mc_src->getFieldByName("type", srcTypeField); PX_UNUSED(mdOk); PX_ASSERT(mdOk); PxMetaDataEntry dstOffsetField; mdOk = mc_dst->getFieldByName("offset", dstOffsetField); PX_ASSERT(mdOk); const char* address = reinterpret_cast<const char*>(buffer); PxU32 headerOffset = 0; for(int i=0;i<nb;i++) { PxConcreteType::Enum classType = PxConcreteType::Enum(peek(srcTypeField.mSize, address + srcTypeField.mOffset)); //convert ManifestEntry but output to tmpStream PxDefaultMemoryOutputStream tmpStream; { //backup output state PxOutputStream* outStream = mOutStream; PxU32 outputSize = PxU32(mOutputSize); mOutStream = &tmpStream; mOutputSize = 0; convertClass(address, mc_src, 0); PX_ASSERT(tmpStream.getSize() == PxU32(mc_dst->mSize)); //restore output state mOutStream = outStream; mOutputSize = int(outputSize); } //output patched offset PX_ASSERT(dstOffsetField.mOffset == 0); //assuming offset is the first data output(int(headerOffset)); //output rest of ManifestEntry PxU32 restSize = PxU32(mc_dst->mSize - dstOffsetField.mSize); mOutStream->write(tmpStream.getData() + dstOffsetField.mSize, restSize); mOutputSize += restSize; //increment source stream address += mc_src->mSize; fileSize -= mc_src->mSize; assert(fileSize>=0); //update headerOffset using the type and dst meta data of the type MetaClass* mc_classType_dst = getMetaClass(classType, META_DATA_DST); if(!mc_classType_dst) return NULL; headerOffset += getPadding(size_t(mc_classType_dst->mSize), PX_SERIAL_ALIGN) + mc_classType_dst->mSize; } output(int(headerOffset)); //endoffset buffer = address + 4; fileSize -= 4; return buffer; } const void* Sn::ConvX::convertImportReferences(const void* buffer, int& fileSize) { PxU32 padding = getPadding(size_t(buffer), ALIGN_DEFAULT); buffer = alignStream(reinterpret_cast<const char*>(buffer)); fileSize -= padding; int nb = read32(buffer); fileSize -= 4; if(!nb) return buffer; MetaClass* mc = getMetaClass("Sn::ImportReference", META_DATA_SRC); assert(mc); const char* address = reinterpret_cast<const char*>(buffer); for(int i=0;i<nb;i++) { convertClass(address, mc, 0); address += mc->mSize; 
fileSize -= mc->mSize; assert(fileSize>=0); } return address; } const void* Sn::ConvX::convertExportReferences(const void* buffer, int& fileSize) { PxU32 padding = getPadding(size_t(buffer), ALIGN_DEFAULT); buffer = alignStream(reinterpret_cast<const char*>(buffer)); fileSize -= padding; int nb = read32(buffer); fileSize -= 4; if(!nb) return buffer; MetaClass* mc = getMetaClass("Sn::ExportReference", META_DATA_SRC); assert(mc); const char* address = reinterpret_cast<const char*>(buffer); for(int i=0;i<nb;i++) { convertClass(address, mc, 0); address += mc->mSize; fileSize -= mc->mSize; assert(fileSize>=0); } return address; } const void* Sn::ConvX::convertInternalReferences(const void* buffer, int& fileSize) { PxU32 padding = getPadding(size_t(buffer), ALIGN_DEFAULT); buffer = alignStream(reinterpret_cast<const char*>(buffer)); fileSize -= padding; //pointer references int nbPtrReferences = read32(buffer); fileSize -= 4; if(nbPtrReferences) { const char* address = reinterpret_cast<const char*>(buffer); MetaClass* mc = getMetaClass("Sn::InternalReferencePtr", META_DATA_SRC); assert(mc); for(int i=0;i<nbPtrReferences;i++) { convertClass(address, mc, 0); address += mc->mSize; fileSize -= mc->mSize; assert(fileSize>=0); } buffer = address; } //16 bit handle references int nbHandle16References = read32(buffer); fileSize -= 4; if (nbHandle16References) { //pre add invalid handle value mHandle16Remap.setObjectRef(0xffff, 0xffff); const char* address = reinterpret_cast<const char*>(buffer); MetaClass* mc = getMetaClass("Sn::InternalReferenceHandle16", META_DATA_SRC); assert(mc); for(int i=0;i<nbHandle16References;i++) { convertClass(address, mc, 0); address += mc->mSize; fileSize -= mc->mSize; assert(fileSize>=0); } buffer = address; } return buffer; } const void* Sn::ConvX::convertReferenceTables(const void* buffer, int& fileSize, int& nbObjectsInCollection) { // PT: the map should not be used while creating it, so use one indirection mPointerActiveRemap = NULL; mPointerRemap.mData.clear(); mPointerRemapCounter = 0; mHandle16ActiveRemap = NULL; mHandle16Remap.mData.clear(); mHandle16RemapCounter = 0; PxU32 padding = getPadding(size_t(buffer), ALIGN_DEFAULT); buffer = alignStream(reinterpret_cast<const char*>(buffer)); fileSize -= padding; nbObjectsInCollection = read32(buffer); if (nbObjectsInCollection == 0) displayMessage(PxErrorCode::eDEBUG_INFO, "\n\nConverting empty collection!\n\n"); fileSize -= 4; buffer = convertManifestTable(buffer, fileSize); if(!buffer) return NULL; buffer = convertImportReferences(buffer, fileSize); buffer = convertExportReferences(buffer, fileSize); buffer = convertInternalReferences(buffer, fileSize); // PT: the map can now be used mPointerActiveRemap = &mPointerRemap; mHandle16ActiveRemap = &mHandle16Remap; return buffer; } bool Sn::ConvX::checkPaddingBytes(const char* buffer, int byteCount) { const unsigned char* src = reinterpret_cast<const unsigned char*>(buffer); int i = 0; while ((i < byteCount) && (src[i] == 0xcd)) i++; return (i == byteCount); }
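Note on the last function above: checkPaddingBytes() only verifies that every byte of a padding block carries the 0xcd marker written when padding is marked during conversion. A minimal standalone sketch of the same scan, with made-up names and outside the converter, could look like this:

#include <cstddef>

// Hypothetical helper mirroring the padding check: true only if every byte equals 0xcd.
static bool allBytesMarkedAsPadding(const unsigned char* bytes, std::size_t count)
{
    for (std::size_t i = 0; i < count; ++i)
        if (bytes[i] != 0xcd)      // 0xcd is the pattern written into pads
            return false;
    return true;
}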
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Binary/SnConvX_Union.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. #ifndef SN_CONVX_UNION_H #define SN_CONVX_UNION_H namespace physx { namespace Sn { struct UnionType { const char* mTypeName; int mTypeValue; }; struct Union { const char* mName; PsArray<UnionType> mTypes; }; } } #endif
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Binary/SnConvX_Union.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. #include "SnConvX.h" #include <assert.h> #include "foundation/PxString.h" using namespace physx; void Sn::ConvX::resetUnions() { mUnions.clear(); } bool Sn::ConvX::registerUnion(const char* name) { displayMessage(PxErrorCode::eDEBUG_INFO, "Registering union: %s\n", name); Sn::Union u; u.mName = name; mUnions.pushBack(u); return true; } bool Sn::ConvX::registerUnionType(const char* unionName, const char* typeName, int typeValue) { const PxU32 nb = mUnions.size(); for(PxU32 i=0;i<nb;i++) { if(Pxstrcmp(mUnions[i].mName, unionName)==0) { UnionType t; t.mTypeName = typeName; t.mTypeValue = typeValue; mUnions[i].mTypes.pushBack(t); displayMessage(PxErrorCode::eDEBUG_INFO, "Registering union type: %s | %s | %d\n", unionName, typeName, typeValue); return true; } } displayMessage(PxErrorCode::eINTERNAL_ERROR, "PxBinaryConverter: union not found: %s, please check the source metadata.\n", unionName); return false; } const char* Sn::ConvX::getTypeName(const char* unionName, int typeValue) { const PxU32 nb = mUnions.size(); for(PxU32 i=0;i<nb;i++) { if(Pxstrcmp(mUnions[i].mName, unionName)==0) { const PxU32 nbTypes = mUnions[i].mTypes.size(); for(PxU32 j=0;j<nbTypes;j++) { const UnionType& t = mUnions[i].mTypes[j]; if(t.mTypeValue==typeValue) return t.mTypeName; } break; } } displayMessage(PxErrorCode::eINTERNAL_ERROR, "PxBinaryConverter: union type not found: %s, type %d, please check the source metadata.\n", unionName, typeValue); assert(0); return NULL; }
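The union registry above maps a (union name, integer type value) pair to a concrete type name so the converter can pick the right meta-class for a union field. A rough standalone analogue using only the standard library, assuming nothing beyond what the functions above show, might be:

#include <string>
#include <unordered_map>

// Illustrative stand-in for the union registry (not the PhysX implementation).
struct UnionRegistry
{
    // one value -> typeName table per registered union name
    std::unordered_map<std::string, std::unordered_map<int, std::string> > mUnions;

    void registerUnion(const std::string& unionName)
    {
        mUnions[unionName];                                   // create an empty type table
    }

    bool registerUnionType(const std::string& unionName, const std::string& typeName, int typeValue)
    {
        auto it = mUnions.find(unionName);
        if (it == mUnions.end())
            return false;                                     // union must be registered first
        it->second[typeValue] = typeName;
        return true;
    }

    const char* getTypeName(const std::string& unionName, int typeValue) const
    {
        auto it = mUnions.find(unionName);
        if (it == mUnions.end())
            return nullptr;
        auto jt = it->second.find(typeValue);
        return jt == it->second.end() ? nullptr : jt->second.c_str();
    }
};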
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Binary/SnConvX.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. #ifndef SN_CONVX_H #define SN_CONVX_H #include "foundation/PxErrors.h" #include "common/PxTypeInfo.h" #include "extensions/PxBinaryConverter.h" #include "foundation/PxUserAllocated.h" #include "foundation/PxArray.h" #include "foundation/PxHashMap.h" #include "SnConvX_Common.h" #include "SnConvX_Union.h" #include "SnConvX_MetaData.h" #include "SnConvX_Align.h" #define CONVX_ZERO_BUFFER_SIZE 256 namespace physx { class PxSerializationRegistry; namespace Sn { struct HeightFieldData; class PointerRemap { public: PointerRemap(); ~PointerRemap(); void setObjectRef(PxU64 object64, PxU32 ref); bool getObjectRef(PxU64 object64, PxU32& ref) const; typedef PxHashMap<PxU64, PxU32> PointerMap; PointerMap mData; }; class Handle16Remap { public: Handle16Remap(); ~Handle16Remap(); void setObjectRef(PxU16 object, PxU16 ref); bool getObjectRef(PxU16 object, PxU16& ref) const; typedef PxHashMap<PxU16, PxU16> Handle16Map; Handle16Map mData; }; class ConvX : public physx::PxBinaryConverter, public PxUserAllocated { public: ConvX(); virtual ~ConvX(); virtual void release(); virtual void setReportMode(PxConverterReportMode::Enum mode) { mReportMode = mode; } PX_FORCE_INLINE bool silentMode() const { return mReportMode==PxConverterReportMode::eNONE; } PX_FORCE_INLINE bool verboseMode() const { return mReportMode==PxConverterReportMode::eVERBOSE; } virtual bool setMetaData(PxInputStream& srcMetaData, PxInputStream& dstMetaData); virtual bool compareMetaData() const; virtual bool convert(PxInputStream& srcStream, PxU32 srcSize, PxOutputStream& targetStream); private: ConvX& operator=(const ConvX&); bool setMetaData(PxInputStream& inputStream, MetaDataType type); // Meta-data void releaseMetaData(); const MetaData* loadMetaData(PxInputStream& inputStream, MetaDataType type); const MetaData* getBinaryMetaData(MetaDataType type); int getNbMetaClasses(MetaDataType type); MetaClass* getMetaClass(unsigned int i, MetaDataType type) const; MetaClass* getMetaClass(const char* name, MetaDataType type) const; 
MetaClass* getMetaClass(PxConcreteType::Enum concreteType, MetaDataType type); MetaData* mMetaData_Src; MetaData* mMetaData_Dst; // Convert bool convert(const void* buffer, int fileSize); void resetConvexFlags(); void _enumerateFields(const MetaClass* mc, ExtraDataEntry2* entries, int& nb, int baseOffset, MetaDataType type) const; void _enumerateExtraData(const char* address, const MetaClass* mc, ExtraDataEntry* entries, int& nb, int offset, MetaDataType type) const; PxU64 read64(const void*& buffer); int read32(const void*& buffer); short read16(const void*& buffer); bool convertClass(const char* buffer, const MetaClass* mc, int offset); const char* convertExtraData_Array(const char* Address, const char* lastAddress, const char* objectAddress, const ExtraDataEntry& ed); const char* convertExtraData_Ptr(const char* Address, const char* lastAddress, const PxMetaDataEntry& entry, int count, int ptrSize_Src, int ptrSize_Dst); const char* convertExtraData_Handle(const char* Address, const char* lastAddress, const PxMetaDataEntry& entry, int count); int getConcreteType(const char* buffer); bool convertCollection(const void* buffer, int fileSize, int nbObjects); const void* convertManifestTable(const void* buffer, int& fileSize); const void* convertImportReferences(const void* buffer, int& fileSize); const void* convertExportReferences(const void* buffer, int& fileSize); const void* convertInternalReferences(const void* buffer, int& fileSize); const void* convertReferenceTables(const void* buffer, int& fileSize, int& nbObjectsInCollection); bool checkPaddingBytes(const char* buffer, int byteCount); // ---- big convex surgery ---- PsArray<bool> mConvexFlags; // Align const char* alignStream(const char* buffer, int alignment=ALIGN_DEFAULT); void alignTarget(int alignment); char mZeros[CONVX_ZERO_BUFFER_SIZE]; // Unions bool registerUnion(const char* name); bool registerUnionType(const char* unionName, const char* typeName, int typeValue); const char* getTypeName(const char* unionName, int typeValue); void resetUnions(); PsArray<Union> mUnions; // Output void setNullPtr(bool); void setNoOutput(bool); bool initOutput(PxOutputStream& targetStream); void closeOutput(); int getCurrentOutputSize(); void output(short value); void output(int value); void output(PxU64 value); void output(const char* buffer, int nbBytes); void convert8 (const char* src, const PxMetaDataEntry& entry, const PxMetaDataEntry& dstEntry); void convertPad8 (const char* src, const PxMetaDataEntry& entry, const PxMetaDataEntry& dstEntry); void convert16 (const char* src, const PxMetaDataEntry& entry, const PxMetaDataEntry& dstEntry); void convert32 (const char* src, const PxMetaDataEntry& entry, const PxMetaDataEntry& dstEntry); void convert64 (const char* src, const PxMetaDataEntry& entry, const PxMetaDataEntry& dstEntry); void convertFloat(const char* src, const PxMetaDataEntry& entry, const PxMetaDataEntry& dstEntry); void convertPtr (const char* src, const PxMetaDataEntry& entry, const PxMetaDataEntry& dstEntry); void convertHandle16(const char* src, const PxMetaDataEntry& entry, const PxMetaDataEntry& dstEntry); PxOutputStream* mOutStream; bool mMustFlip; int mOutputSize; int mSrcPtrSize; int mDstPtrSize; bool mNullPtr; bool mNoOutput; bool mMarkedPadding; // Errors void resetNbErrors(); int getNbErrors() const; void displayMessage(physx::PxErrorCode::Enum code, const char* format, ...); int mNbErrors; int mNbWarnings; // Settings PxConverterReportMode::Enum mReportMode; bool mPerformConversion; // Remap pointers void 
exportIntAsPtr(int value); void exportInt(int value); void exportInt64(PxU64 value); PointerRemap mPointerRemap; PointerRemap* mPointerActiveRemap; PxU32 mPointerRemapCounter; Handle16Remap mHandle16Remap; Handle16Remap* mHandle16ActiveRemap; PxU16 mHandle16RemapCounter; friend class MetaData; friend struct MetaClass; }; } } #endif
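PointerRemap above backs the 64-bit-to-32-bit pointer support: each distinct source address is given a small integer reference that is emitted in place of the raw pointer. A standalone sketch of that idea, assuming a simple sequential assignment policy (the actual policy lives in the converter code), is:

#include <cstdint>
#include <unordered_map>

// Sketch only: replace 64-bit addresses by small sequential 32-bit references.
struct PointerRemapSketch
{
    std::unordered_map<std::uint64_t, std::uint32_t> mRefs;
    std::uint32_t mCounter = 0;

    std::uint32_t remap(std::uint64_t address)
    {
        auto it = mRefs.find(address);
        if (it != mRefs.end())
            return it->second;                 // address already has a reference
        const std::uint32_t ref = ++mCounter;
        mRefs.emplace(address, ref);
        return ref;
    }
};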
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Binary/SnConvX_Error.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. #include "foundation/PxErrorCallback.h" #include "foundation/PxString.h" #include "SnConvX.h" #include <stdarg.h> #define MAX_DISPLAYED_ISSUES 10 using namespace physx; void Sn::ConvX::resetNbErrors() { mNbErrors = 0; mNbWarnings = 0; } int Sn::ConvX::getNbErrors() const { return mNbErrors; } void Sn::ConvX::displayMessage(PxErrorCode::Enum code, const char* format, ...) { if(silentMode()) return; int sum = mNbWarnings + mNbErrors; if(sum >= MAX_DISPLAYED_ISSUES) return; bool display = false; if(code==PxErrorCode::eINTERNAL_ERROR || code==PxErrorCode::eINVALID_OPERATION || code==PxErrorCode::eINVALID_PARAMETER) { mNbErrors++; display = true; } else if(code == PxErrorCode::eDEBUG_WARNING) { mNbWarnings++; display = true; } if(display || ((sum == 0) && verboseMode()) ) { va_list va; va_start(va, format); PxGetFoundation().error(code, PX_FL, format, va); va_end(va); } if(display) { if( sum == 0) { PxGetFoundation().error(PxErrorCode::eDEBUG_INFO, PX_FL, "Hit warnings or errors: skipping further verbose output.\n"); } else if(sum == MAX_DISPLAYED_ISSUES-1) { PxGetFoundation().error(PxErrorCode::eDEBUG_INFO, PX_FL, "Exceeding 10 warnings or errors: skipping further output.\n"); } } return; }
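displayMessage() above counts warnings and errors and goes quiet after MAX_DISPLAYED_ISSUES of them, so a badly broken file cannot flood the log. A stripped-down sketch of that throttling pattern using plain stdio (illustrative only, not the PhysX error channel) is:

#include <cstdarg>
#include <cstdio>

// Sketch of the throttling pattern: report at most kMaxIssues messages, then go silent.
class ThrottledLog
{
    enum { kMaxIssues = 10 };
    int mIssues;

public:
    ThrottledLog() : mIssues(0) {}

    void report(const char* format, ...)
    {
        if (mIssues >= kMaxIssues)
            return;                            // budget exhausted, skip further output
        ++mIssues;
        va_list va;
        va_start(va, format);
        std::vfprintf(stderr, format, va);
        va_end(va);
        if (mIssues == kMaxIssues)
            std::fprintf(stderr, "Exceeding %d warnings or errors: skipping further output.\n", int(kMaxIssues));
    }
};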
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Binary/SnSerializationContext.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SN_SERIALIZATION_CONTEXT_H #define SN_SERIALIZATION_CONTEXT_H #include "foundation/PxAssert.h" #include "foundation/PxMemory.h" #include "foundation/PxHash.h" #include "common/PxSerialFramework.h" #include "extensions/PxDefaultStreams.h" #include "foundation/PxUserAllocated.h" #include "CmCollection.h" #include "CmUtils.h" #include "SnConvX_Align.h" namespace physx { namespace Sn { struct ManifestEntry { PX_FORCE_INLINE ManifestEntry(PxU32 _offset, PxType _type) { PxMarkSerializedMemory(this, sizeof(ManifestEntry)); offset = _offset; type = _type; } PX_FORCE_INLINE ManifestEntry() { PxMarkSerializedMemory(this, sizeof(ManifestEntry)); } PX_FORCE_INLINE void operator =(const ManifestEntry& m) { PxMemCopy(this, &m, sizeof(ManifestEntry)); } PxU32 offset; PxType type; }; struct ImportReference { PX_FORCE_INLINE ImportReference(PxSerialObjectId _id, PxType _type) { PxMarkSerializedMemory(this, sizeof(ImportReference)); id = _id; type = _type; } PX_FORCE_INLINE ImportReference() { PxMarkSerializedMemory(this, sizeof(ImportReference)); } PX_FORCE_INLINE void operator =(const ImportReference& m) { PxMemCopy(this, &m, sizeof(ImportReference)); } PxSerialObjectId id; PxType type; }; #define SERIAL_OBJECT_INDEX_TYPE_BIT (1u<<31) struct SerialObjectIndex { PX_FORCE_INLINE SerialObjectIndex(PxU32 index, bool external) { setIndex(index, external); } PX_FORCE_INLINE SerialObjectIndex(const SerialObjectIndex& objIndex) : mObjIndex(objIndex.mObjIndex) {} PX_FORCE_INLINE SerialObjectIndex() : mObjIndex(PX_INVALID_U32) {} PX_FORCE_INLINE void setIndex(PxU32 index, bool external) { PX_ASSERT((index & SERIAL_OBJECT_INDEX_TYPE_BIT) == 0); mObjIndex = index | (external ? 
SERIAL_OBJECT_INDEX_TYPE_BIT : 0); } PX_FORCE_INLINE PxU32 getIndex(bool& isExternal) { PX_ASSERT(mObjIndex != PX_INVALID_U32); isExternal = (mObjIndex & SERIAL_OBJECT_INDEX_TYPE_BIT) > 0; return mObjIndex & ~SERIAL_OBJECT_INDEX_TYPE_BIT; } PX_FORCE_INLINE bool operator < (const SerialObjectIndex& so) const { return mObjIndex < so.mObjIndex; } private: PxU32 mObjIndex; }; struct ExportReference { PX_FORCE_INLINE ExportReference(PxSerialObjectId _id, SerialObjectIndex _objIndex) { PxMarkSerializedMemory(this, sizeof(ExportReference)); id = _id; objIndex = _objIndex; } PX_FORCE_INLINE ExportReference() { PxMarkSerializedMemory(this, sizeof(ExportReference)); } PX_FORCE_INLINE void operator =(const ExportReference& m) { PxMemCopy(this, &m, sizeof(ExportReference)); } PxSerialObjectId id; SerialObjectIndex objIndex; }; struct InternalReferencePtr { PX_FORCE_INLINE InternalReferencePtr() {} PX_FORCE_INLINE InternalReferencePtr(size_t _reference, SerialObjectIndex _objIndex) : reference(_reference), objIndex(_objIndex) #if PX_P64_FAMILY ,pad(PX_PADDING_32) #endif { } size_t reference; SerialObjectIndex objIndex; #if PX_P64_FAMILY PxU32 pad; #endif }; struct InternalReferenceHandle16 { PX_FORCE_INLINE InternalReferenceHandle16() {} PX_FORCE_INLINE InternalReferenceHandle16(PxU16 _reference, SerialObjectIndex _objIndex) : reference(_reference), pad(PX_PADDING_16), objIndex(_objIndex) { } PxU16 reference; PxU16 pad; SerialObjectIndex objIndex; }; typedef Cm::CollectionHashMap<size_t, SerialObjectIndex> InternalPtrRefMap; typedef Cm::CollectionHashMap<PxU16, SerialObjectIndex> InternalHandle16RefMap; class DeserializationContext : public PxDeserializationContext, public PxUserAllocated { PX_NOCOPY(DeserializationContext) public: DeserializationContext(const ManifestEntry* manifestTable, const ImportReference* importReferences, PxU8* objectDataAddress, const InternalPtrRefMap& internalPtrReferencesMap, const InternalHandle16RefMap& internalHandle16ReferencesMap, const Cm::Collection* externalRefs, PxU8* extraData) : mManifestTable(manifestTable) , mImportReferences(importReferences) , mObjectDataAddress(objectDataAddress) , mInternalPtrReferencesMap(internalPtrReferencesMap) , mInternalHandle16ReferencesMap(internalHandle16ReferencesMap) , mExternalRefs(externalRefs) { mExtraDataAddress = extraData; } virtual PxBase* resolveReference(PxU32 kind, size_t reference) const; private: //various pointers to deserialized data const ManifestEntry* mManifestTable; const ImportReference* mImportReferences; PxU8* mObjectDataAddress; //internal references maps for resolving references. const InternalPtrRefMap& mInternalPtrReferencesMap; const InternalHandle16RefMap& mInternalHandle16ReferencesMap; //external collection for resolving import references. 
const Cm::Collection* mExternalRefs; //const PxU32 mPhysXVersion; }; class SerializationContext : public PxSerializationContext, public PxUserAllocated { PX_NOCOPY(SerializationContext) public: SerializationContext(const Cm::Collection& collection, const Cm::Collection* externalRefs) : mCollection(collection) , mExternalRefs(externalRefs) { // fill object to collection index map (same ordering as manifest) for (PxU32 i=0;i<mCollection.internalGetNbObjects();i++) { mObjToCollectionIndexMap[mCollection.internalGetObject(i)] = i; } } virtual void writeData(const void* buffer, PxU32 size) { mMemStream.write(buffer, size); } virtual PxU32 getTotalStoredSize() { return mMemStream.getSize(); } virtual void alignData(PxU32 alignment = PX_SERIAL_ALIGN) { if(!alignment) return; PxI32 bytesToPad = PxI32(getPadding(mMemStream.getSize(), alignment)); static const PxI32 BUFSIZE = 64; char buf[BUFSIZE]; PxMemSet(buf, 0, bytesToPad < BUFSIZE ? PxU32(bytesToPad) : PxU32(BUFSIZE)); while(bytesToPad > 0) { mMemStream.write(buf, bytesToPad < BUFSIZE ? PxU32(bytesToPad) : PxU32(BUFSIZE)); bytesToPad -= BUFSIZE; } PX_ASSERT(!getPadding(getTotalStoredSize(), alignment)); } virtual void writeName(const char*) { PxGetFoundation().error(physx::PxErrorCode::eINVALID_OPERATION, PX_FL, "Cannot export names during exportData."); } const PxCollection& getCollection() const { return mCollection; } virtual void registerReference(PxBase& serializable, PxU32 kind, size_t reference); const PxArray<ImportReference>& getImportReferences() { return mImportReferences; } InternalPtrRefMap& getInternalPtrReferencesMap() { return mInternalPtrReferencesMap; } InternalHandle16RefMap& getInternalHandle16ReferencesMap() { return mInternalHandle16ReferencesMap; } PxU32 getSize() const { return mMemStream.getSize(); } PxU8* getData() const { return mMemStream.getData(); } private: //import reference map for unique registration of import references and corresponding buffer. PxHashMap<PxSerialObjectId, PxU32> mImportReferencesMap; PxArray<ImportReference> mImportReferences; //maps for unique registration of internal references InternalPtrRefMap mInternalPtrReferencesMap; InternalHandle16RefMap mInternalHandle16ReferencesMap; //map for quick lookup of manifest index. PxHashMap<const PxBase*, PxU32> mObjToCollectionIndexMap; //collection and externalRefs collection for assigning references. const Cm::Collection& mCollection; const Cm::Collection* mExternalRefs; PxDefaultMemoryOutputStream mMemStream; }; } // namespace Sn } #endif
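SerialObjectIndex above packs an object index and an "is external reference" flag into one 32-bit word: the top bit (SERIAL_OBJECT_INDEX_TYPE_BIT) carries the flag, the low 31 bits carry the index. The same packing shown in isolation (illustrative helpers, not the PhysX type):

#include <cassert>
#include <cstdint>

static const std::uint32_t kExternalBit = 1u << 31;

static std::uint32_t packIndex(std::uint32_t index, bool external)
{
    assert((index & kExternalBit) == 0);       // index must fit in 31 bits
    return index | (external ? kExternalBit : 0u);
}

static std::uint32_t unpackIndex(std::uint32_t packed, bool& external)
{
    external = (packed & kExternalBit) != 0;
    return packed & ~kExternalBit;
}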
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Binary/SnConvX.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. /* - get rid of STL - NpArticulationLinkArray with more than 4 entries - big convexes Xbox => PC - put a cache in "convertClass" function - remove MD one at a time and check what happens in the converter. Make it robust/report errors - for xbox, compare against source xbox file if it exists - maybe put some info in the header to display "File generated by ConvX 1.xx" on screen (to debug) - report inconsistent naming convention in each class!!!! - try to automatically discover padding bytes? use "0xcd" pattern? * do last param of XXXX_ITEMS macro automatically - what if source files are 64bits? can we safely convert a 64bit ptr to 32bit? 
*/ #include "foundation/PxErrorCallback.h" #include "foundation/PxAllocatorCallback.h" #include "foundation/PxIO.h" #include "SnConvX.h" #include "serialization/SnSerializationRegistry.h" #include <assert.h> using namespace physx; Sn::ConvX::ConvX() : mMetaData_Src (NULL), mMetaData_Dst (NULL), mOutStream (NULL), mMustFlip (false), mOutputSize (0), mSrcPtrSize (0), mDstPtrSize (0), mNullPtr (false), mNoOutput (false), mMarkedPadding (false), mNbErrors (0), mNbWarnings (0), mReportMode (PxConverterReportMode::eNORMAL), mPerformConversion (true) { // memset(mZeros, 0, CONVX_ZERO_BUFFER_SIZE); memset(mZeros, 0x42, CONVX_ZERO_BUFFER_SIZE); } Sn::ConvX::~ConvX() { resetNbErrors(); resetConvexFlags(); releaseMetaData(); resetUnions(); } void Sn::ConvX::release() { PX_DELETE_THIS; } bool Sn::ConvX::setMetaData(PxInputStream& inputStream, MetaDataType type) { resetNbErrors(); return (loadMetaData(inputStream, type) != NULL); } bool Sn::ConvX::setMetaData(PxInputStream& srcMetaData, PxInputStream& dstMetaData) { releaseMetaData(); resetUnions(); if(!setMetaData(srcMetaData, META_DATA_SRC)) return false; if(!setMetaData(dstMetaData, META_DATA_DST)) return false; return true; } bool Sn::ConvX::compareMetaData() const { if (!mMetaData_Src || !mMetaData_Dst) { PxGetFoundation().error(PxErrorCode::eINVALID_OPERATION, PX_FL, "PxBinaryConverter: metadata not defined. Call PxBinaryConverter::setMetaData first.\n"); return false; } return mMetaData_Src->compare(*mMetaData_Dst); } bool Sn::ConvX::convert(PxInputStream& srcStream, PxU32 srcSize, PxOutputStream& targetStream) { if(!mMetaData_Src || !mMetaData_Dst) { PxGetFoundation().error(PxErrorCode::eINVALID_OPERATION, PX_FL, "PxBinaryConverter: metadata not defined. Call PxBinaryConverter::setMetaData first.\n"); return false; } resetConvexFlags(); resetNbErrors(); bool conversionStatus = false; if(mPerformConversion) { if(srcSize == 0) { PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "PxBinaryConverter: source serialized data size is zero.\n"); return false; } void* memory = PX_ALLOC(srcSize+ALIGN_FILE, "ConvX source file"); void* memoryA = reinterpret_cast<void*>((size_t(memory) + ALIGN_FILE)&~(ALIGN_FILE-1)); const PxU32 nbBytesRead = srcStream.read(memoryA, srcSize); if(nbBytesRead != srcSize) { PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "PxBinaryConverter: failure on reading source serialized data.\n"); PX_FREE(memory); return false; } displayMessage(PxErrorCode::eDEBUG_INFO, "\n\nConverting...\n\n"); { if(!initOutput(targetStream)) { PX_FREE(memory); return false; } conversionStatus = convert(memoryA, int(srcSize)); closeOutput(); } PX_FREE(memory); } return conversionStatus; }
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Binary/SnConvX_MetaData.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. #ifndef SN_CONVX_METADATA_H #define SN_CONVX_METADATA_H #include "common/PxMetaDataFlags.h" #include "SnConvX_Output.h" #include "serialization/SnSerialUtils.h" namespace physx { namespace Sn { #if PX_VC #pragma warning (push) #pragma warning (disable : 4371) //layout of class may have changed from a previous version of the compiler due to better packing of member #endif // PT: beware, must match corresponding structure in PxMetaData.h struct PxMetaDataEntry : public PxUserAllocated { PxMetaDataEntry() { memset(this, 0, sizeof(*this)); } bool isVTablePtr() const; const char* mType; //!< Field type (bool, byte, quaternion, etc) const char* mName; //!< Field name (appears exactly as in the source file) int mOffset; //!< Offset from the start of the class (ie from "this", field is located at "this"+Offset) int mSize; //!< sizeof(Type) int mCount; //!< Number of items of type Type (0 for dynamic sizes) int mOffsetSize; //!< Offset of dynamic size param, for dynamic arrays int mFlags; //!< Field parameters int mAlignment; //!< Explicit alignment added for DE1340 }; struct MetaDataEntry32 { PxI32 mType; //!< Field type (bool, byte, quaternion, etc) PxI32 mName; //!< Field name (appears exactly as in the source file) int mOffset; //!< Offset from the start of the class (ie from "this", field is located at "this"+Offset) int mSize; //!< sizeof(Type) int mCount; //!< Number of items of type Type (0 for dynamic sizes) int mOffsetSize; //!< Offset of dynamic size param, for dynamic arrays int mFlags; //!< Field parameters int mAlignment; //!< Explicit alignment added for DE1340 }; struct MetaDataEntry64 { PxI64 mType; //!< Field type (bool, byte, quaternion, etc) PxI64 mName; //!< Field name (appears exactly as in the source file) int mOffset; //!< Offset from the start of the class (ie from "this", field is located at "this"+Offset) int mSize; //!< sizeof(Type) int mCount; //!< Number of items of type Type (0 for dynamic sizes) int mOffsetSize; //!< Offset of dynamic size param, for 
dynamic arrays int mFlags; //!< Field parameters int mAlignment; //!< Explicit alignment added for DE1340 }; struct ExtraDataEntry { PxMetaDataEntry entry; int offset; }; struct ExtraDataEntry2 : ExtraDataEntry { ConvertCallback cb; }; class MetaData; struct MetaClass : public PxUserAllocated { bool getFieldByType(const char* type, PxMetaDataEntry& entry) const; bool getFieldByName(const char* name, PxMetaDataEntry& entry) const; bool check(const MetaData& owner); ConvertCallback mCallback; MetaClass* mMaster; const char* mClassName; int mSize; int mDepth; PsArray<PxMetaDataEntry> mBaseClasses; PsArray<PxMetaDataEntry> mFields; bool mProcessed; // int mNbEntries; // ExtraDataEntry2 mEntries[256]; private: void checkAndCompleteClass(const MetaData& owner, int& startOffset, int& nbBytes); }; enum MetaDataType { META_DATA_NONE, META_DATA_SRC, META_DATA_DST }; class ConvX; class MetaData : public PxUserAllocated { public: MetaData(Sn::ConvX&); ~MetaData(); bool load(PxInputStream& inputStream, MetaDataType type); inline_ MetaDataType getType() const { return mType; } inline_ int getVersion() const { return mVersion; } inline_ int getPtrSize() const { return mSizeOfPtr; } inline_ int getPlatformTag() const { return mPlatformTag; } inline_ int getGaussMapLimit() const { return mGaussMapLimit; } inline_ int getNbMetaClasses() const { return int(mMetaClasses.size()); } inline_ MetaClass* getMetaClass(unsigned int i) const { return mMetaClasses[i]; } inline_ bool getFlip() const { return mFlip; } MetaClass* getMetaClass(const char* name) const; MetaClass* getMetaClass(PxConcreteType::Enum concreteType) const; MetaClass* addNewClass(const char* name, int size, MetaClass* master=NULL, ConvertCallback callback=NULL); bool compare(const MetaData& candidate) const; private: MetaData& operator=(const MetaData&); Sn::ConvX& mConvX; MetaDataType mType; int mNbEntries; PxMetaDataEntry* mEntries; char* mStringTable; PsArray<MetaClass*> mMetaClasses; int mVersion; char mBinaryVersionGuid[SN_BINARY_VERSION_GUID_NUM_CHARS + 1]; int mSizeOfPtr; int mPlatformTag; int mGaussMapLimit; bool mFlip; PsArray< PxPair<PxConcreteType::Enum, PxU32> > mConcreteTypeTable; inline_ const char* offsetToText(const char* text) const { const size_t offset = size_t(text); const PxU32 offset32 = PxU32(offset); // if(offset==-1) if(offset32==0xffffffff) return NULL; return mStringTable + offset32; } friend struct MetaClass; }; PxU64 peek(int size, const char* buffer, int flags=0); #if PX_VC #pragma warning (pop) #endif } } #endif
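offsetToText() above resolves field and type names that are serialized as 32-bit offsets into one shared string table, with 0xffffffff standing for "no name". The lookup in isolation (sketch with a made-up name):

#include <cstdint>

static const char* resolveName(const char* stringTable, std::uint32_t offset)
{
    if (offset == 0xffffffffu)
        return nullptr;                        // sentinel: entry has no name
    return stringTable + offset;
}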
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/File/SnFile.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SN_FILE_H #define SN_FILE_H // fopen_s - returns 0 on success, non-zero on failure #if PX_WINDOWS_FAMILY #include <stdio.h> namespace physx { namespace sn { PX_INLINE PxI32 fopen_s(FILE** file, const char* name, const char* mode) { static const PxU32 MAX_LEN = 300; char buf[MAX_LEN+1]; PxU32 i; for(i = 0; i<MAX_LEN && name[i]; i++) buf[i] = name[i] == '/' ? '\\' : name[i]; buf[i] = 0; return i == MAX_LEN ? -1 : ::fopen_s(file, buf, mode); }; } // namespace sn } // namespace physx #elif PX_UNIX_FAMILY || PX_SWITCH #include <stdio.h> namespace physx { namespace sn { PX_INLINE PxI32 fopen_s(FILE** file, const char* name, const char* mode) { FILE* fp = ::fopen(name, mode); if(fp) { *file = fp; return PxI32(0); } return -1; } } // namespace sn } // namespace physx #else #error "Platform not supported!" #endif #endif //SN_FILE_H
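A possible use of the portable fopen_s wrapper declared above; it returns 0 on success on every supported platform. The surrounding function and file name are made up for illustration:

#include <cstdio>

static FILE* openForReading(const char* path)
{
    FILE* fp = NULL;
    if (physx::sn::fopen_s(&fp, path, "rb") != 0)
        return NULL;                           // non-zero return means the open failed
    return fp;
}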
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Xml/SnXmlMemoryAllocator.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SN_XML_MEMORY_ALLOCATOR_H #define SN_XML_MEMORY_ALLOCATOR_H #include "foundation/PxSimpleTypes.h" namespace physx { class PX_DEPRECATED XmlMemoryAllocator { protected: virtual ~XmlMemoryAllocator(){} public: virtual PxU8* allocate(PxU32 inSize) = 0; virtual void deallocate( PxU8* inMem ) = 0; virtual PxAllocatorCallback& getAllocator() = 0; template<typename TObjectType> TObjectType* allocate() { TObjectType* retval = reinterpret_cast< TObjectType* >( allocate( sizeof( TObjectType ) ) ); new (retval) TObjectType(); return retval; } template<typename TObjectType, typename TArgType> TObjectType* allocate(const TArgType &arg) { TObjectType* retval = reinterpret_cast< TObjectType* >( allocate( sizeof( TObjectType ) ) ); new (retval) TObjectType(arg); return retval; } template<typename TObjectType> void deallocate( TObjectType* inObject ) { deallocate( reinterpret_cast<PxU8*>( inObject ) ); } template<typename TObjectType> inline TObjectType* batchAllocate(PxU32 inCount ) { TObjectType* retval = reinterpret_cast<TObjectType*>( allocate( sizeof(TObjectType) * inCount ) ); for ( PxU32 idx = 0; idx < inCount; ++idx ) { new (retval + idx) TObjectType(); } return retval; } template<typename TObjectType, typename TArgType> inline TObjectType* batchAllocate(PxU32 inCount, const TArgType &arg) { TObjectType* retval = reinterpret_cast<TObjectType*>( allocate( sizeof(TObjectType) * inCount ) ); for ( PxU32 idx = 0; idx < inCount; ++idx ) { new (retval + idx) TObjectType(arg); } return retval; } //Duplicate function definition for gcc. 
template<typename TObjectType> inline TObjectType* batchAllocate(TObjectType*, PxU32 inCount ) { TObjectType* retval = reinterpret_cast<TObjectType*>( allocate( sizeof(TObjectType) * inCount ) ); for ( PxU32 idx = 0; idx < inCount; ++idx ) { new (retval + idx) TObjectType(); } return retval; } }; struct PX_DEPRECATED XmlMemoryAllocatorImpl : public XmlMemoryAllocator { Sn::TMemoryPoolManager mManager; XmlMemoryAllocatorImpl( PxAllocatorCallback& inAllocator ) : mManager( inAllocator ) { } XmlMemoryAllocatorImpl &operator=(const XmlMemoryAllocatorImpl &); virtual PxAllocatorCallback& getAllocator() { return mManager.getWrapper().getAllocator(); } virtual PxU8* allocate(PxU32 inSize ) { if ( !inSize ) return NULL; return mManager.allocate( inSize ); } virtual void deallocate( PxU8* inMem ) { if ( inMem ) mManager.deallocate( inMem ); } }; } #endif
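batchAllocate() above combines one raw allocation with placement new to construct an array of objects in place. The core pattern in isolation (a sketch that assumes the raw buffer is large enough and suitably aligned for T):

#include <cstddef>
#include <new>

template <typename T>
T* constructInPlace(void* raw, std::size_t count)
{
    T* objects = static_cast<T*>(raw);
    for (std::size_t i = 0; i < count; ++i)
        new (objects + i) T();                 // default-construct each element in the buffer
    return objects;
}

Since the objects are placement-constructed, their destructors have to be invoked explicitly before the underlying buffer is handed back to the allocator.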
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Xml/SnXmlReader.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SN_XML_READER_H #define SN_XML_READER_H #include "foundation/PxSimpleTypes.h" #include "extensions/PxRepXSimpleType.h" namespace physx { namespace Sn { struct XmlNode; } /** * Reader used to read data out of the repx format. */ class XmlReader { protected: virtual ~XmlReader(){} public: /** Read a key-value pair out of the database */ virtual bool read( const char* inName, const char*& outData ) = 0; /** Read an object id out of the database */ virtual bool read( const char* inName, PxSerialObjectId& outId ) = 0; /** Goto a child element by name. That child becomes this reader's context */ virtual bool gotoChild( const char* inName ) = 0; /** Goto the first child regardless of name */ virtual bool gotoFirstChild() = 0; /** Goto the next sibling regardless of name */ virtual bool gotoNextSibling() = 0; /** Count all children of the current object */ virtual PxU32 countChildren() = 0; /** Get the name of the current item */ virtual const char* getCurrentItemName() = 0; /** Get the value of the current item */ virtual const char* getCurrentItemValue() = 0; /** Leave the current child */ virtual bool leaveChild() = 0; /** Get reader for the parental object */ virtual XmlReader* getParentReader() = 0; /** * Ensures we don't leave the reader in an odd state * due to not leaving a given child */ virtual void pushCurrentContext() = 0; /** Pop the current context back to where it during push*/ virtual void popCurrentContext() = 0; }; //Used when upgrading a repx collection class XmlReaderWriter : public XmlReader { public: //Clears the stack of nodes (push/pop current node reset) //and sets the current node to inNode. virtual void setNode( Sn::XmlNode& node ) = 0; //If the child exists, add it. //the either way goto that child. 
virtual void addOrGotoChild( const char* name ) = 0; //Value is copied into the collection, inValue has no further references //to it. virtual void setCurrentItemValue( const char* value ) = 0; //Removes the child but does not release the char* name or char* data ptrs. //Those pointers are never released and are shared among collections. //Thus copying nodes is cheap and safe. virtual bool removeChild( const char* name ) = 0; virtual void release() = 0; bool renameProperty( const char* oldName, const char* newName ) { if ( gotoChild( oldName ) ) { const char* value = getCurrentItemValue(); leaveChild(); removeChild( oldName ); addOrGotoChild( newName ); setCurrentItemValue( value ); leaveChild(); return true; } return false; } bool readAndRemoveProperty( const char* name, const char*& outValue ) { bool retval = read( name, outValue ); if ( retval ) removeChild( name ); return retval; } bool writePropertyIfNotEmpty( const char* name, const char* value ) { if ( value && *value ) { addOrGotoChild( name ); setCurrentItemValue( value ); leaveChild(); return true; } return false; } }; } #endif
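pushCurrentContext()/popCurrentContext() above exist so a caller can descend into children without having to balance every gotoChild() with a leaveChild() on all code paths. A hypothetical usage sketch (the node names are invented; reader is any XmlReader implementation):

static const char* tryReadNestedValue(physx::XmlReader& reader)
{
    const char* value = NULL;
    reader.pushCurrentContext();               // remember the current node
    if (reader.gotoChild("Actors") && reader.gotoChild("actor0"))
        value = reader.getCurrentItemValue();
    reader.popCurrentContext();                // restore it, even if the descent stopped early
    return value;
}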
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Xml/SnRepXUpgrader.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #include "foundation/PxMemory.h" #include "SnXmlImpl.h" #include "SnXmlReader.h" #include "SnXmlMemoryAllocator.h" #include "SnRepXCollection.h" #include "SnRepXUpgrader.h" using namespace physx::profile; namespace physx { namespace Sn { #define DEFINE_REPX_DEFAULT_PROPERTY( name, val ) RepXDefaultEntry( name, val ), static RepXDefaultEntry gRepX1_0Defaults[] = { #include "SnRepX1_0Defaults.h" }; static PxU32 gNumRepX1_0Default = sizeof( gRepX1_0Defaults ) / sizeof ( *gRepX1_0Defaults ); static RepXDefaultEntry gRepX3_1Defaults[] = { #include "SnRepX3_1Defaults.h" }; static PxU32 gNumRepX3_1Defaults = sizeof( gRepX3_1Defaults ) / sizeof ( *gRepX3_1Defaults ); static RepXDefaultEntry gRepX3_2Defaults[] = { #include "SnRepX3_2Defaults.h" }; static PxU32 gNumRepX3_2Defaults = sizeof( gRepX3_2Defaults ) / sizeof ( *gRepX3_2Defaults ); inline const char* nextPeriod( const char* str ) { for( ++str; str && *str && *str != '.'; ++str ); //empty loop intentional return str; } inline bool safeStrEq(const char* lhs, const char* rhs) { if (lhs == rhs) return true; //If they aren't equal, and one of them is null, //then they can't be equal. //This is assuming that the null char* is not equal to //the empty "" char*. 
if (!lhs || !rhs) return false; return ::strcmp(lhs, rhs) == 0; } typedef PxProfileHashMap<const char*, PxU32> TNameOffsetMap; void setMissingPropertiesToDefault( XmlNode* topNode, XmlReaderWriter& editor, const RepXDefaultEntry* defaults, PxU32 numDefaults, TNameOffsetMap& map ) { for ( XmlNode* child = topNode->mFirstChild; child != NULL; child = child->mNextSibling ) setMissingPropertiesToDefault( child, editor, defaults, numDefaults, map ); const TNameOffsetMap::Entry* entry( map.find( topNode->mName ) ); if ( entry ) { XmlReaderWriter& theReader( editor ); theReader.setNode( *topNode ); char nameBuffer[512] = {0}; size_t nameLen = strlen( topNode->mName ); //For each default property entry for this node type. for ( const RepXDefaultEntry* item = defaults + entry->second; Pxstrncmp( item->name, topNode->mName, nameLen ) == 0; ++item ) { bool childAdded = false; const char* nameStart = item->name + nameLen; ++nameStart; theReader.pushCurrentContext(); const char* str = nameStart; while( *str ) { const char *period = nextPeriod( str ); size_t len = size_t(PxMin( period - str, ptrdiff_t(1023) )); //can't be too careful these days. PxMemCopy( nameBuffer, str, PxU32(len) ); nameBuffer[len] = 0; if ( theReader.gotoChild( nameBuffer ) == false ) { childAdded = true; theReader.addOrGotoChild( nameBuffer ); } if (*period ) str = period + 1; else str = period; } if ( childAdded ) theReader.setCurrentItemValue( item->value ); theReader.popCurrentContext(); } } } static void setMissingPropertiesToDefault( RepXCollection& collection, XmlReaderWriter& editor, const RepXDefaultEntry* defaults, PxU32 numDefaults ) { PxProfileAllocatorWrapper wrapper( collection.getAllocator() ); //Release all strings at once, instead of piece by piece XmlMemoryAllocatorImpl alloc( collection.getAllocator() ); //build a hashtable of the initial default value strings. TNameOffsetMap nameOffsets( wrapper ); for ( PxU32 idx = 0; idx < numDefaults; ++idx ) { const RepXDefaultEntry& item( defaults[idx] ); size_t nameLen = 0; const char* periodPtr = nextPeriod (item.name); for ( ; periodPtr && *periodPtr; ++periodPtr ) if( *periodPtr == '.' ) break; if ( periodPtr == NULL || *periodPtr != '.' ) continue; nameLen = size_t(periodPtr - item.name); char* newMem = reinterpret_cast<char*>(alloc.allocate( PxU32(nameLen + 1) )); PxMemCopy( newMem, item.name, PxU32(nameLen) ); newMem[nameLen] = 0; if ( nameOffsets.find( newMem ) ) alloc.deallocate( reinterpret_cast<PxU8*>(newMem) ); else nameOffsets.insert( newMem, idx ); } //Run through each collection item, and recursively find it and its children //If an object's name is in the hash map, check and add any properties that don't exist. //else return. 
for ( const RepXCollectionItem* item = collection.begin(), *end = collection.end(); item != end; ++ item ) { RepXCollectionItem theItem( *item ); setMissingPropertiesToDefault( theItem.descriptor, editor, defaults, numDefaults, nameOffsets ); } } struct RecursiveTraversal { RecursiveTraversal(XmlReaderWriter& editor): mEditor(editor) {} void traverse() { mEditor.pushCurrentContext(); updateNode(); for(bool exists = mEditor.gotoFirstChild(); exists; exists = mEditor.gotoNextSibling()) traverse(); mEditor.popCurrentContext(); } virtual void updateNode() = 0; virtual ~RecursiveTraversal() {} XmlReaderWriter& mEditor; protected: RecursiveTraversal& operator=(const RecursiveTraversal&){return *this;} }; RepXCollection& RepXUpgrader::upgrade10CollectionTo3_1Collection(RepXCollection& src) { XmlReaderWriter& editor( src.createNodeEditor() ); setMissingPropertiesToDefault(src, editor, gRepX1_0Defaults, gNumRepX1_0Default ); RepXCollection* dest = &src.createCollection("3.1.1"); for ( const RepXCollectionItem* item = src.begin(), *end = src.end(); item != end; ++ item ) { //either src or dest could do the copy operation, it doesn't matter who does it. RepXCollectionItem newItem( item->liveObject, src.copyRepXNode( item->descriptor ) ); editor.setNode( *const_cast<XmlNode*>( newItem.descriptor ) ); //Some old files have this name in their system. editor.renameProperty( "MassSpaceInertia", "MassSpaceInertiaTensor" ); editor.renameProperty( "SleepEnergyThreshold", "SleepThreshold" ); if ( strstr( newItem.liveObject.typeName, "Joint" ) || strstr( newItem.liveObject.typeName, "joint" ) ) { //Joints changed format a bit. old joints looked like: /* <Actor0 >1627536</Actor0> <Actor1 >1628368</Actor1> <LocalPose0 >0 0 0 1 0.5 0.5 0.5</LocalPose0> <LocalPose1 >0 0 0 1 0.3 0.3 0.3</LocalPose1>*/ //New joints look like: /* <Actors > <actor0 >58320336</actor0> <actor1 >56353568</actor1> </Actors> <LocalPose > <eACTOR0 >0 0 0 1 0.5 0.5 0.5</eACTOR0> <eACTOR1 >0 0 0 1 0.3 0.3 0.3</eACTOR1> </LocalPose> */ const char* actor0, *actor1, *lp0, *lp1; editor.readAndRemoveProperty( "Actor0", actor0 ); editor.readAndRemoveProperty( "Actor1", actor1 ); editor.readAndRemoveProperty( "LocalPose0", lp0 ); editor.readAndRemoveProperty( "LocalPose1", lp1 ); editor.addOrGotoChild( "Actors" ); editor.writePropertyIfNotEmpty( "actor0", actor0 ); editor.writePropertyIfNotEmpty( "actor1", actor1 ); editor.leaveChild(); editor.addOrGotoChild( "LocalPose" ); editor.writePropertyIfNotEmpty( "eACTOR0", lp0 ); editor.writePropertyIfNotEmpty( "eACTOR1", lp1 ); editor.leaveChild(); } //now desc owns the new node. Collections share a single allocation pool, however, //which will get destroyed when all the collections referencing it are destroyed themselves. //Data on nodes is shared between nodes, but the node structure itself is allocated. dest->addCollectionItem( newItem ); } editor.release(); src.destroy(); return *dest; } RepXCollection& RepXUpgrader::upgrade3_1CollectionTo3_2Collection(RepXCollection& src) { XmlReaderWriter& editor( src.createNodeEditor() ); setMissingPropertiesToDefault(src, editor, gRepX3_1Defaults, gNumRepX3_1Defaults ); RepXCollection* dest = &src.createCollection("3.2.0"); for ( const RepXCollectionItem* item = src.begin(), *end = src.end(); item != end; ++ item ) { //either src or dest could do the copy operation, it doesn't matter who does it. 
RepXCollectionItem newItem( item->liveObject, src.copyRepXNode( item->descriptor ) ); editor.setNode( *const_cast<XmlNode*>( newItem.descriptor ) ); if ( strstr( newItem.liveObject.typeName, "PxMaterial" ) ) { editor.removeChild( "DynamicFrictionV" ); editor.removeChild( "StaticFrictionV" ); editor.removeChild( "dirOfAnisotropy" ); } //now desc owns the new node. Collections share a single allocation pool, however, //which will get destroyed when all the collections referencing it are destroyed themselves. //Data on nodes is shared between nodes, but the node structure itself is allocated. dest->addCollectionItem( newItem ); } editor.release(); src.destroy(); return *dest; } RepXCollection& RepXUpgrader::upgrade3_2CollectionTo3_3Collection(RepXCollection& src) { XmlReaderWriter& editor( src.createNodeEditor() ); setMissingPropertiesToDefault(src, editor, gRepX3_2Defaults, gNumRepX3_2Defaults ); RepXCollection* dest = &src.createCollection("3.3.0"); struct RenameSpringToStiffness : public RecursiveTraversal { RenameSpringToStiffness(XmlReaderWriter& editor_): RecursiveTraversal(editor_) {} void updateNode() { mEditor.renameProperty("Spring", "Stiffness"); mEditor.renameProperty("TangentialSpring", "TangentialStiffness"); } }; struct UpdateArticulationSwingLimit : public RecursiveTraversal { UpdateArticulationSwingLimit(XmlReaderWriter& editor_): RecursiveTraversal(editor_) {} void updateNode() { if(!Pxstricmp(mEditor.getCurrentItemName(), "yLimit") && !Pxstricmp(mEditor.getCurrentItemValue(), "0")) mEditor.setCurrentItemValue("0.785398"); if(!Pxstricmp(mEditor.getCurrentItemName(), "zLimit") && !Pxstricmp(mEditor.getCurrentItemValue(), "0")) mEditor.setCurrentItemValue("0.785398"); if(!Pxstricmp(mEditor.getCurrentItemName(), "TwistLimit")) { mEditor.gotoFirstChild(); PxReal lower = PxReal(strtod(mEditor.getCurrentItemValue(), NULL)); mEditor.gotoNextSibling(); PxReal upper = PxReal(strtod(mEditor.getCurrentItemValue(), NULL)); mEditor.leaveChild(); if(lower>=upper) { mEditor.writePropertyIfNotEmpty("lower", "-0.785398"); mEditor.writePropertyIfNotEmpty("upper", "0.785398"); } } } }; for ( const RepXCollectionItem* item = src.begin(), *end = src.end(); item != end; ++ item ) { //either src or dest could do the copy operation, it doesn't matter who does it. RepXCollectionItem newItem( item->liveObject, src.copyRepXNode( item->descriptor ) ); if ( strstr( newItem.liveObject.typeName, "PxCloth" ) || strstr( newItem.liveObject.typeName, "PxClothFabric" ) ) { PxGetFoundation().error(PxErrorCode::eDEBUG_WARNING, PX_FL, "Didn't support PxCloth upgrade from 3.2 to 3.3! 
"); continue; } if ( strstr( newItem.liveObject.typeName, "PxParticleSystem" ) || strstr( newItem.liveObject.typeName, "PxParticleFluid" ) ) { editor.setNode( *const_cast<XmlNode*>( newItem.descriptor ) ); editor.renameProperty( "PositionBuffer", "Positions" ); editor.renameProperty( "VelocityBuffer", "Velocities" ); editor.renameProperty( "RestOffsetBuffer", "RestOffsets" ); } if(strstr(newItem.liveObject.typeName, "PxPrismaticJoint" ) || strstr(newItem.liveObject.typeName, "PxRevoluteJoint") || strstr(newItem.liveObject.typeName, "PxSphericalJoint") || strstr(newItem.liveObject.typeName, "PxD6Joint") || strstr(newItem.liveObject.typeName, "PxArticulation")) { editor.setNode( *const_cast<XmlNode*>( newItem.descriptor ) ); RenameSpringToStiffness(editor).traverse(); } if(strstr(newItem.liveObject.typeName, "PxArticulation")) { editor.setNode( *const_cast<XmlNode*>( newItem.descriptor ) ); UpdateArticulationSwingLimit(editor).traverse(); } //now dest owns the new node. Collections share a single allocation pool, however, //which will get destroyed when all the collections referencing it are destroyed themselves. //Data on nodes is shared between nodes, but the node structure itself is allocated. dest->addCollectionItem( newItem ); } editor.release(); src.destroy(); return *dest; } RepXCollection& RepXUpgrader::upgrade3_3CollectionTo3_4Collection(RepXCollection& src) { RepXCollection* dest = &src.createCollection("3.4.0"); for ( const RepXCollectionItem* item = src.begin(), *end = src.end(); item != end; ++ item ) { if(strstr(item->liveObject.typeName, "PxTriangleMesh")) { PxRepXObject newMeshRepXObj("PxBVH33TriangleMesh", item->liveObject.serializable, item->liveObject.id); XmlNode* newMeshNode = src.copyRepXNode( item->descriptor ); newMeshNode->mName = "PxBVH33TriangleMesh"; RepXCollectionItem newMeshItem(newMeshRepXObj, newMeshNode); dest->addCollectionItem( newMeshItem ); continue; } RepXCollectionItem newItem( item->liveObject, src.copyRepXNode( item->descriptor ) ); dest->addCollectionItem( newItem ); } src.destroy(); return *dest; } RepXCollection& RepXUpgrader::upgrade3_4CollectionTo4_0Collection(RepXCollection& src) { RepXCollection* dest = &src.createCollection("4.0.0"); for (const RepXCollectionItem* item = src.begin(), *end = src.end(); item != end; ++item) { if (strstr(item->liveObject.typeName, "PxParticleFluid") || strstr(item->liveObject.typeName, "PxParticleSystem") || strstr(item->liveObject.typeName, "PxClothFabric") || strstr(item->liveObject.typeName, "PxCloth")) { continue; } RepXCollectionItem newItem(item->liveObject, src.copyRepXNode(item->descriptor)); dest->addCollectionItem(newItem); } src.destroy(); return *dest; } RepXCollection& RepXUpgrader::upgradeCollection(RepXCollection& src) { const char* srcVersion = src.getVersion(); if( safeStrEq( srcVersion, RepXCollection::getLatestVersion() )) return src; typedef RepXCollection& (*UPGRADE_FUNCTION)(RepXCollection& src); struct Upgrade { const char* versionString; UPGRADE_FUNCTION upgradeFunction; }; static const Upgrade upgradeTable[] = { { "1.0", upgrade10CollectionTo3_1Collection }, { "3.1", NULL }, { "3.1.1", upgrade3_1CollectionTo3_2Collection }, { "3.2.0", upgrade3_2CollectionTo3_3Collection }, { "3.3.0", NULL }, { "3.3.1", NULL }, { "3.3.2", NULL }, { "3.3.3", NULL }, { "3.3.4", upgrade3_3CollectionTo3_4Collection }, { "3.4.0", NULL }, { "3.4.1", NULL }, { "3.4.2", upgrade3_4CollectionTo4_0Collection } }; //increasing order and complete const PxU32 upgradeTableSize = 
sizeof(upgradeTable)/sizeof(upgradeTable[0]); PxU32 repxVersion = UINT16_MAX; for (PxU32 i=0; i<upgradeTableSize; i++) { if( safeStrEq( srcVersion, upgradeTable[i].versionString )) { repxVersion = i; break; } } RepXCollection* dest = &src; for( PxU32 j = repxVersion; j < upgradeTableSize; j++ ) { if( upgradeTable[j].upgradeFunction ) dest = &(upgradeTable[j].upgradeFunction)(*dest); } return *dest; } } }
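// ---------------------------------------------------------------------------------------
// Illustrative sketch (not part of the original source): the table-driven, chained upgrade
// pattern that upgradeCollection() implements above, reduced to plain C++ so it can be
// compiled and tested in isolation. The Doc type, version strings and bump functions are
// hypothetical; the real code dispatches on RepXCollection versions instead.
#include <cstring>

namespace upgrade_table_sketch
{
	struct Doc { const char* version; };

	static Doc& bumpTo31( Doc& d ) { d.version = "3.1.1"; return d; }
	static Doc& bumpTo32( Doc& d ) { d.version = "3.2.0"; return d; }

	typedef Doc& (*UpgradeFunction)(Doc&);
	struct Step { const char* versionString; UpgradeFunction upgradeFunction; };

	// Rows are ordered oldest to newest; a NULL function means "format unchanged, skip".
	static const Step upgradeSteps[] = { { "1.0", bumpTo31 }, { "3.1.1", bumpTo32 }, { "3.2.0", 0 } };

	inline Doc& upgrade( Doc& src )
	{
		const unsigned stepCount = sizeof(upgradeSteps)/sizeof(upgradeSteps[0]);
		unsigned firstStep = stepCount; // stays out of range if the version is unknown
		for ( unsigned i = 0; i < stepCount; ++i )
			if ( ::strcmp( src.version, upgradeSteps[i].versionString ) == 0 ) { firstStep = i; break; }

		// Run every remaining step in order, threading the document through each one.
		Doc* dest = &src;
		for ( unsigned j = firstStep; j < stepCount; ++j )
			if ( upgradeSteps[j].upgradeFunction )
				dest = &upgradeSteps[j].upgradeFunction( *dest );
		return *dest;
	}
}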
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Xml/SnRepXCoreSerializer.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SN_REPX_CORE_SERIALIZER_H #define SN_REPX_CORE_SERIALIZER_H /** \addtogroup RepXSerializers @{ */ #include "foundation/PxSimpleTypes.h" #include "SnRepXSerializerImpl.h" #if !PX_DOXYGEN namespace physx { #endif class XmlReader; class XmlMemoryAllocator; class XmlWriter; class MemoryBuffer; struct PX_DEPRECATED PxMaterialRepXSerializer : RepXSerializerImpl<PxMaterial> { PxMaterialRepXSerializer( PxAllocatorCallback& inCallback ) : RepXSerializerImpl<PxMaterial>( inCallback ) {} virtual PxMaterial* allocateObject( PxRepXInstantiationArgs& ); }; struct PX_DEPRECATED PxShapeRepXSerializer : public RepXSerializerImpl<PxShape> { PxShapeRepXSerializer( PxAllocatorCallback& inCallback ) : RepXSerializerImpl<PxShape>( inCallback ) {} virtual PxRepXObject fileToObject( XmlReader&, XmlMemoryAllocator&, PxRepXInstantiationArgs&, PxCollection* ); virtual PxShape* allocateObject( PxRepXInstantiationArgs& ) { return NULL; } }; struct PX_DEPRECATED PxBVH33TriangleMeshRepXSerializer : public RepXSerializerImpl<PxBVH33TriangleMesh> { PxBVH33TriangleMeshRepXSerializer( PxAllocatorCallback& inCallback ) : RepXSerializerImpl<PxBVH33TriangleMesh>( inCallback ) {} virtual void objectToFileImpl( const PxBVH33TriangleMesh*, PxCollection*, XmlWriter&, MemoryBuffer&, PxRepXInstantiationArgs& ); virtual PxRepXObject fileToObject( XmlReader&, XmlMemoryAllocator&, PxRepXInstantiationArgs&, PxCollection* ); virtual PxBVH33TriangleMesh* allocateObject( PxRepXInstantiationArgs& ) { return NULL; } }; struct PX_DEPRECATED PxBVH34TriangleMeshRepXSerializer : public RepXSerializerImpl<PxBVH34TriangleMesh> { PxBVH34TriangleMeshRepXSerializer( PxAllocatorCallback& inCallback ) : RepXSerializerImpl<PxBVH34TriangleMesh>( inCallback ) {} virtual void objectToFileImpl( const PxBVH34TriangleMesh*, PxCollection*, XmlWriter&, MemoryBuffer&, PxRepXInstantiationArgs& ); virtual 
PxRepXObject fileToObject( XmlReader&, XmlMemoryAllocator&, PxRepXInstantiationArgs&, PxCollection* ); virtual PxBVH34TriangleMesh* allocateObject( PxRepXInstantiationArgs& ) { return NULL; } }; struct PX_DEPRECATED PxHeightFieldRepXSerializer : public RepXSerializerImpl<PxHeightField> { PxHeightFieldRepXSerializer( PxAllocatorCallback& inCallback ) : RepXSerializerImpl<PxHeightField>( inCallback ) {} virtual void objectToFileImpl( const PxHeightField*, PxCollection*, XmlWriter&, MemoryBuffer&, PxRepXInstantiationArgs& ); virtual PxRepXObject fileToObject( XmlReader&, XmlMemoryAllocator&, PxRepXInstantiationArgs&, PxCollection* ); virtual PxHeightField* allocateObject( PxRepXInstantiationArgs& ) { return NULL; } }; struct PX_DEPRECATED PxConvexMeshRepXSerializer : public RepXSerializerImpl<PxConvexMesh> { PxConvexMeshRepXSerializer( PxAllocatorCallback& inCallback ) : RepXSerializerImpl<PxConvexMesh>( inCallback ) {} virtual void objectToFileImpl( const PxConvexMesh*, PxCollection*, XmlWriter&, MemoryBuffer&, PxRepXInstantiationArgs& ); virtual PxRepXObject fileToObject( XmlReader&, XmlMemoryAllocator&, PxRepXInstantiationArgs&, PxCollection* ); virtual PxConvexMesh* allocateObject( PxRepXInstantiationArgs& ) { return NULL; } }; struct PX_DEPRECATED PxRigidStaticRepXSerializer : public RepXSerializerImpl<PxRigidStatic> { PxRigidStaticRepXSerializer( PxAllocatorCallback& inCallback ) : RepXSerializerImpl<PxRigidStatic>( inCallback ) {} virtual PxRigidStatic* allocateObject( PxRepXInstantiationArgs& ); }; struct PX_DEPRECATED PxRigidDynamicRepXSerializer : public RepXSerializerImpl<PxRigidDynamic> { PxRigidDynamicRepXSerializer( PxAllocatorCallback& inCallback ) : RepXSerializerImpl<PxRigidDynamic>( inCallback ) {} virtual PxRigidDynamic* allocateObject( PxRepXInstantiationArgs& ); }; struct PX_DEPRECATED PxArticulationReducedCoordinateRepXSerializer : public RepXSerializerImpl<PxArticulationReducedCoordinate> { PxArticulationReducedCoordinateRepXSerializer(PxAllocatorCallback& inCallback) : RepXSerializerImpl<PxArticulationReducedCoordinate>(inCallback) {} virtual void objectToFileImpl(const PxArticulationReducedCoordinate*, PxCollection*, XmlWriter&, MemoryBuffer&, PxRepXInstantiationArgs&); virtual PxArticulationReducedCoordinate* allocateObject(PxRepXInstantiationArgs&); }; struct PX_DEPRECATED PxAggregateRepXSerializer : public RepXSerializerImpl<PxAggregate> { PxAggregateRepXSerializer( PxAllocatorCallback& inCallback ) : RepXSerializerImpl<PxAggregate>( inCallback ) {} virtual void objectToFileImpl( const PxAggregate*, PxCollection*, XmlWriter& , MemoryBuffer&, PxRepXInstantiationArgs& ); virtual PxRepXObject fileToObject( XmlReader&, XmlMemoryAllocator&, PxRepXInstantiationArgs&, PxCollection* ); virtual PxAggregate* allocateObject( PxRepXInstantiationArgs& ) { return NULL; } }; #if !PX_DOXYGEN } // namespace physx #endif #endif /** @} */
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Xml/SnXmlMemoryPool.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SN_XML_MEMORY_POOL_H #define SN_XML_MEMORY_POOL_H #include "foundation/PxAssert.h" #include "foundation/PxArray.h" #include "PxProfileAllocatorWrapper.h" namespace physx { using namespace physx::profile; /** * Linked list used to store next node ptr. */ struct SMemPoolNode { SMemPoolNode* mNextNode; }; /** * Template arguments are powers of two. * A very fast memory pool that is not memory efficient. It contains a vector of pointers * to blocks of memory along with a linked list of free sections. All sections are * of the same size so allocating memory is very fast, there isn't a linear search * through blocks of indeterminate size. It also means there is memory wasted * when objects aren't sized to powers of two. */ template<PxU8 TItemSize , PxU8 TItemCount > class CMemoryPool { typedef PxProfileArray<PxU8*> TPxU8PtrList; PxProfileAllocatorWrapper& mWrapper; TPxU8PtrList mAllMemory; SMemPoolNode* mFirstFreeNode; public: CMemoryPool(PxProfileAllocatorWrapper& inWrapper) : mWrapper( inWrapper ) , mAllMemory( inWrapper ) , mFirstFreeNode( NULL ) {} ~CMemoryPool() { TPxU8PtrList::ConstIterator theEnd = mAllMemory.end(); for ( TPxU8PtrList::ConstIterator theIter = mAllMemory.begin(); theIter != theEnd; ++theIter ) { PxU8* thePtr = *theIter; mWrapper.getAllocator().deallocate( thePtr ); } mAllMemory.clear(); mFirstFreeNode = NULL; } //Using deallocated memory to hold the pointers to the next amount of memory. PxU8* allocate() { if ( mFirstFreeNode ) { PxU8* retval = reinterpret_cast<PxU8*>(mFirstFreeNode); mFirstFreeNode = mFirstFreeNode->mNextNode; return retval; } PxU32 itemSize = GetItemSize(); PxU32 itemCount = 1 << TItemCount; //No free nodes, make some more. 
PxU8* retval = reinterpret_cast<PxU8*>(mWrapper.getAllocator().allocate( itemCount * itemSize, "RepX fixed-size memory pool", PX_FL)); PxU8* dataPtr = retval + itemSize; //Free extra chunks for( PxU32 idx = 1; idx < itemCount; ++idx, dataPtr += itemSize ) deallocate( dataPtr ); mAllMemory.pushBack(retval); return retval; } void deallocate( PxU8* inData ) { SMemPoolNode* nodePtr = reinterpret_cast<SMemPoolNode*>(inData); nodePtr->mNextNode = mFirstFreeNode; mFirstFreeNode = nodePtr; } //We have to have at least a pointer's worth of memory inline PxU32 GetItemSize() { return sizeof(SMemPoolNode) << TItemSize; } }; typedef PxU32 TMemAllocSizeType; struct SVariableMemPoolNode : SMemPoolNode { TMemAllocSizeType mSize; SVariableMemPoolNode* NextNode() { return static_cast< SVariableMemPoolNode* >( mNextNode ); } }; /** * Manages variable sized allocations. * Keeps track of freed allocations in a insertion sorted * list. Allocating new memory traverses the list linearly. * This object will split nodes if the node is more than * twice as large as the request memory allocation. */ class CVariableMemoryPool { typedef PxProfileHashMap<TMemAllocSizeType, SVariableMemPoolNode*> TFreeNodeMap; typedef PxProfileArray<PxU8*> TPxU8PtrList; PxProfileAllocatorWrapper& mWrapper; TPxU8PtrList mAllMemory; TFreeNodeMap mFreeNodeMap; PxU32 mMinAllocationSize; CVariableMemoryPool &operator=(const CVariableMemoryPool &); public: CVariableMemoryPool(PxProfileAllocatorWrapper& inWrapper, PxU32 inMinAllocationSize = 0x20 ) : mWrapper( inWrapper ) , mAllMemory( inWrapper ) , mFreeNodeMap( inWrapper) , mMinAllocationSize( inMinAllocationSize ) {} ~CVariableMemoryPool() { TPxU8PtrList::ConstIterator theEnd = mAllMemory.end(); for ( TPxU8PtrList::ConstIterator theIter = mAllMemory.begin(); theIter != theEnd; ++theIter ) { PxU8* thePtr = *theIter; mWrapper.getAllocator().deallocate( thePtr ); } mAllMemory.clear(); mFreeNodeMap.clear(); } PxU8* MarkMem( PxU8* inMem, TMemAllocSizeType inSize ) { PX_ASSERT( inSize >= sizeof( SVariableMemPoolNode ) ); SVariableMemPoolNode* theMem = reinterpret_cast<SVariableMemPoolNode*>( inMem ); theMem->mSize = inSize; return reinterpret_cast< PxU8* >( theMem + 1 ); } //Using deallocated memory to hold the pointers to the next amount of memory. PxU8* allocate( PxU32 size ) { //Ensure we can place the size of the memory at the start //of the memory block. //Kai: to reduce the size of hash map, the requested size is aligned to 128 bytes PxU32 theRequestedSize = (size + sizeof(SVariableMemPoolNode) + 127) & ~127; TFreeNodeMap::Entry* entry = const_cast<TFreeNodeMap::Entry*>( mFreeNodeMap.find( theRequestedSize ) ); if ( NULL != entry ) { SVariableMemPoolNode* theNode = entry->second; PX_ASSERT( NULL != theNode ); PX_ASSERT( theNode->mSize == theRequestedSize ); entry->second = theNode->NextNode(); if (entry->second == NULL) mFreeNodeMap.erase( theRequestedSize ); return reinterpret_cast< PxU8* >( theNode + 1 ); } if ( theRequestedSize < mMinAllocationSize ) theRequestedSize = mMinAllocationSize; //No large enough free nodes, make some more. PxU8* retval = reinterpret_cast<PxU8*>(mWrapper.getAllocator().allocate( size_t(theRequestedSize), "RepX variable sized memory pool", PX_FL)); //If we allocated it, we free it. mAllMemory.pushBack( retval ); return MarkMem( retval, theRequestedSize ); } //The size is stored at the beginning of the memory block. 
void deallocate( PxU8* inData ) { SVariableMemPoolNode* theData = reinterpret_cast< SVariableMemPoolNode* >( inData ) - 1; TMemAllocSizeType theSize = theData->mSize; AddFreeMem( reinterpret_cast< PxU8* >( theData ), theSize ); } void CheckFreeListInvariant( SVariableMemPoolNode* inNode ) { if ( inNode && inNode->mNextNode ) { PX_ASSERT( inNode->mSize <= inNode->NextNode()->mSize ); } } void AddFreeMem( PxU8* inMemory, TMemAllocSizeType inSize ) { PX_ASSERT( inSize >= sizeof( SVariableMemPoolNode ) ); SVariableMemPoolNode* theNewNode = reinterpret_cast< SVariableMemPoolNode* >( inMemory ); theNewNode->mNextNode = NULL; theNewNode->mSize = inSize; TFreeNodeMap::Entry* entry = const_cast<TFreeNodeMap::Entry*>( mFreeNodeMap.find( inSize ) ); if (NULL != entry) { theNewNode->mNextNode = entry->second; entry->second = theNewNode; } else { mFreeNodeMap.insert( inSize, theNewNode ); } } }; /** * The manager keeps a list of memory pools for different sizes of allocations. * Anything too large simply gets allocated using the new operator. * This doesn't mark the memory with the size of the allocated memory thus * allowing much more efficient allocation of small items. For large enough * allocations, it does mark the size. * * When using as a general memory manager, you need to wrap this class with * something that actually does mark the returned allocation with the size * of the allocation. */ class CMemoryPoolManager { CMemoryPoolManager &operator=(const CMemoryPoolManager &); public: PxProfileAllocatorWrapper mWrapper; //CMemoryPool<0,8> m0ItemPool; //CMemoryPool<1,8> m1ItemPool; //CMemoryPool<2,8> m2ItemPool; //CMemoryPool<3,8> m3ItemPool; //CMemoryPool<4,8> m4ItemPool; //CMemoryPool<5,8> m5ItemPool; //CMemoryPool<6,8> m6ItemPool; //CMemoryPool<7,8> m7ItemPool; //CMemoryPool<8,8> m8ItemPool; CVariableMemoryPool mVariablePool; CMemoryPoolManager( PxAllocatorCallback& inAllocator ) : mWrapper( inAllocator ) //, m0ItemPool( mWrapper ) //, m1ItemPool( mWrapper ) //, m2ItemPool( mWrapper ) //, m3ItemPool( mWrapper ) //, m4ItemPool( mWrapper ) //, m5ItemPool( mWrapper ) //, m6ItemPool( mWrapper ) //, m7ItemPool( mWrapper ) //, m8ItemPool( mWrapper ) , mVariablePool( mWrapper ) { } PxProfileAllocatorWrapper& getWrapper() { return mWrapper; } inline PxU8* allocate( PxU32 inSize ) { /* if ( inSize <= m0ItemPool.GetItemSize() ) return m0ItemPool.allocate(); if ( inSize <= m1ItemPool.GetItemSize() ) return m1ItemPool.allocate(); if ( inSize <= m2ItemPool.GetItemSize() ) return m2ItemPool.allocate(); if ( inSize <= m3ItemPool.GetItemSize() ) return m3ItemPool.allocate(); if ( inSize <= m4ItemPool.GetItemSize() ) return m4ItemPool.allocate(); if ( inSize <= m5ItemPool.GetItemSize() ) return m5ItemPool.allocate(); if ( inSize <= m6ItemPool.GetItemSize() ) return m6ItemPool.allocate(); if ( inSize <= m7ItemPool.GetItemSize() ) return m7ItemPool.allocate(); if ( inSize <= m8ItemPool.GetItemSize() ) return m8ItemPool.allocate(); */ return mVariablePool.allocate( inSize ); } inline void deallocate( PxU8* inMemory ) { if ( inMemory == NULL ) return; /* if ( inSize <= m0ItemPool.GetItemSize() ) m0ItemPool.deallocate(inMemory); else if ( inSize <= m1ItemPool.GetItemSize() ) m1ItemPool.deallocate(inMemory); else if ( inSize <= m2ItemPool.GetItemSize() ) m2ItemPool.deallocate(inMemory); else if ( inSize <= m3ItemPool.GetItemSize() ) m3ItemPool.deallocate(inMemory); else if ( inSize <= m4ItemPool.GetItemSize() ) m4ItemPool.deallocate(inMemory); else if ( inSize <= m5ItemPool.GetItemSize() ) 
m5ItemPool.deallocate(inMemory); else if ( inSize <= m6ItemPool.GetItemSize() ) m6ItemPool.deallocate(inMemory); else if ( inSize <= m7ItemPool.GetItemSize() ) m7ItemPool.deallocate(inMemory); else if ( inSize <= m8ItemPool.GetItemSize() ) m8ItemPool.deallocate(inMemory); else */ mVariablePool.deallocate(inMemory); } /** * allocate an object. Calls constructor on the new memory. */ template<typename TObjectType> inline TObjectType* allocate() { TObjectType* retval = reinterpret_cast<TObjectType*>( allocate( sizeof(TObjectType) ) ); new (retval)TObjectType(); return retval; } /** * deallocate an object calling the destructor on the object. * This *must* be the concrete type, it cannot be a generic type. */ template<typename TObjectType> inline void deallocate( TObjectType* inObject ) { inObject->~TObjectType(); deallocate( reinterpret_cast<PxU8*>(inObject) ); } /** * allocate an object. Calls constructor on the new memory. */ template<typename TObjectType> inline TObjectType* BatchAllocate(PxU32 inCount ) { TObjectType* retval = reinterpret_cast<TObjectType*>( allocate( sizeof(TObjectType) * inCount ) ); return retval; } /** * deallocate an object calling the destructor on the object. * This *must* be the concrete type, it cannot be a generic type. */ template<typename TObjectType> inline void BatchDeallocate( TObjectType* inObject, PxU32 inCount ) { PX_UNUSED(inCount); deallocate( reinterpret_cast<PxU8*>(inObject) ); } }; } #endif
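// ---------------------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original header): driving CMemoryPoolManager
// directly. The allocator callback is assumed to be supplied by the caller (inside the
// serializer the manager is owned by the XML memory allocator); MyRecord is a hypothetical
// payload type.
namespace xml_memory_pool_sketch
{
	struct MyRecord
	{
		physx::PxU32 mId;
		physx::PxU32 mFlags;
		MyRecord() : mId(0), mFlags(0) {}
	};

	inline void exercisePool( physx::PxAllocatorCallback& callback )
	{
		physx::CMemoryPoolManager manager( callback );

		// Typed allocate() runs the constructor on memory taken from the variable pool,
		// which rounds the request (plus its size header) up to a 128 byte bucket.
		MyRecord* record = manager.allocate<MyRecord>();
		record->mId = 42;

		// Typed deallocate() runs the destructor and pushes the block onto the free list
		// for its bucket size, so a later allocation of the same size reuses it.
		manager.deallocate( record );

		// Batch variants hand out raw arrays without running any constructors.
		MyRecord* records = manager.BatchAllocate<MyRecord>( 16 );
		manager.BatchDeallocate( records, 16 );
	}
}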
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Xml/SnXmlSerializer.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SN_XML_SERIALIZER_H #define SN_XML_SERIALIZER_H #include "PxExtensionMetaDataObjects.h" #include "SnXmlVisitorWriter.h" namespace physx { namespace Sn { void writeHeightFieldSample( PxOutputStream& inStream, const PxHeightFieldSample& inSample ) { PxU32 retval = 0; PxU8* writePtr( reinterpret_cast< PxU8*>( &retval ) ); const PxU8* inPtr( reinterpret_cast<const PxU8*>( &inSample ) ); if ( isBigEndian() ) { //Height field samples are a //16 bit integer followed by two bytes. //right now, data is 2 1 3 4 //We need a 32 bit integer that //when read in by a LE system is 4 3 2 1. 
//Thus, we need a BE integer that looks like: //4 3 2 1 writePtr[0] = inPtr[3]; writePtr[1] = inPtr[2]; writePtr[2] = inPtr[0]; writePtr[3] = inPtr[1]; } else { writePtr[0] = inPtr[0]; writePtr[1] = inPtr[1]; writePtr[2] = inPtr[2]; writePtr[3] = inPtr[3]; } inStream << retval; } template<typename TDataType, typename TWriteOperator> inline void writeStridedBufferProperty( XmlWriter& writer, MemoryBuffer& tempBuffer, const char* inPropName, const void* inData, PxU32 inStride, PxU32 inCount, PxU32 inItemsPerLine, TWriteOperator inOperator ) { PX_ASSERT( inStride == 0 || inStride == sizeof( TDataType ) ); PX_UNUSED( inStride ); writeBuffer( writer, tempBuffer , inItemsPerLine, reinterpret_cast<const TDataType*>( inData ) , inCount, inPropName, inOperator ); } template<typename TDataType, typename TWriteOperator> inline void writeStridedBufferProperty( XmlWriter& writer, MemoryBuffer& tempBuffer, const char* inPropName, const PxStridedData& inData, PxU32 inCount, PxU32 inItemsPerLine, TWriteOperator inOperator ) { writeStridedBufferProperty<TDataType>( writer, tempBuffer, inPropName, inData.data, inData.stride, inCount, inItemsPerLine, inOperator ); } template<typename TDataType, typename TWriteOperator> inline void writeStridedBufferProperty( XmlWriter& writer, MemoryBuffer& tempBuffer, const char* inPropName, const PxTypedStridedData<TDataType>& inData, PxU32 inCount, PxU32 inItemsPerLine, TWriteOperator inOperator ) { writeStridedBufferProperty<TDataType>( writer, tempBuffer, inPropName, inData.data, inData.stride, inCount, inItemsPerLine, inOperator ); } template<typename TDataType, typename TWriteOperator> inline void writeStridedBufferProperty( XmlWriter& writer, MemoryBuffer& tempBuffer, const char* inPropName, const PxBoundedData& inData, PxU32 inItemsPerLine, TWriteOperator inWriteOperator ) { writeStridedBufferProperty<TDataType>( writer, tempBuffer, inPropName, inData, inData.count, inItemsPerLine, inWriteOperator ); } template<typename TDataType, typename TWriteOperator> inline void writeStridedBufferProperty( XmlWriter& writer, MemoryBuffer& tempBuffer, const char* inPropName, PxStrideIterator<const TDataType>& inData, PxU32 inCount, PxU32 inItemsPerLine, TWriteOperator inWriteOperator ) { writeStrideBuffer<TDataType>(writer, tempBuffer , inItemsPerLine, inData, PtrAccess<TDataType> , inCount, inPropName, inData.stride(), inWriteOperator ); } template<typename TDataType> inline void writeStridedFlagsProperty( XmlWriter& writer, MemoryBuffer& tempBuffer, const char* inPropName, PxStrideIterator<const TDataType>& inData, PxU32 inCount, PxU32 inItemsPerLine, const PxU32ToName* inTable ) { writeStrideFlags<TDataType>(writer, tempBuffer , inItemsPerLine, inData, PtrAccess<TDataType> , inCount, inPropName, inTable ); } } } #endif
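// ---------------------------------------------------------------------------------------
// Illustrative sketch (not part of the original header): the byte reordering performed by
// writeHeightFieldSample() above, extracted onto a plain 4-byte buffer so it can be
// exercised without PhysX types. The indices mirror the big-endian branch; on little-endian
// hosts the bytes pass through unchanged.
#include <cstdint>

namespace heightfield_sample_sketch
{
	inline std::uint32_t packSampleBytes( const std::uint8_t in[4], bool bigEndianHost )
	{
		std::uint32_t out = 0;
		std::uint8_t* w = reinterpret_cast<std::uint8_t*>( &out );
		if ( bigEndianHost )
		{
			// Same index shuffle as the big-endian branch of writeHeightFieldSample.
			w[0] = in[3]; w[1] = in[2]; w[2] = in[0]; w[3] = in[1];
		}
		else
		{
			// Little-endian hosts already store the sample in the expected order.
			w[0] = in[0]; w[1] = in[1]; w[2] = in[2]; w[3] = in[3];
		}
		return out;
	}
}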
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Xml/SnRepXCollection.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SN_REPX_COLLECTION_H #define SN_REPX_COLLECTION_H #include "common/PxTolerancesScale.h" #include "extensions/PxRepXSerializer.h" namespace physx { namespace Sn { struct XmlNode; struct RepXCollectionItem { PxRepXObject liveObject; XmlNode* descriptor; RepXCollectionItem( PxRepXObject inItem = PxRepXObject(), XmlNode* inDescriptor = NULL ) : liveObject( inItem ) , descriptor( inDescriptor ) { } }; struct RepXDefaultEntry { const char* name; const char* value; RepXDefaultEntry( const char* pn, const char* val ) : name( pn ), value( val ){} }; /** * The result of adding an object to the collection. */ struct RepXAddToCollectionResult { enum Enum { Success, SerializerNotFound, InvalidParameters, //Null data passed in. AlreadyInCollection }; PxSerialObjectId collectionId; Enum result; RepXAddToCollectionResult( Enum inResult = Success, const PxSerialObjectId inId = 0 ) : collectionId( inId ) , result( inResult ) { } bool isValid() { return result == Success && collectionId != 0; } }; /** * A RepX collection contains a set of static data objects that can be transformed * into live objects. It uses RepX serializer to do two transformations: * live object <-> collection object (descriptor) * collection object <-> file system. * * A live object is considered to be something live in the physics * world such as a material or a rigidstatic. * * A collection object is a piece of data from which a live object * of identical characteristics can be created. * * Clients need to pass PxCollection so that objects can resolve * references. In addition, objects must be added in an order such that * references can be resolved in the first place. So objects must be added * to the collection *after* objects they are dependent upon. 
* * When deserializing from a file, the collection will allocate char*'s that will * not be freed when the collection itself is freed. The user must be responsible * for these character allocations. */ class RepXCollection { protected: virtual ~RepXCollection(){} public: virtual void destroy() = 0; /** * Set the scale on this collection. The scale is saved with the collection. * * If the scale wasn't set, it will be invalid. */ virtual void setTolerancesScale( const PxTolerancesScale& inScale ) = 0; /** * Get the scale that was set at collection creation time or at load time. * If this is a loaded file and the source data does not contain a scale * this value will be invalid (PxTolerancesScale::isValid()). */ virtual PxTolerancesScale getTolerancesScale() const = 0; /** * Set the up vector on this collection. The up vector is saved with the collection. * * If the up vector wasn't set, it will be (0,0,0). */ virtual void setUpVector( const PxVec3& inUpVector ) = 0; /** * If the up vector wasn't set, it will be (0,0,0). Else this will be the up vector * optionally set when the collection was created. */ virtual PxVec3 getUpVector() const = 0; virtual const char* getVersion() = 0; static const char* getLatestVersion(); //Necessary accessor functions for translation/upgrading. virtual const RepXCollectionItem* begin() const = 0; virtual const RepXCollectionItem* end() const = 0; //Performs a deep copy of the repx node. virtual XmlNode* copyRepXNode( const XmlNode* srcNode ) = 0; virtual void addCollectionItem( RepXCollectionItem inItem ) = 0; //Create a new repx node with this name. Its value is unset. virtual XmlNode& createRepXNode( const char* name ) = 0; virtual RepXCollection& createCollection( const char* inVersionStr ) = 0; //Release this when finished. virtual XmlReaderWriter& createNodeEditor() = 0; virtual PxAllocatorCallback& getAllocator() = 0; virtual bool instantiateCollection( PxRepXInstantiationArgs& inArgs, PxCollection& inPxCollection ) = 0; virtual RepXAddToCollectionResult addRepXObjectToCollection( const PxRepXObject& inObject, PxCollection* inCollection, PxRepXInstantiationArgs& inArgs ) = 0; /** * Save this collection out to a file stream. Uses the RepX serialize to perform * collection object->file conversions. * * /param[in] inStream Write-only stream to save collection out to. */ virtual void save( PxOutputStream& inStream ) = 0; }; } } #endif
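// ---------------------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original header): persisting a collection
// through the interface declared above. How the RepXCollection instance and the output
// stream are obtained is out of scope here; both are assumed to be provided by the caller.
namespace repx_collection_sketch
{
	inline void saveAndRelease( physx::Sn::RepXCollection& collection,
	                            physx::PxOutputStream& stream,
	                            const physx::PxTolerancesScale& scale )
	{
		collection.setTolerancesScale( scale );                       // saved with the collection
		collection.setUpVector( physx::PxVec3( 0.0f, 1.0f, 0.0f ) );  // optional, defaults to (0,0,0)
		collection.save( stream );                                    // collection object -> file transform
		collection.destroy();                                         // collections are released via destroy()
	}
}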
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Xml/SnPxStreamOperators.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SN_PX_STREAM_OPERATORS_H #define SN_PX_STREAM_OPERATORS_H #include "foundation/PxVec3.h" #include "foundation/PxTransform.h" #include "foundation/PxBounds3.h" #include "foundation/PxString.h" #include "PxFiltering.h" namespace physx { static inline PxU32 strLenght( const char* inStr ) { return inStr ? PxU32(strlen(inStr)) : 0; } } namespace physx // ADL requires we put the operators in the same namespace as the underlying type of PxOutputStream { inline PxOutputStream& operator << ( PxOutputStream& ioStream, const char* inString ) { if ( inString && *inString ) { ioStream.write( inString, PxU32(strlen(inString)) ); } return ioStream; } template<typename TDataType> inline PxOutputStream& toStream( PxOutputStream& ioStream, const char* inFormat, const TDataType inData ) { char buffer[128] = { 0 }; Pxsnprintf( buffer, 128, inFormat, inData ); ioStream << buffer; return ioStream; } struct endl_obj {}; //static endl_obj endl; inline PxOutputStream& operator << ( PxOutputStream& ioStream, bool inData ) { ioStream << (inData ? 
"true" : "false"); return ioStream; } inline PxOutputStream& operator << ( PxOutputStream& ioStream, PxI32 inData ) { return toStream( ioStream, "%d", inData ); } inline PxOutputStream& operator << ( PxOutputStream& ioStream, PxU16 inData ) { return toStream( ioStream, "%u", PxU32(inData) ); } inline PxOutputStream& operator << ( PxOutputStream& ioStream, PxU8 inData ) { return toStream( ioStream, "%u", PxU32(inData) ); } inline PxOutputStream& operator << ( PxOutputStream& ioStream, char inData ) { return toStream( ioStream, "%c", inData ); } inline PxOutputStream& operator << ( PxOutputStream& ioStream, PxU32 inData ) { return toStream( ioStream, "%u", inData ); } inline PxOutputStream& operator << ( PxOutputStream& ioStream, PxU64 inData ) { return toStream( ioStream, "%" PX_PRIu64, inData ); } inline PxOutputStream& operator << ( PxOutputStream& ioStream, const void* inData ) { return ioStream << static_cast<uint64_t>(size_t(inData)); } inline PxOutputStream& operator << ( PxOutputStream& ioStream, PxF32 inData ) { return toStream( ioStream, "%g", PxF64(inData) ); } inline PxOutputStream& operator << ( PxOutputStream& ioStream, PxF64 inData ) { return toStream( ioStream, "%g", inData ); } inline PxOutputStream& operator << ( PxOutputStream& ioStream, endl_obj) { return ioStream << "\n"; } inline PxOutputStream& operator << ( PxOutputStream& ioStream, const PxVec3& inData ) { ioStream << inData[0]; ioStream << " "; ioStream << inData[1]; ioStream << " "; ioStream << inData[2]; return ioStream; } inline PxOutputStream& operator << ( PxOutputStream& ioStream, const PxQuat& inData ) { ioStream << inData.x; ioStream << " "; ioStream << inData.y; ioStream << " "; ioStream << inData.z; ioStream << " "; ioStream << inData.w; return ioStream; } inline PxOutputStream& operator << ( PxOutputStream& ioStream, const PxTransform& inData ) { ioStream << inData.q; ioStream << " "; ioStream << inData.p; return ioStream; } inline PxOutputStream& operator << ( PxOutputStream& ioStream, const PxBounds3& inData ) { ioStream << inData.minimum; ioStream << " "; ioStream << inData.maximum; return ioStream; } inline PxOutputStream& operator << ( PxOutputStream& ioStream, const PxFilterData& inData ) { ioStream << inData.word0 << " " << inData.word1 << " " << inData.word2 << " " << inData.word3; return ioStream; } inline PxOutputStream& operator << ( PxOutputStream& ioStream, struct PxMetaDataPlane& inData ) { ioStream << inData.normal; ioStream << " "; ioStream << inData.distance; return ioStream; } } #endif
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Xml/SnJointRepXSerializer.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #include "PxMetaDataObjects.h" #include "PxExtensionMetaDataObjects.h" #include "ExtJointMetaDataExtensions.h" #include "SnJointRepXSerializer.h" namespace physx { template<typename TJointType> inline TJointType* createJoint( PxPhysics& physics, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1 ) { PX_UNUSED(physics); PX_UNUSED(actor0); PX_UNUSED(actor1); PX_UNUSED(localFrame0); PX_UNUSED(localFrame1); return NULL; } template<> inline PxD6Joint* createJoint<PxD6Joint>(PxPhysics& physics, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1) { return PxD6JointCreate( physics, actor0, localFrame0, actor1, localFrame1 ); } template<> inline PxDistanceJoint* createJoint<PxDistanceJoint>(PxPhysics& physics, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1) { return PxDistanceJointCreate( physics, actor0, localFrame0, actor1, localFrame1 ); } template<> inline PxContactJoint* createJoint<PxContactJoint>(PxPhysics& physics, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1) { return PxContactJointCreate( physics, actor0, localFrame0, actor1, localFrame1 ); } template<> inline PxFixedJoint* createJoint<PxFixedJoint>(PxPhysics& physics, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1) { return PxFixedJointCreate( physics, actor0, localFrame0, actor1, localFrame1 ); } template<> inline PxPrismaticJoint* createJoint<PxPrismaticJoint>(PxPhysics& physics, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1) { return PxPrismaticJointCreate( physics, actor0, localFrame0, actor1, localFrame1 ); } template<> inline 
PxRevoluteJoint* createJoint<PxRevoluteJoint>(PxPhysics& physics, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1) { return PxRevoluteJointCreate( physics, actor0, localFrame0, actor1, localFrame1 ); } template<> inline PxSphericalJoint* createJoint<PxSphericalJoint>(PxPhysics& physics, PxRigidActor* actor0, const PxTransform& localFrame0, PxRigidActor* actor1, const PxTransform& localFrame1) { return PxSphericalJointCreate( physics, actor0, localFrame0, actor1, localFrame1 ); } template<typename TJointType> PxRepXObject PxJointRepXSerializer<TJointType>::fileToObject( XmlReader& inReader, XmlMemoryAllocator& inAllocator, PxRepXInstantiationArgs& inArgs, PxCollection* inCollection ) { PxRigidActor* actor0 = NULL; PxRigidActor* actor1 = NULL; PxTransform localPose0 = PxTransform(PxIdentity); PxTransform localPose1 = PxTransform(PxIdentity); bool ok = true; if ( inReader.gotoChild( "Actors" ) ) { ok = readReference<PxRigidActor>( inReader, *inCollection, "actor0", actor0 ); ok &= readReference<PxRigidActor>( inReader, *inCollection, "actor1", actor1 ); inReader.leaveChild(); } TJointType* theJoint = !ok ? NULL : createJoint<TJointType>( inArgs.physics, actor0, localPose0, actor1, localPose1 ); if ( theJoint ) { PxConstraint* constraint = theJoint->getConstraint(); PX_ASSERT( constraint ); inCollection->add( *constraint ); this->fileToObjectImpl( theJoint, inReader, inAllocator, inArgs, inCollection ); } return PxCreateRepXObject(theJoint); } template<typename TJointType> void PxJointRepXSerializer<TJointType>::objectToFileImpl( const TJointType* inObj, PxCollection* inCollection, XmlWriter& inWriter, MemoryBuffer& inTempBuffer, PxRepXInstantiationArgs& ) { writeAllProperties( inObj, inWriter, inTempBuffer, *inCollection ); } // explicit template instantiations template struct PxJointRepXSerializer<PxFixedJoint>; template struct PxJointRepXSerializer<PxDistanceJoint>; template struct PxJointRepXSerializer<PxContactJoint>; template struct PxJointRepXSerializer<PxD6Joint>; template struct PxJointRepXSerializer<PxPrismaticJoint>; template struct PxJointRepXSerializer<PxRevoluteJoint>; template struct PxJointRepXSerializer<PxSphericalJoint>; }
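// ---------------------------------------------------------------------------------------
// Illustrative sketch (not part of the original source): the specialization-based factory
// used by createJoint<>() above, reduced to plain C++. The primary template returns NULL for
// types without a creation routine; each supported type adds an explicit specialization.
// World and Hinge are hypothetical stand-ins for PxPhysics and a concrete joint type.
namespace joint_factory_sketch
{
	struct World {};
	struct Hinge { explicit Hinge( World& ) {} };

	// Primary template: unknown types cannot be created.
	template<typename TJointType>
	inline TJointType* create( World& ) { return 0; }

	// Explicit specialization: Hinge knows how to build itself from a World.
	template<>
	inline Hinge* create<Hinge>( World& world ) { return new Hinge( world ); }
}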
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Xml/SnSimpleXmlWriter.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SN_SIMPLE_XML_WRITER_H #define SN_SIMPLE_XML_WRITER_H #include "foundation/PxArray.h" #include "SnXmlMemoryPoolStreams.h" namespace physx { namespace Sn { class SimpleXmlWriter { public: struct STagWatcher { typedef SimpleXmlWriter TXmlWriterType; TXmlWriterType& mWriter; STagWatcher( const STagWatcher& inOther ); STagWatcher& operator-( const STagWatcher& inOther ); STagWatcher( TXmlWriterType& inWriter, const char* inTagName ) : mWriter( inWriter ) { mWriter.beginTag( inTagName ); } ~STagWatcher() { mWriter.endTag(); } protected: STagWatcher& operator=(const STagWatcher&); }; virtual ~SimpleXmlWriter(){} virtual void beginTag( const char* inTagname ) = 0; virtual void endTag() = 0; virtual void addAttribute( const char* inName, const char* inValue ) = 0; virtual void writeContentTag( const char* inTag, const char* inContent ) = 0; virtual void addContent( const char* inContent ) = 0; virtual PxU32 tabCount() = 0; private: SimpleXmlWriter& operator=(const SimpleXmlWriter&); }; template<typename TStreamType> class SimpleXmlWriterImpl : public SimpleXmlWriter { PxProfileAllocatorWrapper mWrapper; TStreamType& mStream; SimpleXmlWriterImpl( const SimpleXmlWriterImpl& inOther ); SimpleXmlWriterImpl& operator=( const SimpleXmlWriterImpl& inOther ); PxProfileArray<const char*> mTags; bool mTagOpen; PxU32 mInitialTagDepth; public: SimpleXmlWriterImpl( TStreamType& inStream, PxAllocatorCallback& inAllocator, PxU32 inInitialTagDepth = 0 ) : mWrapper( inAllocator ) , mStream( inStream ) , mTags( mWrapper ) , mTagOpen( false ) , mInitialTagDepth( inInitialTagDepth ) { } virtual ~SimpleXmlWriterImpl() { while( mTags.size() ) endTag(); } PxU32 tabCount() { return mTags.size() + mInitialTagDepth; } void writeTabs( PxU32 inSize ) { inSize += mInitialTagDepth; for ( PxU32 idx =0; idx < inSize; ++idx ) mStream << "\t"; } void beginTag( const char* 
inTagname ) { closeTag(); writeTabs(mTags.size()); mTags.pushBack( inTagname ); mStream << "<" << inTagname; mTagOpen = true; } void addAttribute( const char* inName, const char* inValue ) { PX_ASSERT( mTagOpen ); mStream << " " << inName << "=" << "\"" << inValue << "\""; } void closeTag(bool useNewline = true) { if ( mTagOpen ) { mStream << " " << ">"; if (useNewline ) mStream << "\n"; } mTagOpen = false; } void doEndOpenTag() { mStream << "</" << mTags.back() << ">" << "\n"; } void endTag() { PX_ASSERT( mTags.size() ); if ( mTagOpen ) mStream << " " << "/>" << "\n"; else { writeTabs(mTags.size()-1); doEndOpenTag(); } mTagOpen = false; mTags.popBack(); } static bool IsNormalizableWhitespace(char c) { return c == 0x9 || c == 0xA || c == 0xD; } static bool IsValidXmlCharacter(char c) { return IsNormalizableWhitespace(c) || c >= 0x20; } void addContent( const char* inContent ) { closeTag(false); //escape xml for( ; *inContent; inContent++ ) { switch (*inContent) { case '<': mStream << "&lt;"; break; case '>': mStream << "&gt;"; break; case '&': mStream << "&amp;"; break; case '\'': mStream << "&apos;"; break; case '"': mStream << "&quot;"; break; default: if (IsValidXmlCharacter(*inContent)) { if (IsNormalizableWhitespace(*inContent)) { char s[32]; Pxsnprintf(s, 32, "&#x%02X;", unsigned(*inContent)); mStream << s; } else mStream << *inContent; } break; } } } void writeContentTag( const char* inTag, const char* inContent ) { beginTag( inTag ); addContent( inContent ); doEndOpenTag(); mTags.popBack(); } void insertXml( const char* inXml ) { closeTag(); mStream << inXml; } }; struct BeginTag { const char* mTagName; BeginTag( const char* inTagName ) : mTagName( inTagName ) { } }; struct EndTag { EndTag() {} }; struct Att { const char* mAttName; const char* mAttValue; Att( const char* inAttName, const char* inAttValue ) : mAttName( inAttName ) , mAttValue( inAttValue ) { } }; struct Content { const char* mContent; Content( const char* inContent ) : mContent( inContent ) { } }; struct ContentTag { const char* mTagName; const char* mContent; ContentTag( const char* inTagName, const char* inContent ) : mTagName( inTagName ) , mContent( inContent ) { } }; inline SimpleXmlWriter& operator<<( SimpleXmlWriter& inWriter, const BeginTag& inTag ) { inWriter.beginTag( inTag.mTagName ); return inWriter; } inline SimpleXmlWriter& operator<<( SimpleXmlWriter& inWriter, const EndTag& inTag ) { PX_UNUSED(inTag); inWriter.endTag(); return inWriter; } inline SimpleXmlWriter& operator<<( SimpleXmlWriter& inWriter, const Att& inTag ) { inWriter.addAttribute(inTag.mAttName, inTag.mAttValue); return inWriter; } inline SimpleXmlWriter& operator<<( SimpleXmlWriter& inWriter, const Content& inTag ) { inWriter.addContent(inTag.mContent); return inWriter; } inline SimpleXmlWriter& operator<<( SimpleXmlWriter& inWriter, const ContentTag& inTag ) { inWriter.writeContentTag(inTag.mTagName, inTag.mContent); return inWriter; } inline void writeProperty( SimpleXmlWriter& inWriter, MemoryBuffer& tempBuffer, const char* inPropName ) { PxU8 data = 0; tempBuffer.write( &data, sizeof(PxU8) ); inWriter.writeContentTag( inPropName, reinterpret_cast<const char*>( tempBuffer.mBuffer ) ); tempBuffer.clear(); } template<typename TDataType> inline void writeProperty( SimpleXmlWriter& inWriter, MemoryBuffer& tempBuffer, const char* inPropName, TDataType inValue ) { tempBuffer << inValue; writeProperty( inWriter, tempBuffer, inPropName ); } } } #endif
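// ---------------------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original header): emitting a small XML fragment
// with SimpleXmlWriterImpl and STagWatcher. StdOutXmlStream is a hypothetical stand-in for
// the memory-pool streams the serializer normally uses; the writer only needs operator<< for
// 'const char*' and 'char'. The allocator callback is assumed to come from the caller.
#include <cstdio>

namespace simple_xml_writer_sketch
{
	struct StdOutXmlStream
	{
		StdOutXmlStream& operator<<( const char* s ) { ::fputs( s, stdout ); return *this; }
		StdOutXmlStream& operator<<( char c ) { ::fputc( c, stdout ); return *this; }
	};

	inline void writeFragment( physx::PxAllocatorCallback& callback )
	{
		StdOutXmlStream stream;
		physx::Sn::SimpleXmlWriterImpl<StdOutXmlStream> writer( stream, callback );
		{
			// STagWatcher opens the tag on construction and closes it when it leaves scope.
			physx::Sn::SimpleXmlWriter::STagWatcher actorTag( writer, "PxRigidDynamic" );
			writer.addAttribute( "Name", "crate" );
			writer.writeContentTag( "Mass", "10" );
		}
		// Emits (tab-indented):
		// <PxRigidDynamic Name="crate" >
		// 	<Mass >10</Mass>
		// </PxRigidDynamic>
	}
}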
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Xml/SnRepXUpgrader.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SN_REPX_UPGRADER_H #define SN_REPX_UPGRADER_H #include "foundation/PxSimpleTypes.h" namespace physx { namespace Sn { class RepXCollection; class RepXUpgrader { public: //If a new collection is created, the source collection is destroyed. //Thus you only need to release the new collection. //This holds for all of the upgrade functions. //So be aware, that the argument to these functions may not be valid //after they are called, but the return value always will be valid. static RepXCollection& upgradeCollection( RepXCollection& src ); static RepXCollection& upgrade10CollectionTo3_1Collection( RepXCollection& src ); static RepXCollection& upgrade3_1CollectionTo3_2Collection( RepXCollection& src ); static RepXCollection& upgrade3_2CollectionTo3_3Collection( RepXCollection& src ); static RepXCollection& upgrade3_3CollectionTo3_4Collection( RepXCollection& src ); static RepXCollection& upgrade3_4CollectionTo4_0Collection( RepXCollection& src ); }; } } #endif
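// ---------------------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original header): as the comment above notes,
// the upgrade functions may destroy the collection they are given and hand back a different
// one, so only the returned reference is safe to use afterwards. The loaded collection is
// assumed to come from the RepX deserialization path elsewhere in the serializer.
namespace repx_upgrader_sketch
{
	inline physx::Sn::RepXCollection& upgradeIfNeeded( physx::Sn::RepXCollection& loaded )
	{
		// Already-current collections are returned unchanged; older ones are upgraded
		// step by step and the original is destroyed along the way.
		return physx::Sn::RepXUpgrader::upgradeCollection( loaded );
	}
}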
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Xml/SnXmlVisitorReader.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SN_XML_VISITOR_READER_H #define SN_XML_VISITOR_READER_H #include "foundation/PxArray.h" #include "foundation/PxUtilities.h" #include "RepXMetaDataPropertyVisitor.h" #include "SnPxStreamOperators.h" #include "SnXmlMemoryPoolStreams.h" #include "SnXmlReader.h" #include "SnXmlImpl.h" #include "SnXmlMemoryAllocator.h" #include "SnXmlStringToType.h" namespace physx { namespace Sn { inline PxU32 findEnumByName( const char* inName, const PxU32ToName* inTable ) { for ( PxU32 idx = 0; inTable[idx].mName != NULL; ++idx ) { if ( physx::Pxstricmp( inTable[idx].mName, inName ) == 0 ) return inTable[idx].mValue; } return 0; } PX_INLINE void stringToFlagsType( const char* strData, XmlMemoryAllocator& alloc, PxU32& ioType, const PxU32ToName* inTable ) { if ( inTable == NULL ) return; ioType = 0; if ( strData && *strData) { //Destructively parse the string to get out the different flags. 
char* theValue = const_cast<char*>( copyStr( &alloc, strData ) ); char* theMarker = theValue; char* theNext = theValue; while( theNext && *theNext ) { ++theNext; if( *theNext == '|' ) { *theNext = 0; ++theNext; ioType |= static_cast< PxU32 > ( findEnumByName( theMarker, inTable ) ); theMarker = theNext; } } if ( theMarker && *theMarker ) ioType |= static_cast< PxU32 > ( findEnumByName( theMarker, inTable ) ); alloc.deallocate( reinterpret_cast<PxU8*>( theValue ) ); } } template<typename TDataType> PX_INLINE void stringToEnumType( const char* strData, TDataType& ioType, const PxU32ToName* inTable ) { ioType = static_cast<TDataType>( findEnumByName( strData, inTable ) ); } template<typename TDataType> PX_INLINE bool readProperty( XmlReader& inReader, const char* pname, TDataType& ioType ) { const char* value; if ( inReader.read( pname, value ) ) { stringToType( value, ioType ); return true; } return false; } template<typename TObjType> inline TObjType* findReferencedObject( PxCollection& collection, PxSerialObjectId id) { PX_ASSERT(id > 0); TObjType* outObject = static_cast<TObjType*>(const_cast<PxBase*>(collection.find(id))); if (outObject == NULL) { PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "PxSerialization::createCollectionFromXml: " "Reference to ID %d cannot be resolved. Make sure externalRefs collection is specified if required and " "check Xml file for completeness.", id); } return outObject; } template<typename TObjType> inline bool readReference( XmlReader& inReader, PxCollection& collection, TObjType*& outObject ) { PxSerialObjectId theId; const char* theValue = inReader.getCurrentItemValue(); strto( theId, theValue ); if( theId == 0) { // the NULL pointer is a valid pointer if the input id is 0 outObject = NULL; return true; } else { outObject = findReferencedObject<TObjType>(collection, theId); return outObject != NULL; } } template<typename TObjType> inline bool readReference( XmlReader& inReader, PxCollection& inCollection, const char* pname, TObjType*& outObject ) { outObject = NULL; PxSerialObjectId theId = 0; if (readProperty ( inReader, pname, theId ) && theId ) { outObject = findReferencedObject<TObjType>(inCollection, theId); } // the NULL pointer is a valid pointer if the input id is 0 return (outObject != NULL) || 0 == theId; } template<typename TEnumType, typename TStorageType> inline bool readFlagsProperty( XmlReader& reader, XmlMemoryAllocator& allocator, const char* pname, const PxU32ToName* inConversions, PxFlags<TEnumType,TStorageType>& outFlags ) { const char* value; if ( reader.read( pname, value ) ) { PxU32 tempValue = 0; stringToFlagsType( value, allocator, tempValue, inConversions ); outFlags = PxFlags<TEnumType,TStorageType>(PxTo16(tempValue) ); return true; } return false; } template<typename TObjType, typename TReaderType, typename TInfoType> inline void readComplexObj( TReaderType& oldVisitor, TObjType* inObj, TInfoType& info); template<typename TObjType, typename TReaderType> inline void readComplexObj( TReaderType& oldVisitor, TObjType* inObj); template<typename TReaderType, typename TGeomType> inline PxGeometry* parseGeometry( TReaderType& reader, TGeomType& /*inGeom*/) { PxAllocatorCallback& inAllocator = reader.mAllocator.getAllocator(); TGeomType* shape = PX_PLACEMENT_NEW((inAllocator.allocate(sizeof(TGeomType), "parseGeometry", PX_FL)), TGeomType); PxClassInfoTraits<TGeomType> info; readComplexObj( reader, shape); return shape; } template<typename TReaderType> inline void parseShape( TReaderType& visitor, PxGeometry*& 
outResult, PxArray<PxMaterial*>& outMaterials) { XmlReader& theReader( visitor.mReader ); PxCollection& collection = visitor.mCollection; visitor.pushCurrentContext(); if ( visitor.gotoTopName() ) { visitor.pushCurrentContext(); if ( visitor.gotoChild( "Materials" ) ) { for( bool matSuccess = visitor.gotoFirstChild(); matSuccess; matSuccess = visitor.gotoNextSibling() ) { PxMaterial* material = NULL; if(!readReference<PxMaterial>( theReader, collection, material )) visitor.mHadError = true; if ( material ) outMaterials.pushBack( material ); } } visitor.popCurrentContext(); visitor.pushCurrentContext(); PxPlaneGeometry plane; PxHeightFieldGeometry heightField; PxSphereGeometry sphere; PxTriangleMeshGeometry mesh; PxConvexMeshGeometry convex; PxBoxGeometry box; PxCapsuleGeometry capsule; if ( visitor.gotoChild( "Geometry" ) ) { if ( visitor.gotoFirstChild() ) { const char* geomTypeName = visitor.getCurrentItemName(); if ( physx::Pxstricmp( geomTypeName, "PxSphereGeometry" ) == 0 ) outResult = parseGeometry(visitor, sphere); else if ( physx::Pxstricmp( geomTypeName, "PxPlaneGeometry" ) == 0 ) outResult = parseGeometry(visitor, plane); else if ( physx::Pxstricmp( geomTypeName, "PxCapsuleGeometry" ) == 0 ) outResult = parseGeometry(visitor, capsule); else if ( physx::Pxstricmp( geomTypeName, "PxBoxGeometry" ) == 0 ) outResult = parseGeometry(visitor, box); else if ( physx::Pxstricmp( geomTypeName, "PxConvexMeshGeometry" ) == 0 ) outResult = parseGeometry(visitor, convex); else if ( physx::Pxstricmp( geomTypeName, "PxTriangleMeshGeometry" ) == 0 ) outResult = parseGeometry(visitor, mesh); else if ( physx::Pxstricmp( geomTypeName, "PxHeightFieldGeometry" ) == 0 ) outResult = parseGeometry(visitor, heightField); else PX_ASSERT( false ); } } visitor.popCurrentContext(); } visitor.popCurrentContext(); return; } template<typename TReaderType, typename TObjType> inline void readShapesProperty( TReaderType& visitor, TObjType* inObj, const PxRigidActorShapeCollection* inProp = NULL, bool isSharedShape = false ) { PX_UNUSED(isSharedShape); PX_UNUSED(inProp); XmlReader& theReader( visitor.mReader ); PxCollection& collection( visitor.mCollection ); visitor.pushCurrentContext(); if ( visitor.gotoTopName() ) { //uggh working around the shape collection api. 
//read out materials and geometry for ( bool success = visitor.gotoFirstChild(); success; success = visitor.gotoNextSibling() ) { if( 0 == physx::Pxstricmp( visitor.getCurrentItemName(), "PxShapeRef" ) ) { PxShape* shape = NULL; if(!readReference<PxShape>( theReader, collection, shape )) visitor.mHadError = true; if(shape) inObj->attachShape( *shape ); } else { PxArray<PxMaterial*> materials; PxGeometry* geometry = NULL; parseShape( visitor, geometry, materials); PxShape* theShape = NULL; if ( materials.size() ) { theShape = visitor.mArgs.physics.createShape( *geometry, materials.begin(), PxTo16(materials.size()), true ); if ( theShape ) { readComplexObj( visitor, theShape ); if(theShape) { inObj->attachShape(*theShape); collection.add( *theShape ); } } } switch(geometry->getType()) { case PxGeometryType::eSPHERE : static_cast<PxSphereGeometry*>(geometry)->~PxSphereGeometry(); break; case PxGeometryType::ePLANE : static_cast<PxPlaneGeometry*>(geometry)->~PxPlaneGeometry(); break; case PxGeometryType::eCAPSULE : static_cast<PxCapsuleGeometry*>(geometry)->~PxCapsuleGeometry(); break; case PxGeometryType::eBOX : static_cast<PxBoxGeometry*>(geometry)->~PxBoxGeometry(); break; case PxGeometryType::eCONVEXMESH : static_cast<PxConvexMeshGeometry*>(geometry)->~PxConvexMeshGeometry(); break; case PxGeometryType::eTRIANGLEMESH : static_cast<PxTriangleMeshGeometry*>(geometry)->~PxTriangleMeshGeometry(); break; case PxGeometryType::eHEIGHTFIELD : static_cast<PxHeightFieldGeometry*>(geometry)->~PxHeightFieldGeometry(); break; case PxGeometryType::eTETRAHEDRONMESH : static_cast<PxTetrahedronMeshGeometry*>(geometry)->~PxTetrahedronMeshGeometry(); break; case PxGeometryType::ePARTICLESYSTEM: static_cast<PxParticleSystemGeometry*>(geometry)->~PxParticleSystemGeometry(); break; case PxGeometryType::eHAIRSYSTEM: static_cast<PxHairSystemGeometry*>(geometry)->~PxHairSystemGeometry(); break; case PxGeometryType::eCUSTOM : static_cast<PxCustomGeometry*>(geometry)->~PxCustomGeometry(); break; case PxGeometryType::eGEOMETRY_COUNT: case PxGeometryType::eINVALID: PX_ASSERT(0); } visitor.mAllocator.getAllocator().deallocate(geometry); } } } visitor.popCurrentContext(); } struct ReaderNameStackEntry : NameStackEntry { bool mValid; ReaderNameStackEntry( const char* nm, bool valid ) : NameStackEntry(nm), mValid(valid) {} }; typedef PxProfileArray<ReaderNameStackEntry> TReaderNameStack; template<typename TObjType> struct RepXVisitorReaderBase { protected: RepXVisitorReaderBase<TObjType>& operator=(const RepXVisitorReaderBase<TObjType>&); public: TReaderNameStack& mNames; PxProfileArray<PxU32>& mContexts; PxRepXInstantiationArgs mArgs; XmlReader& mReader; TObjType* mObj; XmlMemoryAllocator& mAllocator; PxCollection& mCollection; bool mValid; bool& mHadError; RepXVisitorReaderBase( TReaderNameStack& names, PxProfileArray<PxU32>& contexts, const PxRepXInstantiationArgs& args, XmlReader& reader, TObjType* obj , XmlMemoryAllocator& alloc, PxCollection& collection, bool& hadError ) : mNames( names ) , mContexts( contexts ) , mArgs( args ) , mReader( reader ) , mObj( obj ) , mAllocator( alloc ) , mCollection( collection ) , mValid( true ) , mHadError(hadError) { } RepXVisitorReaderBase( const RepXVisitorReaderBase& other ) : mNames( other.mNames ) , mContexts( other.mContexts ) , mArgs( other.mArgs ) , mReader( other.mReader ) , mObj( other.mObj ) , mAllocator( other.mAllocator ) , mCollection( other.mCollection ) , mValid( other.mValid ) , mHadError( other.mHadError ) { } void pushName( const char* name ) { gotoTopName(); 
mNames.pushBack( ReaderNameStackEntry( name, mValid ) ); } void pushBracketedName( const char* name ) { pushName( name ); } void popName() { if ( mNames.size() ) { if ( mNames.back().mOpen && mNames.back().mValid ) mReader.leaveChild(); mNames.popBack(); } mValid =true; if ( mNames.size() && mNames.back().mValid == false ) mValid = false; } void pushCurrentContext() { mContexts.pushBack( static_cast<PxU32>( mNames.size() ) ); } void popCurrentContext() { if ( mContexts.size() ) { PxU32 depth = mContexts.back(); PX_ASSERT( mNames.size() >= depth ); while( mNames.size() > depth ) popName(); mContexts.popBack(); } } bool updateLastEntryAfterOpen() { mNames.back().mValid = mValid; mNames.back().mOpen = mValid; return mValid; } bool gotoTopName() { if ( mNames.size() && mNames.back().mOpen == false ) { if ( mValid ) mValid = mReader.gotoChild( mNames.back().mName ); updateLastEntryAfterOpen(); } return mValid; } bool isValid() const { return mValid; } bool gotoChild( const char* name ) { pushName( name ); return gotoTopName(); } bool gotoFirstChild() { pushName( "__child" ); if ( mValid ) mValid = mReader.gotoFirstChild(); return updateLastEntryAfterOpen(); } bool gotoNextSibling() { bool retval = mValid; if ( mValid ) retval = mReader.gotoNextSibling(); return retval; } const char* getCurrentItemName() { if (mValid ) return mReader.getCurrentItemName(); return ""; } const char* topName() const { if ( mNames.size() ) return mNames.back().mName; PX_ASSERT( false ); return "bad__repx__name"; } const char* getCurrentValue() { const char* value = NULL; if ( isValid() && mReader.read( topName(), value ) ) return value; return NULL; } template<typename TDataType> bool readProperty(TDataType& outType) { const char* value = getCurrentValue(); if ( value && *value ) { stringToType( value, outType ); return true; } return false; } template<typename TDataType> bool readExtendedIndexProperty(TDataType& outType) { const char* value = mReader.getCurrentItemValue(); if ( value && *value ) { stringToType( value, outType ); return true; } return false; } template<typename TRefType> bool readReference(TRefType*& outRef) { return physx::Sn::readReference<TRefType>( mReader, mCollection, topName(), outRef ); } inline bool readProperty(const char*& outProp ) { outProp = ""; const char* value = getCurrentValue(); if ( value && *value && mArgs.stringTable ) { outProp = mArgs.stringTable->allocateStr( value ); return true; } return false; } inline bool readProperty(PxConvexMesh*& outProp ) { return readReference<PxConvexMesh>( outProp ); } inline bool readProperty(PxTriangleMesh*& outProp ) { return readReference<PxTriangleMesh>( outProp ); } inline bool readProperty(PxBVH33TriangleMesh*& outProp ) { return readReference<PxBVH33TriangleMesh>( outProp ); } inline bool readProperty(PxBVH34TriangleMesh*& outProp ) { return readReference<PxBVH34TriangleMesh>( outProp ); } inline bool readProperty(PxHeightField*& outProp ) { return readReference<PxHeightField>( outProp ); } inline bool readProperty( PxRigidActor *& outProp ) { return readReference<PxRigidActor>( outProp ); } template<typename TAccessorType> void simpleProperty( PxU32 /*key*/, TAccessorType& inProp ) { typedef typename TAccessorType::prop_type TPropertyType; TPropertyType value; if ( readProperty( value ) ) inProp.set( mObj, value ); } template<typename TAccessorType> void enumProperty( PxU32 /*key*/, TAccessorType& inProp, const PxU32ToName* inConversions ) { typedef typename TAccessorType::prop_type TPropertyType; const char* strVal = getCurrentValue(); 
if ( strVal && *strVal ) { TPropertyType pval; stringToEnumType( strVal, pval, inConversions ); inProp.set( mObj, pval ); } } template<typename TAccessorType> void flagsProperty( PxU32 /*key*/, const TAccessorType& inProp, const PxU32ToName* inConversions ) { typedef typename TAccessorType::prop_type TPropertyType; typedef typename TPropertyType::InternalType TInternalType; const char* strVal = getCurrentValue(); if ( strVal && *strVal ) { PxU32 tempValue = 0; stringToFlagsType( strVal, mAllocator, tempValue, inConversions ); inProp.set( mObj, TPropertyType(TInternalType( tempValue ))); } } template<typename TAccessorType, typename TInfoType> void complexProperty( PxU32* /*key*/, const TAccessorType& inProp, TInfoType& inInfo ) { typedef typename TAccessorType::prop_type TPropertyType; if ( gotoTopName() ) { TPropertyType propVal = inProp.get( mObj ); readComplexObj( *this, &propVal, inInfo ); inProp.set( mObj, propVal ); } } template<typename TAccessorType, typename TInfoType> void bufferCollectionProperty( PxU32* /*key*/, const TAccessorType& inProp, TInfoType& inInfo ) { typedef typename TAccessorType::prop_type TPropertyType; PxInlineArray<TPropertyType,5> theData; this->pushCurrentContext(); if ( this->gotoTopName() ) { for ( bool success = this->gotoFirstChild(); success; success = this->gotoNextSibling() ) { TPropertyType propVal; readComplexObj( *this, &propVal, inInfo ); theData.pushBack(propVal); } } this->popCurrentContext(); inProp.set( mObj, theData.begin(), theData.size() ); } template<typename TAccessorType, typename TInfoType> void extendedIndexedProperty( PxU32* /*key*/, const TAccessorType& inProp, TInfoType& inInfo ) { typedef typename TAccessorType::prop_type TPropertyType; this->pushCurrentContext(); if ( this->gotoTopName() ) { PxU32 index = 0; for ( bool success = this->gotoFirstChild(); success; success = this->gotoNextSibling() ) { TPropertyType propVal; readComplexObj( *this, &propVal, inInfo ); inProp.set(mObj, index, propVal); ++index; } } this->popCurrentContext(); } template<typename TAccessorType, typename TInfoType> void PxFixedSizeLookupTableProperty( PxU32* /*key*/, const TAccessorType& inProp, TInfoType& inInfo ) { typedef typename TAccessorType::prop_type TPropertyType; const_cast<TAccessorType&>(inProp).clear( mObj ); this->pushCurrentContext(); if ( this->gotoTopName() ) { for ( bool success = this->gotoFirstChild(); success; success = this->gotoNextSibling() ) { TPropertyType propXVal; readComplexObj( *this, &propXVal, inInfo ); if(this->gotoNextSibling()) { TPropertyType propYVal; readComplexObj( *this, &propYVal, inInfo ); const_cast<TAccessorType&>(inProp).addPair(mObj, propXVal, propYVal); } } } this->popCurrentContext(); } void handleShapes( const PxRigidActorShapeCollection& inProp ) { physx::Sn::readShapesProperty( *this, mObj, &inProp ); } void handleRigidActorGlobalPose(const PxRigidActorGlobalPosePropertyInfo& inProp) { PxArticulationLink* link = mObj->template is<PxArticulationLink>(); bool isReducedCoordinateLink = (link != NULL); if (!isReducedCoordinateLink) { PxRepXPropertyAccessor<PxPropertyInfoName::PxRigidActor_GlobalPose, PxRigidActor, const PxTransform &, PxTransform> theAccessor(inProp); simpleProperty(PxPropertyInfoName::PxRigidActor_GlobalPose, theAccessor); } } }; template<typename TObjType> struct RepXVisitorReader : public RepXVisitorReaderBase<TObjType> { RepXVisitorReader( TReaderNameStack& names, PxProfileArray<PxU32>& contexts, const PxRepXInstantiationArgs& args, XmlReader& reader, TObjType* obj , XmlMemoryAllocator& 
alloc, PxCollection& collection, bool& ret) : RepXVisitorReaderBase<TObjType>( names, contexts, args, reader, obj, alloc, collection, ret) { } RepXVisitorReader( const RepXVisitorReader<TObjType>& other ) : RepXVisitorReaderBase<TObjType>( other ) { } }; // Specialized template to load dynamic rigid, to determine the kinematic state first template<> struct RepXVisitorReader<PxRigidDynamic> : public RepXVisitorReaderBase<PxRigidDynamic> { RepXVisitorReader( TReaderNameStack& names, PxProfileArray<PxU32>& contexts, const PxRepXInstantiationArgs& args, XmlReader& reader, PxRigidDynamic* obj , XmlMemoryAllocator& alloc, PxCollection& collection, bool& ret) : RepXVisitorReaderBase<PxRigidDynamic>( names, contexts, args, reader, obj, alloc, collection, ret) { } RepXVisitorReader( const RepXVisitorReader<PxRigidDynamic>& other ) : RepXVisitorReaderBase<PxRigidDynamic>( other ) { } void handleShapes( const PxRigidActorShapeCollection& inProp ) { // Need to read the parental actor to check if actor is kinematic // in that case we need to apply the kinematic flag before a shape is set XmlReaderWriter* parentReader = static_cast<XmlReaderWriter*>(mReader.getParentReader()); if(mObj) { const char* value; if (parentReader->read( "RigidBodyFlags", value )) { if(strstr(value, "eKINEMATIC")) { mObj->setRigidBodyFlag(PxRigidBodyFlag::eKINEMATIC, true); } } } physx::Sn::readShapesProperty( *this, mObj, &inProp ); parentReader->release(); } template<typename TAccessorType> void simpleProperty( PxU32 /*key*/, TAccessorType& inProp ) { typedef typename TAccessorType::prop_type TPropertyType; TPropertyType value; if (readProperty(value)) { // If the rigid body is kinematic, we cannot set the LinearVelocity or AngularVelocity const bool kinematic = (mObj->getRigidBodyFlags() & PxRigidBodyFlag::eKINEMATIC); if(kinematic && (inProp.mProperty.mKey == PxPropertyInfoName::PxRigidDynamic_LinearVelocity || inProp.mProperty.mKey == PxPropertyInfoName::PxRigidDynamic_AngularVelocity)) return; inProp.set(mObj, value ); } } private: RepXVisitorReader<PxRigidDynamic>& operator=(const RepXVisitorReader<PxRigidDynamic>&); }; template<> struct RepXVisitorReader<PxShape> : public RepXVisitorReaderBase<PxShape> { RepXVisitorReader( TReaderNameStack& names, PxProfileArray<PxU32>& contexts, const PxRepXInstantiationArgs& args, XmlReader& reader, PxShape* obj , XmlMemoryAllocator& alloc, PxCollection& collection, bool& ret ) : RepXVisitorReaderBase<PxShape>( names, contexts, args, reader, obj, alloc, collection, ret ) { } RepXVisitorReader( const RepXVisitorReader<PxShape>& other ) : RepXVisitorReaderBase<PxShape>( other ) { } void handleShapeMaterials( const PxShapeMaterialsProperty& ) //these were handled during construction. 
{ } void handleGeomProperty( const PxShapeGeomProperty& ) { } private: RepXVisitorReader<PxShape>& operator=(const RepXVisitorReader<PxShape>&); }; template<> struct RepXVisitorReader<PxArticulationLink> : public RepXVisitorReaderBase<PxArticulationLink> { RepXVisitorReader( TReaderNameStack& names, PxProfileArray<PxU32>& contexts, const PxRepXInstantiationArgs& args, XmlReader& reader, PxArticulationLink* obj , XmlMemoryAllocator& alloc, PxCollection& collection, bool& ret ) : RepXVisitorReaderBase<PxArticulationLink>( names, contexts, args, reader, obj, alloc, collection, ret ) { } RepXVisitorReader( const RepXVisitorReader<PxArticulationLink>& other ) : RepXVisitorReaderBase<PxArticulationLink>( other ) { } void handleIncomingJoint( const TIncomingJointPropType& prop ) { pushName( "Joint" ); if ( gotoTopName() ) { PxArticulationJointReducedCoordinate* theJoint = static_cast<PxArticulationJointReducedCoordinate*>((prop.get(mObj))); readComplexObj(*this, theJoint); //Add joint to PxCollection, since PxArticulation requires PxArticulationLink and joint. mCollection.add(*theJoint); } popName(); } private: RepXVisitorReader<PxArticulationLink>& operator=(const RepXVisitorReader<PxArticulationLink>&); }; template<typename ArticulationType> inline void readProperty( RepXVisitorReaderBase<ArticulationType>& inSerializer, ArticulationType* inObj, const PxArticulationLinkCollectionProp&) { PxProfileAllocatorWrapper theWrapper( inSerializer.mAllocator.getAllocator() ); PxCollection& collection( inSerializer.mCollection ); TArticulationLinkLinkMap linkRemapMap( theWrapper ); inSerializer.pushCurrentContext(); if( inSerializer.gotoTopName() ) { for ( bool links = inSerializer.gotoFirstChild(); links != false; links = inSerializer.gotoNextSibling() ) { //Need enough information to create the link... PxSerialObjectId theParentPtr = 0; const PxArticulationLink* theParentLink = NULL; if ( inSerializer.mReader.read( "Parent", theParentPtr ) ) { const TArticulationLinkLinkMap::Entry* theRemappedParent( linkRemapMap.find( theParentPtr ) ); //If we have a valid at write time, we had better have a valid parent at read time. PX_ASSERT( theRemappedParent ); theParentLink = theRemappedParent->second; } PxArticulationLink* newLink = inObj->createLink( const_cast<PxArticulationLink*>( theParentLink ), PxTransform(PxIdentity) ); PxSerialObjectId theIdPtr = 0; inSerializer.mReader.read( "Id", theIdPtr ); linkRemapMap.insert( theIdPtr, newLink ); readComplexObj( inSerializer, newLink ); //Add link to PxCollection, since PxArticulation requires PxArticulationLink and joint. 
collection.add( *newLink, theIdPtr ); } } inSerializer.popCurrentContext(); } template<> struct RepXVisitorReader<PxArticulationReducedCoordinate> : public RepXVisitorReaderBase<PxArticulationReducedCoordinate> { RepXVisitorReader(TReaderNameStack& names, PxProfileArray<PxU32>& contexts, const PxRepXInstantiationArgs& args, XmlReader& reader, PxArticulationReducedCoordinate* obj , XmlMemoryAllocator& alloc, PxCollection& collection, bool& ret) : RepXVisitorReaderBase<PxArticulationReducedCoordinate>(names, contexts, args, reader, obj, alloc, collection, ret) {} RepXVisitorReader(const RepXVisitorReader<PxArticulationReducedCoordinate>& other) : RepXVisitorReaderBase<PxArticulationReducedCoordinate>(other) {} void handleArticulationLinks(const PxArticulationLinkCollectionProp& inProp) { physx::Sn::readProperty(*this, mObj, inProp); } }; template<typename TObjType, typename TInfoType> inline bool readAllProperties( PxRepXInstantiationArgs args, TReaderNameStack& names, PxProfileArray<PxU32>& contexts, XmlReader& reader, TObjType* obj, XmlMemoryAllocator& alloc, PxCollection& collection, TInfoType& info ) { bool hadError = false; RepXVisitorReader<TObjType> theReader( names, contexts, args, reader, obj, alloc, collection, hadError); RepXPropertyFilter<RepXVisitorReader<TObjType> > theOp( theReader ); info.visitBaseProperties( theOp ); info.visitInstanceProperties( theOp ); return !hadError; } template<typename TObjType> inline bool readAllProperties( PxRepXInstantiationArgs args, XmlReader& reader, TObjType* obj, XmlMemoryAllocator& alloc, PxCollection& collection ) { PxProfileAllocatorWrapper wrapper( alloc.getAllocator() ); TReaderNameStack names( wrapper ); PxProfileArray<PxU32> contexts( wrapper ); PxClassInfoTraits<TObjType> info; return readAllProperties( args, names, contexts, reader, obj, alloc, collection, info.Info ); } template<typename TObjType, typename TReaderType, typename TInfoType> inline void readComplexObj( TReaderType& oldVisitor, TObjType* inObj, TInfoType& info) { if(!readAllProperties( oldVisitor.mArgs, oldVisitor.mNames, oldVisitor.mContexts, oldVisitor.mReader, inObj, oldVisitor.mAllocator, oldVisitor.mCollection, info )) oldVisitor.mHadError = true; } template<typename TObjType, typename TReaderType, typename TInfoType> inline void readComplexObj( TReaderType& oldVisitor, TObjType* inObj, const TInfoType& info) { if(!readAllProperties( oldVisitor.mArgs, oldVisitor.mNames, oldVisitor.mContexts, oldVisitor.mReader, inObj, oldVisitor.mAllocator, oldVisitor.mCollection, info )) oldVisitor.mHadError = true; } template<typename TObjType, typename TReaderType> inline void readComplexObj( TReaderType& oldVisitor, TObjType* inObj, const PxUnknownClassInfo& /*info*/) { const char* value = oldVisitor.mReader.getCurrentItemValue(); if ( value && *value ) { stringToType( value, *inObj ); return; } oldVisitor.mHadError = true; } template<typename TObjType, typename TReaderType> inline void readComplexObj( TReaderType& oldVisitor, TObjType* inObj) { PxClassInfoTraits<TObjType> info; if(!readAllProperties( oldVisitor.mArgs, oldVisitor.mNames, oldVisitor.mContexts, oldVisitor.mReader, inObj, oldVisitor.mAllocator, oldVisitor.mCollection, info.Info )) oldVisitor.mHadError = true; } } } #endif
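Editor's note: the flag-parsing helpers near the top of this header (findEnumByName and stringToFlagsType) expect a '|'-separated list of names, matched case-insensitively against a PxU32ToName table terminated by a NULL name. The sketch below is placed in the same namespaces as the header so those names resolve exactly as above; the conversions table and the XmlMemoryAllocator are assumed to come from the surrounding serialization code.

// Minimal sketch: parse a '|'-separated flag string using the helpers defined above.
namespace physx { namespace Sn {

inline PxU32 sketchParseFlags( const char* text, XmlMemoryAllocator& alloc,
                               const PxU32ToName* conversions )   // hypothetical helper
{
	PxU32 flags = 0;
	// For text == "eKINEMATIC|eVISUALIZATION" each name is looked up case-insensitively
	// via findEnumByName and the matching values are OR'ed into 'flags'.
	stringToFlagsType( text, alloc, flags, conversions );
	return flags;
}

} }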
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Xml/SnXmlMemoryPoolStreams.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SN_XML_MEMORY_POOL_STREAMS_H #define SN_XML_MEMORY_POOL_STREAMS_H #include "foundation/PxTransform.h" #include "foundation/PxIO.h" #include "SnXmlMemoryPool.h" namespace physx { template<typename TDataType> struct XmlDefaultValue { bool force_compile_error; }; #define XML_DEFINE_DEFAULT_VALUE(type, defVal ) \ template<> \ struct XmlDefaultValue<type> \ { \ type getDefaultValue() { return type(defVal); } \ }; XML_DEFINE_DEFAULT_VALUE(PxU8, 0) XML_DEFINE_DEFAULT_VALUE(PxI8, 0) XML_DEFINE_DEFAULT_VALUE(PxU16, 0) XML_DEFINE_DEFAULT_VALUE(PxI16, 0) XML_DEFINE_DEFAULT_VALUE(PxU32, 0) XML_DEFINE_DEFAULT_VALUE(PxI32, 0) XML_DEFINE_DEFAULT_VALUE(PxU64, 0) XML_DEFINE_DEFAULT_VALUE(PxI64, 0) XML_DEFINE_DEFAULT_VALUE(PxF32, 0) XML_DEFINE_DEFAULT_VALUE(PxF64, 0) #undef XML_DEFINE_DEFAULT_VALUE template<> struct XmlDefaultValue<PxVec3> { PxVec3 getDefaultValue() { return PxVec3( 0,0,0 ); } }; template<> struct XmlDefaultValue<PxTransform> { PxTransform getDefaultValue() { return PxTransform(PxIdentity); } }; template<> struct XmlDefaultValue<PxQuat> { PxQuat getDefaultValue() { return PxQuat(PxIdentity); } }; /** * Mapping of PxOutputStream to a memory pool manager. * Allows write-then-read semantics of a set of * data. Can safely write up to 4GB of data; then you * will silently fail... 
*/ template<typename TAllocatorType> struct MemoryBufferBase : public PxOutputStream, public PxInputStream { TAllocatorType* mManager; mutable PxU32 mWriteOffset; mutable PxU32 mReadOffset; PxU8* mBuffer; PxU32 mCapacity; MemoryBufferBase( TAllocatorType* inManager ) : mManager( inManager ) , mWriteOffset( 0 ) , mReadOffset( 0 ) , mBuffer( NULL ) , mCapacity( 0 ) { } virtual ~MemoryBufferBase() { mManager->deallocate( mBuffer ); } PxU8* releaseBuffer() { clear(); mCapacity = 0; PxU8* retval(mBuffer); mBuffer = NULL; return retval; } void clear() { mWriteOffset = mReadOffset = 0; } virtual PxU32 read(void* dest, PxU32 count) { bool fits = ( mReadOffset + count ) <= mWriteOffset; PX_ASSERT( fits ); if ( fits ) { PxMemCopy( dest, mBuffer + mReadOffset, count ); mReadOffset += count; return count; } return 0; } inline void checkCapacity( PxU32 inNewCapacity ) { if ( mCapacity < inNewCapacity ) { PxU32 newCapacity = 32; while( newCapacity < inNewCapacity ) newCapacity = newCapacity << 1; PxU8* newData( mManager->allocate( newCapacity ) ); if ( mWriteOffset ) PxMemCopy( newData, mBuffer, mWriteOffset ); mManager->deallocate( mBuffer ); mBuffer = newData; mCapacity = newCapacity; } } virtual PxU32 write(const void* src, PxU32 count) { checkCapacity( mWriteOffset + count ); PxMemCopy( mBuffer + mWriteOffset, src, count ); mWriteOffset += count; return count; } }; class MemoryBuffer : public MemoryBufferBase<CMemoryPoolManager > { public: MemoryBuffer( CMemoryPoolManager* inManager ) : MemoryBufferBase<CMemoryPoolManager >( inManager ) {} }; } #endif
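Editor's note: MemoryBufferBase implements simple write-then-read semantics on top of a pool allocator. A minimal round-trip sketch follows; it assumes a CMemoryPoolManager instance is available (that type is defined in SnXmlMemoryPool.h and is not shown in this excerpt).

// Minimal write-then-read sketch for MemoryBuffer.
namespace physx {

inline void sketchBufferRoundTrip( CMemoryPoolManager& manager )   // hypothetical helper
{
	MemoryBuffer buffer( &manager );
	const PxU32 magic = 42;
	buffer.write( &magic, sizeof( magic ) );      // grows the internal storage as needed (doubling, starting at 32 bytes)
	PxU32 readBack = 0;
	buffer.read( &readBack, sizeof( readBack ) ); // copies the bytes back out; asserts if more is read than was written
	buffer.clear();                               // resets both offsets so the buffer can be reused
}

}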
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Xml/SnRepX3_1Defaults.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. DEFINE_REPX_DEFAULT_PROPERTY("PxTolerancesScale.Length", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxTolerancesScale.Mass", "1000" ) DEFINE_REPX_DEFAULT_PROPERTY("PxTolerancesScale.Speed", "10" ) DEFINE_REPX_DEFAULT_PROPERTY("PxBoxGeometry.HalfExtents", "0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphereGeometry.Radius", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxConvexMeshGeometry.Scale.Scale", "1 1 1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxConvexMeshGeometry.Scale.Rotation", "0 0 0 1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxConvexMeshGeometry.ConvexMesh", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxTriangleMeshGeometry.Scale.Scale", "1 1 1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxTriangleMeshGeometry.Scale.Rotation", "0 0 0 1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxTriangleMeshGeometry.MeshFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxTriangleMeshGeometry.TriangleMesh", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxHeightFieldGeometry.HeightField", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxHeightFieldGeometry.HeightScale", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxHeightFieldGeometry.RowScale", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxHeightFieldGeometry.ColumnScale", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxHeightFieldGeometry.HeightFieldFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxMaterial.DynamicFriction", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxMaterial.StaticFriction", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxMaterial.Restitution", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxMaterial.DynamicFrictionV", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxMaterial.StaticFrictionV", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxMaterial.DirOfAnisotropy", "1 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxMaterial.Flags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxMaterial.FrictionCombineMode", "eAVERAGE" ) DEFINE_REPX_DEFAULT_PROPERTY("PxMaterial.RestitutionCombineMode", "eAVERAGE" ) DEFINE_REPX_DEFAULT_PROPERTY("PxShape.LocalPose", "0 0 0 1 0 0 0" ) 
DEFINE_REPX_DEFAULT_PROPERTY("PxShape.SimulationFilterData", "0 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxShape.QueryFilterData", "0 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxShape.ContactOffset", "0.02" ) DEFINE_REPX_DEFAULT_PROPERTY("PxShape.RestOffset", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxShape.Flags", "eSIMULATION_SHAPE|eSCENE_QUERY_SHAPE|eVISUALIZATION" ) DEFINE_REPX_DEFAULT_PROPERTY("PxShape.Name", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidStatic.Name", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidStatic.ActorFlags", "eVISUALIZATION" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidStatic.DominanceGroup", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidStatic.OwnerClient", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidStatic.ClientBehaviorBits", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidStatic.GlobalPose", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.Name", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.ActorFlags", "eVISUALIZATION" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.DominanceGroup", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.OwnerClient", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.ClientBehaviorBits", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.GlobalPose", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.CMassLocalPose", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.Mass", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.MassSpaceInertiaTensor", "1 1 1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.LinearVelocity", "0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.AngularVelocity", "0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.LinearDamping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.AngularDamping", "0.05" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.MaxAngularVelocity", "7" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.SleepThreshold", "0.005" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.SolverIterationCounts.minPositionIters", "4" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.SolverIterationCounts.minVelocityIters", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.ContactReportThreshold", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.RigidDynamicFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.ParentPose", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.ChildPose", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.TargetOrientation", "0 0 0 1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.TargetVelocity", "0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.InternalCompliance", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.ExternalCompliance", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.SwingLimit.yLimit", "0.78539816339" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.SwingLimit.zLimit", "0.78539816339" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.TangentialSpring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.TangentialDamping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.SwingLimitContactDistance", "0.05" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.SwingLimitEnabled", "false" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.TwistLimit.lower", "-0.78539816339" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.TwistLimit.upper", "0.78539816339" ) 
DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.TwistLimitEnabled", "false" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.TwistLimitContactDistance", "0.05" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationLink.Name", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationLink.ActorFlags", "eVISUALIZATION" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationLink.DominanceGroup", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationLink.OwnerClient", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationLink.ClientBehaviorBits", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationLink.GlobalPose", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationLink.CMassLocalPose", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationLink.Mass", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationLink.MassSpaceInertiaTensor", "1 1 1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationLink.LinearVelocity", "0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationLink.AngularVelocity", "0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulation.MaxProjectionIterations", "4" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulation.SeparationTolerance", "0.1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulation.InternalDriveIterations", "4" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulation.ExternalDriveIterations", "4" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulation.SolverIterationCounts.minPositionIters", "4" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulation.SolverIterationCounts.minVelocityIters", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulation.SleepThreshold", "0.005" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulation.Name", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Actors.actor0", "8887040" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Actors.actor1", "8887456" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.LocalPose.eACTOR0", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.LocalPose.eACTOR1", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.BreakForce.force", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.BreakForce.torque", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.ConstraintFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Name", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Motion.eX", "eLOCKED" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Motion.eY", "eLOCKED" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Motion.eZ", "eLOCKED" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Motion.eTWIST", "eLOCKED" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Motion.eSWING1", "eLOCKED" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Motion.eSWING2", "eLOCKED" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.LinearLimit.Restitution", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.LinearLimit.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.LinearLimit.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.LinearLimit.ContactDistance", "0.05" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.LinearLimit.Value", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.TwistLimit.Restitution", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.TwistLimit.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.TwistLimit.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.TwistLimit.ContactDistance", "0.05" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.TwistLimit.Upper", "1.5708" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.TwistLimit.Lower", "-1.5708" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.SwingLimit.Restitution", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.SwingLimit.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.SwingLimit.Damping", "0" 
) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.SwingLimit.ContactDistance", "0.05" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.SwingLimit.YAngle", "1.5708" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.SwingLimit.ZAngle", "1.5708" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eX.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eX.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eX.ForceLimit", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eX.Flags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eY.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eY.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eY.ForceLimit", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eY.Flags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eZ.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eZ.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eZ.ForceLimit", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eZ.Flags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eSWING.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eSWING.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eSWING.ForceLimit", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eSWING.Flags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eTWIST.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eTWIST.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eTWIST.ForceLimit", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eTWIST.Flags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eSLERP.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eSLERP.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eSLERP.ForceLimit", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eSLERP.Flags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.DrivePosition", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.DriveVelocity.linear", "0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.DriveVelocity.angular", "0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.ProjectionLinearTolerance", "1e+010" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.ProjectionAngularTolerance", "3.14159" ) DEFINE_REPX_DEFAULT_PROPERTY("PxFixedJoint.Actors.actor0", "8887040" ) DEFINE_REPX_DEFAULT_PROPERTY("PxFixedJoint.Actors.actor1", "8887456" ) DEFINE_REPX_DEFAULT_PROPERTY("PxFixedJoint.LocalPose.eACTOR0", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxFixedJoint.LocalPose.eACTOR1", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxFixedJoint.BreakForce.force", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxFixedJoint.BreakForce.torque", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxFixedJoint.ConstraintFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxFixedJoint.Name", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxFixedJoint.ProjectionLinearTolerance", "1e+010" ) DEFINE_REPX_DEFAULT_PROPERTY("PxFixedJoint.ProjectionAngularTolerance", "3.14159" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.Actors.actor0", "8887040" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.Actors.actor1", "8887456" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.LocalPose.eACTOR0", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.LocalPose.eACTOR1", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.BreakForce.force", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.BreakForce.torque", "3.40282e+038" ) 
DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.ConstraintFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.Name", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.MinDistance", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.MaxDistance", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.Tolerance", "0.025" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.DistanceJointFlags", "eMAX_DISTANCE_ENABLED" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.Actors.actor0", "8887040" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.Actors.actor1", "8887456" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.LocalPose.eACTOR0", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.LocalPose.eACTOR1", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.BreakForce.force", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.BreakForce.torque", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.ConstraintFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.Name", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.Limit.Restitution", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.Limit.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.Limit.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.Limit.ContactDistance", "0.05" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.Limit.Upper", "1.5708" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.Limit.Lower", "-1.5708" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.DriveVelocity", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.DriveForceLimit", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.DriveGearRatio", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.RevoluteJointFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.ProjectionLinearTolerance", "1e+010" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.ProjectionAngularTolerance", "3.14159" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.Actors.actor0", "8887040" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.Actors.actor1", "8887456" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.LocalPose.eACTOR0", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.LocalPose.eACTOR1", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.BreakForce.force", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.BreakForce.torque", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.ConstraintFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.Name", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.Limit.Restitution", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.Limit.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.Limit.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.Limit.ContactDistance", "0.01" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.Limit.Upper", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.Limit.Lower", "-3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.PrismaticJointFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.ProjectionLinearTolerance", "1e+010" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.ProjectionAngularTolerance", "3.14159" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.Actors.actor0", "8887040" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.Actors.actor1", "8887456" ) 
DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.LocalPose.eACTOR0", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.LocalPose.eACTOR1", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.BreakForce.force", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.BreakForce.torque", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.ConstraintFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.Name", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.LimitCone.Restitution", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.LimitCone.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.LimitCone.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.LimitCone.ContactDistance", "0.05" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.LimitCone.YAngle", "1.5708" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.LimitCone.ZAngle", "1.5708" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.SphericalJointFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.ProjectionLinearTolerance", "1e+010" ) DEFINE_REPX_DEFAULT_PROPERTY("PxCloth.Name", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxCloth.ActorFlags", "eVISUALIZATION" ) DEFINE_REPX_DEFAULT_PROPERTY("PxCloth.DominanceGroup", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxCloth.OwnerClient", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxCloth.ClientBehaviorBits", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxCloth.MotionConstraintScaleBias.scale", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxCloth.MotionConstraintScaleBias.bias", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxCloth.GlobalPose", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxCloth.ExternalAcceleration", "0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxCloth.DampingCoefficient", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxCloth.SolverFrequency", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxCloth.SleepLinearVelocity", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("THEEND", "false" )
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Xml/SnXmlVisitorWriter.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SN_XML_VISITOR_WRITER_H #define SN_XML_VISITOR_WRITER_H #include "foundation/PxInlineArray.h" #include "RepXMetaDataPropertyVisitor.h" #include "SnPxStreamOperators.h" #include "SnXmlMemoryPoolStreams.h" #include "SnXmlWriter.h" #include "SnXmlImpl.h" #include "foundation/PxStrideIterator.h" namespace physx { namespace Sn { template<typename TDataType> inline void writeReference( XmlWriter& writer, PxCollection& inCollection, const char* inPropName, const TDataType* inDatatype ) { const PxBase* s = static_cast<const PxBase*>( inDatatype ) ; if( inDatatype && !inCollection.contains( *const_cast<PxBase*>(s) )) { PxGetFoundation().error(PxErrorCode::eINTERNAL_ERROR, PX_FL, "PxSerialization::serializeCollectionToXml: Reference \"%s\" could not be resolved.", inPropName); } PxSerialObjectId theId = 0; if( s ) { theId = inCollection.getId( *s ); if( theId == 0 ) theId = static_cast<uint64_t>(size_t(inDatatype)); } writer.write( inPropName, PxCreateRepXObject( inDatatype, theId ) ); } inline void writeProperty( XmlWriter& inWriter, MemoryBuffer& inBuffer, const char* inProp ) { PxU8 data = 0; inBuffer.write( &data, sizeof(PxU8) ); inWriter.write( inProp, reinterpret_cast<const char*>( inBuffer.mBuffer ) ); inBuffer.clear(); } template<typename TDataType> inline void writeProperty( XmlWriter& inWriter, PxCollection&, MemoryBuffer& inBuffer, const char* inPropName, TDataType inValue ) { inBuffer << inValue; writeProperty( inWriter, inBuffer, inPropName ); } inline void writeProperty( XmlWriter& writer, PxCollection& inCollection, MemoryBuffer& /*inBuffer*/, const char* inPropName, const PxConvexMesh* inDatatype ) { writeReference( writer, inCollection, inPropName, inDatatype ); } inline void writeProperty( XmlWriter& writer, PxCollection& inCollection, MemoryBuffer& /*inBuffer*/, const char* inPropName, PxConvexMesh* inDatatype ) { writeReference( 
writer, inCollection, inPropName, inDatatype ); } inline void writeProperty( XmlWriter& writer, PxCollection& inCollection, MemoryBuffer& /*inBuffer*/, const char* inPropName, const PxTriangleMesh* inDatatype ) { if (inDatatype->getConcreteType() == PxConcreteType::eTRIANGLE_MESH_BVH33) { const PxBVH33TriangleMesh* dataType = inDatatype->is<PxBVH33TriangleMesh>(); writeReference(writer, inCollection, inPropName, dataType); } else if (inDatatype->getConcreteType() == PxConcreteType::eTRIANGLE_MESH_BVH34) { const PxBVH34TriangleMesh* dataType = inDatatype->is<PxBVH34TriangleMesh>(); writeReference(writer, inCollection, inPropName, dataType); } else { PX_ASSERT(0); } } inline void writeProperty( XmlWriter& writer, PxCollection& inCollection, MemoryBuffer& /*inBuffer*/, const char* inPropName, PxTriangleMesh* inDatatype ) { if (inDatatype->getConcreteType() == PxConcreteType::eTRIANGLE_MESH_BVH33) { PxBVH33TriangleMesh* dataType = inDatatype->is<PxBVH33TriangleMesh>(); writeReference(writer, inCollection, inPropName, dataType); } else if (inDatatype->getConcreteType() == PxConcreteType::eTRIANGLE_MESH_BVH34) { PxBVH34TriangleMesh* dataType = inDatatype->is<PxBVH34TriangleMesh>(); writeReference(writer, inCollection, inPropName, dataType); } else { PX_ASSERT(0); } } inline void writeProperty( XmlWriter& writer, PxCollection& inCollection, MemoryBuffer& /*inBuffer*/, const char* inPropName, const PxBVH33TriangleMesh* inDatatype ) { writeReference( writer, inCollection, inPropName, inDatatype ); } inline void writeProperty( XmlWriter& writer, PxCollection& inCollection, MemoryBuffer& /*inBuffer*/, const char* inPropName, PxBVH33TriangleMesh* inDatatype ) { writeReference( writer, inCollection, inPropName, inDatatype ); } inline void writeProperty( XmlWriter& writer, PxCollection& inCollection, MemoryBuffer& /*inBuffer*/, const char* inPropName, const PxBVH34TriangleMesh* inDatatype ) { writeReference( writer, inCollection, inPropName, inDatatype ); } inline void writeProperty( XmlWriter& writer, PxCollection& inCollection, MemoryBuffer& /*inBuffer*/, const char* inPropName, PxBVH34TriangleMesh* inDatatype ) { writeReference( writer, inCollection, inPropName, inDatatype ); } inline void writeProperty( XmlWriter& writer, PxCollection& inCollection, MemoryBuffer& /*inBuffer*/, const char* inPropName, const PxHeightField* inDatatype ) { writeReference( writer, inCollection, inPropName, inDatatype ); } inline void writeProperty( XmlWriter& writer, PxCollection& inCollection, MemoryBuffer& /*inBuffer*/, const char* inPropName, PxHeightField* inDatatype ) { writeReference( writer, inCollection, inPropName, inDatatype ); } inline void writeProperty( XmlWriter& writer, PxCollection& inCollection, MemoryBuffer& /*inBuffer*/, const char* inPropName, const PxRigidActor* inDatatype ) { writeReference( writer, inCollection, inPropName, inDatatype ); } inline void writeProperty(XmlWriter& writer, PxCollection& inCollection, MemoryBuffer& /*inBuffer*/, const char* inPropName, PxArticulationReducedCoordinate* inDatatype) { writeReference(writer, inCollection, inPropName, inDatatype); } inline void writeProperty( XmlWriter& writer, PxCollection& inCollection, MemoryBuffer& /*inBuffer*/, const char* inPropName, PxRigidActor* inDatatype ) { writeReference( writer, inCollection, inPropName, inDatatype ); } inline void writeFlagsProperty( XmlWriter& inWriter, MemoryBuffer& tempBuf, const char* inPropName, PxU32 inFlags, const PxU32ToName* inTable ) { if ( inTable ) { PxU32 flagValue( inFlags ); if ( flagValue ) { for 
( PxU32 idx =0; inTable[idx].mName != NULL; ++idx ) { if ( (inTable[idx].mValue & flagValue) == inTable[idx].mValue ) { if ( tempBuf.mWriteOffset != 0 ) tempBuf << "|"; tempBuf << inTable[idx].mName; } } writeProperty( inWriter, tempBuf, inPropName ); } else { if ( tempBuf.mWriteOffset != 0 ) tempBuf << "|"; tempBuf << "0"; writeProperty( inWriter, tempBuf, inPropName ); } } } inline void writeFlagsBuffer( MemoryBuffer& tempBuf, PxU32 flagValue, const PxU32ToName* inTable ) { PX_ASSERT(inTable); bool added = false; if ( flagValue ) { for ( PxU32 item =0; inTable[item].mName != NULL; ++item ) { if ( (inTable[item].mValue & flagValue) != 0 ) { if ( added ) tempBuf << "|"; tempBuf << inTable[item].mName; added = true; } } } } inline void writePxVec3( PxOutputStream& inStream, const PxVec3& inVec ) { inStream << inVec; } template<typename TDataType> inline const TDataType& PtrAccess( const TDataType* inPtr, PxU32 inIndex ) { return inPtr[inIndex]; } template<typename TDataType> inline void BasicDatatypeWrite( PxOutputStream& inStream, const TDataType& item ) { inStream << item; } template<typename TObjType, typename TAccessOperator, typename TWriteOperator> inline void writeBuffer( XmlWriter& inWriter, MemoryBuffer& inTempBuffer , PxU32 inObjPerLine, const TObjType* inObjType, TAccessOperator inAccessOperator , PxU32 inBufSize, const char* inPropName, TWriteOperator inOperator ) { if ( inBufSize && inObjType ) { for ( PxU32 idx = 0; idx < inBufSize; ++idx ) { if ( idx && ( idx % inObjPerLine == 0 ) ) inTempBuffer << "\n\t\t\t"; else inTempBuffer << " "; inOperator( inTempBuffer, inAccessOperator( inObjType, idx ) ); } writeProperty( inWriter, inTempBuffer, inPropName ); } } template<typename TDataType, typename TAccessOperator, typename TWriteOperator> inline void writeStrideBuffer( XmlWriter& inWriter, MemoryBuffer& inTempBuffer , PxU32 inObjPerLine, PxStrideIterator<const TDataType>& inData, TAccessOperator inAccessOperator , PxU32 inBufSize, const char* inPropName, PxU32 /*inStride*/, TWriteOperator inOperator ) { #if PX_SWITCH const auto *dat = &inData[0]; if (inBufSize && dat != NULL) #else if ( inBufSize && &inData[0]) #endif { for ( PxU32 idx = 0; idx < inBufSize; ++idx ) { if ( idx && ( idx % inObjPerLine == 0 ) ) inTempBuffer << "\n\t\t\t"; else inTempBuffer << " "; inOperator( inTempBuffer, inAccessOperator( &inData[idx], 0 ) ); } writeProperty( inWriter, inTempBuffer, inPropName ); } } template<typename TDataType, typename TAccessOperator> inline void writeStrideFlags( XmlWriter& inWriter, MemoryBuffer& inTempBuffer , PxU32 inObjPerLine, PxStrideIterator<const TDataType>& inData, TAccessOperator /*inAccessOperator*/ , PxU32 inBufSize, const char* inPropName, const PxU32ToName* inTable) { #if PX_SWITCH const auto *dat = &inData[0]; if (inBufSize && dat != NULL) #else if ( inBufSize && &inData[0]) #endif { for ( PxU32 idx = 0; idx < inBufSize; ++idx ) { writeFlagsBuffer(inTempBuffer, inData[idx], inTable); if ( idx && ( idx % inObjPerLine == 0 ) ) inTempBuffer << "\n\t\t\t"; else inTempBuffer << " "; } writeProperty( inWriter, inTempBuffer, inPropName ); } } template<typename TDataType, typename TWriteOperator> inline void writeBuffer( XmlWriter& inWriter, MemoryBuffer& inTempBuffer , PxU32 inObjPerLine, const TDataType* inBuffer , PxU32 inBufSize, const char* inPropName, TWriteOperator inOperator ) { writeBuffer( inWriter, inTempBuffer, inObjPerLine, inBuffer, PtrAccess<TDataType>, inBufSize, inPropName, inOperator ); } template<typename TEnumType> inline void writeEnumProperty( 
XmlWriter& inWriter, const char* inPropName, TEnumType inEnumValue, const PxU32ToName* inConversions ) { PxU32 theValue = static_cast<PxU32>( inEnumValue ); for ( const PxU32ToName* conv = inConversions; conv->mName != NULL; ++conv ) if ( conv->mValue == theValue ) inWriter.write( inPropName, conv->mName ); } template<typename TObjType, typename TWriterType, typename TInfoType> inline void handleComplexObj( TWriterType& oldVisitor, const TObjType* inObj, const TInfoType& info); template<typename TCollectionType, typename TVisitor, typename TPropType, typename TInfoType > void handleComplexCollection( TVisitor& visitor, const TPropType& inProp, const char* childName, TInfoType& inInfo ) { PxU32 count( inProp.size( visitor.mObj ) ); if ( count ) { PxInlineArray<TCollectionType*,5> theData; theData.resize( count ); inProp.get( visitor.mObj, theData.begin(), count ); for( PxU32 idx =0; idx < count; ++idx ) { visitor.pushName( childName ); handleComplexObj( visitor, theData[idx], inInfo ); visitor.popName(); } } } template<typename TCollectionType, typename TVisitor, typename TPropType, typename TInfoType > void handleBufferCollection( TVisitor& visitor, const TPropType& inProp, const char* childName, TInfoType& inInfo ) { PxU32 count( inProp.size( visitor.mObj ) ); if ( count ) { PxInlineArray<TCollectionType*,5> theData; theData.resize( count ); inProp.get( visitor.mObj, theData.begin()); for( PxU32 idx =0; idx < count; ++idx ) { visitor.pushName( childName ); handleComplexObj( visitor, theData[idx], inInfo ); visitor.popName(); } } } template<typename TVisitor> void handleShapes( TVisitor& visitor, const PxRigidActorShapeCollection& inProp ) { PxShapeGeneratedInfo theInfo; PxU32 count( inProp.size( visitor.mObj ) ); if ( count ) { PxInlineArray<PxShape*,5> theData; theData.resize( count ); inProp.get( visitor.mObj, theData.begin(), count ); for( PxU32 idx = 0; idx < count; ++idx ) { const PxShape* shape = theData[idx]; visitor.pushName( "PxShape" ); if( !shape->isExclusive() ) { writeReference( visitor.mWriter, visitor.mCollection, "PxShapeRef", shape ); } else { handleComplexObj( visitor, shape, theInfo ); } visitor.popName(); } } } template<typename TVisitor> void handleShapeMaterials( TVisitor& visitor, const PxShapeMaterialsProperty& inProp ) { PxU32 count( inProp.size( visitor.mObj ) ); if ( count ) { PxInlineArray<PxMaterial*,5> theData; theData.resize( count ); inProp.get( visitor.mObj, theData.begin(), count ); visitor.pushName( "PxMaterialRef" ); for( PxU32 idx =0; idx < count; ++idx ) writeReference( visitor.mWriter, visitor.mCollection, "PxMaterialRef", theData[idx] ); visitor.popName(); } } template<typename TObjType> struct RepXVisitorWriterBase { TNameStack& mNameStack; XmlWriter& mWriter; const TObjType* mObj; MemoryBuffer& mTempBuffer; PxCollection& mCollection; RepXVisitorWriterBase( TNameStack& ns, XmlWriter& writer, const TObjType* obj, MemoryBuffer& buf, PxCollection& collection ) : mNameStack( ns ) , mWriter( writer ) , mObj( obj ) , mTempBuffer( buf ) , mCollection( collection ) { } RepXVisitorWriterBase( const RepXVisitorWriterBase<TObjType>& other ) : mNameStack( other.mNameStack ) , mWriter( other.mWriter ) , mObj( other.mObj ) , mTempBuffer( other.mTempBuffer ) , mCollection( other.mCollection ) { } RepXVisitorWriterBase& operator=( const RepXVisitorWriterBase& ){ PX_ASSERT( false ); return *this; } void gotoTopName() { if ( mNameStack.size() && mNameStack.back().mOpen == false ) { mWriter.addAndGotoChild( mNameStack.back().mName ); mNameStack.back().mOpen = true; } 
} void pushName( const char* inName ) { gotoTopName(); mNameStack.pushBack( inName ); } void pushBracketedName( const char* inName ) { pushName( inName ); } void popName() { if ( mNameStack.size() ) { if ( mNameStack.back().mOpen ) mWriter.leaveChild(); mNameStack.popBack(); } } const char* topName() const { if ( mNameStack.size() ) return mNameStack.back().mName; PX_ASSERT( false ); return "bad__repx__name"; } template<typename TAccessorType> void simpleProperty( PxU32 /*key*/, TAccessorType& inProp ) { typedef typename TAccessorType::prop_type TPropertyType; TPropertyType propVal = inProp.get( mObj ); writeProperty( mWriter, mCollection, mTempBuffer, topName(), propVal ); } template<typename TAccessorType> void enumProperty( PxU32 /*key*/, TAccessorType& inProp, const PxU32ToName* inConversions ) { writeEnumProperty( mWriter, topName(), inProp.get( mObj ), inConversions ); } template<typename TAccessorType> void flagsProperty( PxU32 /*key*/, const TAccessorType& inProp, const PxU32ToName* inConversions ) { writeFlagsProperty( mWriter, mTempBuffer, topName(), inProp.get( mObj ), inConversions ); } template<typename TAccessorType, typename TInfoType> void complexProperty( PxU32* /*key*/, const TAccessorType& inProp, TInfoType& inInfo ) { typedef typename TAccessorType::prop_type TPropertyType; TPropertyType propVal = inProp.get( mObj ); handleComplexObj( *this, &propVal, inInfo ); } template<typename TAccessorType, typename TInfoType> void bufferCollectionProperty( PxU32* /*key*/, const TAccessorType& inProp, TInfoType& inInfo ) { typedef typename TAccessorType::prop_type TPropertyType; PxU32 count( inProp.size( mObj ) ); PxInlineArray<TPropertyType,5> theData; theData.resize( count ); PxClassInfoTraits<TInfoType> theTraits; PX_UNUSED(theTraits); PxU32 numItems = inProp.get( mObj, theData.begin(), count ); PX_ASSERT( numItems == count ); for( PxU32 idx =0; idx < numItems; ++idx ) { pushName( inProp.name() ); handleComplexObj( *this, &theData[idx], inInfo ); popName(); } } template<typename TAccessorType, typename TInfoType> void extendedIndexedProperty( PxU32* /*key*/, const TAccessorType& inProp, TInfoType& /*inInfo */) { typedef typename TAccessorType::prop_type TPropertyType; PxU32 count( inProp.size( mObj ) ); PxInlineArray<TPropertyType,5> theData; theData.resize( count ); for(PxU32 i = 0; i < count; ++i) { char buffer[32] = { 0 }; sprintf( buffer, "id_%u", i ); pushName( buffer ); TPropertyType propVal = inProp.get( mObj, i ); TInfoType& infoType = PxClassInfoTraits<TPropertyType>().Info; handleComplexObj(*this, &propVal, infoType); popName(); } } template<typename TAccessorType, typename TInfoType> void PxFixedSizeLookupTableProperty( PxU32* /*key*/, TAccessorType& inProp, TInfoType& /*inInfo */) { typedef typename TAccessorType::prop_type TPropertyType; PxU32 count( inProp.size( mObj ) ); PxU32 index = 0; for(PxU32 i = 0; i < count; ++i) { char buffer[32] = { 0 }; sprintf( buffer, "id_%u", index++ ); pushName( buffer ); TPropertyType propVal = inProp.getX( mObj , i); writeProperty( mWriter, mCollection, mTempBuffer, topName(), propVal ); popName(); sprintf( buffer, "id_%u", index++ ); pushName( buffer ); propVal = inProp.getY( mObj , i); writeProperty( mWriter, mCollection, mTempBuffer, topName(), propVal ); popName(); } } void handleShapes( const PxRigidActorShapeCollection& inProp ) { physx::Sn::handleShapes( *this, inProp ); } void handleShapeMaterials( const PxShapeMaterialsProperty& inProp ) { physx::Sn::handleShapeMaterials( *this, inProp ); } void 
handleRigidActorGlobalPose(const PxRigidActorGlobalPosePropertyInfo& inProp) { PxRepXPropertyAccessor<PxPropertyInfoName::PxRigidActor_GlobalPose, PxRigidActor, const PxTransform &, PxTransform> theAccessor(inProp); simpleProperty(PxPropertyInfoName::PxRigidActor_GlobalPose, theAccessor); } }; template<typename TObjType> struct RepXVisitorWriter : RepXVisitorWriterBase<TObjType> { RepXVisitorWriter( TNameStack& ns, XmlWriter& writer, const TObjType* obj, MemoryBuffer& buf, PxCollection& collection ) : RepXVisitorWriterBase<TObjType>( ns, writer, obj, buf, collection ) { } RepXVisitorWriter( const RepXVisitorWriter<TObjType>& other ) : RepXVisitorWriterBase<TObjType>( other ) { } }; template<> struct RepXVisitorWriter<PxArticulationLink> : RepXVisitorWriterBase<PxArticulationLink> { RepXVisitorWriter( TNameStack& ns, XmlWriter& writer, const PxArticulationLink* obj, MemoryBuffer& buf, PxCollection& collection ) : RepXVisitorWriterBase<PxArticulationLink>( ns, writer, obj, buf, collection ) { } RepXVisitorWriter( const RepXVisitorWriter<PxArticulationLink>& other ) : RepXVisitorWriterBase<PxArticulationLink>( other ) { } void handleIncomingJoint( const TIncomingJointPropType& prop ) { const PxArticulationJointReducedCoordinate* joint( prop.get( mObj ) ); if (joint) { pushName( "Joint" ); handleComplexObj( *this, joint, PxArticulationJointReducedCoordinateGeneratedInfo()); popName(); } } }; typedef PxProfileHashMap< const PxSerialObjectId, const PxArticulationLink* > TArticulationLinkLinkMap; static void recurseAddLinkAndChildren( const PxArticulationLink* inLink, PxInlineArray<const PxArticulationLink*, 64>& ioLinks ) { ioLinks.pushBack( inLink ); PxInlineArray<PxArticulationLink*, 8> theChildren; PxU32 childCount( inLink->getNbChildren() ); theChildren.resize( childCount ); inLink->getChildren( theChildren.begin(), childCount ); for ( PxU32 idx = 0; idx < childCount; ++idx ) recurseAddLinkAndChildren( theChildren[idx], ioLinks ); } template<> struct RepXVisitorWriter<PxArticulationReducedCoordinate> : RepXVisitorWriterBase<PxArticulationReducedCoordinate> { TArticulationLinkLinkMap& mArticulationLinkParents; RepXVisitorWriter(TNameStack& ns, XmlWriter& writer, const PxArticulationReducedCoordinate* inArticulation, MemoryBuffer& buf, PxCollection& collection, TArticulationLinkLinkMap* artMap = NULL) : RepXVisitorWriterBase<PxArticulationReducedCoordinate>(ns, writer, inArticulation, buf, collection) , mArticulationLinkParents(*artMap) { PxInlineArray<PxArticulationLink*, 64, PxProfileWrapperReflectionAllocator<PxArticulationLink*> > linkList(PxProfileWrapperReflectionAllocator<PxArticulationLink*>(buf.mManager->getWrapper())); PxU32 numLinks = inArticulation->getNbLinks(); linkList.resize(numLinks); inArticulation->getLinks(linkList.begin(), numLinks); for (PxU32 idx = 0; idx < numLinks; ++idx) { const PxArticulationLink* theLink(linkList[idx]); PxInlineArray<PxArticulationLink*, 64> theChildList; PxU32 numChildren = theLink->getNbChildren(); theChildList.resize(numChildren); theLink->getChildren(theChildList.begin(), numChildren); for (PxU32 childIdx = 0; childIdx < numChildren; ++childIdx) mArticulationLinkParents.insert(static_cast<uint64_t>(size_t(theChildList[childIdx])), theLink); } } RepXVisitorWriter(const RepXVisitorWriter<PxArticulationReducedCoordinate>& other) : RepXVisitorWriterBase<PxArticulationReducedCoordinate>(other) , mArticulationLinkParents(other.mArticulationLinkParents) { } template<typename TAccessorType, typename TInfoType> void complexProperty(PxU32* /*key*/, const 
TAccessorType& inProp, TInfoType& inInfo) { typedef typename TAccessorType::prop_type TPropertyType; TPropertyType propVal = inProp.get(mObj); handleComplexObj(*this, &propVal, inInfo); } void writeArticulationLink(const PxArticulationLink* inLink) { pushName("PxArticulationLink"); gotoTopName(); const TArticulationLinkLinkMap::Entry* theParentPtr = mArticulationLinkParents.find(static_cast<uint64_t>(size_t(inLink))); if (theParentPtr != NULL) writeProperty(mWriter, mCollection, mTempBuffer, "Parent", theParentPtr->second); writeProperty(mWriter, mCollection, mTempBuffer, "Id", inLink); PxArticulationLinkGeneratedInfo info; handleComplexObj(*this, inLink, info); popName(); } void handleArticulationLinks(const PxArticulationLinkCollectionProp& inProp) { //topologically sort the links as per my discussion with Dilip because //links aren't guaranteed to have the parents before the children in the //overall link list and it is unlikely to be done by beta 1. PxU32 count(inProp.size(mObj)); if (count) { PxInlineArray<PxArticulationLink*, 64> theLinks; theLinks.resize(count); inProp.get(mObj, theLinks.begin(), count); PxInlineArray<const PxArticulationLink*, 64> theSortedLinks; for (PxU32 idx = 0; idx < count; ++idx) { const PxArticulationLink* theLink(theLinks[idx]); if (mArticulationLinkParents.find(static_cast<uint64_t>(size_t(theLink))) == NULL) recurseAddLinkAndChildren(theLink, theSortedLinks); } PX_ASSERT(theSortedLinks.size() == count); for (PxU32 idx = 0; idx < count; ++idx) writeArticulationLink(theSortedLinks[idx]); popName(); } } private: RepXVisitorWriter<PxArticulationReducedCoordinate>& operator=(const RepXVisitorWriter<PxArticulationReducedCoordinate>&); }; template<> struct RepXVisitorWriter<PxShape> : RepXVisitorWriterBase<PxShape> { RepXVisitorWriter( TNameStack& ns, XmlWriter& writer, const PxShape* obj, MemoryBuffer& buf, PxCollection& collection ) : RepXVisitorWriterBase<PxShape>( ns, writer, obj, buf, collection ) { } RepXVisitorWriter( const RepXVisitorWriter<PxShape>& other ) : RepXVisitorWriterBase<PxShape>( other ) { } template<typename GeometryType> inline void writeGeomProperty( const PxShapeGeomProperty& inProp, const char* inTypeName ) { pushName( "Geometry" ); pushName( inTypeName ); GeometryType theType; inProp.getGeometry( mObj, theType ); PxClassInfoTraits<GeometryType> theTraits; PxU32 count = theTraits.Info.totalPropertyCount(); if(count) { handleComplexObj( *this, &theType, theTraits.Info); } else { writeProperty(mWriter, mTempBuffer, inTypeName); } popName(); popName(); } void handleGeomProperty( const PxShapeGeomProperty& inProp ) { switch( mObj->getGeometry().getType() ) { case PxGeometryType::eSPHERE: writeGeomProperty<PxSphereGeometry>( inProp, "PxSphereGeometry" ); break; case PxGeometryType::ePLANE: writeGeomProperty<PxPlaneGeometry>( inProp, "PxPlaneGeometry" ); break; case PxGeometryType::eCAPSULE: writeGeomProperty<PxCapsuleGeometry>( inProp, "PxCapsuleGeometry" ); break; case PxGeometryType::eBOX: writeGeomProperty<PxBoxGeometry>( inProp, "PxBoxGeometry" ); break; case PxGeometryType::eCONVEXMESH: writeGeomProperty<PxConvexMeshGeometry>( inProp, "PxConvexMeshGeometry" ); break; case PxGeometryType::eTRIANGLEMESH: writeGeomProperty<PxTriangleMeshGeometry>( inProp, "PxTriangleMeshGeometry" ); break; case PxGeometryType::eHEIGHTFIELD: writeGeomProperty<PxHeightFieldGeometry>( inProp, "PxHeightFieldGeometry" ); break; case PxGeometryType::eTETRAHEDRONMESH: writeGeomProperty<PxTetrahedronMeshGeometry>( inProp, "PxTetrahedronMeshGeometry" ); break; 
default: PX_ASSERT( false ); } } }; template<typename TObjType> inline void writeAllProperties( TNameStack& inNameStack, const TObjType* inObj, XmlWriter& writer, MemoryBuffer& buffer, PxCollection& collection ) { RepXVisitorWriter<TObjType> newVisitor( inNameStack, writer, inObj, buffer, collection ); RepXPropertyFilter<RepXVisitorWriter<TObjType> > theOp( newVisitor ); PxClassInfoTraits<TObjType> info; info.Info.visitBaseProperties( theOp ); info.Info.visitInstanceProperties( theOp ); } template<typename TObjType> inline void writeAllProperties( TNameStack& inNameStack, TObjType* inObj, XmlWriter& writer, MemoryBuffer& buffer, PxCollection& collection ) { RepXVisitorWriter<TObjType> newVisitor( inNameStack, writer, inObj, buffer, collection ); RepXPropertyFilter<RepXVisitorWriter<TObjType> > theOp( newVisitor ); PxClassInfoTraits<TObjType> info; info.Info.visitBaseProperties( theOp ); info.Info.visitInstanceProperties( theOp ); } template<typename TObjType> inline void writeAllProperties( const TObjType* inObj, XmlWriter& writer, MemoryBuffer& buffer, PxCollection& collection ) { TNameStack theNames( buffer.mManager->getWrapper() ); writeAllProperties( theNames, inObj, writer, buffer, collection ); } template<typename TObjType, typename TWriterType, typename TInfoType> inline void handleComplexObj( TWriterType& oldVisitor, const TObjType* inObj, const TInfoType& /*info*/) { writeAllProperties( oldVisitor.mNameStack, inObj, oldVisitor.mWriter, oldVisitor.mTempBuffer, oldVisitor.mCollection ); } template<typename TObjType, typename TWriterType> inline void handleComplexObj( TWriterType& oldVisitor, const TObjType* inObj, const PxUnknownClassInfo& /*info*/) { writeProperty( oldVisitor.mWriter, oldVisitor.mCollection, oldVisitor.mTempBuffer, oldVisitor.topName(), *inObj ); } } } #endif
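// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the PhysX sources): a minimal, self-contained
// example of the flag-to-string pattern used by writeFlagsBuffer/writeFlagsProperty
// above, where a bitmask is serialized as a '|'-separated list of names looked up
// in a null-terminated name table. The FlagEntry struct, the example table and
// flagsToString/exampleFlagsDemo below are hypothetical; the real code drives the
// same loop from PxU32ToName tables generated by the SDK metadata system.
#include <cstdint>
#include <string>
#include <iostream>

struct FlagEntry { const char* name; uint32_t value; };

// Append the name of every bit that is set, mirroring the "if bit set, append
// name, separated by '|'" loop in writeFlagsBuffer.
static std::string flagsToString(uint32_t flags, const FlagEntry* table)
{
    std::string out;
    for (const FlagEntry* e = table; e->name != nullptr; ++e)
    {
        if ((e->value & flags) != 0)
        {
            if (!out.empty())
                out += "|";
            out += e->name;
        }
    }
    return out;
}

// Hypothetical flag table, null-terminated like the PxU32ToName tables.
static const FlagEntry gExampleShapeFlags[] = {
    { "eSIMULATION_SHAPE",  1u << 0 },
    { "eSCENE_QUERY_SHAPE", 1u << 1 },
    { "eVISUALIZATION",     1u << 2 },
    { nullptr, 0 }
};

static void exampleFlagsDemo()
{
    // Prints "eSIMULATION_SHAPE|eVISUALIZATION".
    std::cout << flagsToString((1u << 0) | (1u << 2), gExampleShapeFlags) << std::endl;
}
// ---------------------------------------------------------------------------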
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Xml/SnJointRepXSerializer.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SN_JOINT_REPX_SERIALIZER_H #define SN_JOINT_REPX_SERIALIZER_H /** \addtogroup RepXSerializers @{ */ #include "extensions/PxRepXSimpleType.h" #include "SnRepXSerializerImpl.h" #if !PX_DOXYGEN namespace physx { #endif class XmlReader; class XmlMemoryAllocator; class XmlWriter; class MemoryBuffer; template<typename TJointType> struct PX_DEPRECATED PxJointRepXSerializer : public RepXSerializerImpl<TJointType> { PxJointRepXSerializer(PxAllocatorCallback& inAllocator) : RepXSerializerImpl<TJointType>(inAllocator) {} virtual PxRepXObject fileToObject(XmlReader& inReader, XmlMemoryAllocator& inAllocator, PxRepXInstantiationArgs& inArgs, PxCollection* inCollection); virtual void objectToFileImpl(const TJointType* inObj, PxCollection* inCollection, XmlWriter& inWriter, MemoryBuffer& inTempBuffer, PxRepXInstantiationArgs&); virtual TJointType* allocateObject(PxRepXInstantiationArgs&) { return NULL; } }; #if PX_SUPPORT_EXTERN_TEMPLATE // explicit template instantiations declarations extern template struct PX_DEPRECATED PxJointRepXSerializer<PxD6Joint>; extern template struct PX_DEPRECATED PxJointRepXSerializer<PxDistanceJoint>; extern template struct PX_DEPRECATED PxJointRepXSerializer<PxContactJoint>; extern template struct PX_DEPRECATED PxJointRepXSerializer<PxFixedJoint>; extern template struct PX_DEPRECATED PxJointRepXSerializer<PxPrismaticJoint>; extern template struct PX_DEPRECATED PxJointRepXSerializer<PxRevoluteJoint>; extern template struct PX_DEPRECATED PxJointRepXSerializer<PxSphericalJoint>; #endif #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif
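// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the PhysX sources): the extern-template idiom
// used in the PX_SUPPORT_EXTERN_TEMPLATE block above. The "extern template"
// declarations tell every translation unit that includes the header not to
// implicitly instantiate the listed specializations, because exactly one .cpp
// provides the explicit instantiation definitions. The names below (Serializer,
// exampleExternTemplate) are hypothetical; both halves of the idiom are shown in
// one snippet for brevity, which is also legal C++ within a single translation unit.

template<typename T>
struct Serializer
{
    T mValue;
    void write(const T& v) { mValue = v; }
};

// Header side: suppress implicit instantiation in including translation units.
extern template struct Serializer<int>;
extern template struct Serializer<float>;

// Source-file side: the single set of explicit instantiation definitions.
template struct Serializer<int>;
template struct Serializer<float>;

inline void exampleExternTemplate()
{
    Serializer<int> s;
    s.write(42); // uses the shared instantiation, no per-TU code bloat
}
// ---------------------------------------------------------------------------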
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Xml/SnRepX3_2Defaults.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. DEFINE_REPX_DEFAULT_PROPERTY("PxTolerancesScale.Length", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxTolerancesScale.Mass", "1000" ) DEFINE_REPX_DEFAULT_PROPERTY("PxTolerancesScale.Speed", "10" ) DEFINE_REPX_DEFAULT_PROPERTY("PxBoxGeometry.HalfExtents", "0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphereGeometry.Radius", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxConvexMeshGeometry.Scale.Scale", "1 1 1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxConvexMeshGeometry.Scale.Rotation", "0 0 0 1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxConvexMeshGeometry.ConvexMesh", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxTriangleMeshGeometry.Scale.Scale", "1 1 1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxTriangleMeshGeometry.Scale.Rotation", "0 0 0 1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxTriangleMeshGeometry.MeshFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxTriangleMeshGeometry.TriangleMesh", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxHeightFieldGeometry.HeightField", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxHeightFieldGeometry.HeightScale", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxHeightFieldGeometry.RowScale", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxHeightFieldGeometry.ColumnScale", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxHeightFieldGeometry.HeightFieldFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxMaterial.DynamicFriction", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxMaterial.StaticFriction", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxMaterial.Restitution", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxMaterial.Flags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxMaterial.FrictionCombineMode", "eAVERAGE" ) DEFINE_REPX_DEFAULT_PROPERTY("PxMaterial.RestitutionCombineMode", "eAVERAGE" ) DEFINE_REPX_DEFAULT_PROPERTY("PxShape.LocalPose", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxShape.SimulationFilterData", "0 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxShape.QueryFilterData", "0 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxShape.ContactOffset", "0.02" 
) DEFINE_REPX_DEFAULT_PROPERTY("PxShape.RestOffset", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxShape.Flags", "eSIMULATION_SHAPE|eSCENE_QUERY_SHAPE|eVISUALIZATION" ) DEFINE_REPX_DEFAULT_PROPERTY("PxShape.Name", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidStatic.Name", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidStatic.ActorFlags", "eVISUALIZATION" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidStatic.DominanceGroup", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidStatic.OwnerClient", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidStatic.ClientBehaviorBits", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidStatic.GlobalPose", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.Name", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.ActorFlags", "eVISUALIZATION" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.DominanceGroup", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.OwnerClient", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.ClientBehaviorBits", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.GlobalPose", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.CMassLocalPose", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.Mass", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.MassSpaceInertiaTensor", "1 1 1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.LinearVelocity", "0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.AngularVelocity", "0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.LinearDamping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.AngularDamping", "0.05" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.MaxAngularVelocity", "7" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.SleepThreshold", "0.005" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.SolverIterationCounts.minPositionIters", "4" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.SolverIterationCounts.minVelocityIters", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.ContactReportThreshold", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.RigidDynamicFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.ParentPose", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.ChildPose", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.TargetOrientation", "0 0 0 1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.TargetVelocity", "0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.InternalCompliance", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.ExternalCompliance", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.SwingLimit.yLimit", "0.78539816339" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.SwingLimit.zLimit", "0.78539816339" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.TangentialSpring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.TangentialDamping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.SwingLimitContactDistance", "0.05" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.SwingLimitEnabled", "false" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.TwistLimit.lower", "-0.78539816339" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.TwistLimit.upper", "0.78539816339" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.TwistLimitEnabled", "false" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.TwistLimitContactDistance", "0.05" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationLink.Name", "" ) 
DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationLink.ActorFlags", "eVISUALIZATION" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationLink.DominanceGroup", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationLink.OwnerClient", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationLink.ClientBehaviorBits", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationLink.GlobalPose", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationLink.CMassLocalPose", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationLink.Mass", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationLink.MassSpaceInertiaTensor", "1 1 1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationLink.LinearVelocity", "0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationLink.AngularVelocity", "0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulation.MaxProjectionIterations", "4" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulation.SeparationTolerance", "0.1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulation.InternalDriveIterations", "4" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulation.ExternalDriveIterations", "4" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulation.SolverIterationCounts.minPositionIters", "4" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulation.SolverIterationCounts.minVelocityIters", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulation.SleepThreshold", "0.005" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulation.Name", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Actors.actor0", "8887040" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Actors.actor1", "8887456" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.LocalPose.eACTOR0", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.LocalPose.eACTOR1", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.BreakForce.force", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.BreakForce.torque", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.ConstraintFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Name", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Motion.eX", "eLOCKED" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Motion.eY", "eLOCKED" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Motion.eZ", "eLOCKED" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Motion.eTWIST", "eLOCKED" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Motion.eSWING1", "eLOCKED" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Motion.eSWING2", "eLOCKED" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.LinearLimit.Restitution", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.LinearLimit.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.LinearLimit.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.LinearLimit.ContactDistance", "0.05" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.LinearLimit.Value", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.TwistLimit.Restitution", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.TwistLimit.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.TwistLimit.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.TwistLimit.ContactDistance", "0.05" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.TwistLimit.Upper", "1.5708" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.TwistLimit.Lower", "-1.5708" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.SwingLimit.Restitution", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.SwingLimit.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.SwingLimit.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.SwingLimit.ContactDistance", "0.05" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.SwingLimit.YAngle", "1.5708" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.SwingLimit.ZAngle", "1.5708" ) 
DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eX.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eX.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eX.ForceLimit", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eX.Flags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eY.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eY.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eY.ForceLimit", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eY.Flags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eZ.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eZ.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eZ.ForceLimit", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eZ.Flags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eSWING.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eSWING.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eSWING.ForceLimit", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eSWING.Flags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eTWIST.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eTWIST.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eTWIST.ForceLimit", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eTWIST.Flags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eSLERP.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eSLERP.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eSLERP.ForceLimit", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eSLERP.Flags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.DrivePosition", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.DriveVelocity.linear", "0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.DriveVelocity.angular", "0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.ProjectionLinearTolerance", "1e+010" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.ProjectionAngularTolerance", "3.14159" ) DEFINE_REPX_DEFAULT_PROPERTY("PxFixedJoint.Actors.actor0", "8887040" ) DEFINE_REPX_DEFAULT_PROPERTY("PxFixedJoint.Actors.actor1", "8887456" ) DEFINE_REPX_DEFAULT_PROPERTY("PxFixedJoint.LocalPose.eACTOR0", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxFixedJoint.LocalPose.eACTOR1", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxFixedJoint.BreakForce.force", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxFixedJoint.BreakForce.torque", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxFixedJoint.ConstraintFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxFixedJoint.Name", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxFixedJoint.ProjectionLinearTolerance", "1e+010" ) DEFINE_REPX_DEFAULT_PROPERTY("PxFixedJoint.ProjectionAngularTolerance", "3.14159" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.Actors.actor0", "8887040" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.Actors.actor1", "8887456" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.LocalPose.eACTOR0", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.LocalPose.eACTOR1", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.BreakForce.force", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.BreakForce.torque", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.ConstraintFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.Name", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.MinDistance", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.MaxDistance", "0" ) 
DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.Tolerance", "0.025" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.DistanceJointFlags", "eMAX_DISTANCE_ENABLED" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.Actors.actor0", "8887040" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.Actors.actor1", "8887456" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.LocalPose.eACTOR0", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.LocalPose.eACTOR1", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.BreakForce.force", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.BreakForce.torque", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.ConstraintFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.Name", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.Limit.Restitution", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.Limit.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.Limit.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.Limit.ContactDistance", "0.05" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.Limit.Upper", "1.5708" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.Limit.Lower", "-1.5708" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.DriveVelocity", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.DriveForceLimit", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.DriveGearRatio", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.RevoluteJointFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.ProjectionLinearTolerance", "1e+010" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.ProjectionAngularTolerance", "3.14159" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.Actors.actor0", "8887040" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.Actors.actor1", "8887456" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.LocalPose.eACTOR0", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.LocalPose.eACTOR1", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.BreakForce.force", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.BreakForce.torque", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.ConstraintFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.Name", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.Limit.Restitution", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.Limit.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.Limit.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.Limit.ContactDistance", "0.01" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.Limit.Upper", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.Limit.Lower", "-3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.PrismaticJointFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.ProjectionLinearTolerance", "1e+010" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.ProjectionAngularTolerance", "3.14159" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.Actors.actor0", "8887040" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.Actors.actor1", "8887456" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.LocalPose.eACTOR0", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.LocalPose.eACTOR1", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.BreakForce.force", "3.40282e+038" ) 
DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.BreakForce.torque", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.ConstraintFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.Name", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.LimitCone.Restitution", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.LimitCone.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.LimitCone.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.LimitCone.ContactDistance", "0.05" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.LimitCone.YAngle", "1.5708" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.LimitCone.ZAngle", "1.5708" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.SphericalJointFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.ProjectionLinearTolerance", "1e+010" ) DEFINE_REPX_DEFAULT_PROPERTY("PxCloth.Name", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxCloth.ActorFlags", "eVISUALIZATION" ) DEFINE_REPX_DEFAULT_PROPERTY("PxCloth.DominanceGroup", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxCloth.OwnerClient", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxCloth.ClientBehaviorBits", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxCloth.MotionConstraintScaleBias.scale", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxCloth.MotionConstraintScaleBias.bias", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxCloth.GlobalPose", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxCloth.ExternalAcceleration", "0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxCloth.DampingCoefficient", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxCloth.SolverFrequency", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxCloth.SleepLinearVelocity", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxCloth.InertiaScale", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxCloth.FrictionCoefficient", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxCloth.DragCoefficient", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxCloth.CollisionMassScale", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleSystem.ActorFlags", "eVISUALIZATION" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleSystem.DominanceGroup", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleSystem.OwnerClient", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleSystem.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleSystem.ExternalAcceleration", "0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleSystem.ParticleMass", "0.001" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleSystem.Restitution", "0.5" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleSystem.DynamicFriction", "0.05" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleSystem.StaticFriction", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleSystem.SimulationFilterData", "0 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleSystem.MaxMotionDistance", "0.06" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleSystem.RestOffset", "0.004" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleSystem.ContactOffset", "0.008" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleSystem.GridSize", "0.96" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleSystem.ProjectionPlane", "0 0 1 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleSystem.ParticleReadDataFlags", "ePOSITION_BUFFER|eFLAGS_BUFFER" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleSystem.ParticleBaseFlags", "eCOLLISION_WITH_DYNAMIC_ACTORS|eENABLED|ePER_PARTICLE_REST_OFFSET|ePER_PARTICLE_COLLISION_CACHE_HINT") DEFINE_REPX_DEFAULT_PROPERTY("PxParticleFluid.ActorFlags", "eVISUALIZATION" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleFluid.DominanceGroup", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleFluid.OwnerClient", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleFluid.Damping", "0" ) 
DEFINE_REPX_DEFAULT_PROPERTY("PxParticleFluid.ExternalAcceleration", "0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleFluid.ParticleMass", "0.001" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleFluid.Restitution", "0.5" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleFluid.DynamicFriction", "0.05" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleFluid.StaticFriction", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleFluid.SimulationFilterData", "0 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleFluid.MaxMotionDistance", "0.06" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleFluid.RestOffset", "0.004" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleFluid.ContactOffset", "0.008" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleFluid.GridSize", "0.64" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleFluid.ProjectionPlane", "0 0 1 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleFluid.Stiffness", "20" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleFluid.Viscosity", "6" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleFluid.RestParticleDistance", "0.02" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleFluid.ParticleReadDataFlags", "ePOSITION_BUFFER|eFLAGS_BUFFER" ) DEFINE_REPX_DEFAULT_PROPERTY("PxParticleFluid.ParticleBaseFlags", "eCOLLISION_WITH_DYNAMIC_ACTORS|eENABLED|ePER_PARTICLE_REST_OFFSET|ePER_PARTICLE_COLLISION_CACHE_HINT") DEFINE_REPX_DEFAULT_PROPERTY("PxAggregate.SelfCollision", "false" ) DEFINE_REPX_DEFAULT_PROPERTY("THEEND", "false" )
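// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the PhysX sources): this file is an "X-macro"
// list; it never defines DEFINE_REPX_DEFAULT_PROPERTY itself, so a consumer
// defines the macro, includes the file, and each entry expands into whatever the
// consumer needs. The consumer below is hypothetical (the actual PhysX consumer
// in the RepX upgrade path defines the macro differently); it simply collects
// the property-name/default-value pairs into a vector.
#include <string>
#include <utility>
#include <vector>

static std::vector<std::pair<std::string, std::string> > collectRepX32Defaults()
{
    std::vector<std::pair<std::string, std::string> > defaults;

    // Expand every DEFINE_REPX_DEFAULT_PROPERTY("Name", "Value") entry listed
    // above into a push_back of that name/value pair.
#define DEFINE_REPX_DEFAULT_PROPERTY(name, value) \
    defaults.push_back(std::make_pair(std::string(name), std::string(value)));
#include "SnRepX3_2Defaults.h"
#undef DEFINE_REPX_DEFAULT_PROPERTY

    return defaults;
}
// ---------------------------------------------------------------------------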
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Xml/SnXmlSerialization.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #include "SnXmlImpl.h" #include "foundation/PxHash.h" #include "foundation/PxHashMap.h" #include "foundation/PxString.h" #include "SnSimpleXmlWriter.h" #include "foundation/PxSort.h" #include "PsFastXml.h" #include "SnXmlMemoryPool.h" #include "PxExtensionMetaDataObjects.h" #include "SnXmlVisitorWriter.h" #include "SnXmlVisitorReader.h" #include "SnXmlMemoryAllocator.h" #include "SnXmlStringToType.h" #include "SnRepXCollection.h" #include "SnRepXUpgrader.h" #include "../SnSerializationRegistry.h" #include "CmCollection.h" using namespace physx; using namespace Sn; using namespace physx::profile; //for the foundation wrapper system. 
namespace physx { namespace Sn { class XmlNodeWriter : public SimpleXmlWriter { XmlMemoryAllocatorImpl& mParseAllocator; XmlNode* mCurrentNode; XmlNode* mTopNode; PxU32 mTabCount; public: XmlNodeWriter( XmlMemoryAllocatorImpl& inAllocator, PxU32 inTabCount = 0 ) : mParseAllocator( inAllocator ) , mCurrentNode( NULL ) , mTopNode( NULL ) , mTabCount( inTabCount ) {} XmlNodeWriter& operator=(const XmlNodeWriter&); virtual ~XmlNodeWriter(){} void onNewNode( XmlNode* newNode ) { if ( mCurrentNode != NULL ) mCurrentNode->addChild( newNode ); if ( mTopNode == NULL ) mTopNode = newNode; mCurrentNode = newNode; ++mTabCount; } XmlNode* getTopNode() const { return mTopNode; } virtual void beginTag( const char* inTagname ) { onNewNode( allocateRepXNode( &mParseAllocator.mManager, inTagname, NULL ) ); } virtual void endTag() { if ( mCurrentNode ) mCurrentNode = mCurrentNode->mParent; if ( mTabCount ) --mTabCount; } virtual void addAttribute( const char*, const char* ) { PX_ASSERT( false ); } virtual void writeContentTag( const char* inTag, const char* inContent ) { onNewNode( allocateRepXNode( &mParseAllocator.mManager, inTag, inContent ) ); endTag(); } virtual void addContent( const char* inContent ) { if ( mCurrentNode->mData ) releaseStr( &mParseAllocator.mManager, mCurrentNode->mData ); mCurrentNode->mData = copyStr( &mParseAllocator.mManager, inContent ); } virtual PxU32 tabCount() { return mTabCount; } }; struct XmlWriterImpl : public XmlWriter { PxU32 mTagDepth; SimpleXmlWriter* mWriter; MemoryBuffer* mMemBuffer; XmlWriterImpl( SimpleXmlWriter* inWriter, MemoryBuffer* inMemBuffer ) : mTagDepth( 0 ) , mWriter( inWriter ) , mMemBuffer( inMemBuffer ) { } ~XmlWriterImpl() { while( mTagDepth ) { --mTagDepth; mWriter->endTag(); } } virtual void write( const char* inName, const char* inData ) { mWriter->writeContentTag( inName, inData ); } virtual void write( const char* inName, const PxRepXObject& inLiveObject ) { (*mMemBuffer) << inLiveObject.id; writeProperty( *mWriter, *mMemBuffer, inName ); } virtual void addAndGotoChild( const char* inName ) { mWriter->beginTag( inName ); mTagDepth++; } virtual void leaveChild() { if ( mTagDepth ) { mWriter->endTag(); --mTagDepth; } } }; struct XmlParseArgs { XmlMemoryAllocatorImpl* mAllocator; PxProfileArray<RepXCollectionItem>* mCollection; XmlParseArgs( XmlMemoryAllocatorImpl* inAllocator , PxProfileArray<RepXCollectionItem>* inCollection) : mAllocator( inAllocator ) , mCollection( inCollection ) { } }; struct XmlNodeReader : public XmlReaderWriter { PxProfileAllocatorWrapper mWrapper; CMemoryPoolManager& mManager; XmlNode* mCurrentNode; XmlNode* mTopNode; PxProfileArray<XmlNode*> mContext; XmlNodeReader( XmlNode* inCurrentNode, PxAllocatorCallback& inAllocator, CMemoryPoolManager& nodePoolManager ) : mWrapper( inAllocator ) , mManager( nodePoolManager ) , mCurrentNode( inCurrentNode ) , mTopNode( inCurrentNode ) , mContext( mWrapper ) { } //Does this node exist as data in the format. 
virtual bool read( const char* inName, const char*& outData ) { XmlNode* theChild( mCurrentNode->findChildByName( inName ) ); if ( theChild ) { outData = theChild->mData; return outData && *outData; } return false; } virtual bool read( const char* inName, PxSerialObjectId& outId ) { XmlNode* theChild( mCurrentNode->findChildByName( inName ) ); if ( theChild ) { const char* theValue( theChild->mData ); strto( outId, theValue ); return true; } return false; } virtual bool gotoChild( const char* inName ) { XmlNode* theChild( mCurrentNode->findChildByName( inName ) ); if ( theChild ) { mCurrentNode =theChild; return true; } return false; } virtual bool gotoFirstChild() { if ( mCurrentNode->mFirstChild ) { mCurrentNode = mCurrentNode->mFirstChild; return true; } return false; } virtual bool gotoNextSibling() { if ( mCurrentNode->mNextSibling ) { mCurrentNode = mCurrentNode->mNextSibling; return true; } return false; } virtual PxU32 countChildren() { PxU32 retval= 0; for ( XmlNode* theChild = mCurrentNode->mFirstChild; theChild != NULL; theChild = theChild->mNextSibling ) ++retval; return retval; } virtual const char* getCurrentItemName() { return mCurrentNode->mName; } virtual const char* getCurrentItemValue() { return mCurrentNode->mData; } virtual bool leaveChild() { if ( mCurrentNode != mTopNode && mCurrentNode->mParent ) { mCurrentNode = mCurrentNode->mParent; return true; } return false; } virtual void pushCurrentContext() { mContext.pushBack( mCurrentNode ); } virtual void popCurrentContext() { if ( mContext.size() ) { mCurrentNode = mContext.back(); mContext.popBack(); } } virtual void setNode( XmlNode& inNode ) { mContext.clear(); mCurrentNode = &inNode; mTopNode = mCurrentNode; } virtual XmlReader* getParentReader() { XmlReader* retval = PX_PLACEMENT_NEW((mWrapper.getAllocator().allocate(sizeof(XmlNodeReader), "createNodeEditor", PX_FL)), XmlNodeReader) ( mTopNode, mWrapper.getAllocator(), mManager ); return retval; } virtual void addOrGotoChild( const char* inName ) { if ( gotoChild( inName )== false ) { XmlNode* newNode = allocateRepXNode( &mManager, inName, NULL ); mCurrentNode->addChild( newNode ); mCurrentNode = newNode; } } virtual void setCurrentItemValue( const char* inValue ) { mCurrentNode->mData = copyStr( &mManager, inValue ); } virtual bool removeChild( const char* name ) { XmlNode* theChild( mCurrentNode->findChildByName( name ) ); if ( theChild ) { releaseNodeAndChildren( &mManager, theChild ); return true; } return false; } virtual void release() { this->~XmlNodeReader(); mWrapper.getAllocator().deallocate(this); } private: XmlNodeReader& operator=(const XmlNodeReader&); }; PX_INLINE void freeNodeAndChildren( XmlNode* tempNode, TMemoryPoolManager& inManager ) { for( XmlNode* theNode = tempNode->mFirstChild; theNode != NULL; theNode = theNode->mNextSibling ) freeNodeAndChildren( theNode, inManager ); tempNode->orphan(); release( &inManager, tempNode ); } class XmlParser : public shdfnd::FastXml::Callback { XmlParseArgs mParseArgs; //For parse time only allocations XmlMemoryAllocatorImpl& mParseAllocator; XmlNode* mCurrentNode; XmlNode* mTopNode; public: XmlParser( XmlParseArgs inArgs, XmlMemoryAllocatorImpl& inParseAllocator ) : mParseArgs( inArgs ) , mParseAllocator( inParseAllocator ) , mCurrentNode( NULL ) , mTopNode( NULL ) { } virtual ~XmlParser(){} virtual bool processComment(const char* /*comment*/) { return true; } // 'element' is the name of the element that is being closed. // depth is the recursion depth of this element. 
// Return true to continue processing the XML file. // Return false to stop processing the XML file; leaves the read pointer of the stream right after this close tag. // The bool 'isError' indicates whether processing was stopped due to an error, or intentionally canceled early. virtual bool processClose(const char* /*element*/,physx::PxU32 /*depth*/,bool& isError) { if (NULL != mCurrentNode) { mCurrentNode = mCurrentNode->mParent; return true; } isError = true; return false; } // return true to continue processing the XML document, false to skip. virtual bool processElement( const char *elementName, // name of the element const char *elementData, // element data, null if none const shdfnd::FastXml::AttributePairs& attr, // attributes PxI32 /*lineno*/) { XmlNode* newNode = allocateRepXNode( &mParseAllocator.mManager, elementName, elementData ); if ( mCurrentNode ) mCurrentNode->addChild( newNode ); mCurrentNode = newNode; //Add the elements as children. for( PxI32 item = 0; item < attr.getNbAttr(); item ++ ) { XmlNode* node = allocateRepXNode( &mParseAllocator.mManager, attr.getKey(PxU32(item)), attr.getValue(PxU32(item)) ); mCurrentNode->addChild( node ); } if ( mTopNode == NULL ) mTopNode = newNode; return true; } XmlNode* getTopNode() { return mTopNode; } virtual void * allocate(PxU32 size) { if ( size ) return mParseAllocator.allocate(size); return NULL; } virtual void deallocate(void *mem) { if ( mem ) mParseAllocator.deallocate(reinterpret_cast<PxU8*>(mem)); } private: XmlParser& operator=(const XmlParser&); }; struct RepXCollectionSharedData { PxProfileAllocatorWrapper mWrapper; XmlMemoryAllocatorImpl mAllocator; PxU32 mRefCount; RepXCollectionSharedData( PxAllocatorCallback& inAllocator ) : mWrapper( inAllocator ) , mAllocator( inAllocator ) , mRefCount( 0 ) { } ~RepXCollectionSharedData() {} void addRef() { ++mRefCount;} void release() { if ( mRefCount ) --mRefCount; if ( !mRefCount ) { this->~RepXCollectionSharedData(); mWrapper.getAllocator().deallocate(this);} } }; struct SharedDataPtr { RepXCollectionSharedData* mData; SharedDataPtr( RepXCollectionSharedData* inData ) : mData( inData ) { mData->addRef(); } SharedDataPtr( const SharedDataPtr& inOther ) : mData( inOther.mData ) { mData->addRef(); } SharedDataPtr& operator=( const SharedDataPtr& inOther ); ~SharedDataPtr() { mData->release(); mData = NULL; } RepXCollectionSharedData* operator->() { return mData; } const RepXCollectionSharedData* operator->() const { return mData; } }; class RepXCollectionImpl : public RepXCollection, public PxUserAllocated { SharedDataPtr mSharedData; XmlMemoryAllocatorImpl& mAllocator; PxSerializationRegistry& mSerializationRegistry; PxProfileArray<RepXCollectionItem> mCollection; TMemoryPoolManager mSerializationManager; MemoryBuffer mPropertyBuffer; PxTolerancesScale mScale; PxVec3 mUpVector; const char* mVersionStr; PxCollection* mPxCollection; public: RepXCollectionImpl( PxSerializationRegistry& inRegistry, PxAllocatorCallback& inAllocator, PxCollection& inPxCollection ) : mSharedData( &PX_NEW_REPX_SERIALIZER( RepXCollectionSharedData )) , mAllocator( mSharedData->mAllocator ) , mSerializationRegistry( inRegistry ) , mCollection( mSharedData->mWrapper ) , mSerializationManager( inAllocator ) , mPropertyBuffer( &mSerializationManager ) , mScale(0.f, 0.f) , mUpVector( 0,0,0 ) , mVersionStr( getLatestVersion() ) , mPxCollection( &inPxCollection ) { PX_ASSERT( mScale.isValid() == false ); } RepXCollectionImpl( PxSerializationRegistry& inRegistry, const RepXCollectionImpl& inSrc, const char* 
inNewVersion ) : mSharedData( inSrc.mSharedData ) , mAllocator( mSharedData->mAllocator ) , mSerializationRegistry( inRegistry ) , mCollection( mSharedData->mWrapper ) , mSerializationManager( mSharedData->mWrapper.getAllocator() ) , mPropertyBuffer( &mSerializationManager ) , mScale( inSrc.mScale ) , mUpVector( inSrc.mUpVector ) , mVersionStr( inNewVersion ) , mPxCollection( NULL ) { } virtual ~RepXCollectionImpl() { PxU32 numItems = mCollection.size(); for ( PxU32 idx = 0; idx < numItems; ++idx ) { XmlNode* theNode = mCollection[idx].descriptor; releaseNodeAndChildren( &mAllocator.mManager, theNode ); } } RepXCollectionImpl& operator=(const RepXCollectionImpl&); virtual void destroy() { PxProfileAllocatorWrapper tempWrapper( mSharedData->mWrapper.getAllocator() ); this->~RepXCollectionImpl(); tempWrapper.getAllocator().deallocate(this); } virtual void setTolerancesScale(const PxTolerancesScale& inScale) { mScale = inScale; } virtual PxTolerancesScale getTolerancesScale() const { return mScale; } virtual void setUpVector( const PxVec3& inUpVector ) { mUpVector = inUpVector; } virtual PxVec3 getUpVector() const { return mUpVector; } PX_INLINE RepXCollectionItem findItemBySceneItem( const PxRepXObject& inObject ) const { //See if the object is in the collection for ( PxU32 idx =0; idx < mCollection.size(); ++idx ) if ( mCollection[idx].liveObject.serializable == inObject.serializable ) return mCollection[idx]; return RepXCollectionItem(); } virtual RepXAddToCollectionResult addRepXObjectToCollection( const PxRepXObject& inObject, PxCollection* inCollection, PxRepXInstantiationArgs& inArgs ) { PX_ASSERT( inObject.serializable ); PX_ASSERT( inObject.id ); if ( inObject.serializable == NULL || inObject.id == 0 ) return RepXAddToCollectionResult( RepXAddToCollectionResult::InvalidParameters ); PxRepXSerializer* theSerializer = mSerializationRegistry.getRepXSerializer( inObject.typeName ); if ( theSerializer == NULL ) return RepXAddToCollectionResult( RepXAddToCollectionResult::SerializerNotFound ); RepXCollectionItem existing = findItemBySceneItem( inObject ); if ( existing.liveObject.serializable ) return RepXAddToCollectionResult( RepXAddToCollectionResult::AlreadyInCollection, existing.liveObject.id ); XmlNodeWriter theXmlWriter( mAllocator, 1 ); XmlWriterImpl theRepXWriter( &theXmlWriter, &mPropertyBuffer ); { SimpleXmlWriter::STagWatcher theWatcher( theXmlWriter, inObject.typeName ); writeProperty( theXmlWriter, mPropertyBuffer, "Id", inObject.id ); theSerializer->objectToFile( inObject, inCollection, theRepXWriter, mPropertyBuffer,inArgs ); } mCollection.pushBack( RepXCollectionItem( inObject, theXmlWriter.getTopNode() ) ); return RepXAddToCollectionResult( RepXAddToCollectionResult::Success, inObject.id ); } virtual bool instantiateCollection( PxRepXInstantiationArgs& inArgs, PxCollection& inCollection ) { for ( PxU32 idx =0; idx < mCollection.size(); ++idx ) { RepXCollectionItem theItem( mCollection[idx] ); PxRepXSerializer* theSerializer = mSerializationRegistry.getRepXSerializer( theItem.liveObject.typeName ); if (theSerializer ) { XmlNodeReader theReader( theItem.descriptor, mAllocator.getAllocator(), mAllocator.mManager ); XmlMemoryAllocatorImpl instantiationAllocator( mAllocator.getAllocator() ); PxRepXObject theLiveObject = theSerializer->fileToObject( theReader, instantiationAllocator, inArgs, &inCollection ); if (theLiveObject.isValid()) { const PxBase* s = reinterpret_cast<const PxBase*>( theLiveObject.serializable ) ; inCollection.add( *const_cast<PxBase*>(s), 
PxSerialObjectId( theItem.liveObject.id )); } else return false; } else { PxGetFoundation().error(PxErrorCode::eINTERNAL_ERROR, PX_FL, "PxSerialization::createCollectionFromXml: " "PxRepXSerializer missing for type %s", theItem.liveObject.typeName); return false; } } return true; } void saveXmlNode( XmlNode* inNode, SimpleXmlWriter& inWriter ) { XmlNode* theNode( inNode ); if ( theNode->mData && *theNode->mData && theNode->mFirstChild == NULL ) inWriter.writeContentTag( theNode->mName, theNode->mData ); else { inWriter.beginTag( theNode->mName ); if ( theNode->mData && *theNode->mData ) inWriter.addContent( theNode->mData ); for ( XmlNode* theChild = theNode->mFirstChild; theChild != NULL; theChild = theChild->mNextSibling ) saveXmlNode( theChild, inWriter ); inWriter.endTag(); } } virtual void save( PxOutputStream& inStream ) { SimpleXmlWriterImpl<PxOutputStream> theWriter( inStream, mAllocator.getAllocator() ); theWriter.beginTag( "PhysXCollection" ); theWriter.addAttribute( "version", mVersionStr ); { XmlWriterImpl theRepXWriter( &theWriter, &mPropertyBuffer ); writeProperty( theWriter, mPropertyBuffer, "UpVector", mUpVector ); theRepXWriter.addAndGotoChild( "Scale" ); writeAllProperties( &mScale, theRepXWriter, mPropertyBuffer, *mPxCollection); theRepXWriter.leaveChild(); } for ( PxU32 idx =0; idx < mCollection.size(); ++idx ) { RepXCollectionItem theItem( mCollection[idx] ); XmlNode* theNode( theItem.descriptor ); saveXmlNode( theNode, theWriter ); } } void load( PxInputData& inFileBuf, SerializationRegistry& s ) { inFileBuf.seek(0); XmlParser theParser( XmlParseArgs( &mAllocator, &mCollection ), mAllocator ); shdfnd::FastXml* theFastXml = shdfnd::createFastXml( &theParser ); theFastXml->processXml( inFileBuf ); XmlNode* theTopNode = theParser.getTopNode(); if ( theTopNode != NULL ) { { XmlMemoryAllocatorImpl instantiationAllocator( mAllocator.getAllocator() ); XmlNodeReader theReader( theTopNode, mAllocator.getAllocator(), mAllocator.mManager ); readProperty( theReader, "UpVector", mUpVector ); if ( theReader.gotoChild( "Scale" ) ) { readAllProperties( PxRepXInstantiationArgs( s.getPhysics() ), theReader, &mScale, instantiationAllocator, *mPxCollection); theReader.leaveChild(); } const char* verStr = NULL; if ( theReader.read( "version", verStr ) ) mVersionStr = verStr; } for ( XmlNode* theChild = theTopNode->mFirstChild; theChild != NULL; theChild = theChild->mNextSibling ) { if ( physx::Pxstricmp( theChild->mName, "scale" ) == 0 || physx::Pxstricmp( theChild->mName, "version" ) == 0 || physx::Pxstricmp( theChild->mName, "upvector" ) == 0 ) continue; XmlNodeReader theReader( theChild, mAllocator.getAllocator(), mAllocator.mManager ); PxRepXObject theObject; theObject.typeName = theChild->mName; theObject.serializable = NULL; PxSerialObjectId theId = 0; theReader.read( "Id", theId ); theObject.id = theId; mCollection.pushBack( RepXCollectionItem( theObject, theChild ) ); } } else { PxGetFoundation().error(PxErrorCode::eDEBUG_WARNING, PX_FL, "Cannot parse any object from the input buffer, please check the input repx data."); } theFastXml->release(); } virtual const char* getVersion() { return mVersionStr; } virtual const RepXCollectionItem* begin() const { return mCollection.begin(); } virtual const RepXCollectionItem* end() const { return mCollection.end(); } virtual RepXCollection& createCollection( const char* inVersionStr ) { PxAllocatorCallback& allocator = mSharedData->mWrapper.getAllocator(); RepXCollectionImpl* retval = 
PX_PLACEMENT_NEW((allocator.allocate(sizeof(RepXCollectionImpl), "createCollection", PX_FL)), RepXCollectionImpl) ( mSerializationRegistry, *this, inVersionStr ); return *retval; } //Performs a deep copy of the repx node. virtual XmlNode* copyRepXNode( const XmlNode* srcNode ) { return physx::Sn::copyRepXNode( &mAllocator.mManager, srcNode ); } virtual void addCollectionItem( RepXCollectionItem inItem ) { mCollection.pushBack( inItem ); } virtual PxAllocatorCallback& getAllocator() { return mSharedData->mAllocator.getAllocator(); } //Create a new repx node with this name. Its value is unset. virtual XmlNode& createRepXNode( const char* name ) { XmlNode* newNode = allocateRepXNode( &mSharedData->mAllocator.mManager, name, NULL ); return *newNode; } //Release this when finished. virtual XmlReaderWriter& createNodeEditor() { PxAllocatorCallback& allocator = mSharedData->mWrapper.getAllocator(); XmlReaderWriter* retval = PX_PLACEMENT_NEW((allocator.allocate(sizeof(XmlNodeReader), "createNodeEditor", PX_FL)), XmlNodeReader) ( NULL, allocator, mAllocator.mManager ); return *retval; } }; const char* RepXCollection::getLatestVersion() { #define TOSTR_(x) #x #define CONCAT_(a, b, c) TOSTR_(a.##b.##c) #define MAKE_VERSION_STR(a,b,c) CONCAT_(a, b, c) return MAKE_VERSION_STR(PX_PHYSICS_VERSION_MAJOR,PX_PHYSICS_VERSION_MINOR,PX_PHYSICS_VERSION_BUGFIX); } static RepXCollection* create(SerializationRegistry& s, PxAllocatorCallback& inAllocator, PxCollection& inCollection ) { return PX_PLACEMENT_NEW((inAllocator.allocate(sizeof(RepXCollectionImpl), "RepXCollection::create", PX_FL)), RepXCollectionImpl) ( s, inAllocator, inCollection ); } static RepXCollection* create(SerializationRegistry& s, PxInputData &data, PxAllocatorCallback& inAllocator, PxCollection& inCollection ) { RepXCollectionImpl* theCollection = static_cast<RepXCollectionImpl*>( create(s, inAllocator, inCollection ) ); theCollection->load( data, s ); return theCollection; } } bool PxSerialization::serializeCollectionToXml( PxOutputStream& outputStream, PxCollection& collection, PxSerializationRegistry& sr, const PxCookingParams* params, const PxCollection* externalRefs, PxXmlMiscParameter* inArgs ) { if( !PxSerialization::isSerializable(collection, sr, const_cast<PxCollection*>(externalRefs)) ) return false; bool bRet = true; SerializationRegistry& sn = static_cast<SerializationRegistry&>(sr); PxRepXInstantiationArgs args( sn.getPhysics(), params ); PxCollection* tmpCollection = PxCreateCollection(); PX_ASSERT(tmpCollection); tmpCollection->add( collection ); if(externalRefs) { tmpCollection->add(*const_cast<PxCollection*>(externalRefs)); } PxAllocatorCallback& allocator = *PxGetAllocatorCallback(); Sn::RepXCollection* theRepXCollection = Sn::create(sn, allocator, *tmpCollection ); if(inArgs != NULL) { theRepXCollection->setTolerancesScale(inArgs->scale); theRepXCollection->setUpVector(inArgs->upVector); } PxU32 nbObjects = collection.getNbObjects(); if( nbObjects ) { sortCollection( static_cast<Cm::Collection&>(collection), sn, true); for( PxU32 i = 0; i < nbObjects; i++ ) { PxBase& s = collection.getObject(i); if( PxConcreteType::eSHAPE == s.getConcreteType() ) { PxShape& shape = static_cast<PxShape&>(s); if( shape.isExclusive() ) continue; } PxSerialObjectId id = collection.getId(s); if(id == PX_SERIAL_OBJECT_ID_INVALID) id = static_cast<PxSerialObjectId>( size_t( &s )); PxRepXObject ro = PxCreateRepXObject( &s, id ); if ( ro.serializable == NULL || ro.id == 0 ) { bRet = false; break; } theRepXCollection->addRepXObjectToCollection( ro, 
tmpCollection, args ); } } tmpCollection->release(); theRepXCollection->save(outputStream); theRepXCollection->destroy(); return bRet; } PxCollection* PxSerialization::createCollectionFromXml(PxInputData& inputData, const PxCookingParams& params, PxSerializationRegistry& sr, const PxCollection* externalRefs, PxStringTable* stringTable, PxXmlMiscParameter* outArgs) { SerializationRegistry& sn = static_cast<SerializationRegistry&>(sr); PxCollection* collection = PxCreateCollection(); PX_ASSERT(collection); if( externalRefs ) collection->add(*const_cast<PxCollection*>(externalRefs)); PxAllocatorCallback& allocator = *PxGetAllocatorCallback(); Sn::RepXCollection* theRepXCollection = Sn::create(sn, inputData, allocator, *collection); theRepXCollection = &Sn::RepXUpgrader::upgradeCollection( *theRepXCollection ); PxRepXInstantiationArgs args( sn.getPhysics(), &params, stringTable ); if( !theRepXCollection->instantiateCollection(args, *collection) ) { collection->release(); theRepXCollection->destroy(); return NULL; } if( externalRefs ) collection->remove(*const_cast<PxCollection*>(externalRefs)); if(outArgs != NULL) { outArgs->upVector = theRepXCollection->getUpVector(); outArgs->scale = theRepXCollection->getTolerancesScale(); } theRepXCollection->destroy(); return collection; } }
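The two entry points above, PxSerialization::serializeCollectionToXml and PxSerialization::createCollectionFromXml, are the public face of this translation unit. Below is a minimal usage sketch; it is not part of the PhysX sources, and it assumes a valid PxPhysics instance and an already populated PxCollection. The in-memory stream helpers come from the extensions library.

// Hedged usage sketch: round-trip a collection through the RepX/XML path above.
#include "PxPhysicsAPI.h"   // pulls in the extensions serialization and default-stream headers

static physx::PxCollection* roundTripThroughXml(physx::PxPhysics& physics,
                                                physx::PxCollection& collection)
{
    using namespace physx;

    PxSerializationRegistry* registry = PxSerialization::createSerializationRegistry(physics);
    PxCookingParams cookingParams(physics.getTolerancesScale());

    // Serialize to an in-memory XML buffer (no external refs, no misc parameters).
    PxDefaultMemoryOutputStream xml;
    if (!PxSerialization::serializeCollectionToXml(xml, collection, *registry, &cookingParams, NULL, NULL))
    {
        registry->release();
        return NULL;
    }

    // Parse it back; mesh data is re-created from the stored descriptors/cooked streams.
    PxDefaultMemoryInputData input(xml.getData(), xml.getSize());
    PxCollection* loaded = PxSerialization::createCollectionFromXml(input, cookingParams, *registry, NULL, NULL, NULL);

    registry->release();
    return loaded; // NULL if instantiation failed; otherwise the caller releases it
}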
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Xml/SnXmlDeserializer.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SN_XML_DESERIALIZER_H #define SN_XML_DESERIALIZER_H #include "SnXmlVisitorReader.h" namespace physx { namespace Sn { //Definitions needed internally in the Serializer headers. template<typename TTriIndexElem> struct Triangle { TTriIndexElem mIdx0; TTriIndexElem mIdx1; TTriIndexElem mIdx2; Triangle( TTriIndexElem inIdx0 = 0, TTriIndexElem inIdx1 = 0, TTriIndexElem inIdx2 = 0) : mIdx0( inIdx0 ) , mIdx1( inIdx1 ) , mIdx2( inIdx2 ) { } }; struct XmlMemoryAllocateMemoryPoolAllocator { XmlMemoryAllocator* mAllocator; XmlMemoryAllocateMemoryPoolAllocator( XmlMemoryAllocator* inAlloc ) : mAllocator( inAlloc ) {} PxU8* allocate( PxU32 inSize ) { return mAllocator->allocate( inSize ); } void deallocate( PxU8* inMem ) { mAllocator->deallocate( inMem ); } }; inline bool isEmpty(const char *s) { while (*s != '\0') { if (!isspace(*s)) return false; s++; } return true; } inline void strtoLong( Triangle<PxU32>& ioDatatype,const char*& ioData ) { strto( ioDatatype.mIdx0, ioData ); strto( ioDatatype.mIdx1, ioData ); strto( ioDatatype.mIdx2, ioData ); } inline void strtoLong( PxHeightFieldSample& ioDatatype,const char*& ioData ) { PxU32 tempData; strto( tempData, ioData ); if ( isBigEndian() ) { PxU32& theItem(tempData); PxU32 theDest = 0; PxU8* theReadPtr( reinterpret_cast< PxU8* >( &theItem ) ); PxU8* theWritePtr( reinterpret_cast< PxU8* >( &theDest ) ); //A height field sample is a 16 bit number //followed by two bytes. //We write this out as a 32 bit integer, LE. //Thus, on a big endian, we need to move the bytes //around a bit. 
//LE - 1 2 3 4 //BE - 4 3 2 1 - after convert from xml number //Correct BE - 2 1 3 4, just like LE but with the 16 number swapped theWritePtr[0] = theReadPtr[2]; theWritePtr[1] = theReadPtr[3]; theWritePtr[2] = theReadPtr[1]; theWritePtr[3] = theReadPtr[0]; theItem = theDest; } ioDatatype = *reinterpret_cast<PxHeightFieldSample*>( &tempData ); } template<typename TDataType> inline void readStridedFlagsProperty( XmlReader& ioReader, const char* inPropName, TDataType*& outData, PxU32& outStride, PxU32& outCount, XmlMemoryAllocator& inAllocator, const PxU32ToName* inConversions) { const char* theSrcData; outStride = sizeof( TDataType ); outData = NULL; outCount = 0; if ( ioReader.read( inPropName, theSrcData ) ) { XmlMemoryAllocateMemoryPoolAllocator tempAllocator( &inAllocator ); MemoryBufferBase<XmlMemoryAllocateMemoryPoolAllocator> tempBuffer( &tempAllocator ); if ( theSrcData ) { static PxU32 theCount = 0; ++theCount; char* theStartData = const_cast< char*>( copyStr( &tempAllocator, theSrcData ) ); char* aData = strtok(theStartData, " \n"); while( aData ) { TDataType tempValue; stringToFlagsType( aData, inAllocator, tempValue, inConversions ); aData = strtok(NULL," \n"); tempBuffer.write( &tempValue, sizeof(TDataType) ); } outData = reinterpret_cast< TDataType* >( tempBuffer.mBuffer ); outCount = tempBuffer.mWriteOffset / sizeof( TDataType ); tempAllocator.deallocate( reinterpret_cast<PxU8*>(theStartData) ); } tempBuffer.releaseBuffer(); } } template<typename TDataType> inline void readStridedBufferProperty( XmlReader& ioReader, const char* inPropName, TDataType*& outData, PxU32& outStride, PxU32& outCount, XmlMemoryAllocator& inAllocator) { const char* theSrcData; outStride = sizeof( TDataType ); outData = NULL; outCount = 0; if ( ioReader.read( inPropName, theSrcData ) ) { XmlMemoryAllocateMemoryPoolAllocator tempAllocator( &inAllocator ); MemoryBufferBase<XmlMemoryAllocateMemoryPoolAllocator> tempBuffer( &tempAllocator ); if ( theSrcData ) { static PxU32 theCount = 0; ++theCount; char* theStartData = const_cast< char*>( copyStr( &tempAllocator, theSrcData ) ); const char* theData = theStartData; while( !isEmpty(theData) ) { //These buffers are whitespace delimited. 
TDataType theType; strtoLong( theType, theData ); tempBuffer.write( &theType, sizeof(theType) ); } outData = reinterpret_cast< TDataType* >( tempBuffer.mBuffer ); outCount = tempBuffer.mWriteOffset / sizeof( TDataType ); tempAllocator.deallocate( reinterpret_cast<PxU8*>(theStartData) ); } tempBuffer.releaseBuffer(); } } template<typename TDataType> inline void readStridedBufferProperty( XmlReader& ioReader, const char* inPropName, PxStridedData& ioData, PxU32& outCount, XmlMemoryAllocator& inAllocator) { TDataType* tempData = NULL; readStridedBufferProperty<TDataType>( ioReader, inPropName, tempData, ioData.stride, outCount, inAllocator ); ioData.data = tempData; } template<typename TDataType> inline void readStridedBufferProperty( XmlReader& ioReader, const char* inPropName, PxTypedStridedData<TDataType>& ioData, PxU32& outCount, XmlMemoryAllocator& inAllocator) { TDataType* tempData = NULL; readStridedBufferProperty<TDataType>( ioReader, inPropName, tempData, ioData.stride, outCount, inAllocator ); ioData.data = reinterpret_cast<PxMaterialTableIndex*>( tempData ); } template<typename TDataType> inline void readStridedBufferProperty( XmlReader& ioReader, const char* inPropName, PxBoundedData& ioData, XmlMemoryAllocator& inAllocator) { return readStridedBufferProperty<TDataType>( ioReader, inPropName, ioData, ioData.count, inAllocator ); } } } #endif
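The strided-buffer readers above all follow the same pattern: copy the element text, then consume it token by token with strto/strtoLong until isEmpty reports that only whitespace remains. A standalone sketch of that parsing idea follows; it is not the PhysX code path, and the Tri record is a hypothetical stand-in for Triangle<PxU32>.

// Standalone illustration of whitespace-delimited buffer decoding.
#include <cstdint>
#include <sstream>
#include <vector>

struct Tri { uint32_t i0, i1, i2; };

static std::vector<Tri> parseTriangles(const char* text)
{
    std::vector<Tri> out;
    std::istringstream tokens(text);
    Tri t;
    // operator>> skips runs of whitespace, matching the writer, which emits
    // indices separated by single spaces and newlines.
    while (tokens >> t.i0 >> t.i1 >> t.i2)
        out.push_back(t);
    return out;
}
// parseTriangles("0 1 2 2 3 0") yields two triangles, mirroring how the XML
// "Triangles" element is decoded three indices at a time.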
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Xml/SnRepX1_0Defaults.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. DEFINE_REPX_DEFAULT_PROPERTY("PxBoxGeometry.HalfExtents", "0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphereGeometry.Radius", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxConvexMeshGeometry.Scale.Scale", "1 1 1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxConvexMeshGeometry.Scale.Rotation", "0 0 0 1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxConvexMeshGeometry.ConvexMesh", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxTriangleMeshGeometry.Scale.Scale", "1 1 1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxTriangleMeshGeometry.Scale.Rotation", "0 0 0 1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxTriangleMeshGeometry.MeshFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxTriangleMeshGeometry.TriangleMesh", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxHeightFieldGeometry.HeightField", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxHeightFieldGeometry.HeightScale", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxHeightFieldGeometry.RowScale", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxHeightFieldGeometry.ColumnScale", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxHeightFieldGeometry.HeightFieldFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxTolerancesScale.Length", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxTolerancesScale.Mass", "1000" ) DEFINE_REPX_DEFAULT_PROPERTY("PxTolerancesScale.Speed", "10" ) DEFINE_REPX_DEFAULT_PROPERTY("PxMaterial.DynamicFriction", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxMaterial.StaticFriction", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxMaterial.Restitution", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxMaterial.DynamicFrictionV", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxMaterial.StaticFrictionV", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxMaterial.DirOfAnisotropy", "1 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxMaterial.Flags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxMaterial.FrictionCombineMode", "eAVERAGE" ) DEFINE_REPX_DEFAULT_PROPERTY("PxMaterial.RestitutionCombineMode", "eAVERAGE" ) DEFINE_REPX_DEFAULT_PROPERTY("PxMaterial.UserData", "0" ) 
DEFINE_REPX_DEFAULT_PROPERTY("PxShape.LocalPose", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxShape.SimulationFilterData", "0 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxShape.QueryFilterData", "0 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxShape.ContactOffset", "0.02" ) DEFINE_REPX_DEFAULT_PROPERTY("PxShape.RestOffset", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxShape.Flags", "eSIMULATION_SHAPE|eSCENE_QUERY_SHAPE|eVISUALIZATION" ) DEFINE_REPX_DEFAULT_PROPERTY("PxShape.Name", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxShape.UserData", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidStatic.Name", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidStatic.ActorFlags", "eVISUALIZATION" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidStatic.DominanceGroup", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidStatic.OwnerClient", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidStatic.ClientBehaviorBits", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidStatic.UserData", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidStatic.GlobalPose", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.Name", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.ActorFlags", "eVISUALIZATION" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.DominanceGroup", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.OwnerClient", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.ClientBehaviorBits", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.UserData", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.CMassLocalPose", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.Mass", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.MassSpaceInertiaTensor", "0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.GlobalPose", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.LinearVelocity", "0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.AngularVelocity", "0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.LinearDamping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.AngularDamping", "0.05" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.MaxAngularVelocity", "7" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.SleepEnergyThreshold", "0.005" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.SolverIterationCounts.minPositionIters", "4" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.SolverIterationCounts.minVelocityIters", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.ContactReportThreshold", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRigidDynamic.RigidDynamicFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.ParentPose", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.ChildPose", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.TargetOrientation", "0 0 0 1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.TargetVelocity", "0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.InternalCompliance", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.ExternalCompliance", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.SwingLimit.yLimit", "0.78539816339" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.SwingLimit.zLimit", "0.78539816339" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.SwingLimitEnabled", "false" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.TwistLimit.lower", "-0.78539816339" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.TwistLimit.upper", "0.78539816339" ) 
DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationJoint.TwistLimitEnabled", "false" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationLink.Name", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationLink.ActorFlags", "eVISUALIZATION" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationLink.DominanceGroup", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationLink.OwnerClient", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationLink.ClientBehaviorBits", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationLink.UserData", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationLink.CMassLocalPose", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationLink.Mass", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationLink.MassSpaceInertiaTensor", "0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationLink.GlobalPose", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationLink.LinearVelocity", "0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulationLink.AngularVelocity", "0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulation.MaxProjectionIterations", "4" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulation.SeparationTolerance", "0.1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulation.InternalDriveIterations", "4" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulation.ExternalDriveIterations", "4" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulation.SolverIterationCounts.minPositionIters", "4" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulation.SolverIterationCounts.minVelocityIters", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulation.Name", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxArticulation.UserData", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.BreakForce.force", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.BreakForce.torque", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.ConstraintFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Name", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.UserData", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Motion.eX", "eLOCKED" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Motion.eY", "eLOCKED" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Motion.eZ", "eLOCKED" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Motion.eTWIST", "eLOCKED" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Motion.eSWING1", "eLOCKED" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Motion.eSWING2", "eLOCKED" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.LinearLimit.Restitution", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.LinearLimit.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.LinearLimit.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.LinearLimit.ContactDistance", "0.05" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.LinearLimit.Value", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.TwistLimit.Restitution", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.TwistLimit.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.TwistLimit.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.TwistLimit.ContactDistance", "0.05" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.TwistLimit.Upper", "1.5708" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.TwistLimit.Lower", "-1.5708" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.SwingLimit.Restitution", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.SwingLimit.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.SwingLimit.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.SwingLimit.ContactDistance", "0.05" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.SwingLimit.YAngle", "1.5708" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.SwingLimit.ZAngle", "1.5708" ) 
DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eX.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eX.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eX.ForceLimit", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eX.Flags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eY.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eY.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eY.ForceLimit", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eY.Flags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eZ.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eZ.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eZ.ForceLimit", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eZ.Flags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eSWING.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eSWING.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eSWING.ForceLimit", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eSWING.Flags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eTWIST.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eTWIST.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eTWIST.ForceLimit", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eTWIST.Flags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eSLERP.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eSLERP.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eSLERP.ForceLimit", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.Drive.eSLERP.Flags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.DrivePosition", "0 0 0 1 0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.DriveVelocity.linear", "0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.DriveVelocity.angular", "0 0 0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.ProjectionLinearTolerance", "1e+010" ) DEFINE_REPX_DEFAULT_PROPERTY("PxD6Joint.ProjectionAngularTolerance", "3.14159" ) DEFINE_REPX_DEFAULT_PROPERTY("PxFixedJoint.BreakForce.force", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxFixedJoint.BreakForce.torque", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxFixedJoint.ConstraintFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxFixedJoint.Name", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxFixedJoint.UserData", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxFixedJoint.ProjectionLinearTolerance", "1e+010" ) DEFINE_REPX_DEFAULT_PROPERTY("PxFixedJoint.ProjectionAngularTolerance", "3.14159" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.BreakForce.force", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.BreakForce.torque", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.ConstraintFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.Name", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.UserData", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.MinDistance", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.MaxDistance", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.Tolerance", "0.025" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxDistanceJoint.DistanceJointFlags", "eMAX_DISTANCE_ENABLED" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.BreakForce.force", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.BreakForce.torque", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.ConstraintFlags", 
"" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.Name", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.UserData", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.Limit.Restitution", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.Limit.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.Limit.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.Limit.ContactDistance", "0.05" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.Limit.Upper", "1.5708" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.Limit.Lower", "-1.5708" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.DriveVelocity", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.DriveForceLimit", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.DriveGearRatio", "1" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.RevoluteJointFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.ProjectionLinearTolerance", "1e+010" ) DEFINE_REPX_DEFAULT_PROPERTY("PxRevoluteJoint.ProjectionAngularTolerance", "3.14159" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.BreakForce.force", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.BreakForce.torque", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.ConstraintFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.Name", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.UserData", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.Limit.Restitution", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.Limit.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.Limit.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.Limit.ContactDistance", "0.01" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.Limit.Upper", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.Limit.Lower", "-3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.PrismaticJointFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.ProjectionLinearTolerance", "1e+010" ) DEFINE_REPX_DEFAULT_PROPERTY("PxPrismaticJoint.ProjectionAngularTolerance", "3.14159" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.BreakForce.force", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.BreakForce.torque", "3.40282e+038" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.ConstraintFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.Name", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.UserData", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.LimitCone.Restitution", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.LimitCone.Spring", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.LimitCone.Damping", "0" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.LimitCone.ContactDistance", "0.05" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.LimitCone.YAngle", "1.5708" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.LimitCone.ZAngle", "1.5708" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.SphericalJointFlags", "" ) DEFINE_REPX_DEFAULT_PROPERTY("PxSphericalJoint.ProjectionLinearTolerance", "1e+010" ) DEFINE_REPX_DEFAULT_PROPERTY("THEEND", "false" )
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Xml/SnRepXSerializerImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SN_REPX_SERIALIZER_IMPL_H #define SN_REPX_SERIALIZER_IMPL_H #include "foundation/PxUserAllocated.h" #include "SnXmlVisitorWriter.h" #include "SnXmlVisitorReader.h" namespace physx { using namespace Sn; /** * The repx serializer impl takes the raw, untyped repx extension interface * and implements the simpler functions plus does the reinterpret-casts required * for any object to implement the serializer safely. 
*/ template<typename TLiveType> struct RepXSerializerImpl : public PxRepXSerializer, PxUserAllocated { protected: RepXSerializerImpl( const RepXSerializerImpl& inOther ); RepXSerializerImpl& operator=( const RepXSerializerImpl& inOther ); public: PxAllocatorCallback& mAllocator; RepXSerializerImpl( PxAllocatorCallback& inAllocator ) : mAllocator( inAllocator ) { } virtual const char* getTypeName() { return PxTypeInfo<TLiveType>::name(); } virtual void objectToFile( const PxRepXObject& inLiveObject, PxCollection* inCollection, XmlWriter& inWriter, MemoryBuffer& inTempBuffer, PxRepXInstantiationArgs& inArgs ) { const TLiveType* theObj = reinterpret_cast<const TLiveType*>( inLiveObject.serializable ); objectToFileImpl( theObj, inCollection, inWriter, inTempBuffer, inArgs ); } virtual PxRepXObject fileToObject( XmlReader& inReader, XmlMemoryAllocator& inAllocator, PxRepXInstantiationArgs& inArgs, PxCollection* inCollection ) { TLiveType* theObj( allocateObject( inArgs ) ); if ( theObj ) if(fileToObjectImpl( theObj, inReader, inAllocator, inArgs, inCollection )) return PxCreateRepXObject(theObj); return PxRepXObject(); } virtual void objectToFileImpl( const TLiveType* inObj, PxCollection* inCollection, XmlWriter& inWriter, MemoryBuffer& inTempBuffer, PxRepXInstantiationArgs& /*inArgs*/) { writeAllProperties( inObj, inWriter, inTempBuffer, *inCollection ); } virtual bool fileToObjectImpl( TLiveType* inObj, XmlReader& inReader, XmlMemoryAllocator& inAllocator, PxRepXInstantiationArgs& inArgs, PxCollection* inCollection ) { return readAllProperties( inArgs, inReader, inObj, inAllocator, *inCollection ); } virtual TLiveType* allocateObject( PxRepXInstantiationArgs& inArgs ) = 0; }; } #endif
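Concrete serializers only need to supply allocateObject; the property traversal is inherited from the template. The sketch below shows the shape of such a derived class, assuming the surrounding Sn headers are available. MyMaterialRepXSerializer is a hypothetical name; the real PxMaterial serializer in SnRepXCoreSerializer.cpp follows the same pattern.

// Hedged sketch of a concrete serializer built on RepXSerializerImpl.
namespace physx
{
    struct MyMaterialRepXSerializer : public RepXSerializerImpl<PxMaterial>
    {
        MyMaterialRepXSerializer(PxAllocatorCallback& allocator)
            : RepXSerializerImpl<PxMaterial>(allocator) {}

        // Only allocateObject is mandatory; objectToFileImpl/fileToObjectImpl
        // default to the metadata-driven writeAllProperties/readAllProperties.
        virtual PxMaterial* allocateObject(PxRepXInstantiationArgs& inArgs)
        {
            // Create a blank material; readAllProperties then overwrites the
            // placeholder friction/restitution values from the XML node.
            return inArgs.physics.createMaterial(0.0f, 0.0f, 0.0f);
        }
    };
}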
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Xml/SnXmlImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SN_XML_IMPL_H #define SN_XML_IMPL_H #include "SnXmlMemoryPool.h" #include "foundation/PxString.h" #include "foundation/PxMemory.h" namespace physx { namespace Sn { typedef CMemoryPoolManager TMemoryPoolManager; namespace snXmlImpl { inline PxU32 strLen( const char* inStr ) { PxU32 len = 0; if ( inStr ) { while ( *inStr ) { ++len; ++inStr; } } return len; } } inline const char* copyStr( PxAllocatorCallback& inAllocator, const char* inStr ) { if ( inStr && *inStr ) { PxU32 theLen = snXmlImpl::strLen( inStr ); //The memory will never be released by repx. If you want it released, you need to pass in a custom allocator //that tracks all allocations and releases unreleased allocations yourself. 
char* dest = reinterpret_cast<char* >( inAllocator.allocate( theLen + 1, "Repx::const char*", PX_FL ) ); PxMemCopy( dest, inStr, theLen ); dest[theLen] = 0; return dest; } return ""; } template<typename TManagerType> inline const char* copyStr( TManagerType* inMgr, const char* inStr ) { if ( inStr && *inStr ) { PxU32 theLen = snXmlImpl::strLen( inStr ); char* dest = reinterpret_cast<char* >( inMgr->allocate( theLen + 1 ) ); PxMemCopy( dest, inStr, theLen ); dest[theLen] = 0; return dest; } return ""; } inline void releaseStr( TMemoryPoolManager* inMgr, const char* inStr, PxU32 ) { if ( inStr && *inStr ) { inMgr->deallocate( reinterpret_cast< PxU8* >( const_cast<char*>( inStr ) ) ); } } inline void releaseStr( TMemoryPoolManager* inMgr, const char* inStr ) { if ( inStr && *inStr ) { PxU32 theLen = snXmlImpl::strLen( inStr ); releaseStr( inMgr, inStr, theLen ); } } struct XmlNode { const char* mName; //Never released until all collections are released const char* mData; //Never released until all collections are released XmlNode* mNextSibling; XmlNode* mPreviousSibling; XmlNode* mFirstChild; XmlNode* mParent; XmlNode( const XmlNode& ); XmlNode& operator=( const XmlNode& ); PX_INLINE void initPtrs() { mNextSibling = NULL; mPreviousSibling = NULL; mFirstChild = NULL; mParent = NULL; } PX_INLINE XmlNode( const char* inName = "", const char* inData = "" ) : mName( inName ) , mData( inData ) { initPtrs(); } void addChild( XmlNode* inItem ) { inItem->mParent = this; if ( mFirstChild == NULL ) mFirstChild = inItem; else { XmlNode* theNode = mFirstChild; //Follow the chain till the end. while( theNode->mNextSibling != NULL ) theNode = theNode->mNextSibling; theNode->mNextSibling = inItem; inItem->mPreviousSibling = theNode; } } PX_INLINE XmlNode* findChildByName( const char* inName ) { for ( XmlNode* theNode = mFirstChild; theNode; theNode = theNode->mNextSibling ) { XmlNode* theRepXNode = theNode; if ( physx::Pxstricmp( theRepXNode->mName, inName ) == 0 ) return theNode; } return NULL; } PX_INLINE void orphan() { if ( mParent ) { if ( mParent->mFirstChild == this ) mParent->mFirstChild = mNextSibling; } if ( mPreviousSibling ) mPreviousSibling->mNextSibling = mNextSibling; if ( mNextSibling ) mNextSibling->mPreviousSibling = mPreviousSibling; if ( mFirstChild ) mFirstChild->mParent = NULL; initPtrs(); } }; inline XmlNode* allocateRepXNode( TMemoryPoolManager* inManager, const char* inName, const char* inData ) { XmlNode* retval = inManager->allocate<XmlNode>(); retval->mName = copyStr( inManager, inName ); retval->mData = copyStr( inManager, inData ); return retval; } inline void release( TMemoryPoolManager* inManager, XmlNode* inNode ) { //We *don't* release the strings associated with the node //because they could be shared. Instead, we just let them 'leak' //in some sense, at least until the memory manager itself is deleted. //DO NOT UNCOMMENT THE LINES BELOW!! 
//releaseStr( inManager, inNode->mName ); //releaseStr( inManager, inNode->mData ); inManager->deallocate( inNode ); } static PX_INLINE void releaseNodeAndChildren( TMemoryPoolManager* inManager, XmlNode* inNode ) { if ( inNode->mFirstChild ) { XmlNode* childNode( inNode->mFirstChild ); while( childNode ) { XmlNode* _node( childNode ); childNode = _node->mNextSibling; releaseNodeAndChildren( inManager, _node ); } } inNode->orphan(); release( inManager, inNode ); } static XmlNode* copyRepXNodeAndSiblings( TMemoryPoolManager* inManager, const XmlNode* inNode, XmlNode* inParent ); static XmlNode* copyRepXNode( TMemoryPoolManager* inManager, const XmlNode* inNode, XmlNode* inParent = NULL ) { XmlNode* newNode( allocateRepXNode( inManager, NULL, NULL ) ); newNode->mName = inNode->mName; //Some light structural sharing newNode->mData = inNode->mData; //Some light structural sharing newNode->mParent = inParent; if ( inNode->mFirstChild ) newNode->mFirstChild = copyRepXNodeAndSiblings( inManager, inNode->mFirstChild, newNode ); return newNode; } static XmlNode* copyRepXNodeAndSiblings( TMemoryPoolManager* inManager, const XmlNode* inNode, XmlNode* inParent ) { XmlNode* sibling = inNode->mNextSibling; if ( sibling ) sibling = copyRepXNodeAndSiblings( inManager, sibling, inParent ); XmlNode* newNode = copyRepXNode( inManager, inNode, inParent ); newNode->mNextSibling = sibling; if ( sibling ) sibling->mPreviousSibling = newNode; return newNode; } inline bool isBigEndian() { int i = 1; return *(reinterpret_cast<char*>(&i))==0; } struct NameStackEntry { const char* mName; bool mOpen; NameStackEntry( const char* nm ) : mName( nm ), mOpen( false ) {} }; typedef PxProfileArray<NameStackEntry> TNameStack; } } #endif
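A small, self-contained exercise of the node helpers defined above. It is a sketch rather than production code; it assumes a PxAllocatorCallback is available (for example *PxGetAllocatorCallback()) and that the pool manager constructor takes that callback, as the callers in this directory do.

// Hedged sketch: build, query and release a tiny XmlNode tree.
static bool xmlNodeSmokeTest(physx::PxAllocatorCallback& alloc)
{
    using namespace physx;
    using namespace physx::Sn;

    TMemoryPoolManager manager(alloc);

    XmlNode* actor = allocateRepXNode(&manager, "actor", NULL);
    XmlNode* child = allocateRepXNode(&manager, "name", "crate");
    actor->addChild(child);

    // findChildByName compares with Pxstricmp, so the lookup is case-insensitive.
    XmlNode* found = actor->findChildByName("Name");
    const bool ok = found && found->mData && Pxstricmp(found->mData, "crate") == 0;

    // Node structs go back to the pool; the copied strings deliberately stay in
    // the pool allocator until the manager itself is destroyed (see release()).
    releaseNodeAndChildren(&manager, actor);
    return ok;
}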
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Xml/SnXmlSimpleXmlWriter.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SN_XML_SIMPLE_XML_WRITER_H #define SN_XML_SIMPLE_XML_WRITER_H #include "foundation/PxArray.h" #include "SnXmlMemoryPoolStreams.h" namespace physx { namespace Sn { class XmlWriter { public: struct STagWatcher { typedef XmlWriter TXmlWriterType; TXmlWriterType& mWriter; STagWatcher( const STagWatcher& inOther ); STagWatcher& operator-( const STagWatcher& inOther ); STagWatcher( TXmlWriterType& inWriter, const char* inTagName ) : mWriter( inWriter ) { mWriter.beginTag( inTagName ); } ~STagWatcher() { mWriter.endTag(); } }; virtual ~XmlWriter(){} virtual void beginTag( const char* inTagname ) = 0; virtual void endTag() = 0; virtual void addAttribute( const char* inName, const char* inValue ) = 0; virtual void writeContentTag( const char* inTag, const char* inContent ) = 0; virtual void addContent( const char* inContent ) = 0; virtual PxU32 tabCount() = 0; }; template<typename TStreamType> class XmlWriterImpl : public XmlWriter { PxProfileAllocatorWrapper mWrapper; TStreamType& mStream; XmlWriterImpl( const XmlWriterImpl& inOther ); XmlWriterImpl& operator=( const XmlWriterImpl& inOther ); PxProfileArray<const char*> mTags; bool mTagOpen; PxU32 mInitialTagDepth; public: XmlWriterImpl( TStreamType& inStream, PxAllocatorCallback& inAllocator, PxU32 inInitialTagDepth = 0 ) : mWrapper( inAllocator ) , mStream( inStream ) , mTags( mWrapper ) , mTagOpen( false ) , mInitialTagDepth( inInitialTagDepth ) { } virtual ~XmlWriterImpl() { while( mTags.size() ) endTag(); } PxU32 tabCount() { return mTags.size() + mInitialTagDepth; } void writeTabs( PxU32 inSize ) { inSize += mInitialTagDepth; for ( PxU32 idx =0; idx < inSize; ++idx ) mStream << "\t"; } void beginTag( const char* inTagname ) { closeTag(); writeTabs(mTags.size()); mTags.pushBack( inTagname ); mStream << "<" << inTagname; mTagOpen = true; } void addAttribute( const char* inName, const char* 
inValue ) { PX_ASSERT( mTagOpen ); mStream << " " << inName << "=" << "\"" << inValue << "\""; } void closeTag(bool useNewline = true) { if ( mTagOpen ) { mStream << " " << ">"; if (useNewline ) mStream << "\n"; } mTagOpen = false; } void doEndOpenTag() { mStream << "</" << mTags.back() << ">" << "\n"; } void endTag() { PX_ASSERT( mTags.size() ); if ( mTagOpen ) mStream << " " << "/>" << "\n"; else { writeTabs(mTags.size()-1); doEndOpenTag(); } mTagOpen = false; mTags.popBack(); } void addContent( const char* inContent ) { closeTag(false); mStream << inContent; } void writeContentTag( const char* inTag, const char* inContent ) { beginTag( inTag ); addContent( inContent ); doEndOpenTag(); mTags.popBack(); } void insertXml( const char* inXml ) { closeTag(); mStream << inXml; } }; struct BeginTag { const char* mTagName; BeginTag( const char* inTagName ) : mTagName( inTagName ) { } }; struct EndTag { EndTag() {} }; struct Att { const char* mAttName; const char* mAttValue; Att( const char* inAttName, const char* inAttValue ) : mAttName( inAttName ) , mAttValue( inAttValue ) { } }; struct Content { const char* mContent; Content( const char* inContent ) : mContent( inContent ) { } }; struct ContentTag { const char* mTagName; const char* mContent; ContentTag( const char* inTagName, const char* inContent ) : mTagName( inTagName ) , mContent( inContent ) { } }; inline XmlWriter& operator<<( XmlWriter& inWriter, const BeginTag& inTag ) { inWriter.beginTag( inTag.mTagName ); return inWriter; } inline XmlWriter& operator<<( XmlWriter& inWriter, const EndTag& inTag ) { inWriter.endTag(); return inWriter; } inline XmlWriter& operator<<( XmlWriter& inWriter, const Att& inTag ) { inWriter.addAttribute(inTag.mAttName, inTag.mAttValue); return inWriter; } inline XmlWriter& operator<<( XmlWriter& inWriter, const Content& inTag ) { inWriter.addContent(inTag.mContent); return inWriter; } inline XmlWriter& operator<<( XmlWriter& inWriter, const ContentTag& inTag ) { inWriter.writeContentTag(inTag.mTagName, inTag.mContent); return inWriter; } inline void writeProperty( XmlWriter& inWriter, MemoryBuffer& tempBuffer, const char* inPropName ) { PxU8 data = 0; tempBuffer.write( &data, sizeof(PxU8) ); inWriter.writeContentTag( inPropName, reinterpret_cast<const char*>( tempBuffer.mBuffer ) ); tempBuffer.clear(); } template<typename TDataType> inline void writeProperty( XmlWriter& inWriter, MemoryBuffer& tempBuffer, const char* inPropName, TDataType inValue ) { tempBuffer << inValue; writeProperty( inWriter, tempBuffer, inPropName ); } } } #endif
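The BeginTag/Att/Content/ContentTag helpers turn the writer into a small streaming DSL. The sketch below drives XmlWriterImpl through that interface; StringStreamAdapter is a hypothetical stand-in for the stream parameter, whereas the real callers stream into MemoryBuffer or a PxOutputStream via the operators in SnPxStreamOperators.h.

// Hedged sketch: emit a tiny XML fragment through the streaming helpers.
#include <string>

struct StringStreamAdapter
{
    std::string text;
    StringStreamAdapter& operator<<(const char* s) { text += s; return *this; }
};

static std::string writeTinyFragment(physx::PxAllocatorCallback& alloc)
{
    using namespace physx::Sn;

    StringStreamAdapter stream;
    {
        XmlWriterImpl<StringStreamAdapter> writer(stream, alloc);
        writer << BeginTag("PxMaterial")
               << Att("version", "1")
               << ContentTag("Restitution", "0.5")
               << EndTag();
    } // the destructor would close any tag still left open

    // stream.text now holds "<PxMaterial version=\"1\" >" followed by the
    // nested Restitution content tag and the closing tag.
    return stream.text;
}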
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Xml/SnRepXCoreSerializer.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #include "PxPhysicsAPI.h" #include "PxMetaDataObjects.h" #include "SnPxStreamOperators.h" #include "foundation/PxUtilities.h" #include "SnXmlImpl.h" #include "SnXmlSerializer.h" #include "SnXmlDeserializer.h" #include "SnRepXCoreSerializer.h" using namespace physx::Sn; namespace physx { typedef PxReadOnlyPropertyInfo<PxPropertyInfoName::PxArticulationLink_InboundJoint, PxArticulationLink, PxArticulationJointReducedCoordinate *> TIncomingJointPropType; //************************************************************* // Actual RepXSerializer implementations for PxMaterial //************************************************************* PxMaterial* PxMaterialRepXSerializer::allocateObject( PxRepXInstantiationArgs& inArgs ) { return inArgs.physics.createMaterial(0, 0, 0); } PxRepXObject PxShapeRepXSerializer::fileToObject( XmlReader& inReader, XmlMemoryAllocator& inAllocator, PxRepXInstantiationArgs& inArgs, PxCollection* inCollection ) { PxProfileAllocatorWrapper wrapper( inAllocator.getAllocator() ); TReaderNameStack names( wrapper ); PxProfileArray<PxU32> contexts( wrapper ); bool hadError = false; RepXVisitorReader<PxShape> theVisitor( names, contexts, inArgs, inReader, NULL, inAllocator, *inCollection, hadError ); PxArray<PxMaterial*> materials; PxGeometry* geometry = NULL; parseShape( theVisitor, geometry, materials ); if(hadError) return PxRepXObject(); PxShape *theShape = inArgs.physics.createShape( *geometry, materials.begin(), PxTo16(materials.size()) ); switch(geometry->getType()) { case PxGeometryType::eSPHERE : static_cast<PxSphereGeometry*>(geometry)->~PxSphereGeometry(); break; case PxGeometryType::ePLANE : static_cast<PxPlaneGeometry*>(geometry)->~PxPlaneGeometry(); break; case PxGeometryType::eCAPSULE : static_cast<PxCapsuleGeometry*>(geometry)->~PxCapsuleGeometry(); break; case PxGeometryType::eBOX : 
static_cast<PxBoxGeometry*>(geometry)->~PxBoxGeometry(); break; case PxGeometryType::eCONVEXMESH : static_cast<PxConvexMeshGeometry*>(geometry)->~PxConvexMeshGeometry(); break; case PxGeometryType::eTRIANGLEMESH : static_cast<PxTriangleMeshGeometry*>(geometry)->~PxTriangleMeshGeometry(); break; case PxGeometryType::eHEIGHTFIELD : static_cast<PxHeightFieldGeometry*>(geometry)->~PxHeightFieldGeometry(); break; case PxGeometryType::eTETRAHEDRONMESH : static_cast<PxTetrahedronMeshGeometry*>(geometry)->~PxTetrahedronMeshGeometry(); break; case PxGeometryType::ePARTICLESYSTEM: static_cast<PxParticleSystemGeometry*>(geometry)->~PxParticleSystemGeometry(); break; case PxGeometryType::eHAIRSYSTEM: static_cast<PxHairSystemGeometry*>(geometry)->~PxHairSystemGeometry(); break; case PxGeometryType::eCUSTOM : static_cast<PxCustomGeometry*>(geometry)->~PxCustomGeometry(); break; case PxGeometryType::eGEOMETRY_COUNT: case PxGeometryType::eINVALID: PX_ASSERT(0); } inAllocator.getAllocator().deallocate(geometry); bool ret = readAllProperties( inArgs, inReader, theShape, inAllocator, *inCollection ); return ret ? PxCreateRepXObject(theShape) : PxRepXObject(); } //************************************************************* // Actual RepXSerializer implementations for PxTriangleMesh //************************************************************* template<typename TTriIndexElem> inline void writeTriangle( MemoryBuffer& inTempBuffer, const Triangle<TTriIndexElem>& inTriangle ) { inTempBuffer << inTriangle.mIdx0 << " " << inTriangle.mIdx1 << " " << inTriangle.mIdx2; } PxU32 materialAccess( const PxTriangleMesh* inMesh, PxU32 inIndex ) { return inMesh->getTriangleMaterialIndex( inIndex ); } template<typename TDataType> void writeDatatype( MemoryBuffer& inTempBuffer, const TDataType& inType ) { inTempBuffer << inType; } void PxBVH33TriangleMeshRepXSerializer::objectToFileImpl( const PxBVH33TriangleMesh* mesh, PxCollection* /*inCollection*/, XmlWriter& inWriter, MemoryBuffer& inTempBuffer, PxRepXInstantiationArgs& inArgs ) { bool hasMatIndex = mesh->getTriangleMaterialIndex(0) != 0xffff; PxU32 numVertices = mesh->getNbVertices(); const PxVec3* vertices = mesh->getVertices(); writeBuffer( inWriter, inTempBuffer, 2, vertices, numVertices, "Points", writePxVec3 ); bool isU16 = mesh->getTriangleMeshFlags() & PxTriangleMeshFlag::e16_BIT_INDICES ? 
true : false; PxU32 triCount = mesh->getNbTriangles(); const void* indices = mesh->getTriangles(); if ( isU16 ) writeBuffer( inWriter, inTempBuffer, 2, reinterpret_cast<const Triangle<PxU16>* >( indices ), triCount, "Triangles", writeTriangle<PxU16> ); else writeBuffer( inWriter, inTempBuffer, 2, reinterpret_cast<const Triangle<PxU32>* >( indices ), triCount, "Triangles", writeTriangle<PxU32> ); if ( hasMatIndex ) writeBuffer( inWriter, inTempBuffer, 6, mesh, materialAccess, triCount, "materialIndices", writeDatatype<PxU32> ); //Cooked stream PxTriangleMeshDesc meshDesc; meshDesc.points.count = numVertices; meshDesc.points.data = vertices; meshDesc.points.stride = sizeof(PxVec3); meshDesc.triangles.count = triCount; meshDesc.triangles.data = indices; meshDesc.triangles.stride = isU16?3*sizeof(PxU16):3*sizeof(PxU32); if(isU16) { meshDesc.triangles.stride = sizeof(PxU16)*3; meshDesc.flags |= PxMeshFlag::e16_BIT_INDICES; } else { meshDesc.triangles.stride = sizeof(PxU32)*3; } if(hasMatIndex) { PxMaterialTableIndex* materialIndices = new PxMaterialTableIndex[triCount]; for(PxU32 i = 0; i < triCount; i++) materialIndices[i] = mesh->getTriangleMaterialIndex(i); meshDesc.materialIndices.data = materialIndices; meshDesc.materialIndices.stride = sizeof(PxMaterialTableIndex); } if(inArgs.cooker != NULL) { TMemoryPoolManager theManager(mAllocator); MemoryBuffer theTempBuf( &theManager ); theTempBuf.clear(); PxCookTriangleMesh( *inArgs.cooker, meshDesc, theTempBuf ); writeBuffer( inWriter, inTempBuffer, 16, theTempBuf.mBuffer, theTempBuf.mWriteOffset, "CookedData", writeDatatype<PxU8> ); } delete []meshDesc.materialIndices.data; } PxRepXObject PxBVH33TriangleMeshRepXSerializer::fileToObject( XmlReader& inReader, XmlMemoryAllocator& inAllocator, PxRepXInstantiationArgs& inArgs, PxCollection* /*inCollection*/ ) { //We can't do a simple inverse; we *have* to cook data to get a mesh. 
PxTriangleMeshDesc theDesc; readStridedBufferProperty<PxVec3>( inReader, "points", theDesc.points, inAllocator); readStridedBufferProperty<Triangle<PxU32> >( inReader, "triangles", theDesc.triangles, inAllocator); PxU32 triCount; readStridedBufferProperty<PxMaterialTableIndex>( inReader, "materialIndices", theDesc.materialIndices, triCount, inAllocator); PxStridedData cookedData; cookedData.stride = sizeof(PxU8); PxU32 dataSize; readStridedBufferProperty<PxU8>( inReader, "CookedData", cookedData, dataSize, inAllocator); TMemoryPoolManager theManager(inAllocator.getAllocator()); MemoryBuffer theTempBuf( &theManager ); // PxTriangleMesh* theMesh = NULL; PxBVH33TriangleMesh* theMesh = NULL; if(dataSize != 0) { theTempBuf.write(cookedData.data, dataSize*sizeof(PxU8)); // theMesh = inArgs.physics.createTriangleMesh( theTempBuf ); theMesh = static_cast<PxBVH33TriangleMesh*>(inArgs.physics.createTriangleMesh( theTempBuf )); } if(theMesh == NULL) { PX_ASSERT(inArgs.cooker); theTempBuf.clear(); PxCookingParams params = *inArgs.cooker; params.midphaseDesc = PxMeshMidPhase::eBVH33; PxCookTriangleMesh( params, theDesc, theTempBuf ); // theMesh = inArgs.physics.createTriangleMesh( theTempBuf ); theMesh = static_cast<PxBVH33TriangleMesh*>(inArgs.physics.createTriangleMesh( theTempBuf )); } return PxCreateRepXObject( theMesh ); } void PxBVH34TriangleMeshRepXSerializer::objectToFileImpl( const PxBVH34TriangleMesh* mesh, PxCollection* /*inCollection*/, XmlWriter& inWriter, MemoryBuffer& inTempBuffer, PxRepXInstantiationArgs& inArgs ) { bool hasMatIndex = mesh->getTriangleMaterialIndex(0) != 0xffff; PxU32 numVertices = mesh->getNbVertices(); const PxVec3* vertices = mesh->getVertices(); writeBuffer( inWriter, inTempBuffer, 2, vertices, numVertices, "Points", writePxVec3 ); bool isU16 = mesh->getTriangleMeshFlags() & PxTriangleMeshFlag::e16_BIT_INDICES ? 
true : false; PxU32 triCount = mesh->getNbTriangles(); const void* indices = mesh->getTriangles(); if ( isU16 ) writeBuffer( inWriter, inTempBuffer, 2, reinterpret_cast<const Triangle<PxU16>* >( indices ), triCount, "Triangles", writeTriangle<PxU16> ); else writeBuffer( inWriter, inTempBuffer, 2, reinterpret_cast<const Triangle<PxU32>* >( indices ), triCount, "Triangles", writeTriangle<PxU32> ); if ( hasMatIndex ) writeBuffer( inWriter, inTempBuffer, 6, mesh, materialAccess, triCount, "materialIndices", writeDatatype<PxU32> ); //Cooked stream PxTriangleMeshDesc meshDesc; meshDesc.points.count = numVertices; meshDesc.points.data = vertices; meshDesc.points.stride = sizeof(PxVec3); meshDesc.triangles.count = triCount; meshDesc.triangles.data = indices; meshDesc.triangles.stride = isU16?3*sizeof(PxU16):3*sizeof(PxU32); if(isU16) { meshDesc.triangles.stride = sizeof(PxU16)*3; meshDesc.flags |= PxMeshFlag::e16_BIT_INDICES; } else { meshDesc.triangles.stride = sizeof(PxU32)*3; } if(hasMatIndex) { PxMaterialTableIndex* materialIndices = new PxMaterialTableIndex[triCount]; for(PxU32 i = 0; i < triCount; i++) materialIndices[i] = mesh->getTriangleMaterialIndex(i); meshDesc.materialIndices.data = materialIndices; meshDesc.materialIndices.stride = sizeof(PxMaterialTableIndex); } if(inArgs.cooker != NULL) { TMemoryPoolManager theManager(mAllocator); MemoryBuffer theTempBuf( &theManager ); theTempBuf.clear(); PxCookTriangleMesh( *inArgs.cooker, meshDesc, theTempBuf ); writeBuffer( inWriter, inTempBuffer, 16, theTempBuf.mBuffer, theTempBuf.mWriteOffset, "CookedData", writeDatatype<PxU8> ); } delete []meshDesc.materialIndices.data; } PxRepXObject PxBVH34TriangleMeshRepXSerializer::fileToObject( XmlReader& inReader, XmlMemoryAllocator& inAllocator, PxRepXInstantiationArgs& inArgs, PxCollection* /*inCollection*/ ) { //We can't do a simple inverse; we *have* to cook data to get a mesh. 
PxTriangleMeshDesc theDesc; readStridedBufferProperty<PxVec3>( inReader, "points", theDesc.points, inAllocator); readStridedBufferProperty<Triangle<PxU32> >( inReader, "triangles", theDesc.triangles, inAllocator); PxU32 triCount; readStridedBufferProperty<PxMaterialTableIndex>( inReader, "materialIndices", theDesc.materialIndices, triCount, inAllocator); PxStridedData cookedData; cookedData.stride = sizeof(PxU8); PxU32 dataSize; readStridedBufferProperty<PxU8>( inReader, "CookedData", cookedData, dataSize, inAllocator); TMemoryPoolManager theManager(inAllocator.getAllocator()); MemoryBuffer theTempBuf( &theManager ); // PxTriangleMesh* theMesh = NULL; PxBVH34TriangleMesh* theMesh = NULL; if(dataSize != 0) { theTempBuf.write(cookedData.data, dataSize*sizeof(PxU8)); // theMesh = inArgs.physics.createTriangleMesh( theTempBuf ); theMesh = static_cast<PxBVH34TriangleMesh*>(inArgs.physics.createTriangleMesh( theTempBuf )); } if(theMesh == NULL) { PX_ASSERT(inArgs.cooker); theTempBuf.clear(); PxCookingParams params = *inArgs.cooker; params.midphaseDesc = PxMeshMidPhase::eBVH34; PxCookTriangleMesh( params, theDesc, theTempBuf ); // theMesh = inArgs.physics.createTriangleMesh( theTempBuf ); theMesh = static_cast<PxBVH34TriangleMesh*>(inArgs.physics.createTriangleMesh( theTempBuf )); } return PxCreateRepXObject(theMesh); } //************************************************************* // Actual RepXSerializer implementations for PxHeightField //************************************************************* void PxHeightFieldRepXSerializer::objectToFileImpl( const PxHeightField* inHeightField, PxCollection* inCollection, XmlWriter& inWriter, MemoryBuffer& inTempBuffer, PxRepXInstantiationArgs& /*inArgs*/) { PxHeightFieldDesc theDesc; theDesc.nbRows = inHeightField->getNbRows(); theDesc.nbColumns = inHeightField->getNbColumns(); theDesc.format = inHeightField->getFormat(); theDesc.samples.stride = inHeightField->getSampleStride(); theDesc.samples.data = NULL; theDesc.convexEdgeThreshold = inHeightField->getConvexEdgeThreshold(); theDesc.flags = inHeightField->getFlags(); PxU32 theCellCount = inHeightField->getNbRows() * inHeightField->getNbColumns(); PxU32 theSampleStride = sizeof( PxHeightFieldSample ); PxU32 theSampleBufSize = theCellCount * theSampleStride; PxHeightFieldSample* theSamples = reinterpret_cast< PxHeightFieldSample*> ( inTempBuffer.mManager->allocate( theSampleBufSize ) ); inHeightField->saveCells( theSamples, theSampleBufSize ); theDesc.samples.data = theSamples; writeAllProperties( &theDesc, inWriter, inTempBuffer, *inCollection ); writeStridedBufferProperty<PxHeightFieldSample>( inWriter, inTempBuffer, "samples", theDesc.samples, theDesc.nbRows * theDesc.nbColumns, 6, writeHeightFieldSample); inTempBuffer.mManager->deallocate( reinterpret_cast<PxU8*>(theSamples) ); } PxRepXObject PxHeightFieldRepXSerializer::fileToObject( XmlReader& inReader, XmlMemoryAllocator& inAllocator, PxRepXInstantiationArgs& inArgs, PxCollection* inCollection ) { PX_ASSERT(inArgs.cooker); PxHeightFieldDesc theDesc; readAllProperties( inArgs, inReader, &theDesc, inAllocator, *inCollection ); //Now read the data... 
PxU32 count = 0; //ignored becaues numRows and numColumns tells the story readStridedBufferProperty<PxHeightFieldSample>( inReader, "samples", theDesc.samples, count, inAllocator); PxHeightField* retval = PxCreateHeightField( theDesc, inArgs.physics.getPhysicsInsertionCallback() ); return PxCreateRepXObject(retval); } //************************************************************* // Actual RepXSerializer implementations for PxConvexMesh //************************************************************* void PxConvexMeshRepXSerializer::objectToFileImpl( const PxConvexMesh* mesh, PxCollection* /*inCollection*/, XmlWriter& inWriter, MemoryBuffer& inTempBuffer, PxRepXInstantiationArgs& inArgs ) { writeBuffer( inWriter, inTempBuffer, 2, mesh->getVertices(), mesh->getNbVertices(), "points", writePxVec3 ); if(inArgs.cooker != NULL) { //Cache cooked Data PxConvexMeshDesc theDesc; theDesc.points.data = mesh->getVertices(); theDesc.points.stride = sizeof(PxVec3); theDesc.points.count = mesh->getNbVertices(); theDesc.flags = PxConvexFlag::eCOMPUTE_CONVEX; TMemoryPoolManager theManager(mAllocator); MemoryBuffer theTempBuf( &theManager ); PxCookConvexMesh( *inArgs.cooker, theDesc, theTempBuf ); writeBuffer( inWriter, inTempBuffer, 16, theTempBuf.mBuffer, theTempBuf.mWriteOffset, "CookedData", writeDatatype<PxU8> ); } } //Conversion from scene object to descriptor. PxRepXObject PxConvexMeshRepXSerializer::fileToObject( XmlReader& inReader, XmlMemoryAllocator& inAllocator, PxRepXInstantiationArgs& inArgs, PxCollection* /*inCollection*/) { PxConvexMeshDesc theDesc; readStridedBufferProperty<PxVec3>( inReader, "points", theDesc.points, inAllocator); theDesc.flags = PxConvexFlag::eCOMPUTE_CONVEX; PxStridedData cookedData; cookedData.stride = sizeof(PxU8); PxU32 dataSize; readStridedBufferProperty<PxU8>( inReader, "CookedData", cookedData, dataSize, inAllocator); TMemoryPoolManager theManager(inAllocator.getAllocator()); MemoryBuffer theTempBuf( &theManager ); PxConvexMesh* theMesh = NULL; if(dataSize != 0) { theTempBuf.write(cookedData.data, dataSize*sizeof(PxU8)); theMesh = inArgs.physics.createConvexMesh( theTempBuf ); } if(theMesh == NULL) { PX_ASSERT(inArgs.cooker); theTempBuf.clear(); PxCookConvexMesh( *inArgs.cooker, theDesc, theTempBuf ); theMesh = inArgs.physics.createConvexMesh( theTempBuf ); } return PxCreateRepXObject(theMesh); } //************************************************************* // Actual RepXSerializer implementations for PxRigidStatic //************************************************************* PxRigidStatic* PxRigidStaticRepXSerializer::allocateObject( PxRepXInstantiationArgs& inArgs ) { return inArgs.physics.createRigidStatic( PxTransform(PxIdentity) ); } //************************************************************* // Actual RepXSerializer implementations for PxRigidDynamic //************************************************************* PxRigidDynamic* PxRigidDynamicRepXSerializer::allocateObject( PxRepXInstantiationArgs& inArgs ) { return inArgs.physics.createRigidDynamic( PxTransform(PxIdentity) ); } //************************************************************* // Actual RepXSerializer implementations for PxArticulationReducedCoordinate //************************************************************* void PxArticulationReducedCoordinateRepXSerializer::objectToFileImpl(const PxArticulationReducedCoordinate* inObj, PxCollection* inCollection, XmlWriter& inWriter, MemoryBuffer& inTempBuffer, PxRepXInstantiationArgs& /*inArgs*/) { TNameStack 
nameStack(inTempBuffer.mManager->mWrapper); Sn::TArticulationLinkLinkMap linkMap(inTempBuffer.mManager->mWrapper); RepXVisitorWriter<PxArticulationReducedCoordinate> writer(nameStack, inWriter, inObj, inTempBuffer, *inCollection, &linkMap); RepXPropertyFilter<RepXVisitorWriter<PxArticulationReducedCoordinate> > theOp(writer); visitAllProperties<PxArticulationReducedCoordinate>(theOp); } PxArticulationReducedCoordinate* PxArticulationReducedCoordinateRepXSerializer::allocateObject(PxRepXInstantiationArgs& inArgs) { return inArgs.physics.createArticulationReducedCoordinate(); } //************************************************************* // Actual RepXSerializer implementations for PxAggregate //************************************************************* void PxAggregateRepXSerializer::objectToFileImpl( const PxAggregate* data, PxCollection* inCollection, XmlWriter& inWriter, MemoryBuffer& inTempBuffer, PxRepXInstantiationArgs& /*inArgs*/) { PxArticulationLink *link = NULL; inWriter.addAndGotoChild( "Actors" ); for(PxU32 i = 0; i < data->getNbActors(); ++i) { PxActor* actor; if(data->getActors(&actor, 1, i)) { link = actor->is<PxArticulationLink>(); } if(link && !link->getInboundJoint() ) { writeProperty( inWriter, *inCollection, inTempBuffer, "PxArticulationRef", &link->getArticulation()); } else if( !link ) { PxSerialObjectId theId = 0; theId = inCollection->getId( *actor ); if( theId == 0 ) theId = static_cast<uint64_t>(size_t(actor)); writeProperty( inWriter, *inCollection, inTempBuffer, "PxActorRef", theId ); } } inWriter.leaveChild( ); writeProperty( inWriter, *inCollection, inTempBuffer, "NumActors", data->getNbActors() ); writeProperty( inWriter, *inCollection, inTempBuffer, "MaxNbActors", data->getMaxNbActors() ); writeProperty(inWriter, *inCollection, inTempBuffer, "MaxNbShapes", data->getMaxNbShapes()); writeProperty( inWriter, *inCollection, inTempBuffer, "SelfCollision", data->getSelfCollision() ); writeAllProperties( data, inWriter, inTempBuffer, *inCollection ); } PxRepXObject PxAggregateRepXSerializer::fileToObject( XmlReader& inReader, XmlMemoryAllocator& inAllocator, PxRepXInstantiationArgs& inArgs, PxCollection* inCollection ) { PxU32 numActors; readProperty( inReader, "NumActors", numActors ); PxU32 maxNbActors; readProperty( inReader, "MaxNbActors", maxNbActors ); PxU32 maxNbShapes; readProperty(inReader, "MaxNbShapes", maxNbShapes); bool selfCollision; bool ret = readProperty( inReader, "SelfCollision", selfCollision ); PxAggregate* theAggregate = inArgs.physics.createAggregate(maxNbActors, maxNbShapes, selfCollision); ret &= readAllProperties( inArgs, inReader, theAggregate, inAllocator, *inCollection ); inReader.pushCurrentContext(); if ( inReader.gotoChild( "Actors" ) ) { inReader.pushCurrentContext(); for( bool matSuccess = inReader.gotoFirstChild(); matSuccess; matSuccess = inReader.gotoNextSibling() ) { const char* actorType = inReader.getCurrentItemName(); if ( 0 == physx::Pxstricmp( actorType, "PxActorRef" ) ) { PxActor *actor = NULL; ret &= readReference<PxActor>( inReader, *inCollection, actor ); if(actor) { PxScene *currScene = actor->getScene(); if(currScene) { currScene->removeActor(*actor); } theAggregate->addActor(*actor); } } else if ( 0 == physx::Pxstricmp( actorType, "PxArticulationRef" ) ) { PxArticulationReducedCoordinate* articulation = NULL; ret &= readReference<PxArticulationReducedCoordinate>( inReader, *inCollection, articulation ); if(articulation) { PxScene *currScene = articulation->getScene(); if(currScene) { 
currScene->removeArticulation(*articulation); } theAggregate->addArticulation(*articulation); } } } inReader.popCurrentContext(); inReader.leaveChild(); } inReader.popCurrentContext(); return ret ? PxCreateRepXObject(theAggregate) : PxRepXObject(); } }
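The geometry cleanup switch in PxShapeRepXSerializer::fileToObject above ends the parsed PxGeometry's lifetime by invoking its destructor explicitly and then handing the raw storage back to the XmlMemoryAllocator, since the object lives in allocator-owned memory rather than coming from a plain new. A minimal, self-contained sketch of that placement-new / explicit-destructor / deallocate pattern, using a hypothetical Widget type and malloc/free as a stand-in allocator:

#include <cstdlib>
#include <new>

struct Widget
{
    explicit Widget(int v) : value(v) {}
    ~Widget() {}        // imagine this releases resources
    int value;
};

int main()
{
    // Grab raw storage from the allocator (stand-in for XmlMemoryAllocator).
    void* storage = std::malloc(sizeof(Widget));
    if (!storage)
        return 1;

    // Construct the object in place in that storage.
    Widget* w = new (storage) Widget(42);

    // ... use *w ...

    // Tear down in two explicit steps, mirroring the serializer:
    // run the destructor by hand, then return the memory to the allocator.
    w->~Widget();
    std::free(storage);
    return 0;
}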
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Xml/SnXmlStringToType.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SN_XML_STRING_TO_TYPE_H #define SN_XML_STRING_TO_TYPE_H #include "common/PxCoreUtilityTypes.h" #include "PxFiltering.h" #include "foundation/PxString.h" #include <stdio.h> #include <ctype.h> //Remapping function name for gcc-based systems. #ifndef _MSC_VER #define _strtoui64 strtoull #endif namespace physx { namespace Sn { template<typename TDataType> struct StrToImpl { bool compile_error; }; template<> struct StrToImpl<PxU64> { //Id's (void ptrs) are written to file as unsigned //64 bit integers, so this method gets called more //often than one might think. 
PX_INLINE void strto( PxU64& ioDatatype,const char*& ioData ) { ioDatatype = _strtoui64( ioData, const_cast<char **>(&ioData), 10 ); } }; PX_INLINE PxF32 strToFloat(const char *str,const char **nextScan) { PxF32 ret; while ( *str && isspace(static_cast<unsigned char>(*str))) str++; // skip leading whitespace char temp[256] = ""; char *dest = temp; char *end = &temp[255]; const char *begin = str; while ( *str && !isspace(static_cast<unsigned char>(*str)) && dest < end ) // copy the number up to the first whitespace or eos { *dest++ = *str++; } *dest = 0; ret = PxF32(strtod(temp,&end)); if ( nextScan ) { *nextScan = begin+(end-temp); } return ret; } template<> struct StrToImpl<PxU32> { PX_INLINE void strto( PxU32& ioDatatype,const char*& ioData ) { ioDatatype = static_cast<PxU32>( strtoul( ioData,const_cast<char **>(&ioData), 10 ) ); } }; template<> struct StrToImpl<PxI32> { PX_INLINE void strto( PxI32& ioDatatype,const char*& ioData ) { ioDatatype = static_cast<PxI32>( strtoul( ioData,const_cast<char **>(&ioData), 10 ) ); } }; template<> struct StrToImpl<PxU16> { PX_INLINE void strto( PxU16& ioDatatype,const char*& ioData ) { ioDatatype = static_cast<PxU16>( strtoul( ioData,const_cast<char **>(&ioData), 10 ) ); } }; PX_INLINE void eatwhite(const char*& ioData ) { if ( ioData ) { while( isspace( static_cast<unsigned char>(*ioData) ) ) ++ioData; } } // copy the source data to the dest buffer until the first whitespace is encountered. // Do not overflow the buffer based on the bufferLen provided. // Advance the input 'ioData' pointer so that it sits just at the next whitespace PX_INLINE void nullTerminateWhite(const char*& ioData,char *buffer,PxU32 bufferLen) { if ( ioData ) { char *eof = buffer+(bufferLen-1); char *dest = buffer; while( *ioData && !isspace(static_cast<unsigned char>(*ioData)) && dest < eof ) { *dest++ = *ioData++; } *dest = 0; } } inline void nonNullTerminateWhite(const char*& ioData ) { if ( ioData ) { while( *ioData && !isspace( static_cast<unsigned char>(*ioData) ) ) ++ioData; } } template<> struct StrToImpl<PxF32> { inline void strto( PxF32& ioDatatype,const char*& ioData ) { ioDatatype = strToFloat(ioData,&ioData); } }; template<> struct StrToImpl<void*> { inline void strto( void*& ioDatatype,const char*& ioData ) { PxU64 theData; StrToImpl<PxU64>().strto( theData, ioData ); ioDatatype = reinterpret_cast<void*>( size_t( theData ) ); } }; template<> struct StrToImpl<physx::PxVec3> { inline void strto( physx::PxVec3& ioDatatype,const char*& ioData ) { StrToImpl<PxF32>().strto( ioDatatype[0], ioData ); StrToImpl<PxF32>().strto( ioDatatype[1], ioData ); StrToImpl<PxF32>().strto( ioDatatype[2], ioData ); } }; template<> struct StrToImpl<PxU8*> { inline void strto( PxU8*& /*ioDatatype*/,const char*& /*ioData*/) { } }; template<> struct StrToImpl<bool> { inline void strto( bool& ioType,const char*& inValue ) { ioType = physx::Pxstricmp( inValue, "true" ) == 0 ? 
true : false; } }; template<> struct StrToImpl<PxU8> { PX_INLINE void strto( PxU8& ioType,const char* & inValue) { ioType = static_cast<PxU8>( strtoul( inValue,const_cast<char **>(&inValue), 10 ) ); } }; template<> struct StrToImpl<PxFilterData> { PX_INLINE void strto( PxFilterData& ioType,const char*& inValue) { ioType.word0 = static_cast<PxU32>( strtoul( inValue,const_cast<char **>(&inValue), 10 ) ); ioType.word1 = static_cast<PxU32>( strtoul( inValue,const_cast<char **>(&inValue), 10 ) ); ioType.word2 = static_cast<PxU32>( strtoul( inValue,const_cast<char **>(&inValue), 10 ) ); ioType.word3 = static_cast<PxU32>( strtoul( inValue, NULL, 10 ) ); } }; template<> struct StrToImpl<PxQuat> { PX_INLINE void strto( PxQuat& ioType,const char*& inValue ) { ioType.x = static_cast<PxReal>( strToFloat( inValue, &inValue ) ); ioType.y = static_cast<PxReal>( strToFloat( inValue, &inValue ) ); ioType.z = static_cast<PxReal>( strToFloat( inValue, &inValue ) ); ioType.w = static_cast<PxReal>( strToFloat( inValue, &inValue ) ); } }; template<> struct StrToImpl<PxTransform> { PX_INLINE void strto( PxTransform& ioType,const char*& inValue) { ioType.q.x = static_cast<PxReal>( strToFloat( inValue, &inValue ) ); ioType.q.y = static_cast<PxReal>( strToFloat( inValue, &inValue ) ); ioType.q.z = static_cast<PxReal>( strToFloat( inValue, &inValue ) ); ioType.q.w = static_cast<PxReal>( strToFloat( inValue, &inValue ) ); ioType.p[0] = static_cast<PxReal>( strToFloat( inValue, &inValue ) ); ioType.p[1] = static_cast<PxReal>( strToFloat( inValue, &inValue ) ); ioType.p[2] = static_cast<PxReal>( strToFloat( inValue, &inValue ) ); } }; template<> struct StrToImpl<PxBounds3> { PX_INLINE void strto( PxBounds3& ioType,const char*& inValue) { ioType.minimum[0] = static_cast<PxReal>( strToFloat( inValue, &inValue ) ); ioType.minimum[1] = static_cast<PxReal>( strToFloat( inValue, &inValue ) ); ioType.minimum[2] = static_cast<PxReal>( strToFloat( inValue, &inValue ) ); ioType.maximum[0] = static_cast<PxReal>( strToFloat( inValue, &inValue ) ); ioType.maximum[1] = static_cast<PxReal>( strToFloat( inValue, &inValue ) ); ioType.maximum[2] = static_cast<PxReal>( strToFloat( inValue, &inValue ) ); } }; template<> struct StrToImpl<PxMetaDataPlane> { PX_INLINE void strto( PxMetaDataPlane& ioType,const char*& inValue) { ioType.normal.x = static_cast<PxReal>( strToFloat( inValue, &inValue ) ); ioType.normal.y = static_cast<PxReal>( strToFloat( inValue, &inValue ) ); ioType.normal.z = static_cast<PxReal>( strToFloat( inValue, &inValue ) ); ioType.distance = static_cast<PxReal>( strToFloat( inValue, &inValue ) ); } }; template<> struct StrToImpl<PxRigidDynamic*> { PX_INLINE void strto( PxRigidDynamic*& /*ioDatatype*/,const char*& /*ioData*/) { } }; template<typename TDataType> inline void strto( TDataType& ioType,const char*& ioData ) { if ( ioData && *ioData ) StrToImpl<TDataType>().strto( ioType, ioData ); } template<typename TDataType> inline void strtoLong( TDataType& ioType,const char*& ioData ) { if ( ioData && *ioData ) StrToImpl<TDataType>().strto( ioType, ioData ); } template<typename TDataType> inline void stringToType( const char* inValue, TDataType& ioType ) { const char* theValue( inValue ); return strto( ioType, theValue ); } } } #endif
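The converters above are all reached through the stringToType/strto entry points at the bottom of the header: each StrToImpl specialization consumes whitespace-separated tokens and advances the input pointer, so composite types such as PxVec3 and PxTransform are filled field by field in the order their strto methods read them. A small usage sketch with made-up values (assumes this header and the foundation headers are on the include path):

#include "SnXmlStringToType.h"
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxVec3.h"
#include "foundation/PxTransform.h"

using namespace physx;

static void parseExamples()
{
    PxVec3 v(0.0f);
    Sn::stringToType("1.5 2.5 3.5", v);          // three floats -> (1.5, 2.5, 3.5)

    PxU32 count = 0;
    Sn::stringToType("42", count);               // strtoul-based integer parse

    // PxTransform is read as "qx qy qz qw px py pz" (see StrToImpl<PxTransform> above).
    PxTransform pose;
    Sn::stringToType("0 0 0 1 10 20 30", pose);  // identity rotation at (10, 20, 30)
}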
NVIDIA-Omniverse/PhysX/physx/source/physxextensions/src/serialization/Xml/SnXmlWriter.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SN_XML_WRITER_H #define SN_XML_WRITER_H #include "foundation/PxSimpleTypes.h" namespace physx { struct PxRepXObject; /** * Writer used by extensions to write elements to a file or database */ class PX_DEPRECATED XmlWriter { protected: virtual ~XmlWriter(){} public: /** Write a key-value pair into the current item */ virtual void write( const char* inName, const char* inData ) = 0; /** Write an object id into the current item */ virtual void write( const char* inName, const PxRepXObject& inLiveObject ) = 0; /** Add a child that then becomes the current context */ virtual void addAndGotoChild( const char* inName ) = 0; /** Leave the current child */ virtual void leaveChild() = 0; }; } #endif
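XmlWriter is the small abstract sink the RepX serializers in this directory write through: addAndGotoChild/leaveChild bracket a nested element and make it the current context, while write emits name/value pairs inside it. A hedged sketch of driving the interface from a helper (the element and property names are illustrative only, not a real RepX schema):

#include "SnXmlWriter.h"

// Writes one made-up element through any concrete XmlWriter implementation.
static void writeExampleElement(physx::XmlWriter& writer)
{
    writer.addAndGotoChild("ExampleShape");      // open <ExampleShape>, becomes the current item
    writer.write("Name", "crate01");             // simple key/value pair in the current item
    writer.write("LocalPose", "0 0 0 1 0 1 0");  // seven numbers: quaternion then position,
                                                 // the layout the reader in SnXmlStringToType.h expects
    writer.leaveChild();                         // close </ExampleShape>
}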
NVIDIA-Omniverse/PhysX/physx/source/scenequery/src/SqCompoundPruningPool.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SQ_COMPOUND_PRUNING_POOL_H #define SQ_COMPOUND_PRUNING_POOL_H #include "SqPruner.h" #include "foundation/PxArray.h" #include "GuPrunerMergeData.h" #include "GuIncrementalAABBTree.h" #include "GuAABBTreeBounds.h" namespace physx { namespace Gu { class PruningPool; } namespace Sq { /////////////////////////////////////////////////////////////////////////////////////////////// typedef PxArray<Gu::IncrementalAABBTreeNode*> UpdateMap; /////////////////////////////////////////////////////////////////////////////////////////////// class CompoundTree { public: void updateObjectAfterManualBoundsUpdates(Gu::PrunerHandle handle); void removeObject(Gu::PrunerHandle handle, Gu::PrunerPayloadRemovalCallback* removalCallback); bool addObject(Gu::PrunerHandle& result, const PxBounds3& bounds, const Gu::PrunerPayload& data, const PxTransform& transform); private: void updateMapping(const Gu::PoolIndex poolIndex, Gu::IncrementalAABBTreeNode* node, const Gu::NodeList& changedLeaves); public: Gu::IncrementalAABBTree* mTree; Gu::PruningPool* mPruningPool; UpdateMap* mUpdateMap; PxTransform mGlobalPose; PxCompoundPrunerQueryFlags mFlags; }; /////////////////////////////////////////////////////////////////////////////////////////////// class CompoundTreePool { public: CompoundTreePool(PxU64 contextID); ~CompoundTreePool(); void preallocate(PxU32 newCapacity); Gu::PoolIndex addCompound(Gu::PrunerHandle* results, const Gu::BVH& bvh, const PxBounds3& compoundBounds, const PxTransform& transform, bool isDynamic, const Gu::PrunerPayload* data, const PxTransform* transforms); Gu::PoolIndex removeCompound(Gu::PoolIndex index, Gu::PrunerPayloadRemovalCallback* removalCallback); void shiftOrigin(const PxVec3& shift); PX_FORCE_INLINE const Gu::AABBTreeBounds& getCurrentAABBTreeBounds() const { return mCompoundBounds; } PX_FORCE_INLINE const PxBounds3* 
getCurrentCompoundBounds() const { return mCompoundBounds.getBounds(); } PX_FORCE_INLINE PxBounds3* getCurrentCompoundBounds() { return mCompoundBounds.getBounds(); } PX_FORCE_INLINE const CompoundTree* getCompoundTrees() const { return mCompoundTrees; } PX_FORCE_INLINE CompoundTree* getCompoundTrees() { return mCompoundTrees; } PX_FORCE_INLINE PxU32 getNbObjects() const { return mNbObjects; } private: bool resize(PxU32 newCapacity); PxU32 mNbObjects; //!< Current number of objects PxU32 mMaxNbObjects; //!< Max. number of objects (capacity for mCompoundBounds, mCompoundTrees) //!< these arrays are parallel Gu::AABBTreeBounds mCompoundBounds; //!< List of compound world boxes, stores mNbObjects, capacity=mMaxNbObjects CompoundTree* mCompoundTrees; PxU64 mContextID; }; } } #endif
NVIDIA-Omniverse/PhysX/physx/source/scenequery/src/SqCompoundPruningPool.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #include "foundation/PxAllocator.h" #include "SqCompoundPruningPool.h" #include "GuPruningPool.h" #include "GuAABBTree.h" #include "GuBVH.h" using namespace physx; using namespace Cm; using namespace Gu; using namespace Sq; /////////////////////////////////////////////////////////////////////////////////////////////// void CompoundTree::updateObjectAfterManualBoundsUpdates(PrunerHandle handle) { const PxBounds3* newBounds = mPruningPool->getCurrentWorldBoxes(); const PoolIndex poolIndex = mPruningPool->getIndex(handle); NodeList changedLeaves; changedLeaves.reserve(8); IncrementalAABBTreeNode* node = mTree->update((*mUpdateMap)[poolIndex], poolIndex, newBounds, changedLeaves); // we removed node during update, need to update the mapping updateMapping(poolIndex, node, changedLeaves); } /////////////////////////////////////////////////////////////////////////////////////////////// void CompoundTree::removeObject(PrunerHandle handle, PrunerPayloadRemovalCallback* removalCallback) { const PoolIndex poolIndex = mPruningPool->getIndex(handle); // save the pool index for removed object const PoolIndex poolRelocatedLastIndex = mPruningPool->removeObject(handle, removalCallback); // save the lastIndex returned by removeObject IncrementalAABBTreeNode* node = mTree->remove((*mUpdateMap)[poolIndex], poolIndex, mPruningPool->getCurrentWorldBoxes()); // if node moved to its parent if (node && node->isLeaf()) { for (PxU32 j = 0; j < node->getNbPrimitives(); j++) { const PoolIndex index = node->getPrimitives(NULL)[j]; (*mUpdateMap)[index] = node; } } (*mUpdateMap)[poolIndex] = (*mUpdateMap)[poolRelocatedLastIndex]; // fix indices if we made a swap if(poolRelocatedLastIndex != poolIndex) mTree->fixupTreeIndices((*mUpdateMap)[poolIndex], poolRelocatedLastIndex, poolIndex); } 
/////////////////////////////////////////////////////////////////////////////////////////////// bool CompoundTree::addObject(PrunerHandle& result, const PxBounds3& bounds, const PrunerPayload& data, const PxTransform& transform) { mPruningPool->addObjects(&result, &bounds, &data, &transform, 1); if (mPruningPool->mMaxNbObjects > mUpdateMap->size()) mUpdateMap->resize(mPruningPool->mMaxNbObjects); const PoolIndex poolIndex = mPruningPool->getIndex(result); NodeList changedLeaves; changedLeaves.reserve(8); IncrementalAABBTreeNode* node = mTree->insert(poolIndex, mPruningPool->getCurrentWorldBoxes(), changedLeaves); updateMapping(poolIndex, node, changedLeaves); return true; } /////////////////////////////////////////////////////////////////////////////////////////////// void CompoundTree::updateMapping(const PoolIndex poolIndex, IncrementalAABBTreeNode* node, const NodeList& changedLeaves) { // if a node was split we need to update the node indices and also the sibling indices if(!changedLeaves.empty()) { if(node && node->isLeaf()) { for(PxU32 j = 0; j < node->getNbPrimitives(); j++) { const PoolIndex index = node->getPrimitives(NULL)[j]; (*mUpdateMap)[index] = node; } } for(PxU32 i = 0; i < changedLeaves.size(); i++) { IncrementalAABBTreeNode* changedNode = changedLeaves[i]; PX_ASSERT(changedNode->isLeaf()); for(PxU32 j = 0; j < changedNode->getNbPrimitives(); j++) { const PoolIndex index = changedNode->getPrimitives(NULL)[j]; (*mUpdateMap)[index] = changedNode; } } } else { (*mUpdateMap)[poolIndex] = node; } } /////////////////////////////////////////////////////////////////////////////////////////////// CompoundTreePool::CompoundTreePool(PxU64 contextID) : mNbObjects (0), mMaxNbObjects (0), mCompoundTrees (NULL), mContextID (contextID) { } /////////////////////////////////////////////////////////////////////////////////////////////// CompoundTreePool::~CompoundTreePool() { PX_FREE(mCompoundTrees); } /////////////////////////////////////////////////////////////////////////////////////////////// bool CompoundTreePool::resize(PxU32 newCapacity) { mCompoundBounds.resize(newCapacity, mNbObjects); CompoundTree* newTrees = PX_ALLOCATE(CompoundTree, newCapacity, "IncrementalTrees*"); if(!newTrees) return false; // memzero, we need to set the pointers in the compound tree to NULL PxMemZero(newTrees, sizeof(CompoundTree)*newCapacity); if(mCompoundTrees) PxMemCopy(newTrees, mCompoundTrees, mNbObjects*sizeof(CompoundTree)); mMaxNbObjects = newCapacity; PX_FREE(mCompoundTrees); mCompoundTrees = newTrees; return true; } /////////////////////////////////////////////////////////////////////////////////////////////// void CompoundTreePool::preallocate(PxU32 newCapacity) { if(newCapacity>mMaxNbObjects) resize(newCapacity); } /////////////////////////////////////////////////////////////////////////////////////////////// void CompoundTreePool::shiftOrigin(const PxVec3& shift) { PxBounds3* bounds = mCompoundBounds.getBounds(); for(PxU32 i=0; i < mNbObjects; i++) { bounds[i].minimum -= shift; bounds[i].maximum -= shift; mCompoundTrees[i].mGlobalPose.p -= shift; } } /////////////////////////////////////////////////////////////////////////////////////////////// PoolIndex CompoundTreePool::addCompound(PrunerHandle* results, const BVH& bvh, const PxBounds3& compoundBounds, const PxTransform& transform, bool isDynamic, const PrunerPayload* data, const PxTransform* transforms) { if(mNbObjects==mMaxNbObjects) // increase the capacity on overflow { if(!resize(PxMax<PxU32>(mMaxNbObjects*2, 32))) { // pool can return an 
invalid handle if memory alloc fails PxGetFoundation().error(PxErrorCode::eOUT_OF_MEMORY, PX_FL, "CompoundTreePool::addCompound memory allocation in resize failed."); return INVALID_PRUNERHANDLE; } } PX_ASSERT(mNbObjects!=mMaxNbObjects); const PoolIndex index = mNbObjects++; mCompoundBounds.getBounds()[index] = compoundBounds; const PxU32 nbObjects = bvh.getNbBounds(); CompoundTree& tree = mCompoundTrees[index]; PX_ASSERT(tree.mPruningPool == NULL); PX_ASSERT(tree.mTree == NULL); PX_ASSERT(tree.mUpdateMap == NULL); tree.mGlobalPose = transform; tree.mFlags = isDynamic ? PxCompoundPrunerQueryFlag::eDYNAMIC : PxCompoundPrunerQueryFlag::eSTATIC; // prepare the pruning pool PruningPool* pool = PX_NEW(PruningPool)(mContextID, TRANSFORM_CACHE_LOCAL); pool->preallocate(nbObjects); pool->addObjects(results, bvh.getBounds(), data, transforms, nbObjects); tree.mPruningPool = pool; // prepare update map UpdateMap* map = PX_PLACEMENT_NEW(PX_ALLOC(sizeof(UpdateMap), "Update map"), UpdateMap); map->resizeUninitialized(nbObjects); tree.mUpdateMap = map; IncrementalAABBTree* iTree = PX_NEW(IncrementalAABBTree); iTree->copy(bvh, *map); tree.mTree = iTree; return index; } /////////////////////////////////////////////////////////////////////////////////////////////// PoolIndex CompoundTreePool::removeCompound(PoolIndex indexOfRemovedObject, PrunerPayloadRemovalCallback* removalCallback) { PX_ASSERT(mNbObjects); // release the tree PX_DELETE(mCompoundTrees[indexOfRemovedObject].mTree); mCompoundTrees[indexOfRemovedObject].mUpdateMap->clear(); mCompoundTrees[indexOfRemovedObject].mUpdateMap->~PxArray(); PX_FREE(mCompoundTrees[indexOfRemovedObject].mUpdateMap); if(removalCallback) { const PruningPool* pool = mCompoundTrees[indexOfRemovedObject].mPruningPool; removalCallback->invoke(pool->getNbActiveObjects(), pool->getObjects()); } PX_DELETE(mCompoundTrees[indexOfRemovedObject].mPruningPool); const PoolIndex indexOfLastObject = --mNbObjects; // swap the object at last index with index if(indexOfLastObject!=indexOfRemovedObject) { // PT: move last object's data to recycled spot (from removed object) // PT: the last object has moved so we need to handle the mappings for this object mCompoundBounds.getBounds() [indexOfRemovedObject] = mCompoundBounds.getBounds() [indexOfLastObject]; mCompoundTrees [indexOfRemovedObject] = mCompoundTrees [indexOfLastObject]; mCompoundTrees [indexOfLastObject].mPruningPool = NULL; mCompoundTrees [indexOfLastObject].mUpdateMap = NULL; mCompoundTrees [indexOfLastObject].mTree = NULL; } return indexOfLastObject; }
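CompoundTreePool::removeCompound above keeps the pool dense by moving the last compound into the freed slot and returning the old index of that moved entry, so the caller can patch any external handle-to-index mapping. The same swap-with-last idiom in a minimal, PhysX-independent form:

#include <cassert>
#include <cstdint>
#include <vector>

// Removes items[removedIndex] in O(1) by overwriting it with the last element.
// Returns the previous index of the moved element so the caller can update any
// external mapping, just as removeCompound returns the last pool index.
static uint32_t swapRemove(std::vector<int>& items, uint32_t removedIndex)
{
    assert(!items.empty() && removedIndex < items.size());
    const uint32_t lastIndex = static_cast<uint32_t>(items.size()) - 1;
    if (lastIndex != removedIndex)
        items[removedIndex] = items[lastIndex];  // recycle the freed slot
    items.pop_back();                            // array stays densely packed
    return lastIndex;                            // caller remaps lastIndex -> removedIndex
}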
NVIDIA-Omniverse/PhysX/physx/source/scenequery/src/SqFactory.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #include "SqFactory.h" #include "SqCompoundPruner.h" using namespace physx; using namespace Sq; CompoundPruner* physx::Sq::createCompoundPruner(PxU64 contextID) { return PX_NEW(BVHCompoundPruner)(contextID); }
NVIDIA-Omniverse/PhysX/physx/source/scenequery/src/SqQuery.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. // PT: SQ-API LEVEL 3 (Level 1 = SqPruner.h, Level 2 = SqManager/SqPrunerData) // PT: this file is part of a "high-level" set of files within Sq. The SqPruner API doesn't rely on them. // PT: this should really be at Np level but moving it to Sq allows us to share it. 
#include "SqQuery.h" using namespace physx; using namespace Sq; #include "common/PxProfileZone.h" #include "foundation/PxFPU.h" #include "GuBounds.h" #include "GuIntersectionRayBox.h" #include "GuIntersectionRay.h" #include "geometry/PxGeometryQuery.h" #include "geometry/PxSphereGeometry.h" #include "geometry/PxBoxGeometry.h" #include "geometry/PxCapsuleGeometry.h" #include "geometry/PxConvexMeshGeometry.h" #include "geometry/PxTriangleMeshGeometry.h" #include "PxQueryFiltering.h" using namespace physx; using namespace Sq; using namespace Gu; /////////////////////////////////////////////////////////////////////////////// PX_IMPLEMENT_OUTPUT_ERROR /////////////////////////////////////////////////////////////////////////////// static PX_FORCE_INLINE void copy(PxRaycastHit* PX_RESTRICT dest, const PxRaycastHit* PX_RESTRICT src) { dest->faceIndex = src->faceIndex; dest->flags = src->flags; dest->position = src->position; dest->normal = src->normal; dest->distance = src->distance; dest->u = src->u; dest->v = src->v; dest->actor = src->actor; dest->shape = src->shape; } static PX_FORCE_INLINE void copy(PxSweepHit* PX_RESTRICT dest, const PxSweepHit* PX_RESTRICT src) { dest->faceIndex = src->faceIndex; dest->flags = src->flags; dest->position = src->position; dest->normal = src->normal; dest->distance = src->distance; dest->actor = src->actor; dest->shape = src->shape; } static PX_FORCE_INLINE void copy(PxOverlapHit* PX_RESTRICT dest, const PxOverlapHit* PX_RESTRICT src) { dest->faceIndex = src->faceIndex; dest->actor = src->actor; dest->shape = src->shape; } // these partial template specializations are used to generalize the query code to be reused for all permutations of // hit type=(raycast, overlap, sweep) x query type=(ANY, SINGLE, MULTIPLE) template <typename HitType> struct HitTypeSupport { enum { IsRaycast = 0, IsSweep = 0, IsOverlap = 0 }; }; template <> struct HitTypeSupport<PxRaycastHit> { enum { IsRaycast = 1, IsSweep = 0, IsOverlap = 0 }; static PX_FORCE_INLINE PxReal getDistance(const PxQueryHit& hit) { return static_cast<const PxRaycastHit&>(hit).distance; } }; template <> struct HitTypeSupport<PxSweepHit> { enum { IsRaycast = 0, IsSweep = 1, IsOverlap = 0 }; static PX_FORCE_INLINE PxReal getDistance(const PxQueryHit& hit) { return static_cast<const PxSweepHit&>(hit).distance; } }; template <> struct HitTypeSupport<PxOverlapHit> { enum { IsRaycast = 0, IsSweep = 0, IsOverlap = 1 }; static PX_FORCE_INLINE PxReal getDistance(const PxQueryHit&) { return -1.0f; } }; #define HITDIST(hit) HitTypeSupport<HitType>::getDistance(hit) template<typename HitType> static PxU32 clipHitsToNewMaxDist(HitType* ppuHits, PxU32 count, PxReal newMaxDist) { PxU32 i=0; while(i!=count) { if(HITDIST(ppuHits[i]) > newMaxDist) ppuHits[i] = ppuHits[--count]; else i++; } return count; } namespace physx { namespace Sq { struct MultiQueryInput { const PxVec3* rayOrigin; // only valid for raycasts const PxVec3* unitDir; // only valid for raycasts and sweeps PxReal maxDistance; // only valid for raycasts and sweeps const PxGeometry* geometry; // only valid for overlaps and sweeps const PxTransform* pose; // only valid for overlaps and sweeps PxReal inflation; // only valid for sweeps // Raycast constructor MultiQueryInput(const PxVec3& aRayOrigin, const PxVec3& aUnitDir, PxReal aMaxDist) { rayOrigin = &aRayOrigin; unitDir = &aUnitDir; maxDistance = aMaxDist; geometry = NULL; pose = NULL; inflation = 0.0f; } // Overlap constructor MultiQueryInput(const PxGeometry* aGeometry, const PxTransform* aPose) { geometry = 
aGeometry; pose = aPose; inflation = 0.0f; rayOrigin = unitDir = NULL; } // Sweep constructor MultiQueryInput( const PxGeometry* aGeometry, const PxTransform* aPose, const PxVec3& aUnitDir, const PxReal aMaxDist, const PxReal aInflation) { rayOrigin = NULL; maxDistance = aMaxDist; unitDir = &aUnitDir; geometry = aGeometry; pose = aPose; inflation = aInflation; } PX_FORCE_INLINE const PxVec3& getDir() const { PX_ASSERT(unitDir); return *unitDir; } PX_FORCE_INLINE const PxVec3& getOrigin() const { PX_ASSERT(rayOrigin); return *rayOrigin; } }; } } // performs a single geometry query for any HitType (PxSweepHit, PxOverlapHit, PxRaycastHit) template<typename HitType> struct GeomQueryAny { static PX_FORCE_INLINE PxU32 geomHit( const CachedFuncs& funcs, const MultiQueryInput& input, const Gu::ShapeData* sd, const PxGeometry& sceneGeom, const PxTransform& pose, PxHitFlags hitFlags, PxU32 maxHits, HitType* hits, const PxReal shrunkMaxDistance, const PxBounds3* precomputedBounds, PxQueryThreadContext* context) { using namespace Gu; const PxGeometry& geom0 = *input.geometry; const PxTransform& pose0 = *input.pose; const PxGeometry& geom1 = sceneGeom; const PxTransform& pose1 = pose; // Handle raycasts if(HitTypeSupport<HitType>::IsRaycast) { // the test for mesh AABB is archived in //sw/physx/dev/apokrovsky/graveyard/sqMeshAABBTest.cpp // TODO: investigate performance impact (see US12801) PX_CHECK_AND_RETURN_VAL(input.getDir().isFinite(), "PxScene::raycast(): rayDir is not valid.", 0); PX_CHECK_AND_RETURN_VAL(input.getOrigin().isFinite(), "PxScene::raycast(): rayOrigin is not valid.", 0); PX_CHECK_AND_RETURN_VAL(pose1.isValid(), "PxScene::raycast(): pose is not valid.", 0); PX_CHECK_AND_RETURN_VAL(shrunkMaxDistance >= 0.0f, "PxScene::raycast(): maxDist is negative.", 0); PX_CHECK_AND_RETURN_VAL(PxIsFinite(shrunkMaxDistance), "PxScene::raycast(): maxDist is not valid.", 0); PX_CHECK_AND_RETURN_VAL(PxAbs(input.getDir().magnitudeSquared()-1)<1e-4f, "PxScene::raycast(): ray direction must be unit vector.", 0); // PT: TODO: investigate perf difference const RaycastFunc func = funcs.mCachedRaycastFuncs[geom1.getType()]; return func(geom1, pose1, input.getOrigin(), input.getDir(), shrunkMaxDistance, hitFlags, maxHits, reinterpret_cast<PxGeomRaycastHit*>(hits), sizeof(PxRaycastHit), context); } // Handle sweeps else if(HitTypeSupport<HitType>::IsSweep) { PX_ASSERT(precomputedBounds != NULL); PX_ASSERT(sd != NULL); // b0 = query shape bounds // b1 = scene shape bounds // AP: Here we clip the sweep to bounds with sum of extents. This is needed for GJK stability. // because sweep is equivalent to a raycast vs a scene shape with inflated bounds. // This also may (or may not) provide an optimization for meshes because top level of rtree has multiple boxes // and there is no bounds test for the whole mesh elsewhere PxBounds3 b0 = *precomputedBounds, b1; // compute the scene geometry bounds // PT: TODO: avoid recomputing the bounds here Gu::computeBounds(b1, sceneGeom, pose, 0.0f, 1.0f); const PxVec3 combExt = (b0.getExtents() + b1.getExtents())*1.01f; PxF32 tnear, tfar; if(!intersectRayAABB2(-combExt, combExt, b0.getCenter() - b1.getCenter(), input.getDir(), shrunkMaxDistance, tnear, tfar)) // returns (tnear<tfar) if(tnear>tfar) // this second test is needed because shrunkMaxDistance can be 0 for 0 length sweep return 0; PX_ASSERT(input.getDir().isNormalized()); // tfar is now the t where the ray exits the AABB. 
input.getDir() is normalized const PxVec3& unitDir = input.getDir(); PxSweepHit& sweepHit = reinterpret_cast<PxSweepHit&>(hits[0]); // if we don't start inside the AABB box, offset the start pos, because of precision issues with large maxDist const bool offsetPos = (tnear > GU_RAY_SURFACE_OFFSET); const PxReal offset = offsetPos ? (tnear - GU_RAY_SURFACE_OFFSET) : 0.0f; const PxVec3 offsetVec(offsetPos ? (unitDir*offset) : PxVec3(0.0f)); // we move the geometry we sweep against, so that we avoid the Gu::Capsule/Box recomputation const PxTransform pose1Offset(pose1.p - offsetVec, pose1.q); const PxReal distance = PxMin(tfar, shrunkMaxDistance) - offset; const PxReal inflation = input.inflation; PX_CHECK_AND_RETURN_VAL(pose0.isValid(), "PxScene::sweep(): pose0 is not valid.", 0); PX_CHECK_AND_RETURN_VAL(pose1Offset.isValid(), "PxScene::sweep(): pose1 is not valid.", 0); PX_CHECK_AND_RETURN_VAL(unitDir.isFinite(), "PxScene::sweep(): unitDir is not valid.", 0); PX_CHECK_AND_RETURN_VAL(PxIsFinite(distance), "PxScene::sweep(): distance is not valid.", 0); PX_CHECK_AND_RETURN_VAL((distance >= 0.0f && !(hitFlags & PxHitFlag::eASSUME_NO_INITIAL_OVERLAP)) || distance > 0.0f, "PxScene::sweep(): sweep distance must be >=0 or >0 with eASSUME_NO_INITIAL_OVERLAP.", 0); PxU32 retVal = 0; const GeomSweepFuncs& sf = funcs.mCachedSweepFuncs; switch(geom0.getType()) { case PxGeometryType::eSPHERE: { const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom0); const PxCapsuleGeometry capsuleGeom(sphereGeom.radius, 0.0f); const Capsule worldCapsule(pose0.p, pose0.p, sphereGeom.radius); // AP: precompute? const bool precise = hitFlags & PxHitFlag::ePRECISE_SWEEP; const SweepCapsuleFunc func = precise ? sf.preciseCapsuleMap[geom1.getType()] : sf.capsuleMap[geom1.getType()]; retVal = PxU32(func(geom1, pose1Offset, capsuleGeom, pose0, worldCapsule, unitDir, distance, sweepHit, hitFlags, inflation, context)); } break; case PxGeometryType::eCAPSULE: { const bool precise = hitFlags & PxHitFlag::ePRECISE_SWEEP; const SweepCapsuleFunc func = precise ? sf.preciseCapsuleMap[geom1.getType()] : sf.capsuleMap[geom1.getType()]; retVal = PxU32(func(geom1, pose1Offset, static_cast<const PxCapsuleGeometry&>(geom0), pose0, sd->getGuCapsule(), unitDir, distance, sweepHit, hitFlags, inflation, context)); } break; case PxGeometryType::eBOX: { const bool precise = hitFlags & PxHitFlag::ePRECISE_SWEEP; const SweepBoxFunc func = precise ? 
sf.preciseBoxMap[geom1.getType()] : sf.boxMap[geom1.getType()]; retVal = PxU32(func(geom1, pose1Offset, static_cast<const PxBoxGeometry&>(geom0), pose0, sd->getGuBox(), unitDir, distance, sweepHit, hitFlags, inflation, context)); } break; case PxGeometryType::eCONVEXMESH: { const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom0); const SweepConvexFunc func = sf.convexMap[geom1.getType()]; retVal = PxU32(func(geom1, pose1Offset, convexGeom, pose0, unitDir, distance, sweepHit, hitFlags, inflation, context)); } break; default: outputError<physx::PxErrorCode::eINVALID_PARAMETER>(__LINE__, "PxScene::sweep(): first geometry object parameter must be sphere, capsule, box or convex geometry."); break; } if (retVal) { // we need to offset the distance back sweepHit.distance += offset; // we need to offset the hit position back as we moved the geometry we sweep against sweepHit.position += offsetVec; } return retVal; } // Handle overlaps else if(HitTypeSupport<HitType>::IsOverlap) { const GeomOverlapTable* overlapFuncs = funcs.mCachedOverlapFuncs; return PxU32(Gu::overlap(geom0, pose0, geom1, pose1, overlapFuncs, context)); } else { PX_ALWAYS_ASSERT_MESSAGE("Unexpected template expansion in GeomQueryAny::geomHit"); return 0; } } }; /////////////////////////////////////////////////////////////////////////////// static PX_FORCE_INLINE bool applyFilterEquation(const QueryAdapter& adapter, const PrunerPayload& payload, const PxFilterData& queryFd) { // if the filterData field is non-zero, and the bitwise-AND value of filterData AND the shape's // queryFilterData is zero, the shape is skipped. if(queryFd.word0 | queryFd.word1 | queryFd.word2 | queryFd.word3) { // PT: TODO: revisit this, there's an obvious LHS here otherwise // We could maybe make this more flexible and let the user do the filtering // const PxFilterData& objFd = adapter.getFilterData(payload); PxFilterData objFd; adapter.getFilterData(payload, objFd); const PxU32 keep = (queryFd.word0 & objFd.word0) | (queryFd.word1 & objFd.word1) | (queryFd.word2 & objFd.word2) | (queryFd.word3 & objFd.word3); if(!keep) return false; } return true; } static PX_FORCE_INLINE bool applyAllPreFiltersSQ( const QueryAdapter& adapter, const PrunerPayload& payload, const PxActorShape& as, PxQueryHitType::Enum& shapeHitType, const PxQueryFlags& inFilterFlags, const PxQueryFilterData& filterData, PxQueryFilterCallback* filterCall, PxHitFlags& queryFlags/*, PxU32 maxNbTouches*/) { if(!(filterData.flags & PxQueryFlag::eBATCH_QUERY_LEGACY_BEHAVIOUR) && !applyFilterEquation(adapter, payload, filterData.data)) return false; if((inFilterFlags & PxQueryFlag::ePREFILTER) && (filterCall)) { PxHitFlags outQueryFlags = queryFlags; if(filterCall) shapeHitType = filterCall->preFilter(filterData.data, as.shape, as.actor, outQueryFlags); // AP: at this point the callback might return eTOUCH but the touch buffer can be empty, the hit will be discarded //PX_CHECK_MSG(hitType == PxQueryHitType::eTOUCH ? 
maxNbTouches > 0 : true, // "SceneQuery: preFilter returned eTOUCH but empty touch buffer was provided, hit discarded."); queryFlags = (queryFlags & ~PxHitFlag::eMODIFIABLE_FLAGS) | (outQueryFlags & PxHitFlag::eMODIFIABLE_FLAGS); if(shapeHitType == PxQueryHitType::eNONE) return false; } // test passed, continue to return as; return true; } static PX_NOINLINE void computeCompoundShapeTransform(PxTransform* PX_RESTRICT transform, const PxTransform* PX_RESTRICT compoundPose, const PxTransform* PX_RESTRICT transforms, PxU32 primIndex) { // PT:: tag: scalar transform*transform *transform = (*compoundPose) * transforms[primIndex]; } // struct to access protected data members in the public PxHitCallback API template<typename HitType> struct MultiQueryCallback : public PrunerRaycastCallback, public PrunerOverlapCallback, public CompoundPrunerRaycastCallback, public CompoundPrunerOverlapCallback { const SceneQueries& mScene; const MultiQueryInput& mInput; PxHitCallback<HitType>& mHitCall; const PxHitFlags mHitFlags; const PxQueryFilterData& mFilterData; PxQueryFilterCallback* mFilterCall; PxReal mShrunkDistance; const PxHitFlags mMeshAnyHitFlags; bool mReportTouchesAgain; bool mFarBlockFound; // this is to prevent repeated searches for far block const bool mNoBlock; const bool mAnyHit; // The reason we need these bounds is because we need to know combined(inflated shape) bounds to clip the sweep path // to be tolerable by GJK precision issues. This test is done for (queryShape vs touchedShapes) // So it makes sense to cache the bounds for sweep query shape, otherwise we'd have to recompute them every time // Currently only used for sweeps. const PxBounds3* mQueryShapeBounds; const ShapeData* mShapeData; PxTransform mCompoundShapeTransform; MultiQueryCallback( const SceneQueries& scene, const MultiQueryInput& input, bool anyHit, PxHitCallback<HitType>& hitCall, PxHitFlags hitFlags, const PxQueryFilterData& filterData, PxQueryFilterCallback* filterCall, PxReal shrunkDistance) : mScene (scene), mInput (input), mHitCall (hitCall), mHitFlags (hitFlags), mFilterData (filterData), mFilterCall (filterCall), mShrunkDistance (shrunkDistance), mMeshAnyHitFlags ((hitFlags.isSet(PxHitFlag::eMESH_ANY) || anyHit) ? PxHitFlag::eMESH_ANY : PxHitFlag::Enum(0)), mReportTouchesAgain (true), mFarBlockFound (filterData.flags & PxQueryFlag::eNO_BLOCK), mNoBlock (filterData.flags & PxQueryFlag::eNO_BLOCK), mAnyHit (anyHit), mQueryShapeBounds (NULL), mShapeData (NULL) { } bool processTouchHit(const HitType& hit, PxReal& aDist) #if PX_WINDOWS_FAMILY PX_RESTRICT #endif { // -------------------------- handle eTOUCH hits --------------------------------- // for qType=multiple, store the hit. For other qTypes ignore it. // <= is important for initially overlapping sweeps #if PX_CHECKED if(mHitCall.maxNbTouches == 0 && !mFilterData.flags.isSet(PxQueryFlag::eRESERVED)) // issue a warning if eTOUCH was returned by the prefilter, we have 0 touch buffer and not a batch query // not doing for BQ because the touches buffer can be overflown and thats ok by spec // eRESERVED to avoid a warning from nested callback (closest blocking hit recursive search) outputError<PxErrorCode::eINVALID_OPERATION>(__LINE__, "User filter returned PxQueryHitType::eTOUCH but the touches buffer was empty. 
Hit was discarded."); #endif if(mHitCall.maxNbTouches && mReportTouchesAgain && HITDIST(hit) <= mShrunkDistance) { // Buffer full: need to find the closest blocking hit, clip touch hits and flush the buffer if(mHitCall.nbTouches == mHitCall.maxNbTouches) { // issue a second nested query just looking for the closest blocking hit // could do better perf-wise by saving traversal state (start looking for blocking from this point) // but this is not a perf critical case because users can provide a bigger buffer // that covers non-degenerate cases // far block search doesn't apply to overlaps because overlaps don't work with blocking hits if(HitTypeSupport<HitType>::IsOverlap == 0) { // AP: the use of eRESERVED is a bit tricky, see other comments containing #LABEL1 PxQueryFilterData fd1 = mFilterData; fd1.flags |= PxQueryFlag::eRESERVED; PxHitBuffer<HitType> buf1; // create a temp callback buffer for a single blocking hit if(!mFarBlockFound && mHitCall.maxNbTouches > 0 && mScene.SceneQueries::multiQuery<HitType>(mInput, buf1, mHitFlags, NULL, fd1, mFilterCall)) { mHitCall.block = buf1.block; mHitCall.hasBlock = true; mHitCall.nbTouches = clipHitsToNewMaxDist<HitType>(mHitCall.touches, mHitCall.nbTouches, HITDIST(buf1.block)); mShrunkDistance = HITDIST(buf1.block); aDist = mShrunkDistance; } mFarBlockFound = true; } if(mHitCall.nbTouches == mHitCall.maxNbTouches) { mReportTouchesAgain = mHitCall.processTouches(mHitCall.touches, mHitCall.nbTouches); if(!mReportTouchesAgain) return false; // optimization - buffer is full else mHitCall.nbTouches = 0; // reset nbTouches so we can continue accumulating again } } //if(hitCall.nbTouches < hitCall.maxNbTouches) // can be true if maxNbTouches is 0 mHitCall.touches[mHitCall.nbTouches++] = hit; } // if(hitCall.maxNbTouches && reportTouchesAgain && HITDIST(hit) <= shrunkDistance) return true; } template<const bool isCached> // is this call coming as a callback from the pruner or a single item cached callback? bool _invoke(PxReal& aDist, PxU32 primIndex, const PrunerPayload* payloads, const PxTransform* transforms, const PxTransform* compoundPose) #if PX_WINDOWS_FAMILY PX_RESTRICT #endif { PX_ASSERT(payloads); const PrunerPayload& payload = payloads[primIndex]; const QueryAdapter& adapter = static_cast<const QueryAdapter&>(mScene.mSQManager.getAdapter()); PxActorShape actorShape; adapter.getActorShape(payload, actorShape); const PxQueryFlags filterFlags = mFilterData.flags; // for no filter callback, default to eTOUCH for MULTIPLE, eBLOCK otherwise // also always treat as eBLOCK if currently tested shape is cached // Using eRESERVED flag as a special condition to default to eTOUCH hits while only looking for a single blocking hit // from a nested query (see other comments containing #LABEL1) PxQueryHitType::Enum shapeHitType = ((mHitCall.maxNbTouches || (mFilterData.flags & PxQueryFlag::eRESERVED)) && !isCached) ? 
PxQueryHitType::eTOUCH : PxQueryHitType::eBLOCK; // apply pre-filter PxHitFlags filteredHitFlags = mHitFlags; if(!isCached) // don't run filters on single item cache { if(!applyAllPreFiltersSQ(adapter, payload, actorShape, shapeHitType/*in&out*/, filterFlags, mFilterData, mFilterCall, filteredHitFlags/*, mHitCall.maxNbTouches*/)) return true; // skip this shape from reporting if prefilter said to do so // if(shapeHitType == PxQueryHitType::eNONE) // return true; } const PxGeometry& shapeGeom = adapter.getGeometry(payload); PX_ASSERT(transforms); const PxTransform* shapeTransform; if(!compoundPose) { shapeTransform = transforms + primIndex; } else { computeCompoundShapeTransform(&mCompoundShapeTransform, compoundPose, transforms, primIndex); shapeTransform = &mCompoundShapeTransform; } const PxU32 tempCount = 1; HitType tempBuf[tempCount]; // Here we decide whether to use the user provided buffer in place or a local stack buffer // see if we have more room left in the callback results buffer than in the parent stack buffer // if so get subHits in-place in the hit buffer instead of the parent stack buffer // nbTouches is the number of accumulated touch hits so far // maxNbTouches is the size of the user buffer PxU32 maxSubHits1; HitType* subHits1; if(mHitCall.nbTouches >= mHitCall.maxNbTouches) // if there's no room left in the user buffer, use a stack buffer { // tried using 64 here - causes check stack code to get generated on xbox, perhaps because of guard page // need this buffer in case the input buffer is full but we still want to correctly merge results from later hits maxSubHits1 = tempCount; subHits1 = reinterpret_cast<HitType*>(tempBuf); } else { maxSubHits1 = mHitCall.maxNbTouches - mHitCall.nbTouches; // how much room is left in the user buffer subHits1 = mHitCall.touches + mHitCall.nbTouches; // pointer to the first free hit in the user buffer } // call the geometry specific intersection template const PxU32 nbSubHits = GeomQueryAny<HitType>::geomHit( mScene.mCachedFuncs, mInput, mShapeData, shapeGeom, *shapeTransform, filteredHitFlags | mMeshAnyHitFlags, maxSubHits1, subHits1, mShrunkDistance, mQueryShapeBounds, &mHitCall); // ------------------------- iterate over geometry subhits ----------------------------------- for (PxU32 iSubHit = 0; iSubHit < nbSubHits; iSubHit++) { HitType& hit = subHits1[iSubHit]; hit.actor = actorShape.actor; hit.shape = actorShape.shape; // some additional processing only for sweep hits with initial overlap if(HitTypeSupport<HitType>::IsSweep && HITDIST(hit) == 0.0f && !(filteredHitFlags & PxHitFlag::eMTD)) // PT: necessary as some leaf routines are called with reversed params, thus writing +unitDir there. // AP: apparently still necessary to also do in Gu because Gu can be used standalone (without SQ) reinterpret_cast<PxSweepHit&>(hit).normal = -mInput.getDir(); // start out with hitType for this cached shape set to a pre-filtered hit type PxQueryHitType::Enum hitType = shapeHitType; // run the post-filter if specified in filterFlags and filterCall is non-NULL if(!isCached && mFilterCall && (filterFlags & PxQueryFlag::ePOSTFILTER)) { //if(mFilterCall) hitType = mFilterCall->postFilter(mFilterData.data, hit, hit.shape, hit.actor); } // early out on any hit if eANY_HIT was specified, regardless of hit type if(mAnyHit && hitType != PxQueryHitType::eNONE) { // block or touch qualifies for qType=ANY type hit => return it as blocking according to spec. Ignore eNONE. 
//mHitCall.block = hit; copy(&mHitCall.block, &hit); mHitCall.hasBlock = true; return false; // found a hit for ANY qType, can early exit now } if(mNoBlock && hitType==PxQueryHitType::eBLOCK) hitType = PxQueryHitType::eTOUCH; PX_WARN_ONCE_IF(HitTypeSupport<HitType>::IsOverlap && hitType == PxQueryHitType::eBLOCK, "eBLOCK returned from user filter for overlap() query. This may cause undesired behavior. " "Consider using PxQueryFlag::eNO_BLOCK for overlap queries."); if(hitType == PxQueryHitType::eTOUCH) { if(!processTouchHit(hit, aDist)) return false; } // if(hitType == PxQueryHitType::eTOUCH) else if(hitType == PxQueryHitType::eBLOCK) { // -------------------------- handle eBLOCK hits ---------------------------------- // only eBLOCK qualifies as a closest hit candidate => compare against best distance and store // <= is needed for eTOUCH hits to be recorded correctly vs same eBLOCK distance for overlaps if(HITDIST(hit) <= mShrunkDistance) { if(HitTypeSupport<HitType>::IsOverlap == 0) { mShrunkDistance = HITDIST(hit); aDist = mShrunkDistance; } //mHitCall.block = hit; copy(&mHitCall.block, &hit); mHitCall.hasBlock = true; } } // if(hitType == eBLOCK) else { PX_ASSERT(hitType == PxQueryHitType::eNONE); } } // for iSubHit return true; } virtual bool invoke(PxReal& aDist, PxU32 primIndex, const PrunerPayload* payloads, const PxTransform* transforms) { return _invoke<false>(aDist, primIndex, payloads, transforms, NULL); } virtual bool invoke(PxU32 primIndex, const PrunerPayload* payloads, const PxTransform* transforms) { float unused = 0.0f; return _invoke<false>(unused, primIndex, payloads, transforms, NULL); } virtual bool invoke(PxReal& aDist, PxU32 primIndex, const PrunerPayload* payloads, const PxTransform* transforms, const PxTransform* compoundPose) { return _invoke<false>(aDist, primIndex, payloads, transforms, compoundPose); } virtual bool invoke(PxU32 primIndex, const PrunerPayload* payloads, const PxTransform* transforms, const PxTransform* compoundPose) { float unused = 0.0f; return _invoke<false>(unused, primIndex, payloads, transforms, compoundPose); } private: MultiQueryCallback<HitType>& operator=(const MultiQueryCallback<HitType>&); }; //======================================================================================================================== #if PX_SUPPORT_PVD template<typename HitType> struct CapturePvdOnReturn : public PxHitCallback<HitType> { // copy the arguments of multiQuery into a struct, this is strictly for PVD recording const SceneQueries* mSQ; const MultiQueryInput& mInput; const PxQueryFilterData& mFilterData; PxArray<HitType> mAllHits; PxHitCallback<HitType>& mParentCallback; CapturePvdOnReturn( const SceneQueries* sq, const MultiQueryInput& input, const PxQueryFilterData& filterData, PxHitCallback<HitType>& parentCallback) : PxHitCallback<HitType> (parentCallback.touches, parentCallback.maxNbTouches), mSQ (sq), mInput (input), mFilterData (filterData), mParentCallback (parentCallback) {} virtual PxAgain processTouches(const HitType* hits, PxU32 nbHits) { const PxAgain again = mParentCallback.processTouches(hits, nbHits); for(PxU32 i=0; i<nbHits; i++) mAllHits.pushBack(hits[i]); return again; } ~CapturePvdOnReturn() { PVDCapture* pvd = mSQ->mPVD; if(!pvd || !pvd->transmitSceneQueries()) return; if(mParentCallback.nbTouches) { for(PxU32 i = 0; i < mParentCallback.nbTouches; i++) mAllHits.pushBack(mParentCallback.touches[i]); } if(mParentCallback.hasBlock) mAllHits.pushBack(mParentCallback.block); // PT: TODO: why do we need reinterpret_casts below? 
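// This appears to be needed because the three branches below are plain runtime 'if's rather than
// compile-time dispatch, so all of them must compile for every HitType instantiation (PxRaycastHit,
// PxOverlapHit, PxSweepHit, see the explicit multiQuery instantiations further down). The casts keep
// the two non-matching branches well-formed; the branch that actually executes casts mAllHits to the
// concrete hit type it really holds.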
if(HitTypeSupport<HitType>::IsRaycast) pvd->raycast(mInput.getOrigin(), mInput.getDir(), mInput.maxDistance, reinterpret_cast<PxRaycastHit*>(mAllHits.begin()), mAllHits.size(), mFilterData, this->maxNbTouches!=0); else if(HitTypeSupport<HitType>::IsOverlap) pvd->overlap(*mInput.geometry, *mInput.pose, reinterpret_cast<PxOverlapHit*>(mAllHits.begin()), mAllHits.size(), mFilterData); else if(HitTypeSupport<HitType>::IsSweep) pvd->sweep (*mInput.geometry, *mInput.pose, mInput.getDir(), mInput.maxDistance, reinterpret_cast<PxSweepHit*>(mAllHits.begin()), mAllHits.size(), mFilterData, this->maxNbTouches!=0); } private: CapturePvdOnReturn<HitType>& operator=(const CapturePvdOnReturn<HitType>&); }; #endif // PX_SUPPORT_PVD //======================================================================================================================== template<typename HitType> struct IssueCallbacksOnReturn { PxHitCallback<HitType>& hits; bool again; // query was stopped by previous processTouches. This means that nbTouches is still non-zero // but we don't need to issue processTouches again PX_FORCE_INLINE IssueCallbacksOnReturn(PxHitCallback<HitType>& aHits) : hits(aHits) { again = true; } ~IssueCallbacksOnReturn() { if(again) // only issue processTouches if query wasn't stopped // this is because nbTouches doesn't get reset to 0 in this case (according to spec) // and the touches in touches array were already processed by the callback { if(hits.hasBlock && hits.nbTouches) hits.nbTouches = clipHitsToNewMaxDist<HitType>(hits.touches, hits.nbTouches, HITDIST(hits.block)); if(hits.nbTouches) { bool again_ = hits.processTouches(hits.touches, hits.nbTouches); if(again_) hits.nbTouches = 0; } } hits.finalizeQuery(); } private: IssueCallbacksOnReturn<HitType>& operator=(const IssueCallbacksOnReturn<HitType>&); }; #undef HITDIST //======================================================================================================================== template<typename HitType> static bool doQueryVsCached(const PrunerHandle cacheData, PxU32 prunerIndex, const PrunerCompoundId cachedCompoundId, const PrunerManager& manager, MultiQueryCallback<HitType>& pcb, const MultiQueryInput& input); static PX_FORCE_INLINE PxCompoundPrunerQueryFlags convertFlags(PxQueryFlags inFlags) { PxCompoundPrunerQueryFlags outFlags(0); if(inFlags.isSet(PxQueryFlag::eSTATIC)) outFlags.raise(PxCompoundPrunerQueryFlag::eSTATIC); if(inFlags.isSet(PxQueryFlag::eDYNAMIC)) outFlags.raise(PxCompoundPrunerQueryFlag::eDYNAMIC); return outFlags; } // PT: TODO: revisit error messages without breaking UTs template<typename HitType> bool SceneQueries::multiQuery( const MultiQueryInput& input, PxHitCallback<HitType>& hits, PxHitFlags hitFlags, const PxQueryCache* cache, const PxQueryFilterData& filterData, PxQueryFilterCallback* filterCall) const { const bool anyHit = (filterData.flags & PxQueryFlag::eANY_HIT) == PxQueryFlag::eANY_HIT; if(HitTypeSupport<HitType>::IsRaycast == 0) { PX_CHECK_AND_RETURN_VAL(input.pose != NULL, "NpSceneQueries::overlap/sweep pose is NULL.", 0); PX_CHECK_AND_RETURN_VAL(input.pose->isValid(), "NpSceneQueries::overlap/sweep pose is not valid.", 0); } else { PX_CHECK_AND_RETURN_VAL(input.getOrigin().isFinite(), "NpSceneQueries::raycast pose is not valid.", 0); } if(HitTypeSupport<HitType>::IsOverlap == 0) { PX_CHECK_AND_RETURN_VAL(input.getDir().isFinite(), "NpSceneQueries multiQuery input check: unitDir is not valid.", 0); PX_CHECK_AND_RETURN_VAL(input.getDir().isNormalized(), "NpSceneQueries multiQuery input check: 
direction must be normalized", 0); } if(HitTypeSupport<HitType>::IsRaycast) { PX_CHECK_AND_RETURN_VAL(input.maxDistance > 0.0f, "NpSceneQueries::multiQuery input check: distance cannot be negative or zero", 0); } if(HitTypeSupport<HitType>::IsOverlap && !anyHit) { PX_CHECK_AND_RETURN_VAL(hits.maxNbTouches > 0, "PxScene::overlap() calls without eANY_HIT flag require a touch hit buffer for return results.", 0); } if(HitTypeSupport<HitType>::IsSweep) { PX_CHECK_AND_RETURN_VAL(input.maxDistance >= 0.0f, "NpSceneQueries multiQuery input check: distance cannot be negative", 0); PX_CHECK_AND_RETURN_VAL(input.maxDistance != 0.0f || !(hitFlags & PxHitFlag::eASSUME_NO_INITIAL_OVERLAP), "NpSceneQueries multiQuery input check: zero-length sweep only valid without the PxHitFlag::eASSUME_NO_INITIAL_OVERLAP flag", 0); } PX_CHECK_MSG(!cache || (cache && cache->shape && cache->actor), "Raycast cache specified but shape or actor pointer is NULL!"); PrunerCompoundId cachedCompoundId = INVALID_COMPOUND_ID; // PT: this is similar to the code in the SqRefFinder so we could share that code maybe. But here we later retrieve the payload from the PrunerData, // i.e. we basically go back to the same pointers we started from. I suppose it's to make sure they get properly invalidated when an object is deleted etc, // but we could still probably find a more efficient way to do that here. Isn't it exactly why we had the Signature class initially? // // how can this work anyway? if the actor has been deleted the lookup won't work either => doc says it's up to users to manage that.... PxU32 prunerIndex = 0xffffffff; const PrunerHandle cacheData = cache ? static_cast<const QueryAdapter&>(mSQManager.getAdapter()).findPrunerHandle(*cache, cachedCompoundId, prunerIndex) : INVALID_PRUNERHANDLE; // this function is logically const for the SDK user, as flushUpdates() will not have an API-visible effect on this object // internally however, flushUpdates() changes the states of the Pruners in mSQManager // because here is the only place we need this, const_cast instead of making SQM mutable const_cast<SceneQueries*>(this)->mSQManager.flushUpdates(); #if PX_SUPPORT_PVD CapturePvdOnReturn<HitType> pvdCapture(this, input, filterData, hits); #endif IssueCallbacksOnReturn<HitType> cbr(hits); // destructor will execute callbacks on return from this function hits.hasBlock = false; hits.nbTouches = 0; PxReal shrunkDistance = HitTypeSupport<HitType>::IsOverlap ? PX_MAX_REAL : input.maxDistance; // can be progressively shrunk as we go over the list of shapes if(HitTypeSupport<HitType>::IsSweep) shrunkDistance = PxMin(shrunkDistance, PX_MAX_SWEEP_DISTANCE); MultiQueryCallback<HitType> pcb(*this, input, anyHit, hits, hitFlags, filterData, filterCall, shrunkDistance); if(cacheData!=INVALID_PRUNERHANDLE && hits.maxNbTouches == 0) // don't use cache for queries that can return touch hits { if(!doQueryVsCached(cacheData, prunerIndex, cachedCompoundId, mSQManager, pcb, input)) return hits.hasAnyHits(); } const Pruner* staticPruner = mSQManager.getPruner(PruningIndex::eSTATIC); const Pruner* dynamicPruner = mSQManager.getPruner(PruningIndex::eDYNAMIC); const CompoundPruner* compoundPruner = mSQManager.getCompoundPruner(); const PxU32 doStatics = staticPruner && (filterData.flags & PxQueryFlag::eSTATIC); const PxU32 doDynamics = dynamicPruner && (filterData.flags & PxQueryFlag::eDYNAMIC); const PxCompoundPrunerQueryFlags compoundPrunerQueryFlags = convertFlags(filterData.flags); if(HitTypeSupport<HitType>::IsRaycast) { bool again = doStatics ? 
staticPruner->raycast(input.getOrigin(), input.getDir(), pcb.mShrunkDistance, pcb) : true; if(!again) return hits.hasAnyHits(); if(doDynamics) again = dynamicPruner->raycast(input.getOrigin(), input.getDir(), pcb.mShrunkDistance, pcb); if(again && compoundPruner) again = compoundPruner->raycast(input.getOrigin(), input.getDir(), pcb.mShrunkDistance, pcb, compoundPrunerQueryFlags); cbr.again = again; // update the status to avoid duplicate processTouches() return hits.hasAnyHits(); } else if(HitTypeSupport<HitType>::IsOverlap) { PX_ASSERT(input.geometry); const ShapeData sd(*input.geometry, *input.pose, input.inflation); pcb.mShapeData = &sd; bool again = doStatics ? staticPruner->overlap(sd, pcb) : true; if(!again) // && (filterData.flags & PxQueryFlag::eANY_HIT)) return hits.hasAnyHits(); if(doDynamics) again = dynamicPruner->overlap(sd, pcb); if(again && compoundPruner) again = compoundPruner->overlap(sd, pcb, compoundPrunerQueryFlags); cbr.again = again; // update the status to avoid duplicate processTouches() return hits.hasAnyHits(); } else { PX_ASSERT(HitTypeSupport<HitType>::IsSweep); PX_ASSERT(input.geometry); const ShapeData sd(*input.geometry, *input.pose, input.inflation); pcb.mQueryShapeBounds = &sd.getPrunerInflatedWorldAABB(); pcb.mShapeData = &sd; bool again = doStatics ? staticPruner->sweep(sd, input.getDir(), pcb.mShrunkDistance, pcb) : true; if(!again) return hits.hasAnyHits(); if(doDynamics) again = dynamicPruner->sweep(sd, input.getDir(), pcb.mShrunkDistance, pcb); if(again && compoundPruner) again = compoundPruner->sweep(sd, input.getDir(), pcb.mShrunkDistance, pcb, compoundPrunerQueryFlags); cbr.again = again; // update the status to avoid duplicate processTouches() return hits.hasAnyHits(); } } //explicit template instantiation template bool SceneQueries::multiQuery<PxRaycastHit>(const MultiQueryInput&, PxHitCallback<PxRaycastHit>&, PxHitFlags, const PxQueryCache*, const PxQueryFilterData&, PxQueryFilterCallback*) const; template bool SceneQueries::multiQuery<PxOverlapHit>(const MultiQueryInput&, PxHitCallback<PxOverlapHit>&, PxHitFlags, const PxQueryCache*, const PxQueryFilterData&, PxQueryFilterCallback*) const; template bool SceneQueries::multiQuery<PxSweepHit>(const MultiQueryInput&, PxHitCallback<PxSweepHit>&, PxHitFlags, const PxQueryCache*, const PxQueryFilterData&, PxQueryFilterCallback*) const; /////////////////////////////////////////////////////////////////////////////// bool SceneQueries::_raycast( const PxVec3& origin, const PxVec3& unitDir, const PxReal distance, PxHitCallback<PxRaycastHit>& hits, PxHitFlags hitFlags, const PxQueryFilterData& filterData, PxQueryFilterCallback* filterCall, const PxQueryCache* cache, PxGeometryQueryFlags flags) const { PX_PROFILE_ZONE("SceneQuery.raycast", getContextId()); PX_SIMD_GUARD_CNDT(flags & PxGeometryQueryFlag::eSIMD_GUARD) MultiQueryInput input(origin, unitDir, distance); return multiQuery<PxRaycastHit>(input, hits, hitFlags, cache, filterData, filterCall); } ////////////////////////////////////////////////////////////////////////// bool SceneQueries::_overlap( const PxGeometry& geometry, const PxTransform& pose, PxOverlapCallback& hits, const PxQueryFilterData& filterData, PxQueryFilterCallback* filterCall, const PxQueryCache* cache, PxGeometryQueryFlags flags) const { PX_PROFILE_ZONE("SceneQuery.overlap", getContextId()); PX_SIMD_GUARD_CNDT(flags & PxGeometryQueryFlag::eSIMD_GUARD) #if PX_CHECKED if (!PxGeometryQuery::isValid(geometry)) return outputError<PxErrorCode::eINVALID_PARAMETER>(__LINE__, "Provided 
geometry is not valid"); #endif MultiQueryInput input(&geometry, &pose); return multiQuery<PxOverlapHit>(input, hits, PxHitFlags(), cache, filterData, filterCall); } /////////////////////////////////////////////////////////////////////////////// bool SceneQueries::_sweep( const PxGeometry& geometry, const PxTransform& pose, const PxVec3& unitDir, const PxReal distance, PxHitCallback<PxSweepHit>& hits, PxHitFlags hitFlags, const PxQueryFilterData& filterData, PxQueryFilterCallback* filterCall, const PxQueryCache* cache, const PxReal inflation, PxGeometryQueryFlags flags) const { PX_PROFILE_ZONE("SceneQuery.sweep", getContextId()); PX_SIMD_GUARD_CNDT(flags & PxGeometryQueryFlag::eSIMD_GUARD) #if PX_CHECKED if(!PxGeometryQuery::isValid(geometry)) return outputError<PxErrorCode::eINVALID_PARAMETER>(__LINE__, "Provided geometry is not valid"); #endif if((hitFlags & PxHitFlag::ePRECISE_SWEEP) && (hitFlags & PxHitFlag::eMTD)) { outputError<PxErrorCode::eINVALID_PARAMETER>(__LINE__, " Precise sweep doesn't support MTD. Perform MTD with default sweep"); hitFlags &= ~PxHitFlag::ePRECISE_SWEEP; } if((hitFlags & PxHitFlag::eASSUME_NO_INITIAL_OVERLAP) && (hitFlags & PxHitFlag::eMTD)) { outputError<PxErrorCode::eINVALID_PARAMETER>(__LINE__, " eMTD cannot be used in conjunction with eASSUME_NO_INITIAL_OVERLAP. eASSUME_NO_INITIAL_OVERLAP will be ignored"); hitFlags &= ~PxHitFlag::eASSUME_NO_INITIAL_OVERLAP; } PxReal realInflation = inflation; if((hitFlags & PxHitFlag::ePRECISE_SWEEP)&& inflation > 0.f) { realInflation = 0.f; outputError<PxErrorCode::eINVALID_PARAMETER>(__LINE__, " Precise sweep doesn't support inflation, inflation will be overwritten to be zero"); } MultiQueryInput input(&geometry, &pose, unitDir, distance, realInflation); return multiQuery<PxSweepHit>(input, hits, hitFlags, cache, filterData, filterCall); } /////////////////////////////////////////////////////////////////////////////// template<typename HitType> static bool doQueryVsCached(const PrunerHandle handle, PxU32 prunerIndex, const PrunerCompoundId cachedCompoundId, const PrunerManager& manager, MultiQueryCallback<HitType>& pcb, const MultiQueryInput& input) { // this block is only executed for single shape cache const PrunerPayload* payloads; const PxTransform* compoundPosePtr; PxTransform* transform; PxTransform compoundPose; if(cachedCompoundId == INVALID_COMPOUND_ID) { const Pruner* pruner = manager.getPruner(PruningIndex::Enum(prunerIndex)); PX_ASSERT(pruner); PrunerPayloadData ppd; const PrunerPayload& cachedPayload = pruner->getPayloadData(handle, &ppd); payloads = &cachedPayload; compoundPosePtr = NULL; transform = ppd.mTransform; } else { const CompoundPruner* pruner = manager.getCompoundPruner(); PX_ASSERT(pruner); PrunerPayloadData ppd; const PrunerPayload& cachedPayload = pruner->getPayloadData(handle, cachedCompoundId, &ppd); compoundPose = pruner->getTransform(cachedCompoundId); payloads = &cachedPayload; compoundPosePtr = &compoundPose; transform = ppd.mTransform; } PxReal dummyDist; bool againAfterCache; if(HitTypeSupport<HitType>::IsSweep) { // AP: for sweeps we cache the bounds because we need to know them for the test to clip the sweep to bounds // otherwise GJK becomes unstable. The bounds can be used multiple times so this is an optimization. 
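// Note: building a ShapeData here mirrors what the non-cached sweep path in multiQuery() does: it
// precomputes the pruner-inflated world AABB (and the Gu capsule/box variants) once, so the cached-shape
// _invoke() call behaves the same way as a hit coming from a regular pruner traversal.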
const ShapeData sd(*input.geometry, *input.pose, input.inflation); pcb.mQueryShapeBounds = &sd.getPrunerInflatedWorldAABB(); pcb.mShapeData = &sd; // againAfterCache = pcb.invoke(dummyDist, 0); againAfterCache = pcb.template _invoke<true>(dummyDist, 0, payloads, transform, compoundPosePtr); pcb.mQueryShapeBounds = NULL; pcb.mShapeData = NULL; } else // againAfterCache = pcb.invoke(dummyDist, 0); againAfterCache = pcb.template _invoke<true>(dummyDist, 0, payloads, transform, compoundPosePtr); return againAfterCache; } /////////////////////////////////////////////////////////////////////////////// SceneQueries::SceneQueries( PVDCapture* pvd, PxU64 contextID, Pruner* staticPruner, Pruner* dynamicPruner, PxU32 dynamicTreeRebuildRateHint, float inflation, const PxSceneLimits& limits, const QueryAdapter& adapter) : mSQManager (contextID, staticPruner, dynamicPruner, dynamicTreeRebuildRateHint, inflation, limits, adapter), mPVD (pvd) { } SceneQueries::~SceneQueries() { }
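///////////////////////////////////////////////////////////////////////////////

// Usage sketch (illustration only, not part of the SDK sources): the templated multiQuery() above is what
// ultimately services the public PxScene query API. Assuming an existing PxScene* named 'scene', a
// caller-side raycast and sweep could look roughly like this:
//
//	PxRaycastBuffer rayHit;	// blocking hit only, no touch storage
//	if(scene->raycast(PxVec3(0.0f, 10.0f, 0.0f),	// origin
//					  PxVec3(0.0f, -1.0f, 0.0f),	// direction, must be normalized
//					  100.0f,						// max distance, must be > 0
//					  rayHit) && rayHit.hasBlock)
//	{
//		const PxVec3 hitPos = rayHit.block.position;	// closest blocking hit
//	}
//
//	PxSweepBufferN<8> sweepHits;	// room for up to 8 touch hits plus one blocking hit
//	const PxSphereGeometry sphere(0.5f);
//	const PxTransform startPose(PxVec3(0.0f, 1.0f, 0.0f));
//	scene->sweep(sphere, startPose, PxVec3(1.0f, 0.0f, 0.0f), 50.0f, sweepHits);
//
// When the buffer has touch capacity the default hit type is eTOUCH (see the shapeHitType selection in
// _invoke() above), and those hits are accumulated by MultiQueryCallback::processTouchHit(); buffers with
// no touch capacity only ever receive the closest blocking hit.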
NVIDIA-Omniverse/PhysX/physx/source/scenequery/src/SqManager.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. // PT: SQ-API LEVEL 2 (Level 1 = SqPruner.h) // PT: this file is part of a "high-level" set of files within Sq. The SqPruner API doesn't rely on them. // PT: this should really be at Np level but moving it to Sq allows us to share it. #include "SqManager.h" #include "GuSqInternal.h" #include "GuBounds.h" using namespace physx; using namespace Sq; using namespace Gu; PrunerExt::PrunerExt() : mPruner(NULL), mDirtyList("SQmDirtyList"), mDirtyStatic(false) { } PrunerExt::~PrunerExt() { PX_DELETE(mPruner); } void PrunerExt::init(Pruner* pruner) { mPruner = pruner; } void PrunerExt::preallocate(PxU32 nbShapes) { // if(nbShapes > mDirtyMap.size()) // mDirtyMap.resize(nbShapes); if(mPruner) mPruner->preallocate(nbShapes); } void PrunerExt::flushMemory() { if(!mDirtyList.size()) mDirtyList.reset(); // PT: TODO: flush bitmap here // PT: TODO: flush pruner here? } void PrunerExt::addToDirtyList(PrunerHandle handle, bool dynamic, const PxTransform& transform) { if(mPruner) mPruner->setTransform(handle, transform); PxBitMap& dirtyMap = mDirtyMap; { if(dirtyMap.size() <= handle) { PxU32 size = PxMax<PxU32>(dirtyMap.size()*2, 1024); const PxU32 minSize = handle+1; if(minSize>size) size = minSize*2; dirtyMap.resize(size); PX_ASSERT(handle<dirtyMap.size()); PX_ASSERT(!dirtyMap.test(handle)); } } if(!dirtyMap.test(handle)) { dirtyMap.set(handle); mDirtyList.pushBack(handle); } if(!dynamic) mDirtyStatic = true; } void PrunerExt::removeFromDirtyList(PrunerHandle handle) { PxBitMap& dirtyMap = mDirtyMap; // if(dirtyMap.test(handle)) if(dirtyMap.boundedTest(handle)) { dirtyMap.reset(handle); mDirtyList.findAndReplaceWithLast(handle); } // PT: if we remove the object that made us set mDirtyStatic to true, tough luck, // we don't bother fixing that bool here. 
It's going to potentially cause an // unnecessary update of the character controller's caches, which is not a big deal. } bool PrunerExt::processDirtyList(PxU32 index, const Adapter& adapter, float inflation) { const PxU32 numDirtyList = mDirtyList.size(); if(!numDirtyList) return false; const PrunerHandle* const prunerHandles = mDirtyList.begin(); for(PxU32 i=0; i<numDirtyList; i++) { const PrunerHandle handle = prunerHandles[i]; mDirtyMap.reset(handle); // PT: we compute the new bounds and store them directly in the pruner structure to avoid copies. We delay the updateObjects() call // to take advantage of batching. PX_UNUSED(index); PrunerPayloadData ppd; const PrunerPayload& pp = mPruner->getPayloadData(handle, &ppd); computeBounds(*ppd.mBounds, adapter.getGeometry(pp), *ppd.mTransform, 0.0f, inflation); } // PT: batch update happens after the loop instead of once per loop iteration mPruner->updateObjects(prunerHandles, numDirtyList); mDirtyList.clear(); const bool ret = mDirtyStatic; mDirtyStatic = false; return ret; } // PT: TODO: re-inline this /*void PrunerExt::growDirtyList(PrunerHandle handle) { // pruners must either provide indices in order or reuse existing indices, so this 'if' is enough to ensure we have space for the new handle // PT: TODO: fix this. There is just no need for any of it. The pruning pool itself could support the feature for free, similar to what we do // in MBP. There would be no need for the bitmap or the dirty list array. However doing this through the virtual interface would be clumsy, // adding the cost of virtual calls for very cheap & simple operations. It would be a lot easier to drop it and go back to what we had before. PxBitMap& dirtyMap = mDirtyMap; if(dirtyMap.size() <= handle) dirtyMap.resize(PxMax<PxU32>(dirtyMap.size() * 2, 1024)); PX_ASSERT(handle<dirtyMap.size()); dirtyMap.reset(handle); }*/ /////////////////////////////////////////////////////////////////////////////// CompoundPrunerExt::CompoundPrunerExt() : mPruner (NULL) { } CompoundPrunerExt::~CompoundPrunerExt() { PX_DELETE(mPruner); } void CompoundPrunerExt::preallocate(PxU32 nbShapes) { // if(nbShapes > mDirtyList.size()) // mDirtyList.reserve(nbShapes); if(mPruner) mPruner->preallocate(nbShapes); } void CompoundPrunerExt::flushMemory() { if(!mDirtyList.size()) mDirtyList.clear(); } void CompoundPrunerExt::flushShapes(const Adapter& adapter, float inflation) { const PxU32 numDirtyList = mDirtyList.size(); if(!numDirtyList) return; const CompoundPair* const compoundPairs = mDirtyList.getEntries(); for(PxU32 i=0; i<numDirtyList; i++) { const PrunerHandle handle = compoundPairs[i].second; const PrunerCompoundId compoundId = compoundPairs[i].first; // PT: we compute the new bounds and store them directly in the pruner structure to avoid copies. We delay the updateObjects() call // to take advantage of batching. PrunerPayloadData ppd; const PrunerPayload& pp = mPruner->getPayloadData(handle, compoundId, &ppd); computeBounds(*ppd.mBounds, adapter.getGeometry(pp), *ppd.mTransform, 0.0f, inflation); // A.B. 
not very effective, we might do better here mPruner->updateObjectAfterManualBoundsUpdates(compoundId, handle); } mDirtyList.clear(); } void CompoundPrunerExt::addToDirtyList(PrunerCompoundId compoundId, PrunerHandle handle, const PxTransform& transform) { if(mPruner) mPruner->setTransform(handle, compoundId, transform); mDirtyList.insert(CompoundPair(compoundId, handle)); } void CompoundPrunerExt::removeFromDirtyList(PrunerCompoundId compoundId, PrunerHandle handle) { mDirtyList.erase(CompoundPair(compoundId, handle)); } /////////////////////////////////////////////////////////////////////////////// #include "SqFactory.h" #include "common/PxProfileZone.h" #include "common/PxRenderBuffer.h" #include "GuBVH.h" #include "foundation/PxAlloca.h" #include "PxSceneDesc.h" // PT: for PxSceneLimits TODO: remove namespace { enum PxScenePrunerIndex { PX_SCENE_PRUNER_STATIC = 0, PX_SCENE_PRUNER_DYNAMIC = 1, PX_SCENE_COMPOUND_PRUNER = 0xffffffff, }; } PrunerManager::PrunerManager( PxU64 contextID, Pruner* staticPruner, Pruner* dynamicPruner, PxU32 dynamicTreeRebuildRateHint, float inflation, const PxSceneLimits& limits, const Adapter& adapter) : mAdapter (adapter), mContextID (contextID), mStaticTimestamp (0), mInflation (inflation) { mPrunerExt[PruningIndex::eSTATIC].init(staticPruner); mPrunerExt[PruningIndex::eDYNAMIC].init(dynamicPruner); setDynamicTreeRebuildRateHint(dynamicTreeRebuildRateHint); mCompoundPrunerExt.mPruner = createCompoundPruner(contextID); preallocate(PruningIndex::eSTATIC, limits.maxNbStaticShapes); preallocate(PruningIndex::eDYNAMIC, limits.maxNbDynamicShapes); preallocate(PxU32(PX_SCENE_COMPOUND_PRUNER), 32); mPrunerNeedsUpdating = false; } PrunerManager::~PrunerManager() { } void PrunerManager::preallocate(PxU32 prunerIndex, PxU32 nbShapes) { if(prunerIndex==PruningIndex::eSTATIC) mPrunerExt[PruningIndex::eSTATIC].preallocate(nbShapes); else if(prunerIndex==PruningIndex::eDYNAMIC) mPrunerExt[PruningIndex::eDYNAMIC].preallocate(nbShapes); else if(prunerIndex==PX_SCENE_COMPOUND_PRUNER) mCompoundPrunerExt.preallocate(nbShapes); } void PrunerManager::flushMemory() { for(PxU32 i=0;i<PruningIndex::eCOUNT;i++) mPrunerExt[i].flushMemory(); mCompoundPrunerExt.flushMemory(); } PrunerData PrunerManager::addPrunerShape(const PrunerPayload& payload, bool dynamic, PrunerCompoundId compoundId, const PxBounds3& bounds, const PxTransform& transform, bool hasPruningStructure) { mPrunerNeedsUpdating = true; const PxU32 index = PxU32(dynamic); if(!index) invalidateStaticTimestamp(); PrunerHandle handle; if(compoundId == INVALID_COMPOUND_ID) { PX_ASSERT(mPrunerExt[index].pruner()); mPrunerExt[index].pruner()->addObjects(&handle, &bounds, &payload, &transform, 1, hasPruningStructure); //mPrunerExt[index].growDirtyList(handle); } else { PX_ASSERT(mCompoundPrunerExt.pruner()); mCompoundPrunerExt.pruner()->addObject(compoundId, handle, bounds, payload, transform); } return createPrunerData(index, handle); } void PrunerManager::removePrunerShape(PrunerCompoundId compoundId, PrunerData data, PrunerPayloadRemovalCallback* removalCallback) { mPrunerNeedsUpdating = true; const PxU32 index = getPrunerIndex(data); const PrunerHandle handle = getPrunerHandle(data); if(!index) invalidateStaticTimestamp(); if(compoundId == INVALID_COMPOUND_ID) { PX_ASSERT(mPrunerExt[index].pruner()); mPrunerExt[index].removeFromDirtyList(handle); mPrunerExt[index].pruner()->removeObjects(&handle, 1, removalCallback); } else { mCompoundPrunerExt.removeFromDirtyList(compoundId, handle); 
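// Presumably the dirty-list entry has to be cleared before the object itself is removed, so that a later
// flushShapes() can never see a handle whose payload has already been released.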
mCompoundPrunerExt.pruner()->removeObject(compoundId, handle, removalCallback); } } void PrunerManager::markForUpdate(PrunerCompoundId compoundId, PrunerData data, const PxTransform& transform) { mPrunerNeedsUpdating = true; const PxU32 index = getPrunerIndex(data); const PrunerHandle handle = getPrunerHandle(data); if(!index) invalidateStaticTimestamp(); if(compoundId == INVALID_COMPOUND_ID) // PT: TODO: at this point do we still need a dirty list? we could just update the bounds directly? mPrunerExt[index].addToDirtyList(handle, index!=0, transform); else mCompoundPrunerExt.addToDirtyList(compoundId, handle, transform); } void PrunerManager::setDynamicTreeRebuildRateHint(PxU32 rebuildRateHint) { mRebuildRateHint = rebuildRateHint; for(PxU32 i=0;i<PruningIndex::eCOUNT;i++) { Pruner* pruner = mPrunerExt[i].pruner(); if(pruner && pruner->isDynamic()) static_cast<DynamicPruner*>(pruner)->setRebuildRateHint(rebuildRateHint); } } void PrunerManager::afterSync(bool buildStep, bool commit) { PX_PROFILE_ZONE("Sim.sceneQueryBuildStep", mContextID); if(!buildStep && !commit) { mPrunerNeedsUpdating = true; return; } // flush user modified objects flushShapes(); for(PxU32 i=0; i<PruningIndex::eCOUNT; i++) { Pruner* pruner = mPrunerExt[i].pruner(); if(pruner) { if(pruner->isDynamic()) static_cast<DynamicPruner*>(pruner)->buildStep(true); if(commit) pruner->commit(); } } mPrunerNeedsUpdating = !commit; } void PrunerManager::flushShapes() { PX_PROFILE_ZONE("SceneQuery.flushShapes", mContextID); // must already have acquired writer lock here const float inflation = 1.0f + mInflation; bool mustInvalidateStaticTimestamp = false; for(PxU32 i=0; i<PruningIndex::eCOUNT; i++) { if(mPrunerExt[i].processDirtyList(i, mAdapter, inflation)) mustInvalidateStaticTimestamp = true; } if(mustInvalidateStaticTimestamp) invalidateStaticTimestamp(); mCompoundPrunerExt.flushShapes(mAdapter, inflation); } void PrunerManager::flushUpdates() { PX_PROFILE_ZONE("SceneQuery.flushUpdates", mContextID); if(mPrunerNeedsUpdating) { // no need to take lock if manual sq update is enabled // as flushUpdates will only be called from NpScene::flushQueryUpdates() mSQLock.lock(); if(mPrunerNeedsUpdating) { flushShapes(); for(PxU32 i=0; i<PruningIndex::eCOUNT; i++) if(mPrunerExt[i].pruner()) mPrunerExt[i].pruner()->commit(); PxMemoryBarrier(); mPrunerNeedsUpdating = false; } mSQLock.unlock(); } } void PrunerManager::forceRebuildDynamicTree(PxU32 prunerIndex) { PX_PROFILE_ZONE("SceneQuery.forceDynamicTreeRebuild", mContextID); PxMutex::ScopedLock lock(mSQLock); Pruner* pruner = mPrunerExt[prunerIndex].pruner(); if(pruner && pruner->isDynamic()) { static_cast<DynamicPruner*>(pruner)->purge(); static_cast<DynamicPruner*>(pruner)->commit(); } } void* PrunerManager::prepareSceneQueriesUpdate(PruningIndex::Enum index) { bool retVal = false; Pruner* pruner = mPrunerExt[index].pruner(); if(pruner && pruner->isDynamic()) retVal = static_cast<DynamicPruner*>(pruner)->prepareBuild(); return retVal ? pruner : NULL; } void PrunerManager::sceneQueryBuildStep(void* handle) { PX_PROFILE_ZONE("SceneQuery.sceneQueryBuildStep", mContextID); Pruner* pruner = reinterpret_cast<Pruner*>(handle); if(pruner && pruner->isDynamic()) { const bool buildFinished = static_cast<DynamicPruner*>(pruner)->buildStep(false); if(buildFinished) mPrunerNeedsUpdating = true; } } // PT: TODO: revisit this. Perhaps it should be the user's responsibility to call the pruner's // visualize functions directly, when & how he wants. 
void PrunerManager::visualize(PxU32 prunerIndex, PxRenderOutput& out) const { if(prunerIndex==PX_SCENE_PRUNER_STATIC) { if(getPruner(PruningIndex::eSTATIC)) getPruner(PruningIndex::eSTATIC)->visualize(out, SQ_DEBUG_VIZ_STATIC_COLOR, SQ_DEBUG_VIZ_STATIC_COLOR2); } else if(prunerIndex==PX_SCENE_PRUNER_DYNAMIC) { if(getPruner(PruningIndex::eDYNAMIC)) getPruner(PruningIndex::eDYNAMIC)->visualize(out, SQ_DEBUG_VIZ_DYNAMIC_COLOR, SQ_DEBUG_VIZ_DYNAMIC_COLOR2); } else if(prunerIndex==PX_SCENE_COMPOUND_PRUNER) { const CompoundPruner* cp = mCompoundPrunerExt.pruner(); if(cp) cp->visualizeEx(out, SQ_DEBUG_VIZ_COMPOUND_COLOR, true, true); } } void PrunerManager::shiftOrigin(const PxVec3& shift) { for(PxU32 i=0; i<PruningIndex::eCOUNT; i++) mPrunerExt[i].pruner()->shiftOrigin(shift); mCompoundPrunerExt.pruner()->shiftOrigin(shift); } void PrunerManager::addCompoundShape(const PxBVH& pxbvh, PrunerCompoundId compoundId, const PxTransform& compoundTransform, PrunerData* prunerData, const PrunerPayload* payloads, const PxTransform* transforms, bool isDynamic) { const BVH& bvh = static_cast<const BVH&>(pxbvh); const PxU32 nbShapes = bvh.Gu::BVH::getNbBounds(); PX_ALLOCA(res, PrunerHandle, nbShapes); PX_ASSERT(mCompoundPrunerExt.mPruner); mCompoundPrunerExt.mPruner->addCompound(res, bvh, compoundId, compoundTransform, isDynamic, payloads, transforms); const PxU32 index = PxU32(isDynamic); if(!index) invalidateStaticTimestamp(); for(PxU32 i = 0; i < nbShapes; i++) prunerData[i] = createPrunerData(index, res[i]); } void PrunerManager::updateCompoundActor(PrunerCompoundId compoundId, const PxTransform& compoundTransform) { PX_ASSERT(mCompoundPrunerExt.mPruner); const bool isDynamic = mCompoundPrunerExt.mPruner->updateCompound(compoundId, compoundTransform); if(!isDynamic) invalidateStaticTimestamp(); } void PrunerManager::removeCompoundActor(PrunerCompoundId compoundId, PrunerPayloadRemovalCallback* removalCallback) { PX_ASSERT(mCompoundPrunerExt.mPruner); const bool isDynamic = mCompoundPrunerExt.mPruner->removeCompound(compoundId, removalCallback); if(!isDynamic) invalidateStaticTimestamp(); } void PrunerManager::sync(const PrunerHandle* handles, const PxU32* boundsIndices, const PxBounds3* bounds, const PxTransform32* transforms, PxU32 count, const PxBitMap& ignoredIndices) { if(!count) return; Pruner* dynamicPruner = getPruner(PruningIndex::eDYNAMIC); if(!dynamicPruner) return; PxU32 startIndex = 0; PxU32 numIndices = count; // if shape sim map is not empty, parse the indices and skip update for the dirty one if(ignoredIndices.count()) { // PT: I think this codepath was used with SCB / buffered changes, but it's not needed anymore numIndices = 0; for(PxU32 i=0; i<count; i++) { // if(ignoredIndices.test(boundsIndices[i])) if(ignoredIndices.boundedTest(boundsIndices[i])) { dynamicPruner->updateObjects(handles + startIndex, numIndices, mInflation, boundsIndices + startIndex, bounds, transforms); numIndices = 0; startIndex = i + 1; } else numIndices++; } // PT: we fallback to the next line on purpose - no "else" } dynamicPruner->updateObjects(handles + startIndex, numIndices, mInflation, boundsIndices + startIndex, bounds, transforms); }
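///////////////////////////////////////////////////////////////////////////////

// Flow sketch (illustration only, not part of the SDK sources): the manager defers bounds updates so that
// many per-shape changes can be committed in one batch. Assuming a Sq::PrunerManager* named 'manager' and
// a shape that already has PrunerData 'data', a typical frame looks roughly like:
//
//	// object moved: record the new transform and mark the shape dirty (cheap, no tree work yet)
//	manager->markForUpdate(INVALID_COMPOUND_ID, data, newTransform);
//
//	// ... possibly many more markForUpdate() / addPrunerShape() / removePrunerShape() calls ...
//
//	// the next query (multiQuery calls this) or an explicit flush pays the cost once:
//	manager->flushUpdates();	// flushShapes() recomputes bounds, then each pruner commits
//
// flushShapes() walks the dirty lists, recomputes world bounds through the Adapter's geometry access and
// writes them straight into the pruning pools, and only then issues a single batched updateObjects() call
// per pruner, which is the batching the comments in processDirtyList() refer to.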
NVIDIA-Omniverse/PhysX/physx/source/scenequery/src/SqCompoundPruner.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #include "SqCompoundPruner.h" #include "GuSqInternal.h" #include "GuIncrementalAABBTree.h" #include "GuPruningPool.h" #include "GuAABBTreeQuery.h" #include "GuAABBTreeNode.h" #include "GuSphere.h" #include "GuBox.h" #include "GuCapsule.h" #include "GuBVH.h" #include "GuQuery.h" #include "GuInternal.h" #include "common/PxRenderBuffer.h" #include "common/PxRenderOutput.h" #include "CmVisualization.h" using namespace physx; using namespace Gu; using namespace Sq; // PT: TODO: this is copied from SqBounds.h, should be either moved to Gu and shared or passed as a user parameter #define SQ_PRUNER_EPSILON 0.005f #define SQ_PRUNER_INFLATION (1.0f + SQ_PRUNER_EPSILON) // pruner test shape inflation (not narrow phase shape) #define PARANOIA_CHECKS 0 /////////////////////////////////////////////////////////////////////////////////////////////// BVHCompoundPruner::BVHCompoundPruner(PxU64 contextID) : mCompoundTreePool(contextID), mDrawStatic(false), mDrawDynamic(false) { preallocate(32); } /////////////////////////////////////////////////////////////////////////////////////////////// BVHCompoundPruner::~BVHCompoundPruner() { } /////////////////////////////////////////////////////////////////////////////////////////////// bool BVHCompoundPruner::addCompound(PrunerHandle* results, const BVH& bvh, PrunerCompoundId compoundId, const PxTransform& transform, bool isDynamic, const PrunerPayload* data, const PxTransform* transforms) { PX_ASSERT(bvh.getNbBounds()); const PxBounds3 compoundBounds = PxBounds3::transformFast(transform, bvh.getNodes()->mBV); const PoolIndex poolIndex = mCompoundTreePool.addCompound(results, bvh, compoundBounds, transform, isDynamic, data, transforms); mChangedLeaves.clear(); IncrementalAABBTreeNode* node = mMainTree.insert(poolIndex, mCompoundTreePool.getCurrentCompoundBounds(), mChangedLeaves); updateMapping(poolIndex, node); 
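// Keep the two id<->index maps in sync: queries walking the main tree produce a PoolIndex, while the
// public API addresses compounds by PrunerCompoundId, so both directions are needed. Both maps are
// patched again in removeCompound() when the pool swaps the last compound into the freed slot.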
mActorPoolMap[compoundId] = poolIndex; mPoolActorMap[poolIndex] = compoundId; #if PARANOIA_CHECKS test(); #endif return true; } /////////////////////////////////////////////////////////////////////////////////////////////// void BVHCompoundPruner::updateMapping(const PoolIndex poolIndex, IncrementalAABBTreeNode* node) { // resize mapping if needed if(mMainTreeUpdateMap.size() <= poolIndex) { const PxU32 resizeSize = mMainTreeUpdateMap.size() * 2; mMainTreeUpdateMap.resize(resizeSize); mPoolActorMap.resize(resizeSize); } // if a node was split we need to update the node indices and also the sibling indices if(!mChangedLeaves.empty()) { if(node && node->isLeaf()) { for(PxU32 j = 0; j < node->getNbPrimitives(); j++) { mMainTreeUpdateMap[node->getPrimitives(NULL)[j]] = node; } } for(PxU32 i = 0; i < mChangedLeaves.size(); i++) { IncrementalAABBTreeNode* changedNode = mChangedLeaves[i]; PX_ASSERT(changedNode->isLeaf()); for(PxU32 j = 0; j < changedNode->getNbPrimitives(); j++) { mMainTreeUpdateMap[changedNode->getPrimitives(NULL)[j]] = changedNode; } } } else { mMainTreeUpdateMap[poolIndex] = node; } } /////////////////////////////////////////////////////////////////////////////////////////////// bool BVHCompoundPruner::removeCompound(PrunerCompoundId compoundId, PrunerPayloadRemovalCallback* removalCallback) { const ActorIdPoolIndexMap::Entry* poolIndexEntry = mActorPoolMap.find(compoundId); PX_ASSERT(poolIndexEntry); bool isDynamic = false; if(poolIndexEntry) { const PoolIndex poolIndex = poolIndexEntry->second; CompoundTree& compoundTree = mCompoundTreePool.getCompoundTrees()[poolIndex]; isDynamic = compoundTree.mFlags & PxCompoundPrunerQueryFlag::eDYNAMIC; const PoolIndex poolRelocatedLastIndex = mCompoundTreePool.removeCompound(poolIndex, removalCallback); IncrementalAABBTreeNode* node = mMainTree.remove(mMainTreeUpdateMap[poolIndex], poolIndex, mCompoundTreePool.getCurrentCompoundBounds()); // if node moved to its parent if(node && node->isLeaf()) { for (PxU32 j = 0; j < node->getNbPrimitives(); j++) { const PoolIndex index = node->getPrimitives(NULL)[j]; mMainTreeUpdateMap[index] = node; } } // fix indices if we made a swap if(poolRelocatedLastIndex != poolIndex) { mMainTreeUpdateMap[poolIndex] = mMainTreeUpdateMap[poolRelocatedLastIndex]; mMainTree.fixupTreeIndices(mMainTreeUpdateMap[poolIndex], poolRelocatedLastIndex, poolIndex); mActorPoolMap[mPoolActorMap[poolRelocatedLastIndex]] = poolIndex; mPoolActorMap[poolIndex] = mPoolActorMap[poolRelocatedLastIndex]; } mActorPoolMap.erase(compoundId); } #if PARANOIA_CHECKS test(); #endif return isDynamic; } /////////////////////////////////////////////////////////////////////////////////////////////// bool BVHCompoundPruner::updateCompound(PrunerCompoundId compoundId, const PxTransform& transform) { const ActorIdPoolIndexMap::Entry* poolIndexEntry = mActorPoolMap.find(compoundId); PX_ASSERT(poolIndexEntry); bool isDynamic = false; if(poolIndexEntry) { const PxU32 poolIndex = poolIndexEntry->second; CompoundTree& compoundTree = mCompoundTreePool.getCompoundTrees()[poolIndex]; isDynamic = compoundTree.mFlags & PxCompoundPrunerQueryFlag::eDYNAMIC; compoundTree.mGlobalPose = transform; PxBounds3 localBounds; const IncrementalAABBTreeNode* node = compoundTree.mTree->getNodes(); V4StoreU(node->mBVMin, &localBounds.minimum.x); PX_ALIGN(16, PxVec4) max4; V4StoreA(node->mBVMax, &max4.x); localBounds.maximum = PxVec3(max4.x, max4.y, max4.z); const PxBounds3 compoundBounds = PxBounds3::transformFast(transform, localBounds); 
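// Write the refit world-space bounds back into the pool before updating the main incremental tree below,
// so that the update reads the new bounds for this pool index rather than the stale ones.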
mCompoundTreePool.getCurrentCompoundBounds()[poolIndex] = compoundBounds; mChangedLeaves.clear(); IncrementalAABBTreeNode* mainTreeNode = mMainTree.update(mMainTreeUpdateMap[poolIndex], poolIndex, mCompoundTreePool.getCurrentCompoundBounds(), mChangedLeaves); // we removed node during update, need to update the mapping updateMapping(poolIndex, mainTreeNode); } #if PARANOIA_CHECKS test(); #endif return isDynamic; } /////////////////////////////////////////////////////////////////////////////////////////////// void BVHCompoundPruner::test() { if(mMainTree.getNodes()) { for(PxU32 i = 0; i < mCompoundTreePool.getNbObjects(); i++) { mMainTree.checkTreeLeaf(mMainTreeUpdateMap[i], i); } } } /////////////////////////////////////////////////////////////////////////////////////////////// void BVHCompoundPruner::release() { } ////////////////////////////////////////////////////////////////////////// // Queries implementation ////////////////////////////////////////////////////////////////////////// namespace { struct CompoundCallbackRaycastAdapter { PX_FORCE_INLINE CompoundCallbackRaycastAdapter(CompoundPrunerRaycastCallback& pcb, const CompoundTree& tree) : mCallback(pcb), mTree(tree) {} PX_FORCE_INLINE bool invoke(PxReal& distance, PxU32 primIndex) { return mCallback.invoke(distance, primIndex, mTree.mPruningPool->getObjects(), mTree.mPruningPool->getTransforms(), &mTree.mGlobalPose); } CompoundPrunerRaycastCallback& mCallback; const CompoundTree& mTree; PX_NOCOPY(CompoundCallbackRaycastAdapter) }; struct CompoundCallbackOverlapAdapter { PX_FORCE_INLINE CompoundCallbackOverlapAdapter(CompoundPrunerOverlapCallback& pcb, const CompoundTree& tree) : mCallback(pcb), mTree(tree) {} PX_FORCE_INLINE bool invoke(PxU32 primIndex) { return mCallback.invoke(primIndex, mTree.mPruningPool->getObjects(), mTree.mPruningPool->getTransforms(), &mTree.mGlobalPose); } CompoundPrunerOverlapCallback& mCallback; const CompoundTree& mTree; PX_NOCOPY(CompoundCallbackOverlapAdapter) }; } template<class PrunerCallback> struct MainTreeCompoundPrunerCallback { MainTreeCompoundPrunerCallback(PrunerCallback& prunerCallback, PxCompoundPrunerQueryFlags flags, const CompoundTree* compoundTrees) : mPrunerCallback(prunerCallback), mQueryFlags(flags), mCompoundTrees(compoundTrees) { } virtual ~MainTreeCompoundPrunerCallback() {} PX_FORCE_INLINE bool filtering(const CompoundTree& compoundTree) const { if(!(compoundTree.mFlags & mQueryFlags) || !compoundTree.mTree->getNodes()) return true; return false; } protected: PrunerCallback& mPrunerCallback; const PxCompoundPrunerQueryFlags mQueryFlags; const CompoundTree* mCompoundTrees; PX_NOCOPY(MainTreeCompoundPrunerCallback) }; // Raycast/sweeps callback for main AABB tree template<bool tInflate> struct MainTreeRaycastCompoundPrunerCallback : MainTreeCompoundPrunerCallback<CompoundPrunerRaycastCallback> { MainTreeRaycastCompoundPrunerCallback(const PxVec3& origin, const PxVec3& unitDir, const PxVec3& extent, CompoundPrunerRaycastCallback& prunerCallback, PxCompoundPrunerQueryFlags flags, const CompoundTree* compoundTrees) : MainTreeCompoundPrunerCallback(prunerCallback, flags, compoundTrees), mOrigin(origin), mUnitDir(unitDir), mExtent(extent) { } virtual ~MainTreeRaycastCompoundPrunerCallback() {} bool invoke(PxReal& distance, PxU32 primIndex) { const CompoundTree& compoundTree = mCompoundTrees[primIndex]; if(filtering(compoundTree)) return true; // transfer to actor local space const PxVec3 localOrigin = compoundTree.mGlobalPose.transformInv(mOrigin); const PxVec3 localDir = 
compoundTree.mGlobalPose.q.rotateInv(mUnitDir); PxVec3 localExtent = mExtent; if(tInflate) { const PxBounds3 wBounds = PxBounds3::centerExtents(mOrigin, mExtent); const PxBounds3 localBounds = PxBounds3::transformSafe(compoundTree.mGlobalPose.getInverse(), wBounds); localExtent = localBounds.getExtents(); } // raycast the merged tree CompoundCallbackRaycastAdapter pcb(mPrunerCallback, compoundTree); return AABBTreeRaycast<tInflate, true, IncrementalAABBTree, IncrementalAABBTreeNode, CompoundCallbackRaycastAdapter>() (compoundTree.mPruningPool->getCurrentAABBTreeBounds(), *compoundTree.mTree, localOrigin, localDir, distance, localExtent, pcb); } PX_NOCOPY(MainTreeRaycastCompoundPrunerCallback) private: const PxVec3& mOrigin; const PxVec3& mUnitDir; const PxVec3& mExtent; }; ////////////////////////////////////////////////////////////////////////// // raycast against the compound pruner bool BVHCompoundPruner::raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, CompoundPrunerRaycastCallback& prunerCallback, PxCompoundPrunerQueryFlags flags) const { bool again = true; // search the main tree if there are nodes if(mMainTree.getNodes()) { const PxVec3 extent(0.0f); // main tree callback MainTreeRaycastCompoundPrunerCallback<false> pcb(origin, unitDir, extent, prunerCallback, flags, mCompoundTreePool.getCompoundTrees()); // traverse the main tree again = AABBTreeRaycast<false, true, IncrementalAABBTree, IncrementalAABBTreeNode, MainTreeRaycastCompoundPrunerCallback<false> >() (mCompoundTreePool.getCurrentAABBTreeBounds(), mMainTree, origin, unitDir, inOutDistance, extent, pcb); } return again; } ////////////////////////////////////////////////////////////////////////// // overlap main tree callback // A.B. templated version is complicated due to test transformations, will do a callback per primitive struct MainTreeOverlapCompoundPrunerCallback : MainTreeCompoundPrunerCallback<CompoundPrunerOverlapCallback> { MainTreeOverlapCompoundPrunerCallback(const ShapeData& queryVolume, CompoundPrunerOverlapCallback& prunerCallback, PxCompoundPrunerQueryFlags flags, const CompoundTree* compoundTrees) : MainTreeCompoundPrunerCallback(prunerCallback, flags, compoundTrees), mQueryVolume(queryVolume) { } virtual ~MainTreeOverlapCompoundPrunerCallback() {} PX_NOCOPY(MainTreeOverlapCompoundPrunerCallback) protected: const ShapeData& mQueryVolume; }; // OBB struct MainTreeOBBOverlapCompoundPrunerCallback : public MainTreeOverlapCompoundPrunerCallback { MainTreeOBBOverlapCompoundPrunerCallback(const ShapeData& queryVolume, CompoundPrunerOverlapCallback& prunerCallback, PxCompoundPrunerQueryFlags flags, const CompoundTree* compoundTrees) : MainTreeOverlapCompoundPrunerCallback(queryVolume, prunerCallback, flags, compoundTrees) {} bool invoke(PxU32 primIndex) { const CompoundTree& compoundTree = mCompoundTrees[primIndex]; if(filtering(compoundTree)) return true; const PxVec3 localPos = compoundTree.mGlobalPose.transformInv(mQueryVolume.getPrunerWorldPos()); const PxMat33 transfMat(compoundTree.mGlobalPose.q); const PxMat33 localRot = transfMat.getTranspose()*mQueryVolume.getPrunerWorldRot33(); const OBBAABBTest localTest(localPos, localRot, mQueryVolume.getPrunerBoxGeomExtentsInflated()); // overlap the compound local tree CompoundCallbackOverlapAdapter pcb(mPrunerCallback, compoundTree); return AABBTreeOverlap<true, OBBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, CompoundCallbackOverlapAdapter>() (compoundTree.mPruningPool->getCurrentAABBTreeBounds(), *compoundTree.mTree, 
localTest, pcb); } PX_NOCOPY(MainTreeOBBOverlapCompoundPrunerCallback) }; // AABB struct MainTreeAABBOverlapCompoundPrunerCallback : public MainTreeOverlapCompoundPrunerCallback { MainTreeAABBOverlapCompoundPrunerCallback(const ShapeData& queryVolume, CompoundPrunerOverlapCallback& prunerCallback, PxCompoundPrunerQueryFlags flags, const CompoundTree* compoundTrees) : MainTreeOverlapCompoundPrunerCallback(queryVolume, prunerCallback, flags, compoundTrees) {} bool invoke(PxU32 primIndex) { const CompoundTree& compoundTree = mCompoundTrees[primIndex]; if(filtering(compoundTree)) return true; const PxVec3 localPos = compoundTree.mGlobalPose.transformInv(mQueryVolume.getPrunerWorldPos()); const PxMat33 transfMat(compoundTree.mGlobalPose.q); const PxMat33 localRot = transfMat.getTranspose()*mQueryVolume.getPrunerWorldRot33(); // A.B. we dont have the AABB in local space, either we test OBB local space or // we retest the AABB with the worldSpace AABB of the local tree??? const OBBAABBTest localTest(localPos, localRot, mQueryVolume.getPrunerBoxGeomExtentsInflated()); // overlap the compound local tree CompoundCallbackOverlapAdapter pcb(mPrunerCallback, compoundTree); return AABBTreeOverlap<true, OBBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, CompoundCallbackOverlapAdapter>() (compoundTree.mPruningPool->getCurrentAABBTreeBounds(), *compoundTree.mTree, localTest, pcb); } PX_NOCOPY(MainTreeAABBOverlapCompoundPrunerCallback) }; // Capsule struct MainTreeCapsuleOverlapCompoundPrunerCallback : public MainTreeOverlapCompoundPrunerCallback { MainTreeCapsuleOverlapCompoundPrunerCallback(const ShapeData& queryVolume, CompoundPrunerOverlapCallback& prunerCallback, PxCompoundPrunerQueryFlags flags, const CompoundTree* compoundTrees) : MainTreeOverlapCompoundPrunerCallback(queryVolume, prunerCallback, flags, compoundTrees) {} bool invoke(PxU32 primIndex) { const CompoundTree& compoundTree = mCompoundTrees[primIndex]; if(filtering(compoundTree)) return true; const PxMat33 transfMat(compoundTree.mGlobalPose.q); const Capsule& capsule = mQueryVolume.getGuCapsule(); const CapsuleAABBTest localTest( compoundTree.mGlobalPose.transformInv(capsule.p1), transfMat.getTranspose()*mQueryVolume.getPrunerWorldRot33().column0, mQueryVolume.getCapsuleHalfHeight()*2.0f, PxVec3(capsule.radius*SQ_PRUNER_INFLATION)); // overlap the compound local tree CompoundCallbackOverlapAdapter pcb(mPrunerCallback, compoundTree); return AABBTreeOverlap<true, CapsuleAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, CompoundCallbackOverlapAdapter>() (compoundTree.mPruningPool->getCurrentAABBTreeBounds(), *compoundTree.mTree, localTest, pcb); } PX_NOCOPY(MainTreeCapsuleOverlapCompoundPrunerCallback) }; // Sphere struct MainTreeSphereOverlapCompoundPrunerCallback : public MainTreeOverlapCompoundPrunerCallback { MainTreeSphereOverlapCompoundPrunerCallback(const ShapeData& queryVolume, CompoundPrunerOverlapCallback& prunerCallback, PxCompoundPrunerQueryFlags flags, const CompoundTree* compoundTrees) : MainTreeOverlapCompoundPrunerCallback(queryVolume, prunerCallback, flags, compoundTrees) {} bool invoke(PxU32 primIndex) { const CompoundTree& compoundTree = mCompoundTrees[primIndex]; if(filtering(compoundTree)) return true; const Sphere& sphere = mQueryVolume.getGuSphere(); const SphereAABBTest localTest(compoundTree.mGlobalPose.transformInv(sphere.center), sphere.radius); // overlap the compound local tree CompoundCallbackOverlapAdapter pcb(mPrunerCallback, compoundTree); return AABBTreeOverlap<true, SphereAABBTest, 
IncrementalAABBTree, IncrementalAABBTreeNode, CompoundCallbackOverlapAdapter>() (compoundTree.mPruningPool->getCurrentAABBTreeBounds(), *compoundTree.mTree, localTest, pcb); } PX_NOCOPY(MainTreeSphereOverlapCompoundPrunerCallback) }; ////////////////////////////////////////////////////////////////////////// // overlap implementation bool BVHCompoundPruner::overlap(const ShapeData& queryVolume, CompoundPrunerOverlapCallback& prunerCallback, PxCompoundPrunerQueryFlags flags) const { if(!mMainTree.getNodes()) return true; bool again = true; const Gu::AABBTreeBounds& bounds = mCompoundTreePool.getCurrentAABBTreeBounds(); switch (queryVolume.getType()) { case PxGeometryType::eBOX: { if(queryVolume.isOBB()) { const DefaultOBBAABBTest test(queryVolume); MainTreeOBBOverlapCompoundPrunerCallback pcb(queryVolume, prunerCallback, flags, mCompoundTreePool.getCompoundTrees()); again = AABBTreeOverlap<true, OBBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, MainTreeOBBOverlapCompoundPrunerCallback>()(bounds, mMainTree, test, pcb); } else { const DefaultAABBAABBTest test(queryVolume); MainTreeAABBOverlapCompoundPrunerCallback pcb(queryVolume, prunerCallback, flags, mCompoundTreePool.getCompoundTrees()); again = AABBTreeOverlap<true, AABBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, MainTreeAABBOverlapCompoundPrunerCallback>()(bounds, mMainTree, test, pcb); } } break; case PxGeometryType::eCAPSULE: { const DefaultCapsuleAABBTest test(queryVolume, SQ_PRUNER_INFLATION); MainTreeCapsuleOverlapCompoundPrunerCallback pcb(queryVolume, prunerCallback, flags, mCompoundTreePool.getCompoundTrees()); again = AABBTreeOverlap<true, CapsuleAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, MainTreeCapsuleOverlapCompoundPrunerCallback >()(bounds, mMainTree, test, pcb); } break; case PxGeometryType::eSPHERE: { const DefaultSphereAABBTest test(queryVolume); MainTreeSphereOverlapCompoundPrunerCallback pcb(queryVolume, prunerCallback, flags, mCompoundTreePool.getCompoundTrees()); again = AABBTreeOverlap<true, SphereAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, MainTreeSphereOverlapCompoundPrunerCallback>()(bounds, mMainTree, test, pcb); } break; case PxGeometryType::eCONVEXMESH: { const DefaultOBBAABBTest test(queryVolume); MainTreeOBBOverlapCompoundPrunerCallback pcb(queryVolume, prunerCallback, flags, mCompoundTreePool.getCompoundTrees()); again = AABBTreeOverlap<true, OBBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, MainTreeOBBOverlapCompoundPrunerCallback>()(bounds, mMainTree, test, pcb); } break; default: PX_ALWAYS_ASSERT_MESSAGE("unsupported overlap query volume geometry type"); } return again; } /////////////////////////////////////////////////////////////////////////////////////////////// bool BVHCompoundPruner::sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, CompoundPrunerRaycastCallback& prunerCallback, PxCompoundPrunerQueryFlags flags) const { bool again = true; if(mMainTree.getNodes()) { const PxBounds3& aabb = queryVolume.getPrunerInflatedWorldAABB(); const PxVec3 extents = aabb.getExtents(); const PxVec3 center = aabb.getCenter(); MainTreeRaycastCompoundPrunerCallback<true> pcb(center, unitDir, extents, prunerCallback, flags, mCompoundTreePool.getCompoundTrees()); again = AABBTreeRaycast<true, true, IncrementalAABBTree, IncrementalAABBTreeNode, MainTreeRaycastCompoundPrunerCallback<true> >() (mCompoundTreePool.getCurrentAABBTreeBounds(), mMainTree, center, unitDir, inOutDistance, extents, pcb); } return again; } 
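// Illustrative sketch (not part of the original sources): the world-to-compound-local
// transform that the raycast/sweep callbacks above perform before traversing a
// compound's local tree. The helper name and signature below are hypothetical; the
// individual calls (transformInv, rotateInv, transformSafe) are the ones used in
// MainTreeRaycastCompoundPrunerCallback::invoke.
#include "foundation/PxTransform.h"
#include "foundation/PxBounds3.h"

static void worldRayToCompoundLocal(const physx::PxTransform& compoundPose,
	const physx::PxVec3& worldOrigin, const physx::PxVec3& worldUnitDir, const physx::PxVec3& worldExtent,
	physx::PxVec3& localOrigin, physx::PxVec3& localDir, physx::PxVec3& localExtent, bool inflate)
{
	using namespace physx;

	// positions transform with the full pose, directions only rotate
	localOrigin = compoundPose.transformInv(worldOrigin);
	localDir = compoundPose.q.rotateInv(worldUnitDir);

	localExtent = worldExtent;
	if(inflate)
	{
		// for inflated sweeps the query AABB is re-expressed in the compound's local
		// frame, which can grow the extents under rotation
		const PxBounds3 worldBounds = PxBounds3::centerExtents(worldOrigin, worldExtent);
		const PxBounds3 localBounds = PxBounds3::transformSafe(compoundPose.getInverse(), worldBounds);
		localExtent = localBounds.getExtents();
	}
}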
/////////////////////////////////////////////////////////////////////////////////////////////// const PrunerPayload& BVHCompoundPruner::getPayloadData(PrunerHandle handle, PrunerCompoundId compoundId, PrunerPayloadData* data) const { const ActorIdPoolIndexMap::Entry* poolIndexEntry = mActorPoolMap.find(compoundId); PX_ASSERT(poolIndexEntry); return mCompoundTreePool.getCompoundTrees()[poolIndexEntry->second].mPruningPool->getPayloadData(handle, data); } /////////////////////////////////////////////////////////////////////////////////////////////// void BVHCompoundPruner::preallocate(PxU32 nbEntries) { mCompoundTreePool.preallocate(nbEntries); mMainTreeUpdateMap.resizeUninitialized(nbEntries); mPoolActorMap.resizeUninitialized(nbEntries); mChangedLeaves.reserve(nbEntries); } /////////////////////////////////////////////////////////////////////////////////////////////// bool BVHCompoundPruner::setTransform(PrunerHandle handle, PrunerCompoundId compoundId, const PxTransform& transform) { const ActorIdPoolIndexMap::Entry* poolIndexEntry = mActorPoolMap.find(compoundId); PX_ASSERT(poolIndexEntry); return mCompoundTreePool.getCompoundTrees()[poolIndexEntry->second].mPruningPool->setTransform(handle, transform); } const PxTransform& BVHCompoundPruner::getTransform(PrunerCompoundId compoundId) const { const ActorIdPoolIndexMap::Entry* poolIndexEntry = mActorPoolMap.find(compoundId); PX_ASSERT(poolIndexEntry); return mCompoundTreePool.getCompoundTrees()[poolIndexEntry->second].mGlobalPose; } /////////////////////////////////////////////////////////////////////////////////////////////// void BVHCompoundPruner::updateObjectAfterManualBoundsUpdates(PrunerCompoundId compoundId, const PrunerHandle handle) { const ActorIdPoolIndexMap::Entry* poolIndexEntry = mActorPoolMap.find(compoundId); PX_ASSERT(poolIndexEntry); if(!poolIndexEntry) return; mCompoundTreePool.getCompoundTrees()[poolIndexEntry->second].updateObjectAfterManualBoundsUpdates(handle); const PxU32 poolIndex = poolIndexEntry->second; updateMainTreeNode(poolIndex); } /////////////////////////////////////////////////////////////////////////////////////////////// void BVHCompoundPruner::removeObject(PrunerCompoundId compoundId, const PrunerHandle handle, PrunerPayloadRemovalCallback* removalCallback) { const ActorIdPoolIndexMap::Entry* poolIndexEntry = mActorPoolMap.find(compoundId); PX_ASSERT(poolIndexEntry); if(!poolIndexEntry) return; const PxU32 poolIndex = poolIndexEntry->second; mCompoundTreePool.getCompoundTrees()[poolIndex].removeObject(handle, removalCallback); // edge case, we removed all objects for the compound tree, we need to remove it now completely if(!mCompoundTreePool.getCompoundTrees()[poolIndex].mTree->getNodes()) removeCompound(compoundId, removalCallback); else updateMainTreeNode(poolIndex); } /////////////////////////////////////////////////////////////////////////////////////////////// bool BVHCompoundPruner::addObject(PrunerCompoundId compoundId, PrunerHandle& result, const PxBounds3& bounds, const PrunerPayload userData, const PxTransform& transform) { const ActorIdPoolIndexMap::Entry* poolIndexEntry = mActorPoolMap.find(compoundId); PX_ASSERT(poolIndexEntry); if(!poolIndexEntry) return false; mCompoundTreePool.getCompoundTrees()[poolIndexEntry->second].addObject(result, bounds, userData, transform); const PxU32 poolIndex = poolIndexEntry->second; updateMainTreeNode(poolIndex); return true; } /////////////////////////////////////////////////////////////////////////////////////////////// void 
BVHCompoundPruner::updateMainTreeNode(PoolIndex poolIndex) { PxBounds3 localBounds; const IncrementalAABBTreeNode* node = mCompoundTreePool.getCompoundTrees()[poolIndex].mTree->getNodes(); V4StoreU(node->mBVMin, &localBounds.minimum.x); PX_ALIGN(16, PxVec4) max4; V4StoreA(node->mBVMax, &max4.x); localBounds.maximum = PxVec3(max4.x, max4.y, max4.z); const PxBounds3 compoundBounds = PxBounds3::transformFast(mCompoundTreePool.getCompoundTrees()[poolIndex].mGlobalPose, localBounds); mCompoundTreePool.getCurrentCompoundBounds()[poolIndex] = compoundBounds; mChangedLeaves.clear(); IncrementalAABBTreeNode* mainTreeNode = mMainTree.update(mMainTreeUpdateMap[poolIndex], poolIndex, mCompoundTreePool.getCurrentCompoundBounds(), mChangedLeaves); // we removed node during update, need to update the mapping updateMapping(poolIndex, mainTreeNode); } /////////////////////////////////////////////////////////////////////////////////////////////// void BVHCompoundPruner::shiftOrigin(const PxVec3& shift) { mCompoundTreePool.shiftOrigin(shift); mMainTree.shiftOrigin(shift); } /////////////////////////////////////////////////////////////////////////////////////////////// namespace { class CompoundTreeVizCb : public DebugVizCallback { PX_NOCOPY(CompoundTreeVizCb) public: CompoundTreeVizCb(PxRenderOutput& out, const CompoundTree& tree) : mOut (out), mPose (tree.mGlobalPose) { } virtual bool visualizeNode(const IncrementalAABBTreeNode& /*node*/, const PxBounds3& bounds) { if(0) { Cm::renderOutputDebugBox(mOut, PxBounds3::transformSafe(mPose, bounds)); } else { PxVec3 pts[8]; computeBoxPoints(bounds, pts); for(PxU32 i=0;i<8;i++) pts[i] = mPose.transform(pts[i]); const PxU8* edges = getBoxEdges(); for(PxU32 i=0;i<12;i++) { const PxVec3& p0 = pts[*edges++]; const PxVec3& p1 = pts[*edges++]; mOut.outputSegment(p0, p1); } } return true; } PxRenderOutput& mOut; const PxTransform& mPose; }; class CompoundPrunerDebugVizCb : public DebugVizCallback { PX_NOCOPY(CompoundPrunerDebugVizCb) public: CompoundPrunerDebugVizCb(PxRenderOutput& out, const CompoundTree* trees, bool debugStatic, bool debugDynamic) : mOut (out), mTrees (trees), mDebugVizStatic (debugStatic), mDebugVizDynamic(debugDynamic) {} virtual bool visualizeNode(const IncrementalAABBTreeNode& node, const PxBounds3& /*bounds*/) { if(node.isLeaf()) { PxU32 nbPrims = node.getNbPrimitives(); const PxU32* prims = node.getPrimitives(NULL); while(nbPrims--) { const CompoundTree& compoundTree = mTrees[*prims++]; const bool isDynamic = compoundTree.mFlags & PxCompoundPrunerQueryFlag::eDYNAMIC; if((mDebugVizDynamic && isDynamic) || (mDebugVizStatic && !isDynamic)) { const PxU32 color = isDynamic ? 
SQ_DEBUG_VIZ_DYNAMIC_COLOR : SQ_DEBUG_VIZ_STATIC_COLOR; CompoundTreeVizCb leafCB(mOut, compoundTree); visualizeTree(mOut, color, compoundTree.mTree, &leafCB); mOut << SQ_DEBUG_VIZ_COMPOUND_COLOR; } } } return false; } PxRenderOutput& mOut; const CompoundTree* mTrees; const bool mDebugVizStatic; const bool mDebugVizDynamic; }; } void BVHCompoundPruner::visualize(PxRenderOutput& out, PxU32 primaryColor, PxU32 /*secondaryColor*/) const { if(mDrawStatic || mDrawDynamic) { CompoundPrunerDebugVizCb cb(out, mCompoundTreePool.getCompoundTrees(), mDrawStatic, mDrawDynamic); visualizeTree(out, primaryColor, &mMainTree, &cb); } } void BVHCompoundPruner::visualizeEx(PxRenderOutput& out, PxU32 color, bool drawStatic, bool drawDynamic) const { mDrawStatic = drawStatic; mDrawDynamic = drawDynamic; visualize(out, color, color); } ///////////////////////////////////////////////////////////////////////////////////////////////
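// Illustrative sketch (not part of the original sources): the swap-with-last removal
// bookkeeping that BVHCompoundPruner::removeCompound performs on mActorPoolMap /
// mPoolActorMap / mMainTreeUpdateMap, reduced to plain C++ with hypothetical names.
#include <vector>
#include <unordered_map>
#include <cstdint>
#include <cstddef>

struct SwapRemovePool
{
	std::vector<int>								objects;	// dense pool (stand-in for the compound tree pool)
	std::unordered_map<std::uint32_t, std::size_t>	idToIndex;	// stand-in for mActorPoolMap
	std::vector<std::uint32_t>						indexToId;	// stand-in for mPoolActorMap

	void add(std::uint32_t id, int value)
	{
		idToIndex[id] = objects.size();
		indexToId.push_back(id);
		objects.push_back(value);
	}

	void remove(std::uint32_t id)
	{
		const std::size_t index = idToIndex[id];
		const std::size_t last = objects.size() - 1;
		if(index != last)
		{
			// the last object is relocated into the freed slot, so both mappings must be
			// redirected - the same fixup as the poolRelocatedLastIndex case above
			objects[index] = objects[last];
			const std::uint32_t movedId = indexToId[last];
			idToIndex[movedId] = index;
			indexToId[index] = movedId;
		}
		objects.pop_back();
		indexToId.pop_back();
		idToIndex.erase(id);
	}
};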
NVIDIA-Omniverse/PhysX/physx/source/scenequery/src/SqCompoundPruner.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SQ_COMPOUND_PRUNER_H #define SQ_COMPOUND_PRUNER_H #include "SqCompoundPruningPool.h" #include "GuSqInternal.h" #include "GuPrunerMergeData.h" #include "GuIncrementalAABBTree.h" #include "GuPruningPool.h" #include "foundation/PxHashMap.h" #include "foundation/PxArray.h" namespace physx { namespace Sq { /////////////////////////////////////////////////////////////////////////////////////////////// typedef PxHashMap<PrunerCompoundId, Gu::PoolIndex> ActorIdPoolIndexMap; typedef PxArray<PrunerCompoundId> PoolIndexActorIdMap; /////////////////////////////////////////////////////////////////////////////////////////////// class BVHCompoundPruner : public CompoundPruner { public: BVHCompoundPruner(PxU64 contextID); virtual ~BVHCompoundPruner(); void release(); // BasePruner DECLARE_BASE_PRUNER_API //~BasePruner // CompoundPruner // compound level virtual bool addCompound(Gu::PrunerHandle* results, const Gu::BVH& bvh, PrunerCompoundId compoundId, const PxTransform& transform, bool isDynamic, const Gu::PrunerPayload* data, const PxTransform* transforms); virtual bool removeCompound(PrunerCompoundId compoundId, Gu::PrunerPayloadRemovalCallback* removalCallback); virtual bool updateCompound(PrunerCompoundId compoundId, const PxTransform& transform); // object level virtual void updateObjectAfterManualBoundsUpdates(PrunerCompoundId compoundId, const Gu::PrunerHandle handle); virtual void removeObject(PrunerCompoundId compoundId, const Gu::PrunerHandle handle, Gu::PrunerPayloadRemovalCallback* removalCallback); virtual bool addObject(PrunerCompoundId compoundId, Gu::PrunerHandle& result, const PxBounds3& bounds, const Gu::PrunerPayload userData, const PxTransform& transform); //queries virtual bool raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, CompoundPrunerRaycastCallback&, PxCompoundPrunerQueryFlags flags) const; virtual 
bool overlap(const Gu::ShapeData& queryVolume, CompoundPrunerOverlapCallback&, PxCompoundPrunerQueryFlags flags) const; virtual bool sweep(const Gu::ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, CompoundPrunerRaycastCallback&, PxCompoundPrunerQueryFlags flags) const; virtual const Gu::PrunerPayload& getPayloadData(Gu::PrunerHandle handle, PrunerCompoundId compoundId, Gu::PrunerPayloadData* data) const; virtual void preallocate(PxU32 nbEntries); virtual bool setTransform(Gu::PrunerHandle handle, PrunerCompoundId compoundId, const PxTransform& transform); virtual const PxTransform& getTransform(PrunerCompoundId compoundId) const; virtual void visualizeEx(PxRenderOutput& out, PxU32 color, bool drawStatic, bool drawDynamic) const; // ~CompoundPruner private: void updateMapping(const Gu::PoolIndex poolIndex, Gu::IncrementalAABBTreeNode* node); void updateMainTreeNode(Gu::PoolIndex index); void test(); Gu::IncrementalAABBTree mMainTree; UpdateMap mMainTreeUpdateMap; CompoundTreePool mCompoundTreePool; ActorIdPoolIndexMap mActorPoolMap; PoolIndexActorIdMap mPoolActorMap; Gu::NodeList mChangedLeaves; mutable bool mDrawStatic; mutable bool mDrawDynamic; }; } } #endif
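// Illustrative sketch (not part of the original sources): a minimal client-side
// CompoundPrunerRaycastCallback, the type BVHCompoundPruner::raycast/sweep expect.
// The narrow-phase test is omitted and the return value is assumed to mean
// "continue traversal" when true, based on the traversal code in the .cpp above.
#include "SqPruner.h"

struct ClosestPrimCallback : public physx::Sq::CompoundPrunerRaycastCallback
{
	physx::PxU32				mClosestPrim;
	physx::Gu::PrunerPayload	mClosestPayload;

	ClosestPrimCallback() : mClosestPrim(0xffffffff)	{}

	virtual bool invoke(physx::PxReal& distance, physx::PxU32 primIndex,
		const physx::Gu::PrunerPayload* payloads, const physx::PxTransform* transforms,
		const physx::PxTransform* compoundPose)
	{
		PX_UNUSED(distance);
		PX_UNUSED(transforms);
		PX_UNUSED(compoundPose);
		// a real callback would raycast the shape behind payloads[primIndex] here
		// and shrink 'distance' whenever it finds a closer hit
		mClosestPrim = primIndex;
		mClosestPayload = payloads[primIndex];
		return true;	// keep traversing
	}
};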
NVIDIA-Omniverse/PhysX/physx/source/scenequery/include/SqQuery.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SQ_QUERY_H #define SQ_QUERY_H // PT: SQ-API LEVEL 3 (Level 1 = SqPruner.h, Level 2 = SqManager/SqPrunerData) // PT: this file is part of a "high-level" set of files within Sq. The SqPruner API doesn't rely on them. // PT: this should really be at Np level but moving it to Sq allows us to share it. #include "foundation/PxSimpleTypes.h" #include "geometry/PxGeometryQueryFlags.h" #include "SqManager.h" #include "PxQueryReport.h" #include "GuCachedFuncs.h" namespace physx { class PxGeometry; struct PxQueryFilterData; struct PxFilterData; class PxQueryFilterCallback; namespace Sq { struct MultiQueryInput; class PVDCapture { public: PVDCapture() {} virtual ~PVDCapture() {} virtual bool transmitSceneQueries() = 0; virtual void raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal distance, const PxRaycastHit* hit, PxU32 hitsNum, const PxQueryFilterData& filterData, bool multipleHits) = 0; virtual void sweep(const PxGeometry& geometry, const PxTransform& pose, const PxVec3& unitDir, PxReal distance, const PxSweepHit* hit, PxU32 hitsNum, const PxQueryFilterData& filterData, bool multipleHits) = 0; virtual void overlap(const PxGeometry& geometry, const PxTransform& pose, const PxOverlapHit* hit, PxU32 hitsNum, const PxQueryFilterData& filterData) = 0; }; // SceneQueries-level adapter. Augments the PrunerManager-level adapter with functions needed to perform queries. class QueryAdapter : public Adapter { public: QueryAdapter() {} virtual ~QueryAdapter() {} // PT: TODO: decouple from PxQueryCache? virtual Gu::PrunerHandle findPrunerHandle(const PxQueryCache& cache, PrunerCompoundId& compoundId, PxU32& prunerIndex) const = 0; // PT: TODO: return reference? 
but this version is at least consistent with getActorShape virtual void getFilterData(const Gu::PrunerPayload& payload, PxFilterData& filterData) const = 0; virtual void getActorShape(const Gu::PrunerPayload& payload, PxActorShape& actorShape) const = 0; }; } class SceneQueries { PX_NOCOPY(SceneQueries) public: SceneQueries(Sq::PVDCapture* pvd, PxU64 contextID, Gu::Pruner* staticPruner, Gu::Pruner* dynamicPruner, PxU32 dynamicTreeRebuildRateHint, float inflation, const PxSceneLimits& limits, const Sq::QueryAdapter& adapter); ~SceneQueries(); PX_FORCE_INLINE Sq::PrunerManager& getPrunerManagerFast() { return mSQManager; } PX_FORCE_INLINE const Sq::PrunerManager& getPrunerManagerFast() const { return mSQManager; } template<typename QueryHit> bool multiQuery( const Sq::MultiQueryInput& in, PxHitCallback<QueryHit>& hits, PxHitFlags hitFlags, const PxQueryCache* cache, const PxQueryFilterData& filterData, PxQueryFilterCallback* filterCall) const; bool _raycast( const PxVec3& origin, const PxVec3& unitDir, const PxReal distance, // Ray data PxRaycastCallback& hitCall, PxHitFlags hitFlags, const PxQueryFilterData& filterData, PxQueryFilterCallback* filterCall, const PxQueryCache* cache, PxGeometryQueryFlags flags) const; bool _sweep( const PxGeometry& geometry, const PxTransform& pose, // GeomObject data const PxVec3& unitDir, const PxReal distance, // Ray data PxSweepCallback& hitCall, PxHitFlags hitFlags, const PxQueryFilterData& filterData, PxQueryFilterCallback* filterCall, const PxQueryCache* cache, const PxReal inflation, PxGeometryQueryFlags flags) const; bool _overlap( const PxGeometry& geometry, const PxTransform& transform, // GeomObject data PxOverlapCallback& hitCall, const PxQueryFilterData& filterData, PxQueryFilterCallback* filterCall, const PxQueryCache* cache, PxGeometryQueryFlags flags) const; PX_FORCE_INLINE PxU64 getContextId() const { return mSQManager.getContextId(); } Sq::PrunerManager mSQManager; public: Gu::CachedFuncs mCachedFuncs; Sq::PVDCapture* mPVD; }; #if PX_SUPPORT_EXTERN_TEMPLATE //explicit template instantiation declaration extern template bool SceneQueries::multiQuery<PxRaycastHit>(const Sq::MultiQueryInput&, PxHitCallback<PxRaycastHit>&, PxHitFlags, const PxQueryCache*, const PxQueryFilterData&, PxQueryFilterCallback*) const; extern template bool SceneQueries::multiQuery<PxOverlapHit>(const Sq::MultiQueryInput&, PxHitCallback<PxOverlapHit>&, PxHitFlags, const PxQueryCache*, const PxQueryFilterData&, PxQueryFilterCallback*) const; extern template bool SceneQueries::multiQuery<PxSweepHit>(const Sq::MultiQueryInput&, PxHitCallback<PxSweepHit>&, PxHitFlags, const PxQueryCache*, const PxQueryFilterData&, PxQueryFilterCallback*) const; #endif } #endif
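// Illustrative sketch (not part of the original sources): the extern-template pattern
// used above for SceneQueries::multiQuery, shown on a toy type. The explicit
// instantiation declarations in the header stop every including translation unit from
// instantiating the template again; the matching explicit instantiation definitions
// presumably live in the corresponding .cpp. 'Toy' and 'twice' are hypothetical names;
// both sides are shown in one file for brevity.
struct Toy
{
	template<typename T>
	T twice(T v) const	{ return v + v; }
};

// header side: promise that these specializations exist somewhere else
extern template int		Toy::twice<int>(int) const;
extern template float	Toy::twice<float>(float) const;

// source side: provide them exactly once
template int	Toy::twice<int>(int) const;
template float	Toy::twice<float>(float) const;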
NVIDIA-Omniverse/PhysX/physx/source/scenequery/include/SqPrunerData.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SQ_PRUNER_DATA_H #define SQ_PRUNER_DATA_H /** \addtogroup physics @{ */ #include "SqTypedef.h" // PT: SQ-API LEVEL 2 (Level 1 = SqPruner.h) // PT: this file is part of a "high-level" set of files within Sq. The SqPruner API doesn't rely on them. // PT: this should really be at Np level but moving it to Sq allows us to share it. namespace physx { namespace Sq { struct PruningIndex { enum Enum { eSTATIC = 0, // PT: must match PX_SCENE_PRUNER_STATIC eDYNAMIC = 1, // PT: must match PX_SCENE_PRUNER_DYNAMIC eCOUNT = 2 }; }; PX_FORCE_INLINE PrunerData createPrunerData(PxU32 index, Gu::PrunerHandle h) { return PrunerData((h << 1) | index); } PX_FORCE_INLINE PxU32 getPrunerIndex(PrunerData data) { return PxU32(data & 1); } PX_FORCE_INLINE Gu::PrunerHandle getPrunerHandle(PrunerData data) { return Gu::PrunerHandle(data >> 1); } } } /** @} */ #endif
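// Illustrative sketch (not part of the original sources): round-tripping the packed
// PrunerData encoding defined above. Bit 0 holds the pruner index
// (eSTATIC = 0 / eDYNAMIC = 1) and the remaining 31 bits hold the PrunerHandle,
// so handles above 0x7fffffff cannot be encoded. The function name is hypothetical.
#include "SqPrunerData.h"
#include "foundation/PxAssert.h"

static void prunerDataRoundTrip()
{
	using namespace physx;

	const Gu::PrunerHandle handle = 1234;
	const PxU32 index = Sq::PruningIndex::eDYNAMIC;						// 1

	const Sq::PrunerData data = Sq::createPrunerData(index, handle);	// (1234 << 1) | 1 = 2469

	PX_ASSERT(Sq::getPrunerIndex(data) == index);
	PX_ASSERT(Sq::getPrunerHandle(data) == handle);
}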
NVIDIA-Omniverse/PhysX/physx/source/scenequery/include/SqPruner.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SQ_PRUNER_H #define SQ_PRUNER_H #include "foundation/PxBounds3.h" #include "foundation/PxUserAllocated.h" #include "foundation/PxFlags.h" #include "GuPruner.h" #include "SqTypedef.h" namespace physx { namespace Gu { class BVH; } namespace Sq { /** \brief Compound-pruner-specific flags for scene queries. */ struct PxCompoundPrunerQueryFlag { enum Enum { eSTATIC = (1<<0), //!< Traverse static compounds eDYNAMIC = (1<<1), //!< Traverse dynamic compounds }; }; /** \brief Flags typedef for the set of bits defined in PxCompoundPrunerQueryFlag. */ typedef PxFlags<PxCompoundPrunerQueryFlag::Enum,PxU32> PxCompoundPrunerQueryFlags; PX_FLAGS_OPERATORS(PxCompoundPrunerQueryFlag::Enum,PxU32) struct CompoundPrunerRaycastCallback { CompoundPrunerRaycastCallback() {} virtual ~CompoundPrunerRaycastCallback() {} virtual bool invoke(PxReal& distance, PxU32 primIndex, const Gu::PrunerPayload* payloads, const PxTransform* transforms, const PxTransform* compoundPose) = 0; }; struct CompoundPrunerOverlapCallback { CompoundPrunerOverlapCallback() {} virtual ~CompoundPrunerOverlapCallback() {} virtual bool invoke(PxU32 primIndex, const Gu::PrunerPayload* payloads, const PxTransform* transforms, const PxTransform* compoundPose) = 0; }; ////////////////////////////////////////////////////////////////////////// /** * Pruner holding compound objects */ ////////////////////////////////////////////////////////////////////////// class CompoundPruner : public Gu::BasePruner { public: virtual ~CompoundPruner() {} /** \brief Adds compound to the pruner. \param results [out] an array for resulting handles \param bvh [in] BVH \param compoundId [in] compound id \param transform [in] compound transform \param data [in] an array of object data \return true if success, false if internal allocation failed. The first failing add results in a INVALID_PRUNERHANDLE. 
Handles are usable as indices. Each handle is either a recycled handle returned by the client via removeObjects(), or a fresh handle that is either zero, or one greater than the last fresh handle returned. */ virtual bool addCompound(Gu::PrunerHandle* results, const Gu::BVH& bvh, PrunerCompoundId compoundId, const PxTransform& transform, bool isDynamic, const Gu::PrunerPayload* data, const PxTransform* transforms) = 0; /** Removes a compound from the pruner. \param compoundId [in] compound to remove */ virtual bool removeCompound(PrunerCompoundId compoundId, Gu::PrunerPayloadRemovalCallback* removalCallback) = 0; /** Updates a compound object. \param compoundId [in] compound to update \param transform [in] compound transformation */ virtual bool updateCompound(PrunerCompoundId compoundId, const PxTransform& transform) = 0; /** Updates an object after manually updating its bounds via "getPayload" calls. \param compoundId [in] compound that the object belongs to \param handle [in] the object to update */ virtual void updateObjectAfterManualBoundsUpdates(PrunerCompoundId compoundId, const Gu::PrunerHandle handle) = 0; /** Removes an object from the compound pruner. \param compoundId [in] compound that the object belongs to \param handle [in] the object to remove */ virtual void removeObject(PrunerCompoundId compoundId, const Gu::PrunerHandle handle, Gu::PrunerPayloadRemovalCallback* removalCallback) = 0; /** \brief Adds an object to the pruner. \param compoundId [in] compound that the object belongs to \param result [out] the resulting handle \param bounds [in] the object's bounds. The bounds are used as-is so they should be pre-inflated if inflation is needed. \param userData [in] the object's payload data \return true if success, false if internal allocation failed. A failing add results in an INVALID_PRUNERHANDLE. */ virtual bool addObject(PrunerCompoundId compoundId, Gu::PrunerHandle& result, const PxBounds3& bounds, const Gu::PrunerPayload userData, const PxTransform& transform) = 0; /** * Query functions * * Note: the return value may disappear if PrunerCallback contains the necessary information; * currently it is still used for the dynamic pruner internally (to decide if added objects must be queried) */ virtual bool raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, CompoundPrunerRaycastCallback&, PxCompoundPrunerQueryFlags flags) const = 0; virtual bool overlap(const Gu::ShapeData& queryVolume, CompoundPrunerOverlapCallback&, PxCompoundPrunerQueryFlags flags) const = 0; virtual bool sweep(const Gu::ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, CompoundPrunerRaycastCallback&, PxCompoundPrunerQueryFlags flags) const = 0; /** \brief Retrieves the object's payload and data associated with the handle. This function returns the payload associated with a given handle. Additionally it can return the destination addresses for the object's bounds & transform. The user can then write the new bounds and transform there, before eventually calling updateObjects(). \param[in] handle Object handle (initially returned by addObjects()) \param[in] compoundId The compound id \param[out] data Optional location where to store the internal data associated with the payload. \return The payload associated with the given handle.
*/ virtual const Gu::PrunerPayload& getPayloadData(Gu::PrunerHandle handle, PrunerCompoundId compoundId, Gu::PrunerPayloadData* data) const = 0; /** \brief Preallocate space \param[in] nbEntries The number of entries to preallocate space for */ virtual void preallocate(PxU32 nbEntries) = 0; // PT: beware, shape transform virtual bool setTransform(Gu::PrunerHandle handle, PrunerCompoundId compoundId, const PxTransform& transform) = 0; // PT: beware, actor transform virtual const PxTransform& getTransform(PrunerCompoundId compoundId) const = 0; virtual void visualizeEx(PxRenderOutput& out, PxU32 color, bool drawStatic, bool drawDynamic) const = 0; }; } } #endif
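// Illustrative sketch (not part of the original sources): a minimal
// CompoundPrunerOverlapCallback that just collects every primitive index reported by
// the broad-phase traversal. A real client would run a narrow-phase overlap test on
// the shape behind payloads[primIndex] before accepting it; returning true is assumed
// to mean "continue traversal".
#include "SqPruner.h"
#include "foundation/PxArray.h"

struct CollectingOverlapCallback : public physx::Sq::CompoundPrunerOverlapCallback
{
	physx::PxArray<physx::PxU32>	mHits;

	virtual bool invoke(physx::PxU32 primIndex, const physx::Gu::PrunerPayload* payloads,
		const physx::PxTransform* transforms, const physx::PxTransform* compoundPose)
	{
		PX_UNUSED(payloads);
		PX_UNUSED(transforms);
		PX_UNUSED(compoundPose);
		mHits.pushBack(primIndex);
		return true;	// keep traversing
	}
};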
NVIDIA-Omniverse/PhysX/physx/source/scenequery/include/SqManager.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SQ_MANAGER_H #define SQ_MANAGER_H // PT: SQ-API LEVEL 2 (Level 1 = SqPruner.h) // PT: this file is part of a "high-level" set of files within Sq. The SqPruner API doesn't rely on them. // PT: this should really be at Np level but moving it to Sq allows us to share it. #include "common/PxPhysXCommonConfig.h" #include "foundation/PxBitMap.h" #include "foundation/PxArray.h" #include "SqPruner.h" #include "geometry/PxGeometryHelpers.h" namespace physx { namespace Sq { // PrunerManager-level adapter class Adapter { public: Adapter() {} virtual ~Adapter() {} // Retrieves the PxGeometry associated with a given PrunerPayload. This will be called by // the PrunerManager class when computing bounds. virtual const PxGeometry& getGeometry(const Gu::PrunerPayload& payload) const = 0; }; // PT: extended pruner structure. We might want to move the additional data to the pruner itself later. 
struct PrunerExt : public PxUserAllocated { // private: PrunerExt(); ~PrunerExt(); void init(Gu::Pruner* pruner); void flushMemory(); void preallocate(PxU32 nbShapes); void addToDirtyList(Gu::PrunerHandle handle, bool dynamic, const PxTransform& transform); void removeFromDirtyList(Gu::PrunerHandle handle); bool processDirtyList(PxU32 index, const Adapter& adapter, float inflation); // void growDirtyList(Gu::PrunerHandle handle); PX_FORCE_INLINE Gu::Pruner* pruner() { return mPruner; } PX_FORCE_INLINE const Gu::Pruner* pruner() const { return mPruner; } Gu::Pruner* mPruner; PxBitMap mDirtyMap; PxArray<Gu::PrunerHandle> mDirtyList; bool mDirtyStatic; // true if dirty list contains a static PX_NOCOPY(PrunerExt) friend class PrunerManager; }; } } #include "foundation/PxHashSet.h" namespace physx { namespace Sq { class CompoundPruner; typedef PxPair<PrunerCompoundId, Gu::PrunerHandle> CompoundPair; typedef PxCoalescedHashSet<CompoundPair > CompoundPrunerSet; // AB: extended compound pruner structure, buffers compound shape changes and flushes them. struct CompoundPrunerExt : public PxUserAllocated { // private: CompoundPrunerExt(); ~CompoundPrunerExt(); void flushMemory(); void preallocate(PxU32 nbShapes); void flushShapes(const Adapter& adapter, float inflation); void addToDirtyList(PrunerCompoundId compoundId, Gu::PrunerHandle handle, const PxTransform& transform); void removeFromDirtyList(PrunerCompoundId compoundId, Gu::PrunerHandle handle); PX_FORCE_INLINE const CompoundPruner* pruner() const { return mPruner; } PX_FORCE_INLINE CompoundPruner* pruner() { return mPruner; } CompoundPruner* mPruner; CompoundPrunerSet mDirtyList; PX_NOCOPY(CompoundPrunerExt) friend class PrunerManager; }; } } #include "foundation/PxMutex.h" #include "SqPrunerData.h" namespace physx { class PxRenderOutput; class PxBVH; class PxSceneLimits; // PT: TODO: decouple from PxSceneLimits namespace Sq { class PrunerManager : public PxUserAllocated { public: PrunerManager(PxU64 contextID, Gu::Pruner* staticPruner, Gu::Pruner* dynamicPruner, PxU32 dynamicTreeRebuildRateHint, float inflation, const PxSceneLimits& limits, const Adapter& adapter); ~PrunerManager(); PrunerData addPrunerShape(const Gu::PrunerPayload& payload, bool dynamic, PrunerCompoundId compoundId, const PxBounds3& bounds, const PxTransform& transform, bool hasPruningStructure=false); void addCompoundShape(const PxBVH& bvh, PrunerCompoundId compoundId, const PxTransform& compoundTransform, PrunerData* prunerData, const Gu::PrunerPayload* payloads, const PxTransform* transforms, bool isDynamic); void markForUpdate(PrunerCompoundId compoundId, PrunerData s, const PxTransform& transform); void removePrunerShape(PrunerCompoundId compoundId, PrunerData shapeData, Gu::PrunerPayloadRemovalCallback* removalCallback); PX_FORCE_INLINE const Gu::Pruner* getPruner(PruningIndex::Enum index) const { return mPrunerExt[index].mPruner; } PX_FORCE_INLINE Gu::Pruner* getPruner(PruningIndex::Enum index) { return mPrunerExt[index].mPruner; } PX_FORCE_INLINE const CompoundPruner* getCompoundPruner() const { return mCompoundPrunerExt.mPruner; } PX_FORCE_INLINE PxU64 getContextId() const { return mContextID; } void preallocate(PxU32 prunerIndex, PxU32 nbShapes); void setDynamicTreeRebuildRateHint(PxU32 dynTreeRebuildRateHint); PX_FORCE_INLINE PxU32 getDynamicTreeRebuildRateHint() const { return mRebuildRateHint; } void flushUpdates(); void forceRebuildDynamicTree(PxU32 prunerIndex); void updateCompoundActor(PrunerCompoundId compoundId, const PxTransform& compoundTransform); void 
removeCompoundActor(PrunerCompoundId compoundId, Gu::PrunerPayloadRemovalCallback* removalCallback); void* prepareSceneQueriesUpdate(PruningIndex::Enum index); void sceneQueryBuildStep(void* handle); void sync(const Gu::PrunerHandle* handles, const PxU32* boundsIndices, const PxBounds3* bounds, const PxTransform32* transforms, PxU32 count, const PxBitMap& ignoredIndices); void afterSync(bool buildStep, bool commit); void shiftOrigin(const PxVec3& shift); void visualize(PxU32 prunerIndex, PxRenderOutput& out) const; void flushMemory(); PX_FORCE_INLINE PxU32 getStaticTimestamp() const { return mStaticTimestamp; } PX_FORCE_INLINE const Adapter& getAdapter() const { return mAdapter; } private: const Adapter& mAdapter; PrunerExt mPrunerExt[PruningIndex::eCOUNT]; CompoundPrunerExt mCompoundPrunerExt; const PxU64 mContextID; PxU32 mStaticTimestamp; PxU32 mRebuildRateHint; const float mInflation; // SQ_PRUNER_EPSILON PxMutex mSQLock; // to make sure only one query updates the dirty pruner structure if multiple queries run in parallel volatile bool mPrunerNeedsUpdating; void flushShapes(); PX_FORCE_INLINE void invalidateStaticTimestamp() { mStaticTimestamp++; } PX_NOCOPY(PrunerManager) }; } } #endif
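// Illustrative sketch (not part of the original sources): the bitmap-plus-array
// "dirty list" used by PrunerExt above (mDirtyMap / mDirtyList), reduced to plain C++
// with hypothetical names. The bitmap rejects duplicates in O(1) while the array keeps
// the set of dirty handles compact for processing.
#include <vector>
#include <cstdint>
#include <cstddef>

struct DirtyList
{
	std::vector<bool>			dirtyMap;	// stand-in for PxBitMap
	std::vector<std::uint32_t>	dirtyList;	// stand-in for PxArray<PrunerHandle>

	void add(std::uint32_t handle)
	{
		if(handle >= dirtyMap.size())
			dirtyMap.resize(handle + 1, false);
		if(dirtyMap[handle])
			return;						// already queued, don't add it twice
		dirtyMap[handle] = true;
		dirtyList.push_back(handle);
	}

	template<class Fn>
	void process(Fn&& fn)
	{
		for(std::size_t i = 0; i < dirtyList.size(); i++)
		{
			const std::uint32_t handle = dirtyList[i];
			dirtyMap[handle] = false;
			fn(handle);					// e.g. recompute bounds and push them into the pruner
		}
		dirtyList.clear();
	}
};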
NVIDIA-Omniverse/PhysX/physx/source/scenequery/include/SqFactory.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SQ_FACTORY_H #define SQ_FACTORY_H #include "foundation/PxSimpleTypes.h" #include "GuFactory.h" #include "SqTypedef.h" namespace physx { namespace Sq { class CompoundPruner; CompoundPruner* createCompoundPruner(PxU64 contextID); } } #endif
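// Illustrative usage sketch (not part of the original sources): obtaining the compound
// pruner through the factory above. The wrapper name is hypothetical; how the returned
// pruner is owned and destroyed is not shown in this header, so that part is left out.
#include "SqFactory.h"

static physx::Sq::CompoundPruner* makeCompoundPruner(physx::PxU64 contextID)
{
	return physx::Sq::createCompoundPruner(contextID);
}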
NVIDIA-Omniverse/PhysX/physx/source/scenequery/include/SqTypedef.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SQ_TYPEDEF_H #define SQ_TYPEDEF_H #include "foundation/PxSimpleTypes.h" #include "GuPrunerTypedef.h" namespace physx { namespace Sq { typedef PxU32 PrunerCompoundId; static const PrunerCompoundId INVALID_COMPOUND_ID = 0xffffffff; typedef PxU32 PrunerData; #define SQ_INVALID_PRUNER_DATA 0xffffffff } } #endif
NVIDIA-Omniverse/PhysX/physx/source/simulationcontroller/src/ScParticleSystemSim.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. #include "foundation/PxPreprocessor.h" #if PX_SUPPORT_GPU_PHYSX #include "ScParticleSystemSim.h" #include "ScParticleSystemCore.h" #include "ScScene.h" using namespace physx; using namespace physx::Dy; Sc::ParticleSystemSim::ParticleSystemSim(ParticleSystemCore& core, Scene& scene) : ActorSim(scene, core), mShapeSim(*this, &core.getShapeCore()) { mLLParticleSystem = scene.createLLParticleSystem(this); mNodeIndex = scene.getSimpleIslandManager()->addParticleSystem(mLLParticleSystem, false); scene.getSimpleIslandManager()->activateNode(mNodeIndex); //mCore.setSim(this); mLLParticleSystem->setElementId(mShapeSim.getElementID()); PxParticleSystemGeometry geometry; geometry.mSolverType = core.getSolverType(); core.getShapeCore().setGeometry(geometry); PxsShapeCore* shapeCore = const_cast<PxsShapeCore*>(&core.getShapeCore().getCore()); mLLParticleSystem->setShapeCore(shapeCore); } Sc::ParticleSystemSim::~ParticleSystemSim() { if (!mLLParticleSystem) return; mScene.destroyLLParticleSystem(*mLLParticleSystem); mScene.getSimpleIslandManager()->removeNode(mNodeIndex); mCore.setSim(NULL); } void Sc::ParticleSystemSim::updateBounds() { mShapeSim.updateBounds(); } void Sc::ParticleSystemSim::updateBoundsInAABBMgr() { mShapeSim.updateBoundsInAABBMgr(); } PxBounds3 Sc::ParticleSystemSim::getBounds() const { return mShapeSim.getBounds(); } bool Sc::ParticleSystemSim::isSleeping() const { return false; } void Sc::ParticleSystemSim::sleepCheck(PxReal dt) { PX_UNUSED(dt); } /*void Sc::ParticleSystemSim::activate() { activateInteractions(*this); } void Sc::ParticleSystemSim::deactivate() { deactivateInteractions(*this); }*/ #endif //PX_SUPPORT_GPU_PHYSX
NVIDIA-Omniverse/PhysX/physx/source/simulationcontroller/src/ScSimulationController.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SC_SIMULATION_CONTROLLER_H #define SC_SIMULATION_CONTROLLER_H #include "PxsSimulationController.h" namespace physx { namespace Sc { class SimulationController : public PxsSimulationController { PX_NOCOPY(SimulationController) public: SimulationController(PxsSimulationControllerCallback* callback) : PxsSimulationController(callback, PxIntFalse) {} virtual ~SimulationController() {} virtual void updateScBodyAndShapeSim(PxsTransformCache& cache, Bp::BoundsArray& boundArray, PxBaseTask* continuation) PX_OVERRIDE; virtual void updateArticulationAfterIntegration(PxsContext* llContext, Bp::AABBManagerBase* aabbManager, PxArray<Sc::BodySim*>& ccdBodies, PxBaseTask* continuation, IG::IslandSim& islandSim, float dt) PX_OVERRIDE; }; } } #endif
NVIDIA-Omniverse/PhysX/physx/source/simulationcontroller/src/ScSimStats.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SC_SIM_STATS_H #define SC_SIM_STATS_H #include "geometry/PxGeometry.h" #include "PxSimulationStatistics.h" #include "foundation/PxAtomic.h" #include "foundation/PxUserAllocated.h" namespace physx { struct PxvSimStats; namespace Sc { /* Description: contains statistics for the scene. */ class SimStats : public PxUserAllocated { public: SimStats(); void clear(); //set counters to zero void simStart(); void readOut(PxSimulationStatistics& dest, const PxvSimStats& simStats) const; PX_INLINE void incBroadphaseAdds() { numBroadPhaseAddsPending++; } PX_INLINE void incBroadphaseRemoves() { numBroadPhaseRemovesPending++; } private: // Broadphase adds/removes for the current simulation step PxU32 numBroadPhaseAdds; PxU32 numBroadPhaseRemoves; // Broadphase adds/removes for the next simulation step PxU32 numBroadPhaseAddsPending; PxU32 numBroadPhaseRemovesPending; public: typedef PxI32 TriggerPairCountsNonVolatile[PxGeometryType::eCONVEXMESH+1][PxGeometryType::eGEOMETRY_COUNT]; typedef volatile TriggerPairCountsNonVolatile TriggerPairCounts; TriggerPairCounts numTriggerPairs; PxU64 gpuMemSizeParticles; PxU64 gpuMemSizeSoftBodies; PxU64 gpuMemSizeFEMCloths; PxU64 gpuMemSizeHairSystems; }; } // namespace Sc } #endif
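// Illustrative sketch (not part of the original sources): the "current vs pending"
// counter pairing suggested by the member names above (numBroadPhaseAdds vs
// numBroadPhaseAddsPending), reduced to plain C++. Events recorded while a step is in
// flight accumulate in the pending counter and are promoted when the next step starts.
// What simStart() actually does is not shown in this header, so this is an assumption
// based on the naming.
struct StepCounters
{
	unsigned int current;	// value reported for the step being simulated
	unsigned int pending;	// value accumulated for the next step

	StepCounters() : current(0), pending(0)	{}

	void recordEvent()	{ pending++; }

	void stepStart()
	{
		current = pending;	// promote the accumulated count
		pending = 0;
	}
};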
NVIDIA-Omniverse/PhysX/physx/source/simulationcontroller/src/ScTriggerInteraction.h
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.

#ifndef SC_TRIGGER_INTERACTION_H
#define SC_TRIGGER_INTERACTION_H

#include "ScElementSimInteraction.h"
#include "ScShapeSim.h"
#include "GuOverlapTests.h"

namespace physx
{
namespace Sc
{
	class TriggerInteraction : public ElementSimInteraction
	{
	public:
		enum TriggerFlag
		{
			PAIR_FLAGS_MASK		= ((PxPairFlag::eNOTIFY_TOUCH_LOST << 1) - 1),	// Bits where the PxPairFlags eNOTIFY_TOUCH_FOUND and eNOTIFY_TOUCH_LOST get stored
			NEXT_FREE			= ((PAIR_FLAGS_MASK << 1) & ~PAIR_FLAGS_MASK),
			PROCESS_THIS_FRAME	= (NEXT_FREE << 0),	// the trigger pair is new or the pose of an actor was set -> initial processing required.
													// This is important to cover cases where a static or kinematic
													// (non-moving) trigger is created and overlaps with a sleeping
													// object. Or for the case where a static/kinematic is teleported to a new
													// location. TOUCH_FOUND should still get sent in that case.
			LAST				= (NEXT_FREE << 1)
		};

		TriggerInteraction(ShapeSimBase& triggerShape, ShapeSimBase& otherShape);
		~TriggerInteraction();

		PX_FORCE_INLINE Gu::TriggerCache&	getTriggerCache()			{ return mTriggerCache; }
		PX_FORCE_INLINE ShapeSimBase&		getTriggerShape()	const	{ return static_cast<ShapeSimBase&>(getElement0()); }
		PX_FORCE_INLINE ShapeSimBase&		getOtherShape()		const	{ return static_cast<ShapeSimBase&>(getElement1()); }

		PX_FORCE_INLINE bool		lastFrameHadContacts()	const				{ return mLastFrameHadContacts; }
		PX_FORCE_INLINE void		updateLastFrameHadContacts(bool hasContact)	{ mLastFrameHadContacts = hasContact; }

		PX_FORCE_INLINE PxPairFlags	getTriggerFlags()	const	{ return PxPairFlags(mFlags & PAIR_FLAGS_MASK); }
		PX_FORCE_INLINE void		setTriggerFlags(PxPairFlags triggerFlags);

		PX_FORCE_INLINE void		raiseFlag(TriggerFlag flag)			{ mFlags |= flag; }
		PX_FORCE_INLINE void		clearFlag(TriggerFlag flag)			{ mFlags &= ~flag; }
		PX_FORCE_INLINE PxIntBool	readFlag(TriggerFlag flag)	const	{ return PxIntBool(mFlags & flag); }

		PX_FORCE_INLINE void		forceProcessingThisFrame(Sc::Scene& scene);

		bool onActivate(void*);
		bool onDeactivate();

	protected:
		Gu::TriggerCache	mTriggerCache;
		bool				mLastFrameHadContacts;
	};

} // namespace Sc

PX_FORCE_INLINE void Sc::TriggerInteraction::setTriggerFlags(PxPairFlags triggerFlags)
{
	PX_ASSERT(PxU32(triggerFlags) < (PxPairFlag::eDETECT_CCD_CONTACT << 1));	// to find out if a new PxPairFlag has been added in which case PAIR_FLAGS_MASK needs to get adjusted

#if PX_CHECKED
	if (triggerFlags & PxPairFlag::eNOTIFY_TOUCH_PERSISTS)
	{
		PX_WARN_ONCE("Trigger pairs do not support PxPairFlag::eNOTIFY_TOUCH_PERSISTS events any longer.");
	}
#endif

	PxU32 newFlags = mFlags;
	PxU32 fl = PxU32(triggerFlags) & PxU32(PxPairFlag::eNOTIFY_TOUCH_FOUND|PxPairFlag::eNOTIFY_TOUCH_LOST);
	newFlags &= (~PAIR_FLAGS_MASK);	// clear old flags
	newFlags |= fl;
	mFlags = newFlags;
}

PX_FORCE_INLINE void Sc::TriggerInteraction::forceProcessingThisFrame(Sc::Scene& scene)
{
	raiseFlag(PROCESS_THIS_FRAME);

	if (!readInteractionFlag(InteractionFlag::eIS_ACTIVE))
	{
		raiseInteractionFlag(InteractionFlag::eIS_ACTIVE);
		scene.notifyInteractionActivated(this);
	}
}
}

#endif
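Illustrative sketch (not part of the PhysX sources): the TriggerFlag values above pack the PxPairFlag notify bits into the low bits of the interaction's flag word and place the trigger's own flags in the first free bit above them. The same mask derivation can be checked in isolation with plain enums; the two flag values below are assumptions chosen only to mirror the pattern, not the real PxPairFlag bits.

#include <cstdint>

// Stand-ins for the relevant PxPairFlag bits (assumed values).
enum PairFlag : uint32_t
{
	eNOTIFY_TOUCH_FOUND = (1u << 2),
	eNOTIFY_TOUCH_LOST  = (1u << 3)
};

enum TriggerFlag : uint32_t
{
	// Every bit up to and including eNOTIFY_TOUCH_LOST: 0b1111 here.
	PAIR_FLAGS_MASK    = ((eNOTIFY_TOUCH_LOST << 1) - 1),
	// First bit that is not used by the stored pair flags: 0b10000 here.
	NEXT_FREE          = ((PAIR_FLAGS_MASK << 1) & ~PAIR_FLAGS_MASK),
	PROCESS_THIS_FRAME = (NEXT_FREE << 0)
};

static_assert(PAIR_FLAGS_MASK == 0xF, "mask covers every bit below NEXT_FREE");
static_assert((PROCESS_THIS_FRAME & PAIR_FLAGS_MASK) == 0, "private flags never collide with stored pair flags");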
NVIDIA-Omniverse/PhysX/physx/source/simulationcontroller/src/ScConstraintSim.cpp
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.

#include "ScBodySim.h"
#include "ScStaticSim.h"
#include "ScConstraintCore.h"
#include "ScConstraintSim.h"
#include "ScConstraintInteraction.h"
#include "ScElementSimInteraction.h"

using namespace physx;
using namespace Sc;

static ConstraintInteraction* createInteraction(ConstraintSim* sim, RigidCore* r0, RigidCore* r1, Scene& scene)
{
	return scene.getConstraintInteractionPool()->construct(sim,
		r0 ? *r0->getSim() : scene.getStaticAnchor(),
		r1 ? *r1->getSim() : scene.getStaticAnchor());
}

static void releaseInteraction(ConstraintInteraction* interaction, const ConstraintSim* sim, Scene& scene)
{
	if(!sim->isBroken())
		interaction->destroy();

	scene.getConstraintInteractionPool()->destroy(interaction);
}

Sc::ConstraintSim::ConstraintSim(ConstraintCore& core, RigidCore* r0, RigidCore* r1, Scene& scene) :
	mScene		(scene),
	mCore		(core),
	mInteraction(NULL),
	mFlags		(0)
{
	mBodies[0] = (r0 && (r0->getActorCoreType() != PxActorType::eRIGID_STATIC)) ? static_cast<BodySim*>(r0->getSim()) : 0;
	mBodies[1] = (r1 && (r1->getActorCoreType() != PxActorType::eRIGID_STATIC)) ? static_cast<BodySim*>(r1->getSim()) : 0;

	const PxU32 id = scene.getConstraintIDTracker().createID();
	mLowLevelConstraint.index = id;

	PxPinnedArray<Dy::ConstraintWriteback>& writeBackPool = scene.getDynamicsContext()->getConstraintWriteBackPool();
	if(id >= writeBackPool.capacity())
		writeBackPool.reserve(writeBackPool.capacity() * 2);

	writeBackPool.resize(PxMax(writeBackPool.size(), id + 1));
	writeBackPool[id].initialize();

	if(!createLLConstraint())
		return;

	PxReal linBreakForce, angBreakForce;
	core.getBreakForce(linBreakForce, angBreakForce);
	if ((linBreakForce < PX_MAX_F32) || (angBreakForce < PX_MAX_F32))
		setFlag(eBREAKABLE);

	core.setSim(this);

	mInteraction = createInteraction(this, r0, r1, scene);
	PX_ASSERT(!mInteraction->isRegistered());	// constraint interactions must not register in the scene, there is a list of Sc::ConstraintSim instead
}

Sc::ConstraintSim::~ConstraintSim()
{
	PX_ASSERT(mInteraction);	// This is fine now, a body which gets removed from the scene removes all constraints automatically
	PX_ASSERT(!mInteraction->isRegistered());	// constraint interactions must not register in the scene, there is a list of Sc::ConstraintSim instead

	releaseInteraction(mInteraction, this, mScene);

	mScene.getConstraintIDTracker().releaseID(mLowLevelConstraint.index);

	destroyLLConstraint();

	mCore.setSim(NULL);
}

static PX_FORCE_INLINE void setLLBodies(Dy::Constraint& c, BodySim* b0, BodySim* b1)
{
	PxsRigidBody* body0 = b0 ? &b0->getLowLevelBody() : NULL;
	PxsRigidBody* body1 = b1 ? &b1->getLowLevelBody() : NULL;

	c.body0 = body0;
	c.body1 = body1;

	c.bodyCore0 = body0 ? &body0->getCore() : NULL;
	c.bodyCore1 = body1 ? &body1->getCore() : NULL;
}

bool Sc::ConstraintSim::createLLConstraint()
{
	ConstraintCore& core = getCore();
	const PxU32 constantBlockSize = core.getConstantBlockSize();
	void* constantBlock = mScene.allocateConstraintBlock(constantBlockSize);
	if(!constantBlock)
		return PxGetFoundation().error(PxErrorCode::eINTERNAL_ERROR, PX_FL, "Constraint: could not allocate low-level resources.");

	//Ensure the constant block isn't just random data because some functions may attempt to use it before it is
	//setup. Specifically pvd visualization of joints
	//-CN
	PxMemZero(constantBlock, constantBlockSize);

	Dy::Constraint& llc = mLowLevelConstraint;
	core.getBreakForce(llc.linBreakForce, llc.angBreakForce);
	llc.flags = core.getFlags();
	llc.constantBlockSize = PxU16(constantBlockSize);
	llc.solverPrep = core.getSolverPrep();
	llc.constantBlock = constantBlock;
	llc.minResponseThreshold = core.getMinResponseThreshold();
	//llc.index = mLowLevelConstraint.index;

	setLLBodies(llc, mBodies[0], mBodies[1]);

	return true;
}

void Sc::ConstraintSim::destroyLLConstraint()
{
	if(mLowLevelConstraint.constantBlock)
		mScene.deallocateConstraintBlock(mLowLevelConstraint.constantBlock, mLowLevelConstraint.constantBlockSize);
}

void Sc::ConstraintSim::setBodies(RigidCore* r0, RigidCore* r1)
{
	PX_ASSERT(mInteraction);
	releaseInteraction(mInteraction, this, mScene);

	BodySim* b0 = (r0 && (r0->getActorCoreType() != PxActorType::eRIGID_STATIC)) ? static_cast<BodySim*>(r0->getSim()) : 0;
	BodySim* b1 = (r1 && (r1->getActorCoreType() != PxActorType::eRIGID_STATIC)) ? static_cast<BodySim*>(r1->getSim()) : 0;

	setLLBodies(mLowLevelConstraint, b0, b1);

	mBodies[0] = b0;
	mBodies[1] = b1;

	mInteraction = createInteraction(this, r0, r1, mScene);
}

void Sc::ConstraintSim::getForce(PxVec3& lin, PxVec3& ang)
{
	const PxReal recipDt = mScene.getOneOverDt();
	Dy::ConstraintWriteback& solverOutput = mScene.getDynamicsContext()->getConstraintWriteBackPool()[mLowLevelConstraint.index];

	lin = solverOutput.linearImpulse * recipDt;
	ang = solverOutput.angularImpulse * recipDt;
}

void Sc::ConstraintSim::setBreakForceLL(PxReal linear, PxReal angular)
{
	PxU8 wasBreakable = readFlag(eBREAKABLE);
	PxU8 isBreakable;
	if ((linear < PX_MAX_F32) || (angular < PX_MAX_F32))
		isBreakable = eBREAKABLE;
	else
		isBreakable = 0;

	if (isBreakable != wasBreakable)
	{
		if (isBreakable)
		{
			PX_ASSERT(!readFlag(eCHECK_MAX_FORCE_EXCEEDED));
			setFlag(eBREAKABLE);
			if (mInteraction->readInteractionFlag(InteractionFlag::eIS_ACTIVE))
				mScene.addActiveBreakableConstraint(this, mInteraction);
		}
		else
		{
			if (readFlag(eCHECK_MAX_FORCE_EXCEEDED))
				mScene.removeActiveBreakableConstraint(this);
			clearFlag(eBREAKABLE);
		}
	}

	mLowLevelConstraint.linBreakForce = linear;
	mLowLevelConstraint.angBreakForce = angular;
}

void Sc::ConstraintSim::postFlagChange(PxConstraintFlags /*oldFlags*/, PxConstraintFlags newFlags)
{
	mLowLevelConstraint.flags = newFlags;
}
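Illustrative sketch (not part of the PhysX sources): the constructor above grows the constraint write-back pool geometrically and then resizes it so that the newly issued id is always a valid index. A standalone version of the same grow-then-resize idiom over std::vector follows; Writeback and ensureSlot are stand-ins, and the std::max guard in reserve is only for the general case (the original can rely on ids being handed out consecutively, so doubling alone is enough there).

#include <algorithm>
#include <cstdint>
#include <vector>

struct Writeback { float linearImpulse[3]; float angularImpulse[3]; };

// Ensures pool[id] exists: reserve keeps reallocation amortized,
// resize only extends the size far enough to cover 'id'.
void ensureSlot(std::vector<Writeback>& pool, uint32_t id)
{
	if (id >= pool.capacity())
		pool.reserve(std::max<std::size_t>(pool.capacity() * 2, id + 1));

	pool.resize(std::max<std::size_t>(pool.size(), id + 1));
	pool[id] = Writeback{};	// plays the role of initialize() in the original
}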
NVIDIA-Omniverse/PhysX/physx/source/simulationcontroller/src/ScFEMClothSim.h
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.

#ifndef PX_PHYSICS_FEMCLOTH_SIM
#define PX_PHYSICS_FEMCLOTH_SIM

#include "foundation/PxPreprocessor.h"
#if PX_SUPPORT_GPU_PHYSX
#include "foundation/PxUserAllocated.h"
#include "DyFEMCloth.h"
#include "ScFEMClothCore.h"
#include "ScFEMClothShapeSim.h"
#include "ScActorSim.h" // to be deleted

namespace physx
{
namespace Sc
{
	class Scene;

	class FEMClothSim : public ActorSim
	{
		PX_NOCOPY(FEMClothSim)
	public:
		FEMClothSim(FEMClothCore& core, Scene& scene);
		~FEMClothSim();

		PX_INLINE Dy::FEMCloth*		getLowLevelFEMCloth() const	{ return mLLFEMCloth; }
		PX_INLINE FEMClothCore&		getCore() const				{ return static_cast<FEMClothCore&>(mCore); }
		virtual PxActor*			getPxActor() const			{ return getCore().getPxActor(); }

		void updateBounds();
		void updateBoundsInAABBMgr();
		PxBounds3 getBounds() const;

		bool isSleeping() const;
		PX_FORCE_INLINE bool isActive() const { return !isSleeping(); }

		void setActive(bool active, bool asPartOfCreation=false);

		void onSetWakeCounter();

		void attachShapeCore(ShapeCore* core);

		FEMClothShapeSim& getShapeSim() { return mShapeSim; }

	private:
		Dy::FEMCloth*		mLLFEMCloth;
		FEMClothShapeSim	mShapeSim;
		PxU32				mIslandNodeIndex;

		void activate();
		void deactivate();
	};

} // namespace Sc
}

#endif
#endif
NVIDIA-Omniverse/PhysX/physx/source/simulationcontroller/src/ScParticleSystemShapeSim.cpp
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.

#include "foundation/PxPreprocessor.h"
#if PX_SUPPORT_GPU_PHYSX
#include "ScParticleSystemShapeSim.h"
#include "ScNPhaseCore.h"
#include "ScParticleSystemSim.h"
#include "PxsContext.h"

using namespace physx;

///////////////////////////////////////////////////////////////////////////////

Sc::ParticleSystemShapeSim::ParticleSystemShapeSim(ParticleSystemSim& particleSim, const ParticleSystemShapeCore* core) :
	ShapeSimBase(particleSim, core)
{
	mLLShape.mBodySimIndex_GPU = PxNodeIndex(PX_INVALID_NODE);
	mLLShape.mElementIndex_GPU = PX_INVALID_U32;
	createLowLevelVolume();
}

///////////////////////////////////////////////////////////////////////////////

Sc::ParticleSystemShapeSim::~ParticleSystemShapeSim()
{
	if (isInBroadPhase())
		destroyLowLevelVolume();

	PX_ASSERT(!isInBroadPhase());
}

///////////////////////////////////////////////////////////////////////////////

void Sc::ParticleSystemShapeSim::getFilterInfo(PxFilterObjectAttributes& filterAttr, PxFilterData& filterData) const
{
	filterAttr = 0;
	setFilterObjectAttributeType(filterAttr, PxFilterObjectType::ePARTICLESYSTEM);
	filterData = getBodySim().getCore().getShapeCore().getSimulationFilterData();
}

void Sc::ParticleSystemShapeSim::updateBounds()
{
	Scene& scene = getScene();

	PxBounds3 worldBounds = PxBounds3(PxVec3(0.f), PxVec3(0.f));
	const PxReal contactOffset = getBodySim().getCore().getContactOffset();
	worldBounds.fattenSafe(contactOffset); // fatten for fast moving colliders
	scene.getBoundsArray().setBounds(worldBounds, getElementID());
	scene.getAABBManager()->getChangedAABBMgActorHandleMap().growAndSet(getElementID());
}

///////////////////////////////////////////////////////////////////////////////

void Sc::ParticleSystemShapeSim::updateBoundsInAABBMgr()
{
	//we are updating the bound in GPU so we just need to set the actor handle in CPU to make sure
	//the GPU BP will process the particles
	if (!(static_cast<Sc::ParticleSystemSim&>(getActor()).getCore().getFlags() & PxParticleFlag::eDISABLE_RIGID_COLLISION))
	{
		Scene& scene = getScene();
		scene.getAABBManager()->getChangedAABBMgActorHandleMap().growAndSet(getElementID());
		scene.getAABBManager()->setGPUStateChanged();
	}
}

PxBounds3 Sc::ParticleSystemShapeSim::getBounds() const
{
	PxBounds3 bounds = getScene().getBoundsArray().getBounds(getElementID());
	return bounds;
}

///////////////////////////////////////////////////////////////////////////////

void Sc::ParticleSystemShapeSim::createLowLevelVolume()
{
	//PX_ASSERT(getWorldBounds().isFinite());

	const PxU32 index = getElementID();

	if (!(static_cast<Sc::ParticleSystemSim&>(getActor()).getCore().getFlags() & PxParticleFlag::eDISABLE_RIGID_COLLISION))
	{
		getScene().getBoundsArray().setBounds(PxBounds3(PxVec3(PX_MAX_BOUNDS_EXTENTS), PxVec3(-PX_MAX_BOUNDS_EXTENTS)), index);
		mInBroadPhase = true;
	}
	else
		getScene().getAABBManager()->reserveSpaceForBounds(index);

	{
		const PxU32 group = Bp::FilterGroup::eDYNAMICS_BASE + getActor().getActorID();
		const PxU32 type = Bp::FilterType::PARTICLESYSTEM;
		const PxReal contactOffset = getBodySim().getCore().getContactOffset();
		addToAABBMgr(contactOffset, Bp::FilterGroup::Enum((group << BP_FILTERING_TYPE_SHIFT_BIT) | type), Bp::ElementType::eSHAPE);
	}

	// PT: TODO: what's the difference between "getContactOffset()" and "getBodySim().getCore().getContactOffset()" above?
	getScene().updateContactDistance(index, getContactOffset());

	PxsTransformCache& cache = getScene().getLowLevelContext()->getTransformCache();
	cache.initEntry(index);

	PxTransform idt(PxIdentity);
	cache.setTransformCache(idt, 0, index);
}

///////////////////////////////////////////////////////////////////////////////

void Sc::ParticleSystemShapeSim::destroyLowLevelVolume()
{
	if (!isInBroadPhase())
		return;

	Sc::Scene& scene = getScene();
	PxsContactManagerOutputIterator outputs = scene.getLowLevelContext()->getNphaseImplementationContext()->getContactManagerOutputs();
	scene.getNPhaseCore()->onVolumeRemoved(this, 0, outputs);
	removeFromAABBMgr();
}

///////////////////////////////////////////////////////////////////////////////

Sc::ParticleSystemSim& Sc::ParticleSystemShapeSim::getBodySim() const
{
	return static_cast<ParticleSystemSim&>(getActor());
}

#endif //PX_SUPPORT_GPU_PHYSX
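Illustrative sketch (not part of the PhysX sources): createLowLevelVolume above packs a per-actor broad-phase group and a filter type into one integer, shifting the group up and keeping the type in the low bits. A standalone version of that packing follows; the 4-bit type field is an assumption made only for the sketch, the real BP_FILTERING_TYPE_SHIFT_BIT may differ.

#include <cassert>
#include <cstdint>

// Assumed layout: low 4 bits = filter type, remaining bits = group id.
constexpr uint32_t kTypeShift = 4;
constexpr uint32_t kTypeMask  = (1u << kTypeShift) - 1u;

constexpr uint32_t packFilterGroup(uint32_t group, uint32_t type)
{
	return (group << kTypeShift) | type;
}

constexpr uint32_t groupOf(uint32_t packed) { return packed >> kTypeShift; }
constexpr uint32_t typeOf(uint32_t packed)  { return packed & kTypeMask; }

int main()
{
	const uint32_t packed = packFilterGroup(/*group*/ 1234u, /*type, e.g. a particle-system slot*/ 5u);
	assert(groupOf(packed) == 1234u && typeOf(packed) == 5u);
	return 0;
}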
NVIDIA-Omniverse/PhysX/physx/source/simulationcontroller/src/ScParticleSystemShapeCore.h
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.

#ifndef SC_PARTICLESYSTEM_SHAPECORE_H
#define SC_PARTICLESYSTEM_SHAPECORE_H

#include "foundation/PxPreprocessor.h"
#if PX_SUPPORT_GPU_PHYSX
#include "foundation/PxUserAllocated.h"
#include "PxvGeometry.h"
#include "foundation/PxUtilities.h"
#include "PxFiltering.h"
#include "PxShape.h"
#include "ScShapeCore.h"
#include "DyParticleSystemCore.h"
#include "common/PxRenderOutput.h"

namespace physx
{
namespace Sc
{
	class Scene;
	class ParticleSystemCore;
	class ParticleSystemSim;

	class ParticleSystemShapeCore : public Sc::ShapeCore
	{
	public:
		// PX_SERIALIZATION
		ParticleSystemShapeCore(const PxEMPTY);
		//~PX_SERIALIZATION

		ParticleSystemShapeCore();
		~ParticleSystemShapeCore();

		PX_FORCE_INLINE const Dy::ParticleSystemCore&	getLLCore() const	{ return mLLCore; }
		PX_FORCE_INLINE Dy::ParticleSystemCore&			getLLCore()			{ return mLLCore; }

		void initializeLLCoreData(PxU32 maxNeighborhood);

		void addParticleBuffer(PxParticleBuffer* particleBuffer);
		void removeParticleBuffer(PxParticleBuffer* particleBuffer);

		PxU64 getGpuMemStat() { return mGpuMemStat; }

	protected:
		Dy::ParticleSystemCore	mLLCore;
		PxU64					mGpuMemStat;
	};

} // namespace Sc
}

#endif
#endif
NVIDIA-Omniverse/PhysX/physx/source/simulationcontroller/src/ScElementSimInteraction.h
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.

#ifndef SC_ELEMENT_SIM_INTERACTION_H
#define SC_ELEMENT_SIM_INTERACTION_H

#include "ScInteraction.h"
#include "ScElementSim.h"

namespace physx
{
namespace Sc
{
	class ElementSimInteraction : public Interaction
	{
	public:
		PX_FORCE_INLINE ElementSim& getElement0() const { return mElement0; }
		PX_FORCE_INLINE ElementSim& getElement1() const { return mElement1; }

	protected:
		PX_INLINE ElementSimInteraction(ElementSim& element0, ElementSim& element1, InteractionType::Enum type, PxU8 flags);
		~ElementSimInteraction() {}

		ElementSimInteraction& operator=(const ElementSimInteraction&);

		ElementSim& mElement0;
		ElementSim& mElement1;

		PxU32 mFlags;	// PT: moved there in padding bytes, from ShapeInteraction
	};

} // namespace Sc

//////////////////////////////////////////////////////////////////////////

PX_INLINE Sc::ElementSimInteraction::ElementSimInteraction(ElementSim& element0, ElementSim& element1, InteractionType::Enum type, PxU8 flags) :
	Interaction	(element0.getActor(), element1.getActor(), type, flags),
	mElement0	(element0),
	mElement1	(element1)
{
}
}

#endif
NVIDIA-Omniverse/PhysX/physx/source/simulationcontroller/src/ScConstraintSim.h
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.

#ifndef SC_CONSTRAINT_SIM_H
#define SC_CONSTRAINT_SIM_H

#include "foundation/PxArray.h"
#include "PxSimulationEventCallback.h"
#include "DyConstraint.h"

namespace physx
{
namespace Sc
{
	class Scene;
	class ConstraintInteraction;
	class ConstraintCore;
	class RigidCore;
	class BodySim;
	class RigidSim;

	class ConstraintSim : public PxUserAllocated
	{
		PX_NOCOPY(ConstraintSim)
	public:
		enum Enum
		{
			eBREAKABLE					= (1<<1),	// The constraint can break
			eCHECK_MAX_FORCE_EXCEEDED	= (1<<2),	// This constraint will get tested for breakage at the end of the sim step
			eBROKEN						= (1<<3)
		};

		ConstraintSim(ConstraintCore& core, RigidCore* r0, RigidCore* r1, Scene& scene);
		~ConstraintSim();

		void setBodies(RigidCore* r0, RigidCore* r1);

		void setBreakForceLL(PxReal linear, PxReal angular);

		PX_FORCE_INLINE void setMinResponseThresholdLL(PxReal threshold)	{ mLowLevelConstraint.minResponseThreshold = threshold; }

		PX_FORCE_INLINE const void* getConstantsLL() const	{ return mLowLevelConstraint.constantBlock; }

		void postFlagChange(PxConstraintFlags oldFlags, PxConstraintFlags newFlags);

		PX_FORCE_INLINE const Dy::Constraint&	getLowLevelConstraint() const	{ return mLowLevelConstraint; }
		PX_FORCE_INLINE Dy::Constraint&			getLowLevelConstraint()			{ return mLowLevelConstraint; }
		PX_FORCE_INLINE ConstraintCore&			getCore() const					{ return mCore; }

		PX_FORCE_INLINE BodySim* getBody(PxU32 i) const	// for static actors or world attached constraints NULL is returned
		{
			return mBodies[i];
		}

		void getForce(PxVec3& force, PxVec3& torque);

		PX_FORCE_INLINE PxU8	readFlag(PxU8 flag) const	{ return PxU8(mFlags & flag); }
		PX_FORCE_INLINE void	setFlag(PxU8 flag)			{ mFlags |= flag; }
		PX_FORCE_INLINE void	clearFlag(PxU8 flag)		{ mFlags &= ~flag; }

		PX_FORCE_INLINE PxU32	isBroken() const	{ return PxU32(mFlags) & ConstraintSim::eBROKEN; }

		PX_FORCE_INLINE const ConstraintInteraction* getInteraction() const { return mInteraction; }

	private:
		bool createLLConstraint();
		void destroyLLConstraint();

		Dy::Constraint			mLowLevelConstraint;
		Scene&					mScene;
		ConstraintCore&			mCore;
		ConstraintInteraction*	mInteraction;	// PT: why do we have an interaction object here?
		BodySim*				mBodies[2];
		PxU8					mFlags;
	};

} // namespace Sc
}

#endif
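Illustrative sketch (not part of the PhysX sources): ConstraintSim keeps its state in a single PxU8 behind read/set/clear helpers, and breakability is derived from whether either break force is finite. A standalone version of the same bookkeeping, with plain uint8_t and float stand-ins, might read as follows.

#include <cassert>
#include <cstdint>
#include <limits>

enum : uint8_t { eBREAKABLE = 1 << 1, eCHECK_MAX_FORCE_EXCEEDED = 1 << 2, eBROKEN = 1 << 3 };

struct ConstraintState
{
	uint8_t flags = 0;

	uint8_t read(uint8_t f) const	{ return uint8_t(flags & f); }
	void    set(uint8_t f)			{ flags |= f; }
	void    clear(uint8_t f)		{ flags &= uint8_t(~f); }
};

// A constraint is breakable as soon as either break force is below "infinity".
bool isBreakable(float linBreakForce, float angBreakForce)
{
	const float inf = std::numeric_limits<float>::max();	// stand-in for PX_MAX_F32
	return (linBreakForce < inf) || (angBreakForce < inf);
}

int main()
{
	ConstraintState s;
	if (isBreakable(1000.0f, std::numeric_limits<float>::max()))
		s.set(eBREAKABLE);
	assert(s.read(eBREAKABLE) && !s.read(eBROKEN));
	return 0;
}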
NVIDIA-Omniverse/PhysX/physx/source/simulationcontroller/src/ScElementSim.cpp
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.

#include "ScElementSim.h"
#include "ScElementSimInteraction.h"
#include "ScSimStats.h"

using namespace physx;
using namespace Sc;

static PX_FORCE_INLINE bool interactionHasElement(const Interaction* it, const ElementSim* elem)
{
	if(it->readInteractionFlag(InteractionFlag::eRB_ELEMENT))
	{
		PX_ASSERT(	(it->getType() == InteractionType::eMARKER) ||
					(it->getType() == InteractionType::eOVERLAP) ||
					(it->getType() == InteractionType::eTRIGGER) );

		const ElementSimInteraction* ei = static_cast<const ElementSimInteraction*>(it);
		if((&ei->getElement0() == elem) || (&ei->getElement1() == elem))
			return true;
	}
	return false;
}

Sc::ElementSimInteraction* Sc::ElementSim::ElementInteractionIterator::getNext()
{
	while(mInteractions!=mInteractionsLast)
	{
		Interaction* it = *mInteractions++;
		if(interactionHasElement(it, mElement))
			return static_cast<ElementSimInteraction*>(it);
	}
	return NULL;
}

Sc::ElementSimInteraction* Sc::ElementSim::ElementInteractionReverseIterator::getNext()
{
	while(mInteractions!=mInteractionsLast)
	{
		Interaction* it = *--mInteractionsLast;
		if(interactionHasElement(it, mElement))
			return static_cast<ElementSimInteraction*>(it);
	}
	return NULL;
}

namespace
{
	class ElemSimPtrTableStorageManager : public Cm::PtrTableStorageManager, public PxUserAllocated
	{
		PX_NOCOPY(ElemSimPtrTableStorageManager)
	public:
		ElemSimPtrTableStorageManager() {}
		~ElemSimPtrTableStorageManager() {}

		// PtrTableStorageManager
		virtual void** allocate(PxU32 capacity) PX_OVERRIDE
		{
			return PX_ALLOCATE(void*, capacity, "CmPtrTable pointer array");
		}

		virtual void deallocate(void** addr, PxU32 /*capacity*/) PX_OVERRIDE
		{
			PX_FREE(addr);
		}

		virtual bool canReuse(PxU32 /*originalCapacity*/, PxU32 /*newCapacity*/) PX_OVERRIDE
		{
			return false;
		}
		//~PtrTableStorageManager
	};

	ElemSimPtrTableStorageManager gElemSimTableStorageManager;
}

static PX_FORCE_INLINE void onElementAttach(ElementSim& element, ShapeManager& manager)
{
	PX_ASSERT(element.mShapeArrayIndex == 0xffffffff);

	element.mShapeArrayIndex = manager.mShapes.getCount();
	manager.mShapes.add(&element, gElemSimTableStorageManager);
}

void Sc::ShapeManager::onElementDetach(ElementSim& element)
{
	const PxU32 index = element.mShapeArrayIndex;
	PX_ASSERT(index != 0xffffffff);
	PX_ASSERT(mShapes.getCount());

	void** ptrs = mShapes.getPtrs();
	PX_ASSERT(reinterpret_cast<ElementSim*>(ptrs[index]) == &element);

	const PxU32 last = mShapes.getCount() - 1;
	if (index != last)
	{
		ElementSim* moved = reinterpret_cast<ElementSim*>(ptrs[last]);
		PX_ASSERT(moved->mShapeArrayIndex == last);
		moved->mShapeArrayIndex = index;
	}

	mShapes.replaceWithLast(index, gElemSimTableStorageManager);
	element.mShapeArrayIndex = 0xffffffff;
}

Sc::ElementSim::ElementSim(ActorSim& actor) :
	mActor			(actor),
	mInBroadPhase	(false),
	mShapeArrayIndex(0xffffffff)
{
	initID();

	onElementAttach(*this, actor);
}

Sc::ElementSim::~ElementSim()
{
	PX_ASSERT(!mInBroadPhase);
	releaseID();

	mActor.onElementDetach(*this);
}

void Sc::ElementSim::addToAABBMgr(PxReal contactDistance, Bp::FilterGroup::Enum group, Bp::ElementType::Enum type)
{
	Sc::Scene& scene = getScene();
	if(!scene.getAABBManager()->addBounds(mElementID, contactDistance, group, this, mActor.getActorCore().getAggregateID(), type))
		return;

	mInBroadPhase = true;
#if PX_ENABLE_SIM_STATS
	scene.getStatsInternal().incBroadphaseAdds();
#else
	PX_CATCH_UNDEFINED_ENABLE_SIM_STATS
#endif
}

bool Sc::ElementSim::removeFromAABBMgr()
{
	PX_ASSERT(mInBroadPhase);
	Sc::Scene& scene = getScene();

	bool res = scene.getAABBManager()->removeBounds(mElementID);
	scene.getAABBManager()->getChangedAABBMgActorHandleMap().growAndReset(mElementID);

	mInBroadPhase = false;
#if PX_ENABLE_SIM_STATS
	scene.getStatsInternal().incBroadphaseRemoves();
#else
	PX_CATCH_UNDEFINED_ENABLE_SIM_STATS
#endif
	return res;
}
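Illustrative sketch (not part of the PhysX sources): onElementDetach above removes an element in O(1) by moving the last pointer into the vacated slot and patching that element's stored index. The same remove-by-swap bookkeeping over a std::vector, with stand-in types, looks like this.

#include <cassert>
#include <cstdint>
#include <vector>

struct Element { uint32_t shapeArrayIndex = 0xffffffffu; };

struct ShapeTable
{
	std::vector<Element*> shapes;

	void add(Element& e)
	{
		e.shapeArrayIndex = uint32_t(shapes.size());
		shapes.push_back(&e);
	}

	// O(1) removal: move the last entry into the freed slot and fix its back-index.
	void remove(Element& e)
	{
		const uint32_t index = e.shapeArrayIndex;
		const uint32_t last  = uint32_t(shapes.size()) - 1;
		if (index != last)
		{
			shapes[index] = shapes[last];
			shapes[index]->shapeArrayIndex = index;
		}
		shapes.pop_back();
		e.shapeArrayIndex = 0xffffffffu;
	}
};

int main()
{
	Element a, b, c;
	ShapeTable table;
	table.add(a); table.add(b); table.add(c);
	table.remove(a);	// c is moved into slot 0
	assert(c.shapeArrayIndex == 0 && table.shapes.size() == 2);
	return 0;
}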
NVIDIA-Omniverse/PhysX/physx/source/simulationcontroller/src/ScNPhaseCore.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #include "ScNPhaseCore.h" #include "ScShapeInteraction.h" #include "ScTriggerInteraction.h" #include "ScElementInteractionMarker.h" #include "ScConstraintInteraction.h" #include "ScSimStats.h" using namespace physx; using namespace Sc; /////////////////////////////////////////////////////////////////////////////// PX_IMPLEMENT_OUTPUT_ERROR /////////////////////////////////////////////////////////////////////////////// NPhaseCore::NPhaseCore(Scene& scene, const PxSceneDesc& sceneDesc) : mOwnerScene (scene), mContactReportActorPairSet ("contactReportPairSet"), mPersistentContactEventPairList ("persistentContactEventPairs"), mNextFramePersistentContactEventPairIndex (0), mForceThresholdContactEventPairList ("forceThresholdContactEventPairs"), mContactReportBuffer (sceneDesc.contactReportStreamBufferSize, (sceneDesc.flags & PxSceneFlag::eDISABLE_CONTACT_REPORT_BUFFER_RESIZE)), mActorPairPool ("actorPairPool"), mActorPairReportPool ("actorPairReportPool"), mShapeInteractionPool (PxAllocatorTraits<ShapeInteraction>::Type("shapeInteractionPool"), 4096), mTriggerInteractionPool ("triggerInteractionPool"), mActorPairContactReportDataPool ("actorPairContactReportPool"), mInteractionMarkerPool ("interactionMarkerPool"), mConcludeTriggerInteractionProcessingTask (scene.getContextId(), this, "ScNPhaseCore.concludeTriggerInteractionProcessing") { } NPhaseCore::~NPhaseCore() { // Clear pending actor pairs (waiting on contact report callback) clearContactReportActorPairs(false); } PxU32 NPhaseCore::getDefaultContactReportStreamBufferSize() const { return mContactReportBuffer.getDefaultBufferSize(); } ElementSimInteraction* NPhaseCore::findInteraction(const ElementSim* element0, const ElementSim* element1) { const PxHashMap<ElementSimKey, ElementSimInteraction*>::Entry* pair = mElementSimMap.find(ElementSimKey(element0->getElementID(), element1->getElementID())); 
return pair ? pair->second : NULL; } void NPhaseCore::registerInteraction(ElementSimInteraction* interaction) { mElementSimMap.insert(ElementSimKey(interaction->getElement0().getElementID(), interaction->getElement1().getElementID()), interaction); } void NPhaseCore::unregisterInteraction(ElementSimInteraction* interaction) { mElementSimMap.erase(ElementSimKey(interaction->getElement0().getElementID(), interaction->getElement1().getElementID())); } void NPhaseCore::onOverlapRemoved(ElementSim* volume0, ElementSim* volume1, PxU32 ccdPass, void* elemSim, PxsContactManagerOutputIterator& outputs) { ElementSim* elementHi = volume1; ElementSim* elementLo = volume0; // No actor internal interactions PX_ASSERT(&elementHi->getActor() != &elementLo->getActor()); // PT: TODO: get rid of 'findInteraction', cf US10491 ElementSimInteraction* interaction = elemSim ? reinterpret_cast<ElementSimInteraction*>(elemSim) : findInteraction(elementHi, elementLo); // MS: The check below is necessary since at the moment LowLevel broadphase still tracks // killed pairs and hence reports lost overlaps if(interaction) { PxU32 flags = PxU32(PairReleaseFlag::eWAKE_ON_LOST_TOUCH); PX_ASSERT(interaction->isElementInteraction()); releaseElementPair(static_cast<ElementSimInteraction*>(interaction), flags, NULL, ccdPass, true, outputs); } } // MS: TODO: optimize this for the actor release case? void NPhaseCore::onVolumeRemoved(ElementSim* volume, PxU32 flags, PxsContactManagerOutputIterator& outputs) { const PxU32 ccdPass = 0; flags |= PairReleaseFlag::eRUN_LOST_TOUCH_LOGIC; // Release interactions // IMPORTANT: Iterate from the back of the list to the front as we release interactions which // triggers a replace with last ElementSim::ElementInteractionReverseIterator iter = volume->getElemInteractionsReverse(); ElementSimInteraction* interaction = iter.getNext(); while(interaction) { PX_ASSERT( (interaction->getType() == InteractionType::eMARKER) || (interaction->getType() == InteractionType::eOVERLAP) || (interaction->getType() == InteractionType::eTRIGGER) ); releaseElementPair(interaction, flags, volume, ccdPass, true, outputs); interaction = iter.getNext(); } } ElementSimInteraction* NPhaseCore::createRbElementInteraction(const FilterInfo& finfo, ShapeSimBase& s0, ShapeSimBase& s1, PxsContactManager* contactManager, ShapeInteraction* shapeInteraction, ElementInteractionMarker* interactionMarker, bool isTriggerPair) { ElementSimInteraction* pair = NULL; if((finfo.filterFlags & PxFilterFlag::eSUPPRESS) == false) { if(!isTriggerPair) { PX_ASSERT(contactManager); PX_ASSERT(shapeInteraction); pair = createShapeInteraction(s0, s1, finfo.pairFlags, contactManager, shapeInteraction); } else { pair = createTriggerInteraction(s0, s1, finfo.pairFlags); } } else pair = createElementInteractionMarker(s0, s1, interactionMarker); if(finfo.hasPairID) { // Mark the pair as a filter callback pair pair->raiseInteractionFlag(InteractionFlag::eIS_FILTER_PAIR); } return pair; } void NPhaseCore::managerNewTouch(ShapeInteraction& interaction) { //(1) if the pair hasn't already been assigned, look it up! ActorPair* actorPair = interaction.getActorPair(); if(!actorPair) { ShapeSim& s0 = static_cast<ShapeSim&>(interaction.getElement0()); ShapeSim& s1 = static_cast<ShapeSim&>(interaction.getElement1()); actorPair = findActorPair(&s0, &s1, interaction.isReportPair()); actorPair->incRefCount(); //It's being referenced by a new pair... 
interaction.setActorPair(*actorPair); } } static bool shouldSwapBodies(const ShapeSimBase& s0, const ShapeSimBase& s1) { /* This tries to ensure that if one of the bodies is static or kinematic, it will be body B There is a further optimization to force all pairs that share the same bodies to have the same body ordering. This reduces the number of required partitions in the parallel solver. Sorting rules are: If bodyA is static, swap If bodyA is rigidDynamic and bodyB is articulation, swap If bodyA is in an earlier BP group than bodyB, swap */ // PT: some of these swaps are here to fulfill requirements from the solver code, and we // will get asserts and failures without them. Some others are only optimizations. // PT: generally speaking we want the "static" actor to be second in the pair. // "Static" can mean either: // - a proper static body // - a kinematic dynamic body // - an articulation link with a fixed base ActorSim& rs0 = s0.getActor(); const PxActorType::Enum actorType0 = rs0.getActorType(); if(actorType0 == PxActorType::eRIGID_STATIC) return true; ActorSim& rs1 = s1.getActor(); const PxActorType::Enum actorType1 = rs1.getActorType(); const bool isDyna0 = actorType0 == PxActorType::eRIGID_DYNAMIC; const bool isDyna1 = actorType1 == PxActorType::eRIGID_DYNAMIC; if(actorType0 == PxActorType::eARTICULATION_LINK) { if(isDyna1 || actorType1 == PxActorType::eARTICULATION_LINK) { if(static_cast<BodySim&>(rs0).getLowLevelBody().mCore->fixedBaseLink) return true; } } else if(isDyna0) { // PT: this tries to implement this requirement: "If bodyA is rigidDynamic and bodyB is articulation, swap" // But we do NOT do that if bodyB has a fixed base. It is unclear whether this particular swap is really needed. if(actorType1 == PxActorType::eARTICULATION_LINK) { if(!static_cast<BodySim&>(rs1).getLowLevelBody().mCore->fixedBaseLink) return true; } } // PT: initial code was: // if((actorType0 == PxActorType::eRIGID_DYNAMIC && actorType1 == PxActorType::eRIGID_DYNAMIC) && actorAKinematic) // But actorAKinematic true implies isDyna0 true, so this is equivalent to // if(isDyna1 && actorAKinematic) // And we only need actorAKinematic in this expression so it's faster to move its computation inside the if: // if(isDyna1 && isDyna0 && static_cast<BodySim&>(rs0).isKinematic()) if(isDyna1 && isDyna0 && static_cast<BodySim&>(rs0).isKinematic()) return true; // PT: initial code was: // if(actorType0 == actorType1 && rs0.getActorID() < rs1.getActorID() && !actorBKinematic) // We refactor the code a bit to avoid computing actorBKinematic. We could also test actorBKinematic // first and avoid reading actor IDs. Unclear what's best, arbitrary choice for now. if((actorType0 == actorType1) && (rs0.getActorID() < rs1.getActorID())) { const bool actorBKinematic = isDyna1 && static_cast<BodySim&>(rs1).isKinematic(); if(!actorBKinematic) return true; } #if PX_SUPPORT_GPU_PHYSX // PT: using rs0.isParticleSystem() instead of isParticleSystem(actorType0) is faster. if(actorType1 != PxActorType::eRIGID_STATIC && rs0.isParticleSystem()) return true; #endif return false; } ShapeInteraction* NPhaseCore::createShapeInteraction(ShapeSimBase& s0, ShapeSimBase& s1, PxPairFlags pairFlags, PxsContactManager* contactManager, ShapeInteraction* shapeInteraction) { ShapeSimBase* _s0 = &s0; ShapeSimBase* _s1 = &s1; if(shouldSwapBodies(s0, s1)) PxSwap(_s0, _s1); ShapeInteraction* si = shapeInteraction ? 
shapeInteraction : mShapeInteractionPool.allocate(); PX_PLACEMENT_NEW(si, ShapeInteraction)(*_s0, *_s1, pairFlags, contactManager); PX_ASSERT(si->mReportPairIndex == INVALID_REPORT_PAIR_ID); return si; } TriggerInteraction* NPhaseCore::createTriggerInteraction(ShapeSimBase& s0, ShapeSimBase& s1, PxPairFlags triggerFlags) { ShapeSimBase* triggerShape; ShapeSimBase* otherShape; if(s1.getFlags() & PxShapeFlag::eTRIGGER_SHAPE) { triggerShape = &s1; otherShape = &s0; } else { triggerShape = &s0; otherShape = &s1; } TriggerInteraction* pair = mTriggerInteractionPool.construct(*triggerShape, *otherShape); pair->setTriggerFlags(triggerFlags); return pair; } ElementInteractionMarker* NPhaseCore::createElementInteractionMarker(ElementSim& e0, ElementSim& e1, ElementInteractionMarker* interactionMarker) { ElementInteractionMarker* pair = interactionMarker ? interactionMarker : mInteractionMarkerPool.allocate(); PX_PLACEMENT_NEW(pair, ElementInteractionMarker)(e0, e1, interactionMarker != NULL); return pair; } ActorPair* NPhaseCore::findActorPair(ShapeSimBase* s0, ShapeSimBase* s1, PxIntBool isReportPair) { PX_ASSERT(!(s0->getFlags() & PxShapeFlag::eTRIGGER_SHAPE) && !(s1->getFlags() & PxShapeFlag::eTRIGGER_SHAPE)); ActorSim* aLess = &s0->getActor(); ActorSim* aMore = &s1->getActor(); if(aLess->getActorID() > aMore->getActorID()) PxSwap(aLess, aMore); const BodyPairKey key(aLess->getActorID(), aMore->getActorID()); ActorPair*& actorPair = mActorPairMap[key]; if(actorPair == NULL) { if(!isReportPair) actorPair = mActorPairPool.construct(); else actorPair = mActorPairReportPool.construct(s0->getActor(), s1->getActor()); } if(!isReportPair || actorPair->isReportPair()) return actorPair; else { PxU32 size = aLess->getActorInteractionCount(); Interaction** interactions = aLess->getActorInteractions(); ActorPairReport* actorPairReport = mActorPairReportPool.construct(s0->getActor(), s1->getActor()); actorPairReport->convert(*actorPair); while(size--) { Interaction* interaction = *interactions++; if((&interaction->getActorSim0() == aMore) || (&interaction->getActorSim1() == aMore)) { PX_ASSERT(((&interaction->getActorSim0() == aLess) || (&interaction->getActorSim1() == aLess))); if(interaction->getType() == InteractionType::eOVERLAP) { ShapeInteraction* si = static_cast<ShapeInteraction*>(interaction); if(si->getActorPair() != NULL) si->setActorPair(*actorPairReport); } } } PX_ASSERT(!actorPair->isReportPair()); mActorPairPool.destroy(actorPair); actorPair = actorPairReport; } return actorPair; } PX_FORCE_INLINE void NPhaseCore::destroyActorPairReport(ActorPairReport& aPair) { PX_ASSERT(aPair.isReportPair()); aPair.releaseContactReportData(*this); mActorPairReportPool.destroy(&aPair); } ElementSimInteraction* NPhaseCore::convert(ElementSimInteraction* pair, InteractionType::Enum newType, FilterInfo& filterInfo, bool removeFromDirtyList, PxsContactManagerOutputIterator& outputs) { PX_ASSERT(newType != pair->getType()); ElementSim& elementA = pair->getElement0(); ElementSim& elementB = pair->getElement1(); // Wake up the actors of the pair if((pair->getActorSim0().getActorType() == PxActorType::eRIGID_DYNAMIC) && !(static_cast<BodySim&>(pair->getActorSim0()).isActive())) pair->getActorSim0().internalWakeUp(); if((pair->getActorSim1().getActorType() == PxActorType::eRIGID_DYNAMIC) && !(static_cast<BodySim&>(pair->getActorSim1()).isActive())) pair->getActorSim1().internalWakeUp(); // Since the FilterPair struct might have been re-used in the newly created interaction, we need to clear // the filter pair marker 
of the old interaction to avoid that the FilterPair gets deleted by the releaseElementPair() // call that follows. pair->clearInteractionFlag(InteractionFlag::eIS_FILTER_PAIR); // PT: we need to unregister the old interaction *before* creating the new one, because Sc::NPhaseCore::registerInteraction will use // ElementSim pointers which are the same for both. Since "releaseElementPair" will call the unregister function from // the element's dtor, we don't need to do it explicitly here. Just release the object. releaseElementPair(pair, PairReleaseFlag::eWAKE_ON_LOST_TOUCH | PairReleaseFlag::eRUN_LOST_TOUCH_LOGIC, NULL, 0, removeFromDirtyList, outputs); ElementSimInteraction* result = NULL; switch(newType) { case InteractionType::eINVALID: // This means the pair should get killed break; case InteractionType::eMARKER: { result = createElementInteractionMarker(elementA, elementB, NULL); break; } case InteractionType::eOVERLAP: { result = createShapeInteraction(static_cast<ShapeSim&>(elementA), static_cast<ShapeSim&>(elementB), filterInfo.pairFlags, NULL, NULL); break; } case InteractionType::eTRIGGER: { result = createTriggerInteraction(static_cast<ShapeSim&>(elementA), static_cast<ShapeSim&>(elementB), filterInfo.pairFlags); break; } case InteractionType::eCONSTRAINTSHADER: case InteractionType::eARTICULATION: case InteractionType::eTRACKED_IN_SCENE_COUNT: PX_ASSERT(0); break; }; if(filterInfo.hasPairID) { PX_ASSERT(result); // If a filter callback pair is going to get killed, then the FilterPair struct should already have // been deleted. // Mark the new interaction as a filter callback pair result->raiseInteractionFlag(InteractionFlag::eIS_FILTER_PAIR); } return result; } namespace physx { namespace Sc { static bool findTriggerContacts(TriggerInteraction* tri, bool toBeDeleted, bool volumeRemoved, PxTriggerPair& triggerPair, TriggerPairExtraData& triggerPairExtra, SimStats::TriggerPairCountsNonVolatile& triggerPairStats, const PxsTransformCache& transformCache) { ShapeSimBase& s0 = tri->getTriggerShape(); ShapeSimBase& s1 = tri->getOtherShape(); const PxPairFlags pairFlags = tri->getTriggerFlags(); PxPairFlags pairEvent; bool overlap; PxU8 testForRemovedShapes = 0; if(toBeDeleted) { // The trigger interaction is to lie down in its tomb, hence we know that the overlap is gone. // What remains is to check whether the interaction was deleted because of a shape removal in // which case we need to later check for removed shapes. overlap = false; if(volumeRemoved) { // Note: only the first removed volume can be detected when the trigger interaction is deleted but at a later point the second volume might get removed too. 
testForRemovedShapes = TriggerPairFlag::eTEST_FOR_REMOVED_SHAPES; } } else { #if PX_ENABLE_SIM_STATS PX_ASSERT(s0.getGeometryType() < PxGeometryType::eCONVEXMESH+1); // The first has to be the trigger shape triggerPairStats[s0.getGeometryType()][s1.getGeometryType()]++; #else PX_UNUSED(triggerPairStats); PX_CATCH_UNDEFINED_ENABLE_SIM_STATS #endif ShapeSimBase* primitive0 = &s0; ShapeSimBase* primitive1 = &s1; PX_ASSERT(primitive0->getFlags() & PxShapeFlag::eTRIGGER_SHAPE || primitive1->getFlags() & PxShapeFlag::eTRIGGER_SHAPE); // Reorder them if needed if(primitive0->getGeometryType() > primitive1->getGeometryType()) PxSwap(primitive0, primitive1); const Gu::GeomOverlapFunc overlapFunc = Gu::getOverlapFuncTable()[primitive0->getGeometryType()][primitive1->getGeometryType()]; const PxU32 elementID0 = primitive0->getElementID(); const PxU32 elementID1 = primitive1->getElementID(); const PxTransform& globalPose0 = transformCache.getTransformCache(elementID0).transform; const PxTransform& globalPose1 = transformCache.getTransformCache(elementID1).transform; PX_ASSERT(overlapFunc); overlap = overlapFunc( primitive0->getCore().getGeometry(), globalPose0, primitive1->getCore().getGeometry(), globalPose1, &tri->getTriggerCache(), UNUSED_OVERLAP_THREAD_CONTEXT); } const bool hadOverlap = tri->lastFrameHadContacts(); if(hadOverlap) { if(!overlap) pairEvent = PxPairFlag::eNOTIFY_TOUCH_LOST; } else { if(overlap) pairEvent = PxPairFlag::eNOTIFY_TOUCH_FOUND; } tri->updateLastFrameHadContacts(overlap); const PxPairFlags triggeredFlags = pairEvent & pairFlags; if(triggeredFlags) { triggerPair.triggerShape = s0.getPxShape(); triggerPair.otherShape = s1.getPxShape(); triggerPair.status = PxPairFlag::Enum(PxU32(pairEvent)); triggerPair.flags = PxTriggerPairFlags(testForRemovedShapes); const ActorCore& actorCore0 = s0.getActor().getActorCore(); const ActorCore& actorCore1 = s1.getActor().getActorCore(); #if PX_SUPPORT_GPU_PHYSX if (actorCore0.getActorCoreType() == PxActorType::eSOFTBODY) triggerPair.triggerActor = static_cast<const SoftBodyCore&>(actorCore0).getPxActor(); else #endif triggerPair.triggerActor = static_cast<const RigidCore&>(actorCore0).getPxActor(); #if PX_SUPPORT_GPU_PHYSX if (actorCore0.getActorCoreType() == PxActorType::eSOFTBODY) triggerPair.otherActor = static_cast<const SoftBodyCore&>(actorCore1).getPxActor(); else #endif triggerPair.otherActor = static_cast<const RigidCore&>(actorCore1).getPxActor(); triggerPairExtra = TriggerPairExtraData(s0.getElementID(), s1.getElementID(), actorCore0.getOwnerClient(), actorCore1.getOwnerClient()); return true; } return false; } class TriggerContactTask : public Cm::Task { PX_NOCOPY(TriggerContactTask) public: TriggerContactTask(TriggerInteraction* const* triggerPairs, PxU32 triggerPairCount, PxMutex& lock, Scene& scene, PxsTransformCache& transformCache) : Cm::Task (scene.getContextId()), mTriggerPairs (triggerPairs), mTriggerPairCount (triggerPairCount), mLock (lock), mScene (scene), mTransformCache (transformCache) { } virtual void runInternal() { SimStats::TriggerPairCountsNonVolatile triggerPairStats; #if PX_ENABLE_SIM_STATS PxMemZero(&triggerPairStats, sizeof(SimStats::TriggerPairCountsNonVolatile)); #else PX_CATCH_UNDEFINED_ENABLE_SIM_STATS #endif PxTriggerPair triggerPair[sTriggerPairsPerTask]; TriggerPairExtraData triggerPairExtra[sTriggerPairsPerTask]; PxU32 triggerReportItemCount = 0; for(PxU32 i=0; i < mTriggerPairCount; i++) { TriggerInteraction* tri = mTriggerPairs[i]; PX_ASSERT(tri->readInteractionFlag(InteractionFlag::eIS_ACTIVE)); 
			if (findTriggerContacts(tri, false, false,
									triggerPair[triggerReportItemCount],
									triggerPairExtra[triggerReportItemCount],
									triggerPairStats,
									mTransformCache))
			{
				triggerReportItemCount++;
			}
		}

		if(triggerReportItemCount)
		{
			PxTriggerPair* triggerPairBuffer;
			TriggerPairExtraData* triggerPairExtraBuffer;

			{
				PxMutex::ScopedLock lock(mLock);

				mScene.reserveTriggerReportBufferSpace(triggerReportItemCount, triggerPairBuffer, triggerPairExtraBuffer);

				PxMemCopy(triggerPairBuffer, triggerPair, sizeof(PxTriggerPair) * triggerReportItemCount);
				PxMemCopy(triggerPairExtraBuffer, triggerPairExtra, sizeof(TriggerPairExtraData) * triggerReportItemCount);
			}
		}

#if PX_ENABLE_SIM_STATS
		SimStats& simStats = mScene.getStatsInternal();
		for(PxU32 i=0; i < PxGeometryType::eCONVEXMESH+1; i++)
		{
			for(PxU32 j=0; j < PxGeometryType::eGEOMETRY_COUNT; j++)
			{
				if(triggerPairStats[i][j] != 0)
					PxAtomicAdd(&simStats.numTriggerPairs[i][j], triggerPairStats[i][j]);
			}
		}
#else
		PX_CATCH_UNDEFINED_ENABLE_SIM_STATS
#endif
	}

	virtual const char* getName() const
	{
		return "ScNPhaseCore.triggerInteractionWork";
	}

public:
	static const PxU32 sTriggerPairsPerTask = 64;

private:
	TriggerInteraction* const*	mTriggerPairs;
	const PxU32					mTriggerPairCount;
	PxMutex&					mLock;
	Scene&						mScene;
	PxsTransformCache&			mTransformCache;
};

}  // namespace Sc
}  // namespace physx

bool TriggerProcessingContext::initialize(TriggerInteraction** interactions, PxU32 pairCount, PxcScratchAllocator& allocator)
{
	PX_ASSERT(!mTmpTriggerProcessingBlock);
	PX_ASSERT(mTmpTriggerPairCount == 0);
	PX_ASSERT(pairCount > 0);

	const PxU32 taskCountWithoutRemainder = pairCount / TriggerContactTask::sTriggerPairsPerTask;
	const PxU32 maxTaskCount = taskCountWithoutRemainder + 1;
	const PxU32 pairPtrSize = pairCount * sizeof(TriggerInteraction*);
	const PxU32 memBlockSize = pairPtrSize + (maxTaskCount * sizeof(TriggerContactTask));

	PxU8* triggerProcessingBlock = reinterpret_cast<PxU8*>(allocator.alloc(memBlockSize, true));
	if (triggerProcessingBlock)
	{
		PxMemCopy(triggerProcessingBlock, interactions, pairPtrSize);	// needs to get copied because other tasks may change the source list
																		// while trigger overlap tests run

		mTmpTriggerProcessingBlock = triggerProcessingBlock;	// note: gets released in deinitialize
		mTmpTriggerPairCount = pairCount;
		return true;
	}
	else
	{
		outputError<PxErrorCode::eOUT_OF_MEMORY>(__LINE__, "Temporary memory for trigger pair processing could not be allocated. Trigger overlap tests will not take place.");
	}

	return false;
}

void TriggerProcessingContext::deinitialize(PxcScratchAllocator& allocator)
{
	PX_ASSERT(mTmpTriggerProcessingBlock);
	PX_ASSERT(mTmpTriggerPairCount > 0);

	allocator.free(mTmpTriggerProcessingBlock);
	mTmpTriggerProcessingBlock = NULL;
	mTmpTriggerPairCount = 0;
}

PxBaseTask* NPhaseCore::prepareForTriggerInteractionProcessing(PxBaseTask* continuation)
{
	// Triggers
	TriggerInteraction** triggerInteractions = reinterpret_cast<TriggerInteraction**>(mOwnerScene.getActiveInteractions(InteractionType::eTRIGGER));
	const PxU32 pairCount = mOwnerScene.getNbActiveInteractions(InteractionType::eTRIGGER);

	if (pairCount > 0)
	{
		if (mTriggerProcessingContext.initialize(triggerInteractions, pairCount, mOwnerScene.getLowLevelContext()->getScratchAllocator()))
		{
			mConcludeTriggerInteractionProcessingTask.setContinuation(continuation);
			return &mConcludeTriggerInteractionProcessingTask;
		}
	}
	return NULL;
}

void NPhaseCore::processTriggerInteractions(PxBaseTask& continuation)
{
	TriggerInteraction* const* triggerInteractions = mTriggerProcessingContext.getTriggerInteractions();
	const PxU32 pairCount = mTriggerProcessingContext.getTriggerInteractionCount();
	TriggerContactTask* triggerContactTaskBuffer = mTriggerProcessingContext.getTriggerContactTasks();
	PxMutex& triggerWriteBackLock = mTriggerProcessingContext.getTriggerWriteBackLock();

	PX_ASSERT(triggerInteractions);
	PX_ASSERT(pairCount > 0);
	PX_ASSERT(triggerContactTaskBuffer);

	// PT: TASK-CREATION TAG
	const bool hasMultipleThreads = mOwnerScene.getTaskManager().getCpuDispatcher()->getWorkerCount() > 1;
	const bool moreThanOneBatch = pairCount > TriggerContactTask::sTriggerPairsPerTask;
	const bool scheduleTasks = hasMultipleThreads && moreThanOneBatch;
	// when running on a single thread, the task system seems to cause the main overhead (locking and atomic operations
	// seemed less of an issue). Hence, the tasks get run directly in that case. Same if there is only one batch.

	PxsTransformCache& transformCache = mOwnerScene.getLowLevelContext()->getTransformCache();

	PxU32 remainder = pairCount;
	while(remainder)
	{
		const PxU32 nb = remainder > TriggerContactTask::sTriggerPairsPerTask ? TriggerContactTask::sTriggerPairsPerTask : remainder;
		remainder -= nb;

		TriggerContactTask* task = triggerContactTaskBuffer;
		task = PX_PLACEMENT_NEW(task, TriggerContactTask(triggerInteractions, nb, triggerWriteBackLock, mOwnerScene, transformCache));

		if(scheduleTasks)
		{
			task->setContinuation(&continuation);
			task->removeReference();
		}
		else
			task->runInternal();

		triggerContactTaskBuffer++;
		triggerInteractions += nb;
	}
}

void NPhaseCore::concludeTriggerInteractionProcessing(PxBaseTask*)
{
	// check if active trigger pairs can be deactivated (until woken up again)

	TriggerInteraction* const* triggerInteractions = mTriggerProcessingContext.getTriggerInteractions();
	const PxU32 pairCount = mTriggerProcessingContext.getTriggerInteractionCount();

	PX_ASSERT(triggerInteractions);
	PX_ASSERT(pairCount > 0);

	for (PxU32 i = 0; i < pairCount; i++)
	{
		TriggerInteraction* tri = triggerInteractions[i];
		PX_ASSERT(tri->readInteractionFlag(InteractionFlag::eIS_ACTIVE));

		if (!(tri->readFlag(TriggerInteraction::PROCESS_THIS_FRAME)))
		{
			// active trigger pairs for which overlap tests were not forced should remain in the active list
			// to catch transitions between overlap and no overlap
			continue;
		}
		else
		{
			tri->clearFlag(TriggerInteraction::PROCESS_THIS_FRAME);

			// explicitly scheduled overlap test is done (after object creation, teleport, ...).
Check if trigger pair should remain active or not. if (!tri->onActivate(NULL)) { PX_ASSERT(tri->readInteractionFlag(InteractionFlag::eIS_ACTIVE)); // Why is the assert enough? // Once an explicit overlap test is scheduled, the interaction can not get deactivated anymore until it got processed. tri->clearInteractionFlag(InteractionFlag::eIS_ACTIVE); mOwnerScene.notifyInteractionDeactivated(tri); } } } mTriggerProcessingContext.deinitialize(mOwnerScene.getLowLevelContext()->getScratchAllocator()); } #ifdef REMOVED class ProcessPersistentContactTask : public Cm::Task { Sc::NPhaseCore& mCore; ContactReportBuffer& mBuffer; PxMutex& mMutex; ShapeInteraction*const* mPersistentEventPairs; PxU32 mNbPersistentEventPairs; PxsContactManagerOutputIterator mOutputs; PX_NOCOPY(ProcessPersistentContactTask) public: ProcessPersistentContactTask(Sc::NPhaseCore& core, ContactReportBuffer& buffer, PxMutex& mutex, ShapeInteraction*const* persistentEventPairs, PxU32 nbPersistentEventPairs, PxsContactManagerOutputIterator& outputs) : Cm::Task(0), mCore(core), mBuffer(buffer), mMutex(mutex), mPersistentEventPairs(persistentEventPairs), mNbPersistentEventPairs(nbPersistentEventPairs), mOutputs(outputs) { } virtual void runInternal() { PX_PROFILE_ZONE("ProcessPersistentContactTask", mCore.getScene().getContextId()); PxU32 size = mNbPersistentEventPairs; ShapeInteraction*const* persistentEventPairs = mPersistentEventPairs; while (size--) { ShapeInteraction* pair = *persistentEventPairs++; if (size) { if (size > 1) { if (size > 2) { ShapeInteraction* nextPair = *(persistentEventPairs + 2); prefetchLine(nextPair); } ShapeInteraction* nextPair = *(persistentEventPairs + 1); ActorPair* aPair = nextPair->getActorPair(); prefetchLine(aPair); prefetchLine(&nextPair->getShape0()); prefetchLine(&nextPair->getShape1()); } ShapeInteraction* nextPair = *(persistentEventPairs); prefetchLine(&nextPair->getShape0().getActor()); prefetchLine(&nextPair->getShape1().getActor()); } PX_ASSERT(pair->hasTouch()); PX_ASSERT(pair->isReportPair()); const PxU32 pairFlags = pair->getPairFlags(); if ((pairFlags & PxU32(PxPairFlag::eNOTIFY_TOUCH_PERSISTS | PxPairFlag::eDETECT_DISCRETE_CONTACT)) == PxU32(PxPairFlag::eNOTIFY_TOUCH_PERSISTS | PxPairFlag::eDETECT_DISCRETE_CONTACT)) { // do not process the pair if only eDETECT_CCD_CONTACT is enabled because at this point CCD did not run yet. Plus the current CCD implementation can not reliably provide eNOTIFY_TOUCH_PERSISTS events // for performance reasons. //KS - filter based on edge activity! const ActorSim& bodySim0 = pair->getShape0().getActor(); const ActorSim& bodySim1 = pair->getShape1().getActor(); if (bodySim0.isActive() || (!bodySim1.isStaticRigid() && bodySim1.isActive())) pair->processUserNotificationAsync(PxPairFlag::eNOTIFY_TOUCH_PERSISTS, 0, false, 0, false, mOutputs/*, &alloc*/); } } } virtual const char* getName() const { return "ScNPhaseCore.ProcessPersistentContactTask"; } }; #endif void NPhaseCore::processPersistentContactEvents(PxsContactManagerOutputIterator& outputs) { PX_PROFILE_ZONE("Sc::NPhaseCore::processPersistentContactEvents", mOwnerScene.getContextId()); // Go through ShapeInteractions which requested persistent contact event reports. This is necessary since there are no low level events for persistent contact. 
ShapeInteraction*const* persistentEventPairs = getCurrentPersistentContactEventPairs(); PxU32 size = getCurrentPersistentContactEventPairCount(); while (size--) { ShapeInteraction* pair = *persistentEventPairs++; if (size) { ShapeInteraction* nextPair = *persistentEventPairs; PxPrefetchLine(nextPair); } ActorPair* aPair = pair->getActorPair(); PxPrefetchLine(aPair); PX_ASSERT(pair->hasTouch()); PX_ASSERT(pair->isReportPair()); const PxU32 pairFlags = pair->getPairFlags(); if ((pairFlags & PxU32(PxPairFlag::eNOTIFY_TOUCH_PERSISTS | PxPairFlag::eDETECT_DISCRETE_CONTACT)) == PxU32(PxPairFlag::eNOTIFY_TOUCH_PERSISTS | PxPairFlag::eDETECT_DISCRETE_CONTACT)) { // do not process the pair if only eDETECT_CCD_CONTACT is enabled because at this point CCD did not run yet. Plus the current CCD implementation can not reliably provide eNOTIFY_TOUCH_PERSISTS events // for performance reasons. //KS - filter based on edge activity! const ActorSim& actorSim0= pair->getShape0().getActor(); const ActorSim& actorSim1 = pair->getShape1().getActor(); if (actorSim0.isActive() || ((!actorSim1.isStaticRigid()) && actorSim1.isActive())) pair->processUserNotification(PxPairFlag::eNOTIFY_TOUCH_PERSISTS, 0, false, 0, false, outputs); } } } void NPhaseCore::addToDirtyInteractionList(Interaction* pair) { mDirtyInteractions.insert(pair); } void NPhaseCore::removeFromDirtyInteractionList(Interaction* pair) { PX_ASSERT(mDirtyInteractions.contains(pair)); mDirtyInteractions.erase(pair); } void NPhaseCore::updateDirtyInteractions(PxsContactManagerOutputIterator& outputs) { // The sleeping SIs will be updated on activation // clow: Sleeping SIs are not awaken for visualization updates const bool dirtyDominance = mOwnerScene.readInternalFlag(SceneInternalFlag::eSCENE_SIP_STATES_DIRTY_DOMINANCE); const bool dirtyVisualization = mOwnerScene.readInternalFlag(SceneInternalFlag::eSCENE_SIP_STATES_DIRTY_VISUALIZATION); if(dirtyDominance || dirtyVisualization) { // Update all interactions. const PxU8 mask = PxTo8((dirtyDominance ? InteractionDirtyFlag::eDOMINANCE : 0) | (dirtyVisualization ? InteractionDirtyFlag::eVISUALIZATION : 0)); ElementSimInteraction** it = mOwnerScene.getInteractions(InteractionType::eOVERLAP); PxU32 size = mOwnerScene.getNbInteractions(InteractionType::eOVERLAP); while(size--) { ElementSimInteraction* pair = *it++; PX_ASSERT(pair->getType() == InteractionType::eOVERLAP); if(!pair->readInteractionFlag(InteractionFlag::eIN_DIRTY_LIST)) { PX_ASSERT(!pair->getDirtyFlags()); static_cast<ShapeInteraction*>(pair)->updateState(mask); } else pair->setDirty(mask); // the pair will get processed further below anyway, so just mark the flags dirty } } // Update all interactions in the dirty list const PxU32 dirtyItcCount = mDirtyInteractions.size(); Interaction* const* dirtyInteractions = mDirtyInteractions.getEntries(); for(PxU32 i = 0; i < dirtyItcCount; i++) { Interaction* refInt = dirtyInteractions[i]; Interaction* interaction = refInt; if(interaction->isElementInteraction() && interaction->needsRefiltering()) { ElementSimInteraction* pair = static_cast<ElementSimInteraction*>(interaction); refInt = refilterInteraction(pair, NULL, false, outputs); } if(interaction == refInt) // Refiltering might convert the pair to another type and kill the old one. In that case we don't want to update the new pair since it has been updated on creation. 
{ const InteractionType::Enum iType = interaction->getType(); if (iType == InteractionType::eOVERLAP) static_cast<ShapeInteraction*>(interaction)->updateState(0); else if (iType == InteractionType::eCONSTRAINTSHADER) static_cast<ConstraintInteraction*>(interaction)->updateState(); interaction->setClean(false); // false because the dirty interactions list gets cleard further below } } mDirtyInteractions.clear(); } void NPhaseCore::releaseElementPair(ElementSimInteraction* pair, PxU32 flags, ElementSim* removedElement, PxU32 ccdPass, bool removeFromDirtyList, PxsContactManagerOutputIterator& outputs) { pair->setClean(removeFromDirtyList); // Removes the pair from the dirty interaction list etc. if(pair->readInteractionFlag(InteractionFlag::eIS_FILTER_PAIR)) { // Check if this is a filter callback pair ShapeSimBase& s0 = static_cast<ShapeSimBase&>(pair->getElement0()); ShapeSimBase& s1 = static_cast<ShapeSimBase&>(pair->getElement1()); callPairLost(s0, s1, removedElement != NULL); } switch(pair->getType()) { case InteractionType::eTRIGGER: { PxsTransformCache& transformCache = mOwnerScene.getLowLevelContext()->getTransformCache(); TriggerInteraction* tri = static_cast<TriggerInteraction*>(pair); PxTriggerPair triggerPair; TriggerPairExtraData triggerPairExtra; if (findTriggerContacts(tri, true, (removedElement != NULL), triggerPair, triggerPairExtra, const_cast<SimStats::TriggerPairCountsNonVolatile&>(mOwnerScene.getStatsInternal().numTriggerPairs), transformCache)) // cast away volatile-ness (this is fine since the method does not run in parallel) { mOwnerScene.getTriggerBufferAPI().pushBack(triggerPair); mOwnerScene.getTriggerBufferExtraData().pushBack(triggerPairExtra); } mTriggerInteractionPool.destroy(tri); } break; case InteractionType::eMARKER: { ElementInteractionMarker* interactionMarker = static_cast<ElementInteractionMarker*>(pair); mInteractionMarkerPool.destroy(interactionMarker); } break; case InteractionType::eOVERLAP: { ShapeInteraction* si = static_cast<ShapeInteraction*>(pair); if(flags & PairReleaseFlag::eRUN_LOST_TOUCH_LOGIC) lostTouchReports(si, flags, removedElement, ccdPass, outputs); mShapeInteractionPool.destroy(si); } break; case InteractionType::eCONSTRAINTSHADER: case InteractionType::eARTICULATION: case InteractionType::eTRACKED_IN_SCENE_COUNT: case InteractionType::eINVALID: PX_ASSERT(0); return; } } void NPhaseCore::lostTouchReports(ShapeInteraction* si, PxU32 flags, ElementSim* removedElement, PxU32 ccdPass, PxsContactManagerOutputIterator& outputs) { if(si->hasTouch()) { if(si->isReportPair()) si->sendLostTouchReport((removedElement != NULL), ccdPass, outputs); si->adjustCountersOnLostTouch(); } ActorPair* aPair = si->getActorPair(); if(aPair && aPair->decRefCount() == 0) { RigidSim* sim0 = static_cast<RigidSim*>(&si->getActorSim0()); RigidSim* sim1 = static_cast<RigidSim*>(&si->getActorSim1()); if(sim0->getActorID() > sim1->getActorID()) PxSwap(sim0, sim1); const BodyPairKey pair(sim0->getActorID(), sim1->getActorID()); mActorPairMap.erase(pair); if(!aPair->isReportPair()) { mActorPairPool.destroy(aPair); } else { ActorPairReport& apr = ActorPairReport::cast(*aPair); destroyActorPairReport(apr); } } si->clearActorPair(); if(si->hasTouch() || (!si->hasKnownTouchState())) { ActorSim& b0 = si->getShape0().getActor(); ActorSim& b1 = si->getShape1().getActor(); if(flags & PairReleaseFlag::eWAKE_ON_LOST_TOUCH) { // we rely on shape pair ordering here, where the first body is never static // (see createShapeInteraction()) PX_ASSERT(!b0.isStaticRigid()); if 
(removedElement == NULL) { if (b1.isStaticRigid()) // no check for b0 being static, see assert further above { // given wake-on-lost-touch has been requested: // if one is static, we wake up the other immediately b0.internalWakeUp(); } else if(!si->readFlag(ShapeInteraction::CONTACTS_RESPONSE_DISABLED)) { mOwnerScene.addToLostTouchList(b0, b1); } } else { // given wake-on-lost-touch has been requested: // if an element (broadphase volume) has been removed, we wake the other actor up PX_ASSERT((removedElement == &si->getShape0()) || (removedElement == &si->getShape1())); if (&si->getShape0() == removedElement) { if (!b1.isStaticRigid()) b1.internalWakeUp(); } else b0.internalWakeUp(); // no check for b0 being non-static, see assert further above } } } } void NPhaseCore::clearContactReportActorPairs(bool shrinkToZero) { for(PxU32 i=0; i < mContactReportActorPairSet.size(); i++) { //TODO: prefetch? ActorPairReport* aPair = mContactReportActorPairSet[i]; const PxU32 refCount = aPair->getRefCount(); PX_ASSERT(aPair->isInContactReportActorPairSet()); PX_ASSERT(refCount > 0); aPair->decRefCount(); // Reference held by contact callback if(refCount > 1) { aPair->clearInContactReportActorPairSet(); } else { const PxU32 actorAID = aPair->getActorAID(); const PxU32 actorBID = aPair->getActorBID(); const BodyPairKey pair(PxMin(actorAID, actorBID), PxMax(actorAID, actorBID)); mActorPairMap.erase(pair); destroyActorPairReport(*aPair); } } if(!shrinkToZero) mContactReportActorPairSet.clear(); else mContactReportActorPairSet.reset(); } void NPhaseCore::addToPersistentContactEventPairs(ShapeInteraction* si) { // Pairs which request events which do not get triggered by the sdk and thus need to be tested actively every frame. PX_ASSERT(si->getPairFlags() & (PxPairFlag::eNOTIFY_TOUCH_PERSISTS | ShapeInteraction::CONTACT_FORCE_THRESHOLD_PAIRS)); PX_ASSERT(si->mReportPairIndex == INVALID_REPORT_PAIR_ID); PX_ASSERT(!si->readFlag(ShapeInteraction::IS_IN_PERSISTENT_EVENT_LIST)); PX_ASSERT(!si->readFlag(ShapeInteraction::IS_IN_FORCE_THRESHOLD_EVENT_LIST)); PX_ASSERT(si->hasTouch()); // only pairs which can from now on lose or keep contact should be in this list si->raiseFlag(ShapeInteraction::IS_IN_PERSISTENT_EVENT_LIST); if(mPersistentContactEventPairList.size() == mNextFramePersistentContactEventPairIndex) { si->mReportPairIndex = mPersistentContactEventPairList.size(); mPersistentContactEventPairList.pushBack(si); } else { //swap with first entry that will be active next frame ShapeInteraction* firstDelayedSi = mPersistentContactEventPairList[mNextFramePersistentContactEventPairIndex]; firstDelayedSi->mReportPairIndex = mPersistentContactEventPairList.size(); mPersistentContactEventPairList.pushBack(firstDelayedSi); si->mReportPairIndex = mNextFramePersistentContactEventPairIndex; mPersistentContactEventPairList[mNextFramePersistentContactEventPairIndex] = si; } mNextFramePersistentContactEventPairIndex++; } void NPhaseCore::addToPersistentContactEventPairsDelayed(ShapeInteraction* si) { // Pairs which request events which do not get triggered by the sdk and thus need to be tested actively every frame. 
PX_ASSERT(si->getPairFlags() & (PxPairFlag::eNOTIFY_TOUCH_PERSISTS | ShapeInteraction::CONTACT_FORCE_THRESHOLD_PAIRS)); PX_ASSERT(si->mReportPairIndex == INVALID_REPORT_PAIR_ID); PX_ASSERT(!si->readFlag(ShapeInteraction::IS_IN_PERSISTENT_EVENT_LIST)); PX_ASSERT(!si->readFlag(ShapeInteraction::IS_IN_FORCE_THRESHOLD_EVENT_LIST)); PX_ASSERT(si->hasTouch()); // only pairs which can from now on lose or keep contact should be in this list si->raiseFlag(ShapeInteraction::IS_IN_PERSISTENT_EVENT_LIST); si->mReportPairIndex = mPersistentContactEventPairList.size(); mPersistentContactEventPairList.pushBack(si); } void NPhaseCore::removeFromPersistentContactEventPairs(ShapeInteraction* si) { PX_ASSERT(si->getPairFlags() & (PxPairFlag::eNOTIFY_TOUCH_PERSISTS | ShapeInteraction::CONTACT_FORCE_THRESHOLD_PAIRS)); PX_ASSERT(si->readFlag(ShapeInteraction::IS_IN_PERSISTENT_EVENT_LIST)); PX_ASSERT(!si->readFlag(ShapeInteraction::IS_IN_FORCE_THRESHOLD_EVENT_LIST)); PX_ASSERT(si->hasTouch()); // only pairs which could lose or keep contact should be in this list PxU32 index = si->mReportPairIndex; PX_ASSERT(index != INVALID_REPORT_PAIR_ID); if(index < mNextFramePersistentContactEventPairIndex) { const PxU32 replaceIdx = mNextFramePersistentContactEventPairIndex - 1; if((mNextFramePersistentContactEventPairIndex < mPersistentContactEventPairList.size()) && (index != replaceIdx)) { // keep next frame persistent pairs at the back of the list ShapeInteraction* tmp = mPersistentContactEventPairList[replaceIdx]; mPersistentContactEventPairList[index] = tmp; tmp->mReportPairIndex = index; index = replaceIdx; } mNextFramePersistentContactEventPairIndex--; } si->clearFlag(ShapeInteraction::IS_IN_PERSISTENT_EVENT_LIST); si->mReportPairIndex = INVALID_REPORT_PAIR_ID; mPersistentContactEventPairList.replaceWithLast(index); if(index < mPersistentContactEventPairList.size()) // Only adjust the index if the removed SIP was not at the end of the list mPersistentContactEventPairList[index]->mReportPairIndex = index; } void NPhaseCore::addToForceThresholdContactEventPairs(ShapeInteraction* si) { PX_ASSERT(si->getPairFlags() & ShapeInteraction::CONTACT_FORCE_THRESHOLD_PAIRS); PX_ASSERT(si->mReportPairIndex == INVALID_REPORT_PAIR_ID); PX_ASSERT(!si->readFlag(ShapeInteraction::IS_IN_PERSISTENT_EVENT_LIST)); PX_ASSERT(!si->readFlag(ShapeInteraction::IS_IN_FORCE_THRESHOLD_EVENT_LIST)); PX_ASSERT(si->hasTouch()); si->raiseFlag(ShapeInteraction::IS_IN_FORCE_THRESHOLD_EVENT_LIST); si->mReportPairIndex = mForceThresholdContactEventPairList.size(); mForceThresholdContactEventPairList.pushBack(si); } void NPhaseCore::removeFromForceThresholdContactEventPairs(ShapeInteraction* si) { PX_ASSERT(si->getPairFlags() & ShapeInteraction::CONTACT_FORCE_THRESHOLD_PAIRS); PX_ASSERT(si->readFlag(ShapeInteraction::IS_IN_FORCE_THRESHOLD_EVENT_LIST)); PX_ASSERT(!si->readFlag(ShapeInteraction::IS_IN_PERSISTENT_EVENT_LIST)); PX_ASSERT(si->hasTouch()); const PxU32 index = si->mReportPairIndex; PX_ASSERT(index != INVALID_REPORT_PAIR_ID); si->clearFlag(ShapeInteraction::IS_IN_FORCE_THRESHOLD_EVENT_LIST); si->mReportPairIndex = INVALID_REPORT_PAIR_ID; mForceThresholdContactEventPairList.replaceWithLast(index); if(index < mForceThresholdContactEventPairList.size()) // Only adjust the index if the removed SIP was not at the end of the list mForceThresholdContactEventPairList[index]->mReportPairIndex = index; } PxU8* NPhaseCore::reserveContactReportPairData(PxU32 pairCount, PxU32 extraDataSize, PxU32& bufferIndex, ContactReportAllocationManager* alloc) { 
extraDataSize = ContactStreamManager::computeExtraDataBlockSize(extraDataSize); return alloc ? alloc->allocate(extraDataSize + (pairCount * sizeof(ContactShapePair)), bufferIndex) : mContactReportBuffer.allocateNotThreadSafe(extraDataSize + (pairCount * sizeof(ContactShapePair)), bufferIndex); } PxU8* NPhaseCore::resizeContactReportPairData(PxU32 pairCount, PxU32 extraDataSize, ContactStreamManager& csm) { PX_ASSERT((pairCount > csm.maxPairCount) || (extraDataSize > csm.getMaxExtraDataSize())); PX_ASSERT((csm.currentPairCount == csm.maxPairCount) || (extraDataSize > csm.getMaxExtraDataSize())); PX_ASSERT(extraDataSize >= csm.getMaxExtraDataSize()); // we do not support stealing memory from the extra data part when the memory for pair info runs out PxU32 bufferIndex; PxPrefetch(mContactReportBuffer.getData(csm.bufferIndex)); extraDataSize = ContactStreamManager::computeExtraDataBlockSize(extraDataSize); PxU8* stream = mContactReportBuffer.reallocateNotThreadSafe(extraDataSize + (pairCount * sizeof(ContactShapePair)), bufferIndex, 16, csm.bufferIndex); PxU8* oldStream = mContactReportBuffer.getData(csm.bufferIndex); if(stream) { const PxU32 maxExtraDataSize = csm.getMaxExtraDataSize(); if(csm.bufferIndex != bufferIndex) { if(extraDataSize <= maxExtraDataSize) PxMemCopy(stream, oldStream, maxExtraDataSize + (csm.currentPairCount * sizeof(ContactShapePair))); else { PxMemCopy(stream, oldStream, csm.extraDataSize); PxMemCopy(stream + extraDataSize, oldStream + maxExtraDataSize, csm.currentPairCount * sizeof(ContactShapePair)); } csm.bufferIndex = bufferIndex; } else if(extraDataSize > maxExtraDataSize) PxMemMove(stream + extraDataSize, oldStream + maxExtraDataSize, csm.currentPairCount * sizeof(ContactShapePair)); if(pairCount > csm.maxPairCount) csm.maxPairCount = PxTo16(pairCount); if(extraDataSize > maxExtraDataSize) csm.setMaxExtraDataSize(extraDataSize); } return stream; } ActorPairContactReportData* NPhaseCore::createActorPairContactReportData() { PxMutex::ScopedLock lock(mReportAllocLock); return mActorPairContactReportDataPool.construct(); } void NPhaseCore::releaseActorPairContactReportData(ActorPairContactReportData* data) { mActorPairContactReportDataPool.destroy(data); }
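// --- Illustrative sketch (not part of the PhysX sources above) ---------------------------------
// The trigger pipeline in ScNPhaseCore.cpp surfaces to applications as PxTriggerPair records
// delivered through the public PxSimulationEventCallback interface. The class below is a
// hypothetical consumer (the name MyTriggerCallback and the empty overrides are assumptions);
// it relies only on the public API and on the eNOTIFY_TOUCH_FOUND / eNOTIFY_TOUCH_LOST statuses
// written by findTriggerContacts(). Register such a callback via PxSceneDesc::simulationEventCallback.
#include "PxPhysicsAPI.h"

class MyTriggerCallback : public physx::PxSimulationEventCallback
{
public:
	virtual void onTrigger(physx::PxTriggerPair* pairs, physx::PxU32 count)
	{
		using namespace physx;
		for(PxU32 i = 0; i < count; i++)
		{
			const PxTriggerPair& p = pairs[i];

			// Shapes deleted while the overlap was alive are flagged so their pointers are not
			// dereferenced (this is what the internal eTEST_FOR_REMOVED_SHAPES handling feeds).
			if(p.flags & (PxTriggerPairFlag::eREMOVED_SHAPE_TRIGGER | PxTriggerPairFlag::eREMOVED_SHAPE_OTHER))
				continue;

			if(p.status & PxPairFlag::eNOTIFY_TOUCH_FOUND)
			{
				// p.otherShape started overlapping p.triggerShape
			}
			else if(p.status & PxPairFlag::eNOTIFY_TOUCH_LOST)
			{
				// the overlap ended
			}
		}
	}

	// Remaining pure virtuals of PxSimulationEventCallback, left empty for brevity.
	virtual void onConstraintBreak(physx::PxConstraintInfo*, physx::PxU32)	{}
	virtual void onWake(physx::PxActor**, physx::PxU32)						{}
	virtual void onSleep(physx::PxActor**, physx::PxU32)					{}
	virtual void onContact(const physx::PxContactPairHeader&, const physx::PxContactPair*, physx::PxU32)	{}
	virtual void onAdvance(const physx::PxRigidBody*const*, const physx::PxTransform*, const physx::PxU32)	{}
};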
NVIDIA-Omniverse/PhysX/physx/source/simulationcontroller/src/ScFiltering.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//  * Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//  * Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//  * Neither the name of NVIDIA CORPORATION nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.

#ifndef SC_FILTERING_H
#define SC_FILTERING_H

#include "PxFiltering.h"

namespace physx
{
namespace Sc
{
	struct FilterInfo
	{
		PX_FORCE_INLINE	FilterInfo() : filterFlags(0), pairFlags(0), hasPairID(false)	{}
		PX_FORCE_INLINE	FilterInfo(PxFilterFlags filterFlags_) : filterFlags(filterFlags_), pairFlags(0), hasPairID(false)	{}

		PxFilterFlags	filterFlags;
		PxPairFlags		pairFlags;
		bool			hasPairID;
	};
}
}

#endif
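// --- Illustrative sketch (not part of the PhysX sources above) ---------------------------------
// Sc::FilterInfo mirrors what the application's filter shader returns: PxFilterFlags plus the
// PxPairFlags that drive the trigger and touch-report paths in ScNPhaseCore.cpp. The function
// below is a minimal example assuming the public PxSimulationFilterShader signature; the flag
// choices are illustrative, not the engine default.
#include "PxPhysicsAPI.h"

static physx::PxFilterFlags exampleFilterShader(
	physx::PxFilterObjectAttributes attributes0, physx::PxFilterData filterData0,
	physx::PxFilterObjectAttributes attributes1, physx::PxFilterData filterData1,
	physx::PxPairFlags& pairFlags, const void* constantBlock, physx::PxU32 constantBlockSize)
{
	using namespace physx;
	PX_UNUSED(filterData0);		PX_UNUSED(filterData1);
	PX_UNUSED(constantBlock);	PX_UNUSED(constantBlockSize);

	// Trigger pairs: detection only, no contact response (handled by the trigger path above).
	if(PxFilterObjectIsTrigger(attributes0) || PxFilterObjectIsTrigger(attributes1))
	{
		pairFlags = PxPairFlag::eTRIGGER_DEFAULT;
		return PxFilterFlag::eDEFAULT;
	}

	// Regular pairs: contacts plus the touch notifications consumed by the report code above.
	// eNOTIFY_TOUCH_PERSISTS relies on the persistent-pair bookkeeping in NPhaseCore.
	pairFlags = PxPairFlag::eCONTACT_DEFAULT
			  | PxPairFlag::eNOTIFY_TOUCH_FOUND
			  | PxPairFlag::eNOTIFY_TOUCH_LOST
			  | PxPairFlag::eNOTIFY_TOUCH_PERSISTS;
	return PxFilterFlag::eDEFAULT;
}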
NVIDIA-Omniverse/PhysX/physx/source/simulationcontroller/src/ScCCD.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #include "common/PxProfileZone.h" #include "ScBodySim.h" #include "ScShapeSim.h" #include "ScArticulationSim.h" #include "ScScene.h" using namespace physx; using namespace Sc; /////////////////////////////////////////////////////////////////////////////// void BodySim::addToSpeculativeCCDMap() { if(mNodeIndex.isValid()) { if(isArticulationLink()) mScene.setSpeculativeCCDArticulationLink(mNodeIndex.index()); else mScene.setSpeculativeCCDRigidBody(mNodeIndex.index()); } } void BodySim::removeFromSpeculativeCCDMap() { if(mNodeIndex.isValid()) { if(isArticulationLink()) mScene.resetSpeculativeCCDArticulationLink(mNodeIndex.index()); else mScene.resetSpeculativeCCDRigidBody(mNodeIndex.index()); } } // PT: TODO: consider using a non-member function for this one void BodySim::updateContactDistance(PxReal* contactDistance, PxReal dt, const Bp::BoundsArray& boundsArray) { const PxsRigidBody& llBody = getLowLevelBody(); const PxRigidBodyFlags flags = llBody.getCore().mFlags; // PT: TODO: no need to test eENABLE_SPECULATIVE_CCD if we parsed mSpeculativeCCDRigidBodyBitMap initially if((flags & PxRigidBodyFlag::eENABLE_SPECULATIVE_CCD) && !(llBody.mInternalFlags & PxsRigidBody::eFROZEN)) { // PT: if both CCD flags are enabled we're in "hybrid mode" and we only use speculative contacts for the angular part const PxReal linearInflation = (flags & PxRigidBodyFlag::eENABLE_CCD) ? 0.0f : llBody.getLinearVelocity().magnitude() * dt; const float angVelMagTimesDt = llBody.getAngularVelocity().magnitude() * dt; PxU32 nbElems = getNbElements(); ElementSim** elems = getElements(); while(nbElems--) { ShapeSim* current = static_cast<ShapeSim*>(*elems++); const PxU32 index = current->getElementID(); const PxBounds3& bounds = boundsArray.getBounds(index); const PxReal radius = bounds.getExtents().magnitude(); //Heuristic for angular velocity... 
const PxReal angularInflation = angVelMagTimesDt * radius; contactDistance[index] = linearInflation + current->getContactOffset() + angularInflation; } } } void Sc::ArticulationSim::updateContactDistance(PxReal* contactDistance, PxReal dt, const Bp::BoundsArray& boundsArray) { const PxU32 size = mBodies.size(); for(PxU32 i=0; i<size; i++) mBodies[i]->updateContactDistance(contactDistance, dt, boundsArray); } namespace { class SpeculativeCCDBaseTask : public Cm::Task { PX_NOCOPY(SpeculativeCCDBaseTask) public: const Bp::BoundsArray& mBoundsArray; float* mContactDistances; const float mDt; SpeculativeCCDBaseTask(PxU64 contextID, const Bp::BoundsArray& boundsArray, PxReal* contactDistances, PxReal dt) : Cm::Task (contextID), mBoundsArray (boundsArray), mContactDistances (contactDistances), mDt (dt) {} }; class SpeculativeCCDContactDistanceUpdateTask : public SpeculativeCCDBaseTask { public: static const PxU32 MaxBodies = 128; BodySim* mBodySims[MaxBodies]; PxU32 mNbBodies; SpeculativeCCDContactDistanceUpdateTask(PxU64 contextID, PxReal* contactDistances, PxReal dt, const Bp::BoundsArray& boundsArray) : SpeculativeCCDBaseTask (contextID, boundsArray, contactDistances, dt), mNbBodies (0) {} virtual void runInternal() { const PxU32 nb = mNbBodies; for(PxU32 i=0; i<nb; i++) mBodySims[i]->updateContactDistance(mContactDistances, mDt, mBoundsArray); } virtual const char* getName() const { return "SpeculativeCCDContactDistanceUpdateTask"; } private: PX_NOCOPY(SpeculativeCCDContactDistanceUpdateTask) }; class SpeculativeCCDContactDistanceArticulationUpdateTask : public SpeculativeCCDBaseTask { public: ArticulationSim* mArticulation; SpeculativeCCDContactDistanceArticulationUpdateTask(PxU64 contextID, PxReal* contactDistances, PxReal dt, const Bp::BoundsArray& boundsArray, ArticulationSim* sim) : SpeculativeCCDBaseTask (contextID, boundsArray, contactDistances, dt), mArticulation (sim) {} virtual void runInternal() { mArticulation->updateContactDistance(mContactDistances, mDt, mBoundsArray); } virtual const char* getName() const { return "SpeculativeCCDContactDistanceArticulationUpdateTask"; } private: PX_NOCOPY(SpeculativeCCDContactDistanceArticulationUpdateTask) }; } static SpeculativeCCDContactDistanceUpdateTask* createCCDTask(Cm::FlushPool& pool, PxU64 contextID, PxReal* contactDistances, PxReal dt, const Bp::BoundsArray& boundsArray) { return PX_PLACEMENT_NEW(pool.allocate(sizeof(SpeculativeCCDContactDistanceUpdateTask)), SpeculativeCCDContactDistanceUpdateTask)(contextID, contactDistances, dt, boundsArray); } void Sc::Scene::updateContactDistances(PxBaseTask* continuation) { PX_PROFILE_ZONE("Scene.updateContactDistances", mContextId); Cm::FlushPool& pool = mLLContext->getTaskPool(); IG::IslandSim& islandSim = mSimpleIslandManager->getAccurateIslandSim(); bool hasContactDistanceChanged = mHasContactDistanceChanged; // PT: TODO: it is quite unfortunate that we cannot shortcut parsing the bitmaps. Consider switching to arrays. // We remove sleeping bodies from the map but we never shrink it.... // PT: TODO: why do we need to involve the island manager here? // PT: TODO: why do we do that on sleeping bodies? Why don't we use mActiveBodies? 
// PxArray<BodyCore*> mActiveBodies; // Sorted: kinematic before dynamic // ===> because we remove bodies from the bitmap in BodySim::deactivate() //calculate contact distance for speculative CCD shapes if(1) { PxBitMap::Iterator speculativeCCDIter(mSpeculativeCCDRigidBodyBitMap); SpeculativeCCDContactDistanceUpdateTask* ccdTask = createCCDTask(pool, mContextId, mContactDistance->begin(), mDt, *mBoundsArray); PxBitMapPinned& changedMap = mAABBManager->getChangedAABBMgActorHandleMap(); const size_t bodyOffset = PX_OFFSET_OF_RT(BodySim, getLowLevelBody()); //printf("\n"); //PxU32 count = 0; PxU32 nbBodies = 0; PxU32 index; while((index = speculativeCCDIter.getNext()) != PxBitMap::Iterator::DONE) { PxsRigidBody* rigidBody = islandSim.getRigidBody(PxNodeIndex(index)); BodySim* bodySim = reinterpret_cast<BodySim*>(reinterpret_cast<PxU8*>(rigidBody)-bodyOffset); if(bodySim) { //printf("%d\n", bodySim->getActiveListIndex()); //printf("%d: %d\n", count++, bodySim->isActive()); hasContactDistanceChanged = true; ccdTask->mBodySims[nbBodies++] = bodySim; // PT: ### changedMap pattern #1 // PT: TODO: isn't there a problem here? The task function will only touch the shapes whose body has the // speculative flag and isn't frozen, but here we mark all shapes as changed no matter what. // // Also we test some bodySim data and one bit of each ShapeSim here, not great. PxU32 nbElems = bodySim->getNbElements(); ElementSim** elems = bodySim->getElements(); while(nbElems--) { ShapeSim* sim = static_cast<ShapeSim*>(*elems++); if(sim->getFlags() & PxShapeFlag::eSIMULATION_SHAPE) changedMap.growAndSet(sim->getElementID()); } // PT: TODO: better load balancing? if(nbBodies == SpeculativeCCDContactDistanceUpdateTask::MaxBodies) { ccdTask->mNbBodies = nbBodies; nbBodies = 0; startTask(ccdTask, continuation); if(continuation) ccdTask = createCCDTask(pool, mContextId, mContactDistance->begin(), mDt, *mBoundsArray); else ccdTask->mNbBodies = 0; // PT: no need to create a new task in single-threaded mode } } } if(nbBodies) { ccdTask->mNbBodies = nbBodies; startTask(ccdTask, continuation); } } /* else { // PT: codepath without mSpeculativeCCDRigidBodyBitMap PxU32 nb = mActiveBodies.size(); BodyCore** bodies = mActiveBodies.begin(); while(nb--) { const BodyCore* current = *bodies++; BodySim* bodySim = current->getSim(); if(bodySim) { ... 
} } }*/ //calculate contact distance for articulation links { PxBitMap::Iterator articulateCCDIter(mSpeculativeCDDArticulationBitMap); PxU32 index; while((index = articulateCCDIter.getNext()) != PxBitMap::Iterator::DONE) { ArticulationSim* articulationSim = islandSim.getArticulationSim(PxNodeIndex(index)); if(articulationSim) { hasContactDistanceChanged = true; if(continuation) { SpeculativeCCDContactDistanceArticulationUpdateTask* articulationUpdateTask = PX_PLACEMENT_NEW(pool.allocate(sizeof(SpeculativeCCDContactDistanceArticulationUpdateTask)), SpeculativeCCDContactDistanceArticulationUpdateTask)(mContextId, mContactDistance->begin(), mDt, *mBoundsArray, articulationSim); articulationUpdateTask->setContinuation(continuation); articulationUpdateTask->removeReference(); } else { articulationSim->updateContactDistance(mContactDistance->begin(), mDt, *mBoundsArray); } } } } mHasContactDistanceChanged = hasContactDistanceChanged; } /////////////////////////////////////////////////////////////////////////////// #include "ScNPhaseCore.h" #include "ScShapeInteraction.h" #include "PxsCCD.h" #include "PxsSimulationController.h" #include "CmTransformUtils.h" void Sc::Scene::setCCDContactModifyCallback(PxCCDContactModifyCallback* callback) { mCCDContext->setCCDContactModifyCallback(callback); } PxCCDContactModifyCallback* Sc::Scene::getCCDContactModifyCallback() const { return mCCDContext->getCCDContactModifyCallback(); } void Sc::Scene::setCCDMaxPasses(PxU32 ccdMaxPasses) { mCCDContext->setCCDMaxPasses(ccdMaxPasses); } PxU32 Sc::Scene::getCCDMaxPasses() const { return mCCDContext->getCCDMaxPasses(); } void Sc::Scene::setCCDThreshold(PxReal t) { mCCDContext->setCCDThreshold(t); } PxReal Sc::Scene::getCCDThreshold() const { return mCCDContext->getCCDThreshold(); } void Sc::Scene::collectPostSolverVelocitiesBeforeCCD() { if(mContactReportsNeedPostSolverVelocity) { ActorPairReport*const* actorPairs = mNPhaseCore->getContactReportActorPairs(); PxU32 nbActorPairs = mNPhaseCore->getNbContactReportActorPairs(); for(PxU32 i=0; i < nbActorPairs; i++) { if(i < (nbActorPairs - 1)) PxPrefetchLine(actorPairs[i+1]); ActorPairReport* aPair = actorPairs[i]; ContactStreamManager& cs = aPair->getContactStreamManager(); PxU32 streamManagerFlag = cs.getFlags(); if(streamManagerFlag & ContactStreamManagerFlag::eINVALID_STREAM) continue; PxU8* stream = mNPhaseCore->getContactReportPairData(cs.bufferIndex); if(i + 1 < nbActorPairs) PxPrefetch(&(actorPairs[i+1]->getContactStreamManager())); if(!cs.extraDataSize) continue; else if (streamManagerFlag & ContactStreamManagerFlag::eNEEDS_POST_SOLVER_VELOCITY) cs.setContactReportPostSolverVelocity(stream, aPair->getActorA(), aPair->getActorB()); } } } void Sc::Scene::updateCCDMultiPass(PxBaseTask* parentContinuation) { getCcdBodies().forceSize_Unsafe(mSimulationControllerCallback->getNbCcdBodies()); // second run of the broadphase for making sure objects we have integrated did not tunnel. if(mPublicFlags & PxSceneFlag::eENABLE_CCD) { if(mContactReportsNeedPostSolverVelocity) { // the CCD code will overwrite the post solver body velocities, hence, we need to extract the info // first if any CCD enabled pair requested it. 
collectPostSolverVelocitiesBeforeCCD(); } //We use 2 CCD task chains to be able to chain together an arbitrary number of ccd passes if(mPostCCDPass.size() != 2) { mPostCCDPass.clear(); mUpdateCCDSinglePass.clear(); mCCDBroadPhase.clear(); mCCDBroadPhaseAABB.clear(); mPostCCDPass.reserve(2); mUpdateCCDSinglePass.reserve(2); mUpdateCCDSinglePass2.reserve(2); mUpdateCCDSinglePass3.reserve(2); mCCDBroadPhase.reserve(2); mCCDBroadPhaseAABB.reserve(2); for (int j = 0; j < 2; j++) { mPostCCDPass.pushBack(Cm::DelegateTask<Sc::Scene, &Sc::Scene::postCCDPass>(mContextId, this, "ScScene.postCCDPass")); mUpdateCCDSinglePass.pushBack(Cm::DelegateTask<Sc::Scene, &Sc::Scene::updateCCDSinglePass>(mContextId, this, "ScScene.updateCCDSinglePass")); mUpdateCCDSinglePass2.pushBack(Cm::DelegateTask<Sc::Scene, &Sc::Scene::updateCCDSinglePassStage2>(mContextId, this, "ScScene.updateCCDSinglePassStage2")); mUpdateCCDSinglePass3.pushBack(Cm::DelegateTask<Sc::Scene, &Sc::Scene::updateCCDSinglePassStage3>(mContextId, this, "ScScene.updateCCDSinglePassStage3")); mCCDBroadPhase.pushBack(Cm::DelegateTask<Sc::Scene, &Sc::Scene::ccdBroadPhase>(mContextId, this, "ScScene.ccdBroadPhase")); mCCDBroadPhaseAABB.pushBack(Cm::DelegateTask<Sc::Scene, &Sc::Scene::ccdBroadPhaseAABB>(mContextId, this, "ScScene.ccdBroadPhaseAABB")); } } //reset thread context in a place we know all tasks possibly accessing it, are in sync with. (see US6664) mLLContext->resetThreadContexts(); mCCDContext->updateCCDBegin(); mCCDBroadPhase[0].setContinuation(parentContinuation); mCCDBroadPhaseAABB[0].setContinuation(&mCCDBroadPhase[0]); mCCDBroadPhase[0].removeReference(); mCCDBroadPhaseAABB[0].removeReference(); } } namespace { class UpdateCCDBoundsTask : public Cm::Task { Bp::BoundsArray* mBoundArray; PxsTransformCache* mTransformCache; BodySim** mBodySims; PxU32 mNbToProcess; PxI32* mNumFastMovingShapes; public: static const PxU32 MaxPerTask = 256; UpdateCCDBoundsTask(PxU64 contextID, Bp::BoundsArray* boundsArray, PxsTransformCache* transformCache, BodySim** bodySims, PxU32 nbToProcess, PxI32* numFastMovingShapes) : Cm::Task (contextID), mBoundArray (boundsArray), mTransformCache (transformCache), mBodySims (bodySims), mNbToProcess (nbToProcess), mNumFastMovingShapes(numFastMovingShapes) { } virtual const char* getName() const { return "UpdateCCDBoundsTask";} PxIntBool updateSweptBounds(ShapeSim* sim, BodySim* body) { PX_ASSERT(body==sim->getBodySim()); const PxU32 elementID = sim->getElementID(); const ShapeCore& shapeCore = sim->getCore(); const PxTransform& endPose = mTransformCache->getTransformCache(elementID).transform; const PxGeometry& shapeGeom = shapeCore.getGeometry(); const PxsRigidBody& rigidBody = body->getLowLevelBody(); const PxsBodyCore& bodyCore = body->getBodyCore().getCore(); PX_ALIGN(16, PxTransform shape2World); Cm::getDynamicGlobalPoseAligned(rigidBody.mLastTransform, shapeCore.getShape2Actor(), bodyCore.getBody2Actor(), shape2World); const float ccdThreshold = computeCCDThreshold(shapeGeom); PxBounds3 bounds = Gu::computeBounds(shapeGeom, endPose); PxIntBool isFastMoving; if(1) { // PT: this alternative implementation avoids computing the start bounds for slow moving objects. isFastMoving = (shape2World.p - endPose.p).magnitudeSquared() >= ccdThreshold * ccdThreshold ? 
1 : 0; if(isFastMoving) { const PxBounds3 startBounds = Gu::computeBounds(shapeGeom, shape2World); bounds.include(startBounds); } } else { const PxBounds3 startBounds = Gu::computeBounds(shapeGeom, shape2World); isFastMoving = (startBounds.getCenter() - bounds.getCenter()).magnitudeSquared() >= ccdThreshold * ccdThreshold ? 1 : 0; if(isFastMoving) bounds.include(startBounds); } PX_ASSERT(bounds.minimum.x <= bounds.maximum.x && bounds.minimum.y <= bounds.maximum.y && bounds.minimum.z <= bounds.maximum.z); mBoundArray->setBounds(bounds, elementID); return isFastMoving; } virtual void runInternal() { PxU32 activeShapes = 0; const PxU32 nb = mNbToProcess; for(PxU32 i=0; i<nb; i++) { PxU32 isFastMoving = 0; BodySim& bodySim = *mBodySims[i]; PxU32 nbElems = bodySim.getNbElements(); ElementSim** elems = bodySim.getElements(); while(nbElems--) { ShapeSim* sim = static_cast<ShapeSim*>(*elems++); if(sim->getFlags() & PxU32(PxShapeFlag::eSIMULATION_SHAPE | PxShapeFlag::eTRIGGER_SHAPE)) { const PxIntBool fastMovingShape = updateSweptBounds(sim, &bodySim); activeShapes += fastMovingShape; isFastMoving = isFastMoving | fastMovingShape; } } bodySim.getLowLevelBody().getCore().isFastMoving = isFastMoving!=0; } PxAtomicAdd(mNumFastMovingShapes, PxI32(activeShapes)); } }; } void Sc::Scene::ccdBroadPhaseAABB(PxBaseTask* continuation) { PX_PROFILE_START_CROSSTHREAD("Sim.ccdBroadPhaseComplete", mContextId); PX_PROFILE_ZONE("Sim.ccdBroadPhaseAABB", mContextId); PX_UNUSED(continuation); PxU32 currentPass = mCCDContext->getCurrentCCDPass(); Cm::FlushPool& flushPool = mLLContext->getTaskPool(); mNumFastMovingShapes = 0; //If we are on the 1st pass or we had some sweep hits previous CCD pass, we need to run CCD again if(currentPass == 0 || mCCDContext->getNumSweepHits()) { PxsTransformCache& transformCache = getLowLevelContext()->getTransformCache(); for(PxU32 i = 0; i < mCcdBodies.size(); i+= UpdateCCDBoundsTask::MaxPerTask) { const PxU32 nbToProcess = PxMin(UpdateCCDBoundsTask::MaxPerTask, mCcdBodies.size() - i); UpdateCCDBoundsTask* task = PX_PLACEMENT_NEW(flushPool.allocate(sizeof(UpdateCCDBoundsTask)), UpdateCCDBoundsTask)(mContextId, mBoundsArray, &transformCache, &mCcdBodies[i], nbToProcess, &mNumFastMovingShapes); task->setContinuation(continuation); task->removeReference(); } } } void Sc::Scene::ccdBroadPhase(PxBaseTask* continuation) { PX_PROFILE_ZONE("Sim.ccdBroadPhase", mContextId); PxU32 currentPass = mCCDContext->getCurrentCCDPass(); const PxU32 ccdMaxPasses = mCCDContext->getCCDMaxPasses(); mCCDPass = currentPass+1; //If we are on the 1st pass or we had some sweep hits previous CCD pass, we need to run CCD again if( (currentPass == 0 || mCCDContext->getNumSweepHits()) && mNumFastMovingShapes != 0) { const PxU32 currIndex = currentPass & 1; const PxU32 nextIndex = 1 - currIndex; //Initialize the CCD task chain unless this is the final pass if(currentPass != (ccdMaxPasses - 1)) { mCCDBroadPhase[nextIndex].setContinuation(continuation); mCCDBroadPhaseAABB[nextIndex].setContinuation(&mCCDBroadPhase[nextIndex]); } mPostCCDPass[currIndex].setContinuation(currentPass == ccdMaxPasses-1 ? 
continuation : &mCCDBroadPhaseAABB[nextIndex]); mUpdateCCDSinglePass3[currIndex].setContinuation(&mPostCCDPass[currIndex]); mUpdateCCDSinglePass2[currIndex].setContinuation(&mUpdateCCDSinglePass3[currIndex]); mUpdateCCDSinglePass[currIndex].setContinuation(&mUpdateCCDSinglePass2[currIndex]); //Do the actual broad phase PxBaseTask* continuationTask = &mUpdateCCDSinglePass[currIndex]; // const PxU32 numCpuTasks = continuationTask->getTaskManager()->getCpuDispatcher()->getWorkerCount(); mCCDBp = true; mBpSecondPass.setContinuation(continuationTask); mBpFirstPass.setContinuation(&mBpSecondPass); mBpSecondPass.removeReference(); mBpFirstPass.removeReference(); //mAABBManager->updateAABBsAndBP(numCpuTasks, mLLContext->getTaskPool(), &mLLContext->getScratchAllocator(), false, continuationTask, NULL); //Allow the CCD task chain to continue mPostCCDPass[currIndex].removeReference(); mUpdateCCDSinglePass3[currIndex].removeReference(); mUpdateCCDSinglePass2[currIndex].removeReference(); mUpdateCCDSinglePass[currIndex].removeReference(); if(currentPass != (ccdMaxPasses - 1)) { mCCDBroadPhase[nextIndex].removeReference(); mCCDBroadPhaseAABB[nextIndex].removeReference(); } } else if (currentPass == 0) { PX_PROFILE_STOP_CROSSTHREAD("Sim.ccdBroadPhaseComplete", mContextId); mCCDContext->resetContactManagers(); } } void Sc::Scene::updateCCDSinglePass(PxBaseTask* continuation) { PX_PROFILE_ZONE("Sim.updateCCDSinglePass", mContextId); mReportShapePairTimeStamp++; // This will makes sure that new report pairs will get created instead of re-using the existing ones. mAABBManager->postBroadPhase(NULL, *getFlushPool()); finishBroadPhase(continuation); const PxU32 currentPass = mCCDContext->getCurrentCCDPass() + 1; // 0 is reserved for discrete collision phase if(currentPass == 1) // reset the handle map so we only update CCD objects from here on { PxBitMapPinned& changedAABBMgrActorHandles = mAABBManager->getChangedAABBMgActorHandleMap(); //changedAABBMgrActorHandles.clear(); for(PxU32 i = 0; i < mCcdBodies.size();i++) { // PT: ### changedMap pattern #1 PxU32 nbElems = mCcdBodies[i]->getNbElements(); ElementSim** elems = mCcdBodies[i]->getElements(); while(nbElems--) { ShapeSim* sim = static_cast<ShapeSim*>(*elems++); if(sim->getFlags()&PxU32(PxShapeFlag::eSIMULATION_SHAPE | PxShapeFlag::eTRIGGER_SHAPE)) // TODO: need trigger shape here? changedAABBMgrActorHandles.growAndSet(sim->getElementID()); } } } } void Sc::Scene::updateCCDSinglePassStage2(PxBaseTask* continuation) { PX_PROFILE_ZONE("Sim.updateCCDSinglePassStage2", mContextId); postBroadPhaseStage2(continuation); } void Sc::Scene::updateCCDSinglePassStage3(PxBaseTask* continuation) { PX_PROFILE_ZONE("Sim.updateCCDSinglePassStage3", mContextId); mReportShapePairTimeStamp++; // This will makes sure that new report pairs will get created instead of re-using the existing ones. const PxU32 currentPass = mCCDContext->getCurrentCCDPass() + 1; // 0 is reserved for discrete collision phase finishBroadPhaseStage2(currentPass); PX_PROFILE_STOP_CROSSTHREAD("Sim.ccdBroadPhaseComplete", mContextId); //reset thread context in a place we know all tasks possibly accessing it, are in sync with. 
	// (see US6664)
	mLLContext->resetThreadContexts();

	mCCDContext->updateCCD(mDt, continuation, mSimpleIslandManager->getAccurateIslandSim(), (mPublicFlags & PxSceneFlag::eDISABLE_CCD_RESWEEP), mNumFastMovingShapes);
}

static PX_FORCE_INLINE Sc::ShapeInteraction* getSI(PxvContactManagerTouchEvent& evt)
{
	return reinterpret_cast<Sc::ShapeInteraction*>(evt.getCMTouchEventUserData());
}

void Sc::Scene::postCCDPass(PxBaseTask* /*continuation*/)
{
	// - Performs sleep check
	// - Updates touch flags

	PxU32 currentPass = mCCDContext->getCurrentCCDPass();
	PX_ASSERT(currentPass > 0);	// to make sure changes to the CCD pass counting get noticed. For contact reports, 0 means discrete collision phase.

	int newTouchCount, lostTouchCount, ccdTouchCount;
	mLLContext->getManagerTouchEventCount(&newTouchCount, &lostTouchCount, &ccdTouchCount);

	PX_ALLOCA(newTouches, PxvContactManagerTouchEvent, newTouchCount);
	PX_ALLOCA(lostTouches, PxvContactManagerTouchEvent, lostTouchCount);
	PX_ALLOCA(ccdTouches, PxvContactManagerTouchEvent, ccdTouchCount);

	PxsContactManagerOutputIterator outputs = mLLContext->getNphaseImplementationContext()->getContactManagerOutputs();

	// Note: For contact notifications it is important that the new touch pairs get processed before the lost touch pairs.
	// This allows to know for sure if a pair of actors lost all touch (see eACTOR_PAIR_LOST_TOUCH).
	mLLContext->fillManagerTouchEvents(newTouches, newTouchCount, lostTouches, lostTouchCount, ccdTouches, ccdTouchCount);

	for(PxI32 i=0; i<newTouchCount; ++i)
	{
		ShapeInteraction* si = getSI(newTouches[i]);
		PX_ASSERT(si);
		mNPhaseCore->managerNewTouch(*si);
		si->managerNewTouch(currentPass, true, outputs);
		if (!si->readFlag(ShapeInteraction::CONTACTS_RESPONSE_DISABLED))
		{
			mSimpleIslandManager->setEdgeConnected(si->getEdgeIndex(), IG::Edge::eCONTACT_MANAGER);
		}
	}

	for(PxI32 i=0; i<lostTouchCount; ++i)
	{
		ShapeInteraction* si = getSI(lostTouches[i]);
		PX_ASSERT(si);
		if (si->managerLostTouch(currentPass, true, outputs) && !si->readFlag(ShapeInteraction::CONTACTS_RESPONSE_DISABLED))
			addToLostTouchList(si->getShape0().getActor(), si->getShape1().getActor());

		mSimpleIslandManager->setEdgeDisconnected(si->getEdgeIndex());
	}

	for(PxI32 i=0; i<ccdTouchCount; ++i)
	{
		ShapeInteraction* si = getSI(ccdTouches[i]);
		PX_ASSERT(si);
		si->sendCCDRetouch(currentPass, outputs);
	}

	checkForceThresholdContactEvents(currentPass);

	{
		PxBitMapPinned& changedAABBMgrActorHandles = mAABBManager->getChangedAABBMgActorHandleMap();

		for (PxU32 i = 0, s = mCcdBodies.size(); i < s; i++)
		{
			BodySim*const body = mCcdBodies[i];
			if(i+8 < s)
				PxPrefetch(mCcdBodies[i+8], 512);

			PX_ASSERT(body->getBody2World().p.isFinite());
			PX_ASSERT(body->getBody2World().q.isFinite());

			body->updateCached(&changedAABBMgrActorHandles);
		}

		ArticulationCore* const* articList = mArticulations.getEntries();
		for(PxU32 i=0;i<mArticulations.size();i++)
			articList[i]->getSim()->updateCached(&changedAABBMgrActorHandles);
	}
}
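// --- Illustrative sketch (not part of the PhysX sources above) ---------------------------------
// Distilled form of the speculative-CCD inflation heuristic from BodySim::updateContactDistance():
// the per-shape contact distance is the contact offset plus a linear term |v|*dt (dropped when
// sweep-based CCD is also enabled) and an angular term |w|*dt*radius. This is a free function with
// plain inputs for illustration; the engine reads these values from PxsRigidBody / ShapeSim instead.
#include "foundation/PxVec3.h"

static float exampleSpeculativeContactDistance(const physx::PxVec3& linVel, const physx::PxVec3& angVel,
											   float dt, float shapeBoundsRadius, float contactOffset,
											   bool sweepCCDAlsoEnabled)
{
	// Hybrid mode (eENABLE_CCD + eENABLE_SPECULATIVE_CCD): only the angular part inflates the bounds,
	// since linear motion is covered by the sweep-based CCD pass.
	const float linearInflation = sweepCCDAlsoEnabled ? 0.0f : linVel.magnitude() * dt;

	// Heuristic for angular velocity: a point at distance 'radius' can move by roughly |w| * dt * radius.
	const float angularInflation = angVel.magnitude() * dt * shapeBoundsRadius;

	return contactOffset + linearInflation + angularInflation;
}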
NVIDIA-Omniverse/PhysX/physx/source/simulationcontroller/src/ScSoftBodyShapeSim.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. #include "foundation/PxPreprocessor.h" #if PX_SUPPORT_GPU_PHYSX #include "ScSoftBodyShapeSim.h" #include "ScNPhaseCore.h" #include "ScScene.h" #include "ScSoftBodySim.h" #include "PxsContext.h" #include "BpAABBManager.h" #include "geometry/PxTetrahedronMesh.h" using namespace physx; /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// Sc::SoftBodyShapeSim::SoftBodyShapeSim(SoftBodySim& softBody) : ShapeSimBase(softBody, NULL), initialTransform(PxVec3(0, 0, 0)), initialScale(1.0f) { } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// Sc::SoftBodyShapeSim::~SoftBodyShapeSim() { if (isInBroadPhase()) destroyLowLevelVolume(); PX_ASSERT(!isInBroadPhase()); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void Sc::SoftBodyShapeSim::attachShapeCore(const Sc::ShapeCore* shapeCore) { setCore(shapeCore); createLowLevelVolume(); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void Sc::SoftBodyShapeSim::getFilterInfo(PxFilterObjectAttributes& filterAttr, PxFilterData& filterData) const { filterAttr = 0; setFilterObjectAttributeType(filterAttr, PxFilterObjectType::eSOFTBODY); filterData = getBodySim().getCore().getSimulationFilterData(); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void Sc::SoftBodyShapeSim::updateBounds() { Scene& scene = 
getScene(); PxBounds3 worldBounds = getWorldBounds(); worldBounds.fattenSafe(getContactOffset()); // fatten for fast moving colliders scene.getBoundsArray().setBounds(worldBounds, getElementID()); scene.getAABBManager()->getChangedAABBMgActorHandleMap().growAndSet(getElementID()); } void Sc::SoftBodyShapeSim::updateBoundsInAABBMgr() { Scene& scene = getScene(); scene.getAABBManager()->getChangedAABBMgActorHandleMap().growAndSet(getElementID()); scene.getAABBManager()->setGPUStateChanged(); } PxBounds3 Sc::SoftBodyShapeSim::getBounds() const { PxBounds3 bounds = getScene().getBoundsArray().getBounds(getElementID()); return bounds; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void Sc::SoftBodyShapeSim::createLowLevelVolume() { PX_ASSERT(getWorldBounds().isFinite()); const PxU32 index = getElementID(); getScene().getBoundsArray().setBounds(getWorldBounds(), index); { const PxU32 group = Bp::FilterGroup::eDYNAMICS_BASE + getActor().getActorID(); const PxU32 type = Bp::FilterType::SOFTBODY; addToAABBMgr(getCore().getContactOffset(), Bp::FilterGroup::Enum((group << BP_FILTERING_TYPE_SHIFT_BIT) | type), Bp::ElementType::eSHAPE); } // PT: TODO: what's the difference between "getContactOffset()" and "getCore().getContactOffset()" above? getScene().updateContactDistance(index, getContactOffset()); PxsTransformCache& cache = getScene().getLowLevelContext()->getTransformCache(); cache.initEntry(index); PxTransform idt(PxIdentity); cache.setTransformCache(idt, 0, index); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void Sc::SoftBodyShapeSim::destroyLowLevelVolume() { if (!isInBroadPhase()) return; Sc::Scene& scene = getScene(); PxsContactManagerOutputIterator outputs = scene.getLowLevelContext()->getNphaseImplementationContext()->getContactManagerOutputs(); scene.getNPhaseCore()->onVolumeRemoved(this, PairReleaseFlag::eWAKE_ON_LOST_TOUCH, outputs); removeFromAABBMgr(); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// PxBounds3 Sc::SoftBodyShapeSim::getWorldBounds() const { const PxTetrahedronMeshGeometry& tetGeom = static_cast<const PxTetrahedronMeshGeometry&>(getCore().getGeometry()); PxTetrahedronMesh* tetMesh = tetGeom.tetrahedronMesh; PxBounds3 bounds = tetMesh->getLocalBounds(); bounds.minimum *= initialScale; bounds.maximum *= initialScale; bounds = PxBounds3::transformFast(initialTransform, bounds); return bounds; } Sc::SoftBodySim& Sc::SoftBodyShapeSim::getBodySim() const { return static_cast<SoftBodySim&>(getActor()); } #endif //PX_SUPPORT_GPU_PHYSX
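// --- Illustrative sketch (not part of the PhysX sources above) ---------------------------------
// createLowLevelVolume() above packs an actor-derived group and an element type into one
// broad-phase filter group: (group << BP_FILTERING_TYPE_SHIFT_BIT) | type. The helpers below show
// that encoding in isolation; kTypeShiftBit is a hypothetical stand-in, the real shift constant is
// defined elsewhere in the broad-phase code.
#include <cstdint>
#include <cassert>

namespace bp_filter_example
{
	static const uint32_t kTypeShiftBit = 3;			// assumption, stands in for BP_FILTERING_TYPE_SHIFT_BIT
	static const uint32_t kTypeMask = (1u << kTypeShiftBit) - 1u;

	inline uint32_t pack(uint32_t group, uint32_t type)
	{
		assert(type <= kTypeMask);						// the type enum must fit below the shift
		return (group << kTypeShiftBit) | type;
	}

	inline void unpack(uint32_t packed, uint32_t& group, uint32_t& type)
	{
		type = packed & kTypeMask;
		group = packed >> kTypeShiftBit;
	}
}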
NVIDIA-Omniverse/PhysX/physx/source/simulationcontroller/src/ScParticleSystemShapeSim.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. #ifndef SC_PARTICLESYSTEM_SHAPE_SIM_H #define SC_PARTICLESYSTEM_SHAPE_SIM_H #include "foundation/PxPreprocessor.h" #if PX_SUPPORT_GPU_PHYSX #include "PxPhysXConfig.h" #include "ScElementSim.h" #include "ScShapeSimBase.h" namespace physx { namespace Sc { class ParticleSystemSim; class ParticleSystemShapeCore; /** A collision detection primitive for particle systems. */ class ParticleSystemShapeSim : public ShapeSimBase { ParticleSystemShapeSim& operator=(const ParticleSystemShapeSim &); public: ParticleSystemShapeSim(ParticleSystemSim& particleSystem, const ParticleSystemShapeCore* core); virtual ~ParticleSystemShapeSim(); // ElementSim implementation virtual void getFilterInfo(PxFilterObjectAttributes& filterAttr, PxFilterData& filterData) const; // ~ElementSim ParticleSystemSim& getBodySim() const; void updateBounds(); void updateBoundsInAABBMgr(); PxBounds3 getBounds() const; void createLowLevelVolume(); void destroyLowLevelVolume(); }; } // namespace Sc } #endif #endif
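// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the PhysX sources): the getFilterInfo()
// implementations of these GPU shape sims write the object type into the low
// bits of the attribute word via setFilterObjectAttributeType() (declared in
// ScElementSim.h further down). The standalone model below shows that
// contract: the low bits must still be zero when the type is OR'ed in. The
// enum values and bit count are assumptions made for the example only.
#include <cassert>
#include <cstdint>

namespace filter_attr_sketch
{
	enum ObjectType : uint32_t { eRIGID = 0, eSOFTBODY = 1, ePARTICLESYSTEM = 2, eMAX_TYPE_COUNT = 8 };

	// Mirrors setFilterObjectAttributeType(): assert the type bits are unused,
	// then OR the type into the attribute word.
	inline void setObjectType(uint32_t& attr, ObjectType type)
	{
		assert((attr & (eMAX_TYPE_COUNT - 1)) == 0);
		attr |= type;
	}

	inline ObjectType getObjectType(uint32_t attr)
	{
		return ObjectType(attr & (eMAX_TYPE_COUNT - 1));
	}
}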
NVIDIA-Omniverse/PhysX/physx/source/simulationcontroller/src/ScElementSim.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SC_ELEMENT_SIM_H #define SC_ELEMENT_SIM_H #include "PxFiltering.h" #include "PxvConfig.h" #include "ScActorSim.h" #include "ScInteraction.h" #include "BpAABBManager.h" #include "ScObjectIDTracker.h" #include "ScScene.h" namespace physx { namespace Sc { class ElementSimInteraction; // A ElementSim is a part of a ActorSim. It contributes to the activation framework by adding its interactions to the actor. class ElementSim { PX_NOCOPY(ElementSim) public: class ElementInteractionIterator { public: PX_FORCE_INLINE ElementInteractionIterator(const ElementSim& e, PxU32 nbInteractions, Interaction** interactions) : mInteractions(interactions), mInteractionsLast(interactions + nbInteractions), mElement(&e) {} ElementSimInteraction* getNext(); private: Interaction** mInteractions; Interaction** mInteractionsLast; const ElementSim* mElement; }; class ElementInteractionReverseIterator { public: PX_FORCE_INLINE ElementInteractionReverseIterator(const ElementSim& e, PxU32 nbInteractions, Interaction** interactions) : mInteractions(interactions), mInteractionsLast(interactions + nbInteractions), mElement(&e) {} ElementSimInteraction* getNext(); private: Interaction** mInteractions; Interaction** mInteractionsLast; const ElementSim* mElement; }; ElementSim(ActorSim& actor); protected: ~ElementSim(); public: // Get an iterator to the interactions connected to the element // PT: this may seem strange at first glance since the "element interactions" appear to use the "actor interactions". The thing that makes this work is hidden // inside the iterator implementation: it does parse all the actor interactions indeed, but filters out the ones that do not contain "this", i.e. the desired element. 
// So this is inefficient (parsing potentially many more interactions than needed, imagine in a large compound) but it works, and the iterator has a point - it isn't // just the same as parsing the actor's array. PX_FORCE_INLINE ElementInteractionIterator getElemInteractions() const { return ElementInteractionIterator(*this, mActor.getActorInteractionCount(), mActor.getActorInteractions()); } PX_FORCE_INLINE ElementInteractionReverseIterator getElemInteractionsReverse() const { return ElementInteractionReverseIterator(*this, mActor.getActorInteractionCount(), mActor.getActorInteractions()); } PX_FORCE_INLINE ActorSim& getActor() const { return mActor; } PX_FORCE_INLINE Scene& getScene() const { return mActor.getScene(); } PX_FORCE_INLINE PxU32 getElementID() const { return mElementID; } PX_FORCE_INLINE bool isInBroadPhase() const { return mInBroadPhase; } void addToAABBMgr(PxReal contactDistance, Bp::FilterGroup::Enum group, Bp::ElementType::Enum type); bool removeFromAABBMgr(); PX_FORCE_INLINE void initID() { Scene& scene = getScene(); mElementID = scene.getElementIDPool().createID(); scene.getBoundsArray().initEntry(mElementID); } PX_FORCE_INLINE void releaseID() { getScene().getElementIDPool().releaseID(mElementID); } protected: ActorSim& mActor; PxU32 mElementID : 31; // PT: ID provided by Sc::Scene::mElementIDPool PxU32 mInBroadPhase : 1; public: PxU32 mShapeArrayIndex; }; PX_FORCE_INLINE void setFilterObjectAttributeType(PxFilterObjectAttributes& attr, PxFilterObjectType::Enum type) { PX_ASSERT((attr & (PxFilterObjectType::eMAX_TYPE_COUNT-1)) == 0); attr |= type; } } // namespace Sc } #endif
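// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the PhysX sources): as the comment above
// explains, ElementInteractionIterator walks the *actor's* interaction array
// and skips entries that do not reference the element being iterated. The
// standalone model below reproduces that filtering idea with minimal types;
// all names are invented for the example.
#include <cstddef>

namespace element_iter_sketch
{
	struct Element {};

	struct Interaction
	{
		const Element* element0;
		const Element* element1;
	};

	class ElementInteractionIterator
	{
	public:
		ElementInteractionIterator(const Element& e, size_t count, Interaction* const* interactions)
			: mCurrent(interactions), mLast(interactions + count), mElement(&e) {}

		// Return the next interaction that involves mElement, or nullptr when done.
		Interaction* getNext()
		{
			while (mCurrent != mLast)
			{
				Interaction* it = *mCurrent++;
				if (it && (it->element0 == mElement || it->element1 == mElement))
					return it;
			}
			return nullptr; // no more interactions touching this element
		}

	private:
		Interaction* const*	mCurrent;
		Interaction* const*	mLast;
		const Element*		mElement;
	};
}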
NVIDIA-Omniverse/PhysX/physx/source/simulationcontroller/src/ScArticulationTendonCore.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #include "ScArticulationTendonCore.h" #include "ScArticulationTendonSim.h" using namespace physx; void Sc::ArticulationSpatialTendonCore::setStiffness(const PxReal stiffness) { mStiffness = stiffness; if (mSim) mSim->setStiffness(stiffness); } PxReal Sc::ArticulationSpatialTendonCore::getStiffness() const { return mStiffness; } void Sc::ArticulationSpatialTendonCore::setDamping(const PxReal damping) { mDamping = damping; if (mSim) mSim->setDamping(damping); } PxReal Sc::ArticulationSpatialTendonCore::getDamping() const { return mDamping; } void Sc::ArticulationSpatialTendonCore::setLimitStiffness(const PxReal stiffness) { mLimitStiffness = stiffness; if (mSim) mSim->setLimitStiffness(stiffness); } PxReal Sc::ArticulationSpatialTendonCore::getLimitStiffness() const { return mLimitStiffness; } void Sc::ArticulationSpatialTendonCore::setOffset(const PxReal offset) { mOffset = offset; if (mSim) mSim->setOffset(offset); } PxReal Sc::ArticulationSpatialTendonCore::getOffset() const { return mOffset; } ///////////////////////////////////////////////////////////////////////////////////////////////////// void Sc::ArticulationFixedTendonCore::setStiffness(const PxReal stiffness) { mStiffness = stiffness; if (mSim) mSim->setStiffness(stiffness); } PxReal Sc::ArticulationFixedTendonCore::getStiffness() const { return mStiffness; } void Sc::ArticulationFixedTendonCore::setDamping(const PxReal damping) { mDamping = damping; if (mSim) mSim->setDamping(damping); } PxReal Sc::ArticulationFixedTendonCore::getDamping() const { return mDamping; } void Sc::ArticulationFixedTendonCore::setLimitStiffness(const PxReal stiffness) { mLimitStiffness = stiffness; if (mSim) mSim->setLimitStiffness(stiffness); } PxReal Sc::ArticulationFixedTendonCore::getLimitStiffness() const { return mLimitStiffness; } void Sc::ArticulationFixedTendonCore::setSpringRestLength(const PxReal 
restLength) { mRestLength = restLength; if (mSim) mSim->setSpringRestLength(restLength); } PxReal Sc::ArticulationFixedTendonCore::getSpringRestLength() const { return mRestLength; } void Sc::ArticulationFixedTendonCore::setLimitRange(const PxReal lowLimit, const PxReal highLimit) { mLowLimit = lowLimit; mHighLimit = highLimit; if (mSim) mSim->setLimitRange(lowLimit, highLimit); } void Sc::ArticulationFixedTendonCore::getLimitRange(PxReal& lowLimit, PxReal& highLimit) const { lowLimit = mLowLimit; highLimit = mHighLimit; } void Sc::ArticulationFixedTendonCore::setOffset(const PxReal offset) { mOffset = offset; if (mSim) mSim->setOffset(offset); } PxReal Sc::ArticulationFixedTendonCore::getOffset() const { return mOffset; }
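// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the PhysX sources): every tendon-core
// setter above follows the same pattern - cache the value in the core object,
// and forward it to the simulation-side object only if one exists (i.e. the
// tendon is currently part of a scene). The minimal standalone model below
// captures that pattern; the class names are invented for the example.
namespace buffered_setter_sketch
{
	class TendonSim
	{
	public:
		void setStiffness(float s) { mLLStiffness = s; } // stands in for pushing to the low-level solver
	private:
		float mLLStiffness = 0.0f;
	};

	class TendonCore
	{
	public:
		void setStiffness(float s)
		{
			mStiffness = s;     // always keep the core copy up to date
			if (mSim)           // forward only when a sim object is attached
				mSim->setStiffness(s);
		}
		float getStiffness() const { return mStiffness; } // reads are served from the core copy

		void setSim(TendonSim* sim) { mSim = sim; }

	private:
		float      mStiffness = 0.0f;
		TendonSim* mSim       = nullptr;
	};
}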
NVIDIA-Omniverse/PhysX/physx/source/simulationcontroller/src/ScSoftBodySim.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. #include "foundation/PxPreprocessor.h" #if PX_SUPPORT_GPU_PHYSX #include "ScSoftBodySim.h" #include "ScSoftBodyCore.h" #include "ScScene.h" #include "PxsSimulationController.h" using namespace physx; using namespace physx::Dy; Sc::SoftBodySim::SoftBodySim(SoftBodyCore& core, Scene& scene) : ActorSim(scene, core), mShapeSim(*this) { mLLSoftBody = scene.createLLSoftBody(this); mNodeIndex = scene.getSimpleIslandManager()->addSoftBody(mLLSoftBody, false); scene.getSimpleIslandManager()->activateNode(mNodeIndex); mLLSoftBody->setElementId(mShapeSim.getElementID()); } Sc::SoftBodySim::~SoftBodySim() { if (!mLLSoftBody) return; mScene.destroyLLSoftBody(*mLLSoftBody); mScene.getSimpleIslandManager()->removeNode(mNodeIndex); mCore.setSim(NULL); } void Sc::SoftBodySim::updateBounds() { mShapeSim.updateBounds(); } void Sc::SoftBodySim::updateBoundsInAABBMgr() { mShapeSim.updateBoundsInAABBMgr(); } PxBounds3 Sc::SoftBodySim::getBounds() const { return mShapeSim.getBounds(); } bool Sc::SoftBodySim::isSleeping() const { IG::IslandSim& sim = mScene.getSimpleIslandManager()->getAccurateIslandSim(); return sim.getActiveNodeIndex(mNodeIndex) == PX_INVALID_NODE; } void Sc::SoftBodySim::onSetWakeCounter() { getScene().getSimulationController()->setSoftBodyWakeCounter(mLLSoftBody); if (mLLSoftBody->getCore().wakeCounter > 0.f) getScene().getSimpleIslandManager()->activateNode(mNodeIndex); else getScene().getSimpleIslandManager()->deactivateNode(mNodeIndex); } void Sc::SoftBodySim::attachShapeCore(ShapeCore* core) { mShapeSim.attachShapeCore(core); PxsShapeCore* shapeCore = const_cast<PxsShapeCore*>(&core->getCore()); mLLSoftBody->setShapeCore(shapeCore); } void Sc::SoftBodySim::attachSimulationMesh(PxTetrahedronMesh* simulationMesh, PxSoftBodyAuxData* simulationState) { mLLSoftBody->setSimShapeCore(simulationMesh, simulationState); } PxTetrahedronMesh* Sc::SoftBodySim::getSimulationMesh() { return mLLSoftBody->getSimulationMesh(); } PxSoftBodyAuxData* Sc::SoftBodySim::getSoftBodyAuxData() { return 
mLLSoftBody->getSoftBodyAuxData(); } PxTetrahedronMesh* Sc::SoftBodySim::getCollisionMesh() { return mLLSoftBody->getCollisionMesh(); } void Sc::SoftBodySim::enableSelfCollision() { if (isActive()) { getScene().getSimulationController()->activateSoftbodySelfCollision(mLLSoftBody); } } void Sc::SoftBodySim::disableSelfCollision() { if (isActive()) { getScene().getSimulationController()->deactivateSoftbodySelfCollision(mLLSoftBody); } } /*void Sc::SoftBodySim::activate() { // Activate body //{ // PX_ASSERT((!isKinematic()) || notInScene() || readInternalFlag(InternalFlags(BF_KINEMATIC_MOVED | BF_KINEMATIC_SURFACE_VELOCITY))); // kinematics should only get activated when a target is set. // // exception: object gets newly added, then the state change will happen later // if (!isArticulationLink()) // { // mLLBody.mInternalFlags &= (~PxsRigidBody::eFROZEN); // // Put in list of activated bodies. The list gets cleared at the end of a sim step after the sleep callbacks have been fired. // getScene().onBodyWakeUp(this); // } // BodyCore& core = getBodyCore(); // if (core.getFlags() & PxRigidBodyFlag::eENABLE_POSE_INTEGRATION_PREVIEW) // { // PX_ASSERT(!getScene().isInPosePreviewList(*this)); // getScene().addToPosePreviewList(*this); // } // createSqBounds(); //} activateInteractions(*this); } void Sc::SoftBodySim::deactivate() { deactivateInteractions(*this); // Deactivate body //{ // PX_ASSERT((!isKinematic()) || notInScene() || !readInternalFlag(BF_KINEMATIC_MOVED)); // kinematics should only get deactivated when no target is set. // // exception: object gets newly added, then the state change will happen later // BodyCore& core = getBodyCore(); // if (!readInternalFlag(BF_ON_DEATHROW)) // { // // Set velocity to 0. // // Note: this is also fine if the method gets called because the user puts something to sleep (this behavior is documented in the API) // PX_ASSERT(core.getWakeCounter() == 0.0f); // const PxVec3 zero(0.0f); // core.setLinearVelocityInternal(zero); // core.setAngularVelocityInternal(zero); // setForcesToDefaults(!(mLLBody.mInternalFlags & PxsRigidBody::eDISABLE_GRAVITY)); // } // if (!isArticulationLink()) // Articulations have their own sleep logic. // getScene().onBodySleep(this); // if (core.getFlags() & PxRigidBodyFlag::eENABLE_POSE_INTEGRATION_PREVIEW) // { // PX_ASSERT(getScene().isInPosePreviewList(*this)); // getScene().removeFromPosePreviewList(*this); // } // destroySqBounds(); //} }*/ PxU32 Sc::SoftBodySim::getGpuSoftBodyIndex() const { return mLLSoftBody->getGpuSoftBodyIndex(); } #endif //PX_SUPPORT_GPU_PHYSX
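// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the PhysX sources): onSetWakeCounter()
// above pushes the wake counter to the simulation controller and then
// activates or deactivates the soft body's island node depending on whether
// the counter is still positive. The standalone model below shows that gating
// logic only; the island-manager interface is invented for the example.
namespace wake_counter_sketch
{
	struct IslandManager
	{
		bool active = false;
		void activateNode()   { active = true;  }
		void deactivateNode() { active = false; }
	};

	struct SoftBody
	{
		float         wakeCounter = 0.0f;
		IslandManager islands;

		void onSetWakeCounter(float counter)
		{
			wakeCounter = counter;
			// A positive wake counter keeps the body in island traversal;
			// zero allows it to be put to sleep.
			if (wakeCounter > 0.0f)
				islands.activateNode();
			else
				islands.deactivateNode();
		}
	};
}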
NVIDIA-Omniverse/PhysX/physx/source/simulationcontroller/src/ScArticulationJointCore.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #include "ScArticulationJointCore.h" #include "ScArticulationCore.h" #include "ScArticulationSim.h" #include "ScArticulationJointSim.h" #include "ScBodyCore.h" #include "ScPhysics.h" using namespace physx; Sc::ArticulationJointCore::ArticulationJointCore(const PxTransform& parentFrame, const PxTransform& childFrame) : mCore (parentFrame, childFrame), mSim (NULL), mArticulation (NULL), mRootType (NULL), mLLLinkIndex (0xffffffff) { } Sc::ArticulationJointCore::~ArticulationJointCore() { PX_ASSERT(getSim() == 0); } void Sc::ArticulationJointCore::setSimDirty() { Sc::ArticulationJointSim* sim = getSim(); if(sim) sim->setDirty(); ArticulationSim* artiSim = mArticulation->getSim(); if (artiSim && artiSim->getLLArticulationInitialized()) { Dy::FeatherstoneArticulation* llarticulation = artiSim->getLowLevelArticulation(); llarticulation->mJcalcDirty = true; } } void Sc::ArticulationJointCore::setParentPose(const PxTransform& t) { // AD: we check if it changed at all to avoid marking the complete articulation dirty for a jcalc. // The jcalc internally checks these ArticulationJointCoreDirtyFlag again so we would skip most things // but we'd still check all the joints. The same is also true for the following functions. if (!(mCore.parentPose == t)) { mCore.parentPose = t; setDirty(Dy::ArticulationJointCoreDirtyFlag::eFRAME); } } void Sc::ArticulationJointCore::setChildPose(const PxTransform& t) { if (!(mCore.childPose == t)) { mCore.childPose = t; setDirty(Dy::ArticulationJointCoreDirtyFlag::eFRAME); } } void Sc::ArticulationJointCore::setTargetP(PxArticulationAxis::Enum axis, PxReal targetP) { // this sets the target position in the core. mCore.targetP[axis] = targetP; // this sets the target position in the ll articulation. 
This needs to happen immediately because we might // look up the value using the cache API again, and that one is reading directly from the llArticulation. ArticulationSim* artiSim = mArticulation->getSim(); if (artiSim && artiSim->getLLArticulationInitialized()) { Dy::FeatherstoneArticulation* llarticulation = artiSim->getLowLevelArticulation(); Dy::ArticulationData& data = llarticulation->getArticulationData(); Dy::ArticulationJointCoreData* jointData = data.getJointData(); Dy::ArticulationJointCoreData& jointDatum = jointData[mLLLinkIndex]; PxReal* jointTargetPositions = data.getJointTargetPositions(); PxReal* jTargetPosition = &jointTargetPositions[jointDatum.jointOffset]; const PxU32 dofId = mCore.invDofIds[axis]; if (dofId != 0xff) { jTargetPosition[dofId] = targetP; artiSim->setArticulationDirty(Dy::ArticulationDirtyFlag::eDIRTY_JOINT_TARGET_POS); } } // AD: does not need setDirty - we write directly into the llArticulation and the GPU part is // handled by setArticulationDirty. } void Sc::ArticulationJointCore::setTargetV(PxArticulationAxis::Enum axis, PxReal targetV) { // this sets the target velocity in the core. mCore.targetV[axis] = targetV; // this sets the target velocity in the ll articulation. This needs to happen immediately because we might // look up the value using the cache API again, and that one is reading directly from the llArticulation. ArticulationSim* artiSim = mArticulation->getSim(); if (artiSim && artiSim->getLLArticulationInitialized()) { Dy::FeatherstoneArticulation* llarticulation = artiSim->getLowLevelArticulation(); Dy::ArticulationData& data = llarticulation->getArticulationData(); Dy::ArticulationJointCoreData* jointData = data.getJointData(); Dy::ArticulationJointCoreData& jointDatum = jointData[mLLLinkIndex]; PxReal* jointTargetVelocities = data.getJointTargetVelocities(); PxReal* jTargetVelocity = &jointTargetVelocities[jointDatum.jointOffset]; const PxU32 dofId = mCore.invDofIds[axis]; if (dofId != 0xff) { jTargetVelocity[dofId] = targetV; artiSim->setArticulationDirty(Dy::ArticulationDirtyFlag::eDIRTY_JOINT_TARGET_VEL); } } // AD: does not need setDirty - we write directly into the llArticulation and the GPU part is // handled by setArticulationDirty. } void Sc::ArticulationJointCore::setArmature(PxArticulationAxis::Enum axis, PxReal armature) { if (mCore.armature[axis] != armature) { mCore.armature[axis] = armature; setDirty(Dy::ArticulationJointCoreDirtyFlag::eARMATURE); } } void Sc::ArticulationJointCore::setJointPosition(PxArticulationAxis::Enum axis, const PxReal jointPos) { // this sets the position in the core. mCore.jointPos[axis] = jointPos; // this sets the position in the ll articulation. This needs to happen immediately because we might // look up the value using the cache API again, and that one is reading directly from the llArticulation. 
ArticulationSim* artiSim = mArticulation->getSim(); if (artiSim && artiSim->getLLArticulationInitialized()) { Dy::FeatherstoneArticulation* llarticulation = artiSim->getLowLevelArticulation(); Dy::ArticulationData& data = llarticulation->getArticulationData(); Dy::ArticulationJointCoreData* jointData = data.getJointData(); Dy::ArticulationJointCoreData& jointDatum = jointData[mLLLinkIndex]; PxReal* jointPositions = data.getJointPositions(); PxReal* jPosition = &jointPositions[jointDatum.jointOffset]; const PxU32 dofId = mCore.invDofIds[axis]; if (dofId != 0xff) { jPosition[dofId] = jointPos; artiSim->setArticulationDirty(Dy::ArticulationDirtyFlag::eDIRTY_POSITIONS); } } } // AD: we need this indirection right now because we could have updated joint vel using the cache, so // the read from the joint core might be stale. PxReal Sc::ArticulationJointCore::getJointPosition(PxArticulationAxis::Enum axis) const { PxReal jointPos = mCore.jointPos[axis]; ArticulationSim* artiSim = mArticulation->getSim(); if (artiSim && artiSim->getLLArticulationInitialized()) { const Dy::FeatherstoneArticulation* llarticulation = artiSim->getLowLevelArticulation(); const Dy::ArticulationData& data = llarticulation->getArticulationData(); const Dy::ArticulationJointCoreData* jointData = data.getJointData(); const Dy::ArticulationJointCoreData& jointDatum = jointData[mLLLinkIndex]; const PxReal* jointPositions = data.getJointPositions(); const PxReal* jPosition = &jointPositions[jointDatum.jointOffset]; const PxU32 dofId = mCore.invDofIds[axis]; if(dofId != 0xff) jointPos = jPosition[dofId]; } return jointPos; } void Sc::ArticulationJointCore::setJointVelocity(PxArticulationAxis::Enum axis, const PxReal jointVel) { mCore.jointVel[axis] = jointVel; ArticulationSim* artiSim = mArticulation->getSim(); if (artiSim && artiSim->getLLArticulationInitialized()) { Dy::FeatherstoneArticulation* llarticulation = artiSim->getLowLevelArticulation(); Dy::ArticulationData& data = llarticulation->getArticulationData(); Dy::ArticulationJointCoreData* jointData = data.getJointData(); Dy::ArticulationJointCoreData& jointDatum = jointData[mLLLinkIndex]; PxReal* jointVelocities = data.getJointVelocities(); PxReal* jVelocity = &jointVelocities[jointDatum.jointOffset]; const PxU32 dofId = mCore.invDofIds[axis]; if (dofId != 0xff) { jVelocity[dofId] = jointVel; artiSim->setArticulationDirty(Dy::ArticulationDirtyFlag::eDIRTY_VELOCITIES); } } } // AD: we need this indirection right now because we could have updated joint vel using the cache, so // the read from the joint core might be stale. 
PxReal Sc::ArticulationJointCore::getJointVelocity(PxArticulationAxis::Enum axis) const { PxReal jointVel = mCore.jointVel[axis]; ArticulationSim* artiSim = mArticulation->getSim(); if (artiSim && artiSim->getLLArticulationInitialized()) { const Dy::FeatherstoneArticulation* llarticulation = artiSim->getLowLevelArticulation(); const Dy::ArticulationData& data = llarticulation->getArticulationData(); const Dy::ArticulationJointCoreData* jointData = data.getJointData(); const Dy::ArticulationJointCoreData& jointDatum = jointData[mLLLinkIndex]; const PxReal* jointVelocities = data.getJointVelocities(); const PxReal* jVelocities = &jointVelocities[jointDatum.jointOffset]; const PxU32 dofId = mCore.invDofIds[axis]; if (dofId != 0xff) jointVel = jVelocities[dofId]; } return jointVel; } void Sc::ArticulationJointCore::setLimit(PxArticulationAxis::Enum axis, const PxArticulationLimit& limit) { mCore.initLimit(axis, limit); setSimDirty(); } void Sc::ArticulationJointCore::setDrive(PxArticulationAxis::Enum axis, const PxArticulationDrive& drive) { mCore.initDrive(axis, drive); setSimDirty(); }
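// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the PhysX sources): the joint setters and
// getters above all use the same two-level indexing - invDofIds[axis] maps a
// PxArticulationAxis to a dof slot (0xff meaning "axis not driven"), and
// jointOffset locates the joint's dofs inside one flat per-articulation array.
// The standalone model below reproduces that indexing; sizes and names are
// assumptions made for the example.
#include <cstdint>
#include <vector>

namespace dof_indexing_sketch
{
	constexpr uint8_t  kInvalidDof = 0xff;
	constexpr unsigned kMaxAxes    = 6;   // assumed axis count for the example

	struct Joint
	{
		uint8_t  invDofIds[kMaxAxes]; // axis -> dof slot, or kInvalidDof
		uint32_t jointOffset;         // first dof of this joint in the flat array
	};

	// Write a per-axis target into the articulation-wide flat array,
	// but only if the axis actually has a dof.
	inline bool setTarget(std::vector<float>& flatTargets, const Joint& joint,
	                      unsigned axis, float value)
	{
		const uint8_t dofId = joint.invDofIds[axis];
		if (dofId == kInvalidDof)
			return false;                               // axis is locked / not simulated
		flatTargets[joint.jointOffset + dofId] = value; // mirrors jTargetPosition[dofId] = targetP
		return true;                                    // the caller would raise a dirty flag here
	}
}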
NVIDIA-Omniverse/PhysX/physx/source/simulationcontroller/src/ScArticulationSensorSim.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #include "ScArticulationSensorSim.h" #include "ScArticulationSensor.h" #include "PxArticulationReducedCoordinate.h" #include "ScArticulationSim.h" #include "PxArticulationReducedCoordinate.h" namespace physx { Sc::ArticulationSensorSim::ArticulationSensorSim(ArticulationSensorCore& sensorCore, Scene& scene) : mScene(scene), mCore(sensorCore), mLLIndex(0xffffffff) { sensorCore.setSim(this); mLLSensor.mRelativePose = sensorCore.mRelativePose; mLLSensor.mFlags = sensorCore.mFlags; } Sc::ArticulationSensorSim::~ArticulationSensorSim() { mCore.setSim(NULL); } const PxSpatialForce& Sc::ArticulationSensorSim::getForces() const { return mArticulationSim->getSensorForce(mLLIndex); } void Sc::ArticulationSensorSim::setRelativePose(const PxTransform& relativePose) { mLLSensor.mRelativePose = relativePose; mArticulationSim->setArticulationDirty(Dy::ArticulationDirtyFlag::eDIRTY_SENSOR); } void Sc::ArticulationSensorSim::setFlag(const PxU16 flag) { mLLSensor.mFlags = flag; mArticulationSim->setArticulationDirty(Dy::ArticulationDirtyFlag::eDIRTY_SENSOR); } }
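// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the PhysX sources): both sensor setters
// above copy the new value into the low-level sensor and then raise
// eDIRTY_SENSOR, so the actual upload to the solver/GPU can happen once,
// later, for all accumulated changes. The standalone model below shows that
// dirty-flag accumulation; the flag values are invented for the example.
#include <cstdint>

namespace dirty_flag_sketch
{
	enum DirtyFlag : uint32_t
	{
		eDIRTY_SENSOR    = 1u << 0,
		eDIRTY_JOINTS    = 1u << 1,
		eDIRTY_EXT_FORCE = 1u << 2
	};

	struct ArticulationMirror
	{
		uint32_t dirtyFlags = 0;

		void setDirty(DirtyFlag flag) { dirtyFlags |= flag; } // cheap, may be called many times per step

		// Called once per step: consume all accumulated flags in a single pass.
		void flush()
		{
			if (dirtyFlags & eDIRTY_SENSOR)    { /* upload sensor poses/flags */ }
			if (dirtyFlags & eDIRTY_JOINTS)    { /* upload joint data */ }
			if (dirtyFlags & eDIRTY_EXT_FORCE) { /* upload external forces */ }
			dirtyFlags = 0;
		}
	};
}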
NVIDIA-Omniverse/PhysX/physx/source/simulationcontroller/src/ScArticulationTendonSim.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SC_ARTICULATION_TENDON_SIM_H #define SC_ARTICULATION_TENDON_SIM_H #include "foundation/PxUserAllocated.h" #include "DyArticulationTendon.h" namespace physx { namespace Sc { class ArticulationFixedTendonCore; class ArticulationTendonJointCore; class ArticulationSpatialTendonCore; class ArticulationAttachmentCore; class Scene; class ArticulationJointCore; class ArticulationSim; class ArticulationSpatialTendonSim : public PxUserAllocated { PX_NOCOPY(ArticulationSpatialTendonSim) public: ArticulationSpatialTendonSim(ArticulationSpatialTendonCore& tendon, Scene& scene); virtual ~ArticulationSpatialTendonSim(); void setStiffness(const PxReal stiffness); PxReal getStiffness() const; void setDamping(const PxReal damping); PxReal getDamping() const; void setLimitStiffness(const PxReal stiffness); PxReal getLimitStiffness() const; void setOffset(const PxReal offset); PxReal getOffset() const; void setAttachmentCoefficient(ArticulationAttachmentCore& core, const PxReal coefficient); void setAttachmentRelativeOffset(ArticulationAttachmentCore& core, const PxVec3& offset); void setAttachmentLimits(ArticulationAttachmentCore& core, const PxReal lowLimit, const PxReal highLimit); void setAttachmentRestLength(ArticulationAttachmentCore& core, const PxReal restLength); void addAttachment(ArticulationAttachmentCore& core); void removeAttachment(ArticulationAttachmentCore& core); Dy::ArticulationSpatialTendon mLLTendon; ArticulationSpatialTendonCore& mTendonCore; ArticulationSim* mArtiSim; Scene& mScene; }; class ArticulationFixedTendonSim : public PxUserAllocated { PX_NOCOPY(ArticulationFixedTendonSim) public: ArticulationFixedTendonSim(ArticulationFixedTendonCore& tendon, Scene& scene); virtual ~ArticulationFixedTendonSim(); void setStiffness(const PxReal stiffness); PxReal getStiffness() const; void setDamping(const PxReal damping); PxReal getDamping() 
const; void setLimitStiffness(const PxReal stiffness); PxReal getLimitStiffness() const; void setOffset(const PxReal offset); PxReal getOffset() const; void setSpringRestLength(const PxReal restLength); PxReal getSpringRestLength() const; void setLimitRange(const PxReal lowLimit, const PxReal highLimit); void getLimitRange(PxReal& lowLimit, PxReal& highLimit) const; void addTendonJoint(ArticulationTendonJointCore& tendonJointCore); void removeTendonJoint(ArticulationTendonJointCore& core); void setTendonJointCoefficient(ArticulationTendonJointCore& core, const PxArticulationAxis::Enum axis, const float coefficient, const float recipCoefficient); Dy::ArticulationFixedTendon mLLTendon; ArticulationFixedTendonCore& mTendonCore; ArticulationSim* mArtiSim; Scene& mScene; }; }//namespace Sc }//namespace physx #endif
NVIDIA-Omniverse/PhysX/physx/source/simulationcontroller/src/ScContactReportBuffer.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #ifndef SC_CONTACT_REPORT_BUFFER_H #define SC_CONTACT_REPORT_BUFFER_H #include "foundation/Px.h" #include "common/PxProfileZone.h" namespace physx { namespace Sc { class ContactReportBuffer { public: PX_FORCE_INLINE ContactReportBuffer(PxU32 initialSize, bool noResizeAllowed) : mBuffer(NULL) ,mCurrentBufferIndex(0) ,mCurrentBufferSize(initialSize) ,mDefaultBufferSize(initialSize) ,mLastBufferIndex(0) ,mAllocationLocked(noResizeAllowed) { mBuffer = allocateBuffer(initialSize); PX_ASSERT(mBuffer); } ~ContactReportBuffer() { PX_FREE(mBuffer); } PX_FORCE_INLINE void reset(); PX_FORCE_INLINE void flush(); PX_FORCE_INLINE PxU8* allocateNotThreadSafe(PxU32 size, PxU32& index, PxU32 alignment= 16); PX_FORCE_INLINE PxU8* reallocateNotThreadSafe(PxU32 size, PxU32& index, PxU32 alignment= 16, PxU32 lastIndex = 0xFFFFFFFF); PX_FORCE_INLINE PxU8* getData(const PxU32& index) const { return mBuffer+index; } PX_FORCE_INLINE PxU32 getDefaultBufferSize() const {return mDefaultBufferSize;} private: PX_FORCE_INLINE PxU8* allocateBuffer(PxU32 size); private: PxU8* mBuffer; PxU32 mCurrentBufferIndex; PxU32 mCurrentBufferSize; PxU32 mDefaultBufferSize; PxU32 mLastBufferIndex; bool mAllocationLocked; }; } // namespace Sc ////////////////////////////////////////////////////////////////////////// PX_FORCE_INLINE void Sc::ContactReportBuffer::reset() { mCurrentBufferIndex = 0; mLastBufferIndex = 0xFFFFFFFF; } ////////////////////////////////////////////////////////////////////////// void Sc::ContactReportBuffer::flush() { mCurrentBufferIndex = 0; mLastBufferIndex = 0xFFFFFFFF; if(mCurrentBufferSize != mDefaultBufferSize) { PX_FREE(mBuffer); mBuffer = allocateBuffer(mDefaultBufferSize); PX_ASSERT(mBuffer); mCurrentBufferSize = mDefaultBufferSize; } } ////////////////////////////////////////////////////////////////////////// PxU8* 
Sc::ContactReportBuffer::allocateNotThreadSafe(PxU32 size, PxU32& index ,PxU32 alignment/* =16 */) { PX_ASSERT(PxIsPowerOfTwo(alignment)); // padding for alignment PxU32 pad = ((mCurrentBufferIndex+alignment-1)&~(alignment-1)) - mCurrentBufferIndex; index = mCurrentBufferIndex + pad; if (index + size > mCurrentBufferSize) { PX_PROFILE_ZONE("ContactReportBuffer::Resize", 0); if(mAllocationLocked) return NULL; PxU32 oldBufferSize = mCurrentBufferSize; while(index + size > mCurrentBufferSize) { mCurrentBufferSize *= 2; } PxU8* tempBuffer = allocateBuffer(mCurrentBufferSize); PxMemCopy(tempBuffer,mBuffer,oldBufferSize); PX_FREE(mBuffer); mBuffer = tempBuffer; } PxU8* ptr = mBuffer + index; mLastBufferIndex = index; PX_ASSERT((size_t(ptr)&(alignment-1)) == 0); mCurrentBufferIndex += size + pad; return ptr; } ////////////////////////////////////////////////////////////////////////// PxU8* Sc::ContactReportBuffer::reallocateNotThreadSafe(PxU32 size, PxU32& index ,PxU32 alignment/* =16 */, PxU32 lastIndex) { if(lastIndex != mLastBufferIndex) { return allocateNotThreadSafe(size,index,alignment); } else { mCurrentBufferIndex = mLastBufferIndex; return allocateNotThreadSafe(size,index,alignment); } } ////////////////////////////////////////////////////////////////////////// PX_FORCE_INLINE PxU8* Sc::ContactReportBuffer::allocateBuffer(PxU32 size) { return (static_cast<PxU8*>(PX_ALLOC(size, "ContactReportBuffer"))); } } // namespace physx #endif
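// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the PhysX sources): allocateNotThreadSafe()
// above first rounds the write cursor up to the requested power-of-two
// alignment, then doubles the buffer size until the aligned request fits. The
// standalone snippet below reproduces those two computations so they can be
// checked in isolation; names are invented for the example.
#include <cassert>
#include <cstdint>

namespace report_buffer_sketch
{
	inline bool isPowerOfTwo(uint32_t x) { return x && !(x & (x - 1)); }

	// Round 'cursor' up to the next multiple of 'alignment' (a power of two),
	// exactly like ((mCurrentBufferIndex + alignment - 1) & ~(alignment - 1)).
	inline uint32_t alignUp(uint32_t cursor, uint32_t alignment)
	{
		assert(isPowerOfTwo(alignment));
		return (cursor + alignment - 1) & ~(alignment - 1);
	}

	// Grow a capacity by doubling until an allocation of 'size' bytes at 'index' fits.
	inline uint32_t growCapacity(uint32_t capacity, uint32_t index, uint32_t size)
	{
		while (index + size > capacity)
			capacity *= 2;
		return capacity;
	}
}

// Example: with a cursor of 21 and 16-byte alignment, alignUp() returns 32,
// so the padding inserted before the allocation is 32 - 21 = 11 bytes.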
NVIDIA-Omniverse/PhysX/physx/source/simulationcontroller/src/ScShapeSimBase.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. #ifndef SC_SHAPESIM_BASE_H #define SC_SHAPESIM_BASE_H #include "ScElementSim.h" #include "ScShapeCore.h" #include "ScRigidSim.h" #include "PxsShapeSim.h" namespace physx { namespace Sc { PX_FORCE_INLINE PxU32 isBroadPhase(PxShapeFlags flags) { return PxU32(flags) & PxU32(PxShapeFlag::eTRIGGER_SHAPE | PxShapeFlag::eSIMULATION_SHAPE); } class ShapeCore; // PT: TODO: ShapeSimBase is bonkers: // PxU32 ElementSim::mElementID // PxU32 ElementSim::mShapeArrayIndex; // IG::NodeIndex mLLShape::mBodySimIndex; *** GPU only // PxU32 mLLShape::mElementIndex; *** GPU only, looks like a copy of ElementSim::mElementID // PxU32 mLLShape::mShapeIndex; *** GPU only, looks like a copy of ElementSim::mElementID // PxU32 ShapeSimBase::mId; // PxU32 ShapeSimBase::mSqBoundsId; // => do we really need 7 different IDs per shape? 
class ShapeSimBase : public ElementSim { PX_NOCOPY(ShapeSimBase) public: ShapeSimBase(ActorSim& owner, const ShapeCore* core) : ElementSim (owner), mSqBoundsId (PX_INVALID_U32), mPrunerIndex(PX_INVALID_U32) { setCore(core); } ~ShapeSimBase() { } PX_FORCE_INLINE void setCore(const ShapeCore* core); PX_FORCE_INLINE const ShapeCore& getCore() const; PX_FORCE_INLINE bool isPxsCoreValid() const { return mLLShape.mShapeCore != NULL; } PX_INLINE PxGeometryType::Enum getGeometryType() const { return getCore().getGeometryType(); } // This is just for getting a reference for the user, so we cast away const-ness PX_INLINE PxShape* getPxShape() const { return const_cast<PxShape*>(getCore().getPxShape()); } PX_FORCE_INLINE PxReal getRestOffset() const { return getCore().getRestOffset(); } PX_FORCE_INLINE PxReal getTorsionalPatchRadius() const { return getCore().getTorsionalPatchRadius(); } PX_FORCE_INLINE PxReal getMinTorsionalPatchRadius() const { return getCore().getMinTorsionalPatchRadius(); } PX_FORCE_INLINE PxU32 getFlags() const { return getCore().getFlags(); } PX_FORCE_INLINE PxReal getContactOffset() const { return getCore().getContactOffset(); } PX_FORCE_INLINE PxU32 getTransformCacheID() const { return getElementID(); } PX_FORCE_INLINE PxU32 getSqBoundsId() const { return mSqBoundsId; } PX_FORCE_INLINE void setSqBoundsId(PxU32 id) { mSqBoundsId = id; } PX_FORCE_INLINE PxU32 getSqPrunerIndex() const { return mPrunerIndex; } PX_FORCE_INLINE void setSqPrunerIndex(PxU32 index) { mPrunerIndex = index; } PX_FORCE_INLINE PxsShapeSim& getLLShapeSim() { return mLLShape; } void onFilterDataChange(); void onRestOffsetChange(); void onFlagChange(PxShapeFlags oldFlags); void onResetFiltering(); void onVolumeOrTransformChange(); void onMaterialChange(); // remove when material properties are gone from PxcNpWorkUnit void onContactOffsetChange(); void markBoundsForUpdate(); void reinsertBroadPhase(); void removeFromBroadPhase(bool wakeOnLostTouch); void getAbsPoseAligned(PxTransform* PX_RESTRICT globalPose) const; PX_FORCE_INLINE RigidSim& getRbSim() const { return static_cast<RigidSim&>(getActor()); } BodySim* getBodySim() const; PxsRigidCore& getPxsRigidCore() const; void createSqBounds(); void destroySqBounds(); void updateCached(PxU32 transformCacheFlags, PxBitMapPinned* shapeChangedMap); void updateCached(PxsTransformCache& transformCache, Bp::BoundsArray& boundsArray); void updateBPGroup(); protected: PX_FORCE_INLINE void internalAddToBroadPhase(); PX_FORCE_INLINE bool internalRemoveFromBroadPhase(bool wakeOnLostTouch = true); void initSubsystemsDependingOnElementID(); PxsShapeSim mLLShape; PxU32 mSqBoundsId; PxU32 mPrunerIndex; }; #if PX_P64_FAMILY // PT: to compensate for the padding I removed in PxsShapeSim PX_COMPILE_TIME_ASSERT((sizeof(ShapeSimBase) - sizeof(PxsShapeSim))>=12); #else // PX_COMPILE_TIME_ASSERT(32==sizeof(Sc::ShapeSim)); // after removing bounds from shapes // PX_COMPILE_TIME_ASSERT((sizeof(Sc::ShapeSim) % 16) == 0); // aligned mem bounds are better for prefetching #endif PX_FORCE_INLINE void ShapeSimBase::setCore(const ShapeCore* core) { mLLShape.mShapeCore = core ? const_cast<PxsShapeCore*>(&core->getCore()) : NULL; } PX_FORCE_INLINE const ShapeCore& ShapeSimBase::getCore() const { return Sc::ShapeCore::getCore(*mLLShape.mShapeCore); } } // namespace Sc } #endif
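// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the PhysX sources): isBroadPhase() above is
// a plain mask test - a shape takes part in the broad phase if it is either a
// trigger shape or a simulation shape. The standalone model below shows the
// same test with invented flag values.
#include <cstdint>

namespace shape_flag_sketch
{
	enum ShapeFlag : uint32_t
	{
		eSIMULATION_SHAPE  = 1u << 0,
		eSCENE_QUERY_SHAPE = 1u << 1,
		eTRIGGER_SHAPE     = 1u << 2
	};

	// A non-zero result means the shape should be registered with the broad phase.
	inline uint32_t isBroadPhase(uint32_t flags)
	{
		return flags & (eTRIGGER_SHAPE | eSIMULATION_SHAPE);
	}
}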
NVIDIA-Omniverse/PhysX/physx/source/simulationcontroller/src/ScPipeline.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. // PT: this file contains most Scene functions called during the simulate() call, i.e. the "pipeline" functions. // Ideally they should be listed in the order in which they are called in the single-threaded version, to help // understanding and following the pipeline. 
#include "ScScene.h" #include "BpBroadPhase.h" #include "ScArticulationSim.h" #include "ScSimStats.h" #include "PxsCCD.h" #if defined(__APPLE__) && defined(__POWERPC__) #include <ppc_intrinsics.h> #endif #if PX_SUPPORT_GPU_PHYSX #include "PxPhysXGpu.h" #include "PxsKernelWrangler.h" #include "PxsHeapMemoryAllocator.h" #include "cudamanager/PxCudaContextManager.h" #endif #include "ScShapeInteraction.h" #include "ScElementInteractionMarker.h" #if PX_SUPPORT_GPU_PHYSX #include "PxSoftBody.h" #include "ScSoftBodySim.h" #include "DySoftBody.h" #if PX_ENABLE_FEATURES_UNDER_CONSTRUCTION #include "PxFEMCloth.h" #include "PxHairSystem.h" #endif #include "ScFEMClothSim.h" #include "DyFEMCloth.h" #include "ScParticleSystemSim.h" #include "DyParticleSystem.h" #include "ScHairSystemSim.h" #include "DyHairSystem.h" #endif using namespace physx; using namespace physx::Cm; using namespace physx::Dy; using namespace Sc; PX_IMPLEMENT_OUTPUT_ERROR /////////////////////////////////////////////////////////////////////////////// void PxcClearContactCacheStats(); void Sc::Scene::stepSetupCollide(PxBaseTask* continuation) { PX_PROFILE_ZONE("Sim.stepSetupCollide", mContextId); { PX_PROFILE_ZONE("Sim.prepareCollide", mContextId); mReportShapePairTimeStamp++; // deleted actors/shapes should get separate pair entries in contact reports mContactReportsNeedPostSolverVelocity = false; getRenderBuffer().clear(); // Clear broken constraint list: clearBrokenConstraintBuffer(); visualizeStartStep(); PxcClearContactCacheStats(); } kinematicsSetup(continuation); PxsContactManagerOutputIterator outputs = mLLContext->getNphaseImplementationContext()->getContactManagerOutputs(); // Update all dirty interactions mNPhaseCore->updateDirtyInteractions(outputs); mInternalFlags &= ~(SceneInternalFlag::eSCENE_SIP_STATES_DIRTY_DOMINANCE | SceneInternalFlag::eSCENE_SIP_STATES_DIRTY_VISUALIZATION); } void Sc::Scene::simulate(PxReal timeStep, PxBaseTask* continuation) { if(timeStep != 0.0f) { setElapsedTime(timeStep); mDynamicsContext->setDt(timeStep); mAdvanceStep.setContinuation(continuation); stepSetupCollide(&mAdvanceStep); mCollideStep.setContinuation(&mAdvanceStep); mAdvanceStep.removeReference(); mCollideStep.removeReference(); } } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::collideStep(PxBaseTask* continuation) { PX_PROFILE_ZONE("Sim.collideQueueTasks", mContextId); PX_PROFILE_START_CROSSTHREAD("Basic.collision", mContextId); mStats->simStart(); mLLContext->beginUpdate(); mPostNarrowPhase.setTaskManager(*continuation->getTaskManager()); mPostNarrowPhase.addReference(); mFinalizationPhase.setTaskManager(*continuation->getTaskManager()); mFinalizationPhase.addReference(); mRigidBodyNarrowPhase.setContinuation(continuation); mPreRigidBodyNarrowPhase.setContinuation(&mRigidBodyNarrowPhase); mUpdateShapes.setContinuation(&mPreRigidBodyNarrowPhase); mRigidBodyNarrowPhase.removeReference(); mPreRigidBodyNarrowPhase.removeReference(); mUpdateShapes.removeReference(); } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::updateShapes(PxBaseTask* continuation) { //dma shapes data to gpu mSimulationController->updateShapes(continuation); } /////////////////////////////////////////////////////////////////////////////// namespace { class DirtyShapeUpdatesTask : public Cm::Task { public: static const PxU32 MaxShapes = 256; PxsTransformCache& mCache; Bp::BoundsArray& mBoundsArray; ShapeSim* mShapes[MaxShapes]; PxU32 mNbShapes; 
DirtyShapeUpdatesTask(PxU64 contextID, PxsTransformCache& cache, Bp::BoundsArray& boundsArray) : Cm::Task (contextID), mCache (cache), mBoundsArray(boundsArray), mNbShapes (0) { } virtual void runInternal() { for (PxU32 a = 0; a < mNbShapes; ++a) mShapes[a]->updateCached(mCache, mBoundsArray); } virtual const char* getName() const { return "DirtyShapeUpdatesTask"; } private: PX_NOCOPY(DirtyShapeUpdatesTask) }; } static DirtyShapeUpdatesTask* createDirtyShapeUpdateTask(Cm::FlushPool& pool, PxU64 contextID, PxsTransformCache& cache, Bp::BoundsArray& boundsArray) { return PX_PLACEMENT_NEW(pool.allocate(sizeof(DirtyShapeUpdatesTask)), DirtyShapeUpdatesTask)(contextID, cache, boundsArray); } void Sc::Scene::updateDirtyShapes(PxBaseTask* continuation) { PX_PROFILE_ZONE("Scene.updateDirtyShapes", mContextId); // PT: it is quite unfortunate that we cannot shortcut parsing the bitmaps. We should consider switching to arrays. //Process dirty shapeSims... PxBitMap::Iterator dirtyShapeIter(mDirtyShapeSimMap); PxsTransformCache& cache = mLLContext->getTransformCache(); Bp::BoundsArray& boundsArray = mAABBManager->getBoundsArray(); Cm::FlushPool& pool = mLLContext->getTaskPool(); PxBitMapPinned& changedMap = mAABBManager->getChangedAABBMgActorHandleMap(); DirtyShapeUpdatesTask* task = createDirtyShapeUpdateTask(pool, mContextId, cache, boundsArray); // PT: TASK-CREATION TAG bool hasDirtyShapes = false; PxU32 nbDirtyShapes = 0; PxU32 index; while((index = dirtyShapeIter.getNext()) != PxBitMap::Iterator::DONE) { ShapeSim* shapeSim = reinterpret_cast<ShapeSim*>(mAABBManager->getUserData(index)); if(shapeSim) { hasDirtyShapes = true; changedMap.growAndSet(index); task->mShapes[nbDirtyShapes++] = shapeSim; // PT: consider better load balancing? if(nbDirtyShapes == DirtyShapeUpdatesTask::MaxShapes) { task->mNbShapes = nbDirtyShapes; nbDirtyShapes = 0; startTask(task, continuation); task = createDirtyShapeUpdateTask(pool, mContextId, cache, boundsArray); } } } if(hasDirtyShapes) { //Setting the boundsArray and transform cache as dirty so that they get DMAd to GPU if GPU dynamics and BP are being used respectively. //These bits are no longer set when we update the cached state for actors due to an optimization avoiding setting these dirty bits multiple times. 
getBoundsArray().setChangedState(); getLowLevelContext()->getTransformCache().setChangedState(); } if(nbDirtyShapes) { task->mNbShapes = nbDirtyShapes; startTask(task, continuation); } // PT: we clear the map but we don't shrink it, bad because we always parse it above mDirtyShapeSimMap.clear(); } void Sc::Scene::preRigidBodyNarrowPhase(PxBaseTask* continuation) { PX_PROFILE_ZONE("Scene.preNarrowPhase", mContextId); updateContactDistances(continuation); updateDirtyShapes(continuation); } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::rigidBodyNarrowPhase(PxBaseTask* continuation) { PX_PROFILE_START_CROSSTHREAD("Basic.narrowPhase", mContextId); mCCDPass = 0; mPostBroadPhase3.addDependent(*continuation); mPostBroadPhase2.setContinuation(&mPostBroadPhase3); mPostBroadPhaseCont.setContinuation(&mPostBroadPhase2); mPostBroadPhase.setContinuation(&mPostBroadPhaseCont); mBroadPhase.setContinuation(&mPostBroadPhase); mRigidBodyNPhaseUnlock.setContinuation(continuation); mRigidBodyNPhaseUnlock.addReference(); mUpdateBoundAndShapeTask.addDependent(mBroadPhase); mLLContext->resetThreadContexts(); mLLContext->updateContactManager(mDt, mHasContactDistanceChanged, continuation, &mRigidBodyNPhaseUnlock, &mUpdateBoundAndShapeTask); // Starts update of contact managers mPostBroadPhase3.removeReference(); mPostBroadPhase2.removeReference(); mPostBroadPhaseCont.removeReference(); mPostBroadPhase.removeReference(); mBroadPhase.removeReference(); mUpdateBoundAndShapeTask.removeReference(); } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::updateBoundsAndShapes(PxBaseTask* /*continuation*/) { //if the scene doesn't use gpu dynamic and gpu broad phase and the user enables the direct API, //the sdk will refuse to create the scene. 
mSimulationController->updateBoundsAndShapes(*mAABBManager, isDirectGPUAPIInitialized()); } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::broadPhase(PxBaseTask* continuation) { PX_PROFILE_START_CROSSTHREAD("Basic.broadPhase", mContextId); /*mProcessLostPatchesTask.setContinuation(&mPostNarrowPhase); mProcessLostPatchesTask.removeReference();*/ #if PX_SUPPORT_GPU_PHYSX gpu_updateBounds(); #endif mCCDBp = false; mBpSecondPass.setContinuation(continuation); mBpFirstPass.setContinuation(&mBpSecondPass); mBpSecondPass.removeReference(); mBpFirstPass.removeReference(); } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::processFoundSolverPatches(PxBaseTask* /*continuation*/) { PxvNphaseImplementationContext* nphase = mLLContext->getNphaseImplementationContext(); mDynamicsContext->processFoundPatches(*mSimpleIslandManager, nphase->getFoundPatchManagers(), nphase->getNbFoundPatchManagers(), nphase->getFoundPatchOutputCounts()); } void Sc::Scene::processLostSolverPatches(PxBaseTask* /*continuation*/) { PxvNphaseImplementationContext* nphase = mLLContext->getNphaseImplementationContext(); mDynamicsContext->processLostPatches(*mSimpleIslandManager, nphase->getFoundPatchManagers(), nphase->getNbFoundPatchManagers(), nphase->getFoundPatchOutputCounts()); } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::broadPhaseFirstPass(PxBaseTask* continuation) { PX_PROFILE_ZONE("Basic.broadPhaseFirstPass", mContextId); const PxU32 numCpuTasks = continuation->getTaskManager()->getCpuDispatcher()->getWorkerCount(); mAABBManager->updateBPFirstPass(numCpuTasks, mLLContext->getTaskPool(), mHasContactDistanceChanged, continuation); // AD: we already update the aggregate bounds above, but because we just update all the aggregates all the time, // this should be fine here. The important thing is that we don't mix normal and aggregate bounds in the normal BP. if (isDirectGPUAPIInitialized()) { mSimulationController->mergeChangedAABBMgHandle(); } } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::broadPhaseSecondPass(PxBaseTask* continuation) { PX_PROFILE_ZONE("Basic.broadPhaseSecondPass", mContextId); mBpUpdate.setContinuation(continuation); mPreIntegrate.setContinuation(&mBpUpdate); mPreIntegrate.removeReference(); mBpUpdate.removeReference(); } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::preIntegrate(PxBaseTask* continuation) { PX_PROFILE_ZONE("Basic.preIntegrate", mContextId); if (!mCCDBp && isUsingGpuDynamicsOrBp()) mSimulationController->preIntegrateAndUpdateBound(continuation, mGravity, mDt); } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::updateBroadPhase(PxBaseTask* continuation) { PX_PROFILE_ZONE("Basic.updateBroadPhase", mContextId); PxBaseTask* rigidBodyNPhaseUnlock = mCCDPass ? 
NULL : &mRigidBodyNPhaseUnlock; mAABBManager->updateBPSecondPass(&mLLContext->getScratchAllocator(), continuation); // PT: decoupling: I moved this back from updateBPSecondPass //if this is mCCDPass, narrowPhaseUnlockTask will be NULL if(rigidBodyNPhaseUnlock) rigidBodyNPhaseUnlock->removeReference(); if(!mCCDBp && isUsingGpuDynamicsOrBp()) mSimulationController->updateParticleSystemsAndSoftBodies(); } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::unblockNarrowPhase(PxBaseTask*) { /*if (!mCCDBp && mUseGpuRigidBodies) mSimulationController->updateParticleSystemsAndSoftBodies();*/ // mLLContext->getNphaseImplementationContext()->startNarrowPhaseTasks(); } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::postBroadPhase(PxBaseTask* continuation) { PX_PROFILE_START_CROSSTHREAD("Basic.postBroadPhase", mContextId); //Notify narrow phase that broad phase has completed mLLContext->getNphaseImplementationContext()->postBroadPhaseUpdateContactManager(continuation); mAABBManager->postBroadPhase(continuation, *getFlushPool()); } /////////////////////////////////////////////////////////////////////////////// class OverlapFilterTask : public Cm::Task { public: static const PxU32 MaxPairs = 512; NPhaseCore* mNPhaseCore; const Bp::AABBOverlap* mPairs; PxU32 mNbToProcess; PxU32 mKeepMap[MaxPairs/32]; FilterInfo* mFinfo; PxU32 mNbToKeep; PxU32 mNbToSuppress; OverlapFilterTask* mNext; OverlapFilterTask(PxU64 contextID, NPhaseCore* nPhaseCore, FilterInfo* fInfo, const Bp::AABBOverlap* pairs, PxU32 nbToProcess) : Cm::Task (contextID), mNPhaseCore (nPhaseCore), mPairs (pairs), mNbToProcess (nbToProcess), mFinfo (fInfo), mNbToKeep (0), mNbToSuppress (0), mNext (NULL) { PxMemZero(mKeepMap, sizeof(mKeepMap)); } virtual void runInternal() { mNPhaseCore->runOverlapFilters( mNbToProcess, mPairs, mFinfo, mNbToKeep, mNbToSuppress, mKeepMap); } virtual const char* getName() const { return "OverlapFilterTask"; } }; void Sc::Scene::finishBroadPhase(PxBaseTask* continuation) { PX_PROFILE_ZONE("Sc::Scene::finishBroadPhase", mContextId); { PX_PROFILE_ZONE("Sim.processNewOverlaps", mContextId); // PT: we process "trigger pairs" immediately, sequentially. Both the filtering and the creation of trigger // interactions happen at the same time in onTriggerOverlapCreated. // PT: could we drop trigger interactions or parallelize this? I am not sure why Kier decided to treat trigger // interactions differently here, in my eyes it is pretty much the same as regular interactions, and they // could have been kept in the same multithreaded pipeline. Regular shape interactions also call "registerInActors" // by default, so the below comment is not very convincing - we worked around this for ShapeInteraction, we // could have worked around this as well for TriggerInteraction. { //KS - these functions call "registerInActors", while OverlapFilterTask reads the list of interactions //in an actor. 
This could lead to a race condition and a crash if they occur at the same time, so we //serialize these operations PX_PROFILE_ZONE("Sim.processNewOverlaps.createOverlapsNoShapeInteractions", mContextId); { PxU32 createdOverlapCount; const Bp::AABBOverlap* PX_RESTRICT p = mAABBManager->getCreatedOverlaps(Bp::ElementType::eTRIGGER, createdOverlapCount); if(createdOverlapCount) { mLLContext->getSimStats().mNbNewPairs += createdOverlapCount; mNPhaseCore->onTriggerOverlapCreated(p, createdOverlapCount); } } } // PT: for regular shapes the code has been multithreaded and split into different parts, making it harder to follow. // Basically this is the same code as the above for triggers, but scattered over multiple Sc::Scene functions and // tasks. As far as I can tell the steps are: // - "first stage" filtering (right here below) // - "second stage" filtering and creation of ShapeInteractions in preallocateContactManagers // - some cleanup in postBroadPhaseStage2 { PxU32 createdOverlapCount; const Bp::AABBOverlap* PX_RESTRICT p = mAABBManager->getCreatedOverlaps(Bp::ElementType::eSHAPE, createdOverlapCount); // PT: removed this because it's pointless at this stage? if(0) { //We allocate at least 1 element in this array to ensure that the onOverlapCreated functions don't go bang! mPreallocatedContactManagers.reserve(1); mPreallocatedShapeInteractions.reserve(1); mPreallocatedInteractionMarkers.reserve(1); mPreallocatedContactManagers.forceSize_Unsafe(1); mPreallocatedShapeInteractions.forceSize_Unsafe(1); mPreallocatedInteractionMarkers.forceSize_Unsafe(1); } mPreallocateContactManagers.setContinuation(continuation); // PT: this is a temporary member value used to pass the OverlapFilterTasks to the next stage of the pipeline (preallocateContactManagers). // It ideally shouldn't be a class member but just a user-data passed from one task to the next. The task manager doesn't support that though (AFAIK), // so instead it just lies there in Sc::Scene as a class member. It's only used in finishBroadPhase & preallocateContactManagers though. mOverlapFilterTaskHead = NULL; if(createdOverlapCount) { mLLContext->getSimStats().mNbNewPairs += createdOverlapCount; Cm::FlushPool& flushPool = mLLContext->getTaskPool(); // PT: temporary data, similar to mOverlapFilterTaskHead. Will be filled with filter info for each pair by the OverlapFilterTask. 
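// Note: mFilterInfo is sized to one FilterInfo entry per created overlap. The overlaps are then cut into chunks of
// OverlapFilterTask::MaxPairs (512) pairs, each chunk filtered by one OverlapFilterTask which records the surviving
// pairs in its keep-map bitmask (decoded later in preallocateContactManagers via the b &= b-1 lowest-set-bit trick).
// For example, 1000 newly created overlaps would produce two tasks covering 512 and 488 pairs respectively.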
mFilterInfo.forceSize_Unsafe(0); mFilterInfo.reserve(createdOverlapCount); mFilterInfo.forceSize_Unsafe(createdOverlapCount); // PT: TASK-CREATION TAG const PxU32 nbPairsPerTask = OverlapFilterTask::MaxPairs; OverlapFilterTask* previousTask = NULL; for(PxU32 a=0; a<createdOverlapCount; a+=nbPairsPerTask) { const PxU32 nbToProcess = PxMin(createdOverlapCount - a, nbPairsPerTask); OverlapFilterTask* task = PX_PLACEMENT_NEW(flushPool.allocate(sizeof(OverlapFilterTask)), OverlapFilterTask)(mContextId, mNPhaseCore, mFilterInfo.begin() + a, p + a, nbToProcess); task->setContinuation(&mPreallocateContactManagers); task->removeReference(); // PT: setup a linked-list of OverlapFilterTasks, will be parsed in preallocateContactManagers if(previousTask) previousTask->mNext = task; else mOverlapFilterTaskHead = task; previousTask = task; } } } mPreallocateContactManagers.removeReference(); } } void Sc::Scene::postBroadPhaseContinuation(PxBaseTask* continuation) { mAABBManager->getChangedAABBMgActorHandleMap().clear(); // - Finishes broadphase update // - Adds new interactions (and thereby contact managers if needed) finishBroadPhase(continuation); } /////////////////////////////////////////////////////////////////////////////// template<class T> static PX_FORCE_INLINE T* markPointerAsUsed(T* ptr) { return reinterpret_cast<T*>(size_t(ptr) | 1); } static PX_FORCE_INLINE size_t isPointerMarkedAsUsed(void* ptr) { return size_t(ptr) & 1; } template<class T> static PX_FORCE_INLINE T* getUsedPointer(T* ptr) { const size_t address = size_t(ptr); return address & 1 ? reinterpret_cast<T*>(address & size_t(~1)) : NULL; } namespace { class OnOverlapCreatedTask : public Cm::Task { public: NPhaseCore* mNPhaseCore; const Bp::AABBOverlap* mPairs; const FilterInfo* mFinfo; PxsContactManager** mContactManagers; ShapeInteraction** mShapeInteractions; ElementInteractionMarker** mInteractionMarkers; PxU32 mNbToProcess; OnOverlapCreatedTask(PxU64 contextID, NPhaseCore* nPhaseCore, const Bp::AABBOverlap* pairs, const FilterInfo* fInfo, PxsContactManager** contactManagers, ShapeInteraction** shapeInteractions, ElementInteractionMarker** interactionMarkers, PxU32 nbToProcess) : Cm::Task (contextID), mNPhaseCore (nPhaseCore), mPairs (pairs), mFinfo (fInfo), mContactManagers (contactManagers), mShapeInteractions (shapeInteractions), mInteractionMarkers (interactionMarkers), mNbToProcess (nbToProcess) { } virtual void runInternal() { PxsContactManager** currentCm = mContactManagers; ShapeInteraction** currentSI = mShapeInteractions; ElementInteractionMarker** currentEI = mInteractionMarkers; for(PxU32 i=0; i<mNbToProcess; i++) { const Bp::AABBOverlap& pair = mPairs[i]; ShapeSimBase* s0 = reinterpret_cast<ShapeSimBase*>(pair.mUserData1); ShapeSimBase* s1 = reinterpret_cast<ShapeSimBase*>(pair.mUserData0); ElementSimInteraction* interaction = mNPhaseCore->createRbElementInteraction(mFinfo[i], *s0, *s1, *currentCm, *currentSI, *currentEI, false); if(interaction) { const InteractionType::Enum type = interaction->getType(); if(type == InteractionType::eOVERLAP) { PX_ASSERT(interaction==*currentSI); *currentSI = markPointerAsUsed(*currentSI); currentSI++; if(static_cast<ShapeInteraction*>(interaction)->getContactManager()) { PX_ASSERT(static_cast<ShapeInteraction*>(interaction)->getContactManager()==*currentCm); *currentCm = markPointerAsUsed(*currentCm); currentCm++; } } else if(type == InteractionType::eMARKER) { *currentEI = markPointerAsUsed(*currentEI); currentEI++; } } } } virtual const char* getName() const { return 
"OnOverlapCreatedTask"; } }; } void Sc::Scene::preallocateContactManagers(PxBaseTask* continuation) { //Iterate over all filter tasks and work out how many pairs we need... PxU32 totalCreatedPairs = 0; PxU32 totalSuppressPairs = 0; OverlapFilterTask* task = mOverlapFilterTaskHead; while(task) { totalCreatedPairs += task->mNbToKeep; totalSuppressPairs += task->mNbToSuppress; task = task->mNext; } { //We allocate at least 1 element in this array to ensure that the onOverlapCreated functions don't go bang! // PT: this has to do with the way we dereference currentCm, currentSI and currentEI in OnOverlapCreatedTask // before we know which type of interaction will be created. That is, we need room for at least one of each type // even if no interaction of that type will be created. // PT: don't we preallocate 2 to 3 times as much memory as needed here then? // PT: also doesn't it mean we're going to allocate & deallocate ALL the interaction markers most of the time? mPreallocatedContactManagers.forceSize_Unsafe(0); mPreallocatedShapeInteractions.forceSize_Unsafe(0); mPreallocatedInteractionMarkers.forceSize_Unsafe(0); mPreallocatedContactManagers.reserve(totalCreatedPairs+1); mPreallocatedShapeInteractions.reserve(totalCreatedPairs+1); mPreallocatedInteractionMarkers.reserve(totalSuppressPairs+1); mPreallocatedContactManagers.forceSize_Unsafe(totalCreatedPairs); mPreallocatedShapeInteractions.forceSize_Unsafe(totalCreatedPairs); mPreallocatedInteractionMarkers.forceSize_Unsafe(totalSuppressPairs); } PxU32 overlapCount; Bp::AABBOverlap* PX_RESTRICT p = mAABBManager->getCreatedOverlaps(Bp::ElementType::eSHAPE, overlapCount); if(!overlapCount) return; struct Local { static void processBatch(const PxU32 createdCurrIdx, PxU32& createdStartIdx, const PxU32 suppressedCurrIdx, PxU32& suppressedStartIdx, const PxU32 batchSize, PxsContext* const context, NPhaseCore* const core, OnOverlapCreatedTask* const createTask, PxBaseTask* const continuation_, PxsContactManager** const cms_, ShapeInteraction** const shapeInter_, ElementInteractionMarker** const markerIter_) { const PxU32 nbToCreate = createdCurrIdx - createdStartIdx; const PxU32 nbToSuppress = suppressedCurrIdx - suppressedStartIdx; context->getContactManagerPool().preallocate(nbToCreate, cms_ + createdStartIdx); for (PxU32 i = 0; i < nbToCreate; ++i) shapeInter_[createdStartIdx + i] = core->mShapeInteractionPool.allocate(); for (PxU32 i = 0; i < nbToSuppress; ++i) markerIter_[suppressedStartIdx + i] = core->mInteractionMarkerPool.allocate(); createdStartIdx = createdCurrIdx; suppressedStartIdx = suppressedCurrIdx; createTask->mNbToProcess = batchSize; startTask(createTask, continuation_); } }; const PxU32 nbPairsPerTask = 256; PxsContactManager** cms = mPreallocatedContactManagers.begin(); ShapeInteraction** shapeInter = mPreallocatedShapeInteractions.begin(); ElementInteractionMarker** markerIter = mPreallocatedInteractionMarkers.begin(); Cm::FlushPool& flushPool = mLLContext->getTaskPool(); FilterInfo* fInfo = mFilterInfo.begin(); // PT: TODO: why do we create the task immediately? Why not create it only when a batch is full? 
// PT: it's the same pattern as for CCD, kinematics, etc OnOverlapCreatedTask* createTask = PX_PLACEMENT_NEW(flushPool.allocate(sizeof(OnOverlapCreatedTask)), OnOverlapCreatedTask)(mContextId, mNPhaseCore, p, fInfo, cms, shapeInter, markerIter, 0); PxU32 batchSize = 0; PxU32 suppressedStartIdx = 0; PxU32 createdStartIdx = 0; PxU32 suppressedCurrIdx = 0; PxU32 createdCurrIdx = 0; PxU32 currentReadIdx = 0; PxU32 createdOverlapCount = 0; // PT: TASK-CREATION TAG task = mOverlapFilterTaskHead; while(task) { if(task->mNbToKeep || task->mNbToSuppress) { for(PxU32 w = 0; w < (OverlapFilterTask::MaxPairs/32); ++w) { for(PxU32 b = task->mKeepMap[w]; b; b &= b-1) { const PxU32 index = (w<<5) + PxLowestSetBit(b); if(createdOverlapCount < (index + currentReadIdx)) { p[createdOverlapCount] = task->mPairs[index]; fInfo[createdOverlapCount] = task->mFinfo[index]; } createdOverlapCount++; batchSize++; } } suppressedCurrIdx += task->mNbToSuppress; createdCurrIdx += task->mNbToKeep; if(batchSize >= nbPairsPerTask) { Local::processBatch(createdCurrIdx, createdStartIdx, suppressedCurrIdx, suppressedStartIdx, batchSize, mLLContext, mNPhaseCore, createTask, continuation, cms, shapeInter, markerIter); createTask = PX_PLACEMENT_NEW(flushPool.allocate(sizeof(OnOverlapCreatedTask)), OnOverlapCreatedTask)(mContextId, mNPhaseCore, p + createdOverlapCount, fInfo + createdOverlapCount, cms + createdStartIdx, shapeInter + createdStartIdx, markerIter + suppressedStartIdx, 0); batchSize = 0; } } currentReadIdx += OverlapFilterTask::MaxPairs; task = task->mNext; } if(batchSize) Local::processBatch(createdCurrIdx, createdStartIdx, suppressedCurrIdx, suppressedStartIdx, batchSize, mLLContext, mNPhaseCore, createTask, continuation, cms, shapeInter, markerIter); } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::processLostTouchPairs() { PX_PROFILE_ZONE("Sc::Scene::processLostTouchPairs", mContextId); const PxU32 nb = mLostTouchPairs.size(); const SimpleBodyPair* pairs = mLostTouchPairs.begin(); for(PxU32 i=0; i<nb; ++i) { ActorSim* body1 = pairs[i].body1; ActorSim* body2 = pairs[i].body2; // If one has been deleted, we wake the other one const PxIntBool deletedBody1 = mLostTouchPairsDeletedBodyIDs.boundedTest(pairs[i].body1ID); const PxIntBool deletedBody2 = mLostTouchPairsDeletedBodyIDs.boundedTest(pairs[i].body2ID); if(deletedBody1 || deletedBody2) { if(!deletedBody1) body1->internalWakeUp(); if(!deletedBody2) body2->internalWakeUp(); continue; } const bool b1Active = body1->isActive(); const bool b2Active = body2->isActive(); // If both are sleeping, we let them sleep // (for example, two sleeping objects touch and the user teleports one (without waking it up)) if(!b1Active && !b2Active) continue; // If only one has fallen asleep, we wake them both if(!b1Active || !b2Active) { body1->internalWakeUp(); body2->internalWakeUp(); } } mLostTouchPairs.clear(); mLostTouchPairsDeletedBodyIDs.clear(); } void Sc::Scene::postBroadPhaseStage2(PxBaseTask* continuation) { // - Wakes actors that lost touch if appropriate processLostTouchPairs(); mIslandInsertion.setContinuation(continuation); mRegisterContactManagers.setContinuation(continuation); mRegisterInteractions.setContinuation(continuation); mRegisterSceneInteractions.setContinuation(continuation); mIslandInsertion.removeReference(); mRegisterContactManagers.removeReference(); mRegisterInteractions.removeReference(); mRegisterSceneInteractions.removeReference(); //Release unused Cms back to the pool (later, this needs to be done in a 
thread-safe way from multiple worker threads { PX_PROFILE_ZONE("Sim.processNewOverlaps.release", mContextId); { PxU32 nb = mPreallocatedContactManagers.size(); PxsContactManager** managers = mPreallocatedContactManagers.begin(); Cm::PoolList<PxsContactManager, PxsContext>& pool = mLLContext->getContactManagerPool(); while(nb--) { PxsContactManager* current = *managers++; if(!isPointerMarkedAsUsed(current)) pool.put(current); } } { PxU32 nb = mPreallocatedShapeInteractions.size(); ShapeInteraction** interactions = mPreallocatedShapeInteractions.begin(); PxPool<ShapeInteraction>& pool = mNPhaseCore->mShapeInteractionPool; while(nb--) { ShapeInteraction* current = *interactions++; if(!isPointerMarkedAsUsed(current)) pool.deallocate(current); } } { PxU32 nb = mPreallocatedInteractionMarkers.size(); ElementInteractionMarker** interactions = mPreallocatedInteractionMarkers.begin(); PxPool<ElementInteractionMarker>& pool = mNPhaseCore->mInteractionMarkerPool; while(nb--) { ElementInteractionMarker* current = *interactions++; if(!isPointerMarkedAsUsed(current)) pool.deallocate(current); } } } } /////////////////////////////////////////////////////////////////////////////// // PT: islandInsertion / registerContactManagers / registerInteractions / registerSceneInteractions run in parallel void Sc::Scene::islandInsertion(PxBaseTask* /*continuation*/) { PX_PROFILE_ZONE("Sim.processNewOverlaps.islandInsertion", mContextId); const PxU32 nbShapeIdxCreated = mPreallocatedShapeInteractions.size(); for(PxU32 a = 0; a < nbShapeIdxCreated; ++a) { ShapeInteraction* interaction = getUsedPointer(mPreallocatedShapeInteractions[a]); if(interaction) { PxsContactManager* contactManager = const_cast<PxsContactManager*>(interaction->getContactManager()); const ActorSim& bs0 = interaction->getShape0().getActor(); const ActorSim& bs1 = interaction->getShape1().getActor(); const PxActorType::Enum actorTypeLargest = PxMax(bs0.getActorType(), bs1.getActorType()); PxNodeIndex nodeIndexB; if (!bs1.isStaticRigid()) nodeIndexB = bs1.getNodeIndex(); IG::Edge::EdgeType type = IG::Edge::eCONTACT_MANAGER; #if PX_SUPPORT_GPU_PHYSX if(actorTypeLargest == PxActorType::eSOFTBODY) type = IG::Edge::eSOFT_BODY_CONTACT; else if (actorTypeLargest == PxActorType::eFEMCLOTH) type = IG::Edge::eFEM_CLOTH_CONTACT; else if(isParticleSystem(actorTypeLargest)) type = IG::Edge::ePARTICLE_SYSTEM_CONTACT; else if (actorTypeLargest == PxActorType::eHAIRSYSTEM) type = IG::Edge::eHAIR_SYSTEM_CONTACT; #endif IG::EdgeIndex edgeIdx = mSimpleIslandManager->addContactManager(contactManager, bs0.getNodeIndex(), nodeIndexB, interaction, type); interaction->mEdgeIndex = edgeIdx; if(contactManager) contactManager->getWorkUnit().mEdgeIndex = edgeIdx; //If it is a soft body or particle overlap, treat it as a contact for now (we can hook up touch found/lost events later maybe) if(actorTypeLargest > PxActorType::eARTICULATION_LINK) mSimpleIslandManager->setEdgeConnected(edgeIdx, type); } } // - Wakes actors that lost touch if appropriate //processLostTouchPairs(); if(mCCDPass == 0) mSimpleIslandManager->firstPassIslandGen(); } /////////////////////////////////////////////////////////////////////////////// // PT: islandInsertion / registerContactManagers / registerInteractions / registerSceneInteractions run in parallel void Sc::Scene::registerContactManagers(PxBaseTask* /*continuation*/) { PX_PROFILE_ZONE("Sim.processNewOverlaps.registerCms", mContextId); // PT: we sometimes iterate over this array in vain (all ptrs are unused). 
Would be better // to store used pointers maybe in the overlap created tasks, and reuse these tasks here to // process only used pointers. PxvNphaseImplementationContext* nphaseContext = mLLContext->getNphaseImplementationContext(); nphaseContext->lock(); //nphaseContext->registerContactManagers(mPreallocatedContactManagers.begin(), mPreallocatedContactManagers.size(), mLLContext->getContactManagerPool().getMaxUsedIndex()); const PxU32 nbCmsCreated = mPreallocatedContactManagers.size(); for(PxU32 a = 0; a < nbCmsCreated; ++a) { PxsContactManager* cm = getUsedPointer(mPreallocatedContactManagers[a]); if(cm) { ShapeInteraction* interaction = getUsedPointer(mPreallocatedShapeInteractions[a]); nphaseContext->registerContactManager(cm, interaction, 0, 0); } } nphaseContext->unlock(); } /////////////////////////////////////////////////////////////////////////////// // PT: islandInsertion / registerContactManagers / registerInteractions / registerSceneInteractions run in parallel void Sc::Scene::registerInteractions(PxBaseTask* /*continuation*/) { PX_PROFILE_ZONE("Sim.processNewOverlaps.registerInteractions", mContextId); const PxU32 nbShapeIdxCreated = mPreallocatedShapeInteractions.size(); for(PxU32 a = 0; a < nbShapeIdxCreated; ++a) { ShapeInteraction* interaction = getUsedPointer(mPreallocatedShapeInteractions[a]); if(interaction) { // PT: this is similar to interaction->registerInActors(), which is usually called from // interaction ctors. ActorSim& actorSim0 = interaction->getActorSim0(); ActorSim& actorSim1 = interaction->getActorSim1(); actorSim0.registerInteractionInActor(interaction); actorSim1.registerInteractionInActor(interaction); // PT: the number of counted interactions is used for the sleeping system if(actorSim0.isDynamicRigid()) static_cast<BodySim*>(&actorSim0)->registerCountedInteraction(); if(actorSim1.isDynamicRigid()) static_cast<BodySim*>(&actorSim1)->registerCountedInteraction(); } } const PxU32 nbMarkersCreated = mPreallocatedInteractionMarkers.size(); for(PxU32 a = 0; a < nbMarkersCreated; ++a) { ElementInteractionMarker* interaction = getUsedPointer(mPreallocatedInteractionMarkers[a]); if(interaction) { // PT: no call to "interaction->onActivate()" here because it doesn't do anything interaction->registerInActors(); } } } /////////////////////////////////////////////////////////////////////////////// // PT: islandInsertion / registerContactManagers / registerInteractions / registerSceneInteractions run in parallel void Sc::Scene::registerSceneInteractions(PxBaseTask* /*continuation*/) { PX_PROFILE_ZONE("Sim.processNewOverlaps.registerInteractionsScene", mContextId); const PxU32 nbShapeIdxCreated = mPreallocatedShapeInteractions.size(); for(PxU32 a = 0; a < nbShapeIdxCreated; ++a) { ShapeInteraction* interaction = getUsedPointer(mPreallocatedShapeInteractions[a]); if(interaction) { registerInteraction(interaction, interaction->getContactManager() != NULL); const PxsContactManager* cm = interaction->getContactManager(); if(cm) mLLContext->setActiveContactManager(cm, cm->getCCD()); } } const PxU32 nbInteractionMarkers = mPreallocatedInteractionMarkers.size(); for(PxU32 a = 0; a < nbInteractionMarkers; ++a) { ElementInteractionMarker* interaction = getUsedPointer(mPreallocatedInteractionMarkers[a]); if(interaction) registerInteraction(interaction, false); } } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::finishBroadPhaseStage2(PxU32 ccdPass) { PX_PROFILE_ZONE("Sc::Scene::finishBroadPhase2", mContextId); 
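// Note: this function always accumulates the lost-pair stats over all element types, but it only processes the
// destroyed overlaps inline during CCD passes (ccdPass != 0). In the regular pass (ccdPass == 0) that work is
// deferred to the processLostContacts* stages further below.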
Bp::AABBManagerBase* aabbMgr = mAABBManager; PxU32 nbLostPairs = 0; for(PxU32 i=0; i<Bp::ElementType::eCOUNT; i++) { PxU32 destroyedOverlapCount; aabbMgr->getDestroyedOverlaps(Bp::ElementType::Enum(i), destroyedOverlapCount); nbLostPairs += destroyedOverlapCount; } mLLContext->getSimStats().mNbLostPairs += nbLostPairs; // PT: TODO: move this to ccd file? //KS - we need to defer processing lost overlaps until later! if (ccdPass) { PX_PROFILE_ZONE("Sim.processLostOverlaps", mContextId); PxsContactManagerOutputIterator outputs = mLLContext->getNphaseImplementationContext()->getContactManagerOutputs(); PxU32 destroyedOverlapCount; // PT: for regular shapes { Bp::AABBOverlap* PX_RESTRICT p = aabbMgr->getDestroyedOverlaps(Bp::ElementType::eSHAPE, destroyedOverlapCount); while(destroyedOverlapCount--) { ElementSim* volume0 = reinterpret_cast<ElementSim*>(p->mUserData0); ElementSim* volume1 = reinterpret_cast<ElementSim*>(p->mUserData1); //KS - this is a bit ugly. We split the "onOverlapRemoved" for shape interactions to parallelize it and that means //that we have to call each of the individual stages of the remove here. //First, we have to get the interaction pointer... ElementSimInteraction* interaction = mNPhaseCore->findInteraction(volume0, volume1); p->mPairUserData = interaction; if(interaction) { if(interaction->getType() == InteractionType::eOVERLAP || interaction->getType() == InteractionType::eMARKER) { //If it's a standard "overlap" interaction, we have to send a lost touch report, unregister it, and destroy its manager and island gen data. if(interaction->getType() == InteractionType::eOVERLAP) { ShapeInteraction* si = static_cast<ShapeInteraction*>(interaction); mNPhaseCore->lostTouchReports(si, PxU32(PairReleaseFlag::eWAKE_ON_LOST_TOUCH), NULL, 0, outputs); //We must check to see if we have a contact manager here. There is an edge case where actors could be put to //sleep after discrete simulation, prior to CCD, causing their contactManager() to be destroyed. If their bounds //also ceased overlapping, then this code will try to destroy the manager again. if(si->getContactManager()) si->destroyManager(); si->clearIslandGenData(); } unregisterInteraction(interaction); } //Then call "onOverlapRemoved" to actually free the interaction mNPhaseCore->onOverlapRemoved(volume0, volume1, ccdPass, interaction, outputs); } p++; } } // PT: for triggers { Bp::AABBOverlap* PX_RESTRICT p = aabbMgr->getDestroyedOverlaps(Bp::ElementType::eTRIGGER, destroyedOverlapCount); while(destroyedOverlapCount--) { ElementSim* volume0 = reinterpret_cast<ElementSim*>(p->mUserData0); ElementSim* volume1 = reinterpret_cast<ElementSim*>(p->mUserData1); p->mPairUserData = NULL; //KS - this is a bit ugly. 
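// Triggers skip the ShapeInteraction teardown performed for regular shapes above (lost-touch reports, contact
// manager destruction, island gen cleanup) and go straight to onOverlapRemoved.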
mNPhaseCore->onOverlapRemoved(volume0, volume1, ccdPass, NULL, outputs); p++; } } } // - Wakes actors that lost touch if appropriate processLostTouchPairs(); if (ccdPass) aabbMgr->freeBuffers(); } void Sc::Scene::postBroadPhaseStage3(PxBaseTask* /*continuation*/) { finishBroadPhaseStage2(0); PX_PROFILE_STOP_CROSSTHREAD("Basic.postBroadPhase", mContextId); PX_PROFILE_STOP_CROSSTHREAD("Basic.broadPhase", mContextId); } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::advanceStep(PxBaseTask* continuation) { PX_PROFILE_ZONE("Sim.solveQueueTasks", mContextId); if(mDt != 0.0f) { mFinalizationPhase.addDependent(*continuation); mFinalizationPhase.removeReference(); if(mPublicFlags & PxSceneFlag::eENABLE_CCD) { mUpdateCCDMultiPass.setContinuation(&mFinalizationPhase); mAfterIntegration.setContinuation(&mUpdateCCDMultiPass); mUpdateCCDMultiPass.removeReference(); } else { mAfterIntegration.setContinuation(&mFinalizationPhase); } mPostSolver.setContinuation(&mAfterIntegration); mUpdateSimulationController.setContinuation(&mPostSolver); mUpdateDynamics.setContinuation(&mUpdateSimulationController); mUpdateBodies.setContinuation(&mUpdateDynamics); mSolver.setContinuation(&mUpdateBodies); mPostIslandGen.setContinuation(&mSolver); mIslandGen.setContinuation(&mPostIslandGen); mPostNarrowPhase.addDependent(mIslandGen); mPostNarrowPhase.removeReference(); mSecondPassNarrowPhase.setContinuation(&mPostNarrowPhase); mFinalizationPhase.removeReference(); mAfterIntegration.removeReference(); mPostSolver.removeReference(); mUpdateSimulationController.removeReference(); mUpdateDynamics.removeReference(); mUpdateBodies.removeReference(); mSolver.removeReference(); mPostIslandGen.removeReference(); mIslandGen.removeReference(); mPostNarrowPhase.removeReference(); mSecondPassNarrowPhase.removeReference(); } } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::activateEdgesInternal(const IG::EdgeIndex* activatingEdges, const PxU32 nbActivatingEdges) { const IG::IslandSim& speculativeSim = mSimpleIslandManager->getSpeculativeIslandSim(); for(PxU32 i = 0; i < nbActivatingEdges; ++i) { Interaction* interaction = mSimpleIslandManager->getInteraction(activatingEdges[i]); if(interaction && !interaction->readInteractionFlag(InteractionFlag::eIS_ACTIVE)) { if(speculativeSim.getEdge(activatingEdges[i]).isActive()) { const bool proceed = activateInteraction(interaction, NULL); if(proceed && (interaction->getType() < InteractionType::eTRACKED_IN_SCENE_COUNT)) notifyInteractionActivated(interaction); } } } } void Sc::Scene::secondPassNarrowPhase(PxBaseTask* /*continuation*/) { PX_PROFILE_ZONE("Sim.secondPassNarrowPhase", mContextId); { PX_PROFILE_ZONE("Sim.postIslandGen", mContextId); mSimpleIslandManager->additionalSpeculativeActivation(); // wake interactions { PX_PROFILE_ZONE("ScScene.wakeInteractions", mContextId); const IG::IslandSim& speculativeSim = mSimpleIslandManager->getSpeculativeIslandSim(); //KS - only wake contact managers based on speculative state to trigger contact gen. Waking actors based on accurate state //should activate and joints. 
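// Note: activateEdgesInternal() (defined above) only activates an interaction if its edge is still active in the
// speculative island sim and the interaction is not already flagged eIS_ACTIVE; tracked interaction types are
// additionally re-registered through notifyInteractionActivated().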
{ //Wake speculatively based on rigid contacts, soft contacts and particle contacts activateEdgesInternal(speculativeSim.getActivatedEdges(IG::Edge::eCONTACT_MANAGER), speculativeSim.getNbActivatedEdges(IG::Edge::eCONTACT_MANAGER)); #if PX_SUPPORT_GPU_PHYSX activateEdgesInternal(speculativeSim.getActivatedEdges(IG::Edge::eSOFT_BODY_CONTACT), speculativeSim.getNbActivatedEdges(IG::Edge::eSOFT_BODY_CONTACT)); activateEdgesInternal(speculativeSim.getActivatedEdges(IG::Edge::eFEM_CLOTH_CONTACT), speculativeSim.getNbActivatedEdges(IG::Edge::eFEM_CLOTH_CONTACT)); activateEdgesInternal(speculativeSim.getActivatedEdges(IG::Edge::ePARTICLE_SYSTEM_CONTACT), speculativeSim.getNbActivatedEdges(IG::Edge::ePARTICLE_SYSTEM_CONTACT)); activateEdgesInternal(speculativeSim.getActivatedEdges(IG::Edge::eHAIR_SYSTEM_CONTACT), speculativeSim.getNbActivatedEdges(IG::Edge::eHAIR_SYSTEM_CONTACT)); #endif } } } mLLContext->secondPassUpdateContactManager(mDt, &mPostNarrowPhase); // Starts update of contact managers } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::releaseConstraints(bool endOfScene) { PX_ASSERT(mLLContext); if(mEnableStabilization) { //If stabilization is enabled, we're caching contacts for next frame if(!endOfScene) { //So we only clear memory (flip buffers) when not at the end-of-scene. //This means we clear after narrow phase completed so we can //release the previous frame's contact buffers before we enter the solve phase. mLLContext->getNpMemBlockPool().releaseContacts(); } } else if(endOfScene) { //We now have a double-buffered pool of mem blocks so we must //release both pools (which actually triggers the memory used this //frame to be released mLLContext->getNpMemBlockPool().releaseContacts(); mLLContext->getNpMemBlockPool().releaseContacts(); } } void Sc::Scene::postNarrowPhase(PxBaseTask* /*continuation*/) { setCollisionPhaseToInactive(); mHasContactDistanceChanged = false; mLLContext->fetchUpdateContactManager(); //Sync on contact gen results! 
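// Note: the contact sorting below only happens on the GPU pipeline and is skipped during CCD broad phase passes,
// mirroring the (!mCCDBp && isUsingGpuDynamicsOrBp()) guards used in preIntegrate() and updateBroadPhase().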
if(!mCCDBp && isUsingGpuDynamicsOrBp()) mSimulationController->sortContacts(); releaseConstraints(false); PX_PROFILE_STOP_CROSSTHREAD("Basic.narrowPhase", mContextId); PX_PROFILE_STOP_CROSSTHREAD("Basic.collision", mContextId); } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::processNarrowPhaseTouchEvents() { PX_PROFILE_ZONE("Sim.preIslandGen", mContextId); PxsContext* context = mLLContext; // Update touch states from LL PxU32 newTouchCount, lostTouchCount; PxU32 ccdTouchCount = 0; { PX_PROFILE_ZONE("Sim.preIslandGen.managerTouchEvents", mContextId); context->getManagerTouchEventCount(reinterpret_cast<PxI32*>(&newTouchCount), reinterpret_cast<PxI32*>(&lostTouchCount), NULL); //PX_ALLOCA(newTouches, PxvContactManagerTouchEvent, newTouchCount); //PX_ALLOCA(lostTouches, PxvContactManagerTouchEvent, lostTouchCount); mTouchFoundEvents.forceSize_Unsafe(0); mTouchFoundEvents.reserve(newTouchCount); mTouchFoundEvents.forceSize_Unsafe(newTouchCount); mTouchLostEvents.forceSize_Unsafe(0); mTouchLostEvents.reserve(lostTouchCount); mTouchLostEvents.forceSize_Unsafe(lostTouchCount); context->fillManagerTouchEvents(mTouchFoundEvents.begin(), reinterpret_cast<PxI32&>(newTouchCount), mTouchLostEvents.begin(), reinterpret_cast<PxI32&>(lostTouchCount), NULL, reinterpret_cast<PxI32&>(ccdTouchCount)); mTouchFoundEvents.forceSize_Unsafe(newTouchCount); mTouchLostEvents.forceSize_Unsafe(lostTouchCount); } context->getSimStats().mNbNewTouches = newTouchCount; context->getSimStats().mNbLostTouches = lostTouchCount; } void Sc::Scene::islandGen(PxBaseTask* continuation) { PX_PROFILE_ZONE("Sc::Scene::islandGen", mContextId); //mLLContext->runModifiableContactManagers(); //KS - moved here so that we can get up-to-date touch found/lost events in IG processNarrowPhaseTouchEvents(); // PT: could we merge processNarrowPhaseTouchEventsStage2 with processNarrowPhaseTouchEvents ? 
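// Note: the chain set up below runs the lost-patch processing first, then the found-patch processing, and only then
// allows the continuation task to start.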
mProcessFoundPatchesTask.setContinuation(continuation); mProcessLostPatchesTask.setContinuation(&mProcessFoundPatchesTask); mProcessLostPatchesTask.removeReference(); mProcessFoundPatchesTask.removeReference(); // extracting information for the contact callbacks must happen before the solver writes the post-solve // velocities and positions into the solver bodies processNarrowPhaseTouchEventsStage2(&mUpdateDynamics); } /////////////////////////////////////////////////////////////////////////////// static PX_FORCE_INLINE ShapeInteraction* getSI(PxvContactManagerTouchEvent& evt) { return reinterpret_cast<ShapeInteraction*>(evt.getCMTouchEventUserData()); } namespace { class InteractionNewTouchTask : public Cm::Task { PxvContactManagerTouchEvent* mEvents; const PxU32 mNbEvents; PxsContactManagerOutputIterator mOutputs; NPhaseCore* mNphaseCore; public: InteractionNewTouchTask(PxU64 contextID, PxvContactManagerTouchEvent* events, PxU32 nbEvents, PxsContactManagerOutputIterator& outputs, NPhaseCore* nPhaseCore) : Cm::Task (contextID), mEvents (events), mNbEvents (nbEvents), mOutputs (outputs), mNphaseCore (nPhaseCore) { } virtual const char* getName() const { return "InteractionNewTouchTask"; } virtual void runInternal() { mNphaseCore->lockReports(); for(PxU32 i = 0; i < mNbEvents; ++i) { ShapeInteraction* si = getSI(mEvents[i]); PX_ASSERT(si); mNphaseCore->managerNewTouch(*si); si->managerNewTouch(0, true, mOutputs); } mNphaseCore->unlockReports(); } private: PX_NOCOPY(InteractionNewTouchTask) }; } void Sc::Scene::processNarrowPhaseTouchEventsStage2(PxBaseTask* continuation) { PX_PROFILE_ZONE("Sc::Scene::processNarrowPhaseTouchEventsStage2", mContextId); PxvNphaseImplementationContext* ctx = mLLContext->getNphaseImplementationContext(); PxsContactManagerOutputIterator outputs = ctx->getContactManagerOutputs(); const PxU32 newTouchCount = mTouchFoundEvents.size(); { Cm::FlushPool& flushPool = mLLContext->getTaskPool(); // PT: why not a delegate task here? We seem to be creating a single InteractionNewTouchTask ? InteractionNewTouchTask* task = PX_PLACEMENT_NEW(flushPool.allocate(sizeof(InteractionNewTouchTask)), InteractionNewTouchTask)(mContextId, mTouchFoundEvents.begin(), newTouchCount, outputs, mNPhaseCore); startTask(task, continuation); } /*{ PX_PROFILE_ZONE("Sim.preIslandGen.newTouchesInteraction", mContextId); for (PxU32 i = 0; i < newTouchCount; ++i) { ShapeInteraction* si = reinterpret_cast<ShapeInteraction*>(mTouchFoundEvents[i].userData); PX_ASSERT(si); mNPhaseCore->managerNewTouch(*si); si->managerNewTouch(0, true, outputs, useAdaptiveForce); } }*/ } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::postIslandGen(PxBaseTask* continuation) { PX_PROFILE_ZONE("Sim.postIslandGen", mContextId); // // Trigger overlap processing (1) shall run in parallel with some parts of island // management (2) (connecting edges, running second island gen pass, object activation...) // For this to work without clashes, the work has to be split into pieces. Things to // keep in mind: // // (1) can deactivate trigger pairs while (2) can activate trigger pairs (both might // happen for the same pair). The active interaction tracking arrays are not thread safe // (Sc::Scene::notifyInteractionDeactivated, ::notifyInteractionActivated) plus the // natural order is to process activation first (deactivation should be based on the // state after activation). 
Thus, (1) is split into a part (1a) that does the overlap checks // and a part (1b) that checks if trigger pairs can be deactivated. (1a) will run in parallel // with (2). (1b) will run after (2). // That leaves the question of what happens to the trigger pairs activated in (2)? Should those // not get overlap processing too? The rationale for why this does not seem necessary is: // If a trigger interaction is activated, then it was inactive before. If inactive, the // overlap state can not have changed since the end of last sim step, unless: // - the user changed the position of one of the involved actors or shapes // - the user changed the geometry of one of the involved shapes // - the pair is new // However, for all these cases, the trigger interaction is marked in a way that enforces // processing and the interaction gets activated too. // PxBaseTask* setEdgesConnectedContinuationTask = continuation; PxBaseTask* concludingTriggerTask = mNPhaseCore->prepareForTriggerInteractionProcessing(continuation); if (concludingTriggerTask) { setEdgesConnectedContinuationTask = concludingTriggerTask; } mSetEdgesConnectedTask.setContinuation(setEdgesConnectedContinuationTask); mSetEdgesConnectedTask.removeReference(); // - Performs collision detection for trigger interactions if (concludingTriggerTask) { mNPhaseCore->processTriggerInteractions(*concludingTriggerTask); concludingTriggerTask->removeReference(); } } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::setEdgesConnected(PxBaseTask*) { PX_PROFILE_ZONE("Sim.preIslandGen.islandTouches", mContextId); { PX_PROFILE_ZONE("Sim.preIslandGen.setEdgesConnected", mContextId); const PxU32 newTouchCount = mTouchFoundEvents.size(); for(PxU32 i = 0; i < newTouchCount; ++i) { ShapeInteraction* si = getSI(mTouchFoundEvents[i]); // jcarius: defensive coding for OM-99507. If this assert hits, you may have hit the same issue, please report! if(si == NULL || si->getEdgeIndex() == IG_INVALID_EDGE) { outputError<PxErrorCode::eINTERNAL_ERROR>(__LINE__, "Sc::Scene::setEdgesConnected: adding an invalid edge. Skipping."); PX_ALWAYS_ASSERT(); continue; } if(!si->readFlag(ShapeInteraction::CONTACTS_RESPONSE_DISABLED)) mSimpleIslandManager->setEdgeConnected(si->getEdgeIndex(), IG::Edge::eCONTACT_MANAGER); } } mSimpleIslandManager->secondPassIslandGen(); wakeObjectsUp(); } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::solver(PxBaseTask* continuation) { PX_PROFILE_START_CROSSTHREAD("Basic.rigidBodySolver", mContextId); //Update forces per body in parallel. This can overlap with the other work in this phase. beforeSolver(continuation); PX_PROFILE_ZONE("Sim.postNarrowPhaseSecondPass", mContextId); //Narrowphase is completely finished so the streams can be swapped.
mLLContext->swapStreams(); //PxsContactManagerOutputIterator outputs = this->mLLContext->getNphaseImplementationContext()->getContactManagerOutputs(); //mNPhaseCore->processPersistentContactEvents(outputs, continuation); } /////////////////////////////////////////////////////////////////////////////// namespace { class ScBeforeSolverTask : public Cm::Task { public: static const PxU32 MaxBodiesPerTask = 256; PxNodeIndex mBodies[MaxBodiesPerTask]; PxU32 mNumBodies; const PxReal mDt; IG::SimpleIslandManager* mIslandManager; PxsSimulationController* mSimulationController; public: ScBeforeSolverTask(PxReal dt, IG::SimpleIslandManager* islandManager, PxsSimulationController* simulationController, PxU64 contextID) : Cm::Task (contextID), mDt (dt), mIslandManager (islandManager), mSimulationController (simulationController) { } virtual void runInternal() { PX_PROFILE_ZONE("Sim.ScBeforeSolverTask", mContextID); const IG::IslandSim& islandSim = mIslandManager->getAccurateIslandSim(); const PxU32 rigidBodyOffset = BodySim::getRigidBodyOffset(); PxsRigidBody* updatedBodySims[MaxBodiesPerTask]; PxU32 updatedBodyNodeIndices[MaxBodiesPerTask]; PxU32 nbUpdatedBodySims = 0; PxU32 nb = mNumBodies; const PxNodeIndex* bodies = mBodies; while(nb--) { const PxNodeIndex index = *bodies++; if(islandSim.getActiveNodeIndex(index) != PX_INVALID_NODE) { if(islandSim.getNode(index).mType == IG::Node::eRIGID_BODY_TYPE) { PxsRigidBody* body = islandSim.getRigidBody(index); BodySim* bodySim = reinterpret_cast<BodySim*>(reinterpret_cast<PxU8*>(body) - rigidBodyOffset); bodySim->updateForces(mDt, updatedBodySims, updatedBodyNodeIndices, nbUpdatedBodySims, NULL); } } } if(nbUpdatedBodySims) mSimulationController->updateBodies(updatedBodySims, updatedBodyNodeIndices, nbUpdatedBodySims); } virtual const char* getName() const { return "ScScene.beforeSolver"; } private: PX_NOCOPY(ScBeforeSolverTask) }; class ScArticBeforeSolverTask : public Cm::Task { public: ArticulationSim* const* mArticSims; const PxU32 mNumArticulations; const PxReal mDt; IG::SimpleIslandManager* mIslandManager; public: ScArticBeforeSolverTask(ArticulationSim* const* articSims, PxU32 nbArtics, PxReal dt, IG::SimpleIslandManager* islandManager, PxU64 contextID) : Cm::Task(contextID), mArticSims(articSims), mNumArticulations(nbArtics), mDt(dt), mIslandManager(islandManager) { } virtual void runInternal() { PX_PROFILE_ZONE("Sim.ScArticBeforeSolverTask", mContextID); //const IG::IslandSim& islandSim = mIslandManager->getAccurateIslandSim(); for(PxU32 a = 0; a < mNumArticulations; ++a) { ArticulationSim* PX_RESTRICT articSim = mArticSims[a]; //articSim->checkResize(); articSim->updateForces(mDt); articSim->setDirtyFlag(ArticulationSimDirtyFlag::eNONE); } } virtual const char* getName() const { return "ScScene.ScArticBeforeSolverTask"; } private: PX_NOCOPY(ScArticBeforeSolverTask) }; class ScArticBeforeSolverCCDTask : public Cm::Task { public: const PxNodeIndex* const mArticIndices; const PxU32 mNumArticulations; const PxReal mDt; IG::SimpleIslandManager* mIslandManager; public: ScArticBeforeSolverCCDTask(const PxNodeIndex* const articIndices, PxU32 nbArtics, PxReal dt, IG::SimpleIslandManager* islandManager, PxU64 contextID) : Cm::Task(contextID), mArticIndices(articIndices), mNumArticulations(nbArtics), mDt(dt), mIslandManager(islandManager) { } virtual void runInternal() { PX_PROFILE_ZONE("Sim.ScArticBeforeSolverCCDTask", mContextID); const IG::IslandSim& islandSim = mIslandManager->getAccurateIslandSim(); for(PxU32 a = 0; a < mNumArticulations; ++a) { 
ArticulationSim* articSim = islandSim.getArticulationSim(mArticIndices[a]); articSim->saveLastCCDTransform(); } } virtual const char* getName() const { return "ScScene.ScArticBeforeSolverCCDTask"; } private: PX_NOCOPY(ScArticBeforeSolverCCDTask) }; } void Sc::Scene::beforeSolver(PxBaseTask* continuation) { PX_PROFILE_ZONE("Sim.updateForces", mContextId); // Note: For contact notifications it is important that force threshold checks are done after new/lost touches have been processed // because pairs might get added to the list processed below // Atoms that passed contact force threshold ThresholdStream& thresholdStream = mDynamicsContext->getThresholdStream(); thresholdStream.clear(); const IG::IslandSim& islandSim = mSimpleIslandManager->getAccurateIslandSim(); const PxU32 nbActiveBodies = islandSim.getNbActiveNodes(IG::Node::eRIGID_BODY_TYPE); mNumDeactivatingNodes[IG::Node::eRIGID_BODY_TYPE] = 0;//islandSim.getNbNodesToDeactivate(IG::Node::eRIGID_BODY_TYPE); mNumDeactivatingNodes[IG::Node::eARTICULATION_TYPE] = 0;//islandSim.getNbNodesToDeactivate(IG::Node::eARTICULATION_TYPE); //#if PX_SUPPORT_GPU_PHYSX mNumDeactivatingNodes[IG::Node::eSOFTBODY_TYPE] = 0; mNumDeactivatingNodes[IG::Node::eFEMCLOTH_TYPE] = 0; mNumDeactivatingNodes[IG::Node::ePARTICLESYSTEM_TYPE] = 0; mNumDeactivatingNodes[IG::Node::eHAIRSYSTEM_TYPE] = 0; //#endif const PxU32 MaxBodiesPerTask = ScBeforeSolverTask::MaxBodiesPerTask; Cm::FlushPool& flushPool = mLLContext->getTaskPool(); mSimulationController->reserve(nbActiveBodies); { PxBitMap::Iterator iter(mVelocityModifyMap); // PT: TASK-CREATION TAG for (PxU32 i = iter.getNext(); i != PxBitMap::Iterator::DONE; /*i = iter.getNext()*/) { ScBeforeSolverTask* task = PX_PLACEMENT_NEW(flushPool.allocate(sizeof(ScBeforeSolverTask)), ScBeforeSolverTask(mDt, mSimpleIslandManager, mSimulationController, mContextId)); PxU32 count = 0; for(; count < MaxBodiesPerTask && i != PxBitMap::Iterator::DONE; i = iter.getNext()) { PxsRigidBody* body = islandSim.getRigidBody(PxNodeIndex(i)); bool retainsAccelerations = false; if(body) { task->mBodies[count++] = PxNodeIndex(i); retainsAccelerations = (body->mCore->mFlags & PxRigidBodyFlag::eRETAIN_ACCELERATIONS); } if(!retainsAccelerations) mVelocityModifyMap.reset(i); } task->mNumBodies = count; startTask(task, continuation); } } // PT: TASK-CREATION TAG const PxU32 nbArticsPerTask = 32; const PxU32 nbDirtyArticulations = mDirtyArticulationSims.size(); ArticulationSim* const* artiSim = mDirtyArticulationSims.getEntries(); for(PxU32 a = 0; a < nbDirtyArticulations; a += nbArticsPerTask) { const PxU32 nbToProcess = PxMin(PxU32(nbDirtyArticulations - a), nbArticsPerTask); ScArticBeforeSolverTask* task = PX_PLACEMENT_NEW(flushPool.allocate(sizeof(ScArticBeforeSolverTask)), ScArticBeforeSolverTask(artiSim + a, nbToProcess, mDt, mSimpleIslandManager, mContextId)); startTask(task, continuation); } //if the scene has ccd flag on, we should call ScArticBeforeSolverCCDTask to copy the last transform to the current transform if(mPublicFlags & PxSceneFlag::eENABLE_CCD) { //CCD const PxU32 nbActiveArticulations = islandSim.getNbActiveNodes(IG::Node::eARTICULATION_TYPE); const PxNodeIndex* const articIndices = islandSim.getActiveNodes(IG::Node::eARTICULATION_TYPE); // PT: TASK-CREATION TAG for(PxU32 a = 0; a < nbActiveArticulations; a += nbArticsPerTask) { const PxU32 nbToProcess = PxMin(PxU32(nbActiveArticulations - a), nbArticsPerTask); ScArticBeforeSolverCCDTask* task = PX_PLACEMENT_NEW(flushPool.allocate(sizeof(ScArticBeforeSolverCCDTask)), 
ScArticBeforeSolverCCDTask(articIndices + a, nbToProcess, mDt, mSimpleIslandManager, mContextId)); startTask(task, continuation); } } // AD: need to raise dirty flags serially because the PxgBodySimManager::updateArticulation() is not thread-safe. for (PxU32 a = 0; a < nbDirtyArticulations; ++a) { if (artiSim[a]->getLowLevelArticulation()->mGPUDirtyFlags & (Dy::ArticulationDirtyFlag::eDIRTY_EXT_ACCEL)) { mSimulationController->updateArticulationExtAccel(artiSim[a]->getLowLevelArticulation(), artiSim[a]->getIslandNodeIndex()); } } } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::updateBodies(PxBaseTask* continuation) { //dma bodies and articulation data to gpu mSimulationController->updateBodies(continuation); } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::updateDynamics(PxBaseTask* continuation) { PX_PROFILE_START_CROSSTHREAD("Basic.dynamics", mContextId); //Allow processLostContactsTask to run until after 2nd pass of solver completes (update bodies, run sleeping logic etc.) mProcessLostContactsTask3.setContinuation(static_cast<PxLightCpuTask*>(continuation)->getContinuation()); mProcessLostContactsTask2.setContinuation(&mProcessLostContactsTask3); mProcessLostContactsTask.setContinuation(&mProcessLostContactsTask2); ////dma bodies and shapes data to gpu //mSimulationController->updateBodiesAndShapes(); mLLContext->getNpMemBlockPool().acquireConstraintMemory(); const PxU32 maxPatchCount = mLLContext->getMaxPatchCount(); mAABBManager->reallocateChangedAABBMgActorHandleMap(getElementIDPool().getMaxID()); //mNPhaseCore->processPersistentContactEvents(outputs, continuation); PxvNphaseImplementationContext* nphase = mLLContext->getNphaseImplementationContext(); mDynamicsContext->update(*mSimpleIslandManager, continuation, &mProcessLostContactsTask, nphase, maxPatchCount, mMaxNbArticulationLinks, mDt, mGravity, mAABBManager->getChangedAABBMgActorHandleMap()); mSimpleIslandManager->clearDestroyedEdges(); mProcessLostContactsTask3.removeReference(); mProcessLostContactsTask2.removeReference(); mProcessLostContactsTask.removeReference(); } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::processLostContacts(PxBaseTask* continuation) { PX_PROFILE_ZONE("Sc::Scene::processLostContacts", mContextId); mProcessNarrowPhaseLostTouchTasks.setContinuation(continuation); mProcessNarrowPhaseLostTouchTasks.removeReference(); //mLostTouchReportsTask.setContinuation(&mProcessLostContactsTask3); mProcessNPLostTouchEvents.setContinuation(continuation); mProcessNPLostTouchEvents.removeReference(); { PX_PROFILE_ZONE("Sim.findInteractionsPtrs", mContextId); Bp::AABBManagerBase* aabbMgr = mAABBManager; PxU32 destroyedOverlapCount; Bp::AABBOverlap* PX_RESTRICT p = aabbMgr->getDestroyedOverlaps(Bp::ElementType::eSHAPE, destroyedOverlapCount); while(destroyedOverlapCount--) { ElementSim* volume0 = reinterpret_cast<ElementSim*>(p->mUserData0); ElementSim* volume1 = reinterpret_cast<ElementSim*>(p->mUserData1); // PT: this looks useless on lost pairs but it is used in processLostContacts2 and processLostContacts3 // PT: it seems very questionable to store this within the BP structures at this point. If anything // we should have stored that there when the overlap was created, and we wouldn't have to look for the // interaction here. 
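// Note: the interaction pointer cached in mPairUserData below is what the follow-up stages (processLostContacts2/3,
// lostTouchReports, unregisterInteractions, destroyManagers) consume instead of calling findInteraction() again.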
p->mPairUserData = mNPhaseCore->findInteraction(volume0, volume1); p++; } } } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::processNarrowPhaseLostTouchEventsIslands(PxBaseTask*) { PX_PROFILE_ZONE("Sc::Scene.islandLostTouches", mContextId); const PxU32 count = mTouchLostEvents.size(); for(PxU32 i=0; i <count; ++i) { ShapeInteraction* si = getSI(mTouchLostEvents[i]); mSimpleIslandManager->setEdgeDisconnected(si->getEdgeIndex()); } } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::addToLostTouchList(ActorSim& body1, ActorSim& body2) { PX_ASSERT(!body1.isStaticRigid()); PX_ASSERT(!body2.isStaticRigid()); SimpleBodyPair p = { &body1, &body2, body1.getActorID(), body2.getActorID() }; mLostTouchPairs.pushBack(p); } void Sc::Scene::processNarrowPhaseLostTouchEvents(PxBaseTask*) { PX_PROFILE_ZONE("Sc::Scene.processNarrowPhaseLostTouchEvents", mContextId); PxvNphaseImplementationContext* ctx = mLLContext->getNphaseImplementationContext(); PxsContactManagerOutputIterator outputs = ctx->getContactManagerOutputs(); const PxU32 count = mTouchLostEvents.size(); for(PxU32 i=0; i<count; ++i) { ShapeInteraction* si = getSI(mTouchLostEvents[i]); PX_ASSERT(si); if(si->managerLostTouch(0, true, outputs) && !si->readFlag(ShapeInteraction::CONTACTS_RESPONSE_DISABLED)) addToLostTouchList(si->getShape0().getActor(), si->getShape1().getActor()); } } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::processLostContacts2(PxBaseTask* continuation) { mDestroyManagersTask.setContinuation(continuation); mLostTouchReportsTask.setContinuation(&mDestroyManagersTask); mLostTouchReportsTask.removeReference(); mUnregisterInteractionsTask.setContinuation(continuation); mUnregisterInteractionsTask.removeReference(); { PX_PROFILE_ZONE("Sim.clearIslandData", mContextId); Bp::AABBManagerBase* aabbMgr = mAABBManager; PxU32 destroyedOverlapCount; { Bp::AABBOverlap* PX_RESTRICT p = aabbMgr->getDestroyedOverlaps(Bp::ElementType::eSHAPE, destroyedOverlapCount); while(destroyedOverlapCount--) { ElementSimInteraction* pair = reinterpret_cast<ElementSimInteraction*>(p->mPairUserData); if(pair) { if(pair->getType() == InteractionType::eOVERLAP) { ShapeInteraction* si = static_cast<ShapeInteraction*>(pair); si->clearIslandGenData(); } } p++; } } } mDestroyManagersTask.removeReference(); } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::lostTouchReports(PxBaseTask*) { PX_PROFILE_ZONE("Sim.lostTouchReports", mContextId); PxsContactManagerOutputIterator outputs = mLLContext->getNphaseImplementationContext()->getContactManagerOutputs(); mNPhaseCore->lockReports(); { PxU32 destroyedOverlapCount; const Bp::AABBOverlap* PX_RESTRICT p = mAABBManager->getDestroyedOverlaps(Bp::ElementType::eSHAPE, destroyedOverlapCount); while(destroyedOverlapCount--) { if(p->mPairUserData) { ElementSimInteraction* elemInteraction = reinterpret_cast<ElementSimInteraction*>(p->mPairUserData); if(elemInteraction->getType() == InteractionType::eOVERLAP) mNPhaseCore->lostTouchReports(static_cast<ShapeInteraction*>(elemInteraction), PxU32(PairReleaseFlag::eWAKE_ON_LOST_TOUCH), NULL, 0, outputs); } p++; } } mNPhaseCore->unlockReports(); } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::unregisterInteractions(PxBaseTask*) { PX_PROFILE_ZONE("Sim.unregisterInteractions", mContextId); PxU32 destroyedOverlapCount; const Bp::AABBOverlap* 
PX_RESTRICT p = mAABBManager->getDestroyedOverlaps(Bp::ElementType::eSHAPE, destroyedOverlapCount); while(destroyedOverlapCount--) { if(p->mPairUserData) { ElementSimInteraction* elemInteraction = reinterpret_cast<ElementSimInteraction*>(p->mPairUserData); if(elemInteraction->getType() == InteractionType::eOVERLAP || elemInteraction->getType() == InteractionType::eMARKER) unregisterInteraction(elemInteraction); } p++; } } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::destroyManagers(PxBaseTask*) { PX_PROFILE_ZONE("Sim.destroyManagers", mContextId); mPostThirdPassIslandGenTask.setContinuation(mProcessLostContactsTask3.getContinuation()); mSimpleIslandManager->thirdPassIslandGen(&mPostThirdPassIslandGenTask); PxU32 destroyedOverlapCount; const Bp::AABBOverlap* PX_RESTRICT p = mAABBManager->getDestroyedOverlaps(Bp::ElementType::eSHAPE, destroyedOverlapCount); while(destroyedOverlapCount--) { if(p->mPairUserData) { ElementSimInteraction* elemInteraction = reinterpret_cast<ElementSimInteraction*>(p->mPairUserData); if(elemInteraction->getType() == InteractionType::eOVERLAP) { ShapeInteraction* si = static_cast<ShapeInteraction*>(elemInteraction); if(si->getContactManager()) si->destroyManager(); } } p++; } } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::processLostContacts3(PxBaseTask* /*continuation*/) { { PX_PROFILE_ZONE("Sim.processLostOverlapsStage2", mContextId); PxsContactManagerOutputIterator outputs = mLLContext->getNphaseImplementationContext()->getContactManagerOutputs(); Bp::AABBManagerBase* aabbMgr = mAABBManager; PxU32 destroyedOverlapCount; // PT: for regular shapes { const Bp::AABBOverlap* PX_RESTRICT p = aabbMgr->getDestroyedOverlaps(Bp::ElementType::eSHAPE, destroyedOverlapCount); while(destroyedOverlapCount--) { ElementSim* volume0 = reinterpret_cast<ElementSim*>(p->mUserData0); ElementSim* volume1 = reinterpret_cast<ElementSim*>(p->mUserData1); mNPhaseCore->onOverlapRemoved(volume0, volume1, false, p->mPairUserData, outputs); p++; } } // PT: for triggers { const Bp::AABBOverlap* PX_RESTRICT p = aabbMgr->getDestroyedOverlaps(Bp::ElementType::eTRIGGER, destroyedOverlapCount); while(destroyedOverlapCount--) { ElementSim* volume0 = reinterpret_cast<ElementSim*>(p->mUserData0); ElementSim* volume1 = reinterpret_cast<ElementSim*>(p->mUserData1); mNPhaseCore->onOverlapRemoved(volume0, volume1, false, NULL, outputs); p++; } } aabbMgr->freeBuffers(); } mPostThirdPassIslandGenTask.removeReference(); } /////////////////////////////////////////////////////////////////////////////// /*static*/ bool deactivateInteraction(Interaction* interaction, const InteractionType::Enum type); void Sc::Scene::postThirdPassIslandGen(PxBaseTask* /*continuation*/) { PX_PROFILE_ZONE("Sc::Scene::postThirdPassIslandGen", mContextId); putObjectsToSleep(); { PX_PROFILE_ZONE("Sc::Scene::putInteractionsToSleep", mContextId); const IG::IslandSim& islandSim = mSimpleIslandManager->getSpeculativeIslandSim(); //KS - only deactivate contact managers based on speculative state to trigger contact gen. When the actors were deactivated based on accurate state //joints should have been deactivated. 
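// Note: the loop below walks the deactivating edges of the five contact-style edge types and deactivates the
// corresponding interactions only when the speculative island sim reports the edge as genuinely inactive; tracked
// interaction types are also removed from the active-interaction tracking via notifyInteractionDeactivated().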
const PxU32 NbTypes = 5; const IG::Edge::EdgeType types[NbTypes] = { IG::Edge::eCONTACT_MANAGER, IG::Edge::eSOFT_BODY_CONTACT, IG::Edge::eFEM_CLOTH_CONTACT, IG::Edge::ePARTICLE_SYSTEM_CONTACT, IG::Edge::eHAIR_SYSTEM_CONTACT }; for(PxU32 t = 0; t < NbTypes; ++t) { const PxU32 nbDeactivatingEdges = islandSim.getNbDeactivatingEdges(types[t]); const IG::EdgeIndex* deactivatingEdgeIds = islandSim.getDeactivatingEdges(types[t]); for(PxU32 i = 0; i < nbDeactivatingEdges; ++i) { Interaction* interaction = mSimpleIslandManager->getInteraction(deactivatingEdgeIds[i]); if(interaction && interaction->readInteractionFlag(InteractionFlag::eIS_ACTIVE)) { if(!islandSim.getEdge(deactivatingEdgeIds[i]).isActive()) { const InteractionType::Enum type = interaction->getType(); const bool proceed = deactivateInteraction(interaction, type); if(proceed && (type < InteractionType::eTRACKED_IN_SCENE_COUNT)) notifyInteractionDeactivated(interaction); } } } } } PxvNphaseImplementationContext* implCtx = mLLContext->getNphaseImplementationContext(); PxsContactManagerOutputIterator outputs = implCtx->getContactManagerOutputs(); mNPhaseCore->processPersistentContactEvents(outputs); } /////////////////////////////////////////////////////////////////////////////// //This is called after solver finish void Sc::Scene::updateSimulationController(PxBaseTask* continuation) { PX_PROFILE_ZONE("Sim.updateSimulationController", mContextId); PxsTransformCache& cache = getLowLevelContext()->getTransformCache(); Bp::BoundsArray& boundArray = getBoundsArray(); PxBitMapPinned& changedAABBMgrActorHandles = mAABBManager->getChangedAABBMgActorHandleMap(); mSimulationController->gpuDmabackData(cache, boundArray, changedAABBMgrActorHandles, mPublicFlags & PxSceneFlag::eENABLE_DIRECT_GPU_API); //for pxgdynamicscontext: copy solver body data to body core { PX_PROFILE_ZONE("Sim.updateBodyCore", mContextId); mDynamicsContext->updateBodyCore(continuation); } //mSimulationController->update(cache, boundArray, changedAABBMgrActorHandles); /*mProcessLostPatchesTask.setContinuation(&mFinalizationPhase); mProcessLostPatchesTask.removeReference();*/ } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::postSolver(PxBaseTask* /*continuation*/) { PX_PROFILE_ZONE("Sc::Scene::postSolver", mContextId); PxcNpMemBlockPool& blockPool = mLLContext->getNpMemBlockPool(); //Merge... mDynamicsContext->mergeResults(); blockPool.releaseConstraintMemory(); //Swap friction! blockPool.swapFrictionStreams(); mCcdBodies.clear(); #if PX_ENABLE_SIM_STATS mLLContext->getSimStats().mPeakConstraintBlockAllocations = blockPool.getPeakConstraintBlockCount(); #else PX_CATCH_UNDEFINED_ENABLE_SIM_STATS #endif integrateKinematicPose(); { const PxU32 size = mDirtyArticulationSims.size(); ArticulationSim* const* articSims = mDirtyArticulationSims.getEntries(); //clear the acceleration term for articulation if the application raised PxForceMode::eIMPULSE in addForce function. 
This change will ensure that articulations and rigid bodies behave the same. const float dt = mDt; for(PxU32 i=0; i<size; ++i) articSims[i]->clearAcceleration(dt); //clear the dirty articulation list mDirtyArticulationSims.clear(); } //afterIntegration(continuation); } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::constraintProjection(PxBaseTask* /*continuation*/) { } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::checkForceThresholdContactEvents(PxU32 ccdPass) { PX_PROFILE_ZONE("Sim.checkForceThresholdContactEvents", mContextId); // Note: For contact notifications it is important that force threshold checks are done after new/lost touches have been processed // because pairs might get added to the list processed below // Bodies that passed contact force threshold PxsContactManagerOutputIterator outputs = mLLContext->getNphaseImplementationContext()->getContactManagerOutputs(); ThresholdStream& thresholdStream = mDynamicsContext->getForceChangedThresholdStream(); const PxU32 nbThresholdElements = thresholdStream.size(); for(PxU32 i = 0; i< nbThresholdElements; ++i) { ThresholdStreamElement& elem = thresholdStream[i]; ShapeInteraction* si = elem.shapeInteraction; //If there is a shapeInteraction and the shapeInteraction points to a contactManager (i.e. the CM was not destroyed in parallel with the solver) if(si) { PxU32 pairFlags = si->getPairFlags(); if(pairFlags & ShapeInteraction::CONTACT_FORCE_THRESHOLD_PAIRS) { si->swapAndClearForceThresholdExceeded(); if(elem.accumulatedForce > elem.threshold * mDt) { si->raiseFlag(ShapeInteraction::FORCE_THRESHOLD_EXCEEDED_NOW); PX_ASSERT(si->hasTouch()); //If the accumulated force exceeds the threshold in the current frame, was below the threshold in the previous frame, and the user requested notification for the found event, we raise eNOTIFY_THRESHOLD_FORCE_FOUND if((!si->readFlag(ShapeInteraction::FORCE_THRESHOLD_EXCEEDED_BEFORE)) && (pairFlags & PxPairFlag::eNOTIFY_THRESHOLD_FORCE_FOUND)) si->processUserNotification(PxPairFlag::eNOTIFY_THRESHOLD_FORCE_FOUND, 0, false, ccdPass, false, outputs); else if(si->readFlag(ShapeInteraction::FORCE_THRESHOLD_EXCEEDED_BEFORE) && (pairFlags & PxPairFlag::eNOTIFY_THRESHOLD_FORCE_PERSISTS)) si->processUserNotification(PxPairFlag::eNOTIFY_THRESHOLD_FORCE_PERSISTS, 0, false, ccdPass, false, outputs); } else { //If the accumulated force is below the threshold in the current frame, exceeded the threshold in the previous frame, and the user requested notification for the lost event, we raise eNOTIFY_THRESHOLD_FORCE_LOST if(si->readFlag(ShapeInteraction::FORCE_THRESHOLD_EXCEEDED_BEFORE) && (pairFlags & PxPairFlag::eNOTIFY_THRESHOLD_FORCE_LOST)) si->processUserNotification(PxPairFlag::eNOTIFY_THRESHOLD_FORCE_LOST, 0, false, ccdPass, false, outputs); } } } } } void Sc::Scene::afterIntegration(PxBaseTask* continuation) { PX_PROFILE_ZONE("Sc::Scene::afterIntegration", mContextId); mLLContext->getTransformCache().resetChangedState(); //Reset the changed state.
If anything outside of the GPU kernels updates any shape's transforms, this will be raised again getBoundsArray().resetChangedState(); PxsTransformCache& cache = getLowLevelContext()->getTransformCache(); Bp::BoundsArray& boundArray = getBoundsArray(); { PX_PROFILE_ZONE("AfterIntegration::lockStage", mContextId); mLLContext->getLock().lock(); { PX_PROFILE_ZONE("SimController", mContextId); mSimulationController->updateScBodyAndShapeSim(cache, boundArray, continuation); } const IG::IslandSim& islandSim = mSimpleIslandManager->getAccurateIslandSim(); const PxU32 rigidBodyOffset = BodySim::getRigidBodyOffset(); const PxU32 numBodiesToDeactivate = islandSim.getNbNodesToDeactivate(IG::Node::eRIGID_BODY_TYPE); const PxNodeIndex*const deactivatingIndices = islandSim.getNodesToDeactivate(IG::Node::eRIGID_BODY_TYPE); PxU32 previousNumBodiesToDeactivate = mNumDeactivatingNodes[IG::Node::eRIGID_BODY_TYPE]; { PX_PROFILE_ZONE("AfterIntegration::deactivateStage", mContextId); PxBitMapPinned& changedAABBMgrActorHandles = mAABBManager->getChangedAABBMgActorHandleMap(); for(PxU32 i = previousNumBodiesToDeactivate; i < numBodiesToDeactivate; i++) { PxsRigidBody* rigid = islandSim.getRigidBody(deactivatingIndices[i]); BodySim* bodySim = reinterpret_cast<BodySim*>(reinterpret_cast<PxU8*>(rigid) - rigidBodyOffset); //we need to set the rigid body back to the previous pose for the deactivated objects. This emulates the previous behavior where island gen ran before the solver, ensuring that bodies that should be deactivated this frame never reach the solver. We now run the solver in parallel with island gen, so objects that should be deactivated this frame still reach the solver and are integrated. However, on the frame when they should be deactivated, we roll back to their state at the beginning of the frame to ensure that the user perceives the same behavior as before. PxsBodyCore& bodyCore = bodySim->getBodyCore().getCore(); //if(!islandSim.getNode(bodySim->getNodeIndex()).isActive()) rigid->setPose(rigid->getLastCCDTransform()); bodySim->updateCached(&changedAABBMgrActorHandles); updateBodySim(*bodySim); //The solver runs in parallel with island gen (so the solver might be solving a body that island gen identifies as a deactivated node). After we moved sleepCheck into the solver after integration, sleepChecks might have processed bodies that are now considered deactivated. This could have resulted in either freezing or unfreezing one of these bodies this frame, so we need to process those events to ensure that the SqManager's bounds arrays are consistently maintained. Also, we need to clear the frame flags for these bodies. if(rigid->isFreezeThisFrame()) bodySim->freezeTransforms(&mAABBManager->getChangedAABBMgActorHandleMap()); //KS - the IG deactivates bodies in parallel with the solver. It appears that under certain circumstances, the solver's integration (which performs sleep checks) could decide that the body is no longer a candidate for sleeping on the same frame that the island gen decides to deactivate the island that the body is contained in. This is a rare occurrence but the behavior we want to emulate is that of IG running before solver so we should therefore permit the IG to make the authoritative decision over whether the body should be active or inactive.
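// Put the deactivating body to sleep here: zero the wake counter and the linear/angular velocities and clear its
// per-frame flags so the stored body state matches the island gen's decision to deactivate it.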
bodyCore.wakeCounter = 0.0f; bodyCore.linearVelocity = PxVec3(0.0f); bodyCore.angularVelocity = PxVec3(0.0f); rigid->clearAllFrameFlags(); } } updateKinematicCached(continuation); mLLContext->getLock().unlock(); } IG::IslandSim& islandSim = mSimpleIslandManager->getAccurateIslandSim(); const PxU32 nbActiveArticulations = islandSim.getNbActiveNodes(IG::Node::eARTICULATION_TYPE); if(nbActiveArticulations) mSimulationController->updateArticulationAfterIntegration(mLLContext, mAABBManager, mCcdBodies, continuation, islandSim, mDt); const PxU32 numArticsToDeactivate = islandSim.getNbNodesToDeactivate(IG::Node::eARTICULATION_TYPE); const PxNodeIndex*const deactivatingArticIndices = islandSim.getNodesToDeactivate(IG::Node::eARTICULATION_TYPE); PxU32 previousNumArticsToDeactivate = mNumDeactivatingNodes[IG::Node::eARTICULATION_TYPE]; for(PxU32 i = previousNumArticsToDeactivate; i < numArticsToDeactivate; ++i) { ArticulationSim* artic = islandSim.getArticulationSim(deactivatingArticIndices[i]); artic->putToSleep(); } //PxU32 previousNumClothToDeactivate = mNumDeactivatingNodes[IG::Node::eFEMCLOTH_TYPE]; //const PxU32 numClothToDeactivate = islandSim.getNbNodesToDeactivate(IG::Node::eFEMCLOTH_TYPE); //const IG::NodeIndex*const deactivatingClothIndices = islandSim.getNodesToDeactivate(IG::Node::eFEMCLOTH_TYPE); //for (PxU32 i = previousNumClothToDeactivate; i < numClothToDeactivate; ++i) //{ // FEMCloth* cloth = islandSim.getLLFEMCloth(deactivatingClothIndices[i]); // mSimulationController->deactivateCloth(cloth); //} //PxU32 previousNumSoftBodiesToDeactivate = mNumDeactivatingNodes[IG::Node::eSOFTBODY_TYPE]; //const PxU32 numSoftBodiesToDeactivate = islandSim.getNbNodesToDeactivate(IG::Node::eSOFTBODY_TYPE); //const IG::NodeIndex*const deactivatingSoftBodiesIndices = islandSim.getNodesToDeactivate(IG::Node::eSOFTBODY_TYPE); //for (PxU32 i = previousNumSoftBodiesToDeactivate; i < numSoftBodiesToDeactivate; ++i) //{ // Dy::SoftBody* softbody = islandSim.getLLSoftBody(deactivatingSoftBodiesIndices[i]); // printf("after Integration: Deactivating soft body %i\n", softbody->getGpuRemapId()); // //mSimulationController->deactivateSoftbody(softbody); // softbody->getSoftBodySim()->setActive(false, 0); //} PX_PROFILE_STOP_CROSSTHREAD("Basic.dynamics", mContextId); checkForceThresholdContactEvents(0); } /////////////////////////////////////////////////////////////////////////////// void Sc::Scene::fireOnAdvanceCallback() { if(!mSimulationEventCallback) return; const PxU32 nbPosePreviews = mPosePreviewBodies.size(); if(!nbPosePreviews) return; mClientPosePreviewBodies.clear(); mClientPosePreviewBodies.reserve(nbPosePreviews); mClientPosePreviewBuffer.clear(); mClientPosePreviewBuffer.reserve(nbPosePreviews); const BodySim*const* PX_RESTRICT posePreviewBodies = mPosePreviewBodies.getEntries(); for(PxU32 i=0; i<nbPosePreviews; i++) { const BodySim& b = *posePreviewBodies[i]; if(!b.isFrozen()) { PxsBodyCore& c = b.getBodyCore().getCore(); mClientPosePreviewBodies.pushBack(static_cast<const PxRigidBody*>(b.getPxActor())); // PT:: tag: scalar transform*transform mClientPosePreviewBuffer.pushBack(c.body2World * c.getBody2Actor().getInverse()); } } const PxU32 bodyCount = mClientPosePreviewBodies.size(); if(bodyCount) mSimulationEventCallback->onAdvance(mClientPosePreviewBodies.begin(), mClientPosePreviewBuffer.begin(), bodyCount); } void Sc::Scene::finalizationPhase(PxBaseTask* /*continuation*/) { PX_PROFILE_ZONE("Sim.sceneFinalization", mContextId); if(mCCDContext) { if(mSimulationController->mGPU) // PT: skip this 
on CPU, see empty CPU function called in updateBodySim { //KS - force simulation controller to update any bodies updated by the CCD. When running GPU simulation, this would be required //to ensure that cached body states are updated const PxU32 nbUpdatedBodies = mCCDContext->getNumUpdatedBodies(); PxsRigidBody*const* updatedBodies = mCCDContext->getUpdatedBodies(); const PxU32 rigidBodyOffset = BodySim::getRigidBodyOffset(); for(PxU32 a=0; a<nbUpdatedBodies; ++a) { BodySim* bodySim = reinterpret_cast<BodySim*>(reinterpret_cast<PxU8*>(updatedBodies[a]) - rigidBodyOffset); updateBodySim(*bodySim); } } mCCDContext->clearUpdatedBodies(); } fireOnAdvanceCallback(); // placed here because it needs to be done after sleep check and after potential CCD passes checkConstraintBreakage(); // Performs breakage tests on breakable constraints PX_PROFILE_STOP_CROSSTHREAD("Basic.rigidBodySolver", mContextId); mTaskPool.clear(); mReportShapePairTimeStamp++; // important to do this before fetchResults() is called to make sure that delayed deleted actors/shapes get // separate pair entries in contact reports // AD: WIP, will be gone once we removed the warm-start with sim step. if (mPublicFlags & PxSceneFlag::eENABLE_DIRECT_GPU_API) setDirectGPUAPIInitialized(); } ///////////////////////////////////////////////////////////////////////////////
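// Illustrative sketch (disabled): the user-facing setup that feeds the force-threshold reporting path serviced by
// Sc::Scene::checkForceThresholdContactEvents() above. A filter shader requests the eNOTIFY_THRESHOLD_FORCE_* pair
// flags and a dynamic body is given a contact report threshold. The shader name, helper function and threshold
// value are illustrative assumptions, not part of this file.
#if 0
#include "PxPhysicsAPI.h"

static physx::PxFilterFlags thresholdReportFilterShader(
	physx::PxFilterObjectAttributes attributes0, physx::PxFilterData filterData0,
	physx::PxFilterObjectAttributes attributes1, physx::PxFilterData filterData1,
	physx::PxPairFlags& pairFlags, const void* constantBlock, physx::PxU32 constantBlockSize)
{
	PX_UNUSED(attributes0);		PX_UNUSED(filterData0);
	PX_UNUSED(attributes1);		PX_UNUSED(filterData1);
	PX_UNUSED(constantBlock);	PX_UNUSED(constantBlockSize);

	// Generate contacts as usual and request threshold force events for every pair.
	pairFlags = physx::PxPairFlag::eCONTACT_DEFAULT
			  | physx::PxPairFlag::eNOTIFY_THRESHOLD_FORCE_FOUND
			  | physx::PxPairFlag::eNOTIFY_THRESHOLD_FORCE_PERSISTS
			  | physx::PxPairFlag::eNOTIFY_THRESHOLD_FORCE_LOST;
	return physx::PxFilterFlag::eDEFAULT;
}

static void configureThresholdReporting(physx::PxSceneDesc& sceneDesc, physx::PxRigidDynamic& body)
{
	sceneDesc.filterShader = thresholdReportFilterShader;	// install the shader above
	body.setContactReportThreshold(100.0f);					// example threshold, tuned per application
}
#endif
///////////////////////////////////////////////////////////////////////////////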
NVIDIA-Omniverse/PhysX/physx/source/simulationcontroller/src/ScShapeCore.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #include "foundation/PxErrorCallback.h" #include "ScShapeSim.h" #include "ScPhysics.h" #include "GuConvexMesh.h" #include "GuTriangleMesh.h" #include "GuHeightField.h" #include "GuTetrahedronMesh.h" using namespace physx; using namespace Gu; using namespace Cm; using namespace Sc; static PX_FORCE_INLINE Gu::ConvexMesh& getConvexMesh(PxConvexMesh* pxcm) { return *static_cast<Gu::ConvexMesh*>(pxcm); } // PT: TODO: optimize all these data copies void GeometryUnion::set(const PxGeometry& g) { // PT: preserve this field that can be used by higher-level code to store useful data const float saved = reinterpret_cast<const PxGeometry&>(mGeometry).mTypePadding; switch(g.getType()) { case PxGeometryType::eBOX: { reinterpret_cast<PxBoxGeometry&>(mGeometry) = static_cast<const PxBoxGeometry&>(g); } break; case PxGeometryType::eCAPSULE: { reinterpret_cast<PxCapsuleGeometry&>(mGeometry) = static_cast<const PxCapsuleGeometry&>(g); } break; case PxGeometryType::eSPHERE: { reinterpret_cast<PxSphereGeometry&>(mGeometry) = static_cast<const PxSphereGeometry&>(g); reinterpret_cast<PxCapsuleGeometry&>(mGeometry).halfHeight = 0.0f; //AM: make sphere geometry also castable as a zero height capsule. 
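// (Presumably this lets low-level code paths that already handle capsules treat a sphere as a capsule with zero
// half-height instead of requiring a separate sphere-specific path.)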
} break; case PxGeometryType::ePLANE: { reinterpret_cast<PxPlaneGeometry&>(mGeometry) = static_cast<const PxPlaneGeometry&>(g); } break; case PxGeometryType::eCONVEXMESH: { reinterpret_cast<PxConvexMeshGeometry&>(mGeometry) = static_cast<const PxConvexMeshGeometry&>(g); reinterpret_cast<PxConvexMeshGeometryLL&>(mGeometry).gpuCompatible = ::getConvexMesh(get<PxConvexMeshGeometryLL>().convexMesh).isGpuCompatible(); } break; case PxGeometryType::ePARTICLESYSTEM: { reinterpret_cast<PxParticleSystemGeometry&>(mGeometry) = static_cast<const PxParticleSystemGeometry&>(g); reinterpret_cast<PxParticleSystemGeometryLL&>(mGeometry).materialsLL = MaterialIndicesStruct(); } break; case PxGeometryType::eTRIANGLEMESH: { reinterpret_cast<PxTriangleMeshGeometry&>(mGeometry) = static_cast<const PxTriangleMeshGeometry&>(g); reinterpret_cast<PxTriangleMeshGeometryLL&>(mGeometry).materialsLL = MaterialIndicesStruct(); } break; case PxGeometryType::eTETRAHEDRONMESH: { reinterpret_cast<PxTetrahedronMeshGeometry&>(mGeometry) = static_cast<const PxTetrahedronMeshGeometry&>(g); reinterpret_cast<PxTetrahedronMeshGeometryLL&>(mGeometry).materialsLL = MaterialIndicesStruct(); } break; case PxGeometryType::eHEIGHTFIELD: { reinterpret_cast<PxHeightFieldGeometry&>(mGeometry) = static_cast<const PxHeightFieldGeometry&>(g); reinterpret_cast<PxHeightFieldGeometryLL&>(mGeometry).materialsLL = MaterialIndicesStruct(); } break; case PxGeometryType::eHAIRSYSTEM: { reinterpret_cast<PxHairSystemGeometry&>(mGeometry) = static_cast<const PxHairSystemGeometry&>(g); } break; case PxGeometryType::eCUSTOM: { reinterpret_cast<PxCustomGeometry&>(mGeometry) = static_cast<const PxCustomGeometry&>(g); } break; case PxGeometryType::eGEOMETRY_COUNT: case PxGeometryType::eINVALID: PX_ALWAYS_ASSERT_MESSAGE("geometry type not handled"); break; } reinterpret_cast<PxGeometry&>(mGeometry).mTypePadding = saved; } static PxConvexMeshGeometryLL extendForLL(const PxConvexMeshGeometry& hlGeom) { PxConvexMeshGeometryLL llGeom; static_cast<PxConvexMeshGeometry&>(llGeom) = hlGeom; llGeom.gpuCompatible = hlGeom.convexMesh->isGpuCompatible(); return llGeom; } static PxTriangleMeshGeometryLL extendForLL(const PxTriangleMeshGeometry& hlGeom) { PxTriangleMeshGeometryLL llGeom; static_cast<PxTriangleMeshGeometry&>(llGeom) = hlGeom; llGeom.materialsLL = static_cast<const PxTriangleMeshGeometryLL&>(hlGeom).materialsLL; return llGeom; } static PxHeightFieldGeometryLL extendForLL(const PxHeightFieldGeometry& hlGeom) { PxHeightFieldGeometryLL llGeom; static_cast<PxHeightFieldGeometry&>(llGeom) = hlGeom; llGeom.materialsLL = static_cast<const PxHeightFieldGeometryLL&>(hlGeom).materialsLL; return llGeom; } ShapeCore::ShapeCore(const PxGeometry& geometry, PxShapeFlags shapeFlags, const PxU16* materialIndices, PxU16 materialCount, bool isExclusive, PxShapeCoreFlag::Enum softOrClothFlags) : mExclusiveSim(NULL) { mCore.mShapeCoreFlags |= PxShapeCoreFlag::eOWNS_MATERIAL_IDX_MEMORY; if(isExclusive) mCore.mShapeCoreFlags |= PxShapeCoreFlag::eIS_EXCLUSIVE; mCore.mShapeCoreFlags |= softOrClothFlags; PX_ASSERT(materialCount > 0); const PxTolerancesScale& scale = Physics::getInstance().getTolerancesScale(); mCore.mGeometry.set(geometry); mCore.setTransform(PxTransform(PxIdentity)); mCore.mContactOffset = 0.02f * scale.length; mCore.mRestOffset = 0.0f; mCore.mTorsionalRadius = 0.0f; mCore.mMinTorsionalPatchRadius = 0.0f; mCore.mShapeFlags = shapeFlags; setMaterialIndices(materialIndices, materialCount); } // PX_SERIALIZATION ShapeCore::ShapeCore(const PxEMPTY) : 
mSimulationFilterData (PxEmpty), mCore (PxEmpty), mExclusiveSim (NULL) { mCore.mShapeCoreFlags.clear(PxShapeCoreFlag::eOWNS_MATERIAL_IDX_MEMORY); } //~PX_SERIALIZATION static PX_FORCE_INLINE const MaterialIndicesStruct* getMaterials(const GeometryUnion& gu) { const PxGeometryType::Enum type = gu.getType(); if(type == PxGeometryType::eTRIANGLEMESH) return &gu.get<PxTriangleMeshGeometryLL>().materialsLL; else if(type == PxGeometryType::eHEIGHTFIELD) return &gu.get<PxHeightFieldGeometryLL>().materialsLL; else if(type == PxGeometryType::eTETRAHEDRONMESH) return &gu.get<PxTetrahedronMeshGeometryLL>().materialsLL; else if(type == PxGeometryType::ePARTICLESYSTEM) return &gu.get<PxParticleSystemGeometryLL>().materialsLL; else return NULL; } ShapeCore::~ShapeCore() { if(mCore.mShapeCoreFlags.isSet(PxShapeCoreFlag::eOWNS_MATERIAL_IDX_MEMORY)) { MaterialIndicesStruct* materialsLL = const_cast<MaterialIndicesStruct*>(getMaterials(mCore.mGeometry)); if(materialsLL) materialsLL->deallocate(); } } PxU16 Sc::ShapeCore::getNbMaterialIndices() const { const MaterialIndicesStruct* materialsLL = getMaterials(mCore.mGeometry); return materialsLL ? materialsLL->numIndices : 1; } const PxU16* Sc::ShapeCore::getMaterialIndices() const { const MaterialIndicesStruct* materialsLL = getMaterials(mCore.mGeometry); return materialsLL ? materialsLL->indices : &mCore.mMaterialIndex; } PX_FORCE_INLINE void setMaterialsHelper(MaterialIndicesStruct& materials, const PxU16* materialIndices, PxU16 materialIndexCount, PxShapeCoreFlags& shapeCoreFlags) { if(materials.numIndices < materialIndexCount) { if(materials.indices && shapeCoreFlags.isSet(PxShapeCoreFlag::eOWNS_MATERIAL_IDX_MEMORY)) materials.deallocate(); materials.allocate(materialIndexCount); shapeCoreFlags |= PxShapeCoreFlag::eOWNS_MATERIAL_IDX_MEMORY; } PxMemCopy(materials.indices, materialIndices, sizeof(PxU16)*materialIndexCount); materials.numIndices = materialIndexCount; } void ShapeCore::setMaterialIndices(const PxU16* materialIndices, PxU16 materialIndexCount) { mCore.mMaterialIndex = materialIndices[0]; MaterialIndicesStruct* materialsLL = const_cast<MaterialIndicesStruct*>(getMaterials(mCore.mGeometry)); if(materialsLL) setMaterialsHelper(*materialsLL, materialIndices, materialIndexCount, mCore.mShapeCoreFlags); } void ShapeCore::setGeometry(const PxGeometry& geom) { const PxGeometryType::Enum newGeomType = geom.getType(); // copy material related data to restore it after the new geometry has been set MaterialIndicesStruct materials; PX_ASSERT(materials.numIndices == 0); const MaterialIndicesStruct* materialsLL = getMaterials(mCore.mGeometry); if(materialsLL) materials = *materialsLL; mCore.mGeometry.set(geom); if((newGeomType == PxGeometryType::eTRIANGLEMESH) || (newGeomType == PxGeometryType::eHEIGHTFIELD) || (newGeomType == PxGeometryType::eTETRAHEDRONMESH)|| (newGeomType == PxGeometryType::ePARTICLESYSTEM)) { MaterialIndicesStruct* newMaterials = const_cast<MaterialIndicesStruct*>(getMaterials(mCore.mGeometry)); PX_ASSERT(newMaterials); if(materials.numIndices != 0) // old type was mesh type *newMaterials = materials; else { // old type was non-mesh type newMaterials->allocate(1); *newMaterials->indices = mCore.mMaterialIndex; mCore.mShapeCoreFlags |= PxShapeCoreFlag::eOWNS_MATERIAL_IDX_MEMORY; } } else if((materials.numIndices != 0) && mCore.mShapeCoreFlags.isSet(PxShapeCoreFlag::eOWNS_MATERIAL_IDX_MEMORY)) { // geometry changed to non-mesh type materials.deallocate(); } } PxShape* ShapeCore::getPxShape() { return 
Sc::gOffsetTable.convertScShape2Px(this); } const PxShape* ShapeCore::getPxShape() const { return Sc::gOffsetTable.convertScShape2Px(this); } void ShapeCore::setContactOffset(const PxReal offset) { mCore.mContactOffset = offset; ShapeSim* exclusiveSim = getExclusiveSim(); if (exclusiveSim) { exclusiveSim->getScene().updateContactDistance(exclusiveSim->getElementID(), offset); } } // PX_SERIALIZATION PX_FORCE_INLINE void exportExtraDataMaterials(PxSerializationContext& stream, const MaterialIndicesStruct& materials) { stream.alignData(PX_SERIAL_ALIGN); stream.writeData(materials.indices, sizeof(PxU16)*materials.numIndices); } void ShapeCore::exportExtraData(PxSerializationContext& stream) { const MaterialIndicesStruct* materialsLL = getMaterials(mCore.mGeometry); if(materialsLL) exportExtraDataMaterials(stream, *materialsLL); } void ShapeCore::importExtraData(PxDeserializationContext& context) { MaterialIndicesStruct* materialsLL = const_cast<MaterialIndicesStruct*>(getMaterials(mCore.mGeometry)); if(materialsLL) materialsLL->indices = context.readExtraData<PxU16, PX_SERIAL_ALIGN>(materialsLL->numIndices); } void ShapeCore::resolveMaterialReference(PxU32 materialTableIndex, PxU16 materialIndex) { if(materialTableIndex == 0) mCore.mMaterialIndex = materialIndex; MaterialIndicesStruct* materialsLL = const_cast<MaterialIndicesStruct*>(getMaterials(mCore.mGeometry)); if(materialsLL) materialsLL->indices[materialTableIndex] = materialIndex; } void ShapeCore::resolveReferences(PxDeserializationContext& context) { // Resolve geometry pointers if needed PxGeometry& geom = const_cast<PxGeometry&>(mCore.mGeometry.getGeometry()); switch(geom.getType()) { case PxGeometryType::eCONVEXMESH: { PxConvexMeshGeometryLL& convexGeom = static_cast<PxConvexMeshGeometryLL&>(geom); context.translatePxBase(convexGeom.convexMesh); // update the hullData pointer static_cast<PxConvexMeshGeometryLL&>(geom) = extendForLL(convexGeom); } break; case PxGeometryType::eHEIGHTFIELD: { PxHeightFieldGeometryLL& hfGeom = static_cast<PxHeightFieldGeometryLL&>(geom); context.translatePxBase(hfGeom.heightField); // update hf pointers static_cast<PxHeightFieldGeometryLL&>(geom) = extendForLL(hfGeom); } break; case PxGeometryType::eTRIANGLEMESH: { PxTriangleMeshGeometryLL& meshGeom = static_cast<PxTriangleMeshGeometryLL&>(geom); context.translatePxBase(meshGeom.triangleMesh); // update mesh pointers static_cast<PxTriangleMeshGeometryLL&>(geom) = extendForLL(meshGeom); } break; case PxGeometryType::eTETRAHEDRONMESH: case PxGeometryType::ePARTICLESYSTEM: case PxGeometryType::eHAIRSYSTEM: case PxGeometryType::eCUSTOM: { // implement PX_ASSERT(0); } break; case PxGeometryType::eSPHERE: case PxGeometryType::ePLANE: case PxGeometryType::eCAPSULE: case PxGeometryType::eBOX: case PxGeometryType::eGEOMETRY_COUNT: case PxGeometryType::eINVALID: break; } } PxU32 ShapeCore::getInternalShapeIndex(PxsSimulationController& simulationController) const { return simulationController.getInternalShapeIndex(getCore()); } //~PX_SERIALIZATION
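// Note on material-index memory ownership (summary of the code above):
// - the main ShapeCore constructor raises PxShapeCoreFlag::eOWNS_MATERIAL_IDX_MEMORY, so the destructor
//   deallocates the MaterialIndicesStruct storage for mesh-type geometries;
// - the PX_SERIALIZATION constructor clears that flag because the indices then live in the deserialization
//   buffer (see importExtraData);
// - setMaterialsHelper only (re)allocates when the new index count exceeds the stored count, and re-raises
//   the ownership flag when it does;
// - setGeometry carries the existing MaterialIndicesStruct over when switching between mesh-type geometries,
//   allocates a single index when switching from a non-mesh to a mesh type, and deallocates owned storage
//   when switching from a mesh type to a non-mesh type.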